/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/crc32.h>
#include <linux/mtd/nand.h>
#include <linux/jiffies.h>

#include "nodelist.h"

/* For testing write failures */
#undef BREAKME
#undef BREAKMEHEADER

#ifdef BREAKME
static unsigned char *brokenbuf;
#endif

#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )

/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES	2

struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

static struct jffs2_inodirty inodirty_nomem;

static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *this = c->wbuf_inodes;

	/* If a malloc failed, consider _everything_ dirty */
	if (this == &inodirty_nomem)
		return 1;

	/* If ino == 0, _any_ non-GC writes mean 'yes' */
	if (this && !ino)
		return 1;

	/* Look to see if the inode in question is pending in the wbuf */
	while (this) {
		if (this->ino == ino)
			return 1;
		this = this->next;
	}
	return 0;
}

static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
{
	struct jffs2_inodirty *this;

	this = c->wbuf_inodes;

	if (this != &inodirty_nomem) {
		while (this) {
			struct jffs2_inodirty *next = this->next;
			kfree(this);
			this = next;
		}
	}
	c->wbuf_inodes = NULL;
}

static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *new;

	/* Mark the superblock dirty so that kupdated will flush... */
	jffs2_erase_pending_trigger(c);

	if (jffs2_wbuf_pending_for_ino(c, ino))
		return;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
		jffs2_clear_wbuf_ino_list(c);
		c->wbuf_inodes = &inodirty_nomem;
		return;
	}
	new->ino = ino;
	new->next = c->wbuf_inodes;
	c->wbuf_inodes = new;
	return;
}

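/* Called under c->erase_completion_lock once the write buffer has been
   flushed: eraseblocks that were only waiting for that flush are taken off
   erasable_pending_wbuf_list and moved, usually straight onto
   erase_pending_list, occasionally onto erasable_list to spread the load. */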
static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
		list_del(this);
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}

#define REFILE_NOTEMPTY 0
#define REFILE_ANYWAY 1

static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	D1(printk("About to refile bad block at %08x\n", jeb->offset));

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}

	/* Adjust its size counts accordingly */
	c->wasted_size += jeb->free_size;
	c->free_size -= jeb->free_size;
	jeb->wasted_size += jeb->free_size;
	jeb->free_size = 0;

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}

/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write. */

static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref **first_raw, **raw;
	size_t retlen;
	int ret;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	spin_lock(&c->erase_completion_lock);

	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);

	/* Find the first node to be recovered, by skipping over every
	   node which ends before the wbuf starts, or which is obsolete. */
	first_raw = &jeb->first_node;
	while (*first_raw &&
	       (ref_obsolete(*first_raw) ||
		(ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
		D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
			  ref_offset(*first_raw), ref_flags(*first_raw),
			  (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
			  c->wbuf_ofs));
		first_raw = &(*first_raw)->next_phys;
	}

	if (!*first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
		spin_unlock(&c->erase_completion_lock);
		return;
	}

	start = ref_offset(*first_raw);
	end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);

	/* Find the last node to be recovered */
	raw = first_raw;
	while ((*raw)) {
		if (!ref_obsolete(*raw))
			end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);

		raw = &(*raw)->next_phys;
	}
	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */

		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		if (jffs2_cleanmarker_oob(c))
			ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
		else
			ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);

		if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
			/* ECC recovered */
			ret = 0;
		}
		if (ret || retlen != c->wbuf_ofs - start) {
			printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
		read_failed:
			first_raw = &(*first_raw)->next_phys;
			/* If this was the only node to be recovered, give up */
			if (!(*first_raw))
				return;

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(*first_raw);
		} else {
			/* Read succeeded. Copy the remaining data from the wbuf */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}
	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */


	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}
	if (end-start >= c->wbuf_pagesize) {
		/* Need to do another write immediately, but it's possible
		   that this is just because the wbuf itself is completely
		   full, and there's nothing earlier read back from the
		   flash. Hence 'buf' isn't necessarily what we're writing
		   from. */
		unsigned char *rewrite_buf = buf?:c->wbuf;
		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

		D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs));

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
					  brokenbuf, NULL, c->oobinfo);
			ret = -EIO;
		} else
#endif
		if (jffs2_cleanmarker_oob(c))
			ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
						rewrite_buf, NULL, c->oobinfo);
		else
			ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, rewrite_buf);

		if (ret || retlen != towrite) {
			/* Argh. We tried. Really we did. */
			printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			if (retlen) {
				struct jffs2_raw_node_ref *raw2;

				raw2 = jffs2_alloc_raw_node_ref();
				if (!raw2)
					return;

				raw2->flash_offset = ofs | REF_OBSOLETE;
				raw2->next_in_ino = NULL;

				jffs2_add_physical_node_ref(c, raw2, ref_totlen(c, jeb, *first_raw));
			}
			return;
		}
		printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);

		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
		kfree(buf);
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end-start);
			kfree(buf);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	if (new_jeb->first_node) {
		/* Odd, but possible with ST flash later maybe */
		new_jeb->last_node->next_phys = *first_raw;
	} else {
		new_jeb->first_node = *first_raw;
	}

	raw = first_raw;
	while (*raw) {
		uint32_t rawlen = ref_totlen(c, jeb, *raw);

		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(*raw), ref_flags(*raw), ofs));

		if (ref_obsolete(*raw)) {
			/* Shouldn't really happen much */
			new_jeb->dirty_size += rawlen;
			new_jeb->free_size -= rawlen;
			c->dirty_size += rawlen;
		} else {
			new_jeb->used_size += rawlen;
			new_jeb->free_size -= rawlen;
			jeb->dirty_size += rawlen;
			jeb->used_size -= rawlen;
			c->dirty_size += rawlen;
		}
		c->free_size -= rawlen;
		(*raw)->flash_offset = ofs | ref_flags(*raw);
		ofs += rawlen;
		new_jeb->last_node = *raw;

		raw = &(*raw)->next_phys;
	}

	/* Fix up the original jeb now it's on the bad_list */
	*first_raw = NULL;
	if (first_raw == &jeb->first_node) {
		jeb->last_node = NULL;
		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
		list_del(&jeb->list);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}
	else
		jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
}

/* Meaning of pad argument:
   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
   1: Pad, do not adjust nextblock free_size
   2: Pad, adjust nextblock free_size
*/
#define NOPAD		0
#define PAD_NOACCOUNT	1
#define PAD_ACCOUNTING	2

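/* Flush the write buffer out to flash, optionally padding the remainder of
   the page with a JFFS2_NODETYPE_PADDING node. The caller must hold
   alloc_sem (we BUG() otherwise); all callers in this file also take
   wbuf_sem for writing around the call. */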
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!down_trylock(&c->alloc_sem)) {
		up(&c->alloc_sem);
		printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	/* Claim the remaining space on the page. This happens if we have a
	   change to a new block, or if fsync forces us to flush the
	   writebuffer. If we have a switch to the next page, we will not
	   have enough remaining space for this.
	*/
	if (pad) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially. This helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		if (c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
				  &retlen, brokenbuf, NULL, c->oobinfo);
		ret = -EIO;
	} else
#endif

	if (jffs2_cleanmarker_oob(c))
		ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo);
	else
		ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);

	if (ret || retlen != c->wbuf_pagesize) {
		if (ret)
			printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret);
		else {
			printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			       retlen, c->wbuf_pagesize);
			ret = -EIO;
		}

		jffs2_wbuf_recover(c);

		return ret;
	}

	spin_lock(&c->erase_completion_lock);

	/* Adjust free size of the block if we padded. */
	if (pad) {
		struct jffs2_eraseblock *jeb;

		jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (jeb==c->nextblock)?"next":"", jeb->offset));

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) {
			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
			       c->wbuf_ofs, c->wbuf_len, c->wbuf_pagesize-c->wbuf_len);
			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
			       jeb->offset, jeb->free_size);
			BUG();
		}
		jeb->free_size -= (c->wbuf_pagesize - c->wbuf_len);
		c->free_size -= (c->wbuf_pagesize - c->wbuf_len);
		jeb->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
		c->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
	}

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	memset(c->wbuf, 0xff, c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}

/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));

	if (!c->wbuf)
		return 0;

	down(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
		up(&c->alloc_sem);
		return 0;
	}

	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		up(&c->alloc_sem);

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			down(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		down(&c->alloc_sem);
	}

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));

	up(&c->alloc_sem);
	return ret;
}

/* Pad write-buffer to end and write it, wasting space. */
int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
{
	int ret;

	if (!c->wbuf)
		return 0;

	down_write(&c->wbuf_sem);
	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	/* retry - maybe wbuf recover left some data in wbuf. */
	if (ret)
		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	up_write(&c->wbuf_sem);

	return ret;
}

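/* Gather writes through the write buffer. The incoming vectors are used to
   top up and flush the current wbuf page first; whole pages are then written
   straight to flash, and whatever tail is left stays in the wbuf, with the
   inode recorded so that a later flush can be forced for it. */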
int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino)
{
	struct kvec outvecs[3];
	uint32_t totlen = 0;
	uint32_t split_ofs = 0;
	uint32_t old_totlen;
	int ret, splitvec = -1;
	int invec, outvec;
	size_t wbuf_retlen;
	unsigned char *wbuf_ptr;
	size_t donelen = 0;
	uint32_t outvec_to = to;

	/* If not NAND flash, don't bother */
	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);

	down_write(&c->wbuf_sem);

	/* If wbuf_ofs is not initialized, set it to target address */
	if (c->wbuf_ofs == 0xFFFFFFFF) {
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
		memset(c->wbuf, 0xff, c->wbuf_pagesize);
	}

	/* Fixup the wbuf if we are moving to a new eraseblock. The checks below
	   fail for ECC'd NOR because cleanmarker == 16, so a block starts at
	   xxx0010. */
	if (jffs2_nor_ecc(c)) {
		if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) {
			c->wbuf_ofs = PAGE_DIV(to);
			c->wbuf_len = PAGE_MOD(to);
			memset(c->wbuf, 0xff, c->wbuf_pagesize);
		}
	}

	/* Sanity checks on target address.
	   It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs),
	   and it's permitted to write at the beginning of a new
	   erase block. Anything else, and you die.
	   New block starts at xxx000c (0-b = block header)
	*/
	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
		/* It's a write to a new block */
		if (c->wbuf_len) {
			D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs));
			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
			if (ret) {
				/* the underlying layer has to check wbuf_len to do the cleanup */
				D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
				*retlen = 0;
				goto exit;
			}
		}
		/* set pointer to new block */
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
	}

	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
		/* We're not writing immediately after the writebuffer. Bad. */
		printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to);
		if (c->wbuf_len)
			printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
			       c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
		BUG();
	}

	/* Note outvecs[3] above. We know count is never greater than 2 */
	if (count > 2) {
		printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count);
		BUG();
	}

	invec = 0;
	outvec = 0;

	/* Fill writebuffer first, if already in use */
	if (c->wbuf_len) {
		uint32_t invec_ofs = 0;

		/* adjust alignment offset */
		if (c->wbuf_len != PAGE_MOD(to)) {
			c->wbuf_len = PAGE_MOD(to);
			/* take care of alignment to next page */
			if (!c->wbuf_len)
				c->wbuf_len = c->wbuf_pagesize;
		}

		while (c->wbuf_len < c->wbuf_pagesize) {
			uint32_t thislen;

			if (invec == count)
				goto alldone;

			thislen = c->wbuf_pagesize - c->wbuf_len;

			if (thislen >= invecs[invec].iov_len)
				thislen = invecs[invec].iov_len;

			invec_ofs = thislen;

			memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
			c->wbuf_len += thislen;
			donelen += thislen;
			/* Get next invec, if actual did not fill the buffer */
			if (c->wbuf_len < c->wbuf_pagesize)
				invec++;
		}

		/* write buffer is full, flush buffer */
		ret = __jffs2_flush_wbuf(c, NOPAD);
		if (ret) {
			/* the underlying layer has to check wbuf_len to do the cleanup */
			D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
			/* Retlen zero to make sure our caller doesn't mark the space dirty.
			   We've already done everything that's necessary */
			*retlen = 0;
			goto exit;
		}
		outvec_to += donelen;
		c->wbuf_ofs = outvec_to;

		/* All invecs done ? */
		if (invec == count)
			goto alldone;

		/* Set up the first outvec, containing the remainder of the
		   invec we partially used */
		if (invecs[invec].iov_len > invec_ofs) {
			outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs;
			totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs;
			if (totlen > c->wbuf_pagesize) {
				splitvec = outvec;
				split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen);
			}
			outvec++;
		}
		invec++;
	}

	/* OK, now we've flushed the wbuf and the start of the bits
	   we have been asked to write, now to write the rest.... */

	/* totlen holds the amount of data still to be written */
	old_totlen = totlen;
	for ( ; invec < count; invec++,outvec++ ) {
		outvecs[outvec].iov_base = invecs[invec].iov_base;
		totlen += outvecs[outvec].iov_len = invecs[invec].iov_len;
		if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) {
			splitvec = outvec;
			split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen);
			old_totlen = totlen;
		}
	}

	/* Now the outvecs array holds all the remaining data to write */
	/* Up to splitvec,split_ofs is to be written immediately. The rest
	   goes into the (now-empty) wbuf */

	if (splitvec != -1) {
		uint32_t remainder;

		remainder = outvecs[splitvec].iov_len - split_ofs;
		outvecs[splitvec].iov_len = split_ofs;

		/* We did cross a page boundary, so we write some now */
		if (jffs2_cleanmarker_oob(c))
			ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo);
		else
			ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);

		if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
			/* At this point we have no problem, c->wbuf is empty.
			   However, refile nextblock to avoid writing again to
			   the same address. */
			struct jffs2_eraseblock *jeb;

			spin_lock(&c->erase_completion_lock);

			jeb = &c->blocks[outvec_to / c->sector_size];
			jffs2_block_refile(c, jeb, REFILE_ANYWAY);

			*retlen = 0;
			spin_unlock(&c->erase_completion_lock);
			goto exit;
		}

		donelen += wbuf_retlen;
		c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen);

		if (remainder) {
			outvecs[splitvec].iov_base += split_ofs;
			outvecs[splitvec].iov_len = remainder;
		} else {
			splitvec++;
		}

	} else {
		splitvec = 0;
	}

	/* Now splitvec points to the start of the bits we have to copy
	   into the wbuf */
	wbuf_ptr = c->wbuf;

	for ( ; splitvec < outvec; splitvec++) {
		/* Don't copy the wbuf into itself */
		if (outvecs[splitvec].iov_base == c->wbuf)
			continue;
		memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len);
		wbuf_ptr += outvecs[splitvec].iov_len;
		donelen += outvecs[splitvec].iov_len;
	}
	c->wbuf_len = wbuf_ptr - c->wbuf;

	/* If there's a remainder in the wbuf and it's a non-GC write,
	   remember that the wbuf affects this ino */
alldone:
	*retlen = donelen;

	if (jffs2_sum_active()) {
		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
		if (res)
			return res;
	}

	if (c->wbuf_len && ino)
		jffs2_wbuf_dirties_inode(c, ino);

	ret = 0;

exit:
	up_write(&c->wbuf_sem);
	return ret;
}

/*
 * This is the entry point for flash write.
 * Check whether we are working on NAND flash; if so, build a kvec and write it via writev.
 */
int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vecs[1];

	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);

	vecs[0].iov_base = (unsigned char *) buf;
	vecs[0].iov_len = len;
	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
}

/*
   Handle readback from writebuffer and ECC failure return
*/
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t orbf = 0, owbf = 0, lwbf = 0;
	int ret;

	if (!jffs2_is_writebuffered(c))
		return c->mtd->read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	if (jffs2_cleanmarker_oob(c))
		ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo);
	else
		ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG) && (*retlen == len) ) {
		printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
		       len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer; maybe
		 * we are lucky and all of it, or parts of it, are correct. We check
		 * the node; if the data is corrupted, the node check will sort it out.
		 * We keep this block; it will fail on write or erase and then we
		 * mark it bad. Or should we do that now? But we should give it a chance.
		 * Maybe we had a system crash or power loss before the ECC write or
		 * an erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf + orbf, c->wbuf + owbf, lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}

/*
 * Check whether the out-of-band area is empty
 */
int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode)
{
	unsigned char *buf;
	int ret = 0;
	int i, len, page;
	size_t retlen;
	int oob_size;

	/* allocate a buffer for all oob data in this sector */
	oob_size = c->mtd->oobsize;
	len = 4 * oob_size;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf) {
		printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
		return -ENOMEM;
	}
	/*
	 * if mode = 0 we scan for a totally empty oob area, else we have
	 * to take care of the cleanmarker in the first page of the block
	 */
	ret = jffs2_flash_read_oob(c, jeb->offset, len, &retlen, buf);
	if (ret) {
		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
		goto out;
	}

	if (retlen < len) {
		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read "
			  "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
		ret = -EIO;
		goto out;
	}

	/* Special check for first page */
	for (i = 0; i < oob_size; i++) {
		/* Yeah, we know about the cleanmarker. */
		if (mode && i >= c->fsdata_pos &&
		    i < c->fsdata_pos + c->fsdata_len)
			continue;

		if (buf[i] != 0xFF) {
			D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
				  buf[i], i, jeb->offset));
			ret = 1;
			goto out;
		}
	}

	/* we know we are aligned :) */
	for (page = oob_size; page < len; page += sizeof(long)) {
		unsigned long dat = *(unsigned long *)(&buf[page]);
		if (dat != -1) {
			ret = 1;
			goto out;
		}
	}

out:
	kfree(buf);

	return ret;
}

/*
 * Scan for a valid cleanmarker and for bad blocks
 * For virtual blocks (concatenated physical blocks) check the cleanmarker
 * only in the first page of the first physical block, but scan for bad blocks in all
 * physical blocks
 */
int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	struct jffs2_unknown_node n;
	unsigned char buf[2 * NAND_MAX_OOBSIZE];
	unsigned char *p;
	int ret, i, cnt, retval = 0;
	size_t retlen, offset;
	int oob_size;

	offset = jeb->offset;
	oob_size = c->mtd->oobsize;

	/* Loop through the physical blocks */
	for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
		/* Check first if the block is bad. */
		if (c->mtd->block_isbad (c->mtd, offset)) {
			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
			return 2;
		}
		/*
		 * We read oob data from page 0 and 1 of the block.
		 * page 0 contains cleanmarker and badblock info
		 * page 1 contains failure count of this block
		 */
		ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf);

		if (ret) {
			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
			return ret;
		}
		if (retlen < (oob_size << 1)) {
			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset));
			return -EIO;
		}

		/* Check cleanmarker only on the first physical block */
		if (!cnt) {
			n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
			n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
			n.totlen = cpu_to_je32 (8);
			p = (unsigned char *) &n;

			for (i = 0; i < c->fsdata_len; i++) {
				if (buf[c->fsdata_pos + i] != p[i]) {
					retval = 1;
				}
			}
			D1(if (retval == 1) {
				printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
				printk(KERN_WARNING "OOB at %08x was ", offset);
				for (i=0; i < oob_size; i++) {
					printk("%02x ", buf[i]);
				}
				printk("\n");
			})
		}
		offset += c->mtd->erasesize;
	}
	return retval;
}

int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	struct jffs2_unknown_node n;
	int ret;
	size_t retlen;

	n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
	n.totlen = cpu_to_je32(8);

	ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);

	if (ret) {
		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
		return ret;
	}
	if (retlen != c->fsdata_len) {
		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len));
		return ret;
	}
	return 0;
}

/*
 * On NAND we try to mark this block bad. If the block was erased more
 * than MAX_ERASE_FAILURES times, we finally mark it bad.
 * Don't care about failures. This block remains on the erase-pending
 * or badblock list as long as nobody manipulates the flash with
 * a bootloader or something like that.
 */

int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	int ret;

	/* if the count is < max, we try to write the counter to the 2nd page oob area */
	if (++jeb->bad_count < MAX_ERASE_FAILURES)
		return 0;

	if (!c->mtd->block_markbad)
		return 1; // What else can we do?

	D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
	ret = c->mtd->block_markbad(c->mtd, bad_offset);

	if (ret) {
		D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
		return ret;
	}
	return 1;
}

#define NAND_JFFS2_OOB16_FSDALEN	8

static struct nand_oobinfo jffs2_oobinfo_docecc = {
	.useecc = MTD_NANDECC_PLACE,
	.eccbytes = 6,
	.eccpos = {0,1,2,3,4,5}
};


static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
{
	struct nand_oobinfo *oinfo = &c->mtd->oobinfo;

	/* Do this only if we have an oob buffer */
	if (!c->mtd->oobsize)
		return 0;

	/* Cleanmarker is out-of-band, so inline size zero */
	c->cleanmarker_size = 0;

	/* Should we use autoplacement ? */
	if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
		D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
		/* Get the position of the free bytes */
		if (!oinfo->oobfree[0][1]) {
			printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
			return -ENOSPC;
		}
		c->fsdata_pos = oinfo->oobfree[0][0];
		c->fsdata_len = oinfo->oobfree[0][1];
		if (c->fsdata_len > 8)
			c->fsdata_len = 8;
	} else {
		/* This is just a legacy fallback and should go away soon */
		switch (c->mtd->ecctype) {
		case MTD_ECC_RS_DiskOnChip:
			printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
			c->oobinfo = &jffs2_oobinfo_docecc;
			c->fsdata_pos = 6;
			c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
			c->badblock_pos = 15;
			break;

		default:
			D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n"));
			return -EINVAL;
		}
	}
	return 0;
}

int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
{
	int res;

	/* Initialise write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = c->mtd->oobblock;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	res = jffs2_nand_set_oobinfo(c);

#ifdef BREAKME
	if (!brokenbuf)
		brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!brokenbuf) {
		kfree(c->wbuf);
		return -ENOMEM;
	}
	memset(brokenbuf, 0xdb, c->wbuf_pagesize);
#endif
	return res;
}

void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
{
	kfree(c->wbuf);
}

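/* DataFlash devices have small erase blocks (typically 528 or 1056 bytes), so
   one erase block is used as the write buffer and several of them are
   concatenated into a virtual JFFS2 eraseblock of at least 8KiB. */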
int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;		/* No cleanmarkers needed */

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);

	c->wbuf_pagesize = c->mtd->erasesize;

	/* Find a suitable c->sector_size
	 * - Not too many sectors
	 * - Sectors have to be at least 4 K + some bytes
	 * - All known dataflashes have erase sizes of 528 or 1056
	 * - we take at least 8 eraseblocks and want to have at least 8K size
	 * - The concatenation should be a power of 2
	 */

	c->sector_size = 8 * c->mtd->erasesize;

	while (c->sector_size < 8192) {
		c->sector_size *= 2;
	}

	/* It may be necessary to adjust the flash size */
	c->flash_size = c->mtd->size;

	if ((c->flash_size % c->sector_size) != 0) {
		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
		printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
	}

	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);

	return 0;
}

void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}

int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) {
	/* Cleanmarker is actually larger on the flashes */
	c->cleanmarker_size = 16;

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = c->mtd->eccsize;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	return 0;
}

void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}

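/* NOR flash programmed through a write buffer: both the cleanmarker and the
   JFFS2 write buffer cover exactly one programming region. */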
int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
	/* Cleanmarker currently occupies a whole programming region */
	c->cleanmarker_size = MTD_PROGREGION_SIZE(c->mtd);

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = MTD_PROGREGION_SIZE(c->mtd);
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	return 0;
}

void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}