/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: gc.c,v 1.155 2005/11/07 11:14:39 gleixner Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include <linux/stat.h>
#include "nodelist.h"
#include "compr.h"

static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
					  struct jffs2_inode_cache *ic,
					  struct jffs2_raw_node_ref *raw);
static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dnode *fd);
static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				      uint32_t start, uint32_t end);
static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				       uint32_t start, uint32_t end);
static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f);

/* Called with erase_completion_lock held */
static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *ret;
	struct list_head *nextlist = NULL;
	int n = jiffies % 128;

	/* Pick an eraseblock to garbage collect next. This is where we'll
	   put the clever wear-levelling algorithms. Eventually. */
	/* We possibly want to favour the dirtier blocks more when the
	   number of free blocks is low. */
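	/* With n = jiffies % 128, the comparisons below give roughly a 50/128
	   chance of preferring the erasable_list, 60/128 the very_dirty_list,
	   16/128 the dirty_list and 2/128 the clean_list, whenever the
	   preferred list is non-empty. */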
 again:
	if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
		D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n"));
		nextlist = &c->bad_used_list;
	} else if (n < 50 && !list_empty(&c->erasable_list)) {
		/* Note that most of them will have gone directly to be erased.
		   So don't favour the erasable_list _too_ much. */
		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n"));
		nextlist = &c->erasable_list;
	} else if (n < 110 && !list_empty(&c->very_dirty_list)) {
		/* Most of the time, pick one off the very_dirty list */
		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n"));
		nextlist = &c->very_dirty_list;
	} else if (n < 126 && !list_empty(&c->dirty_list)) {
		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n"));
		nextlist = &c->dirty_list;
	} else if (!list_empty(&c->clean_list)) {
		D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n"));
		nextlist = &c->clean_list;
	} else if (!list_empty(&c->dirty_list)) {
		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n"));

		nextlist = &c->dirty_list;
	} else if (!list_empty(&c->very_dirty_list)) {
		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"));
		nextlist = &c->very_dirty_list;
	} else if (!list_empty(&c->erasable_list)) {
		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"));

		nextlist = &c->erasable_list;
	} else if (!list_empty(&c->erasable_pending_wbuf_list)) {
		/* There are blocks waiting for the wbuf sync */
		D1(printk(KERN_DEBUG "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n"));
		spin_unlock(&c->erase_completion_lock);
		jffs2_flush_wbuf_pad(c);
		spin_lock(&c->erase_completion_lock);
		goto again;
	} else {
		/* Eep. All were empty */
		D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"));
		return NULL;
	}

	ret = list_entry(nextlist->next, struct jffs2_eraseblock, list);
	list_del(&ret->list);
	c->gcblock = ret;
	ret->gc_node = ret->first_node;
	if (!ret->gc_node) {
		printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset);
		BUG();
	}

	/* Have we accidentally picked a clean block with wasted space ? */
	if (ret->wasted_size) {
		D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size));
		ret->dirty_size += ret->wasted_size;
		c->wasted_size -= ret->wasted_size;
		c->dirty_size += ret->wasted_size;
		ret->wasted_size = 0;
	}

	return ret;
}

/* jffs2_garbage_collect_pass
 * Make a single attempt to progress GC. Move one node, and possibly
 * start erasing one eraseblock.
 */
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
{
	struct jffs2_inode_info *f;
	struct jffs2_inode_cache *ic;
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *raw;
	int ret = 0, inum, nlink;
	int xattr = 0;

	if (down_interruptible(&c->alloc_sem))
		return -EINTR;

	for (;;) {
		spin_lock(&c->erase_completion_lock);
		if (!c->unchecked_size)
			break;

		/* We can't start doing GC yet. We haven't finished checking
		   the node CRCs etc. Do it now. */

		/* checked_ino is protected by the alloc_sem */
		if (c->checked_ino > c->highest_ino && xattr) {
			printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n",
			       c->unchecked_size);
			jffs2_dbg_dump_block_lists_nolock(c);
			spin_unlock(&c->erase_completion_lock);
			BUG();
		}

		spin_unlock(&c->erase_completion_lock);

		if (!xattr)
			xattr = jffs2_verify_xattr(c);

		spin_lock(&c->inocache_lock);

		ic = jffs2_get_ino_cache(c, c->checked_ino++);

		if (!ic) {
			spin_unlock(&c->inocache_lock);
			continue;
		}

		if (!ic->nlink) {
			D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n",
				  ic->ino));
			spin_unlock(&c->inocache_lock);
			continue;
		}
		switch(ic->state) {
		case INO_STATE_CHECKEDABSENT:
		case INO_STATE_PRESENT:
			D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino));
			spin_unlock(&c->inocache_lock);
			continue;

		case INO_STATE_GC:
		case INO_STATE_CHECKING:
			printk(KERN_WARNING "Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state);
			spin_unlock(&c->inocache_lock);
			BUG();

		case INO_STATE_READING:
			/* We need to wait for it to finish, lest we move on
			   and trigger the BUG() above while we haven't yet
			   finished checking all its nodes */
			D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
			/* We need to come back again for the _same_ inode. We've
			   made no progress in this case, but that should be OK */
			c->checked_ino--;

			up(&c->alloc_sem);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			return 0;

		default:
			BUG();

		case INO_STATE_UNCHECKED:
			;
		}
		ic->state = INO_STATE_CHECKING;
		spin_unlock(&c->inocache_lock);

		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino));

		ret = jffs2_do_crccheck_inode(c, ic);
		if (ret)
			printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino);

		jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
		up(&c->alloc_sem);
		return ret;
	}

	/* First, work out which block we're garbage-collecting */
	jeb = c->gcblock;

	if (!jeb)
		jeb = jffs2_find_gc_block(c);

	if (!jeb) {
		D1 (printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
		spin_unlock(&c->erase_completion_lock);
		up(&c->alloc_sem);
		return -EIO;
	}

	D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size));
	D1(if (c->nextblock)
	   printk(KERN_DEBUG "Nextblock at  %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));

	if (!jeb->used_size) {
		up(&c->alloc_sem);
		goto eraseit;
	}

	raw = jeb->gc_node;

	while(ref_obsolete(raw)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
		raw = ref_next(raw);
		if (unlikely(!raw)) {
			printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n");
			printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
			       jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
			jeb->gc_node = raw;
			spin_unlock(&c->erase_completion_lock);
			up(&c->alloc_sem);
			BUG();
		}
	}
	jeb->gc_node = raw;

	D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw)));

	if (!raw->next_in_ino) {
		/* Inode-less node. Clean marker, snapshot or something like that */
		spin_unlock(&c->erase_completion_lock);
		if (ref_flags(raw) == REF_PRISTINE) {
			/* It's an unknown node with JFFS2_FEATURE_RWCOMPAT_COPY */
			jffs2_garbage_collect_pristine(c, NULL, raw);
		} else {
			/* Just mark it obsolete */
			jffs2_mark_node_obsolete(c, raw);
		}
		up(&c->alloc_sem);
		goto eraseit_lock;
	}

	ic = jffs2_raw_ref_to_ic(raw);

#ifdef CONFIG_JFFS2_FS_XATTR
	/* When 'ic' refers to an xattr_datum/xattr_ref, this node is GCed as an xattr.
	 * We can decide whether this node is inode or xattr by ic->class. */
	if (ic->class == RAWNODE_CLASS_XATTR_DATUM
	    || ic->class == RAWNODE_CLASS_XATTR_REF) {
		spin_unlock(&c->erase_completion_lock);

		if (ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw);
		} else {
			ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw);
		}
		goto release_sem;
	}
#endif

	/* We need to hold the inocache. Either the erase_completion_lock or
	   the inocache_lock is sufficient; we trade down since the inocache_lock
	   causes less contention. */
	spin_lock(&c->inocache_lock);

	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino));

	/* Three possibilities:
	   1. Inode is already in-core. We must iget it and do proper
	      updating to its fragtree, etc.
	   2. Inode is not in-core, node is REF_PRISTINE. We lock the
	      inocache to prevent a read_inode(), copy the node intact.
	   3. Inode is not in-core, node is not pristine. We must iget()
	      and take the slow path.
	*/

	switch(ic->state) {
	case INO_STATE_CHECKEDABSENT:
		/* It's been checked, but it's not currently in-core.
		   We can just copy any pristine nodes, but have
		   to prevent anyone else from doing read_inode() while
		   we're at it, so we set the state accordingly */
		if (ref_flags(raw) == REF_PRISTINE)
			ic->state = INO_STATE_GC;
		else {
			D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
				  ic->ino));
		}
		break;

	case INO_STATE_PRESENT:
		/* It's in-core. GC must iget() it. */
		break;

	case INO_STATE_UNCHECKED:
	case INO_STATE_CHECKING:
	case INO_STATE_GC:
		/* Should never happen. We should have finished checking
		   by the time we actually start doing any GC, and since
		   we're holding the alloc_sem, no other garbage collection
		   can happen.
		*/
		printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
		       ic->ino, ic->state);
		up(&c->alloc_sem);
		spin_unlock(&c->inocache_lock);
		BUG();

	case INO_STATE_READING:
		/* Someone's currently trying to read it. We must wait for
		   them to finish and then go through the full iget() route
		   to do the GC. However, sometimes read_inode() needs to get
		   the alloc_sem() (for marking nodes invalid) so we must
		   drop the alloc_sem before sleeping. */

		up(&c->alloc_sem);
		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
			  ic->ino, ic->state));
		sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
		/* And because we dropped the alloc_sem we must start again from the
		   beginning. Ponder chance of livelock here -- we're returning success
		   without actually making any progress.

		   Q: What are the chances that the inode is back in INO_STATE_READING
		   again by the time we next enter this function? And that this happens
		   enough times to cause a real delay?

		   A: Small enough that I don't care :)
		*/
		return 0;
	}

	/* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the
	   node intact, and we don't have to muck about with the fragtree etc.
	   because we know it's not in-core. If it _was_ in-core, we go through
	   all the iget() crap anyway */

	if (ic->state == INO_STATE_GC) {
		spin_unlock(&c->inocache_lock);

		ret = jffs2_garbage_collect_pristine(c, ic, raw);

		spin_lock(&c->inocache_lock);
		ic->state = INO_STATE_CHECKEDABSENT;
		wake_up(&c->inocache_wq);

		if (ret != -EBADFD) {
			spin_unlock(&c->inocache_lock);
			goto release_sem;
		}

		/* Fall through if it wanted us to, with inocache_lock held */
	}

	/* Prevent the fairly unlikely race where the gcblock is
	   entirely obsoleted by the final close of a file which had
	   the only valid nodes in the block, followed by erasure,
	   followed by freeing of the ic because the erased block(s)
	   held _all_ the nodes of that inode.... never been seen but
	   it's vaguely possible. */

	inum = ic->ino;
	nlink = ic->nlink;
	spin_unlock(&c->inocache_lock);

	f = jffs2_gc_fetch_inode(c, inum, nlink);
	if (IS_ERR(f)) {
		ret = PTR_ERR(f);
		goto release_sem;
	}
	if (!f) {
		ret = 0;
		goto release_sem;
	}

	ret = jffs2_garbage_collect_live(c, jeb, raw, f);

	jffs2_gc_release_inode(c, f);

 release_sem:
	up(&c->alloc_sem);

 eraseit_lock:
	/* If we've finished this block, start it erasing */
	spin_lock(&c->erase_completion_lock);

 eraseit:
	if (c->gcblock && !c->gcblock->used_size) {
		D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset));
		/* We're GC'ing an empty block? */
		list_add_tail(&c->gcblock->list, &c->erase_pending_list);
		c->gcblock = NULL;
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}
	spin_unlock(&c->erase_completion_lock);

	return ret;
}

static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dnode *fn = NULL;
	struct jffs2_full_dirent *fd;
	uint32_t start = 0, end = 0, nrfrags = 0;
	int ret = 0;

	down(&f->sem);

	/* Now we have the lock for this inode. Check that it's still the one at the head
	   of the list. */

	spin_lock(&c->erase_completion_lock);

	if (c->gcblock != jeb) {
		spin_unlock(&c->erase_completion_lock);
		D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n"));
		goto upnout;
	}
	if (ref_obsolete(raw)) {
		spin_unlock(&c->erase_completion_lock);
		D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n"));
		/* They'll call again */
		goto upnout;
	}
	spin_unlock(&c->erase_completion_lock);

	/* OK. Looks safe. And nobody can get us now because we have the semaphore. Move the block */
	if (f->metadata && f->metadata->raw == raw) {
		fn = f->metadata;
		ret = jffs2_garbage_collect_metadata(c, jeb, f, fn);
		goto upnout;
	}

	/* FIXME. Read node and do lookup? */
	for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
		if (frag->node && frag->node->raw == raw) {
			fn = frag->node;
			end = frag->ofs + frag->size;
			if (!nrfrags++)
				start = frag->ofs;
			if (nrfrags == frag->node->frags)
				break; /* We've found them all */
		}
	}
	if (fn) {
		if (ref_flags(raw) == REF_PRISTINE) {
			ret = jffs2_garbage_collect_pristine(c, f->inocache, raw);
			if (!ret) {
				/* Urgh. Return it sensibly. */
				frag->node->raw = f->inocache->nodes;
			}
			if (ret != -EBADFD)
				goto upnout;
		}
		/* We found a datanode. Do the GC */
		if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
			/* It crosses a page boundary. Therefore, it must be a hole. */
			ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
		} else {
			/* It could still be a hole. But we GC the page this way anyway */
			ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end);
		}
		goto upnout;
	}

	/* Wasn't a dnode. Try dirent */
	for (fd = f->dents; fd; fd=fd->next) {
		if (fd->raw == raw)
			break;
	}

	if (fd && fd->ino) {
		ret = jffs2_garbage_collect_dirent(c, jeb, f, fd);
	} else if (fd) {
		ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd);
	} else {
		printk(KERN_WARNING "Raw node at 0x%08x wasn't in node lists for ino #%u\n",
		       ref_offset(raw), f->inocache->ino);
		if (ref_obsolete(raw)) {
			printk(KERN_WARNING "But it's obsolete so we don't mind too much\n");
		} else {
			jffs2_dbg_dump_node(c, ref_offset(raw));
			BUG();
		}
	}
 upnout:
	up(&f->sem);

	return ret;
}

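/* Copy a REF_PRISTINE node verbatim to freshly reserved space, verifying its
 * CRCs on the way. Returns -EBADFD if the node does not fit untouched or
 * fails a CRC check, in which case the caller falls back to the normal
 * split-and-rewrite GC path.
 */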
static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
					  struct jffs2_inode_cache *ic,
					  struct jffs2_raw_node_ref *raw)
{
	union jffs2_node_union *node;
	size_t retlen;
	int ret;
	uint32_t phys_ofs, alloclen;
	uint32_t crc, rawlen;
	int retried = 0;

	D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw)));

	alloclen = rawlen = ref_totlen(c, c->gcblock, raw);

	/* Ask for a small amount of space (or the totlen if smaller) because we
	   don't want to force wastage of the end of a block if splitting would
	   work. */
	if (ic && alloclen > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)
		alloclen = sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN;

	ret = jffs2_reserve_space_gc(c, alloclen, &alloclen, rawlen);
	/* 'rawlen' is not the exact summary size; it is only an upper estimation */

	if (ret)
		return ret;

	if (alloclen < rawlen) {
		/* Doesn't fit untouched. We'll go the old route and split it */
		return -EBADFD;
	}

	node = kmalloc(rawlen, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node);
	if (!ret && retlen != rawlen)
		ret = -EIO;
	if (ret)
		goto out_node;

	crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4);
	if (je32_to_cpu(node->u.hdr_crc) != crc) {
		printk(KERN_WARNING "Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
		goto bail;
	}

	switch(je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		crc = crc32(0, node, sizeof(node->i)-8);
		if (je32_to_cpu(node->i.node_crc) != crc) {
			printk(KERN_WARNING "Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			       ref_offset(raw), je32_to_cpu(node->i.node_crc), crc);
			goto bail;
		}

		if (je32_to_cpu(node->i.dsize)) {
			crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize));
			if (je32_to_cpu(node->i.data_crc) != crc) {
				printk(KERN_WARNING "Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
				       ref_offset(raw), je32_to_cpu(node->i.data_crc), crc);
				goto bail;
			}
		}
		break;

	case JFFS2_NODETYPE_DIRENT:
		crc = crc32(0, node, sizeof(node->d)-8);
		if (je32_to_cpu(node->d.node_crc) != crc) {
			printk(KERN_WARNING "Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			       ref_offset(raw), je32_to_cpu(node->d.node_crc), crc);
			goto bail;
		}

		if (node->d.nsize) {
			crc = crc32(0, node->d.name, node->d.nsize);
			if (je32_to_cpu(node->d.name_crc) != crc) {
				printk(KERN_WARNING "Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
				       ref_offset(raw), je32_to_cpu(node->d.name_crc), crc);
				goto bail;
			}
		}
		break;
	default:
		/* If it's inode-less, we don't _know_ what it is. Just copy it intact */
		if (ic) {
			printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
			       ref_offset(raw), je16_to_cpu(node->u.nodetype));
			goto bail;
		}
	}

	/* OK, all the CRCs are good; this node can just be copied as-is. */
 retry:
	phys_ofs = write_ofs(c);

	ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);

	if (ret || (retlen != rawlen)) {
		printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
		       rawlen, phys_ofs, ret, retlen);
		if (retlen) {
			jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL);
		} else {
			printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", phys_ofs);
		}
		if (!retried) {
			/* Try to reallocate space and retry */
			uint32_t dummy;
			struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size];

			retried = 1;

			D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n"));

			jffs2_dbg_acct_sanity_check(c, jeb);
			jffs2_dbg_acct_paranoia_check(c, jeb);

			ret = jffs2_reserve_space_gc(c, rawlen, &dummy, rawlen);
			/* this is not the exact summary size of it,
			   it is only an upper estimation */

			if (!ret) {
				D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs));

				jffs2_dbg_acct_sanity_check(c, jeb);
				jffs2_dbg_acct_paranoia_check(c, jeb);

				goto retry;
			}
			D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
		}

		if (!ret)
			ret = -EIO;
		goto out_node;
	}
	jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic);

	jffs2_mark_node_obsolete(c, raw);
	D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw)));

 out_node:
	kfree(node);
	return ret;
 bail:
	ret = -EBADFD;
	goto out_node;
}

static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
{
	struct jffs2_full_dnode *new_fn;
	struct jffs2_raw_inode ri;
	struct jffs2_node_frag *last_frag;
	union jffs2_device_node dev;
	char *mdata = NULL, mdatalen = 0;
	uint32_t alloclen, ilen;
	int ret;

	if (S_ISBLK(JFFS2_F_I_MODE(f)) ||
	    S_ISCHR(JFFS2_F_I_MODE(f)) ) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f));
		mdata = (char *)&dev;
		D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen));
	} else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
		mdatalen = fn->size;
		mdata = kmalloc(fn->size, GFP_KERNEL);
		if (!mdata) {
			printk(KERN_WARNING "kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
		if (ret) {
			printk(KERN_WARNING "read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret);
			kfree(mdata);
			return ret;
		}
		D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of symlink target\n", mdatalen));

	}

	ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen,
				     JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
		       sizeof(ri) + mdatalen, ret);
		goto out;
	}

	last_frag = frag_last(&f->fragtree);
	if (last_frag)
		/* Fetch the inode length from the fragtree rather than
		 * from i_size since i_size may not have been updated yet */
		ilen = last_frag->ofs + last_frag->size;
	else
		ilen = JFFS2_F_I_SIZE(f);

	memset(&ri, 0, sizeof(ri));
	ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri.totlen = cpu_to_je32(sizeof(ri) + mdatalen);
	ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

	ri.ino = cpu_to_je32(f->inocache->ino);
	ri.version = cpu_to_je32(++f->highest_version);
	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
	ri.isize = cpu_to_je32(ilen);
	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
	ri.offset = cpu_to_je32(0);
	ri.csize = cpu_to_je32(mdatalen);
	ri.dsize = cpu_to_je32(mdatalen);
	ri.compr = JFFS2_COMPR_NONE;
	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
	ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));

	new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC);

	if (IS_ERR(new_fn)) {
		printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
		ret = PTR_ERR(new_fn);
		goto out;
	}
	jffs2_mark_node_obsolete(c, fn->raw);
	jffs2_free_full_dnode(fn);
	f->metadata = new_fn;
 out:
	if (S_ISLNK(JFFS2_F_I_MODE(f)))
		kfree(mdata);
	return ret;
}

static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent *new_fd;
	struct jffs2_raw_dirent rd;
	uint32_t alloclen;
	int ret;

	rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rd.nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
	rd.nsize = strlen(fd->name);
	rd.totlen = cpu_to_je32(sizeof(rd) + rd.nsize);
	rd.hdr_crc = cpu_to_je32(crc32(0, &rd, sizeof(struct jffs2_unknown_node)-4));

	rd.pino = cpu_to_je32(f->inocache->ino);
	rd.version = cpu_to_je32(++f->highest_version);
	rd.ino = cpu_to_je32(fd->ino);
	/* If the times on this inode were set by explicit utime() they can be different,
	   so refrain from splatting them. */
	if (JFFS2_F_I_MTIME(f) == JFFS2_F_I_CTIME(f))
		rd.mctime = cpu_to_je32(JFFS2_F_I_MTIME(f));
	else
		rd.mctime = cpu_to_je32(0);
	rd.type = fd->type;
	rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8));
	rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize));

	ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen,
				     JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize));
	if (ret) {
		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
		       sizeof(rd)+rd.nsize, ret);
		return ret;
	}
	new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC);

	if (IS_ERR(new_fd)) {
		printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd));
		return PTR_ERR(new_fd);
	}
	jffs2_add_fd_to_list(c, new_fd, &f->dents);
	return 0;
}

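/* GC a deletion dirent. On media where obsolete nodes cannot be marked in
 * place (e.g. NAND), first check whether this deletion dirent still
 * obsoletes a live dirent of the same name elsewhere on the flash; if so,
 * write out a fresh deletion dirent. Otherwise just obsolete it and drop
 * it from the inode's dirent list.
 */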
static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent **fdp = &f->dents;
	int found = 0;

	/* On a medium where we can't actually mark nodes obsolete
	   permanently, such as NAND flash, we need to work out
	   whether this deletion dirent is still needed to actively
	   delete a 'real' dirent with the same name that's still
	   somewhere else on the flash. */
	if (!jffs2_can_mark_obsolete(c)) {
		struct jffs2_raw_dirent *rd;
		struct jffs2_raw_node_ref *raw;
		int ret;
		size_t retlen;
		int name_len = strlen(fd->name);
		uint32_t name_crc = crc32(0, fd->name, name_len);
		uint32_t rawlen = ref_totlen(c, jeb, fd->raw);

		rd = kmalloc(rawlen, GFP_KERNEL);
		if (!rd)
			return -ENOMEM;

		/* Prevent the erase code from nicking the obsolete node refs while
		   we're looking at them. I really don't like this extra lock but
		   can't see any alternative. Suggestions on a postcard to... */
		down(&c->erase_free_sem);

		for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {

			/* We only care about obsolete ones */
			if (!(ref_obsolete(raw)))
				continue;

			/* Any dirent with the same name is going to have the same length... */
			if (ref_totlen(c, NULL, raw) != rawlen)
				continue;

			/* Doesn't matter if there's one in the same erase block. We're going to
			   delete it too at the same time. */
			if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset))
				continue;

			D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw)));

			/* This is an obsolete node belonging to the same directory, and it's of the right
			   length. We need to take a closer look...*/
			ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
			if (ret) {
				printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading obsolete node at %08x\n", ret, ref_offset(raw));
				/* If we can't read it, we don't need to continue to obsolete it. Continue */
				continue;
			}
			if (retlen != rawlen) {
				printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
				       retlen, rawlen, ref_offset(raw));
				continue;
			}

			if (je16_to_cpu(rd->nodetype) != JFFS2_NODETYPE_DIRENT)
				continue;

			/* If the name CRC doesn't match, skip */
			if (je32_to_cpu(rd->name_crc) != name_crc)
				continue;

			/* If the name length doesn't match, or it's another deletion dirent, skip */
			if (rd->nsize != name_len || !je32_to_cpu(rd->ino))
				continue;

			/* OK, check the actual name now */
			if (memcmp(rd->name, fd->name, name_len))
				continue;

			/* OK. The name really does match. There really is still an older node on
			   the flash which our deletion dirent obsoletes. So we have to write out
			   a new deletion dirent to replace it */
			up(&c->erase_free_sem);

			D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
				  ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
			kfree(rd);

			return jffs2_garbage_collect_dirent(c, jeb, f, fd);
		}

		up(&c->erase_free_sem);
		kfree(rd);
	}

	/* FIXME: If we're deleting a dirent which contains the current mtime and ctime,
	   we should update the metadata node with those times accordingly */

	/* No need for it any more. Just mark it obsolete and remove it from the list */
	while (*fdp) {
		if ((*fdp) == fd) {
			found = 1;
			*fdp = fd->next;
			break;
		}
		fdp = &(*fdp)->next;
	}
	if (!found) {
		printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino);
	}
	jffs2_mark_node_obsolete(c, fd->raw);
	jffs2_free_full_dirent(fd);
	return 0;
}

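/* Write a replacement hole node covering [start, end) for a node being GC'd.
 * If the old node was only partially obsoleted by later writes it is re-read
 * and rewritten with the _same_ version number; otherwise a fresh
 * JFFS2_COMPR_ZERO node is written with a new version.
 */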
static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				      uint32_t start, uint32_t end)
{
	struct jffs2_raw_inode ri;
	struct jffs2_node_frag *frag;
	struct jffs2_full_dnode *new_fn;
	uint32_t alloclen, ilen;
	int ret;

	D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
		  f->inocache->ino, start, end));

	memset(&ri, 0, sizeof(ri));

	if(fn->frags > 1) {
		size_t readlen;
		uint32_t crc;
		/* It's partially obsoleted by a later write. So we have to
		   write it out again with the _same_ version as before */
		ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
		if (readlen != sizeof(ri) || ret) {
			printk(KERN_WARNING "Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", ret, readlen);
			goto fill;
		}
		if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) {
			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
			       ref_offset(fn->raw),
			       je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
			return -EIO;
		}
		if (je32_to_cpu(ri.totlen) != sizeof(ri)) {
			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
			       ref_offset(fn->raw),
			       je32_to_cpu(ri.totlen), sizeof(ri));
			return -EIO;
		}
		crc = crc32(0, &ri, sizeof(ri)-8);
		if (crc != je32_to_cpu(ri.node_crc)) {
			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
			       ref_offset(fn->raw),
			       je32_to_cpu(ri.node_crc), crc);
			/* FIXME: We could possibly deal with this by writing new holes for each frag */
			printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
			       start, end, f->inocache->ino);
			goto fill;
		}
		if (ri.compr != JFFS2_COMPR_ZERO) {
			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw));
			printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
			       start, end, f->inocache->ino);
			goto fill;
		}
	} else {
	fill:
		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri));
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.offset = cpu_to_je32(start);
		ri.dsize = cpu_to_je32(end - start);
		ri.csize = cpu_to_je32(0);
		ri.compr = JFFS2_COMPR_ZERO;
	}

	frag = frag_last(&f->fragtree);
	if (frag)
		/* Fetch the inode length from the fragtree rather than
		 * from i_size since i_size may not have been updated yet */
		ilen = frag->ofs + frag->size;
	else
		ilen = JFFS2_F_I_SIZE(f);

	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
	ri.isize = cpu_to_je32(ilen);
	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
	ri.data_crc = cpu_to_je32(0);
	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));

	ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen,
				     JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
		       sizeof(ri), ret);
		return ret;
	}
	new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC);

	if (IS_ERR(new_fn)) {
		printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn));
		return PTR_ERR(new_fn);
	}
	if (je32_to_cpu(ri.version) == f->highest_version) {
		jffs2_add_full_dnode_to_inode(c, f, new_fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		return 0;
	}

	/*
	 * We should only get here in the case where the node we are
	 * replacing had more than one frag, so we kept the same version
	 * number as before. (Except in case of error -- see 'goto fill;'
	 * above.)
	 */
	D1(if(unlikely(fn->frags <= 1)) {
		printk(KERN_WARNING "jffs2_garbage_collect_hole: Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
		       fn->frags, je32_to_cpu(ri.version), f->highest_version,
		       je32_to_cpu(ri.ino));
	});

	/* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
	mark_ref_normal(new_fn->raw);

	for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs);
	     frag; frag = frag_next(frag)) {
		if (frag->ofs > fn->size + fn->ofs)
			break;
		if (frag->node == fn) {
			frag->node = new_fn;
			new_fn->frags++;
			fn->frags--;
		}
	}
	if (fn->frags) {
		printk(KERN_WARNING "jffs2_garbage_collect_hole: Old node still has frags!\n");
		BUG();
	}
	if (!new_fn->frags) {
		printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n");
		BUG();
	}

	jffs2_mark_node_obsolete(c, fn->raw);
	jffs2_free_full_dnode(fn);

	return 0;
}

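/* GC a data node: read the affected page through the page cache, possibly
 * expand [start, end) to merge with logically adjacent frags whose blocks
 * are already dirty (or are the gcblock itself), then recompress and write
 * out replacement dnodes covering the range.
 */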
static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				       uint32_t start, uint32_t end)
{
	struct jffs2_full_dnode *new_fn;
	struct jffs2_raw_inode ri;
	uint32_t alloclen, offset, orig_end, orig_start;
	int ret = 0;
	unsigned char *comprbuf = NULL, *writebuf;
	unsigned long pg;
	unsigned char *pg_ptr;

	memset(&ri, 0, sizeof(ri));

	D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
		  f->inocache->ino, start, end));

	orig_end = end;
	orig_start = start;

	if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) {
		/* Attempt to do some merging. But only expand to cover logically
		   adjacent frags if the block containing them is already considered
		   to be dirty. Otherwise we end up with GC just going round in
		   circles dirtying the nodes it already wrote out, especially
		   on NAND where we have small eraseblocks and hence a much higher
		   chance of nodes having to be split to cross boundaries. */

		struct jffs2_node_frag *frag;
		uint32_t min, max;

		min = start & ~(PAGE_CACHE_SIZE-1);
		max = min + PAGE_CACHE_SIZE;

		frag = jffs2_lookup_node_frag(&f->fragtree, start);

		/* BUG_ON(!frag) but that'll happen anyway... */

		BUG_ON(frag->ofs != start);

		/* First grow down... */
		while((frag = frag_prev(frag)) && frag->ofs >= min) {

			/* If the previous frag doesn't even reach the beginning, there's
			   excessive fragmentation. Just merge. */
			if (frag->ofs > min) {
				D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n",
					  frag->ofs, frag->ofs+frag->size));
				start = frag->ofs;
				continue;
			}
			/* OK. This frag holds the first byte of the page. */
			if (!frag->node || !frag->node->raw) {
				D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
					  frag->ofs, frag->ofs+frag->size));
				break;
			} else {

				/* OK, it's a frag which extends to the beginning of the page. Does it live
				   in a block which is still considered clean? If so, don't obsolete it.
				   If not, cover it anyway. */

				struct jffs2_raw_node_ref *raw = frag->node->raw;
				struct jffs2_eraseblock *jeb;

				jeb = &c->blocks[raw->flash_offset / c->sector_size];

				if (jeb == c->gcblock) {
					D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
						  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
					start = frag->ofs;
					break;
				}
				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
					D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
						  frag->ofs, frag->ofs+frag->size, jeb->offset));
					break;
				}

				D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
					  frag->ofs, frag->ofs+frag->size, jeb->offset));
				start = frag->ofs;
				break;
			}
		}

		/* ... then up */

		/* Find last frag which is actually part of the node we're to GC. */
		frag = jffs2_lookup_node_frag(&f->fragtree, end-1);

		while((frag = frag_next(frag)) && frag->ofs+frag->size <= max) {

			/* If the next frag doesn't even reach the end of the page, there's lots
			   of fragmentation. Just merge. */
			if (frag->ofs+frag->size < max) {
				D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n",
					  frag->ofs, frag->ofs+frag->size));
				end = frag->ofs + frag->size;
				continue;
			}

			if (!frag->node || !frag->node->raw) {
				D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
					  frag->ofs, frag->ofs+frag->size));
				break;
			} else {

				/* OK, it's a frag which extends to the end of the page. Does it live
				   in a block which is still considered clean? If so, don't obsolete it.
				   If not, cover it anyway. */

				struct jffs2_raw_node_ref *raw = frag->node->raw;
				struct jffs2_eraseblock *jeb;

				jeb = &c->blocks[raw->flash_offset / c->sector_size];

				if (jeb == c->gcblock) {
					D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
						  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
					end = frag->ofs + frag->size;
					break;
				}
				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
					D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
						  frag->ofs, frag->ofs+frag->size, jeb->offset));
					break;
				}

				D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
					  frag->ofs, frag->ofs+frag->size, jeb->offset));
				end = frag->ofs + frag->size;
				break;
			}
		}
		D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
			  orig_start, orig_end, start, end));

		D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size));
		BUG_ON(end < orig_end);
		BUG_ON(start > orig_start);
	}

	/* First, use readpage() to read the appropriate page into the page cache */
	/* Q: What happens if we actually try to GC the _same_ page for which commit_write()
	 *    triggered garbage collection in the first place?
	 * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
	 *    page OK. We'll actually write it out again in commit_write, which is a little
	 *    suboptimal, but at least we're correct.
	 */
	pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);

	if (IS_ERR(pg_ptr)) {
		printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg_ptr));
		return PTR_ERR(pg_ptr);
	}

	offset = start;
	while(offset < orig_end) {
		uint32_t datalen;
		uint32_t cdatalen;
		uint16_t comprtype = JFFS2_COMPR_NONE;

		ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN,
					     &alloclen, JFFS2_SUMMARY_INODE_SIZE);

		if (ret) {
			printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
			       sizeof(ri) + JFFS2_MIN_DATA_LEN, ret);
			break;
		}
		cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
		datalen = end - offset;

		writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));

		comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);

		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri) + cdatalen);
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
		ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
		ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
		ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
		ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
		ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
		ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
		ri.offset = cpu_to_je32(offset);
		ri.csize = cpu_to_je32(cdatalen);
		ri.dsize = cpu_to_je32(datalen);
		ri.compr = comprtype & 0xff;
		ri.usercompr = (comprtype >> 8) & 0xff;
		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
		ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));

		new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, ALLOC_GC);

		jffs2_free_comprbuf(comprbuf, writebuf);

		if (IS_ERR(new_fn)) {
			printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
			ret = PTR_ERR(new_fn);
			break;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, new_fn);
		offset += datalen;
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
	}

	jffs2_gc_release_page(c, pg_ptr, &pg);
	return ret;
}