/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.127 2005/09/20 15:49:12 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'len' into the appropriate place, or returns -ENOSPC or other
 * error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret;
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
			 * with c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
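			/* A worked example with made-up numbers: sector_size 0x10000,
			 * two blocks mid-erase (erasing_size 0x20000, nr_erasing_blocks 2),
			 * genuine dirty_size 0x8000 and unchecked_size 0x1000 gives
			 * dirty = 0x8000 + 0x20000 - 0x20000 + 0x1000 = 0x9000: blocks
			 * already being reclaimed cancel out, leaving the dirty space
			 * plus the optimistic unchecked contribution. */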
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know whether the unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This prevents endless gc looping on a filesystem which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
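			/* 'avail' is deliberately optimistic: it counts dirty, erasing
			 * and unchecked space as if all of it were reclaimable, while
			 * used_size and wasted_size are excluded. */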
			if ( (avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}
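
/*
 * Illustrative caller pattern (a sketch based on how the write paths use this
 * API; the names and sizes here are examples, not code from this file):
 *
 *	uint32_t alloclen;
 *	int ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &alloclen,
 *				      ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	// ... write at most 'alloclen' bytes, then report the new node
 *	// with jffs2_add_physical_node_ref() ...
 *	jffs2_complete_reservation(c);
 */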

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}
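
/*
 * Unlike jffs2_reserve_space(), the GC variant above neither takes
 * c->alloc_sem nor triggers a garbage-collect pass: it is called from the
 * garbage collector itself, which is expected to hold the allocation
 * semaphore already.
 */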

/* Classify nextblock (clean, dirty or verydirty) and force the selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
			       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
			       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
e631ddba
FH
267{
268 struct jffs2_eraseblock *jeb = c->nextblock;
9fe4854c 269 uint32_t reserved_size; /* for summary information at the end of the jeb */
e631ddba
FH
270 int ret;
271
272 restart:
273 reserved_size = 0;
274
275 if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
276 /* NOSUM_SIZE means not to generate summary */
277
278 if (jeb) {
279 reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
733802d9 280 dbg_summary("minsize=%d , jeb->free=%d ,"
e631ddba
FH
281 "summary->size=%d , sumsize=%d\n",
282 minsize, jeb->free_size,
283 c->summary->sum_size, sumsize);
284 }
285
286 /* Is there enough space for writing out the current node, or we have to
287 write out summary information now, close this jeb and select new nextblock? */
288 if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
289 JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {
290
291 /* Has summary been disabled for this jeb? */
292 if (jffs2_sum_is_disabled(c->summary)) {
293 sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
294 goto restart;
1da177e4
LT
295 }
296
e631ddba 297 /* Writing out the collected summary information */
733802d9 298 dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
e631ddba
FH
299 ret = jffs2_sum_write_sumnode(c);
300
301 if (ret)
302 return ret;
303
304 if (jffs2_sum_is_disabled(c->summary)) {
305 /* jffs2_write_sumnode() couldn't write out the summary information
306 diabling summary for this jeb and free the collected information
307 */
308 sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
309 goto restart;
310 }
311
312 jffs2_close_nextblock(c, jeb);
313 jeb = NULL;
34c0e906
FH
314 /* keep always valid value in reserved_size */
315 reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
e631ddba
FH
316 }
317 } else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
		  *len, jeb->offset + (c->sector_size - jeb->free_size)));
	return 0;
}
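
/*
 * Rough illustration of the summary arithmetic above (made-up numbers): if
 * the summary collected so far (c->summary->sum_size) is 0x200 bytes and the
 * caller's record needs sumsize 0x40 more, the block must keep
 * PAD(0x240 + JFFS2_SUMMARY_FRAME_SIZE) bytes back for the summary node. A
 * write of 'minsize' bytes therefore only fits if PAD(minsize) plus that
 * padded reservation is within jeb->free_size; otherwise the summary is
 * flushed with jffs2_sum_write_sumnode() and the block is closed.
 */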

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @ofs: physical offset of the new node, with the REF_* flag bits in the low two bits
 * @len: length of this physical node
 * @ic: inode cache to which the new node belongs, if any
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
		  ofs & ~3, ofs & 3, len));
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}
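
/* A simple linear scan; in this file it is only used below to check whether
   an eraseblock sits on c->bad_used_list before refiling it. */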

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if(unlikely(!ref)) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
		case RAWNODE_CLASS_XATTR_DATUM:
			jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
			break;
		case RAWNODE_CLASS_XATTR_REF:
			jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
			break;
#endif
		default:
			if (ic->nodes == (void *)ic && ic->nlink == 0)
				jffs2_del_ino_cache(c, ic);
			break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	up(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * with c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
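	/* Same accounting as in jffs2_reserve_space(), minus the optimistic
	 * unchecked_size term: for deciding whether to wake the GC thread,
	 * only space already known to be reclaimable is counted. */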

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}