/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.127 2005/09/20 15:49:12 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @ofs: Returned value of node offset
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 * @sumsize: Summary size requested for this node, or JFFS2_SUMMARY_NOSUM_SIZE
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
 * or other error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */
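/*
 * Illustrative caller sketch (hypothetical, not taken from this file): the
 * write paths in write.c and gc.c follow roughly this pattern, where 'ri'
 * and 'datalen' are placeholder names for the node about to be written:
 *
 *	uint32_t ofs, len;
 *	int ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &ofs, &len,
 *				      ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write at most 'len' bytes of node data at flash offset 'ofs' ...
 *	jffs2_complete_reservation(c);
 */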

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *ofs, uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret;
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
			 * again via c->nr_erasing_blocks * c->sector_size.
			 * Blocks on erasable_list are counted in dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This affects the sum only until gc has finished checking those nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
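			/*
			 * Worked example (hypothetical numbers, 64KiB eraseblocks):
			 * with dirty_size = 0x30000, two blocks mid-erase
			 * (erasing_size = 0x20000, nr_erasing_blocks = 2) and
			 * unchecked_size = 0x8000, the reclaimable estimate is
			 * 0x30000 + 0x20000 - 2 * 0x10000 + 0x8000 = 0x38000.
			 */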
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calculate the space possibly available. "Possibly available" means we
			 * don't know whether the unchecked size contains obsoleted nodes, which could
			 * give us some more usable space. This affects the sum only until gc has
			 * finished checking those nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or
			 * equal to blocksneeded * sector_size.
			 * This prevents endless gc looping on a filesystem which is nearly full,
			 * even if the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
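			/*
			 * Illustrative check (hypothetical numbers): with a 64KiB
			 * sector_size and blocksneeded = 5, we only keep trying while
			 * avail / 0x10000 > 5, i.e. avail is at least 0x60000 bytes;
			 * otherwise we give up with -ENOSPC.
			 */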
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	return ret;
}


/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_del(&ejeb->list);
			list_add_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
			       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
			       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;		/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d, jeb->free=%d, "
				    "summary->size=%d, sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we have to
		   write out summary information now, close this jeb and select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary information;
				   disable summary for this jeb and free the collected information
				 */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			c->wasted_size += jeb->free_size;
			c->free_size -= jeb->free_size;
			jeb->wasted_size += jeb->free_size;
			jeb->free_size = 0;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @new: new node reference to add
 * @len: length of this physical node
 * @ic: inode cache to which the new node belongs
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */
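/*
 * Illustrative caller sketch (hypothetical, not taken from this file): the
 * write paths set flash_offset on a freshly allocated ref from the space
 * returned by jffs2_reserve_space() before reporting it; 'raw', 'flash_ofs'
 * and 'totlen' are placeholder names:
 *
 *	raw->flash_offset = flash_ofs | REF_NORMAL;
 *	ret = jffs2_add_physical_node_ref(c, raw, PAD(totlen), f->inocache);
 */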

int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new,
				uint32_t len, struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;

	jeb = &c->blocks[new->flash_offset / c->sector_size];
#ifdef TEST_TOTLEN
	new->__totlen = len;
#endif

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
	/* we could get some obsolete nodes after nextblock was refiled
	   in wbuf.c */
	if ((c->nextblock || !ref_obsolete(new)) &&
	    (jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		jffs2_free_raw_node_ref(new);
		return -EINVAL;
	}
#endif
	spin_lock(&c->erase_completion_lock);

	jffs2_link_node_ref(c, jeb, new, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return 0;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;

		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if(!ref) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	// Take care that wasted size is taken into account
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		D1(printk(KERN_DEBUG "Dirtying\n"));
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk(KERN_DEBUG "Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		/* It seems we should never call jffs2_mark_node_obsolete() for
		   XATTR nodes.... yet. Make sure we notice if/when we change
		   that :) */
		BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		if (ic->nodes == (void *)ic && ic->nlink == 0)
			jffs2_del_ino_cache(c, ic);

		spin_unlock(&c->erase_completion_lock);
	}


	/* Merge with the next node in the physical list, if there is one
	   and if it's also obsolete and if it doesn't belong to any inode */
	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
	    !ref->next_phys->next_in_ino) {
		struct jffs2_raw_node_ref *n = ref->next_phys;

		spin_lock(&c->erase_completion_lock);

#ifdef TEST_TOTLEN
		ref->__totlen += n->__totlen;
#endif
		ref->next_phys = n->next_phys;
		if (jeb->last_node == n) jeb->last_node = ref;
		if (jeb->gc_node == n) {
			/* gc will be happy continuing gc on this node */
			jeb->gc_node = ref;
		}
		spin_unlock(&c->erase_completion_lock);

		jffs2_free_raw_node_ref(n);
	}

	/* Also merge with the previous node in the list, if there is one
	   and that one is obsolete */
	if (ref != jeb->first_node) {
		struct jffs2_raw_node_ref *p = jeb->first_node;

		spin_lock(&c->erase_completion_lock);

		while (p->next_phys != ref)
			p = p->next_phys;

		if (ref_obsolete(p) && !ref->next_in_ino) {
#ifdef TEST_TOTLEN
			p->__totlen += ref->__totlen;
#endif
			if (jeb->last_node == ref) {
				jeb->last_node = p;
			}
			if (jeb->gc_node == ref) {
				/* gc will be happy continuing gc on this node */
				jeb->gc_node = p;
			}
			p->next_phys = ref->next_phys;
			jffs2_free_raw_node_ref(ref);
		}
		spin_unlock(&c->erase_completion_lock);
	}
 out_erase_sem:
	up(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * again via c->nr_erasing_blocks * c->sector_size.
	 * Blocks on erasable_list are counted in dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}