2 * Copyright (c) International Business Machines Corp., 2006
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
22 * UBI wear-leveling sub-system.
24 * This sub-system is responsible for wear-leveling. It works in terms of
25 * physical eraseblocks and erase counters and knows nothing about logical
26 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
27 * eraseblocks are of two types - used and free. Used physical eraseblocks are
28 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
29 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
31 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only an erase counter
32 * header. The rest of the physical eraseblock contains only %0xFF bytes.
34 * When physical eraseblocks are returned to the WL sub-system by means of the
35 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
36 * done asynchronously in the context of the per-UBI device background thread,
37 * which is also managed by the WL sub-system.
39 * The wear-leveling is ensured by means of moving the contents of used
40 * physical eraseblocks with low erase counter to free physical eraseblocks
41 * with high erase counter.
43 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
44 * an "optimal" physical eraseblock. For example, when it is known that the
45 * physical eraseblock will be "put" soon because it contains short-term data,
46 * the WL sub-system may pick a free physical eraseblock with low erase
47 * counter, and so forth.
49 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
52 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
53 * in a physical eraseblock, it has to be moved. Technically this is the same
54 * as moving it for wear-leveling reasons.
56 * As was said, for the UBI sub-system all physical eraseblocks are either
57 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
58 * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or
59 * (temporarily) in the @wl->pq queue.
61 * When the WL sub-system returns a physical eraseblock, the physical
62 * eraseblock is protected from being moved for some "time". For this reason,
63 * the physical eraseblock is not directly moved from the @wl->free tree to the
64 * @wl->used tree. There is a protection queue in between where this
65 * physical eraseblock is temporarily stored (@wl->pq).
67 * All this protection stuff is needed because:
68 * o we don't want to move physical eraseblocks just after we have given them
69 * to the user; instead, we first want to let users fill them up with data;
71 * o there is a chance that the user will put the physical eraseblock very
72 * soon, so it makes sense not to move it for some time, but wait; this is
73 * especially important in case of "short term" physical eraseblocks.
75 * Physical eraseblocks stay protected only for limited time. But the "time" is
76 * measured in erase cycles in this case. This is implemented with help of the
77 * protection queue. Eraseblocks are put to the tail of this queue when they
78 * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
79 * head of the queue on each erase operation (for any eraseblock). So the
80 * length of the queue defines how many (global) erase cycles PEBs are protected.
82 * To put it differently, each physical eraseblock has 2 main states: free and
83 * used. The former state corresponds to the @wl->free tree. The latter state
84 * is split into several sub-states:
85 * o the WL movement is allowed (@wl->used tree);
86 * o the WL movement is temporarily prohibited (@wl->pq queue);
87 * o scrubbing is needed (@wl->scrub tree).
89 * Depending on the sub-state, wear-leveling entries of the used physical
90 * eraseblocks may be kept in one of those structures.
92 * Note, in this implementation, we keep a small in-RAM object for each physical
93 * eraseblock. This is surely not a scalable solution. But it appears to be good
94 * enough for moderately large flashes and it is simple. In future, one may
95 * re-work this sub-system and make it more scalable.
97 * At the moment this sub-system does not utilize the sequence number, which
98 * was introduced relatively recently. But it would be wise to do this because
99 * the sequence number of a logical eraseblock characterizes how old it is. For
100 * example, when we move a PEB with low erase counter, and we need to pick the
101 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
102 * pick a target PEB with an average EC if our PEB is not very "old". There is
103 * room for future re-work of the WL sub-system.
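/*
 * A compact sketch of the life cycle described above (illustration only;
 * all names refer to the trees and queue discussed in this comment):
 *
 *	@wl->free  --ubi_wl_get_peb()----------------------->  @wl->pq
 *	@wl->pq    --%UBI_PROT_QUEUE_LEN erase operations--->  @wl->used
 *	@wl->used  --bit-flip detected----------------------->  @wl->scrub
 *	@wl->used / @wl->scrub --ubi_wl_put_peb() or WL move--> erased --> @wl->free
 */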
106 #include <linux/slab.h>
107 #include <linux/crc32.h>
108 #include <linux/freezer.h>
109 #include <linux/kthread.h>
112 /* Number of physical eraseblocks reserved for wear-leveling purposes */
113 #define WL_RESERVED_PEBS 1
116 * Maximum difference between two erase counters. If this threshold is
117 * exceeded, the WL sub-system starts moving data from used physical
118 * eraseblocks with low erase counter to free physical eraseblocks with high
121 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
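/*
 * A minimal sketch (hypothetical helper, not used by this file) of the
 * condition that the threshold above expresses: moving data is worthwhile
 * once a free PEB's erase counter exceeds the lowest erase counter among the
 * used PEBs by at least %UBI_WL_THRESHOLD.
 */
static inline int wl_move_worthwhile(int lowest_used_ec, int free_ec)
{
	return free_ec - lowest_used_ec >= UBI_WL_THRESHOLD;
}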
124 * When a physical eraseblock is moved, the WL sub-system has to pick the target
125 * physical eraseblock to move to. The simplest way would be just to pick the
126 * one with the highest erase counter. But in certain workloads this could lead
127 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
128 * situation when the picked physical eraseblock is constantly erased after the
129 * data is written to it. So, we have a constant which limits the highest erase
130 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
131 * does not pick eraseblocks with erase counter greater than the lowest erase
132 * counter plus %WL_FREE_MAX_DIFF.
134 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
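/*
 * A minimal sketch (hypothetical helper, not used by this file) of the bound
 * described above: a free PEB is an acceptable wear-leveling target only if
 * its erase counter stays below the lowest erase counter among the free PEBs
 * plus %WL_FREE_MAX_DIFF.
 */
static inline int wl_target_acceptable(int lowest_free_ec, int candidate_ec)
{
	return candidate_ec < lowest_free_ec + WL_FREE_MAX_DIFF;
}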
137 * Maximum number of consecutive background thread failures which is enough to
138 * switch to read-only mode.
140 #define WL_MAX_FAILURES 32
143 * struct ubi_work - UBI work description data structure.
144 * @list: a link in the list of pending works
145 * @func: worker function
146 * @e: physical eraseblock to erase
147 * @torture: if the physical eraseblock has to be tortured
149 * The @func pointer points to the worker function. If the @cancel argument is
150 * not zero, the worker has to free the resources and exit immediately. The
151 * worker has to return zero in case of success and a negative error code in
155 struct list_head list;
156 int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
157 /* The below fields are only relevant to erasure works */
158 struct ubi_wl_entry *e;
162 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
163 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
164 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
165 struct rb_root *root);
166 static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
168 #define paranoid_check_ec(ubi, pnum, ec) 0
169 #define paranoid_check_in_wl_tree(e, root)
170 #define paranoid_check_in_pq(ubi, e) 0
174 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
175 * @e: the wear-leveling entry to add
176 * @root: the root of the tree
178 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
179 * the @ubi->used and @ubi->free RB-trees.
181 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
183 struct rb_node **p, *parent = NULL;
187 struct ubi_wl_entry *e1;
190 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
194 else if (e->ec > e1->ec)
197 ubi_assert(e->pnum != e1->pnum);
198 if (e->pnum < e1->pnum)
205 rb_link_node(&e->u.rb, parent, p);
206 rb_insert_color(&e->u.rb, root);
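/*
 * Illustrative sketch (hypothetical helper, not used by this file) of the
 * ordering implemented above: entries are compared first by erase counter
 * and then, to break ties, by physical eraseblock number. Assumes the
 * 'struct ubi_wl_entry' definition from ubi.h.
 */
static inline int wl_entry_before(const struct ubi_wl_entry *a,
				  const struct ubi_wl_entry *b)
{
	if (a->ec != b->ec)
		return a->ec < b->ec;
	return a->pnum < b->pnum;
}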
210 * do_work - do one pending work.
211 * @ubi: UBI device description object
213 * This function returns zero in case of success and a negative error code in
216 static int do_work(struct ubi_device *ubi)
219 struct ubi_work *wrk;
224 * @ubi->work_sem is used to synchronize with the workers. Workers take
225 * it in read mode, so many of them may be doing work at a time. But
226 * the queue flush code has to be sure the whole queue of works is
227 * done, and it takes the semaphore in write mode.
229 down_read(&ubi->work_sem);
230 spin_lock(&ubi->wl_lock);
231 if (list_empty(&ubi->works)) {
232 spin_unlock(&ubi->wl_lock);
233 up_read(&ubi->work_sem);
237 wrk = list_entry(ubi->works.next, struct ubi_work, list);
238 list_del(&wrk->list);
239 ubi->works_count -= 1;
240 ubi_assert(ubi->works_count >= 0);
241 spin_unlock(&ubi->wl_lock);
244 * Call the worker function. Do not touch the work structure
245 * after this call as it will have been freed or reused by that
246 * time by the worker function.
248 err = wrk->func(ubi, wrk, 0);
250 ubi_err("work failed with error code %d", err);
251 up_read(&ubi->work_sem);
257 * produce_free_peb - produce a free physical eraseblock.
258 * @ubi: UBI device description object
260 * This function tries to make a free PEB by means of synchronous execution of
261 * pending works. This may be needed if, for example, the background thread is
262 * disabled. Returns zero in case of success and a negative error code in case
265 static int produce_free_peb(struct ubi_device *ubi)
269 spin_lock(&ubi->wl_lock);
270 while (!ubi->free.rb_node) {
271 spin_unlock(&ubi->wl_lock);
273 dbg_wl("do one work synchronously");
278 spin_lock(&ubi->wl_lock);
280 spin_unlock(&ubi->wl_lock);
286 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
287 * @e: the wear-leveling entry to check
288 * @root: the root of the tree
290 * This function returns non-zero if @e is in the @root RB-tree and zero if it
293 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
299 struct ubi_wl_entry *e1;
301 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
303 if (e->pnum == e1->pnum) {
310 else if (e->ec > e1->ec)
313 ubi_assert(e->pnum != e1->pnum);
314 if (e->pnum < e1->pnum)
325 * prot_queue_add - add physical eraseblock to the protection queue.
326 * @ubi: UBI device description object
327 * @e: the physical eraseblock to add
329 * This function adds @e to the tail of the protection queue @ubi->pq, where
330 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
331 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
334 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
336 int pq_tail = ubi->pq_head - 1;
338 if (pq_tail < 0)
339 pq_tail = UBI_PROT_QUEUE_LEN - 1;
340 ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
341 list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
342 dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
346 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
347 * @root: the RB-tree where to look for
348 * @max: highest possible erase counter
350 * This function looks for a wear-leveling entry with erase counter closest to
351 * @max and less than @max.
353 static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
356 struct ubi_wl_entry *e;
358 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
363 struct ubi_wl_entry *e1;
365 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
378 * ubi_wl_get_peb - get a physical eraseblock.
379 * @ubi: UBI device description object
380 * @dtype: type of data which will be stored in this physical eraseblock
382 * This function returns a physical eraseblock in case of success and a
383 * negative error code in case of failure. Might sleep.
385 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
388 struct ubi_wl_entry *e, *first, *last;
390 ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
391 dtype == UBI_UNKNOWN);
394 spin_lock(&ubi->wl_lock);
395 if (!ubi->free.rb_node) {
396 if (ubi->works_count == 0) {
397 ubi_assert(list_empty(&ubi->works));
398 ubi_err("no free eraseblocks");
399 spin_unlock(&ubi->wl_lock);
402 spin_unlock(&ubi->wl_lock);
404 err = produce_free_peb(ubi);
413 * For long term data we pick a physical eraseblock with high
414 * erase counter. But the highest erase counter we can pick is
415 * bounded by the lowest erase counter plus
418 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
422 * For unknown data we pick a physical eraseblock with medium
423 * erase counter. But by no means can we pick a physical
424 * eraseblock with an erase counter greater than or equal to the
425 * lowest erase counter plus %WL_FREE_MAX_DIFF.
427 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
429 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
431 if (last->ec - first->ec < WL_FREE_MAX_DIFF)
432 e = rb_entry(ubi->free.rb_node,
433 struct ubi_wl_entry, u.rb);
435 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
436 e = find_wl_entry(&ubi->free, medium_ec);
441 * For short term data we pick a physical eraseblock with the
442 * lowest erase counter as we expect it will be erased soon.
444 e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
450 paranoid_check_in_wl_tree(e, &ubi->free);
453 * Move the physical eraseblock to the protection queue where it will
454 * be protected from being moved for some time.
456 rb_erase(&e->u.rb, &ubi->free);
457 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
458 prot_queue_add(ubi, e);
459 spin_unlock(&ubi->wl_lock);
464 * prot_queue_del - remove a physical eraseblock from the protection queue.
465 * @ubi: UBI device description object
466 * @pnum: the physical eraseblock to remove
468 * This function deletes PEB @pnum from the protection queue and returns zero
469 * in case of success and %-ENODEV if the PEB was not found.
471 static int prot_queue_del(struct ubi_device *ubi, int pnum)
473 struct ubi_wl_entry *e;
475 e = ubi->lookuptbl[pnum];
479 if (paranoid_check_in_pq(ubi, e))
482 list_del(&e->u.list);
483 dbg_wl("deleted PEB %d from the protection queue", e->pnum);
488 * sync_erase - synchronously erase a physical eraseblock.
489 * @ubi: UBI device description object
490 * @e: the physical eraseblock to erase
491 * @torture: if the physical eraseblock has to be tortured
493 * This function returns zero in case of success and a negative error code in
496 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
500 struct ubi_ec_hdr *ec_hdr;
501 unsigned long long ec = e->ec;
503 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
505 err = paranoid_check_ec(ubi, e->pnum, e->ec);
509 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
513 err = ubi_io_sync_erase(ubi, e->pnum, torture);
518 if (ec > UBI_MAX_ERASECOUNTER) {
520 * Erase counter overflow. Upgrade UBI and use 64-bit
521 * erase counters internally.
523 ubi_err("erase counter overflow at PEB %d, EC %llu",
529 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
531 ec_hdr->ec = cpu_to_be64(ec);
533 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
538 spin_lock(&ubi->wl_lock);
539 if (e->ec > ubi->max_ec)
541 spin_unlock(&ubi->wl_lock);
549 * serve_prot_queue - check if it is time to stop protecting PEBs.
550 * @ubi: UBI device description object
552 * This function is called after each erase operation and removes PEBs from the
553 * tail of the protection queue. These PEBs have been protected for long enough
554 * and should be moved to the used tree.
556 static void serve_prot_queue(struct ubi_device *ubi)
558 struct ubi_wl_entry *e, *tmp;
562 * There may be several protected physical eraseblocks to remove,
567 spin_lock(&ubi->wl_lock);
568 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
569 dbg_wl("PEB %d EC %d protection over, move to used tree",
572 list_del(&e->u.list);
573 wl_tree_add(e, &ubi->used);
576 * Let's be nice and avoid holding the spinlock for
579 spin_unlock(&ubi->wl_lock);
586 if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
588 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
589 spin_unlock(&ubi->wl_lock);
593 * schedule_ubi_work - schedule a work.
594 * @ubi: UBI device description object
595 * @wrk: the work to schedule
597 * This function adds a work defined by @wrk to the tail of the pending works
600 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
602 spin_lock(&ubi->wl_lock);
603 list_add_tail(&wrk->list, &ubi->works);
604 ubi_assert(ubi->works_count >= 0);
605 ubi->works_count += 1;
606 if (ubi->thread_enabled)
607 wake_up_process(ubi->bgt_thread);
608 spin_unlock(&ubi->wl_lock);
611 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
615 * schedule_erase - schedule an erase work.
616 * @ubi: UBI device description object
617 * @e: the WL entry of the physical eraseblock to erase
618 * @torture: if the physical eraseblock has to be tortured
620 * This function returns zero in case of success and a %-ENOMEM in case of
623 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
626 struct ubi_work *wl_wrk;
628 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
629 e->pnum, e->ec, torture);
631 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
635 wl_wrk->func = &erase_worker;
637 wl_wrk->torture = torture;
639 schedule_ubi_work(ubi, wl_wrk);
644 * wear_leveling_worker - wear-leveling worker function.
645 * @ubi: UBI device description object
646 * @wrk: the work object
647 * @cancel: non-zero if the worker has to free memory and exit
649 * This function copies a more worn out physical eraseblock to a less worn out
650 * one. Returns zero in case of success and a negative error code in case of
653 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
656 int err, scrubbing = 0, torture = 0, protect = 0;
657 struct ubi_wl_entry *e1, *e2;
658 struct ubi_vid_hdr *vid_hdr;
664 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
668 mutex_lock(&ubi->move_mutex);
669 spin_lock(&ubi->wl_lock);
670 ubi_assert(!ubi->move_from && !ubi->move_to);
671 ubi_assert(!ubi->move_to_put);
673 if (!ubi->free.rb_node ||
674 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
676 * No free physical eraseblocks? Well, they must be waiting in
677 * the queue to be erased. Cancel movement - it will be
678 * triggered again when a free physical eraseblock appears.
680 * No used physical eraseblocks? They must be temporarily
681 * protected from being moved. They will be moved to the
682 * @ubi->used tree later and the wear-leveling will be
685 dbg_wl("cancel WL, a list is empty: free %d, used %d",
686 !ubi->free.rb_node, !ubi->used.rb_node);
690 if (!ubi->scrub.rb_node) {
692 * Now pick the least worn-out used physical eraseblock and a
693 * highly worn-out free physical eraseblock. If the erase
694 * counters differ enough, start wear-leveling.
696 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
697 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
699 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
700 dbg_wl("no WL needed: min used EC %d, max free EC %d",
704 paranoid_check_in_wl_tree(e1, &ubi->used);
705 rb_erase(&e1->u.rb, &ubi->used);
706 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
707 e1->pnum, e1->ec, e2->pnum, e2->ec);
709 /* Perform scrubbing */
711 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
712 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
713 paranoid_check_in_wl_tree(e1, &ubi->scrub);
714 rb_erase(&e1->u.rb, &ubi->scrub);
715 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
718 paranoid_check_in_wl_tree(e2, &ubi->free);
719 rb_erase(&e2->u.rb, &ubi->free);
722 spin_unlock(&ubi->wl_lock);
725 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
726 * We so far do not know which logical eraseblock our physical
727 * eraseblock (@e1) belongs to. We have to read the volume identifier
730 * Note, we are protected from this PEB being unmapped and erased. The
731 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
732 * which is being moved was unmapped.
735 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
736 if (err && err != UBI_IO_BITFLIPS) {
737 if (err == UBI_IO_PEB_FREE) {
739 * We are trying to move a PEB without a VID header. UBI
740 * always writes VID headers shortly after the PEB was
741 * given out, so we have a situation where it has not yet
742 * had a chance to write it, because it was preempted.
743 * So add this PEB to the protection queue for now,
744 * because presumably more data will be written to it
745 * (including the missing VID header), and then we'll
748 dbg_wl("PEB %d has no VID header", e1->pnum);
753 ubi_err("error %d while reading VID header from PEB %d",
758 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
760 if (err == MOVE_CANCEL_RACE) {
762 * The LEB has not been moved because the volume is
763 * being deleted or the PEB has been put meanwhile. We
764 * should prevent this PEB from being selected for
765 * wear-leveling movement again, so put it to the
772 if (err == MOVE_CANCEL_BITFLIPS ||
773 err == MOVE_TARGET_WR_ERR) {
774 /* Target PEB bit-flips or write error, torture it */
785 /* The PEB has been successfully moved */
786 ubi_free_vid_hdr(ubi, vid_hdr);
788 ubi_msg("scrubbed PEB %d, data moved to PEB %d",
791 spin_lock(&ubi->wl_lock);
792 if (!ubi->move_to_put) {
793 wl_tree_add(e2, &ubi->used);
796 ubi->move_from = ubi->move_to = NULL;
797 ubi->move_to_put = ubi->wl_scheduled = 0;
798 spin_unlock(&ubi->wl_lock);
800 err = schedule_erase(ubi, e1, 0);
802 kmem_cache_free(ubi_wl_entry_slab, e1);
803 kmem_cache_free(ubi_wl_entry_slab, e2);
809 * Well, the target PEB was put meanwhile, schedule it for
812 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
813 err = schedule_erase(ubi, e2, 0);
815 kmem_cache_free(ubi_wl_entry_slab, e2);
821 mutex_unlock(&ubi->move_mutex);
825 * For some reason the LEB was not moved; it might be an error, it might be
826 * something else. @e1 was not changed, so put it back. @e2 might
827 * have been changed, schedule it for erasure.
830 dbg_wl("cancel moving PEB %d to PEB %d (%d)",
831 e1->pnum, e2->pnum, err);
832 spin_lock(&ubi->wl_lock);
834 prot_queue_add(ubi, e1);
836 wl_tree_add(e1, &ubi->scrub);
838 wl_tree_add(e1, &ubi->used);
839 ubi_assert(!ubi->move_to_put);
840 ubi->move_from = ubi->move_to = NULL;
841 ubi->wl_scheduled = 0;
842 spin_unlock(&ubi->wl_lock);
844 ubi_free_vid_hdr(ubi, vid_hdr);
845 err = schedule_erase(ubi, e2, torture);
847 kmem_cache_free(ubi_wl_entry_slab, e2);
850 mutex_unlock(&ubi->move_mutex);
854 ubi_err("error %d while moving PEB %d to PEB %d",
855 err, e1->pnum, e2->pnum);
856 spin_lock(&ubi->wl_lock);
857 ubi->move_from = ubi->move_to = NULL;
858 ubi->move_to_put = ubi->wl_scheduled = 0;
859 spin_unlock(&ubi->wl_lock);
861 ubi_free_vid_hdr(ubi, vid_hdr);
862 kmem_cache_free(ubi_wl_entry_slab, e1);
863 kmem_cache_free(ubi_wl_entry_slab, e2);
867 mutex_unlock(&ubi->move_mutex);
868 ubi_assert(err != 0);
869 return err < 0 ? err : -EIO;
872 ubi->wl_scheduled = 0;
873 spin_unlock(&ubi->wl_lock);
874 mutex_unlock(&ubi->move_mutex);
875 ubi_free_vid_hdr(ubi, vid_hdr);
880 * ensure_wear_leveling - schedule wear-leveling if it is needed.
881 * @ubi: UBI device description object
883 * This function checks if it is time to start wear-leveling and schedules it
884 * if yes. This function returns zero in case of success and a negative error
885 * code in case of failure.
887 static int ensure_wear_leveling(struct ubi_device *ubi)
890 struct ubi_wl_entry *e1;
891 struct ubi_wl_entry *e2;
892 struct ubi_work *wrk;
894 spin_lock(&ubi->wl_lock);
895 if (ubi->wl_scheduled)
896 /* Wear-leveling is already in the work queue */
900 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
901 * WL worker has to be scheduled anyway.
903 if (!ubi->scrub.rb_node) {
904 if (!ubi->used.rb_node || !ubi->free.rb_node)
905 /* No physical eraseblocks - no deal */
909 * We schedule wear-leveling only if the difference between the
910 * lowest erase counter of used physical eraseblocks and a high
911 * erase counter of free physical eraseblocks is greater than
914 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
915 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
917 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
919 dbg_wl("schedule wear-leveling");
921 dbg_wl("schedule scrubbing");
923 ubi->wl_scheduled = 1;
924 spin_unlock(&ubi->wl_lock);
926 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
932 wrk->func = &wear_leveling_worker;
933 schedule_ubi_work(ubi, wrk);
937 spin_lock(&ubi->wl_lock);
938 ubi->wl_scheduled = 0;
940 spin_unlock(&ubi->wl_lock);
945 * erase_worker - physical eraseblock erase worker function.
946 * @ubi: UBI device description object
947 * @wl_wrk: the work object
948 * @cancel: non-zero if the worker has to free memory and exit
950 * This function erases a physical eraseblock and performs torture testing if
951 * needed. It also takes care of marking the physical eraseblock bad if
952 * needed. Returns zero in case of success and a negative error code in case of
955 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
958 struct ubi_wl_entry *e = wl_wrk->e;
959 int pnum = e->pnum, err, need;
962 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
964 kmem_cache_free(ubi_wl_entry_slab, e);
968 dbg_wl("erase PEB %d EC %d", pnum, e->ec);
970 err = sync_erase(ubi, e, wl_wrk->torture);
972 /* Fine, we've erased it successfully */
975 spin_lock(&ubi->wl_lock);
976 wl_tree_add(e, &ubi->free);
977 spin_unlock(&ubi->wl_lock);
980 * One more erase operation has happened, take care of
981 * protected physical eraseblocks.
983 serve_prot_queue(ubi);
985 /* And take care of wear-leveling */
986 err = ensure_wear_leveling(ubi);
990 ubi_err("failed to erase PEB %d, error %d", pnum, err);
992 kmem_cache_free(ubi_wl_entry_slab, e);
994 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
998 /* Re-schedule the PEB for erasure */
999 err1 = schedule_erase(ubi, e, 0);
1005 } else if (err != -EIO) {
1007 * If this is not %-EIO, we have no idea what to do. Scheduling
1008 * this physical eraseblock for erasure again would cause
1009 * errors again and again. Well, let's switch to RO mode.
1014 /* It is %-EIO, the PEB went bad */
1016 if (!ubi->bad_allowed) {
1017 ubi_err("bad physical eraseblock %d detected", pnum);
1021 spin_lock(&ubi->volumes_lock);
1022 need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1024 need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1025 ubi->avail_pebs -= need;
1026 ubi->rsvd_pebs += need;
1027 ubi->beb_rsvd_pebs += need;
1029 ubi_msg("reserve more %d PEBs", need);
1032 if (ubi->beb_rsvd_pebs == 0) {
1033 spin_unlock(&ubi->volumes_lock);
1034 ubi_err("no reserved physical eraseblocks");
1038 spin_unlock(&ubi->volumes_lock);
1039 ubi_msg("mark PEB %d as bad", pnum);
1041 err = ubi_io_mark_bad(ubi, pnum);
1045 spin_lock(&ubi->volumes_lock);
1046 ubi->beb_rsvd_pebs -= 1;
1047 ubi->bad_peb_count += 1;
1048 ubi->good_peb_count -= 1;
1049 ubi_calculate_reserved(ubi);
1050 if (ubi->beb_rsvd_pebs == 0)
1051 ubi_warn("last PEB from the reserved pool was used");
1052 spin_unlock(&ubi->volumes_lock);
1062 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1063 * @ubi: UBI device description object
1064 * @pnum: physical eraseblock to return
1065 * @torture: if this physical eraseblock has to be tortured
1067 * This function is called to return physical eraseblock @pnum to the pool of
1068 * free physical eraseblocks. The @torture flag has to be set if an I/O error
1069 * occurred to this @pnum and it has to be tested. This function returns zero
1070 * in case of success, and a negative error code in case of failure.
1072 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1075 struct ubi_wl_entry *e;
1077 dbg_wl("PEB %d", pnum);
1078 ubi_assert(pnum >= 0);
1079 ubi_assert(pnum < ubi->peb_count);
1082 spin_lock(&ubi->wl_lock);
1083 e = ubi->lookuptbl[pnum];
1084 if (e == ubi->move_from) {
1086 * User is putting the physical eraseblock which was selected to
1087 * be moved. It will be scheduled for erasure in the
1088 * wear-leveling worker.
1090 dbg_wl("PEB %d is being moved, wait", pnum);
1091 spin_unlock(&ubi->wl_lock);
1093 /* Wait for the WL worker by taking the @ubi->move_mutex */
1094 mutex_lock(&ubi->move_mutex);
1095 mutex_unlock(&ubi->move_mutex);
1097 } else if (e == ubi->move_to) {
1099 * User is putting the physical eraseblock which was selected
1100 * as the target the data is moved to. It may happen if the EBA
1101 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1102 * but the WL sub-system has not put the PEB to the "used" tree
1103 * yet, but it is about to do this. So we just set a flag which
1104 * will tell the WL worker that the PEB is not needed anymore
1105 * and should be scheduled for erasure.
1107 dbg_wl("PEB %d is the target of data moving", pnum);
1108 ubi_assert(!ubi->move_to_put);
1109 ubi->move_to_put = 1;
1110 spin_unlock(&ubi->wl_lock);
1113 if (in_wl_tree(e, &ubi->used)) {
1114 paranoid_check_in_wl_tree(e, &ubi->used);
1115 rb_erase(&e->u.rb, &ubi->used);
1116 } else if (in_wl_tree(e, &ubi->scrub)) {
1117 paranoid_check_in_wl_tree(e, &ubi->scrub);
1118 rb_erase(&e->u.rb, &ubi->scrub);
1120 err = prot_queue_del(ubi, e->pnum);
1122 ubi_err("PEB %d not found", pnum);
1124 spin_unlock(&ubi->wl_lock);
1129 spin_unlock(&ubi->wl_lock);
1131 err = schedule_erase(ubi, e, torture);
1133 spin_lock(&ubi->wl_lock);
1134 wl_tree_add(e, &ubi->used);
1135 spin_unlock(&ubi->wl_lock);
1142 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1143 * @ubi: UBI device description object
1144 * @pnum: the physical eraseblock to schedule
1146 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1147 * needs scrubbing. This function schedules a physical eraseblock for
1148 * scrubbing, which is done in the background. This function returns zero in case of
1149 * success and a negative error code in case of failure.
1151 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1153 struct ubi_wl_entry *e;
1155 dbg_msg("schedule PEB %d for scrubbing", pnum);
1158 spin_lock(&ubi->wl_lock);
1159 e = ubi->lookuptbl[pnum];
1160 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
1161 spin_unlock(&ubi->wl_lock);
1165 if (e == ubi->move_to) {
1167 * This physical eraseblock was used to move data to. The data
1168 * was moved but the PEB was not yet inserted into the proper tree. We should
1169 * tree. We should just wait a little and let the WL worker
1172 spin_unlock(&ubi->wl_lock);
1173 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1178 if (in_wl_tree(e, &ubi->used)) {
1179 paranoid_check_in_wl_tree(e, &ubi->used);
1180 rb_erase(&e->u.rb, &ubi->used);
1184 err = prot_queue_del(ubi, e->pnum);
1186 ubi_err("PEB %d not found", pnum);
1188 spin_unlock(&ubi->wl_lock);
1193 wl_tree_add(e, &ubi->scrub);
1194 spin_unlock(&ubi->wl_lock);
1197 * Technically scrubbing is the same as wear-leveling, so it is done
1200 return ensure_wear_leveling(ubi);
1204 * ubi_wl_flush - flush all pending works.
1205 * @ubi: UBI device description object
1207 * This function returns zero in case of success and a negative error code in
1210 int ubi_wl_flush(struct ubi_device *ubi)
1215 * Erase while the pending works queue is not empty, but not more than
1216 * the number of currently pending works.
1218 dbg_wl("flush (%d pending works)", ubi->works_count);
1219 while (ubi->works_count) {
1226 * Make sure all the works which have been done in parallel are
1229 down_write(&ubi->work_sem);
1230 up_write(&ubi->work_sem);
1233 * And in case the last one was the WL worker and it canceled the LEB
1234 * movement, flush again.
1236 while (ubi->works_count) {
1237 dbg_wl("flush more (%d pending works)", ubi->works_count);
1247 * tree_destroy - destroy an RB-tree.
1248 * @root: the root of the tree to destroy
1250 static void tree_destroy(struct rb_root *root)
1253 struct ubi_wl_entry *e;
1259 else if (rb->rb_right)
1262 e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1266 if (rb->rb_left == &e->u.rb)
1269 rb->rb_right = NULL;
1272 kmem_cache_free(ubi_wl_entry_slab, e);
1278 * ubi_thread - UBI background thread.
1279 * @u: the UBI device description object pointer
1281 int ubi_thread(void *u)
1284 struct ubi_device *ubi = u;
1286 ubi_msg("background thread \"%s\" started, PID %d",
1287 ubi->bgt_name, task_pid_nr(current));
1293 if (kthread_should_stop())
1296 if (try_to_freeze())
1299 spin_lock(&ubi->wl_lock);
1300 if (list_empty(&ubi->works) || ubi->ro_mode ||
1301 !ubi->thread_enabled) {
1302 set_current_state(TASK_INTERRUPTIBLE);
1303 spin_unlock(&ubi->wl_lock);
1307 spin_unlock(&ubi->wl_lock);
1311 ubi_err("%s: work failed with error code %d",
1312 ubi->bgt_name, err);
1313 if (failures++ > WL_MAX_FAILURES) {
1315 * Too many failures, disable the thread and
1316 * switch to read-only mode.
1318 ubi_msg("%s: %d consecutive failures",
1319 ubi->bgt_name, WL_MAX_FAILURES);
1321 ubi->thread_enabled = 0;
1330 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1335 * cancel_pending - cancel all pending works.
1336 * @ubi: UBI device description object
1338 static void cancel_pending(struct ubi_device *ubi)
1340 while (!list_empty(&ubi->works)) {
1341 struct ubi_work *wrk;
1343 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1344 list_del(&wrk->list);
1345 wrk->func(ubi, wrk, 1);
1346 ubi->works_count -= 1;
1347 ubi_assert(ubi->works_count >= 0);
1352 * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
1353 * @ubi: UBI device description object
1354 * @si: scanning information
1356 * This function returns zero in case of success, and a negative error code in
1359 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1362 struct rb_node *rb1, *rb2;
1363 struct ubi_scan_volume *sv;
1364 struct ubi_scan_leb *seb, *tmp;
1365 struct ubi_wl_entry *e;
1367 ubi->used = ubi->free = ubi->scrub = RB_ROOT;
1368 spin_lock_init(&ubi->wl_lock);
1369 mutex_init(&ubi->move_mutex);
1370 init_rwsem(&ubi->work_sem);
1371 ubi->max_ec = si->max_ec;
1372 INIT_LIST_HEAD(&ubi->works);
1374 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1377 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1378 if (!ubi->lookuptbl)
1381 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1382 INIT_LIST_HEAD(&ubi->pq[i]);
1385 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1388 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1392 e->pnum = seb->pnum;
1394 ubi->lookuptbl[e->pnum] = e;
1395 if (schedule_erase(ubi, e, 0)) {
1396 kmem_cache_free(ubi_wl_entry_slab, e);
1401 list_for_each_entry(seb, &si->free, u.list) {
1404 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1408 e->pnum = seb->pnum;
1410 ubi_assert(e->ec >= 0);
1411 wl_tree_add(e, &ubi->free);
1412 ubi->lookuptbl[e->pnum] = e;
1415 list_for_each_entry(seb, &si->corr, u.list) {
1418 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1422 e->pnum = seb->pnum;
1424 ubi->lookuptbl[e->pnum] = e;
1425 if (schedule_erase(ubi, e, 0)) {
1426 kmem_cache_free(ubi_wl_entry_slab, e);
1431 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1432 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1435 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1439 e->pnum = seb->pnum;
1441 ubi->lookuptbl[e->pnum] = e;
1443 dbg_wl("add PEB %d EC %d to the used tree",
1445 wl_tree_add(e, &ubi->used);
1447 dbg_wl("add PEB %d EC %d to the scrub tree",
1449 wl_tree_add(e, &ubi->scrub);
1454 if (ubi->avail_pebs < WL_RESERVED_PEBS) {
1455 ubi_err("no enough physical eraseblocks (%d, need %d)",
1456 ubi->avail_pebs, WL_RESERVED_PEBS);
1459 ubi->avail_pebs -= WL_RESERVED_PEBS;
1460 ubi->rsvd_pebs += WL_RESERVED_PEBS;
1462 /* Schedule wear-leveling if needed */
1463 err = ensure_wear_leveling(ubi);
1470 cancel_pending(ubi);
1471 tree_destroy(&ubi->used);
1472 tree_destroy(&ubi->free);
1473 tree_destroy(&ubi->scrub);
1474 kfree(ubi->lookuptbl);
1479 * protection_queue_destroy - destroy the protection queue.
1480 * @ubi: UBI device description object
1482 static void protection_queue_destroy(struct ubi_device *ubi)
1485 struct ubi_wl_entry *e, *tmp;
1487 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1488 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1489 list_del(&e->u.list);
1490 kmem_cache_free(ubi_wl_entry_slab, e);
1496 * ubi_wl_close - close the wear-leveling sub-system.
1497 * @ubi: UBI device description object
1499 void ubi_wl_close(struct ubi_device *ubi)
1501 dbg_wl("close the WL sub-system");
1502 cancel_pending(ubi);
1503 protection_queue_destroy(ubi);
1504 tree_destroy(&ubi->used);
1505 tree_destroy(&ubi->free);
1506 tree_destroy(&ubi->scrub);
1507 kfree(ubi->lookuptbl);
1510 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
1513 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
1514 * @ubi: UBI device description object
1515 * @pnum: the physical eraseblock number to check
1516 * @ec: the erase counter to check
1518 * This function returns zero if the erase counter of physical eraseblock @pnum
1519 * is equivalent to @ec, %1 if not, and a negative error code if an error
1522 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
1526 struct ubi_ec_hdr *ec_hdr;
1528 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1532 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1533 if (err && err != UBI_IO_BITFLIPS) {
1534 /* The header does not have to exist */
1539 read_ec = be64_to_cpu(ec_hdr->ec);
1540 if (ec != read_ec) {
1541 ubi_err("paranoid check failed for PEB %d", pnum);
1542 ubi_err("read EC is %lld, should be %d", read_ec, ec);
1543 ubi_dbg_dump_stack();
1554 * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
1555 * @e: the wear-leveling entry to check
1556 * @root: the root of the tree
1558 * This function returns zero if @e is in the @root RB-tree and %1 if it is
1561 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
1562 struct rb_root *root)
1564 if (in_wl_tree(e, root))
1567 ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
1568 e->pnum, e->ec, root);
1569 ubi_dbg_dump_stack();
1574 * paranoid_check_in_pq - check if wear-leveling entry is in the protection
1576 * @ubi: UBI device description object
1577 * @e: the wear-leveling entry to check
1579 * This function returns zero if @e is in @ubi->pq and %1 if it is not.
1581 static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
1583 struct ubi_wl_entry *p;
1586 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
1587 list_for_each_entry(p, &ubi->pq[i], u.list)
1591 ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
1593 ubi_dbg_dump_stack();
1596 #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */