 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * Wear-leveling is ensured by moving the contents of used physical
 * eraseblocks with low erase counters to free physical eraseblocks with high
 * erase counters.
 *
 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
 * an "optimal" physical eraseblock. For example, when it is known that the
 * physical eraseblock will be "put" soon because it contains short-term data,
 * the WL sub-system may pick a free physical eraseblock with a low erase
 * counter, and so forth.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is
 * detected in a physical eraseblock, it has to be moved. Technically this is
 * the same as moving it for wear-leveling reasons.
 *
 * As was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in a set of different RB-trees: @wl->used,
 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
 *
 * Note, in this implementation, we keep a small in-RAM object for each
 * physical eraseblock. This is surely not a scalable solution. But it appears
 * to be good enough for moderately large flashes and it is simple. In the
 * future, one may re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is.
 * For example, when we move a PEB with a low erase counter, and we need to
 * pick the target PEB, we pick a PEB with the highest EC if our PEB is "old"
 * and we pick a target PEB with an average EC if our PEB is not very "old".
 * There is room here for future re-work of the WL sub-system.
 *
 * Note: the stuff with protection trees looks too complex and is difficult to
 * understand. Should be fixed.
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
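/*
 * Illustrative sketch, not part of the original file: roughly how a caller
 * such as the EBA sub-system is expected to use the WL interface described
 * in the overview comment above. Error handling is simplified and the
 * function name is made up for illustration only.
 */
static int __maybe_unused example_wl_roundtrip(struct ubi_device *ubi)
{
	int pnum;

	/* Get a free PEB; at this point it contains only the EC header */
	pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
	if (pnum < 0)
		return pnum;

	/* ... the caller writes its data to @pnum here ... */

	/* Return the PEB; the WL sub-system schedules it for erasure */
	return ubi_wl_put_peb(ubi, pnum, 0);
}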
/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1
 * For how many erase cycles short term, unknown, and long term physical
 * eraseblocks are protected.
#define ST_PROTECTION 16
#define U_PROTECTION 10
#define LT_PROTECTION 4
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counters to free physical eraseblocks with high
 * erase counters.
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
 * When a physical eraseblock is moved, the WL sub-system has to pick the
 * target physical eraseblock to move to. The simplest way would be just to
 * pick the one with the highest erase counter. But in certain workloads this
 * could lead to an unlimited wear of one or a few physical eraseblocks.
 * Indeed, imagine a situation when the picked physical eraseblock is
 * constantly erased after the data is written to it. So, we have a constant
 * which limits the highest erase counter of the free physical eraseblock to
 * pick. Namely, the WL sub-system does not pick eraseblocks with an erase
 * counter greater than the lowest erase counter plus %WL_FREE_MAX_DIFF.
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
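/*
 * Illustrative sketch, not part of the original file: the picking bound
 * described above, written out as a trivial predicate. Here @min_ec stands
 * for the lowest erase counter among the free physical eraseblocks; the
 * helper name is made up for illustration only.
 */
static inline int __maybe_unused example_free_peb_pickable(int ec, int min_ec)
{
	/* A free PEB is a valid target only while its EC is inside the window */
	return ec <= min_ec + WL_FREE_MAX_DIFF;
}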
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
#define WL_MAX_FAILURES 32
123 * struct ubi_wl_prot_entry - PEB protection entry.
124 * @rb_pnum: link in the @wl->prot.pnum RB-tree
125 * @rb_aec: link in the @wl->prot.aec RB-tree
126 * @abs_ec: the absolute erase counter value when the protection ends
127 * @e: the wear-leveling entry of the physical eraseblock under protection
129 * When the WL sub-system returns a physical eraseblock, the physical
130 * eraseblock is protected from being moved for some "time". For this reason,
131 * the physical eraseblock is not directly moved from the @wl->free tree to the
132 * @wl->used tree. There is one more tree in between where this physical
133 * eraseblock is temporarily stored (@wl->prot).
135 * All this protection stuff is needed because:
136 * o we don't want to move physical eraseblocks just after we have given them
137 * to the user; instead, we first want to let users fill them up with data;
139 * o there is a chance that the user will put the physical eraseblock very
140 * soon, so it makes sense not to move it for some time, but wait; this is
141 * especially important in case of "short term" physical eraseblocks.
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the absolute erase counter (@wl->abs_ec). When it reaches a certain
 * value, the physical eraseblocks are moved from the protection trees
 * (@wl->prot.*) to the @wl->used tree.
149 * Protected physical eraseblocks are searched by physical eraseblock number
150 * (when they are put) and by the absolute erase counter (to check if it is
151 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
152 * storing the protected physical eraseblocks: @wl->prot.pnum and
153 * @wl->prot.aec. They are referred to as the "protection" trees. The
154 * first one is indexed by the physical eraseblock number. The second one is
155 * indexed by the absolute erase counter. Both trees store
156 * &struct ubi_wl_prot_entry objects.
 * Each physical eraseblock has 2 main states: free and used. The former state
 * corresponds to the @wl->free tree. The latter state is split up into
 * several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
 *   @wl->prot.aec trees);
 * o scrubbing is needed (@wl->scrub tree).
166 * Depending on the sub-state, wear-leveling entries of the used physical
167 * eraseblocks may be kept in one of those trees.
169 struct ubi_wl_prot_entry {
170 struct rb_node rb_pnum;
171 struct rb_node rb_aec;
172 unsigned long long abs_ec;
173 struct ubi_wl_entry *e;
177 * struct ubi_work - UBI work description data structure.
178 * @list: a link in the list of pending works
179 * @func: worker function
180 * @priv: private data of the worker function
181 * @e: physical eraseblock to erase
182 * @torture: if the physical eraseblock has to be tortured
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
197 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
198 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
199 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
200 struct rb_root *root);
202 #define paranoid_check_ec(ubi, pnum, ec) 0
203 #define paranoid_check_in_wl_tree(e, root)
207 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
208 * @e: the wear-leveling entry to add
209 * @root: the root of the tree
211 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
212 * the @ubi->used and @ubi->free RB-trees.
214 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
216 struct rb_node **p, *parent = NULL;
220 struct ubi_wl_entry *e1;
223 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
227 else if (e->ec > e1->ec)
230 ubi_assert(e->pnum != e1->pnum);
231 if (e->pnum < e1->pnum)
238 rb_link_node(&e->u.rb, parent, p);
239 rb_insert_color(&e->u.rb, root);
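/*
 * Illustrative sketch, not part of the original file: the (EC, pnum) ordering
 * used by wl_tree_add() above, expressed as a stand-alone comparison helper
 * (the name is made up). Returns a negative value, zero or a positive value,
 * like memcmp().
 */
static int __maybe_unused example_wl_entry_cmp(const struct ubi_wl_entry *a,
					       const struct ubi_wl_entry *b)
{
	if (a->ec != b->ec)
		return a->ec < b->ec ? -1 : 1;
	if (a->pnum != b->pnum)
		return a->pnum < b->pnum ? -1 : 1;
	return 0;
}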
243 * do_work - do one pending work.
244 * @ubi: UBI device description object
 * This function returns zero in case of success and a negative error code in
 * case of failure.
static int do_work(struct ubi_device *ubi)
252 struct ubi_work *wrk;
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes @ubi->work_sem in write mode.
262 down_read(&ubi->work_sem);
263 spin_lock(&ubi->wl_lock);
264 if (list_empty(&ubi->works)) {
265 spin_unlock(&ubi->wl_lock);
266 up_read(&ubi->work_sem);
270 wrk = list_entry(ubi->works.next, struct ubi_work, list);
271 list_del(&wrk->list);
272 ubi->works_count -= 1;
273 ubi_assert(ubi->works_count >= 0);
274 spin_unlock(&ubi->wl_lock);
277 * Call the worker function. Do not touch the work structure
278 * after this call as it will have been freed or reused by that
279 * time by the worker function.
281 err = wrk->func(ubi, wrk, 0);
283 ubi_err("work failed with error code %d", err);
284 up_read(&ubi->work_sem);
290 * produce_free_peb - produce a free physical eraseblock.
291 * @ubi: UBI device description object
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
static int produce_free_peb(struct ubi_device *ubi)
302 spin_lock(&ubi->wl_lock);
303 while (!ubi->free.rb_node) {
304 spin_unlock(&ubi->wl_lock);
306 dbg_wl("do one work synchronously");
311 spin_lock(&ubi->wl_lock);
313 spin_unlock(&ubi->wl_lock);
319 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
320 * @e: the wear-leveling entry to check
321 * @root: the root of the tree
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
332 struct ubi_wl_entry *e1;
334 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
336 if (e->pnum == e1->pnum) {
343 else if (e->ec > e1->ec)
346 ubi_assert(e->pnum != e1->pnum);
347 if (e->pnum < e1->pnum)
358 * prot_tree_add - add physical eraseblock to protection trees.
359 * @ubi: UBI device description object
360 * @e: the physical eraseblock to add
361 * @pe: protection entry object to use
362 * @ec: for how many erase operations this PEB should be protected
 * @ubi->wl_lock has to be locked.
366 static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
367 struct ubi_wl_prot_entry *pe, int ec)
369 struct rb_node **p, *parent = NULL;
370 struct ubi_wl_prot_entry *pe1;
373 pe->abs_ec = ubi->abs_ec + ec;
375 p = &ubi->prot.pnum.rb_node;
378 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
380 if (e->pnum < pe1->e->pnum)
385 rb_link_node(&pe->rb_pnum, parent, p);
386 rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
388 p = &ubi->prot.aec.rb_node;
392 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
394 if (pe->abs_ec < pe1->abs_ec)
399 rb_link_node(&pe->rb_aec, parent, p);
400 rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
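/*
 * Worked example (illustrative, not from the original file): suppose
 * @ubi->abs_ec is 1000 when a short-term PEB is added here with
 * ec = ST_PROTECTION (16). Then pe->abs_ec becomes 1016 and the PEB stays in
 * the protection trees until check_protection_over() observes that
 * @ubi->abs_ec has reached 1016, at which point it is moved to @wl->used.
 */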
404 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
405 * @root: the RB-tree where to look for
406 * @max: highest possible erase counter
 * This function looks for a wear-leveling entry with an erase counter closest
 * to @max and less than @max.
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
414 struct ubi_wl_entry *e;
416 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
421 struct ubi_wl_entry *e1;
423 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
436 * ubi_wl_get_peb - get a physical eraseblock.
437 * @ubi: UBI device description object
438 * @dtype: type of data which will be stored in this physical eraseblock
440 * This function returns a physical eraseblock in case of success and a
441 * negative error code in case of failure. Might sleep.
443 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
445 int err, protect, medium_ec;
446 struct ubi_wl_entry *e, *first, *last;
447 struct ubi_wl_prot_entry *pe;
449 ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
450 dtype == UBI_UNKNOWN);
452 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
457 spin_lock(&ubi->wl_lock);
458 if (!ubi->free.rb_node) {
459 if (ubi->works_count == 0) {
460 ubi_assert(list_empty(&ubi->works));
461 ubi_err("no free eraseblocks");
462 spin_unlock(&ubi->wl_lock);
466 spin_unlock(&ubi->wl_lock);
468 err = produce_free_peb(ubi);
		 * For long term data we pick a physical eraseblock with high
		 * erase counter. But the highest erase counter we can pick is
		 * bounded by the lowest erase counter plus %WL_FREE_MAX_DIFF.
		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		protect = LT_PROTECTION;
		 * For unknown data we pick a physical eraseblock with medium
		 * erase counter. But we by no means can pick a physical
		 * eraseblock with an erase counter greater than or equal to
		 * the lowest erase counter plus %WL_FREE_MAX_DIFF.
494 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
496 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
498 if (last->ec - first->ec < WL_FREE_MAX_DIFF)
499 e = rb_entry(ubi->free.rb_node,
500 struct ubi_wl_entry, u.rb);
502 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
503 e = find_wl_entry(&ubi->free, medium_ec);
505 protect = U_PROTECTION;
509 * For short term data we pick a physical eraseblock with the
510 * lowest erase counter as we expect it will be erased soon.
512 e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
513 protect = ST_PROTECTION;
522 * Move the physical eraseblock to the protection trees where it will
523 * be protected from being moved for some time.
525 paranoid_check_in_wl_tree(e, &ubi->free);
526 rb_erase(&e->u.rb, &ubi->free);
527 prot_tree_add(ubi, e, pe, protect);
529 dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
530 spin_unlock(&ubi->wl_lock);
536 * prot_tree_del - remove a physical eraseblock from the protection trees
537 * @ubi: UBI device description object
538 * @pnum: the physical eraseblock to remove
 * This function removes PEB @pnum from the protection trees and returns zero
 * in case of success and %-ENODEV if the PEB was not found in the protection
 * trees.
static int prot_tree_del(struct ubi_device *ubi, int pnum)
547 struct ubi_wl_prot_entry *pe = NULL;
549 p = ubi->prot.pnum.rb_node;
552 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
554 if (pnum == pe->e->pnum)
557 if (pnum < pe->e->pnum)
566 ubi_assert(pe->e->pnum == pnum);
567 rb_erase(&pe->rb_aec, &ubi->prot.aec);
568 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
586 struct ubi_ec_hdr *ec_hdr;
587 unsigned long long ec = e->ec;
589 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
591 err = paranoid_check_ec(ubi, e->pnum, e->ec);
595 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
599 err = ubi_io_sync_erase(ubi, e->pnum, torture);
604 if (ec > UBI_MAX_ERASECOUNTER) {
606 * Erase counter overflow. Upgrade UBI and use 64-bit
607 * erase counters internally.
609 ubi_err("erase counter overflow at PEB %d, EC %llu",
615 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
617 ec_hdr->ec = cpu_to_be64(ec);
619 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
624 spin_lock(&ubi->wl_lock);
625 if (e->ec > ubi->max_ec)
627 spin_unlock(&ubi->wl_lock);
635 * check_protection_over - check if it is time to stop protecting some PEBs.
636 * @ubi: UBI device description object
 * This function is called after each erase operation, when the absolute erase
 * counter is incremented, to check whether some physical eraseblocks no
 * longer have to be protected. These physical eraseblocks are moved from the
 * protection trees to the used tree.
643 static void check_protection_over(struct ubi_device *ubi)
645 struct ubi_wl_prot_entry *pe;
	 * There may be several protected physical eraseblocks to remove,
652 spin_lock(&ubi->wl_lock);
653 if (!ubi->prot.aec.rb_node) {
654 spin_unlock(&ubi->wl_lock);
658 pe = rb_entry(rb_first(&ubi->prot.aec),
659 struct ubi_wl_prot_entry, rb_aec);
661 if (pe->abs_ec > ubi->abs_ec) {
662 spin_unlock(&ubi->wl_lock);
666 dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
667 pe->e->pnum, ubi->abs_ec, pe->abs_ec);
668 rb_erase(&pe->rb_aec, &ubi->prot.aec);
669 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
670 wl_tree_add(pe->e, &ubi->used);
671 spin_unlock(&ubi->wl_lock);
679 * schedule_ubi_work - schedule a work.
680 * @ubi: UBI device description object
681 * @wrk: the work to schedule
 * This function enqueues a work defined by @wrk to the tail of the pending
 * works queue.
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
688 spin_lock(&ubi->wl_lock);
689 list_add_tail(&wrk->list, &ubi->works);
690 ubi_assert(ubi->works_count >= 0);
691 ubi->works_count += 1;
692 if (ubi->thread_enabled)
693 wake_up_process(ubi->bgt_thread);
694 spin_unlock(&ubi->wl_lock);
697 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
701 * schedule_erase - schedule an erase work.
702 * @ubi: UBI device description object
703 * @e: the WL entry of the physical eraseblock to erase
704 * @torture: if the physical eraseblock has to be tortured
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
712 struct ubi_work *wl_wrk;
714 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
715 e->pnum, e->ec, torture);
717 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
721 wl_wrk->func = &erase_worker;
723 wl_wrk->torture = torture;
725 schedule_ubi_work(ubi, wl_wrk);
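/*
 * Illustrative sketch, not part of the original file: the minimal shape of a
 * worker function honoring the contract described at 'struct ubi_work' above
 * (free the resources and bail out when @cancel is non-zero, otherwise return
 * zero or a negative error code). The function name is made up.
 */
static int __maybe_unused example_worker(struct ubi_device *ubi,
					 struct ubi_work *wrk, int cancel)
{
	if (cancel) {
		/* The work is being canceled - just release the work object */
		kfree(wrk);
		return 0;
	}

	/* ... do the actual job here ... */

	kfree(wrk);
	return 0;
}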
730 * wear_leveling_worker - wear-leveling worker function.
731 * @ubi: UBI device description object
732 * @wrk: the work object
733 * @cancel: non-zero if the worker has to free memory and exit
 * This function copies a more worn-out physical eraseblock to a less worn-out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
742 int err, scrubbing = 0, torture = 0;
743 struct ubi_wl_prot_entry *uninitialized_var(pe);
744 struct ubi_wl_entry *e1, *e2;
745 struct ubi_vid_hdr *vid_hdr;
751 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
755 mutex_lock(&ubi->move_mutex);
756 spin_lock(&ubi->wl_lock);
757 ubi_assert(!ubi->move_from && !ubi->move_to);
758 ubi_assert(!ubi->move_to_put);
760 if (!ubi->free.rb_node ||
761 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
763 * No free physical eraseblocks? Well, they must be waiting in
764 * the queue to be erased. Cancel movement - it will be
765 * triggered again when a free physical eraseblock appears.
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
777 if (!ubi->scrub.rb_node) {
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
783 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
784 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
786 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
787 dbg_wl("no WL needed: min used EC %d, max free EC %d",
791 paranoid_check_in_wl_tree(e1, &ubi->used);
792 rb_erase(&e1->u.rb, &ubi->used);
793 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
794 e1->pnum, e1->ec, e2->pnum, e2->ec);
796 /* Perform scrubbing */
798 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
799 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
800 paranoid_check_in_wl_tree(e1, &ubi->scrub);
801 rb_erase(&e1->u.rb, &ubi->scrub);
802 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
805 paranoid_check_in_wl_tree(e2, &ubi->free);
806 rb_erase(&e2->u.rb, &ubi->free);
809 spin_unlock(&ubi->wl_lock);
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for the move to be finished if the PEB
	 * which is being moved was unmapped.
822 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
823 if (err && err != UBI_IO_BITFLIPS) {
824 if (err == UBI_IO_PEB_FREE) {
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given, so we have a situation when it did not
			 * have a chance to write it down because it was
			 * preempted. Just re-schedule the work, so that next
			 * time it will likely have the VID header in place.
			dbg_wl("PEB %d has no VID header", e1->pnum);
837 ubi_err("error %d while reading VID header from PEB %d",
844 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
851 /* Target PEB write error, torture it */
857 * The LEB has not been moved because the volume is being
858 * deleted or the PEB has been put meanwhile. We should prevent
859 * this PEB from being selected for wear-leveling movement
860 * again, so put it to the protection tree.
863 dbg_wl("canceled moving PEB %d", e1->pnum);
864 ubi_assert(err == 1);
866 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
872 ubi_free_vid_hdr(ubi, vid_hdr);
875 spin_lock(&ubi->wl_lock);
876 prot_tree_add(ubi, e1, pe, U_PROTECTION);
877 ubi_assert(!ubi->move_to_put);
878 ubi->move_from = ubi->move_to = NULL;
879 ubi->wl_scheduled = 0;
880 spin_unlock(&ubi->wl_lock);
883 err = schedule_erase(ubi, e2, 0);
886 mutex_unlock(&ubi->move_mutex);
890 /* The PEB has been successfully moved */
891 ubi_free_vid_hdr(ubi, vid_hdr);
894 ubi_msg("scrubbed PEB %d, data moved to PEB %d",
897 spin_lock(&ubi->wl_lock);
898 if (!ubi->move_to_put) {
899 wl_tree_add(e2, &ubi->used);
902 ubi->move_from = ubi->move_to = NULL;
903 ubi->move_to_put = ubi->wl_scheduled = 0;
904 spin_unlock(&ubi->wl_lock);
906 err = schedule_erase(ubi, e1, 0);
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
918 err = schedule_erase(ubi, e2, 0);
924 mutex_unlock(&ubi->move_mutex);
	 * For some reason the LEB was not moved - it might be an error, it
	 * might be something else. @e1 was not changed, so return it back.
	 * @e2 might have been changed, so schedule it for erasure.
933 dbg_wl("canceled moving PEB %d", e1->pnum);
934 ubi_free_vid_hdr(ubi, vid_hdr);
936 spin_lock(&ubi->wl_lock);
938 wl_tree_add(e1, &ubi->scrub);
940 wl_tree_add(e1, &ubi->used);
941 ubi_assert(!ubi->move_to_put);
942 ubi->move_from = ubi->move_to = NULL;
943 ubi->wl_scheduled = 0;
944 spin_unlock(&ubi->wl_lock);
947 err = schedule_erase(ubi, e2, torture);
951 mutex_unlock(&ubi->move_mutex);
955 ubi_err("error %d while moving PEB %d to PEB %d",
956 err, e1->pnum, e2->pnum);
958 ubi_free_vid_hdr(ubi, vid_hdr);
959 spin_lock(&ubi->wl_lock);
960 ubi->move_from = ubi->move_to = NULL;
961 ubi->move_to_put = ubi->wl_scheduled = 0;
962 spin_unlock(&ubi->wl_lock);
965 kmem_cache_free(ubi_wl_entry_slab, e1);
967 kmem_cache_free(ubi_wl_entry_slab, e2);
970 mutex_unlock(&ubi->move_mutex);
974 ubi->wl_scheduled = 0;
975 spin_unlock(&ubi->wl_lock);
976 mutex_unlock(&ubi->move_mutex);
977 ubi_free_vid_hdr(ubi, vid_hdr);
982 * ensure_wear_leveling - schedule wear-leveling if it is needed.
983 * @ubi: UBI device description object
985 * This function checks if it is time to start wear-leveling and schedules it
986 * if yes. This function returns zero in case of success and a negative error
987 * code in case of failure.
989 static int ensure_wear_leveling(struct ubi_device *ubi)
992 struct ubi_wl_entry *e1;
993 struct ubi_wl_entry *e2;
994 struct ubi_work *wrk;
996 spin_lock(&ubi->wl_lock);
997 if (ubi->wl_scheduled)
998 /* Wear-leveling is already in the work queue */
	 * If the @ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
1005 if (!ubi->scrub.rb_node) {
1006 if (!ubi->used.rb_node || !ubi->free.rb_node)
1007 /* No physical eraseblocks - no deal */
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
1016 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1017 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
1019 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1021 dbg_wl("schedule wear-leveling");
1023 dbg_wl("schedule scrubbing");
1025 ubi->wl_scheduled = 1;
1026 spin_unlock(&ubi->wl_lock);
1028 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1034 wrk->func = &wear_leveling_worker;
1035 schedule_ubi_work(ubi, wrk);
1039 spin_lock(&ubi->wl_lock);
1040 ubi->wl_scheduled = 0;
1042 spin_unlock(&ubi->wl_lock);
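/*
 * Worked example (illustrative, not from the original file): with the usual
 * default of 4096 for CONFIG_MTD_UBI_WL_THRESHOLD, wear-leveling is scheduled
 * above once the erase counter of the free PEB picked as a target exceeds the
 * lowest erase counter among the used PEBs by at least 4096.
 */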
1047 * erase_worker - physical eraseblock erase worker function.
1048 * @ubi: UBI device description object
1049 * @wl_wrk: the work object
1050 * @cancel: non-zero if the worker has to free memory and exit
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1060 struct ubi_wl_entry *e = wl_wrk->e;
1061 int pnum = e->pnum, err, need;
1064 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1066 kmem_cache_free(ubi_wl_entry_slab, e);
1070 dbg_wl("erase PEB %d EC %d", pnum, e->ec);
1072 err = sync_erase(ubi, e, wl_wrk->torture);
1074 /* Fine, we've erased it successfully */
1077 spin_lock(&ubi->wl_lock);
1079 wl_tree_add(e, &ubi->free);
1080 spin_unlock(&ubi->wl_lock);
		 * One more erase operation has happened, take care of the
		 * protected physical eraseblocks.
1086 check_protection_over(ubi);
1088 /* And take care about wear-leveling */
1089 err = ensure_wear_leveling(ubi);
1093 ubi_err("failed to erase PEB %d, error %d", pnum, err);
1095 kmem_cache_free(ubi_wl_entry_slab, e);
1097 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
		/* Re-schedule the PEB for erasure */
1102 err1 = schedule_erase(ubi, e, 0);
1108 } else if (err != -EIO) {
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to RO mode.
1117 /* It is %-EIO, the PEB went bad */
1119 if (!ubi->bad_allowed) {
1120 ubi_err("bad physical eraseblock %d detected", pnum);
1124 spin_lock(&ubi->volumes_lock);
1125 need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1127 need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1128 ubi->avail_pebs -= need;
1129 ubi->rsvd_pebs += need;
1130 ubi->beb_rsvd_pebs += need;
1132 ubi_msg("reserve more %d PEBs", need);
1135 if (ubi->beb_rsvd_pebs == 0) {
1136 spin_unlock(&ubi->volumes_lock);
1137 ubi_err("no reserved physical eraseblocks");
1141 spin_unlock(&ubi->volumes_lock);
1142 ubi_msg("mark PEB %d as bad", pnum);
1144 err = ubi_io_mark_bad(ubi, pnum);
1148 spin_lock(&ubi->volumes_lock);
1149 ubi->beb_rsvd_pebs -= 1;
1150 ubi->bad_peb_count += 1;
1151 ubi->good_peb_count -= 1;
1152 ubi_calculate_reserved(ubi);
1153 if (ubi->beb_rsvd_pebs == 0)
1154 ubi_warn("last PEB from the reserved pool was used");
1155 spin_unlock(&ubi->volumes_lock);
1165 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1166 * @ubi: UBI device description object
1167 * @pnum: physical eraseblock to return
1168 * @torture: if this physical eraseblock has to be tortured
1170 * This function is called to return physical eraseblock @pnum to the pool of
1171 * free physical eraseblocks. The @torture flag has to be set if an I/O error
1172 * occurred to this @pnum and it has to be tested. This function returns zero
1173 * in case of success, and a negative error code in case of failure.
1175 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1178 struct ubi_wl_entry *e;
1180 dbg_wl("PEB %d", pnum);
1181 ubi_assert(pnum >= 0);
1182 ubi_assert(pnum < ubi->peb_count);
1185 spin_lock(&ubi->wl_lock);
1186 e = ubi->lookuptbl[pnum];
1187 if (e == ubi->move_from) {
1189 * User is putting the physical eraseblock which was selected to
1190 * be moved. It will be scheduled for erasure in the
1191 * wear-leveling worker.
1193 dbg_wl("PEB %d is being moved, wait", pnum);
1194 spin_unlock(&ubi->wl_lock);
1196 /* Wait for the WL worker by taking the @ubi->move_mutex */
1197 mutex_lock(&ubi->move_mutex);
1198 mutex_unlock(&ubi->move_mutex);
1200 } else if (e == ubi->move_to) {
		 * User is putting the physical eraseblock which was selected
		 * as the target to move data to. It may happen if the EBA
1204 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1205 * but the WL sub-system has not put the PEB to the "used" tree
1206 * yet, but it is about to do this. So we just set a flag which
1207 * will tell the WL worker that the PEB is not needed anymore
1208 * and should be scheduled for erasure.
1210 dbg_wl("PEB %d is the target of data moving", pnum);
1211 ubi_assert(!ubi->move_to_put);
1212 ubi->move_to_put = 1;
1213 spin_unlock(&ubi->wl_lock);
1216 if (in_wl_tree(e, &ubi->used)) {
1217 paranoid_check_in_wl_tree(e, &ubi->used);
1218 rb_erase(&e->u.rb, &ubi->used);
1219 } else if (in_wl_tree(e, &ubi->scrub)) {
1220 paranoid_check_in_wl_tree(e, &ubi->scrub);
1221 rb_erase(&e->u.rb, &ubi->scrub);
1223 err = prot_tree_del(ubi, e->pnum);
1225 ubi_err("PEB %d not found", pnum);
1227 spin_unlock(&ubi->wl_lock);
1232 spin_unlock(&ubi->wl_lock);
1234 err = schedule_erase(ubi, e, torture);
1236 spin_lock(&ubi->wl_lock);
1237 wl_tree_add(e, &ubi->used);
1238 spin_unlock(&ubi->wl_lock);
1245 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1246 * @ubi: UBI device description object
1247 * @pnum: the physical eraseblock to schedule
1249 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in the background. This function returns zero in
 * case of success and a negative error code in case of failure.
1254 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1256 struct ubi_wl_entry *e;
1258 dbg_msg("schedule PEB %d for scrubbing", pnum);
1261 spin_lock(&ubi->wl_lock);
1262 e = ubi->lookuptbl[pnum];
1263 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
1264 spin_unlock(&ubi->wl_lock);
1268 if (e == ubi->move_to) {
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted into the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
1275 spin_unlock(&ubi->wl_lock);
1276 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1281 if (in_wl_tree(e, &ubi->used)) {
1282 paranoid_check_in_wl_tree(e, &ubi->used);
1283 rb_erase(&e->u.rb, &ubi->used);
1287 err = prot_tree_del(ubi, e->pnum);
1289 ubi_err("PEB %d not found", pnum);
1291 spin_unlock(&ubi->wl_lock);
1296 wl_tree_add(e, &ubi->scrub);
1297 spin_unlock(&ubi->wl_lock);
1300 * Technically scrubbing is the same as wear-leveling, so it is done
1303 return ensure_wear_leveling(ubi);
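/*
 * Illustrative sketch, not part of the original file: how a reader typically
 * reacts to a bit-flip report (the function name is made up). In the real
 * driver the EBA sub-system does this after a read reports %UBI_IO_BITFLIPS.
 */
static int __maybe_unused example_handle_bitflip(struct ubi_device *ubi,
						 int pnum)
{
	/* The data was read back fine, but with bit-flips - ask WL to scrub */
	return ubi_wl_scrub_peb(ubi, pnum);
}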
1307 * ubi_wl_flush - flush all pending works.
1308 * @ubi: UBI device description object
 * This function returns zero in case of success and a negative error code in
 * case of failure.
int ubi_wl_flush(struct ubi_device *ubi)
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
1321 dbg_wl("flush (%d pending works)", ubi->works_count);
1322 while (ubi->works_count) {
	 * Make sure all the works which have been done in parallel are
	 * finished.
1332 down_write(&ubi->work_sem);
1333 up_write(&ubi->work_sem);
	 * And in case the last one was the WL worker and it canceled the LEB
	 * movement, flush again.
1339 while (ubi->works_count) {
1340 dbg_wl("flush more (%d pending works)", ubi->works_count);
1350 * tree_destroy - destroy an RB-tree.
1351 * @root: the root of the tree to destroy
1353 static void tree_destroy(struct rb_root *root)
1356 struct ubi_wl_entry *e;
1362 else if (rb->rb_right)
1365 e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1369 if (rb->rb_left == &e->u.rb)
1372 rb->rb_right = NULL;
1375 kmem_cache_free(ubi_wl_entry_slab, e);
1381 * ubi_thread - UBI background thread.
1382 * @u: the UBI device description object pointer
1384 int ubi_thread(void *u)
1387 struct ubi_device *ubi = u;
1389 ubi_msg("background thread \"%s\" started, PID %d",
1390 ubi->bgt_name, task_pid_nr(current));
1396 if (kthread_should_stop())
1399 if (try_to_freeze())
1402 spin_lock(&ubi->wl_lock);
1403 if (list_empty(&ubi->works) || ubi->ro_mode ||
1404 !ubi->thread_enabled) {
1405 set_current_state(TASK_INTERRUPTIBLE);
1406 spin_unlock(&ubi->wl_lock);
1410 spin_unlock(&ubi->wl_lock);
1414 ubi_err("%s: work failed with error code %d",
1415 ubi->bgt_name, err);
1416 if (failures++ > WL_MAX_FAILURES) {
1418 * Too many failures, disable the thread and
1419 * switch to read-only mode.
1421 ubi_msg("%s: %d consecutive failures",
1422 ubi->bgt_name, WL_MAX_FAILURES);
1424 ubi->thread_enabled = 0;
1433 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1438 * cancel_pending - cancel all pending works.
1439 * @ubi: UBI device description object
1441 static void cancel_pending(struct ubi_device *ubi)
1443 while (!list_empty(&ubi->works)) {
1444 struct ubi_work *wrk;
1446 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1447 list_del(&wrk->list);
1448 wrk->func(ubi, wrk, 1);
1449 ubi->works_count -= 1;
1450 ubi_assert(ubi->works_count >= 0);
1455 * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
1456 * @ubi: UBI device description object
1457 * @si: scanning information
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1465 struct rb_node *rb1, *rb2;
1466 struct ubi_scan_volume *sv;
1467 struct ubi_scan_leb *seb, *tmp;
1468 struct ubi_wl_entry *e;
1471 ubi->used = ubi->free = ubi->scrub = RB_ROOT;
1472 ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
1473 spin_lock_init(&ubi->wl_lock);
1474 mutex_init(&ubi->move_mutex);
1475 init_rwsem(&ubi->work_sem);
1476 ubi->max_ec = si->max_ec;
1477 INIT_LIST_HEAD(&ubi->works);
1479 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1482 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1483 if (!ubi->lookuptbl)
1486 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1489 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1493 e->pnum = seb->pnum;
1495 ubi->lookuptbl[e->pnum] = e;
1496 if (schedule_erase(ubi, e, 0)) {
1497 kmem_cache_free(ubi_wl_entry_slab, e);
1502 list_for_each_entry(seb, &si->free, u.list) {
1505 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1509 e->pnum = seb->pnum;
1511 ubi_assert(e->ec >= 0);
1512 wl_tree_add(e, &ubi->free);
1513 ubi->lookuptbl[e->pnum] = e;
1516 list_for_each_entry(seb, &si->corr, u.list) {
1519 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1523 e->pnum = seb->pnum;
1525 ubi->lookuptbl[e->pnum] = e;
1526 if (schedule_erase(ubi, e, 0)) {
1527 kmem_cache_free(ubi_wl_entry_slab, e);
1532 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1533 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1536 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1540 e->pnum = seb->pnum;
1542 ubi->lookuptbl[e->pnum] = e;
1544 dbg_wl("add PEB %d EC %d to the used tree",
1546 wl_tree_add(e, &ubi->used);
1548 dbg_wl("add PEB %d EC %d to the scrub tree",
1550 wl_tree_add(e, &ubi->scrub);
1555 if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
1560 ubi->avail_pebs -= WL_RESERVED_PEBS;
1561 ubi->rsvd_pebs += WL_RESERVED_PEBS;
1563 /* Schedule wear-leveling if needed */
1564 err = ensure_wear_leveling(ubi);
1571 cancel_pending(ubi);
1572 tree_destroy(&ubi->used);
1573 tree_destroy(&ubi->free);
1574 tree_destroy(&ubi->scrub);
1575 kfree(ubi->lookuptbl);
1580 * protection_trees_destroy - destroy the protection RB-trees.
1581 * @ubi: UBI device description object
1583 static void protection_trees_destroy(struct ubi_device *ubi)
1586 struct ubi_wl_prot_entry *pe;
1588 rb = ubi->prot.aec.rb_node;
1592 else if (rb->rb_right)
1595 pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
1599 if (rb->rb_left == &pe->rb_aec)
1602 rb->rb_right = NULL;
1605 kmem_cache_free(ubi_wl_entry_slab, pe->e);
1612 * ubi_wl_close - close the wear-leveling sub-system.
1613 * @ubi: UBI device description object
1615 void ubi_wl_close(struct ubi_device *ubi)
1617 dbg_wl("close the WL sub-system");
1618 cancel_pending(ubi);
1619 protection_trees_destroy(ubi);
1620 tree_destroy(&ubi->used);
1621 tree_destroy(&ubi->free);
1622 tree_destroy(&ubi->scrub);
1623 kfree(ubi->lookuptbl);
1626 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
1629 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
1630 * @ubi: UBI device description object
1631 * @pnum: the physical eraseblock number to check
1632 * @ec: the erase counter to check
 * This function returns zero if the erase counter of physical eraseblock @pnum
 * equals @ec, %1 if not, and a negative error code if an error occurred.
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
1642 struct ubi_ec_hdr *ec_hdr;
1644 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1648 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1649 if (err && err != UBI_IO_BITFLIPS) {
1650 /* The header does not have to exist */
1655 read_ec = be64_to_cpu(ec_hdr->ec);
1656 if (ec != read_ec) {
1657 ubi_err("paranoid check failed for PEB %d", pnum);
1658 ubi_err("read EC is %lld, should be %d", read_ec, ec);
1659 ubi_dbg_dump_stack();
1670 * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
1671 * @e: the wear-leveling entry to check
1672 * @root: the root of the tree
 * This function returns zero if @e is in the @root RB-tree and %1 if it is
 * not.
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root)
1680 if (in_wl_tree(e, root))
	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
1685 ubi_dbg_dump_stack();
1689 #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */