UBI: fix comment
index a5a9b8d873025ad5e1ac39fe8c38d377487e5619..7d32f71d6f1e93c855b7b2a4c4489ca1cf9ea47a 100644 (file)
  */
 #define WL_MAX_FAILURES 32
 
-/**
- * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
- * @ec: erase counter
- * @pnum: physical eraseblock number
- *
- * Each physical eraseblock has a corresponding &struct wl_entry object which
- * may be kept in different RB-trees.
- */
-struct ubi_wl_entry {
-       struct rb_node rb;
-       int ec;
-       int pnum;
-};
-
 /**
  * struct ubi_wl_prot_entry - PEB protection entry.
  * @rb_pnum: link in the @wl->prot.pnum RB-tree
@@ -208,7 +193,7 @@ struct ubi_work {
 };
 
 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec);
+static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
                                     struct rb_root *root);
 #else
@@ -216,20 +201,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 #define paranoid_check_in_wl_tree(e, root)
 #endif
 
-/* Slab cache for wear-leveling entries */
-static struct kmem_cache *wl_entries_slab;
-
-/**
- * tree_empty - a helper function to check if an RB-tree is empty.
- * @root: the root of the tree
- *
- * This function returns non-zero if the RB-tree is empty and zero if not.
- */
-static inline int tree_empty(struct rb_root *root)
-{
-       return root->rb_node == NULL;
-}
-
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
@@ -266,45 +237,6 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
        rb_insert_color(&e->rb, root);
 }
 
-
-/*
- * Helper functions to add and delete wear-leveling entries from different
- * trees.
- */
-
-static void free_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
-{
-       wl_tree_add(e, &ubi->free);
-}
-static inline void used_tree_add(struct ubi_device *ubi,
-                                struct ubi_wl_entry *e)
-{
-       wl_tree_add(e, &ubi->used);
-}
-static inline void scrub_tree_add(struct ubi_device *ubi,
-                                 struct ubi_wl_entry *e)
-{
-       wl_tree_add(e, &ubi->scrub);
-}
-static inline void free_tree_del(struct ubi_device *ubi,
-                                struct ubi_wl_entry *e)
-{
-       paranoid_check_in_wl_tree(e, &ubi->free);
-       rb_erase(&e->rb, &ubi->free);
-}
-static inline void used_tree_del(struct ubi_device *ubi,
-                                struct ubi_wl_entry *e)
-{
-       paranoid_check_in_wl_tree(e, &ubi->used);
-       rb_erase(&e->rb, &ubi->used);
-}
-static inline void scrub_tree_del(struct ubi_device *ubi,
-                                 struct ubi_wl_entry *e)
-{
-       paranoid_check_in_wl_tree(e, &ubi->scrub);
-       rb_erase(&e->rb, &ubi->scrub);
-}
-
 /**
  * do_work - do one pending work.
  * @ubi: UBI device description object
@@ -317,15 +249,26 @@ static int do_work(struct ubi_device *ubi)
        int err;
        struct ubi_work *wrk;
 
-       spin_lock(&ubi->wl_lock);
+       cond_resched();
 
+       /*
+        * @ubi->work_sem is used to synchronize with the workers. Workers take
+        * it in read mode, so many of them may be doing work at a time. But
+        * the queue flush code has to be sure the whole queue of works is
+        * done, so it takes the semaphore in write mode.
+        */
+       down_read(&ubi->work_sem);
+       spin_lock(&ubi->wl_lock);
        if (list_empty(&ubi->works)) {
                spin_unlock(&ubi->wl_lock);
+               up_read(&ubi->work_sem);
                return 0;
        }
 
        wrk = list_entry(ubi->works.next, struct ubi_work, list);
        list_del(&wrk->list);
+       ubi->works_count -= 1;
+       ubi_assert(ubi->works_count >= 0);
        spin_unlock(&ubi->wl_lock);
 
        /*
@@ -336,11 +279,8 @@ static int do_work(struct ubi_device *ubi)
        err = wrk->func(ubi, wrk, 0);
        if (err)
                ubi_err("work failed with error code %d", err);
+       up_read(&ubi->work_sem);
 
-       spin_lock(&ubi->wl_lock);
-       ubi->works_count -= 1;
-       ubi_assert(ubi->works_count >= 0);
-       spin_unlock(&ubi->wl_lock);
        return err;
 }
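
The comment added above describes a reader/writer barrier: each worker takes @ubi->work_sem shared while it runs one work item, and the flush path takes it exclusively to wait until every in-flight work has finished. A minimal user-space sketch of that pattern, using plain pthreads and illustrative names only (not UBI code):

/*
 * Sketch only: workers take the lock shared, so several may run at once;
 * the flush path takes it exclusively, which blocks until no worker
 * holds it any more.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;

static void *worker(void *arg)
{
	pthread_rwlock_rdlock(&work_sem);   /* like down_read(&ubi->work_sem) */
	printf("worker %ld: doing one pending work\n", (long)arg);
	pthread_rwlock_unlock(&work_sem);   /* like up_read() */
	return NULL;
}

static void flush_barrier(void)
{
	/* Like down_write()/up_write(): returns only when all works that
	 * started earlier have completed. */
	pthread_rwlock_wrlock(&work_sem);
	pthread_rwlock_unlock(&work_sem);
}

int main(void)
{
	pthread_t t[4];

	for (long i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	flush_barrier();
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}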
 
@@ -358,7 +298,7 @@ static int produce_free_peb(struct ubi_device *ubi)
        int err;
 
        spin_lock(&ubi->wl_lock);
-       while (tree_empty(&ubi->free)) {
+       while (!ubi->free.rb_node) {
                spin_unlock(&ubi->wl_lock);
 
                dbg_wl("do one work synchronously");
@@ -508,13 +448,13 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
        ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
                   dtype == UBI_UNKNOWN);
 
-       pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_KERNEL);
+       pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
        if (!pe)
                return -ENOMEM;
 
 retry:
        spin_lock(&ubi->wl_lock);
-       if (tree_empty(&ubi->free)) {
+       if (!ubi->free.rb_node) {
                if (ubi->works_count == 0) {
                        ubi_assert(list_empty(&ubi->works));
                        ubi_err("no free eraseblocks");
@@ -585,7 +525,8 @@ retry:
         * Move the physical eraseblock to the protection trees where it will
         * be protected from being moved for some time.
         */
-       free_tree_del(ubi, e);
+       paranoid_check_in_wl_tree(e, &ubi->free);
+       rb_erase(&e->rb, &ubi->free);
        prot_tree_add(ubi, e, pe, protect);
 
        dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
@@ -598,8 +539,12 @@ retry:
  * prot_tree_del - remove a physical eraseblock from the protection trees
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock to remove
+ *
+ * This function removes PEB @pnum from the protection trees and returns zero
+ * in case of success and %-ENODEV if the PEB was not found in the protection
+ * trees.
  */
-static void prot_tree_del(struct ubi_device *ubi, int pnum)
+static int prot_tree_del(struct ubi_device *ubi, int pnum)
 {
        struct rb_node *p;
        struct ubi_wl_prot_entry *pe = NULL;
@@ -610,7 +555,7 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
                pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
 
                if (pnum == pe->e->pnum)
-                       break;
+                       goto found;
 
                if (pnum < pe->e->pnum)
                        p = p->rb_left;
@@ -618,10 +563,14 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
                        p = p->rb_right;
        }
 
+       return -ENODEV;
+
+found:
        ubi_assert(pe->e->pnum == pnum);
        rb_erase(&pe->rb_aec, &ubi->prot.aec);
        rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
        kfree(pe);
+       return 0;
 }
 
 /**
@@ -645,7 +594,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int tortur
        if (err > 0)
                return -EINVAL;
 
-       ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+       ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
        if (!ec_hdr)
                return -ENOMEM;
 
@@ -704,7 +653,7 @@ static void check_protection_over(struct ubi_device *ubi)
         */
        while (1) {
                spin_lock(&ubi->wl_lock);
-               if (tree_empty(&ubi->prot.aec)) {
+               if (!ubi->prot.aec.rb_node) {
                        spin_unlock(&ubi->wl_lock);
                        break;
                }
@@ -721,7 +670,7 @@ static void check_protection_over(struct ubi_device *ubi)
                       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
                rb_erase(&pe->rb_aec, &ubi->prot.aec);
                rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
-               used_tree_add(ubi, pe->e);
+               wl_tree_add(pe->e, &ubi->used);
                spin_unlock(&ubi->wl_lock);
 
                kfree(pe);
@@ -768,7 +717,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
        dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
               e->pnum, e->ec, torture);
 
-       wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
+       wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wl_wrk)
                return -ENOMEM;
 
@@ -793,7 +742,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                                int cancel)
 {
-       int err, put = 0;
+       int err, put = 0, scrubbing = 0, protect = 0;
+       struct ubi_wl_prot_entry *pe;
        struct ubi_wl_entry *e1, *e2;
        struct ubi_vid_hdr *vid_hdr;
 
@@ -802,25 +752,21 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
        if (cancel)
                return 0;
 
-       vid_hdr = ubi_zalloc_vid_hdr(ubi);
+       vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
        if (!vid_hdr)
                return -ENOMEM;
 
+       mutex_lock(&ubi->move_mutex);
        spin_lock(&ubi->wl_lock);
+       ubi_assert(!ubi->move_from && !ubi->move_to);
+       ubi_assert(!ubi->move_to_put);
 
-       /*
-        * Only one WL worker at a time is supported at this implementation, so
-        * make sure a PEB is not being moved already.
-        */
-       if (ubi->move_to || tree_empty(&ubi->free) ||
-           (tree_empty(&ubi->used) && tree_empty(&ubi->scrub))) {
+       if (!ubi->free.rb_node ||
+           (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
                /*
-                * Only one WL worker at a time is supported at this
-                * implementation, so if a LEB is already being moved, cancel.
-                *
-                * No free physical eraseblocks? Well, we cancel wear-leveling
-                * then. It will be triggered again when a free physical
-                * eraseblock appears.
+                * No free physical eraseblocks? Well, they must be waiting in
+                * the queue to be erased. Cancel movement - it will be
+                * triggered again when a free physical eraseblock appears.
                 *
                 * No used physical eraseblocks? They must be temporarily
                 * protected from being moved. They will be moved to the
@@ -828,14 +774,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                 * triggered again.
                 */
                dbg_wl("cancel WL, a list is empty: free %d, used %d",
-                      tree_empty(&ubi->free), tree_empty(&ubi->used));
-               ubi->wl_scheduled = 0;
-               spin_unlock(&ubi->wl_lock);
-               ubi_free_vid_hdr(ubi, vid_hdr);
-               return 0;
+                      !ubi->free.rb_node, !ubi->used.rb_node);
+               goto out_cancel;
        }
 
-       if (tree_empty(&ubi->scrub)) {
+       if (!ubi->scrub.rb_node) {
                /*
                 * Now pick the least worn-out used physical eraseblock and a
                 * highly worn-out free physical eraseblock. If the erase
@@ -847,24 +790,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
                        dbg_wl("no WL needed: min used EC %d, max free EC %d",
                               e1->ec, e2->ec);
-                       ubi->wl_scheduled = 0;
-                       spin_unlock(&ubi->wl_lock);
-                       ubi_free_vid_hdr(ubi, vid_hdr);
-                       return 0;
+                       goto out_cancel;
                }
-               used_tree_del(ubi, e1);
+               paranoid_check_in_wl_tree(e1, &ubi->used);
+               rb_erase(&e1->rb, &ubi->used);
                dbg_wl("move PEB %d EC %d to PEB %d EC %d",
                       e1->pnum, e1->ec, e2->pnum, e2->ec);
        } else {
+               /* Perform scrubbing */
+               scrubbing = 1;
                e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
                e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
-               scrub_tree_del(ubi, e1);
+               paranoid_check_in_wl_tree(e1, &ubi->scrub);
+               rb_erase(&e1->rb, &ubi->scrub);
                dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
        }
 
-       free_tree_del(ubi, e2);
-       ubi_assert(!ubi->move_from && !ubi->move_to);
-       ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
+       paranoid_check_in_wl_tree(e2, &ubi->free);
+       rb_erase(&e2->rb, &ubi->free);
        ubi->move_from = e1;
        ubi->move_to = e2;
        spin_unlock(&ubi->wl_lock);
@@ -874,6 +817,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
         * We so far do not know which logical eraseblock our physical
         * eraseblock (@e1) belongs to. We have to read the volume identifier
         * header first.
+        *
+        * Note, we are protected from this PEB being unmapped and erased. The
+        * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
+        * which is being moved was unmapped.
         */
 
        err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
@@ -888,32 +835,51 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                         * likely have the VID header in place.
                         */
                        dbg_wl("PEB %d has no VID header", e1->pnum);
-                       err = 0;
-               } else {
-                       ubi_err("error %d while reading VID header from PEB %d",
-                               err, e1->pnum);
-                       if (err > 0)
-                               err = -EIO;
+                       goto out_not_moved;
                }
-               goto error;
+
+               ubi_err("error %d while reading VID header from PEB %d",
+                       err, e1->pnum);
+               if (err > 0)
+                       err = -EIO;
+               goto out_error;
        }
 
        err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
        if (err) {
-               if (err == UBI_IO_BITFLIPS)
-                       err = 0;
-               goto error;
+
+               if (err < 0)
+                       goto out_error;
+               if (err == 1)
+                       goto out_not_moved;
+
+               /*
+                * For some reason the LEB was not moved - it might be because
+                * the volume is being deleted. We should prevent this PEB from
+                * being selected for wear-leveling movement for some "time",
+                * so put it into the protection tree.
+                */
+
+               dbg_wl("cancelled moving PEB %d", e1->pnum);
+               pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
+               if (!pe) {
+                       err = -ENOMEM;
+                       goto out_error;
+               }
+
+               protect = 1;
        }
 
        ubi_free_vid_hdr(ubi, vid_hdr);
        spin_lock(&ubi->wl_lock);
+       if (protect)
+               prot_tree_add(ubi, e1, pe, protect);
        if (!ubi->move_to_put)
-               used_tree_add(ubi, e2);
+               wl_tree_add(e2, &ubi->used);
        else
                put = 1;
        ubi->move_from = ubi->move_to = NULL;
-       ubi->move_from_put = ubi->move_to_put = 0;
-       ubi->wl_scheduled = 0;
+       ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
 
        if (put) {
@@ -923,62 +889,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                 */
                dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
                err = schedule_erase(ubi, e2, 0);
-               if (err) {
-                       kmem_cache_free(wl_entries_slab, e2);
-                       ubi_ro_mode(ubi);
-               }
+               if (err)
+                       goto out_error;
        }
 
-       err = schedule_erase(ubi, e1, 0);
-       if (err) {
-               kmem_cache_free(wl_entries_slab, e1);
-               ubi_ro_mode(ubi);
+       if (!protect) {
+               err = schedule_erase(ubi, e1, 0);
+               if (err)
+                       goto out_error;
        }
 
+
        dbg_wl("done");
-       return err;
+       mutex_unlock(&ubi->move_mutex);
+       return 0;
 
        /*
-        * Some error occurred. @e1 was not changed, so return it back. @e2
-        * might be changed, schedule it for erasure.
+        * For some reason the LEB was not moved - might be an error, might be
+        * something else. @e1 was not changed, so return it back. @e2 might
+        * be changed, schedule it for erasure.
         */
-error:
-       if (err)
-               dbg_wl("error %d occurred, cancel operation", err);
-       ubi_assert(err <= 0);
-
+out_not_moved:
        ubi_free_vid_hdr(ubi, vid_hdr);
        spin_lock(&ubi->wl_lock);
-       ubi->wl_scheduled = 0;
-       if (ubi->move_from_put)
-               put = 1;
+       if (scrubbing)
+               wl_tree_add(e1, &ubi->scrub);
        else
-               used_tree_add(ubi, e1);
+               wl_tree_add(e1, &ubi->used);
        ubi->move_from = ubi->move_to = NULL;
-       ubi->move_from_put = ubi->move_to_put = 0;
+       ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
 
-       if (put) {
-               /*
-                * Well, the target PEB was put meanwhile, schedule it for
-                * erasure.
-                */
-               dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
-               err = schedule_erase(ubi, e1, 0);
-               if (err) {
-                       kmem_cache_free(wl_entries_slab, e1);
-                       ubi_ro_mode(ubi);
-               }
-       }
-
        err = schedule_erase(ubi, e2, 0);
-       if (err) {
-               kmem_cache_free(wl_entries_slab, e2);
-               ubi_ro_mode(ubi);
-       }
+       if (err)
+               goto out_error;
+
+       mutex_unlock(&ubi->move_mutex);
+       return 0;
+
+out_error:
+       ubi_err("error %d while moving PEB %d to PEB %d",
+               err, e1->pnum, e2->pnum);
 
-       yield();
+       ubi_free_vid_hdr(ubi, vid_hdr);
+       spin_lock(&ubi->wl_lock);
+       ubi->move_from = ubi->move_to = NULL;
+       ubi->move_to_put = ubi->wl_scheduled = 0;
+       spin_unlock(&ubi->wl_lock);
+
+       kmem_cache_free(ubi_wl_entry_slab, e1);
+       kmem_cache_free(ubi_wl_entry_slab, e2);
+       ubi_ro_mode(ubi);
+
+       mutex_unlock(&ubi->move_mutex);
        return err;
+
+out_cancel:
+       ubi->wl_scheduled = 0;
+       spin_unlock(&ubi->wl_lock);
+       mutex_unlock(&ubi->move_mutex);
+       ubi_free_vid_hdr(ubi, vid_hdr);
+       return 0;
 }
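
The reworked wear_leveling_worker() above funnels every exit through dedicated labels (out_not_moved, out_error, out_cancel), each releasing exactly the state that path still owns. A stand-alone sketch of the same unwind style, with purely illustrative names and only two of the paths shown (not UBI code):

/* Sketch only: each label cleans up what the function still holds. */
#include <stdio.h>
#include <stdlib.h>

static int move_block(int outcome)
{
	char *buf = malloc(64);		/* stands in for the VID header buffer */
	int err = 0;

	if (!buf)
		return -1;

	if (outcome == 1)
		goto out_not_moved;	/* benign: source block stays put */
	if (outcome == 2) {
		err = -5;
		goto out_error;		/* fatal: report and bail out */
	}

	printf("block moved\n");
	free(buf);
	return 0;

out_not_moved:
	printf("move cancelled, will retry later\n");
	free(buf);
	return 0;

out_error:
	fprintf(stderr, "move failed: error %d\n", err);
	free(buf);
	return err;
}

int main(void)
{
	return move_block(0);
}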
 
 /**
@@ -1005,8 +976,8 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
         * If the ubi->scrub tree is not empty, scrubbing is needed, and the
         * WL worker has to be scheduled anyway.
         */
-       if (tree_empty(&ubi->scrub)) {
-               if (tree_empty(&ubi->used) || tree_empty(&ubi->free))
+       if (!ubi->scrub.rb_node) {
+               if (!ubi->used.rb_node || !ubi->free.rb_node)
                        /* No physical eraseblocks - no deal */
                        goto out_unlock;
 
@@ -1028,7 +999,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
        ubi->wl_scheduled = 1;
        spin_unlock(&ubi->wl_lock);
 
-       wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
+       wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wrk) {
                err = -ENOMEM;
                goto out_cancel;
@@ -1066,7 +1037,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
        if (cancel) {
                dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
                kfree(wl_wrk);
-               kmem_cache_free(wl_entries_slab, e);
+               kmem_cache_free(ubi_wl_entry_slab, e);
                return 0;
        }
 
@@ -1079,7 +1050,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 
                spin_lock(&ubi->wl_lock);
                ubi->abs_ec += 1;
-               free_tree_add(ubi, e);
+               wl_tree_add(e, &ubi->free);
                spin_unlock(&ubi->wl_lock);
 
                /*
@@ -1093,8 +1064,9 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                return err;
        }
 
+       ubi_err("failed to erase PEB %d, error %d", pnum, err);
        kfree(wl_wrk);
-       kmem_cache_free(wl_entries_slab, e);
+       kmem_cache_free(ubi_wl_entry_slab, e);
 
        if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
            err == -EBUSY) {
@@ -1164,8 +1136,7 @@ out_ro:
 }
 
 /**
- * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling
- * unit.
+ * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
  * @ubi: UBI device description object
  * @pnum: physical eraseblock to return
  * @torture: if this physical eraseblock has to be tortured
@@ -1173,7 +1144,7 @@ out_ro:
  * This function is called to return physical eraseblock @pnum to the pool of
  * free physical eraseblocks. The @torture flag has to be set if an I/O error
  * occurred to this @pnum and it has to be tested. This function returns zero
- * in case of success and a negative error code in case of failure.
+ * in case of success, and a negative error code in case of failure.
  */
 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 {
@@ -1184,8 +1155,8 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
        ubi_assert(pnum >= 0);
        ubi_assert(pnum < ubi->peb_count);
 
+retry:
        spin_lock(&ubi->wl_lock);
-
        e = ubi->lookuptbl[pnum];
        if (e == ubi->move_from) {
                /*
@@ -1193,17 +1164,22 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
                 * be moved. It will be scheduled for erasure in the
                 * wear-leveling worker.
                 */
-               dbg_wl("PEB %d is being moved", pnum);
-               ubi_assert(!ubi->move_from_put);
-               ubi->move_from_put = 1;
+               dbg_wl("PEB %d is being moved, wait", pnum);
                spin_unlock(&ubi->wl_lock);
-               return 0;
+
+               /* Wait for the WL worker by taking the @ubi->move_mutex */
+               mutex_lock(&ubi->move_mutex);
+               mutex_unlock(&ubi->move_mutex);
+               goto retry;
        } else if (e == ubi->move_to) {
                /*
                 * User is putting the physical eraseblock which was selected
                 * as the target the data is moved to. It may happen if the EBA
-                * unit already re-mapped the LEB but the WL unit did has not
-                * put the PEB to the "used" tree.
+                * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
+                * the WL unit has not put the PEB to the "used" tree yet and
+                * is about to do this. So we just set a flag which will
+                * tell the WL worker that the PEB is not needed anymore and
+                * should be scheduled for erasure.
                 */
                dbg_wl("PEB %d is the target of data moving", pnum);
                ubi_assert(!ubi->move_to_put);
@@ -1211,19 +1187,28 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
                spin_unlock(&ubi->wl_lock);
                return 0;
        } else {
-               if (in_wl_tree(e, &ubi->used))
-                       used_tree_del(ubi, e);
-               else if (in_wl_tree(e, &ubi->scrub))
-                       scrub_tree_del(ubi, e);
-               else
-                       prot_tree_del(ubi, e->pnum);
+               if (in_wl_tree(e, &ubi->used)) {
+                       paranoid_check_in_wl_tree(e, &ubi->used);
+                       rb_erase(&e->rb, &ubi->used);
+               } else if (in_wl_tree(e, &ubi->scrub)) {
+                       paranoid_check_in_wl_tree(e, &ubi->scrub);
+                       rb_erase(&e->rb, &ubi->scrub);
+               } else {
+                       err = prot_tree_del(ubi, e->pnum);
+                       if (err) {
+                               ubi_err("PEB %d not found", pnum);
+                               ubi_ro_mode(ubi);
+                               spin_unlock(&ubi->wl_lock);
+                               return err;
+                       }
+               }
        }
        spin_unlock(&ubi->wl_lock);
 
        err = schedule_erase(ubi, e, torture);
        if (err) {
                spin_lock(&ubi->wl_lock);
-               used_tree_add(ubi, e);
+               wl_tree_add(e, &ubi->used);
                spin_unlock(&ubi->wl_lock);
        }
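
The retry logic added to ubi_wl_put_peb() above relies on the wear-leveling worker holding @ubi->move_mutex for the whole move: a caller that wants to erase the PEB being moved simply takes and releases the same mutex, which blocks until the move is done, and then looks the PEB up again. A user-space sketch of that "wait for the mover" idiom, with illustrative names (not UBI code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t move_mutex = PTHREAD_MUTEX_INITIALIZER;
static volatile int move_from = -1;	/* PEB being moved, -1 if none */

static void put_peb(int pnum)
{
retry:
	if (pnum == move_from) {	/* checked under ubi->wl_lock in UBI */
		/* Block until the worker drops move_mutex, then retry. */
		pthread_mutex_lock(&move_mutex);
		pthread_mutex_unlock(&move_mutex);
		goto retry;
	}
	printf("PEB %d scheduled for erasure\n", pnum);
}

int main(void)
{
	put_peb(42);		/* nothing is being moved, proceeds at once */
	return 0;
}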
 
@@ -1267,12 +1252,22 @@ retry:
                goto retry;
        }
 
-       if (in_wl_tree(e, &ubi->used))
-               used_tree_del(ubi, e);
-       else
-               prot_tree_del(ubi, pnum);
+       if (in_wl_tree(e, &ubi->used)) {
+               paranoid_check_in_wl_tree(e, &ubi->used);
+               rb_erase(&e->rb, &ubi->used);
+       } else {
+               int err;
 
-       scrub_tree_add(ubi, e);
+               err = prot_tree_del(ubi, e->pnum);
+               if (err) {
+                       ubi_err("PEB %d not found", pnum);
+                       ubi_ro_mode(ubi);
+                       spin_unlock(&ubi->wl_lock);
+                       return err;
+               }
+       }
+
+       wl_tree_add(e, &ubi->scrub);
        spin_unlock(&ubi->wl_lock);
 
        /*
@@ -1291,17 +1286,32 @@ retry:
  */
 int ubi_wl_flush(struct ubi_device *ubi)
 {
-       int err, pending_count;
-
-       pending_count = ubi->works_count;
-
-       dbg_wl("flush (%d pending works)", pending_count);
+       int err;
 
        /*
         * Erase while the pending works queue is not empty, but not more than
         * the number of currently pending works.
         */
-       while (pending_count-- > 0) {
+       dbg_wl("flush (%d pending works)", ubi->works_count);
+       while (ubi->works_count) {
+               err = do_work(ubi);
+               if (err)
+                       return err;
+       }
+
+       /*
+        * Make sure all the works which have been done in parallel are
+        * finished.
+        */
+       down_write(&ubi->work_sem);
+       up_write(&ubi->work_sem);
+
+       /*
+        * And in case the last one was the WL worker which cancelled the LEB
+        * movement, flush again.
+        */
+       while (ubi->works_count) {
+               dbg_wl("flush more (%d pending works)", ubi->works_count);
                err = do_work(ubi);
                if (err)
                        return err;
@@ -1336,7 +1346,7 @@ static void tree_destroy(struct rb_root *root)
                                        rb->rb_right = NULL;
                        }
 
-                       kmem_cache_free(wl_entries_slab, e);
+                       kmem_cache_free(ubi_wl_entry_slab, e);
                }
        }
 }
@@ -1351,7 +1361,7 @@ static int ubi_thread(void *u)
        struct ubi_device *ubi = u;
 
        ubi_msg("background thread \"%s\" started, PID %d",
-               ubi->bgt_name, current->pid);
+               ubi->bgt_name, task_pid_nr(current));
 
        set_freezable();
        for (;;) {
@@ -1436,6 +1446,8 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
        ubi->used = ubi->free = ubi->scrub = RB_ROOT;
        ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
        spin_lock_init(&ubi->wl_lock);
+       mutex_init(&ubi->move_mutex);
+       init_rwsem(&ubi->work_sem);
        ubi->max_ec = si->max_ec;
        INIT_LIST_HEAD(&ubi->works);
 
@@ -1449,14 +1461,6 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
                return err;
        }
 
-       if (ubi_devices_cnt == 0) {
-               wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
-                                                   sizeof(struct ubi_wl_entry),
-                                                   0, 0, NULL);
-               if (!wl_entries_slab)
-                       return -ENOMEM;
-       }
-
        err = -ENOMEM;
        ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
        if (!ubi->lookuptbl)
@@ -1465,7 +1469,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
        list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
                cond_resched();
 
-               e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+               e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e)
                        goto out_free;
 
@@ -1473,7 +1477,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
                e->ec = seb->ec;
                ubi->lookuptbl[e->pnum] = e;
                if (schedule_erase(ubi, e, 0)) {
-                       kmem_cache_free(wl_entries_slab, e);
+                       kmem_cache_free(ubi_wl_entry_slab, e);
                        goto out_free;
                }
        }
@@ -1481,21 +1485,21 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
        list_for_each_entry(seb, &si->free, u.list) {
                cond_resched();
 
-               e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+               e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e)
                        goto out_free;
 
                e->pnum = seb->pnum;
                e->ec = seb->ec;
                ubi_assert(e->ec >= 0);
-               free_tree_add(ubi, e);
+               wl_tree_add(e, &ubi->free);
                ubi->lookuptbl[e->pnum] = e;
        }
 
        list_for_each_entry(seb, &si->corr, u.list) {
                cond_resched();
 
-               e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+               e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e)
                        goto out_free;
 
@@ -1503,7 +1507,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
                e->ec = seb->ec;
                ubi->lookuptbl[e->pnum] = e;
                if (schedule_erase(ubi, e, 0)) {
-                       kmem_cache_free(wl_entries_slab, e);
+                       kmem_cache_free(ubi_wl_entry_slab, e);
                        goto out_free;
                }
        }
@@ -1512,7 +1516,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
                ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
                        cond_resched();
 
-                       e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+                       e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                        if (!e)
                                goto out_free;
 
@@ -1522,16 +1526,16 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
                        if (!seb->scrub) {
                                dbg_wl("add PEB %d EC %d to the used tree",
                                       e->pnum, e->ec);
-                               used_tree_add(ubi, e);
+                               wl_tree_add(e, &ubi->used);
                        } else {
                                dbg_wl("add PEB %d EC %d to the scrub tree",
                                       e->pnum, e->ec);
-                               scrub_tree_add(ubi, e);
+                               wl_tree_add(e, &ubi->scrub);
                        }
                }
        }
 
-       if (WL_RESERVED_PEBS > ubi->avail_pebs) {
+       if (ubi->avail_pebs < WL_RESERVED_PEBS) {
                ubi_err("no enough physical eraseblocks (%d, need %d)",
                        ubi->avail_pebs, WL_RESERVED_PEBS);
                goto out_free;
@@ -1552,8 +1556,6 @@ out_free:
        tree_destroy(&ubi->free);
        tree_destroy(&ubi->scrub);
        kfree(ubi->lookuptbl);
-       if (ubi_devices_cnt == 0)
-               kmem_cache_destroy(wl_entries_slab);
        return err;
 }
 
@@ -1583,7 +1585,7 @@ static void protection_trees_destroy(struct ubi_device *ubi)
                                        rb->rb_right = NULL;
                        }
 
-                       kmem_cache_free(wl_entries_slab, pe->e);
+                       kmem_cache_free(ubi_wl_entry_slab, pe->e);
                        kfree(pe);
                }
        }
@@ -1607,8 +1609,6 @@ void ubi_wl_close(struct ubi_device *ubi)
        tree_destroy(&ubi->free);
        tree_destroy(&ubi->scrub);
        kfree(ubi->lookuptbl);
-       if (ubi_devices_cnt == 1)
-               kmem_cache_destroy(wl_entries_slab);
 }
 
 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
@@ -1624,13 +1624,13 @@ void ubi_wl_close(struct ubi_device *ubi)
  * is equivalent to @ec, %1 if not, and a negative error code if an error
  * occurred.
  */
-static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec)
+static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
 {
        int err;
        long long read_ec;
        struct ubi_ec_hdr *ec_hdr;
 
-       ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+       ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
        if (!ec_hdr)
                return -ENOMEM;