 extern void init_workqueues(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
+extern int flush_work(struct work_struct *work);
+
 extern int cancel_work_sync(struct work_struct *work);
 
 /*
 
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
+/**
+ * flush_work - block until a work_struct's callback has terminated
+ * @work: the work which is to be flushed
+ *
+ * Returns 1 if flush_work() waited for @work to finish executing,
+ * 0 otherwise.
+ *
+ * It is expected that, prior to calling flush_work(), the caller has
+ * arranged for the work to not be requeued, otherwise it doesn't make
+ * sense to use this function.
+ */
+int flush_work(struct work_struct *work)
+{
+       struct cpu_workqueue_struct *cwq;
+       struct list_head *prev;
+       struct wq_barrier barr;
+
+       might_sleep();
+       cwq = get_wq_data(work);
+       if (!cwq)                       /* @work was never queued */
+               return 0;
+
+       prev = NULL;
+       spin_lock_irq(&cwq->lock);
+       if (!list_empty(&work->entry)) {
+               /*
+                * See the comment near try_to_grab_pending()->smp_rmb().
+                * If it was re-queued under us we are not going to wait.
+                */
+               smp_rmb();
+               if (unlikely(cwq != get_wq_data(work)))
+                       goto out;
+               prev = &work->entry;
+       } else {
+               /*
+                * Not pending: we only need to wait if @work is
+                * currently being executed by this cwq's thread.
+                */
+               if (cwq->current_work != work)
+                       goto out;
+               prev = &cwq->worklist;
+       }
+       /* The barrier runs only after everything before it, @work included */
+       insert_wq_barrier(cwq, &barr, prev->next);
+out:
+       spin_unlock_irq(&cwq->lock);
+       if (!prev)
+               return 0;
+
+       wait_for_completion(&barr.done);
+       return 1;
+}
+EXPORT_SYMBOL_GPL(flush_work);
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
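
For context: the wq_barrier used above is a dummy work item whose callback
fires a completion, so waiting on barr.done means everything queued ahead of
the barrier (including @work) has finished. A simplified sketch of the
helpers already defined in kernel/workqueue.c (the real insert_wq_barrier()
also sets the WORK_STRUCT_PENDING bit; details may differ):

struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

        /* Runs in the workqueue thread, after everything queued before it */
        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                              struct wq_barrier *barr, struct list_head *head)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        init_completion(&barr->done);
        insert_work(cwq, &barr->work, head);    /* splice barrier in at @head */
}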
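
And a minimal usage sketch (hypothetical driver code; my_work and
my_device_teardown are made-up names, not part of this patch). The caller
first stops the requeue source, then flushes:

static struct work_struct my_work;      /* assumed queued from an IRQ handler */

static void my_device_teardown(unsigned int irq)
{
        /* Stop the (assumed) requeue source first... */
        disable_irq(irq);

        /* ...then wait for any pending/running callback to finish. */
        if (flush_work(&my_work))
                pr_debug("my_work: waited for the callback to terminate\n");

        /* From here on, my_work's callback is not running and cannot restart. */
}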