/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include "async-thread.h"
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
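
/*
 * Flag bit lifecycle, as used below: WORK_QUEUED_BIT is set while a work
 * item sits on a pending list, WORK_DONE_BIT once its func has run, and
 * WORK_ORDER_DONE_BIT once a thread has claimed the right to run its
 * ordered_func.
 */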
/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread handle for this worker */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* counts assignments so busy workers are rotated in batches */
	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};
/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	unsigned long flags;

	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock_irqsave(&workers->lock, flags);

	while (!list_empty(&workers->order_list)) {
		work = list_entry(workers->order_list.next,
				  struct btrfs_work, order_list);

		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock_irqrestore(&workers->lock, flags);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock_irqsave(&workers->lock, flags);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock_irqrestore(&workers->lock, flags);
	return 0;
}
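
/*
 * Illustrative sketch (not from this file; csum_work, csum_done and
 * csum_free are hypothetical names): an ordered pool expects each work
 * item to carry all three callbacks before it is queued:
 *
 *	work->func = csum_work;          may run on any worker, any order
 *	work->ordered_func = csum_done;  always called in submission order
 *	work->ordered_free = csum_free;  called last, under workers->lock
 */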
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;

	do {
		spin_lock_irq(&worker->lock);
again_locked:
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending))
					goto again_locked;

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop())
					schedule();
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}
/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
}
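
/*
 * Illustrative usage (assumed, not taken from this file; "example" is a
 * hypothetical pool name that ends up in the kthread names via the
 * "btrfs-%s-%d" format below): a pool is initialized once, given at
 * least one thread, and then fed work items:
 *
 *	struct btrfs_workers pool;
 *
 *	btrfs_init_workers(&pool, "example", 8);
 *	btrfs_start_workers(&pool, 1);
 *	...
 *	btrfs_queue_worker(&pool, work);
 *	...
 *	btrfs_stop_workers(&pool);
 */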
/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		worker->workers = workers;
		if (IS_ERR(worker->task)) {
			kfree(worker);
			ret = PTR_ERR(worker->task);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}
/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * idle
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}
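
/*
 * Worked example of the batching above: every assignment down the busy
 * path bumps worker->sequence, so with the default idle_thresh of 32 a
 * busy worker collects roughly 32 consecutive submissions before it is
 * rotated to the tail of the list.
 */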
/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			struct list_head *fallback = NULL;
			/*
			 * we have failed to find any workers, just
			 * return the first one
			 */
			if (!list_empty(&workers->worker_list))
				fallback = workers->worker_list.next;
			if (!list_empty(&workers->idle_list))
				fallback = workers->idle_list.next;
			BUG_ON(!fallback);
			worker = list_entry(fallback,
				    struct btrfs_worker_thread, worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}
/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	unsigned long wflags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle list */
	if (worker->idle) {
		/* use wflags so we don't clobber the flags saved above */
		spin_lock_irqsave(&worker->workers->lock, wflags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, wflags);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
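
/*
 * Illustrative sketch (long_running_func and make_progress are
 * hypothetical names, not part of btrfs): a long running work function
 * can do a bounded chunk of work and put itself back on the queue
 * instead of monopolizing a worker:
 *
 *	static void long_running_func(struct btrfs_work *work)
 *	{
 *		if (!make_progress(work))
 *			btrfs_requeue_work(work);
 *	}
 */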
/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
	if (workers->ordered) {
		spin_lock_irqsave(&workers->lock, flags);
		list_add_tail(&work->order_list, &workers->order_list);
		spin_unlock_irqrestore(&workers->lock, flags);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);
	list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);

	/* avoid calling into wake_up_process if this thread was kicked already */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);
	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
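
/*
 * Illustrative note (a sketch, assuming the public fields declared in
 * async-thread.h): ordered completion is opt-in per pool, and the flag
 * must be set before any work is queued:
 *
 *	btrfs_init_workers(&pool, "ordered-example", 1);
 *	pool.ordered = 1;
 *	btrfs_start_workers(&pool, 1);
 */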