* Derived from the taskqueue/keventd code by:
*
* David Woodhouse <dwmw2@infradead.org>
- * Andrew Morton <andrewm@uow.edu.au>
+ * Andrew Morton
* Kai Petzke <wpp@marie.physik.tu-berlin.de>
* Theodore Ts'o <tytso@mit.edu>
*
const char *name;
int singlethread;
int freezeable; /* Freeze threads during suspend */
+ int rt;
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
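The new rt field marks a workqueue whose worker threads should be switched to
SCHED_FIFO; create_workqueue_thread() below does the switch at MAX_RT_PRIO-1
when the flag is set. The header side of the change is not shown in these
hunks; a minimal sketch of how the __create_workqueue() wrappers in
include/linux/workqueue.h would presumably grow the extra argument
(non-lockdep variant, macro names assumed rather than taken from this patch):

	#define __create_workqueue(name, singlethread, freezeable, rt)	\
		__create_workqueue_key((name), (singlethread),		\
				       (freezeable), (rt), NULL, NULL)
	#define create_workqueue(name)		__create_workqueue((name), 0, 0, 0)
	#define create_rt_workqueue(name)	__create_workqueue((name), 0, 0, 1)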
BUG_ON(get_wq_data(work) != cwq);
work_clear_pending(work);
- lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ lock_map_acquire(&cwq->wq->lockdep_map);
+ lock_map_acquire(&lockdep_map);
f(work);
- lock_release(&lockdep_map, 1, _THIS_IP_);
- lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+ lock_map_release(&lockdep_map);
+ lock_map_release(&cwq->wq->lockdep_map);
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
int cpu;
might_sleep();
- lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+ lock_map_acquire(&wq->lockdep_map);
+ lock_map_release(&wq->lockdep_map);
for_each_cpu_mask_nr(cpu, *cpu_map)
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
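The lock_map_acquire()/lock_map_release() pairs replace the older open-coded
lock_acquire()/lock_release() calls. They are annotation-only acquisitions: no
lock is really taken, but lockdep records a dependency between the workqueue
(and each work item) and whatever the caller already holds, so flushing a
workqueue from inside one of its own work functions, or under a lock that a
work function also takes, is reported as a potential deadlock. A sketch of the
wrappers, assuming the lockdep.h definitions of this era (check
include/linux/lockdep.h for the authoritative ones):

	#define lock_map_acquire(l)	lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
	#define lock_map_release(l)	lock_release(l, 1, _THIS_IP_)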
if (!cwq)
return 0;
- lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+ lock_map_acquire(&cwq->wq->lockdep_map);
+ lock_map_release(&cwq->wq->lockdep_map);
prev = NULL;
spin_lock_irq(&cwq->lock);
might_sleep();
- lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_release(&work->lockdep_map, 1, _THIS_IP_);
+ lock_map_acquire(&work->lockdep_map);
+ lock_map_release(&work->lockdep_map);
cwq = get_wq_data(work);
if (!cwq)
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
struct workqueue_struct *wq = cwq->wq;
const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
struct task_struct *p;
*/
if (IS_ERR(p))
return PTR_ERR(p);
-
+ if (cwq->wq->rt)
+ sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
cwq->thread = p;
return 0;
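Since MAX_RT_PRIO-1 is the highest SCHED_FIFO priority, an rt workqueue's
workers preempt all normal SCHED_OTHER tasks. A hypothetical caller, using the
create_rt_workqueue() wrapper sketched earlier (names are illustrative, not
part of this patch):

	static struct workqueue_struct *foo_wq;

	static int __init foo_init(void)
	{
		foo_wq = create_rt_workqueue("foo");	/* workers run SCHED_FIFO */
		if (!foo_wq)
			return -ENOMEM;
		return 0;
	}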
struct workqueue_struct *__create_workqueue_key(const char *name,
int singlethread,
int freezeable,
+ int rt,
struct lock_class_key *key,
const char *lock_name)
{
lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
wq->singlethread = singlethread;
wq->freezeable = freezeable;
+ wq->rt = rt;
INIT_LIST_HEAD(&wq->list);
if (singlethread) {
start_workqueue_thread(cwq, -1);
} else {
cpu_maps_update_begin();
+ /*
+ * We must place this wq on the list even if the code below fails.
+ * cpu_down(cpu) can remove cpu from cpu_populated_map before
+ * destroy_workqueue() takes the lock; in that case we would leak
+ * cwq[cpu]->thread.
+ */
spin_lock(&workqueue_lock);
list_add(&wq->list, &workqueues);
spin_unlock(&workqueue_lock);
-
+ /*
+ * We must initialize cwqs for each possible cpu even if we
+ * are going to call destroy_workqueue() eventually. Otherwise
+ * cpu_up() can hit the uninitialized cwq once we drop the
+ * lock.
+ */
for_each_possible_cpu(cpu) {
cwq = init_cpu_workqueue(wq, cpu);
if (err || !cpu_online(cpu))
if (cwq->thread == NULL)
return;
- lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+ lock_map_acquire(&cwq->wq->lockdep_map);
+ lock_map_release(&cwq->wq->lockdep_map);
flush_cpu_workqueue(cwq);
/*