/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120
/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)
/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
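/*
 * Worked example of the timeslice arithmetic above, assuming HZ=1000:
 * one SPU scheduler tick spans SPUSCHED_TICK = 10 jiffies = 10 msecs, so
 * MIN_SPU_TIMESLICE = max(5 * 1000 / 10000, 1) = 1 tick and
 * DEF_SPU_TIMESLICE = 100 * 1000 / 10000 = 10 ticks = 100 msecs.
 * Time slices are therefore counted in SPU scheduler ticks, not jiffies.
 */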
/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution.  But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
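/*
 * Worked example of the mapping above, assuming the usual MAX_PRIO of 140
 * and MAX_RT_PRIO of 100, so MAX_USER_PRIO / 2 == 20:
 *
 *   nice -20 (prio 100): SCALE_PRIO(4 * DEF, 100) = 4 * DEF * 40 / 20
 *                        = 8 * DEF_SPU_TIMESLICE   (~800 msecs)
 *   nice   0 (prio 120): SCALE_PRIO(DEF, 120)     = DEF * 20 / 20
 *                        = DEF_SPU_TIMESLICE       (~100 msecs)
 *   nice  19 (prio 139): SCALE_PRIO(DEF, 139)     = DEF / 20, clamped up
 *                        to MIN_SPU_TIMESLICE      (~5 msecs)
 */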
/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with.  Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * A lot of places that don't hold active_mutex poke into
	 * cpus_allowed, including grab_runnable_context which
	 * already holds the runq_lock.  So abuse runq_lock
	 * to protect this field as well.
	 */
	spin_lock(&spu_prio->runq_lock);
	ctx->cpus_allowed = current->cpus_allowed;
	spin_unlock(&spu_prio->runq_lock);
}
void spu_update_sched_info(struct spu_context *ctx)
{
	int node = ctx->spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_update_sched_info(ctx);
	mutex_unlock(&spu_prio->active_mutex[node]);
}
static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}
/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	mutex_lock(&spu_prio->active_mutex[spu->node]);
	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
	mutex_unlock(&spu_prio->active_mutex[spu->node]);
}
static void __spu_remove_from_active_list(struct spu *spu)
{
	list_del_init(&spu->list);
}
/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:	spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_remove_from_active_list(spu);
	mutex_unlock(&spu_prio->active_mutex[node]);
}
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block *n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block *n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;
}
/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;
}
/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
	set_bit(prio, spu_prio->bitmap);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq))
		list_del_init(&ctx->rq);
	if (list_empty(&spu_prio->runq[prio]))
		clear_bit(prio, spu_prio->bitmap);
}
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}
/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}
			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, victim);
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}
/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	if (ctx->spu)
		return 0;

	do {
		struct spu *spu;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			spu_add_to_active_list(spu);
			return 0;
		}
		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}
/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = sched_find_first_bit(spu_prio->bitmap);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}
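/*
 * A note on the scan in grab_runnable_context() above: sched_find_first_bit()
 * returns the lowest set bit in the priority bitmap, i.e. the best
 * (numerically smallest) priority that has contexts queued.  Callers choose
 * the cut-off @prio: __spu_deactivate() passes MAX_PRIO to accept any waiting
 * context, while spusched_tick() passes ctx->prio + 1 so a running context is
 * only replaced by a waiter of equal or better priority.
 */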
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			spu_free(spu);
			if (new)
				wake_up(&new->stop_wq);
		}
	}

	return new != NULL;
}
/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	/*
	 * We must never reach this for a nosched context,
	 * but handle the case gracefully instead of panicking.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		WARN_ON(1);
		return;
	}

	__spu_deactivate(ctx, 1, MAX_PRIO);
}
/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}
static void spusched_tick(struct spu_context *ctx)
{
	/* SCHED_FIFO contexts are never time sliced. */
	if (ctx->policy == SCHED_FIFO || --ctx->time_slice)
		return;

	/*
	 * Unfortunately active_mutex ranks outside of state_mutex, so
	 * we have to trylock here.  If we fail give the context another
	 * tick and try again.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu *spu = ctx->spu;
		struct spu_context *new;

		new = grab_runnable_context(ctx->prio + 1, spu->node);
		if (new) {
			__spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			spu_free(spu);
			wake_up(&new->stop_wq);
			/*
			 * We need to break out of the wait loop in
			 * spu_run manually to ensure this context
			 * gets put on the runqueue again ASAP.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}
static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}
static int spusched_thread(void *unused)
{
	struct spu *spu, *next;
	int node;

	setup_timer(&spusched_timer, spusched_wake, 0);
	__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&spu_prio->active_mutex[node]);
			list_for_each_entry_safe(spu, next,
						 &spu_prio->active_list[node],
						 list)
				spusched_tick(spu->ctx);
			mutex_unlock(&spu_prio->active_mutex[node]);
		}
	}
	del_timer_sync(&spusched_timer);

	return 0;
}
int __init spu_sched_init(void)
{
	int i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		return -ENOMEM;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		kfree(spu_prio);
		return PTR_ERR(spusched_task);
	}

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;
}
void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	kthread_stop(spusched_task);
	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
}