diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d0097a0634e54f3dfcce71da006359c6d22d5b01..9ba3daa03475cfeed642eeaacbd21f5b3aa118eb 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -23,6 +23,7 @@ static void update_curr_rt(struct rq *rq)
 
        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
+       cpuacct_charge(curr, delta_exec);
 }
 
 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
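
This first hunk feeds RT runtime into the cpuacct cgroup controller: whatever delta_exec gets added to the task's sum_exec_runtime is now also charged via cpuacct_charge(). For orientation, this is approximately how the whole of update_curr_rt() reads after the patch; everything above the two context lines is reconstructed from the 2.6.24-era scheduler and is an assumption about this tree rather than a quote from it:

    static void update_curr_rt(struct rq *rq)
    {
            struct task_struct *curr = rq->curr;
            u64 delta_exec;

            if (!task_has_rt_policy(curr))
                    return;

            /* CPU time consumed since the task last started running. */
            delta_exec = rq->clock - curr->se.exec_start;
            if (unlikely((s64)delta_exec < 0))
                    delta_exec = 0;

            curr->se.sum_exec_runtime += delta_exec;
            curr->se.exec_start = rq->clock;
            /* Added by this hunk: mirror the charge into cpuacct. */
            cpuacct_charge(curr, delta_exec);
    }
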
@@ -98,6 +99,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
        p->se.exec_start = 0;
 }
 
+#ifdef CONFIG_SMP
 /*
  * Load-balancing iterator. Note: while the runqueue stays locked
  * during the whole iteration, the current task might be
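
The +#ifdef CONFIG_SMP opened here is closed by the +#endif added two hunks below, so the load-balancing iterator and both balancing entry points are compiled only on SMP kernels; a uniprocessor build has nothing to balance. The iterator being guarded is the generic cursor the core scheduler hands to balance_tasks(). Its shape, quoted from memory of kernel/sched.c in this era (so verify against the tree), is:

    struct rq_iterator {
            void *arg;                            /* per-class cursor state */
            struct task_struct *(*start)(void *); /* first runnable candidate */
            struct task_struct *(*next)(void *);  /* subsequent candidates */
    };
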
@@ -172,13 +174,11 @@ static struct task_struct *load_balance_next_rt(void *arg)
 
 static unsigned long
 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                       unsigned long max_nr_move, unsigned long max_load_move,
-                       struct sched_domain *sd, enum cpu_idle_type idle,
-                       int *all_pinned, int *this_best_prio)
+               unsigned long max_load_move,
+               struct sched_domain *sd, enum cpu_idle_type idle,
+               int *all_pinned, int *this_best_prio)
 {
-       int nr_moved;
        struct rq_iterator rt_rq_iterator;
-       unsigned long load_moved;
 
        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
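
The signature change above is the heart of the cleanup: the max_nr_move limit disappears, and with it the nr_moved/load_moved bookkeeping, because balance_tasks() now returns the amount of weighted load it moved directly. Matching the call site in the next hunk, its prototype after this series is presumably:

    static unsigned long
    balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_load_move, struct sched_domain *sd,
                  enum cpu_idle_type idle, int *all_pinned,
                  int *this_best_prio, struct rq_iterator *iterator);
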
@@ -187,15 +187,29 @@ load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
         */
        rt_rq_iterator.arg = busiest;
 
-       nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
-                       max_load_move, sd, idle, all_pinned, &load_moved,
-                       this_best_prio, &rt_rq_iterator);
+       return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
+                            idle, all_pinned, this_best_prio, &rt_rq_iterator);
+}
+
+static int
+move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
+                struct sched_domain *sd, enum cpu_idle_type idle)
+{
+       struct rq_iterator rt_rq_iterator;
+
+       rt_rq_iterator.start = load_balance_start_rt;
+       rt_rq_iterator.next = load_balance_next_rt;
+       rt_rq_iterator.arg = busiest;
 
-       return load_moved;
+       return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
+                                 &rt_rq_iterator);
 }
+#endif
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
+       update_curr_rt(rq);
+
        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
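
Two things happen in this hunk. First, load_balance_rt() collapses into a tail call to balance_tasks(), and a new move_one_task_rt() entry point reuses the same iterator to pull a single task, delegating to the iter_move_one_task() helper. Reconstructed from the era's kernel/sched.c (again an approximation, not a quote from this tree), that helper walks the iterator until one migratable task is found:

    static int
    iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
                       struct sched_domain *sd, enum cpu_idle_type idle,
                       struct rq_iterator *iterator)
    {
            struct task_struct *p = iterator->start(iterator->arg);
            int pinned = 0;

            while (p) {
                    if (can_migrate_task(p, busiest, this_cpu, sd, idle,
                                         &pinned)) {
                            pull_task(busiest, p, this_rq, this_cpu);
                            schedstat_inc(sd, lb_gained[idle]);
                            return 1;       /* moved exactly one task */
                    }
                    p = iterator->next(iterator->arg);
            }
            return 0;                       /* nothing could migrate */
    }

Second, task_tick_rt() now calls update_curr_rt() on every tick, so an RT task's sum_exec_runtime (and, via the first hunk, its cpuacct charge) stays current while the task runs instead of being settled only at the next reschedule.
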
@@ -236,7 +250,10 @@ const struct sched_class rt_sched_class = {
        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,
 
+#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
+       .move_one_task          = move_one_task_rt,
+#endif
 
        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
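
The final hunk guards the corresponding sched_class members, which only exist on SMP builds; wiring in .move_one_task presumably pairs with an #ifdef CONFIG_SMP block in struct sched_class along these lines (reconstructed, so treat the exact member layout as an assumption):

    struct sched_class {
            /* ... enqueue/dequeue, pick_next_task, etc. ... */
    #ifdef CONFIG_SMP
            unsigned long (*load_balance)(struct rq *this_rq, int this_cpu,
                            struct rq *busiest, unsigned long max_load_move,
                            struct sched_domain *sd, enum cpu_idle_type idle,
                            int *all_pinned, int *this_best_prio);

            int (*move_one_task)(struct rq *this_rq, int this_cpu,
                                 struct rq *busiest, struct sched_domain *sd,
                                 enum cpu_idle_type idle);
    #endif
            /* ... set_curr_task, task_tick, etc. ... */
    };
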