diff --git a/kernel/sched.c b/kernel/sched.c
index cb31fb4a1379e23b0628795d5e1bfd64c6031bf0..93cf241cfbe9b62e4580446552873858dc8967a9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -301,7 +301,7 @@ struct rq {
        struct lock_class_key rq_lock_key;
 };
 
-static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
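Both the old and the new line serve the same purpose: each CPU's runqueue gets its own cache line(s), so a CPU updating its rq does not false-share with a neighbour. DEFINE_PER_CPU_SHARED_ALIGNED simply bundles the ____cacheline_aligned_in_smp annotation (and, in this kernel era, placement in a dedicated per-cpu data section) behind one macro. A minimal userspace sketch of the underlying alignment trick, assuming 64-byte cache lines and using invented names (rq_demo, runqueues_demo):

#include <stdio.h>

#define CACHELINE_SIZE 64	/* assumption: 64-byte cache lines */

/* Each "runqueue" is aligned to a cache line so two CPUs touching
 * adjacent array slots never share a line. */
struct rq_demo {
	unsigned long nr_running;
	unsigned long long clock;
} __attribute__((aligned(CACHELINE_SIZE)));

static struct rq_demo runqueues_demo[4];	/* one slot per "CPU" */

int main(void)
{
	printf("sizeof(struct rq_demo) = %zu bytes\n", sizeof(struct rq_demo));
	printf("cpu0 rq at %p, cpu1 rq at %p\n",
	       (void *)&runqueues_demo[0], (void *)&runqueues_demo[1]);
	return 0;
}

Printing the two addresses shows consecutive slots land at least one cache line apart, which is the whole point of the annotation.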
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
 #define task_rq(p)             cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
 
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long long now;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rq->lock, flags);
+       now = rq_clock(rq);
+       spin_unlock_irqrestore(&rq->lock, flags);
+
+       return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
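The new cpu_clock() helper is deliberately cheap: it takes the runqueue lock with interrupts off, reads rq_clock(), and returns, so callers get a timestamp consistent with that CPU's own scheduler bookkeeping without any cross-CPU synchronisation; that is why the comment calls it high-speed but slightly incorrect. A rough userspace analogue of the same lock-then-read pattern, with invented names (rq_demo, demo_rq, rq_clock_demo, cpu_clock_demo) and CLOCK_MONOTONIC standing in for sched_clock():

#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical userspace stand-ins, not kernel APIs. */
struct rq_demo {
	pthread_mutex_t lock;
	unsigned long long clock;	/* cached timestamp, ns */
};

static struct rq_demo demo_rq[2] = {
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
};

/* stand-in for rq_clock(): refresh and return this "CPU"'s clock */
static unsigned long long rq_clock_demo(struct rq_demo *rq)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	rq->clock = (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
	return rq->clock;
}

/* same shape as cpu_clock(): lock, read, unlock, return */
unsigned long long cpu_clock_demo(int cpu)
{
	struct rq_demo *rq = &demo_rq[cpu];
	unsigned long long now;

	pthread_mutex_lock(&rq->lock);
	now = rq_clock_demo(rq);
	pthread_mutex_unlock(&rq->lock);
	return now;
}

int main(void)
{
	printf("cpu 0 clock: %llu ns\n", cpu_clock_demo(0));
	return 0;
}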
@@ -2235,7 +2252,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
                        rq = cpu_rq(i);
 
-                       if (*sd_idle && !idle_cpu(i))
+                       if (*sd_idle && rq->nr_running)
                                *sd_idle = 0;
 
                        /* Bias balancing toward cpus of our domain */
@@ -2257,9 +2274,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                /*
                 * First idle cpu or the first cpu(busiest) in this sched group
                 * is eligible for doing load balancing at this and above
-                * domains.
+                * domains. In the newly idle case, we will allow all the cpus
+                * to do the newly idle load balance.
                 */
-               if (local_group && balance_cpu != this_cpu && balance) {
+               if (idle != CPU_NEWLY_IDLE && local_group &&
+                   balance_cpu != this_cpu && balance) {
                        *balance = 0;
                        goto ret;
                }
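Taken together, the two hunks above adjust find_busiest_group() in two ways: sd_idle is now cleared based on rq->nr_running rather than idle_cpu(), and the "only the group's elected balance_cpu balances at this domain" short-circuit is skipped for CPU_NEWLY_IDLE, so a CPU that has just gone idle may pull work immediately instead of deferring. A small sketch of just that decision, with the enum values and the should_defer() helper invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-in for the kernel's cpu_idle_type values */
enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

/* true => this_cpu backs off and leaves balancing at this domain
 * level to balance_cpu, mirroring the updated condition above */
static bool should_defer(enum cpu_idle_type idle, bool local_group,
			 int balance_cpu, int this_cpu)
{
	return idle != CPU_NEWLY_IDLE && local_group &&
	       balance_cpu != this_cpu;
}

int main(void)
{
	/* CPU 3 is in the local group, but CPU 1 is the elected balancer */
	printf("periodic idle balance: defer=%d\n",
	       should_defer(CPU_IDLE, true, 1, 3));		/* 1: defer */
	printf("newly idle balance:    defer=%d\n",
	       should_defer(CPU_NEWLY_IDLE, true, 1, 3));	/* 0: go ahead */
	return 0;
}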
@@ -2677,6 +2696,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
        unsigned long imbalance;
        int nr_moved = 0;
        int sd_idle = 0;
+       int all_pinned = 0;
        cpumask_t cpus = CPU_MASK_ALL;
 
        /*
@@ -2715,10 +2735,11 @@ redo:
                double_lock_balance(this_rq, busiest);
                nr_moved = move_tasks(this_rq, this_cpu, busiest,
                                        minus_1_or_zero(busiest->nr_running),
-                                       imbalance, sd, CPU_NEWLY_IDLE, NULL);
+                                       imbalance, sd, CPU_NEWLY_IDLE,
+                                       &all_pinned);
                spin_unlock(&busiest->lock);
 
-               if (!nr_moved) {
+               if (unlikely(all_pinned)) {
                        cpu_clear(cpu_of(busiest), cpus);
                        if (!cpus_empty(cpus))
                                goto redo;
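The final two hunks make load_balance_newidle() track pinned tasks the way the periodic balancer already does: move_tasks() now reports through all_pinned whether every candidate task on the busiest runqueue was tied to it by cpu affinity, and only in that case is the busiest CPU dropped from the candidate mask before retrying; before this change any failed pull (!nr_moved) triggered the retry. A compact sketch of that retry shape, with a plain bitmask standing in for cpumask_t and move_tasks_demo()/pick_busiest_demo() as invented helpers:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_DEMO 4

/* invented stand-in for move_tasks(): pretend CPUs 2 and 3 only run
 * tasks pinned to themselves, so nothing can be pulled from them */
static int move_tasks_demo(int busiest, bool *all_pinned)
{
	*all_pinned = (busiest >= 2);
	return *all_pinned ? 0 : 1;	/* nr_moved */
}

/* invented helper: pick the highest-numbered remaining candidate */
static int pick_busiest_demo(unsigned int cpus)
{
	for (int cpu = NR_CPUS_DEMO - 1; cpu >= 0; cpu--)
		if (cpus & (1u << cpu))
			return cpu;
	return -1;
}

int main(void)
{
	unsigned int cpus = (1u << NR_CPUS_DEMO) - 1;	/* CPU_MASK_ALL */
	bool all_pinned = false;
	int busiest, nr_moved;

	while ((busiest = pick_busiest_demo(cpus)) >= 0) {
		nr_moved = move_tasks_demo(busiest, &all_pinned);
		if (nr_moved) {
			printf("pulled work from cpu %d\n", busiest);
			break;
		}
		if (!all_pinned)
			break;	/* nothing movable, but not a pinning issue */
		printf("cpu %d: all tasks pinned, dropping it and retrying\n",
		       busiest);
		cpus &= ~(1u << busiest);	/* cpu_clear() analogue */
	}
	return 0;
}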