diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d72e8b41b3e4ff612c0e9746b7c38c8096f67abf..e24ecd39c4b8aec9786d0ab3df0a01ad6dcba08d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -662,10 +662,15 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
        if (!initial) {
                /* sleeps upto a single latency don't count. */
                if (sched_feat(NEW_FAIR_SLEEPERS)) {
+                       unsigned long thresh = sysctl_sched_latency;
+
+                       /*
+                        * convert the sleeper threshold into virtual time
+                        */
                        if (sched_feat(NORMALIZED_SLEEPER))
-                               vruntime -= calc_delta_weight(sysctl_sched_latency, se);
-                       else
-                               vruntime -= sysctl_sched_latency;
+                               thresh = calc_delta_fair(thresh, se);
+
+                       vruntime -= thresh;
                }
 
                /* ensure we never gain time by being placed backwards. */
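
The reworked branch computes a single sleeper threshold, starting from sysctl_sched_latency, and converts it into the entity's virtual time via calc_delta_fair() only when NORMALIZED_SLEEPER is enabled; the result is then subtracted from vruntime as the sleeper credit. A rough standalone sketch of that conversion follows. It is not kernel code: scale_to_vtime(), the 20 ms latency value and the NICE_0_LOAD constant are illustrative assumptions, using the usual CFS scaling where a wall-clock delta maps to delta * NICE_0_LOAD / weight of virtual time.

#include <stdio.h>
#include <stdint.h>

/* weight of a nice-0 task (assumed 1024, as in CFS of this era) */
#define NICE_0_LOAD 1024UL

/* invented stand-in for the calc_delta_fair() conversion */
static uint64_t scale_to_vtime(uint64_t delta_ns, unsigned long weight)
{
        return delta_ns * NICE_0_LOAD / weight;
}

int main(void)
{
        uint64_t sched_latency = 20000000ULL;   /* 20 ms, example value */

        /* a heavy (low-nice) entity gets a smaller vruntime credit ... */
        printf("weight 2048 -> thresh %llu ns\n",
               (unsigned long long)scale_to_vtime(sched_latency, 2048));
        /* ... a light (high-nice) entity gets a larger one */
        printf("weight  512 -> thresh %llu ns\n",
               (unsigned long long)scale_to_vtime(sched_latency, 512));
        return 0;
}

Under that assumption a heavier entity receives a smaller vruntime credit for the same wall-clock threshold and a lighter entity a larger one; without NORMALIZED_SLEEPER the threshold is subtracted unscaled.
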
@@ -682,6 +687,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);
+       account_entity_enqueue(cfs_rq, se);
 
        if (wakeup) {
                place_entity(cfs_rq, se, 0);
@@ -692,7 +698,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
        check_spread(cfs_rq, se);
        if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
-       account_entity_enqueue(cfs_rq, se);
 }
 
 static void update_avg(u64 *avg, u64 sample)
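
The two enqueue_entity() hunks relocate account_entity_enqueue(), which accounts the entity's weight and bumps the queue's task count, from the tail of the function to just after update_curr(), so accounting now happens before place_entity() and the tree insertion. The toy model below only illustrates that ordering; every name in it is invented and it is not the kernel's code.

#include <stdio.h>

/* toy run queue and entity (invented, not the kernel's structures) */
struct toy_rq {
        unsigned long load;           /* sum of enqueued weights */
        unsigned int  nr_running;     /* number of enqueued entities */
};

struct toy_se {
        unsigned long weight;
        unsigned long vruntime;
};

static void toy_account_enqueue(struct toy_rq *rq, struct toy_se *se)
{
        rq->load += se->weight;
        rq->nr_running++;
}

static void toy_place(struct toy_rq *rq, struct toy_se *se)
{
        /* stand-in for a placement formula that reads the queue totals */
        se->vruntime = rq->load;
}

int main(void)
{
        struct toy_rq rq = { .load = 1024, .nr_running = 1 };
        struct toy_se se = { .weight = 1024, .vruntime = 0 };

        /* patched ordering: account the entity first, then place it */
        toy_account_enqueue(&rq, &se);
        toy_place(&rq, &se);

        printf("placement observed load %lu across %u entities\n",
               rq.load, rq.nr_running);
        return 0;
}
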
@@ -841,8 +846,10 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         * queued ticks are scheduled to match the slice, so don't bother
         * validating it and just reschedule.
         */
-       if (queued)
-               return resched_task(rq_of(cfs_rq)->curr);
+       if (queued) {
+               resched_task(rq_of(cfs_rq)->curr);
+               return;
+       }
        /*
         * don't let the period tick interfere with the hrtick preemption
         */
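
The entity_tick() hunk replaces "return resched_task(...)" with a plain call followed by a bare return, so the void function no longer returns a void-typed expression, a construct ISO C forbids and gcc only accepts as an extension (it warns under -pedantic). A minimal standalone illustration with invented function names:

#include <stdio.h>

/* invented stand-in; not the kernel's resched_task() */
static void reschedule(void)
{
        puts("resched");
}

/* old style: returns a void-typed expression from a void function;
 * ISO C forbids this and gcc -pedantic warns about it */
static void tick_old(void)
{
        return reschedule();
}

/* patched style: call the function, then return */
static void tick_new(void)
{
        reschedule();
        return;
}

int main(void)
{
        tick_old();
        tick_new();
        return 0;
}
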
@@ -957,7 +964,7 @@ static void yield_task_fair(struct rq *rq)
                return;
 
        if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
-               __update_rq_clock(rq);
+               update_rq_clock(rq);
                /*
                 * Update run-time statistics of the 'current'.
                 */
@@ -1007,7 +1014,7 @@ static int wake_idle(int cpu, struct task_struct *p)
         * sibling runqueue info. This will avoid the checks and cache miss
         * penalities associated with that.
         */
-       if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
+       if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
                return cpu;
 
        for_each_domain(cpu, sd) {
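
The wake_idle() hunk narrows the busy test from the run queue's total task count to the count kept by its embedded CFS run queue, i.e. from tasks of every scheduling class to fair-class tasks only. The demo below models that distinction; demo_rq, demo_cfs_rq and fair_busy() are invented names, not the kernel's structures.

#include <stdio.h>
#include <stdbool.h>

struct demo_cfs_rq {
        unsigned int nr_running;      /* fair-class (CFS) tasks only */
};

struct demo_rq {
        unsigned int nr_running;      /* tasks of every class (fair, RT, ...) */
        struct demo_cfs_rq cfs;
};

/* the patched check: is more than one fair task already runnable here? */
static bool fair_busy(const struct demo_rq *rq)
{
        return rq->cfs.nr_running > 1;
}

int main(void)
{
        /* one fair task plus one RT task: total is 2, fair count is 1 */
        struct demo_rq rq = { .nr_running = 2, .cfs = { .nr_running = 1 } };

        printf("total=%u fair=%u -> fair_busy=%d\n",
               rq.nr_running, rq.cfs.nr_running, fair_busy(&rq));
        return 0;
}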