linux-2.6-omap-h63xx.git: kernel/sched.c

diff --git a/kernel/sched.c b/kernel/sched.c
index 1cb53fb1fe3ddcc102c9de1e44350e384fe66fff..d1ad69b270ca7abc418871ce3160cc4bfbc04135 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -301,7 +301,7 @@ struct cfs_rq {
        /* 'curr' points to currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e when none are currently running).
         */
-       struct sched_entity *curr;
+       struct sched_entity *curr, *next;
 
        unsigned long nr_spread_over;
 
@@ -1084,7 +1084,7 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
        u64 tmp;
 
        if (unlikely(!lw->inv_weight))
-               lw->inv_weight = (WMULT_CONST - lw->weight/2) / lw->weight + 1;
+               lw->inv_weight = (WMULT_CONST-lw->weight/2) / (lw->weight+1);
 
        tmp = (u64)delta_exec * weight;
        /*
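
For context, calc_delta_mine() evaluates delta_exec * weight / lw->weight with a cached fixed-point reciprocal: inv_weight is roughly WMULT_CONST / lw->weight and is applied with a multiply followed by a WMULT_SHIFT right shift. The stand-alone sketch below assumes the usual 2^32 / 32 values for WMULT_CONST / WMULT_SHIFT (neither constant appears in this hunk) and omits the 64-bit overflow guard the real function has; the sample loads 1024 and 3121 in main() are arbitrary values chosen only for the demo.

/*
 * Stand-alone sketch of the reciprocal trick behind calc_delta_mine():
 * delta * weight / lw_weight done as a multiply by a cached ~2^32/lw_weight
 * value plus a 32-bit shift.  Assumes WMULT_CONST = 2^32, WMULT_SHIFT = 32;
 * no overflow guard.
 */
#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST	(1ULL << 32)
#define WMULT_SHIFT	32

static uint64_t calc_delta_sketch(uint64_t delta, uint64_t weight,
				  uint64_t lw_weight)
{
	/* same rounding as the '+' line in the hunk above */
	uint64_t inv_weight = (WMULT_CONST - lw_weight / 2) / (lw_weight + 1);

	return (delta * weight * inv_weight) >> WMULT_SHIFT;
}

int main(void)
{
	uint64_t delta = 3000000;	/* 3ms of runtime, in nanoseconds */

	printf("exact:      %llu\n", (unsigned long long)(delta * 1024 / 3121));
	printf("reciprocal: %llu\n",
	       (unsigned long long)calc_delta_sketch(delta, 1024, 3121));
	return 0;
}
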
@@ -1108,11 +1108,13 @@ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
        lw->weight += inc;
+       lw->inv_weight = 0;
 }
 
 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
 {
        lw->weight -= dec;
+       lw->inv_weight = 0;
 }
 
 /*
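
The two '+' lines above make update_load_add() and update_load_sub() invalidate the cached reciprocal whenever the weight changes, so the unlikely(!lw->inv_weight) test in the previous hunk recomputes it lazily on the next use; inv_weight == 0 doubles as the "not computed yet" marker. A condensed sketch of that protocol, reusing the 2^32 assumption from the previous example and not copied verbatim from sched.c:

/* Writers zero the cache, the reader refills it on demand (sketch only). */
#define WMULT_CONST	(1ULL << 32)		/* assumed, as above */

struct load_weight { unsigned long long weight, inv_weight; };

static void update_load(struct load_weight *lw, long long delta)
{
	lw->weight    += delta;
	lw->inv_weight = 0;			/* cached reciprocal is now stale */
}

static unsigned long long load_inv_weight(struct load_weight *lw)
{
	if (!lw->inv_weight)			/* 0 doubles as "not computed" */
		lw->inv_weight = (WMULT_CONST - lw->weight / 2) / (lw->weight + 1);
	return lw->inv_weight;
}
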
@@ -4268,11 +4270,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        oldprio = p->prio;
        on_rq = p->se.on_rq;
        running = task_current(rq, p);
-       if (on_rq) {
+       if (on_rq)
                dequeue_task(rq, p, 0);
-               if (running)
-                       p->sched_class->put_prev_task(rq, p);
-       }
+       if (running)
+               p->sched_class->put_prev_task(rq, p);
 
        if (rt_prio(prio))
                p->sched_class = &rt_sched_class;
@@ -4281,10 +4282,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
        p->prio = prio;
 
+       if (running)
+               p->sched_class->set_curr_task(rq);
        if (on_rq) {
-               if (running)
-                       p->sched_class->set_curr_task(rq);
-
                enqueue_task(rq, p, 0);
 
                check_class_changed(rq, p, prev_class, oldprio, running);
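
The two rt_mutex_setprio() hunks above converge on the shape condensed below: put_prev_task() and set_curr_task() are now keyed to 'running' alone instead of being nested under 'on_rq', and set_curr_task() runs before the task is put back on the runqueue. The sched_setscheduler() and sched_move_task() hunks that follow are converted to the same shape (sched_setscheduler() uses deactivate_task()/activate_task() in place of dequeue_task()/enqueue_task()). Condensed sketch, not verbatim sched.c:

	on_rq   = p->se.on_rq;
	running = task_current(rq, p);

	if (on_rq)				/* off the runqueue ... */
		dequeue_task(rq, p, 0);
	if (running)				/* ... and out of the "current" slot */
		p->sched_class->put_prev_task(rq, p);

	/* ... change priority / policy / scheduling class / task group ... */

	if (running)				/* reinstall as current first ... */
		p->sched_class->set_curr_task(rq);
	if (on_rq)				/* ... then put it back on the runqueue */
		enqueue_task(rq, p, 0);
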
@@ -4581,19 +4581,17 @@ recheck:
        update_rq_clock(rq);
        on_rq = p->se.on_rq;
        running = task_current(rq, p);
-       if (on_rq) {
+       if (on_rq)
                deactivate_task(rq, p, 0);
-               if (running)
-                       p->sched_class->put_prev_task(rq, p);
-       }
+       if (running)
+               p->sched_class->put_prev_task(rq, p);
 
        oldprio = p->prio;
        __setscheduler(rq, p, policy, param->sched_priority);
 
+       if (running)
+               p->sched_class->set_curr_task(rq);
        if (on_rq) {
-               if (running)
-                       p->sched_class->set_curr_task(rq);
-
                activate_task(rq, p, 0);
 
                check_class_changed(rq, p, prev_class, oldprio, running);
@@ -7618,11 +7616,10 @@ void sched_move_task(struct task_struct *tsk)
        running = task_current(rq, tsk);
        on_rq = tsk->se.on_rq;
 
-       if (on_rq) {
+       if (on_rq)
                dequeue_task(rq, tsk, 0);
-               if (unlikely(running))
-                       tsk->sched_class->put_prev_task(rq, tsk);
-       }
+       if (unlikely(running))
+               tsk->sched_class->put_prev_task(rq, tsk);
 
        set_task_rq(tsk, task_cpu(tsk));
 
@@ -7631,11 +7628,10 @@ void sched_move_task(struct task_struct *tsk)
                tsk->sched_class->moved_group(tsk);
 #endif
 
-       if (on_rq) {
-               if (unlikely(running))
-                       tsk->sched_class->set_curr_task(rq);
+       if (unlikely(running))
+               tsk->sched_class->set_curr_task(rq);
+       if (on_rq)
                enqueue_task(rq, tsk, 0);
-       }
 
        task_rq_unlock(rq, &flags);
 }