unsigned long rt_nr_migratory;
        /* highest queued rt task prio */
        int highest_prio;
+       int overloaded;
 };
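
For orientation, a minimal sketch of where the new flag lives in struct rt_rq; only the fields referenced in this patch are shown, and the field comments are mine rather than from the source:

struct rt_rq {
	/* ... other fields elided ... */
	unsigned long rt_nr_running;	/* all queued RT tasks on this rq */
	unsigned long rt_nr_migratory;	/* those not pinned to this CPU */
	/* highest queued rt task prio */
	int highest_prio;
	int overloaded;			/* this rq has RT tasks it could push */
};

The idea is to cache, under the runqueue's own lock, whether this CPU is a member of the global rt_overload_mask, so hot paths can test a local flag instead of the shared mask.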
 
 /*
                rq->migration_thread = NULL;
                INIT_LIST_HEAD(&rq->migration_queue);
                rq->rt.highest_prio = MAX_RT_PRIO;
+               rq->rt.overloaded = 0;
 #endif
                atomic_set(&rq->nr_iowait, 0);
 
 
 }
 static inline void rt_set_overload(struct rq *rq)
 {
+       rq->rt.overloaded = 1;
        cpu_set(rq->cpu, rt_overload_mask);
        /*
         * Make sure the mask is visible before we set
        /* the order here really doesn't matter */
        atomic_dec(&rto_count);
        cpu_clear(rq->cpu, rt_overload_mask);
+       rq->rt.overloaded = 0;
 }
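
Read together, the set/clear pair ends up roughly as sketched below. The tail of rt_set_overload() falls outside the hunk above, so the barrier and the rto_count increment are assumptions, mirrored from the atomic_dec() visible in the clear path:

static inline void rt_set_overload(struct rq *rq)
{
	rq->rt.overloaded = 1;			/* local hint, under rq->lock */
	cpu_set(rq->cpu, rt_overload_mask);	/* advertise to other CPUs */
	/*
	 * Make sure the mask is visible before the count that other
	 * CPUs test before bothering to scan the mask.
	 */
	wmb();					/* assumed, see lead-in */
	atomic_inc(&rto_count);			/* assumed, see lead-in */
}

static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rto_count);
	cpu_clear(rq->cpu, rt_overload_mask);
	rq->rt.overloaded = 0;
}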
 
 static void update_rt_migration(struct rq *rq)
 
        assert_spin_locked(&rq->lock);
 
+       if (!rq->rt.overloaded)
+               return 0;
+
        next_task = pick_next_highest_task_rt(rq, -1);
        if (!next_task)
                return 0;
         * the lock was owned by prev, we need to release it
         * first via finish_lock_switch and then reacquire it here.
         */
-       if (unlikely(rq->rt.rt_nr_running > 1)) {
+       if (unlikely(rq->rt.overloaded)) {
                spin_lock_irq(&rq->lock);
                push_rt_tasks(rq);
                spin_unlock_irq(&rq->lock);
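
The post-schedule gate changes from "more than one queued RT task" to the cached flag; since overloaded is presumably only set when migratable RT tasks exist (update_rt_migration() appears above), a runqueue full of pinned RT tasks no longer pays for the extra lock round-trip. A sketch of the resulting path, with the enclosing function name made up for illustration:

/* Hypothetical name; the hunk above sits at the tail of the context-switch path. */
static inline void post_schedule_rt(struct rq *rq)
{
	/*
	 * rq->lock was released in finish_lock_switch(), so it has to be
	 * re-taken before any queued RT tasks can be pushed away.
	 */
	if (unlikely(rq->rt.overloaded)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}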
 {
        if (unlikely(rt_task(p)) &&
            !task_running(rq, p) &&
-           (p->prio >= rq->curr->prio))
+           (p->prio >= rq->rt.highest_prio) &&
+           rq->rt.overloaded)
                push_rt_tasks(rq);
 }
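
Finally, the wakeup-side push gains two extra conditions: it only fires when the woken RT task is not already running, its priority is no better than the cached highest queued RT priority, and the runqueue is actually marked overloaded. A sketch of the whole hook, with an assumed name and signature since only the body appears in the hunk:

/* Assumed name and signature; the hunk above only shows the body. */
static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
	if (unlikely(rt_task(p)) &&
	    !task_running(rq, p) &&
	    (p->prio >= rq->rt.highest_prio) &&
	    rq->rt.overloaded)
		push_rt_tasks(rq);
}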