        assert_spin_locked(&task_rq(p)->lock);
 
-       if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+       if (test_tsk_need_resched(p))
                return;
 
-       set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+       set_tsk_need_resched(p);
 
        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
         * lockless. The worst case is that the other CPU runs the
         * idle task through an additional NOOP schedule()
         */
-       set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
+       set_tsk_need_resched(rq->idle);
 
        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
                 * between schedule and now.
                 */
                barrier();
-       } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+       } while (need_resched());
 }
 EXPORT_SYMBOL(preempt_schedule);
 
                 * between schedule and now.
                 */
                barrier();
-       } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+       } while (need_resched());
 }
 
 #endif /* CONFIG_PREEMPT */
 
 int __lockfunc __reacquire_kernel_lock(void)
 {
        while (!_raw_spin_trylock(&kernel_flag)) {
-               if (test_thread_flag(TIF_NEED_RESCHED))
+               if (need_resched())
                        return -EAGAIN;
                cpu_relax();
        }