#include <asm/unistd.h>
+/*
+ * Scheduler clock - returns current time in nanosecond units.
+ * This is the default implementation; architectures and
+ * sub-architectures can override it.
+ */
+unsigned long long __attribute__((weak)) sched_clock(void)
+{
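+ /* jiffies-based: resolution is one tick, i.e. 1000000000/HZ ns */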
+ return (unsigned long long)jiffies * (1000000000 / HZ);
+}
+
/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
return try_to_wake_up(p, state, 0);
}
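+/* forward declaration so sched_fork() can call this before its definition */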
+static void task_running_tick(struct rq *rq, struct task_struct *p);
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
* Taking the runqueue lock here is not a problem.
*/
current->time_slice = 1;
- scheduler_tick();
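+ /* the parent has only this one jiffy left; expire it on its runqueue */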
+ task_running_tick(cpu_rq(cpu), current);
}
local_irq_enable();
put_cpu();
struct mm_struct *mm = next->mm;
struct mm_struct *oldmm = prev->active_mm;
+ /*
+ * For paravirt, this is coupled with an exit in switch_to to
+ * combine the page table reload and the switch backend into
+ * one hypercall.
+ */
+ arch_enter_lazy_cpu_mode();
+
if (!mm) {
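+ /* kernel thread: borrow the previous task's address space */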
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
static void update_load(struct rq *this_rq)
{
unsigned long this_load;
- int i, scale;
+ unsigned int i, scale;
this_load = this_rq->raw_weighted_load;
/* Update our load: */
- for (i = 0, scale = 1; i < 3; i++, scale <<= 1) {
+ for (i = 0, scale = 1; i < 3; i++, scale += scale) {
unsigned long old_load, new_load;
+ /* scale is effectively 1 << i now, and >> i divides by scale */
+
old_load = this_rq->cpu_load[i];
new_load = this_load;
/*
* Round up the averaging division if load is increasing. This
* prevents us from getting stuck on 9 if the load is 10, for
* example.
*/
if (new_load > old_load)
new_load += scale-1;
- this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
+ this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
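+ /* e.g. i=2: (3*old + new)/4 - higher i decays more slowly */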
}
}
}
/**
- * sched_setscheduler - change the scheduling policy and/or RT priority of
- * a thread.
+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
* @p: the task in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
*
- * NOTE: the task may be already dead
+ * NOTE that the task may already be dead.
*/
int sched_setscheduler(struct task_struct *p, int policy,
struct sched_param *param)
/**
* sys_sched_yield - yield the current processor to other threads.
*
- * this function yields the current CPU by moving the calling thread
+ * This function yields the current CPU by moving the calling thread
* to the expired array. If there are no other threads running on this
* CPU then this function will return.
*/
return 0;
}
-static inline int __resched_legal(int expected_preempt_count)
-{
- if (unlikely(preempt_count() != expected_preempt_count))
- return 0;
- if (unlikely(system_state != SYSTEM_RUNNING))
- return 0;
- return 1;
-}
-
static void __cond_resched(void)
{
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
int __sched cond_resched(void)
{
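+ /* PREEMPT_ACTIVE means we are already in the middle of a preemption */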
- if (need_resched() && __resched_legal(0)) {
+ if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
+ system_state == SYSTEM_RUNNING) {
__cond_resched();
return 1;
}
ret = 1;
spin_lock(lock);
}
- if (need_resched() && __resched_legal(1)) {
+ if (need_resched() && system_state == SYSTEM_RUNNING) {
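+ /* drop the lock (telling lockdep) before scheduling, then retake it */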
spin_release(&lock->dep_map, 1, _THIS_IP_);
_raw_spin_unlock(lock);
preempt_enable_no_resched();
{
BUG_ON(!in_softirq());
- if (need_resched() && __resched_legal(0)) {
+ if (need_resched() && system_state == SYSTEM_RUNNING) {
raw_local_irq_disable();
_local_bh_enable();
raw_local_irq_enable();
/**
* yield - yield the current processor to other threads.
*
- * this is a shortcut for kernel-space yielding - it marks the
+ * This is a shortcut for kernel-space yielding - it marks the
* thread runnable and calls sys_sched_yield().
*/
void __sched yield(void)
}
/* cpus with isolated domains */
-static cpumask_t __cpuinitdata cpu_isolated_map = CPU_MASK_NONE;
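+/* plain .data: the mask is also read when sched domains are rebuilt later */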
+static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
/* Setup the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
lock_cpu_hotplug();
arch_init_sched_domains(&cpu_online_map);
- cpus_andnot(non_isolated_cpus, cpu_online_map, cpu_isolated_map);
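+ /* consider all possible CPUs, not just those currently online */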
+ cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
if (cpus_empty(non_isolated_cpus))
cpu_set(smp_processor_id(), non_isolated_cpus);
unlock_cpu_hotplug();