/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - jiffies
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and layer the unstable clock deltas on top.
 * The deltas are filtered, making the clock monotonic and keeping it
 * within an expected window. This window is set up using jiffies.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than one jiffy of difference).
 */
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK

struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t		lock;

	unsigned long		tick_jiffies;	/* jiffies at the last tick */
	u64			prev_raw;	/* raw sched_clock() at last update */
	u64			tick_raw;	/* raw sched_clock() at last tick */
	u64			tick_gtod;	/* gtod time at the last tick */
	u64			clock;		/* filtered, monotonic output */
#ifdef CONFIG_NO_HZ
	int			check_max;	/* is the max-window check enabled? */
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

static __read_mostly int sched_clock_running;
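
/*
 * Initialize every possible cpu's clock data to the same gtod baseline,
 * so all cpus start out with identical clock and tick_gtod values.
 */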
void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	unsigned long now_jiffies = jiffies;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_jiffies = now_jiffies;
		scd->prev_raw = 0;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
#ifdef CONFIG_NO_HZ
		scd->check_max = 1;
#endif
	}

	sched_clock_running = 1;
}

#ifdef CONFIG_NO_HZ
/*
 * Dynamic ticks make the jiffies delta inaccurate, which keeps us from
 * bounding the maximum time update. Disable the maximum-window check
 * while the tick is stopped.
 */
void sched_clock_tick_stop(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);

	scd->check_max = 0;
}

void sched_clock_tick_start(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);

	scd->check_max = 1;
}

static int check_max(struct sched_clock_data *scd)
{
	return scd->check_max;
}
#else
static int check_max(struct sched_clock_data *scd)
{
	return 1;
}
#endif /* CONFIG_NO_HZ */
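
/*
 * Worked example (assuming HZ=1000, so TICK_NSEC is roughly 10^6 ns): if
 * one jiffy has passed since the last tick, delta_jiffies == 1, giving
 * the window [tick_gtod, tick_gtod + 3 * TICK_NSEC]. A raw delta that
 * would push the clock past the upper bound gets clipped, and a clock
 * lagging behind the lower bound gets pulled forward to it.
 */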
/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use jiffies to generate a min,max window to clip the raw values
 */
static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
{
	unsigned long now_jiffies = jiffies;
	long delta_jiffies = now_jiffies - scd->tick_jiffies;
	u64 clock = scd->clock;
	u64 min_clock, max_clock;
	s64 delta = now - scd->prev_raw;

	WARN_ON_ONCE(!irqs_disabled());

	min_clock = scd->tick_gtod +
		(delta_jiffies ? delta_jiffies - 1 : 0) * TICK_NSEC;

	if (unlikely(delta < 0)) {
		/* the raw clock went backwards; advance minimally instead */
		clock++;
		goto out;
	}

	/*
	 * The clock must stay within a jiffy of the gtod.
	 * But since we may be at the start of a jiffy or the end of one
	 * we add another jiffy buffer.
	 */
	max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;

	if (unlikely(clock + delta > max_clock) && check_max(scd)) {
		if (clock < max_clock)
			clock = max_clock;
		else
			clock++;
	} else {
		clock += delta;
	}

 out:
	if (unlikely(clock < min_clock))
		clock = min_clock;

	scd->prev_raw = now;
	scd->clock = clock;
}
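
/*
 * Take two cpus' clock locks in a fixed (address) order so that two cpus
 * concurrently reading each other's clock cannot deadlock (ABBA avoidance).
 */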
static void lock_double_clock(struct sched_clock_data *data1,
				struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}

u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		/*
		 * in order to update a remote cpu's clock based on our
		 * unstable raw time rebase it against:
		 *   tick_raw	(offset between raw counters)
		 *   tick_gtod	(tick offset between cpus)
		 */
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		now -= my_scd->tick_raw;
		now += scd->tick_raw;

		now += my_scd->tick_gtod;
		now -= scd->tick_gtod;

		__raw_spin_unlock(&my_scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
	}

	__update_sched_clock(scd, now);
	clock = scd->clock;

	__raw_spin_unlock(&scd->lock);

	return clock;
}
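
/*
 * Called from the timer tick with irqs disabled: run the filter once
 * more, then re-snapshot jiffies, the raw clock and gtod as the
 * reference points for the next window.
 */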
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	unsigned long now_jiffies = jiffies;
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now = sched_clock();
	now_gtod = ktime_to_ns(ktime_get());

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now);
	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock by 2 jiffies.
	 */
	scd->tick_jiffies = now_jiffies;
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__raw_spin_unlock(&scd->lock);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled for delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	struct sched_clock_data *scd = this_scd();
	u64 now = sched_clock();

	/*
	 * Override the previous timestamp and ignore all
	 * sched_clock() deltas that occurred while we idled,
	 * and use the PM-provided delta_ns to advance the
	 * rq clock:
	 */
	__raw_spin_lock(&scd->lock);
	scd->prev_raw = now;
	scd->clock += delta_ns;
	__raw_spin_unlock(&scd->lock);

	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}
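
/*
 * Rough call flow (a sketch of how these hooks are wired up elsewhere in
 * the kernel, not defined in this file): the periodic timer tick calls
 * sched_clock_tick(); NO_HZ enter/exit call sched_clock_tick_stop() and
 * sched_clock_tick_start(); idle/PM code brackets deep idle with
 * sched_clock_idle_sleep_event() and sched_clock_idle_wakeup_event();
 * and the scheduler reads time via sched_clock_cpu(cpu).
 */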