/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
/*
 * dbs is used in this file as a shortform for demandbased switching
 * It helps to keep variable names smaller, simpler
 */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MIN_FREQUENCY_UP_THRESHOLD		(0)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define MIN_FREQUENCY_DOWN_THRESHOLD		(0)
#define MAX_FREQUENCY_DOWN_THRESHOLD		(100)
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using an appropriate sampling rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in uS.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE			(def_sampling_rate / 2)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000)
static void do_dbs_timer(void *data);
struct cpu_dbs_info_s {
	struct cpufreq_policy	*cur_policy;
	unsigned int		prev_cpu_idle_up;
	unsigned int		prev_cpu_idle_down;
	unsigned int		enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
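/*
 * prev_cpu_idle_up and prev_cpu_idle_down hold the idle-tick counts seen at
 * the previous sample, so dbs_check_cpu() can work with deltas. Two separate
 * baselines are kept because the increase check runs every sample while the
 * decrease check only runs every sampling_down_factor samples.
 */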
static unsigned int dbs_enable;	/* number of CPUs using this policy */

static DECLARE_MUTEX	(dbs_sem);
static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
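/*
 * dbs_sem serializes tuner updates against governor start/stop and the
 * sampling work; dbs_work is the single delayed work item that polls all
 * CPUs managed by this governor.
 */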
struct dbs_tuners {
	unsigned int		sampling_rate;
	unsigned int		sampling_down_factor;
	unsigned int		up_threshold;
	unsigned int		down_threshold;
	unsigned int		ignore_nice;
	unsigned int		freq_step;
};

static struct dbs_tuners dbs_tuners_ins = {
	.up_threshold		= DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold		= DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor	= DEF_SAMPLING_DOWN_FACTOR,
};
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
}
#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice, ignore_nice);
show_one(freq_step, freq_step);
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf (buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	down(&dbs_sem);
	dbs_tuners_ins.sampling_down_factor = input;
	up(&dbs_sem);

	return count;
}
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf (buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.sampling_rate = input;
	up(&dbs_sem);

	return count;
}
static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf (buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD ||
			input <= dbs_tuners_ins.down_threshold) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	up(&dbs_sem);

	return count;
}
static ssize_t store_down_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf (buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
			input >= dbs_tuners_ins.up_threshold) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.down_threshold = input;
	up(&dbs_sem);

	return count;
}
static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf (buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	if (input > 1)
		input = 1;

	down(&dbs_sem);
	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
		up(&dbs_sem);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
	for_each_cpu_mask(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		j_dbs_info->cur_policy = policy;
		j_dbs_info->prev_cpu_idle_up =
			kstat_cpu(j).cpustat.idle +
			kstat_cpu(j).cpustat.iowait +
			( !dbs_tuners_ins.ignore_nice
			? kstat_cpu(j).cpustat.nice : 0 );
		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
	}
	up(&dbs_sem);

	return count;
}
static ssize_t store_freq_step(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf (buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	if (input > 100)
		input = 100;

	/* no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :) */
	down(&dbs_sem);
	dbs_tuners_ins.freq_step = input;
	up(&dbs_sem);

	return count;
}
#define define_one_rw(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice);
define_one_rw(freq_step);
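/*
 * Usage sketch (path assumed from the standard cpufreq sysfs layout; "cpu0"
 * is purely illustrative): the attributes defined above appear under
 * /sys/devices/system/cpu/cpu0/cpufreq/ondemand/, so for example
 *	# echo 90 > /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
 * raises the busy threshold that triggers a jump to the maximum frequency.
 */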
static struct attribute * dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice.attr,
	&freq_step.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};
/************************** sysfs end ************************/
static void dbs_check_cpu(int cpu)
{
	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
	unsigned int total_idle_ticks;
	unsigned int freq_down_step;
	unsigned int freq_down_sampling_rate;
	static int down_skip[NR_CPUS];
	struct cpu_dbs_info_s *this_dbs_info;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
	if (!this_dbs_info->enable)
		return;

	policy = this_dbs_info->cur_policy;
	/*
	 * The default safe range is 20% to 80%
	 * Every sampling_rate, we check
	 *	- If current idle time is less than 20%, then we try to
	 *	  increase frequency
	 * Every sampling_rate*sampling_down_factor, we check
	 *	- If current idle time is more than 80%, then we try to
	 *	  decrease frequency
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of max_frequency
	 */
	/* Check for frequency increase */
	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait;
	/* consider 'nice' tasks as 'idle' time too if required */
	if (dbs_tuners_ins.ignore_nice == 0)
		total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
	idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_up;
	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		if (j == cpu)
			continue;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		/* Check for frequency increase */
		total_idle_ticks = kstat_cpu(j).cpustat.idle +
			kstat_cpu(j).cpustat.iowait;
		/* consider 'nice' too? */
		if (dbs_tuners_ins.ignore_nice == 0)
			total_idle_ticks += kstat_cpu(j).cpustat.nice;
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_up;
		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}
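	/*
	 * All CPUs in the policy share one frequency, so the decision below
	 * uses the least idle (i.e. busiest) CPU in the mask: a single busy
	 * CPU is enough to justify running at the higher frequency.
	 */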
	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (idle_ticks < up_idle_ticks) {
		/* if we are already at full speed then break out early */
		if (policy->cur == policy->max)
			return;

		__cpufreq_driver_target(policy, policy->max,
			CPUFREQ_RELATION_H);
		down_skip[cpu] = 0;
		this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
		return;
	}
	/* Check for frequency decrease */
	down_skip[cpu]++;
	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
		return;
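	/*
	 * Only every sampling_down_factor'th sample gets this far, which is
	 * what makes the governor quick to raise the frequency but
	 * deliberately slow to lower it again.
	 */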
	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait;
	/* consider 'nice' too? */
	if (dbs_tuners_ins.ignore_nice == 0)
		total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
	idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_down;
	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		if (j == cpu)
			continue;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		/* Check for frequency decrease */
		total_idle_ticks = kstat_cpu(j).cpustat.idle +
			kstat_cpu(j).cpustat.iowait;
		/* consider 'nice' too? */
		if (dbs_tuners_ins.ignore_nice == 0)
			total_idle_ticks += kstat_cpu(j).cpustat.nice;
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_down;
		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}
	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	down_skip[cpu] = 0;

	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
		dbs_tuners_ins.sampling_down_factor;
	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
		usecs_to_jiffies(freq_down_sampling_rate);
	if (idle_ticks > down_idle_ticks) {
		/* if we are already at the lowest speed then break out early
		 * or if we 'cannot' reduce the speed as the user might want
		 * freq_step to be zero */
		if (policy->cur == policy->min || dbs_tuners_ins.freq_step == 0)
			return;
		freq_down_step = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_down_step == 0))
			freq_down_step = 5;
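		/*
		 * Worked example (numbers illustrative): with the default
		 * freq_step of 5 and policy->max = 2000000 kHz, each step is
		 * (5 * 2000000) / 100 = 100000 kHz, so the governor walks
		 * down towards policy->min in 100 MHz decrements.
		 */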
		__cpufreq_driver_target(policy,
			policy->cur - freq_down_step,
			CPUFREQ_RELATION_L);
	}
}
static void do_dbs_timer(void *data)
{
	int i;

	down(&dbs_sem);
	for_each_online_cpu(i)
		dbs_check_cpu(i);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	up(&dbs_sem);
}
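/*
 * Note that do_dbs_timer() above re-queues itself: one shared delayed work
 * item samples every online CPU under dbs_sem and then re-arms for the next
 * interval, so there is a single sampling timer regardless of CPU count.
 */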
static inline void dbs_timer_init(void)
{
	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	return;
}
static inline void dbs_timer_exit(void)
{
	cancel_delayed_work(&dbs_work);
	return;
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) ||
		    (!policy->cur))
			return -EINVAL;

		if (policy->cpuinfo.transition_latency >
				(TRANSITION_LATENCY_LIMIT * 1000))
			return -EINVAL;
		if (this_dbs_info->enable) /* Already enabled */
			break;

		down(&dbs_sem);
		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle_up =
				kstat_cpu(j).cpustat.idle +
				kstat_cpu(j).cpustat.iowait +
				( !dbs_tuners_ins.ignore_nice
				? kstat_cpu(j).cpustat.nice : 0 );
			j_dbs_info->prev_cpu_idle_down
				= j_dbs_info->prev_cpu_idle_up;
		}
		this_dbs_info->enable = 1;
		sysfs_create_group(&policy->kobj, &dbs_attr_group);
		dbs_enable++;
		/*
		 * Start the timerschedule work, when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */

			latency = policy->cpuinfo.transition_latency;
			if (latency < 1000)
				latency = 1000;

			def_sampling_rate = (latency / 1000) *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
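			/*
			 * Worked example (numbers illustrative): a driver
			 * reporting a transition latency of 100000 nS
			 * (100 uS) yields def_sampling_rate =
			 * (100000 / 1000) * 1000 = 100000 uS, i.e. one
			 * sample every 100 mS, matching the "1000 times
			 * the transition latency" default described above.
			 */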
			dbs_tuners_ins.sampling_rate = def_sampling_rate;
			dbs_tuners_ins.ignore_nice = 0;
			dbs_tuners_ins.freq_step = 5;

			dbs_timer_init();
		}

		up(&dbs_sem);
		break;
	case CPUFREQ_GOV_STOP:
		down(&dbs_sem);
		this_dbs_info->enable = 0;
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		/*
		 * Stop the timerschedule work, when this governor
		 * is no longer in use on any CPU
		 */
		if (dbs_enable == 0)
			dbs_timer_exit();

		up(&dbs_sem);
		break;
	case CPUFREQ_GOV_LIMITS:
		down(&dbs_sem);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		up(&dbs_sem);
		break;
	}
	return 0;
}
static struct cpufreq_governor cpufreq_gov_dbs = {
	.name		= "ondemand",
	.governor	= cpufreq_governor_dbs,
	.owner		= THIS_MODULE,
};
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_dbs);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
	/* Make sure that the scheduled work is indeed not running */
	flush_scheduled_work();

	cpufreq_unregister_governor(&cpufreq_gov_dbs);
}
MODULE_AUTHOR ("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_DESCRIPTION ("'cpufreq_ondemand' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors");
MODULE_LICENSE ("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);