/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using an appropriate sampling rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in uS.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE			\
	(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE			\
	(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000)
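
/*
 * Worked example (illustrative values, not taken from this file): a CPU
 * whose transition latency is 100 uS gets def_sampling_rate =
 * 100 * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER = 100000 uS, i.e. the load
 * is re-evaluated roughly every 100 mS. Writes to the sampling_rate
 * sysfs tunable are then bounded by MIN_SAMPLING_RATE (half the
 * default) and MAX_SAMPLING_RATE (500x the default).
 */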

static void do_dbs_timer(void *data);

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	struct cpufreq_policy *cur_policy;
	struct work_struct work;
	unsigned int enable;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
 * lock and dbs_mutex. cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
 * is recursive for the same process. -Venki
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *kondemand_wq;

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int ignore_nice;
	unsigned int powersave_bias;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.ignore_nice = 0,
	.powersave_bias = 0,
};

static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
	cputime64_t retval;

	/* Treat iowait as idle; optionally count nice time as idle too */
	retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
			kstat_cpu(cpu).cpustat.iowait);

	if (dbs_tuners_ins.ignore_nice)
		retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);

	return retval;
}

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
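/*
 * Illustrative arithmetic (assumed example values, not from this file):
 * with powersave_bias = 100 (i.e. 10%) and a 2000 MHz target,
 * freq_avg = 2000 - 200 = 1800 MHz. If the table only offers 1600 and
 * 2000 MHz, the governor alternates between them, spending
 * (1800 - 1600) / (2000 - 1600) = 50% of each sampling interval at
 * 2000 MHz, so the time-averaged frequency approximates 1800 MHz.
 */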
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);	/* round to nearest */
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
		dbs_info->freq_table = cpufreq_frequency_get_table(i);
		dbs_info->freq_lo = 0;
	}
}

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_SAMPLING_RATE
		     || input < MIN_SAMPLING_RATE) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.sampling_rate = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
		dbs_info->prev_cpu_wall = get_jiffies_64();
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	if (input > 1000)
		input = 1000;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	mutex_unlock(&dbs_mutex);

	return count;
}

#define define_one_rw(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);
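
/*
 * Summary of the tunables above (added for clarity, derived from the
 * store_* handlers): sampling_rate is in uS, up_threshold is a load
 * percentage (11-100), ignore_nice_load is boolean, and powersave_bias
 * is in units of 0.1% (0-1000) of frequency reduction below the
 * ondemand target.
 */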

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int idle_ticks, total_ticks;
	unsigned int load;
	cputime64_t cur_jiffies;
	struct cpufreq_policy *policy;
	unsigned int j;

	if (!this_dbs_info->enable)
		return;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;
	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
			this_dbs_info->prev_cpu_wall);
	this_dbs_info->prev_cpu_wall = cur_jiffies;
	if (!total_ticks)
		return;		/* avoid a divide by zero below */
	/*
	 * Every sampling_rate, we check whether the current idle time is
	 * less than 20% (default); if it is, we try to increase the
	 * frequency. Every sampling_rate, we also look for the lowest
	 * frequency which can sustain the load while keeping idle time
	 * over 30%. If such a frequency exists, we try to decrease to it.
	 *
	 * Any frequency increase takes it to the maximum frequency;
	 * a frequency decrease jumps straight to the computed target
	 * frequency rather than stepping down gradually.
	 */
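	/*
	 * Worked example (illustrative numbers, not from this file): with
	 * up_threshold = 80, a sampled load of 90% sends the CPU to
	 * policy->max. A load of 35% at 2000 MHz instead computes
	 * freq_next = 2000 * 35 / (80 - 10) = 1000 MHz, the lowest
	 * frequency expected to run this load at ~70% utilization.
	 */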

	/* Get Idle Time */
	idle_ticks = UINT_MAX;
	for_each_cpu_mask(j, policy->cpus) {
		cputime64_t total_idle_ticks;
		unsigned int tmp_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		total_idle_ticks = get_cpu_idle_time(j);
		tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = total_idle_ticks;

		/* load is computed from the busiest CPU in the policy */
		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}
	load = (100 * (total_ticks - idle_ticks)) / total_ticks;

	/* Check for frequency increase */
	if (load > dbs_tuners_ins.up_threshold) {
		/* if we are already at full speed then break out early */
		if (!dbs_tuners_ins.powersave_bias) {
			if (policy->cur == policy->max)
				return;

			__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		} else {
			int freq = powersave_bias_target(policy, policy->max,
					CPUFREQ_RELATION_H);
			__cpufreq_driver_target(policy, freq,
					CPUFREQ_RELATION_L);
		}
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we keep 10 points under the threshold.
	 */
	if (load < (dbs_tuners_ins.up_threshold - 10)) {
		unsigned int freq_next, freq_cur;

		freq_cur = cpufreq_driver_getavg(policy);
		if (!freq_cur)
			freq_cur = policy->cur;

		freq_next = (freq_cur * load) /
				(dbs_tuners_ins.up_threshold - 10);

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
					CPUFREQ_RELATION_L);
		}
	}
}

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
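
/*
 * Note (added for clarity): when powersave_bias is set, each sampling
 * interval is split in two. The NORMAL_SAMPLE pass evaluates the load
 * and runs at freq_hi for freq_hi_jiffies; the SUB_SAMPLE pass then
 * drops to freq_lo for the remainder, so the average frequency lands
 * between the two table entries.
 */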

static void do_dbs_timer(void *data)
{
	unsigned int cpu = smp_processor_id();
	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;
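	/*
	 * Illustrative (assumed HZ=1000 and the default 100000 uS sampling
	 * rate): delay starts at 100 jiffies, and subtracting
	 * jiffies % delay aligns every CPU's next sample to the same
	 * 100-jiffy boundary.
	 */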

	if (!dbs_info->enable)
		return;
	/* Common NORMAL_SAMPLE setup */
	INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
	if (!dbs_tuners_ins.powersave_bias ||
	    (unsigned long) data == DBS_NORMAL_SAMPLE) {
		lock_cpu_hotplug();
		dbs_check_cpu(dbs_info);
		unlock_cpu_hotplug();
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			INIT_WORK(&dbs_info->work, do_dbs_timer,
					(void *)DBS_SUB_SAMPLE);
			delay = dbs_info->freq_hi_jiffies;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
					dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
	}
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
}

static inline void dbs_timer_init(unsigned int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	ondemand_powersave_bias_init();
	INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	/* Clear enable first so an in-flight work item bails out early */
	dbs_info->enable = 0;
	cancel_delayed_work(&dbs_info->work);
	flush_workqueue(kondemand_wq);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (policy->cpuinfo.transition_latency >
				(TRANSITION_LATENCY_LIMIT * 1000)) {
			printk(KERN_WARNING "ondemand governor failed to load "
			       "due to too long transition latency\n");
			return -EINVAL;
		}
		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);
		dbs_enable++;
		if (dbs_enable == 1) {
			kondemand_wq = create_workqueue("kondemand");
			if (!kondemand_wq) {
				printk(KERN_ERR
					"Creation of kondemand failed\n");
				dbs_enable--;
				mutex_unlock(&dbs_mutex);
				return -ENOSPC;
			}
		}

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			if (dbs_enable == 1)
				destroy_workqueue(kondemand_wq);
			dbs_enable--;
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
			j_dbs_info->prev_cpu_wall = get_jiffies_64();
		}
		this_dbs_info->enable = 1;
		/*
		 * Start the timer schedule work when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate = latency *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
				def_sampling_rate = MIN_STAT_SAMPLING_RATE;

			dbs_tuners_ins.sampling_rate = def_sampling_rate;
		}
		dbs_timer_init(policy->cpu);

		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		dbs_timer_exit(this_dbs_info);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		if (dbs_enable == 0)
			destroy_workqueue(kondemand_wq);
		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->max,
						CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->min,
						CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);
		break;
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_dbs = {
	.name = "ondemand",
	.governor = cpufreq_governor_dbs,
	.owner = THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_dbs);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_dbs);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
		   "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);