static int enable_arbiter_disable(void)
{
struct pci_dev *dev;
+ int status;
int reg;
u8 pci_cmd;
+ status = 1;
/* Find PLE133 host bridge */
reg = 0x78;
- dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, NULL);
+ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0,
+ NULL);
/* Find CLE266 host bridge */
if (dev == NULL) {
reg = 0x76;
- dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL);
+ dev = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_862X_0, NULL);
/* Find CN400 V-Link host bridge */
if (dev == NULL)
- dev = pci_find_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
-
+ dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
}
if (dev != NULL) {
/* Enable access to port 0x22 */
if (!(pci_cmd & 1<<7)) {
printk(KERN_ERR PFX
"Can't enable access to port 0x22.\n");
- return 0;
+ status = 0;
}
}
- return 1;
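+ /* pci_get_device() takes a reference on dev, so drop it before returning */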
+ pci_dev_put(dev);
+ return status;
}
return 0;
}
u8 pci_cmd;
/* Find VT8235 southbridge */
- dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
+ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
if (dev != NULL) {
/* Set transition time to max */
pci_read_config_byte(dev, 0xec, &pci_cmd);
pci_read_config_byte(dev, 0xe5, &pci_cmd);
pci_cmd |= 1 << 7;
pci_write_config_byte(dev, 0xe5, pci_cmd);
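+ /* release the reference taken by pci_get_device() above */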
+ pci_dev_put(dev);
return 1;
}
return 0;
sizeof(samuel2_eblcr));
break;
case 1 ... 15:
- longhaul_version = TYPE_LONGHAUL_V2;
+ longhaul_version = TYPE_LONGHAUL_V1;
if (c->x86_mask < 8) {
cpu_model = CPU_SAMUEL2;
cpuname = "C3 'Samuel 2' [C5B]";
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
-#include <linux/sched.h> /* current / set_cpus_allowed() */
#include <asm/processor.h>
#include <asm/msr.h>
if (!cpu_online(cpu) || (newstate > DC_DISABLE) || (newstate == DC_RESV))
return -EINVAL;
- rdmsr(MSR_IA32_THERM_STATUS, l, h);
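+ /* read the MSR on the target CPU rather than relying on the caller's affinity */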
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
if (l & 0x01)
dprintk("CPU#%d currently thermal throttled\n", cpu);
if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT))
newstate = DC_38PT;
- rdmsr(MSR_IA32_THERM_CONTROL, l, h);
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
if (newstate == DC_DISABLE) {
dprintk("CPU#%d disabling modulation\n", cpu);
- wrmsr(MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
} else {
dprintk("CPU#%d setting duty cycle to %d%%\n",
cpu, ((125 * newstate) / 10));
*/
l = (l & ~14);
l = l | (1<<4) | ((newstate & 0x7)<<1);
- wrmsr(MSR_IA32_THERM_CONTROL, l, h);
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
}
return 0;
{
unsigned int newstate = DC_RESV;
struct cpufreq_freqs freqs;
- cpumask_t cpus_allowed;
int i;
if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate))
/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
* Developer's Manual, Volume 3
*/
- cpus_allowed = current->cpus_allowed;
-
- for_each_cpu_mask(i, policy->cpus) {
- cpumask_t this_cpu = cpumask_of_cpu(i);
-
- set_cpus_allowed(current, this_cpu);
- BUG_ON(smp_processor_id() != i);
-
+ for_each_cpu_mask(i, policy->cpus)
cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
- }
- set_cpus_allowed(current, cpus_allowed);
/* notifiers */
for_each_cpu_mask(i, policy->cpus) {
static unsigned int cpufreq_p4_get(unsigned int cpu)
{
- cpumask_t cpus_allowed;
u32 l, h;
- cpus_allowed = current->cpus_allowed;
-
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
- BUG_ON(smp_processor_id() != cpu);
-
- rdmsr(MSR_IA32_THERM_CONTROL, l, h);
-
- set_cpus_allowed(current, cpus_allowed);
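+ /* rdmsr_on_cpu() reads the MSR on the target CPU, so no affinity switching is needed */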
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
if (l & 0x10) {
l = l >> 1;
dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
data->powernow_table = powernow_table;
- print_basics(data);
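+ /* print the frequency table only once per package (first core only) */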
+ if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+ print_basics(data);
for (j = 0; j < data->numps; j++)
if ((pst[j].fid==data->currfid) && (pst[j].vid==data->currvid))
/* fill in data */
data->numps = data->acpi_data.state_count;
- print_basics(data);
+ if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+ print_basics(data);
powernow_k8_acpi_pst_values(data, 0);
/* notify BIOS that we exist */
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
+#ifdef CONFIG_X86_POWERNOW_K8_ACPI
static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
+#endif
#ifdef CONFIG_SMP
static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
cpu_set(j, set_mask);
set_cpus_allowed(current, set_mask);
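+ /* block preemption so the smp_processor_id() check below stays valid */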
+ preempt_disable();
if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
dprintk("couldn't limit to CPUs in this domain\n");
retval = -EAGAIN;
/* We haven't started the transition yet. */
goto migrate_end;
}
+ preempt_enable();
break;
}
}
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
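+ /* balance the earlier preempt_disable() on every path out of this iteration */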
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
+ preempt_enable();
break;
+ }
cpu_set(j, covered_cpus);
+ preempt_enable();
}
for_each_cpu_mask(k, online_policy_cpus) {
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
}
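+ /* normal exit: restore the saved affinity instead of falling into migrate_end */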
+ set_cpus_allowed(current, saved_mask);
+ return 0;
migrate_end:
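+ /* reached via the error goto above with preemption still disabled */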
+ preempt_enable();
set_cpus_allowed(current, saved_mask);
return 0;
}
help
This adds the CPUFreq driver for mobile AMD Opteron/Athlon64 processors.
+ To compile this driver as a module, choose M here: the
+ module will be called powernow-k8.
+
For details, take a look at <file:Documentation/cpu-freq/>.
If in doubt, say N.
mobile CPUs. This means Intel Pentium M (Centrino) CPUs
or 64bit enabled Intel Xeons.
+ To compile this driver as a module, choose M here: the
+ module will be called speedstep-centrino.
+
For details, take a look at <file:Documentation/cpu-freq/>.
If in doubt, say N.
Processor Performance States.
This driver also supports Intel Enhanced Speedstep.
+ To compile this driver as a module, choose M here: the
+ module will be called acpi-cpufreq.
+
For details, take a look at <file:Documentation/cpu-freq/>.
If in doubt, say N.
comment "shared options"
config X86_ACPI_CPUFREQ_PROC_INTF
- bool "/proc/acpi/processor/../performance interface (deprecated)"
+ bool "/proc/acpi/processor/../performance interface (deprecated)"
depends on PROC_FS
depends on X86_ACPI_CPUFREQ || X86_SPEEDSTEP_CENTRINO_ACPI || X86_POWERNOW_K8_ACPI
help
slowdowns and noticeable latencies. Normally Speedstep should be used
instead.
+ To compile this driver as a module, choose M here: the
+ module will be called p4-clockmod.
+
For details, take a look at <file:Documentation/cpu-freq/>.
Unless you are absolutely sure say N.
config X86_SPEEDSTEP_LIB
- tristate
- default X86_P4_CLOCKMOD
+ tristate
+ default X86_P4_CLOCKMOD
endif
endmenu
-
PDE(inode)->data);
}
-static ssize_t
-acpi_processor_write_performance(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * data)
-{
- int result = 0;
- struct seq_file *m = file->private_data;
- struct acpi_processor *pr = m->private;
- struct acpi_processor_performance *perf;
- char state_string[12] = { '\0' };
- unsigned int new_state = 0;
- struct cpufreq_policy policy;
-
-
- if (!pr || (count > sizeof(state_string) - 1))
- return -EINVAL;
-
- perf = pr->performance;
- if (!perf)
- return -EINVAL;
-
- if (copy_from_user(state_string, buffer, count))
- return -EFAULT;
-
- state_string[count] = '\0';
- new_state = simple_strtoul(state_string, NULL, 0);
-
- if (new_state >= perf->state_count)
- return -EINVAL;
-
- cpufreq_get_policy(&policy, pr->id);
-
- policy.cpu = pr->id;
- policy.min = perf->states[new_state].core_frequency * 1000;
- policy.max = perf->states[new_state].core_frequency * 1000;
-
- result = cpufreq_set_policy(&policy);
- if (result)
- return result;
-
- return count;
-}
-
static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
struct proc_dir_entry *entry = NULL;
/* add file 'performance' [R/W] */
entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
- S_IFREG | S_IRUGO | S_IWUSR,
+ S_IFREG | S_IRUGO,
acpi_device_dir(device));
if (entry){
- acpi_processor_perf_fops.write = acpi_processor_write_performance;
entry->proc_fops = &acpi_processor_perf_fops;
entry->data = acpi_driver_data(device);
entry->owner = THIS_MODULE;
clock speed, you need to either enable a dynamic cpufreq governor
(see below) after boot, or use a userspace tool.
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq.
+
For details, take a look at <file:Documentation/cpu-freq>.
If in doubt, say N.
if CPU_FREQ
config CPU_FREQ_TABLE
- tristate
+ tristate
config CPU_FREQ_DEBUG
bool "Enable CPUfreq debugging"
4 to activate CPUfreq governor debugging
config CPU_FREQ_STAT
- tristate "CPU frequency translation statistics"
- select CPU_FREQ_TABLE
- default y
- help
- This driver exports CPU frequency statistics information through sysfs
- file system
+ tristate "CPU frequency translation statistics"
+ select CPU_FREQ_TABLE
+ default y
+ help
+ This driver exports CPU frequency statistics information through the
+ sysfs file system.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_stats.
+
+ If in doubt, say N.
config CPU_FREQ_STAT_DETAILS
- bool "CPU frequency translation statistics details"
- depends on CPU_FREQ_STAT
- help
- This will show detail CPU frequency translation table in sysfs file
- system
+ bool "CPU frequency translation statistics details"
+ depends on CPU_FREQ_STAT
+ help
+ This will show a detailed CPU frequency translation table in the sysfs
+ file system.
+
+ If in doubt, say N.
# Note that it is not currently possible to set the other governors (such as ondemand)
# as the default, since if they fail to initialise, cpufreq will be
endchoice
config CPU_FREQ_GOV_PERFORMANCE
- tristate "'performance' governor"
- help
+ tristate "'performance' governor"
+ help
This cpufreq governor sets the frequency statically to the
highest available CPU frequency.
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_performance.
+
If in doubt, say Y.
config CPU_FREQ_GOV_POWERSAVE
- tristate "'powersave' governor"
- help
+ tristate "'powersave' governor"
+ help
This cpufreq governor sets the frequency statically to the
lowest available CPU frequency.
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_powersave.
+
If in doubt, say Y.
config CPU_FREQ_GOV_USERSPACE
- tristate "'userspace' governor for userspace frequency scaling"
- help
+ tristate "'userspace' governor for userspace frequency scaling"
+ help
Enable this cpufreq governor when you either want to set the
CPU frequency manually or when an userspace program shall
be able to set the CPU dynamically, like on LART
<http://www.lartmaker.nl/>.
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_userspace.
+
For details, take a look at <file:Documentation/cpu-freq/>.
If in doubt, say Y.
do fast frequency switching (i.e, very low latency frequency
transitions).
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_ondemand.
+
For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say N.
step-by-step latency issues between the minimum and maximum frequency
transitions in the CPU) you will probably want to use this governor.
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_conservative.
+
For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say N.
unlock_policy_rwsem_write(cpu);
goto err_out;
}
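+ /* record the initial hardware limits and governor as the user policy */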
+ policy->user_policy.min = policy->cpuinfo.min_freq;
+ policy->user_policy.max = policy->cpuinfo.max_freq;
+ policy->user_policy.governor = policy->governor;
#ifdef CONFIG_SMP
for_each_cpu_mask(j, policy->cpus) {
policy->governor = NULL; /* to assure that the starting sequence is
* run in cpufreq_set_policy */
- unlock_policy_rwsem_write(cpu);
/* set default policy */
- ret = cpufreq_set_policy(&new_policy);
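+ /* the policy rwsem is already held here, so use the internal helper directly */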
+ ret = __cpufreq_set_policy(policy, &new_policy);
+ policy->user_policy.policy = policy->policy;
+
+ unlock_policy_rwsem_write(cpu);
+
if (ret) {
dprintk("setting policy failed\n");
goto err_out_unregister;
return ret;
}
-/**
- * cpufreq_set_policy - set a new CPUFreq policy
- * @policy: policy to be set.
- *
- * Sets a new CPU frequency and voltage scaling policy.
- */
-int cpufreq_set_policy(struct cpufreq_policy *policy)
-{
- int ret = 0;
- struct cpufreq_policy *data;
-
- if (!policy)
- return -EINVAL;
-
- data = cpufreq_cpu_get(policy->cpu);
- if (!data)
- return -EINVAL;
-
- if (unlikely(lock_policy_rwsem_write(policy->cpu)))
- return -EINVAL;
-
-
- ret = __cpufreq_set_policy(data, policy);
- data->user_policy.min = data->min;
- data->user_policy.max = data->max;
- data->user_policy.policy = data->policy;
- data->user_policy.governor = data->governor;
-
- unlock_policy_rwsem_write(policy->cpu);
-
- cpufreq_cpu_put(data);
-
- return ret;
-}
-EXPORT_SYMBOL(cpufreq_set_policy);
-
-
/**
* cpufreq_update_policy - re-evaluate an existing cpufreq policy
* @cpu: CPU which shall be re-evaluated
/*********************************************************************
* CPUFREQ 2.6. INTERFACE *
*********************************************************************/
-int cpufreq_set_policy(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_update_policy(unsigned int cpu);