Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c         | 191
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h         |  11
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c  |  60
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c       |  93
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-lib.c       |   1
-rw-r--r--  arch/x86/kernel/tsc.c                             |   8
6 files changed, 175 insertions(+), 189 deletions(-)
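
The recurring change in this series is the removal of the "migrate the current task to the target CPU" idiom (set_cpus_allowed_ptr() plus an smp_processor_id() sanity check) in favour of smp_call_function_single(), which runs a short, non-sleeping callback directly on the target CPU. A minimal sketch of the new pattern; read_pstate_on_cpu() and read_pstate() are illustrative names, not part of the patch:

    #include <linux/smp.h>
    #include <asm/msr.h>

    /* Runs on the target CPU in IPI context, so it must not sleep;
     * results travel back through the void * argument. */
    static void read_pstate_on_cpu(void *_lo)
    {
            u32 lo, hi;

            rdmsr(MSR_PSTATE_STATUS, lo, hi);   /* MSR as in powernow-k8.h */
            *(u32 *)_lo = lo;
    }

    static u32 read_pstate(unsigned int cpu)
    {
            u32 lo = 0;

            /* wait == 1: block until the callback has finished on 'cpu' */
            smp_call_function_single(cpu, read_pstate_on_cpu, &lo, 1);
            return lo;
    }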
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index cf52215d9eb1..81cbe64ed6b4 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1,3 +1,4 @@
+
/*
* (c) 2003-2006 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
@@ -117,20 +118,17 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
u32 i = 0;
if (cpu_family == CPU_HW_PSTATE) {
- if (data->currpstate == HW_PSTATE_INVALID) {
- /* read (initial) hw pstate if not yet set */
- rdmsr(MSR_PSTATE_STATUS, lo, hi);
- i = lo & HW_PSTATE_MASK;
-
- /*
- * a workaround for family 11h erratum 311 might cause
- * an "out-of-range Pstate if the core is in Pstate-0
- */
- if (i >= data->numps)
- data->currpstate = HW_PSTATE_0;
- else
- data->currpstate = i;
- }
+ rdmsr(MSR_PSTATE_STATUS, lo, hi);
+ i = lo & HW_PSTATE_MASK;
+ data->currpstate = i;
+
+ /*
+ * a workaround for family 11h erratum 311 might cause
+ * an "out-of-range" Pstate if the core is in Pstate-0
+ */
+ if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
+ data->currpstate = HW_PSTATE_0;
+
return 0;
}
do {
@@ -510,41 +508,34 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
return 0;
}
-static int check_supported_cpu(unsigned int cpu)
+static void check_supported_cpu(void *_rc)
{
- cpumask_t oldmask;
u32 eax, ebx, ecx, edx;
- unsigned int rc = 0;
-
- oldmask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+ int *rc = _rc;
- if (smp_processor_id() != cpu) {
- printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
- goto out;
- }
+ *rc = -ENODEV;
if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
- goto out;
+ return;
eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) &&
((eax & CPUID_XFAM) < CPUID_XFAM_10H))
- goto out;
+ return;
if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
printk(KERN_INFO PFX
"Processor cpuid %x not supported\n", eax);
- goto out;
+ return;
}
eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
printk(KERN_INFO PFX
"No frequency change capabilities detected\n");
- goto out;
+ return;
}
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
@@ -552,21 +543,17 @@ static int check_supported_cpu(unsigned int cpu)
!= P_STATE_TRANSITION_CAPABLE) {
printk(KERN_INFO PFX
"Power state transitions not supported\n");
- goto out;
+ return;
}
} else { /* must be a HW Pstate capable processor */
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
cpu_family = CPU_HW_PSTATE;
else
- goto out;
+ return;
}
- rc = 1;
-
-out:
- set_cpus_allowed_ptr(current, &oldmask);
- return rc;
+ *rc = 0;
}
static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
@@ -823,13 +810,14 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
return;
- control = data->acpi_data.states[index].control; data->irt = (control
- >> IRT_SHIFT) & IRT_MASK; data->rvo = (control >>
- RVO_SHIFT) & RVO_MASK; data->exttype = (control
- >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
- data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK; data->vidmvs = 1
- << ((control >> MVS_SHIFT) & MVS_MASK); data->vstable =
- (control >> VST_SHIFT) & VST_MASK; }
+ control = data->acpi_data.states[index].control;
+ data->irt = (control >> IRT_SHIFT) & IRT_MASK;
+ data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
+ data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+ data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
+ data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
+ data->vstable = (control >> VST_SHIFT) & VST_MASK;
+}
static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
{
@@ -1046,6 +1034,19 @@ static int get_transition_latency(struct powernow_k8_data *data)
if (cur_latency > max_latency)
max_latency = cur_latency;
}
+ if (max_latency == 0) {
+ /*
+ * Fam 11h always returns 0 as transition latency.
+ * This is intended and means "very fast". While cpufreq core
+ * and governors currently can handle that gracefully, better
+ * set it to 1 to avoid problems in the future.
+ * For all others it's a BIOS bug.
+ */
+ if (boot_cpu_data.x86 != 0x11)
+ printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
+ "latency\n");
+ max_latency = 1;
+ }
/* value in usecs, needs to be in nanoseconds */
return 1000 * max_latency;
}
@@ -1093,7 +1094,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
- for_each_cpu_mask_nr(i, *(data->available_cores)) {
+ for_each_cpu(i, data->available_cores) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
@@ -1101,7 +1102,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
res = transition_fid_vid(data, fid, vid);
freqs.new = find_khz_freq_from_fid(data->currfid);
- for_each_cpu_mask_nr(i, *(data->available_cores)) {
+ for_each_cpu(i, data->available_cores) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
@@ -1126,7 +1127,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
data->currpstate);
freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
- for_each_cpu_mask_nr(i, *(data->available_cores)) {
+ for_each_cpu(i, data->available_cores) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
@@ -1134,7 +1135,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
res = transition_pstate(data, pstate);
freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
- for_each_cpu_mask_nr(i, *(data->available_cores)) {
+ for_each_cpu(i, data->available_cores) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
@@ -1235,21 +1236,47 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
return cpufreq_frequency_table_verify(pol, data->powernow_table);
}
-static const char ACPI_PSS_BIOS_BUG_MSG[] =
- KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
- KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
+struct init_on_cpu {
+ struct powernow_k8_data *data;
+ int rc;
+};
+
+static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
+{
+ struct init_on_cpu *init_on_cpu = _init_on_cpu;
+
+ if (pending_bit_stuck()) {
+ printk(KERN_ERR PFX "failing init, change pending bit set\n");
+ init_on_cpu->rc = -ENODEV;
+ return;
+ }
+
+ if (query_current_values_with_pending_wait(init_on_cpu->data)) {
+ init_on_cpu->rc = -ENODEV;
+ return;
+ }
+
+ if (cpu_family == CPU_OPTERON)
+ fidvid_msr_init();
+
+ init_on_cpu->rc = 0;
+}
/* per CPU init entry point to the driver */
static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
{
+ static const char ACPI_PSS_BIOS_BUG_MSG[] =
+ KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+ KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
struct powernow_k8_data *data;
- cpumask_t oldmask;
+ struct init_on_cpu init_on_cpu;
int rc;
if (!cpu_online(pol->cpu))
return -ENODEV;
- if (!check_supported_cpu(pol->cpu))
+ smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
+ if (rc)
return -ENODEV;
data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
@@ -1289,27 +1316,12 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
pol->cpuinfo.transition_latency = get_transition_latency(data);
/* only run on specific CPU from here on */
- oldmask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
-
- if (smp_processor_id() != pol->cpu) {
- printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
- goto err_out_unmask;
- }
-
- if (pending_bit_stuck()) {
- printk(KERN_ERR PFX "failing init, change pending bit set\n");
- goto err_out_unmask;
- }
-
- if (query_current_values_with_pending_wait(data))
- goto err_out_unmask;
-
- if (cpu_family == CPU_OPTERON)
- fidvid_msr_init();
-
- /* run on any CPU again */
- set_cpus_allowed_ptr(current, &oldmask);
+ init_on_cpu.data = data;
+ smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
+ &init_on_cpu, 1);
+ rc = init_on_cpu.rc;
+ if (rc != 0)
+ goto err_out_exit_acpi;
if (cpu_family == CPU_HW_PSTATE)
cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
@@ -1346,8 +1358,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
return 0;
-err_out_unmask:
- set_cpus_allowed_ptr(current, &oldmask);
+err_out_exit_acpi:
powernow_k8_cpu_exit_acpi(data);
err_out:
@@ -1372,28 +1383,25 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
return 0;
}
+static void query_values_on_cpu(void *_err)
+{
+ int *err = _err;
+ struct powernow_k8_data *data = __get_cpu_var(powernow_data);
+
+ *err = query_current_values_with_pending_wait(data);
+}
+
static unsigned int powernowk8_get(unsigned int cpu)
{
- struct powernow_k8_data *data;
- cpumask_t oldmask = current->cpus_allowed;
+ struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
unsigned int khz = 0;
- unsigned int first;
-
- first = cpumask_first(cpu_core_mask(cpu));
- data = per_cpu(powernow_data, first);
+ int err;
if (!data)
return -EINVAL;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
- if (smp_processor_id() != cpu) {
- printk(KERN_ERR PFX
- "limiting to CPU %d failed in powernowk8_get\n", cpu);
- set_cpus_allowed_ptr(current, &oldmask);
- return 0;
- }
-
- if (query_current_values_with_pending_wait(data))
+ smp_call_function_single(cpu, query_values_on_cpu, &err, true);
+ if (err)
goto out;
if (cpu_family == CPU_HW_PSTATE)
@@ -1404,7 +1412,6 @@ static unsigned int powernowk8_get(unsigned int cpu)
out:
- set_cpus_allowed_ptr(current, &oldmask);
return khz;
}
@@ -1430,7 +1437,9 @@ static int __cpuinit powernowk8_init(void)
unsigned int i, supported_cpus = 0;
for_each_online_cpu(i) {
- if (check_supported_cpu(i))
+ int rc;
+ smp_call_function_single(i, check_supported_cpu, &rc, 1);
+ if (rc == 0)
supported_cpus++;
}
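
Because an smp_call_function_single() callback returns void, check_supported_cpu() now reports its result through the pointer argument rather than a return value. Every call site in this file follows the same shape; condensed from the hunks above:

    int rc;

    /* check_supported_cpu() stores 0 or -ENODEV through &rc; with
     * wait == 1 the value is valid as soon as the call returns */
    smp_call_function_single(cpu, check_supported_cpu, &rc, 1);
    if (rc)
            return -ENODEV;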
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index 6c6698feade1..c9c1190b5e1f 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -223,14 +223,3 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
-
-#ifdef CONFIG_SMP
-static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
-{
-}
-#else
-static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
-{
- cpu_set(0, cpu_sharedcore_mask[0]);
-}
-#endif
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 55c831ed71ce..8d672ef162ce 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -323,14 +323,8 @@ static unsigned int get_cur_freq(unsigned int cpu)
{
unsigned l, h;
unsigned clock_freq;
- cpumask_t saved_mask;
- saved_mask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
- if (smp_processor_id() != cpu)
- return 0;
-
- rdmsr(MSR_IA32_PERF_STATUS, l, h);
+ rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
clock_freq = extract_clock(l, cpu, 0);
if (unlikely(clock_freq == 0)) {
@@ -340,11 +334,9 @@ static unsigned int get_cur_freq(unsigned int cpu)
* P-state transition (like TM2). Get the last freq set
* in PERF_CTL.
*/
- rdmsr(MSR_IA32_PERF_CTL, l, h);
+ rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
clock_freq = extract_clock(l, cpu, 1);
}
-
- set_cpus_allowed_ptr(current, &saved_mask);
return clock_freq;
}
@@ -467,15 +459,10 @@ static int centrino_target (struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
int retval = 0;
unsigned int j, k, first_cpu, tmp;
- cpumask_var_t saved_mask, covered_cpus;
+ cpumask_var_t covered_cpus;
- if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL)))
- return -ENOMEM;
- if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
- free_cpumask_var(saved_mask);
+ if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
return -ENOMEM;
- }
- cpumask_copy(saved_mask, &current->cpus_allowed);
if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
retval = -ENODEV;
@@ -493,7 +480,7 @@ static int centrino_target (struct cpufreq_policy *policy,
first_cpu = 1;
for_each_cpu(j, policy->cpus) {
- const struct cpumask *mask;
+ int good_cpu;
/* cpufreq holds the hotplug lock, so we are safe here */
if (!cpu_online(j))
@@ -504,32 +491,30 @@ static int centrino_target (struct cpufreq_policy *policy,
* Make sure we are running on CPU that wants to change freq
*/
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
- mask = policy->cpus;
+ good_cpu = cpumask_any_and(policy->cpus,
+ cpu_online_mask);
else
- mask = cpumask_of(j);
+ good_cpu = j;
- set_cpus_allowed_ptr(current, mask);
- preempt_disable();
- if (unlikely(!cpu_isset(smp_processor_id(), *mask))) {
+ if (good_cpu >= nr_cpu_ids) {
dprintk("couldn't limit to CPUs in this domain\n");
retval = -EAGAIN;
if (first_cpu) {
/* We haven't started the transition yet. */
- goto migrate_end;
+ goto out;
}
- preempt_enable();
break;
}
msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
if (first_cpu) {
- rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
if (msr == (oldmsr & 0xffff)) {
dprintk("no change needed - msr was and needs "
"to be %x\n", oldmsr);
retval = 0;
- goto migrate_end;
+ goto out;
}
freqs.old = extract_clock(oldmsr, cpu, 0);
@@ -553,14 +538,11 @@ static int centrino_target (struct cpufreq_policy *policy,
oldmsr |= msr;
}
- wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
- preempt_enable();
+ wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h);
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
break;
- }
- cpu_set(j, *covered_cpus);
- preempt_enable();
+ cpumask_set_cpu(j, covered_cpus);
}
for_each_cpu(k, policy->cpus) {
@@ -578,10 +560,8 @@ static int centrino_target (struct cpufreq_policy *policy,
* Best effort undo..
*/
- for_each_cpu_mask_nr(j, *covered_cpus) {
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(j));
- wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
- }
+ for_each_cpu(j, covered_cpus)
+ wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
tmp = freqs.new;
freqs.new = freqs.old;
@@ -593,15 +573,9 @@ static int centrino_target (struct cpufreq_policy *policy,
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
}
- set_cpus_allowed_ptr(current, saved_mask);
retval = 0;
- goto out;
-migrate_end:
- preempt_enable();
- set_cpus_allowed_ptr(current, saved_mask);
out:
- free_cpumask_var(saved_mask);
free_cpumask_var(covered_cpus);
return retval;
}
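
Where a single MSR access is all that runs on the remote CPU, the patch switches to rdmsr_on_cpu()/wrmsr_on_cpu(), which hide the cross-CPU call entirely. A sketch of a remote read-modify-write in that style; set_perf_bits() is an illustrative helper, not from the patch:

    #include <asm/msr.h>

    static void set_perf_bits(unsigned int cpu, u32 bits)
    {
            u32 lo, hi;

            /* each helper issues its own cross-CPU call, so the read
             * and the write are two separate IPIs rather than one
             * atomic operation; cpufreq's serialized transitions make
             * that acceptable in this driver */
            rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &lo, &hi);
            wrmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, lo | bits, hi);
    }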
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 016c1a4fa3fc..6911e91fb4f6 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -89,7 +89,8 @@ static int speedstep_find_register(void)
* speedstep_set_state - set the SpeedStep state
* @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
*
- * Tries to change the SpeedStep state.
+ * Tries to change the SpeedStep state. Can be called from
+ * smp_call_function_single.
*/
static void speedstep_set_state(unsigned int state)
{
@@ -143,6 +144,11 @@ static void speedstep_set_state(unsigned int state)
return;
}
+/* Wrapper for smp_call_function_single. */
+static void _speedstep_set_state(void *_state)
+{
+ speedstep_set_state(*(unsigned int *)_state);
+}
/**
* speedstep_activate - activate SpeedStep control in the chipset
@@ -226,22 +232,28 @@ static unsigned int speedstep_detect_chipset(void)
return 0;
}
-static unsigned int _speedstep_get(const struct cpumask *cpus)
-{
+struct get_freq_data {
unsigned int speed;
- cpumask_t cpus_allowed;
-
- cpus_allowed = current->cpus_allowed;
- set_cpus_allowed_ptr(current, cpus);
- speed = speedstep_get_frequency(speedstep_processor);
- set_cpus_allowed_ptr(current, &cpus_allowed);
- dprintk("detected %u kHz as current frequency\n", speed);
- return speed;
+ unsigned int processor;
+};
+
+static void get_freq_data(void *_data)
+{
+ struct get_freq_data *data = _data;
+
+ data->speed = speedstep_get_frequency(data->processor);
}
static unsigned int speedstep_get(unsigned int cpu)
{
- return _speedstep_get(cpumask_of(cpu));
+ struct get_freq_data data = { .processor = speedstep_processor };
+
+ /* You're supposed to ensure CPU is online. */
+ if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0)
+ BUG();
+
+ dprintk("detected %u kHz as current frequency\n", data.speed);
+ return data.speed;
}
/**
@@ -257,16 +269,16 @@ static int speedstep_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
- unsigned int newstate = 0;
+ unsigned int newstate = 0, policy_cpu;
struct cpufreq_freqs freqs;
- cpumask_t cpus_allowed;
int i;
if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
target_freq, relation, &newstate))
return -EINVAL;
- freqs.old = _speedstep_get(policy->cpus);
+ policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
+ freqs.old = speedstep_get(policy_cpu);
freqs.new = speedstep_freqs[newstate].frequency;
freqs.cpu = policy->cpu;
@@ -276,20 +288,13 @@ static int speedstep_target(struct cpufreq_policy *policy,
if (freqs.old == freqs.new)
return 0;
- cpus_allowed = current->cpus_allowed;
-
for_each_cpu(i, policy->cpus) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
- /* switch to physical CPU where state is to be changed */
- set_cpus_allowed_ptr(current, policy->cpus);
-
- speedstep_set_state(newstate);
-
- /* allow to be run on all CPUs */
- set_cpus_allowed_ptr(current, &cpus_allowed);
+ smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
+ true);
for_each_cpu(i, policy->cpus) {
freqs.cpu = i;
@@ -312,33 +317,43 @@ static int speedstep_verify(struct cpufreq_policy *policy)
return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
}
+struct get_freqs {
+ struct cpufreq_policy *policy;
+ int ret;
+};
+
+static void get_freqs_on_cpu(void *_get_freqs)
+{
+ struct get_freqs *get_freqs = _get_freqs;
+
+ get_freqs->ret =
+ speedstep_get_freqs(speedstep_processor,
+ &speedstep_freqs[SPEEDSTEP_LOW].frequency,
+ &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
+ &get_freqs->policy->cpuinfo.transition_latency,
+ &speedstep_set_state);
+}
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
- int result = 0;
- unsigned int speed;
- cpumask_t cpus_allowed;
+ int result;
+ unsigned int policy_cpu, speed;
+ struct get_freqs gf;
/* only run on CPU to be set, or on its sibling */
#ifdef CONFIG_SMP
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
-
- cpus_allowed = current->cpus_allowed;
- set_cpus_allowed_ptr(current, policy->cpus);
+ policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
/* detect low and high frequency and transition latency */
- result = speedstep_get_freqs(speedstep_processor,
- &speedstep_freqs[SPEEDSTEP_LOW].frequency,
- &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
- &policy->cpuinfo.transition_latency,
- &speedstep_set_state);
- set_cpus_allowed_ptr(current, &cpus_allowed);
- if (result)
- return result;
+ gf.policy = policy;
+ smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1);
+ if (gf.ret)
+ return gf.ret;
/* get current speed setting */
- speed = _speedstep_get(policy->cpus);
+ speed = speedstep_get(policy_cpu);
if (!speed)
return -EIO;
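
speedstep_get() above turns a failed smp_call_function_single() into BUG() because the call only fails when the target CPU is offline, and callers are required to keep it online. When offlining cannot be ruled out, a softer variant is possible; a sketch, assuming that returning 0 is an acceptable "speed unknown" value:

    static unsigned int speedstep_get_checked(unsigned int cpu)
    {
            struct get_freq_data data = { .processor = speedstep_processor };

            /* a nonzero return means 'cpu' was offline */
            if (smp_call_function_single(cpu, get_freq_data, &data, 1))
                    return 0;

            dprintk("detected %u kHz as current frequency\n", data.speed);
            return data.speed;
    }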
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
index 2e3c6862657b..f4c290b8482f 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
@@ -226,6 +226,7 @@ static unsigned int pentium4_get_frequency(void)
}
+/* Warning: may get called from smp_call_function_single. */
unsigned int speedstep_get_frequency(unsigned int processor)
{
switch (processor) {
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ae3180c506a6..b0597ad02c93 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -632,17 +632,15 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_freqs *freq = data;
- unsigned long *lpj, dummy;
+ unsigned long *lpj;
if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
return 0;
- lpj = &dummy;
- if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+ lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
+ if (!(freq->flags & CPUFREQ_CONST_LOOPS))
lpj = &cpu_data(freq->cpu).loops_per_jiffy;
-#else
- lpj = &boot_cpu_data.loops_per_jiffy;
#endif
if (!ref_freq) {