Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c        |  19
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c  |   1
-rw-r--r--  drivers/cpufreq/cpufreq.c             |  12
-rw-r--r--  drivers/cpufreq/exynos5440-cpufreq.c  |   6
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c       |   6
-rw-r--r--  drivers/cpufreq/intel_pstate.c        | 138
-rw-r--r--  drivers/cpufreq/sfi-cpufreq.c         |   2
-rw-r--r--  drivers/cpuidle/Kconfig.arm           |   1
-rw-r--r--  drivers/cpuidle/cpuidle-arm.c         |  62
-rw-r--r--  drivers/cpuidle/governors/menu.c      |  20
-rw-r--r--  drivers/idle/intel_idle.c             |  32
11 files changed, 174 insertions(+), 125 deletions(-)
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index e82bb3c30b92..10be285c9055 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -144,10 +144,23 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
cppc_dmi_max_khz = cppc_get_dmi_max_khz();
- policy->min = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz / cpu->perf_caps.highest_perf;
+ /*
+ * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
+ * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
+ */
+ policy->min = cpu->perf_caps.lowest_nonlinear_perf * cppc_dmi_max_khz /
+ cpu->perf_caps.highest_perf;
policy->max = cppc_dmi_max_khz;
- policy->cpuinfo.min_freq = policy->min;
- policy->cpuinfo.max_freq = policy->max;
+
+ /*
+ * Set cpuinfo.min_freq to Lowest to make the full range of performance
+ * available if userspace wants to use any perf between lowest & lowest
+ * nonlinear perf
+ */
+ policy->cpuinfo.min_freq = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz /
+ cpu->perf_caps.highest_perf;
+ policy->cpuinfo.max_freq = cppc_dmi_max_khz;
+
policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
policy->shared_type = cpu->shared_type;
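
Note: the hunk above scales abstract CPPC performance levels into kHz by taking them as a fraction of highest_perf times the DMI-reported maximum frequency. A minimal userspace sketch of that conversion follows; the cap values and dmi_max_khz are illustrative, not read from real ACPI _CPC tables.

/* Sketch of the CPPC perf-to-kHz scaling used in the patch; all
 * numbers here are hypothetical example values. */
#include <stdio.h>

static unsigned int perf_to_khz(unsigned int perf,
				unsigned int highest_perf,
				unsigned int dmi_max_khz)
{
	/* Same proportionality as the patch: perf * max_khz / highest. */
	return perf * dmi_max_khz / highest_perf;
}

int main(void)
{
	unsigned int highest = 300, lowest = 30, lowest_nonlinear = 120;
	unsigned int dmi_max_khz = 2600000;	/* hypothetical DMI value */

	printf("policy->min      = %u kHz\n",
	       perf_to_khz(lowest_nonlinear, highest, dmi_max_khz));
	printf("cpuinfo.min_freq = %u kHz\n",
	       perf_to_khz(lowest, highest, dmi_max_khz));
	return 0;
}
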
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 921b4a6c3d16..1c262923fe58 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -31,6 +31,7 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "arm,integrator-ap", },
{ .compatible = "arm,integrator-cp", },
+ { .compatible = "hisilicon,hi3660", },
{ .compatible = "hisilicon,hi6220", },
{ .compatible = "fsl,imx27", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 26b643d57847..6e7424d12de9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -632,11 +632,21 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
+__weak unsigned int arch_freq_get_on_cpu(int cpu)
+{
+ return 0;
+}
+
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
ssize_t ret;
+ unsigned int freq;
- if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+ freq = arch_freq_get_on_cpu(policy->cpu);
+ if (freq)
+ ret = sprintf(buf, "%u\n", freq);
+ else if (cpufreq_driver && cpufreq_driver->setpolicy &&
+ cpufreq_driver->get)
ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
else
ret = sprintf(buf, "%u\n", policy->cur);
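
Note: arch_freq_get_on_cpu() is added as a __weak stub returning 0, so an architecture can supply a strong definition that wins at link time (x86 later provides one based on APERF/MPERF). A minimal userspace sketch of the pattern, with a made-up fallback value:

/* Sketch of the __weak fallback idiom; a strong arch_freq_get_on_cpu()
 * in another object file would silently replace this stub at link time. */
#include <stdio.h>

__attribute__((weak)) unsigned int arch_freq_get_on_cpu(int cpu)
{
	(void)cpu;
	return 0;	/* 0 = "no arch-specific frequency, use fallback" */
}

int main(void)
{
	unsigned int freq = arch_freq_get_on_cpu(0);
	unsigned int policy_cur = 1000000;	/* hypothetical cached kHz */

	/* Mirrors show_scaling_cur_freq(): prefer the arch value if nonzero. */
	printf("%u kHz\n", freq ? freq : policy_cur);
	return 0;
}
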
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index 9180d34cc9fc..b6b369c22272 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -173,12 +173,12 @@ static void exynos_enable_dvfs(unsigned int cur_frequency)
/* Enable PSTATE Change Event */
tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
-	 __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
+	__raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
/* Enable PSTATE Change IRQ */
tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
-	 __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
+	__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
/* Set initial performance index */
cpufreq_for_each_entry(pos, freq_table)
@@ -330,7 +330,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
struct resource res;
unsigned int cur_frequency;
-	 np = pdev->dev.of_node;
+	np = pdev->dev.of_node;
if (!np)
return -ENODEV;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 9c13f097fd8c..b6edd3ccaa55 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -101,7 +101,8 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
* - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
* - Disable pll2_pfd2_396m_clk
*/
- if (of_machine_is_compatible("fsl,imx6ul")) {
+ if (of_machine_is_compatible("fsl,imx6ul") ||
+ of_machine_is_compatible("fsl,imx6ull")) {
/*
* When changing pll1_sw_clk's parent to pll1_sys_clk,
* CPU may run at higher than 528MHz, this will lead to
@@ -215,7 +216,8 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_clk;
}
- if (of_machine_is_compatible("fsl,imx6ul")) {
+ if (of_machine_is_compatible("fsl,imx6ul") ||
+ of_machine_is_compatible("fsl,imx6ull")) {
pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
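
Note: the imx6q change simply extends two existing of_machine_is_compatible() guards so the i.MX6ULL takes the same clock re-parenting path as the i.MX6UL. A trivial userspace sketch of the "any of several compatibles" guard; the matcher below is a stand-in for the DT helper, and the machine string is hypothetical.

/* Sketch of matching one of several DT compatible strings. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static const char *machine_compatible = "fsl,imx6ull";	/* hypothetical */

static bool of_machine_is_compatible(const char *compat)
{
	return strcmp(machine_compatible, compat) == 0;
}

int main(void)
{
	if (of_machine_is_compatible("fsl,imx6ul") ||
	    of_machine_is_compatible("fsl,imx6ull"))
		printf("use pll2_bus/secondary_sel workaround path\n");
	return 0;
}
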
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 029a93bfb558..48a98f11a84e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -231,10 +231,8 @@ struct global_params {
* @prev_cummulative_iowait: IO Wait time difference from last and
* current sample
* @sample: Storage for storing last Sample data
- * @min_perf: Minimum capacity limit as a fraction of the maximum
- * turbo P-state capacity.
- * @max_perf: Maximum capacity limit as a fraction of the maximum
- * turbo P-state capacity.
+ * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios
+ * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios
* @acpi_perf_data: Stores ACPI perf information read from _PSS
* @valid_pss_table: Set to true for valid ACPI _PSS entries found
* @epp_powersave: Last saved HWP energy performance preference
@@ -266,8 +264,8 @@ struct cpudata {
u64 prev_tsc;
u64 prev_cummulative_iowait;
struct sample sample;
- int32_t min_perf;
- int32_t max_perf;
+ int32_t min_perf_ratio;
+ int32_t max_perf_ratio;
#ifdef CONFIG_ACPI
struct acpi_processor_performance acpi_perf_data;
bool valid_pss_table;
@@ -790,25 +788,32 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
-static void intel_pstate_hwp_set(unsigned int cpu)
+static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
+ int *current_max)
{
- struct cpudata *cpu_data = all_cpu_data[cpu];
- int min, hw_min, max, hw_max;
- u64 value, cap;
- s16 epp;
+ u64 cap;
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
- hw_min = HWP_LOWEST_PERF(cap);
if (global.no_turbo)
- hw_max = HWP_GUARANTEED_PERF(cap);
+ *current_max = HWP_GUARANTEED_PERF(cap);
else
- hw_max = HWP_HIGHEST_PERF(cap);
+ *current_max = HWP_HIGHEST_PERF(cap);
+
+ *phy_max = HWP_HIGHEST_PERF(cap);
+}
+
+static void intel_pstate_hwp_set(unsigned int cpu)
+{
+ struct cpudata *cpu_data = all_cpu_data[cpu];
+ int max, min;
+ u64 value;
+ s16 epp;
+
+ max = cpu_data->max_perf_ratio;
+ min = cpu_data->min_perf_ratio;
- max = fp_ext_toint(hw_max * cpu_data->max_perf);
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
min = max;
- else
- min = fp_ext_toint(hw_max * cpu_data->min_perf);
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
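
Note: the new intel_pstate_get_hwp_max() reads MSR_HWP_CAPABILITIES once and reports both the physical maximum ratio and the currently usable maximum (guaranteed when turbo is off). A userspace sketch of the field extraction; the packed cap value is made up, and the byte layout mirrors the kernel's HWP_*_PERF() accessors.

/* Sketch of pulling per-level ratios out of an HWP_CAPABILITIES-style
 * register; hypothetical MSR contents. */
#include <stdint.h>
#include <stdio.h>

#define HWP_HIGHEST_PERF(cap)		((cap) & 0xff)
#define HWP_GUARANTEED_PERF(cap)	(((cap) >> 8) & 0xff)
#define HWP_LOWEST_PERF(cap)		(((cap) >> 24) & 0xff)

int main(void)
{
	uint64_t cap = 0x080A2028;	/* hypothetical MSR contents */
	int no_turbo = 1;
	int phy_max = HWP_HIGHEST_PERF(cap);
	int cur_max = no_turbo ? HWP_GUARANTEED_PERF(cap)
			       : HWP_HIGHEST_PERF(cap);

	printf("phy_max=%d current_max=%d lowest=%d\n",
	       phy_max, cur_max, (int)HWP_LOWEST_PERF(cap));
	return 0;
}
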
@@ -1524,8 +1529,7 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
update_turbo_state();
pstate = intel_pstate_get_base_pstate(cpu);
- pstate = max(cpu->pstate.min_pstate,
- fp_ext_toint(pstate * cpu->max_perf));
+ pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
intel_pstate_set_pstate(cpu, pstate);
}
@@ -1612,9 +1616,6 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
int32_t busy_frac, boost;
int target, avg_pstate;
- if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
- return cpu->pstate.turbo_pstate;
-
busy_frac = div_fp(sample->mperf, sample->tsc);
boost = cpu->iowait_boost;
@@ -1651,9 +1652,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
u64 duration_ns;
- if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
- return cpu->pstate.turbo_pstate;
-
/*
* perf_scaled is the ratio of the average P-state during the last
* sampling period to the P-state requested last time (in percent).
@@ -1691,9 +1689,8 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
int max_pstate = intel_pstate_get_base_pstate(cpu);
int min_pstate;
- min_pstate = max(cpu->pstate.min_pstate,
- fp_ext_toint(max_pstate * cpu->min_perf));
- max_pstate = max(min_pstate, fp_ext_toint(max_pstate * cpu->max_perf));
+ min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
+ max_pstate = max(min_pstate, cpu->max_perf_ratio);
return clamp_t(int, pstate, min_pstate, max_pstate);
}
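
Note: with limits now stored directly as ratios, intel_pstate_prepare_request() reduces to two max() steps and a clamp instead of fixed-point multiplies. A minimal sketch of that bounding logic with illustrative numbers:

/* Sketch of the ratio clamp in intel_pstate_prepare_request(). */
#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))
#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

int main(void)
{
	int min_pstate_hw = 8;				/* cpu->pstate.min_pstate */
	int min_perf_ratio = 12, max_perf_ratio = 32;	/* policy limits */
	int request = 36;				/* governor's desired pstate */

	int min_p = max(min_pstate_hw, min_perf_ratio);
	int max_p = max(min_p, max_perf_ratio);

	printf("clamped pstate = %d\n", clamp(request, min_p, max_p));
	return 0;
}
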
@@ -1729,16 +1726,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate)
fp_toint(cpu->iowait_boost * 100));
}
-static void intel_pstate_update_util_hwp(struct update_util_data *data,
- u64 time, unsigned int flags)
-{
- struct cpudata *cpu = container_of(data, struct cpudata, update_util);
- u64 delta_ns = time - cpu->sample.time;
-
- if ((s64)delta_ns >= INTEL_PSTATE_HWP_SAMPLING_INTERVAL)
- intel_pstate_sample(cpu, time);
-}
-
static void intel_pstate_update_util_pid(struct update_util_data *data,
u64 time, unsigned int flags)
{
@@ -1930,6 +1917,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
struct cpudata *cpu = all_cpu_data[cpu_num];
+ if (hwp_active)
+ return;
+
if (cpu->update_util_set)
return;
@@ -1963,52 +1953,61 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
{
int max_freq = intel_pstate_get_max_freq(cpu);
int32_t max_policy_perf, min_policy_perf;
+ int max_state, turbo_max;
- max_policy_perf = div_ext_fp(policy->max, max_freq);
- max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
+ /*
+ * HWP needs some special consideration, because on BDX the
+ * HWP_REQUEST uses abstract value to represent performance
+ * rather than pure ratios.
+ */
+ if (hwp_active) {
+ intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+ } else {
+ max_state = intel_pstate_get_base_pstate(cpu);
+ turbo_max = cpu->pstate.turbo_pstate;
+ }
+
+ max_policy_perf = max_state * policy->max / max_freq;
if (policy->max == policy->min) {
min_policy_perf = max_policy_perf;
} else {
- min_policy_perf = div_ext_fp(policy->min, max_freq);
+ min_policy_perf = max_state * policy->min / max_freq;
min_policy_perf = clamp_t(int32_t, min_policy_perf,
0, max_policy_perf);
}
+ pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
+ policy->cpu, max_state,
+ min_policy_perf, max_policy_perf);
+
/* Normalize user input to [min_perf, max_perf] */
if (per_cpu_limits) {
- cpu->min_perf = min_policy_perf;
- cpu->max_perf = max_policy_perf;
+ cpu->min_perf_ratio = min_policy_perf;
+ cpu->max_perf_ratio = max_policy_perf;
} else {
int32_t global_min, global_max;
/* Global limits are in percent of the maximum turbo P-state. */
- global_max = percent_ext_fp(global.max_perf_pct);
- global_min = percent_ext_fp(global.min_perf_pct);
- if (max_freq != cpu->pstate.turbo_freq) {
- int32_t turbo_factor;
-
- turbo_factor = div_ext_fp(cpu->pstate.turbo_pstate,
- cpu->pstate.max_pstate);
- global_min = mul_ext_fp(global_min, turbo_factor);
- global_max = mul_ext_fp(global_max, turbo_factor);
- }
+ global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
+ global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
global_min = clamp_t(int32_t, global_min, 0, global_max);
- cpu->min_perf = max(min_policy_perf, global_min);
- cpu->min_perf = min(cpu->min_perf, max_policy_perf);
- cpu->max_perf = min(max_policy_perf, global_max);
- cpu->max_perf = max(min_policy_perf, cpu->max_perf);
+ pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
+ global_min, global_max);
- /* Make sure min_perf <= max_perf */
- cpu->min_perf = min(cpu->min_perf, cpu->max_perf);
- }
+ cpu->min_perf_ratio = max(min_policy_perf, global_min);
+ cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
+ cpu->max_perf_ratio = min(max_policy_perf, global_max);
+ cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
- cpu->max_perf = round_up(cpu->max_perf, EXT_FRAC_BITS);
- cpu->min_perf = round_up(cpu->min_perf, EXT_FRAC_BITS);
+ /* Make sure min_perf <= max_perf */
+ cpu->min_perf_ratio = min(cpu->min_perf_ratio,
+ cpu->max_perf_ratio);
- pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
- fp_ext_toint(cpu->max_perf * 100),
- fp_ext_toint(cpu->min_perf * 100));
+ }
+ pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
+ cpu->max_perf_ratio,
+ cpu->min_perf_ratio);
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
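
Note: the rewritten intel_pstate_update_perf_limits() converts the global percent limits into ratio limits with DIV_ROUND_UP against the turbo maximum, replacing the old extended-fixed-point math. A short sketch of that conversion; turbo_max and the percentages are made-up example values.

/* Sketch of mapping global percent limits onto ratio limits. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int turbo_max = 40;			/* highest (turbo) ratio */
	int max_perf_pct = 75, min_perf_pct = 20;

	int global_max = DIV_ROUND_UP(turbo_max * max_perf_pct, 100);
	int global_min = DIV_ROUND_UP(turbo_max * min_perf_pct, 100);

	printf("global_min=%d global_max=%d\n", global_min, global_max);
	return 0;
}
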
@@ -2035,10 +2034,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
*/
intel_pstate_clear_update_util_hook(policy->cpu);
intel_pstate_max_within_limits(cpu);
+ } else {
+ intel_pstate_set_update_util_hook(policy->cpu);
}
- intel_pstate_set_update_util_hook(policy->cpu);
-
if (hwp_active)
intel_pstate_hwp_set(policy->cpu);
@@ -2111,8 +2110,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
- cpu->max_perf = int_ext_tofp(1);
- cpu->min_perf = 0;
+ cpu->max_perf_ratio = 0xFF;
+ cpu->min_perf_ratio = 0;
policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -2554,7 +2553,6 @@ static int __init intel_pstate_init(void)
} else {
hwp_active++;
intel_pstate.attr = hwp_cpufreq_attrs;
- pstate_funcs.update_util = intel_pstate_update_util_hwp;
goto hwp_cpu_matched;
}
} else {
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
index 992ce6f9abec..3779742f86e3 100644
--- a/drivers/cpufreq/sfi-cpufreq.c
+++ b/drivers/cpufreq/sfi-cpufreq.c
@@ -24,7 +24,7 @@
#include <asm/msr.h>
-struct cpufreq_frequency_table *freq_table;
+static struct cpufreq_frequency_table *freq_table;
static struct sfi_freq_table_entry *sfi_cpufreq_array;
static int num_freq_table_entries;
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 21340e0be73e..f52144808455 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -4,6 +4,7 @@
config ARM_CPUIDLE
bool "Generic ARM/ARM64 CPU idle Driver"
select DT_IDLE_STATES
+ select CPU_IDLE_MULTIPLE_DRIVERS
help
Select this to enable generic cpuidle driver for ARM.
It provides a generic idle driver whose idle states are configured
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index f440d385ed34..7080c384ad5d 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/topology.h>
#include <asm/cpuidle.h>
@@ -44,7 +45,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
}
-static struct cpuidle_driver arm_idle_driver = {
+static struct cpuidle_driver arm_idle_driver __initdata = {
.name = "arm_idle",
.owner = THIS_MODULE,
/*
@@ -80,30 +81,42 @@ static const struct of_device_id arm_idle_state_match[] __initconst = {
static int __init arm_idle_init(void)
{
int cpu, ret;
- struct cpuidle_driver *drv = &arm_idle_driver;
+ struct cpuidle_driver *drv;
struct cpuidle_device *dev;
- /*
- * Initialize idle states data, starting at index 1.
- * This driver is DT only, if no DT idle states are detected (ret == 0)
- * let the driver initialization fail accordingly since there is no
- * reason to initialize the idle driver if only wfi is supported.
- */
- ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
- if (ret <= 0)
- return ret ? : -ENODEV;
-
- ret = cpuidle_register_driver(drv);
- if (ret) {
- pr_err("Failed to register cpuidle driver\n");
- return ret;
- }
-
- /*
- * Call arch CPU operations in order to initialize
- * idle states suspend back-end specific data
- */
for_each_possible_cpu(cpu) {
+
+ drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
+ if (!drv) {
+ ret = -ENOMEM;
+ goto out_fail;
+ }
+
+ drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+ /*
+ * Initialize idle states data, starting at index 1. This
+ * driver is DT only, if no DT idle states are detected (ret
+ * == 0) let the driver initialization fail accordingly since
+ * there is no reason to initialize the idle driver if only
+ * wfi is supported.
+ */
+ ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
+ if (ret <= 0) {
+ ret = ret ? : -ENODEV;
+ goto out_fail;
+ }
+
+ ret = cpuidle_register_driver(drv);
+ if (ret) {
+ pr_err("Failed to register cpuidle driver\n");
+ goto out_fail;
+ }
+
+ /*
+ * Call arch CPU operations in order to initialize
+ * idle states suspend back-end specific data
+ */
ret = arm_cpuidle_init(cpu);
/*
@@ -141,10 +154,11 @@ out_fail:
dev = per_cpu(cpuidle_devices, cpu);
cpuidle_unregister_device(dev);
kfree(dev);
+ drv = cpuidle_get_driver();
+ cpuidle_unregister_driver(drv);
+ kfree(drv);
}
- cpuidle_unregister_driver(drv);
-
return ret;
}
device_initcall(arm_idle_init);
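
Note: arm_idle_init() now kmemdup()s the template driver once per CPU and points each copy's cpumask at a single CPU, which is what CPU_IDLE_MULTIPLE_DRIVERS enables. A userspace sketch of the duplicate-per-CPU pattern; the struct and registration below are stand-ins, not the cpuidle API.

/* Sketch of per-CPU driver duplication from a shared template. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct idle_driver {
	const char *name;
	int cpu;			/* stand-in for drv->cpumask */
};

static const struct idle_driver template_drv = { .name = "arm_idle" };

int main(void)
{
	int ncpus = 4, cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		struct idle_driver *drv = malloc(sizeof(*drv));

		if (!drv)
			return 1;
		memcpy(drv, &template_drv, sizeof(*drv));	/* kmemdup() */
		drv->cpu = cpu;		/* cpumask_of(cpu) in the real code */
		printf("registered %s for cpu%d\n", drv->name, drv->cpu);
		free(drv);		/* unregister + kfree on the error path */
	}
	return 0;
}
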
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index b2330fd69e34..61b64c2b2cb8 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -286,6 +286,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
struct device *device = get_cpu_device(dev->cpu);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int i;
+ int first_idx;
+ int idx;
unsigned int interactivity_req;
unsigned int expected_interval;
unsigned long nr_iowaiters, cpu_load;
@@ -335,11 +337,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (data->next_timer_us > polling_threshold &&
latency_req > s->exit_latency && !s->disabled &&
!dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
- data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+ first_idx = CPUIDLE_DRIVER_STATE_START;
else
- data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
+ first_idx = CPUIDLE_DRIVER_STATE_START - 1;
} else {
- data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+ first_idx = 0;
}
/*
@@ -359,20 +361,28 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* Find the idle state with the lowest power while satisfying
* our constraints.
*/
- for (i = data->last_state_idx + 1; i < drv->state_count; i++) {
+ idx = -1;
+ for (i = first_idx; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
struct cpuidle_state_usage *su = &dev->states_usage[i];
if (s->disabled || su->disable)
continue;
+ if (idx == -1)
+ idx = i; /* first enabled state */
if (s->target_residency > data->predicted_us)
break;
if (s->exit_latency > latency_req)
break;
- data->last_state_idx = i;
+ idx = i;
}
+ if (idx == -1)
+ idx = 0; /* No states enabled. Must use 0. */
+
+ data->last_state_idx = idx;
+
return data->last_state_idx;
}
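
Note: the menu governor fix scans from the first candidate index, remembers the first enabled state, and only then walks deeper states, so a disabled shallow state can never be returned. A self-contained sketch of that loop with fabricated state data:

/* Sketch of the fixed menu_select() scan. */
#include <stdbool.h>
#include <stdio.h>

struct state { unsigned int residency_us, exit_latency_us; bool disabled; };

int main(void)
{
	struct state states[] = {
		{ 1, 1, true },			/* polling state, disabled */
		{ 20, 10, false },
		{ 200, 100, false },
	};
	unsigned int predicted_us = 50, latency_req = 80;
	int i, idx = -1, count = 3;

	for (i = 0; i < count; i++) {
		if (states[i].disabled)
			continue;
		if (idx == -1)
			idx = i;		/* first enabled state */
		if (states[i].residency_us > predicted_us)
			break;
		if (states[i].exit_latency_us > latency_req)
			break;
		idx = i;
	}
	if (idx == -1)
		idx = 0;	/* no states enabled, must use 0 */
	printf("selected state %d\n", idx);
	return 0;
}
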
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 216d7ec88c0c..c2ae819a871c 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -51,6 +51,8 @@
/* un-comment DEBUG to enable pr_debug() statements */
#define DEBUG
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
@@ -65,7 +67,6 @@
#include <asm/msr.h>
#define INTEL_IDLE_VERSION "0.4.1"
-#define PREFIX "intel_idle: "
static struct cpuidle_driver intel_idle_driver = {
.name = "intel_idle",
@@ -1111,7 +1112,7 @@ static int __init intel_idle_probe(void)
const struct x86_cpu_id *id;
if (max_cstate == 0) {
- pr_debug(PREFIX "disabled\n");
+ pr_debug("disabled\n");
return -EPERM;
}
@@ -1119,8 +1120,8 @@ static int __init intel_idle_probe(void)
if (!id) {
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 6)
- pr_debug(PREFIX "does not run on family %d model %d\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
+ pr_debug("does not run on family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
return -ENODEV;
}
@@ -1134,13 +1135,13 @@ static int __init intel_idle_probe(void)
!mwait_substates)
return -ENODEV;
- pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
+ pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
icpu = (const struct idle_cpu *)id->driver_data;
cpuidle_state_table = icpu->state_table;
- pr_debug(PREFIX "v" INTEL_IDLE_VERSION
- " model 0x%X\n", boot_cpu_data.x86_model);
+ pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
+ boot_cpu_data.x86_model);
return 0;
}
@@ -1340,8 +1341,7 @@ static void __init intel_idle_cpuidle_driver_init(void)
break;
if (cstate + 1 > max_cstate) {
- printk(PREFIX "max_cstate %d reached\n",
- max_cstate);
+ pr_info("max_cstate %d reached\n", max_cstate);
break;
}
@@ -1358,8 +1358,8 @@ static void __init intel_idle_cpuidle_driver_init(void)
/* if state marked as disabled, skip it */
if (cpuidle_state_table[cstate].disabled != 0) {
- pr_debug(PREFIX "state %s is disabled",
- cpuidle_state_table[cstate].name);
+ pr_debug("state %s is disabled\n",
+ cpuidle_state_table[cstate].name);
continue;
}
@@ -1395,7 +1395,7 @@ static int intel_idle_cpu_init(unsigned int cpu)
dev->cpu = cpu;
if (cpuidle_register_device(dev)) {
- pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
+ pr_debug("cpuidle_register_device %d failed!\n", cpu);
return -EIO;
}
@@ -1447,8 +1447,8 @@ static int __init intel_idle_init(void)
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
struct cpuidle_driver *drv = cpuidle_get_driver();
- printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
- drv ? drv->name : "none");
+ printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"),
+ drv ? drv->name : "none");
goto init_driver_fail;
}
@@ -1460,8 +1460,8 @@ static int __init intel_idle_init(void)
if (retval < 0)
goto hp_setup_fail;
- pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
- lapic_timer_reliable_states);
+ pr_debug("lapic_timer_reliable_states 0x%x\n",
+ lapic_timer_reliable_states);
return 0;
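
Note: the intel_idle changes drop the hand-rolled PREFIX macro in favor of pr_fmt(), which must be defined before the printing helpers are expanded so every pr_*() call picks up the module prefix automatically. A userspace sketch of the idiom; printf and the pr_debug wrapper below are stand-ins for the kernel's printk machinery.

/* Sketch of the pr_fmt() prefixing idiom. */
#include <stdio.h>

#define KBUILD_MODNAME "intel_idle"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_debug(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints "intel_idle: MWAIT substates: 0x1f" with no PREFIX macro. */
	pr_debug("MWAIT substates: 0x%x\n", 0x1f);
	return 0;
}
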