Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig                 | 27
-rw-r--r--  drivers/cpufreq/cpufreq.c               | 34
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  | 19
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      | 22
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c         | 18
5 files changed, 77 insertions(+), 43 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 993fa7b89253..721f86f4f008 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -56,10 +56,6 @@ config CPU_FREQ_STAT_DETAILS
If in doubt, say N.
-# Note that it is not currently possible to set the other governors (such as ondemand)
-# as the default, since if they fail to initialise, cpufreq will be
-# left in an undefined state.
-
choice
prompt "Default CPUFreq governor"
default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
@@ -85,6 +81,29 @@ config CPU_FREQ_DEFAULT_GOV_USERSPACE
program shall be able to set the CPU dynamically without having
to enable the userspace governor manually.
+config CPU_FREQ_DEFAULT_GOV_ONDEMAND
+ bool "ondemand"
+ select CPU_FREQ_GOV_ONDEMAND
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'ondemand' as default. This allows
+ you to get a fully dynamic frequency capable system by simply
+ loading your cpufreq low-level hardware driver.
+ Be aware that not all cpufreq drivers support the ondemand
+ governor. If unsure, have a look at the help section of the
+ driver. The fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+ bool "conservative"
+ select CPU_FREQ_GOV_CONSERVATIVE
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'conservative' as default. This allows
+ you to get a fully dynamic frequency capable system by simply
+ loading your cpufreq low-level hardware driver.
+ Be aware that not all cpufreq drivers support the conservative
+ governor. If unsure, have a look at the help section of the
+ driver. The fallback governor will be the performance governor.
endchoice
config CPU_FREQ_GOV_PERFORMANCE
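The new default-governor choice is presumably consumed through the CPUFREQ_DEFAULT_GOVERNOR macro used in cpufreq.c below; that header change is not visible here because the diffstat is limited to drivers/cpufreq. A minimal sketch of how the selection could look in include/linux/cpufreq.h, assuming the governor symbols exported later in this patch (the userspace symbol name is an assumption, as it does not appear in this diff):

    /* Sketch only -- the authoritative definition lives in
     * include/linux/cpufreq.h, outside this drivers/cpufreq-limited diff.
     */
    #if defined(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE)
    extern struct cpufreq_governor cpufreq_gov_performance;
    #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_performance)
    #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
    extern struct cpufreq_governor cpufreq_gov_userspace;    /* assumed name */
    #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_userspace)
    #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
    extern struct cpufreq_governor cpufreq_gov_ondemand;
    #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_ondemand)
    #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
    extern struct cpufreq_governor cpufreq_gov_conservative;
    #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_conservative)
    #endif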
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2ce3de5e84af..5e626b12b97e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -763,6 +763,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
+ /* Set governor before ->init, so that driver could check it */
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
*/
@@ -1109,12 +1111,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
unsigned int ret_freq = 0;
if (policy) {
- if (unlikely(lock_policy_rwsem_read(cpu)))
- return ret_freq;
-
ret_freq = policy->cur;
-
- unlock_policy_rwsem_read(cpu);
cpufreq_cpu_put(policy);
}
@@ -1483,6 +1480,31 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
{
int ret;
+ /* The fallback below is only needed when the default governor is
+ known to have latency restrictions, e.g. conservative or ondemand.
+ Kconfig already ensures this case by also selecting the performance
+ governor. */
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
+ struct cpufreq_governor *gov = &cpufreq_gov_performance;
+#else
+ struct cpufreq_governor *gov = NULL;
+#endif
+
+ if (policy->governor->max_transition_latency &&
+ policy->cpuinfo.transition_latency >
+ policy->governor->max_transition_latency) {
+ if (!gov)
+ return -EINVAL;
+ else {
+ printk(KERN_WARNING "%s governor failed, too long"
+ " transition latency of HW, fallback"
+ " to %s governor\n",
+ policy->governor->name,
+ gov->name);
+ policy->governor = gov;
+ }
+ }
+
if (!try_module_get(policy->governor->owner))
return -EINVAL;
@@ -1703,7 +1725,7 @@ int cpufreq_update_policy(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_update_policy);
-static int cpufreq_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
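The generic fallback in __cpufreq_governor() relies on a new max_transition_latency member in struct cpufreq_governor; like the default-governor macro, that header change sits outside this drivers/cpufreq-limited diff. A rough sketch of the assumed structure, based only on the fields this patch touches:

    /* Assumed layout; the real definition is in include/linux/cpufreq.h. */
    struct cpufreq_governor {
    	char		name[CPUFREQ_NAME_LEN];
    	int		(*governor)(struct cpufreq_policy *policy,
    				    unsigned int event);
    	unsigned int	max_transition_latency;	/* in ns; 0 means no limit,
    						 * otherwise the hardware must
    						 * switch faster than this or
    						 * cpufreq falls back to the
    						 * performance governor */
    	struct list_head	governor_list;
    	struct module		*owner;
    };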
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 26f440ccc3fb..4bd33ce8a6f3 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -58,7 +58,7 @@ static unsigned int def_sampling_rate;
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
-#define TRANSITION_LATENCY_LIMIT (10 * 1000)
+#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
static void do_dbs_timer(struct work_struct *work);
@@ -466,9 +466,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
(!policy->cur))
return -EINVAL;
- if (policy->cpuinfo.transition_latency >
- (TRANSITION_LATENCY_LIMIT * 1000))
- return -EINVAL;
if (this_dbs_info->enable) /* Already enabled */
break;
@@ -551,15 +548,17 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return 0;
}
-static struct cpufreq_governor cpufreq_gov_dbs = {
- .name = "conservative",
- .governor = cpufreq_governor_dbs,
- .owner = THIS_MODULE,
+struct cpufreq_governor cpufreq_gov_conservative = {
+ .name = "conservative",
+ .governor = cpufreq_governor_dbs,
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+ .owner = THIS_MODULE,
};
+EXPORT_SYMBOL(cpufreq_gov_conservative);
static int __init cpufreq_gov_dbs_init(void)
{
- return cpufreq_register_governor(&cpufreq_gov_dbs);
+ return cpufreq_register_governor(&cpufreq_gov_conservative);
}
static void __exit cpufreq_gov_dbs_exit(void)
@@ -567,7 +566,7 @@ static void __exit cpufreq_gov_dbs_exit(void)
/* Make sure that the scheduled work is indeed not running */
flush_scheduled_work();
- cpufreq_unregister_governor(&cpufreq_gov_dbs);
+ cpufreq_unregister_governor(&cpufreq_gov_conservative);
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e794527e4925..369f44595150 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,7 +47,7 @@ static unsigned int def_sampling_rate;
(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
-#define TRANSITION_LATENCY_LIMIT (10 * 1000)
+#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
static void do_dbs_timer(struct work_struct *work);
@@ -508,12 +508,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if ((!cpu_online(cpu)) || (!policy->cur))
return -EINVAL;
- if (policy->cpuinfo.transition_latency >
- (TRANSITION_LATENCY_LIMIT * 1000)) {
- printk(KERN_WARNING "ondemand governor failed to load "
- "due to too long transition latency\n");
- return -EINVAL;
- }
if (this_dbs_info->enable) /* Already enabled */
break;
@@ -585,11 +579,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return 0;
}
-static struct cpufreq_governor cpufreq_gov_dbs = {
- .name = "ondemand",
- .governor = cpufreq_governor_dbs,
- .owner = THIS_MODULE,
+struct cpufreq_governor cpufreq_gov_ondemand = {
+ .name = "ondemand",
+ .governor = cpufreq_governor_dbs,
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+ .owner = THIS_MODULE,
};
+EXPORT_SYMBOL(cpufreq_gov_ondemand);
static int __init cpufreq_gov_dbs_init(void)
{
@@ -598,12 +594,12 @@ static int __init cpufreq_gov_dbs_init(void)
printk(KERN_ERR "Creation of kondemand failed\n");
return -EFAULT;
}
- return cpufreq_register_governor(&cpufreq_gov_dbs);
+ return cpufreq_register_governor(&cpufreq_gov_ondemand);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
- cpufreq_unregister_governor(&cpufreq_gov_dbs);
+ cpufreq_unregister_governor(&cpufreq_gov_ondemand);
destroy_workqueue(kondemand_wq);
}
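Folding the factor of 1000 into TRANSITION_LATENCY_LIMIT keeps the effective ceiling unchanged; only the place where it is enforced moves from each governor into __cpufreq_governor(). A quick sanity check of the arithmetic:

    /* Old per-governor check:
     *   transition_latency > TRANSITION_LATENCY_LIMIT * 1000
     *                      = (10 * 1000) * 1000 = 10,000,000 ns = 10 ms
     * New generic check via max_transition_latency:
     *   transition_latency > TRANSITION_LATENCY_LIMIT
     *                      = 10 * 1000 * 1000   = 10,000,000 ns = 10 ms
     */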
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 917b9bab9ccb..8a45d0f93e26 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -164,8 +164,7 @@ freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
return -1;
}
-static void
-cpufreq_stats_free_table (unsigned int cpu)
+static void __cpuexit cpufreq_stats_free_table(unsigned int cpu)
{
struct cpufreq_stats *stat = cpufreq_stats_table[cpu];
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
@@ -305,8 +304,9 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
return 0;
}
-static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -323,7 +323,7 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block cpufreq_stat_cpu_notifier =
+static struct notifier_block cpufreq_stat_cpu_notifier __cpuinitdata =
{
.notifier_call = cpufreq_stat_cpu_callback,
};
@@ -356,8 +356,7 @@ __init cpufreq_stats_init(void)
register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu) {
- cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
- CPU_ONLINE, (void *)(long)cpu);
+ cpufreq_update_policy(cpu);
}
return 0;
}
@@ -372,13 +371,12 @@ __exit cpufreq_stats_exit(void)
CPUFREQ_TRANSITION_NOTIFIER);
unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu) {
- cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
- CPU_DEAD, (void *)(long)cpu);
+ cpufreq_stats_free_table(cpu);
}
}
MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
-MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats"
+MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats "
"through sysfs filesystem");
MODULE_LICENSE ("GPL");