author     Thomas Renninger <trenn@suse.de>    2011-03-03 21:31:27 +0100
committer  Dave Jones <davej@redhat.com>       2011-03-16 17:54:32 -0400
commit     326c86deaed54ad1b364fcafe5073f563671eb58 (patch)
tree       fb0784c6450d3d618127df3823ffeeda400ac58f /drivers/cpufreq/cpufreq_conservative.c
parent     e8951251b89440644a39f2512b4f265973926b41 (diff)
[CPUFREQ] Remove unneeded locks
There can no longer be any concurrent access to these tunables through
the sysfs files of different CPUs, because the tunables are now all
global (not per CPU). I still have some doubts whether some of these
locks were needed at all. Anyway, let's get rid of them.

Signed-off-by: Thomas Renninger <trenn@suse.de>
Signed-off-by: Dave Jones <davej@redhat.com>
CC: cpufreq@vger.kernel.org
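With the locking gone, each store handler reduces to parse, validate, and a plain
write to the global dbs_tuners_ins field. As a reference, this is roughly what
store_sampling_rate looks like after the patch, reconstructed from the hunk below;
the declarations outside the shown hunk context follow the usual pattern in this
file and are assumed rather than copied from the patch:

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	/* plain write to the global tunable; no dbs_mutex needed anymore */
	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
	return count;
}

The only remaining use of dbs_mutex is the dbs_enable bookkeeping in governor
start/stop, as the updated comment in the first hunk below states.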
Diffstat (limited to 'drivers/cpufreq/cpufreq_conservative.c')
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  34
1 file changed, 5 insertions(+), 29 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 3182d85b3374..33b56e5c5c14 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -76,8 +76,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
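
For context, the governor start/stop path that this comment refers to is not
touched by the patch. Sketched from the same file with the per-CPU setup and
teardown elided (a sketch, not part of this diff), it keeps dbs_enable under
dbs_mutex roughly like this:

	case CPUFREQ_GOV_START:
		mutex_lock(&dbs_mutex);
		/* per-CPU bookkeeping elided */
		dbs_enable++;
		if (dbs_enable == 1) {
			/* first user: one-time setup (elided) */
		}
		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		dbs_enable--;
		if (dbs_enable == 0) {
			/* last user: teardown (elided) */
		}
		mutex_unlock(&dbs_mutex);
		break;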
@@ -195,10 +194,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_down_factor = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -212,10 +208,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -226,16 +219,11 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
if (ret != 1 || input > 100 ||
- input <= dbs_tuners_ins.down_threshold) {
- mutex_unlock(&dbs_mutex);
+ input <= dbs_tuners_ins.down_threshold)
return -EINVAL;
- }
dbs_tuners_ins.up_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -246,17 +234,12 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
/* cannot be lower than 11 otherwise freq will not fall */
if (ret != 1 || input < 11 || input > 100 ||
- input >= dbs_tuners_ins.up_threshold) {
- mutex_unlock(&dbs_mutex);
+ input >= dbs_tuners_ins.up_threshold)
return -EINVAL;
- }
dbs_tuners_ins.down_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -275,11 +258,9 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- mutex_lock(&dbs_mutex);
- if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
- mutex_unlock(&dbs_mutex);
+ if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
return count;
- }
+
dbs_tuners_ins.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
@@ -291,8 +272,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (dbs_tuners_ins.ignore_nice)
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -311,10 +290,7 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
/* no need to test here if freq_step is zero as the user might actually
* want this, they would be crazy though :) */
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.freq_step = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}