path: root/arch/arm/mach-omap2/clockdomain.c
author      Paul Walmsley <paul@pwsan.com>  2013-01-26 00:58:17 -0700
committer   Paul Walmsley <paul@pwsan.com>  2013-01-29 14:59:58 -0700
commit      92493870196f52c743330db7b545b54b2abfda82 (patch)
tree        f4861c754544226fbba2f5756eb701a71ba82062 /arch/arm/mach-omap2/clockdomain.c
parent      65958fb6ca7b7e504caf852e492fe554224de1ba (diff)
download    linux-92493870196f52c743330db7b545b54b2abfda82.tar.bz2
ARM: OMAP2+: clockdomain: convert existing atomic usecounts into spinlock-protected shorts/ints
The atomic usecounts seem to be confusing, and are no longer needed since the operations that they are attached to really should take place under lock.  Replace the atomic counters with simple integers, protected by the enclosing powerdomain spinlock.

Signed-off-by: Paul Walmsley <paul@pwsan.com>
Cc: Kevin Hilman <khilman@deeprootsystems.com>
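
To illustrate the conversion, here is a minimal userspace sketch (not the kernel code itself): a usecount that might previously have been bumped with atomic_inc_return()/atomic_dec_return() becomes a plain int that is only touched while the enclosing lock is held.  pthread_mutex_t stands in for the powerdomain spinlock taken via pwrdm_lock()/pwrdm_unlock(), and the struct and function names are illustrative only.

/*
 * Minimal sketch of the pattern: a plain-int usecount protected by the
 * enclosing lock, with the 0 -> 1 and 1 -> 0 transitions detected under
 * that same lock.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_clkdm {
        pthread_mutex_t lock;   /* stands in for pwrdm_lock()/pwrdm_unlock() */
        int usecount;           /* was an atomic_t before the conversion */
};

/* Increment under the lock; return 1 if this was the 0 -> 1 transition. */
static int fake_clkdm_enable(struct fake_clkdm *clkdm)
{
        int first;

        pthread_mutex_lock(&clkdm->lock);
        clkdm->usecount++;
        first = (clkdm->usecount == 1);
        pthread_mutex_unlock(&clkdm->lock);

        return first;
}

/* Decrement under the lock; refuse to underflow, as with the -ERANGE path. */
static int fake_clkdm_disable(struct fake_clkdm *clkdm)
{
        int last;

        pthread_mutex_lock(&clkdm->lock);
        if (clkdm->usecount == 0) {
                pthread_mutex_unlock(&clkdm->lock);
                return -1;      /* analogous to the WARN_ON(1) underflow case */
        }
        clkdm->usecount--;
        last = (clkdm->usecount == 0);
        pthread_mutex_unlock(&clkdm->lock);

        return last;
}

int main(void)
{
        struct fake_clkdm clkdm = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .usecount = 0,
        };

        printf("enable:  first user = %d\n", fake_clkdm_enable(&clkdm));
        printf("enable:  first user = %d\n", fake_clkdm_enable(&clkdm));
        printf("disable: last user  = %d\n", fake_clkdm_disable(&clkdm));
        printf("disable: last user  = %d\n", fake_clkdm_disable(&clkdm));
        printf("disable: underflow  = %d\n", fake_clkdm_disable(&clkdm));

        return 0;
}

With the counter kept under the enclosing lock, the 0 -> 1 and 1 -> 0 transitions and the hardware programming they trigger can no longer race with each other, which is the property the commit message points out the atomic counters alone could not provide.
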
Diffstat (limited to 'arch/arm/mach-omap2/clockdomain.c')
-rw-r--r--  arch/arm/mach-omap2/clockdomain.c  96
1 file changed, 79 insertions(+), 17 deletions(-)
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index ab194753eb7a..2da3b5ec010c 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -210,7 +210,8 @@ static int _clkdm_add_wkdep(struct clockdomain *clkdm1,
return ret;
}
- if (atomic_inc_return(&cd->wkdep_usecount) == 1) {
+ cd->wkdep_usecount++;
+ if (cd->wkdep_usecount == 1) {
pr_debug("clockdomain: hardware will wake up %s when %s wakes up\n",
clkdm1->name, clkdm2->name);
@@ -252,7 +253,8 @@ static int _clkdm_del_wkdep(struct clockdomain *clkdm1,
return ret;
}
- if (atomic_dec_return(&cd->wkdep_usecount) == 0) {
+ cd->wkdep_usecount--;
+ if (cd->wkdep_usecount == 0) {
pr_debug("clockdomain: hardware will no longer wake up %s after %s wakes up\n",
clkdm1->name, clkdm2->name);
@@ -296,7 +298,8 @@ static int _clkdm_add_sleepdep(struct clockdomain *clkdm1,
return ret;
}
- if (atomic_inc_return(&cd->sleepdep_usecount) == 1) {
+ cd->sleepdep_usecount++;
+ if (cd->sleepdep_usecount == 1) {
pr_debug("clockdomain: will prevent %s from sleeping if %s is active\n",
clkdm1->name, clkdm2->name);
@@ -340,7 +343,8 @@ static int _clkdm_del_sleepdep(struct clockdomain *clkdm1,
return ret;
}
- if (atomic_dec_return(&cd->sleepdep_usecount) == 0) {
+ cd->sleepdep_usecount--;
+ if (cd->sleepdep_usecount == 0) {
pr_debug("clockdomain: will no longer prevent %s from sleeping if %s is active\n",
clkdm1->name, clkdm2->name);
@@ -567,7 +571,21 @@ struct powerdomain *clkdm_get_pwrdm(struct clockdomain *clkdm)
*/
int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
{
- return _clkdm_add_wkdep(clkdm1, clkdm2);
+ struct clkdm_dep *cd;
+ int ret;
+
+ if (!clkdm1 || !clkdm2)
+ return -EINVAL;
+
+ cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
+ if (IS_ERR(cd))
+ return PTR_ERR(cd);
+
+ pwrdm_lock(cd->clkdm->pwrdm.ptr);
+ ret = _clkdm_add_wkdep(clkdm1, clkdm2);
+ pwrdm_unlock(cd->clkdm->pwrdm.ptr);
+
+ return ret;
}
/**
@@ -582,7 +600,21 @@ int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
*/
int clkdm_del_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
{
- return _clkdm_del_wkdep(clkdm1, clkdm2);
+ struct clkdm_dep *cd;
+ int ret;
+
+ if (!clkdm1 || !clkdm2)
+ return -EINVAL;
+
+ cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
+ if (IS_ERR(cd))
+ return PTR_ERR(cd);
+
+ pwrdm_lock(cd->clkdm->pwrdm.ptr);
+ ret = _clkdm_del_wkdep(clkdm1, clkdm2);
+ pwrdm_unlock(cd->clkdm->pwrdm.ptr);
+
+ return ret;
}
/**
@@ -620,7 +652,7 @@ int clkdm_read_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
return ret;
}
- /* XXX It's faster to return the atomic wkdep_usecount */
+ /* XXX It's faster to return the wkdep_usecount */
return arch_clkdm->clkdm_read_wkdep(clkdm1, clkdm2);
}
@@ -659,7 +691,21 @@ int clkdm_clear_all_wkdeps(struct clockdomain *clkdm)
*/
int clkdm_add_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
{
- return _clkdm_add_sleepdep(clkdm1, clkdm2);
+ struct clkdm_dep *cd;
+ int ret;
+
+ if (!clkdm1 || !clkdm2)
+ return -EINVAL;
+
+ cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
+ if (IS_ERR(cd))
+ return PTR_ERR(cd);
+
+ pwrdm_lock(cd->clkdm->pwrdm.ptr);
+ ret = _clkdm_add_sleepdep(clkdm1, clkdm2);
+ pwrdm_unlock(cd->clkdm->pwrdm.ptr);
+
+ return ret;
}
/**
@@ -676,7 +722,21 @@ int clkdm_add_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
*/
int clkdm_del_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
{
- return _clkdm_del_sleepdep(clkdm1, clkdm2);
+ struct clkdm_dep *cd;
+ int ret;
+
+ if (!clkdm1 || !clkdm2)
+ return -EINVAL;
+
+ cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
+ if (IS_ERR(cd))
+ return PTR_ERR(cd);
+
+ pwrdm_lock(cd->clkdm->pwrdm.ptr);
+ ret = _clkdm_del_sleepdep(clkdm1, clkdm2);
+ pwrdm_unlock(cd->clkdm->pwrdm.ptr);
+
+ return ret;
}
/**
@@ -716,7 +776,7 @@ int clkdm_read_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
return ret;
}
- /* XXX It's faster to return the atomic sleepdep_usecount */
+ /* XXX It's faster to return the sleepdep_usecount */
return arch_clkdm->clkdm_read_sleepdep(clkdm1, clkdm2);
}
@@ -1063,7 +1123,8 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
* should be called for every clock instance or hwmod that is
* enabled, so the clkdm can be force woken up.
*/
- if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps) {
+ clkdm->usecount++;
+ if (clkdm->usecount > 1 && autodeps) {
pwrdm_unlock(clkdm->pwrdm.ptr);
return 0;
}
@@ -1125,17 +1186,17 @@ int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
pwrdm_lock(clkdm->pwrdm.ptr);
/* corner case: disabling unused clocks */
- if ((__clk_get_enable_count(clk) == 0) &&
- (atomic_read(&clkdm->usecount) == 0))
+ if ((__clk_get_enable_count(clk) == 0) && clkdm->usecount == 0)
goto ccd_exit;
- if (atomic_read(&clkdm->usecount) == 0) {
+ if (clkdm->usecount == 0) {
pwrdm_unlock(clkdm->pwrdm.ptr);
WARN_ON(1); /* underflow */
return -ERANGE;
}
- if (atomic_dec_return(&clkdm->usecount) > 0) {
+ clkdm->usecount--;
+ if (clkdm->usecount > 0) {
pwrdm_unlock(clkdm->pwrdm.ptr);
return 0;
}
@@ -1213,13 +1274,14 @@ int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
pwrdm_lock(clkdm->pwrdm.ptr);
- if (atomic_read(&clkdm->usecount) == 0) {
+ if (clkdm->usecount == 0) {
pwrdm_unlock(clkdm->pwrdm.ptr);
WARN_ON(1); /* underflow */
return -ERANGE;
}
- if (atomic_dec_return(&clkdm->usecount) > 0) {
+ clkdm->usecount--;
+ if (clkdm->usecount > 0) {
pwrdm_unlock(clkdm->pwrdm.ptr);
return 0;
}
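
The public entry points touched by this patch (clkdm_add_wkdep(), clkdm_del_wkdep(), clkdm_add_sleepdep(), clkdm_del_sleepdep()) all take the same shape: validate arguments, look up the dependency, take the powerdomain lock, call the lockless internal _clkdm_* helper, and drop the lock.  The sketch below mirrors that shape with illustrative names only; as above, pthread_mutex_t stands in for the powerdomain spinlock and nothing here is the kernel's actual API.

/*
 * Sketch of the lock-wrapper shape: the public function does the locking,
 * the internal helper assumes the lock is already held.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct fake_domain {
        pthread_mutex_t lock;   /* stands in for the pwrdm spinlock */
        int dep_usecount;       /* plain int, only touched under lock */
};

/* Internal helper: caller must already hold d->lock. */
static int _fake_add_dep(struct fake_domain *d)
{
        d->dep_usecount++;
        if (d->dep_usecount == 1)
                printf("first user: would program the dependency in hardware\n");
        return 0;
}

/* Public entry point: does the locking around the lockless helper. */
static int fake_add_dep(struct fake_domain *d)
{
        int ret;

        if (!d)
                return -EINVAL;

        pthread_mutex_lock(&d->lock);
        ret = _fake_add_dep(d);
        pthread_mutex_unlock(&d->lock);

        return ret;
}

int main(void)
{
        struct fake_domain d = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .dep_usecount = 0,
        };
        int ret;

        ret = fake_add_dep(&d);
        printf("add_dep returned %d, usecount is now %d\n", ret, d.dep_usecount);
        ret = fake_add_dep(&d);
        printf("add_dep returned %d, usecount is now %d\n", ret, d.dep_usecount);

        return 0;
}

Keeping the helpers lockless and pushing the lock out to the public wrappers is what lets callers that already hold the powerdomain lock (such as the clkdm_clk_enable()/clkdm_clk_disable() paths in this same file) reuse the helpers without deadlocking.
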