| | | |
|---|---|---|
| author | Juan Gutierrez <jgutierrez@ti.com> | 2011-09-06 09:30:16 +0300 |
| committer | Ohad Ben-Cohen <ohad@wizery.com> | 2011-09-21 19:45:32 +0300 |
| commit | 93b465c2e186d96fb90012ba0f9372eb9952e732 | |
| tree | 3781c5443068f2fc79c2bb70c8793075b608d1f0 /drivers/hwspinlock | |
| parent | c3c1250e93a7ab1327a9fc49d2a22405672f4204 | |
| download | linux-93b465c2e186d96fb90012ba0f9372eb9952e732.tar.bz2 | |
hwspinlock/core: use a mutex to protect the radix tree
Since we're using non-atomic radix tree allocations, we
should be protecting the tree using a mutex and not a
spinlock.
Non-atomic allocations and process-context locking are good enough,
as the tree is manipulated only when locks are
registered/unregistered/requested/freed.
The locks themselves are still protected by spinlocks of course,
and mutexes are not involved in the locking/unlocking paths.
Cc: <stable@kernel.org>
Signed-off-by: Juan Gutierrez <jgutierrez@ti.com>
[ohad@wizery.com: rewrite the commit log, #include mutex.h, add minor commentary]
[ohad@wizery.com: update register/unregister parts in hwspinlock.txt]
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
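
As background, here is a minimal sketch of the pattern the patch moves to. It is not taken from the patch itself; `example_tree`, `example_tree_lock`, and `example_insert` are made-up names. The point it illustrates: on a tree declared with `GFP_KERNEL`, `radix_tree_insert()` may allocate tree nodes and therefore sleep, so the tree must be guarded by a mutex rather than a spinlock.

```c
#include <linux/mutex.h>
#include <linux/radix-tree.h>

/* Node allocations inside radix_tree_insert() use this gfp mask. */
static RADIX_TREE(example_tree, GFP_KERNEL);

/* A mutex, not a spinlock: GFP_KERNEL allocations may sleep. */
static DEFINE_MUTEX(example_tree_lock);

static int example_insert(unsigned long id, void *item)
{
	int ret;

	/* Process context only: mutex_lock() itself may sleep. */
	mutex_lock(&example_tree_lock);

	/* May sleep while allocating tree nodes; safe under a mutex. */
	ret = radix_tree_insert(&example_tree, id, item);

	mutex_unlock(&example_tree_lock);
	return ret;
}
```

Note that, per the commit log, only the tree-management paths take this mutex; taking the hwspinlock itself stays spinlock-based and atomic-context safe.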
Diffstat (limited to 'drivers/hwspinlock')
| | | |
|---|---|---|
| -rw-r--r-- | drivers/hwspinlock/hwspinlock_core.c | 45 |

1 file changed, 20 insertions, 25 deletions
```diff
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 4eb85b4a320e..0d20b82df0a7 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -26,6 +26,7 @@
 #include <linux/radix-tree.h>
 #include <linux/hwspinlock.h>
 #include <linux/pm_runtime.h>
+#include <linux/mutex.h>
 
 #include "hwspinlock_internal.h"
 
@@ -52,10 +53,12 @@
 static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
 
 /*
- * Synchronization of access to the tree is achieved using this spinlock,
+ * Synchronization of access to the tree is achieved using this mutex,
  * as the radix-tree API requires that users provide all synchronisation.
+ * A mutex is needed because we're using non-atomic radix tree allocations.
  */
-static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+static DEFINE_MUTEX(hwspinlock_tree_lock);
+
 
 /**
  * __hwspin_trylock() - attempt to lock a specific hwspinlock
@@ -261,8 +264,7 @@ EXPORT_SYMBOL_GPL(__hwspin_unlock);
  * This function should be called from the underlying platform-specific
  * implementation, to register a new hwspinlock instance.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
@@ -279,7 +281,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock)
 
 	spin_lock_init(&hwlock->lock);
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
 	if (ret == -EEXIST)
@@ -295,7 +297,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock)
 	WARN_ON(tmp != hwlock);
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_register);
@@ -307,8 +309,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_register);
  * This function should be called from the underlying platform-specific
  * implementation, to unregister an existing (and unused) hwspinlock.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of hwspinlock @id on success, or NULL on failure
  */
@@ -317,7 +318,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
 	struct hwspinlock *hwlock = NULL;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure the hwspinlock is not in use (tag is set) */
 	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -333,7 +334,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
 	}
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
@@ -402,9 +403,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
  * to the remote core before it can be used for synchronization (to get the
  * id of a given hwlock, use hwspin_lock_get_id()).
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -413,7 +412,7 @@ struct hwspinlock *hwspin_lock_request(void)
 	struct hwspinlock *hwlock;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* look for an unused lock */
 	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
@@ -433,7 +432,7 @@ struct hwspinlock *hwspin_lock_request(void)
 		hwlock = NULL;
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request);
@@ -447,9 +446,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
  * Usually early board code will be calling this function in order to
  * reserve specific hwspinlock ids for predefined purposes.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -458,7 +455,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 	struct hwspinlock *hwlock;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure this hwspinlock exists */
 	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
@@ -484,7 +481,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 		hwlock = NULL;
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
@@ -497,9 +494,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
  * Should only be called with an @hwlock that was retrieved from
  * an earlier call to omap_hwspin_lock_request{_specific}.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
@@ -513,7 +508,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
 		return -EINVAL;
 	}
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure the hwspinlock is used */
 	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
@@ -540,7 +535,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
 	module_put(hwlock->dev->driver->owner);
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_free);
```
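
After this patch, the register/unregister/request/free APIs may sleep and must be called from process context. A hedged usage sketch follows; it is hypothetical caller code, not from the patch, and the lock id `8` plus all `example_*` names are made up for illustration.

```c
#include <linux/hwspinlock.h>

static struct hwspinlock *bank_lock;	/* hypothetical driver state */

/* e.g. called from a driver's probe(): process context, may sleep */
static int example_setup(void)
{
	/* Reserve a predefined lock id; 8 is an arbitrary example. */
	bank_lock = hwspin_lock_request_specific(8);
	if (!bank_lock)
		return -EBUSY;

	return 0;
}

/* e.g. called from remove(): now also a might-sleep path */
static void example_teardown(void)
{
	hwspin_lock_free(bank_lock);
}
```

Locking and unlocking the hwspinlock itself (the `__hwspin_trylock()`/`__hwspin_unlock()` paths) are unaffected: they never take `hwspinlock_tree_lock`, so they remain usable from atomic context as before.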