author		Eric Sandeen <sandeen@sandeen.net>	2007-10-11 17:42:10 +1000
committer	Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2008-02-07 16:47:01 +1100
commit		ba74d0cba51dcaa99e4dc2e4fb62e6e13abbf703 (patch)
tree		1dfedaeec9702659950c2ee19dccee4ea076c3ed
parent		703e1f0fd2edc2978bde3b4536e78b577318c090 (diff)
download	linux-ba74d0cba51dcaa99e4dc2e4fb62e6e13abbf703.tar.bz2
[XFS] Unwrap mru_lock.
Un-obfuscate mru_lock, remove the mutex_spinlock()->spin_lock() wrapper
macros, call spin_lock() directly, and remove the extraneous cookie
holdover from the old XFS code.

SGI-PV: 970382
SGI-Modid: xfs-linux-melb:xfs-kern:29745a

Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
Signed-off-by: Donald Douwsma <donaldd@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
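For context, the wrappers being removed were thin IRIX-compatibility macros over the native Linux spinlock API. A minimal sketch of what they looked like, reconstructed from the old fs/xfs/linux-2.6/spin.h; treat the exact definitions as an approximation rather than a quote:

/*
 * Approximate pre-patch compat macros (not part of this diff).
 * mutex_spinlock() returned a dummy "cookie", mimicking the saved
 * interrupt level that IRIX's mutex_spinlock() handed back; on Linux
 * the value was always 0 and always discarded, which is the
 * "extraneous cookie holdover" this patch removes.
 */
#define mutex_spinlock(lock)		({ spin_lock(lock); 0; })
#define mutex_spinunlock(lock, s)	do { spin_unlock(lock); (void)(s); } while (0)

With the wrappers gone, callers invoke spin_lock()/spin_unlock() directly, as every hunk below shows.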
-rw-r--r--	fs/xfs/xfs_mru_cache.c	34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index e0b358c1c533..dc64630e870e 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -245,7 +245,7 @@ _xfs_mru_cache_clear_reap_list(
 		 */
 		list_move(&elem->list_node, &tmp);
 	}
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	list_for_each_entry_safe(elem, next, &tmp, list_node) {
 
@@ -259,7 +259,7 @@ _xfs_mru_cache_clear_reap_list(
 		kmem_zone_free(xfs_mru_elem_zone, elem);
 	}
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 }
 
 /*
@@ -280,7 +280,7 @@ _xfs_mru_cache_reap(
 	if (!mru || !mru->lists)
 		return;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	next = _xfs_mru_cache_migrate(mru, jiffies);
 	_xfs_mru_cache_clear_reap_list(mru);
 
@@ -294,7 +294,7 @@ _xfs_mru_cache_reap(
 		queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
 	}
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
 
 int
@@ -398,17 +398,17 @@ xfs_mru_cache_flush(
 	if (!mru || !mru->lists)
 		return;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	if (mru->queued) {
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 		cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
-		mutex_spinlock(&mru->lock);
+		spin_lock(&mru->lock);
 	}
 
 	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
 	_xfs_mru_cache_clear_reap_list(mru);
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
 
 void
@@ -454,13 +454,13 @@ xfs_mru_cache_insert(
 	elem->key = key;
 	elem->value = value;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 
 	radix_tree_insert(&mru->store, key, elem);
 	radix_tree_preload_end();
 	_xfs_mru_cache_list_insert(mru, elem);
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	return 0;
 }
@@ -483,14 +483,14 @@ xfs_mru_cache_remove(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_delete(&mru->store, key);
 	if (elem) {
 		value = elem->value;
 		list_del(&elem->list_node);
 	}
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	if (elem)
 		kmem_zone_free(xfs_mru_elem_zone, elem);
@@ -540,14 +540,14 @@ xfs_mru_cache_lookup(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_lookup(&mru->store, key);
 	if (elem) {
 		list_del(&elem->list_node);
 		_xfs_mru_cache_list_insert(mru, elem);
 	}
 	else
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
 }
@@ -571,10 +571,10 @@ xfs_mru_cache_peek(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_lookup(&mru->store, key);
 	if (!elem)
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
 }
@@ -588,5 +588,5 @@ void
 xfs_mru_cache_done(
 	xfs_mru_cache_t *mru)
 {
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }