author		Hugh Dickins <hughd@google.com>	2011-07-25 17:12:26 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-25 20:57:10 -0700
commit		d515afe88a32e567c550e3db914f3e378f86453a (patch)
tree		0129002decdd5c1f8bcb521aacdba73e0d1699c3 /mm
parent		d0823576bf4b8eafce1b56f98613465a0352a376 (diff)
download	linux-d515afe88a32e567c550e3db914f3e378f86453a.tar.bz2
tmpfs: no need to use i_lock
2.6.36's commit 7e496299d4d2 ("tmpfs: make tmpfs scalable with percpu_counter
for used blocks") used inode->i_lock in place of sbinfo->stat_lock around the
i_blocks updates; but that was adverse to scalability, and unnecessary, since
info->lock is already held there in the fast paths.
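
To illustrate the point, here is a condensed paraphrase of the allocation fast
path (not the exact kernel code: in shmem_getpage() and shmem_swp_alloc() the
info->lock is actually taken earlier and released later than shown here):

	/* Before: inode->i_lock nested inside the already-held info->lock. */
	spin_lock(&info->lock);
	percpu_counter_inc(&sbinfo->used_blocks);
	spin_lock(&inode->i_lock);
	inode->i_blocks += BLOCKS_PER_PAGE;
	spin_unlock(&inode->i_lock);
	/* ... */
	spin_unlock(&info->lock);

	/* After: info->lock alone serializes the i_blocks update, so the
	 * extra per-inode lock round trip disappears from the fast path.
	 */
	spin_lock(&info->lock);
	percpu_counter_inc(&sbinfo->used_blocks);
	inode->i_blocks += BLOCKS_PER_PAGE;
	/* ... */
	spin_unlock(&info->lock);
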
Remove those uses of i_lock, and add info->lock in the three error paths
where it's then needed across shmem_free_blocks(). It's not actually
needed across shmem_unacct_blocks(), but they're so often paired that it
looks wrong to split them apart.
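
In those error paths info->lock is not held at that point, so the patch takes
it explicitly around the unaccount/free pair; roughly (paraphrasing the diff
below, with the surrounding error handling omitted):

	/* Error path after the change: take info->lock explicitly so that
	 * shmem_free_blocks() still adjusts inode->i_blocks under it.
	 * shmem_unacct_blocks() does not strictly need the lock, but the
	 * two calls are kept together, as noted above.
	 */
	spin_lock(&info->lock);
	shmem_unacct_blocks(info->flags, 1);
	shmem_free_blocks(inode, 1);
	spin_unlock(&info->lock);
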
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/shmem.c	14
1 files changed, 6 insertions, 8 deletions
diff --git a/mm/shmem.c b/mm/shmem.c
index fcedf5464eb7..c1db11cf220d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -241,9 +241,7 @@ static void shmem_free_blocks(struct inode *inode, long pages)
 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	if (sbinfo->max_blocks) {
 		percpu_counter_add(&sbinfo->used_blocks, -pages);
-		spin_lock(&inode->i_lock);
 		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
-		spin_unlock(&inode->i_lock);
 	}
 }
 
@@ -432,9 +430,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 						sbinfo->max_blocks - 1) >= 0)
 				return ERR_PTR(-ENOSPC);
 			percpu_counter_inc(&sbinfo->used_blocks);
-			spin_lock(&inode->i_lock);
 			inode->i_blocks += BLOCKS_PER_PAGE;
-			spin_unlock(&inode->i_lock);
 		}
 
 		spin_unlock(&info->lock);
@@ -1421,9 +1417,7 @@ repeat:
 			    shmem_acct_block(info->flags))
 				goto nospace;
 			percpu_counter_inc(&sbinfo->used_blocks);
-			spin_lock(&inode->i_lock);
 			inode->i_blocks += BLOCKS_PER_PAGE;
-			spin_unlock(&inode->i_lock);
 		} else if (shmem_acct_block(info->flags))
 			goto nospace;
 
@@ -1434,8 +1428,10 @@ repeat:
 				spin_unlock(&info->lock);
 				filepage = shmem_alloc_page(gfp, info, idx);
 				if (!filepage) {
+					spin_lock(&info->lock);
 					shmem_unacct_blocks(info->flags, 1);
 					shmem_free_blocks(inode, 1);
+					spin_unlock(&info->lock);
 					error = -ENOMEM;
 					goto failed;
 				}
@@ -1449,8 +1445,10 @@ repeat:
 							current->mm, GFP_KERNEL);
 				if (error) {
 					page_cache_release(filepage);
+					spin_lock(&info->lock);
 					shmem_unacct_blocks(info->flags, 1);
 					shmem_free_blocks(inode, 1);
+					spin_unlock(&info->lock);
 					filepage = NULL;
 					goto failed;
 				}
@@ -1480,10 +1478,10 @@ repeat:
 			 * be done automatically.
 			 */
 			if (ret) {
-				spin_unlock(&info->lock);
-				page_cache_release(filepage);
 				shmem_unacct_blocks(info->flags, 1);
 				shmem_free_blocks(inode, 1);
+				spin_unlock(&info->lock);
+				page_cache_release(filepage);
 				filepage = NULL;
 				if (error)
 					goto failed;