author	Hugh Dickins <hughd@google.com>	2015-11-05 18:50:34 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-05 19:34:48 -0800
commit	d0424c429f8e0555a337d71e0a13f2289c636ec9 (patch)
tree	0a2bbf782d14aaf8cf8e68dbbcf94fe4d27ab4c1
parent	b72bdfa73603f2f81fbac651b9ae36807e877752 (diff)
tmpfs: avoid a little creat and stat slowdown
LKP reports that v4.2 commit afa2db2fb6f1 ("tmpfs: truncate prealloc blocks past i_size") causes a 14.5% slowdown in the AIM9 creat-clo benchmark.

creat-clo does just what you'd expect from the name, and creat's O_TRUNC on a 0-length file does indeed incur more overhead now that shmem_setattr() tests "0 <= 0" instead of "0 < 0".

I'm not sure how much we care, but I think it would not be too VW-like to add in a check for whether any pages (or swap) are allocated: if none are allocated, there's none to remove from the radix_tree.  At first I thought that check would be good enough for the unmaps too, but no: we should not skip the unlikely case of unmapping pages beyond the new EOF, which were COWed from holes which have now been reclaimed, leaving none.

This gives me an 8.5% speedup: I was testing on Haswell instead of LKP's Westmere, and running a debug config before and after, which I hope accounts for the lesser speedup.

And probably someone has a benchmark where a thousand threads keep on stat'ing the same file repeatedly: forestall that report by adjusting v4.3 commit 44a30220bc0a ("shmem: recalculate file inode when fstat") not to take the spinlock in shmem_getattr() when there's no work to do.

Signed-off-by: Hugh Dickins <hughd@google.com>
Reported-by: Ying Huang <ying.huang@linux.intel.com>
Tested-by: Ying Huang <ying.huang@linux.intel.com>
Cc: Josef Bacik <jbacik@fb.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
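[Editor's note: for reference, a minimal userspace sketch of a creat-close loop in the spirit of the AIM9 creat-clo workload described above, run against a tmpfs mount; the path, mode and iteration count are illustrative assumptions, not taken from the benchmark itself.]

/*
 * creat-clo style sketch: repeatedly creat() and close() the same
 * file on tmpfs.  Each creat() is O_CREAT|O_WRONLY|O_TRUNC, so the
 * O_TRUNC-on-0-length-file path through shmem_setattr() is hit on
 * every iteration after the first.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/dev/shm/creat-clo-test";	/* assumed tmpfs mount */

	for (long i = 0; i < 1000000; i++) {
		int fd = creat(path, 0600);
		if (fd < 0) {
			perror("creat");
			return 1;
		}
		close(fd);
	}
	unlink(path);
	return 0;
}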
 mm/shmem.c | 22 ++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 6529226b3814..3b8b73928398 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -548,12 +548,12 @@ static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry,
 	struct inode *inode = dentry->d_inode;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 
-	spin_lock(&info->lock);
-	shmem_recalc_inode(inode);
-	spin_unlock(&info->lock);
-
+	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
+		spin_lock(&info->lock);
+		shmem_recalc_inode(inode);
+		spin_unlock(&info->lock);
+	}
 	generic_fillattr(inode, stat);
-
 	return 0;
 }
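[Editor's note: the hunk above skips taking info->lock whenever the cached alloced/swapped counters already agree with the mapping's page count, so a stat-only workload never contends on the spinlock. Below is a hedged sketch of the "many threads stat'ing the same file" pattern the commit message anticipates; the thread count, loop count and path are illustrative assumptions (build with -lpthread).]

/*
 * Several threads hammer fstat() on one shared tmpfs fd, so
 * shmem_getattr() runs concurrently on the same inode.
 */
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int shared_fd;

static void *stat_loop(void *arg)
{
	struct stat st;

	for (long i = 0; i < 1000000; i++)
		fstat(shared_fd, &st);
	return NULL;
}

int main(void)
{
	pthread_t threads[8];

	shared_fd = open("/dev/shm/stat-test", O_RDWR | O_CREAT, 0600);
	if (shared_fd < 0) {
		perror("open");
		return 1;
	}
	for (int i = 0; i < 8; i++)
		pthread_create(&threads[i], NULL, stat_loop, NULL);
	for (int i = 0; i < 8; i++)
		pthread_join(threads[i], NULL);
	close(shared_fd);
	unlink("/dev/shm/stat-test");
	return 0;
}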
@@ -586,10 +586,16 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 		}
 		if (newsize <= oldsize) {
 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
-			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
-			shmem_truncate_range(inode, newsize, (loff_t)-1);
+			if (oldsize > holebegin)
+				unmap_mapping_range(inode->i_mapping,
+							holebegin, 0, 1);
+			if (info->alloced)
+				shmem_truncate_range(inode,
+							newsize, (loff_t)-1);
 			/* unmap again to remove racily COWed private pages */
-			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
+			if (oldsize > holebegin)
+				unmap_mapping_range(inode->i_mapping,
+							holebegin, 0, 1);
 		}
 	}
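[Editor's note: the hunk above still unmaps when oldsize extends past holebegin even if info->alloced is zero, because a private mapping can hold COW copies of pages faulted in from holes whose backing shmem pages were later reclaimed. Below is a hedged sketch of that corner case, using ftruncate() to reach shmem_setattr(); the path and sizes are illustrative assumptions, and the reclaim of the backing page is not forced here.]

/*
 * Create a 2-page tmpfs file that is all holes, map it MAP_PRIVATE,
 * write into the second page (COWing a private copy from the hole),
 * then shrink the file to 1 page.  The truncate must still unmap the
 * private copy beyond the new EOF, even if the shmem inode itself
 * has no pages allocated by then.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/shm/cow-test", O_RDWR | O_CREAT | O_TRUNC, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ftruncate(fd, 2 * page);		/* 2-page file, all holes */

	char *map = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(map + page, 'x', page);		/* COW a private page from the hole */
	ftruncate(fd, page);			/* COWed page is now past the new EOF */

	munmap(map, 2 * page);
	close(fd);
	unlink("/dev/shm/cow-test");
	return 0;
}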