From be0726d33cb8f411945884664924bed3cb8c70ee Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 22 Feb 2016 11:56:38 -0500 Subject: ext2: convert to mbcache2 The conversion is generally straightforward. We convert filesystem from a global cache to per-fs one. Similarly to ext4 the tricky part is that xattr block corresponding to found mbcache entry can get freed before we get buffer lock for that block. So we have to check whether the entry is still valid after getting the buffer lock. Signed-off-by: Jan Kara Signed-off-by: Theodore Ts'o --- fs/ext2/ext2.h | 3 ++ fs/ext2/super.c | 25 ++++++---- fs/ext2/xattr.c | 143 ++++++++++++++++++++++++++------------------------------ fs/ext2/xattr.h | 21 ++------- 4 files changed, 92 insertions(+), 100 deletions(-) (limited to 'fs/ext2') diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index 4c69c94cafd8..f98ce7e60a0f 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -61,6 +61,8 @@ struct ext2_block_alloc_info { #define rsv_start rsv_window._rsv_start #define rsv_end rsv_window._rsv_end +struct mb2_cache; + /* * second extended-fs super-block data in memory */ @@ -111,6 +113,7 @@ struct ext2_sb_info { * of the mount options. */ spinlock_t s_lock; + struct mb2_cache *s_mb_cache; }; static inline spinlock_t * diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 2a188413a2b0..b78caf25f746 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -131,7 +131,10 @@ static void ext2_put_super (struct super_block * sb) dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); - ext2_xattr_put_super(sb); + if (sbi->s_mb_cache) { + ext2_xattr_destroy_cache(sbi->s_mb_cache); + sbi->s_mb_cache = NULL; + } if (!(sb->s_flags & MS_RDONLY)) { struct ext2_super_block *es = sbi->s_es; @@ -1104,6 +1107,14 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) ext2_msg(sb, KERN_ERR, "error: insufficient memory"); goto failed_mount3; } + +#ifdef CONFIG_EXT2_FS_XATTR + sbi->s_mb_cache = ext2_xattr_create_cache(); + if (!sbi->s_mb_cache) { + ext2_msg(sb, KERN_ERR, "Failed to create an mb_cache"); + goto failed_mount3; + } +#endif /* * set up enough so that it can read an inode */ @@ -1149,6 +1160,8 @@ cantfind_ext2: sb->s_id); goto failed_mount; failed_mount3: + if (sbi->s_mb_cache) + ext2_xattr_destroy_cache(sbi->s_mb_cache); percpu_counter_destroy(&sbi->s_freeblocks_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); @@ -1555,20 +1568,17 @@ MODULE_ALIAS_FS("ext2"); static int __init init_ext2_fs(void) { - int err = init_ext2_xattr(); - if (err) - return err; + int err; + err = init_inodecache(); if (err) - goto out1; + return err; err = register_filesystem(&ext2_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); -out1: - exit_ext2_xattr(); return err; } @@ -1576,7 +1586,6 @@ static void __exit exit_ext2_fs(void) { unregister_filesystem(&ext2_fs_type); destroy_inodecache(); - exit_ext2_xattr(); } MODULE_AUTHOR("Remy Card and others"); diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index f57a7aba32eb..7162b4869bc3 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -56,7 +56,7 @@ #include #include #include -#include +#include #include #include #include @@ -90,14 +90,12 @@ static int ext2_xattr_set2(struct inode *, struct buffer_head *, struct ext2_xattr_header *); -static int ext2_xattr_cache_insert(struct buffer_head *); +static int ext2_xattr_cache_insert(struct mb2_cache *, struct buffer_head *); static struct buffer_head *ext2_xattr_cache_find(struct inode *, struct 
ext2_xattr_header *); static void ext2_xattr_rehash(struct ext2_xattr_header *, struct ext2_xattr_entry *); -static struct mb_cache *ext2_xattr_cache; - static const struct xattr_handler *ext2_xattr_handler_map[] = { [EXT2_XATTR_INDEX_USER] = &ext2_xattr_user_handler, #ifdef CONFIG_EXT2_FS_POSIX_ACL @@ -152,6 +150,7 @@ ext2_xattr_get(struct inode *inode, int name_index, const char *name, size_t name_len, size; char *end; int error; + struct mb2_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache; ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld", name_index, name, buffer, (long)buffer_size); @@ -196,7 +195,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_get", goto found; entry = next; } - if (ext2_xattr_cache_insert(bh)) + if (ext2_xattr_cache_insert(ext2_mb_cache, bh)) ea_idebug(inode, "cache insert failed"); error = -ENODATA; goto cleanup; @@ -209,7 +208,7 @@ found: le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize) goto bad_block; - if (ext2_xattr_cache_insert(bh)) + if (ext2_xattr_cache_insert(ext2_mb_cache, bh)) ea_idebug(inode, "cache insert failed"); if (buffer) { error = -ERANGE; @@ -247,6 +246,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) char *end; size_t rest = buffer_size; int error; + struct mb2_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache; ea_idebug(inode, "buffer=%p, buffer_size=%ld", buffer, (long)buffer_size); @@ -281,7 +281,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list", goto bad_block; entry = next; } - if (ext2_xattr_cache_insert(bh)) + if (ext2_xattr_cache_insert(ext2_mb_cache, bh)) ea_idebug(inode, "cache insert failed"); /* list the attribute names */ @@ -483,22 +483,23 @@ bad_block: ext2_error(sb, "ext2_xattr_set", /* Here we know that we can set the new attribute. */ if (header) { - struct mb_cache_entry *ce; - /* assert(header == HDR(bh)); */ - ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev, - bh->b_blocknr); lock_buffer(bh); if (header->h_refcount == cpu_to_le32(1)) { + __u32 hash = le32_to_cpu(header->h_hash); + ea_bdebug(bh, "modifying in-place"); - if (ce) - mb_cache_entry_free(ce); + /* + * This must happen under buffer lock for + * ext2_xattr_set2() to reliably detect modified block + */ + mb2_cache_entry_delete_block(EXT2_SB(sb)->s_mb_cache, + hash, bh->b_blocknr); + /* keep the buffer locked while modifying it. */ } else { int offset; - if (ce) - mb_cache_entry_release(ce); unlock_buffer(bh); ea_bdebug(bh, "cloning"); header = kmalloc(bh->b_size, GFP_KERNEL); @@ -626,6 +627,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, struct super_block *sb = inode->i_sb; struct buffer_head *new_bh = NULL; int error; + struct mb2_cache *ext2_mb_cache = EXT2_SB(sb)->s_mb_cache; if (header) { new_bh = ext2_xattr_cache_find(inode, header); @@ -653,7 +655,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, don't need to change the reference count. 
*/ new_bh = old_bh; get_bh(new_bh); - ext2_xattr_cache_insert(new_bh); + ext2_xattr_cache_insert(ext2_mb_cache, new_bh); } else { /* We need to allocate a new block */ ext2_fsblk_t goal = ext2_group_first_block_no(sb, @@ -674,7 +676,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, memcpy(new_bh->b_data, header, new_bh->b_size); set_buffer_uptodate(new_bh); unlock_buffer(new_bh); - ext2_xattr_cache_insert(new_bh); + ext2_xattr_cache_insert(ext2_mb_cache, new_bh); ext2_xattr_update_super_block(sb); } @@ -707,19 +709,21 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, error = 0; if (old_bh && old_bh != new_bh) { - struct mb_cache_entry *ce; - /* * If there was an old block and we are no longer using it, * release the old block. */ - ce = mb_cache_entry_get(ext2_xattr_cache, old_bh->b_bdev, - old_bh->b_blocknr); lock_buffer(old_bh); if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) { + __u32 hash = le32_to_cpu(HDR(old_bh)->h_hash); + + /* + * This must happen under buffer lock for + * ext2_xattr_set2() to reliably detect freed block + */ + mb2_cache_entry_delete_block(ext2_mb_cache, + hash, old_bh->b_blocknr); /* Free the old block. */ - if (ce) - mb_cache_entry_free(ce); ea_bdebug(old_bh, "freeing"); ext2_free_blocks(inode, old_bh->b_blocknr, 1); mark_inode_dirty(inode); @@ -730,8 +734,6 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, } else { /* Decrement the refcount only. */ le32_add_cpu(&HDR(old_bh)->h_refcount, -1); - if (ce) - mb_cache_entry_release(ce); dquot_free_block_nodirty(inode, 1); mark_inode_dirty(inode); mark_buffer_dirty(old_bh); @@ -757,7 +759,6 @@ void ext2_xattr_delete_inode(struct inode *inode) { struct buffer_head *bh = NULL; - struct mb_cache_entry *ce; down_write(&EXT2_I(inode)->xattr_sem); if (!EXT2_I(inode)->i_file_acl) @@ -777,19 +778,22 @@ ext2_xattr_delete_inode(struct inode *inode) EXT2_I(inode)->i_file_acl); goto cleanup; } - ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev, bh->b_blocknr); lock_buffer(bh); if (HDR(bh)->h_refcount == cpu_to_le32(1)) { - if (ce) - mb_cache_entry_free(ce); + __u32 hash = le32_to_cpu(HDR(bh)->h_hash); + + /* + * This must happen under buffer lock for ext2_xattr_set2() to + * reliably detect freed block + */ + mb2_cache_entry_delete_block(EXT2_SB(inode->i_sb)->s_mb_cache, + hash, bh->b_blocknr); ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1); get_bh(bh); bforget(bh); unlock_buffer(bh); } else { le32_add_cpu(&HDR(bh)->h_refcount, -1); - if (ce) - mb_cache_entry_release(ce); ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount)); unlock_buffer(bh); @@ -805,18 +809,6 @@ cleanup: up_write(&EXT2_I(inode)->xattr_sem); } -/* - * ext2_xattr_put_super() - * - * This is called when a file system is unmounted. - */ -void -ext2_xattr_put_super(struct super_block *sb) -{ - mb_cache_shrink(sb->s_bdev); -} - - /* * ext2_xattr_cache_insert() * @@ -826,28 +818,20 @@ ext2_xattr_put_super(struct super_block *sb) * Returns 0, or a negative error number on failure. 
*/ static int -ext2_xattr_cache_insert(struct buffer_head *bh) +ext2_xattr_cache_insert(struct mb2_cache *cache, struct buffer_head *bh) { __u32 hash = le32_to_cpu(HDR(bh)->h_hash); - struct mb_cache_entry *ce; int error; - ce = mb_cache_entry_alloc(ext2_xattr_cache, GFP_NOFS); - if (!ce) - return -ENOMEM; - error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash); + error = mb2_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr); if (error) { - mb_cache_entry_free(ce); if (error == -EBUSY) { ea_bdebug(bh, "already in cache (%d cache entries)", atomic_read(&ext2_xattr_cache->c_entry_count)); error = 0; } - } else { - ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash, - atomic_read(&ext2_xattr_cache->c_entry_count)); - mb_cache_entry_release(ce); - } + } else + ea_bdebug(bh, "inserting [%x]", (int)hash); return error; } @@ -903,23 +887,17 @@ static struct buffer_head * ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header) { __u32 hash = le32_to_cpu(header->h_hash); - struct mb_cache_entry *ce; + struct mb2_cache_entry *ce; + struct mb2_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache; if (!header->h_hash) return NULL; /* never share */ ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); again: - ce = mb_cache_entry_find_first(ext2_xattr_cache, inode->i_sb->s_bdev, - hash); + ce = mb2_cache_entry_find_first(ext2_mb_cache, hash); while (ce) { struct buffer_head *bh; - if (IS_ERR(ce)) { - if (PTR_ERR(ce) == -EAGAIN) - goto again; - break; - } - bh = sb_bread(inode->i_sb, ce->e_block); if (!bh) { ext2_error(inode->i_sb, "ext2_xattr_cache_find", @@ -927,7 +905,21 @@ again: inode->i_ino, (unsigned long) ce->e_block); } else { lock_buffer(bh); - if (le32_to_cpu(HDR(bh)->h_refcount) > + /* + * We have to be careful about races with freeing or + * rehashing of xattr block. Once we hold buffer lock + * xattr block's state is stable so we can check + * whether the block got freed / rehashed or not. + * Since we unhash mbcache entry under buffer lock when + * freeing / rehashing xattr block, checking whether + * entry is still hashed is reliable. 
+ */ + if (hlist_bl_unhashed(&ce->e_hash_list)) { + mb2_cache_entry_put(ext2_mb_cache, ce); + unlock_buffer(bh); + brelse(bh); + goto again; + } else if (le32_to_cpu(HDR(bh)->h_refcount) > EXT2_XATTR_REFCOUNT_MAX) { ea_idebug(inode, "block %ld refcount %d>%d", (unsigned long) ce->e_block, @@ -936,13 +928,14 @@ again: } else if (!ext2_xattr_cmp(header, HDR(bh))) { ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count))); - mb_cache_entry_release(ce); + mb2_cache_entry_touch(ext2_mb_cache, ce); + mb2_cache_entry_put(ext2_mb_cache, ce); return bh; } unlock_buffer(bh); brelse(bh); } - ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash); + ce = mb2_cache_entry_find_next(ext2_mb_cache, ce); } return NULL; } @@ -1015,17 +1008,15 @@ static void ext2_xattr_rehash(struct ext2_xattr_header *header, #undef BLOCK_HASH_SHIFT -int __init -init_ext2_xattr(void) +#define HASH_BUCKET_BITS 10 + +struct mb2_cache *ext2_xattr_create_cache(void) { - ext2_xattr_cache = mb_cache_create("ext2_xattr", 6); - if (!ext2_xattr_cache) - return -ENOMEM; - return 0; + return mb2_cache_create(HASH_BUCKET_BITS); } -void -exit_ext2_xattr(void) +void ext2_xattr_destroy_cache(struct mb2_cache *cache) { - mb_cache_destroy(ext2_xattr_cache); + if (cache) + mb2_cache_destroy(cache); } diff --git a/fs/ext2/xattr.h b/fs/ext2/xattr.h index 60edf298644e..6ea38aa9563a 100644 --- a/fs/ext2/xattr.h +++ b/fs/ext2/xattr.h @@ -53,6 +53,8 @@ struct ext2_xattr_entry { #define EXT2_XATTR_SIZE(size) \ (((size) + EXT2_XATTR_ROUND) & ~EXT2_XATTR_ROUND) +struct mb2_cache; + # ifdef CONFIG_EXT2_FS_XATTR extern const struct xattr_handler ext2_xattr_user_handler; @@ -65,10 +67,9 @@ extern int ext2_xattr_get(struct inode *, int, const char *, void *, size_t); extern int ext2_xattr_set(struct inode *, int, const char *, const void *, size_t, int); extern void ext2_xattr_delete_inode(struct inode *); -extern void ext2_xattr_put_super(struct super_block *); -extern int init_ext2_xattr(void); -extern void exit_ext2_xattr(void); +extern struct mb2_cache *ext2_xattr_create_cache(void); +extern void ext2_xattr_destroy_cache(struct mb2_cache *cache); extern const struct xattr_handler *ext2_xattr_handlers[]; @@ -93,19 +94,7 @@ ext2_xattr_delete_inode(struct inode *inode) { } -static inline void -ext2_xattr_put_super(struct super_block *sb) -{ -} - -static inline int -init_ext2_xattr(void) -{ - return 0; -} - -static inline void -exit_ext2_xattr(void) +static inline void ext2_xattr_destroy_cache(struct mb2_cache *cache) { } -- cgit v1.2.3 From 7a2508e1b657cfc7e1371550f88c7a7bc4288f32 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 22 Feb 2016 22:35:22 -0500 Subject: mbcache2: rename to mbcache Since old mbcache code is gone, let's rename new code to mbcache since number 2 is now meaningless. This is just a mechanical replacement. 
Signed-off-by: Jan Kara Signed-off-by: Theodore Ts'o --- fs/Makefile | 2 +- fs/ext2/ext2.h | 4 +- fs/ext2/xattr.c | 48 +++--- fs/ext2/xattr.h | 8 +- fs/ext4/ext4.h | 2 +- fs/ext4/xattr.c | 54 +++--- fs/ext4/xattr.h | 4 +- fs/mbcache.c | 424 +++++++++++++++++++++++++++++++++++++++++++++++ fs/mbcache2.c | 424 ----------------------------------------------- include/linux/mbcache.h | 53 ++++++ include/linux/mbcache2.h | 53 ------ 11 files changed, 538 insertions(+), 538 deletions(-) create mode 100644 fs/mbcache.c delete mode 100644 fs/mbcache2.c create mode 100644 include/linux/mbcache.h delete mode 100644 include/linux/mbcache2.h (limited to 'fs/ext2') diff --git a/fs/Makefile b/fs/Makefile index 59b844007fbc..79f522575cba 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -41,7 +41,7 @@ obj-$(CONFIG_COMPAT_BINFMT_ELF) += compat_binfmt_elf.o obj-$(CONFIG_BINFMT_ELF_FDPIC) += binfmt_elf_fdpic.o obj-$(CONFIG_BINFMT_FLAT) += binfmt_flat.o -obj-$(CONFIG_FS_MBCACHE) += mbcache2.o +obj-$(CONFIG_FS_MBCACHE) += mbcache.o obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o obj-$(CONFIG_NFS_COMMON) += nfs_common/ obj-$(CONFIG_COREDUMP) += coredump.o diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index f98ce7e60a0f..170939f379d7 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -61,7 +61,7 @@ struct ext2_block_alloc_info { #define rsv_start rsv_window._rsv_start #define rsv_end rsv_window._rsv_end -struct mb2_cache; +struct mb_cache; /* * second extended-fs super-block data in memory @@ -113,7 +113,7 @@ struct ext2_sb_info { * of the mount options. */ spinlock_t s_lock; - struct mb2_cache *s_mb_cache; + struct mb_cache *s_mb_cache; }; static inline spinlock_t * diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index 7162b4869bc3..71d58c2d7a19 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -56,7 +56,7 @@ #include #include #include -#include +#include #include #include #include @@ -90,7 +90,7 @@ static int ext2_xattr_set2(struct inode *, struct buffer_head *, struct ext2_xattr_header *); -static int ext2_xattr_cache_insert(struct mb2_cache *, struct buffer_head *); +static int ext2_xattr_cache_insert(struct mb_cache *, struct buffer_head *); static struct buffer_head *ext2_xattr_cache_find(struct inode *, struct ext2_xattr_header *); static void ext2_xattr_rehash(struct ext2_xattr_header *, @@ -150,7 +150,7 @@ ext2_xattr_get(struct inode *inode, int name_index, const char *name, size_t name_len, size; char *end; int error; - struct mb2_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache; + struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache; ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld", name_index, name, buffer, (long)buffer_size); @@ -246,7 +246,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) char *end; size_t rest = buffer_size; int error; - struct mb2_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache; + struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache; ea_idebug(inode, "buffer=%p, buffer_size=%ld", buffer, (long)buffer_size); @@ -493,8 +493,8 @@ bad_block: ext2_error(sb, "ext2_xattr_set", * This must happen under buffer lock for * ext2_xattr_set2() to reliably detect modified block */ - mb2_cache_entry_delete_block(EXT2_SB(sb)->s_mb_cache, - hash, bh->b_blocknr); + mb_cache_entry_delete_block(EXT2_SB(sb)->s_mb_cache, + hash, bh->b_blocknr); /* keep the buffer locked while modifying it. 
*/ } else { @@ -627,7 +627,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, struct super_block *sb = inode->i_sb; struct buffer_head *new_bh = NULL; int error; - struct mb2_cache *ext2_mb_cache = EXT2_SB(sb)->s_mb_cache; + struct mb_cache *ext2_mb_cache = EXT2_SB(sb)->s_mb_cache; if (header) { new_bh = ext2_xattr_cache_find(inode, header); @@ -721,8 +721,8 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, * This must happen under buffer lock for * ext2_xattr_set2() to reliably detect freed block */ - mb2_cache_entry_delete_block(ext2_mb_cache, - hash, old_bh->b_blocknr); + mb_cache_entry_delete_block(ext2_mb_cache, + hash, old_bh->b_blocknr); /* Free the old block. */ ea_bdebug(old_bh, "freeing"); ext2_free_blocks(inode, old_bh->b_blocknr, 1); @@ -786,8 +786,8 @@ ext2_xattr_delete_inode(struct inode *inode) * This must happen under buffer lock for ext2_xattr_set2() to * reliably detect freed block */ - mb2_cache_entry_delete_block(EXT2_SB(inode->i_sb)->s_mb_cache, - hash, bh->b_blocknr); + mb_cache_entry_delete_block(EXT2_SB(inode->i_sb)->s_mb_cache, + hash, bh->b_blocknr); ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1); get_bh(bh); bforget(bh); @@ -818,12 +818,12 @@ cleanup: * Returns 0, or a negative error number on failure. */ static int -ext2_xattr_cache_insert(struct mb2_cache *cache, struct buffer_head *bh) +ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh) { __u32 hash = le32_to_cpu(HDR(bh)->h_hash); int error; - error = mb2_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr); + error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr); if (error) { if (error == -EBUSY) { ea_bdebug(bh, "already in cache (%d cache entries)", @@ -887,14 +887,14 @@ static struct buffer_head * ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header) { __u32 hash = le32_to_cpu(header->h_hash); - struct mb2_cache_entry *ce; - struct mb2_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache; + struct mb_cache_entry *ce; + struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache; if (!header->h_hash) return NULL; /* never share */ ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); again: - ce = mb2_cache_entry_find_first(ext2_mb_cache, hash); + ce = mb_cache_entry_find_first(ext2_mb_cache, hash); while (ce) { struct buffer_head *bh; @@ -915,7 +915,7 @@ again: * entry is still hashed is reliable. 
*/ if (hlist_bl_unhashed(&ce->e_hash_list)) { - mb2_cache_entry_put(ext2_mb_cache, ce); + mb_cache_entry_put(ext2_mb_cache, ce); unlock_buffer(bh); brelse(bh); goto again; @@ -928,14 +928,14 @@ again: } else if (!ext2_xattr_cmp(header, HDR(bh))) { ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count))); - mb2_cache_entry_touch(ext2_mb_cache, ce); - mb2_cache_entry_put(ext2_mb_cache, ce); + mb_cache_entry_touch(ext2_mb_cache, ce); + mb_cache_entry_put(ext2_mb_cache, ce); return bh; } unlock_buffer(bh); brelse(bh); } - ce = mb2_cache_entry_find_next(ext2_mb_cache, ce); + ce = mb_cache_entry_find_next(ext2_mb_cache, ce); } return NULL; } @@ -1010,13 +1010,13 @@ static void ext2_xattr_rehash(struct ext2_xattr_header *header, #define HASH_BUCKET_BITS 10 -struct mb2_cache *ext2_xattr_create_cache(void) +struct mb_cache *ext2_xattr_create_cache(void) { - return mb2_cache_create(HASH_BUCKET_BITS); + return mb_cache_create(HASH_BUCKET_BITS); } -void ext2_xattr_destroy_cache(struct mb2_cache *cache) +void ext2_xattr_destroy_cache(struct mb_cache *cache) { if (cache) - mb2_cache_destroy(cache); + mb_cache_destroy(cache); } diff --git a/fs/ext2/xattr.h b/fs/ext2/xattr.h index 6ea38aa9563a..6f82ab1b00ca 100644 --- a/fs/ext2/xattr.h +++ b/fs/ext2/xattr.h @@ -53,7 +53,7 @@ struct ext2_xattr_entry { #define EXT2_XATTR_SIZE(size) \ (((size) + EXT2_XATTR_ROUND) & ~EXT2_XATTR_ROUND) -struct mb2_cache; +struct mb_cache; # ifdef CONFIG_EXT2_FS_XATTR @@ -68,8 +68,8 @@ extern int ext2_xattr_set(struct inode *, int, const char *, const void *, size_ extern void ext2_xattr_delete_inode(struct inode *); -extern struct mb2_cache *ext2_xattr_create_cache(void); -extern void ext2_xattr_destroy_cache(struct mb2_cache *cache); +extern struct mb_cache *ext2_xattr_create_cache(void); +extern void ext2_xattr_destroy_cache(struct mb_cache *cache); extern const struct xattr_handler *ext2_xattr_handlers[]; @@ -94,7 +94,7 @@ ext2_xattr_delete_inode(struct inode *inode) { } -static inline void ext2_xattr_destroy_cache(struct mb2_cache *cache) +static inline void ext2_xattr_destroy_cache(struct mb_cache *cache) { } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 9ac9e62569ef..157b458a69d4 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1468,7 +1468,7 @@ struct ext4_sb_info { struct list_head s_es_list; /* List of inodes with reclaimable extents */ long s_es_nr_inode; struct ext4_es_stats s_es_stats; - struct mb2_cache *s_mb_cache; + struct mb_cache *s_mb_cache; spinlock_t s_es_lock ____cacheline_aligned_in_smp; /* Ratelimit ext4 messages. */ diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index fe9f8d6ab6c9..c6af8a7a436a 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -53,7 +53,7 @@ #include #include #include -#include +#include #include #include "ext4_jbd2.h" #include "ext4.h" @@ -78,10 +78,10 @@ # define ea_bdebug(bh, fmt, ...) 
no_printk(fmt, ##__VA_ARGS__) #endif -static void ext4_xattr_cache_insert(struct mb2_cache *, struct buffer_head *); +static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *); static struct buffer_head *ext4_xattr_cache_find(struct inode *, struct ext4_xattr_header *, - struct mb2_cache_entry **); + struct mb_cache_entry **); static void ext4_xattr_rehash(struct ext4_xattr_header *, struct ext4_xattr_entry *); static int ext4_xattr_list(struct dentry *dentry, char *buffer, @@ -276,7 +276,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name, struct ext4_xattr_entry *entry; size_t size; int error; - struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld", name_index, name, buffer, (long)buffer_size); @@ -428,7 +428,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size) struct inode *inode = d_inode(dentry); struct buffer_head *bh = NULL; int error; - struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); ea_idebug(inode, "buffer=%p, buffer_size=%ld", buffer, (long)buffer_size); @@ -561,8 +561,8 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, * This must happen under buffer lock for * ext4_xattr_block_set() to reliably detect freed block */ - mb2_cache_entry_delete_block(EXT4_GET_MB_CACHE(inode), hash, - bh->b_blocknr); + mb_cache_entry_delete_block(EXT4_GET_MB_CACHE(inode), hash, + bh->b_blocknr); get_bh(bh); unlock_buffer(bh); ext4_free_blocks(handle, inode, bh, 0, 1, @@ -782,9 +782,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, struct super_block *sb = inode->i_sb; struct buffer_head *new_bh = NULL; struct ext4_xattr_search *s = &bs->s; - struct mb2_cache_entry *ce = NULL; + struct mb_cache_entry *ce = NULL; int error = 0; - struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); #define header(x) ((struct ext4_xattr_header *)(x)) @@ -805,8 +805,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, * ext4_xattr_block_set() to reliably detect modified * block */ - mb2_cache_entry_delete_block(ext4_mb_cache, hash, - bs->bh->b_blocknr); + mb_cache_entry_delete_block(ext4_mb_cache, hash, + bs->bh->b_blocknr); ea_bdebug(bs->bh, "modifying in-place"); error = ext4_xattr_set_entry(i, s); if (!error) { @@ -904,7 +904,7 @@ inserted: EXT4_C2B(EXT4_SB(sb), 1)); brelse(new_bh); - mb2_cache_entry_put(ext4_mb_cache, ce); + mb_cache_entry_put(ext4_mb_cache, ce); ce = NULL; new_bh = NULL; goto inserted; @@ -919,8 +919,8 @@ inserted: if (error) goto cleanup_dquot; } - mb2_cache_entry_touch(ext4_mb_cache, ce); - mb2_cache_entry_put(ext4_mb_cache, ce); + mb_cache_entry_touch(ext4_mb_cache, ce); + mb_cache_entry_put(ext4_mb_cache, ce); ce = NULL; } else if (bs->bh && s->base == bs->bh->b_data) { /* We were modifying this block in-place. */ @@ -985,7 +985,7 @@ getblk_failed: cleanup: if (ce) - mb2_cache_entry_put(ext4_mb_cache, ce); + mb_cache_entry_put(ext4_mb_cache, ce); brelse(new_bh); if (!(bs->bh && s->base == bs->bh->b_data)) kfree(s->base); @@ -1546,13 +1546,13 @@ cleanup: * Returns 0, or a negative error number on failure. 
*/ static void -ext4_xattr_cache_insert(struct mb2_cache *ext4_mb_cache, struct buffer_head *bh) +ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh) { __u32 hash = le32_to_cpu(BHDR(bh)->h_hash); int error; - error = mb2_cache_entry_create(ext4_mb_cache, GFP_NOFS, hash, - bh->b_blocknr); + error = mb_cache_entry_create(ext4_mb_cache, GFP_NOFS, hash, + bh->b_blocknr); if (error) { if (error == -EBUSY) ea_bdebug(bh, "already in cache"); @@ -1610,16 +1610,16 @@ ext4_xattr_cmp(struct ext4_xattr_header *header1, */ static struct buffer_head * ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header, - struct mb2_cache_entry **pce) + struct mb_cache_entry **pce) { __u32 hash = le32_to_cpu(header->h_hash); - struct mb2_cache_entry *ce; - struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); + struct mb_cache_entry *ce; + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); if (!header->h_hash) return NULL; /* never share */ ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); - ce = mb2_cache_entry_find_first(ext4_mb_cache, hash); + ce = mb_cache_entry_find_first(ext4_mb_cache, hash); while (ce) { struct buffer_head *bh; @@ -1638,7 +1638,7 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header, return bh; } brelse(bh); - ce = mb2_cache_entry_find_next(ext4_mb_cache, ce); + ce = mb_cache_entry_find_next(ext4_mb_cache, ce); } return NULL; } @@ -1713,15 +1713,15 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *header, #define HASH_BUCKET_BITS 10 -struct mb2_cache * +struct mb_cache * ext4_xattr_create_cache(void) { - return mb2_cache_create(HASH_BUCKET_BITS); + return mb_cache_create(HASH_BUCKET_BITS); } -void ext4_xattr_destroy_cache(struct mb2_cache *cache) +void ext4_xattr_destroy_cache(struct mb_cache *cache) { if (cache) - mb2_cache_destroy(cache); + mb_cache_destroy(cache); } diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h index 10b0f7323ed6..69dd3e6566e0 100644 --- a/fs/ext4/xattr.h +++ b/fs/ext4/xattr.h @@ -123,8 +123,8 @@ extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_ibody_find *is); -extern struct mb2_cache *ext4_xattr_create_cache(void); -extern void ext4_xattr_destroy_cache(struct mb2_cache *); +extern struct mb_cache *ext4_xattr_create_cache(void); +extern void ext4_xattr_destroy_cache(struct mb_cache *); #ifdef CONFIG_EXT4_FS_SECURITY extern int ext4_init_security(handle_t *handle, struct inode *inode, diff --git a/fs/mbcache.c b/fs/mbcache.c new file mode 100644 index 000000000000..4241b633f155 --- /dev/null +++ b/fs/mbcache.c @@ -0,0 +1,424 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Mbcache is a simple key-value store. Keys need not be unique, however + * key-value pairs are expected to be unique (we use this fact in + * mb_cache_entry_delete_block()). + * + * Ext2 and ext4 use this cache for deduplication of extended attribute blocks. + * They use hash of a block contents as a key and block number as a value. + * That's why keys need not be unique (different xattr blocks may end up having + * the same hash). However block number always uniquely identifies a cache + * entry. + * + * We provide functions for creation and removal of entries, search by key, + * and a special "delete entry with given key-value pair" operation. Fixed + * size hash table is used for fast key lookups. 
+ */ + +struct mb_cache { + /* Hash table of entries */ + struct hlist_bl_head *c_hash; + /* log2 of hash table size */ + int c_bucket_bits; + /* Maximum entries in cache to avoid degrading hash too much */ + int c_max_entries; + /* Protects c_list, c_entry_count */ + spinlock_t c_list_lock; + struct list_head c_list; + /* Number of entries in cache */ + unsigned long c_entry_count; + struct shrinker c_shrink; + /* Work for shrinking when the cache has too many entries */ + struct work_struct c_shrink_work; +}; + +static struct kmem_cache *mb_entry_cache; + +static unsigned long mb_cache_shrink(struct mb_cache *cache, + unsigned int nr_to_scan); + +static inline bool mb_cache_entry_referenced(struct mb_cache_entry *entry) +{ + return entry->_e_hash_list_head & 1; +} + +static inline void mb_cache_entry_set_referenced(struct mb_cache_entry *entry) +{ + entry->_e_hash_list_head |= 1; +} + +static inline void mb_cache_entry_clear_referenced( + struct mb_cache_entry *entry) +{ + entry->_e_hash_list_head &= ~1; +} + +static inline struct hlist_bl_head *mb_cache_entry_head( + struct mb_cache_entry *entry) +{ + return (struct hlist_bl_head *) + (entry->_e_hash_list_head & ~1); +} + +/* + * Number of entries to reclaim synchronously when there are too many entries + * in cache + */ +#define SYNC_SHRINK_BATCH 64 + +/* + * mb_cache_entry_create - create entry in cache + * @cache - cache where the entry should be created + * @mask - gfp mask with which the entry should be allocated + * @key - key of the entry + * @block - block that contains data + * + * Creates entry in @cache with key @key and records that data is stored in + * block @block. The function returns -EBUSY if entry with the same key + * and for the same block already exists in cache. Otherwise 0 is returned. 
+ */ +int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, + sector_t block) +{ + struct mb_cache_entry *entry, *dup; + struct hlist_bl_node *dup_node; + struct hlist_bl_head *head; + + /* Schedule background reclaim if there are too many entries */ + if (cache->c_entry_count >= cache->c_max_entries) + schedule_work(&cache->c_shrink_work); + /* Do some sync reclaim if background reclaim cannot keep up */ + if (cache->c_entry_count >= 2*cache->c_max_entries) + mb_cache_shrink(cache, SYNC_SHRINK_BATCH); + + entry = kmem_cache_alloc(mb_entry_cache, mask); + if (!entry) + return -ENOMEM; + + INIT_LIST_HEAD(&entry->e_list); + /* One ref for hash, one ref returned */ + atomic_set(&entry->e_refcnt, 1); + entry->e_key = key; + entry->e_block = block; + head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)]; + entry->_e_hash_list_head = (unsigned long)head; + hlist_bl_lock(head); + hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) { + if (dup->e_key == key && dup->e_block == block) { + hlist_bl_unlock(head); + kmem_cache_free(mb_entry_cache, entry); + return -EBUSY; + } + } + hlist_bl_add_head(&entry->e_hash_list, head); + hlist_bl_unlock(head); + + spin_lock(&cache->c_list_lock); + list_add_tail(&entry->e_list, &cache->c_list); + /* Grab ref for LRU list */ + atomic_inc(&entry->e_refcnt); + cache->c_entry_count++; + spin_unlock(&cache->c_list_lock); + + return 0; +} +EXPORT_SYMBOL(mb_cache_entry_create); + +void __mb_cache_entry_free(struct mb_cache_entry *entry) +{ + kmem_cache_free(mb_entry_cache, entry); +} +EXPORT_SYMBOL(__mb_cache_entry_free); + +static struct mb_cache_entry *__entry_find(struct mb_cache *cache, + struct mb_cache_entry *entry, + u32 key) +{ + struct mb_cache_entry *old_entry = entry; + struct hlist_bl_node *node; + struct hlist_bl_head *head; + + if (entry) + head = mb_cache_entry_head(entry); + else + head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)]; + hlist_bl_lock(head); + if (entry && !hlist_bl_unhashed(&entry->e_hash_list)) + node = entry->e_hash_list.next; + else + node = hlist_bl_first(head); + while (node) { + entry = hlist_bl_entry(node, struct mb_cache_entry, + e_hash_list); + if (entry->e_key == key) { + atomic_inc(&entry->e_refcnt); + goto out; + } + node = node->next; + } + entry = NULL; +out: + hlist_bl_unlock(head); + if (old_entry) + mb_cache_entry_put(cache, old_entry); + + return entry; +} + +/* + * mb_cache_entry_find_first - find the first entry in cache with given key + * @cache: cache where we should search + * @key: key to look for + * + * Search in @cache for entry with key @key. Grabs reference to the first + * entry found and returns the entry. + */ +struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, + u32 key) +{ + return __entry_find(cache, NULL, key); +} +EXPORT_SYMBOL(mb_cache_entry_find_first); + +/* + * mb_cache_entry_find_next - find next entry in cache with the same + * @cache: cache where we should search + * @entry: entry to start search from + * + * Finds next entry in the hash chain which has the same key as @entry. + * If @entry is unhashed (which can happen when deletion of entry races + * with the search), finds the first entry in the hash chain. The function + * drops reference to @entry and returns with a reference to the found entry. 
+ */ +struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache, + struct mb_cache_entry *entry) +{ + return __entry_find(cache, entry, entry->e_key); +} +EXPORT_SYMBOL(mb_cache_entry_find_next); + +/* mb_cache_entry_delete_block - remove information about block from cache + * @cache - cache we work with + * @key - key of the entry to remove + * @block - block containing data for @key + * + * Remove entry from cache @cache with key @key with data stored in @block. + */ +void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key, + sector_t block) +{ + struct hlist_bl_node *node; + struct hlist_bl_head *head; + struct mb_cache_entry *entry; + + head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)]; + hlist_bl_lock(head); + hlist_bl_for_each_entry(entry, node, head, e_hash_list) { + if (entry->e_key == key && entry->e_block == block) { + /* We keep hash list reference to keep entry alive */ + hlist_bl_del_init(&entry->e_hash_list); + hlist_bl_unlock(head); + spin_lock(&cache->c_list_lock); + if (!list_empty(&entry->e_list)) { + list_del_init(&entry->e_list); + cache->c_entry_count--; + atomic_dec(&entry->e_refcnt); + } + spin_unlock(&cache->c_list_lock); + mb_cache_entry_put(cache, entry); + return; + } + } + hlist_bl_unlock(head); +} +EXPORT_SYMBOL(mb_cache_entry_delete_block); + +/* mb_cache_entry_touch - cache entry got used + * @cache - cache the entry belongs to + * @entry - entry that got used + * + * Marks entry as used to give hit higher chances of surviving in cache. + */ +void mb_cache_entry_touch(struct mb_cache *cache, + struct mb_cache_entry *entry) +{ + mb_cache_entry_set_referenced(entry); +} +EXPORT_SYMBOL(mb_cache_entry_touch); + +static unsigned long mb_cache_count(struct shrinker *shrink, + struct shrink_control *sc) +{ + struct mb_cache *cache = container_of(shrink, struct mb_cache, + c_shrink); + + return cache->c_entry_count; +} + +/* Shrink number of entries in cache */ +static unsigned long mb_cache_shrink(struct mb_cache *cache, + unsigned int nr_to_scan) +{ + struct mb_cache_entry *entry; + struct hlist_bl_head *head; + unsigned int shrunk = 0; + + spin_lock(&cache->c_list_lock); + while (nr_to_scan-- && !list_empty(&cache->c_list)) { + entry = list_first_entry(&cache->c_list, + struct mb_cache_entry, e_list); + if (mb_cache_entry_referenced(entry)) { + mb_cache_entry_clear_referenced(entry); + list_move_tail(&cache->c_list, &entry->e_list); + continue; + } + list_del_init(&entry->e_list); + cache->c_entry_count--; + /* + * We keep LRU list reference so that entry doesn't go away + * from under us. 
+ */ + spin_unlock(&cache->c_list_lock); + head = mb_cache_entry_head(entry); + hlist_bl_lock(head); + if (!hlist_bl_unhashed(&entry->e_hash_list)) { + hlist_bl_del_init(&entry->e_hash_list); + atomic_dec(&entry->e_refcnt); + } + hlist_bl_unlock(head); + if (mb_cache_entry_put(cache, entry)) + shrunk++; + cond_resched(); + spin_lock(&cache->c_list_lock); + } + spin_unlock(&cache->c_list_lock); + + return shrunk; +} + +static unsigned long mb_cache_scan(struct shrinker *shrink, + struct shrink_control *sc) +{ + int nr_to_scan = sc->nr_to_scan; + struct mb_cache *cache = container_of(shrink, struct mb_cache, + c_shrink); + return mb_cache_shrink(cache, nr_to_scan); +} + +/* We shrink 1/X of the cache when we have too many entries in it */ +#define SHRINK_DIVISOR 16 + +static void mb_cache_shrink_worker(struct work_struct *work) +{ + struct mb_cache *cache = container_of(work, struct mb_cache, + c_shrink_work); + mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR); +} + +/* + * mb_cache_create - create cache + * @bucket_bits: log2 of the hash table size + * + * Create cache for keys with 2^bucket_bits hash entries. + */ +struct mb_cache *mb_cache_create(int bucket_bits) +{ + struct mb_cache *cache; + int bucket_count = 1 << bucket_bits; + int i; + + if (!try_module_get(THIS_MODULE)) + return NULL; + + cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL); + if (!cache) + goto err_out; + cache->c_bucket_bits = bucket_bits; + cache->c_max_entries = bucket_count << 4; + INIT_LIST_HEAD(&cache->c_list); + spin_lock_init(&cache->c_list_lock); + cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head), + GFP_KERNEL); + if (!cache->c_hash) { + kfree(cache); + goto err_out; + } + for (i = 0; i < bucket_count; i++) + INIT_HLIST_BL_HEAD(&cache->c_hash[i]); + + cache->c_shrink.count_objects = mb_cache_count; + cache->c_shrink.scan_objects = mb_cache_scan; + cache->c_shrink.seeks = DEFAULT_SEEKS; + register_shrinker(&cache->c_shrink); + + INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker); + + return cache; + +err_out: + module_put(THIS_MODULE); + return NULL; +} +EXPORT_SYMBOL(mb_cache_create); + +/* + * mb_cache_destroy - destroy cache + * @cache: the cache to destroy + * + * Free all entries in cache and cache itself. Caller must make sure nobody + * (except shrinker) can reach @cache when calling this. + */ +void mb_cache_destroy(struct mb_cache *cache) +{ + struct mb_cache_entry *entry, *next; + + unregister_shrinker(&cache->c_shrink); + + /* + * We don't bother with any locking. Cache must not be used at this + * point. 
+ */ + list_for_each_entry_safe(entry, next, &cache->c_list, e_list) { + if (!hlist_bl_unhashed(&entry->e_hash_list)) { + hlist_bl_del_init(&entry->e_hash_list); + atomic_dec(&entry->e_refcnt); + } else + WARN_ON(1); + list_del(&entry->e_list); + WARN_ON(atomic_read(&entry->e_refcnt) != 1); + mb_cache_entry_put(cache, entry); + } + kfree(cache->c_hash); + kfree(cache); + module_put(THIS_MODULE); +} +EXPORT_SYMBOL(mb_cache_destroy); + +static int __init mbcache_init(void) +{ + mb_entry_cache = kmem_cache_create("mbcache", + sizeof(struct mb_cache_entry), 0, + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL); + BUG_ON(!mb_entry_cache); + return 0; +} + +static void __exit mbcache_exit(void) +{ + kmem_cache_destroy(mb_entry_cache); +} + +module_init(mbcache_init) +module_exit(mbcache_exit) + +MODULE_AUTHOR("Jan Kara "); +MODULE_DESCRIPTION("Meta block cache (for extended attributes)"); +MODULE_LICENSE("GPL"); diff --git a/fs/mbcache2.c b/fs/mbcache2.c deleted file mode 100644 index 49f7a6feaa83..000000000000 --- a/fs/mbcache2.c +++ /dev/null @@ -1,424 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Mbcache is a simple key-value store. Keys need not be unique, however - * key-value pairs are expected to be unique (we use this fact in - * mb2_cache_entry_delete_block()). - * - * Ext2 and ext4 use this cache for deduplication of extended attribute blocks. - * They use hash of a block contents as a key and block number as a value. - * That's why keys need not be unique (different xattr blocks may end up having - * the same hash). However block number always uniquely identifies a cache - * entry. - * - * We provide functions for creation and removal of entries, search by key, - * and a special "delete entry with given key-value pair" operation. Fixed - * size hash table is used for fast key lookups. - */ - -struct mb2_cache { - /* Hash table of entries */ - struct hlist_bl_head *c_hash; - /* log2 of hash table size */ - int c_bucket_bits; - /* Maximum entries in cache to avoid degrading hash too much */ - int c_max_entries; - /* Protects c_list, c_entry_count */ - spinlock_t c_list_lock; - struct list_head c_list; - /* Number of entries in cache */ - unsigned long c_entry_count; - struct shrinker c_shrink; - /* Work for shrinking when the cache has too many entries */ - struct work_struct c_shrink_work; -}; - -static struct kmem_cache *mb2_entry_cache; - -static unsigned long mb2_cache_shrink(struct mb2_cache *cache, - unsigned int nr_to_scan); - -static inline bool mb2_cache_entry_referenced(struct mb2_cache_entry *entry) -{ - return entry->_e_hash_list_head & 1; -} - -static inline void mb2_cache_entry_set_referenced(struct mb2_cache_entry *entry) -{ - entry->_e_hash_list_head |= 1; -} - -static inline void mb2_cache_entry_clear_referenced( - struct mb2_cache_entry *entry) -{ - entry->_e_hash_list_head &= ~1; -} - -static inline struct hlist_bl_head *mb2_cache_entry_head( - struct mb2_cache_entry *entry) -{ - return (struct hlist_bl_head *) - (entry->_e_hash_list_head & ~1); -} - -/* - * Number of entries to reclaim synchronously when there are too many entries - * in cache - */ -#define SYNC_SHRINK_BATCH 64 - -/* - * mb2_cache_entry_create - create entry in cache - * @cache - cache where the entry should be created - * @mask - gfp mask with which the entry should be allocated - * @key - key of the entry - * @block - block that contains data - * - * Creates entry in @cache with key @key and records that data is stored in - * block @block. 
The function returns -EBUSY if entry with the same key - * and for the same block already exists in cache. Otherwise 0 is returned. - */ -int mb2_cache_entry_create(struct mb2_cache *cache, gfp_t mask, u32 key, - sector_t block) -{ - struct mb2_cache_entry *entry, *dup; - struct hlist_bl_node *dup_node; - struct hlist_bl_head *head; - - /* Schedule background reclaim if there are too many entries */ - if (cache->c_entry_count >= cache->c_max_entries) - schedule_work(&cache->c_shrink_work); - /* Do some sync reclaim if background reclaim cannot keep up */ - if (cache->c_entry_count >= 2*cache->c_max_entries) - mb2_cache_shrink(cache, SYNC_SHRINK_BATCH); - - entry = kmem_cache_alloc(mb2_entry_cache, mask); - if (!entry) - return -ENOMEM; - - INIT_LIST_HEAD(&entry->e_list); - /* One ref for hash, one ref returned */ - atomic_set(&entry->e_refcnt, 1); - entry->e_key = key; - entry->e_block = block; - head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)]; - entry->_e_hash_list_head = (unsigned long)head; - hlist_bl_lock(head); - hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) { - if (dup->e_key == key && dup->e_block == block) { - hlist_bl_unlock(head); - kmem_cache_free(mb2_entry_cache, entry); - return -EBUSY; - } - } - hlist_bl_add_head(&entry->e_hash_list, head); - hlist_bl_unlock(head); - - spin_lock(&cache->c_list_lock); - list_add_tail(&entry->e_list, &cache->c_list); - /* Grab ref for LRU list */ - atomic_inc(&entry->e_refcnt); - cache->c_entry_count++; - spin_unlock(&cache->c_list_lock); - - return 0; -} -EXPORT_SYMBOL(mb2_cache_entry_create); - -void __mb2_cache_entry_free(struct mb2_cache_entry *entry) -{ - kmem_cache_free(mb2_entry_cache, entry); -} -EXPORT_SYMBOL(__mb2_cache_entry_free); - -static struct mb2_cache_entry *__entry_find(struct mb2_cache *cache, - struct mb2_cache_entry *entry, - u32 key) -{ - struct mb2_cache_entry *old_entry = entry; - struct hlist_bl_node *node; - struct hlist_bl_head *head; - - if (entry) - head = mb2_cache_entry_head(entry); - else - head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)]; - hlist_bl_lock(head); - if (entry && !hlist_bl_unhashed(&entry->e_hash_list)) - node = entry->e_hash_list.next; - else - node = hlist_bl_first(head); - while (node) { - entry = hlist_bl_entry(node, struct mb2_cache_entry, - e_hash_list); - if (entry->e_key == key) { - atomic_inc(&entry->e_refcnt); - goto out; - } - node = node->next; - } - entry = NULL; -out: - hlist_bl_unlock(head); - if (old_entry) - mb2_cache_entry_put(cache, old_entry); - - return entry; -} - -/* - * mb2_cache_entry_find_first - find the first entry in cache with given key - * @cache: cache where we should search - * @key: key to look for - * - * Search in @cache for entry with key @key. Grabs reference to the first - * entry found and returns the entry. - */ -struct mb2_cache_entry *mb2_cache_entry_find_first(struct mb2_cache *cache, - u32 key) -{ - return __entry_find(cache, NULL, key); -} -EXPORT_SYMBOL(mb2_cache_entry_find_first); - -/* - * mb2_cache_entry_find_next - find next entry in cache with the same - * @cache: cache where we should search - * @entry: entry to start search from - * - * Finds next entry in the hash chain which has the same key as @entry. - * If @entry is unhashed (which can happen when deletion of entry races - * with the search), finds the first entry in the hash chain. The function - * drops reference to @entry and returns with a reference to the found entry. 
- */ -struct mb2_cache_entry *mb2_cache_entry_find_next(struct mb2_cache *cache, - struct mb2_cache_entry *entry) -{ - return __entry_find(cache, entry, entry->e_key); -} -EXPORT_SYMBOL(mb2_cache_entry_find_next); - -/* mb2_cache_entry_delete_block - remove information about block from cache - * @cache - cache we work with - * @key - key of the entry to remove - * @block - block containing data for @key - * - * Remove entry from cache @cache with key @key with data stored in @block. - */ -void mb2_cache_entry_delete_block(struct mb2_cache *cache, u32 key, - sector_t block) -{ - struct hlist_bl_node *node; - struct hlist_bl_head *head; - struct mb2_cache_entry *entry; - - head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)]; - hlist_bl_lock(head); - hlist_bl_for_each_entry(entry, node, head, e_hash_list) { - if (entry->e_key == key && entry->e_block == block) { - /* We keep hash list reference to keep entry alive */ - hlist_bl_del_init(&entry->e_hash_list); - hlist_bl_unlock(head); - spin_lock(&cache->c_list_lock); - if (!list_empty(&entry->e_list)) { - list_del_init(&entry->e_list); - cache->c_entry_count--; - atomic_dec(&entry->e_refcnt); - } - spin_unlock(&cache->c_list_lock); - mb2_cache_entry_put(cache, entry); - return; - } - } - hlist_bl_unlock(head); -} -EXPORT_SYMBOL(mb2_cache_entry_delete_block); - -/* mb2_cache_entry_touch - cache entry got used - * @cache - cache the entry belongs to - * @entry - entry that got used - * - * Marks entry as used to give hit higher chances of surviving in cache. - */ -void mb2_cache_entry_touch(struct mb2_cache *cache, - struct mb2_cache_entry *entry) -{ - mb2_cache_entry_set_referenced(entry); -} -EXPORT_SYMBOL(mb2_cache_entry_touch); - -static unsigned long mb2_cache_count(struct shrinker *shrink, - struct shrink_control *sc) -{ - struct mb2_cache *cache = container_of(shrink, struct mb2_cache, - c_shrink); - - return cache->c_entry_count; -} - -/* Shrink number of entries in cache */ -static unsigned long mb2_cache_shrink(struct mb2_cache *cache, - unsigned int nr_to_scan) -{ - struct mb2_cache_entry *entry; - struct hlist_bl_head *head; - unsigned int shrunk = 0; - - spin_lock(&cache->c_list_lock); - while (nr_to_scan-- && !list_empty(&cache->c_list)) { - entry = list_first_entry(&cache->c_list, - struct mb2_cache_entry, e_list); - if (mb2_cache_entry_referenced(entry)) { - mb2_cache_entry_clear_referenced(entry); - list_move_tail(&cache->c_list, &entry->e_list); - continue; - } - list_del_init(&entry->e_list); - cache->c_entry_count--; - /* - * We keep LRU list reference so that entry doesn't go away - * from under us. 
- */ - spin_unlock(&cache->c_list_lock); - head = mb2_cache_entry_head(entry); - hlist_bl_lock(head); - if (!hlist_bl_unhashed(&entry->e_hash_list)) { - hlist_bl_del_init(&entry->e_hash_list); - atomic_dec(&entry->e_refcnt); - } - hlist_bl_unlock(head); - if (mb2_cache_entry_put(cache, entry)) - shrunk++; - cond_resched(); - spin_lock(&cache->c_list_lock); - } - spin_unlock(&cache->c_list_lock); - - return shrunk; -} - -static unsigned long mb2_cache_scan(struct shrinker *shrink, - struct shrink_control *sc) -{ - int nr_to_scan = sc->nr_to_scan; - struct mb2_cache *cache = container_of(shrink, struct mb2_cache, - c_shrink); - return mb2_cache_shrink(cache, nr_to_scan); -} - -/* We shrink 1/X of the cache when we have too many entries in it */ -#define SHRINK_DIVISOR 16 - -static void mb2_cache_shrink_worker(struct work_struct *work) -{ - struct mb2_cache *cache = container_of(work, struct mb2_cache, - c_shrink_work); - mb2_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR); -} - -/* - * mb2_cache_create - create cache - * @bucket_bits: log2 of the hash table size - * - * Create cache for keys with 2^bucket_bits hash entries. - */ -struct mb2_cache *mb2_cache_create(int bucket_bits) -{ - struct mb2_cache *cache; - int bucket_count = 1 << bucket_bits; - int i; - - if (!try_module_get(THIS_MODULE)) - return NULL; - - cache = kzalloc(sizeof(struct mb2_cache), GFP_KERNEL); - if (!cache) - goto err_out; - cache->c_bucket_bits = bucket_bits; - cache->c_max_entries = bucket_count << 4; - INIT_LIST_HEAD(&cache->c_list); - spin_lock_init(&cache->c_list_lock); - cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head), - GFP_KERNEL); - if (!cache->c_hash) { - kfree(cache); - goto err_out; - } - for (i = 0; i < bucket_count; i++) - INIT_HLIST_BL_HEAD(&cache->c_hash[i]); - - cache->c_shrink.count_objects = mb2_cache_count; - cache->c_shrink.scan_objects = mb2_cache_scan; - cache->c_shrink.seeks = DEFAULT_SEEKS; - register_shrinker(&cache->c_shrink); - - INIT_WORK(&cache->c_shrink_work, mb2_cache_shrink_worker); - - return cache; - -err_out: - module_put(THIS_MODULE); - return NULL; -} -EXPORT_SYMBOL(mb2_cache_create); - -/* - * mb2_cache_destroy - destroy cache - * @cache: the cache to destroy - * - * Free all entries in cache and cache itself. Caller must make sure nobody - * (except shrinker) can reach @cache when calling this. - */ -void mb2_cache_destroy(struct mb2_cache *cache) -{ - struct mb2_cache_entry *entry, *next; - - unregister_shrinker(&cache->c_shrink); - - /* - * We don't bother with any locking. Cache must not be used at this - * point. 
- */ - list_for_each_entry_safe(entry, next, &cache->c_list, e_list) { - if (!hlist_bl_unhashed(&entry->e_hash_list)) { - hlist_bl_del_init(&entry->e_hash_list); - atomic_dec(&entry->e_refcnt); - } else - WARN_ON(1); - list_del(&entry->e_list); - WARN_ON(atomic_read(&entry->e_refcnt) != 1); - mb2_cache_entry_put(cache, entry); - } - kfree(cache->c_hash); - kfree(cache); - module_put(THIS_MODULE); -} -EXPORT_SYMBOL(mb2_cache_destroy); - -static int __init mb2cache_init(void) -{ - mb2_entry_cache = kmem_cache_create("mbcache", - sizeof(struct mb2_cache_entry), 0, - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL); - BUG_ON(!mb2_entry_cache); - return 0; -} - -static void __exit mb2cache_exit(void) -{ - kmem_cache_destroy(mb2_entry_cache); -} - -module_init(mb2cache_init) -module_exit(mb2cache_exit) - -MODULE_AUTHOR("Jan Kara "); -MODULE_DESCRIPTION("Meta block cache (for extended attributes)"); -MODULE_LICENSE("GPL"); diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h new file mode 100644 index 000000000000..a74a1f3082fb --- /dev/null +++ b/include/linux/mbcache.h @@ -0,0 +1,53 @@ +#ifndef _LINUX_MBCACHE_H +#define _LINUX_MBCACHE_H + +#include +#include +#include +#include +#include + +struct mb_cache; + +struct mb_cache_entry { + /* List of entries in cache - protected by cache->c_list_lock */ + struct list_head e_list; + /* Hash table list - protected by bitlock in e_hash_list_head */ + struct hlist_bl_node e_hash_list; + atomic_t e_refcnt; + /* Key in hash - stable during lifetime of the entry */ + u32 e_key; + /* Block number of hashed block - stable during lifetime of the entry */ + sector_t e_block; + /* + * Head of hash list (for list bit lock) - stable. Combined with + * referenced bit of entry + */ + unsigned long _e_hash_list_head; +}; + +struct mb_cache *mb_cache_create(int bucket_bits); +void mb_cache_destroy(struct mb_cache *cache); + +int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, + sector_t block); +void __mb_cache_entry_free(struct mb_cache_entry *entry); +static inline int mb_cache_entry_put(struct mb_cache *cache, + struct mb_cache_entry *entry) +{ + if (!atomic_dec_and_test(&entry->e_refcnt)) + return 0; + __mb_cache_entry_free(entry); + return 1; +} + +void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key, + sector_t block); +struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, + u32 key); +struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache, + struct mb_cache_entry *entry); +void mb_cache_entry_touch(struct mb_cache *cache, + struct mb_cache_entry *entry); + +#endif /* _LINUX_MBCACHE_H */ diff --git a/include/linux/mbcache2.h b/include/linux/mbcache2.h deleted file mode 100644 index c934843a6a31..000000000000 --- a/include/linux/mbcache2.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef _LINUX_MB2CACHE_H -#define _LINUX_MB2CACHE_H - -#include -#include -#include -#include -#include - -struct mb2_cache; - -struct mb2_cache_entry { - /* List of entries in cache - protected by cache->c_list_lock */ - struct list_head e_list; - /* Hash table list - protected by bitlock in e_hash_list_head */ - struct hlist_bl_node e_hash_list; - atomic_t e_refcnt; - /* Key in hash - stable during lifetime of the entry */ - u32 e_key; - /* Block number of hashed block - stable during lifetime of the entry */ - sector_t e_block; - /* - * Head of hash list (for list bit lock) - stable. 
Combined with - * referenced bit of entry - */ - unsigned long _e_hash_list_head; -}; - -struct mb2_cache *mb2_cache_create(int bucket_bits); -void mb2_cache_destroy(struct mb2_cache *cache); - -int mb2_cache_entry_create(struct mb2_cache *cache, gfp_t mask, u32 key, - sector_t block); -void __mb2_cache_entry_free(struct mb2_cache_entry *entry); -static inline int mb2_cache_entry_put(struct mb2_cache *cache, - struct mb2_cache_entry *entry) -{ - if (!atomic_dec_and_test(&entry->e_refcnt)) - return 0; - __mb2_cache_entry_free(entry); - return 1; -} - -void mb2_cache_entry_delete_block(struct mb2_cache *cache, u32 key, - sector_t block); -struct mb2_cache_entry *mb2_cache_entry_find_first(struct mb2_cache *cache, - u32 key); -struct mb2_cache_entry *mb2_cache_entry_find_next(struct mb2_cache *cache, - struct mb2_cache_entry *entry); -void mb2_cache_entry_touch(struct mb2_cache *cache, - struct mb2_cache_entry *entry); - -#endif /* _LINUX_MB2CACHE_H */ -- cgit v1.2.3 From 6048c64b26097a0ffbd966866b599f990e674e9b Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Mon, 22 Feb 2016 22:44:04 -0500 Subject: mbcache: add reusable flag to cache entries To reduce amount of damage caused by single bad block, we limit number of inodes sharing an xattr block to 1024. Thus there can be more xattr blocks with the same contents when there are lots of files with the same extended attributes. These xattr blocks naturally result in hash collisions and can form long hash chains and we unnecessarily check each such block only to find out we cannot use it because it is already shared by too many inodes. Add a reusable flag to cache entries which is cleared when a cache entry has reached its maximum refcount. Cache entries which are not marked reusable are skipped by mb_cache_entry_find_{first,next}. This significantly speeds up mbcache when there are many same xattr blocks. For example for xattr-bench with 5 values and each process handling 20000 files, the run for 64 processes is 25x faster with this patch. Even for 8 processes the speedup is almost 3x. We have also verified that for situations where there is only one xattr block of each kind, the patch doesn't have a measurable cost. 
[JK: Remove handling of setting the same value since it is not needed
anymore, check for races in e_reusable setting, improve changelog, add
measurements]

Signed-off-by: Andreas Gruenbacher
Signed-off-by: Jan Kara
Signed-off-by: Theodore Ts'o
---
 fs/ext2/xattr.c         |  2 +-
 fs/ext4/xattr.c         | 66 +++++++++++++++++++++++++++++++------------------
 fs/mbcache.c            | 38 +++++++++++++++++++++++++---
 include/linux/mbcache.h |  5 +++-
 4 files changed, 81 insertions(+), 30 deletions(-)

(limited to 'fs/ext2')

diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 71d58c2d7a19..1a5e3bff0b63 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -823,7 +823,7 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
 	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
 	int error;
 
-	error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr);
+	error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 1);
 	if (error) {
 		if (error == -EBUSY) {
 			ea_bdebug(bh, "already in cache (%d cache entries)",
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index b661ae8332e3..0441e055c8e8 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -545,6 +545,8 @@ static void
 ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 			 struct buffer_head *bh)
 {
+	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+	u32 hash, ref;
 	int error = 0;
 
 	BUFFER_TRACE(bh, "get_write_access");
@@ -553,23 +555,34 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 		goto out;
 
 	lock_buffer(bh);
-	if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
-		__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
-
+	hash = le32_to_cpu(BHDR(bh)->h_hash);
+	ref = le32_to_cpu(BHDR(bh)->h_refcount);
+	if (ref == 1) {
 		ea_bdebug(bh, "refcount now=0; freeing");
 		/*
 		 * This must happen under buffer lock for
 		 * ext4_xattr_block_set() to reliably detect freed block
 		 */
-		mb_cache_entry_delete_block(EXT4_GET_MB_CACHE(inode), hash,
-					    bh->b_blocknr);
+		mb_cache_entry_delete_block(ext4_mb_cache, hash, bh->b_blocknr);
 		get_bh(bh);
 		unlock_buffer(bh);
 		ext4_free_blocks(handle, inode, bh, 0, 1,
 				 EXT4_FREE_BLOCKS_METADATA |
 				 EXT4_FREE_BLOCKS_FORGET);
 	} else {
-		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
+		ref--;
+		BHDR(bh)->h_refcount = cpu_to_le32(ref);
+		if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) {
+			struct mb_cache_entry *ce;
+
+			ce = mb_cache_entry_get(ext4_mb_cache, hash,
+						bh->b_blocknr);
+			if (ce) {
+				ce->e_reusable = 1;
+				mb_cache_entry_put(ext4_mb_cache, ce);
+			}
+		}
+
 		/*
 		 * Beware of this ugliness: Releasing of xattr block references
 		 * from different inodes can race and so we have to protect
@@ -872,6 +885,8 @@ inserted:
 			if (new_bh == bs->bh)
 				ea_bdebug(new_bh, "keeping");
 			else {
+				u32 ref;
+
 				/* The old block is released after updating
 				   the inode. */
 				error = dquot_alloc_block(inode,
@@ -886,15 +901,18 @@ inserted:
 				lock_buffer(new_bh);
 				/*
 				 * We have to be careful about races with
-				 * freeing or rehashing of xattr block. Once we
-				 * hold buffer lock xattr block's state is
-				 * stable so we can check whether the block got
-				 * freed / rehashed or not. Since we unhash
-				 * mbcache entry under buffer lock when freeing
-				 * / rehashing xattr block, checking whether
-				 * entry is still hashed is reliable.
+				 * freeing, rehashing or adding references to
+				 * xattr block. Once we hold buffer lock xattr
+				 * block's state is stable so we can check
+				 * whether the block got freed / rehashed or
+				 * not. Since we unhash mbcache entry under
+				 * buffer lock when freeing / rehashing xattr
+				 * block, checking whether entry is still
+				 * hashed is reliable. Same rules hold for
+				 * e_reusable handling.
 				 */
-				if (hlist_bl_unhashed(&ce->e_hash_list)) {
+				if (hlist_bl_unhashed(&ce->e_hash_list) ||
+				    !ce->e_reusable) {
 					/*
 					 * Undo everything and check mbcache
 					 * again.
@@ -909,9 +927,12 @@ inserted:
 					new_bh = NULL;
 					goto inserted;
 				}
-				le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
+				ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
+				BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
+				if (ref >= EXT4_XATTR_REFCOUNT_MAX)
+					ce->e_reusable = 0;
 				ea_bdebug(new_bh, "reusing; refcount now=%d",
-					  le32_to_cpu(BHDR(new_bh)->h_refcount));
+					  ref);
 				unlock_buffer(new_bh);
 				error = ext4_handle_dirty_xattr_block(handle,
 								      inode,
@@ -1566,11 +1587,14 @@ cleanup:
 static void
 ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
 {
-	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
+	struct ext4_xattr_header *header = BHDR(bh);
+	__u32 hash = le32_to_cpu(header->h_hash);
+	int reusable = le32_to_cpu(header->h_refcount) <
+		       EXT4_XATTR_REFCOUNT_MAX;
 	int error;
 
 	error = mb_cache_entry_create(ext4_mb_cache, GFP_NOFS, hash,
-				      bh->b_blocknr);
+				      bh->b_blocknr, reusable);
 	if (error) {
 		if (error == -EBUSY)
 			ea_bdebug(bh, "already in cache");
@@ -1645,12 +1669,6 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
 		if (!bh) {
 			EXT4_ERROR_INODE(inode, "block %lu read error",
 					 (unsigned long) ce->e_block);
-		} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
-			   EXT4_XATTR_REFCOUNT_MAX) {
-			ea_idebug(inode, "block %lu refcount %d>=%d",
-				  (unsigned long) ce->e_block,
-				  le32_to_cpu(BHDR(bh)->h_refcount),
-				  EXT4_XATTR_REFCOUNT_MAX);
 		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
 			*pce = ce;
 			return bh;
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 903be151dcfe..eccda3a02de6 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -63,13 +63,14 @@ static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
  * @mask - gfp mask with which the entry should be allocated
  * @key - key of the entry
  * @block - block that contains data
+ * @reusable - is the block reusable by other inodes?
  *
  * Creates entry in @cache with key @key and records that data is stored in
  * block @block. The function returns -EBUSY if entry with the same key
  * and for the same block already exists in cache. Otherwise 0 is returned.
  */
 int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
-			  sector_t block)
+			  sector_t block, bool reusable)
 {
 	struct mb_cache_entry *entry, *dup;
 	struct hlist_bl_node *dup_node;
@@ -91,6 +92,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 	atomic_set(&entry->e_refcnt, 1);
 	entry->e_key = key;
 	entry->e_block = block;
+	entry->e_reusable = reusable;
 	head = mb_cache_entry_head(cache, key);
 	hlist_bl_lock(head);
 	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
@@ -137,7 +139,7 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
 	while (node) {
 		entry = hlist_bl_entry(node, struct mb_cache_entry,
 				       e_hash_list);
-		if (entry->e_key == key) {
+		if (entry->e_key == key && entry->e_reusable) {
 			atomic_inc(&entry->e_refcnt);
 			goto out;
 		}
@@ -184,10 +186,38 @@ struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
 }
 EXPORT_SYMBOL(mb_cache_entry_find_next);
 
+/*
+ * mb_cache_entry_get - get a cache entry by block number (and key)
+ * @cache - cache we work with
+ * @key - key of block number @block
+ * @block - block number
+ */
+struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
+					  sector_t block)
+{
+	struct hlist_bl_node *node;
+	struct hlist_bl_head *head;
+	struct mb_cache_entry *entry;
+
+	head = mb_cache_entry_head(cache, key);
+	hlist_bl_lock(head);
+	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
+		if (entry->e_key == key && entry->e_block == block) {
+			atomic_inc(&entry->e_refcnt);
+			goto out;
+		}
+	}
+	entry = NULL;
+out:
+	hlist_bl_unlock(head);
+	return entry;
+}
+EXPORT_SYMBOL(mb_cache_entry_get);
+
 /* mb_cache_entry_delete_block - remove information about block from cache
  * @cache - cache we work with
- * @key - key of the entry to remove
- * @block - block containing data for @key
+ * @key - key of block @block
+ * @block - block number
  *
  * Remove entry from cache @cache with key @key with data stored in @block.
  */
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 607e6968542e..86c9a8b480c5 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -18,6 +18,7 @@ struct mb_cache_entry {
 	/* Key in hash - stable during lifetime of the entry */
 	u32			e_key;
 	u32			e_referenced:1;
+	u32			e_reusable:1;
 	/* Block number of hashed block - stable during lifetime of the entry */
 	sector_t		e_block;
 };
@@ -26,7 +27,7 @@ struct mb_cache *mb_cache_create(int bucket_bits);
 void mb_cache_destroy(struct mb_cache *cache);
 
 int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
-			  sector_t block);
+			  sector_t block, bool reusable);
 void __mb_cache_entry_free(struct mb_cache_entry *entry);
 static inline int mb_cache_entry_put(struct mb_cache *cache,
 				     struct mb_cache_entry *entry)
@@ -39,6 +40,8 @@ static inline int mb_cache_entry_put(struct mb_cache *cache,
 
 void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
 				 sector_t block);
+struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
+					  sector_t block);
 struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
 						 u32 key);
 struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
-- cgit v1.2.3
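
As an illustration of the lookup side (a sketch, not part of the patches
above): mb_cache_entry_find_first/next now return only entries that still
have e_reusable set, and a caller must re-check the entry under the buffer
lock because the block can be freed, rehashed or become fully shared between
the cache lookup and lock_buffer(). example_find_reusable() is a hypothetical
helper; real callers such as ext4_xattr_block_set() also compare the block
contents (ext4_xattr_cmp()) before reusing a block, and keep the cache entry
around so they can clear e_reusable when the refcount reaches the limit.

	/* Sketch only: find a still-usable cached xattr block for @hash. */
	static struct buffer_head *example_find_reusable(struct super_block *sb,
							 struct mb_cache *cache,
							 u32 hash)
	{
		struct mb_cache_entry *ce;

		ce = mb_cache_entry_find_first(cache, hash);
		while (ce) {
			struct buffer_head *bh = sb_bread(sb, ce->e_block);

			if (bh) {
				lock_buffer(bh);
				if (!hlist_bl_unhashed(&ce->e_hash_list) &&
				    ce->e_reusable) {
					/*
					 * Still hashed and reusable - safe for
					 * the caller to bump h_refcount under
					 * the buffer lock and reuse the block.
					 */
					mb_cache_entry_put(cache, ce);
					return bh;	/* returned locked */
				}
				unlock_buffer(bh);
				brelse(bh);
			}
			/* find_next drops the reference to the old entry. */
			ce = mb_cache_entry_find_next(cache, ce);
		}
		return NULL;
	}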