From 41a2ee75aab0290a5899677437736ec715dcd1b6 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 17 Jan 2020 09:02:21 -0500 Subject: btrfs: introduce per-inode file extent tree In order to keep track of where we have file extents on disk, and thus where it is safe to adjust the i_size to, we need to have a tree in place to keep track of the contiguous areas we have file extents for. Add helpers to use this tree, as it's not required for NO_HOLES file systems. We will use this by setting DIRTY for areas where we know we have file extent items set, and clearing it when we remove file extent items for truncation. Reviewed-by: Filipe Manana Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 36df977b64d9..36dbf4e7c0dc 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2859,6 +2859,11 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, struct btrfs_file_extent_item *fi, const bool new_inline, struct extent_map *em); +int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start, + u64 len); +int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start, + u64 len); +void btrfs_inode_safe_disk_i_size_write(struct inode *inode, u64 new_i_size); /* inode.c */ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, -- cgit v1.2.3 From 0d4b0463011de06288d8ca80a873a97a7d99a948 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 24 Jan 2020 09:32:53 -0500 Subject: btrfs: export and rename free_fs_info We're going to start freeing roots and doing other complicated things in free_fs_info, so we need to move it to disk-io.c and export it in order to use things like btrfs_put_fs_root().
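For context, a sketch of where this function is heading once roots become refcounted; this is a simplified preview of the leak-check patch later in this series, not the code added by this commit:

/* sketch: roots get dropped via btrfs_put_fs_root() instead of kfree() */
void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
	btrfs_put_fs_root(fs_info->extent_root);
	btrfs_put_fs_root(fs_info->tree_root);
	/* ... the remaining roots are put the same way ... */
	kfree(fs_info->super_copy);
	kfree(fs_info->super_for_commit);
	kvfree(fs_info);
}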
Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 17 ----------------- fs/btrfs/disk-io.c | 18 ++++++++++++++++++ fs/btrfs/disk-io.h | 1 + fs/btrfs/super.c | 6 +++--- 4 files changed, 22 insertions(+), 20 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 36dbf4e7c0dc..8c1a8f275773 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2695,23 +2695,6 @@ static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info) return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info); } -static inline void free_fs_info(struct btrfs_fs_info *fs_info) -{ - kfree(fs_info->balance_ctl); - kfree(fs_info->delayed_root); - kfree(fs_info->extent_root); - kfree(fs_info->tree_root); - kfree(fs_info->chunk_root); - kfree(fs_info->dev_root); - kfree(fs_info->csum_root); - kfree(fs_info->quota_root); - kfree(fs_info->uuid_root); - kfree(fs_info->free_space_root); - kfree(fs_info->super_copy); - kfree(fs_info->super_for_commit); - kvfree(fs_info); -} - /* tree mod log functions from ctree.c */ u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 43af9f9b8583..ce36a4b9c8a3 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1523,6 +1523,24 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, return ret; } +void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) +{ + kfree(fs_info->balance_ctl); + kfree(fs_info->delayed_root); + kfree(fs_info->extent_root); + kfree(fs_info->tree_root); + kfree(fs_info->chunk_root); + kfree(fs_info->dev_root); + kfree(fs_info->csum_root); + kfree(fs_info->quota_root); + kfree(fs_info->uuid_root); + kfree(fs_info->free_space_root); + kfree(fs_info->super_copy); + kfree(fs_info->super_for_commit); + kvfree(fs_info); +} + + struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_key *location, bool check_ref) diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 8add2e14aab1..97e7ac474a52 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -68,6 +68,7 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_key *key, bool check_ref); +void btrfs_free_fs_info(struct btrfs_fs_info *fs_info); int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info); void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 2c459d29e099..a70c89b3a223 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1580,7 +1580,7 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type, if (s->s_root) { btrfs_close_devices(fs_devices); - free_fs_info(fs_info); + btrfs_free_fs_info(fs_info); if ((flags ^ s->s_flags) & SB_RDONLY) error = -EBUSY; } else { @@ -1603,7 +1603,7 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type, error_close_devices: btrfs_close_devices(fs_devices); error_fs_info: - free_fs_info(fs_info); + btrfs_free_fs_info(fs_info); error_sec_opts: security_free_mnt_opts(&new_sec_opts); return ERR_PTR(error); @@ -2179,7 +2179,7 @@ static void btrfs_kill_super(struct super_block *sb) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); kill_anon_super(sb); - free_fs_info(fs_info); + btrfs_free_fs_info(fs_info); } static struct file_system_type btrfs_fs_type = { -- cgit v1.2.3 From bd647ce385ec110fe7796267b6555873e48e44eb 
Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 24 Jan 2020 09:33:00 -0500 Subject: btrfs: add a leak check for roots Now that we're going to start relying on getting ref counting right for roots, add a list to track allocated roots and print out any roots that aren't freed up at free_fs_info time. Hide this behind CONFIG_BTRFS_DEBUG because this will just be used for developers to verify they aren't breaking things. Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 5 +++++ fs/btrfs/disk-io.c | 28 ++++++++++++++++++++++++++++ fs/btrfs/disk-io.h | 9 ++++++++- fs/btrfs/tests/btrfs-tests.c | 1 + 4 files changed, 42 insertions(+), 1 deletion(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8c1a8f275773..ffd99c3f64db 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -947,6 +947,7 @@ struct btrfs_fs_info { #ifdef CONFIG_BTRFS_DEBUG struct kobject *debug_kobj; struct kobject *discard_debug_kobj; + struct list_head allocated_roots; #endif }; @@ -1149,6 +1150,10 @@ struct btrfs_root { #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS u64 alloc_bytenr; #endif + +#ifdef CONFIG_BTRFS_DEBUG + struct list_head leak_list; +#endif }; struct btrfs_clone_extent_info { diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3e4df0c6a663..c2838a48327f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1202,6 +1202,12 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, spin_lock_init(&root->root_item_lock); btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks); +#ifdef CONFIG_BTRFS_DEBUG + INIT_LIST_HEAD(&root->leak_list); + spin_lock(&fs_info->fs_roots_radix_lock); + list_add_tail(&root->leak_list, &fs_info->allocated_roots); + spin_unlock(&fs_info->fs_roots_radix_lock); +#endif } static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info, @@ -1531,6 +1537,24 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, return ret; } +void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info) +{ +#ifdef CONFIG_BTRFS_DEBUG + struct btrfs_root *root; + + while (!list_empty(&fs_info->allocated_roots)) { + root = list_first_entry(&fs_info->allocated_roots, + struct btrfs_root, leak_list); + btrfs_err(fs_info, "leaked root %llu-%llu refcount %d", + root->root_key.objectid, root->root_key.offset, + refcount_read(&root->refs)); + while (refcount_read(&root->refs) > 1) + btrfs_put_fs_root(root); + btrfs_put_fs_root(root); + } +#endif +} + void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) { percpu_counter_destroy(&fs_info->dirty_metadata_bytes); @@ -1551,6 +1575,7 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) btrfs_put_fs_root(fs_info->uuid_root); btrfs_put_fs_root(fs_info->free_space_root); btrfs_put_fs_root(fs_info->fs_root); + btrfs_check_leaked_roots(fs_info); kfree(fs_info->super_copy); kfree(fs_info->super_for_commit); kvfree(fs_info); @@ -2677,6 +2702,9 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) INIT_LIST_HEAD(&fs_info->space_info); INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); INIT_LIST_HEAD(&fs_info->unused_bgs); +#ifdef CONFIG_BTRFS_DEBUG + INIT_LIST_HEAD(&fs_info->allocated_roots); +#endif extent_map_tree_init(&fs_info->mapping_tree); btrfs_init_block_rsv(&fs_info->global_block_rsv, BTRFS_BLOCK_RSV_GLOBAL); diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 2414d572bc9a..04a29f961527 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -39,6 +39,7 @@ static inline u64 
btrfs_sb_offset(int mirror) struct btrfs_device; struct btrfs_fs_devices; +void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info); void btrfs_init_fs_info(struct btrfs_fs_info *fs_info); int btrfs_verify_level_key(struct extent_buffer *eb, int level, struct btrfs_key *first_key, u64 parent_transid); @@ -101,8 +102,14 @@ static inline void btrfs_put_fs_root(struct btrfs_root *root) { if (!root) return; - if (refcount_dec_and_test(&root->refs)) + if (refcount_dec_and_test(&root->refs)) { +#ifdef CONFIG_BTRFS_DEBUG + spin_lock(&root->fs_info->fs_roots_radix_lock); + list_del_init(&root->leak_list); + spin_unlock(&root->fs_info->fs_roots_radix_lock); +#endif kfree(root); + } } void btrfs_mark_buffer_dirty(struct extent_buffer *buf); diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 683381a692bc..609abca4fe3a 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -193,6 +193,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info) btrfs_free_fs_roots(fs_info); cleanup_srcu_struct(&fs_info->subvol_srcu); kfree(fs_info->super_copy); + btrfs_check_leaked_roots(fs_info); kfree(fs_info->fs_devices); kfree(fs_info); } -- cgit v1.2.3 From b25c36f84b59a64fd5815f341b6ddbd8a8a2bb56 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Mon, 20 Jan 2020 16:09:09 +0200 Subject: btrfs: Make btrfs_pin_extent take trans handle Preparation for switching pinned extent tracking to a per-transaction basis. Reviewed-by: Josef Bacik Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 4 ++-- fs/btrfs/extent-tree.c | 17 ++++++----------- 2 files changed, 8 insertions(+), 13 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ffd99c3f64db..2f0ca1cb1fb9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2463,8 +2463,8 @@ int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, u64 offset, int metadata, u64 *refs, u64 *flags); -int btrfs_pin_extent(struct btrfs_fs_info *fs_info, - u64 bytenr, u64 num, int reserved); +int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num, + int reserved); int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes); int btrfs_exclude_logged_extents(struct extent_buffer *eb); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index da3d9eaf3b22..5a90d9d8b665 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1705,8 +1705,7 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans, if (TRANS_ABORTED(trans)) { if (insert_reserved) - btrfs_pin_extent(trans->fs_info, node->bytenr, - node->num_bytes, 1); + btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1); return 0; } @@ -1721,8 +1720,7 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans, else BUG(); if (ret && insert_reserved) - btrfs_pin_extent(trans->fs_info, node->bytenr, - node->num_bytes, 1); + btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1); return ret; } @@ -1867,8 +1865,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, spin_unlock(&delayed_refs->lock); if (head->must_insert_reserved) { - btrfs_pin_extent(fs_info, head->bytenr, - head->num_bytes, 1); + btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1); if (head->is_data) { ret = btrfs_del_csums(trans, fs_info->csum_root, head->bytenr, 
head->num_bytes); @@ -2612,14 +2609,12 @@ static int pin_down_extent(struct btrfs_block_group *cache, return 0; } -int btrfs_pin_extent(struct btrfs_fs_info *fs_info, +int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, int reserved) { struct btrfs_block_group *cache; - ASSERT(fs_info->running_transaction); - - cache = btrfs_lookup_block_group(fs_info, bytenr); + cache = btrfs_lookup_block_group(trans->fs_info, bytenr); BUG_ON(!cache); /* Logic error */ pin_down_extent(cache, bytenr, num_bytes, reserved); @@ -3345,7 +3340,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref) (ref->type == BTRFS_REF_DATA && ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) { /* unlocks the pinned mutex */ - btrfs_pin_extent(fs_info, ref->bytenr, ref->len, 1); + btrfs_pin_extent(trans, ref->bytenr, ref->len, 1); old_ref_mod = new_ref_mod = 0; ret = 0; } else if (ref->type == BTRFS_REF_METADATA) { -- cgit v1.2.3 From 7bfc10070573591163dae7be9bb09552d4e6dee5 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Mon, 20 Jan 2020 16:09:12 +0200 Subject: btrfs: Make btrfs_pin_reserved_extent take transaction handle btrfs_pin_reserved_extent is now only called with a valid transaction, so exploit that fact and make it take a transaction handle. This is preparation for tracking pinned extents on a per-transaction basis. Reviewed-by: Josef Bacik Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 +- fs/btrfs/extent-tree.c | 8 +++++--- fs/btrfs/tree-log.c | 6 +++--- 3 files changed, 9 insertions(+), 7 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2f0ca1cb1fb9..3518cbd07015 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2501,7 +2501,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref); int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len, int delalloc); -int btrfs_pin_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, +int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start, u64 len); void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info); int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 5a90d9d8b665..6a61e972593e 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4184,14 +4184,16 @@ int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, return 0; } -int btrfs_pin_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len) +int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start, + u64 len) { struct btrfs_block_group *cache; int ret = 0; - cache = btrfs_lookup_block_group(fs_info, start); + cache = btrfs_lookup_block_group(trans->fs_info, start); if (!cache) { - btrfs_err(fs_info, "unable to find block group for %llu", start); + btrfs_err(trans->fs_info, "unable to find block group for %llu", + start); return -ENOSPC; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c27e121603ac..8471f3c5525e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2745,7 +2745,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, btrfs_clean_tree_block(next); btrfs_wait_tree_block_writeback(next); btrfs_tree_unlock(next); - ret = btrfs_pin_reserved_extent(fs_info, + ret = btrfs_pin_reserved_extent(trans, bytenr, blocksize); if (ret) { free_extent_buffer(next); @@ -2814,7 +2814,7 @@ static noinline int
walk_up_log_tree(struct btrfs_trans_handle *trans, btrfs_clean_tree_block(next); btrfs_wait_tree_block_writeback(next); btrfs_tree_unlock(next); - ret = btrfs_pin_reserved_extent(fs_info, + ret = btrfs_pin_reserved_extent(trans, path->nodes[*level]->start, path->nodes[*level]->len); if (ret) @@ -2896,7 +2896,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, btrfs_clean_tree_block(next); btrfs_wait_tree_block_writeback(next); btrfs_tree_unlock(next); - ret = btrfs_pin_reserved_extent(fs_info, + ret = btrfs_pin_reserved_extent(trans, next->start, next->len); if (ret) goto out; -- cgit v1.2.3 From 9fce5704542c5e97d458cc97f9cecef253f02f06 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Mon, 20 Jan 2020 16:09:13 +0200 Subject: btrfs: Make btrfs_pin_extent_for_log_replay take transaction handle Preparation for refactoring pinned extents tracking. Reviewed-by: Josef Bacik Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 +- fs/btrfs/extent-tree.c | 4 ++-- fs/btrfs/tree-log.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 3518cbd07015..22d0cb0019d1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2465,7 +2465,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, u64 offset, int metadata, u64 *refs, u64 *flags); int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num, int reserved); -int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, +int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes); int btrfs_exclude_logged_extents(struct extent_buffer *eb); int btrfs_cross_ref_exist(struct btrfs_root *root, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 6a61e972593e..e6ea01d76659 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2626,13 +2626,13 @@ int btrfs_pin_extent(struct btrfs_trans_handle *trans, /* * this function must be called within transaction */ -int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, +int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes) { struct btrfs_block_group *cache; int ret; - cache = btrfs_lookup_block_group(fs_info, bytenr); + cache = btrfs_lookup_block_group(trans->fs_info, bytenr); if (!cache) return -EINVAL; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 8471f3c5525e..19c107be9ef6 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -313,7 +313,7 @@ static int process_one_buffer(struct btrfs_root *log, } if (wc->pin) - ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start, + ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start, eb->len); if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) { @@ -6189,7 +6189,7 @@ again: * each subsequent pass. */ if (ret == -ENOENT) - ret = btrfs_pin_extent_for_log_replay(fs_info, + ret = btrfs_pin_extent_for_log_replay(trans, log->node->start, log->node->len); free_extent_buffer(log->node); -- cgit v1.2.3 From fe119a6eeb670585e29dbe3932e00ad29ae8f5f9 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Mon, 20 Jan 2020 16:09:18 +0200 Subject: btrfs: switch to per-transaction pinned extents This commit flips the switch to start tracking/processing pinned extents on a per-transaction basis. It mostly replaces all references from btrfs_fs_info::(pinned_extents|freed_extents[]) to btrfs_transaction::pinned_extents. 
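In code terms the flip is small; a condensed before/after of the pinning path, lifted from the pin_down_extent() hunk below:

/* before: one of two fs_info-global trees, swapped at commit time */
set_extent_dirty(fs_info->pinned_extents, bytenr,
		 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);

/* after: the tree belongs to the transaction being committed */
set_extent_dirty(&trans->transaction->pinned_extents, bytenr,
		 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);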
Two notable modifications warrant explicit mention: clean_pinned_extents now takes a reference to the previously running transaction, and the call to btrfs_destroy_pinned_extent is removed since transactions are going to be cleaned up in btrfs_cleanup_one_transaction. Reviewed-by: Josef Bacik Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 38 +++++++++++++++++++++++++------------- fs/btrfs/ctree.h | 4 ++-- fs/btrfs/disk-io.c | 30 +++++------------------------- fs/btrfs/extent-io-tree.h | 4 ++-- fs/btrfs/extent-tree.c | 31 ++++++++----------------------- fs/btrfs/free-space-cache.c | 2 +- fs/btrfs/transaction.c | 2 ++ fs/btrfs/transaction.h | 1 + include/trace/events/btrfs.h | 4 ++-- 9 files changed, 48 insertions(+), 68 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 9fec78a8c759..b8f39a679064 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -460,7 +460,7 @@ u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end int ret; while (start < end) { - ret = find_first_extent_bit(info->pinned_extents, start, + ret = find_first_extent_bit(&info->excluded_extents, start, &extent_start, &extent_end, EXTENT_DIRTY | EXTENT_UPTODATE, NULL); @@ -1248,30 +1248,42 @@ out: return ret; } -static bool clean_pinned_extents(struct btrfs_block_group *bg) +static bool clean_pinned_extents(struct btrfs_trans_handle *trans, + struct btrfs_block_group *bg) { struct btrfs_fs_info *fs_info = bg->fs_info; + struct btrfs_transaction *prev_trans = NULL; const u64 start = bg->start; const u64 end = start + bg->length - 1; int ret; + spin_lock(&fs_info->trans_lock); + if (trans->transaction->list.prev != &fs_info->trans_list) { + prev_trans = list_last_entry(&trans->transaction->list, + struct btrfs_transaction, list); + refcount_inc(&prev_trans->use_count); + } + spin_unlock(&fs_info->trans_lock); + /* * Hold the unused_bg_unpin_mutex lock to avoid racing with * btrfs_finish_extent_commit(). If we are at transaction N, another * task might be running finish_extent_commit() for the previous * transaction N - 1, and have seen a range belonging to the block - * group in freed_extents[] before we were able to clear the whole - * block group range from freed_extents[]. This means that task can - * lookup for the block group after we unpinned it from freed_extents - * and removed it, leading to a BUG_ON() at unpin_extent_range(). + * group in pinned_extents before we were able to clear the whole block + * group range from pinned_extents. This means that task can lookup for + * the block group after we unpinned it from pinned_extents and removed + * it, leading to a BUG_ON() at unpin_extent_range(). */ mutex_lock(&fs_info->unused_bg_unpin_mutex); - ret = clear_extent_bits(&fs_info->freed_extents[0], start, end, - EXTENT_DIRTY); - if (ret) - goto err; + if (prev_trans) { + ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, + EXTENT_DIRTY); + if (ret) + goto err; + } - ret = clear_extent_bits(&fs_info->freed_extents[1], start, end, + ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, EXTENT_DIRTY); if (ret) goto err; @@ -1380,7 +1392,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * We could have pending pinned extents for this block group, * just delete them, we don't care about them anymore.
*/ - if (!clean_pinned_extents(block_group)) + if (!clean_pinned_extents(trans, block_group)) goto end_trans; /* @@ -2890,7 +2902,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, &cache->space_info->total_bytes_pinned, num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH); - set_extent_dirty(info->pinned_extents, + set_extent_dirty(&trans->transaction->pinned_extents, bytenr, bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 22d0cb0019d1..bb237d577725 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -596,8 +596,8 @@ struct btrfs_fs_info { /* keep track of unallocated space */ atomic64_t free_chunk_space; - struct extent_io_tree freed_extents[2]; - struct extent_io_tree *pinned_extents; + /* Track ranges which are used by log trees blocks/logged data extents */ + struct extent_io_tree excluded_extents; /* logical->physical extent mapping */ struct extent_map_tree mapping_tree; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 194c98a61095..e1e111c8b08b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2075,10 +2075,8 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) btrfs_drop_and_free_fs_root(fs_info, gang[i]); } - if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { + if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) btrfs_free_log_root_tree(NULL, fs_info); - btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); - } } static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) @@ -2749,11 +2747,8 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) fs_info->block_group_cache_tree = RB_ROOT; fs_info->first_logical_byte = (u64)-1; - extent_io_tree_init(fs_info, &fs_info->freed_extents[0], - IO_TREE_FS_INFO_FREED_EXTENTS0, NULL); - extent_io_tree_init(fs_info, &fs_info->freed_extents[1], - IO_TREE_FS_INFO_FREED_EXTENTS1, NULL); - fs_info->pinned_extents = &fs_info->freed_extents[0]; + extent_io_tree_init(fs_info, &fs_info->excluded_extents, + IO_TREE_FS_EXCLUDED_EXTENTS, NULL); set_bit(BTRFS_FS_BARRIER, &fs_info->flags); mutex_init(&fs_info->ordered_operations_mutex); @@ -4434,16 +4429,12 @@ static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info, } static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info, - struct extent_io_tree *pinned_extents) + struct extent_io_tree *unpin) { - struct extent_io_tree *unpin; u64 start; u64 end; int ret; - bool loop = true; - unpin = pinned_extents; -again: while (1) { struct extent_state *cached_state = NULL; @@ -4468,15 +4459,6 @@ again: cond_resched(); } - if (loop) { - if (unpin == &fs_info->freed_extents[0]) - unpin = &fs_info->freed_extents[1]; - else - unpin = &fs_info->freed_extents[0]; - loop = false; - goto again; - } - return 0; } @@ -4567,8 +4549,7 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages, EXTENT_DIRTY); - btrfs_destroy_pinned_extent(fs_info, - fs_info->pinned_extents); + btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents); cur_trans->state =TRANS_STATE_COMPLETED; wake_up(&cur_trans->commit_wait); @@ -4620,7 +4601,6 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info) btrfs_destroy_all_ordered_extents(fs_info); btrfs_destroy_delayed_inodes(fs_info); btrfs_assert_delayed_root_empty(fs_info); - btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); btrfs_destroy_all_delalloc_inodes(fs_info); mutex_unlock(&fs_info->transaction_kthread_mutex); diff --git 
a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h index cc3037f9765e..b4a7bad3e82e 100644 --- a/fs/btrfs/extent-io-tree.h +++ b/fs/btrfs/extent-io-tree.h @@ -36,8 +36,8 @@ struct io_failure_record; #define CHUNK_TRIMMED EXTENT_DEFRAG enum { - IO_TREE_FS_INFO_FREED_EXTENTS0, - IO_TREE_FS_INFO_FREED_EXTENTS1, + IO_TREE_FS_PINNED_EXTENTS, + IO_TREE_FS_EXCLUDED_EXTENTS, IO_TREE_INODE_IO, IO_TREE_INODE_IO_FAILURE, IO_TREE_RELOC_BLOCKS, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f97e631aaca5..136fffb76428 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -64,10 +64,8 @@ int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info, u64 start, u64 num_bytes) { u64 end = start + num_bytes - 1; - set_extent_bits(&fs_info->freed_extents[0], - start, end, EXTENT_UPTODATE); - set_extent_bits(&fs_info->freed_extents[1], - start, end, EXTENT_UPTODATE); + set_extent_bits(&fs_info->excluded_extents, start, end, + EXTENT_UPTODATE); return 0; } @@ -79,10 +77,8 @@ void btrfs_free_excluded_extents(struct btrfs_block_group *cache) start = cache->start; end = start + cache->length - 1; - clear_extent_bits(&fs_info->freed_extents[0], - start, end, EXTENT_UPTODATE); - clear_extent_bits(&fs_info->freed_extents[1], - start, end, EXTENT_UPTODATE); + clear_extent_bits(&fs_info->excluded_extents, start, end, + EXTENT_UPTODATE); } static u64 generic_ref_to_space_flags(struct btrfs_ref *ref) @@ -2605,7 +2601,7 @@ static int pin_down_extent(struct btrfs_trans_handle *trans, percpu_counter_add_batch(&cache->space_info->total_bytes_pinned, num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH); - set_extent_dirty(fs_info->pinned_extents, bytenr, + set_extent_dirty(&trans->transaction->pinned_extents, bytenr, bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); return 0; } @@ -2761,11 +2757,6 @@ void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info) } } - if (fs_info->pinned_extents == &fs_info->freed_extents[0]) - fs_info->pinned_extents = &fs_info->freed_extents[1]; - else - fs_info->pinned_extents = &fs_info->freed_extents[0]; - up_write(&fs_info->commit_root_sem); btrfs_update_global_block_rsv(fs_info); @@ -2906,10 +2897,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) u64 end; int ret; - if (fs_info->pinned_extents == &fs_info->freed_extents[0]) - unpin = &fs_info->freed_extents[1]; - else - unpin = &fs_info->freed_extents[0]; + unpin = &trans->transaction->pinned_extents; while (!TRANS_ABORTED(trans)) { struct extent_state *cached_state = NULL; @@ -2921,12 +2909,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) mutex_unlock(&fs_info->unused_bg_unpin_mutex); break; } - if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { - clear_extent_bits(&fs_info->freed_extents[0], start, + if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) + clear_extent_bits(&fs_info->excluded_extents, start, end, EXTENT_UPTODATE); - clear_extent_bits(&fs_info->freed_extents[1], start, - end, EXTENT_UPTODATE); - } if (btrfs_test_opt(fs_info, DISCARD_SYNC)) ret = btrfs_discard_extent(fs_info, start, diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 9d6372139547..bd9c4b5da549 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1086,7 +1086,7 @@ static noinline_for_stack int write_pinned_extent_entries( * We shouldn't have switched the pinned extents yet so this is the * right one */ - unpin = block_group->fs_info->pinned_extents; + unpin = &trans->transaction->pinned_extents; start = block_group->start; 
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 37680351b7c3..fdfdfc426539 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -336,6 +336,8 @@ loop: list_add_tail(&cur_trans->list, &fs_info->trans_list); extent_io_tree_init(fs_info, &cur_trans->dirty_pages, IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode); + extent_io_tree_init(fs_info, &cur_trans->pinned_extents, + IO_TREE_FS_PINNED_EXTENTS, NULL); fs_info->generation++; cur_trans->transid = fs_info->generation; fs_info->running_transaction = cur_trans; diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 453cea7c7a72..31ae8d273065 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -71,6 +71,7 @@ struct btrfs_transaction { */ struct list_head io_bgs; struct list_head dropped_roots; + struct extent_io_tree pinned_extents; /* * we need to make sure block group deletion doesn't race with diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index f1f2b6a04052..bcbc763b8814 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -81,8 +81,8 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS); #define show_extent_io_tree_owner(owner) \ __print_symbolic(owner, \ - { IO_TREE_FS_INFO_FREED_EXTENTS0, "FREED_EXTENTS0" }, \ - { IO_TREE_FS_INFO_FREED_EXTENTS1, "FREED_EXTENTS1" }, \ + { IO_TREE_FS_PINNED_EXTENTS, "PINNED_EXTENTS" }, \ + { IO_TREE_FS_EXCLUDED_EXTENTS, "EXCLUDED_EXTENTS" }, \ { IO_TREE_INODE_IO, "INODE_IO" }, \ { IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE" }, \ { IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS" }, \ -- cgit v1.2.3 From 560b7a4aa2258e27ad38eb417aabebc1e2c05f5f Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 18 Feb 2020 16:56:07 +0200 Subject: btrfs: call btrfs_check_uuid_tree_entry directly in btrfs_uuid_tree_iterate btrfs_uuid_tree_iterate is called from only one place and its 2nd argument is always btrfs_check_uuid_tree_entry. Simplify btrfs_uuid_tree_iterate's signature by removing its 2nd argument and directly calling btrfs_check_uuid_tree_entry. Also move the latter into uuid-tree.c. No functional changes.
Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 4 +--- fs/btrfs/uuid-tree.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++---- fs/btrfs/volumes.c | 47 +--------------------------------------------- 3 files changed, 51 insertions(+), 53 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index bb237d577725..ad275d06e95f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2738,9 +2738,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type, u64 subid); int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type, u64 subid); -int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info, - int (*check_func)(struct btrfs_fs_info *, u8 *, u8, - u64)); +int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info); /* dir-item.c */ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c index 76b84f2397b1..dc95e954ebbe 100644 --- a/fs/btrfs/uuid-tree.c +++ b/fs/btrfs/uuid-tree.c @@ -246,9 +246,53 @@ out: return ret; } -int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info, - int (*check_func)(struct btrfs_fs_info *, u8 *, u8, - u64)) +/* + * Check if there's a matching subvolume for a given UUID + * + * Return: + * 0 check succeeded, the entry is not outdated + * > 0 if the check failed, the caller should remove the entry + * < 0 if an error occurred + */ +static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info, + u8 *uuid, u8 type, u64 subvolid) +{ + struct btrfs_key key; + int ret = 0; + struct btrfs_root *subvol_root; + + if (type != BTRFS_UUID_KEY_SUBVOL && + type != BTRFS_UUID_KEY_RECEIVED_SUBVOL) + goto out; + + key.objectid = subvolid; + key.type = BTRFS_ROOT_ITEM_KEY; + key.offset = (u64)-1; + subvol_root = btrfs_get_fs_root(fs_info, &key, true); + if (IS_ERR(subvol_root)) { + ret = PTR_ERR(subvol_root); + if (ret == -ENOENT) + ret = 1; + goto out; + } + + switch (type) { + case BTRFS_UUID_KEY_SUBVOL: + if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE)) + ret = 1; + break; + case BTRFS_UUID_KEY_RECEIVED_SUBVOL: + if (memcmp(uuid, subvol_root->root_item.received_uuid, + BTRFS_UUID_SIZE)) + ret = 1; + break; + } + btrfs_put_root(subvol_root); +out: + return ret; +} + +int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info) { struct btrfs_root *root = fs_info->uuid_root; struct btrfs_key key; @@ -305,7 +349,8 @@ again_search_slot: read_extent_buffer(leaf, &subid_le, offset, sizeof(subid_le)); subid_cpu = le64_to_cpu(subid_le); - ret = check_func(fs_info, uuid, key.type, subid_cpu); + ret = btrfs_check_uuid_tree_entry(fs_info, uuid, + key.type, subid_cpu); if (ret < 0) goto out; if (ret > 0) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d8a88866aaa3..63e499d47820 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4442,51 +4442,6 @@ out: return 0; } -/* - * Callback for btrfs_uuid_tree_iterate(). - * returns: - * 0 check succeeded, the entry is not outdated. - * < 0 if an error occurred. - * > 0 if the check failed, which means the caller shall remove the entry.
- */ -static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info, - u8 *uuid, u8 type, u64 subid) -{ - struct btrfs_key key; - int ret = 0; - struct btrfs_root *subvol_root; - - if (type != BTRFS_UUID_KEY_SUBVOL && - type != BTRFS_UUID_KEY_RECEIVED_SUBVOL) - goto out; - - key.objectid = subid; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - subvol_root = btrfs_get_fs_root(fs_info, &key, true); - if (IS_ERR(subvol_root)) { - ret = PTR_ERR(subvol_root); - if (ret == -ENOENT) - ret = 1; - goto out; - } - - switch (type) { - case BTRFS_UUID_KEY_SUBVOL: - if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE)) - ret = 1; - break; - case BTRFS_UUID_KEY_RECEIVED_SUBVOL: - if (memcmp(uuid, subvol_root->root_item.received_uuid, - BTRFS_UUID_SIZE)) - ret = 1; - break; - } - btrfs_put_root(subvol_root); -out: - return ret; -} - static int btrfs_uuid_rescan_kthread(void *data) { struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data; @@ -4497,7 +4452,7 @@ static int btrfs_uuid_rescan_kthread(void *data) * to delete all entries that contain outdated data. * 2nd step is to add all missing entries to the UUID tree. */ - ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry); + ret = btrfs_uuid_tree_iterate(fs_info); if (ret < 0) { btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret); up(&fs_info->uuid_tree_rescan_sem); -- cgit v1.2.3 From c0c907a47dccf2cf26251a8fb4a8e7a3bf79ce84 Mon Sep 17 00:00:00 2001 From: Marcos Paulo de Souza Date: Fri, 21 Feb 2020 14:56:12 +0100 Subject: btrfs: export helpers for subvolume name/id resolution The functions will be used outside of export.c and super.c to allow resolving a subvolume name from a given id, e.g. for subvolume deletion by id ioctl. Signed-off-by: Marcos Paulo de Souza Reviewed-by: David Sterba [ split from the next patch ] Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 ++ fs/btrfs/export.c | 8 ++++---- fs/btrfs/export.h | 5 +++++ fs/btrfs/super.c | 8 ++++---- 4 files changed, 15 insertions(+), 8 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ad275d06e95f..2ee7d8b4968f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2999,6 +2999,8 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, int btrfs_parse_options(struct btrfs_fs_info *info, char *options, unsigned long new_flags); int btrfs_sync_fs(struct super_block *sb, int wait); +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, + u64 subvol_objectid); static inline __printf(2, 3) __cold void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 657fd6ad6e18..25bd4317bf5a 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -57,9 +57,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, return type; } -static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, - u64 root_objectid, u32 generation, - int check_generation) +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, + u64 root_objectid, u32 generation, + int check_generation) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *root; @@ -153,7 +153,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1); } -static struct dentry *btrfs_get_parent(struct dentry *child) +struct dentry *btrfs_get_parent(struct dentry *child) { struct inode *dir = d_inode(child); struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h index 57488ecd7d4e..f32f4113c976 100644 --- a/fs/btrfs/export.h +++ b/fs/btrfs/export.h @@ -18,4 +18,9 @@ struct btrfs_fid { u64 parent_root_objectid; } __attribute__ ((packed)); +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, + u64 root_objectid, u32 generation, + int check_generation); +struct dentry *btrfs_get_parent(struct dentry *child); + #endif diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 5c16b4bcde9b..4969fcce0704 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1024,8 +1024,8 @@ out: return error; } -static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, - u64 subvol_objectid) +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, + u64 subvol_objectid) { struct btrfs_root *root = fs_info->tree_root; struct btrfs_root *fs_root = NULL; @@ -1442,8 +1442,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, goto out; } } - subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb), - subvol_objectid); + subvol_name = btrfs_get_subvol_name_from_objectid( + btrfs_sb(mnt->mnt_sb), subvol_objectid); if (IS_ERR(subvol_name)) { root = ERR_CAST(subvol_name); subvol_name = NULL; -- cgit v1.2.3 From 2992df73268f78ec9281692b9b44ae92f3933b54 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 30 Jan 2020 14:59:44 +0200 Subject: btrfs: Implement DREW lock A (D)ouble (R)eader (W)riter (E)xclusion lock is a locking primitive that allows multiple readers or multiple writers, but not readers and writers holding it concurrently. The code is factored out from the existing open-coded locking scheme used to exclude pending snapshots from nocow writers and vice-versa. The current implementation favors readers (that is, snapshot creators) over writers (nocow writers of the filesystem). The API provides lock/unlock/trylock for reads and writes.
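As a usage sketch (hypothetical call sites; the snapshot/nocow conversion in the next patch is the real user):

struct btrfs_drew_lock lock;	/* assume btrfs_drew_lock_init(&lock) succeeded */

/* "A" side, e.g. snapshot creation: excludes writers but not other readers */
btrfs_drew_read_lock(&lock);
/* ... take the snapshot ... */
btrfs_drew_read_unlock(&lock);

/* "B" side, e.g. a nocow write: excludes readers but not other writers */
if (btrfs_drew_try_write_lock(&lock)) {
	/* ... do the nocow write ... */
	btrfs_drew_write_unlock(&lock);
}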
A formal specification in TLA+, provided by Valentin Schneider, is at https://lore.kernel.org/linux-btrfs/2dcaf81c-f0d3-409e-cb29-733d8b3b4cc9@arm.com/ Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 + fs/btrfs/locking.c | 93 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/locking.h | 20 ++++++++++++ 3 files changed, 114 insertions(+) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2ee7d8b4968f..ab8151247b93 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -33,6 +33,7 @@ #include "extent_map.h" #include "async-thread.h" #include "block-rsv.h" +#include "locking.h" struct btrfs_trans_handle; struct btrfs_transaction; diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index e713900f96b6..fb647d8cf527 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -565,3 +565,96 @@ struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root) } return eb; } + +/* + * DREW locks + * ========== + * + * DREW stands for double-reader-writer-exclusion lock. It's used in situations + * where you want to provide A-B exclusion but not AA or BB. + * + * The current implementation gives more priority to readers. If a reader and a + * writer both race to acquire their respective sides of the lock the writer + * would yield its lock as soon as it detects a concurrent reader. Additionally + * if there are pending readers no new writers would be allowed to come in and + * acquire the lock. + */ + +int btrfs_drew_lock_init(struct btrfs_drew_lock *lock) +{ + int ret; + + ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL); + if (ret) + return ret; + + atomic_set(&lock->readers, 0); + init_waitqueue_head(&lock->pending_readers); + init_waitqueue_head(&lock->pending_writers); + + return 0; +} + +void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock) +{ + percpu_counter_destroy(&lock->writers); +} + +/* Return true if acquisition is successful, false otherwise */ +bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock) +{ + if (atomic_read(&lock->readers)) + return false; + + percpu_counter_inc(&lock->writers); + + /* Ensure writers count is updated before we check for pending readers */ + smp_mb(); + if (atomic_read(&lock->readers)) { + btrfs_drew_write_unlock(lock); + return false; + } + + return true; +} + +void btrfs_drew_write_lock(struct btrfs_drew_lock *lock) +{ + while (true) { + if (btrfs_drew_try_write_lock(lock)) + return; + wait_event(lock->pending_writers, !atomic_read(&lock->readers)); + } +} + +void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock) +{ + percpu_counter_dec(&lock->writers); + cond_wake_up(&lock->pending_readers); +} + +void btrfs_drew_read_lock(struct btrfs_drew_lock *lock) +{ + atomic_inc(&lock->readers); + + /* + * Ensure the pending reader count is perceived BEFORE this reader + * goes to sleep in case of active writers. This guarantees new writers + * won't be allowed and that the current reader will be woken up when + * the last active writer finishes its jobs.
+ */ + smp_mb__after_atomic(); + + wait_event(lock->pending_readers, + percpu_counter_sum(&lock->writers) == 0); +} + +void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock) +{ + /* + * atomic_dec_and_test implies a full barrier, so woken up writers + * are guaranteed to see the decrement + */ + if (atomic_dec_and_test(&lock->readers)) + wake_up(&lock->pending_writers); +} diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index 21a285883e89..d715846c10b8 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -6,6 +6,9 @@ #ifndef BTRFS_LOCKING_H #define BTRFS_LOCKING_H +#include <linux/atomic.h> +#include <linux/wait.h> +#include <linux/percpu_counter.h> #include "extent_io.h" #define BTRFS_WRITE_LOCK 1 @@ -13,6 +16,8 @@ #define BTRFS_WRITE_LOCK_BLOCKING 3 #define BTRFS_READ_LOCK_BLOCKING 4 +struct btrfs_path; + void btrfs_tree_lock(struct extent_buffer *eb); void btrfs_tree_unlock(struct extent_buffer *eb); @@ -48,4 +53,19 @@ static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) BUG(); } +struct btrfs_drew_lock { + atomic_t readers; + struct percpu_counter writers; + wait_queue_head_t pending_writers; + wait_queue_head_t pending_readers; +}; + +int btrfs_drew_lock_init(struct btrfs_drew_lock *lock); +void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock); +void btrfs_drew_write_lock(struct btrfs_drew_lock *lock); +bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock); +void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock); +void btrfs_drew_read_lock(struct btrfs_drew_lock *lock); +void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock); + #endif -- cgit v1.2.3 From dcc3eb9638c3c927f1597075e851d0a16300a876 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 30 Jan 2020 14:59:45 +0200 Subject: btrfs: convert snapshot/nocow exclusion to drew lock This patch removes all haphazard code implementing nocow writers exclusion from pending snapshot creation and switches to using the drew lock to ensure this invariant still holds. 'Readers' are snapshot creators from create_snapshot and 'writers' are nocow writers from the buffered write path or btrfs_setsize. This locking scheme allows for multiple snapshots to happen while any nocow writers are blocked, since writes to the page cache in the nocow path will make snapshots inconsistent. So for performance reasons we'd like to have the ability to run multiple concurrent snapshots, and the lock therefore favors readers in this case. And in case there aren't pending snapshots (which will be the majority of cases) we rely on the percpu writers counter to avoid cacheline contention. The main gain from using the drew lock is that it's now a lot easier to reason about the guarantees of the locking scheme and whether there is some silent breakage lurking.
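A condensed view of the two sides after the conversion (extracted from the hunks below, simplified):

/* nocow writer, check_can_nocow(): replaces btrfs_start_write_no_snapshotting() */
if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
	return -EAGAIN;

/* snapshot creation, create_snapshot(): replaces the open-coded
 * will_be_snapshotted / subv_writers->wait dance
 */
btrfs_drew_read_lock(&root->snapshot_lock);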
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 10 +++------- fs/btrfs/disk-io.c | 45 ++++++++++----------------------------------- fs/btrfs/extent-tree.c | 44 -------------------------------------------- fs/btrfs/file.c | 11 +++++------ fs/btrfs/inode.c | 8 ++++---- fs/btrfs/ioctl.c | 10 +++------- 6 files changed, 25 insertions(+), 103 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ab8151247b93..db9e872bcc79 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -957,11 +957,6 @@ static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb) return sb->s_fs_info; } -struct btrfs_subvolume_writers { - struct percpu_counter counter; - wait_queue_head_t wait; -}; - /* * The state of btrfs root */ @@ -1133,8 +1128,9 @@ struct btrfs_root { * root_item_lock. */ int dedupe_in_progress; - struct btrfs_subvolume_writers *subv_writers; - atomic_t will_be_snapshotted; + /* For exclusion of snapshot creation and nocow writes */ + struct btrfs_drew_lock snapshot_lock; + atomic_t snapshot_force_cow; /* For qgroup metadata reserved space */ diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 770d469e1d9c..06819c41e4f4 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1104,32 +1104,6 @@ void btrfs_clean_tree_block(struct extent_buffer *buf) } } -static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void) -{ - struct btrfs_subvolume_writers *writers; - int ret; - - writers = kmalloc(sizeof(*writers), GFP_NOFS); - if (!writers) - return ERR_PTR(-ENOMEM); - - ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS); - if (ret < 0) { - kfree(writers); - return ERR_PTR(ret); - } - - init_waitqueue_head(&writers->wait); - return writers; -} - -static void -btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers) -{ - percpu_counter_destroy(&writers->counter); - kfree(writers); -} - static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, u64 objectid) { @@ -1178,7 +1152,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, atomic_set(&root->log_writers, 0); atomic_set(&root->log_batch, 0); refcount_set(&root->refs, 1); - atomic_set(&root->will_be_snapshotted, 0); atomic_set(&root->snapshot_force_cow, 0); atomic_set(&root->nr_swapfiles, 0); root->log_transid = 0; @@ -1450,7 +1423,7 @@ alloc_fail: static int btrfs_init_fs_root(struct btrfs_root *root) { int ret; - struct btrfs_subvolume_writers *writers; + unsigned int nofs_flag; root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), @@ -1460,12 +1433,15 @@ static int btrfs_init_fs_root(struct btrfs_root *root) goto fail; } - writers = btrfs_alloc_subvolume_writers(); - if (IS_ERR(writers)) { - ret = PTR_ERR(writers); + /* + * We might be called under a transaction (e.g. 
indirect backref + * resolution) which could deadlock if it triggers memory reclaim + */ + nofs_flag = memalloc_nofs_save(); + ret = btrfs_drew_lock_init(&root->snapshot_lock); + memalloc_nofs_restore(nofs_flag); + if (ret) goto fail; - } - root->subv_writers = writers; if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { set_bit(BTRFS_ROOT_REF_COWS, &root->state); @@ -3961,8 +3937,7 @@ void btrfs_free_fs_root(struct btrfs_root *root) WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); if (root->anon_dev) free_anon_bdev(root->anon_dev); - if (root->subv_writers) - btrfs_free_subvolume_writers(root->subv_writers); + btrfs_drew_lock_destroy(&root->snapshot_lock); free_extent_buffer(root->node); free_extent_buffer(root->commit_root); kfree(root->free_ino_ctl); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7eef91d6c2b6..9dcd70cc3ca3 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5740,47 +5740,3 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) return bg_ret; return dev_ret; } - -/* - * btrfs_{start,end}_write_no_snapshotting() are similar to - * mnt_{want,drop}_write(), they are used to prevent some tasks from writing - * data into the page cache through nocow before the subvolume is snapshoted, - * but flush the data into disk after the snapshot creation, or to prevent - * operations while snapshotting is ongoing and that cause the snapshot to be - * inconsistent (writes followed by expanding truncates for example). - */ -void btrfs_end_write_no_snapshotting(struct btrfs_root *root) -{ - percpu_counter_dec(&root->subv_writers->counter); - cond_wake_up(&root->subv_writers->wait); -} - -int btrfs_start_write_no_snapshotting(struct btrfs_root *root) -{ - if (atomic_read(&root->will_be_snapshotted)) - return 0; - - percpu_counter_inc(&root->subv_writers->counter); - /* - * Make sure counter is updated before we check for snapshot creation. 
- */ - smp_mb(); - if (atomic_read(&root->will_be_snapshotted)) { - btrfs_end_write_no_snapshotting(root); - return 0; - } - return 1; -} - -void btrfs_wait_for_snapshot_creation(struct btrfs_root *root) -{ - while (true) { - int ret; - - ret = btrfs_start_write_no_snapshotting(root); - if (ret) - break; - wait_var_event(&root->will_be_snapshotted, - !atomic_read(&root->will_be_snapshotted)); - } -} diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index fd52ad00b6c8..8a974a82be51 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1553,8 +1553,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, u64 num_bytes; int ret; - ret = btrfs_start_write_no_snapshotting(root); - if (!ret) + if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) return -EAGAIN; lockstart = round_down(pos, fs_info->sectorsize); @@ -1569,7 +1568,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, NULL, NULL, NULL); if (ret <= 0) { ret = 0; - btrfs_end_write_no_snapshotting(root); + btrfs_drew_write_unlock(&root->snapshot_lock); } else { *write_bytes = min_t(size_t, *write_bytes , num_bytes - pos + lockstart); @@ -1675,7 +1674,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, data_reserved, pos, write_bytes); else - btrfs_end_write_no_snapshotting(root); + btrfs_drew_write_unlock(&root->snapshot_lock); break; } @@ -1779,7 +1778,7 @@ again: release_bytes = 0; if (only_release_metadata) - btrfs_end_write_no_snapshotting(root); + btrfs_drew_write_unlock(&root->snapshot_lock); if (only_release_metadata && copied > 0) { lockstart = round_down(pos, @@ -1808,7 +1807,7 @@ again: if (release_bytes) { if (only_release_metadata) { - btrfs_end_write_no_snapshotting(root); + btrfs_drew_write_unlock(&root->snapshot_lock); btrfs_delalloc_release_metadata(BTRFS_I(inode), release_bytes, true); } else { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 254f5ea17e40..1e138c83cc6e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4726,16 +4726,16 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) * truncation, it must capture all writes that happened before * this truncation. */ - btrfs_wait_for_snapshot_creation(root); + btrfs_drew_write_lock(&root->snapshot_lock); ret = btrfs_cont_expand(inode, oldsize, newsize); if (ret) { - btrfs_end_write_no_snapshotting(root); + btrfs_drew_write_unlock(&root->snapshot_lock); return ret; } trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { - btrfs_end_write_no_snapshotting(root); + btrfs_drew_write_unlock(&root->snapshot_lock); return PTR_ERR(trans); } @@ -4743,7 +4743,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) btrfs_inode_safe_disk_i_size_write(inode, 0); pagecache_isize_extended(inode, oldsize, newsize); ret = btrfs_update_inode(trans, root, inode); - btrfs_end_write_no_snapshotting(root); + btrfs_drew_write_unlock(&root->snapshot_lock); btrfs_end_transaction(trans); } else { diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 56bd3ea7fb67..6ded5e346821 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -791,11 +791,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, * possible. This is to avoid later writeback (running dealloc) to * fallback to COW mode and unexpectedly fail with ENOSPC. 
*/ - atomic_inc(&root->will_be_snapshotted); - smp_mb__after_atomic(); - /* wait for no snapshot writes */ - wait_event(root->subv_writers->wait, - percpu_counter_sum(&root->subv_writers->counter) == 0); + btrfs_drew_read_lock(&root->snapshot_lock); ret = btrfs_start_delalloc_snapshot(root); if (ret) @@ -876,8 +872,8 @@ fail: dec_and_free: if (snapshot_force_cow) atomic_dec(&root->snapshot_force_cow); - if (atomic_dec_and_test(&root->will_be_snapshotted)) - wake_up_var(&root->will_be_snapshotted); + btrfs_drew_read_unlock(&root->snapshot_lock); + free_pending: kfree(pending_snapshot->root_item); btrfs_free_path(pending_snapshot->path); -- cgit v1.2.3 From 9a8658e33d8fd45879eebc44178b2a172e76bb47 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 20 Mar 2019 13:15:57 +0100 Subject: btrfs: open code trivial helper btrfs_header_fsid The helper btrfs_header_fsid follows the naming convention of other struct accessors but does something completely different. As the offsetof calculation is clear in the context of extent buffer operations we can remove it. Reviewed-by: Johannes Thumshirn Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 5 ----- fs/btrfs/disk-io.c | 6 ++++-- 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index db9e872bcc79..f66c4ea7491d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1973,11 +1973,6 @@ static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, btrfs_set_header_flags(eb, flags); } -static inline unsigned long btrfs_header_fsid(void) -{ - return offsetof(struct btrfs_header, fsid); -} - static inline unsigned long btrfs_header_chunk_tree_uuid(const struct extent_buffer *eb) { return offsetof(struct btrfs_header, chunk_tree_uuid); } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 06819c41e4f4..2e482657a1b4 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -541,7 +541,8 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page) return -EUCLEAN; ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid, - btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0); + offsetof(struct btrfs_header, fsid), + BTRFS_FSID_SIZE) == 0); if (csum_tree_block(eb, result)) return -EINVAL; @@ -571,7 +572,8 @@ static int check_tree_block_fsid(struct extent_buffer *eb) u8 fsid[BTRFS_FSID_SIZE]; int ret = 1; - read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE); + read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid), + BTRFS_FSID_SIZE); while (fs_devices) { u8 *metadata_uuid; -- cgit v1.2.3 From c4ac75419826c7afc997bbe4da07ad6f963da22f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 20 Mar 2019 13:17:13 +0100 Subject: btrfs: open code trivial helper btrfs_header_chunk_tree_uuid The helper btrfs_header_chunk_tree_uuid follows the naming convention of other struct accessors but does something completely different. As the offsetof calculation is clear in the context of extent buffer operations we can remove it.
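Side by side, the pattern both of these patches apply (lines lifted from the hunks, fsid case shown):

/* before: an accessor-named helper that actually returns a field offset */
read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);

/* after: the offsetof() is spelled out at the call site */
read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
		   BTRFS_FSID_SIZE);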
Reviewed-by: Johannes Thumshirn Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 5 ----- fs/btrfs/disk-io.c | 3 ++- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f66c4ea7491d..1cde3f1d8f20 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1973,11 +1973,6 @@ static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, btrfs_set_header_flags(eb, flags); } -static inline unsigned long btrfs_header_chunk_tree_uuid(const struct extent_buffer *eb) -{ - return offsetof(struct btrfs_header, chunk_tree_uuid); -} - static inline int btrfs_is_leaf(const struct extent_buffer *eb) { return btrfs_header_level(eb) == 0; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 2e482657a1b4..abd7a613c8e6 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3106,7 +3106,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device chunk_root->commit_root = btrfs_root_node(chunk_root); read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, - btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); + offsetof(struct btrfs_header, chunk_tree_uuid), + BTRFS_UUID_SIZE); ret = btrfs_read_chunk_tree(fs_info); if (ret) { -- cgit v1.2.3 From 42c9d0b524cf9af180dcb788a938cdc4c678e8cb Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 20 Mar 2019 11:54:13 +0100 Subject: btrfs: simplify parameters of btrfs_set_disk_extent_flags All callers pass extent buffer start and length so the extent buffer itself should work fine. Reviewed-by: Johannes Thumshirn Reviewed-by: Anand Jain Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 4 +--- fs/btrfs/ctree.h | 2 +- fs/btrfs/extent-tree.c | 7 +++---- 3 files changed, 5 insertions(+), 8 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index b62721ac5ee8..f948435e87df 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -925,9 +925,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, if (new_flags != 0) { int level = btrfs_header_level(buf); - ret = btrfs_set_disk_extent_flags(trans, - buf->start, - buf->len, + ret = btrfs_set_disk_extent_flags(trans, buf, new_flags, level, 0); if (ret) return ret; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 1cde3f1d8f20..ea5d0675465a 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2482,7 +2482,7 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, int full_backref); int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, - u64 bytenr, u64 num_bytes, u64 flags, + struct extent_buffer *eb, u64 flags, int level, int is_data); int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9dcd70cc3ca3..161274118853 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2231,7 +2231,7 @@ out: } int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, - u64 bytenr, u64 num_bytes, u64 flags, + struct extent_buffer *eb, u64 flags, int level, int is_data) { struct btrfs_delayed_extent_op *extent_op; @@ -2247,7 +2247,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, extent_op->is_data = is_data ? 
true : false; extent_op->level = level; - ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op); + ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op); if (ret) btrfs_free_delayed_extent_op(extent_op); return ret; @@ -4741,8 +4741,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans, BUG_ON(ret); /* -ENOMEM */ ret = btrfs_dec_ref(trans, root, eb, 0); BUG_ON(ret); /* -ENOMEM */ - ret = btrfs_set_disk_extent_flags(trans, eb->start, - eb->len, flag, + ret = btrfs_set_disk_extent_flags(trans, eb, flag, btrfs_header_level(eb), 0); BUG_ON(ret); /* -ENOMEM */ wc->flags[level] |= flag; -- cgit v1.2.3 From 6a177381007b463ad611375cce526c24f12ab081 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 28 Feb 2020 13:04:17 +0000 Subject: Btrfs: move all reflink implementation code into its own file The reflink code is quite large and has been living in ioctl.c from the beginning. It has grown over the years after many bug fixes and improvements, and since I'm planning on making some further improvements to it, it's time to get it better organized by moving it into its own file, reflink.c (similar to what xfs does for example). This change only moves the code out of ioctl.c into the new file; it doesn't make any other changes. Reviewed-by: Josef Bacik Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/Makefile | 2 +- fs/btrfs/ctree.h | 3 - fs/btrfs/file.c | 1 + fs/btrfs/ioctl.c | 733 ----------------------------------------------------- fs/btrfs/reflink.c | 729 ++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/reflink.h | 12 + 6 files changed, 743 insertions(+), 737 deletions(-) create mode 100644 fs/btrfs/reflink.c create mode 100644 fs/btrfs/reflink.h (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 9a0ff3384381..e738f6206ea5 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -11,7 +11,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \ reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \ uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \ - block-rsv.o delalloc-space.o block-group.o discard.o + block-rsv.o delalloc-space.o block-group.o discard.o reflink.o btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ea5d0675465a..ecd016f7dab1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2974,9 +2974,6 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages, size_t num_pages, loff_t pos, size_t write_bytes, struct extent_state **cached); int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end); -loff_t btrfs_remap_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - loff_t len, unsigned int remap_flags); /* tree-defrag.c */ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 8a974a82be51..31c72371a164 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -27,6 +27,7 @@ #include "qgroup.h" #include "compression.h" #include "delalloc-space.h" +#include "reflink.h" static struct kmem_cache *btrfs_inode_defrag_cachep; /* diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 04899d3d775e..f8a73a28022a 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -87,10 +87,6 @@ struct
btrfs_ioctl_send_args_32 { struct btrfs_ioctl_send_args_32) #endif -static int btrfs_clone(struct inode *src, struct inode *inode, - u64 off, u64 olen, u64 olen_aligned, u64 destoff, - int no_time_update); - /* Mask out flags that are inappropriate for the given type of inode. */ static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode, unsigned int flags) @@ -3315,735 +3311,6 @@ out: return ret; } -static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1, - struct inode *inode2, u64 loff2, u64 len) -{ - unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); - unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); -} - -static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1, - struct inode *inode2, u64 loff2, u64 len) -{ - if (inode1 < inode2) { - swap(inode1, inode2); - swap(loff1, loff2); - } else if (inode1 == inode2 && loff2 < loff1) { - swap(loff1, loff2); - } - lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); - lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); -} - -static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len, - struct inode *dst, u64 dst_loff) -{ - const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize; - int ret; - - /* - * Lock destination range to serialize with concurrent readpages() and - * source range to serialize with relocation. - */ - btrfs_double_extent_lock(src, loff, dst, dst_loff, len); - ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1); - btrfs_double_extent_unlock(src, loff, dst, dst_loff, len); - - return ret; -} - -#define BTRFS_MAX_DEDUPE_LEN SZ_16M - -static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, - struct inode *dst, u64 dst_loff) -{ - int ret; - u64 i, tail_len, chunk_count; - struct btrfs_root *root_dst = BTRFS_I(dst)->root; - - spin_lock(&root_dst->root_item_lock); - if (root_dst->send_in_progress) { - btrfs_warn_rl(root_dst->fs_info, -"cannot deduplicate to root %llu while send operations are using it (%d in progress)", - root_dst->root_key.objectid, - root_dst->send_in_progress); - spin_unlock(&root_dst->root_item_lock); - return -EAGAIN; - } - root_dst->dedupe_in_progress++; - spin_unlock(&root_dst->root_item_lock); - - tail_len = olen % BTRFS_MAX_DEDUPE_LEN; - chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN); - - for (i = 0; i < chunk_count; i++) { - ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN, - dst, dst_loff); - if (ret) - goto out; - - loff += BTRFS_MAX_DEDUPE_LEN; - dst_loff += BTRFS_MAX_DEDUPE_LEN; - } - - if (tail_len > 0) - ret = btrfs_extent_same_range(src, loff, tail_len, dst, - dst_loff); -out: - spin_lock(&root_dst->root_item_lock); - root_dst->dedupe_in_progress--; - spin_unlock(&root_dst->root_item_lock); - - return ret; -} - -static int clone_finish_inode_update(struct btrfs_trans_handle *trans, - struct inode *inode, - u64 endoff, - const u64 destoff, - const u64 olen, - int no_time_update) -{ - struct btrfs_root *root = BTRFS_I(inode)->root; - int ret; - - inode_inc_iversion(inode); - if (!no_time_update) - inode->i_mtime = inode->i_ctime = current_time(inode); - /* - * We round up to the block size at eof when determining which - * extents to clone above, but shouldn't round up the file size. 
- */ - if (endoff > destoff + olen) - endoff = destoff + olen; - if (endoff > inode->i_size) { - i_size_write(inode, endoff); - btrfs_inode_safe_disk_i_size_write(inode, 0); - } - - ret = btrfs_update_inode(trans, root, inode); - if (ret) { - btrfs_abort_transaction(trans, ret); - btrfs_end_transaction(trans); - goto out; - } - ret = btrfs_end_transaction(trans); -out: - return ret; -} - -/* - * Make sure we do not end up inserting an inline extent into a file that - * already has other (non-inline) extents. If a file has an inline extent it - * cannot have any other extents and the (single) inline extent must start at the - * file offset 0. Failing to respect these rules will lead to file corruption, - * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc. - * - * We can have extents that have been already written to disk or we can have - * dirty ranges still in delalloc, in which case the extent maps and items are - * created only when we run delalloc, and the delalloc ranges might fall outside - * the range we are currently locking in the inode's io tree. So we check the - * inode's i_size because of that (i_size updates are done while holding the - * i_mutex, which we are holding here). - * We also check to see if the inode has a size not greater than "datal" but has - * extents beyond it, due to a fallocate with FALLOC_FL_KEEP_SIZE (and we are - * protected against such concurrent fallocate calls by the i_mutex). - * - * If the file has no extents but a size greater than datal, do not allow the - * copy because we would need to turn the inline extent into a non-inline one - * (even with NO_HOLES enabled). If we find our destination inode only has one inline - * extent, just overwrite it with the source inline extent if its size is less - * than the source extent's size, or we could copy the source inline extent's - * data into the destination inode's inline extent if the latter is greater than - * the former. - */ -static int clone_copy_inline_extent(struct inode *dst, - struct btrfs_trans_handle *trans, - struct btrfs_path *path, - struct btrfs_key *new_key, - const u64 drop_start, - const u64 datal, - const u64 skip, - const u64 size, - char *inline_data) -{ - struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb); - struct btrfs_root *root = BTRFS_I(dst)->root; - const u64 aligned_end = ALIGN(new_key->offset + datal, - fs_info->sectorsize); - int ret; - struct btrfs_key key; - - if (new_key->offset > 0) - return -EOPNOTSUPP; - - key.objectid = btrfs_ino(BTRFS_I(dst)); - key.type = BTRFS_EXTENT_DATA_KEY; - key.offset = 0; - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret < 0) { - return ret; - } else if (ret > 0) { - if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { - ret = btrfs_next_leaf(root, path); - if (ret < 0) - return ret; - else if (ret > 0) - goto copy_inline_extent; - } - btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); - if (key.objectid == btrfs_ino(BTRFS_I(dst)) && - key.type == BTRFS_EXTENT_DATA_KEY) { - ASSERT(key.offset > 0); - return -EOPNOTSUPP; - } - } else if (i_size_read(dst) <= datal) { - struct btrfs_file_extent_item *ei; - u64 ext_len; - - /* - * If the file size is <= datal, make sure there are no other - * extents following (can happen due to a fallocate call with - * the flag FALLOC_FL_KEEP_SIZE). - */ - ei = btrfs_item_ptr(path->nodes[0], path->slots[0], - struct btrfs_file_extent_item); - /* - * If it's an inline extent, it cannot have other extents - * following it.
- */ - if (btrfs_file_extent_type(path->nodes[0], ei) == - BTRFS_FILE_EXTENT_INLINE) - goto copy_inline_extent; - - ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei); - if (ext_len > aligned_end) - return -EOPNOTSUPP; - - ret = btrfs_next_item(root, path); - if (ret < 0) { - return ret; - } else if (ret == 0) { - btrfs_item_key_to_cpu(path->nodes[0], &key, - path->slots[0]); - if (key.objectid == btrfs_ino(BTRFS_I(dst)) && - key.type == BTRFS_EXTENT_DATA_KEY) - return -EOPNOTSUPP; - } - } - -copy_inline_extent: - /* - * We have no extent items, or we have an extent at offset 0 which may - * or may not be inlined. All these cases are dealt with in the same way. - */ - if (i_size_read(dst) > datal) { - /* - * If the destination inode has an inline extent: - * this would require copying the data from the source inline - * extent into the beginning of the destination's inline extent. - * But this is really complex: both extents can be compressed - * or just one of them, which would require decompressing and - * re-compressing data (which could increase the new compressed - * size, not allowing the compressed data to fit anymore in an - * inline extent). - * So just don't support this case for now (it should be rare, - * we are not really saving space when cloning inline extents). - */ - return -EOPNOTSUPP; - } - - btrfs_release_path(path); - ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1); - if (ret) - return ret; - ret = btrfs_insert_empty_item(trans, root, path, new_key, size); - if (ret) - return ret; - - if (skip) { - const u32 start = btrfs_file_extent_calc_inline_size(0); - - memmove(inline_data + start, inline_data + start + skip, datal); - } - - write_extent_buffer(path->nodes[0], inline_data, - btrfs_item_ptr_offset(path->nodes[0], - path->slots[0]), - size); - inode_add_bytes(dst, datal); - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags); - - return 0; -} - -/** - * btrfs_clone() - clone a range from one inode to another - * - * @src: Inode to clone from - * @inode: Inode to clone to - * @off: Offset within source to start clone from - * @olen: Original length, passed by user, of range to clone - * @olen_aligned: Block-aligned value of olen - * @destoff: Offset within @inode to start clone - * @no_time_update: Whether to update mtime/ctime on the target inode - */ -static int btrfs_clone(struct inode *src, struct inode *inode, - const u64 off, const u64 olen, const u64 olen_aligned, - const u64 destoff, int no_time_update) -{ - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_path *path = NULL; - struct extent_buffer *leaf; - struct btrfs_trans_handle *trans; - char *buf = NULL; - struct btrfs_key key; - u32 nritems; - int slot; - int ret; - const u64 len = olen_aligned; - u64 last_dest_end = destoff; - - ret = -ENOMEM; - buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); - if (!buf) - return ret; - - path = btrfs_alloc_path(); - if (!path) { - kvfree(buf); - return ret; - } - - path->reada = READA_FORWARD; - /* clone data */ - key.objectid = btrfs_ino(BTRFS_I(src)); - key.type = BTRFS_EXTENT_DATA_KEY; - key.offset = off; - - while (1) { - u64 next_key_min_offset = key.offset + 1; - struct btrfs_file_extent_item *extent; - int type; - u32 size; - struct btrfs_key new_key; - u64 disko = 0, diskl = 0; - u64 datao = 0, datal = 0; - u8 comp; - u64 drop_start; - - /* - * note the key will change type as we walk through the - * tree.
- */ - path->leave_spinning = 1; - ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path, - 0, 0); - if (ret < 0) - goto out; - /* - * First search, if no extent item that starts at offset off was - * found but the previous item is an extent item, it's possible - * it might overlap our target range, therefore process it. - */ - if (key.offset == off && ret > 0 && path->slots[0] > 0) { - btrfs_item_key_to_cpu(path->nodes[0], &key, - path->slots[0] - 1); - if (key.type == BTRFS_EXTENT_DATA_KEY) - path->slots[0]--; - } - - nritems = btrfs_header_nritems(path->nodes[0]); -process_slot: - if (path->slots[0] >= nritems) { - ret = btrfs_next_leaf(BTRFS_I(src)->root, path); - if (ret < 0) - goto out; - if (ret > 0) - break; - nritems = btrfs_header_nritems(path->nodes[0]); - } - leaf = path->nodes[0]; - slot = path->slots[0]; - - btrfs_item_key_to_cpu(leaf, &key, slot); - if (key.type > BTRFS_EXTENT_DATA_KEY || - key.objectid != btrfs_ino(BTRFS_I(src))) - break; - - ASSERT(key.type == BTRFS_EXTENT_DATA_KEY); - - extent = btrfs_item_ptr(leaf, slot, - struct btrfs_file_extent_item); - comp = btrfs_file_extent_compression(leaf, extent); - type = btrfs_file_extent_type(leaf, extent); - if (type == BTRFS_FILE_EXTENT_REG || - type == BTRFS_FILE_EXTENT_PREALLOC) { - disko = btrfs_file_extent_disk_bytenr(leaf, extent); - diskl = btrfs_file_extent_disk_num_bytes(leaf, extent); - datao = btrfs_file_extent_offset(leaf, extent); - datal = btrfs_file_extent_num_bytes(leaf, extent); - } else if (type == BTRFS_FILE_EXTENT_INLINE) { - /* Take upper bound, may be compressed */ - datal = btrfs_file_extent_ram_bytes(leaf, extent); - } - - /* - * The first search might have left us at an extent item that - * ends before our target range's start, can happen if we have - * holes and NO_HOLES feature enabled. - */ - if (key.offset + datal <= off) { - path->slots[0]++; - goto process_slot; - } else if (key.offset >= off + len) { - break; - } - next_key_min_offset = key.offset + datal; - size = btrfs_item_size_nr(leaf, slot); - read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot), - size); - - btrfs_release_path(path); - path->leave_spinning = 0; - - memcpy(&new_key, &key, sizeof(new_key)); - new_key.objectid = btrfs_ino(BTRFS_I(inode)); - if (off <= key.offset) - new_key.offset = key.offset + destoff - off; - else - new_key.offset = destoff; - - /* - * Deal with a hole that doesn't have an extent item that - * represents it (NO_HOLES feature enabled). - * This hole is either in the middle of the cloning range or at - * the beginning (fully overlaps it or partially overlaps it). 
- */ - if (new_key.offset != last_dest_end) - drop_start = last_dest_end; - else - drop_start = new_key.offset; - - if (type == BTRFS_FILE_EXTENT_REG || - type == BTRFS_FILE_EXTENT_PREALLOC) { - struct btrfs_clone_extent_info clone_info; - - /* - * a | --- range to clone ---| b - * | ------------- extent ------------- | - */ - - /* Subtract range b */ - if (key.offset + datal > off + len) - datal = off + len - key.offset; - - /* Subtract range a */ - if (off > key.offset) { - datao += off - key.offset; - datal -= off - key.offset; - } - - clone_info.disk_offset = disko; - clone_info.disk_len = diskl; - clone_info.data_offset = datao; - clone_info.data_len = datal; - clone_info.file_offset = new_key.offset; - clone_info.extent_buf = buf; - clone_info.item_size = size; - ret = btrfs_punch_hole_range(inode, path, - drop_start, - new_key.offset + datal - 1, - &clone_info, &trans); - if (ret) - goto out; - } else if (type == BTRFS_FILE_EXTENT_INLINE) { - u64 skip = 0; - u64 trim = 0; - - if (off > key.offset) { - skip = off - key.offset; - new_key.offset += skip; - } - - if (key.offset + datal > off + len) - trim = key.offset + datal - (off + len); - - if (comp && (skip || trim)) { - ret = -EINVAL; - goto out; - } - size -= skip + trim; - datal -= skip + trim; - - /* - * If our extent is inline, we know we will drop or - * adjust at most 1 extent item in the destination root. - * - * 1 - adjusting old extent (we may have to split it) - * 1 - add new extent - * 1 - inode update - */ - trans = btrfs_start_transaction(root, 3); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - goto out; - } - - ret = clone_copy_inline_extent(inode, trans, path, - &new_key, drop_start, - datal, skip, size, buf); - if (ret) { - if (ret != -EOPNOTSUPP) - btrfs_abort_transaction(trans, ret); - btrfs_end_transaction(trans); - goto out; - } - } - - btrfs_release_path(path); - - last_dest_end = ALIGN(new_key.offset + datal, - fs_info->sectorsize); - ret = clone_finish_inode_update(trans, inode, last_dest_end, - destoff, olen, no_time_update); - if (ret) - goto out; - if (new_key.offset + datal >= destoff + len) - break; - - btrfs_release_path(path); - key.offset = next_key_min_offset; - - if (fatal_signal_pending(current)) { - ret = -EINTR; - goto out; - } - } - ret = 0; - - if (last_dest_end < destoff + len) { - /* - * We have an implicit hole that fully or partially overlaps our - * cloning range at its end. This means that we either have the - * NO_HOLES feature enabled or the implicit hole happened due to - * mixing buffered and direct IO writes against this file. - */ - btrfs_release_path(path); - path->leave_spinning = 0; - - ret = btrfs_punch_hole_range(inode, path, - last_dest_end, destoff + len - 1, - NULL, &trans); - if (ret) - goto out; - - ret = clone_finish_inode_update(trans, inode, destoff + len, - destoff, olen, no_time_update); - } - -out: - btrfs_free_path(path); - kvfree(buf); - return ret; -} - -static noinline int btrfs_clone_files(struct file *file, struct file *file_src, - u64 off, u64 olen, u64 destoff) -{ - struct inode *inode = file_inode(file); - struct inode *src = file_inode(file_src); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - int ret; - u64 len = olen; - u64 bs = fs_info->sb->s_blocksize; - - /* - * TODO: - * - split compressed inline extents. annoying: we need to - * decompress into destination's address_space (the file offset - * may change, so source mapping won't do), then recompress (or - * otherwise reinsert) a subrange. 
- * - * - split destination inode's inline extents. The inline extents can - * be either compressed or non-compressed. - */ - - /* - * VFS's generic_remap_file_range_prep() protects us from cloning the - * eof block into the middle of a file, which would result in corruption - * if the file size is not blocksize aligned. So we don't need to check - * for that case here. - */ - if (off + len == src->i_size) - len = ALIGN(src->i_size, bs) - off; - - if (destoff > inode->i_size) { - const u64 wb_start = ALIGN_DOWN(inode->i_size, bs); - - ret = btrfs_cont_expand(inode, inode->i_size, destoff); - if (ret) - return ret; - /* - * We may have truncated the last block if the inode's size is - * not sector size aligned, so we need to wait for writeback to - * complete before proceeding further, otherwise we can race - * with cloning and attempt to increment a reference to an - * extent that no longer exists (writeback completed right after - * we found the previous extent covering eof and before we - * attempted to increment its reference count). - */ - ret = btrfs_wait_ordered_range(inode, wb_start, - destoff - wb_start); - if (ret) - return ret; - } - - /* - * Lock destination range to serialize with concurrent readpages() and - * source range to serialize with relocation. - */ - btrfs_double_extent_lock(src, off, inode, destoff, len); - ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); - btrfs_double_extent_unlock(src, off, inode, destoff, len); - /* - * Truncate page cache pages so that future reads will see the cloned - * data immediately and not the previous data. - */ - truncate_inode_pages_range(&inode->i_data, - round_down(destoff, PAGE_SIZE), - round_up(destoff + len, PAGE_SIZE) - 1); - - return ret; -} - -static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - loff_t *len, unsigned int remap_flags) -{ - struct inode *inode_in = file_inode(file_in); - struct inode *inode_out = file_inode(file_out); - u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize; - bool same_inode = inode_out == inode_in; - u64 wb_len; - int ret; - - if (!(remap_flags & REMAP_FILE_DEDUP)) { - struct btrfs_root *root_out = BTRFS_I(inode_out)->root; - - if (btrfs_root_readonly(root_out)) - return -EROFS; - - if (file_in->f_path.mnt != file_out->f_path.mnt || - inode_in->i_sb != inode_out->i_sb) - return -EXDEV; - } - - /* don't make the dst file partly checksummed */ - if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) != - (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) { - return -EINVAL; - } - - /* - * Now that the inodes are locked, we need to start writeback ourselves - * and cannot rely on the writeback from the VFS's generic helper - * generic_remap_file_range_prep() because: - * - * 1) For compression we must call filemap_fdatawrite_range() - * twice (btrfs_fdatawrite_range() does it for us), and the generic - * helper only calls it once; - * - * 2) filemap_fdatawrite_range(), called by the generic helper, only - * waits for the writeback to complete, i.e. for IO to be done, and - * not for the ordered extents to complete. We need to wait for them - * to complete so that new file extent items are in the fs tree.
- */ - if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP)) - wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs); - else - wb_len = ALIGN(*len, bs); - - /* - * Since we don't lock ranges, wait for ongoing lockless dio writes (as - * any in progress could create its ordered extents after we wait for - * existing ordered extents below). - */ - inode_dio_wait(inode_in); - if (!same_inode) - inode_dio_wait(inode_out); - - /* - * Workaround to make sure NOCOW buffered writes reach disk as NOCOW. - * - * Btrfs' back references do not have a block level granularity, they - * work at the whole extent level. - * NOCOW buffered writes without data space reserved may not be able - * to fall back to CoW due to lack of data space and thus could cause - * data loss. - * - * Here we take a shortcut by flushing the whole inode, so that all - * nocow writes reach disk as nocow before we increase the - * reference of the extent. We could do better by only flushing NOCOW - * data, but that needs extra accounting. - * - * Also we don't need to check ASYNC_EXTENT, as async extent will be - * CoWed anyway, not affecting nocow part. - */ - ret = filemap_flush(inode_in->i_mapping); - if (ret < 0) - return ret; - - ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs), - wb_len); - if (ret < 0) - return ret; - ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs), - wb_len); - if (ret < 0) - return ret; - - return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, - len, remap_flags); -} - -loff_t btrfs_remap_file_range(struct file *src_file, loff_t off, - struct file *dst_file, loff_t destoff, loff_t len, - unsigned int remap_flags) -{ - struct inode *src_inode = file_inode(src_file); - struct inode *dst_inode = file_inode(dst_file); - bool same_inode = dst_inode == src_inode; - int ret; - - if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) - return -EINVAL; - - if (same_inode) - inode_lock(src_inode); - else - lock_two_nondirectories(src_inode, dst_inode); - - ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff, - &len, remap_flags); - if (ret < 0 || len == 0) - goto out_unlock; - - if (remap_flags & REMAP_FILE_DEDUP) - ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff); - else - ret = btrfs_clone_files(dst_file, src_file, off, len, destoff); - -out_unlock: - if (same_inode) - inode_unlock(src_inode); - else - unlock_two_nondirectories(src_inode, dst_inode); - - return ret < 0 ? ret : len; -} - static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) { struct inode *inode = file_inode(file); diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c new file mode 100644 index 000000000000..367b11656118 --- /dev/null +++ b/fs/btrfs/reflink.c @@ -0,0 +1,729 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/iversion.h> +#include "ctree.h" +#include "reflink.h" +#include "transaction.h" + +#define BTRFS_MAX_DEDUPE_LEN SZ_16M + +static int clone_finish_inode_update(struct btrfs_trans_handle *trans, + struct inode *inode, + u64 endoff, + const u64 destoff, + const u64 olen, + int no_time_update) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + int ret; + + inode_inc_iversion(inode); + if (!no_time_update) + inode->i_mtime = inode->i_ctime = current_time(inode); + /* + * We round up to the block size at eof when determining which + * extents to clone above, but shouldn't round up the file size.
+ */ + if (endoff > destoff + olen) + endoff = destoff + olen; + if (endoff > inode->i_size) { + i_size_write(inode, endoff); + btrfs_inode_safe_disk_i_size_write(inode, 0); + } + + ret = btrfs_update_inode(trans, root, inode); + if (ret) { + btrfs_abort_transaction(trans, ret); + btrfs_end_transaction(trans); + goto out; + } + ret = btrfs_end_transaction(trans); +out: + return ret; +} + +/* + * Make sure we do not end up inserting an inline extent into a file that + * already has other (non-inline) extents. If a file has an inline extent it + * cannot have any other extents and the (single) inline extent must start at the + * file offset 0. Failing to respect these rules will lead to file corruption, + * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc. + * + * We can have extents that have been already written to disk or we can have + * dirty ranges still in delalloc, in which case the extent maps and items are + * created only when we run delalloc, and the delalloc ranges might fall outside + * the range we are currently locking in the inode's io tree. So we check the + * inode's i_size because of that (i_size updates are done while holding the + * i_mutex, which we are holding here). + * We also check to see if the inode has a size not greater than "datal" but has + * extents beyond it, due to a fallocate with FALLOC_FL_KEEP_SIZE (and we are + * protected against such concurrent fallocate calls by the i_mutex). + * + * If the file has no extents but a size greater than datal, do not allow the + * copy because we would need to turn the inline extent into a non-inline one + * (even with NO_HOLES enabled). If we find our destination inode only has one inline + * extent, just overwrite it with the source inline extent if its size is less + * than the source extent's size, or we could copy the source inline extent's + * data into the destination inode's inline extent if the latter is greater than + * the former. + */ +static int clone_copy_inline_extent(struct inode *dst, + struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_key *new_key, + const u64 drop_start, + const u64 datal, + const u64 skip, + const u64 size, + char *inline_data) +{ + struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb); + struct btrfs_root *root = BTRFS_I(dst)->root; + const u64 aligned_end = ALIGN(new_key->offset + datal, + fs_info->sectorsize); + int ret; + struct btrfs_key key; + + if (new_key->offset > 0) + return -EOPNOTSUPP; + + key.objectid = btrfs_ino(BTRFS_I(dst)); + key.type = BTRFS_EXTENT_DATA_KEY; + key.offset = 0; + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) { + return ret; + } else if (ret > 0) { + if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + return ret; + else if (ret > 0) + goto copy_inline_extent; + } + btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); + if (key.objectid == btrfs_ino(BTRFS_I(dst)) && + key.type == BTRFS_EXTENT_DATA_KEY) { + ASSERT(key.offset > 0); + return -EOPNOTSUPP; + } + } else if (i_size_read(dst) <= datal) { + struct btrfs_file_extent_item *ei; + u64 ext_len; + + /* + * If the file size is <= datal, make sure there are no other + * extents following (can happen due to a fallocate call with + * the flag FALLOC_FL_KEEP_SIZE). + */ + ei = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_file_extent_item); + /* + * If it's an inline extent, it cannot have other extents + * following it.
+ */ + if (btrfs_file_extent_type(path->nodes[0], ei) == + BTRFS_FILE_EXTENT_INLINE) + goto copy_inline_extent; + + ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei); + if (ext_len > aligned_end) + return -EOPNOTSUPP; + + ret = btrfs_next_item(root, path); + if (ret < 0) { + return ret; + } else if (ret == 0) { + btrfs_item_key_to_cpu(path->nodes[0], &key, + path->slots[0]); + if (key.objectid == btrfs_ino(BTRFS_I(dst)) && + key.type == BTRFS_EXTENT_DATA_KEY) + return -EOPNOTSUPP; + } + } + +copy_inline_extent: + /* + * We have no extent items, or we have an extent at offset 0 which may + * or may not be inlined. All these cases are dealt with in the same way. + */ + if (i_size_read(dst) > datal) { + /* + * If the destination inode has an inline extent: + * this would require copying the data from the source inline + * extent into the beginning of the destination's inline extent. + * But this is really complex: both extents can be compressed + * or just one of them, which would require decompressing and + * re-compressing data (which could increase the new compressed + * size, not allowing the compressed data to fit anymore in an + * inline extent). + * So just don't support this case for now (it should be rare, + * we are not really saving space when cloning inline extents). + */ + return -EOPNOTSUPP; + } + + btrfs_release_path(path); + ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1); + if (ret) + return ret; + ret = btrfs_insert_empty_item(trans, root, path, new_key, size); + if (ret) + return ret; + + if (skip) { + const u32 start = btrfs_file_extent_calc_inline_size(0); + + memmove(inline_data + start, inline_data + start + skip, datal); + } + + write_extent_buffer(path->nodes[0], inline_data, + btrfs_item_ptr_offset(path->nodes[0], + path->slots[0]), + size); + inode_add_bytes(dst, datal); + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags); + + return 0; +} + +/** + * btrfs_clone() - clone a range from one inode to another + * + * @src: Inode to clone from + * @inode: Inode to clone to + * @off: Offset within source to start clone from + * @olen: Original length, passed by user, of range to clone + * @olen_aligned: Block-aligned value of olen + * @destoff: Offset within @inode to start clone + * @no_time_update: Whether to update mtime/ctime on the target inode + */ +static int btrfs_clone(struct inode *src, struct inode *inode, + const u64 off, const u64 olen, const u64 olen_aligned, + const u64 destoff, int no_time_update) +{ + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_path *path = NULL; + struct extent_buffer *leaf; + struct btrfs_trans_handle *trans; + char *buf = NULL; + struct btrfs_key key; + u32 nritems; + int slot; + int ret; + const u64 len = olen_aligned; + u64 last_dest_end = destoff; + + ret = -ENOMEM; + buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); + if (!buf) + return ret; + + path = btrfs_alloc_path(); + if (!path) { + kvfree(buf); + return ret; + } + + path->reada = READA_FORWARD; + /* Clone data */ + key.objectid = btrfs_ino(BTRFS_I(src)); + key.type = BTRFS_EXTENT_DATA_KEY; + key.offset = off; + + while (1) { + u64 next_key_min_offset = key.offset + 1; + struct btrfs_file_extent_item *extent; + int type; + u32 size; + struct btrfs_key new_key; + u64 disko = 0, diskl = 0; + u64 datao = 0, datal = 0; + u8 comp; + u64 drop_start; + + /* Note the key will change type as we walk through the tree */ + path->leave_spinning = 1; + ret =
btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path, + 0, 0); + if (ret < 0) + goto out; + /* + * First search, if no extent item that starts at offset off was + * found but the previous item is an extent item, it's possible + * it might overlap our target range, therefore process it. + */ + if (key.offset == off && ret > 0 && path->slots[0] > 0) { + btrfs_item_key_to_cpu(path->nodes[0], &key, + path->slots[0] - 1); + if (key.type == BTRFS_EXTENT_DATA_KEY) + path->slots[0]--; + } + + nritems = btrfs_header_nritems(path->nodes[0]); +process_slot: + if (path->slots[0] >= nritems) { + ret = btrfs_next_leaf(BTRFS_I(src)->root, path); + if (ret < 0) + goto out; + if (ret > 0) + break; + nritems = btrfs_header_nritems(path->nodes[0]); + } + leaf = path->nodes[0]; + slot = path->slots[0]; + + btrfs_item_key_to_cpu(leaf, &key, slot); + if (key.type > BTRFS_EXTENT_DATA_KEY || + key.objectid != btrfs_ino(BTRFS_I(src))) + break; + + ASSERT(key.type == BTRFS_EXTENT_DATA_KEY); + + extent = btrfs_item_ptr(leaf, slot, + struct btrfs_file_extent_item); + comp = btrfs_file_extent_compression(leaf, extent); + type = btrfs_file_extent_type(leaf, extent); + if (type == BTRFS_FILE_EXTENT_REG || + type == BTRFS_FILE_EXTENT_PREALLOC) { + disko = btrfs_file_extent_disk_bytenr(leaf, extent); + diskl = btrfs_file_extent_disk_num_bytes(leaf, extent); + datao = btrfs_file_extent_offset(leaf, extent); + datal = btrfs_file_extent_num_bytes(leaf, extent); + } else if (type == BTRFS_FILE_EXTENT_INLINE) { + /* Take upper bound, may be compressed */ + datal = btrfs_file_extent_ram_bytes(leaf, extent); + } + + /* + * The first search might have left us at an extent item that + * ends before our target range's start, can happen if we have + * holes and NO_HOLES feature enabled. + */ + if (key.offset + datal <= off) { + path->slots[0]++; + goto process_slot; + } else if (key.offset >= off + len) { + break; + } + next_key_min_offset = key.offset + datal; + size = btrfs_item_size_nr(leaf, slot); + read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot), + size); + + btrfs_release_path(path); + path->leave_spinning = 0; + + memcpy(&new_key, &key, sizeof(new_key)); + new_key.objectid = btrfs_ino(BTRFS_I(inode)); + if (off <= key.offset) + new_key.offset = key.offset + destoff - off; + else + new_key.offset = destoff; + + /* + * Deal with a hole that doesn't have an extent item that + * represents it (NO_HOLES feature enabled). + * This hole is either in the middle of the cloning range or at + * the beginning (fully overlaps it or partially overlaps it). 
+ */ + if (new_key.offset != last_dest_end) + drop_start = last_dest_end; + else + drop_start = new_key.offset; + + if (type == BTRFS_FILE_EXTENT_REG || + type == BTRFS_FILE_EXTENT_PREALLOC) { + struct btrfs_clone_extent_info clone_info; + + /* + * a | --- range to clone ---| b + * | ------------- extent ------------- | + */ + + /* Subtract range b */ + if (key.offset + datal > off + len) + datal = off + len - key.offset; + + /* Subtract range a */ + if (off > key.offset) { + datao += off - key.offset; + datal -= off - key.offset; + } + + clone_info.disk_offset = disko; + clone_info.disk_len = diskl; + clone_info.data_offset = datao; + clone_info.data_len = datal; + clone_info.file_offset = new_key.offset; + clone_info.extent_buf = buf; + clone_info.item_size = size; + ret = btrfs_punch_hole_range(inode, path, drop_start, + new_key.offset + datal - 1, &clone_info, + &trans); + if (ret) + goto out; + } else if (type == BTRFS_FILE_EXTENT_INLINE) { + u64 skip = 0; + u64 trim = 0; + + if (off > key.offset) { + skip = off - key.offset; + new_key.offset += skip; + } + + if (key.offset + datal > off + len) + trim = key.offset + datal - (off + len); + + if (comp && (skip || trim)) { + ret = -EINVAL; + goto out; + } + size -= skip + trim; + datal -= skip + trim; + + /* + * If our extent is inline, we know we will drop or + * adjust at most 1 extent item in the destination root. + * + * 1 - adjusting old extent (we may have to split it) + * 1 - add new extent + * 1 - inode update + */ + trans = btrfs_start_transaction(root, 3); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out; + } + + ret = clone_copy_inline_extent(inode, trans, path, + &new_key, drop_start, + datal, skip, size, buf); + if (ret) { + if (ret != -EOPNOTSUPP) + btrfs_abort_transaction(trans, ret); + btrfs_end_transaction(trans); + goto out; + } + } + + btrfs_release_path(path); + + last_dest_end = ALIGN(new_key.offset + datal, + fs_info->sectorsize); + ret = clone_finish_inode_update(trans, inode, last_dest_end, + destoff, olen, no_time_update); + if (ret) + goto out; + if (new_key.offset + datal >= destoff + len) + break; + + btrfs_release_path(path); + key.offset = next_key_min_offset; + + if (fatal_signal_pending(current)) { + ret = -EINTR; + goto out; + } + } + ret = 0; + + if (last_dest_end < destoff + len) { + /* + * We have an implicit hole that fully or partially overlaps our + * cloning range at its end. This means that we either have the + * NO_HOLES feature enabled or the implicit hole happened due to + * mixing buffered and direct IO writes against this file. 
+ */ + btrfs_release_path(path); + path->leave_spinning = 0; + + ret = btrfs_punch_hole_range(inode, path, last_dest_end, + destoff + len - 1, NULL, &trans); + if (ret) + goto out; + + ret = clone_finish_inode_update(trans, inode, destoff + len, + destoff, olen, no_time_update); + } + +out: + btrfs_free_path(path); + kvfree(buf); + return ret; +} + +static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1, + struct inode *inode2, u64 loff2, u64 len) +{ + unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); + unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); +} + +static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1, + struct inode *inode2, u64 loff2, u64 len) +{ + if (inode1 < inode2) { + swap(inode1, inode2); + swap(loff1, loff2); + } else if (inode1 == inode2 && loff2 < loff1) { + swap(loff1, loff2); + } + lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); + lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); +} + +static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len, + struct inode *dst, u64 dst_loff) +{ + const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize; + int ret; + + /* + * Lock destination range to serialize with concurrent readpages() and + * source range to serialize with relocation. + */ + btrfs_double_extent_lock(src, loff, dst, dst_loff, len); + ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1); + btrfs_double_extent_unlock(src, loff, dst, dst_loff, len); + + return ret; +} + +static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, + struct inode *dst, u64 dst_loff) +{ + int ret; + u64 i, tail_len, chunk_count; + struct btrfs_root *root_dst = BTRFS_I(dst)->root; + + spin_lock(&root_dst->root_item_lock); + if (root_dst->send_in_progress) { + btrfs_warn_rl(root_dst->fs_info, +"cannot deduplicate to root %llu while send operations are using it (%d in progress)", + root_dst->root_key.objectid, + root_dst->send_in_progress); + spin_unlock(&root_dst->root_item_lock); + return -EAGAIN; + } + root_dst->dedupe_in_progress++; + spin_unlock(&root_dst->root_item_lock); + + tail_len = olen % BTRFS_MAX_DEDUPE_LEN; + chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN); + + for (i = 0; i < chunk_count; i++) { + ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN, + dst, dst_loff); + if (ret) + goto out; + + loff += BTRFS_MAX_DEDUPE_LEN; + dst_loff += BTRFS_MAX_DEDUPE_LEN; + } + + if (tail_len > 0) + ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff); +out: + spin_lock(&root_dst->root_item_lock); + root_dst->dedupe_in_progress--; + spin_unlock(&root_dst->root_item_lock); + + return ret; +} + +static noinline int btrfs_clone_files(struct file *file, struct file *file_src, + u64 off, u64 olen, u64 destoff) +{ + struct inode *inode = file_inode(file); + struct inode *src = file_inode(file_src); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + int ret; + u64 len = olen; + u64 bs = fs_info->sb->s_blocksize; + + /* + * TODO: + * - split compressed inline extents. annoying: we need to + * decompress into destination's address_space (the file offset + * may change, so source mapping won't do), then recompress (or + * otherwise reinsert) a subrange. + * + * - split destination inode's inline extents. The inline extents can + * be either compressed or non-compressed. 
+ */ + + /* + * VFS's generic_remap_file_range_prep() protects us from cloning the + * eof block into the middle of a file, which would result in corruption + * if the file size is not blocksize aligned. So we don't need to check + * for that case here. + */ + if (off + len == src->i_size) + len = ALIGN(src->i_size, bs) - off; + + if (destoff > inode->i_size) { + const u64 wb_start = ALIGN_DOWN(inode->i_size, bs); + + ret = btrfs_cont_expand(inode, inode->i_size, destoff); + if (ret) + return ret; + /* + * We may have truncated the last block if the inode's size is + * not sector size aligned, so we need to wait for writeback to + * complete before proceeding further, otherwise we can race + * with cloning and attempt to increment a reference to an + * extent that no longer exists (writeback completed right after + * we found the previous extent covering eof and before we + * attempted to increment its reference count). + */ + ret = btrfs_wait_ordered_range(inode, wb_start, + destoff - wb_start); + if (ret) + return ret; + } + + /* + * Lock destination range to serialize with concurrent readpages() and + * source range to serialize with relocation. + */ + btrfs_double_extent_lock(src, off, inode, destoff, len); + ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); + btrfs_double_extent_unlock(src, off, inode, destoff, len); + /* + * Truncate page cache pages so that future reads will see the cloned + * data immediately and not the previous data. + */ + truncate_inode_pages_range(&inode->i_data, + round_down(destoff, PAGE_SIZE), + round_up(destoff + len, PAGE_SIZE) - 1); + + return ret; +} + +static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *len, unsigned int remap_flags) +{ + struct inode *inode_in = file_inode(file_in); + struct inode *inode_out = file_inode(file_out); + u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize; + bool same_inode = inode_out == inode_in; + u64 wb_len; + int ret; + + if (!(remap_flags & REMAP_FILE_DEDUP)) { + struct btrfs_root *root_out = BTRFS_I(inode_out)->root; + + if (btrfs_root_readonly(root_out)) + return -EROFS; + + if (file_in->f_path.mnt != file_out->f_path.mnt || + inode_in->i_sb != inode_out->i_sb) + return -EXDEV; + } + + /* Don't make the dst file partly checksummed */ + if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) != + (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) { + return -EINVAL; + } + + /* + * Now that the inodes are locked, we need to start writeback ourselves + * and cannot rely on the writeback from the VFS's generic helper + * generic_remap_file_range_prep() because: + * + * 1) For compression we must call filemap_fdatawrite_range() + * twice (btrfs_fdatawrite_range() does it for us), and the generic + * helper only calls it once; + * + * 2) filemap_fdatawrite_range(), called by the generic helper, only + * waits for the writeback to complete, i.e. for IO to be done, and + * not for the ordered extents to complete. We need to wait for them + * to complete so that new file extent items are in the fs tree. + */ + if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP)) + wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs); + else + wb_len = ALIGN(*len, bs); + + /* + * Since we don't lock ranges, wait for ongoing lockless dio writes (as + * any in progress could create its ordered extents after we wait for + * existing ordered extents below).
+ */ + inode_dio_wait(inode_in); + if (!same_inode) + inode_dio_wait(inode_out); + + /* + * Workaround to make sure NOCOW buffered writes reach disk as NOCOW. + * + * Btrfs' back references do not have a block level granularity, they + * work at the whole extent level. + * NOCOW buffered writes without data space reserved may not be able + * to fall back to CoW due to lack of data space and thus could cause + * data loss. + * + * Here we take a shortcut by flushing the whole inode, so that all + * nocow writes reach disk as nocow before we increase the + * reference of the extent. We could do better by only flushing NOCOW + * data, but that needs extra accounting. + * + * Also we don't need to check ASYNC_EXTENT, as async extent will be + * CoWed anyway, not affecting nocow part. + */ + ret = filemap_flush(inode_in->i_mapping); + if (ret < 0) + return ret; + + ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs), + wb_len); + if (ret < 0) + return ret; + ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs), + wb_len); + if (ret < 0) + return ret; + + return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, + len, remap_flags); +} + +loff_t btrfs_remap_file_range(struct file *src_file, loff_t off, + struct file *dst_file, loff_t destoff, loff_t len, + unsigned int remap_flags) +{ + struct inode *src_inode = file_inode(src_file); + struct inode *dst_inode = file_inode(dst_file); + bool same_inode = dst_inode == src_inode; + int ret; + + if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) + return -EINVAL; + + if (same_inode) + inode_lock(src_inode); + else + lock_two_nondirectories(src_inode, dst_inode); + + ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff, + &len, remap_flags); + if (ret < 0 || len == 0) + goto out_unlock; + + if (remap_flags & REMAP_FILE_DEDUP) + ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff); + else + ret = btrfs_clone_files(dst_file, src_file, off, len, destoff); + +out_unlock: + if (same_inode) + inode_unlock(src_inode); + else + unlock_two_nondirectories(src_inode, dst_inode); + + return ret < 0 ? ret : len; +} diff --git a/fs/btrfs/reflink.h b/fs/btrfs/reflink.h new file mode 100644 index 000000000000..ecb309b4dad0 --- /dev/null +++ b/fs/btrfs/reflink.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef BTRFS_REFLINK_H +#define BTRFS_REFLINK_H + +#include <linux/fs.h> + +loff_t btrfs_remap_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags); + +#endif /* BTRFS_REFLINK_H */ -- cgit v1.2.3 From 726a342120eba8197b3bc5e01af1bd2dbf80f77f Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Mon, 17 Feb 2020 14:16:52 +0800 Subject: btrfs: relocation: add error injection points for cancelling balance Introduce a new error injection point, should_cancel_balance(). It's just a wrapper around atomic_read(&fs_info->balance_cancel_req), but allows us to override the return value. Currently there is only one location using this function: - btrfs_balance() It checks for cancellation before each block group. There are other locations checking fs_info->balance_cancel_req, but those are not used as an indicator to exit, so there is no need to use the wrapper. But there will be more locations coming, and some locations can cause a kernel panic if not handled properly. So introduce this error injection to provide a better test interface.
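As a rough illustration of why the wrapper is useful, here is a sketch of how a future long-running relocation step might consult it; apart from btrfs_should_cancel_balance() itself, every identifier below is a hypothetical placeholder, not code from this patch:

	while (have_more_work(rc)) {	/* hypothetical loop condition */
		if (btrfs_should_cancel_balance(fs_info)) {
			ret = -ECANCELED;	/* illustrative error code */
			break;
		}
		relocate_next_item(rc);	/* hypothetical worker */
	}

Since every such check funnels through one small function annotated with ALLOW_ERROR_INJECTION(), a test harness can force it to return non-zero without having to race a real cancellation request.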
Reviewed-by: Johannes Thumshirn Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 + fs/btrfs/relocation.c | 10 ++++++++++ fs/btrfs/volumes.c | 2 +- 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ecd016f7dab1..e490cfd70bba 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3378,6 +3378,7 @@ void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending, u64 *bytes_to_reserve); int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, struct btrfs_pending_snapshot *pending); +int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info); /* scrub.c */ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index ba39f6969a72..0238801e6408 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -9,6 +9,7 @@ #include <linux/blkdev.h> #include <linux/rbtree.h> #include <linux/slab.h> +#include <linux/error-injection.h> #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -3314,6 +3315,15 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end, return ret; } +/* + * Allow error injection to test balance cancellation + */ +int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info) +{ + return atomic_read(&fs_info->balance_cancel_req); +} +ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE); + static int relocate_file_extent_cluster(struct inode *inode, struct file_extent_cluster *cluster) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 74c34c388098..c1909e5f4506 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3994,7 +3994,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, if (btrfs_fs_closing(fs_info) || atomic_read(&fs_info->balance_pause_req) || - atomic_read(&fs_info->balance_cancel_req)) { + btrfs_should_cancel_balance(fs_info)) { ret = -EINVAL; goto out; } -- cgit v1.2.3 From 0078a9f941d2a994d756c330f225e888c31c768d Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 10 Mar 2020 11:43:51 +0200 Subject: btrfs: Remove block_rsv parameter from btrfs_drop_snapshot It's no longer used following 30d40577e322 ("btrfs: reloc: Also queue orphan reloc tree for cleanup to avoid BUG_ON()"), so just remove it.
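The signature change, summarized (both prototypes are visible in the ctree.h hunk below):

	/* Before: callers had to thread a block_rsv through, even if NULL. */
	int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
					     struct btrfs_block_rsv *block_rsv,
					     int update_ref, int for_reloc);

	/* After: the unused parameter is gone. */
	int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
					     int update_ref, int for_reloc);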
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 5 ++--- fs/btrfs/extent-tree.c | 9 +-------- fs/btrfs/relocation.c | 4 ++-- fs/btrfs/transaction.c | 4 ++-- 4 files changed, 7 insertions(+), 15 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index e490cfd70bba..af7229eac02b 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2657,9 +2657,8 @@ static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) return btrfs_next_old_item(root, p, 0); } int btrfs_leaf_free_space(struct extent_buffer *leaf); -int __must_check btrfs_drop_snapshot(struct btrfs_root *root, - struct btrfs_block_rsv *block_rsv, - int update_ref, int for_reloc); +int __must_check btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, + int for_reloc); int btrfs_drop_subtree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *node, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a3778937d705..001605b672b8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5288,9 +5288,7 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans, * * If called with for_reloc == 0, may exit early with -EAGAIN */ -int btrfs_drop_snapshot(struct btrfs_root *root, - struct btrfs_block_rsv *block_rsv, int update_ref, - int for_reloc) +int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_path *path; @@ -5329,9 +5327,6 @@ int btrfs_drop_snapshot(struct btrfs_root *root, if (err) goto out_end_trans; - if (block_rsv) - trans->block_rsv = block_rsv; - /* * This will help us catch people modifying the fs tree while we're * dropping it. It is unsafe to mess with the fs tree while it's being @@ -5459,8 +5454,6 @@ int btrfs_drop_snapshot(struct btrfs_root *root, err = PTR_ERR(trans); goto out_free; } - if (block_rsv) - trans->block_rsv = block_rsv; } } btrfs_release_path(path); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 782c9e90fa6f..5dfb7ddbb4ab 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2276,7 +2276,7 @@ static int clean_dirty_subvols(struct reloc_control *rc) root->reloc_root = NULL; if (reloc_root) { - ret2 = btrfs_drop_snapshot(reloc_root, NULL, 0, 1); + ret2 = btrfs_drop_snapshot(reloc_root, 0, 1); if (ret2 < 0 && !ret) ret = ret2; } @@ -2289,7 +2289,7 @@ static int clean_dirty_subvols(struct reloc_control *rc) btrfs_put_root(root); } else { /* Orphan reloc tree, just clean it up */ - ret2 = btrfs_drop_snapshot(root, NULL, 0, 1); + ret2 = btrfs_drop_snapshot(root, 0, 1); if (ret2 < 0 && !ret) ret = ret2; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 096c0aab34ee..5939bca9d5eb 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -2427,9 +2427,9 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root) if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV) - ret = btrfs_drop_snapshot(root, NULL, 0, 0); + ret = btrfs_drop_snapshot(root, 0, 0); else - ret = btrfs_drop_snapshot(root, NULL, 1, 0); + ret = btrfs_drop_snapshot(root, 1, 0); return (ret < 0) ? 
0 : 1; } -- cgit v1.2.3 From a5eeb3d17b979f7afe3ac68fe049ce8b0a039b03 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 9 Mar 2020 12:41:06 +0000 Subject: btrfs: add helper to get the end offset of a file extent item Getting the end offset for a file extent item requires a bit of code since the extent can be either inline or regular/prealloc. There are some places all over the code base that open code this logic and in another patch later in this series it will be needed again. Therefore encapsulate this logic in a helper function and use it. Reviewed-by: Josef Bacik Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 + fs/btrfs/file-item.c | 40 ++++++++++++++++++++++++++++------------ fs/btrfs/inode.c | 10 +--------- fs/btrfs/send.c | 44 +++----------------------------------------- fs/btrfs/tree-log.c | 15 +-------------- 5 files changed, 34 insertions(+), 76 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index af7229eac02b..ebca8e8365ce 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2836,6 +2836,7 @@ int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start, int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start, u64 len); void btrfs_inode_safe_disk_i_size_write(struct inode *inode, u64 new_i_size); +u64 btrfs_file_extent_end(const struct btrfs_path *path); /* inode.c */ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 6c849e8fd5a1..b618ad5339ba 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -1040,18 +1040,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, btrfs_item_key_to_cpu(leaf, &key, slot); extent_start = key.offset; - - if (type == BTRFS_FILE_EXTENT_REG || - type == BTRFS_FILE_EXTENT_PREALLOC) { - extent_end = extent_start + - btrfs_file_extent_num_bytes(leaf, fi); - } else if (type == BTRFS_FILE_EXTENT_INLINE) { - size_t size; - size = btrfs_file_extent_ram_bytes(leaf, fi); - extent_end = ALIGN(extent_start + size, - fs_info->sectorsize); - } - + extent_end = btrfs_file_extent_end(path); em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); if (type == BTRFS_FILE_EXTENT_REG || type == BTRFS_FILE_EXTENT_PREALLOC) { @@ -1098,3 +1087,30 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, root->root_key.objectid); } } + +/* + * Returns the end offset (non inclusive) of the file extent item the given path + * points to. If it points to an inline extent, the returned offset is rounded + * up to the sector size. 
+ */ +u64 btrfs_file_extent_end(const struct btrfs_path *path) +{ + const struct extent_buffer *leaf = path->nodes[0]; + const int slot = path->slots[0]; + struct btrfs_file_extent_item *fi; + struct btrfs_key key; + u64 end; + + btrfs_item_key_to_cpu(leaf, &key, slot); + ASSERT(key.type == BTRFS_EXTENT_DATA_KEY); + fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); + + if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) { + end = btrfs_file_extent_ram_bytes(leaf, fi); + end = ALIGN(key.offset + end, leaf->fs_info->sectorsize); + } else { + end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); + } + + return end; +} diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f85051d0390f..d31543350799 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6521,6 +6521,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, extent_type = btrfs_file_extent_type(leaf, item); extent_start = found_key.offset; + extent_end = btrfs_file_extent_end(path); if (extent_type == BTRFS_FILE_EXTENT_REG || extent_type == BTRFS_FILE_EXTENT_PREALLOC) { /* Only regular file could have regular/prealloc extent */ @@ -6531,18 +6532,9 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, btrfs_ino(inode)); goto out; } - extent_end = extent_start + - btrfs_file_extent_num_bytes(leaf, item); - trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, extent_start); } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { - size_t size; - - size = btrfs_file_extent_ram_bytes(leaf, item); - extent_end = ALIGN(extent_start + size, - fs_info->sectorsize); - trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, path->slots[0], extent_start); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 6b86841315be..e47f768cec3d 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -5586,10 +5586,7 @@ static int get_last_extent(struct send_ctx *sctx, u64 offset) { struct btrfs_path *path; struct btrfs_root *root = sctx->send_root; - struct btrfs_file_extent_item *fi; struct btrfs_key key; - u64 extent_end; - u8 type; int ret; path = alloc_path_for_send(); @@ -5609,18 +5606,7 @@ static int get_last_extent(struct send_ctx *sctx, u64 offset) if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY) goto out; - fi = btrfs_item_ptr(path->nodes[0], path->slots[0], - struct btrfs_file_extent_item); - type = btrfs_file_extent_type(path->nodes[0], fi); - if (type == BTRFS_FILE_EXTENT_INLINE) { - u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi); - extent_end = ALIGN(key.offset + size, - sctx->send_root->fs_info->sectorsize); - } else { - extent_end = key.offset + - btrfs_file_extent_num_bytes(path->nodes[0], fi); - } - sctx->cur_inode_last_extent = extent_end; + sctx->cur_inode_last_extent = btrfs_file_extent_end(path); out: btrfs_free_path(path); return ret; @@ -5674,16 +5660,7 @@ static int range_is_hole_in_parent(struct send_ctx *sctx, break; fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) == - BTRFS_FILE_EXTENT_INLINE) { - u64 size = btrfs_file_extent_ram_bytes(leaf, fi); - - extent_end = ALIGN(key.offset + size, - root->fs_info->sectorsize); - } else { - extent_end = key.offset + - btrfs_file_extent_num_bytes(leaf, fi); - } + extent_end = btrfs_file_extent_end(path); if (extent_end <= start) goto next; if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) { @@ -5704,9 +5681,6 @@ out: static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, struct btrfs_key *key) { - struct 
btrfs_file_extent_item *fi; - u64 extent_end; - u8 type; int ret = 0; if (sctx->cur_ino != key->objectid || !need_send_hole(sctx)) return ret; @@ -5718,18 +5692,6 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, return ret; } - fi = btrfs_item_ptr(path->nodes[0], path->slots[0], - struct btrfs_file_extent_item); - type = btrfs_file_extent_type(path->nodes[0], fi); - if (type == BTRFS_FILE_EXTENT_INLINE) { - u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi); - extent_end = ALIGN(key->offset + size, - sctx->send_root->fs_info->sectorsize); - } else { - extent_end = key->offset + - btrfs_file_extent_num_bytes(path->nodes[0], fi); - } - if (path->slots[0] == 0 && sctx->cur_inode_last_extent < key->offset) { /* @@ -5755,7 +5717,7 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, else ret = 0; } - sctx->cur_inode_last_extent = extent_end; + sctx->cur_inode_last_extent = btrfs_file_extent_end(path); return ret; }
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 19c107be9ef6..b723ee03de26 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4555,9 +4555,7 @@ static int btrfs_log_holes(struct btrfs_trans_handle *trans, return ret; while (true) { - struct btrfs_file_extent_item *extent; struct extent_buffer *leaf = path->nodes[0]; - u64 len; if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_leaf(root, path); @@ -4606,18 +4604,7 @@ static int btrfs_log_holes(struct btrfs_trans_handle *trans, leaf = path->nodes[0]; } - extent = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, extent) == - BTRFS_FILE_EXTENT_INLINE) { - len = btrfs_file_extent_ram_bytes(leaf, extent); - prev_extent_end = ALIGN(key.offset + len, - fs_info->sectorsize); - } else { - len = btrfs_file_extent_num_bytes(leaf, extent); - prev_extent_end = key.offset + len; - } - + prev_extent_end = btrfs_file_extent_end(path); path->slots[0]++; cond_resched(); }
-- cgit v1.2.3
From 3fd6372758d91d8ba801e0733b17d082066a04ef Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 14 Feb 2020 16:11:40 -0500 Subject: btrfs: make the extent buffer leak check per fs info
I'm going to make the entire destruction of btrfs_roots controlled by their refcount, so it will be helpful to notice if we're leaking their ebs on umount.
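The per-filesystem tracking added below follows a standard debug leak-list pattern: link every allocated object into a per-context list under a spinlock, unlink it on free, and walk the list at teardown. A minimal self-contained sketch of that pattern (illustrative names only, not the kernel's actual API):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/printk.h>

/* Illustrative stand-ins for fs_info and extent_buffer. */
struct leak_ctx {
	spinlock_t lock;		/* protects @allocated */
	struct list_head allocated;	/* all live objects */
};

struct tracked {
	struct list_head leak_list;	/* linked into leak_ctx::allocated */
};

static void leak_ctx_init(struct leak_ctx *ctx)
{
	spin_lock_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->allocated);
}

static void leak_track(struct leak_ctx *ctx, struct tracked *t)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	list_add(&t->leak_list, &ctx->allocated);
	spin_unlock_irqrestore(&ctx->lock, flags);
}

static void leak_untrack(struct leak_ctx *ctx, struct tracked *t)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	list_del(&t->leak_list);
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* Called at teardown: anything still on the list was never freed. */
static void leak_check(struct leak_ctx *ctx)
{
	struct tracked *t;

	while (!list_empty(&ctx->allocated)) {
		t = list_first_entry(&ctx->allocated, struct tracked,
				     leak_list);
		pr_err("leaked object %p\n", t);
		list_del(&t->leak_list);
	}
}

Moving the list and lock from file-global statics into fs_info is what lets the check run per filesystem at unmount time instead of only once at module exit.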
Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 3 +++ fs/btrfs/disk-io.c | 3 +++ fs/btrfs/extent_io.c | 45 +++++++++++++++++++++++---------------------- fs/btrfs/extent_io.h | 7 +++++++ 4 files changed, 36 insertions(+), 22 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ebca8e8365ce..f1eef81aaa49 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -949,6 +949,9 @@ struct btrfs_fs_info { struct kobject *debug_kobj; struct kobject *discard_debug_kobj; struct list_head allocated_roots; + + spinlock_t eb_leak_lock; + struct list_head allocated_ebs; #endif }; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6b00ddea0b48..56cad6a51a7d 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1530,6 +1530,7 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) btrfs_put_root(fs_info->free_space_root); btrfs_put_root(fs_info->fs_root); btrfs_check_leaked_roots(fs_info); + btrfs_extent_buffer_leak_debug_check(fs_info); kfree(fs_info->super_copy); kfree(fs_info->super_for_commit); kvfree(fs_info); @@ -2656,6 +2657,8 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) INIT_LIST_HEAD(&fs_info->unused_bgs); #ifdef CONFIG_BTRFS_DEBUG INIT_LIST_HEAD(&fs_info->allocated_roots); + INIT_LIST_HEAD(&fs_info->allocated_ebs); + spin_lock_init(&fs_info->eb_leak_lock); #endif extent_map_tree_init(&fs_info->mapping_tree); btrfs_init_block_rsv(&fs_info->global_block_rsv, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 837262d54e28..36af71040974 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -35,42 +35,45 @@ static inline bool extent_state_in_tree(const struct extent_state *state) } #ifdef CONFIG_BTRFS_DEBUG -static LIST_HEAD(buffers); static LIST_HEAD(states); - static DEFINE_SPINLOCK(leak_lock); -static inline -void btrfs_leak_debug_add(struct list_head *new, struct list_head *head) +static inline void btrfs_leak_debug_add(spinlock_t *lock, + struct list_head *new, + struct list_head *head) { unsigned long flags; - spin_lock_irqsave(&leak_lock, flags); + spin_lock_irqsave(lock, flags); list_add(new, head); - spin_unlock_irqrestore(&leak_lock, flags); + spin_unlock_irqrestore(lock, flags); } -static inline -void btrfs_leak_debug_del(struct list_head *entry) +static inline void btrfs_leak_debug_del(spinlock_t *lock, + struct list_head *entry) { unsigned long flags; - spin_lock_irqsave(&leak_lock, flags); + spin_lock_irqsave(lock, flags); list_del(entry); - spin_unlock_irqrestore(&leak_lock, flags); + spin_unlock_irqrestore(lock, flags); } -static inline void btrfs_extent_buffer_leak_debug_check(void) +void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info) { struct extent_buffer *eb; + unsigned long flags; - while (!list_empty(&buffers)) { - eb = list_entry(buffers.next, struct extent_buffer, leak_list); + spin_lock_irqsave(&fs_info->eb_leak_lock, flags); + while (!list_empty(&fs_info->allocated_ebs)) { + eb = list_first_entry(&fs_info->allocated_ebs, + struct extent_buffer, leak_list); pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n", eb->start, eb->len, atomic_read(&eb->refs), eb->bflags); list_del(&eb->leak_list); kmem_cache_free(extent_buffer_cache, eb); } + spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags); } static inline void btrfs_extent_state_leak_debug_check(void) @@ -107,9 +110,8 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller, } } #else -#define btrfs_leak_debug_add(new, 
head) do {} while (0) -#define btrfs_leak_debug_del(entry) do {} while (0) -#define btrfs_extent_buffer_leak_debug_check() do {} while (0) +#define btrfs_leak_debug_add(lock, new, head) do {} while (0) +#define btrfs_leak_debug_del(lock, entry) do {} while (0) #define btrfs_extent_state_leak_debug_check() do {} while (0) #define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0) #endif @@ -245,8 +247,6 @@ void __cold extent_state_cache_exit(void) void __cold extent_io_exit(void) { - btrfs_extent_buffer_leak_debug_check(); - /* * Make sure all delayed rcu free are flushed before we * destroy caches. @@ -324,7 +324,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask) state->state = 0; state->failrec = NULL; RB_CLEAR_NODE(&state->rb_node); - btrfs_leak_debug_add(&state->leak_list, &states); + btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states); refcount_set(&state->refs, 1); init_waitqueue_head(&state->wq); trace_alloc_extent_state(state, mask, _RET_IP_); @@ -337,7 +337,7 @@ void free_extent_state(struct extent_state *state) return; if (refcount_dec_and_test(&state->refs)) { WARN_ON(extent_state_in_tree(state)); - btrfs_leak_debug_del(&state->leak_list); + btrfs_leak_debug_del(&leak_lock, &state->leak_list); trace_free_extent_state(state, _RET_IP_); kmem_cache_free(extent_state_cache, state); } @@ -4875,7 +4875,7 @@ out_free_ulist: static void __free_extent_buffer(struct extent_buffer *eb) { - btrfs_leak_debug_del(&eb->leak_list); + btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list); kmem_cache_free(extent_buffer_cache, eb); } @@ -4962,7 +4962,8 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, init_waitqueue_head(&eb->write_lock_wq); init_waitqueue_head(&eb->read_lock_wq); - btrfs_leak_debug_add(&eb->leak_list, &buffers); + btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list, + &fs_info->allocated_ebs); spin_lock_init(&eb->refs_lock); atomic_set(&eb->refs, 1); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 234622101230..2ed65bd0760e 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -325,4 +325,11 @@ bool find_lock_delalloc_range(struct inode *inode, #endif struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, u64 start); + +#ifdef CONFIG_BTRFS_DEBUG +void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info); +#else +#define btrfs_extent_buffer_leak_debug_check(fs_info) do {} while (0) +#endif + #endif -- cgit v1.2.3 From c75e839414d3610e6487ae3145199c500d55f7f7 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 14 Feb 2020 16:11:47 -0500 Subject: btrfs: kill the subvol_srcu Now that we have proper root ref counting everywhere we can kill the subvol_srcu. 
* removal of fs_info::subvol_srcu reduces size of fs_info by 1176 bytes
* the refcount_t used for the references checks for accidental 0->1 in cases where the root lifetime would not be properly protected
* there's a leak detector for roots to catch unfreed roots at umount time
* SRCU served us well over the years but it was not a proper synchronization mechanism for some cases
Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ update changelog ] Signed-off-by: David Sterba
--- fs/btrfs/backref.c | 12 +----------- fs/btrfs/ctree.h | 1 - fs/btrfs/disk-io.c | 37 +++++++++---------------------------- fs/btrfs/export.c | 21 ++++----------------- fs/btrfs/file.c | 5 ----- fs/btrfs/inode.c | 3 --- fs/btrfs/send.c | 14 -------------- fs/btrfs/tests/btrfs-tests.c | 9 --------- 8 files changed, 14 insertions(+), 88 deletions(-) (limited to 'fs/btrfs/ctree.h')
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index f2728fb3ee8f..cd2d39b60be0 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -542,24 +542,19 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info, int ret = 0; int root_level; int level = ref->level; - int index; struct btrfs_key search_key = ref->key_for_search; root_key.objectid = ref->root_id; root_key.type = BTRFS_ROOT_ITEM_KEY; root_key.offset = (u64)-1; - index = srcu_read_lock(&fs_info->subvol_srcu); - root = btrfs_get_fs_root(fs_info, &root_key, false); if (IS_ERR(root)) { - srcu_read_unlock(&fs_info->subvol_srcu, index); ret = PTR_ERR(root); goto out_free; } if (btrfs_is_testing(fs_info)) { - srcu_read_unlock(&fs_info->subvol_srcu, index); ret = -ENOENT; goto out; } @@ -571,10 +566,8 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info, else root_level = btrfs_old_root_level(root, time_seq); - if (root_level + 1 == level) { - srcu_read_unlock(&fs_info->subvol_srcu, index); + if (root_level + 1 == level) goto out; - } /* * We can often find data backrefs with an offset that is too large @@ -604,9 +597,6 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info, else ret = btrfs_search_old_slot(root, &search_key, path, time_seq); - /* root node has been locked, we can release @subvol_srcu safely here */ - srcu_read_unlock(&fs_info->subvol_srcu, index); - btrfs_debug(fs_info, "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)", ref->root_id, level, ref->count, ret,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f1eef81aaa49..8aa7b9dac405 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -697,7 +697,6 @@ struct btrfs_fs_info { struct rw_semaphore cleanup_work_sem; struct rw_semaphore subvol_sem; - struct srcu_struct subvol_srcu; spinlock_t trans_lock; /*
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4d3eba909664..a6cb5cbbdb9f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2757,46 +2757,33 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); - ret = init_srcu_struct(&fs_info->subvol_srcu); - if (ret) - return ret; - ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL); if (ret) - goto fail; + return ret; ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); if (ret) - goto fail; + return ret; fs_info->dirty_metadata_batch = PAGE_SIZE * (1 + ilog2(nr_cpu_ids)); ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); if (ret) - goto fail; + return ret; ret =
percpu_counter_init(&fs_info->dev_replace.bio_counter, 0, GFP_KERNEL); if (ret) - goto fail; + return ret; fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), GFP_KERNEL); - if (!fs_info->delayed_root) { - ret = -ENOMEM; - goto fail; - } + if (!fs_info->delayed_root) + return -ENOMEM; btrfs_init_delayed_root(fs_info->delayed_root); - ret = btrfs_alloc_stripe_hash_table(fs_info); - if (ret) - goto fail; - - return 0; -fail: - cleanup_srcu_struct(&fs_info->subvol_srcu); - return ret; + return btrfs_alloc_stripe_hash_table(fs_info); } static int btrfs_uuid_rescan_kthread(void *data) @@ -2870,13 +2857,13 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device fs_info->chunk_root = chunk_root; if (!tree_root || !chunk_root) { err = -ENOMEM; - goto fail_srcu; + goto fail; } fs_info->btree_inode = new_inode(sb); if (!fs_info->btree_inode) { err = -ENOMEM; - goto fail_srcu; + goto fail; } mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); btrfs_init_btree_inode(fs_info); @@ -3398,8 +3385,6 @@ fail_alloc: btrfs_mapping_tree_free(&fs_info->mapping_tree); iput(fs_info->btree_inode); -fail_srcu: - cleanup_srcu_struct(&fs_info->subvol_srcu); fail: btrfs_close_devices(fs_info->fs_devices); return err; @@ -3902,9 +3887,6 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, drop_ref = true; spin_unlock(&fs_info->fs_roots_radix_lock); - if (btrfs_root_refs(&root->root_item) == 0) - synchronize_srcu(&fs_info->subvol_srcu); - if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { btrfs_free_log(NULL, root); if (root->reloc_root) { @@ -4116,7 +4098,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) btrfs_mapping_tree_free(&fs_info->mapping_tree); btrfs_close_devices(fs_info->fs_devices); - cleanup_srcu_struct(&fs_info->subvol_srcu); } int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 25bd4317bf5a..2bb25d2dc44b 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -65,8 +65,6 @@ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, struct btrfs_root *root; struct inode *inode; struct btrfs_key key; - int index; - int err = 0; if (objectid < BTRFS_FIRST_FREE_OBJECTID) return ERR_PTR(-ESTALE); @@ -75,13 +73,9 @@ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, key.type = BTRFS_ROOT_ITEM_KEY; key.offset = (u64)-1; - index = srcu_read_lock(&fs_info->subvol_srcu); - root = btrfs_get_fs_root(fs_info, &key, true); - if (IS_ERR(root)) { - err = PTR_ERR(root); - goto fail; - } + if (IS_ERR(root)) + return ERR_CAST(root); key.objectid = objectid; key.type = BTRFS_INODE_ITEM_KEY; @@ -89,12 +83,8 @@ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, inode = btrfs_iget(sb, &key, root); btrfs_put_root(root); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto fail; - } - - srcu_read_unlock(&fs_info->subvol_srcu, index); + if (IS_ERR(inode)) + return ERR_CAST(inode); if (check_generation && generation != inode->i_generation) { iput(inode); @@ -102,9 +92,6 @@ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, } return d_obtain_alias(inode); -fail: - srcu_read_unlock(&fs_info->subvol_srcu, index); - return ERR_PTR(err); } static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh, diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 18c88f514a0d..20107f42a766 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -278,7 +278,6 @@ static int 
__btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, struct btrfs_key key; struct btrfs_ioctl_defrag_range_args range; int num_defrag; - int index; int ret; /* get the inode */ @@ -286,8 +285,6 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, key.type = BTRFS_ROOT_ITEM_KEY; key.offset = (u64)-1; - index = srcu_read_lock(&fs_info->subvol_srcu); - inode_root = btrfs_get_fs_root(fs_info, &key, true); if (IS_ERR(inode_root)) { ret = PTR_ERR(inode_root); @@ -303,7 +300,6 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, ret = PTR_ERR(inode); goto cleanup; } - srcu_read_unlock(&fs_info->subvol_srcu, index); /* do a chunk of defrag */ clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); @@ -339,7 +335,6 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, iput(inode); return 0; cleanup: - srcu_read_unlock(&fs_info->subvol_srcu, index); kmem_cache_free(btrfs_inode_defrag_cachep, defrag); return ret; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 683bb20c1d0f..320d1062068d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5364,7 +5364,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) struct btrfs_root *sub_root = root; struct btrfs_key location; u8 di_type = 0; - int index; int ret = 0; if (dentry->d_name.len > BTRFS_NAME_LEN) @@ -5391,7 +5390,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) return inode; } - index = srcu_read_lock(&fs_info->subvol_srcu); ret = fixup_tree_root_location(fs_info, dir, dentry, &location, &sub_root); if (ret < 0) { @@ -5404,7 +5402,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) } if (root != sub_root) btrfs_put_root(sub_root); - srcu_read_unlock(&fs_info->subvol_srcu, index); if (!IS_ERR(inode) && root != sub_root) { down_read(&fs_info->cleanup_work_sem); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index e47f768cec3d..c5f41bd86765 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -7028,7 +7028,6 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) int clone_sources_to_rollback = 0; unsigned alloc_size; int sort_clone_roots = 0; - int index; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -7155,11 +7154,8 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) key.type = BTRFS_ROOT_ITEM_KEY; key.offset = (u64)-1; - index = srcu_read_lock(&fs_info->subvol_srcu); - clone_root = btrfs_get_fs_root(fs_info, &key, true); if (IS_ERR(clone_root)) { - srcu_read_unlock(&fs_info->subvol_srcu, index); ret = PTR_ERR(clone_root); goto out; } @@ -7168,7 +7164,6 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) btrfs_root_dead(clone_root)) { spin_unlock(&clone_root->root_item_lock); btrfs_put_root(clone_root); - srcu_read_unlock(&fs_info->subvol_srcu, index); ret = -EPERM; goto out; } @@ -7176,13 +7171,11 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) dedupe_in_progress_warn(clone_root); spin_unlock(&clone_root->root_item_lock); btrfs_put_root(clone_root); - srcu_read_unlock(&fs_info->subvol_srcu, index); ret = -EAGAIN; goto out; } clone_root->send_in_progress++; spin_unlock(&clone_root->root_item_lock); - srcu_read_unlock(&fs_info->subvol_srcu, index); sctx->clone_roots[i].root = clone_root; clone_sources_to_rollback = i + 1; @@ -7196,11 +7189,8 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) key.type = BTRFS_ROOT_ITEM_KEY; key.offset = 
(u64)-1; - index = srcu_read_lock(&fs_info->subvol_srcu); - sctx->parent_root = btrfs_get_fs_root(fs_info, &key, true); if (IS_ERR(sctx->parent_root)) { - srcu_read_unlock(&fs_info->subvol_srcu, index); ret = PTR_ERR(sctx->parent_root); goto out; } @@ -7210,20 +7200,16 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) if (!btrfs_root_readonly(sctx->parent_root) || btrfs_root_dead(sctx->parent_root)) { spin_unlock(&sctx->parent_root->root_item_lock); - srcu_read_unlock(&fs_info->subvol_srcu, index); ret = -EPERM; goto out; } if (sctx->parent_root->dedupe_in_progress) { dedupe_in_progress_warn(sctx->parent_root); spin_unlock(&sctx->parent_root->root_item_lock); - srcu_read_unlock(&fs_info->subvol_srcu, index); ret = -EAGAIN; goto out; } spin_unlock(&sctx->parent_root->root_item_lock); - - srcu_read_unlock(&fs_info->subvol_srcu, index); } /* diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 42e62fd2809c..999c14e5d0bd 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -134,14 +134,6 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize) fs_info->nodesize = nodesize; fs_info->sectorsize = sectorsize; - - if (init_srcu_struct(&fs_info->subvol_srcu)) { - kfree(fs_info->fs_devices); - kfree(fs_info->super_copy); - kfree(fs_info); - return NULL; - } - set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state); test_mnt->mnt_sb->s_fs_info = fs_info; @@ -191,7 +183,6 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info) } btrfs_free_qgroup_config(fs_info); btrfs_free_fs_roots(fs_info); - cleanup_srcu_struct(&fs_info->subvol_srcu); kfree(fs_info->super_copy); btrfs_check_leaked_roots(fs_info); btrfs_extent_buffer_leak_debug_check(fs_info); -- cgit v1.2.3
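To make the shape of this conversion concrete, a hedged sketch of the new lookup pattern (hypothetical caller; the real conversions are spread across the hunks above): rather than holding an SRCU read-side section to keep the root alive, callers now hold the reference returned by btrfs_get_fs_root() and drop it with btrfs_put_root() when done.

/*
 * Hypothetical caller, for illustration only: look up an inode in a
 * subvolume. btrfs_get_fs_root() returns a referenced root, so no
 * srcu_read_lock()/srcu_read_unlock() bracket is needed around the use
 * of the root pointer.
 */
static struct inode *lookup_inode_in_subvol(struct btrfs_fs_info *fs_info,
					    struct btrfs_key *root_key,
					    struct btrfs_key *ino_key)
{
	struct btrfs_root *root;
	struct inode *inode;

	root = btrfs_get_fs_root(fs_info, root_key, true); /* takes a ref */
	if (IS_ERR(root))
		return ERR_CAST(root);

	inode = btrfs_iget(fs_info->sb, ino_key, root);
	btrfs_put_root(root);	/* the inode pins what it needs; drop ours */

	return inode;
}

This mirrors the updated btrfs_get_dentry() in export.c above, where the error paths simply return instead of jumping to an srcu_read_unlock() label.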