author    Qu Wenruo <wqu@suse.com>    2019-11-05 09:35:35 +0800
committer David Sterba <dsterba@suse.com>    2019-11-18 17:51:50 +0100
commit    d49a2ddb1568d533ef3b77841a3ea1ff5bfa8b98 (patch)
tree      6f86031fd5bc66c684ebf0ea5b3447fb0c52c1c1 /fs/btrfs
parent    ffb9e0f05fab5f7fe62c5d6964825e59e91be0db (diff)
btrfs: block-group: Reuse the item key from caller of read_one_block_group()
For read_one_block_group(), its only caller has already looked up the item key used to search for the next block group item, so we can use that key directly instead of doing our own conversion on the stack.

Also, since the key used in btrfs_read_block_groups() is vital for the block group item search, add the 'const' keyword to that parameter to prevent read_one_block_group() from modifying it.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
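To illustrate the pattern outside the kernel tree, here is a minimal, self-contained C sketch of the same refactor: the caller decodes the lookup key once and hands the callee a const pointer, instead of the callee re-deriving the key on its own stack. All names here (item_key, process_block_group, read_groups, BLOCK_GROUP_ITEM_KEY's value) are hypothetical stand-ins, not the btrfs code.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for struct btrfs_key. */
struct item_key {
	unsigned long long objectid;
	unsigned char type;
	unsigned long long offset;
};

#define BLOCK_GROUP_ITEM_KEY 192	/* illustrative value only */

/*
 * Callee: takes the key the caller already decoded. The 'const'
 * qualifier guarantees this function cannot modify the caller's
 * key, which the caller still needs to advance its search.
 */
static int process_block_group(const struct item_key *key)
{
	assert(key->type == BLOCK_GROUP_ITEM_KEY);
	printf("block group at %llu, length %llu\n",
	       key->objectid, key->offset);
	return 0;
}

/* Caller: decodes the key once and passes it down by pointer. */
static void read_groups(void)
{
	struct item_key key = {
		.objectid = 1024 * 1024,
		.type = BLOCK_GROUP_ITEM_KEY,
		.offset = 8 * 1024 * 1024,	/* block group length */
	};

	for (int i = 0; i < 3; i++) {
		process_block_group(&key);
		key.objectid += key.offset;	/* next block group start */
	}
}

int main(void)
{
	read_groups();
	return 0;
}

Passing the key by const pointer both removes the redundant decode and documents ownership: the key is the caller's loop state, and the callee may only read it.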
Diffstat (limited to 'fs/btrfs')
-rw-r--r--    fs/btrfs/block-group.c    17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 1bde24d336ca..3ed853acfa05 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1688,21 +1688,20 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
static int read_one_block_group(struct btrfs_fs_info *info,
struct btrfs_path *path,
+ const struct btrfs_key *key,
int need_clear)
{
struct extent_buffer *leaf = path->nodes[0];
struct btrfs_block_group_cache *cache;
struct btrfs_space_info *space_info;
- struct btrfs_key key;
struct btrfs_block_group_item bgi;
const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
int slot = path->slots[0];
int ret;

- btrfs_item_key_to_cpu(leaf, &key, slot);
- ASSERT(key.type == BTRFS_BLOCK_GROUP_ITEM_KEY);
+ ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);

- cache = btrfs_create_block_group_cache(info, key.objectid, key.offset);
+ cache = btrfs_create_block_group_cache(info, key->objectid, key->offset);
if (!cache)
return -ENOMEM;
@@ -1751,15 +1750,15 @@ static int read_one_block_group(struct btrfs_fs_info *info,
* are empty, and we can just add all the space in and be done with it.
* This saves us _a_lot_ of time, particularly in the full case.
*/
- if (key.offset == cache->used) {
+ if (key->offset == cache->used) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
btrfs_free_excluded_extents(cache);
} else if (cache->used == 0) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
- add_new_free_space(cache, key.objectid,
- key.objectid + key.offset);
+ add_new_free_space(cache, key->objectid,
+ key->objectid + key->offset);
btrfs_free_excluded_extents(cache);
}

@@ -1769,7 +1768,7 @@ static int read_one_block_group(struct btrfs_fs_info *info,
goto error;
}
trace_btrfs_add_block_group(info, cache, 0);
- btrfs_update_space_info(info, cache->flags, key.offset,
+ btrfs_update_space_info(info, cache->flags, key->offset,
cache->used, cache->bytes_super, &space_info);

cache->space_info = space_info;
@@ -1822,7 +1821,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
goto error;

btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- ret = read_one_block_group(info, path, need_clear);
+ ret = read_one_block_group(info, path, &key, need_clear);
if (ret < 0)
goto error;
key.objectid += key.offset;