author    Josef Bacik <josef@toxicpanda.com>	2019-06-20 15:37:54 -0400
committer David Sterba <dsterba@suse.com>	2019-09-09 14:59:08 +0200
commit    3b2a78f21d5c53ff34b8e03cba4f904c91d4b3a2 (patch)
tree      51cbbf39ec899933cec45903c9783220f98828c5 /fs/btrfs
parent    9f21246d8c7efb940b96098cb556bfe86205fbed (diff)
btrfs: temporarily export inc_block_group_ro
This is used in a few logical parts of the block group code; temporarily export it so we can move things in pieces.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
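For readers following the refactor from outside the btrfs tree, here is a minimal userspace sketch of the "temporary export" pattern this patch applies. The file and symbol names (counter.h, counter.c, __counter_inc) are hypothetical stand-ins, not anything from the kernel; only the shape mirrors the change: drop the static qualifier, give the symbol a prefixed internal name (the double underscore mirrors the kernel convention), and publish a prototype in the shared header so existing callers keep working while code migrates between files. The three small files are shown in one listing.

/* counter.h -- stands in for block-group.h (hypothetical example) */
#ifndef COUNTER_H
#define COUNTER_H

/*
 * Temporarily exported: this helper used to be a static function inside
 * counter.c.  The prefixed name marks it as internal plumbing that will
 * get a proper public wrapper once the move is finished.
 */
int __counter_inc(int *counter, int force);

#endif /* COUNTER_H */

/* counter.c -- the helper keeps its body; only its linkage and name change */
#include "counter.h"

/* was: static int counter_inc(int *counter, int force) */
int __counter_inc(int *counter, int force)
{
	if (!force && *counter >= 10)
		return -1;	/* refuse unless the caller forces it */
	(*counter)++;
	return 0;
}

/* main.c -- callers simply switch to the renamed symbol */
#include <stdio.h>
#include "counter.h"

int main(void)
{
	int c = 0;

	if (__counter_inc(&c, 0) == 0)
		printf("counter is now %d\n", c);
	return 0;
}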
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/block-group.h	 2
-rw-r--r--	fs/btrfs/extent-tree.c	14
2 files changed, 9 insertions(+), 7 deletions(-)
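The second extent-tree.c hunk below also shows the calling pattern that travels with the exported helper: try the non-forced read-only flip first, and only if that fails allocate a fresh chunk and retry. The following self-contained sketch paraphrases that flow with hypothetical stand-in functions, not the real btrfs API: try_set_ro() plays the role of the non-forced __btrfs_inc_block_group_ro(cache, 0) call, and alloc_chunk() plays the role of the forced btrfs_chunk_alloc() call.

#include <stdio.h>

/* Hypothetical stand-ins: try_set_ro() fails while there is no spare
 * space; alloc_chunk() makes room so a retry can succeed. */
static int spare_chunks = 0;

static int try_set_ro(void)
{
	if (spare_chunks < 1)
		return -1;	/* flipping read-only would overcommit */
	return 0;		/* ok: the group can go read-only */
}

static int alloc_chunk(void)
{
	spare_chunks++;		/* pretend a new chunk was allocated */
	return 0;
}

/* Same shape as the hunk: attempt, make room on failure, attempt again. */
static int make_read_only(void)
{
	int ret;

	ret = try_set_ro();
	if (!ret)
		return 0;
	ret = alloc_chunk();
	if (ret < 0)
		return ret;
	return try_set_ro();
}

int main(void)
{
	printf("make_read_only() -> %d\n", make_read_only());
	return 0;
}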
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 80b388ece277..143baaa54684 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -185,4 +185,6 @@ static inline int btrfs_block_group_cache_done(
 		cache->cached == BTRFS_CACHE_ERROR;
 }
 
+int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force);
+
 #endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4b352325ff7f..08bd67169590 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6697,7 +6697,7 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
  * data in this block group. That check should be done by relocation routine,
  * not this function.
  */
-static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
+int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
 {
 	struct btrfs_space_info *sinfo = cache->space_info;
 	u64 num_bytes;
@@ -6807,14 +6807,14 @@ again:
 		goto out;
 	}
 
-	ret = inc_block_group_ro(cache, 0);
+	ret = __btrfs_inc_block_group_ro(cache, 0);
 	if (!ret)
 		goto out;
 	alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
 	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
 	if (ret < 0)
 		goto out;
-	ret = inc_block_group_ro(cache, 0);
+	ret = __btrfs_inc_block_group_ro(cache, 0);
 out:
 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
 		alloc_flags = update_block_group_flags(fs_info, cache->flags);
@@ -7347,7 +7347,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 
 		set_avail_alloc_bits(info, cache->flags);
 		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
-			inc_block_group_ro(cache, 1);
+			__btrfs_inc_block_group_ro(cache, 1);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
 			ASSERT(list_empty(&cache->bg_list));
 			btrfs_mark_bg_unused(cache);
@@ -7368,11 +7368,11 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		list_for_each_entry(cache,
 				&space_info->block_groups[BTRFS_RAID_RAID0],
 				list)
-			inc_block_group_ro(cache, 1);
+			__btrfs_inc_block_group_ro(cache, 1);
 		list_for_each_entry(cache,
 				&space_info->block_groups[BTRFS_RAID_SINGLE],
 				list)
-			inc_block_group_ro(cache, 1);
+			__btrfs_inc_block_group_ro(cache, 1);
 	}
 
 	btrfs_init_global_block_rsv(info);
@@ -7911,7 +7911,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		spin_unlock(&block_group->lock);
 
 		/* We don't want to force the issue, only flip if it's ok. */
-		ret = inc_block_group_ro(block_group, 0);
+		ret = __btrfs_inc_block_group_ro(block_group, 0);
 		up_write(&space_info->groups_sem);
 		if (ret < 0) {
 			ret = 0;