path: root/fs/btrfs/zoned.h
author     Linus Torvalds <torvalds@linux-foundation.org>  2022-01-11 14:53:40 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-01-11 14:53:40 -0800
commit     d601e58c5f2901783428bc1181e83ff783592b6b (patch)
tree       c8c10d1863b0649cec8c1dee5755a3491683e7be /fs/btrfs/zoned.h
parent     9149fe8ba7ff798ea1c6b1fa05eeb59f95f9a94a (diff)
parent     36c86a9e1be3b29f9f075a946df55dfe1d818019 (diff)
download   linux-d601e58c5f2901783428bc1181e83ff783592b6b.tar.bz2
Merge tag 'for-5.17-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
"This end of the year branch is intentionally not that exciting. Most of the changes are under the hood, but there are some minor user visible improvements and several performance improvements too.

Features:

- make send work with concurrent block group relocation. We're not allowed to prevent send failing or silently producing some bad stream but with more fine grained locking and checks it's possible. The send vs deduplication exclusion could reuse the same logic in the future.

- new exclusive operation 'balance paused' to allow adding a device to filesystem with paused balance

- new sysfs file for fsid stored in the per-device directory to help distinguish devices when seeding is enabled, the fsid may differ from the one reported by the filesystem

Performance improvements:

- less metadata needed for directory logging, directory deletion is 20-40% faster

- in zoned mode, cache zone information during mount to speed up repeated queries (about 50% speedup)

- free space tree entries get indexed and searched by size (latency -30%, search run time -30%)

- less contention in tree node locking when inserting a key and no splits are needed (files/sec in fsmark improves by 1-20%)

Fixes:

- fix ENOSPC failure when attempting direct IO write into NOCOW range

- fix deadlock between quota enable and other quota operations

- global reserve minimum calculations fixed to account for free space tree

- in zoned mode, fix condition for chunk allocation that may not find the right zone for reuse and could lead to early ENOSPC

Core:

- global reserve stealing got simplified and cleaned up in evict

- remove async transaction commit based on manual transaction refs, reuse existing kthread and mechanisms to let it commit transaction before timeout

- preparatory work for extent tree v2, add wrappers for global tree roots, truncation path cleanups

- remove readahead framework, it's a bit overengineered and used only for scrub, and yet it does not cover all its needs, there is another readahead built in the b-tree search that is now used, performance drop on HDD is about 5% which is acceptable and scrub is often throttled anyway, on SSDs there's no reported drop but slight improvement

- self tests report extent tree state when error occurs

- replace assert with debugging information when an uncommitted transaction is found at unmount time

Other:

- error handling improvements

- other cleanups and refactoring"

* tag 'for-5.17-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (115 commits)
  btrfs: output more debug messages for uncommitted transaction
  btrfs: respect the max size in the header when activating swap file
  btrfs: fix argument list that the kdoc format and script verified
  btrfs: remove unnecessary parameter type from compression_decompress_bio
  btrfs: selftests: dump extent io tree if extent-io-tree test failed
  btrfs: scrub: cleanup the argument list of scrub_stripe()
  btrfs: scrub: cleanup the argument list of scrub_chunk()
  btrfs: remove reada infrastructure
  btrfs: scrub: use btrfs_path::reada for extent tree readahead
  btrfs: scrub: remove the unnecessary path parameter for scrub_raid56_parity()
  btrfs: refactor unlock_up
  btrfs: skip transaction commit after failure to create subvolume
  btrfs: zoned: fix chunk allocation condition for zoned allocator
  btrfs: add extent allocator hook to decide to allocate chunk or not
  btrfs: zoned: unset dedicated block group on allocation failure
  btrfs: zoned: drop redundant check for REQ_OP_ZONE_APPEND and btrfs_is_zoned
  btrfs: zoned: sink zone check into btrfs_repair_one_zone
  btrfs: zoned: simplify btrfs_check_meta_write_pointer
  btrfs: zoned: encapsulate inode locking for zoned relocation
  btrfs: sysfs: add devinfo/fsid to retrieve actual fsid from the device
  ...
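The performance item "in zoned mode, cache zone information during mount" above corresponds to the new zone_cache field and populate_cache parameter visible in the diff below. As a rough illustration of the pattern only, here is a minimal user-space C sketch: every name in it (zone_rec, zone_cache_populate, zone_cache_lookup, device_report_zone) is hypothetical and it is not the btrfs implementation; it simply shows "query every zone once up front, answer later lookups from memory".

/*
 * Hypothetical sketch of the zone-info caching pattern: pay the expensive
 * per-zone device query once, then serve repeated queries from an array.
 */
#include <stdio.h>
#include <stdlib.h>

struct zone_rec {
	unsigned long long start;	/* zone start, in 512-byte sectors */
	unsigned long long wp;		/* current write pointer */
	int seq;			/* 1 = sequential-write-required zone */
};

struct zone_cache {
	struct zone_rec *zones;
	unsigned int nr_zones;
};

/* Stand-in for the expensive per-zone device query (e.g. a report-zones command). */
static struct zone_rec device_report_zone(unsigned int idx)
{
	struct zone_rec z = {
		.start = (unsigned long long)idx * 256ULL * 2048ULL,
		.wp    = (unsigned long long)idx * 256ULL * 2048ULL,
		.seq   = 1,
	};
	return z;
}

/* Done once at "mount" time: the device is queried a single time per zone. */
static int zone_cache_populate(struct zone_cache *c, unsigned int nr_zones)
{
	c->zones = calloc(nr_zones, sizeof(*c->zones));
	if (!c->zones)
		return -1;
	c->nr_zones = nr_zones;
	for (unsigned int i = 0; i < nr_zones; i++)
		c->zones[i] = device_report_zone(i);
	return 0;
}

/* Repeated lookups become a plain in-memory read. */
static const struct zone_rec *zone_cache_lookup(const struct zone_cache *c,
						unsigned int idx)
{
	return idx < c->nr_zones ? &c->zones[idx] : NULL;
}

int main(void)
{
	struct zone_cache cache = { 0 };

	if (zone_cache_populate(&cache, 8))
		return 1;
	const struct zone_rec *z = zone_cache_lookup(&cache, 3);
	if (z)
		printf("zone 3: start=%llu wp=%llu seq=%d\n", z->start, z->wp, z->seq);
	free(cache.zones);		/* analogous to freeing the cache after mount */
	return 0;
}

The trade-off this sketches is the one named in the pull message: a one-time cost at mount in exchange for avoiding repeated device round-trips later, with the cache freed once it is no longer needed (compare btrfs_free_zone_cache() in the header diff).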
Diffstat (limited to 'fs/btrfs/zoned.h')
-rw-r--r--  fs/btrfs/zoned.h  30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index e53ab7b96437..cbf016a7bb5d 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -8,6 +8,7 @@
#include "volumes.h"
#include "disk-io.h"
#include "block-group.h"
+#include "btrfs_inode.h"
/*
* Block groups with more than this value (percents) of unusable space will be
@@ -28,6 +29,7 @@ struct btrfs_zoned_device_info {
unsigned long *seq_zones;
unsigned long *empty_zones;
unsigned long *active_zones;
+ struct blk_zone *zone_cache;
struct blk_zone sb_zones[2 * BTRFS_SUPER_MIRROR_MAX];
};
@@ -35,7 +37,7 @@ struct btrfs_zoned_device_info {
int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
struct blk_zone *zone);
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info);
-int btrfs_get_dev_zone_info(struct btrfs_device *device);
+int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache);
void btrfs_destroy_dev_zone_info(struct btrfs_device *device);
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info);
@@ -71,11 +73,11 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
u64 logical, u64 length);
bool btrfs_zone_activate(struct btrfs_block_group *block_group);
int btrfs_zone_finish(struct btrfs_block_group *block_group);
-bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices,
- int raid_index);
+bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags);
void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
u64 length);
void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg);
+void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info);
#else /* CONFIG_BLK_DEV_ZONED */
static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
struct blk_zone *zone)
@@ -88,7 +90,8 @@ static inline int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_i
return 0;
}
-static inline int btrfs_get_dev_zone_info(struct btrfs_device *device)
+static inline int btrfs_get_dev_zone_info(struct btrfs_device *device,
+ bool populate_cache)
{
return 0;
}
@@ -222,7 +225,7 @@ static inline int btrfs_zone_finish(struct btrfs_block_group *block_group)
}
static inline bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices,
- int raid_index)
+ u64 flags)
{
return true;
}
@@ -232,6 +235,7 @@ static inline void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info,
static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { }
+static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { }
#endif
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
@@ -350,4 +354,20 @@ static inline void btrfs_clear_treelog_bg(struct btrfs_block_group *bg)
spin_unlock(&fs_info->treelog_bg_lock);
}
+static inline void btrfs_zoned_data_reloc_lock(struct btrfs_inode *inode)
+{
+ struct btrfs_root *root = inode->root;
+
+ if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info))
+ btrfs_inode_lock(&inode->vfs_inode, 0);
+}
+
+static inline void btrfs_zoned_data_reloc_unlock(struct btrfs_inode *inode)
+{
+ struct btrfs_root *root = inode->root;
+
+ if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info))
+ btrfs_inode_unlock(&inode->vfs_inode, 0);
+}
+
#endif
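The last hunk adds btrfs_zoned_data_reloc_lock()/btrfs_zoned_data_reloc_unlock(), which take the inode lock only when the inode belongs to the data relocation root on a zoned filesystem, so the common case stays lock-free. Below is a small stand-alone C sketch of that conditional-locking pattern only; it uses a pthread mutex in place of btrfs_inode_lock(), and every type and helper in it is invented for illustration, not the kernel API.

/*
 * Stand-alone illustration of "take the lock only when both conditions
 * hold", as in btrfs_zoned_data_reloc_lock()/unlock() above.  All types
 * here are made up for the example.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_fs_info {
	bool zoned;			/* stands in for btrfs_is_zoned() */
};

struct fake_inode {
	bool is_data_reloc;		/* stands in for btrfs_is_data_reloc_root() */
	struct fake_fs_info *fs_info;
	pthread_mutex_t lock;		/* stands in for the inode lock */
};

static void data_reloc_lock(struct fake_inode *inode)
{
	/* Only the zoned data-relocation case pays for serialization. */
	if (inode->is_data_reloc && inode->fs_info->zoned)
		pthread_mutex_lock(&inode->lock);
}

static void data_reloc_unlock(struct fake_inode *inode)
{
	if (inode->is_data_reloc && inode->fs_info->zoned)
		pthread_mutex_unlock(&inode->lock);
}

int main(void)
{
	struct fake_fs_info fs = { .zoned = true };
	struct fake_inode ino = {
		.is_data_reloc = true,
		.fs_info = &fs,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	/* Callers bracket the write that must stay ordered with the pair. */
	data_reloc_lock(&ino);
	printf("serialized section for the relocation inode\n");
	data_reloc_unlock(&ino);
	return 0;
}

The design point the helpers encapsulate is that zoned devices require sequential writes within a zone, so writes to the relocation inode are serialized there, while regular (non-zoned, non-relocation) I/O paths skip the lock entirely.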