-rw-r--r--  fs/btrfs/block-group.h |  1
-rw-r--r--  fs/btrfs/ctree.h       |  1
-rw-r--r--  fs/btrfs/disk-io.c     |  1
-rw-r--r--  fs/btrfs/extent_io.c   | 25
-rw-r--r--  fs/btrfs/zoned.c       | 50
-rw-r--r--  fs/btrfs/zoned.h       | 32
6 files changed, 109 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 31c7c5872b92..a07108d65c44 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -193,6 +193,7 @@ struct btrfs_block_group {
*/
u64 alloc_offset;
u64 zone_unusable;
+ u64 meta_write_pointer;
};
static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 10da47ab093a..1bb4f767966a 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -975,6 +975,7 @@ struct btrfs_fs_info {
/* Max size to emit ZONE_APPEND write command */
u64 max_zone_append_size;
+ struct mutex zoned_meta_io_lock;
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
spinlock_t ref_verify_lock;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 70621184a731..458bb27e0327 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2769,6 +2769,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
mutex_init(&fs_info->delete_unused_bgs_mutex);
mutex_init(&fs_info->reloc_mutex);
mutex_init(&fs_info->delalloc_root_mutex);
+ mutex_init(&fs_info->zoned_meta_io_lock);
seqlock_init(&fs_info->profiles_lock);
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 14a68f3589cc..dfa6c6106b94 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -26,6 +26,7 @@
#include "disk-io.h"
#include "subpage.h"
#include "zoned.h"
+#include "block-group.h"
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
@@ -4161,6 +4162,7 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
struct extent_buffer **eb_context)
{
struct address_space *mapping = page->mapping;
+ struct btrfs_block_group *cache = NULL;
struct extent_buffer *eb;
int ret;
@@ -4193,13 +4195,31 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
if (!ret)
return 0;
+ if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
+ /*
+ * If for_sync, this hole will be filled with
+ * transaction commit.
+ */
+ if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
+ ret = -EAGAIN;
+ else
+ ret = 0;
+ free_extent_buffer(eb);
+ return ret;
+ }
+
*eb_context = eb;
ret = lock_extent_buffer_for_io(eb, epd);
if (ret <= 0) {
+ btrfs_revert_meta_write_pointer(cache, eb);
+ if (cache)
+ btrfs_put_block_group(cache);
free_extent_buffer(eb);
return ret;
}
+ if (cache)
+ btrfs_put_block_group(cache);
ret = write_one_eb(eb, wbc, epd);
free_extent_buffer(eb);
if (ret < 0)
@@ -4245,6 +4265,7 @@ int btree_write_cache_pages(struct address_space *mapping,
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
+ btrfs_zoned_meta_io_lock(fs_info);
retry:
if (wbc->sync_mode == WB_SYNC_ALL)
tag_pages_for_writeback(mapping, index, end);
@@ -4285,7 +4306,7 @@ retry:
}
if (ret < 0) {
end_write_bio(&epd, ret);
- return ret;
+ goto out;
}
/*
* If something went wrong, don't allow any metadata write bio to be
@@ -4320,6 +4341,8 @@ retry:
ret = -EROFS;
end_write_bio(&epd, ret);
}
+out:
+ btrfs_zoned_meta_io_unlock(fs_info);
return ret;
}
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index f4e226bda9b0..b2a6553b2db0 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1159,6 +1159,9 @@ out:
ret = -EIO;
}
+ if (!ret)
+ cache->meta_write_pointer = cache->alloc_offset + cache->start;
+
kfree(alloc_offsets);
free_extent_map(em);
@@ -1317,3 +1320,50 @@ out:
kfree(logical);
bdput(bdev);
}
+
+bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb,
+ struct btrfs_block_group **cache_ret)
+{
+ struct btrfs_block_group *cache;
+ bool ret = true;
+
+ if (!btrfs_is_zoned(fs_info))
+ return true;
+
+ cache = *cache_ret;
+
+ if (cache && (eb->start < cache->start ||
+ cache->start + cache->length <= eb->start)) {
+ btrfs_put_block_group(cache);
+ cache = NULL;
+ *cache_ret = NULL;
+ }
+
+ if (!cache)
+ cache = btrfs_lookup_block_group(fs_info, eb->start);
+
+ if (cache) {
+ if (cache->meta_write_pointer != eb->start) {
+ btrfs_put_block_group(cache);
+ cache = NULL;
+ ret = false;
+ } else {
+ cache->meta_write_pointer = eb->start + eb->len;
+ }
+
+ *cache_ret = cache;
+ }
+
+ return ret;
+}
+
+void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
+ struct extent_buffer *eb)
+{
+ if (!btrfs_is_zoned(eb->fs_info) || !cache)
+ return;
+
+ ASSERT(cache->meta_write_pointer == eb->start + eb->len);
+ cache->meta_write_pointer = eb->start;
+}
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 04f7b21652b6..0755a25d0f4c 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -50,6 +50,11 @@ bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em);
void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
struct bio *bio);
void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered);
+bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb,
+ struct btrfs_block_group **cache_ret);
+void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
+ struct extent_buffer *eb);
#else /* CONFIG_BLK_DEV_ZONED */
static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
struct blk_zone *zone)
@@ -151,6 +156,19 @@ static inline void btrfs_record_physical_zoned(struct inode *inode,
static inline void btrfs_rewrite_logical_zoned(
struct btrfs_ordered_extent *ordered) { }
+static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb,
+ struct btrfs_block_group **cache_ret)
+{
+ return true;
+}
+
+static inline void btrfs_revert_meta_write_pointer(
+ struct btrfs_block_group *cache,
+ struct extent_buffer *eb)
+{
+}
+
#endif
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
@@ -242,4 +260,18 @@ static inline bool btrfs_can_zone_reset(struct btrfs_device *device,
return true;
}
+static inline void btrfs_zoned_meta_io_lock(struct btrfs_fs_info *fs_info)
+{
+ if (!btrfs_is_zoned(fs_info))
+ return;
+ mutex_lock(&fs_info->zoned_meta_io_lock);
+}
+
+static inline void btrfs_zoned_meta_io_unlock(struct btrfs_fs_info *fs_info)
+{
+ if (!btrfs_is_zoned(fs_info))
+ return;
+ mutex_unlock(&fs_info->zoned_meta_io_lock);
+}
+
#endif