author		Chris Mason <chris.mason@oracle.com>	2008-09-24 14:51:30 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 15:41:59 -0400
commit		24ab9cd85c11bccacbd0cce7f8e1aebd4930404c (patch)
tree		2ace993dd54bb6dd94fdd4c0cb03813c6bf4c6a3 /fs/btrfs/disk-io.c
parent		4434c33c7f233a7ee5dcaf357fbf795818ea8861 (diff)
Btrfs: Raise thresholds for metadata writeback
Btrfs metadata writeback is fairly expensive. Once a tree block is written it must be cowed before it can be changed again. The btree writepages code has a threshold based on a count of dirty btree bytes which is updated as IO is sent out.

This changes btree_writepages to skip the writeout if there are less than 32MB of dirty bytes from the btrees, improving performance across many workloads.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
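The mechanism the message describes is a gate at the top of the background writeback path: count the dirty btree bytes and return early when they fall below the threshold, so metadata is batched into larger writeouts instead of being cowed and rewritten repeatedly. Below is a minimal user-space sketch of that pattern; the helpers count_dirty_btree_bytes() and do_btree_writeout() are hypothetical placeholders standing in for the kernel's accounting and extent-writeout paths, not the actual functions in disk-io.c.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's dirty-byte accounting and
 * the real extent writeout path; here they only simulate behaviour. */
static uint64_t count_dirty_btree_bytes(void)
{
	return 8 * 1024 * 1024;		/* pretend 8MB of btree pages are dirty */
}

static int do_btree_writeout(void)
{
	puts("writing out dirty btree pages");
	return 0;
}

/* Skip background (non-sync) metadata writeback until enough dirty
 * btree bytes have accumulated; sync writeback always proceeds. */
static int btree_writepages_sketch(bool for_sync)
{
	const uint64_t thresh = 32 * 1024 * 1024;	/* raised to 32MB by this patch */

	if (!for_sync) {
		if (count_dirty_btree_bytes() < thresh)
			return 0;	/* not enough dirty metadata yet: do nothing */
	}
	return do_btree_writeout();
}

int main(void)
{
	btree_writepages_sketch(false);	/* background writeback: skipped at 8MB dirty */
	btree_writepages_sketch(true);	/* sync writeback: always writes */
	return 0;
}

The trade-off is the usual one for write batching: a larger threshold means fewer, larger writeouts and fewer copy-on-write cycles per tree block, at the cost of holding more dirty metadata in memory between writeouts.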
Diffstat (limited to 'fs/btrfs/disk-io.c')
 -rw-r--r--  fs/btrfs/disk-io.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index dffb8dabd533..71e81f3a765b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -556,7 +556,7 @@ static int btree_writepages(struct address_space *mapping,
 	if (wbc->sync_mode == WB_SYNC_NONE) {
 		u64 num_dirty;
 		u64 start = 0;
-		unsigned long thresh = 8 * 1024 * 1024;
+		unsigned long thresh = 32 * 1024 * 1024;
 
 		if (wbc->for_kupdate)
 			return 0;
@@ -690,7 +690,7 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 int btrfs_write_tree_block(struct extent_buffer *buf)
 {
 	return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
-				      buf->start + buf->len - 1, WB_SYNC_NONE);
+				      buf->start + buf->len - 1, WB_SYNC_ALL);
 }
 
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)