author	Fan Li <fanofcode.li@samsung.com>	2013-12-09 16:09:00 +0800
committer	Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-12-23 10:18:06 +0900
commit	63a0b7cb33d85aeb0df39b984c08e234db4925d1 (patch)
tree	75de6dbf2311e606124221d11344bc3ad8e2ebfa /fs/f2fs/data.c
parent	6bacf52fb58aeb3e89d9a62970b85a5570aa8ace (diff)
download	linux-63a0b7cb33d85aeb0df39b984c08e234db4925d1.tar.bz2
f2fs: merge pages with the same sync_mode flag
Previously, f2fs submitted most write requests using WRITE_SYNC, but f2fs_write_data_pages submitted the last write requests using the sync_mode flags its callers passed. This caused a performance problem, since continuous pages with different sync flags cannot be merged by the cfq I/O scheduler (thanks to Yu Chao for pointing it out), and synchronous requests often take more time.

This patch makes the following modifications to DATA writebacks:

1. Every page is written back using the sync mode the caller passes.
2. Only pages with the same sync mode can be merged into one bio request.

These changes are restricted to DATA pages; other types of writebacks are modified to remain synchronous.

In my tests with tiotest, f2fs sequential write performance improved by about 7%-10%, and this patch has no obvious impact on other performance tests.

Signed-off-by: Fan Li <fanofcode.li@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
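To make the merge rule concrete, below is a minimal userspace C sketch of the decision this patch introduces in f2fs_submit_page_mbio(): a pending bio is flushed early when the next page is either discontiguous or carries different rw flags. The names pending_io, submit_pending, and add_page are illustrative stand-ins, not f2fs's.

	#include <stdbool.h>
	#include <stdio.h>

	struct pending_io {
		bool has_bio;             /* a bio is currently being built      */
		unsigned long last_block; /* last block number added to it       */
		int rw_flag;              /* rw flags of the pages queued so far */
	};

	static void submit_pending(struct pending_io *io)
	{
		printf("submit bio ending at block %lu (rw=%d)\n",
		       io->last_block, io->rw_flag);
		io->has_bio = false;
	}

	/* Mirrors the patched condition: flush when the new page is
	 * discontiguous OR carries different sync flags. */
	static void add_page(struct pending_io *io, unsigned long blk_addr, int rw)
	{
		if (io->has_bio &&
		    (io->last_block != blk_addr - 1 || io->rw_flag != rw))
			submit_pending(io);

		if (!io->has_bio) {
			io->has_bio = true;
			io->rw_flag = rw;  /* remember the flags for this bio */
		}
		io->last_block = blk_addr;
	}

	int main(void)
	{
		struct pending_io io = { 0 };

		add_page(&io, 100, 0); /* WRITE                               */
		add_page(&io, 101, 0); /* contiguous, same flags: merges      */
		add_page(&io, 102, 1); /* WRITE_SYNC: contiguous, but flushed */
		add_page(&io, 103, 1); /* merges                              */
		if (io.has_bio)
			submit_pending(&io);
		return 0;
	}

Before the patch, only discontiguity forced a flush; the sketch's second condition is what keeps pages with different sync flags in separate bios.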
Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--	fs/f2fs/data.c	18
1 file changed, 10 insertions, 8 deletions
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 5607393198df..fb5e5c2627e5 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -194,8 +194,9 @@ void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
if (!is_read_io(rw))
inc_page_count(sbi, F2FS_WRITEBACK);
- if (io->bio && io->last_block_in_bio != blk_addr - 1)
- __submit_merged_bio(sbi, io, type, true, rw);
+ if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
+ io->rw_flag != rw))
+ __submit_merged_bio(sbi, io, type, false, io->rw_flag);
alloc_new:
if (io->bio == NULL) {
bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
@@ -203,6 +204,7 @@ alloc_new:
io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
io->bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io :
f2fs_write_end_io;
+ io->rw_flag = rw;
/*
* The end_io will be assigned at the submission phase.
* Until then, let bio_add_page() merge consecutive IOs as much
@@ -212,7 +214,7 @@ alloc_new:
if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
PAGE_CACHE_SIZE) {
- __submit_merged_bio(sbi, io, type, true, rw);
+ __submit_merged_bio(sbi, io, type, false, rw);
goto alloc_new;
}
@@ -641,7 +643,7 @@ static int f2fs_read_data_pages(struct file *file,
return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}
-int do_write_data_page(struct page *page)
+int do_write_data_page(struct page *page, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
block_t old_blk_addr, new_blk_addr;
@@ -669,10 +671,10 @@ int do_write_data_page(struct page *page)
!is_cold_data(page) &&
need_inplace_update(inode))) {
rewrite_data_page(F2FS_SB(inode->i_sb), page,
- old_blk_addr);
+ old_blk_addr, wbc);
} else {
write_data_page(inode, page, &dn,
- old_blk_addr, &new_blk_addr);
+ old_blk_addr, &new_blk_addr, wbc);
update_extent_cache(new_blk_addr, &dn);
}
out_writepage:
@@ -719,10 +721,10 @@ write:
if (S_ISDIR(inode->i_mode)) {
dec_page_count(sbi, F2FS_DIRTY_DENTS);
inode_dec_dirty_dents(inode);
- err = do_write_data_page(page);
+ err = do_write_data_page(page, wbc);
} else {
f2fs_lock_op(sbi);
- err = do_write_data_page(page);
+ err = do_write_data_page(page, wbc);
f2fs_unlock_op(sbi);
need_balance_fs = true;
}
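The second half of the patch threads the writeback_control down to do_write_data_page(), so rewrite_data_page() and write_data_page() can derive their rw flags from wbc->sync_mode instead of always forcing WRITE_SYNC. A hedged userspace sketch of that selection follows; the helper name wbc_to_rw() and the flag values are assumptions for illustration (the kernel's real WRITE/WRITE_SYNC are bio rw flags, not these stand-ins).

	#include <stdio.h>

	/* Stand-ins for the kernel's writeback sync modes and rw flags;
	 * values here are illustrative, not the kernel's. */
	enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };
	#define RW_WRITE      0
	#define RW_WRITE_SYNC 1

	struct writeback_control { enum sync_mode sync_mode; };

	/* Hypothetical helper mirroring what the write path can now do
	 * with the wbc it receives: request a synchronous write only
	 * when the caller asked for integrity writeback. */
	static int wbc_to_rw(const struct writeback_control *wbc)
	{
		return wbc->sync_mode == WB_SYNC_ALL ? RW_WRITE_SYNC : RW_WRITE;
	}

	int main(void)
	{
		struct writeback_control background = { .sync_mode = WB_SYNC_NONE };
		struct writeback_control integrity  = { .sync_mode = WB_SYNC_ALL };

		printf("background writeback -> rw=%d\n", wbc_to_rw(&background));
		printf("integrity writeback  -> rw=%d\n", wbc_to_rw(&integrity));
		return 0;
	}

Note that the diff also reads io->rw_flag, a field that appears nowhere in this file's changes; since the diffstat is limited to fs/f2fs/data.c, the field is presumably added to the per-log bio bookkeeping structure elsewhere in the patch (likely in f2fs.h).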