From 40452ffca3c1a0f2994e826f9fa213b107f1a2d4 Mon Sep 17 00:00:00 2001
From: Huang Jianan
Date: Mon, 6 Dec 2021 22:35:52 +0800
Subject: erofs: add sysfs node to control sync decompression strategy

Although readpage is a synchronous path, there will be no additional
kworker scheduling overhead in non-atomic contexts together with
dm-verity. Let's add a sysfs node to disable sync decompression as
an option.

Link: https://lore.kernel.org/r/20211206143552.8384-1-huangjianan@oppo.com
Reviewed-by: Chao Yu
Signed-off-by: Huang Jianan
Signed-off-by: Gao Xiang
---
 fs/erofs/zdata.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

(limited to 'fs/erofs/zdata.c')

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 9a249bfc2770..bc765d8a6dc2 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -762,6 +762,21 @@ err_out:
 	goto out;
 }
 
+static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
+					       unsigned int readahead_pages)
+{
+	/* auto: enable for readpage, disable for readahead */
+	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
+	    !readahead_pages)
+		return true;
+
+	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
+	    (readahead_pages <= sbi->opt.max_sync_decompress_pages))
+		return true;
+
+	return false;
+}
+
 static void z_erofs_decompressqueue_work(struct work_struct *work);
 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 				       bool sync, int bios)
@@ -784,7 +799,9 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 	/* Use workqueue and sync decompression for atomic contexts only */
 	if (in_atomic() || irqs_disabled()) {
 		queue_work(z_erofs_workqueue, &io->u.work);
-		sbi->opt.readahead_sync_decompress = true;
+		/* enable sync decompression for readahead */
+		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
+			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
 		return;
 	}
 	z_erofs_decompressqueue_work(&io->u.work);
@@ -1435,6 +1452,7 @@ skip:
 static int z_erofs_readpage(struct file *file, struct page *page)
 {
 	struct inode *const inode = page->mapping->host;
+	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
 	struct page *pagepool = NULL;
 	int err;
@@ -1450,7 +1468,8 @@ static int z_erofs_readpage(struct file *file, struct page *page)
 	(void)z_erofs_collector_end(&f.clt);
 
 	/* if some compressed cluster ready, need submit them anyway */
-	z_erofs_runqueue(inode->i_sb, &f, &pagepool, true);
+	z_erofs_runqueue(inode->i_sb, &f, &pagepool,
+			 z_erofs_get_sync_decompress_policy(sbi, 0));
 
 	if (err)
 		erofs_err(inode->i_sb, "failed to read, err [%d]", err);
@@ -1501,8 +1520,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
 	(void)z_erofs_collector_end(&f.clt);
 
 	z_erofs_runqueue(inode->i_sb, &f, &pagepool,
-			 sbi->opt.readahead_sync_decompress &&
-			 nr_pages <= sbi->opt.max_sync_decompress_pages);
+			 z_erofs_get_sync_decompress_policy(sbi, nr_pages));
 	if (f.map.mpage)
 		put_page(f.map.mpage);
 	erofs_release_pages(&pagepool);
-- 
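
Note for readers of this excerpt: the EROFS_SYNC_DECOMPRESS_* constants and
the sync_decompress/max_sync_decompress_pages option fields used above are
defined in the rest of the commit (the view here is limited to zdata.c; the
sysfs node and the constant definitions live in the other touched files).
Below is a standalone userspace model of the tri-state policy, for
illustration only -- the enum names mirror the diff, but the third value and
the default burst limit are assumptions, not the kernel definitions. It shows
how "auto" only decompresses synchronously for readpage (readahead_pages ==
0), while "force on" also covers small readahead bursts:

#include <stdbool.h>
#include <stdio.h>

enum {
	EROFS_SYNC_DECOMPRESS_AUTO,	/* sync for readpage only */
	EROFS_SYNC_DECOMPRESS_FORCE_ON,	/* sync for readpage + small readahead */
	EROFS_SYNC_DECOMPRESS_FORCE_OFF	/* assumed third state: always defer */
};

struct erofs_mount_opts {
	unsigned int sync_decompress;
	unsigned int max_sync_decompress_pages;
};

static bool sync_decompress_policy(const struct erofs_mount_opts *opt,
				   unsigned int readahead_pages)
{
	/* auto: sync for readpage (readahead_pages == 0) only */
	if (opt->sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO &&
	    !readahead_pages)
		return true;
	/* force on: also sync small readahead bursts */
	if (opt->sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON &&
	    readahead_pages <= opt->max_sync_decompress_pages)
		return true;
	return false;	/* force off, or readahead burst too large */
}

int main(void)
{
	/* burst limit of 3 pages is an assumed default for the demo */
	struct erofs_mount_opts o = { EROFS_SYNC_DECOMPRESS_AUTO, 3 };

	printf("auto, readpage:      %d\n", sync_decompress_policy(&o, 0)); /* 1 */
	printf("auto, readahead(8):  %d\n", sync_decompress_policy(&o, 8)); /* 0 */
	o.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
	printf("force-on, small(2):  %d\n", sync_decompress_policy(&o, 2)); /* 1 */
	return 0;
}

The z_erofs_decompress_kickoff() hunk complements this: when bios complete in
atomic context, the "auto" policy is promoted to "force on" so that later
readahead rounds decompress synchronously instead of paying the kworker
round-trip each time.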
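
As for the sysfs node itself, its path and value encoding belong to the
companion sysfs patch, not to the hunks shown here. Assuming a conventional
/sys/fs/erofs/<disk>/sync_decompress layout with 0 = auto, 1 = force on,
2 = force off, disabling sync decompression from userspace might look like
the sketch below (both the path and the encoding are assumptions):

#include <stdio.h>

int main(void)
{
	/* "sdb" is a placeholder for the device backing the erofs mount */
	FILE *f = fopen("/sys/fs/erofs/sdb/sync_decompress", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("2\n", f);	/* assumed: 0 = auto, 1 = force on, 2 = force off */
	return fclose(f) != 0;
}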