author     Jens Axboe <axboe@kernel.dk>    2017-09-30 02:09:06 -0600
committer  Jens Axboe <axboe@kernel.dk>    2017-10-04 11:24:12 -0600
commit     85009b4f5f0399669a44f07cb9a5622c0e71d419
tree       9aa95947a827cf53f7d48cf187b99fa4f03411d2 /fs/fs-writeback.c
parent     fc13457f74dcf054b0d17efb7b94b46fdf17f412
writeback: eliminate work item allocation in bd_start_writeback()
Handle start-all writeback like we do periodic or kupdate style writeback -
by marking the bdi_writeback as needing a full flush, and simply waking the
thread. This eliminates the need to allocate and queue a specific work item
just for this purpose.

After this change, we truly only ever have one of them running at any point
in time. We mark the need to start all flushes, and the writeback thread
will clear it once it has processed the request.

Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
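The pattern this patch adopts - an atomic "start all" bit that producers set
at most once and that the flusher thread clears only after consuming the
request - can be illustrated outside the kernel. Below is a minimal
user-space C sketch of that idea, not the kernel implementation: pthreads
and C11 atomics stand in for wb_wakeup() and the WB_start_all bit, and the
names (start_all, request_flush_all, worker_main) are illustrative only.

/*
 * User-space analogy of the WB_start_all scheme. Producers publish one
 * shared flag with a single atomic RMW and wake the worker; duplicate
 * requests are absorbed while the flag is still set. The worker clears
 * the flag only after processing, so at most one "flush everything"
 * pass is in flight at any time.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool start_all;	/* stands in for the WB_start_all bit */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;

/* Producer side, analogous to wb_start_writeback(): cheap read first,
 * atomic RMW only if the flag looks unset, wakeup only for the caller
 * that actually transitions the flag from clear to set. */
static void request_flush_all(void)
{
	if (atomic_load(&start_all) ||
	    atomic_exchange(&start_all, true))
		return;
	pthread_mutex_lock(&lock);
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
}

/* Worker side, analogous to wb_check_start_all(): do the work, then
 * clear the flag so a new request can be raised. */
static void *worker_main(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!atomic_load(&start_all))
		pthread_cond_wait(&wake, &lock);
	pthread_mutex_unlock(&lock);

	puts("flushing everything");	/* placeholder for the real work */
	atomic_store(&start_all, false);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker_main, NULL);
	request_flush_all();
	request_flush_all();	/* deduplicated: flag already set */
	pthread_join(t, NULL);
	return 0;
}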
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--    fs/fs-writeback.c    71
1 file changed, 34 insertions(+), 37 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 399619c97567..9e24d604c59c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -53,7 +53,6 @@ struct wb_writeback_work {
 	unsigned int for_background:1;
 	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
 	unsigned int auto_free:1;	/* free on completion */
-	unsigned int start_all:1;	/* nr_pages == 0 (all) writeback */
 	enum wb_reason reason;		/* why was writeback initiated? */
 
 	struct list_head list;		/* pending work list */
@@ -947,8 +946,6 @@ static unsigned long get_nr_dirty_pages(void)
 
 static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
 {
-	struct wb_writeback_work *work;
-
 	if (!wb_has_dirty_io(wb))
 		return;
 
@@ -958,35 +955,14 @@ static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
 	 * high frequency, causing pointless allocations of tons of
 	 * work items and keeping the flusher threads busy retrieving
 	 * that work. Ensure that we only allow one of them pending and
-	 * inflight at the time. It doesn't matter if we race a little
-	 * bit on this, so use the faster separate test/set bit variants.
+	 * inflight at the time.
 	 */
-	if (test_bit(WB_start_all, &wb->state))
+	if (test_bit(WB_start_all, &wb->state) ||
+	    test_and_set_bit(WB_start_all, &wb->state))
 		return;
-	set_bit(WB_start_all, &wb->state);
-
-	/*
-	 * This is WB_SYNC_NONE writeback, so if allocation fails just
-	 * wakeup the thread for old dirty data writeback
-	 */
-	work = kzalloc(sizeof(*work),
-		       GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
-	if (!work) {
-		clear_bit(WB_start_all, &wb->state);
-		trace_writeback_nowork(wb);
-		wb_wakeup(wb);
-		return;
-	}
-
-	work->sync_mode = WB_SYNC_NONE;
-	work->nr_pages = wb_split_bdi_pages(wb, get_nr_dirty_pages());
-	work->range_cyclic = 1;
-	work->reason = reason;
-	work->auto_free = 1;
-	work->start_all = 1;
-
-	wb_queue_work(wb, work);
+	wb->start_all_reason = reason;
+	wb_wakeup(wb);
 }
 
 /**
@@ -1838,14 +1814,6 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
 		list_del_init(&work->list);
 	}
 	spin_unlock_bh(&wb->work_lock);
-
-	/*
-	 * Once we start processing a work item that had !nr_pages,
-	 * clear the wb state bit for that so we can allow more.
-	 */
-	if (work && work->start_all)
-		clear_bit(WB_start_all, &wb->state);
-
 	return work;
 }
 
@@ -1901,6 +1869,30 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
 	return 0;
 }
 
+static long wb_check_start_all(struct bdi_writeback *wb)
+{
+	long nr_pages;
+
+	if (!test_bit(WB_start_all, &wb->state))
+		return 0;
+
+	nr_pages = get_nr_dirty_pages();
+	if (nr_pages) {
+		struct wb_writeback_work work = {
+			.nr_pages = wb_split_bdi_pages(wb, nr_pages),
+			.sync_mode = WB_SYNC_NONE,
+			.range_cyclic = 1,
+			.reason = wb->start_all_reason,
+		};
+
+		nr_pages = wb_writeback(wb, &work);
+	}
+
+	clear_bit(WB_start_all, &wb->state);
+	return nr_pages;
+}
+
+
 /*
  * Retrieve work items and do the writeback they describe
  */
@@ -1917,6 +1909,11 @@ static long wb_do_writeback(struct bdi_writeback *wb)
 	}
 
 	/*
+	 * Check for a flush-everything request
+	 */
+	wrote += wb_check_start_all(wb);
+
+	/*
 	 * Check for periodic writeback, kupdated() style
 	 */
 	wrote += wb_check_old_data_flush(wb);
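Two details of the new scheme are worth noting. First, wb_start_writeback()
still does a plain test_bit() before falling back to test_and_set_bit(), so
callers that hit an already-pending request take a read-only fast path
instead of a locked read-modify-write - this matters because places like
vmscan can call it at very high frequency. Second, wb_check_start_all()
builds its wb_writeback_work on the stack, so the no-allocation property
holds on the consuming side as well and there is no failure path to handle;
the WB_start_all bit is cleared only after wb_writeback() returns, which is
what guarantees a single start-all flush in flight at a time.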