author     Jens Axboe <axboe@kernel.dk>  2022-01-18 19:22:32 -0700
committer  Jens Axboe <axboe@kernel.dk>  2022-01-18 19:27:59 -0700
commit     efdf518459b17e18a93c7c9cb622fd3051dabd0c (patch)
tree       bcb920b57522ce127f22340ad96dd0c967d97f41
parent     36e4c58bf044b07204c8c7e6dd7c2384e439921a (diff)
download   linux-efdf518459b17e18a93c7c9cb622fd3051dabd0c.tar.bz2
io-wq: perform both unstarted and started work cancelations in one go
Rather than split these into two separate lookups and matches, combine them
into one loop. This will become important when we can guarantee that we don't
have a window where a pending work item isn't discoverable in either state.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  fs/io-wq.c  24
1 file changed, 11 insertions, 13 deletions
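
For illustration, the effect of the change can be sketched in plain user-space C
(hypothetical names; a pthread mutex stands in for wqe->lock, and this is not the
kernel code). Because the pending check and the running check are made while
holding the same lock, a work item that moves from pending to running cannot slip
through between the two lookups:

/* Toy model of the combined cancelation path; not the kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum cancel_result { CANCEL_OK, CANCEL_RUNNING, CANCEL_NOTFOUND };

struct node {
	pthread_mutex_t lock;	/* stands in for wqe->lock */
	bool work_pending;	/* queued, not yet picked up by a worker */
	bool work_running;	/* currently being executed by a worker */
};

static enum cancel_result cancel_work(struct node *n)
{
	enum cancel_result res = CANCEL_NOTFOUND;

	pthread_mutex_lock(&n->lock);
	if (n->work_pending) {
		/* Unstarted work: drop it, no completion is posted. */
		n->work_pending = false;
		res = CANCEL_OK;
	} else if (n->work_running) {
		/* Started work: only signal it; completion still runs. */
		res = CANCEL_RUNNING;
	}
	pthread_mutex_unlock(&n->lock);
	return res;
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, true, false };

	printf("%d\n", cancel_work(&n));	/* prints 0 == CANCEL_OK */
	return 0;
}

In the actual patch below, io_wqe_cancel_pending_work() and
io_wqe_cancel_running_work() play the roles of the two branches, both called
while raw_spin_lock(&wqe->lock) is held.
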
diff --git a/fs/io-wq.c b/fs/io-wq.c
index a92fbdc8bea3..db150186ce94 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -1072,27 +1072,25 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
-	 */
-	for_each_node(node) {
-		struct io_wqe *wqe = wq->wqes[node];
-
-		raw_spin_lock(&wqe->lock);
-		io_wqe_cancel_pending_work(wqe, &match);
-		raw_spin_unlock(&wqe->lock);
-		if (match.nr_pending && !match.cancel_all)
-			return IO_WQ_CANCEL_OK;
-	}
-
-	/*
-	 * Now check if a free (going busy) or busy worker has the work
+	 *
+	 * Then check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
+	 *
+	 * Do both of these while holding the wqe->lock, to ensure that
+	 * we'll find a work item regardless of state.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		raw_spin_lock(&wqe->lock);
+		io_wqe_cancel_pending_work(wqe, &match);
+		if (match.nr_pending && !match.cancel_all) {
+			raw_spin_unlock(&wqe->lock);
+			return IO_WQ_CANCEL_OK;
+		}
+
		io_wqe_cancel_running_work(wqe, &match);
		raw_spin_unlock(&wqe->lock);
		if (match.nr_running && !match.cancel_all)