author | Jens Axboe <axboe@kernel.dk> | 2021-03-04 17:15:48 -0700
---|---|---
committer | Jens Axboe <axboe@kernel.dk> | 2021-03-05 08:43:09 -0700
commit | ca0a26511c679a797f86589894a4523db36d833e |
tree | 14af0ed2f0ece33adc851d01b3ed0acafc667e7e |
parent | 46fe18b16c4656969347fc0a3d83a034e47d9119 |
download | linux-ca0a26511c679a797f86589894a4523db36d833e.tar.bz2 |
io_uring: don't keep looping for more events if we can't flush overflow
It doesn't make sense to wait for more events to come in if we can't
even flush the overflow we already have to the ring. Return -EBUSY for
that condition, just like we do for attempts to submit with overflow
pending.
Cc: stable@vger.kernel.org # 5.11
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r-- | fs/io_uring.c | 15
1 file changed, 12 insertions(+), 3 deletions(-)
```diff
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 76e243c056b5..044170165402 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1451,18 +1451,22 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 	return all_flushed;
 }
 
-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 				     struct task_struct *tsk,
 				     struct files_struct *files)
 {
+	bool ret = true;
+
 	if (test_bit(0, &ctx->cq_check_overflow)) {
 		/* iopoll syncs against uring_lock, not completion_lock */
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_lock(&ctx->uring_lock);
-		__io_cqring_overflow_flush(ctx, force, tsk, files);
+		ret = __io_cqring_overflow_flush(ctx, force, tsk, files);
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_unlock(&ctx->uring_lock);
 	}
+
+	return ret;
 }
 
 static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
@@ -6883,11 +6887,16 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
 	trace_io_uring_cqring_wait(ctx, min_events);
 	do {
-		io_cqring_overflow_flush(ctx, false, NULL, NULL);
+		/* if we can't even flush overflow, don't wait for more */
+		if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) {
+			ret = -EBUSY;
+			break;
+		}
 		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
 					  TASK_INTERRUPTIBLE);
 		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
 		finish_wait(&ctx->wait, &iowq.wq);
+		cond_resched();
 	} while (ret > 0);
 
 	restore_saved_sigmask_unless(ret == -EINTR);
```
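For userspace, the visible consequence of this change is that a wait on the CQ ring can now fail with -EBUSY when overflowed completions cannot be flushed back into a full CQ ring, rather than the kernel looping internally. Below is a minimal, hypothetical sketch of how a liburing caller might respond; the `wait_and_reap()` helper and its recovery strategy are illustrations, not part of this patch. The idea is simply to drain whatever is already in the CQ ring to make room for the overflowed entries, then retry the wait.

```c
#include <errno.h>
#include <liburing.h>

/*
 * Hypothetical wait loop illustrating -EBUSY handling after this patch.
 * If the kernel cannot flush overflowed CQEs into the CQ ring, the wait
 * can return -EBUSY instead of blocking; the caller should reap CQEs to
 * free ring space and try again.
 */
static int wait_and_reap(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int ret;

	for (;;) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret == -EBUSY) {
			/* CQ ring full with overflow pending: drain it first */
			unsigned head, reaped = 0;

			io_uring_for_each_cqe(ring, head, cqe) {
				/* process cqe->res / cqe->user_data here */
				reaped++;
			}
			io_uring_cq_advance(ring, reaped);
			continue;
		}
		if (ret < 0)
			return ret;	/* e.g. -EINTR */

		/* process the completion, then mark it seen */
		io_uring_cqe_seen(ring, cqe);
		return 0;
	}
}
```

This mirrors the pre-existing submit-side convention the commit message refers to: io_uring_enter() already returned -EBUSY for submission attempts with overflow pending, so applications that handle that case by reaping completions get the same recovery path on the wait side.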