author    Linus Torvalds <torvalds@linux-foundation.org>    2020-07-05 10:41:33 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2020-07-05 10:41:33 -0700
commit    9fbe565cb78d10ca2619ee23eac27262cff45629 (patch)
tree      ab41b9a8a61bcf46504f77a897a945c6d36638fd
parent    7783485401e802b4802c93cb1b23deb7f0d168a4 (diff)
parent    b7db41c9e03b5189bc94993bd50e4506ac9e34c1 (diff)
download  linux-9fbe565cb78d10ca2619ee23eac27262cff45629.tar.bz2
Merge tag 'io_uring-5.8-2020-07-05' of git://git.kernel.dk/linux-block

Pull io_uring fix from Jens Axboe:
 "Andres reported a regression with the fix that was merged earlier
  this week, where his setup of using signals to interrupt io_uring CQ
  waits no longer worked correctly. Fix this, and also limit our use of
  TWA_SIGNAL to the case where we need it, and continue using
  TWA_RESUME for task_work as before.

  Since the original is marked for 5.7 stable, let's flush this one out
  early"

* tag 'io_uring-5.8-2020-07-05' of git://git.kernel.dk/linux-block:
  io_uring: fix regression with always ignoring signals in io_cqring_wait()
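The setup Andres describes boils down to interrupting a blocked CQ wait with a signal. Below is a minimal userspace sketch of that scenario; it assumes liburing (build with -luring) and uses SIGALRM/alarm() as a self-contained stand-in for his actual test case. With the regression, the wait swallowed the signal; with this fix it returns -EINTR again.

    /* Hedged sketch: interrupt a blocking CQ wait with a signal. */
    #include <liburing.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void on_alarm(int sig) { (void)sig; /* exists only to interrupt */ }

    int main(void)
    {
            struct io_uring ring;
            struct io_uring_cqe *cqe;
            struct sigaction sa;
            int ret;

            memset(&sa, 0, sizeof(sa));
            sa.sa_handler = on_alarm;       /* no SA_RESTART: the wait should return */
            sigaction(SIGALRM, &sa, NULL);

            if (io_uring_queue_init(8, &ring, 0) < 0)
                    return 1;

            alarm(1);                       /* deliver SIGALRM while we block below */

            /* No SQEs were submitted, so this blocks in io_cqring_wait() until
             * the signal arrives; the fixed kernel returns -EINTR here. */
            ret = io_uring_wait_cqe(&ring, &cqe);
            printf("io_uring_wait_cqe() = %d (%s)\n", ret,
                   ret == -EINTR ? "interrupted by signal" : "other");

            io_uring_queue_exit(&ring);
            return 0;
    }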
-rw-r--r--    fs/io_uring.c    29
1 file changed, 22 insertions(+), 7 deletions(-)
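In short, the patch moves the choice of task_work notification mode into io_req_task_work_add() itself instead of taking it from the caller. Condensed from the first hunk of the diff below (same identifiers as the diff; the wake_up_process() step visible at the hunk's tail is omitted here for brevity):

    /* Condensed restatement of the notify policy the diff introduces. */
    static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
    {
            struct task_struct *tsk = req->task;
            struct io_ring_ctx *ctx = req->ctx;
            int notify = TWA_RESUME;        /* default: run on return to user space */

            if (ctx->flags & IORING_SETUP_SQPOLL)
                    notify = 0;             /* SQPOLL kthread: a plain wakeup suffices */
            else if (ctx->cq_ev_fd)
                    notify = TWA_SIGNAL;    /* eventfd waiters may block in-kernel and
                                               need a signal-style kick */

            return task_work_add(tsk, cb, notify);
    }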
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 700644a016a7..d37d7ea5ebe5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4072,14 +4072,22 @@ struct io_poll_table {
 	int error;
 };
 
-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
-				int notify)
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
 {
 	struct task_struct *tsk = req->task;
-	int ret;
+	struct io_ring_ctx *ctx = req->ctx;
+	int ret, notify = TWA_RESUME;
 
-	if (req->ctx->flags & IORING_SETUP_SQPOLL)
+	/*
+	 * SQPOLL kernel thread doesn't need notification, just a wakeup.
+	 * If we're not using an eventfd, then TWA_RESUME is always fine,
+	 * as we won't have dependencies between request completions for
+	 * other kernel wait conditions.
+	 */
+	if (ctx->flags & IORING_SETUP_SQPOLL)
 		notify = 0;
+	else if (ctx->cq_ev_fd)
+		notify = TWA_SIGNAL;
 
 	ret = task_work_add(tsk, cb, notify);
 	if (!ret)
@@ -4110,7 +4118,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	 * of executing it. We can't safely execute it anyway, as we may not
 	 * have the needed state needed for it anyway.
 	 */
-	ret = io_req_task_work_add(req, &req->task_work, TWA_SIGNAL);
+	ret = io_req_task_work_add(req, &req->task_work);
 	if (unlikely(ret)) {
 		WRITE_ONCE(poll->canceled, true);
 		tsk = io_wq_get_task(req->ctx->io_wq);
@@ -6201,7 +6209,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 		if (current->task_works)
 			task_work_run();
 		if (signal_pending(current)) {
-			ret = -ERESTARTSYS;
+			if (current->jobctl & JOBCTL_TASK_WORK) {
+				spin_lock_irq(&current->sighand->siglock);
+				current->jobctl &= ~JOBCTL_TASK_WORK;
+				recalc_sigpending();
+				spin_unlock_irq(&current->sighand->siglock);
+				continue;
+			}
+			ret = -EINTR;
 			break;
 		}
 		if (io_should_wake(&iowq, false))
@@ -6210,7 +6225,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	} while (1);
 	finish_wait(&ctx->wait, &iowq.wq);
 
-	restore_saved_sigmask_unless(ret == -ERESTARTSYS);
+	restore_saved_sigmask_unless(ret == -EINTR);
 
 	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
 }
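The io_cqring_wait() hunk relies on how TWA_SIGNAL is implemented in this kernel: queueing task_work with TWA_SIGNAL makes signal_pending() true without any real signal being delivered, so the waiter has to tell the two cases apart. Roughly paraphrased from the 5.8-era task_work_add() (a simplified sketch, not the verbatim kernel code):

    /* Rough paraphrase of the TWA_SIGNAL notification step in 5.8-era
     * kernel/task_work.c; simplified, not verbatim. */
    unsigned long flags;

    if (lock_task_sighand(task, &flags)) {
            task->jobctl |= JOBCTL_TASK_WORK;  /* "pending task_work, not a signal" */
            signal_wake_up(task, 0);           /* sets TIF_SIGPENDING, wakes the task */
            unlock_task_sighand(task, &flags);
    }

That is why io_cqring_wait() clears JOBCTL_TASK_WORK, calls recalc_sigpending(), and keeps waiting when no real signal is pending, and why a genuine signal now surfaces as -EINTR rather than the restartable -ERESTARTSYS, which an SA_RESTART handler would have hidden from userspace.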