author    Pavel Begunkov <asml.silence@gmail.com>  2021-10-19 23:43:46 +0100
committer Jens Axboe <axboe@kernel.dk>             2021-10-20 09:54:06 -0600
commit    e139a1ec92f8dbaaa7380d7e7ea17e148d473d06 (patch)
tree      50c714e7246dc6131dcba3dec1ed51a67f7648d9 /fs
parent    bc369921d6708542eb93da33478762f1162a5805 (diff)
io_uring: apply max_workers limit to all future users
Currently, IORING_REGISTER_IOWQ_MAX_WORKERS applies only to the task that issued it, which is unexpected for users. If one task creates a ring, limits workers, and then passes the ring to another task, the limit is not applied to that other task. Another pitfall is that a task must either create a ring or submit at least one request for IORING_REGISTER_IOWQ_MAX_WORKERS to work at all, further complicating the picture.

Change the API: save the limits and apply them to all future users. Note, this should be done first, before giving away the ring or submitting new requests, otherwise the result is not guaranteed.

Fixes: 2e480058ddc2 ("io-wq: provide a way to limit max number of workers")
Link: https://github.com/axboe/liburing/issues/460
Reported-by: Beld Zhang <beldzhang@gmail.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/51d0bae97180e08ab722c0d5c93e7439cfb6f697.1634683237.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
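[Editor's note] For illustration, a minimal userspace sketch of the ordering the message describes: register the worker limits right after creating the ring, before sharing the fd or submitting requests. It assumes a liburing version that provides io_uring_register_iowq_max_workers(); the limit values are arbitrary examples.

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	/* [0] = max bounded workers, [1] = max unbounded workers */
	unsigned int limits[2] = { 4, 8 };	/* example values */
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init failed: %d\n", ret);
		return 1;
	}

	/*
	 * With this patch the limits are stored in the ring context and
	 * applied to every task that attaches to the ring later, so set
	 * them before handing the fd to another task.
	 */
	ret = io_uring_register_iowq_max_workers(&ring, limits);
	if (ret < 0)
		fprintf(stderr, "register_iowq_max_workers failed: %d\n", ret);

	/* ... the ring fd may now be passed to other tasks ... */

	io_uring_queue_exit(&ring);
	return 0;
}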
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	| 29 +++++++++++++++++++++------
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e68d27829bb2..e8b71f14ac8b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -456,6 +456,8 @@ struct io_ring_ctx {
 		struct work_struct		exit_work;
 		struct list_head		tctx_list;
 		struct completion		ref_comp;
+		u32				iowq_limits[2];
+		bool				iowq_limits_set;
 	};
 };
@@ -9638,7 +9640,16 @@ static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 		ret = io_uring_alloc_task_context(current, ctx);
 		if (unlikely(ret))
 			return ret;
+
 		tctx = current->io_uring;
+		if (ctx->iowq_limits_set) {
+			unsigned int limits[2] = { ctx->iowq_limits[0],
+						   ctx->iowq_limits[1], };
+
+			ret = io_wq_max_workers(tctx->io_wq, limits);
+			if (ret)
+				return ret;
+		}
 	}
 	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
 		node = kmalloc(sizeof(*node), GFP_KERNEL);
@@ -10674,13 +10685,19 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
 		tctx = current->io_uring;
 	}
 
-	ret = -EINVAL;
-	if (!tctx || !tctx->io_wq)
-		goto err;
+	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
 
-	ret = io_wq_max_workers(tctx->io_wq, new_count);
-	if (ret)
-		goto err;
+	memcpy(ctx->iowq_limits, new_count, sizeof(new_count));
+	ctx->iowq_limits_set = true;
+
+	ret = -EINVAL;
+	if (tctx && tctx->io_wq) {
+		ret = io_wq_max_workers(tctx->io_wq, new_count);
+		if (ret)
+			goto err;
+	} else {
+		memset(new_count, 0, sizeof(new_count));
+	}
 
 	if (sqd) {
 		mutex_unlock(&sqd->lock);
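[Editor's note on the last hunk, not part of the commit] The limits are now saved into the ring context unconditionally, while applying them to the caller's io-wq becomes conditional: a missing io-wq is no longer an error. When the calling task has no io-wq yet, there are no previous per-task values to report, so new_count is zeroed; later in this function (not shown in the hunk) that array is copied back to userspace, which therefore reads zeroes instead of an echo of its input.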