author		Linus Torvalds <torvalds@linux-foundation.org>	2022-10-07 09:35:50 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-10-07 09:35:50 -0700
commit		7c989b1da3946e40bf71be00a0b401015235605a (patch)
tree		33de4ff984af6a301d6e80a05d40a893909388c9 /io_uring
parent		513389809e138ae903b6ef43c1d5d2ffaf4dca17 (diff)
parent		0e0abad2a71bcd7ba0f30e7975f5b4199ade4e60 (diff)
Merge tag 'for-6.1/passthrough-2022-10-04' of git://git.kernel.dk/linux
Pull passthrough updates from Jens Axboe:
 "With these changes, passthrough NVMe support over io_uring now
  performs at the same level as block device O_DIRECT, and in many
  cases 6-8% better.

  This contains:

   - Add support for fixed buffers for passthrough (Anuj, Kanchan)

   - Enable batched allocations and freeing on passthrough, similarly
     to what we support on the normal storage path (me)

   - Fix from Geert fixing an issue with !CONFIG_IO_URING"

* tag 'for-6.1/passthrough-2022-10-04' of git://git.kernel.dk/linux:
  io_uring: Add missing inline to io_uring_cmd_import_fixed() dummy
  nvme: wire up fixed buffer support for nvme passthrough
  nvme: pass ubuffer as an integer
  block: extend functionality to map bvec iterator
  block: factor out blk_rq_map_bio_alloc helper
  block: rename bio_map_put to blk_mq_map_bio_put
  nvme: refactor nvme_alloc_request
  nvme: refactor nvme_add_user_metadata
  nvme: Use blk_rq_map_user_io helper
  scsi: Use blk_rq_map_user_io helper
  block: add blk_rq_map_user_io
  io_uring: introduce fixed buffer support for io_uring_cmd
  io_uring: add io_uring_cmd_import_fixed
  nvme: enable batched completions of passthrough IO
  nvme: split out metadata vs non metadata end_io uring_cmd completions
  block: allow end_io based requests in the completion batch handling
  block: change request end_io handler to pass back a return value
  block: enable batched allocation for blk_mq_alloc_request()
  block: kill deprecated BUG_ON() in the flush handling
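What the fixed-buffer support means from userspace: a passthrough SQE can now set IORING_URING_CMD_FIXED and name a buffer previously registered with io_uring_register_buffers(), so the pages are pinned once at registration rather than per I/O. A minimal sketch, assuming liburing with 6.1 uapi headers and a ring created with IORING_SETUP_SQE128 | IORING_SETUP_CQE32 (NVMe passthrough needs the big SQE/CQE variants); the device, namespace id, and LBA values are placeholders:

#include <liburing.h>
#include <linux/nvme_ioctl.h>
#include <stdint.h>
#include <string.h>

/*
 * Queue one NVMe passthrough read against an nvme char device
 * (e.g. /dev/ng0n1), sourcing the data buffer from registered-buffer
 * slot 0. `buf` must have been registered beforehand:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	io_uring_register_buffers(ring, &iov, 1);
 */
static int queue_fixed_passthru_read(struct io_uring *ring, int fd,
				     void *buf, __u32 len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct nvme_uring_cmd *cmd;

	if (!sqe)
		return -1;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO;
	/* The new flag from this series: resolve the data buffer against
	 * the table registered with io_uring_register_buffers(). */
	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
	sqe->buf_index = 0;

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = 0x02;			/* NVMe read */
	cmd->nsid = 1;				/* placeholder namespace id */
	cmd->addr = (__u64)(uintptr_t)buf;	/* must fall inside slot 0 */
	cmd->data_len = len;
	cmd->cdw10 = 0;				/* placeholder starting LBA */
	cmd->cdw12 = len / 512 - 1;		/* 0-based block count; 512B LBA assumed */

	return io_uring_submit(ring);
}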
Diffstat (limited to 'io_uring')
 io_uring/uring_cmd.c | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index f3ed61e9bd0f..e50de0b6b9f8 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -4,10 +4,12 @@
 #include <linux/file.h>
 #include <linux/io_uring.h>
 #include <linux/security.h>
+#include <linux/nospec.h>
 
 #include <uapi/linux/io_uring.h>
 
 #include "io_uring.h"
+#include "rsrc.h"
 #include "uring_cmd.h"
 
 static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
@@ -76,8 +78,24 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
 
-	if (sqe->rw_flags || sqe->__pad1)
+	if (sqe->__pad1)
 		return -EINVAL;
+
+	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
+	if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
+		return -EINVAL;
+
+	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
+		struct io_ring_ctx *ctx = req->ctx;
+		u16 index;
+
+		req->buf_index = READ_ONCE(sqe->buf_index);
+		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+			return -EFAULT;
+		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+		req->imu = ctx->user_bufs[index];
+		io_req_set_rsrc_node(req, ctx, 0);
+	}
 	ioucmd->cmd = sqe->cmd;
 	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
 	return 0;
@@ -129,3 +147,12 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 
 	return IOU_ISSUE_SKIP_COMPLETE;
 }
+
+int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+			      struct iov_iter *iter, void *ioucmd)
+{
+	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+
+	return io_import_fixed(rw, iter, req->imu, ubuf, len);
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
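
A note on the io_uring_cmd_prep() change above: it follows the standard two-step Spectre-v1 defense, an architectural bounds check followed by array_index_nospec(), which clamps the index to [0, nr_user_bufs) even on a speculatively mispredicted path, so the table load cannot be turned into an out-of-bounds gadget. A generic sketch of the same pattern (names here are illustrative, not kernel API):

#include <linux/nospec.h>

/* Illustrative only: bounds-check an untrusted index, then sanitize it
 * with array_index_nospec() before using it as an array subscript. */
static void *lookup_slot(void **table, unsigned int nr, unsigned int idx)
{
	if (idx >= nr)			/* architectural bounds check */
		return NULL;
	/* clamps idx to [0, nr) even under speculative execution */
	idx = array_index_nospec(idx, nr);
	return table[idx];
}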
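On the driver side, io_uring_cmd_import_fixed() is the hook that turns the user's (ubuf, len) pair into an iov_iter backed by the pre-registered buffer that io_uring_cmd_prep() already resolved into req->imu; the nvme patches in this pull call it when IORING_URING_CMD_FIXED is set, instead of pinning user pages per I/O. A hedged sketch of such a call site (the helper name is illustrative; the real one lives in drivers/nvme/host/ioctl.c):

#include <linux/io_uring.h>
#include <linux/uio.h>

/* Illustrative ->uring_cmd() helper: import the fixed buffer for this
 * passthrough command, or tell the caller to fall back to the regular
 * per-I/O user-page mapping path. */
static int import_cmd_buffer(struct io_uring_cmd *ioucmd, u64 ubuf,
			     unsigned long len, int rw,
			     struct iov_iter *iter)
{
	if (!(ioucmd->flags & IORING_URING_CMD_FIXED))
		return -EINVAL;		/* caller maps user memory instead */

	/* resolves against the imu looked up in io_uring_cmd_prep() */
	return io_uring_cmd_import_fixed(ubuf, len, rw, iter, ioucmd);
}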