author      Jens Axboe <axboe@kernel.dk>    2022-07-07 14:30:09 -0600
committer   Jens Axboe <axboe@kernel.dk>    2022-07-24 18:39:17 -0600
commit      43e0bbbd0b0e30d232fd8e9e908125b5c49a9fbc (patch)
tree        36990dc646e88f2bc176bb4873fa783b09ac9c69 /io_uring
parent      9731bc9855dc169f27433fef3c4d0ff3496c512d (diff)
download    linux-43e0bbbd0b0e30d232fd8e9e908125b5c49a9fbc.tar.bz2
io_uring: add netmsg cache
For recvmsg/sendmsg, if they don't complete inline, we currently need
to allocate a struct io_async_msghdr for each request. This is a
somewhat large struct.
Hook up sendmsg/recvmsg to use the io_alloc_cache. This reduces the
alloc + free overhead considerably, yielding a 4-5% performance gain
when running netbench.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
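
For context, the path this speeds up is a sendmsg/recvmsg request that cannot
complete inline and has to go async. Below is a minimal userspace sketch of
that workload using liburing; it is not part of the patch, and the connected
socket `sockfd` and prepared `msg` are placeholders.

/*
 * Sketch only: a recvmsg submission that may not complete inline. When it
 * goes async, the kernel needs a struct io_async_msghdr, which this patch
 * now serves from the per-ring netmsg cache instead of kmalloc/kfree.
 * Assumes liburing; sockfd stands in for an already-connected socket.
 */
#include <errno.h>
#include <liburing.h>
#include <sys/socket.h>

static int recvmsg_once(struct io_uring *ring, int sockfd, struct msghdr *msg)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -EBUSY;
        io_uring_prep_recvmsg(sqe, sockfd, msg, 0);

        ret = io_uring_submit(ring);
        if (ret < 0)
                return ret;

        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res;                 /* bytes received, or -errno */
        io_uring_cqe_seen(ring, cqe);
        return ret;
}

With many such requests in flight on one ring, the async_msghdr allocations now
recycle through the per-ring cache under the ring lock rather than hitting the
allocator each time.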
Diffstat (limited to 'io_uring')
-rw-r--r--   io_uring/io_uring.c |  3
-rw-r--r--   io_uring/net.c      | 63
-rw-r--r--   io_uring/net.h      | 13
3 files changed, 69 insertions, 10 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index c9c23e459766..f697ca4e8f55 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -89,6 +89,7 @@
 #include "kbuf.h"
 #include "rsrc.h"
 #include "cancel.h"
+#include "net.h"
 #include "timeout.h"
 #include "poll.h"
 
@@ -297,6 +298,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
        INIT_LIST_HEAD(&ctx->cq_overflow_list);
        INIT_LIST_HEAD(&ctx->io_buffers_cache);
        io_alloc_cache_init(&ctx->apoll_cache);
+       io_alloc_cache_init(&ctx->netmsg_cache);
        init_completion(&ctx->ref_comp);
        xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
        mutex_init(&ctx->uring_lock);
@@ -2469,6 +2471,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
        __io_cqring_overflow_flush(ctx, true);
        io_eventfd_unregister(ctx);
        io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
+       io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
        mutex_unlock(&ctx->uring_lock);
        io_destroy_buffers(ctx);
        if (ctx->sq_creds)
diff --git a/io_uring/net.c b/io_uring/net.c
index 6679069eeef1..185553174437 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -12,6 +12,7 @@
 
 #include "io_uring.h"
 #include "kbuf.h"
+#include "alloc_cache.h"
 #include "net.h"
 
 #if defined(CONFIG_NET)
@@ -97,18 +98,55 @@ static bool io_net_retry(struct socket *sock, int flags)
        return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
 }
 
+static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
+{
+       struct io_async_msghdr *hdr = req->async_data;
+
+       if (!hdr || issue_flags & IO_URING_F_UNLOCKED)
+               return;
+
+       /* Let normal cleanup path reap it if we fail adding to the cache */
+       if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
+               req->async_data = NULL;
+               req->flags &= ~REQ_F_ASYNC_DATA;
+       }
+}
+
+static struct io_async_msghdr *io_recvmsg_alloc_async(struct io_kiocb *req,
+                                                      unsigned int issue_flags)
+{
+       struct io_ring_ctx *ctx = req->ctx;
+       struct io_cache_entry *entry;
+
+       if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+           (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
+               struct io_async_msghdr *hdr;
+
+               hdr = container_of(entry, struct io_async_msghdr, cache);
+               req->flags |= REQ_F_ASYNC_DATA;
+               req->async_data = hdr;
+               return hdr;
+       }
+
+       if (!io_alloc_async_data(req))
+               return req->async_data;
+
+       return NULL;
+}
+
 static int io_setup_async_msg(struct io_kiocb *req,
-                             struct io_async_msghdr *kmsg)
+                             struct io_async_msghdr *kmsg,
+                             unsigned int issue_flags)
 {
        struct io_async_msghdr *async_msg = req->async_data;
 
        if (async_msg)
                return -EAGAIN;
-       if (io_alloc_async_data(req)) {
+       async_msg = io_recvmsg_alloc_async(req, issue_flags);
+       if (!async_msg) {
                kfree(kmsg->free_iov);
                return -ENOMEM;
        }
-       async_msg = req->async_data;
        req->flags |= REQ_F_NEED_CLEANUP;
        memcpy(async_msg, kmsg, sizeof(*kmsg));
        async_msg->msg.msg_name = &async_msg->addr;
@@ -195,7 +233,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 
        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
-               return io_setup_async_msg(req, kmsg);
+               return io_setup_async_msg(req, kmsg, issue_flags);
 
        flags = sr->msg_flags;
        if (issue_flags & IO_URING_F_NONBLOCK)
@@ -207,13 +245,13 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 
        if (ret < min_ret) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-                       return io_setup_async_msg(req, kmsg);
+                       return io_setup_async_msg(req, kmsg, issue_flags);
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
-                       return io_setup_async_msg(req, kmsg);
+                       return io_setup_async_msg(req, kmsg, issue_flags);
                }
                req_set_fail(req);
        }
@@ -221,6 +259,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
        if (kmsg->free_iov)
                kfree(kmsg->free_iov);
        req->flags &= ~REQ_F_NEED_CLEANUP;
+       io_netmsg_recycle(req, issue_flags);
        if (ret >= 0)
                ret += sr->done_io;
        else if (sr->done_io)
@@ -495,7 +534,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 
        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
-               return io_setup_async_msg(req, kmsg);
+               return io_setup_async_msg(req, kmsg, issue_flags);
 
        if (io_do_buffer_select(req)) {
                void __user *buf;
@@ -519,13 +558,13 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
        ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg, kmsg->uaddr, flags);
        if (ret < min_ret) {
                if (ret == -EAGAIN && force_nonblock)
-                       return io_setup_async_msg(req, kmsg);
+                       return io_setup_async_msg(req, kmsg, issue_flags);
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
-                       return io_setup_async_msg(req, kmsg);
+                       return io_setup_async_msg(req, kmsg, issue_flags);
                }
                req_set_fail(req);
        } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
@@ -535,6 +574,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
        /* fast path, check for non-NULL to avoid function call */
        if (kmsg->free_iov)
                kfree(kmsg->free_iov);
+       io_netmsg_recycle(req, issue_flags);
        req->flags &= ~REQ_F_NEED_CLEANUP;
        if (ret > 0)
                ret += sr->done_io;
@@ -848,4 +888,9 @@ out:
        io_req_set_res(req, ret, 0);
        return IOU_OK;
 }
+
+void io_netmsg_cache_free(struct io_cache_entry *entry)
+{
+       kfree(container_of(entry, struct io_async_msghdr, cache));
+}
 #endif
diff --git a/io_uring/net.h b/io_uring/net.h
index 81d71d164770..178a6d8b76e0 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -3,9 +3,14 @@
 #include <linux/net.h>
 #include <linux/uio.h>
 
+#include "alloc_cache.h"
+
 #if defined(CONFIG_NET)
 
 struct io_async_msghdr {
-       struct iovec fast_iov[UIO_FASTIOV];
+       union {
+               struct iovec fast_iov[UIO_FASTIOV];
+               struct io_cache_entry cache;
+       };
        /* points to an allocated iov, if NULL we use fast_iov instead */
        struct iovec *free_iov;
        struct sockaddr __user *uaddr;
@@ -40,4 +45,10 @@ int io_socket(struct io_kiocb *req, unsigned int issue_flags);
 int io_connect_prep_async(struct io_kiocb *req);
 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_connect(struct io_kiocb *req, unsigned int issue_flags);
+
+void io_netmsg_cache_free(struct io_cache_entry *entry);
+#else
+static inline void io_netmsg_cache_free(struct io_cache_entry *entry)
+{
+}
 #endif
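
A note on the data-structure choice (my reading of the diff, not part of the
commit message): struct io_cache_entry sits in a union with fast_iov, so a
cached io_async_msghdr spends no extra bytes on its freelist link, and a cache
hit skips the kmalloc entirely. A freestanding sketch of that intrusive-cache
pattern, with hypothetical names, follows.

/*
 * Standalone model of the alloc-cache pattern (hypothetical names, not
 * kernel code): the freelist link lives in a union with scratch space the
 * object does not need while it sits idle, so caching it costs nothing
 * and reuse is a single pointer pop.
 */
#include <stdlib.h>

struct cache_entry {
        struct cache_entry *next;
};

struct msg_obj {
        union {
                char iov_scratch[256];  /* stands in for fast_iov[UIO_FASTIOV] */
                struct cache_entry cache;
        };
        void *free_iov;                 /* other per-request state */
};

struct alloc_cache {
        struct cache_entry *head;
};

static struct msg_obj *msg_obj_get(struct alloc_cache *c)
{
        if (c->head) {
                struct cache_entry *e = c->head;

                c->head = e->next;
                /* the kernel uses container_of(); the union is the first member here */
                return (struct msg_obj *)e;
        }
        return malloc(sizeof(struct msg_obj));
}

static void msg_obj_put(struct alloc_cache *c, struct msg_obj *obj)
{
        obj->cache.next = c->head;
        c->head = &obj->cache;
}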