From 52307ac4f2b507f60bae6df5be938d35e199c688 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 19 Feb 2024 14:16:47 -0700 Subject: io_uring/net: unify how recvmsg and sendmsg copy in the msghdr For recvmsg, we roll our own since we support buffer selections. This isn't the case for sendmsg right now, but in preparation for doing so, make the recvmsg copy helpers generic so we can call them from the sendmsg side as well. Signed-off-by: Jens Axboe --- io_uring/net.c | 271 ++++++++++++++++++++++++++++++--------------------------- 1 file changed, 142 insertions(+), 129 deletions(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index 43bc9a5f96f9..8c64e552a00f 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -204,16 +204,150 @@ static int io_setup_async_msg(struct io_kiocb *req, return -EAGAIN; } +static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg) +{ + int hdr; + + if (iomsg->namelen < 0) + return true; + if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out), + iomsg->namelen, &hdr)) + return true; + if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr)) + return true; + + return false; +} + +#ifdef CONFIG_COMPAT +static int __io_compat_msg_copy_hdr(struct io_kiocb *req, + struct io_async_msghdr *iomsg, + struct sockaddr __user **addr, int ddir) +{ + struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); + struct compat_msghdr msg; + struct compat_iovec __user *uiov; + int ret; + + if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg))) + return -EFAULT; + + ret = __get_compat_msghdr(&iomsg->msg, &msg, addr); + if (ret) + return ret; + + uiov = compat_ptr(msg.msg_iov); + if (req->flags & REQ_F_BUFFER_SELECT) { + compat_ssize_t clen; + + iomsg->free_iov = NULL; + if (msg.msg_iovlen == 0) { + sr->len = 0; + } else if (msg.msg_iovlen > 1) { + return -EINVAL; + } else { + if (!access_ok(uiov, sizeof(*uiov))) + return -EFAULT; + if (__get_user(clen, &uiov->iov_len)) + return -EFAULT; + if (clen < 0) + return -EINVAL; + sr->len = clen; + } + + if (ddir == ITER_DEST && req->flags & REQ_F_APOLL_MULTISHOT) { + iomsg->namelen = msg.msg_namelen; + iomsg->controllen = msg.msg_controllen; + if (io_recvmsg_multishot_overflow(iomsg)) + return -EOVERFLOW; + } + + return 0; + } + + iomsg->free_iov = iomsg->fast_iov; + ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg.msg_iovlen, + UIO_FASTIOV, &iomsg->free_iov, + &iomsg->msg.msg_iter, true); + if (unlikely(ret < 0)) + return ret; + + return 0; +} +#endif + +static int __io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg, + struct sockaddr __user **addr, int ddir) +{ + struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); + struct user_msghdr msg; + int ret; + + if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg))) + return -EFAULT; + + ret = __copy_msghdr(&iomsg->msg, &msg, addr); + if (ret) + return ret; + + if (req->flags & REQ_F_BUFFER_SELECT) { + if (msg.msg_iovlen == 0) { + sr->len = iomsg->fast_iov[0].iov_len = 0; + iomsg->fast_iov[0].iov_base = NULL; + iomsg->free_iov = NULL; + } else if (msg.msg_iovlen > 1) { + return -EINVAL; + } else { + if (copy_from_user(iomsg->fast_iov, msg.msg_iov, + sizeof(*msg.msg_iov))) + return -EFAULT; + sr->len = iomsg->fast_iov[0].iov_len; + iomsg->free_iov = NULL; + } + + if (ddir == ITER_DEST && req->flags & REQ_F_APOLL_MULTISHOT) { + iomsg->namelen = msg.msg_namelen; + iomsg->controllen = msg.msg_controllen; + if (io_recvmsg_multishot_overflow(iomsg)) + return -EOVERFLOW; + } + + return 0; + } + + 
iomsg->free_iov = iomsg->fast_iov; + ret = __import_iovec(ddir, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV, + &iomsg->free_iov, &iomsg->msg.msg_iter, false); + if (unlikely(ret < 0)) + return ret; + + return 0; +} + +static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg, + struct sockaddr __user **addr, int ddir) +{ + iomsg->msg.msg_name = &iomsg->addr; + iomsg->msg.msg_iter.nr_segs = 0; + +#ifdef CONFIG_COMPAT + if (req->ctx->compat) + return __io_compat_msg_copy_hdr(req, iomsg, addr, ddir); +#endif + + return __io_msg_copy_hdr(req, iomsg, addr, ddir); +} + static int io_sendmsg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg) { struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); int ret; - iomsg->msg.msg_name = &iomsg->addr; - iomsg->free_iov = iomsg->fast_iov; - ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags, - &iomsg->free_iov); + ret = io_msg_copy_hdr(req, iomsg, NULL, ITER_SOURCE); + if (ret) + return ret; + /* save msg_control as sys_sendmsg() overwrites it */ sr->msg_control = iomsg->msg.msg_control_user; return ret; @@ -435,142 +569,21 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) return IOU_OK; } -static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg) -{ - int hdr; - - if (iomsg->namelen < 0) - return true; - if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out), - iomsg->namelen, &hdr)) - return true; - if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr)) - return true; - - return false; -} - -static int __io_recvmsg_copy_hdr(struct io_kiocb *req, - struct io_async_msghdr *iomsg) -{ - struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); - struct user_msghdr msg; - int ret; - - if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg))) - return -EFAULT; - - ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr); - if (ret) - return ret; - - if (req->flags & REQ_F_BUFFER_SELECT) { - if (msg.msg_iovlen == 0) { - sr->len = iomsg->fast_iov[0].iov_len = 0; - iomsg->fast_iov[0].iov_base = NULL; - iomsg->free_iov = NULL; - } else if (msg.msg_iovlen > 1) { - return -EINVAL; - } else { - if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov))) - return -EFAULT; - sr->len = iomsg->fast_iov[0].iov_len; - iomsg->free_iov = NULL; - } - - if (req->flags & REQ_F_APOLL_MULTISHOT) { - iomsg->namelen = msg.msg_namelen; - iomsg->controllen = msg.msg_controllen; - if (io_recvmsg_multishot_overflow(iomsg)) - return -EOVERFLOW; - } - } else { - iomsg->free_iov = iomsg->fast_iov; - ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV, - &iomsg->free_iov, &iomsg->msg.msg_iter, - false); - if (ret > 0) - ret = 0; - } - - return ret; -} - -#ifdef CONFIG_COMPAT -static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, - struct io_async_msghdr *iomsg) -{ - struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); - struct compat_msghdr msg; - struct compat_iovec __user *uiov; - int ret; - - if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg))) - return -EFAULT; - - ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr); - if (ret) - return ret; - - uiov = compat_ptr(msg.msg_iov); - if (req->flags & REQ_F_BUFFER_SELECT) { - compat_ssize_t clen; - - iomsg->free_iov = NULL; - if (msg.msg_iovlen == 0) { - sr->len = 0; - } else if (msg.msg_iovlen > 1) { - return -EINVAL; - } else { - if (!access_ok(uiov, sizeof(*uiov))) - return -EFAULT; - if (__get_user(clen, &uiov->iov_len)) - return -EFAULT; - if (clen < 0) - return -EINVAL; - sr->len = 
clen; - } - - if (req->flags & REQ_F_APOLL_MULTISHOT) { - iomsg->namelen = msg.msg_namelen; - iomsg->controllen = msg.msg_controllen; - if (io_recvmsg_multishot_overflow(iomsg)) - return -EOVERFLOW; - } - } else { - iomsg->free_iov = iomsg->fast_iov; - ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen, - UIO_FASTIOV, &iomsg->free_iov, - &iomsg->msg.msg_iter, true); - if (ret < 0) - return ret; - } - - return 0; -} -#endif - static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg) { - iomsg->msg.msg_name = &iomsg->addr; - iomsg->msg.msg_iter.nr_segs = 0; - -#ifdef CONFIG_COMPAT - if (req->ctx->compat) - return __io_compat_recvmsg_copy_hdr(req, iomsg); -#endif - - return __io_recvmsg_copy_hdr(req, iomsg); + return io_msg_copy_hdr(req, iomsg, &iomsg->uaddr, ITER_DEST); } int io_recvmsg_prep_async(struct io_kiocb *req) { + struct io_async_msghdr *iomsg; int ret; if (!io_msg_alloc_async_prep(req)) return -ENOMEM; - ret = io_recvmsg_copy_hdr(req, req->async_data); + iomsg = req->async_data; + ret = io_recvmsg_copy_hdr(req, iomsg); if (!ret) req->flags |= REQ_F_NEED_CLEANUP; return ret; -- cgit v1.2.3 From c55978024d123d43808ab393a0a4ce3ce8568150 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 27 Feb 2024 11:09:20 -0700 Subject: io_uring/net: move receive multishot out of the generic msghdr path Move the actual user_msghdr / compat_msghdr into the send and receive sides, respectively, so we can move the uaddr receive handling into its own handler, and ditto the multishot with buffer selection logic. Signed-off-by: Jens Axboe --- io_uring/net.c | 161 ++++++++++++++++++++++++++++++++------------------------- 1 file changed, 91 insertions(+), 70 deletions(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index 8c64e552a00f..7a07c5563d66 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -204,46 +204,26 @@ static int io_setup_async_msg(struct io_kiocb *req, return -EAGAIN; } -static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg) -{ - int hdr; - - if (iomsg->namelen < 0) - return true; - if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out), - iomsg->namelen, &hdr)) - return true; - if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr)) - return true; - - return false; -} - #ifdef CONFIG_COMPAT -static int __io_compat_msg_copy_hdr(struct io_kiocb *req, - struct io_async_msghdr *iomsg, - struct sockaddr __user **addr, int ddir) +static int io_compat_msg_copy_hdr(struct io_kiocb *req, + struct io_async_msghdr *iomsg, + struct compat_msghdr *msg, int ddir) { struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); - struct compat_msghdr msg; struct compat_iovec __user *uiov; int ret; - if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg))) + if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg))) return -EFAULT; - ret = __get_compat_msghdr(&iomsg->msg, &msg, addr); - if (ret) - return ret; - - uiov = compat_ptr(msg.msg_iov); + uiov = compat_ptr(msg->msg_iov); if (req->flags & REQ_F_BUFFER_SELECT) { compat_ssize_t clen; iomsg->free_iov = NULL; - if (msg.msg_iovlen == 0) { + if (msg->msg_iovlen == 0) { sr->len = 0; - } else if (msg.msg_iovlen > 1) { + } else if (msg->msg_iovlen > 1) { return -EINVAL; } else { if (!access_ok(uiov, sizeof(*uiov))) @@ -255,18 +235,11 @@ static int __io_compat_msg_copy_hdr(struct io_kiocb *req, sr->len = clen; } - if (ddir == ITER_DEST && req->flags & REQ_F_APOLL_MULTISHOT) { - iomsg->namelen = msg.msg_namelen; - iomsg->controllen = msg.msg_controllen; 
- if (io_recvmsg_multishot_overflow(iomsg)) - return -EOVERFLOW; - } - return 0; } iomsg->free_iov = iomsg->fast_iov; - ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg.msg_iovlen, + ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen, UIO_FASTIOV, &iomsg->free_iov, &iomsg->msg.msg_iter, true); if (unlikely(ret < 0)) @@ -276,47 +249,35 @@ static int __io_compat_msg_copy_hdr(struct io_kiocb *req, } #endif -static int __io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg, - struct sockaddr __user **addr, int ddir) +static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg, + struct user_msghdr *msg, int ddir) { struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); - struct user_msghdr msg; int ret; - if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg))) + if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg))) return -EFAULT; - ret = __copy_msghdr(&iomsg->msg, &msg, addr); - if (ret) - return ret; - if (req->flags & REQ_F_BUFFER_SELECT) { - if (msg.msg_iovlen == 0) { + if (msg->msg_iovlen == 0) { sr->len = iomsg->fast_iov[0].iov_len = 0; iomsg->fast_iov[0].iov_base = NULL; iomsg->free_iov = NULL; - } else if (msg.msg_iovlen > 1) { + } else if (msg->msg_iovlen > 1) { return -EINVAL; } else { - if (copy_from_user(iomsg->fast_iov, msg.msg_iov, - sizeof(*msg.msg_iov))) + if (copy_from_user(iomsg->fast_iov, msg->msg_iov, + sizeof(*msg->msg_iov))) return -EFAULT; sr->len = iomsg->fast_iov[0].iov_len; iomsg->free_iov = NULL; } - if (ddir == ITER_DEST && req->flags & REQ_F_APOLL_MULTISHOT) { - iomsg->namelen = msg.msg_namelen; - iomsg->controllen = msg.msg_controllen; - if (io_recvmsg_multishot_overflow(iomsg)) - return -EOVERFLOW; - } - return 0; } iomsg->free_iov = iomsg->fast_iov; - ret = __import_iovec(ddir, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV, + ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV, &iomsg->free_iov, &iomsg->msg.msg_iter, false); if (unlikely(ret < 0)) return ret; @@ -324,30 +285,34 @@ static int __io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg return 0; } -static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg, - struct sockaddr __user **addr, int ddir) +static int io_sendmsg_copy_hdr(struct io_kiocb *req, + struct io_async_msghdr *iomsg) { + struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); + struct user_msghdr msg; + int ret; + iomsg->msg.msg_name = &iomsg->addr; iomsg->msg.msg_iter.nr_segs = 0; #ifdef CONFIG_COMPAT - if (req->ctx->compat) - return __io_compat_msg_copy_hdr(req, iomsg, addr, ddir); -#endif + if (unlikely(req->ctx->compat)) { + struct compat_msghdr cmsg; - return __io_msg_copy_hdr(req, iomsg, addr, ddir); -} + ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE); + if (unlikely(ret)) + return ret; -static int io_sendmsg_copy_hdr(struct io_kiocb *req, - struct io_async_msghdr *iomsg) -{ - struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); - int ret; + return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL); + } +#endif - ret = io_msg_copy_hdr(req, iomsg, NULL, ITER_SOURCE); - if (ret) + ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE); + if (unlikely(ret)) return ret; + ret = __copy_msghdr(&iomsg->msg, &msg, NULL); + /* save msg_control as sys_sendmsg() overwrites it */ sr->msg_control = iomsg->msg.msg_control_user; return ret; @@ -569,10 +534,66 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) return IOU_OK; } +static int io_recvmsg_mshot_prep(struct io_kiocb *req, + struct 
io_async_msghdr *iomsg, + size_t namelen, size_t controllen) +{ + if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) == + (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) { + int hdr; + + if (unlikely(namelen < 0)) + return -EOVERFLOW; + if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out), + namelen, &hdr)) + return -EOVERFLOW; + if (check_add_overflow(hdr, (int)controllen, &hdr)) + return -EOVERFLOW; + + iomsg->namelen = namelen; + iomsg->controllen = controllen; + return 0; + } + + return 0; +} + static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg) { - return io_msg_copy_hdr(req, iomsg, &iomsg->uaddr, ITER_DEST); + struct user_msghdr msg; + int ret; + + iomsg->msg.msg_name = &iomsg->addr; + iomsg->msg.msg_iter.nr_segs = 0; + +#ifdef CONFIG_COMPAT + if (unlikely(req->ctx->compat)) { + struct compat_msghdr cmsg; + + ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST); + if (unlikely(ret)) + return ret; + + ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr); + if (unlikely(ret)) + return ret; + + return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen, + cmsg.msg_controllen); + } +#endif + + ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST); + if (unlikely(ret)) + return ret; + + ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr); + if (unlikely(ret)) + return ret; + + return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen, + msg.msg_controllen); } int io_recvmsg_prep_async(struct io_kiocb *req) -- cgit v1.2.3 From 792060de8b3e9ca11fab4afc0c3c5927186152a2 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 26 Feb 2024 16:43:01 -0700 Subject: io_uring/net: improve the usercopy for sendmsg/recvmsg We're spending a considerable amount of the sendmsg/recvmsg time just copying in the message header. And for provided buffers, the known single entry iovec. Be a bit smarter about it and enable/disable user access around our copying. In a test case that does both sendmsg and recvmsg, the runtime before this change (averaged over multiple runs, very stable times however): Kernel Time Diff ==================================== -git 4720 usec -git+commit 4311 usec -8.7% and looking at a profile diff, we see the following: 0.25% +9.33% [kernel.kallsyms] [k] _copy_from_user 4.47% -3.32% [kernel.kallsyms] [k] __io_msg_copy_hdr.constprop.0 where we drop more than 9% of _copy_from_user() time, and consequently add time to __io_msg_copy_hdr() where the copies are now attributed to, but with a net win of 6%. In comparison, the same test case with send/recv runs in 3745 usec, which is (expectedly) still quite a bit faster. But at least sendmsg/recvmsg is now only ~13% slower, where it was ~21% slower before. 
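The win above comes from replacing per-field copy_from_user() calls with one user_access_begin() window and a batch of unsafe_get_user() reads. As a rough sketch of that pattern only (the struct and helper names below are made up, this is not the io_uring code itself):

/*
 * Sketch of the batched-copy pattern; "demo_hdr" and "demo_read_hdr" are
 * made-up names. One access check opens the user access window, then the
 * individual fields are read with unsafe_get_user() instead of separate
 * copy_from_user() calls.
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_hdr {
	void __user	*base;
	size_t		len;
	unsigned int	flags;
};

static int demo_read_hdr(struct demo_hdr __user *uhdr, struct demo_hdr *dst)
{
	int ret = -EFAULT;

	if (!user_access_begin(uhdr, sizeof(*uhdr)))
		return -EFAULT;
	unsafe_get_user(dst->base, &uhdr->base, out);
	unsafe_get_user(dst->len, &uhdr->len, out);
	unsafe_get_user(dst->flags, &uhdr->flags, out);
	ret = 0;
out:
	user_access_end();
	return ret;
}

Each copy_from_user() call otherwise performs its own range check and, on x86, its own STAC/CLAC pair; batching the reads inside one window is what moves the cycles out of _copy_from_user() in the profile above.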
Signed-off-by: Jens Axboe --- io_uring/net.c | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index 7a07c5563d66..83fba2882720 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -255,27 +255,42 @@ static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg, struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); int ret; - if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg))) + if (!user_access_begin(sr->umsg, sizeof(*sr->umsg))) return -EFAULT; + ret = -EFAULT; + unsafe_get_user(msg->msg_name, &sr->umsg->msg_name, ua_end); + unsafe_get_user(msg->msg_namelen, &sr->umsg->msg_namelen, ua_end); + unsafe_get_user(msg->msg_iov, &sr->umsg->msg_iov, ua_end); + unsafe_get_user(msg->msg_iovlen, &sr->umsg->msg_iovlen, ua_end); + unsafe_get_user(msg->msg_control, &sr->umsg->msg_control, ua_end); + unsafe_get_user(msg->msg_controllen, &sr->umsg->msg_controllen, ua_end); + msg->msg_flags = 0; + if (req->flags & REQ_F_BUFFER_SELECT) { if (msg->msg_iovlen == 0) { sr->len = iomsg->fast_iov[0].iov_len = 0; iomsg->fast_iov[0].iov_base = NULL; iomsg->free_iov = NULL; } else if (msg->msg_iovlen > 1) { - return -EINVAL; + ret = -EINVAL; + goto ua_end; } else { - if (copy_from_user(iomsg->fast_iov, msg->msg_iov, - sizeof(*msg->msg_iov))) - return -EFAULT; + /* we only need the length for provided buffers */ + if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t))) + goto ua_end; + unsafe_get_user(iomsg->fast_iov[0].iov_len, + &msg->msg_iov[0].iov_len, ua_end); sr->len = iomsg->fast_iov[0].iov_len; iomsg->free_iov = NULL; } - - return 0; + ret = 0; +ua_end: + user_access_end(); + return ret; } + user_access_end(); iomsg->free_iov = iomsg->fast_iov; ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV, &iomsg->free_iov, &iomsg->msg.msg_iter, false); -- cgit v1.2.3 From eb18c29dd2a3d49cf220ee34411ff0fe60b36bf2 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 25 Feb 2024 12:59:05 -0700 Subject: io_uring/net: move recv/recvmsg flags out of retry loop The flags don't change, just intialize them once rather than every loop for multishot. 
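A minimal userspace analogue of the same change, assuming an ordinary recv() loop (illustrative only, not kernel code): the MSG_DONTWAIT decision is loop-invariant, so it is computed once before retrying.

/*
 * Userspace analogue: the recv() flags do not change across retries, so
 * compute them once before the loop rather than on every iteration.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <stddef.h>

ssize_t drain_socket(int fd, char *buf, size_t len, int nonblock)
{
	int flags = nonblock ? MSG_DONTWAIT : 0;	/* hoisted out of the loop */
	size_t total = 0;
	ssize_t ret = 0;

	while (total < len) {
		ret = recv(fd, buf + total, len - total, flags);
		if (ret <= 0)
			break;
		total += ret;
	}
	return total ? (ssize_t)total : ret;
}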
Signed-off-by: Jens Axboe --- io_uring/net.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index 83fba2882720..8b9a9f5bbd14 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -855,6 +855,10 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) if (!io_check_multishot(req, issue_flags)) return io_setup_async_msg(req, kmsg, issue_flags); + flags = sr->msg_flags; + if (force_nonblock) + flags |= MSG_DONTWAIT; + retry_multishot: if (io_do_buffer_select(req)) { void __user *buf; @@ -875,10 +879,6 @@ retry_multishot: iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len); } - flags = sr->msg_flags; - if (force_nonblock) - flags |= MSG_DONTWAIT; - kmsg->msg.msg_get_inq = 1; kmsg->msg.msg_inq = -1; if (req->flags & REQ_F_APOLL_MULTISHOT) { @@ -964,6 +964,10 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags) msg.msg_iocb = NULL; msg.msg_ubuf = NULL; + flags = sr->msg_flags; + if (force_nonblock) + flags |= MSG_DONTWAIT; + retry_multishot: if (io_do_buffer_select(req)) { void __user *buf; @@ -982,9 +986,6 @@ retry_multishot: msg.msg_inq = -1; msg.msg_flags = 0; - flags = sr->msg_flags; - if (force_nonblock) - flags |= MSG_DONTWAIT; if (flags & MSG_WAITALL) min_ret = iov_iter_count(&msg.msg_iter); -- cgit v1.2.3 From 86bcacc957fc2d0403aa0e652757eec59a5fd7ca Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Fri, 1 Mar 2024 19:43:48 +0500 Subject: io_uring/net: correct the type of variable The namelen is of type int. It shouldn't be made size_t which is unsigned. The signed number is needed for error checking before use. Fixes: c55978024d12 ("io_uring/net: move receive multishot out of the generic msghdr path") Signed-off-by: Muhammad Usama Anjum Link: https://lore.kernel.org/r/20240301144349.2807544-1-usama.anjum@collabora.com Signed-off-by: Jens Axboe --- io_uring/net.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index 8b9a9f5bbd14..40d4542bfe2a 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -551,7 +551,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) static int io_recvmsg_mshot_prep(struct io_kiocb *req, struct io_async_msghdr *iomsg, - size_t namelen, size_t controllen) + int namelen, size_t controllen) { if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) == (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) { -- cgit v1.2.3 From 8ede3db5061bb1fe28e2c9683329aafa89d2b1b4 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 1 Mar 2024 18:29:39 +0300 Subject: io_uring/net: fix overflow check in io_recvmsg_mshot_prep() The "controllen" variable is type size_t (unsigned long). Casting it to int could lead to an integer underflow. The check_add_overflow() function considers the type of the destination which is type int. If we add two positive values and the result cannot fit in an integer then that's counted as an overflow. However, if we cast "controllen" to an int and it turns negative, then negative values *can* fit into an int type so there is no overflow. Good: 100 + (unsigned long)-4 = 96 <-- overflow Bad: 100 + (int)-4 = 96 <-- no overflow I deleted the cast of the sizeof() as well. That's not a bug but the cast is unnecessary. 
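The behaviour is easy to reproduce in userspace with __builtin_add_overflow(), which is what check_add_overflow() wraps; the small demo below is only an illustration of the type rules, not kernel code:

/*
 * Expected output:
 *   no cast: overflow=1
 *   cast:    overflow=0 hdr=96
 */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t controllen = (size_t)-4;	/* bogus, huge user-supplied length */
	int hdr, bad;

	/* good: 100 + 0xff..fc cannot fit in the int destination, flagged */
	bad = __builtin_add_overflow(100, controllen, &hdr);
	printf("no cast: overflow=%d\n", bad);

	/* bad: the cast turns it into -4, 100 + (-4) = 96 fits, not flagged */
	bad = __builtin_add_overflow(100, (int)controllen, &hdr);
	printf("cast:    overflow=%d hdr=%d\n", bad, hdr);
	return 0;
}

With the size_t passed through unchanged the sum cannot fit in the int destination and the overflow is reported; after the cast the value becomes -4, the sum is 96, and nothing is flagged.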
Fixes: 9b0fc3c054ff ("io_uring: fix types in io_recvmsg_multishot_overflow") Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/138bd2e2-ede8-4bcc-aa7b-f3d9de167a37@moroto.mountain Signed-off-by: Jens Axboe --- io_uring/net.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index 40d4542bfe2a..1640e985cd08 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -559,10 +559,10 @@ static int io_recvmsg_mshot_prep(struct io_kiocb *req, if (unlikely(namelen < 0)) return -EOVERFLOW; - if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out), + if (check_add_overflow(sizeof(struct io_uring_recvmsg_out), namelen, &hdr)) return -EOVERFLOW; - if (check_add_overflow(hdr, (int)controllen, &hdr)) + if (check_add_overflow(hdr, controllen, &hdr)) return -EOVERFLOW; iomsg->namelen = namelen; -- cgit v1.2.3 From b5311dbc2c2eefac00f12888dcd15e90238d1828 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 7 Mar 2024 13:19:46 -0700 Subject: io_uring/net: clear REQ_F_BL_EMPTY in the multishot retry handler This flag should not be persistent across retries, so ensure we clear it before potentially attemting a retry. Fixes: c3f9109dbc9e ("io_uring/kbuf: flag request if buffer pool is empty after buffer pick") Signed-off-by: Jens Axboe --- io_uring/net.c | 1 + 1 file changed, 1 insertion(+) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index 1640e985cd08..e50947e7cd57 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -676,6 +676,7 @@ static inline void io_recv_prep_retry(struct io_kiocb *req) { struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); + req->flags &= ~REQ_F_BL_EMPTY; sr->done_io = 0; sr->len = 0; /* get from the provided buffer */ req->buf_index = sr->buf_group; -- cgit v1.2.3 From deaef31bc1ec7966698a427da8c161930830e1cf Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 7 Mar 2024 17:48:03 -0700 Subject: io_uring/net: correctly handle multishot recvmsg retry setup If we loop for multishot receive on the initial attempt, and then abort later on to wait for more, we miss a case where we should be copying the io_async_msghdr from the stack to stable storage. This leads to the next retry potentially failing, if the application had the msghdr on the stack. Cc: stable@vger.kernel.org Fixes: 9bb66906f23e ("io_uring: support multishot in recvmsg") Signed-off-by: Jens Axboe --- io_uring/net.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index e50947e7cd57..52f0d3b735fd 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -931,7 +931,8 @@ retry_multishot: kfree(kmsg->free_iov); io_netmsg_recycle(req, issue_flags); req->flags &= ~REQ_F_NEED_CLEANUP; - } + } else if (ret == -EAGAIN) + return io_setup_async_msg(req, kmsg, issue_flags); return ret; } -- cgit v1.2.3 From 9817ad85899fb695f875610fb743cb18cf087582 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 7 Mar 2024 12:43:22 -0700 Subject: io_uring/net: remove dependency on REQ_F_PARTIAL_IO for sr->done_io Ensure that prep handlers always initialize sr->done_io before any potential failure conditions, and with that, we now it's always been set even for the failure case. With that, we don't need to use the REQ_F_PARTIAL_IO flag to gate on that. Additionally, we should not overwrite req->cqe.res unless sr->done_io is actually positive. 
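The rule being enforced is simply "initialize progress state before the first possible early return, so the failure path can read it unconditionally". A small sketch with made-up names, not the io_uring code:

#include <errno.h>

struct demo_req {
	int done_io;	/* bytes already transferred; valid from prep onward */
	int flags;
	int res;
};

static int demo_prep(struct demo_req *req, int flags)
{
	req->done_io = 0;		/* before any failure condition */

	if (flags & ~0xff)
		return -EINVAL;		/* done_io is still well defined here */
	req->flags = flags;
	return 0;
}

static void demo_fail(struct demo_req *req)
{
	if (req->done_io)		/* only report real partial progress */
		req->res = req->done_io;
}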
Signed-off-by: Jens Axboe --- io_uring/net.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index 52f0d3b735fd..b832c586be36 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -387,6 +387,8 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); + sr->done_io = 0; + if (req->opcode == IORING_OP_SEND) { if (READ_ONCE(sqe->__pad3[0])) return -EINVAL; @@ -409,7 +411,6 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (req->ctx->compat) sr->msg_flags |= MSG_CMSG_COMPAT; #endif - sr->done_io = 0; return 0; } @@ -631,6 +632,8 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); + sr->done_io = 0; + if (unlikely(sqe->file_index || sqe->addr2)) return -EINVAL; @@ -667,7 +670,6 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (req->ctx->compat) sr->msg_flags |= MSG_CMSG_COMPAT; #endif - sr->done_io = 0; sr->nr_multishot_loops = 0; return 0; } @@ -1055,6 +1057,8 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) struct io_ring_ctx *ctx = req->ctx; struct io_kiocb *notif; + zc->done_io = 0; + if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))) return -EINVAL; /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */ @@ -1107,8 +1111,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (zc->msg_flags & MSG_DONTWAIT) req->flags |= REQ_F_NOWAIT; - zc->done_io = 0; - #ifdef CONFIG_COMPAT if (req->ctx->compat) zc->msg_flags |= MSG_CMSG_COMPAT; @@ -1353,7 +1355,7 @@ void io_sendrecv_fail(struct io_kiocb *req) { struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); - if (req->flags & REQ_F_PARTIAL_IO) + if (sr->done_io) req->cqe.res = sr->done_io; if ((req->flags & REQ_F_NEED_CLEANUP) && -- cgit v1.2.3 From 186daf2385295acf19ecf48f4d5214cc2d925933 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 7 Mar 2024 12:53:24 -0700 Subject: io_uring/kbuf: rename REQ_F_PARTIAL_IO to REQ_F_BL_NO_RECYCLE We only use the flag for this purpose, so rename it accordingly. This further prevents various other use cases of it, keeping it clean and consistent. Then we can also check it in one spot, when it's being attempted recycled, and remove some dead code in io_kbuf_recycle_ring(). 
Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 6 +++--- io_uring/kbuf.c | 9 --------- io_uring/kbuf.h | 20 +++++--------------- io_uring/net.c | 12 ++++++------ io_uring/rw.c | 4 ++-- 5 files changed, 16 insertions(+), 35 deletions(-) (limited to 'io_uring/net.c') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index d8111d64812b..e24893625085 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -470,7 +470,6 @@ enum { REQ_F_SKIP_LINK_CQES_BIT, REQ_F_SINGLE_POLL_BIT, REQ_F_DOUBLE_POLL_BIT, - REQ_F_PARTIAL_IO_BIT, REQ_F_APOLL_MULTISHOT_BIT, REQ_F_CLEAR_POLLIN_BIT, REQ_F_HASH_LOCKED_BIT, @@ -481,6 +480,7 @@ enum { REQ_F_CANCEL_SEQ_BIT, REQ_F_CAN_POLL_BIT, REQ_F_BL_EMPTY_BIT, + REQ_F_BL_NO_RECYCLE_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -543,8 +543,6 @@ enum { REQ_F_SINGLE_POLL = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT), /* double poll may active */ REQ_F_DOUBLE_POLL = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT), - /* request has already done partial IO */ - REQ_F_PARTIAL_IO = IO_REQ_FLAG(REQ_F_PARTIAL_IO_BIT), /* fast poll multishot mode */ REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT), /* recvmsg special flag, clear EPOLLIN */ @@ -559,6 +557,8 @@ enum { REQ_F_CAN_POLL = IO_REQ_FLAG(REQ_F_CAN_POLL_BIT), /* buffer list was empty after selection of buffer */ REQ_F_BL_EMPTY = IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT), + /* don't recycle provided buffers for this request */ + REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT), }; typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts); diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index 3d257ed9031b..9be42bff936b 100644 --- a/io_uring/kbuf.c +++ b/io_uring/kbuf.c @@ -81,15 +81,6 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags) struct io_buffer_list *bl; struct io_buffer *buf; - /* - * For legacy provided buffer mode, don't recycle if we already did - * IO to this buffer. For ring-mapped provided buffer mode, we should - * increment ring->head to explicitly monopolize the buffer to avoid - * multiple use. - */ - if (req->flags & REQ_F_PARTIAL_IO) - return false; - io_ring_submit_lock(ctx, issue_flags); buf = req->kbuf; diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h index f74c910b83f4..5218bfd79e87 100644 --- a/io_uring/kbuf.h +++ b/io_uring/kbuf.h @@ -73,21 +73,9 @@ static inline bool io_kbuf_recycle_ring(struct io_kiocb *req) * to monopolize the buffer. */ if (req->buf_list) { - if (req->flags & REQ_F_PARTIAL_IO) { - /* - * If we end up here, then the io_uring_lock has - * been kept held since we retrieved the buffer. - * For the io-wq case, we already cleared - * req->buf_list when the buffer was retrieved, - * hence it cannot be set here for that case. 
- */ - req->buf_list->head++; - req->buf_list = NULL; - } else { - req->buf_index = req->buf_list->bgid; - req->flags &= ~REQ_F_BUFFER_RING; - return true; - } + req->buf_index = req->buf_list->bgid; + req->flags &= ~REQ_F_BUFFER_RING; + return true; } return false; } @@ -101,6 +89,8 @@ static inline bool io_do_buffer_select(struct io_kiocb *req) static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) { + if (req->flags & REQ_F_BL_NO_RECYCLE) + return false; if (req->flags & REQ_F_BUFFER_SELECTED) return io_kbuf_recycle_legacy(req, issue_flags); if (req->flags & REQ_F_BUFFER_RING) diff --git a/io_uring/net.c b/io_uring/net.c index b832c586be36..b97d70f905a8 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -456,7 +456,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) kmsg->msg.msg_controllen = 0; kmsg->msg.msg_control = NULL; sr->done_io += ret; - req->flags |= REQ_F_PARTIAL_IO; + req->flags |= REQ_F_BL_NO_RECYCLE; return io_setup_async_msg(req, kmsg, issue_flags); } if (ret == -ERESTARTSYS) @@ -535,7 +535,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) sr->len -= ret; sr->buf += ret; sr->done_io += ret; - req->flags |= REQ_F_PARTIAL_IO; + req->flags |= REQ_F_BL_NO_RECYCLE; return io_setup_async_addr(req, &__address, issue_flags); } if (ret == -ERESTARTSYS) @@ -907,7 +907,7 @@ retry_multishot: } if (ret > 0 && io_net_retry(sock, flags)) { sr->done_io += ret; - req->flags |= REQ_F_PARTIAL_IO; + req->flags |= REQ_F_BL_NO_RECYCLE; return io_setup_async_msg(req, kmsg, issue_flags); } if (ret == -ERESTARTSYS) @@ -1007,7 +1007,7 @@ retry_multishot: sr->len -= ret; sr->buf += ret; sr->done_io += ret; - req->flags |= REQ_F_PARTIAL_IO; + req->flags |= REQ_F_BL_NO_RECYCLE; return -EAGAIN; } if (ret == -ERESTARTSYS) @@ -1250,7 +1250,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) zc->len -= ret; zc->buf += ret; zc->done_io += ret; - req->flags |= REQ_F_PARTIAL_IO; + req->flags |= REQ_F_BL_NO_RECYCLE; return io_setup_async_addr(req, &__address, issue_flags); } if (ret == -ERESTARTSYS) @@ -1320,7 +1320,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags) if (ret > 0 && io_net_retry(sock, flags)) { sr->done_io += ret; - req->flags |= REQ_F_PARTIAL_IO; + req->flags |= REQ_F_BL_NO_RECYCLE; return io_setup_async_msg(req, kmsg, issue_flags); } if (ret == -ERESTARTSYS) diff --git a/io_uring/rw.c b/io_uring/rw.c index 7733449271f2..5651a5ad4e11 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -275,7 +275,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res) * current cycle. */ io_req_io_end(req); - req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; + req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; return true; } req_set_fail(req); @@ -342,7 +342,7 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res) io_req_end_write(req); if (unlikely(res != req->cqe.res)) { if (res == -EAGAIN && io_rw_should_reissue(req)) { - req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; + req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; return; } req->cqe.res = res; -- cgit v1.2.3 From fb6328bc2ab58dcf2998bd173f1ef0f3eb7be19a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 6 Mar 2024 10:57:33 -0700 Subject: io_uring/net: simplify msghd->msg_inq checking Just check for larger than zero rather than check for non-zero and not -1. This is easier to read, and also protects against any errants < 0 values that aren't -1. 
Signed-off-by: Jens Axboe --- io_uring/net.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index b97d70f905a8..1928629d490b 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -697,7 +697,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret, unsigned int cflags; cflags = io_put_kbuf(req, issue_flags); - if (msg->msg_inq && msg->msg_inq != -1) + if (msg->msg_inq > 0) cflags |= IORING_CQE_F_SOCK_NONEMPTY; if (!(req->flags & REQ_F_APOLL_MULTISHOT)) { @@ -720,7 +720,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret, io_recv_prep_retry(req); /* Known not-empty or unknown state, retry */ - if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) { + if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq < 0) { if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY) return false; /* mshot retries exceeded, force a requeue */ -- cgit v1.2.3 From d9b441889c3595aa18f89ee42c6d22bb62234343 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 6 Mar 2024 07:57:57 -0700 Subject: io_uring/net: add io_req_msg_cleanup() helper For the fast inline path, we manually recycle the io_async_msghdr and free the iovec, and then clear the REQ_F_NEED_CLEANUP flag to avoid that needing doing in the slower path. We already do that in 2 spots, and in preparation for adding more, add a helper and use it. Signed-off-by: Jens Axboe --- io_uring/net.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index 1928629d490b..f70e657b6f73 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -414,6 +414,17 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } +static void io_req_msg_cleanup(struct io_kiocb *req, + struct io_async_msghdr *kmsg, + unsigned int issue_flags) +{ + req->flags &= ~REQ_F_NEED_CLEANUP; + /* fast path, check for non-NULL to avoid function call */ + if (kmsg->free_iov) + kfree(kmsg->free_iov); + io_netmsg_recycle(req, issue_flags); +} + int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) { struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); @@ -463,11 +474,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) ret = -EINTR; req_set_fail(req); } - /* fast path, check for non-NULL to avoid function call */ - if (kmsg->free_iov) - kfree(kmsg->free_iov); - req->flags &= ~REQ_F_NEED_CLEANUP; - io_netmsg_recycle(req, issue_flags); + io_req_msg_cleanup(req, kmsg, issue_flags); if (ret >= 0) ret += sr->done_io; else if (sr->done_io) @@ -927,13 +934,9 @@ retry_multishot: if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags)) goto retry_multishot; - if (mshot_finished) { - /* fast path, check for non-NULL to avoid function call */ - if (kmsg->free_iov) - kfree(kmsg->free_iov); - io_netmsg_recycle(req, issue_flags); - req->flags &= ~REQ_F_NEED_CLEANUP; - } else if (ret == -EAGAIN) + if (mshot_finished) + io_req_msg_cleanup(req, kmsg, issue_flags); + else if (ret == -EAGAIN) return io_setup_async_msg(req, kmsg, issue_flags); return ret; -- cgit v1.2.3 From 3a96378e22cc46c7c49b5911f6c8631527a133a9 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 8 Mar 2024 13:55:56 +0000 Subject: io_uring: fix mshot io-wq checks When checking for concurrent CQE posting, we're not only interested in requests running from the poll handler but also strayed requests ended up in normal io-wq execution. 
We're disallowing multishots in general from io-wq, not only when they came in a certain way. Cc: stable@vger.kernel.org Fixes: 17add5cea2bba ("io_uring: force multishot CQEs into task context") Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/d8c5b36a39258036f93301cd60d3cd295e40653d.1709905727.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- io_uring/net.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index f70e657b6f73..62a5819779b5 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -87,7 +87,7 @@ static inline bool io_check_multishot(struct io_kiocb *req, * generic paths but multipoll may decide to post extra cqes. */ return !(issue_flags & IO_URING_F_IOWQ) || - !(issue_flags & IO_URING_F_MULTISHOT) || + !(req->flags & REQ_F_APOLL_MULTISHOT) || !req->ctx->task_complete; } -- cgit v1.2.3 From e0e4ab52d17096d96c21a6805ccd424b283c3c6d Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 8 Mar 2024 13:55:57 +0000 Subject: io_uring: refactor DEFER_TASKRUN multishot checks We disallow DEFER_TASKRUN multishots from running by io-wq, which is checked by individual opcodes in the issue path. We can consolidate all it in io_wq_submit_work() at the same time moving the checks out of the hot path. Suggested-by: Jens Axboe Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/e492f0f11588bb5aa11d7d24e6f53b7c7628afdb.1709905727.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- io_uring/io_uring.c | 20 ++++++++++++++++++++ io_uring/net.c | 21 --------------------- io_uring/rw.c | 2 -- 3 files changed, 20 insertions(+), 23 deletions(-) (limited to 'io_uring/net.c') diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index cf2f514b7cc0..cf348c33f485 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -944,6 +944,8 @@ bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags) u64 user_data = req->cqe.user_data; struct io_uring_cqe *cqe; + lockdep_assert(!io_wq_current_is_worker()); + if (!defer) return __io_post_aux_cqe(ctx, user_data, res, cflags, false); @@ -1968,6 +1970,24 @@ fail: goto fail; } + /* + * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the + * submitter task context. Final request completions are handed to the + * right context, however this is not the case of auxiliary CQEs, + * which is the main mean of operation for multishot requests. + * Don't allow any multishot execution from io-wq. It's more restrictive + * than necessary and also cleaner. + */ + if (req->flags & REQ_F_APOLL_MULTISHOT) { + err = -EBADFD; + if (!io_file_can_poll(req)) + goto fail; + err = -ECANCELED; + if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK) + goto fail; + return; + } + if (req->flags & REQ_F_FORCE_ASYNC) { bool opcode_poll = def->pollin || def->pollout; diff --git a/io_uring/net.c b/io_uring/net.c index 62a5819779b5..86ec26a58bb0 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -78,19 +78,6 @@ struct io_sr_msg { */ #define MULTISHOT_MAX_RETRY 32 -static inline bool io_check_multishot(struct io_kiocb *req, - unsigned int issue_flags) -{ - /* - * When ->locked_cq is set we only allow to post CQEs from the original - * task context. Usual request completions will be handled in other - * generic paths but multipoll may decide to post extra cqes. 
- */ - return !(issue_flags & IO_URING_F_IOWQ) || - !(req->flags & REQ_F_APOLL_MULTISHOT) || - !req->ctx->task_complete; -} - int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown); @@ -862,9 +849,6 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) (sr->flags & IORING_RECVSEND_POLL_FIRST)) return io_setup_async_msg(req, kmsg, issue_flags); - if (!io_check_multishot(req, issue_flags)) - return io_setup_async_msg(req, kmsg, issue_flags); - flags = sr->msg_flags; if (force_nonblock) flags |= MSG_DONTWAIT; @@ -956,9 +940,6 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags) (sr->flags & IORING_RECVSEND_POLL_FIRST)) return -EAGAIN; - if (!io_check_multishot(req, issue_flags)) - return -EAGAIN; - sock = sock_from_file(req->file); if (unlikely(!sock)) return -ENOTSOCK; @@ -1408,8 +1389,6 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags) struct file *file; int ret, fd; - if (!io_check_multishot(req, issue_flags)) - return -EAGAIN; retry: if (!fixed) { fd = __get_unused_fd_flags(accept->flags, accept->nofile); diff --git a/io_uring/rw.c b/io_uring/rw.c index 5651a5ad4e11..47e097ab5d7e 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -933,8 +933,6 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags) */ if (!io_file_can_poll(req)) return -EBADFD; - if (issue_flags & IO_URING_F_IOWQ) - return -EAGAIN; ret = __io_read(req, issue_flags); -- cgit v1.2.3 From 1af04699c59713a7693cc63d80b29152579e61c3 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 8 Mar 2024 13:55:58 +0000 Subject: io_uring/net: dedup io_recv_finish req completion There are two block in io_recv_finish() completing the request, which we can combine and remove jumping. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/0e338dcb33c88de83809fda021cba9e7c9681620.1709905727.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- io_uring/net.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index 86ec26a58bb0..2892236fb021 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -694,20 +694,12 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret, if (msg->msg_inq > 0) cflags |= IORING_CQE_F_SOCK_NONEMPTY; - if (!(req->flags & REQ_F_APOLL_MULTISHOT)) { - io_req_set_res(req, *ret, cflags); - *ret = IOU_OK; - return true; - } - - if (mshot_finished) - goto finish; - /* * Fill CQE for this receive and see if we should keep trying to * receive from this socket. */ - if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER, + if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished && + io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER, *ret, cflags | IORING_CQE_F_MORE)) { struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE; @@ -727,8 +719,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret, *ret = -EAGAIN; return true; } - /* Otherwise stop multishot but use the current result. */ -finish: + + /* Finish the request / stop multishot. 
*/ io_req_set_res(req, *ret, cflags); if (issue_flags & IO_URING_F_MULTISHOT) -- cgit v1.2.3 From f3a640cca951ef9715597e68f5363afc0f452a88 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 15 Mar 2024 16:36:23 -0600 Subject: io_uring/net: ensure async prep handlers always initialize ->done_io If we get a request with IOSQE_ASYNC set, then we first run the prep async handlers. But if we then fail setting it up and want to post a CQE with -EINVAL, we use ->done_io. This was previously guarded with REQ_F_PARTIAL_IO, and the normal setup handlers do set it up before any potential errors, but we need to cover the async setup too. Fixes: 9817ad85899f ("io_uring/net: remove dependency on REQ_F_PARTIAL_IO for sr->done_io") Signed-off-by: Jens Axboe --- io_uring/net.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'io_uring/net.c') diff --git a/io_uring/net.c b/io_uring/net.c index 19451f0dbf81..1e7665ff6ef7 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -326,7 +326,10 @@ int io_send_prep_async(struct io_kiocb *req) struct io_async_msghdr *io; int ret; - if (!zc->addr || req_has_async_data(req)) + if (req_has_async_data(req)) + return 0; + zc->done_io = 0; + if (!zc->addr) return 0; io = io_msg_alloc_async_prep(req); if (!io) @@ -353,8 +356,10 @@ static int io_setup_async_addr(struct io_kiocb *req, int io_sendmsg_prep_async(struct io_kiocb *req) { + struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); int ret; + sr->done_io = 0; if (!io_msg_alloc_async_prep(req)) return -ENOMEM; ret = io_sendmsg_copy_hdr(req, req->async_data); @@ -608,9 +613,11 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req, int io_recvmsg_prep_async(struct io_kiocb *req) { + struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); struct io_async_msghdr *iomsg; int ret; + sr->done_io = 0; if (!io_msg_alloc_async_prep(req)) return -ENOMEM; iomsg = req->async_data; -- cgit v1.2.3
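For anyone wanting to exercise the sendmsg/recvmsg paths touched by this series from userspace, a minimal liburing test over a socketpair looks roughly like the following (assumes liburing is installed and linked with -luring; error handling is trimmed for brevity):

#include <liburing.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char out[] = "hello", in[16] = "";
	struct iovec ov = { .iov_base = out, .iov_len = sizeof(out) };
	struct iovec iv = { .iov_base = in,  .iov_len = sizeof(in) };
	struct msghdr smsg = { .msg_iov = &ov, .msg_iovlen = 1 };
	struct msghdr rmsg = { .msg_iov = &iv, .msg_iovlen = 1 };
	int fds[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) ||
	    io_uring_queue_init(4, &ring, 0))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_sendmsg(sqe, fds[0], &smsg, 0);
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_recvmsg(sqe, fds[1], &rmsg, 0);
	io_uring_submit(&ring);

	for (int i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			return 1;
		printf("cqe res=%d\n", cqe->res);	/* bytes sent/received */
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}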