Diffstat (limited to 'io_uring/uring_cmd.c'):
 io_uring/uring_cmd.c | 47 +++++++++++++++---------------------------------
 1 file changed, 15 insertions(+), 32 deletions(-)
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index fc94c465a985..e6701b7aa147 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -54,9 +54,6 @@ bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
continue;
if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
- /* ->sqe isn't available if no async data */
- if (!req_has_async_data(req))
- cmd->sqe = NULL;
file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
IO_URING_F_COMPLETE_DEFER);
ret = true;
@@ -168,32 +165,26 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
-static void io_uring_cmd_init_once(void *obj)
-{
- struct io_uring_cmd_data *data = obj;
-
- data->op_data = NULL;
-}
-
static int io_uring_cmd_prep_setup(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
struct io_uring_cmd_data *cache;
- cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req,
- io_uring_cmd_init_once);
+ cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req);
if (!cache)
return -ENOMEM;
-
- if (!(req->flags & REQ_F_FORCE_ASYNC)) {
- /* defer memcpy until we need it */
- ioucmd->sqe = sqe;
- return 0;
- }
-
- memcpy(req->async_data, sqe, uring_sqe_size(req->ctx));
- ioucmd->sqe = req->async_data;
+ cache->op_data = NULL;
+
+ /*
+ * Unconditionally cache the SQE for now - this is only needed for
+ * requests that go async, but prep handlers must ensure that any
+ * sqe data is stable beyond prep. Since uring_cmd is special in
+ * that it doesn't read in per-op data, play it safe and ensure that
+ * any SQE data is stable beyond prep. This can later get relaxed.
+ */
+ memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
+ ioucmd->sqe = cache->sqes;
return 0;
}
@@ -256,16 +247,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
}
ret = file->f_op->uring_cmd(ioucmd, issue_flags);
- if (ret == -EAGAIN) {
- struct io_uring_cmd_data *cache = req->async_data;
-
- if (ioucmd->sqe != (void *) cache)
- memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
- return -EAGAIN;
- } else if (ret == -EIOCBQUEUED) {
- return -EIOCBQUEUED;
- }
-
+ if (ret == -EAGAIN || ret == -EIOCBQUEUED)
+ return ret;
if (ret < 0)
req_set_fail(req);
io_req_uring_cleanup(req, issue_flags);
@@ -350,7 +333,7 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
if (!prot || !prot->ioctl)
return -EOPNOTSUPP;
- switch (cmd->sqe->cmd_op) {
+ switch (cmd->cmd_op) {
case SOCKET_URING_OP_SIOCINQ:
ret = prot->ioctl(sk, SIOCINQ, &arg);
if (ret)
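
For readability, here is how io_uring_cmd_prep_setup() reads with this patch applied, assembled from the hunks above (blank lines collapsed by the viewer are restored; this is an excerpt for illustration, not a standalone, buildable unit):

static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_uring_cmd_data *cache;

	/* per-request async data comes from the ctx uring_cache; no init-once callback */
	cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req);
	if (!cache)
		return -ENOMEM;
	cache->op_data = NULL;

	/*
	 * Unconditionally cache the SQE for now - this is only needed for
	 * requests that go async, but prep handlers must ensure that any
	 * sqe data is stable beyond prep. Since uring_cmd is special in
	 * that it doesn't read in per-op data, play it safe and ensure that
	 * any SQE data is stable beyond prep. This can later get relaxed.
	 */
	memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = cache->sqes;
	return 0;
}

Because the SQE is now copied into cache->sqes at prep time, the issue path no longer needs to special-case -EAGAIN to preserve the SQE contents, which is why the memcpy branch in io_uring_cmd() above could be dropped in favor of simply returning the error.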