Diffstat (limited to 'io_uring/rw.c')
 io_uring/rw.c | 71 ++++++++++++++++++++++++++++-------------------------------------------
 1 file changed, 28 insertions(+), 43 deletions(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index a9a2733be842..9edc6baebd01 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -23,6 +23,9 @@
#include "poll.h"
#include "rw.h"
+static void io_complete_rw(struct kiocb *kiocb, long res);
+static void io_complete_rw_iopoll(struct kiocb *kiocb, long res);
+
struct io_rw {
/* NOTE: kiocb has the file as the first member, so don't do it here */
struct kiocb kiocb;
@@ -146,28 +149,15 @@ static inline int io_import_iovec(int rw, struct io_kiocb *req,
 	return 0;
 }
 
-static void io_rw_iovec_free(struct io_async_rw *rw)
-{
-	if (rw->free_iovec) {
-		kfree(rw->free_iovec);
-		rw->free_iov_nr = 0;
-		rw->free_iovec = NULL;
-	}
-}
-
 static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_async_rw *rw = req->async_data;
-	struct iovec *iov;
 
-	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
-		io_rw_iovec_free(rw);
+	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
 		return;
-	}
-	iov = rw->free_iovec;
+
+	io_alloc_cache_kasan(&rw->free_iovec, &rw->free_iov_nr);
 	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
-		if (iov)
-			kasan_mempool_poison_object(iov);
 		req->async_data = NULL;
 		req->flags &= ~REQ_F_ASYNC_DATA;
 	}
@@ -208,27 +198,16 @@ static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
 	}
 }
 
-static void io_rw_async_data_init(void *obj)
-{
-	struct io_async_rw *rw = (struct io_async_rw *)obj;
-
-	rw->free_iovec = NULL;
-	rw->bytes_done = 0;
-}
-
 static int io_rw_alloc_async(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_async_rw *rw;
 
-	rw = io_uring_alloc_async_data(&ctx->rw_cache, req, io_rw_async_data_init);
+	rw = io_uring_alloc_async_data(&ctx->rw_cache, req);
 	if (!rw)
 		return -ENOMEM;
-	if (rw->free_iovec) {
-		kasan_mempool_unpoison_object(rw->free_iovec,
-				rw->free_iov_nr * sizeof(struct iovec));
+	if (rw->free_iovec)
 		req->flags |= REQ_F_NEED_CLEANUP;
-	}
 	rw->bytes_done = 0;
 	return 0;
 }
@@ -313,6 +292,11 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	rw->kiocb.dio_complete = NULL;
 	rw->kiocb.ki_flags = 0;
 
+	if (req->ctx->flags & IORING_SETUP_IOPOLL)
+		rw->kiocb.ki_complete = io_complete_rw_iopoll;
+	else
+		rw->kiocb.ki_complete = io_complete_rw;
+
 	rw->addr = READ_ONCE(sqe->addr);
 	rw->len = READ_ONCE(sqe->len);
 	rw->flags = READ_ONCE(sqe->rw_flags);
@@ -587,8 +571,10 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 	smp_store_release(&req->iopoll_completed, 1);
 }
 
-static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
+static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
 {
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+
 	/* IO was queued async, completion will happen later */
 	if (ret == -EIOCBQUEUED)
 		return;
@@ -610,8 +596,10 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 		}
 	}
 
-	INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
-			io_complete_rw, kiocb, ret);
+	if (req->ctx->flags & IORING_SETUP_IOPOLL)
+		io_complete_rw_iopoll(&rw->kiocb, ret);
+	else
+		io_complete_rw(&rw->kiocb, ret);
 }
 
 static int kiocb_done(struct io_kiocb *req, ssize_t ret,
@@ -622,7 +610,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
 		req->file->f_pos = rw->kiocb.ki_pos;
 
-	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
+	if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
 		__io_complete_rw_common(req, ret);
 		/*
 		 * Safe to call io_end from here as we're inline
@@ -633,7 +621,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 		io_req_rw_cleanup(req, issue_flags);
 		return IOU_OK;
 	} else {
-		io_rw_done(&rw->kiocb, ret);
+		io_rw_done(req, ret);
 	}
 
 	return IOU_ISSUE_SKIP_COMPLETE;
@@ -837,10 +825,8 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
 	if (ctx->flags & IORING_SETUP_IOPOLL) {
 		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
 			return -EOPNOTSUPP;
-
 		kiocb->private = NULL;
 		kiocb->ki_flags |= IOCB_HIPRI;
-		kiocb->ki_complete = io_complete_rw_iopoll;
 		req->iopoll_completed = 0;
 		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
 			/* make sure every req only blocks once */
@@ -850,7 +836,6 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
 	} else {
 		if (kiocb->ki_flags & IOCB_HIPRI)
 			return -EINVAL;
-		kiocb->ki_complete = io_complete_rw;
 	}
 
 	if (req->flags & REQ_F_HAS_METADATA) {
@@ -928,7 +913,8 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 	} else if (ret == -EIOCBQUEUED) {
 		return IOU_ISSUE_SKIP_COMPLETE;
 	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
-		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
+		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
+		   (issue_flags & IO_URING_F_MULTISHOT)) {
 		/* read all, failed, already did sync or don't want to retry */
 		goto done;
 	}
@@ -1001,6 +987,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 	if (!io_file_can_poll(req))
 		return -EBADFD;
 
+	/* make it sync, multishot doesn't support async execution */
+	rw->kiocb.ki_complete = NULL;
 	ret = __io_read(req, issue_flags);
 
 	/*
@@ -1323,10 +1311,7 @@ void io_rw_cache_free(const void *entry)
 {
 	struct io_async_rw *rw = (struct io_async_rw *) entry;
 
-	if (rw->free_iovec) {
-		kasan_mempool_unpoison_object(rw->free_iovec,
-				rw->free_iov_nr * sizeof(struct iovec));
-		io_rw_iovec_free(rw);
-	}
+	if (rw->free_iovec)
+		kfree(rw->free_iovec);
 	kfree(rw);
 }
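
Note on the completion-dispatch change: the io_rw_done() hunks drop the INDIRECT_CALL_2() dispatch through kiocb->ki_complete in favour of a direct branch on IORING_SETUP_IOPOLL, with the handler now picked once at prep time in io_prep_rw(). That is also what lets io_read_mshot() clear ->ki_complete to force synchronous execution. Below is a minimal userspace C sketch of the same flag-based dispatch pattern; every type and function name in it is invented for illustration and none of it is kernel code.

#include <stdio.h>

#define SETUP_IOPOLL 0x1u	/* stand-in for IORING_SETUP_IOPOLL */

struct request {
	unsigned int ring_flags;	/* flags the ring was set up with */
	long res;			/* completion result */
};

static void complete_rw(struct request *req, long res)
{
	req->res = res;
	printf("IRQ-driven completion, res=%ld\n", res);
}

static void complete_rw_iopoll(struct request *req, long res)
{
	req->res = res;
	printf("polled completion, res=%ld\n", res);
}

/*
 * A direct branch on the setup flag replaces an indirect call through
 * a stored completion pointer, mirroring the io_rw_done() change above.
 */
static void rw_done(struct request *req, long ret)
{
	if (req->ring_flags & SETUP_IOPOLL)
		complete_rw_iopoll(req, ret);
	else
		complete_rw(req, ret);
}

int main(void)
{
	struct request irq_req = { .ring_flags = 0 };
	struct request poll_req = { .ring_flags = SETUP_IOPOLL };

	rw_done(&irq_req, 4096);
	rw_done(&poll_req, 4096);
	return 0;
}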
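
Note on the KASAN bookkeeping change: the io_rw_recycle()/io_rw_alloc_async()/io_rw_cache_free() hunks fold the open-coded kasan_mempool_poison_object()/kasan_mempool_unpoison_object() calls into the io_alloc_cache_kasan() helper, so the allocation cache owns the poison-on-put bookkeeping instead of each caller. The sketch below shows that ownership move with a one-slot userspace cache; the cache type, helper names, and the memset() stand-in for poisoning are all hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct async_rw {
	char *cached_buf;	/* stand-in for free_iovec */
	size_t cached_len;	/* stand-in for free_iov_nr */
	long bytes_done;
};

static struct async_rw *cache_slot;	/* trivial one-slot cache */

/*
 * The cache helper, not every caller, scrubs ("poisons") the idle
 * buffer -- the role io_alloc_cache_kasan() takes over in the diff.
 */
static void cache_scrub_idle(struct async_rw *rw)
{
	if (rw->cached_buf)
		memset(rw->cached_buf, 0xa5, rw->cached_len);
}

static int cache_put(struct async_rw *rw)
{
	if (cache_slot)
		return 0;	/* cache full: caller keeps ownership */
	cache_scrub_idle(rw);
	cache_slot = rw;
	return 1;
}

static struct async_rw *cache_get(void)
{
	struct async_rw *rw = cache_slot;

	cache_slot = NULL;
	if (!rw)
		rw = calloc(1, sizeof(*rw));
	return rw;
}

int main(void)
{
	struct async_rw *rw = cache_get();

	if (!rw)
		return 1;
	rw->cached_buf = malloc(64);
	rw->cached_len = 64;
	if (!cache_put(rw)) {		/* rejected by the cache: free for real */
		free(rw->cached_buf);
		free(rw);
	}
	printf("cached object: %p\n", (void *)cache_slot);
	return 0;
}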