Diffstat (limited to 'io_uring/io_uring.h')
 io_uring/io_uring.h | 79 ++++++++++++++++++++++++++++++-------------------------------------------------
 1 file changed, 30 insertions(+), 49 deletions(-)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index d3606d30cf6f..547c30582fb8 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -38,14 +38,13 @@ enum {
IOU_STOP_MULTISHOT = -ECANCELED,
};
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
-bool io_req_cqe_overflow(struct io_kiocb *req);
+bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
+void io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
-bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
- bool allow_overflow);
+bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
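[Note: this first hunk replaces the pointer-returning __io_get_cqe() with io_cqe_cache_refill(), makes io_req_cqe_overflow() unconditional (it now returns void), and renames io_aux_cqe() to io_fill_cqe_req_aux() while dropping its allow_overflow parameter. A minimal sketch of how a multishot handler might use the renamed helper; the handler itself and its use of issue_flags are assumptions, not part of this diff:]

	/*
	 * Sketch only: a hypothetical multishot completion path using the
	 * renamed helper. IOU_STOP_MULTISHOT comes from the enum visible
	 * at the top of this hunk; everything else is illustrative.
	 */
	static int hypothetical_mshot_post(struct io_kiocb *req,
					   unsigned int issue_flags, s32 res)
	{
		bool defer = issue_flags & IO_URING_F_COMPLETE_DEFER;

		/* Post an extra CQE and keep the request armed... */
		if (io_fill_cqe_req_aux(req, defer, res, IORING_CQE_F_MORE))
			return IOU_ISSUE_SKIP_COMPLETE;
		/* ...or stop the multishot when no CQE could be posted. */
		return IOU_STOP_MULTISHOT;
	}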
@@ -73,7 +72,7 @@ int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
-void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
+void __io_submit_flush_completions(struct io_ring_ctx *ctx);
int io_req_prep_async(struct io_kiocb *req);
struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
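[Note: io_free_batch_list() disappears from the header, presumably becoming static in io_uring.c, while __io_submit_flush_completions() is exported instead. The double-underscore name suggests a thin inline gate now lives in this header; a sketch of that pattern, with the submit_state.compl_reqs field name assumed rather than shown by this diff:]

	/*
	 * Sketch of the implied wrapper: check for pending completions
	 * inline and only call into io_uring.c when there is work to do.
	 */
	static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
	{
		if (!wq_list_empty(&ctx->submit_state.compl_reqs))
			__io_submit_flush_completions(ctx);
	}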
@@ -110,31 +109,31 @@ static inline void io_req_task_work_add(struct io_kiocb *req)
#define io_for_each_link(pos, head) \
for (pos = (head); pos; pos = pos->link)
-static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
- bool overflow)
+static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
+ struct io_uring_cqe **ret,
+ bool overflow)
{
io_lockdep_assert_cq_locked(ctx);
- if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
- struct io_uring_cqe *cqe = ctx->cqe_cached;
-
- ctx->cached_cq_tail++;
- ctx->cqe_cached++;
- if (ctx->flags & IORING_SETUP_CQE32)
- ctx->cqe_cached++;
- return cqe;
+ if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
+ if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
+ return false;
}
-
- return __io_get_cqe(ctx, overflow);
+ *ret = ctx->cqe_cached;
+ ctx->cached_cq_tail++;
+ ctx->cqe_cached++;
+ if (ctx->flags & IORING_SETUP_CQE32)
+ ctx->cqe_cached++;
+ return true;
}
-static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
+static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
- return io_get_cqe_overflow(ctx, false);
+ return io_get_cqe_overflow(ctx, ret, false);
}
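[Note: io_get_cqe_overflow() and io_get_cqe() now return a bool and hand the CQE slot back through an out parameter, keeping the common cached path inline and pushing the refill slow path out of line into io_cqe_cache_refill(). A sketch of the resulting caller pattern; the wrapper function and its WRITE_ONCE stores are illustrative, not taken from this hunk:]

	/* Sketch: callers now test a bool instead of checking for NULL. */
	static inline bool hypothetical_post_cqe(struct io_ring_ctx *ctx,
						 u64 user_data, s32 res,
						 u32 cflags)
	{
		struct io_uring_cqe *cqe;

		if (!io_get_cqe(ctx, &cqe))
			return false;
		WRITE_ONCE(cqe->user_data, user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
		return true;
	}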
-static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
+ struct io_kiocb *req)
{
struct io_uring_cqe *cqe;
@@ -143,39 +142,22 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
* submission (by quite a lot). Increment the overflow count in
* the ring.
*/
- cqe = io_get_cqe(ctx);
- if (unlikely(!cqe))
+ if (unlikely(!io_get_cqe(ctx, &cqe)))
return false;
- trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
- req->cqe.res, req->cqe.flags,
- (req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
- (req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);
+ if (trace_io_uring_complete_enabled())
+ trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+ req->cqe.res, req->cqe.flags,
+ req->big_cqe.extra1, req->big_cqe.extra2);
memcpy(cqe, &req->cqe, sizeof(*cqe));
-
if (ctx->flags & IORING_SETUP_CQE32) {
- u64 extra1 = 0, extra2 = 0;
-
- if (req->flags & REQ_F_CQE32_INIT) {
- extra1 = req->extra1;
- extra2 = req->extra2;
- }
-
- WRITE_ONCE(cqe->big_cqe[0], extra1);
- WRITE_ONCE(cqe->big_cqe[1], extra2);
+ memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
+ memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}
return true;
}
-static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
-{
- if (likely(__io_fill_cqe_req(ctx, req)))
- return true;
- return io_req_cqe_overflow(req);
-}
-
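[Note: this hunk removes the REQ_F_CQE32_INIT dance: the request now always carries a zero-initialized req->big_cqe that is copied out wholesale and re-zeroed, and the old io_fill_cqe_req()/__io_fill_cqe_req() split collapses into one __always_inline helper, with the overflow fallback moving to the callers (consistent with io_req_cqe_overflow() becoming void above). The trace call is also gated on trace_io_uring_complete_enabled() so its arguments are not evaluated when the tracepoint is off. The memcpy of sizeof(*cqe) bytes works because the big-CQE tail is exactly one base CQE in size; a sketch of that invariant, assuming the two-u64 layout of struct io_big_cqe:]

	/* Sketch, not kernel source: asserts the size relationship only. */
	#include <linux/build_bug.h>
	#include <uapi/linux/io_uring.h>	/* struct io_uring_cqe */

	struct io_big_cqe {			/* assumed layout; the diff only
						 * shows .extra1/.extra2 */
		__u64	extra1;
		__u64	extra2;
	};

	/* 2 * u64 == 16 bytes == one base CQE */
	static_assert(sizeof(struct io_big_cqe) == sizeof(struct io_uring_cqe));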
static inline void req_set_fail(struct io_kiocb *req)
{
req->flags |= REQ_F_FAIL;
@@ -196,10 +178,10 @@ static inline bool req_has_async_data(struct io_kiocb *req)
return req->flags & REQ_F_ASYNC_DATA;
}
-static inline void io_put_file(struct file *file)
+static inline void io_put_file(struct io_kiocb *req)
{
- if (file)
- fput(file);
+ if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
+ fput(req->file);
}
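[Note: io_put_file() now takes the request rather than a bare struct file and refuses to drop a reference on registered (fixed) files itself, so call sites no longer open-code the flag test. A sketch of a hypothetical call site:]

	/*
	 * Previously every caller needed
	 *	if (!(req->flags & REQ_F_FIXED_FILE))
	 *		io_put_file(req->file);
	 * The check now lives in the helper.
	 */
	static void hypothetical_req_cleanup(struct io_kiocb *req)
	{
		io_put_file(req);	/* no-op for fixed or absent files */
	}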
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
@@ -354,7 +336,6 @@ static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
struct io_kiocb *req;
req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
- kasan_unpoison_object_data(req_cachep, req);
wq_stack_extract(&ctx->submit_state.free_list);
return req;
}
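[Note: the last hunk drops the kasan_unpoison_object_data() call from io_extract_req(), leaving a plain pop from the intrusive free-list stack threaded through each request's comp_list node. For context, the inverse operation (returning a request to that cache) would be the matching stack push; a sketch, with the helper name hypothetical:]

	/* Sketch: the counterpart push back onto the request cache. */
	static inline void hypothetical_cache_req(struct io_ring_ctx *ctx,
						  struct io_kiocb *req)
	{
		wq_stack_add_head(&req->comp_list,
				  &ctx->submit_state.free_list);
	}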