Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io_uring.c  14
-rw-r--r--  io_uring/io_uring.h   3
-rw-r--r--  io_uring/msg_ring.c   4
-rw-r--r--  io_uring/net.c        2
-rw-r--r--  io_uring/poll.c       2
-rw-r--r--  io_uring/rsrc.c       4
6 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 745264938a48..523b6ebad15a 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -736,7 +736,8 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
}
static bool io_fill_cqe_aux(struct io_ring_ctx *ctx,
- u64 user_data, s32 res, u32 cflags)
+ u64 user_data, s32 res, u32 cflags,
+ bool allow_overflow)
{
struct io_uring_cqe *cqe;
@@ -760,16 +761,21 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx,
}
return true;
}
- return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
+
+ if (allow_overflow)
+ return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
+
+ return false;
}
bool io_post_aux_cqe(struct io_ring_ctx *ctx,
- u64 user_data, s32 res, u32 cflags)
+ u64 user_data, s32 res, u32 cflags,
+ bool allow_overflow)
{
bool filled;
io_cq_lock(ctx);
- filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
+ filled = io_fill_cqe_aux(ctx, user_data, res, cflags, allow_overflow);
io_cq_unlock_post(ctx);
return filled;
}
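[Note: taken together, the two hunks above make CQE overflow opt-in per call: with allow_overflow == false, io_fill_cqe_aux() returns false when the CQ ring is full instead of falling back to io_cqring_event_overflow(). A minimal sketch of how an in-kernel caller might use the new parameter; post_or_drop() is hypothetical and not part of this patch, while io_post_aux_cqe() and its signature come from the diff above:

/*
 * Hypothetical caller: try to post an auxiliary CQE without ever
 * touching the overflow list.  Because allow_overflow == false makes
 * io_fill_cqe_aux() skip io_cqring_event_overflow(), a false return
 * here means the CQ ring was full and the event was not delivered.
 */
static bool post_or_drop(struct io_ring_ctx *ctx, u64 user_data, s32 res)
{
	if (io_post_aux_cqe(ctx, user_data, res, 0, false))
		return true;
	/* ring full: event dropped; caller-specific recovery goes here */
	return false;
}
]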
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index e8da70781fa3..e022d71c177a 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -31,7 +31,8 @@ void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
-bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
+bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
+ bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 939205b30c8b..753d16734319 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -31,7 +31,7 @@ static int io_msg_ring_data(struct io_kiocb *req)
if (msg->src_fd || msg->dst_fd || msg->flags)
return -EINVAL;
- if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+ if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
return 0;
return -EOVERFLOW;
@@ -113,7 +113,7 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
* completes with -EOVERFLOW, then the sender must ensure that a
* later IORING_OP_MSG_RING delivers the message.
*/
- if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+ if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
ret = -EOVERFLOW;
out_unlock:
io_double_unlock_ctx(ctx, target_ctx, issue_flags);
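[Note: both MSG_RING call sites pass allow_overflow == true, so behavior is unchanged here: a full target CQ ring still queues the message CQE on the overflow list, and -EOVERFLOW is returned only when even that fails. For reference, a hedged userspace sketch of driving IORING_OP_MSG_RING; it assumes liburing 2.2 or newer for io_uring_prep_msg_ring(), and error handling is kept minimal:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring src, dst;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &src, 0) || io_uring_queue_init(8, &dst, 0))
		return 1;

	/* user_data 0x42 and len 100 show up verbatim in dst's CQE */
	sqe = io_uring_get_sqe(&src);
	io_uring_prep_msg_ring(sqe, dst.ring_fd, 100, 0x42, 0);
	io_uring_submit(&src);

	/* the target ring receives a kernel-posted auxiliary CQE; with
	 * the patch above it is posted with allow_overflow == true, so
	 * a full CQ ring overflows it rather than dropping it */
	if (!io_uring_wait_cqe(&dst, &cqe))
		printf("user_data=%llu res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
	return 0;
}
]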
diff --git a/io_uring/net.c b/io_uring/net.c
index e4422dff0704..601955fdb124 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -658,7 +658,7 @@ retry:
if (ret < 0)
return ret;
- if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE))
+ if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
goto retry;
return -ECANCELED;
}
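[Note: this is the multishot accept path: each accepted connection is delivered as an auxiliary CQE with IORING_CQE_F_MORE set, and if the post fails the multishot terminates with -ECANCELED. Passing true preserves the prior overflow behavior. A hedged sketch of the matching userspace loop; io_uring_prep_multishot_accept() is assumed from liburing 2.2+, and handle_conn() is a hypothetical consumer:

#include <liburing.h>

extern void handle_conn(int fd);	/* hypothetical consumer */

/* Consume multishot-accept CQEs: each accepted socket arrives with
 * IORING_CQE_F_MORE set; once a CQE lacks F_MORE, the request has
 * terminated (e.g. -ECANCELED from the path patched above) and must
 * be re-armed by the caller. */
static int accept_loop(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int res;

	if (!sqe)
		return -1;
	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	io_uring_submit(ring);

	for (;;) {
		if (io_uring_wait_cqe(ring, &cqe))
			return -1;
		res = cqe->res;
		if (res >= 0)
			handle_conn(res);
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			io_uring_cqe_seen(ring, cqe);
			return res;	/* multishot terminated */
		}
		io_uring_cqe_seen(ring, cqe);
	}
}
]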
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 64d426d696ab..e8f922a4f6d7 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -243,7 +243,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
req->apoll_events);
if (!io_post_aux_cqe(ctx, req->cqe.user_data,
- mask, IORING_CQE_F_MORE))
+ mask, IORING_CQE_F_MORE, true))
return -ECANCELED;
} else {
ret = io_poll_issue(req, locked);
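[Note: multishot poll follows the same pattern as multishot accept: each readiness event is an auxiliary CQE with IORING_CQE_F_MORE, and a failed post ends the request with -ECANCELED. A hedged userspace sketch of arming one; io_uring_prep_poll_multishot() is assumed from liburing:

#include <liburing.h>
#include <poll.h>

/* Arm a multishot poll on @fd.  Each readiness event arrives as an
 * auxiliary CQE with IORING_CQE_F_MORE; if the kernel cannot post one,
 * the request completes with -ECANCELED and F_MORE clear, matching the
 * io_poll_check_events() hunk above. */
static int arm_poll_multishot(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
	return io_uring_submit(ring);
}
]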
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index d2e589c703d0..0250c13ae1cd 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -175,10 +175,10 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
if (prsrc->tag) {
if (ctx->flags & IORING_SETUP_IOPOLL) {
mutex_lock(&ctx->uring_lock);
- io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
+ io_post_aux_cqe(ctx, prsrc->tag, 0, 0, true);
mutex_unlock(&ctx->uring_lock);
} else {
- io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
+ io_post_aux_cqe(ctx, prsrc->tag, 0, 0, true);
}
}
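[Note: the rsrc path also passes true, since a tagged resource put must never lose its completion: userspace relies on seeing the tag CQE before reusing the resource, so overflowing it on a full ring is the correct fallback. A hedged userspace sketch of the tagged registration that produces these CQEs; io_uring_register_files_tags() is assumed from liburing, and the tag value is arbitrary:

#include <liburing.h>

/* Register one file with a tag.  When the tagged slot is later replaced
 * or torn down, __io_rsrc_put_work() posts an auxiliary CQE carrying
 * the tag as user_data; with allow_overflow == true that CQE survives
 * a full CQ ring via the overflow list. */
static int register_tagged(struct io_uring *ring, int fd)
{
	int files[1] = { fd };
	__u64 tags[1] = { 0xfeedbeef };	/* arbitrary tag value */

	return io_uring_register_files_tags(ring, files, tags, 1);
}
]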