author    Hao Xu <haoxu@linux.alibaba.com>  2021-06-28 05:37:30 +0800
committer Jens Axboe <axboe@kernel.dk>      2021-06-30 14:15:40 -0600
commit    915b3dde9b72cb4f531b04208daafcd0a257b847 (patch)
tree      7c7d74779edb837045b006b3d657d6b0681d5a31
parent    99ebe4efbd3882422db1fd6a1b477291ea8bdab7 (diff)
io_uring: spin in iopoll() only when reqs are in a single queue
We currently spin in iopoll() when the requests to be iopolled are for
the same file (device), but one device may have multiple hardware
queues. Consider an example:

    hw_queue_0 | hw_queue_1
    req(30us)    req(10us)

If we first spin on iopolling hw_queue_0, the average latency is
(30us + 30us) / 2 = 30us. If we instead round-robin across the queues,
the average latency is (30us + 10us) / 2 = 20us, since we reap the
request in hw_queue_1 in time. So it is better to spin only when all
pending requests are in the same hardware queue.

Signed-off-by: Hao Xu <haoxu@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
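To make the new condition concrete, here is a minimal user-space model
of the decision, not the kernel code: struct model_req,
still_single_queue() and their fields are hypothetical stand-ins. In the
kernel, the hardware queue index comes from blk_qc_t_to_queue_num()
applied to the kiocb's completion cookie, as the diff below shows.

#include <stdbool.h>

/* Hypothetical stand-in for io_kiocb: just the two facts the patch
 * compares, the target file and the hardware queue that the request's
 * completion cookie maps to. */
struct model_req {
	int fd;
	unsigned int hw_queue;
};

/* Model of the updated io_iopoll_req_issued() check: spinning stays
 * allowed only while every request on the poll list targets the same
 * file *and* the same hardware queue as the list head. */
static bool still_single_queue(const struct model_req *head,
			       const struct model_req *incoming)
{
	if (head->fd != incoming->fd)
		return false;
	return head->hw_queue == incoming->hw_queue;
}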
-rw-r--r--  fs/io_uring.c | 20 ++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
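For context, a ring only takes the iopoll path changed below when it was
created with IORING_SETUP_IOPOLL. A minimal liburing sketch follows; the
device path and buffer sizes are illustrative, and IOPOLL requires
O_DIRECT I/O on a device that supports polled completions.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	/* An IOPOLL ring polls for completions instead of waiting for
	 * interrupts; io_do_iopoll()/io_iopoll_req_issued() in the diff
	 * below are the kernel side of that polling. */
	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL) < 0)
		return 1;

	/* IOPOLL needs O_DIRECT; /dev/nvme0n1 is an illustrative device. */
	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
	if (fd < 0)
		return 1;
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* With IOPOLL, the kernel spins in io_iopoll_check() here. */
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("read res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}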
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b14de92832e1..67099bb99a02 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -434,7 +434,7 @@ struct io_ring_ctx {
struct list_head iopoll_list;
struct hlist_head *cancel_hash;
unsigned cancel_hash_bits;
- bool poll_multi_file;
+ bool poll_multi_queue;
} ____cacheline_aligned_in_smp;
struct io_restriction restrictions;
@@ -2314,7 +2314,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
* Only spin for completions if we don't have multiple devices hanging
* off our complete list, and we're under the requested amount.
*/
- spin = !ctx->poll_multi_file && *nr_events < min;
+ spin = !ctx->poll_multi_queue && *nr_events < min;
ret = 0;
list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
@@ -2553,14 +2553,22 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
* different devices.
*/
if (list_empty(&ctx->iopoll_list)) {
- ctx->poll_multi_file = false;
- } else if (!ctx->poll_multi_file) {
+ ctx->poll_multi_queue = false;
+ } else if (!ctx->poll_multi_queue) {
struct io_kiocb *list_req;
+ unsigned int queue_num0, queue_num1;
list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
inflight_entry);
- if (list_req->file != req->file)
- ctx->poll_multi_file = true;
+
+ if (list_req->file != req->file) {
+ ctx->poll_multi_queue = true;
+ } else {
+ queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
+ queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
+ if (queue_num0 != queue_num1)
+ ctx->poll_multi_queue = true;
+ }
}
/*