author    Pavel Begunkov <asml.silence@gmail.com>  2022-06-16 10:22:05 +0100
committer Jens Axboe <axboe@kernel.dk>             2022-07-24 18:39:13 -0600
commit    4a07723fb4bb2568a31d43709904ab0d4c33d6c8 (patch)
tree      5653ef6d2eb9e95a9f7b729914d70f1e8b96eaef /io_uring/io_uring.c
parent    4dfab8abb4721da278a2ccd45c1b6a69f8a9dd14 (diff)
io_uring: limit the number of cancellation buckets
Don't allocate too many hash/cancellation buckets; clamp the count to 8 bits, i.e. 256 buckets, or 256 * 64B = 16KB. We don't usually have that many requests in flight, and 256 buckets should be enough, especially since we do a hash search only in the cancellation path.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b9620c8072ba61a2d50eba894b89bd93a94a9abd.1655371007.git.asml.silence@gmail.com
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
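For illustration, here is a minimal userspace sketch of the sizing math the patch introduces. ilog2() and clamp() are stand-ins for the kernel helpers of the same names, and the 64B-per-bucket figure is taken from the commit message, not measured:

    /* Sketch of the bucket sizing introduced by this patch. */
    #include <stdio.h>

    /* floor(log2(n)), n > 0 -- userspace stand-in for the kernel's ilog2() */
    static int ilog2(unsigned int n)
    {
            int bits = -1;

            while (n) {
                    bits++;
                    n >>= 1;
            }
            return bits;
    }

    /* Stand-in for the kernel's clamp() macro from linux/minmax.h */
    static int clamp(int v, int lo, int hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            unsigned int cq_entries[] = { 2, 128, 4096, 65536 };
            unsigned int i;

            for (i = 0; i < sizeof(cq_entries) / sizeof(cq_entries[0]); i++) {
                    int hash_bits = clamp(ilog2(cq_entries[i]) - 5, 1, 8);

                    /* 64B per bucket, per the commit message's arithmetic */
                    printf("cq_entries=%6u -> hash_bits=%d, buckets=%4u, mem=%5uB\n",
                           cq_entries[i], hash_bits, 1U << hash_bits,
                           (1U << hash_bits) * 64);
            }
            return 0;
    }

With the clamp in place, a maximally sized CQ (65536 entries, ilog2 - 5 = 11) no longer gets 2048 buckets but is capped at 2^8 = 256, i.e. the 16KB mentioned above.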
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ac6946e3f174..aafdf1330ec6 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -254,12 +254,12 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	/*
 	 * Use 5 bits less than the max cq entries, that should give us around
-	 * 32 entries per hash list if totally full and uniformly spread.
+	 * 32 entries per hash list if totally full and uniformly spread, but
+	 * don't keep too many buckets to not overconsume memory.
 	 */
-	hash_bits = ilog2(p->cq_entries);
-	hash_bits -= 5;
-	if (hash_bits <= 0)
-		hash_bits = 1;
+	hash_bits = ilog2(p->cq_entries) - 5;
+	hash_bits = clamp(hash_bits, 1, 8);
+
 	ctx->cancel_hash_bits = hash_bits;
 	ctx->cancel_hash =
 		kmalloc((1U << hash_bits) * sizeof(struct io_hash_bucket),
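For context, the 64B-per-bucket figure corresponds to each bucket being padded out to a full cacheline. A sketch of the bucket type from the same patch series (field layout as introduced by the per-bucket spinlock change; consult io_uring/io_uring.h of that era for the authoritative definition):

    struct io_hash_bucket {
            spinlock_t              lock;
            struct hlist_head       list;
    } ____cacheline_aligned_in_smp;

On a machine with 64B cachelines, the clamped maximum of 2^8 = 256 buckets therefore allocates 256 * 64B = 16KB, matching the commit message.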