Diffstat (limited to 'block/kyber-iosched.c')
-rw-r--r--    block/kyber-iosched.c    15    ++-------------
1 file changed, 2 insertions(+), 13 deletions(-)
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 95d062c07c61..08eb5295c18d 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -40,8 +40,6 @@ enum {
 };
 
 enum {
-        KYBER_MIN_DEPTH = 256,
-
         /*
          * In order to prevent starvation of synchronous requests by a flood of
          * asynchronous requests, we reserve 25% of requests for synchronous
@@ -305,7 +303,6 @@ static int kyber_bucket_fn(const struct request *rq)
 static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 {
         struct kyber_queue_data *kqd;
-        unsigned int max_tokens;
         unsigned int shift;
         int ret = -ENOMEM;
         int i;
@@ -320,25 +317,17 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
         if (!kqd->cb)
                 goto err_kqd;
 
-        /*
-         * The maximum number of tokens for any scheduling domain is at least
-         * the queue depth of a single hardware queue. If the hardware doesn't
-         * have many tags, still provide a reasonable number.
-         */
-        max_tokens = max_t(unsigned int, q->tag_set->queue_depth,
-                           KYBER_MIN_DEPTH);
         for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                 WARN_ON(!kyber_depth[i]);
                 WARN_ON(!kyber_batch_size[i]);
                 ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
-                                              max_tokens, -1, false, GFP_KERNEL,
-                                              q->node);
+                                              kyber_depth[i], -1, false,
+                                              GFP_KERNEL, q->node);
                 if (ret) {
                         while (--i >= 0)
                                 sbitmap_queue_free(&kqd->domain_tokens[i]);
                         goto err_cb;
                 }
-                sbitmap_queue_resize(&kqd->domain_tokens[i], kyber_depth[i]);
         }
 
         shift = kyber_sched_tags_shift(kqd);
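
For reference, a minimal sketch of how the per-domain token setup reads with this patch applied: each domain's sbitmap is sized directly to kyber_depth[i] at init time, instead of being allocated at a larger max_tokens and resized down afterwards. The helper name kyber_init_domain_tokens below is hypothetical and the error labels are omitted; in the file this loop sits inline in kyber_queue_data_alloc().

/*
 * Sketch only (abridged from the post-patch hunk above): initialize one
 * sbitmap of tokens per scheduling domain, sized to kyber_depth[i].
 */
static int kyber_init_domain_tokens(struct kyber_queue_data *kqd,
                                    struct request_queue *q)
{
        int i, ret;

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
                                              kyber_depth[i], -1, false,
                                              GFP_KERNEL, q->node);
                if (ret) {
                        /* Unwind the domains that were already initialized. */
                        while (--i >= 0)
                                sbitmap_queue_free(&kqd->domain_tokens[i]);
                        return ret;
                }
        }
        return 0;
}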