author     Xuewen Yan <xuewen.yan@unisoc.com>            2021-06-30 22:12:04 +0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2021-07-20 16:05:58 +0200
commit     143a6b8ec5c6b6e85d1f3a80d5326a83a914effa (patch)
tree       421cd5ade0f2f77341b2b3f6b4519e442c5b0987 /kernel
parent     43b89ef7bc4aebd6476ad688ae3d0806c10c6375 (diff)
sched/uclamp: Ignore max aggregation if rq is idle
[ Upstream commit 3e1493f46390618ea78607cb30c58fc19e2a5035 ]

When a task wakes up on an idle rq, uclamp_rq_util_with() would max
aggregate with the rq value. But since there is no task enqueued yet,
those values are stale, left over from the last task that was running.
When the newly woken task is actually enqueued, the rq uclamp values
should reflect that task's effective uclamp values.

This is a problem particularly for uclamp_max, because it defaults to
1024. If a task p with uclamp_max = 512 wakes up, max aggregation would
ignore the capping that should apply once this task is enqueued, which
is wrong.

Fix that by ignoring max aggregation if the rq is idle, since in that
case the effective uclamp values of the rq will be those of the task
that is waking up.

Fixes: 9d20ad7dfc9a ("sched/uclamp: Add uclamp_util_with()")
Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
[qais: Changelog]
Reviewed-by: Qais Yousef <qais.yousef@arm.com>
Link: https://lore.kernel.org/r/20210630141204.8197-1-xuewen.yan94@gmail.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
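For concreteness, the per-task clamp in the example above (uclamp_max = 512) is the kind of value user space sets through the sched_setattr() syscall. The following is a minimal sketch, assuming a kernel built with CONFIG_UCLAMP_TASK (Linux 5.3+); since glibc provides no wrapper, struct sched_attr and the flag constant are declared by hand, following include/uapi/linux/sched/types.h:

/* Illustrative only: give the calling task uclamp_max = 512, producing
 * exactly the wake-up scenario the changelog describes. */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_FLAG_UTIL_CLAMP_MAX
#define SCHED_FLAG_UTIL_CLAMP_MAX	0x40
#endif

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	/* SCHED_DEADLINE fields, unused here */
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
	/* utilization clamps */
	uint32_t sched_util_min;
	uint32_t sched_util_max;
};

int main(void)
{
	struct sched_attr attr = {
		.size           = sizeof(attr),
		.sched_flags    = SCHED_FLAG_UTIL_CLAMP_MAX,
		.sched_util_max = 512,	/* cap this task's apparent utilization */
	};

	if (syscall(SYS_sched_setattr, 0 /* self */, &attr, 0))
		perror("sched_setattr");
	return 0;
}

With this cap in place, a wake-up on an idle rq before the fix could still see the stale 1024 left by the previous task; after the fix, the task's 512 cap wins.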
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/sched.h | 21 ++++++++++++++-------
1 file changed, 14 insertions(+), 7 deletions(-)
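To replay the changelog's numbers before reading the hunk itself, here is a hedged, user-space model of the aggregation. clamp_old(), clamp_new(), and all values are invented for this sketch; only the UCLAMP_FLAG_IDLE test mirrors the real code below:

/* Simplified model of the max aggregation in uclamp_rq_util_with();
 * not kernel code, just the before/after logic with plain values. */
#include <stdio.h>

#define UCLAMP_FLAG_IDLE 0x1

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

/* Old behaviour: always max-aggregate with the rq value, even when the
 * rq is idle and its clamp is stale from the last task that ran. */
static unsigned long clamp_old(unsigned long task_max, unsigned long rq_max)
{
	return max_ul(task_max, rq_max);
}

/* New behaviour: if the rq is idle, the waking task's clamp wins. */
static unsigned long clamp_new(unsigned long task_max, unsigned long rq_max,
			       unsigned int rq_flags)
{
	if (rq_flags & UCLAMP_FLAG_IDLE)
		return task_max;
	return max_ul(task_max, rq_max);
}

int main(void)
{
	/* The last task left rq_max at the default 1024; the waking task
	 * p has uclamp_max = 512, the changelog's exact scenario. */
	unsigned long stale_rq_max = 1024, task_max = 512;

	printf("old: %lu (cap lost)\n", clamp_old(task_max, stale_rq_max));
	printf("new: %lu (cap honoured)\n",
	       clamp_new(task_max, stale_rq_max, UCLAMP_FLAG_IDLE));
	return 0;
}

Running it prints old: 1024 and new: 512: the old path lets the previous task's stale max clamp defeat the waking task's cap, while the new path honours it.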
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fdebfcbdfca9..39112ac7ab34 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2422,20 +2422,27 @@ static __always_inline
 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
 				  struct task_struct *p)
 {
-	unsigned long min_util;
-	unsigned long max_util;
+	unsigned long min_util = 0;
+	unsigned long max_util = 0;
 
 	if (!static_branch_likely(&sched_uclamp_used))
 		return util;
 
-	min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
-	max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
-
 	if (p) {
-		min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
-		max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
+		min_util = uclamp_eff_value(p, UCLAMP_MIN);
+		max_util = uclamp_eff_value(p, UCLAMP_MAX);
+
+		/*
+		 * Ignore last runnable task's max clamp, as this task will
+		 * reset it. Similarly, no need to read the rq's min clamp.
+		 */
+		if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
+			goto out;
 	}
 
+	min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
+	max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
+out:
 	/*
 	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
 	 * RUNNABLE tasks with _different_ clamps, we can end up with an