| author | Con Kolivas <kernel@kolivas.org> | 2016-10-28 12:17:09 +1100 |
|---|---|---|
| committer | Con Kolivas <kernel@kolivas.org> | 2016-10-28 12:30:13 +1100 |
| commit | cd1ffb9641e41beefd6e42feb79f8371e7d6f656 (patch) | |
| tree | 26992c9467c3642d6048a30358945dbcf602fbe4 /kernel | |
| parent | 68173477c584e33fec0f892266f34abb7037f6e5 (diff) | |
When one CPU has all the load, make it report its load as ramping up quickly so that it benefits more from turbo modes.
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/MuQSS.c | 25 |
|---|---|---|

1 file changed, 15 insertions(+), 10 deletions(-)
```diff
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index a07c284b4a27..91c97ef5c1af 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -864,18 +864,23 @@ static inline bool rq_local(struct rq *rq);
  */
 static void update_load_avg(struct rq *rq)
 {
-	if (likely(rq->niffies > rq->load_update)) {
-		unsigned long us_interval = NS_TO_US(rq->niffies - rq->load_update);
-		long load, curload = rq_load(rq) + atomic_read(&rq->nr_iowait);
-
-		load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
-		if (unlikely(load < 0))
-			load = 0;
-		load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
-		rq->load_avg = load;
-	} else
+	unsigned long us_interval;
+	long load, curload;
+
+	if (unlikely(rq->niffies <= rq->load_update))
 		return;
+	us_interval = NS_TO_US(rq->niffies - rq->load_update);
+	curload = rq_load(rq);
+	load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
+	if (unlikely(load < 0))
+		load = 0;
+	load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
+	/* If this CPU has all the load, make it ramp up quickly */
+	if (curload > load && curload >= atomic_read(&grq.nr_running))
+		load = curload;
+	rq->load_avg = load;
+	rq->load_update = rq->niffies;
 	if (likely(rq_local(rq)))
 		cpufreq_trigger(rq->niffies, rq->load_avg);
```
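For context, here is a minimal userspace sketch of the patched update_load_avg() logic. It is not the kernel code: plain globals stand in for the rq and grq fields, there is no locking or atomic_read(), NS_TO_US() is reduced to a division, and the sampling times in main() are invented for illustration.

```c
/*
 * Minimal userspace sketch of the patched update_load_avg() logic.
 * Assumptions (not from the kernel source): plain globals stand in
 * for the rq/grq fields, and times are passed in as nanoseconds.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024		/* same scale the kernel uses */
#define NS_TO_US(ns)		((ns) / 1000)

static long load_avg;			/* decayed, capacity-scaled load */
static unsigned long load_update;	/* time of the last update, ns */
static long grq_nr_running;		/* runnable tasks across all CPUs */

/* curload: runnable tasks on this CPU; niffies: current time in ns */
static void update_load_avg(unsigned long niffies, long curload)
{
	unsigned long us_interval;
	long load;

	/* The clock can go backwards; skip the update if it does. */
	if (niffies <= load_update)
		return;

	us_interval = NS_TO_US(niffies - load_update);

	/* Decay the previous average; one full interval of 262144/5 us
	 * (~52 ms) zeroes it. */
	load = load_avg - (load_avg * us_interval * 5 / 262144);
	if (load < 0)
		load = 0;

	/* Accumulate the current load, weighted quadratically. */
	load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;

	/* If this CPU has all the load, make it ramp up quickly. */
	if (curload > load && curload >= grq_nr_running)
		load = curload;

	load_avg = load;
	load_update = niffies;
}

int main(void)
{
	grq_nr_running = 1;	/* this CPU holds every runnable task */

	/* After only 10 us the accumulated term rounds to zero, so the
	 * new clamp fires and load_avg jumps straight to curload. */
	update_load_avg(10000, 1);
	printf("after 10 us: load_avg=%ld\n", load_avg);

	/* A further 1 ms accumulates through the quadratic term as before. */
	update_load_avg(1010000, 1);
	printf("after +1 ms: load_avg=%ld\n", load_avg);
	return 0;
}
```

Note the guard on the clamp: it only fires when this runqueue holds at least as many runnable tasks as exist system-wide (curload >= grq.nr_running), i.e. when one CPU really does have all the load. In every other case the average still ramps up gradually over the usual 262144/5 us (roughly 52 ms) decay window.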