author     Con Kolivas <kernel@kolivas.org>  2016-10-25 18:44:36 +1100
committer  Con Kolivas <kernel@kolivas.org>  2016-10-25 22:36:08 +1100
commit     d8bca0f15e0c2f3dbcc7910b789bd86c889f4390 (patch)
tree       514fb83971d4547f8fdb72700f6ce405ea4a53a0 /kernel/sched
parent     e839d768a1a42dfd13cf686a36bcaea0beb4b423 (diff)
Account for unaccounted CPU time in parent before distributing time_slice to child.
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/MuQSS.c | 23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index 781ce7baa95a..920d9f8c7c9a 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -2347,6 +2347,16 @@ int sysctl_schedstats(struct ctl_table *table, int write,
static inline void init_schedstats(void) {}
#endif /* CONFIG_SCHEDSTATS */
+static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p);
+
+static void account_task_cpu(struct rq *rq, struct task_struct *p)
+{
+ update_clocks(rq);
+ /* This isn't really a context switch but accounting is the same */
+ update_cpu_clock_switch(rq, p);
+ p->last_ran = rq->niffies;
+}
+
/*
* wake_up_new_task - wake up a newly created task for the first time.
*
@@ -2372,7 +2382,6 @@ void wake_up_new_task(struct task_struct *p)
}
double_rq_lock(rq, new_rq);
- update_clocks(rq);
rq_curr = rq->curr;
/*
@@ -2380,7 +2389,6 @@ void wake_up_new_task(struct task_struct *p)
*/
p->prio = rq_curr->normal_prio;
- activate_task(p, rq);
trace_sched_wakeup_new(p);
/*
@@ -2391,17 +2399,17 @@ void wake_up_new_task(struct task_struct *p)
* modified within schedule() so it is always equal to
* current->deadline.
*/
+ account_task_cpu(rq, rq_curr);
p->last_ran = rq_curr->last_ran;
if (likely(rq_curr->policy != SCHED_FIFO)) {
rq_curr->time_slice /= 2;
- if (unlikely(rq_curr->time_slice < RESCHED_US)) {
+ if (rq_curr->time_slice < RESCHED_US) {
/*
* Forking task has run out of timeslice. Reschedule it and
* start its child with a new time slice and deadline. The
* child will end up running first because its deadline will
* be slightly earlier.
*/
- rq_curr->time_slice = 0;
__set_tsk_resched(rq_curr);
time_slice_expired(p, new_rq);
if (suitable_idle_cpus(p))
@@ -2424,6 +2432,7 @@ void wake_up_new_task(struct task_struct *p)
time_slice_expired(p, new_rq);
try_preempt(p, new_rq);
}
+ activate_task(p, new_rq);
double_rq_unlock(rq, new_rq);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
@@ -3197,8 +3206,7 @@ static void pc_user_time(struct rq *rq, struct task_struct *p, unsigned long ns)
* Bank in p->sched_time the ns elapsed since the last tick or switch.
* CPU scheduler quota accounting is also performed here in microseconds.
*/
-static void
-update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
+static void update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
{
s64 account_ns = rq->niffies - p->last_ran;
struct task_struct *idle = rq->idle;
@@ -3230,8 +3238,7 @@ ts_account:
* Bank in p->sched_time the ns elapsed since the last tick or switch.
* CPU scheduler quota accounting is also performed here in microseconds.
*/
-static void
-update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
+static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
{
s64 account_ns = rq->niffies - p->last_ran;
struct task_struct *idle = rq->idle;
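
For readers following the change, below is a minimal, self-contained C sketch (not kernel code) of the ordering this patch enforces: settle the parent's CPU-time accounting against the runqueue clock *before* halving its time_slice for the child. The niffies/time_slice/RESCHED_US names mirror the diff above; the struct layout, the microsecond-based quota, the RESCHED_US value, and the child inheriting half the slice (which happens outside the hunks shown) are simplified assumptions for illustration.

/* sketch.c - illustrative only; assumes time_slice is a microsecond
 * quota and "niffies" is a monotonic nanosecond clock, as in MuQSS. */
#include <stdio.h>

#define RESCHED_US 128 /* assumed resched threshold, microseconds */

struct task {
	long long last_ran;    /* niffies value when task last started running */
	long long sched_time;  /* total ns banked to this task */
	long long time_slice;  /* remaining quota, microseconds */
};

/* Analogue of the patch's account_task_cpu(): bank the ns run since
 * last_ran, charge it against the quota, and stamp last_ran = now. */
static void account_task_cpu(long long now_niffies, struct task *p)
{
	long long ran_ns = now_niffies - p->last_ran;

	p->sched_time += ran_ns;
	p->time_slice -= ran_ns / 1000;  /* quota is kept in microseconds */
	p->last_ran = now_niffies;
}

/* Fork-time split as in wake_up_new_task() after this patch: account
 * the parent first, then halve only what is actually left. */
static void fork_time_slice(long long now_niffies, struct task *parent,
			    struct task *child)
{
	account_task_cpu(now_niffies, parent);
	child->last_ran = parent->last_ran;

	parent->time_slice /= 2;
	child->time_slice = parent->time_slice;
	if (parent->time_slice < RESCHED_US)
		puts("parent exhausted: reschedule it, child gets a fresh slice");
}

int main(void)
{
	/* parent started at niffies 0 with a 6000 us slice, forks 2.5 ms later */
	struct task parent = { .last_ran = 0, .sched_time = 0, .time_slice = 6000 };
	struct task child = { 0 };

	fork_time_slice(2500000LL, &parent, &child);
	printf("parent: %lld us left, child: %lld us\n",
	       parent.time_slice, child.time_slice);
	return 0;
}

In this toy run the parent is debited the 2500 us it already ran (6000 - 2500 = 3500), and each task ends up with 1750 us. The point of the patch is that ordering: without the account_task_cpu() call first, the time the parent had already consumed since its last tick or switch would be split into the child's slice as well, letting a fork-heavy parent hand out CPU time it had already used.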