author	Con Kolivas <kernel@kolivas.org>	2016-10-21 08:11:47 +1100
committer	Con Kolivas <kernel@kolivas.org>	2016-10-21 11:08:30 +1100
commit	152f8d7902e6a79f5d4dcbd65f5fd23576ac9693 (patch)
tree	c79f0b79e454800d17aa74adff2287bebcb78ca6 /kernel
parent	6caa689a9a9d2817e2a77a033dc32b37e3cd3d68 (diff)
Use niffies for accounting wherever possible
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/MuQSS.c	22
1 file changed, 11 insertions, 11 deletions
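
The patch below replaces rq->clock with rq->niffies as the timestamp source for runtime accounting. As a minimal sketch of the pattern the patch standardises on — assuming niffies is MuQSS's monotonic per-runqueue nanosecond counter — the structures and the account_slice() helper here are simplified stand-ins for illustration, not the kernel's actual definitions:

/*
 * Sketch only: simplified stand-ins for the runqueue and task fields
 * touched by this patch; not the kernel's real structures.
 */
#include <stdint.h>

struct rq_sketch {
	uint64_t niffies;	/* monotonic nanoseconds for this runqueue */
};

struct task_sketch {
	uint64_t last_ran;	/* niffies stamp from the last update */
	int64_t  time_slice;	/* remaining slice, kept in microseconds */
};

/* Charge the elapsed niffies against p's slice, then restamp last_ran. */
static void account_slice(struct rq_sketch *rq, struct task_sketch *p)
{
	int64_t diff_ns = (int64_t)(rq->niffies - p->last_ran);

	if (diff_ns < 0)	/* mirror the (s64)ns < 0 clamp in the diff */
		diff_ns = 0;
	p->time_slice -= diff_ns / 1000;  /* ns -> us, per the hunk comments */
	p->last_ran = rq->niffies;
}

As the comment repeated in both ts_account hunks notes, the slice is kept in microseconds so the subtraction stays in range on 32-bit.
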
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index f6df6702fa8f..7cf7bbb99d15 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -1382,7 +1382,7 @@ static void activate_task(struct task_struct *p, struct rq *rq)
if (unlikely(prof_on == SLEEP_PROFILING)) {
if (p->state == TASK_UNINTERRUPTIBLE)
profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
- (rq->clock - p->last_ran) >> 20);
+ (rq->niffies - p->last_ran) >> 20);
}
p->prio = effective_prio(p);
@@ -3210,7 +3210,7 @@ static void pc_user_time(struct rq *rq, struct task_struct *p,
static void
update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
{
- long account_ns = rq->clock - p->last_ran;
+ long account_ns = rq->niffies - p->last_ran;
struct task_struct *idle = rq->idle;
unsigned long account_pc;
@@ -3234,13 +3234,13 @@ update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
ts_account:
/* time_slice accounting is done in usecs to avoid overflow on 32bit */
if (p->policy != SCHED_FIFO && p != idle) {
- s64 time_diff = rq->clock - p->last_ran;
+ s64 time_diff = rq->niffies - p->last_ran;
niffy_diff(&time_diff, 1);
p->time_slice -= NS_TO_US(time_diff);
}
- p->last_ran = rq->clock;
+ p->last_ran = rq->niffies;
}
/*
@@ -3251,7 +3251,7 @@ ts_account:
static void
update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
{
- long account_ns = rq->clock - p->last_ran;
+ long account_ns = rq->niffies - p->last_ran;
struct task_struct *idle = rq->idle;
unsigned long account_pc;
@@ -3270,13 +3270,13 @@ update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
ts_account:
/* time_slice accounting is done in usecs to avoid overflow on 32bit */
if (p->policy != SCHED_FIFO && p != idle) {
- s64 time_diff = rq->clock - p->last_ran;
+ s64 time_diff = rq->niffies - p->last_ran;
niffy_diff(&time_diff, 1);
p->time_slice -= NS_TO_US(time_diff);
}
- p->last_ran = rq->clock;
+ p->last_ran = rq->niffies;
}
/*
@@ -3295,8 +3295,8 @@ static inline u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
* thread, breaking clock_gettime().
*/
if (p == rq->curr && task_on_rq_queued(p)) {
- update_rq_clock(rq);
- ns = rq->clock - p->last_ran;
+ update_clocks(rq);
+ ns = rq->niffies - p->last_ran;
if (unlikely((s64)ns < 0))
ns = 0;
}
@@ -3844,7 +3844,7 @@ static inline void schedule_debug(struct task_struct *prev)
static inline void set_rq_task(struct rq *rq, struct task_struct *p)
{
rq->rq_deadline = p->deadline;
- p->last_ran = rq->clock;
+ p->last_ran = rq->niffies;
rq->rq_prio = p->prio;
#ifdef CONFIG_SMT_NICE
rq->rq_mm = p->mm;
@@ -5684,7 +5684,7 @@ void init_idle(struct task_struct *idle, int cpu)
raw_spin_lock_irqsave(&idle->pi_lock, flags);
raw_spin_lock(&rq->lock);
- idle->last_ran = rq->clock;
+ idle->last_ran = rq->niffies;
idle->state = TASK_RUNNING;
/* Setting prio to illegal value shouldn't matter when never queued */
idle->prio = PRIO_LIMIT;
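
A side note on the SLEEP_PROFILING hunk above: the delta is shifted right by 20 bits, which divides by 2^20 (1048576) as a cheap approximation of dividing by 1e6, i.e. it converts nanoseconds to roughly milliseconds without a division. A standalone illustration (the sleep_ns value is made up for the example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sleep_ns = 2500000000ULL;	/* a hypothetical 2.5 s sleep */

	/* delta >> 20 ~= delta / 1e6: prints 2384 here vs the exact 2500 ms */
	printf("%llu\n", (unsigned long long)(sleep_ns >> 20));
	return 0;
}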