author     Con Kolivas <kernel@kolivas.org>    2016-10-28 10:35:21 +1100
committer  Con Kolivas <kernel@kolivas.org>    2016-10-28 12:30:13 +1100
commit     4ae1fbd097e8c242d1b079dc2198addedef52efb (patch)
tree       18a7b358790daece8d5977fe23ff64614979a2ae /kernel
parent     dd9cf810d290043746c089ec46584ea59388f6b7 (diff)
Fix various combinations of irq accounting and dynticks config by relying on the existing code in cputime.c and removing it from MuQSS.c.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/Makefile  |   4
-rw-r--r--  kernel/sched/MuQSS.c   | 521
-rw-r--r--  kernel/sched/MuQSS.h   |  50
-rw-r--r--  kernel/sched/cputime.c |   7
-rw-r--r--  kernel/time/Kconfig    |   1
5 files changed, 58 insertions(+), 525 deletions(-)
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index a787aa942603..77bdf9807f99 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -18,14 +18,14 @@ endif
 ifdef CONFIG_SCHED_MUQSS
 obj-y += MuQSS.o clock.o
 else
-obj-y += core.o loadavg.o clock.o cputime.o
+obj-y += core.o loadavg.o clock.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
 obj-$(CONFIG_SMP) += cpudeadline.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
 endif
-obj-y += wait.o swait.o completion.o idle.o
+obj-y += wait.o swait.o completion.o idle.o cputime.o
 obj-$(CONFIG_SMP) += cpupri.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_CPU_FREQ) += cpufreq.o
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index 4d7e1c49d54f..6ee07b81585a 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -2804,116 +2804,6 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-
-/*
- * There are no locks covering percpu hardirq/softirq time.
- * They are only modified in account_system_vtime, on corresponding CPU
- * with interrupts disabled. So, writes are safe.
- * They are read and saved off onto struct rq in update_rq_clock().
- * This may result in other CPU reading this CPU's irq time and can
- * race with irq/account_system_vtime on this CPU. We would either get old
- * or new value with a side effect of accounting a slice of irq time to wrong
- * task when irq is in progress while we read rq->clock. That is a worthy
- * compromise in place of having locks on each irq in account_system_time.
- */
-static DEFINE_PER_CPU(u64, cpu_hardirq_time);
-static DEFINE_PER_CPU(u64, cpu_softirq_time);
-
-static DEFINE_PER_CPU(u64, irq_start_time);
-static int sched_clock_irqtime;
-
-void enable_sched_clock_irqtime(void)
-{
-	sched_clock_irqtime = 1;
-}
-
-void disable_sched_clock_irqtime(void)
-{
-	sched_clock_irqtime = 0;
-}
-
-#ifndef CONFIG_64BIT
-static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
-
-static inline void irq_time_write_begin(void)
-{
-	__this_cpu_inc(irq_time_seq.sequence);
-	smp_wmb();
-}
-
-static inline void irq_time_write_end(void)
-{
-	smp_wmb();
-	__this_cpu_inc(irq_time_seq.sequence);
-}
-
-static inline u64 irq_time_read(int cpu)
-{
-	u64 irq_time;
-	unsigned seq;
-
-	do {
-		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
-		irq_time = per_cpu(cpu_softirq_time, cpu) +
-			   per_cpu(cpu_hardirq_time, cpu);
-	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
-
-	return irq_time;
-}
-#else /* CONFIG_64BIT */
-static inline void irq_time_write_begin(void)
-{
-}
-
-static inline void irq_time_write_end(void)
-{
-}
-
-static inline u64 irq_time_read(int cpu)
-{
-	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
-}
-#endif /* CONFIG_64BIT */
-
-/*
- * Called before incrementing preempt_count on {soft,}irq_enter
- * and before decrementing preempt_count on {soft,}irq_exit.
- */
-void irqtime_account_irq(struct task_struct *curr)
-{
-	unsigned long flags;
-	s64 delta;
-	int cpu;
-
-	if (!sched_clock_irqtime)
-		return;
-
-	local_irq_save(flags);
-
-	cpu = smp_processor_id();
-	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
-	__this_cpu_add(irq_start_time, delta);
-
-	irq_time_write_begin();
-	/*
-	 * We do not account for softirq time from ksoftirqd here.
-	 * We want to continue accounting softirq time to ksoftirqd thread
-	 * in that case, so as not to confuse scheduler with a special task
-	 * that do not consume any time, but still wants to run.
-	 */
-	if (hardirq_count())
-		__this_cpu_add(cpu_hardirq_time, delta);
-	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
-		__this_cpu_add(cpu_softirq_time, delta);
-
-	irq_time_write_end();
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(irqtime_account_irq);
-
-#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-
 #ifdef CONFIG_PARAVIRT
 static inline u64 steal_ticks(u64 steal)
 {
@@ -2975,89 +2865,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 # define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)
 #endif
 
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-static void irqtime_account_hi_si(void)
-{
-	u64 *cpustat = kcpustat_this_cpu->cpustat;
-	u64 latest_ns;
-
-	latest_ns = nsecs_to_cputime64(this_cpu_read(cpu_hardirq_time));
-	if (latest_ns > cpustat[CPUTIME_IRQ])
-		cpustat[CPUTIME_IRQ] += (__force u64)cputime_one_jiffy;
-
-	latest_ns = nsecs_to_cputime64(this_cpu_read(cpu_softirq_time));
-	if (latest_ns > cpustat[CPUTIME_SOFTIRQ])
-		cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy;
-}
-#else /* CONFIG_IRQ_TIME_ACCOUNTING */
-
-#define sched_clock_irqtime	(0)
-
-static inline void irqtime_account_hi_si(void)
-{
-}
-#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-
-static __always_inline bool steal_account_process_tick(void)
-{
-#ifdef CONFIG_PARAVIRT
-	if (static_key_false(&paravirt_steal_enabled)) {
-		u64 steal;
-		cputime_t steal_ct;
-
-		steal = paravirt_steal_clock(smp_processor_id());
-		steal -= this_rq()->prev_steal_time;
-
-		/*
-		 * cputime_t may be less precise than nsecs (eg: if it's
-		 * based on jiffies). Lets cast the result to cputime
-		 * granularity and account the rest on the next rounds.
-		 */
-		steal_ct = nsecs_to_cputime(steal);
-		this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
-
-		account_steal_time(steal_ct);
-		return steal_ct;
-	}
-#endif
-	return false;
-}
-
-/*
- * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
- * tasks (sum on group iteration) belonging to @tsk's group.
- */
-void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
-{
-	struct signal_struct *sig = tsk->signal;
-	cputime_t utime, stime;
-	struct task_struct *t;
-	unsigned int seq, nextseq;
-	unsigned long flags;
-
-	rcu_read_lock();
-	/* Attempt a lockless read on the first round. */
-	nextseq = 0;
-	do {
-		seq = nextseq;
-		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
-		times->utime = sig->utime;
-		times->stime = sig->stime;
-		times->sum_exec_runtime = sig->sum_sched_runtime;
-
-		for_each_thread(tsk, t) {
-			task_cputime(t, &utime, &stime);
-			times->utime += utime;
-			times->stime += stime;
-			times->sum_exec_runtime += task_sched_runtime(t);
-		}
-		/* If lockless access failed, take the lock. */
-		nextseq = 1;
-	} while (need_seqretry(&sig->stats_lock, seq));
-	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
-	rcu_read_unlock();
-}
-
 /*
  * On each tick, add the number of nanoseconds to the unbanked variables and
  * once one tick's worth has accumulated, account it allowing for accurate
@@ -3187,9 +2994,6 @@ static void update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
 	s64 account_ns = rq->niffies - p->last_ran;
 	struct task_struct *idle = rq->idle;
 
-	if (steal_account_process_tick())
-		goto ts_account;
-
 	/* Accurate tick timekeeping */
 	if (user_mode(get_irq_regs()))
 		pc_user_time(rq, p, account_ns);
@@ -3198,10 +3002,6 @@ static void update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
 	} else
 		pc_idle_time(rq, idle, account_ns);
 
-	if (sched_clock_irqtime)
-		irqtime_account_hi_si();
-
-ts_account:
 	/* time_slice accounting is done in usecs to avoid overflow on 32bit */
 	if (p->policy != SCHED_FIFO && p != idle)
 		p->time_slice -= NS_TO_US(account_ns);
@@ -3288,134 +3088,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	return ns;
 }
 
-/* Compatibility crap */
-void account_user_time(struct task_struct *p, cputime_t cputime,
-		       cputime_t cputime_scaled)
-{
-}
-
-void account_idle_time(cputime_t cputime)
-{
-}
-
-/*
- * Account guest cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @cputime: the cpu time spent in virtual machine since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
- */
-static void account_guest_time(struct task_struct *p, cputime_t cputime,
-			       cputime_t cputime_scaled)
-{
-	u64 *cpustat = kcpustat_this_cpu->cpustat;
-
-	/* Add guest time to process. */
-	p->utime += (__force u64)cputime;
-	p->utimescaled += (__force u64)cputime_scaled;
-	account_group_user_time(p, cputime);
-	p->gtime += (__force u64)cputime;
-
-	/* Add guest time to cpustat. */
-	if (task_nice(p) > 0) {
-		cpustat[CPUTIME_NICE] += (__force u64)cputime;
-		cpustat[CPUTIME_GUEST_NICE] += (__force u64)cputime;
-	} else {
-		cpustat[CPUTIME_USER] += (__force u64)cputime;
-		cpustat[CPUTIME_GUEST] += (__force u64)cputime;
-	}
-}
-
-/*
- * Account system cpu time to a process and desired cpustat field
- * @p: the process that the cpu time gets accounted to
- * @cputime: the cpu time spent in kernel space since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
- * @target_cputime64: pointer to cpustat field that has to be updated
- */
-static inline
-void __account_system_time(struct task_struct *p, cputime_t cputime,
-			   cputime_t cputime_scaled, cputime64_t *target_cputime64)
-{
-	/* Add system time to process. */
-	p->stime += (__force u64)cputime;
-	p->stimescaled += (__force u64)cputime_scaled;
-	account_group_system_time(p, cputime);
-
-	/* Add system time to cpustat. */
-	*target_cputime64 += (__force u64)cputime;
-
-	/* Account for system time used */
-	acct_update_integrals(p);
-}
-
-/*
- * Account system cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @hardirq_offset: the offset to subtract from hardirq_count()
- * @cputime: the cpu time spent in kernel space since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
- * This is for guest only now.
- */
-void account_system_time(struct task_struct *p, int hardirq_offset,
-			 cputime_t cputime, cputime_t cputime_scaled)
-{
-
-	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
-		account_guest_time(p, cputime, cputime_scaled);
-}
-
-/*
- * Account for involuntary wait time.
- * @steal: the cpu time spent in involuntary wait
- */
-void account_steal_time(cputime_t cputime)
-{
-	u64 *cpustat = kcpustat_this_cpu->cpustat;
-
-	cpustat[CPUTIME_STEAL] += (__force u64)cputime;
-}
-
-/*
- * Account for idle time.
- * @cputime: the cpu time spent in idle wait
- */
-static void account_idle_times(cputime_t cputime)
-{
-	u64 *cpustat = kcpustat_this_cpu->cpustat;
-	struct rq *rq = this_rq();
-
-	if (atomic_read(&rq->nr_iowait) > 0)
-		cpustat[CPUTIME_IOWAIT] += (__force u64)cputime;
-	else
-		cpustat[CPUTIME_IDLE] += (__force u64)cputime;
-}
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-
-void account_process_tick(struct task_struct *p, int user_tick)
-{
-}
-
-/*
- * Account multiple ticks of steal time.
- * @p: the process from which the cpu time has been stolen
- * @ticks: number of stolen ticks
- */
-void account_steal_ticks(unsigned long ticks)
-{
-	account_steal_time(jiffies_to_cputime(ticks));
-}
-
-/*
- * Account multiple ticks of idle time.
- * @ticks: number of stolen ticks
- */
-void account_idle_ticks(unsigned long ticks)
-{
-	account_idle_times(jiffies_to_cputime(ticks));
-}
-#endif
-
 #ifdef CONFIG_HIGH_RES_TIMERS
 static inline int hrexpiry_enabled(struct rq *rq)
 {
@@ -8058,199 +7730,6 @@ void set_curr_task(int cpu, struct task_struct *p)
 
 #endif
 
-/*
- * Use precise platform statistics if available:
- */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-
-#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
-void vtime_common_task_switch(struct task_struct *prev)
-{
-	if (is_idle_task(prev))
-		vtime_account_idle(prev);
-	else
-		vtime_account_system(prev);
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	vtime_account_user(prev);
-#endif
-	arch_vtime_task_switch(prev);
-}
-#endif
-
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
-	*ut = p->utime;
-	*st = p->stime;
-}
-EXPORT_SYMBOL_GPL(task_cputime_adjusted);
-
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
-	struct task_cputime cputime;
-
-	thread_group_cputime(p, &cputime);
-
-	*ut = cputime.utime;
-	*st = cputime.stime;
-}
-
-void vtime_account_system_irqsafe(struct task_struct *tsk)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	vtime_account_system(tsk);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(vtime_account_system_irqsafe);
-
-/*
- * Archs that account the whole time spent in the idle task
- * (outside irq) as idle time can rely on this and just implement
- * vtime_account_system() and vtime_account_idle(). Archs that
- * have other meaning of the idle time (s390 only includes the
- * time spent by the CPU when it's in low power mode) must override
- * vtime_account().
- */
-#ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_account_irq_enter(struct task_struct *tsk)
-{
-	if (!in_interrupt() && is_idle_task(tsk))
-		vtime_account_idle(tsk);
-	else
-		vtime_account_system(tsk);
-}
-EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
-#endif /* __ARCH_HAS_VTIME_ACCOUNT */
-
-#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-/*
- * Perform (stime * rtime) / total, but avoid multiplication overflow by
- * losing precision when the numbers are big.
- */
-static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
-{
-	u64 scaled;
-
-	for (;;) {
-		/* Make sure "rtime" is the bigger of stime/rtime */
-		if (stime > rtime) {
-			u64 tmp = rtime; rtime = stime; stime = tmp;
-		}
-
-		/* Make sure 'total' fits in 32 bits */
-		if (total >> 32)
-			goto drop_precision;
-
-		/* Does rtime (and thus stime) fit in 32 bits? */
-		if (!(rtime >> 32))
-			break;
-
-		/* Can we just balance rtime/stime rather than dropping bits? */
-		if (stime >> 31)
-			goto drop_precision;
-
-		/* We can grow stime and shrink rtime and try to make them both fit */
-		stime <<= 1;
-		rtime >>= 1;
-		continue;
-
-drop_precision:
-		/* We drop from rtime, it has more bits than stime */
-		rtime >>= 1;
-		total >>= 1;
-	}
-
-	/*
-	 * Make sure gcc understands that this is a 32x32->64 multiply,
-	 * followed by a 64/32->64 divide.
-	 */
-	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
-	return (__force cputime_t) scaled;
-}
-
-/*
- * Adjust tick based cputime random precision against scheduler
- * runtime accounting.
- */
-static void cputime_adjust(struct task_cputime *curr,
-			   struct prev_cputime *prev,
-			   cputime_t *ut, cputime_t *st)
-{
-	cputime_t rtime, stime, utime, total;
-
-	stime = curr->stime;
-	total = stime + curr->utime;
-
-	/*
-	 * Tick based cputime accounting depend on random scheduling
-	 * timeslices of a task to be interrupted or not by the timer.
-	 * Depending on these circumstances, the number of these interrupts
-	 * may be over or under-optimistic, matching the real user and system
-	 * cputime with a variable precision.
-	 *
-	 * Fix this by scaling these tick based values against the total
-	 * runtime accounted by the CFS scheduler.
-	 */
-	rtime = nsecs_to_cputime(curr->sum_exec_runtime);
-
-	/*
-	 * Update userspace visible utime/stime values only if actual execution
-	 * time is bigger than already exported. Note that can happen, that we
-	 * provided bigger values due to scaling inaccuracy on big numbers.
-	 */
-	if (prev->stime + prev->utime >= rtime)
-		goto out;
-
-	if (total) {
-		stime = scale_stime((__force u64)stime,
-				    (__force u64)rtime, (__force u64)total);
-		utime = rtime - stime;
-	} else {
-		stime = rtime;
-		utime = 0;
-	}
-
-	/*
-	 * If the tick based count grows faster than the scheduler one,
-	 * the result of the scaling may go backward.
-	 * Let's enforce monotonicity.
-	 */
-	prev->stime = max(prev->stime, stime);
-	prev->utime = max(prev->utime, utime);
-
-out:
-	*ut = prev->utime;
-	*st = prev->stime;
-}
-
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
-	struct task_cputime cputime = {
-		.sum_exec_runtime = tsk_seruntime(p),
-	};
-
-	task_cputime(p, &cputime.utime, &cputime.stime);
-	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
-}
-EXPORT_SYMBOL_GPL(task_cputime_adjusted);
-
-/*
- * Must be called with siglock held.
- */
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
-	struct task_cputime cputime;
-
-	thread_group_cputime(p, &cputime);
-	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
-}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
 void init_idle_bootup_task(struct task_struct *idle)
 {}
 
diff --git a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
index 858c881ec832..f9510d739777 100644
--- a/kernel/sched/MuQSS.h
+++ b/kernel/sched/MuQSS.h
@@ -2,6 +2,7 @@
 #include <linux/cpuidle.h>
 #include <linux/skip_list.h>
 #include <linux/stop_machine.h>
+#include "cpuacct.h"
 
 #ifndef MUQSS_SCHED_H
 #define MUQSS_SCHED_H
@@ -248,6 +249,55 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
 }
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+
+DECLARE_PER_CPU(u64, cpu_hardirq_time);
+DECLARE_PER_CPU(u64, cpu_softirq_time);
+
+#ifndef CONFIG_64BIT
+DECLARE_PER_CPU(seqcount_t, irq_time_seq);
+
+static inline void irq_time_write_begin(void)
+{
+	__this_cpu_inc(irq_time_seq.sequence);
+	smp_wmb();
+}
+
+static inline void irq_time_write_end(void)
+{
+	smp_wmb();
+	__this_cpu_inc(irq_time_seq.sequence);
+}
+
+static inline u64 irq_time_read(int cpu)
+{
+	u64 irq_time;
+	unsigned seq;
+
+	do {
+		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
+		irq_time = per_cpu(cpu_softirq_time, cpu) +
+			   per_cpu(cpu_hardirq_time, cpu);
+	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
+
+	return irq_time;
+}
+#else /* CONFIG_64BIT */
+static inline void irq_time_write_begin(void)
+{
+}
+
+static inline void irq_time_write_end(void)
+{
+}
+
+static inline u64 irq_time_read(int cpu)
+{
+	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
+}
+#endif /* CONFIG_64BIT */
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
 #ifdef CONFIG_CPU_FREQ
 DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
 
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a846cf89eb96..f09077a45a4c 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -4,7 +4,12 @@
 #include <linux/kernel_stat.h>
 #include <linux/static_key.h>
 #include <linux/context_tracking.h>
+#ifdef CONFIG_SCHED_MUQSS
+#include "MuQSS.h"
+#include "stats.h"
+#else
 #include "sched.h"
+#endif
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #endif
@@ -671,7 +676,7 @@ out:
 void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
 	struct task_cputime cputime = {
-		.sum_exec_runtime = p->se.sum_exec_runtime,
+		.sum_exec_runtime = tsk_seruntime(p),
 	};
 
 	task_cputime(p, &cputime.utime, &cputime.stime);
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 029afbd378ae..4008d9f95dd7 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -95,7 +95,6 @@ config NO_HZ_FULL
 	depends on HAVE_CONTEXT_TRACKING
 	# VIRT_CPU_ACCOUNTING_GEN dependency
 	depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
-	select CONTEXT_TRACKING
 	select NO_HZ_COMMON
 	select RCU_NOCB_CPU
 	select VIRT_CPU_ACCOUNTING_GEN
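
The seqcount helpers this patch moves into MuQSS.h exist because a 64-bit per-cpu counter cannot be loaded atomically on a 32-bit machine: a reader racing with irqtime_account_irq() could combine the halves of two different values. The writer makes the sequence odd for the duration of its update, and the reader retries until it observes the same even sequence before and after reading the counters. Below is a minimal userspace sketch of the same pattern; the GCC atomic built-ins and fences are stand-ins for the kernel's __this_cpu_inc()/smp_wmb()/smp_rmb(), and the single-writer assumption mirrors the per-cpu, irqs-disabled write side.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the per-cpu irq time counters. */
	static uint64_t hardirq_time, softirq_time;
	static unsigned int seq;	/* odd while a write is in flight */

	static void irq_time_account(uint64_t *counter, uint64_t delta)
	{
		/* Mirrors irq_time_write_begin(): make the sequence odd. */
		__atomic_store_n(&seq, seq + 1, __ATOMIC_RELAXED);
		__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* smp_wmb() analogue */
		*counter += delta;
		__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* smp_wmb() analogue */
		/* Mirrors irq_time_write_end(): back to even. */
		__atomic_store_n(&seq, seq + 1, __ATOMIC_RELAXED);
	}

	static uint64_t irq_time_read(void)
	{
		uint64_t sum;
		unsigned int start;

		do {
			start = __atomic_load_n(&seq, __ATOMIC_ACQUIRE);
			sum = softirq_time + hardirq_time;
			__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* smp_rmb() analogue */
			/* Retry if a write was in flight or completed meanwhile. */
		} while ((start & 1) ||
			 __atomic_load_n(&seq, __ATOMIC_RELAXED) != start);

		return sum;
	}

	int main(void)
	{
		irq_time_account(&hardirq_time, 1000);
		irq_time_account(&softirq_time, 500);
		printf("irq time: %llu ns\n", (unsigned long long)irq_time_read());
		return 0;
	}

On 64-bit the patch compiles irq_time_read() down to a plain sum with empty write_begin/write_end helpers, since a single 64-bit load cannot tear there.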
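scale_stime(), deleted from MuQSS.c above in favour of the copy already in kernel/sched/cputime.c, computes stime * rtime / total without 128-bit arithmetic: it sheds low-order bits from whichever operand has the most headroom until the product fits a 32x32->64 multiply and the divisor fits in 32 bits. A standalone sketch of the same algorithm, runnable in userspace (div_u64() replaced by plain 64-bit division; the values in the demo are made up):

	#include <stdint.h>
	#include <stdio.h>

	/* Compute stime * rtime / total keeping every intermediate in 64 bits,
	 * trading low-order bits for headroom as the kernel helper does. */
	static uint64_t scale_stime(uint64_t stime, uint64_t rtime, uint64_t total)
	{
		for (;;) {
			/* Make sure "rtime" is the bigger of stime/rtime. */
			if (stime > rtime) {
				uint64_t tmp = rtime; rtime = stime; stime = tmp;
			}

			/* Make sure "total" fits in 32 bits. */
			if (total >> 32)
				goto drop_precision;

			/* Does rtime (and thus stime) fit in 32 bits? */
			if (!(rtime >> 32))
				break;

			/* Can we balance rtime/stime rather than dropping bits? */
			if (stime >> 31)
				goto drop_precision;

			/* Grow stime and shrink rtime until both fit. */
			stime <<= 1;
			rtime >>= 1;
			continue;

	drop_precision:
			/* Drop from rtime; it has more bits than stime. */
			rtime >>= 1;
			total >>= 1;
		}

		/* 32x32->64 multiply followed by a 64/32 divide. */
		return (uint64_t)(uint32_t)stime * (uint32_t)rtime / (uint32_t)total;
	}

	int main(void)
	{
		/* 10s of tick-sampled stime out of 25s sampled total, rescaled
		 * against 30s of precise runtime: roughly 12s. */
		printf("%llu\n", (unsigned long long)scale_stime(10000000000ULL,
								 30000000000ULL,
								 25000000000ULL));
		return 0;
	}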
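cputime_adjust(), likewise now provided only by cputime.c, splits the scheduler's precise sum_exec_runtime in the stime:utime ratio observed by tick sampling, then clamps both outputs so a task's reported times never run backwards between reads. A simplified toy version of that logic (plain 64-bit division where the kernel uses scale_stime(), so it can overflow for very large inputs; the struct and names here are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	struct prev_cputime { uint64_t utime, stime; };

	/* Split rtime (precise runtime) in the observed stime:utime ratio,
	 * then enforce monotonicity against the previous report. */
	static void cputime_adjust(uint64_t utime, uint64_t stime, uint64_t rtime,
				   struct prev_cputime *prev,
				   uint64_t *ut, uint64_t *st)
	{
		uint64_t total = stime + utime;

		/* Only move forward past what was already reported. */
		if (prev->stime + prev->utime < rtime) {
			if (total) {
				stime = rtime * stime / total;	/* scale_stime() in the kernel */
				utime = rtime - stime;
			} else {
				stime = rtime;
				utime = 0;
			}
			/* Never let the reported values go backwards. */
			if (stime > prev->stime)
				prev->stime = stime;
			if (utime > prev->utime)
				prev->utime = utime;
		}
		*ut = prev->utime;
		*st = prev->stime;
	}

	int main(void)
	{
		struct prev_cputime prev = { 0, 0 };
		uint64_t ut, st;

		/* 3s user + 1s system sampled by ticks, 6s of precise runtime:
		 * reports utime=4.5s, stime=1.5s. */
		cputime_adjust(3000000000ULL, 1000000000ULL, 6000000000ULL,
			       &prev, &ut, &st);
		printf("utime=%llu stime=%llu\n",
		       (unsigned long long)ut, (unsigned long long)st);
		return 0;
	}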