From 525705d15e63b7455977408e4601e76e6bc41524 Mon Sep 17 00:00:00 2001
From: Byungchul Park
Date: Tue, 10 Nov 2015 09:36:02 +0900
Subject: sched/fair: Consider missed ticks in NOHZ_FULL in update_cpu_load_nohz()

Usually the tick can be stopped for an idle CPU in NOHZ. However, in
NOHZ_FULL mode the tick can also be stopped for a non-idle CPU, and
update_cpu_load_nohz() does not consider that case at all.

This patch makes update_cpu_load_nohz() aware of whether the calling
path comes from NOHZ_FULL or idle NOHZ.

Signed-off-by: Byungchul Park
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Frederic Weisbecker
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1447115762-19734-3-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index edad7a43edea..f425aac63317 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -177,9 +177,9 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 extern void calc_global_load(unsigned long ticks);

 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void update_cpu_load_nohz(void);
+extern void update_cpu_load_nohz(int active);
 #else
-static inline void update_cpu_load_nohz(void) { }
+static inline void update_cpu_load_nohz(int active) { }
 #endif

 extern unsigned long get_parent_ip(unsigned long addr);
-- cgit v1.2.3
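Since this listing is limited to include/linux/sched.h, only the prototype
change is visible; the fair.c and tick-sched.c sides of the patch are not
shown. A minimal sketch of what the new argument is for follows — the caller
names are illustrative assumptions, not code from the patch:

/*
 * Minimal sketch, assuming illustrative caller names: the tick-stop
 * paths tell the load-folding code whether the tick was stopped on an
 * idle CPU or on a busy NOHZ_FULL CPU.
 */
static void example_stop_tick_idle(void)
{
	/* Idle NOHZ: the CPU contributes no load while the tick is off. */
	update_cpu_load_nohz(0);
}

static void example_stop_tick_nohz_full(void)
{
	/* NOHZ_FULL: the CPU keeps running a task, so its load still counts. */
	update_cpu_load_nohz(1);
}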
From 7098c1eac75dc03fdbb7249171a6e68ce6044a5a Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Thu, 19 Nov 2015 16:47:30 +0100
Subject: sched/cputime: Clarify vtime symbols and document them

The VTIME_SLEEPING state happens when either:

1) The task is sleeping and no tickless delta is to be added to the
   task's cputime stats.

2) The CPU isn't running vtime at all, so the same properties as in 1)
   apply.

Let's rename the vtime symbol to reflect both states.

Signed-off-by: Frederic Weisbecker
Signed-off-by: Peter Zijlstra (Intel)
Cc: Chris Metcalf
Cc: Christoph Lameter
Cc: Hiroshi Shimamoto
Cc: Linus Torvalds
Cc: Luiz Capitulino
Cc: Mike Galbraith
Cc: Paul E. McKenney
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1447948054-28668-4-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h  | 5 ++++-
 kernel/fork.c          | 2 +-
 kernel/sched/cputime.c | 6 +++---
 3 files changed, 8 insertions(+), 5 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index f425aac63317..3533168fe7d1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1522,8 +1522,11 @@ struct task_struct {
 	seqlock_t vtime_seqlock;
 	unsigned long long vtime_snap;
 	enum {
-		VTIME_SLEEPING = 0,
+		/* Task is sleeping or running in a CPU with VTIME inactive */
+		VTIME_INACTIVE = 0,
+		/* Task runs in userspace in a CPU with VTIME active */
 		VTIME_USER,
+		/* Task runs in kernelspace in a CPU with VTIME active */
 		VTIME_SYS,
 	} vtime_snap_whence;
 #endif

diff --git a/kernel/fork.c b/kernel/fork.c
index f97f2c449f5c..c0a13706b1a7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1350,7 +1350,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqlock_init(&p->vtime_seqlock);
 	p->vtime_snap = 0;
-	p->vtime_snap_whence = VTIME_SLEEPING;
+	p->vtime_snap_whence = VTIME_INACTIVE;
 #endif
 #if defined(SPLIT_RSS_COUNTING)

diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 1128d4ba6c55..4a18a6ed7723 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -680,7 +680,7 @@ static cputime_t get_vtime_delta(struct task_struct *tsk)
 {
 	unsigned long long delta = vtime_delta(tsk);

-	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
+	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
 	tsk->vtime_snap += delta;

 	/* CHECKME: always safe to convert nsecs to cputime? */
@@ -764,7 +764,7 @@ void vtime_account_idle(struct task_struct *tsk)
 void arch_vtime_task_switch(struct task_struct *prev)
 {
 	write_seqlock(&prev->vtime_seqlock);
-	prev->vtime_snap_whence = VTIME_SLEEPING;
+	prev->vtime_snap_whence = VTIME_INACTIVE;
 	write_sequnlock(&prev->vtime_seqlock);

 	write_seqlock(&current->vtime_seqlock);
@@ -829,7 +829,7 @@ fetch_task_cputime(struct task_struct *t,
 		*s_dst = *s_src;

 		/* Task is sleeping, nothing to add */
-		if (t->vtime_snap_whence == VTIME_SLEEPING ||
+		if (t->vtime_snap_whence == VTIME_INACTIVE ||
 		    is_idle_task(t))
 			continue;
-- cgit v1.2.3
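The renamed state is what readers of the cputime snapshot key off. A minimal
sketch of that check, mirroring fetch_task_cputime() above (the helper name
is an assumption, not part of the patch):

/*
 * Minimal sketch: decide whether a tickless delta still has to be added
 * on top of the snapshotted cputime. VTIME_INACTIVE now covers both
 * "the task is sleeping" and "this CPU is not running vtime at all".
 */
static bool vtime_delta_pending(struct task_struct *t)
{
	return t->vtime_snap_whence != VTIME_INACTIVE && !is_idle_task(t);
}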
From b7ce2277f087fd052e7e1bbf432f7fecbee82bb6 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Thu, 19 Nov 2015 16:47:34 +0100
Subject: sched/cputime: Convert vtime_seqlock to seqcount

The cputime can only be updated by the current task itself, even in the
vtime case. So we can safely use a seqcount instead of a seqlock, as
there is no writer concurrency involved.

Signed-off-by: Frederic Weisbecker
Signed-off-by: Peter Zijlstra (Intel)
Cc: Chris Metcalf
Cc: Christoph Lameter
Cc: Hiroshi Shimamoto
Cc: Linus Torvalds
Cc: Luiz Capitulino
Cc: Mike Galbraith
Cc: Paul E. McKenney
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1447948054-28668-8-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar
---
 include/linux/init_task.h |  2 +-
 include/linux/sched.h     |  2 +-
 kernel/fork.c             |  2 +-
 kernel/sched/cputime.c    | 46 ++++++++++++++++++++++++----------------------
 4 files changed, 27 insertions(+), 25 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1c1ff7e4faa4..f2cb8d45513d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -150,7 +150,7 @@ extern struct task_group root_task_group;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 # define INIT_VTIME(tsk)						\
-	.vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock),	\
+	.vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount),	\
 	.vtime_snap = 0,				\
 	.vtime_snap_whence = VTIME_SYS,
 #else

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3533168fe7d1..3b0de68bce41 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1519,7 +1519,7 @@ struct task_struct {
 	cputime_t gtime;
 	struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-	seqlock_t vtime_seqlock;
+	seqcount_t vtime_seqcount;
 	unsigned long long vtime_snap;
 	enum {
 		/* Task is sleeping or running in a CPU with VTIME inactive */

diff --git a/kernel/fork.c b/kernel/fork.c
index c0a13706b1a7..eea32b55432a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1348,7 +1348,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	prev_cputime_init(&p->prev_cputime);

 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-	seqlock_init(&p->vtime_seqlock);
+	seqcount_init(&p->vtime_seqcount);
 	p->vtime_snap = 0;
 	p->vtime_snap_whence = VTIME_INACTIVE;
 #endif

diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 9989c3f61723..d5ff5c6bf829 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -696,37 +696,37 @@ static void __vtime_account_system(struct task_struct *tsk)

 void vtime_account_system(struct task_struct *tsk)
 {
-	write_seqlock(&tsk->vtime_seqlock);
+	write_seqcount_begin(&tsk->vtime_seqcount);
 	__vtime_account_system(tsk);
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seqcount);
 }

 void vtime_gen_account_irq_exit(struct task_struct *tsk)
 {
-	write_seqlock(&tsk->vtime_seqlock);
+	write_seqcount_begin(&tsk->vtime_seqcount);
 	__vtime_account_system(tsk);
 	if (context_tracking_in_user())
 		tsk->vtime_snap_whence = VTIME_USER;
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seqcount);
 }

 void vtime_account_user(struct task_struct *tsk)
 {
 	cputime_t delta_cpu;

-	write_seqlock(&tsk->vtime_seqlock);
+	write_seqcount_begin(&tsk->vtime_seqcount);
 	delta_cpu = get_vtime_delta(tsk);
 	tsk->vtime_snap_whence = VTIME_SYS;
 	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seqcount);
 }

 void vtime_user_enter(struct task_struct *tsk)
 {
-	write_seqlock(&tsk->vtime_seqlock);
+	write_seqcount_begin(&tsk->vtime_seqcount);
 	__vtime_account_system(tsk);
 	tsk->vtime_snap_whence = VTIME_USER;
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seqcount);
 }

 void vtime_guest_enter(struct task_struct *tsk)
@@ -738,19 +738,19 @@ void vtime_guest_enter(struct task_struct *tsk)
 	 * synchronization against the reader (task_gtime())
 	 * that can thus safely catch up with a tickless delta.
 	 */
-	write_seqlock(&tsk->vtime_seqlock);
+	write_seqcount_begin(&tsk->vtime_seqcount);
 	__vtime_account_system(tsk);
 	current->flags |= PF_VCPU;
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);

 void vtime_guest_exit(struct task_struct *tsk)
 {
-	write_seqlock(&tsk->vtime_seqlock);
+	write_seqcount_begin(&tsk->vtime_seqcount);
 	__vtime_account_system(tsk);
 	current->flags &= ~PF_VCPU;
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);

@@ -763,24 +763,26 @@ void vtime_account_idle(struct task_struct *tsk)

 void arch_vtime_task_switch(struct task_struct *prev)
 {
-	write_seqlock(&prev->vtime_seqlock);
+	write_seqcount_begin(&prev->vtime_seqcount);
 	prev->vtime_snap_whence = VTIME_INACTIVE;
-	write_sequnlock(&prev->vtime_seqlock);
+	write_seqcount_end(&prev->vtime_seqcount);

-	write_seqlock(&current->vtime_seqlock);
+	write_seqcount_begin(&current->vtime_seqcount);
 	current->vtime_snap_whence = VTIME_SYS;
 	current->vtime_snap = sched_clock_cpu(smp_processor_id());
-	write_sequnlock(&current->vtime_seqlock);
+	write_seqcount_end(&current->vtime_seqcount);
 }

 void vtime_init_idle(struct task_struct *t, int cpu)
 {
 	unsigned long flags;

-	write_seqlock_irqsave(&t->vtime_seqlock, flags);
+	local_irq_save(flags);
+	write_seqcount_begin(&t->vtime_seqcount);
 	t->vtime_snap_whence = VTIME_SYS;
 	t->vtime_snap = sched_clock_cpu(cpu);
-	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
+	write_seqcount_end(&t->vtime_seqcount);
+	local_irq_restore(flags);
 }

 cputime_t task_gtime(struct task_struct *t)
@@ -792,13 +794,13 @@ cputime_t task_gtime(struct task_struct *t)
 		return t->gtime;

 	do {
-		seq = read_seqbegin(&t->vtime_seqlock);
+		seq = read_seqcount_begin(&t->vtime_seqcount);

 		gtime = t->gtime;
 		if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
 			gtime += vtime_delta(t);

-	} while (read_seqretry(&t->vtime_seqlock, seq));
+	} while (read_seqcount_retry(&t->vtime_seqcount, seq));

 	return gtime;
 }
@@ -821,7 +823,7 @@ fetch_task_cputime(struct task_struct *t,
 		*udelta = 0;
 		*sdelta = 0;

-		seq = read_seqbegin(&t->vtime_seqlock);
+		seq = read_seqcount_begin(&t->vtime_seqcount);

 		if (u_dst)
 			*u_dst = *u_src;
@@ -845,7 +847,7 @@ fetch_task_cputime(struct task_struct *t,
 			if (t->vtime_snap_whence == VTIME_SYS)
 				*sdelta = delta;
 		}
-	} while (read_seqretry(&t->vtime_seqlock, seq));
+	} while (read_seqcount_retry(&t->vtime_seqcount, seq));
 }
-- cgit v1.2.3
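For reference, the resulting read/write pattern looks roughly like the sketch
below (illustrative helper names, not code from the patch). The point of the
commit message is that the writer is always the task itself, so a plain
seqcount_t suffices: readers retry if they observe a changed or odd sequence,
and there is never a second writer to lock against.

/*
 * Minimal sketch of the seqcount pattern used above.
 */
static void vtime_writer_sketch(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);	/* sequence becomes odd */
	tsk->vtime_snap_whence = VTIME_SYS;
	tsk->vtime_snap = sched_clock_cpu(smp_processor_id());
	write_seqcount_end(&tsk->vtime_seqcount);	/* sequence becomes even again */
}

static unsigned long long vtime_reader_sketch(struct task_struct *t)
{
	unsigned int seq;
	unsigned long long snap;

	do {
		seq = read_seqcount_begin(&t->vtime_seqcount);
		snap = t->vtime_snap;
	} while (read_seqcount_retry(&t->vtime_seqcount, seq));

	return snap;
}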
From 03e0d4610bf4d4a93bfa16b2474ed4fd5243aa71 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 8 Dec 2015 11:28:04 -0500
Subject: watchdog: introduce touch_softlockup_watchdog_sched()

touch_softlockup_watchdog() is used to tell the watchdog that a
scheduler stall is expected. One group of usages comes from paths where
the task may not be able to yield for a long time, such as performing
slow PIO to a finicky device or coming out of suspend. The other is to
account for the scheduler and timer going idle.

For scheduler softlockup detection there's no reason to distinguish the
two cases; however, a workqueue lockup detector is planned, and it can
use the same signals from the former group while the latter would
spuriously prevent detection.

This patch introduces a new function touch_softlockup_watchdog_sched()
and converts the latter group to call it instead. For now it just calls
touch_softlockup_watchdog(), so there's no functional difference.

Signed-off-by: Tejun Heo
Cc: Ulrich Obergfell
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Andrew Morton
---
 include/linux/sched.h    |  4 ++++
 kernel/sched/clock.c     |  2 +-
 kernel/time/tick-sched.c |  6 +++---
 kernel/watchdog.c        | 15 ++++++++++++++-
 4 files changed, 22 insertions(+), 5 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index edad7a43edea..d56cdde2f12c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -377,6 +377,7 @@ extern void scheduler_tick(void);
 extern void sched_show_task(struct task_struct *p);

 #ifdef CONFIG_LOCKUP_DETECTOR
+extern void touch_softlockup_watchdog_sched(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
@@ -387,6 +388,9 @@ extern unsigned int softlockup_panic;
 extern unsigned int hardlockup_panic;
 void lockup_detector_init(void);
 #else
+static inline void touch_softlockup_watchdog_sched(void)
+{
+}
 static inline void touch_softlockup_watchdog(void)
 {
 }

diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c0a205101c23..bf1f37507a49 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -354,7 +354,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 		return;

 	sched_clock_tick();
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sched();
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 7c7ec4515983..58219f6ff3c6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -143,7 +143,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 	 * when we go busy again does not account too much ticks.
 	 */
 	if (ts->tick_stopped) {
-		touch_softlockup_watchdog();
+		touch_softlockup_watchdog_sched();
 		if (is_idle_task(current))
 			ts->idle_jiffies++;
 	}
@@ -430,7 +430,7 @@ static void tick_nohz_update_jiffies(ktime_t now)
 	tick_do_update_jiffies64(now);
 	local_irq_restore(flags);

-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sched();
 }

 /*
@@ -701,7 +701,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 	update_cpu_load_nohz();
 	calc_load_exit_idle();
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sched();
 	/*
 	 * Cancel the scheduled timer and restore the tick
 	 */
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 18f34cf75f74..9eaf3dbec7e8 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -225,7 +225,15 @@ static void __touch_watchdog(void)
 	__this_cpu_write(watchdog_touch_ts, get_timestamp());
 }

-void touch_softlockup_watchdog(void)
+/**
+ * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
+ *
+ * Call when the scheduler may have stalled for legitimate reasons
+ * preventing the watchdog task from executing - e.g. the scheduler
+ * entering idle state.  This should only be used for scheduler events.
+ * Use touch_softlockup_watchdog() for everything else.
+ */
+void touch_softlockup_watchdog_sched(void)
 {
 	/*
 	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
@@ -233,6 +241,11 @@ void touch_softlockup_watchdog(void)
 	 */
 	raw_cpu_write(watchdog_touch_ts, 0);
 }
+
+void touch_softlockup_watchdog(void)
+{
+	touch_softlockup_watchdog_sched();
+}
 EXPORT_SYMBOL(touch_softlockup_watchdog);

 void touch_all_softlockup_watchdogs(void)
-- cgit v1.2.3
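As a usage illustration (the callers below are assumptions, not from the
patch): after this split, scheduler and timer paths use the new variant while
everything else keeps the old one, so a future workqueue lockup detector can
ignore the purely scheduler-side touches.

/*
 * Illustrative callers only.
 */
static void example_idle_exit(void)
{
	/* The tick was legitimately stopped; only softlockup detection cares. */
	touch_softlockup_watchdog_sched();
}

static void example_slow_pio(void)
{
	/* Long busy-wait on a finicky device: touch every lockup detector. */
	touch_softlockup_watchdog();
}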
From 570f52412ae9432c56897472791ea8db420cbaf1 Mon Sep 17 00:00:00 2001
From: Sergey Senozhatsky
Date: Fri, 1 Jan 2016 23:03:01 +0900
Subject: sched/core: Check tgid in is_global_init()

Our global init task can have sub-threads, so the ->pid check is not
reliable enough for is_global_init(); we need to check the tgid instead.
This was spotted by Oleg, and a fix was proposed by Richard a long time
ago (see the link below).

Oleg wrote:

: Because is_global_init() is only true for the main thread of /sbin/init.
:
: Just look at oom_unkillable_task(). It tries to not kill init. But, say,
: select_bad_process() can happily find a sub-thread of is_global_init()
: and still kill it.

I recently hit the problem in question; re-sending the patch (to the
best of my knowledge it has never been submitted) with an updated
function comment. Credit goes to Oleg and Richard.

Suggested-by: Richard Guy Briggs
Reported-by: Oleg Nesterov
Signed-off-by: Sergey Senozhatsky
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Serge Hallyn
Cc: Andrew Morton
Cc: Eric W. Biederman
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Serge E. Hallyn
Cc: Sergey Senozhatsky
Cc: Thomas Gleixner
Link: https://www.redhat.com/archives/linux-audit/2013-December/msg00086.html
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index edad7a43edea..9cf9dd1c4cbe 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2002,7 +2002,8 @@ static inline int pid_alive(const struct task_struct *p)
 }

 /**
- * is_global_init - check if a task structure is init
+ * is_global_init - check if a task structure is init. Since init
+ * is free to have sub-threads we need to check tgid.
  * @tsk: Task structure to be checked.
  *
  * Check if a task structure is the first user space task the kernel created.
@@ -2011,7 +2012,7 @@ static inline int pid_alive(const struct task_struct *p)
  */
 static inline int is_global_init(struct task_struct *tsk)
 {
-	return tsk->pid == 1;
+	return task_tgid_nr(tsk) == 1;
 }

 extern struct pid *cad_pid;
-- cgit v1.2.3

From be958bdc96f18bc1356177bbb79d46ea0c037b96 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 25 Nov 2015 16:02:07 +0100
Subject: sched/core: Fix unserialized r-m-w scribbling stuff

Some of the sched bitfields (notably sched_reset_on_fork) can be set on
tasks other than current; this can cause the r-m-w to race with other
updates.

Since all the sched bits are serialized by scheduler locks, pull them
into a separate word.

Reported-by: Tejun Heo
Signed-off-by: Peter Zijlstra (Intel)
Cc: Dmitry Vyukov
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Sasha Levin
Cc: Thomas Gleixner
Cc: akpm@linux-foundation.org
Cc: hannes@cmpxchg.org
Cc: mhocko@kernel.org
Cc: vdavydov@parallels.com
Link: http://lkml.kernel.org/r/20151125150207.GM11639@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9cf9dd1c4cbe..fa39434e3fdd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1455,14 +1455,15 @@ struct task_struct {
 	/* Used for emulating ABI behavior of previous Linux versions */
 	unsigned int personality;

-	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
-				 * execve */
-	unsigned in_iowait:1;
-
-	/* Revert to default priority/policy when forking */
+	/* scheduler bits, serialized by scheduler locks */
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 	unsigned sched_migrated:1;
+	unsigned :0; /* force alignment to the next boundary */
+
+	/* unserialized, strictly 'current' */
+	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
+	unsigned in_iowait:1;
 #ifdef CONFIG_MEMCG
 	unsigned memcg_may_oom:1;
 #endif
-- cgit v1.2.3
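The race being fixed is the classic adjacent-bitfield read-modify-write. A
small illustration (not from the patch) of why bits sharing a word cannot be
updated under different serialization rules, and what the `unsigned :0;`
member buys:

/*
 * Illustration only: two one-bit fields in the same word are updated by
 * loading the whole word, modifying one bit and storing the whole word
 * back.  Two CPUs doing that "independently" can overwrite each other's
 * bit, even though the fields look unrelated in the source.
 */
struct example_flags {
	unsigned sched_bit:1;	/* serialized by scheduler locks */
	unsigned :0;		/* force the next field into a new word */
	unsigned current_bit:1;	/* only ever written by 'current' */
};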
From 5a1078043f844074cbd53981432778a8d5dd56e9 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Tue, 8 Dec 2015 21:23:59 +0100
Subject: sched/core: Move sched_entity::avg into separate cache line

The sched_entity::avg member collides with read-mostly sched_entity
data. The perf c2c tool showed many read HITM accesses across many CPUs
for sched_entity's cfs_rq and my_q, while at the same time there were
tons of stores to avg.

After placing sched_entity::avg into a separate cache line, perf bench
sched pipe showed a speedup of around 20 seconds.

NOTE: I cut out all perf events except cycles and instructions from the
following output.

Before:

  $ perf stat -r 5 perf bench sched pipe -l 10000000
  # Running 'sched/pipe' benchmark:
  # Executed 10000000 pipe operations between two processes

       Total time: 270.348 [sec]

         27.034805 usecs/op
             36989 ops/sec
  ...
   245,537,074,035      cycles                    #    1.433 GHz
   187,264,548,519      instructions              #    0.77  insns per cycle

       272.653840535 seconds time elapsed            ( +- 1.31% )

After:

  $ perf stat -r 5 perf bench sched pipe -l 10000000
  # Running 'sched/pipe' benchmark:
  # Executed 10000000 pipe operations between two processes

       Total time: 251.076 [sec]

         25.107678 usecs/op
             39828 ops/sec
  ...
   244,573,513,928      cycles                    #    1.572 GHz
   187,409,641,157      instructions              #    0.76  insns per cycle

       251.679315188 seconds time elapsed            ( +- 0.31% )

Signed-off-by: Jiri Olsa
Signed-off-by: Peter Zijlstra (Intel)
Cc: Arnaldo Carvalho de Melo
Cc: Don Zickus
Cc: Joe Mario
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1449606239-28602-1-git-send-email-jolsa@kernel.org
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 791b47e40317..0c0e78102850 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1268,8 +1268,13 @@ struct sched_entity {
 #endif

 #ifdef CONFIG_SMP
-	/* Per entity load average tracking */
-	struct sched_avg	avg;
+	/*
+	 * Per entity load average tracking.
+	 *
+	 * Put into separate cache line so it does not
+	 * collide with read-mostly values above.
+	 */
+	struct sched_avg	avg ____cacheline_aligned_in_smp;
 #endif
 };
-- cgit v1.2.3
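The effect this patch relies on is false sharing: a write-mostly field on the
same cache line as read-mostly fields forces every store to invalidate the
line in other CPUs' caches. A small user-space sketch (an assumption, not
kernel code) of the layout change that ____cacheline_aligned_in_smp performs
for sched_entity::avg:

#include <stdio.h>
#include <stddef.h>

#define CACHELINE 64

/* Write-mostly "avg" shares a cache line with read-mostly fields: every
 * store to avg invalidates the line other CPUs read cfs_rq/my_q from. */
struct entity_colliding {
	void *cfs_rq;	/* read-mostly */
	void *my_q;	/* read-mostly */
	long  avg;	/* written on every load-tracking update */
};

/* Rough equivalent of ____cacheline_aligned_in_smp: avg starts a new line. */
struct entity_separated {
	void *cfs_rq;
	void *my_q;
	long  avg __attribute__((aligned(CACHELINE)));
};

int main(void)
{
	printf("colliding: offsetof(avg) = %zu, sizeof = %zu\n",
	       offsetof(struct entity_colliding, avg), sizeof(struct entity_colliding));
	printf("separated: offsetof(avg) = %zu, sizeof = %zu\n",
	       offsetof(struct entity_separated, avg), sizeof(struct entity_separated));
	return 0;
}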