diff options
author | Peter Zijlstra <peterz@infradead.org> | 2020-02-24 23:40:29 +0100 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2020-05-05 11:27:41 +0200 |
commit | 83249540375a77eafeb29026dfc10318093e4710 (patch) | |
tree | c1956f4001b1248070edacf78f4b038b3dbb2043 | |
parent | 6d430b371b367a636937a016edab6c3ee978b6d0 (diff) |
x86,tracing: Robustify ftrace_nmi_enter()
ftrace_nmi_enter()
trace_hwlat_callback()
trace_clock_local()
sched_clock()
paravirt_sched_clock()
native_sched_clock()
All of these must not be traced or kprobed, as they will be called from do_debug()
before the kprobe handler runs.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
-rw-r--r-- | arch/x86/include/asm/paravirt.h | 2 | ||||
-rw-r--r-- | arch/x86/kernel/tsc.c | 4 | ||||
-rw-r--r-- | include/linux/ftrace_irq.h | 4 | ||||
-rw-r--r-- | kernel/trace/trace_clock.c | 3 | ||||
-rw-r--r-- | kernel/trace/trace_hwlat.c | 2 |
5 files changed, 8 insertions, 7 deletions
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 694d8daf4983..90e7c027224f 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -17,7 +17,7 @@ #include <linux/cpumask.h> #include <asm/frame.h> -static inline unsigned long long paravirt_sched_clock(void) +static __always_inline unsigned long long paravirt_sched_clock(void) { return PVOP_CALL0(unsigned long long, time.sched_clock); } diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index fdd4c1078632..eb6e8d01b826 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -207,7 +207,7 @@ static void __init cyc2ns_init_secondary_cpus(void) /* * Scheduler clock - returns current time in nanosec units. */ -u64 native_sched_clock(void) +noinstr u64 native_sched_clock(void) { if (static_branch_likely(&__use_tsc)) { u64 tsc_now = rdtsc(); @@ -240,7 +240,7 @@ u64 native_sched_clock_from_tsc(u64 tsc) /* We need to define a real function for sched_clock, to override the weak default version */ #ifdef CONFIG_PARAVIRT -unsigned long long sched_clock(void) +noinstr unsigned long long sched_clock(void) { return paravirt_sched_clock(); } diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h index 0abd9a1d2852..67c0cae4fb9a 100644 --- a/include/linux/ftrace_irq.h +++ b/include/linux/ftrace_irq.h @@ -7,7 +7,7 @@ extern bool trace_hwlat_callback_enabled; extern void trace_hwlat_callback(bool enter); #endif -static inline void ftrace_nmi_enter(void) +static __always_inline void ftrace_nmi_enter(void) { #ifdef CONFIG_HWLAT_TRACER if (trace_hwlat_callback_enabled) @@ -15,7 +15,7 @@ static inline void ftrace_nmi_enter(void) #endif } -static inline void ftrace_nmi_exit(void) +static __always_inline void ftrace_nmi_exit(void) { #ifdef CONFIG_HWLAT_TRACER if (trace_hwlat_callback_enabled) diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index aaf6793ededa..b8739df3845c 100644 --- a/kernel/trace/trace_clock.c +++ 
b/kernel/trace/trace_clock.c @@ -22,6 +22,7 @@ #include <linux/sched/clock.h> #include <linux/ktime.h> #include <linux/trace_clock.h> +#include <linux/kprobes.h> /* * trace_clock_local(): the simplest and least coherent tracing clock. @@ -29,7 +30,7 @@ * Useful for tracing that does not cross to other CPUs nor * does it go through idle events. */ -u64 notrace trace_clock_local(void) +u64 noinstr trace_clock_local(void) { u64 clock; diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index e2be7bb7ef7e..c9b4ba9749e6 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -139,7 +139,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample) #define init_time(a, b) (a = b) #define time_u64(a) a -void trace_hwlat_callback(bool enter) +noinstr void trace_hwlat_callback(bool enter) { if (smp_processor_id() != nmi_cpu) return; |