-rw-r--r--  arch/x86/kvm/mmutrace.h | 2
-rw-r--r--  arch/x86/kvm/svm.c | 2
-rw-r--r--  arch/x86/kvm/vmx.c | 2
-rw-r--r--  include/linux/kernel.h | 6
-rw-r--r--  include/linux/module.h | 2
-rw-r--r--  include/linux/perf_event.h | 2
-rw-r--r--  include/linux/syscalls.h | 12
-rw-r--r--  include/linux/trace_events.h (renamed from include/linux/ftrace_event.h) | 177
-rw-r--r--  include/trace/define_trace.h | 3
-rw-r--r--  include/trace/events/power.h | 2
-rw-r--r--  include/trace/perf.h | 350
-rw-r--r--  include/trace/syscall.h | 6
-rw-r--r--  include/trace/trace_events.h (renamed from include/trace/ftrace.h) | 413
-rw-r--r--  kernel/events/core.c | 2
-rw-r--r--  kernel/module.c | 2
-rw-r--r--  kernel/rcu/tiny.c | 2
-rw-r--r--  kernel/rcu/tree.c | 2
-rw-r--r--  kernel/trace/blktrace.c | 4
-rw-r--r--  kernel/trace/ring_buffer.c | 221
-rw-r--r--  kernel/trace/ring_buffer_benchmark.c | 23
-rw-r--r--  kernel/trace/trace.c | 23
-rw-r--r--  kernel/trace/trace.h | 42
-rw-r--r--  kernel/trace/trace_branch.c | 4
-rw-r--r--  kernel/trace/trace_clock.c | 3
-rw-r--r--  kernel/trace/trace_event_perf.c | 20
-rw-r--r--  kernel/trace/trace_events.c | 304
-rw-r--r--  kernel/trace/trace_events_filter.c | 86
-rw-r--r--  kernel/trace/trace_events_trigger.c | 70
-rw-r--r--  kernel/trace/trace_export.c | 10
-rw-r--r--  kernel/trace/trace_functions_graph.c | 8
-rw-r--r--  kernel/trace/trace_kprobe.c | 70
-rw-r--r--  kernel/trace/trace_mmiotrace.c | 4
-rw-r--r--  kernel/trace/trace_output.c | 78
-rw-r--r--  kernel/trace/trace_output.h | 2
-rw-r--r--  kernel/trace/trace_probe.h | 8
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 4
-rw-r--r--  kernel/trace/trace_syscalls.c | 72
-rw-r--r--  kernel/trace/trace_uprobe.c | 46
-rw-r--r--  mm/debug.c | 2
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c | 4
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 4
41 files changed, 1024 insertions(+), 1075 deletions(-)
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index ce463a9cc8fb..5a24b846a1cb 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -2,7 +2,7 @@
#define _TRACE_KVMMMU_H
#include <linux/tracepoint.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 851a9a1c6dfc..602b974a60a6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -29,7 +29,7 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#include <linux/slab.h>
#include <asm/perf_event.h>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ab53d80b0f64..e856dd566f4c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -28,7 +28,7 @@
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/hrtimer.h>
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 060dd7b61c6d..5acf5b70866d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -533,12 +533,6 @@ bool mac_pton(const char *s, u8 *mac);
*
* Most likely, you want to use tracing_on/tracing_off.
*/
-#ifdef CONFIG_RING_BUFFER
-/* trace_off_permanent stops recording with no way to bring it back */
-void tracing_off_permanent(void);
-#else
-static inline void tracing_off_permanent(void) { }
-#endif
enum ftrace_dump_mode {
DUMP_NONE,
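
[Illustrative sketch, not part of the patch: with tracing_off_permanent() removed above, the comment's pointer to tracing_on/tracing_off is the surviving kernel-side API. A minimal example with a hypothetical helper, using only calls declared in <linux/kernel.h>:]

    #include <linux/kernel.h>

    static void example_freeze_trace(void)     /* hypothetical helper */
    {
            tracing_off();          /* stop recording into all ring buffers */
            ftrace_dump(DUMP_ALL);  /* dump what was captured so far */
            tracing_on();           /* unlike the removed call, this is reversible */
    }
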
diff --git a/include/linux/module.h b/include/linux/module.h
index 1e5436042eb0..255fca74de7d 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -336,7 +336,7 @@ struct module {
const char **trace_bprintk_fmt_start;
#endif
#ifdef CONFIG_EVENT_TRACING
- struct ftrace_event_call **trace_events;
+ struct trace_event_call **trace_events;
unsigned int num_trace_events;
struct trace_enum_map **trace_enums;
unsigned int num_trace_enums;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3d80c432ede7..2027809433b3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -484,7 +484,7 @@ struct perf_event {
void *overflow_handler_context;
#ifdef CONFIG_EVENT_TRACING
- struct ftrace_event_call *tp_event;
+ struct trace_event_call *tp_event;
struct event_filter *filter;
#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops ftrace_ops;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index bb51becf23f8..b45c45b8c829 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -111,14 +111,14 @@ union bpf_attr;
#define __SC_STR_ADECL(t, a) #a
#define __SC_STR_TDECL(t, a) #t
-extern struct ftrace_event_class event_class_syscall_enter;
-extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_class event_class_syscall_enter;
+extern struct trace_event_class event_class_syscall_exit;
extern struct trace_event_functions enter_syscall_print_funcs;
extern struct trace_event_functions exit_syscall_print_funcs;
#define SYSCALL_TRACE_ENTER_EVENT(sname) \
static struct syscall_metadata __syscall_meta_##sname; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
event_enter_##sname = { \
.class = &event_class_syscall_enter, \
{ \
@@ -128,13 +128,13 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.data = (void *)&__syscall_meta_##sname,\
.flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
__attribute__((section("_ftrace_events"))) \
*__event_enter_##sname = &event_enter_##sname;
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
static struct syscall_metadata __syscall_meta_##sname; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
event_exit_##sname = { \
.class = &event_class_syscall_exit, \
{ \
@@ -144,7 +144,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.data = (void *)&__syscall_meta_##sname,\
.flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
__attribute__((section("_ftrace_events"))) \
*__event_exit_##sname = &event_exit_##sname;
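
[Illustrative hand-expansion, not part of the patch: for a hypothetical syscall name sname = openat, SYSCALL_TRACE_ENTER_EVENT(openat) now emits roughly the following, with the renamed struct trace_event_call in place of ftrace_event_call; the union initializer elided by the hunk above is left elided here too:]

    static struct syscall_metadata __syscall_meta_openat;
    static struct trace_event_call __used
    event_enter_openat = {
            .class  = &event_class_syscall_enter,
            {
                    /* union member elided in the hunk above */
            },
            .data   = (void *)&__syscall_meta_openat,
            .flags  = TRACE_EVENT_FL_CAP_ANY,
    };
    static struct trace_event_call __used
    __attribute__((section("_ftrace_events")))
    *__event_enter_openat = &event_enter_openat;
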
diff --git a/include/linux/ftrace_event.h b/include/linux/trace_events.h
index f9ecf63d47f1..1063c850dbab 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/trace_events.h
@@ -1,6 +1,6 @@
-#ifndef _LINUX_FTRACE_EVENT_H
-#define _LINUX_FTRACE_EVENT_H
+#ifndef _LINUX_TRACE_EVENT_H
+#define _LINUX_TRACE_EVENT_H
#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
@@ -25,35 +25,35 @@ struct trace_print_flags_u64 {
const char *name;
};
-const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
- unsigned long flags,
- const struct trace_print_flags *flag_array);
+const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
+ unsigned long flags,
+ const struct trace_print_flags *flag_array);
-const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
- const struct trace_print_flags *symbol_array);
+const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+ const struct trace_print_flags *symbol_array);
#if BITS_PER_LONG == 32
-const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
- unsigned long long val,
- const struct trace_print_flags_u64
+const char *trace_print_symbols_seq_u64(struct trace_seq *p,
+ unsigned long long val,
+ const struct trace_print_flags_u64
*symbol_array);
#endif
-const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
- unsigned int bitmask_size);
+const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+ unsigned int bitmask_size);
-const char *ftrace_print_hex_seq(struct trace_seq *p,
- const unsigned char *buf, int len);
+const char *trace_print_hex_seq(struct trace_seq *p,
+ const unsigned char *buf, int len);
-const char *ftrace_print_array_seq(struct trace_seq *p,
+const char *trace_print_array_seq(struct trace_seq *p,
const void *buf, int count,
size_t el_size);
struct trace_iterator;
struct trace_event;
-int ftrace_raw_output_prep(struct trace_iterator *iter,
- struct trace_event *event);
+int trace_raw_output_prep(struct trace_iterator *iter,
+ struct trace_event *event);
/*
* The trace entry - the most basic unit of tracing. This is what
@@ -68,7 +68,7 @@ struct trace_entry {
int pid;
};
-#define FTRACE_MAX_EVENT \
+#define TRACE_EVENT_TYPE_MAX \
((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
/*
@@ -132,8 +132,8 @@ struct trace_event {
struct trace_event_functions *funcs;
};
-extern int register_ftrace_event(struct trace_event *event);
-extern int unregister_ftrace_event(struct trace_event *event);
+extern int register_trace_event(struct trace_event *event);
+extern int unregister_trace_event(struct trace_event *event);
/* Return values for print_line callback */
enum print_line_t {
@@ -157,11 +157,11 @@ static inline enum print_line_t trace_handle_return(struct trace_seq *s)
void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
int pc);
-struct ftrace_event_file;
+struct trace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
- struct ftrace_event_file *ftrace_file,
+ struct trace_event_file *trace_file,
int type, unsigned long len,
unsigned long flags, int pc);
struct ring_buffer_event *
@@ -183,7 +183,7 @@ void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
void tracing_record_cmdline(struct task_struct *tsk);
-int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
+int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
struct event_filter;
@@ -200,50 +200,39 @@ enum trace_reg {
#endif
};
-struct ftrace_event_call;
+struct trace_event_call;
-struct ftrace_event_class {
+struct trace_event_class {
const char *system;
void *probe;
#ifdef CONFIG_PERF_EVENTS
void *perf_probe;
#endif
- int (*reg)(struct ftrace_event_call *event,
+ int (*reg)(struct trace_event_call *event,
enum trace_reg type, void *data);
- int (*define_fields)(struct ftrace_event_call *);
- struct list_head *(*get_fields)(struct ftrace_event_call *);
+ int (*define_fields)(struct trace_event_call *);
+ struct list_head *(*get_fields)(struct trace_event_call *);
struct list_head fields;
- int (*raw_init)(struct ftrace_event_call *);
+ int (*raw_init)(struct trace_event_call *);
};
-extern int ftrace_event_reg(struct ftrace_event_call *event,
+extern int trace_event_reg(struct trace_event_call *event,
enum trace_reg type, void *data);
-int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
- char *fmt, ...);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
- char *type, int len, char *item, int offset,
- int field_size, int sign, int filter);
-
-struct ftrace_event_buffer {
+struct trace_event_buffer {
struct ring_buffer *buffer;
struct ring_buffer_event *event;
- struct ftrace_event_file *ftrace_file;
+ struct trace_event_file *trace_file;
void *entry;
unsigned long flags;
int pc;
};
-void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
- struct ftrace_event_file *ftrace_file,
+void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
+ struct trace_event_file *trace_file,
unsigned long len);
-void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
- char *type, int len, char *item, int offset,
- int field_size, int sign, int filter);
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
enum {
TRACE_EVENT_FL_FILTERED_BIT,
@@ -261,11 +250,11 @@ enum {
* FILTERED - The event has a filter attached
* CAP_ANY - Any user can enable for perf
* NO_SET_FILTER - Set when filter has error and is to be ignored
- * IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
+ * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
* WAS_ENABLED - Set and stays set when an event was ever enabled
* (used for module unloading, if a module event is enabled,
* it is best to clear the buffers that used it).
- * USE_CALL_FILTER - For ftrace internal events, don't use file filter
+ * USE_CALL_FILTER - For trace internal events, don't use file filter
* TRACEPOINT - Event is a tracepoint
* KPROBE - Event is a kprobe
*/
@@ -280,9 +269,9 @@ enum {
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
};
-struct ftrace_event_call {
+struct trace_event_call {
struct list_head list;
- struct ftrace_event_class *class;
+ struct trace_event_class *class;
union {
char *name;
/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
@@ -297,7 +286,7 @@ struct ftrace_event_call {
* bit 0: filter_active
* bit 1: allow trace by non root (cap any)
* bit 2: failed to apply filter
- * bit 3: ftrace internal event (do not enable)
+ * bit 3: trace internal event (do not enable)
* bit 4: Event was enabled by module
* bit 5: use call filter rather than file filter
* bit 6: Event is a tracepoint
@@ -309,13 +298,13 @@ struct ftrace_event_call {
struct hlist_head __percpu *perf_events;
struct bpf_prog *prog;
- int (*perf_perm)(struct ftrace_event_call *,
+ int (*perf_perm)(struct trace_event_call *,
struct perf_event *);
#endif
};
static inline const char *
-ftrace_event_name(struct ftrace_event_call *call)
+trace_event_name(struct trace_event_call *call)
{
if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
return call->tp ? call->tp->name : NULL;
@@ -324,21 +313,21 @@ ftrace_event_name(struct ftrace_event_call *call)
}
struct trace_array;
-struct ftrace_subsystem_dir;
+struct trace_subsystem_dir;
enum {
- FTRACE_EVENT_FL_ENABLED_BIT,
- FTRACE_EVENT_FL_RECORDED_CMD_BIT,
- FTRACE_EVENT_FL_FILTERED_BIT,
- FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
- FTRACE_EVENT_FL_SOFT_MODE_BIT,
- FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
- FTRACE_EVENT_FL_TRIGGER_COND_BIT,
+ EVENT_FILE_FL_ENABLED_BIT,
+ EVENT_FILE_FL_RECORDED_CMD_BIT,
+ EVENT_FILE_FL_FILTERED_BIT,
+ EVENT_FILE_FL_NO_SET_FILTER_BIT,
+ EVENT_FILE_FL_SOFT_MODE_BIT,
+ EVENT_FILE_FL_SOFT_DISABLED_BIT,
+ EVENT_FILE_FL_TRIGGER_MODE_BIT,
+ EVENT_FILE_FL_TRIGGER_COND_BIT,
};
/*
- * Ftrace event file flags:
+ * Event file flags:
* ENABLED - The event is enabled
* RECORDED_CMD - The comms should be recorded at sched_switch
* FILTERED - The event has a filter attached
@@ -350,23 +339,23 @@ enum {
* TRIGGER_COND - When set, one or more triggers has an associated filter
*/
enum {
- FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
- FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
- FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
- FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
- FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
- FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
- FTRACE_EVENT_FL_TRIGGER_MODE = (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT),
- FTRACE_EVENT_FL_TRIGGER_COND = (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
+ EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
+ EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
+ EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT),
+ EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
+ EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
+ EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
+ EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
+ EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
};
-struct ftrace_event_file {
+struct trace_event_file {
struct list_head list;
- struct ftrace_event_call *event_call;
+ struct trace_event_call *event_call;
struct event_filter *filter;
struct dentry *dir;
struct trace_array *tr;
- struct ftrace_subsystem_dir *system;
+ struct trace_subsystem_dir *system;
struct list_head triggers;
/*
@@ -399,7 +388,7 @@ struct ftrace_event_file {
early_initcall(trace_init_flags_##name);
#define __TRACE_EVENT_PERF_PERM(name, expr...) \
- static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+ static int perf_perm_##name(struct trace_event_call *tp_event, \
struct perf_event *p_event) \
{ \
return ({ expr; }); \
@@ -425,19 +414,19 @@ enum event_trigger_type {
extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+extern int filter_check_discard(struct trace_event_file *file, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event);
-extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event);
-extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
+extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
void *rec);
-extern void event_triggers_post_call(struct ftrace_event_file *file,
+extern void event_triggers_post_call(struct trace_event_file *file,
enum event_trigger_type tt);
/**
- * ftrace_trigger_soft_disabled - do triggers and test if soft disabled
+ * trace_trigger_soft_disabled - do triggers and test if soft disabled
* @file: The file pointer of the event to test
*
* If any triggers without filters are attached to this event, they
@@ -446,14 +435,14 @@ extern void event_triggers_post_call(struct ftrace_event_file *file,
* otherwise false.
*/
static inline bool
-ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
+trace_trigger_soft_disabled(struct trace_event_file *file)
{
unsigned long eflags = file->flags;
- if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
- if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+ if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
event_triggers_call(file, NULL);
- if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+ if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
return true;
}
return false;
@@ -473,7 +462,7 @@ ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
* Returns true if the event is discarded, false otherwise.
*/
static inline bool
-__event_trigger_test_discard(struct ftrace_event_file *file,
+__event_trigger_test_discard(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
void *entry,
@@ -481,10 +470,10 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
{
unsigned long eflags = file->flags;
- if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+ if (eflags & EVENT_FILE_FL_TRIGGER_COND)
*tt = event_triggers_call(file, entry);
- if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags))
+ if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
ring_buffer_discard_commit(buffer, event);
else if (!filter_check_discard(file, entry, buffer, event))
return false;
@@ -506,7 +495,7 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
* if the event is soft disabled and should be discarded.
*/
static inline void
-event_trigger_unlock_commit(struct ftrace_event_file *file,
+event_trigger_unlock_commit(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc)
@@ -537,7 +526,7 @@ event_trigger_unlock_commit(struct ftrace_event_file *file,
* trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
*/
static inline void
-event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
+event_trigger_unlock_commit_regs(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc,
@@ -570,12 +559,12 @@ enum {
FILTER_TRACE_FN,
};
-extern int trace_event_raw_init(struct ftrace_event_call *call);
-extern int trace_define_field(struct ftrace_event_call *call, const char *type,
+extern int trace_event_raw_init(struct trace_event_call *call);
+extern int trace_define_field(struct trace_event_call *call, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type);
-extern int trace_add_event_call(struct ftrace_event_call *call);
-extern int trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_add_event_call(struct trace_event_call *call);
+extern int trace_remove_event_call(struct trace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)1)
@@ -624,4 +613,4 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
}
#endif
-#endif /* _LINUX_FTRACE_EVENT_H */
+#endif /* _LINUX_TRACE_EVENT_H */
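
[Hedged usage sketch, not from the patch: the renamed register_trace_event()/unregister_trace_event() pair declared above is used like this; the event and its output callback are hypothetical, mirroring how blktrace.c is updated further down. register_trace_event() returns the assigned event type, or 0 on failure:]

    #include <linux/init.h>
    #include <linux/trace_events.h>

    static enum print_line_t my_evt_trace(struct trace_iterator *iter, int flags,
                                          struct trace_event *event)
    {
            trace_seq_puts(&iter->seq, "my event\n");
            return trace_handle_return(&iter->seq);
    }

    static struct trace_event_functions my_evt_funcs = {
            .trace  = my_evt_trace,
    };

    static struct trace_event my_evt = {
            .funcs  = &my_evt_funcs,        /* .type left 0: core assigns an ID */
    };

    static int __init my_evt_init(void)
    {
            if (!register_trace_event(&my_evt))
                    return -ENODEV;
            return 0;
    }
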
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 02e1003568a4..09b3880105a9 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -87,7 +87,8 @@
#define DECLARE_TRACE(name, proto, args)
#ifdef CONFIG_EVENT_TRACING
-#include <trace/ftrace.h>
+#include <trace/trace_events.h>
+#include <trace/perf.h>
#endif
#undef TRACE_EVENT
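
[For context, not part of the patch: the consumer side is unchanged by the split. A minimal event header, assuming it lives at include/trace/events/sample.h and using made-up names throughout, still funnels through define_trace.h, which now pulls in both new stage headers:]

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM sample

    #if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_SAMPLE_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(sample_event,
            TP_PROTO(int value),
            TP_ARGS(value),
            TP_STRUCT__entry(
                    __field(int, value)
            ),
            TP_fast_assign(
                    __entry->value = value;
            ),
            TP_printk("value=%d", __entry->value)
    );

    #endif /* _TRACE_SAMPLE_H */

    /* One .c file defines CREATE_TRACE_POINTS before this include; that is
     * when define_trace.h expands the stages, now via <trace/trace_events.h>
     * and <trace/perf.h>: */
    #include <trace/define_trace.h>
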
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 630d1e5e4de0..284244ebfe8d 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -7,7 +7,7 @@
#include <linux/ktime.h>
#include <linux/pm_qos.h>
#include <linux/tracepoint.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#define TPS(x) tracepoint_string(x)
diff --git a/include/trace/perf.h b/include/trace/perf.h
new file mode 100644
index 000000000000..1b5443cebedc
--- /dev/null
+++ b/include/trace/perf.h
@@ -0,0 +1,350 @@
+/*
+ * Stage 4 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct trace_event_call event_<call>;
+ *
+ * static void trace_event_raw_event_<call>(void *__data, proto)
+ * {
+ * struct trace_event_file *trace_file = __data;
+ * struct trace_event_call *event_call = trace_file->event_call;
+ * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
+ * unsigned long eflags = trace_file->flags;
+ * enum event_trigger_type __tt = ETT_NONE;
+ * struct ring_buffer_event *event;
+ * struct trace_event_raw_<call> *entry; <-- defined in stage 1
+ * struct ring_buffer *buffer;
+ * unsigned long irq_flags;
+ * int __data_size;
+ * int pc;
+ *
+ * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ * if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+ * event_triggers_call(trace_file, NULL);
+ * if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+ * return;
+ * }
+ *
+ * local_save_flags(irq_flags);
+ * pc = preempt_count();
+ *
+ * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
+ *
+ * event = trace_event_buffer_lock_reserve(&buffer, trace_file,
+ * event_<call>->event.type,
+ * sizeof(*entry) + __data_size,
+ * irq_flags, pc);
+ * if (!event)
+ * return;
+ * entry = ring_buffer_event_data(event);
+ *
+ * { <assign>; } <-- Here we assign the entries by the __field and
+ * __array macros.
+ *
+ * if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+ * __tt = event_triggers_call(trace_file, entry);
+ *
+ * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
+ * &trace_file->flags))
+ * ring_buffer_discard_commit(buffer, event);
+ * else if (!filter_check_discard(trace_file, entry, buffer, event))
+ * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *
+ * if (__tt)
+ * event_triggers_post_call(trace_file, __tt);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ * .trace = trace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct trace_event_class __used event_class_<template> = {
+ * .system = "<system>",
+ * .define_fields = trace_event_define_fields_<call>,
+ * .fields = LIST_HEAD_INIT(event_class_##call.fields),
+ * .raw_init = trace_event_raw_init,
+ * .probe = trace_event_raw_event_##call,
+ * .reg = trace_event_reg,
+ * };
+ *
+ * static struct trace_event_call event_<call> = {
+ * .class = event_class_<template>,
+ * {
+ * .tp = &__tracepoint_<call>,
+ * },
+ * .event = &ftrace_event_type_<call>,
+ * .print_fmt = print_fmt_<call>,
+ * .flags = TRACE_EVENT_FL_TRACEPOINT,
+ * };
+ * // it's only safe to use pointers when doing linker tricks to

+ * // create an array.
+ * static struct trace_event_call __used
+ * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
+ *
+ */
+
+#ifdef CONFIG_PERF_EVENTS
+
+#define _TRACE_PERF_PROTO(call, proto) \
+ static notrace void \
+ perf_trace_##call(void *__data, proto);
+
+#define _TRACE_PERF_INIT(call) \
+ .perf_probe = perf_trace_##call,
+
+#else
+#define _TRACE_PERF_PROTO(call, proto)
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __assign_str
+#define __assign_str(dst, src) \
+ strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits) \
+ memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef __perf_addr
+#define __perf_addr(a) (a)
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ \
+static notrace void \
+trace_event_raw_event_##call(void *__data, proto) \
+{ \
+ struct trace_event_file *trace_file = __data; \
+ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+ struct trace_event_buffer fbuffer; \
+ struct trace_event_raw_##call *entry; \
+ int __data_size; \
+ \
+ if (trace_trigger_soft_disabled(trace_file)) \
+ return; \
+ \
+ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+ \
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
+ sizeof(*entry) + __data_size); \
+ \
+ if (!entry) \
+ return; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ trace_event_buffer_commit(&fbuffer); \
+}
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void ftrace_test_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(trace_event_raw_event_##template); \
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __print_hex
+#undef __get_dynamic_array
+#undef __get_dynamic_array_len
+#undef __get_str
+#undef __get_bitmask
+#undef __print_array
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+_TRACE_PERF_PROTO(call, PARAMS(proto)); \
+static char print_fmt_##call[] = print; \
+static struct trace_event_class __used __refdata event_class_##call = { \
+ .system = TRACE_SYSTEM_STRING, \
+ .define_fields = trace_event_define_fields_##call, \
+ .fields = LIST_HEAD_INIT(event_class_##call.fields),\
+ .raw_init = trace_event_raw_init, \
+ .probe = trace_event_raw_event_##call, \
+ .reg = trace_event_reg, \
+ _TRACE_PERF_INIT(call) \
+};
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+ \
+static struct trace_event_call __used event_##call = { \
+ .class = &event_class_##template, \
+ { \
+ .tp = &__tracepoint_##call, \
+ }, \
+ .event.funcs = &trace_event_type_funcs_##template, \
+ .print_fmt = print_fmt_##template, \
+ .flags = TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
+ \
+static char print_fmt_##call[] = print; \
+ \
+static struct trace_event_call __used event_##call = { \
+ .class = &event_class_##template, \
+ { \
+ .tp = &__tracepoint_##call, \
+ }, \
+ .event.funcs = &trace_event_type_funcs_##call, \
+ .print_fmt = print_fmt_##call, \
+ .flags = TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef TRACE_SYSTEM_VAR
+
+#ifdef CONFIG_PERF_EVENTS
+
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+ ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field) \
+ ((__entry->__data_loc_##field >> 16) & 0xffff)
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __perf_addr
+#define __perf_addr(a) (__addr = (a))
+
+#undef __perf_count
+#define __perf_count(c) (__count = (c))
+
+#undef __perf_task
+#define __perf_task(t) (__task = (t))
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static notrace void \
+perf_trace_##call(void *__data, proto) \
+{ \
+ struct trace_event_call *event_call = __data; \
+ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+ struct trace_event_raw_##call *entry; \
+ struct pt_regs *__regs; \
+ u64 __addr = 0, __count = 1; \
+ struct task_struct *__task = NULL; \
+ struct hlist_head *head; \
+ int __entry_size; \
+ int __data_size; \
+ int rctx; \
+ \
+ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+ \
+ head = this_cpu_ptr(event_call->perf_events); \
+ if (__builtin_constant_p(!__task) && !__task && \
+ hlist_empty(head)) \
+ return; \
+ \
+ __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+ sizeof(u64)); \
+ __entry_size -= sizeof(u32); \
+ \
+ entry = perf_trace_buf_prepare(__entry_size, \
+ event_call->event.type, &__regs, &rctx); \
+ if (!entry) \
+ return; \
+ \
+ perf_fetch_caller_regs(__regs); \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
+ __count, __regs, head, __task); \
+}
+
+/*
+ * This part is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void perf_test_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(perf_trace_##template); \
+}
+
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_PERF_EVENTS */
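
[Illustrative hand-expansion, not in the patch: for the hypothetical sample_event from the sketch after define_trace.h above, the DECLARE_EVENT_CLASS() probe in this new file generates roughly the following (tstruct expands to nothing here, since __field() is a no-op at this stage, and the braced block is the TP_fast_assign() body):]

    static notrace void
    trace_event_raw_event_sample_event(void *__data, int value)
    {
            struct trace_event_file *trace_file = __data;
            struct trace_event_data_offsets_sample_event __maybe_unused __data_offsets;
            struct trace_event_buffer fbuffer;
            struct trace_event_raw_sample_event *entry;
            int __data_size;

            if (trace_trigger_soft_disabled(trace_file))
                    return;

            __data_size = trace_event_get_offsets_sample_event(&__data_offsets, value);

            entry = trace_event_buffer_reserve(&fbuffer, trace_file,
                                               sizeof(*entry) + __data_size);
            if (!entry)
                    return;

            { entry->value = value; }       /* the TP_fast_assign() body */

            trace_event_buffer_commit(&fbuffer);
    }
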
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 9674145e2f6a..7434f0f5d3f6 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -3,7 +3,7 @@
#include <linux/tracepoint.h>
#include <linux/unistd.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#include <linux/thread_info.h>
#include <asm/ptrace.h>
@@ -29,8 +29,8 @@ struct syscall_metadata {
const char **args;
struct list_head enter_fields;
- struct ftrace_event_call *enter_event;
- struct ftrace_event_call *exit_event;
+ struct trace_event_call *enter_event;
+ struct trace_event_call *exit_event;
};
#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
diff --git a/include/trace/ftrace.h b/include/trace/trace_events.h
index 37d4b10b111d..43be3b0e44d3 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/trace_events.h
@@ -3,7 +3,7 @@
*
* Override the macros in <trace/trace_events.h> to include the following:
*
- * struct ftrace_raw_<call> {
+ * struct trace_event_raw_<call> {
* struct trace_entry ent;
* <type> <item>;
* <type2> <item2>[<len>];
@@ -16,7 +16,7 @@
* in the structure.
*/
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#ifndef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
@@ -95,17 +95,17 @@ TRACE_MAKE_SYSTEM_STR();
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
- struct ftrace_raw_##name { \
+ struct trace_event_raw_##name { \
struct trace_entry ent; \
tstruct \
char __data[0]; \
}; \
\
- static struct ftrace_event_class event_class_##name;
+ static struct trace_event_class event_class_##name;
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
__attribute__((__aligned__(4))) event_##name
#undef DEFINE_EVENT_FN
@@ -138,7 +138,7 @@ TRACE_MAKE_SYSTEM_STR();
*
* Include the following:
*
- * struct ftrace_data_offsets_<call> {
+ * struct trace_event_data_offsets_<call> {
* u32 <item1>;
* u32 <item2>;
* [...]
@@ -178,7 +178,7 @@ TRACE_MAKE_SYSTEM_STR();
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
- struct ftrace_data_offsets_##call { \
+ struct trace_event_data_offsets_##call { \
tstruct; \
};
@@ -203,10 +203,10 @@ TRACE_MAKE_SYSTEM_STR();
* Override the macros in <trace/trace_events.h> to include the following:
*
* enum print_line_t
- * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
+ * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
* {
* struct trace_seq *s = &iter->seq;
- * struct ftrace_raw_<call> *field; <-- defined in stage 1
+ * struct trace_event_raw_<call> *field; <-- defined in stage 1
* struct trace_entry *entry;
* struct trace_seq *p = &iter->tmp_seq;
* int ret;
@@ -258,7 +258,7 @@ TRACE_MAKE_SYSTEM_STR();
void *__bitmask = __get_dynamic_array(field); \
unsigned int __bitmask_size; \
__bitmask_size = __get_dynamic_array_len(field); \
- ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
+ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
})
#undef __print_flags
@@ -266,7 +266,7 @@ TRACE_MAKE_SYSTEM_STR();
({ \
static const struct trace_print_flags __flags[] = \
{ flag_array, { -1, NULL }}; \
- ftrace_print_flags_seq(p, delim, flag, __flags); \
+ trace_print_flags_seq(p, delim, flag, __flags); \
})
#undef __print_symbolic
@@ -274,7 +274,7 @@ TRACE_MAKE_SYSTEM_STR();
({ \
static const struct trace_print_flags symbols[] = \
{ symbol_array, { -1, NULL }}; \
- ftrace_print_symbols_seq(p, value, symbols); \
+ trace_print_symbols_seq(p, value, symbols); \
})
#undef __print_symbolic_u64
@@ -283,7 +283,7 @@ TRACE_MAKE_SYSTEM_STR();
({ \
static const struct trace_print_flags_u64 symbols[] = \
{ symbol_array, { -1, NULL } }; \
- ftrace_print_symbols_seq_u64(p, value, symbols); \
+ trace_print_symbols_seq_u64(p, value, symbols); \
})
#else
#define __print_symbolic_u64(value, symbol_array...) \
@@ -291,30 +291,30 @@ TRACE_MAKE_SYSTEM_STR();
#endif
#undef __print_hex
-#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
+#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len)
#undef __print_array
#define __print_array(array, count, el_size) \
({ \
BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
el_size != 4 && el_size != 8); \
- ftrace_print_array_seq(p, array, count, el_size); \
+ trace_print_array_seq(p, array, count, el_size); \
})
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
- struct trace_event *trace_event) \
+trace_raw_output_##call(struct trace_iterator *iter, int flags, \
+ struct trace_event *trace_event) \
{ \
struct trace_seq *s = &iter->seq; \
struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
- struct ftrace_raw_##call *field; \
+ struct trace_event_raw_##call *field; \
int ret; \
\
field = (typeof(field))iter->ent; \
\
- ret = ftrace_raw_output_prep(iter, trace_event); \
+ ret = trace_raw_output_prep(iter, trace_event); \
if (ret != TRACE_TYPE_HANDLED) \
return ret; \
\
@@ -322,17 +322,17 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
\
return trace_handle_return(s); \
} \
-static struct trace_event_functions ftrace_event_type_funcs_##call = { \
- .trace = ftrace_raw_output_##call, \
+static struct trace_event_functions trace_event_type_funcs_##call = { \
+ .trace = trace_raw_output_##call, \
};
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
+trace_raw_output_##call(struct trace_iterator *iter, int flags, \
struct trace_event *event) \
{ \
- struct ftrace_raw_##template *field; \
+ struct trace_event_raw_##template *field; \
struct trace_entry *entry; \
struct trace_seq *p = &iter->tmp_seq; \
\
@@ -346,10 +346,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
field = (typeof(field))entry; \
\
trace_seq_init(p); \
- return ftrace_output_call(iter, #call, print); \
+ return trace_output_call(iter, #call, print); \
} \
-static struct trace_event_functions ftrace_event_type_funcs_##call = { \
- .trace = ftrace_raw_output_##call, \
+static struct trace_event_functions trace_event_type_funcs_##call = { \
+ .trace = trace_raw_output_##call, \
};
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -407,9 +407,9 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int notrace __init \
-ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
+trace_event_define_fields_##call(struct trace_event_call *event_call) \
{ \
- struct ftrace_raw_##call field; \
+ struct trace_event_raw_##call field; \
int ret; \
\
tstruct; \
@@ -485,12 +485,12 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static inline notrace int ftrace_get_offsets_##call( \
- struct ftrace_data_offsets_##call *__data_offsets, proto) \
+static inline notrace int trace_event_get_offsets_##call( \
+ struct trace_event_data_offsets_##call *__data_offsets, proto) \
{ \
int __data_size = 0; \
int __maybe_unused __item_length; \
- struct ftrace_raw_##call __maybe_unused *entry; \
+ struct trace_event_raw_##call __maybe_unused *entry; \
\
tstruct; \
\
@@ -506,354 +506,3 @@ static inline notrace int ftrace_get_offsets_##call( \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-/*
- * Stage 4 of the trace events.
- *
- * Override the macros in <trace/trace_events.h> to include the following:
- *
- * For those macros defined with TRACE_EVENT:
- *
- * static struct ftrace_event_call event_<call>;
- *
- * static void ftrace_raw_event_<call>(void *__data, proto)
- * {
- * struct ftrace_event_file *ftrace_file = __data;
- * struct ftrace_event_call *event_call = ftrace_file->event_call;
- * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- * unsigned long eflags = ftrace_file->flags;
- * enum event_trigger_type __tt = ETT_NONE;
- * struct ring_buffer_event *event;
- * struct ftrace_raw_<call> *entry; <-- defined in stage 1
- * struct ring_buffer *buffer;
- * unsigned long irq_flags;
- * int __data_size;
- * int pc;
- *
- * if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
- * if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
- * event_triggers_call(ftrace_file, NULL);
- * if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
- * return;
- * }
- *
- * local_save_flags(irq_flags);
- * pc = preempt_count();
- *
- * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- * event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
- * event_<call>->event.type,
- * sizeof(*entry) + __data_size,
- * irq_flags, pc);
- * if (!event)
- * return;
- * entry = ring_buffer_event_data(event);
- *
- * { <assign>; } <-- Here we assign the entries by the __field and
- * __array macros.
- *
- * if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
- * __tt = event_triggers_call(ftrace_file, entry);
- *
- * if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- * &ftrace_file->flags))
- * ring_buffer_discard_commit(buffer, event);
- * else if (!filter_check_discard(ftrace_file, entry, buffer, event))
- * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
- *
- * if (__tt)
- * event_triggers_post_call(ftrace_file, __tt);
- * }
- *
- * static struct trace_event ftrace_event_type_<call> = {
- * .trace = ftrace_raw_output_<call>, <-- stage 2
- * };
- *
- * static char print_fmt_<call>[] = <TP_printk>;
- *
- * static struct ftrace_event_class __used event_class_<template> = {
- * .system = "<system>",
- * .define_fields = ftrace_define_fields_<call>,
- * .fields = LIST_HEAD_INIT(event_class_##call.fields),
- * .raw_init = trace_event_raw_init,
- * .probe = ftrace_raw_event_##call,
- * .reg = ftrace_event_reg,
- * };
- *
- * static struct ftrace_event_call event_<call> = {
- * .class = event_class_<template>,
- * {
- * .tp = &__tracepoint_<call>,
- * },
- * .event = &ftrace_event_type_<call>,
- * .print_fmt = print_fmt_<call>,
- * .flags = TRACE_EVENT_FL_TRACEPOINT,
- * };
- * // its only safe to use pointers when doing linker tricks to
- * // create an array.
- * static struct ftrace_event_call __used
- * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
- *
- */
-
-#ifdef CONFIG_PERF_EVENTS
-
-#define _TRACE_PERF_PROTO(call, proto) \
- static notrace void \
- perf_trace_##call(void *__data, proto);
-
-#define _TRACE_PERF_INIT(call) \
- .perf_probe = perf_trace_##call,
-
-#else
-#define _TRACE_PERF_PROTO(call, proto)
-#define _TRACE_PERF_INIT(call)
-#endif /* CONFIG_PERF_EVENTS */
-
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- __entry->__data_loc_##item = __data_offsets.item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __assign_str
-#define __assign_str(dst, src) \
- strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __assign_bitmask
-#define __assign_bitmask(dst, src, nr_bits) \
- memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef __perf_addr
-#define __perf_addr(a) (a)
-
-#undef __perf_count
-#define __perf_count(c) (c)
-
-#undef __perf_task
-#define __perf_task(t) (t)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
- \
-static notrace void \
-ftrace_raw_event_##call(void *__data, proto) \
-{ \
- struct ftrace_event_file *ftrace_file = __data; \
- struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
- struct ftrace_event_buffer fbuffer; \
- struct ftrace_raw_##call *entry; \
- int __data_size; \
- \
- if (ftrace_trigger_soft_disabled(ftrace_file)) \
- return; \
- \
- __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
- \
- entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file, \
- sizeof(*entry) + __data_size); \
- \
- if (!entry) \
- return; \
- \
- tstruct \
- \
- { assign; } \
- \
- ftrace_event_buffer_commit(&fbuffer); \
-}
-/*
- * The ftrace_test_probe is compiled out, it is only here as a build time check
- * to make sure that if the tracepoint handling changes, the ftrace probe will
- * fail to compile unless it too is updated.
- */
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static inline void ftrace_test_probe_##call(void) \
-{ \
- check_trace_callback_type_##call(ftrace_raw_event_##template); \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#undef __entry
-#define __entry REC
-
-#undef __print_flags
-#undef __print_symbolic
-#undef __print_hex
-#undef __get_dynamic_array
-#undef __get_dynamic_array_len
-#undef __get_str
-#undef __get_bitmask
-#undef __print_array
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-_TRACE_PERF_PROTO(call, PARAMS(proto)); \
-static char print_fmt_##call[] = print; \
-static struct ftrace_event_class __used __refdata event_class_##call = { \
- .system = TRACE_SYSTEM_STRING, \
- .define_fields = ftrace_define_fields_##call, \
- .fields = LIST_HEAD_INIT(event_class_##call.fields),\
- .raw_init = trace_event_raw_init, \
- .probe = ftrace_raw_event_##call, \
- .reg = ftrace_event_reg, \
- _TRACE_PERF_INIT(call) \
-};
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
- \
-static struct ftrace_event_call __used event_##call = { \
- .class = &event_class_##template, \
- { \
- .tp = &__tracepoint_##call, \
- }, \
- .event.funcs = &ftrace_event_type_funcs_##template, \
- .print_fmt = print_fmt_##template, \
- .flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct ftrace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
- \
-static char print_fmt_##call[] = print; \
- \
-static struct ftrace_event_call __used event_##call = { \
- .class = &event_class_##template, \
- { \
- .tp = &__tracepoint_##call, \
- }, \
- .event.funcs = &ftrace_event_type_funcs_##call, \
- .print_fmt = print_fmt_##call, \
- .flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct ftrace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#undef TRACE_SYSTEM_VAR
-
-#ifdef CONFIG_PERF_EVENTS
-
-#undef __entry
-#define __entry entry
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) (char *)__get_dynamic_array(field)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __perf_addr
-#define __perf_addr(a) (__addr = (a))
-
-#undef __perf_count
-#define __perf_count(c) (__count = (c))
-
-#undef __perf_task
-#define __perf_task(t) (__task = (t))
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static notrace void \
-perf_trace_##call(void *__data, proto) \
-{ \
- struct ftrace_event_call *event_call = __data; \
- struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
- struct ftrace_raw_##call *entry; \
- struct pt_regs *__regs; \
- u64 __addr = 0, __count = 1; \
- struct task_struct *__task = NULL; \
- struct hlist_head *head; \
- int __entry_size; \
- int __data_size; \
- int rctx; \
- \
- __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
- \
- head = this_cpu_ptr(event_call->perf_events); \
- if (__builtin_constant_p(!__task) && !__task && \
- hlist_empty(head)) \
- return; \
- \
- __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
- sizeof(u64)); \
- __entry_size -= sizeof(u32); \
- \
- entry = perf_trace_buf_prepare(__entry_size, \
- event_call->event.type, &__regs, &rctx); \
- if (!entry) \
- return; \
- \
- perf_fetch_caller_regs(__regs); \
- \
- tstruct \
- \
- { assign; } \
- \
- perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
- __count, __regs, head, __task); \
-}
-
-/*
- * This part is compiled out, it is only here as a build time check
- * to make sure that if the tracepoint handling changes, the
- * perf probe will fail to compile unless it too is updated.
- */
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static inline void perf_test_probe_##call(void) \
-{ \
- check_trace_callback_type_##call(perf_trace_##template); \
-}
-
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_PERF_EVENTS */
-
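
[Illustrative expansion, not from the patch: for the hypothetical sample_event used in the earlier sketches, stage 1 of this renamed header now generates the record struct and its (empty, GNU C) offsets struct:]

    struct trace_event_raw_sample_event {   /* was struct ftrace_raw_sample_event */
            struct trace_entry      ent;
            int                     value;
            char                    __data[0];
    };

    struct trace_event_data_offsets_sample_event {
            /* empty: sample_event declares no __dynamic_array()/__string() */
    };
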
diff --git a/kernel/events/core.c b/kernel/events/core.c
index bc95b6a6220b..d1f37ddd1960 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -36,7 +36,7 @@
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
diff --git a/kernel/module.c b/kernel/module.c
index cfc9e843a924..b38f96a183b5 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -18,7 +18,7 @@
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 591af0cb7b9f..c291bd65d2cb 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -35,7 +35,7 @@
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#include "rcu.h"
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index add042926a66..65137bc28b2b 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -54,7 +54,7 @@
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#include <linux/suspend.h>
#include "tree.h"
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 4eeae4674b5a..b3e6b39b6cf9 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1448,14 +1448,14 @@ static struct trace_event trace_blk_event = {
static int __init init_blk_tracer(void)
{
- if (!register_ftrace_event(&trace_blk_event)) {
+ if (!register_trace_event(&trace_blk_event)) {
pr_warning("Warning: could not register block events\n");
return 1;
}
if (register_tracer(&blk_tracer) != 0) {
pr_warning("Warning: could not register the block tracer\n");
- unregister_ftrace_event(&trace_blk_event);
+ unregister_trace_event(&trace_blk_event);
return 1;
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0315d43176d8..6260717c18e3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
*/
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/trace_seq.h>
@@ -115,63 +115,11 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
*
*/
-/*
- * A fast way to enable or disable all ring buffers is to
- * call tracing_on or tracing_off. Turning off the ring buffers
- * prevents all ring buffers from being recorded to.
- * Turning this switch on, makes it OK to write to the
- * ring buffer, if the ring buffer is enabled itself.
- *
- * There's three layers that must be on in order to write
- * to the ring buffer.
- *
- * 1) This global flag must be set.
- * 2) The ring buffer must be enabled for recording.
- * 3) The per cpu buffer must be enabled for recording.
- *
- * In case of an anomaly, this global flag has a bit set that
- * will permantly disable all ring buffers.
- */
-
-/*
- * Global flag to disable all recording to ring buffers
- * This has two bits: ON, DISABLED
- *
- * ON DISABLED
- * ---- ----------
- * 0 0 : ring buffers are off
- * 1 0 : ring buffers are on
- * X 1 : ring buffers are permanently disabled
- */
-
-enum {
- RB_BUFFERS_ON_BIT = 0,
- RB_BUFFERS_DISABLED_BIT = 1,
-};
-
-enum {
- RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
- RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
-};
-
-static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
-
/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF (1 << 20)
#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
-/**
- * tracing_off_permanent - permanently disable ring buffers
- *
- * This function, once called, will disable all ring buffers
- * permanently.
- */
-void tracing_off_permanent(void)
-{
- set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
-}
-
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -452,6 +400,23 @@ struct rb_irq_work {
};
/*
+ * Used for which event context the event is in.
+ * NMI = 0
+ * IRQ = 1
+ * SOFTIRQ = 2
+ * NORMAL = 3
+ *
+ * See trace_recursive_lock() comment below for more details.
+ */
+enum {
+ RB_CTX_NMI,
+ RB_CTX_IRQ,
+ RB_CTX_SOFTIRQ,
+ RB_CTX_NORMAL,
+ RB_CTX_MAX
+};
+
+/*
* head_page == tail_page && head == tail then buffer is empty.
*/
struct ring_buffer_per_cpu {
@@ -462,6 +427,7 @@ struct ring_buffer_per_cpu {
arch_spinlock_t lock;
struct lock_class_key lock_key;
unsigned int nr_pages;
+ unsigned int current_context;
struct list_head *pages;
struct buffer_page *head_page; /* read from head */
struct buffer_page *tail_page; /* write to tail */
@@ -2224,7 +2190,7 @@ static unsigned rb_calculate_event_length(unsigned length)
/* zero length can cause confusions */
if (!length)
- length = 1;
+ length++;
if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
length += sizeof(event.array[0]);
@@ -2636,8 +2602,6 @@ rb_reserve_next_event(struct ring_buffer *buffer,
return NULL;
}
-#ifdef CONFIG_TRACING
-
/*
* The lock and unlock are done within a preempt disable section.
* The current_context per_cpu variable can only be modified
@@ -2675,44 +2639,38 @@ rb_reserve_next_event(struct ring_buffer *buffer,
* just so happens that it is the same bit corresponding to
* the current context.
*/
-static DEFINE_PER_CPU(unsigned int, current_context);
-static __always_inline int trace_recursive_lock(void)
+static __always_inline int
+trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
- unsigned int val = __this_cpu_read(current_context);
+ unsigned int val = cpu_buffer->current_context;
int bit;
if (in_interrupt()) {
if (in_nmi())
- bit = 0;
+ bit = RB_CTX_NMI;
else if (in_irq())
- bit = 1;
+ bit = RB_CTX_IRQ;
else
- bit = 2;
+ bit = RB_CTX_SOFTIRQ;
} else
- bit = 3;
+ bit = RB_CTX_NORMAL;
if (unlikely(val & (1 << bit)))
return 1;
val |= (1 << bit);
- __this_cpu_write(current_context, val);
+ cpu_buffer->current_context = val;
return 0;
}
-static __always_inline void trace_recursive_unlock(void)
+static __always_inline void
+trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{
- __this_cpu_and(current_context, __this_cpu_read(current_context) - 1);
+ cpu_buffer->current_context &= cpu_buffer->current_context - 1;
}
-#else
-
-#define trace_recursive_lock() (0)
-#define trace_recursive_unlock() do { } while (0)
-
-#endif
-
/**
* ring_buffer_lock_reserve - reserve a part of the buffer
* @buffer: the ring buffer to reserve from
@@ -2735,41 +2693,37 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
struct ring_buffer_event *event;
int cpu;
- if (ring_buffer_flags != RB_BUFFERS_ON)
- return NULL;
-
/* If we are tracing schedule, we don't want to recurse */
preempt_disable_notrace();
- if (atomic_read(&buffer->record_disabled))
- goto out_nocheck;
-
- if (trace_recursive_lock())
- goto out_nocheck;
+ if (unlikely(atomic_read(&buffer->record_disabled)))
+ goto out;
cpu = raw_smp_processor_id();
- if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
goto out;
cpu_buffer = buffer->buffers[cpu];
- if (atomic_read(&cpu_buffer->record_disabled))
+ if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
goto out;
- if (length > BUF_MAX_DATA_SIZE)
+ if (unlikely(length > BUF_MAX_DATA_SIZE))
+ goto out;
+
+ if (unlikely(trace_recursive_lock(cpu_buffer)))
goto out;
event = rb_reserve_next_event(buffer, cpu_buffer, length);
if (!event)
- goto out;
+ goto out_unlock;
return event;
+ out_unlock:
+ trace_recursive_unlock(cpu_buffer);
out:
- trace_recursive_unlock();
-
- out_nocheck:
preempt_enable_notrace();
return NULL;
}
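For reference, the producer side of this API looks roughly as follows (a minimal sketch, assuming buffer came from ring_buffer_alloc() and value is a u64 the caller wants to record; not taken from this patch):

	struct ring_buffer_event *event;
	u64 *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (event) {
		entry = ring_buffer_event_data(event);
		*entry = value;		/* write the payload */
		ring_buffer_unlock_commit(buffer, event);
	}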
@@ -2859,7 +2813,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
rb_wakeups(buffer, cpu_buffer);
- trace_recursive_unlock();
+ trace_recursive_unlock(cpu_buffer);
preempt_enable_notrace();
@@ -2970,7 +2924,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
out:
rb_end_commit(cpu_buffer);
- trace_recursive_unlock();
+ trace_recursive_unlock(cpu_buffer);
preempt_enable_notrace();
@@ -3000,9 +2954,6 @@ int ring_buffer_write(struct ring_buffer *buffer,
int ret = -EBUSY;
int cpu;
- if (ring_buffer_flags != RB_BUFFERS_ON)
- return -EBUSY;
-
preempt_disable_notrace();
if (atomic_read(&buffer->record_disabled))
@@ -3021,9 +2972,12 @@ int ring_buffer_write(struct ring_buffer *buffer,
if (length > BUF_MAX_DATA_SIZE)
goto out;
+ if (unlikely(trace_recursive_lock(cpu_buffer)))
+ goto out;
+
event = rb_reserve_next_event(buffer, cpu_buffer, length);
if (!event)
- goto out;
+ goto out_unlock;
body = rb_event_data(event);
@@ -3034,6 +2988,10 @@ int ring_buffer_write(struct ring_buffer *buffer,
rb_wakeups(buffer, cpu_buffer);
ret = 0;
+
+ out_unlock:
+ trace_recursive_unlock(cpu_buffer);
+
out:
preempt_enable_notrace();
@@ -3860,19 +3818,36 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
-static inline int rb_ok_to_lock(void)
+static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
+ if (likely(!in_nmi())) {
+ raw_spin_lock(&cpu_buffer->reader_lock);
+ return true;
+ }
+
/*
* If an NMI die dump reads out the content of the ring buffer,
- * do not grab locks. We also permanently disable the ring
- * buffer too. A one time deal is all you get from reading
- * the ring buffer from an NMI.
+ * trylock must be used to prevent a deadlock if the NMI
+ * preempted a task that holds the ring buffer locks. If
+ * we get the lock then all is fine; if not, we continue
+ * with the read anyway, but this can corrupt the ring buffer,
+ * so it must be permanently disabled from future writes.
+ * Reading from NMI is a one-shot deal.
*/
- if (likely(!in_nmi()))
- return 1;
+ if (raw_spin_trylock(&cpu_buffer->reader_lock))
+ return true;
- tracing_off_permanent();
- return 0;
+ /* Continue without locking, but disable the ring buffer */
+ atomic_inc(&cpu_buffer->record_disabled);
+ return false;
+}
+
+static inline void
+rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
+{
+ if (likely(locked))
+ raw_spin_unlock(&cpu_buffer->reader_lock);
+ return;
}
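All the readers below convert to the same pattern, sketched here once (drawn from the call sites in this patch):

	unsigned long flags;
	bool dolock;

	local_irq_save(flags);
	dolock = rb_reader_lock(cpu_buffer);
	/* ... peek at or poll cpu_buffer ... */
	rb_reader_unlock(cpu_buffer, dolock);
	local_irq_restore(flags);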
/**
@@ -3892,21 +3867,18 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
unsigned long flags;
- int dolock;
+ bool dolock;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
- dolock = rb_ok_to_lock();
again:
local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ dolock = rb_reader_lock(cpu_buffer);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
rb_advance_reader(cpu_buffer);
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
+ rb_reader_unlock(cpu_buffer, dolock);
local_irq_restore(flags);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
@@ -3959,9 +3931,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event = NULL;
unsigned long flags;
- int dolock;
-
- dolock = rb_ok_to_lock();
+ bool dolock;
again:
/* might be called in atomic */
@@ -3972,8 +3942,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ dolock = rb_reader_lock(cpu_buffer);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event) {
@@ -3981,8 +3950,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
rb_advance_reader(cpu_buffer);
}
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
+ rb_reader_unlock(cpu_buffer, dolock);
local_irq_restore(flags);
out:
@@ -4263,21 +4231,17 @@ int ring_buffer_empty(struct ring_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
- int dolock;
+ bool dolock;
int cpu;
int ret;
- dolock = rb_ok_to_lock();
-
/* yes this is racy, but if you don't like the race, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ dolock = rb_reader_lock(cpu_buffer);
ret = rb_per_cpu_empty(cpu_buffer);
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
+ rb_reader_unlock(cpu_buffer, dolock);
local_irq_restore(flags);
if (!ret)
@@ -4297,21 +4261,17 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
- int dolock;
+ bool dolock;
int ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 1;
- dolock = rb_ok_to_lock();
-
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ dolock = rb_reader_lock(cpu_buffer);
ret = rb_per_cpu_empty(cpu_buffer);
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
+ rb_reader_unlock(cpu_buffer, dolock);
local_irq_restore(flags);
return ret;
@@ -4349,9 +4309,6 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
ret = -EAGAIN;
- if (ring_buffer_flags != RB_BUFFERS_ON)
- goto out;
-
if (atomic_read(&buffer_a->record_disabled))
goto out;
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 1b28df2d9104..a1503a027ee2 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -32,11 +32,11 @@ static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;
-static int disable_reader;
+static unsigned int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");
-static int write_iteration = 50;
+static unsigned int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");
@@ -46,16 +46,16 @@ static int consumer_nice = MAX_NICE;
static int producer_fifo = -1;
static int consumer_fifo = -1;
-module_param(producer_nice, uint, 0644);
+module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");
-module_param(consumer_nice, uint, 0644);
+module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");
-module_param(producer_fifo, uint, 0644);
+module_param(producer_fifo, int, 0644);
MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");
-module_param(consumer_fifo, uint, 0644);
+module_param(consumer_fifo, int, 0644);
MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
static int read_events;
@@ -263,6 +263,8 @@ static void ring_buffer_producer(void)
if (cnt % wakeup_interval)
cond_resched();
#endif
+ if (kthread_should_stop())
+ kill_test = 1;
} while (ktime_before(end_time, timeout) && !kill_test);
trace_printk("End ring buffer hammer\n");
@@ -285,7 +287,7 @@ static void ring_buffer_producer(void)
entries = ring_buffer_entries(buffer);
overruns = ring_buffer_overruns(buffer);
- if (kill_test)
+ if (kill_test && !kthread_should_stop())
trace_printk("ERROR!\n");
if (!disable_reader) {
@@ -379,7 +381,7 @@ static int ring_buffer_consumer_thread(void *arg)
}
__set_current_state(TASK_RUNNING);
- if (kill_test)
+ if (!kthread_should_stop())
wait_to_die();
return 0;
@@ -399,13 +401,16 @@ static int ring_buffer_producer_thread(void *arg)
}
ring_buffer_producer();
+ if (kill_test)
+ goto out_kill;
trace_printk("Sleeping for 10 secs\n");
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ * SLEEP_TIME);
}
- if (kill_test)
+out_kill:
+ if (!kthread_should_stop())
wait_to_die();
return 0;
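In outline, the kthread shutdown these hunks implement (illustrative; the loop body is elided): the threads poll kthread_should_stop() so that kthread_stop() from module unload aborts the test instead of hanging, and wait_to_die() is only entered when no stop was requested:

	while (!kill_test) {
		/* ... produce or consume events ... */
		if (kthread_should_stop())
			kill_test = 1;	/* module unload requested */
	}
	if (!kthread_should_stop())
		wait_to_die();		/* park until rmmod */
	return 0;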
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 05330494a0df..abcbf7ff8743 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -297,11 +297,11 @@ void trace_array_put(struct trace_array *this_tr)
mutex_unlock(&trace_types_lock);
}
-int filter_check_discard(struct ftrace_event_file *file, void *rec,
+int filter_check_discard(struct trace_event_file *file, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event)
{
- if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
+ if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
!filter_match_preds(file->filter, rec)) {
ring_buffer_discard_commit(buffer, event);
return 1;
@@ -311,7 +311,7 @@ int filter_check_discard(struct ftrace_event_file *file, void *rec,
}
EXPORT_SYMBOL_GPL(filter_check_discard);
-int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+int call_filter_check_discard(struct trace_event_call *call, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event)
{
@@ -876,6 +876,7 @@ static struct {
{ trace_clock_jiffies, "uptime", 0 },
{ trace_clock, "perf", 1 },
{ ktime_get_mono_fast_ns, "mono", 1 },
+ { ktime_get_raw_fast_ns, "mono_raw", 1 },
ARCH_TRACE_CLOCKS
};
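With this entry, the raw monotonic clock can be selected like any other trace clock, e.g. echo mono_raw > /sys/kernel/debug/tracing/trace_clock; reading that file lists the available clocks, including arch-specific ones such as x86-tsc.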
@@ -1693,13 +1694,13 @@ static struct ring_buffer *temp_buffer;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
- struct ftrace_event_file *ftrace_file,
+ struct trace_event_file *trace_file,
int type, unsigned long len,
unsigned long flags, int pc)
{
struct ring_buffer_event *entry;
- *current_rb = ftrace_file->tr->trace_buffer.buffer;
+ *current_rb = trace_file->tr->trace_buffer.buffer;
entry = trace_buffer_lock_reserve(*current_rb,
type, len, flags, pc);
/*
@@ -1708,7 +1709,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
* to store the trace event for the trigger to use. It's recursion
* safe and will not be recorded anywhere.
*/
- if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
+ if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
*current_rb = temp_buffer;
entry = trace_buffer_lock_reserve(*current_rb,
type, len, flags, pc);
@@ -1760,7 +1761,7 @@ trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
int pc)
{
- struct ftrace_event_call *call = &event_function;
+ struct trace_event_call *call = &event_function;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ring_buffer_event *event;
struct ftrace_entry *entry;
@@ -1795,7 +1796,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
unsigned long flags,
int skip, int pc, struct pt_regs *regs)
{
- struct ftrace_event_call *call = &event_kernel_stack;
+ struct trace_event_call *call = &event_kernel_stack;
struct ring_buffer_event *event;
struct stack_entry *entry;
struct stack_trace trace;
@@ -1923,7 +1924,7 @@ static DEFINE_PER_CPU(int, user_stack_count);
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
- struct ftrace_event_call *call = &event_user_stack;
+ struct trace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
struct userstack_entry *entry;
struct stack_trace trace;
@@ -2129,7 +2130,7 @@ static void trace_printk_start_stop_comm(int enabled)
*/
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
- struct ftrace_event_call *call = &event_bprint;
+ struct trace_event_call *call = &event_bprint;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
struct trace_array *tr = &global_trace;
@@ -2187,7 +2188,7 @@ static int
__trace_array_vprintk(struct ring_buffer *buffer,
unsigned long ip, const char *fmt, va_list args)
{
- struct ftrace_event_call *call = &event_print;
+ struct trace_event_call *call = &event_print;
struct ring_buffer_event *event;
int len = 0, size, pc;
struct print_entry *entry;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3d2ad5f83e94..f060716b02ae 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -12,7 +12,7 @@
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/trace_seq.h>
@@ -211,8 +211,8 @@ struct trace_array {
#ifdef CONFIG_FTRACE_SYSCALLS
int sys_refcount_enter;
int sys_refcount_exit;
- struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
- struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
+ struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
+ struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
int stop_count;
int clock_id;
@@ -858,7 +858,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
-int ftrace_event_is_function(struct ftrace_event_call *call);
+int ftrace_event_is_function(struct trace_event_call *call);
/*
* struct trace_parser - serves for reading the user input separated by spaces
@@ -992,7 +992,7 @@ struct event_subsystem {
int ref_count;
};
-struct ftrace_subsystem_dir {
+struct trace_subsystem_dir {
struct list_head list;
struct event_subsystem *subsystem;
struct trace_array *tr;
@@ -1052,30 +1052,30 @@ struct filter_pred {
extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
-extern void print_event_filter(struct ftrace_event_file *file,
+extern void print_event_filter(struct trace_event_file *file,
struct trace_seq *s);
-extern int apply_event_filter(struct ftrace_event_file *file,
+extern int apply_event_filter(struct trace_event_file *file,
char *filter_string);
-extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
+extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
struct trace_seq *s);
extern int filter_assign_type(const char *type);
-extern int create_event_filter(struct ftrace_event_call *call,
+extern int create_event_filter(struct trace_event_call *call,
char *filter_str, bool set_str,
struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);
struct ftrace_event_field *
-trace_find_event_field(struct ftrace_event_call *call, char *name);
+trace_find_event_field(struct trace_event_call *call, char *name);
extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);
-extern struct ftrace_event_file *find_event_file(struct trace_array *tr,
- const char *system,
- const char *event);
+extern struct trace_event_file *find_event_file(struct trace_array *tr,
+ const char *system,
+ const char *event);
static inline void *event_file_data(struct file *filp)
{
@@ -1180,7 +1180,7 @@ struct event_trigger_ops {
* commands need to do this if they themselves log to the trace
* buffer (see the @post_trigger() member below). @trigger_type
* values are defined by adding new values to the trigger_type
- * enum in include/linux/ftrace_event.h.
+ * enum in include/linux/trace_events.h.
*
* @post_trigger: A flag that says whether or not this command needs
* to have its action delayed until after the current event has
@@ -1242,23 +1242,23 @@ struct event_command {
enum event_trigger_type trigger_type;
bool post_trigger;
int (*func)(struct event_command *cmd_ops,
- struct ftrace_event_file *file,
+ struct trace_event_file *file,
char *glob, char *cmd, char *params);
int (*reg)(char *glob,
struct event_trigger_ops *ops,
struct event_trigger_data *data,
- struct ftrace_event_file *file);
+ struct trace_event_file *file);
void (*unreg)(char *glob,
struct event_trigger_ops *ops,
struct event_trigger_data *data,
- struct ftrace_event_file *file);
+ struct trace_event_file *file);
int (*set_filter)(char *filter_str,
struct event_trigger_data *data,
- struct ftrace_event_file *file);
+ struct trace_event_file *file);
struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
-extern int trace_event_enable_disable(struct ftrace_event_file *file,
+extern int trace_event_enable_disable(struct trace_event_file *file,
int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);
@@ -1286,7 +1286,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
- extern struct ftrace_event_call \
+ extern struct trace_event_call \
__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
@@ -1295,7 +1295,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
#include "trace_entries.h"
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
-int perf_ftrace_event_register(struct ftrace_event_call *call,
+int perf_ftrace_event_register(struct trace_event_call *call,
enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 57cbf1efdd44..a87b43f49eb4 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -29,7 +29,7 @@ static struct trace_array *branch_tracer;
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
- struct ftrace_event_call *call = &event_branch;
+ struct trace_event_call *call = &event_branch;
struct trace_array *tr = branch_tracer;
struct trace_array_cpu *data;
struct ring_buffer_event *event;
@@ -191,7 +191,7 @@ __init static int init_branch_tracer(void)
{
int ret;
- ret = register_ftrace_event(&trace_branch_event);
+ ret = register_trace_event(&trace_branch_event);
if (!ret) {
printk(KERN_WARNING "Warning: could not register "
"branch events\n");
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 57b67b1f24d1..0f06532a755b 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -56,6 +56,7 @@ u64 notrace trace_clock(void)
{
return local_clock();
}
+EXPORT_SYMBOL_GPL(trace_clock);
/*
* trace_jiffy_clock(): Simply use jiffies as a clock counter.
@@ -68,6 +69,7 @@ u64 notrace trace_clock_jiffies(void)
{
return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
+EXPORT_SYMBOL_GPL(trace_clock_jiffies);
/*
* trace_clock_global(): special globally coherent trace clock
@@ -123,6 +125,7 @@ u64 notrace trace_clock_global(void)
return now;
}
+EXPORT_SYMBOL_GPL(trace_clock_global);
static atomic64_t trace_counter;
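Exporting these clock functions makes them callable from loadable modules, presumably so tracing-related modules can timestamp with, e.g., trace_clock_global().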
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 6fa484de2ba1..abfc903e741e 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -21,7 +21,7 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
-static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
+static int perf_trace_event_perm(struct trace_event_call *tp_event,
struct perf_event *p_event)
{
if (tp_event->perf_perm) {
@@ -83,7 +83,7 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
return 0;
}
-static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
+static int perf_trace_event_reg(struct trace_event_call *tp_event,
struct perf_event *p_event)
{
struct hlist_head __percpu *list;
@@ -143,7 +143,7 @@ fail:
static void perf_trace_event_unreg(struct perf_event *p_event)
{
- struct ftrace_event_call *tp_event = p_event->tp_event;
+ struct trace_event_call *tp_event = p_event->tp_event;
int i;
if (--tp_event->perf_refcount > 0)
@@ -172,17 +172,17 @@ out:
static int perf_trace_event_open(struct perf_event *p_event)
{
- struct ftrace_event_call *tp_event = p_event->tp_event;
+ struct trace_event_call *tp_event = p_event->tp_event;
return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}
static void perf_trace_event_close(struct perf_event *p_event)
{
- struct ftrace_event_call *tp_event = p_event->tp_event;
+ struct trace_event_call *tp_event = p_event->tp_event;
tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}
-static int perf_trace_event_init(struct ftrace_event_call *tp_event,
+static int perf_trace_event_init(struct trace_event_call *tp_event,
struct perf_event *p_event)
{
int ret;
@@ -206,7 +206,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
int perf_trace_init(struct perf_event *p_event)
{
- struct ftrace_event_call *tp_event;
+ struct trace_event_call *tp_event;
u64 event_id = p_event->attr.config;
int ret = -EINVAL;
@@ -236,7 +236,7 @@ void perf_trace_destroy(struct perf_event *p_event)
int perf_trace_add(struct perf_event *p_event, int flags)
{
- struct ftrace_event_call *tp_event = p_event->tp_event;
+ struct trace_event_call *tp_event = p_event->tp_event;
struct hlist_head __percpu *pcpu_list;
struct hlist_head *list;
@@ -255,7 +255,7 @@ int perf_trace_add(struct perf_event *p_event, int flags)
void perf_trace_del(struct perf_event *p_event, int flags)
{
- struct ftrace_event_call *tp_event = p_event->tp_event;
+ struct trace_event_call *tp_event = p_event->tp_event;
hlist_del_rcu(&p_event->hlist_entry);
tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
@@ -357,7 +357,7 @@ static void perf_ftrace_function_disable(struct perf_event *event)
ftrace_function_local_disable(&event->ftrace_ops);
}
-int perf_ftrace_event_register(struct ftrace_event_call *call,
+int perf_ftrace_event_register(struct trace_event_call *call,
enum trace_reg type, void *data)
{
switch (type) {
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c4de47fc5cca..404a372ad85a 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -61,14 +61,14 @@ static int system_refcount_dec(struct event_subsystem *system)
#define do_for_each_event_file_safe(tr, file) \
list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
- struct ftrace_event_file *___n; \
+ struct trace_event_file *___n; \
list_for_each_entry_safe(file, ___n, &tr->events, list)
#define while_for_each_event_file() \
}
static struct list_head *
-trace_get_fields(struct ftrace_event_call *event_call)
+trace_get_fields(struct trace_event_call *event_call)
{
if (!event_call->class->get_fields)
return &event_call->class->fields;
@@ -89,7 +89,7 @@ __find_event_field(struct list_head *head, char *name)
}
struct ftrace_event_field *
-trace_find_event_field(struct ftrace_event_call *call, char *name)
+trace_find_event_field(struct trace_event_call *call, char *name)
{
struct ftrace_event_field *field;
struct list_head *head;
@@ -129,7 +129,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
return 0;
}
-int trace_define_field(struct ftrace_event_call *call, const char *type,
+int trace_define_field(struct trace_event_call *call, const char *type,
const char *name, int offset, int size, int is_signed,
int filter_type)
{
@@ -166,7 +166,7 @@ static int trace_define_common_fields(void)
return ret;
}
-static void trace_destroy_fields(struct ftrace_event_call *call)
+static void trace_destroy_fields(struct trace_event_call *call)
{
struct ftrace_event_field *field, *next;
struct list_head *head;
@@ -178,11 +178,11 @@ static void trace_destroy_fields(struct ftrace_event_call *call)
}
}
-int trace_event_raw_init(struct ftrace_event_call *call)
+int trace_event_raw_init(struct trace_event_call *call)
{
int id;
- id = register_ftrace_event(&call->event);
+ id = register_trace_event(&call->event);
if (!id)
return -ENODEV;
@@ -190,18 +190,18 @@ int trace_event_raw_init(struct ftrace_event_call *call)
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
-void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
- struct ftrace_event_file *ftrace_file,
- unsigned long len)
+void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
+ struct trace_event_file *trace_file,
+ unsigned long len)
{
- struct ftrace_event_call *event_call = ftrace_file->event_call;
+ struct trace_event_call *event_call = trace_file->event_call;
local_save_flags(fbuffer->flags);
fbuffer->pc = preempt_count();
- fbuffer->ftrace_file = ftrace_file;
+ fbuffer->trace_file = trace_file;
fbuffer->event =
- trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
+ trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
event_call->event.type, len,
fbuffer->flags, fbuffer->pc);
if (!fbuffer->event)
@@ -210,13 +210,13 @@ void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
fbuffer->entry = ring_buffer_event_data(fbuffer->event);
return fbuffer->entry;
}
-EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
+EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
static DEFINE_SPINLOCK(tracepoint_iter_lock);
-static void output_printk(struct ftrace_event_buffer *fbuffer)
+static void output_printk(struct trace_event_buffer *fbuffer)
{
- struct ftrace_event_call *event_call;
+ struct trace_event_call *event_call;
struct trace_event *event;
unsigned long flags;
struct trace_iterator *iter = tracepoint_print_iter;
@@ -224,12 +224,12 @@ static void output_printk(struct ftrace_event_buffer *fbuffer)
if (!iter)
return;
- event_call = fbuffer->ftrace_file->event_call;
+ event_call = fbuffer->trace_file->event_call;
if (!event_call || !event_call->event.funcs ||
!event_call->event.funcs->trace)
return;
- event = &fbuffer->ftrace_file->event_call->event;
+ event = &fbuffer->trace_file->event_call->event;
spin_lock_irqsave(&tracepoint_iter_lock, flags);
trace_seq_init(&iter->seq);
@@ -241,21 +241,21 @@ static void output_printk(struct ftrace_event_buffer *fbuffer)
spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}
-void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
if (tracepoint_printk)
output_printk(fbuffer);
- event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
+ event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
fbuffer->event, fbuffer->entry,
fbuffer->flags, fbuffer->pc);
}
-EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
+EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
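The renamed reserve/commit pair is what the TRACE_EVENT()-generated probes use; roughly as below (a sketch, where struct trace_event_raw_foo and its field are a hypothetical event record invented for illustration):

	struct trace_event_buffer fbuffer;
	struct trace_event_raw_foo *entry;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
	if (!entry)
		return;
	entry->foo = foo;	/* fill the event-specific fields */
	trace_event_buffer_commit(&fbuffer);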
-int ftrace_event_reg(struct ftrace_event_call *call,
- enum trace_reg type, void *data)
+int trace_event_reg(struct trace_event_call *call,
+ enum trace_reg type, void *data)
{
- struct ftrace_event_file *file = data;
+ struct trace_event_file *file = data;
WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
switch (type) {
@@ -288,34 +288,34 @@ int ftrace_event_reg(struct ftrace_event_call *call,
}
return 0;
}
-EXPORT_SYMBOL_GPL(ftrace_event_reg);
+EXPORT_SYMBOL_GPL(trace_event_reg);
void trace_event_enable_cmd_record(bool enable)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
struct trace_array *tr;
mutex_lock(&event_mutex);
do_for_each_event_file(tr, file) {
- if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
+ if (!(file->flags & EVENT_FILE_FL_ENABLED))
continue;
if (enable) {
tracing_start_cmdline_record();
- set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
+ set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
} else {
tracing_stop_cmdline_record();
- clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
+ clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
}
} while_for_each_event_file();
mutex_unlock(&event_mutex);
}
-static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
+static int __ftrace_event_enable_disable(struct trace_event_file *file,
int enable, int soft_disable)
{
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_call *call = file->event_call;
int ret = 0;
int disable;
@@ -337,24 +337,24 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
if (soft_disable) {
if (atomic_dec_return(&file->sm_ref) > 0)
break;
- disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
- clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+ disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
+ clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
} else
- disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
+ disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
- if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
- clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
- if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
+ if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
+ clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
+ if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
tracing_stop_cmdline_record();
- clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
+ clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
}
call->class->reg(call, TRACE_REG_UNREGISTER, file);
}
/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
- if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
- set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
+ if (file->flags & EVENT_FILE_FL_SOFT_MODE)
+ set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
else
- clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
+ clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
break;
case 1:
/*
@@ -366,31 +366,31 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
* it still seems to be disabled.
*/
if (!soft_disable)
- clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
+ clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
else {
if (atomic_inc_return(&file->sm_ref) > 1)
break;
- set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+ set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
}
- if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
+ if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
/* Keep the event disabled when going to SOFT_MODE. */
if (soft_disable)
- set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
+ set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
if (trace_flags & TRACE_ITER_RECORD_CMD) {
tracing_start_cmdline_record();
- set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
+ set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
}
ret = call->class->reg(call, TRACE_REG_REGISTER, file);
if (ret) {
tracing_stop_cmdline_record();
pr_info("event trace: Could not enable event "
- "%s\n", ftrace_event_name(call));
+ "%s\n", trace_event_name(call));
break;
}
- set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
+ set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
/* WAS_ENABLED gets set but never cleared. */
call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
@@ -401,13 +401,13 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
return ret;
}
-int trace_event_enable_disable(struct ftrace_event_file *file,
+int trace_event_enable_disable(struct trace_event_file *file,
int enable, int soft_disable)
{
return __ftrace_event_enable_disable(file, enable, soft_disable);
}
-static int ftrace_event_enable_disable(struct ftrace_event_file *file,
+static int ftrace_event_enable_disable(struct trace_event_file *file,
int enable)
{
return __ftrace_event_enable_disable(file, enable, 0);
@@ -415,7 +415,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_file *file,
static void ftrace_clear_events(struct trace_array *tr)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
mutex_lock(&event_mutex);
list_for_each_entry(file, &tr->events, list) {
@@ -449,14 +449,14 @@ static void __get_system(struct event_subsystem *system)
system_refcount_inc(system);
}
-static void __get_system_dir(struct ftrace_subsystem_dir *dir)
+static void __get_system_dir(struct trace_subsystem_dir *dir)
{
WARN_ON_ONCE(dir->ref_count == 0);
dir->ref_count++;
__get_system(dir->subsystem);
}
-static void __put_system_dir(struct ftrace_subsystem_dir *dir)
+static void __put_system_dir(struct trace_subsystem_dir *dir)
{
WARN_ON_ONCE(dir->ref_count == 0);
/* If the subsystem is about to be freed, the dir must be too */
@@ -467,14 +467,14 @@ static void __put_system_dir(struct ftrace_subsystem_dir *dir)
kfree(dir);
}
-static void put_system(struct ftrace_subsystem_dir *dir)
+static void put_system(struct trace_subsystem_dir *dir)
{
mutex_lock(&event_mutex);
__put_system_dir(dir);
mutex_unlock(&event_mutex);
}
-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
+static void remove_subsystem(struct trace_subsystem_dir *dir)
{
if (!dir)
return;
@@ -486,7 +486,7 @@ static void remove_subsystem(struct ftrace_subsystem_dir *dir)
}
}
-static void remove_event_file_dir(struct ftrace_event_file *file)
+static void remove_event_file_dir(struct trace_event_file *file)
{
struct dentry *dir = file->dir;
struct dentry *child;
@@ -515,15 +515,15 @@ static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
const char *sub, const char *event, int set)
{
- struct ftrace_event_file *file;
- struct ftrace_event_call *call;
+ struct trace_event_file *file;
+ struct trace_event_call *call;
const char *name;
int ret = -EINVAL;
list_for_each_entry(file, &tr->events, list) {
call = file->event_call;
- name = ftrace_event_name(call);
+ name = trace_event_name(call);
if (!name || !call->class || !call->class->reg)
continue;
@@ -671,8 +671,8 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct ftrace_event_file *file = v;
- struct ftrace_event_call *call;
+ struct trace_event_file *file = v;
+ struct trace_event_call *call;
struct trace_array *tr = m->private;
(*pos)++;
@@ -692,13 +692,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
static void *t_start(struct seq_file *m, loff_t *pos)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
struct trace_array *tr = m->private;
loff_t l;
mutex_lock(&event_mutex);
- file = list_entry(&tr->events, struct ftrace_event_file, list);
+ file = list_entry(&tr->events, struct trace_event_file, list);
for (l = 0; l <= *pos; ) {
file = t_next(m, file, &l);
if (!file)
@@ -710,13 +710,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct ftrace_event_file *file = v;
+ struct trace_event_file *file = v;
struct trace_array *tr = m->private;
(*pos)++;
list_for_each_entry_continue(file, &tr->events, list) {
- if (file->flags & FTRACE_EVENT_FL_ENABLED)
+ if (file->flags & EVENT_FILE_FL_ENABLED)
return file;
}
@@ -725,13 +725,13 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
static void *s_start(struct seq_file *m, loff_t *pos)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
struct trace_array *tr = m->private;
loff_t l;
mutex_lock(&event_mutex);
- file = list_entry(&tr->events, struct ftrace_event_file, list);
+ file = list_entry(&tr->events, struct trace_event_file, list);
for (l = 0; l <= *pos; ) {
file = s_next(m, file, &l);
if (!file)
@@ -742,12 +742,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
static int t_show(struct seq_file *m, void *v)
{
- struct ftrace_event_file *file = v;
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_file *file = v;
+ struct trace_event_call *call = file->event_call;
if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
seq_printf(m, "%s:", call->class->system);
- seq_printf(m, "%s\n", ftrace_event_name(call));
+ seq_printf(m, "%s\n", trace_event_name(call));
return 0;
}
@@ -761,7 +761,7 @@ static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
unsigned long flags;
char buf[4] = "0";
@@ -774,12 +774,12 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
if (!file)
return -ENODEV;
- if (flags & FTRACE_EVENT_FL_ENABLED &&
- !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+ if (flags & EVENT_FILE_FL_ENABLED &&
+ !(flags & EVENT_FILE_FL_SOFT_DISABLED))
strcpy(buf, "1");
- if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
- flags & FTRACE_EVENT_FL_SOFT_MODE)
+ if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
+ flags & EVENT_FILE_FL_SOFT_MODE)
strcat(buf, "*");
strcat(buf, "\n");
@@ -791,7 +791,7 @@ static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
unsigned long val;
int ret;
@@ -828,10 +828,10 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
const char set_to_char[4] = { '?', '0', '1', 'X' };
- struct ftrace_subsystem_dir *dir = filp->private_data;
+ struct trace_subsystem_dir *dir = filp->private_data;
struct event_subsystem *system = dir->subsystem;
- struct ftrace_event_call *call;
- struct ftrace_event_file *file;
+ struct trace_event_call *call;
+ struct trace_event_file *file;
struct trace_array *tr = dir->tr;
char buf[2];
int set = 0;
@@ -840,7 +840,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
mutex_lock(&event_mutex);
list_for_each_entry(file, &tr->events, list) {
call = file->event_call;
- if (!ftrace_event_name(call) || !call->class || !call->class->reg)
+ if (!trace_event_name(call) || !call->class || !call->class->reg)
continue;
if (system && strcmp(call->class->system, system->name) != 0)
@@ -851,7 +851,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
* or if all events are cleared, or if we have
* a mixture.
*/
- set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
+ set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));
/*
* If we have a mixture, no need to look further.
@@ -873,7 +873,7 @@ static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_subsystem_dir *dir = filp->private_data;
+ struct trace_subsystem_dir *dir = filp->private_data;
struct event_subsystem *system = dir->subsystem;
const char *name = NULL;
unsigned long val;
@@ -917,7 +917,7 @@ enum {
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct ftrace_event_call *call = event_file_data(m->private);
+ struct trace_event_call *call = event_file_data(m->private);
struct list_head *common_head = &ftrace_common_fields;
struct list_head *head = trace_get_fields(call);
struct list_head *node = v;
@@ -949,13 +949,13 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
static int f_show(struct seq_file *m, void *v)
{
- struct ftrace_event_call *call = event_file_data(m->private);
+ struct trace_event_call *call = event_file_data(m->private);
struct ftrace_event_field *field;
const char *array_descriptor;
switch ((unsigned long)v) {
case FORMAT_HEADER:
- seq_printf(m, "name: %s\n", ftrace_event_name(call));
+ seq_printf(m, "name: %s\n", trace_event_name(call));
seq_printf(m, "ID: %d\n", call->event.type);
seq_puts(m, "format:\n");
return 0;
@@ -1062,7 +1062,7 @@ static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
struct trace_seq *s;
int r = -ENODEV;
@@ -1095,7 +1095,7 @@ static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
char *buf;
int err = -ENODEV;
@@ -1132,7 +1132,7 @@ static LIST_HEAD(event_subsystems);
static int subsystem_open(struct inode *inode, struct file *filp)
{
struct event_subsystem *system = NULL;
- struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
+ struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
struct trace_array *tr;
int ret;
@@ -1181,7 +1181,7 @@ static int subsystem_open(struct inode *inode, struct file *filp)
static int system_tr_open(struct inode *inode, struct file *filp)
{
- struct ftrace_subsystem_dir *dir;
+ struct trace_subsystem_dir *dir;
struct trace_array *tr = inode->i_private;
int ret;
@@ -1214,7 +1214,7 @@ static int system_tr_open(struct inode *inode, struct file *filp)
static int subsystem_release(struct inode *inode, struct file *file)
{
- struct ftrace_subsystem_dir *dir = file->private_data;
+ struct trace_subsystem_dir *dir = file->private_data;
trace_array_put(dir->tr);
@@ -1235,7 +1235,7 @@ static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_subsystem_dir *dir = filp->private_data;
+ struct trace_subsystem_dir *dir = filp->private_data;
struct event_subsystem *system = dir->subsystem;
struct trace_seq *s;
int r;
@@ -1262,7 +1262,7 @@ static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_subsystem_dir *dir = filp->private_data;
+ struct trace_subsystem_dir *dir = filp->private_data;
char *buf;
int err;
@@ -1497,9 +1497,9 @@ create_new_subsystem(const char *name)
static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
- struct ftrace_event_file *file, struct dentry *parent)
+ struct trace_event_file *file, struct dentry *parent)
{
- struct ftrace_subsystem_dir *dir;
+ struct trace_subsystem_dir *dir;
struct event_subsystem *system;
struct dentry *entry;
@@ -1571,9 +1571,9 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
}
static int
-event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
+event_create_dir(struct dentry *parent, struct trace_event_file *file)
{
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_call *call = file->event_call;
struct trace_array *tr = file->tr;
struct list_head *head;
struct dentry *d_events;
@@ -1591,7 +1591,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
} else
d_events = parent;
- name = ftrace_event_name(call);
+ name = trace_event_name(call);
file->dir = tracefs_create_dir(name, d_events);
if (!file->dir) {
pr_warn("Could not create tracefs '%s' directory\n", name);
@@ -1634,9 +1634,9 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
return 0;
}
-static void remove_event_from_tracers(struct ftrace_event_call *call)
+static void remove_event_from_tracers(struct trace_event_call *call)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
struct trace_array *tr;
do_for_each_event_file_safe(tr, file) {
@@ -1654,10 +1654,10 @@ static void remove_event_from_tracers(struct ftrace_event_call *call)
} while_for_each_event_file();
}
-static void event_remove(struct ftrace_event_call *call)
+static void event_remove(struct trace_event_call *call)
{
struct trace_array *tr;
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
do_for_each_event_file(tr, file) {
if (file->event_call != call)
@@ -1673,17 +1673,17 @@ static void event_remove(struct ftrace_event_call *call)
} while_for_each_event_file();
if (call->event.funcs)
- __unregister_ftrace_event(&call->event);
+ __unregister_trace_event(&call->event);
remove_event_from_tracers(call);
list_del(&call->list);
}
-static int event_init(struct ftrace_event_call *call)
+static int event_init(struct trace_event_call *call)
{
int ret = 0;
const char *name;
- name = ftrace_event_name(call);
+ name = trace_event_name(call);
if (WARN_ON(!name))
return -EINVAL;
@@ -1697,7 +1697,7 @@ static int event_init(struct ftrace_event_call *call)
}
static int
-__register_event(struct ftrace_event_call *call, struct module *mod)
+__register_event(struct trace_event_call *call, struct module *mod)
{
int ret;
@@ -1733,7 +1733,7 @@ static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
return ptr + elen;
}
-static void update_event_printk(struct ftrace_event_call *call,
+static void update_event_printk(struct trace_event_call *call,
struct trace_enum_map *map)
{
char *ptr;
@@ -1811,7 +1811,7 @@ static void update_event_printk(struct ftrace_event_call *call,
void trace_event_enum_update(struct trace_enum_map **map, int len)
{
- struct ftrace_event_call *call, *p;
+ struct trace_event_call *call, *p;
const char *last_system = NULL;
int last_i;
int i;
@@ -1836,11 +1836,11 @@ void trace_event_enum_update(struct trace_enum_map **map, int len)
up_write(&trace_event_sem);
}
-static struct ftrace_event_file *
-trace_create_new_event(struct ftrace_event_call *call,
+static struct trace_event_file *
+trace_create_new_event(struct trace_event_call *call,
struct trace_array *tr)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
file = kmem_cache_alloc(file_cachep, GFP_TRACE);
if (!file)
@@ -1858,9 +1858,9 @@ trace_create_new_event(struct ftrace_event_call *call,
/* Add an event to a trace directory */
static int
-__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
+__trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
file = trace_create_new_event(call, tr);
if (!file)
@@ -1875,10 +1875,10 @@ __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
* the filesystem is initialized.
*/
static __init int
-__trace_early_add_new_event(struct ftrace_event_call *call,
+__trace_early_add_new_event(struct trace_event_call *call,
struct trace_array *tr)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
file = trace_create_new_event(call, tr);
if (!file)
@@ -1888,10 +1888,10 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
}
struct ftrace_module_file_ops;
-static void __add_event_to_tracers(struct ftrace_event_call *call);
+static void __add_event_to_tracers(struct trace_event_call *call);
/* Add an additional event_call dynamically */
-int trace_add_event_call(struct ftrace_event_call *call)
+int trace_add_event_call(struct trace_event_call *call)
{
int ret;
mutex_lock(&trace_types_lock);
@@ -1910,7 +1910,7 @@ int trace_add_event_call(struct ftrace_event_call *call)
* Must be called under locking of trace_types_lock, event_mutex and
* trace_event_sem.
*/
-static void __trace_remove_event_call(struct ftrace_event_call *call)
+static void __trace_remove_event_call(struct trace_event_call *call)
{
event_remove(call);
trace_destroy_fields(call);
@@ -1918,10 +1918,10 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
call->filter = NULL;
}
-static int probe_remove_event_call(struct ftrace_event_call *call)
+static int probe_remove_event_call(struct trace_event_call *call)
{
struct trace_array *tr;
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
#ifdef CONFIG_PERF_EVENTS
if (call->perf_refcount)
@@ -1932,10 +1932,10 @@ static int probe_remove_event_call(struct ftrace_event_call *call)
continue;
/*
* We can't rely on the ftrace_event_enable_disable(enable => 0)
- * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
+ * we are about to do; EVENT_FILE_FL_SOFT_MODE can suppress
* TRACE_REG_UNREGISTER.
*/
- if (file->flags & FTRACE_EVENT_FL_ENABLED)
+ if (file->flags & EVENT_FILE_FL_ENABLED)
return -EBUSY;
/*
* The do_for_each_event_file_safe() is
@@ -1952,7 +1952,7 @@ static int probe_remove_event_call(struct ftrace_event_call *call)
}
/* Remove an event_call */
-int trace_remove_event_call(struct ftrace_event_call *call)
+int trace_remove_event_call(struct trace_event_call *call)
{
int ret;
@@ -1976,7 +1976,7 @@ int trace_remove_event_call(struct ftrace_event_call *call)
static void trace_module_add_events(struct module *mod)
{
- struct ftrace_event_call **call, **start, **end;
+ struct trace_event_call **call, **start, **end;
if (!mod->num_trace_events)
return;
@@ -1999,7 +1999,7 @@ static void trace_module_add_events(struct module *mod)
static void trace_module_remove_events(struct module *mod)
{
- struct ftrace_event_call *call, *p;
+ struct trace_event_call *call, *p;
bool clear_trace = false;
down_write(&trace_event_sem);
@@ -2055,28 +2055,28 @@ static struct notifier_block trace_module_nb = {
static void
__trace_add_event_dirs(struct trace_array *tr)
{
- struct ftrace_event_call *call;
+ struct trace_event_call *call;
int ret;
list_for_each_entry(call, &ftrace_events, list) {
ret = __trace_add_new_event(call, tr);
if (ret < 0)
pr_warn("Could not create directory for event %s\n",
- ftrace_event_name(call));
+ trace_event_name(call));
}
}
-struct ftrace_event_file *
+struct trace_event_file *
find_event_file(struct trace_array *tr, const char *system, const char *event)
{
- struct ftrace_event_file *file;
- struct ftrace_event_call *call;
+ struct trace_event_file *file;
+ struct trace_event_call *call;
const char *name;
list_for_each_entry(file, &tr->events, list) {
call = file->event_call;
- name = ftrace_event_name(call);
+ name = trace_event_name(call);
if (!name || !call->class || !call->class->reg)
continue;
@@ -2098,7 +2098,7 @@ find_event_file(struct trace_array *tr, const char *system, const char *event)
#define DISABLE_EVENT_STR "disable_event"
struct event_probe_data {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
unsigned long count;
int ref;
bool enable;
@@ -2114,9 +2114,9 @@ event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
return;
if (data->enable)
- clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
+ clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
else
- set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
+ set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
}
static void
@@ -2132,7 +2132,7 @@ event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data
return;
/* Skip if the event is in a state we want to switch to */
- if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+ if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
return;
if (data->count != -1)
@@ -2152,7 +2152,7 @@ event_enable_print(struct seq_file *m, unsigned long ip,
seq_printf(m, "%s:%s:%s",
data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
data->file->event_call->class->system,
- ftrace_event_name(data->file->event_call));
+ trace_event_name(data->file->event_call));
if (data->count == -1)
seq_puts(m, ":unlimited\n");
@@ -2226,7 +2226,7 @@ event_enable_func(struct ftrace_hash *hash,
char *glob, char *cmd, char *param, int enabled)
{
struct trace_array *tr = top_trace_array();
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
struct ftrace_probe_ops *ops;
struct event_probe_data *data;
const char *system;
@@ -2358,7 +2358,7 @@ static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
- * The top level array has already had its ftrace_event_file
+ * The top level array has already had its trace_event_file
* descriptors created in order to allow for early events to
* be recorded. This function is called after tracefs has been
* initialized, and we now have to create the files associated
@@ -2367,7 +2367,7 @@ static inline int register_event_cmds(void) { return 0; }
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
int ret;
@@ -2375,7 +2375,7 @@ __trace_early_add_event_dirs(struct trace_array *tr)
ret = event_create_dir(tr->event_dir, file);
if (ret < 0)
pr_warn("Could not create directory for event %s\n",
- ftrace_event_name(file->event_call));
+ trace_event_name(file->event_call));
}
}
@@ -2388,7 +2388,7 @@ __trace_early_add_event_dirs(struct trace_array *tr)
static __init void
__trace_early_add_events(struct trace_array *tr)
{
- struct ftrace_event_call *call;
+ struct trace_event_call *call;
int ret;
list_for_each_entry(call, &ftrace_events, list) {
@@ -2399,7 +2399,7 @@ __trace_early_add_events(struct trace_array *tr)
ret = __trace_early_add_new_event(call, tr);
if (ret < 0)
pr_warn("Could not create early event %s\n",
- ftrace_event_name(call));
+ trace_event_name(call));
}
}
@@ -2407,13 +2407,13 @@ __trace_early_add_events(struct trace_array *tr)
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
- struct ftrace_event_file *file, *next;
+ struct trace_event_file *file, *next;
list_for_each_entry_safe(file, next, &tr->events, list)
remove_event_file_dir(file);
}
-static void __add_event_to_tracers(struct ftrace_event_call *call)
+static void __add_event_to_tracers(struct trace_event_call *call)
{
struct trace_array *tr;
@@ -2421,8 +2421,8 @@ static void __add_event_to_tracers(struct ftrace_event_call *call)
__trace_add_new_event(call, tr);
}
-extern struct ftrace_event_call *__start_ftrace_events[];
-extern struct ftrace_event_call *__stop_ftrace_events[];
+extern struct trace_event_call *__start_ftrace_events[];
+extern struct trace_event_call *__stop_ftrace_events[];
static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
@@ -2557,7 +2557,7 @@ int event_trace_del_tracer(struct trace_array *tr)
static __init int event_trace_memsetup(void)
{
field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
- file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
+ file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
return 0;
}
@@ -2593,7 +2593,7 @@ early_enable_events(struct trace_array *tr, bool disable_first)
static __init int event_trace_enable(void)
{
struct trace_array *tr = top_trace_array();
- struct ftrace_event_call **iter, *call;
+ struct trace_event_call **iter, *call;
int ret;
if (!tr)
@@ -2754,9 +2754,9 @@ static __init void event_test_stuff(void)
*/
static __init void event_trace_self_tests(void)
{
- struct ftrace_subsystem_dir *dir;
- struct ftrace_event_file *file;
- struct ftrace_event_call *call;
+ struct trace_subsystem_dir *dir;
+ struct trace_event_file *file;
+ struct trace_event_call *call;
struct event_subsystem *system;
struct trace_array *tr;
int ret;
@@ -2787,13 +2787,13 @@ static __init void event_trace_self_tests(void)
continue;
#endif
- pr_info("Testing event %s: ", ftrace_event_name(call));
+ pr_info("Testing event %s: ", trace_event_name(call));
/*
* If an event is already enabled, someone is using
* it and the self test should not be on.
*/
- if (file->flags & FTRACE_EVENT_FL_ENABLED) {
+ if (file->flags & EVENT_FILE_FL_ENABLED) {
pr_warn("Enabled event during self test!\n");
WARN_ON_ONCE(1);
continue;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 1c92dfa1dd17..d81d6f302b14 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -643,7 +643,7 @@ static void append_filter_err(struct filter_parse_state *ps,
free_page((unsigned long) buf);
}
-static inline struct event_filter *event_filter(struct ftrace_event_file *file)
+static inline struct event_filter *event_filter(struct trace_event_file *file)
{
if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
return file->event_call->filter;
@@ -652,7 +652,7 @@ static inline struct event_filter *event_filter(struct ftrace_event_file *file)
}
/* caller must hold event_mutex */
-void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s)
+void print_event_filter(struct trace_event_file *file, struct trace_seq *s)
{
struct event_filter *filter = event_filter(file);
@@ -780,14 +780,14 @@ static void __free_preds(struct event_filter *filter)
filter->n_preds = 0;
}
-static void filter_disable(struct ftrace_event_file *file)
+static void filter_disable(struct trace_event_file *file)
{
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_call *call = file->event_call;
if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
call->flags &= ~TRACE_EVENT_FL_FILTERED;
else
- file->flags &= ~FTRACE_EVENT_FL_FILTERED;
+ file->flags &= ~EVENT_FILE_FL_FILTERED;
}
static void __free_filter(struct event_filter *filter)
@@ -837,9 +837,9 @@ static int __alloc_preds(struct event_filter *filter, int n_preds)
return 0;
}
-static inline void __remove_filter(struct ftrace_event_file *file)
+static inline void __remove_filter(struct trace_event_file *file)
{
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_call *call = file->event_call;
filter_disable(file);
if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
@@ -848,10 +848,10 @@ static inline void __remove_filter(struct ftrace_event_file *file)
remove_filter_string(file->filter);
}
-static void filter_free_subsystem_preds(struct ftrace_subsystem_dir *dir,
+static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir,
struct trace_array *tr)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
list_for_each_entry(file, &tr->events, list) {
if (file->system != dir)
@@ -860,9 +860,9 @@ static void filter_free_subsystem_preds(struct ftrace_subsystem_dir *dir,
}
}
-static inline void __free_subsystem_filter(struct ftrace_event_file *file)
+static inline void __free_subsystem_filter(struct trace_event_file *file)
{
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_call *call = file->event_call;
if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
__free_filter(call->filter);
@@ -873,10 +873,10 @@ static inline void __free_subsystem_filter(struct ftrace_event_file *file)
}
}
-static void filter_free_subsystem_filters(struct ftrace_subsystem_dir *dir,
+static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir,
struct trace_array *tr)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
list_for_each_entry(file, &tr->events, list) {
if (file->system != dir)
@@ -1342,7 +1342,7 @@ parse_operand:
}
static struct filter_pred *create_pred(struct filter_parse_state *ps,
- struct ftrace_event_call *call,
+ struct trace_event_call *call,
int op, char *operand1, char *operand2)
{
struct ftrace_event_field *field;
@@ -1564,7 +1564,7 @@ static int fold_pred_tree(struct event_filter *filter,
filter->preds);
}
-static int replace_preds(struct ftrace_event_call *call,
+static int replace_preds(struct trace_event_call *call,
struct event_filter *filter,
struct filter_parse_state *ps,
bool dry_run)
@@ -1677,20 +1677,20 @@ fail:
return err;
}
-static inline void event_set_filtered_flag(struct ftrace_event_file *file)
+static inline void event_set_filtered_flag(struct trace_event_file *file)
{
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_call *call = file->event_call;
if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
call->flags |= TRACE_EVENT_FL_FILTERED;
else
- file->flags |= FTRACE_EVENT_FL_FILTERED;
+ file->flags |= EVENT_FILE_FL_FILTERED;
}
-static inline void event_set_filter(struct ftrace_event_file *file,
+static inline void event_set_filter(struct trace_event_file *file,
struct event_filter *filter)
{
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_call *call = file->event_call;
if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
rcu_assign_pointer(call->filter, filter);
@@ -1698,9 +1698,9 @@ static inline void event_set_filter(struct ftrace_event_file *file,
rcu_assign_pointer(file->filter, filter);
}
-static inline void event_clear_filter(struct ftrace_event_file *file)
+static inline void event_clear_filter(struct trace_event_file *file)
{
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_call *call = file->event_call;
if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
RCU_INIT_POINTER(call->filter, NULL);
@@ -1709,33 +1709,33 @@ static inline void event_clear_filter(struct ftrace_event_file *file)
}
static inline void
-event_set_no_set_filter_flag(struct ftrace_event_file *file)
+event_set_no_set_filter_flag(struct trace_event_file *file)
{
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_call *call = file->event_call;
if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
else
- file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER;
+ file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
}
static inline void
-event_clear_no_set_filter_flag(struct ftrace_event_file *file)
+event_clear_no_set_filter_flag(struct trace_event_file *file)
{
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_call *call = file->event_call;
if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
else
- file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER;
+ file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
}
static inline bool
-event_no_set_filter_flag(struct ftrace_event_file *file)
+event_no_set_filter_flag(struct trace_event_file *file)
{
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_call *call = file->event_call;
- if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER)
+ if (file->flags & EVENT_FILE_FL_NO_SET_FILTER)
return true;
if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
@@ -1750,12 +1750,12 @@ struct filter_list {
struct event_filter *filter;
};
-static int replace_system_preds(struct ftrace_subsystem_dir *dir,
+static int replace_system_preds(struct trace_subsystem_dir *dir,
struct trace_array *tr,
struct filter_parse_state *ps,
char *filter_string)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
struct filter_list *filter_item;
struct filter_list *tmp;
LIST_HEAD(filter_list);
@@ -1899,8 +1899,8 @@ static void create_filter_finish(struct filter_parse_state *ps)
}
/**
- * create_filter - create a filter for a ftrace_event_call
- * @call: ftrace_event_call to create a filter for
+ * create_filter - create a filter for a trace_event_call
+ * @call: trace_event_call to create a filter for
* @filter_str: filter string
* @set_str: remember @filter_str and enable detailed error in filter
* @filterp: out param for created filter (always updated on return)
@@ -1914,7 +1914,7 @@ static void create_filter_finish(struct filter_parse_state *ps)
* information if @set_str is %true and the caller is responsible for
* freeing it.
*/
-static int create_filter(struct ftrace_event_call *call,
+static int create_filter(struct trace_event_call *call,
char *filter_str, bool set_str,
struct event_filter **filterp)
{
@@ -1934,7 +1934,7 @@ static int create_filter(struct ftrace_event_call *call,
return err;
}
-int create_event_filter(struct ftrace_event_call *call,
+int create_event_filter(struct trace_event_call *call,
char *filter_str, bool set_str,
struct event_filter **filterp)
{
@@ -1950,7 +1950,7 @@ int create_event_filter(struct ftrace_event_call *call,
* Identical to create_filter() except that it creates a subsystem filter
* and always remembers @filter_str.
*/
-static int create_system_filter(struct ftrace_subsystem_dir *dir,
+static int create_system_filter(struct trace_subsystem_dir *dir,
struct trace_array *tr,
char *filter_str, struct event_filter **filterp)
{
@@ -1976,9 +1976,9 @@ static int create_system_filter(struct ftrace_subsystem_dir *dir,
}
/* caller must hold event_mutex */
-int apply_event_filter(struct ftrace_event_file *file, char *filter_string)
+int apply_event_filter(struct trace_event_file *file, char *filter_string)
{
- struct ftrace_event_call *call = file->event_call;
+ struct trace_event_call *call = file->event_call;
struct event_filter *filter;
int err;
@@ -2027,7 +2027,7 @@ int apply_event_filter(struct ftrace_event_file *file, char *filter_string)
return err;
}
-int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
+int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
char *filter_string)
{
struct event_subsystem *system = dir->subsystem;
@@ -2226,7 +2226,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
{
int err;
struct event_filter *filter;
- struct ftrace_event_call *call;
+ struct trace_event_call *call;
mutex_lock(&event_mutex);
@@ -2282,7 +2282,7 @@ out_unlock:
static struct test_filter_data_t {
char *filter;
- struct ftrace_raw_ftrace_test_filter rec;
+ struct trace_event_raw_ftrace_test_filter rec;
int match;
char *not_visited;
} test_filter_data[] = {
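
Every filter helper in this file branches the same way: when the call has TRACE_EVENT_FL_USE_CALL_FILTER set, filter state lives on the shared trace_event_call; otherwise it lives on the per-instance trace_event_file. Condensed into one illustrative predicate (a sketch, not a symbol in the patch):

	static bool event_is_filtered(struct trace_event_file *file)
	{
		struct trace_event_call *call = file->event_call;

		if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
			return call->flags & TRACE_EVENT_FL_FILTERED;

		return file->flags & EVENT_FILE_FL_FILTERED;
	}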
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 8712df9decb4..42a4009fd75a 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -40,7 +40,7 @@ trigger_data_free(struct event_trigger_data *data)
/**
* event_triggers_call - Call triggers associated with a trace event
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
* @rec: The trace entry for the event, NULL for unconditional invocation
*
* For each trigger associated with an event, invoke the trigger
@@ -63,7 +63,7 @@ trigger_data_free(struct event_trigger_data *data)
* any trigger that should be deferred, ETT_NONE if nothing to defer.
*/
enum event_trigger_type
-event_triggers_call(struct ftrace_event_file *file, void *rec)
+event_triggers_call(struct trace_event_file *file, void *rec)
{
struct event_trigger_data *data;
enum event_trigger_type tt = ETT_NONE;
@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(event_triggers_call);
/**
* event_triggers_post_call - Call 'post_triggers' for a trace event
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
* @tt: enum event_trigger_type containing a set bit for each trigger to invoke
*
* For each trigger associated with an event, invoke the trigger
@@ -103,7 +103,7 @@ EXPORT_SYMBOL_GPL(event_triggers_call);
* Called from tracepoint handlers (with rcu_read_lock_sched() held).
*/
void
-event_triggers_post_call(struct ftrace_event_file *file,
+event_triggers_post_call(struct trace_event_file *file,
enum event_trigger_type tt)
{
struct event_trigger_data *data;
@@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(event_triggers_post_call);
static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
- struct ftrace_event_file *event_file = event_file_data(m->private);
+ struct trace_event_file *event_file = event_file_data(m->private);
if (t == SHOW_AVAILABLE_TRIGGERS)
return NULL;
@@ -129,7 +129,7 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
- struct ftrace_event_file *event_file;
+ struct trace_event_file *event_file;
/* ->stop() is called even if ->start() fails */
mutex_lock(&event_mutex);
@@ -201,7 +201,7 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
return ret;
}
-static int trigger_process_regex(struct ftrace_event_file *file, char *buff)
+static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
char *command, *next = buff;
struct event_command *p;
@@ -227,7 +227,7 @@ static ssize_t event_trigger_regex_write(struct file *file,
const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- struct ftrace_event_file *event_file;
+ struct trace_event_file *event_file;
ssize_t ret;
char *buf;
@@ -430,7 +430,7 @@ event_trigger_free(struct event_trigger_ops *ops,
trigger_data_free(data);
}
-static int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
+static int trace_event_trigger_enable_disable(struct trace_event_file *file,
int trigger_enable)
{
int ret = 0;
@@ -438,12 +438,12 @@ static int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
if (trigger_enable) {
if (atomic_inc_return(&file->tm_ref) > 1)
return ret;
- set_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags);
+ set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
ret = trace_event_enable_disable(file, 1, 1);
} else {
if (atomic_dec_return(&file->tm_ref) > 0)
return ret;
- clear_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags);
+ clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
ret = trace_event_enable_disable(file, 0, 1);
}
@@ -466,7 +466,7 @@ static int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
void
clear_event_triggers(struct trace_array *tr)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
list_for_each_entry(file, &tr->events, list) {
struct event_trigger_data *data;
@@ -480,7 +480,7 @@ clear_event_triggers(struct trace_array *tr)
/**
* update_cond_flag - Set or reset the TRIGGER_COND bit
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
*
* If an event has triggers and any of those triggers has a filter or
* a post_trigger, trigger invocation needs to be deferred until after
@@ -488,7 +488,7 @@ clear_event_triggers(struct trace_array *tr)
* its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
* cleared.
*/
-static void update_cond_flag(struct ftrace_event_file *file)
+static void update_cond_flag(struct trace_event_file *file)
{
struct event_trigger_data *data;
bool set_cond = false;
@@ -501,9 +501,9 @@ static void update_cond_flag(struct ftrace_event_file *file)
}
if (set_cond)
- set_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags);
+ set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
else
- clear_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags);
+ clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}
/**
@@ -511,7 +511,7 @@ static void update_cond_flag(struct ftrace_event_file *file)
* @glob: The raw string used to register the trigger
* @ops: The trigger ops associated with the trigger
* @data: Trigger-specific data to associate with the trigger
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
*
* Common implementation for event trigger registration.
*
@@ -522,7 +522,7 @@ static void update_cond_flag(struct ftrace_event_file *file)
*/
static int register_trigger(char *glob, struct event_trigger_ops *ops,
struct event_trigger_data *data,
- struct ftrace_event_file *file)
+ struct trace_event_file *file)
{
struct event_trigger_data *test;
int ret = 0;
@@ -557,7 +557,7 @@ out:
* @glob: The raw string used to register the trigger
* @ops: The trigger ops associated with the trigger
* @test: Trigger-specific data used to find the trigger to remove
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
*
* Common implementation for event trigger unregistration.
*
@@ -566,7 +566,7 @@ out:
*/
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
struct event_trigger_data *test,
- struct ftrace_event_file *file)
+ struct trace_event_file *file)
{
struct event_trigger_data *data;
bool unregistered = false;
@@ -588,7 +588,7 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
/**
* event_trigger_callback - Generic event_command @func implementation
* @cmd_ops: The command ops, used for trigger registration
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
* @glob: The raw string used to register the trigger
* @cmd: The cmd portion of the string used to register the trigger
* @param: The params portion of the string used to register the trigger
@@ -603,7 +603,7 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
*/
static int
event_trigger_callback(struct event_command *cmd_ops,
- struct ftrace_event_file *file,
+ struct trace_event_file *file,
char *glob, char *cmd, char *param)
{
struct event_trigger_data *trigger_data;
@@ -688,7 +688,7 @@ event_trigger_callback(struct event_command *cmd_ops,
* set_trigger_filter - Generic event_command @set_filter implementation
* @filter_str: The filter string for the trigger, NULL to remove filter
* @trigger_data: Trigger-specific data
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
*
* Common implementation for event command filter parsing and filter
* instantiation.
@@ -702,7 +702,7 @@ event_trigger_callback(struct event_command *cmd_ops,
*/
static int set_trigger_filter(char *filter_str,
struct event_trigger_data *trigger_data,
- struct ftrace_event_file *file)
+ struct trace_event_file *file)
{
struct event_trigger_data *data = trigger_data;
struct event_filter *filter = NULL, *tmp;
@@ -900,7 +900,7 @@ snapshot_count_trigger(struct event_trigger_data *data)
static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
struct event_trigger_data *data,
- struct ftrace_event_file *file)
+ struct trace_event_file *file)
{
int ret = register_trigger(glob, ops, data, file);
@@ -968,7 +968,7 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
* Skip 3:
* stacktrace_trigger()
* event_triggers_post_call()
- * ftrace_raw_event_xxx()
+ * trace_event_raw_event_xxx()
*/
#define STACK_SKIP 3
@@ -1053,7 +1053,7 @@ static __init void unregister_trigger_traceon_traceoff_cmds(void)
#define DISABLE_EVENT_STR "disable_event"
struct enable_trigger_data {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
bool enable;
};
@@ -1063,9 +1063,9 @@ event_enable_trigger(struct event_trigger_data *data)
struct enable_trigger_data *enable_data = data->private_data;
if (enable_data->enable)
- clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
+ clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
else
- set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
+ set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}
static void
@@ -1077,7 +1077,7 @@ event_enable_count_trigger(struct event_trigger_data *data)
return;
/* Skip if the event is in a state we want to switch to */
- if (enable_data->enable == !(enable_data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+ if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
return;
if (data->count != -1)
@@ -1095,7 +1095,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
seq_printf(m, "%s:%s:%s",
enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
enable_data->file->event_call->class->system,
- ftrace_event_name(enable_data->file->event_call));
+ trace_event_name(enable_data->file->event_call));
if (data->count == -1)
seq_puts(m, ":unlimited");
@@ -1159,10 +1159,10 @@ static struct event_trigger_ops event_disable_count_trigger_ops = {
static int
event_enable_trigger_func(struct event_command *cmd_ops,
- struct ftrace_event_file *file,
+ struct trace_event_file *file,
char *glob, char *cmd, char *param)
{
- struct ftrace_event_file *event_enable_file;
+ struct trace_event_file *event_enable_file;
struct enable_trigger_data *enable_data;
struct event_trigger_data *trigger_data;
struct event_trigger_ops *trigger_ops;
@@ -1294,7 +1294,7 @@ event_enable_trigger_func(struct event_command *cmd_ops,
static int event_enable_register_trigger(char *glob,
struct event_trigger_ops *ops,
struct event_trigger_data *data,
- struct ftrace_event_file *file)
+ struct trace_event_file *file)
{
struct enable_trigger_data *enable_data = data->private_data;
struct enable_trigger_data *test_enable_data;
@@ -1331,7 +1331,7 @@ out:
static void event_enable_unregister_trigger(char *glob,
struct event_trigger_ops *ops,
struct event_trigger_data *test,
- struct ftrace_event_file *file)
+ struct trace_event_file *file)
{
struct enable_trigger_data *test_enable_data = test->private_data;
struct enable_trigger_data *enable_data;
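
The enable/disable_event triggers above toggle a single bit on the target file's flags word; that soft-disable bit is what trace_trigger_soft_disabled() tests on the event fast path. A sketch of the toggle with the renamed bit (my_set_soft_disable() is a hypothetical wrapper, not a kernel symbol):

	static void my_set_soft_disable(struct trace_event_file *file, bool disable)
	{
		if (disable)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
	}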
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 174a6a71146c..adabf7da9113 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -125,7 +125,7 @@ static void __always_unused ____ftrace_check_##name(void) \
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
static int __init \
-ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
+ftrace_define_fields_##name(struct trace_event_call *event_call) \
{ \
struct struct_name field; \
int ret; \
@@ -163,14 +163,14 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\
regfn) \
\
-struct ftrace_event_class __refdata event_class_ftrace_##call = { \
+struct trace_event_class __refdata event_class_ftrace_##call = { \
.system = __stringify(TRACE_SYSTEM), \
.define_fields = ftrace_define_fields_##call, \
.fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
.reg = regfn, \
}; \
\
-struct ftrace_event_call __used event_##call = { \
+struct trace_event_call __used event_##call = { \
.class = &event_class_ftrace_##call, \
{ \
.name = #call, \
@@ -179,7 +179,7 @@ struct ftrace_event_call __used event_##call = { \
.print_fmt = print, \
.flags = TRACE_EVENT_FL_IGNORE_ENABLE, \
}; \
-struct ftrace_event_call __used \
+struct trace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
#undef FTRACE_ENTRY
@@ -187,7 +187,7 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
FTRACE_ENTRY_REG(call, struct_name, etype, \
PARAMS(tstruct), PARAMS(print), filter, NULL)
-int ftrace_event_is_function(struct ftrace_event_call *call)
+int ftrace_event_is_function(struct trace_event_call *call)
{
return call == &event_function;
}
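
Hand-expanding the macro shows what each FTRACE_ENTRY_REG() instantiation now emits; note the _ftrace_events section name is deliberately unchanged, only the C type names move. Roughly, for a made-up event "foo" (a sketch; the real expansion also generates ftrace_define_fields_foo() and the static check function):

	struct trace_event_class __refdata event_class_ftrace_foo = {
		.system		= __stringify(TRACE_SYSTEM),
		.define_fields	= ftrace_define_fields_foo,
		.fields		= LIST_HEAD_INIT(event_class_ftrace_foo.fields),
		.reg		= NULL,			/* the 'regfn' argument */
	};

	struct trace_event_call __used event_foo = {
		.class		= &event_class_ftrace_foo,
		{
			.name	= "foo",
		},
		.event.type	= TRACE_FOO,		/* the 'etype' argument */
		.print_fmt	= "...",		/* the 'print' argument */
		.flags		= TRACE_EVENT_FL_IGNORE_ENABLE,
	};

	struct trace_event_call __used
	__attribute__((section("_ftrace_events"))) *__event_foo = &event_foo;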
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a51e79688455..8968bf720c12 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -278,7 +278,7 @@ int __trace_graph_entry(struct trace_array *tr,
unsigned long flags,
int pc)
{
- struct ftrace_event_call *call = &event_funcgraph_entry;
+ struct trace_event_call *call = &event_funcgraph_entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ftrace_graph_ent_entry *entry;
@@ -393,7 +393,7 @@ void __trace_graph_return(struct trace_array *tr,
unsigned long flags,
int pc)
{
- struct ftrace_event_call *call = &event_funcgraph_exit;
+ struct trace_event_call *call = &event_funcgraph_exit;
struct ring_buffer_event *event;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ftrace_graph_ret_entry *entry;
@@ -1454,12 +1454,12 @@ static __init int init_graph_trace(void)
{
max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
- if (!register_ftrace_event(&graph_trace_entry_event)) {
+ if (!register_trace_event(&graph_trace_entry_event)) {
pr_warning("Warning: could not register graph trace events\n");
return 1;
}
- if (!register_ftrace_event(&graph_trace_ret_event)) {
+ if (!register_trace_event(&graph_trace_ret_event)) {
pr_warning("Warning: could not register graph trace events\n");
return 1;
}
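
register_trace_event() returns the event type number it assigned, or zero when none is available, which is why init_graph_trace() treats a zero return as failure. A registration sketch under that convention (my_trace_output and the event are hypothetical):

	#include <linux/trace_events.h>

	static enum print_line_t my_trace_output(struct trace_iterator *iter,
						 int flags,
						 struct trace_event *event);

	static struct trace_event_functions my_funcs = {
		.trace	= my_trace_output,
	};

	static struct trace_event my_trace_event = {
		.funcs	= &my_funcs,
	};

	static int __init my_output_init(void)
	{
		/* Zero means no event type number could be allocated. */
		if (!register_trace_event(&my_trace_event)) {
			pr_warning("Warning: could not register my trace event\n");
			return 1;
		}
		return 0;
	}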
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d0ce590f06e1..b7d0cdd9906c 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -348,7 +348,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
struct trace_kprobe *tk;
list_for_each_entry(tk, &probe_list, list)
- if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 &&
+ if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
strcmp(tk->tp.call.class->system, group) == 0)
return tk;
return NULL;
@@ -359,7 +359,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
* if the file is NULL, enable "perf" handler, or enable "trace" handler.
*/
static int
-enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
+enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
int ret = 0;
@@ -394,7 +394,7 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
* if the file is NULL, disable "perf" handler, or disable "trace" handler.
*/
static int
-disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
+disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
struct event_file_link *link = NULL;
int wait = 0;
@@ -523,7 +523,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
mutex_lock(&probe_lock);
/* Delete old (same name) event if exist */
- old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call),
+ old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
tk->tp.call.class->system);
if (old_tk) {
ret = unregister_trace_kprobe(old_tk);
@@ -572,7 +572,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
if (ret)
pr_warning("Failed to re-register probe %s on"
"%s: %d\n",
- ftrace_event_name(&tk->tp.call),
+ trace_event_name(&tk->tp.call),
mod->name, ret);
}
}
@@ -829,7 +829,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
seq_printf(m, ":%s/%s", tk->tp.call.class->system,
- ftrace_event_name(&tk->tp.call));
+ trace_event_name(&tk->tp.call));
if (!tk->symbol)
seq_printf(m, " 0x%p", tk->rp.kp.addr);
@@ -888,7 +888,7 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
struct trace_kprobe *tk = v;
seq_printf(m, " %-44s %15lu %15lu\n",
- ftrace_event_name(&tk->tp.call), tk->nhit,
+ trace_event_name(&tk->tp.call), tk->nhit,
tk->rp.kp.nmissed);
return 0;
@@ -917,18 +917,18 @@ static const struct file_operations kprobe_profile_ops = {
/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
- struct ftrace_event_file *ftrace_file)
+ struct trace_event_file *trace_file)
{
struct kprobe_trace_entry_head *entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
int size, dsize, pc;
unsigned long irq_flags;
- struct ftrace_event_call *call = &tk->tp.call;
+ struct trace_event_call *call = &tk->tp.call;
- WARN_ON(call != ftrace_file->event_call);
+ WARN_ON(call != trace_file->event_call);
- if (ftrace_trigger_soft_disabled(ftrace_file))
+ if (trace_trigger_soft_disabled(trace_file))
return;
local_save_flags(irq_flags);
@@ -937,7 +937,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
dsize = __get_data_size(&tk->tp, regs);
size = sizeof(*entry) + tk->tp.size + dsize;
- event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+ event = trace_event_buffer_lock_reserve(&buffer, trace_file,
call->event.type,
size, irq_flags, pc);
if (!event)
@@ -947,7 +947,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
entry->ip = (unsigned long)tk->rp.kp.addr;
store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
- event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
+ event_trigger_unlock_commit_regs(trace_file, buffer, event,
entry, irq_flags, pc, regs);
}
@@ -965,18 +965,18 @@ NOKPROBE_SYMBOL(kprobe_trace_func);
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
struct pt_regs *regs,
- struct ftrace_event_file *ftrace_file)
+ struct trace_event_file *trace_file)
{
struct kretprobe_trace_entry_head *entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
int size, pc, dsize;
unsigned long irq_flags;
- struct ftrace_event_call *call = &tk->tp.call;
+ struct trace_event_call *call = &tk->tp.call;
- WARN_ON(call != ftrace_file->event_call);
+ WARN_ON(call != trace_file->event_call);
- if (ftrace_trigger_soft_disabled(ftrace_file))
+ if (trace_trigger_soft_disabled(trace_file))
return;
local_save_flags(irq_flags);
@@ -985,7 +985,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
dsize = __get_data_size(&tk->tp, regs);
size = sizeof(*entry) + tk->tp.size + dsize;
- event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+ event = trace_event_buffer_lock_reserve(&buffer, trace_file,
call->event.type,
size, irq_flags, pc);
if (!event)
@@ -996,7 +996,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
entry->ret_ip = (unsigned long)ri->ret_addr;
store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
- event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
+ event_trigger_unlock_commit_regs(trace_file, buffer, event,
entry, irq_flags, pc, regs);
}
@@ -1025,7 +1025,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
field = (struct kprobe_trace_entry_head *)iter->ent;
tp = container_of(event, struct trace_probe, call.event);
- trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call));
+ trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
goto out;
@@ -1056,7 +1056,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
field = (struct kretprobe_trace_entry_head *)iter->ent;
tp = container_of(event, struct trace_probe, call.event);
- trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call));
+ trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
goto out;
@@ -1081,7 +1081,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
}
-static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
+static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
int ret, i;
struct kprobe_trace_entry_head field;
@@ -1104,7 +1104,7 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
return 0;
}
-static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
+static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
int ret, i;
struct kretprobe_trace_entry_head field;
@@ -1134,7 +1134,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
- struct ftrace_event_call *call = &tk->tp.call;
+ struct trace_event_call *call = &tk->tp.call;
struct bpf_prog *prog = call->prog;
struct kprobe_trace_entry_head *entry;
struct hlist_head *head;
@@ -1169,7 +1169,7 @@ static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
struct pt_regs *regs)
{
- struct ftrace_event_call *call = &tk->tp.call;
+ struct trace_event_call *call = &tk->tp.call;
struct bpf_prog *prog = call->prog;
struct kretprobe_trace_entry_head *entry;
struct hlist_head *head;
@@ -1206,11 +1206,11 @@ NOKPROBE_SYMBOL(kretprobe_perf_func);
* kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
* lockless, but we can't race with this __init function.
*/
-static int kprobe_register(struct ftrace_event_call *event,
+static int kprobe_register(struct trace_event_call *event,
enum trace_reg type, void *data)
{
struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
- struct ftrace_event_file *file = data;
+ struct trace_event_file *file = data;
switch (type) {
case TRACE_REG_REGISTER:
@@ -1276,10 +1276,10 @@ static struct trace_event_functions kprobe_funcs = {
static int register_kprobe_event(struct trace_kprobe *tk)
{
- struct ftrace_event_call *call = &tk->tp.call;
+ struct trace_event_call *call = &tk->tp.call;
int ret;
- /* Initialize ftrace_event_call */
+ /* Initialize trace_event_call */
INIT_LIST_HEAD(&call->class->fields);
if (trace_kprobe_is_return(tk)) {
call->event.funcs = &kretprobe_funcs;
@@ -1290,7 +1290,7 @@ static int register_kprobe_event(struct trace_kprobe *tk)
}
if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
return -ENOMEM;
- ret = register_ftrace_event(&call->event);
+ ret = register_trace_event(&call->event);
if (!ret) {
kfree(call->print_fmt);
return -ENODEV;
@@ -1301,9 +1301,9 @@ static int register_kprobe_event(struct trace_kprobe *tk)
ret = trace_add_event_call(call);
if (ret) {
pr_info("Failed to register kprobe event: %s\n",
- ftrace_event_name(call));
+ trace_event_name(call));
kfree(call->print_fmt);
- unregister_ftrace_event(&call->event);
+ unregister_trace_event(&call->event);
}
return ret;
}
@@ -1364,10 +1364,10 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
return a1 + a2 + a3 + a4 + a5 + a6;
}
-static struct ftrace_event_file *
+static struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
list_for_each_entry(file, &tr->events, list)
if (file->event_call == &tk->tp.call)
@@ -1385,7 +1385,7 @@ static __init int kprobe_trace_self_tests_init(void)
int ret, warn = 0;
int (*target)(int, int, int, int, int, int);
struct trace_kprobe *tk;
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
if (tracing_is_disabled())
return -ENODEV;
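
Apart from the identifiers, the probe fast path is untouched. Condensed, the shape shared by __kprobe_trace_func() and __kretprobe_trace_func() is the following (a sketch with argument storage and sizing elided; the kprobe code uses the event_trigger_unlock_commit_regs() variant to also pass pt_regs):

	static void my_probe_trace_func(struct trace_event_file *trace_file,
					int size)
	{
		struct trace_event_call *call = trace_file->event_call;
		struct ring_buffer_event *event;
		struct ring_buffer *buffer;
		unsigned long irq_flags;
		int pc;

		if (trace_trigger_soft_disabled(trace_file))
			return;

		local_save_flags(irq_flags);
		pc = preempt_count();

		event = trace_event_buffer_lock_reserve(&buffer, trace_file,
							call->event.type,
							size, irq_flags, pc);
		if (!event)
			return;

		/* ... store the probe arguments into the reserved entry ... */

		event_trigger_unlock_commit(trace_file, buffer, event,
					    ring_buffer_event_data(event),
					    irq_flags, pc);
	}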
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 7a9ba62e9fef..638e110c5bfd 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -298,7 +298,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
struct trace_array_cpu *data,
struct mmiotrace_rw *rw)
{
- struct ftrace_event_call *call = &event_mmiotrace_rw;
+ struct trace_event_call *call = &event_mmiotrace_rw;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry;
@@ -328,7 +328,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
struct trace_array_cpu *data,
struct mmiotrace_map *map)
{
- struct ftrace_event_call *call = &event_mmiotrace_map;
+ struct trace_event_call *call = &event_mmiotrace_map;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 25a086bcb700..dfab253727dc 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -60,9 +60,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
}
const char *
-ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
- unsigned long flags,
- const struct trace_print_flags *flag_array)
+trace_print_flags_seq(struct trace_seq *p, const char *delim,
+ unsigned long flags,
+ const struct trace_print_flags *flag_array)
{
unsigned long mask;
const char *str;
@@ -95,11 +95,11 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
return ret;
}
-EXPORT_SYMBOL(ftrace_print_flags_seq);
+EXPORT_SYMBOL(trace_print_flags_seq);
const char *
-ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
- const struct trace_print_flags *symbol_array)
+trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+ const struct trace_print_flags *symbol_array)
{
int i;
const char *ret = trace_seq_buffer_ptr(p);
@@ -120,11 +120,11 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
return ret;
}
-EXPORT_SYMBOL(ftrace_print_symbols_seq);
+EXPORT_SYMBOL(trace_print_symbols_seq);
#if BITS_PER_LONG == 32
const char *
-ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
+trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
const struct trace_print_flags_u64 *symbol_array)
{
int i;
@@ -146,12 +146,12 @@ ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
return ret;
}
-EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
+EXPORT_SYMBOL(trace_print_symbols_seq_u64);
#endif
const char *
-ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
- unsigned int bitmask_size)
+trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+ unsigned int bitmask_size)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -160,10 +160,10 @@ ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
return ret;
}
-EXPORT_SYMBOL_GPL(ftrace_print_bitmask_seq);
+EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
const char *
-ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
+trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
int i;
const char *ret = trace_seq_buffer_ptr(p);
@@ -175,11 +175,11 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
return ret;
}
-EXPORT_SYMBOL(ftrace_print_hex_seq);
+EXPORT_SYMBOL(trace_print_hex_seq);
const char *
-ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count,
- size_t el_size)
+trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
+ size_t el_size)
{
const char *ret = trace_seq_buffer_ptr(p);
const char *prefix = "";
@@ -220,17 +220,17 @@ ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count,
return ret;
}
-EXPORT_SYMBOL(ftrace_print_array_seq);
+EXPORT_SYMBOL(trace_print_array_seq);
-int ftrace_raw_output_prep(struct trace_iterator *iter,
- struct trace_event *trace_event)
+int trace_raw_output_prep(struct trace_iterator *iter,
+ struct trace_event *trace_event)
{
- struct ftrace_event_call *event;
+ struct trace_event_call *event;
struct trace_seq *s = &iter->seq;
struct trace_seq *p = &iter->tmp_seq;
struct trace_entry *entry;
- event = container_of(trace_event, struct ftrace_event_call, event);
+ event = container_of(trace_event, struct trace_event_call, event);
entry = iter->ent;
if (entry->type != event->event.type) {
@@ -239,14 +239,14 @@ int ftrace_raw_output_prep(struct trace_iterator *iter,
}
trace_seq_init(p);
- trace_seq_printf(s, "%s: ", ftrace_event_name(event));
+ trace_seq_printf(s, "%s: ", trace_event_name(event));
return trace_handle_return(s);
}
-EXPORT_SYMBOL(ftrace_raw_output_prep);
+EXPORT_SYMBOL(trace_raw_output_prep);
-static int ftrace_output_raw(struct trace_iterator *iter, char *name,
- char *fmt, va_list ap)
+static int trace_output_raw(struct trace_iterator *iter, char *name,
+ char *fmt, va_list ap)
{
struct trace_seq *s = &iter->seq;
@@ -256,18 +256,18 @@ static int ftrace_output_raw(struct trace_iterator *iter, char *name,
return trace_handle_return(s);
}
-int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
+int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
va_list ap;
int ret;
va_start(ap, fmt);
- ret = ftrace_output_raw(iter, name, fmt, ap);
+ ret = trace_output_raw(iter, name, fmt, ap);
va_end(ap);
return ret;
}
-EXPORT_SYMBOL_GPL(ftrace_output_call);
+EXPORT_SYMBOL_GPL(trace_output_call);
#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
@@ -675,7 +675,7 @@ static int trace_search_list(struct list_head **list)
}
/* Did we use up all 65 thousand events??? */
- if ((last + 1) > FTRACE_MAX_EVENT)
+ if ((last + 1) > TRACE_EVENT_TYPE_MAX)
return 0;
*list = &e->list;
@@ -693,7 +693,7 @@ void trace_event_read_unlock(void)
}
/**
- * register_ftrace_event - register output for an event type
+ * register_trace_event - register output for an event type
* @event: the event type to register
*
* Event types are stored in a hash and this hash is used to
@@ -707,7 +707,7 @@ void trace_event_read_unlock(void)
*
* Returns the event type number or zero on error.
*/
-int register_ftrace_event(struct trace_event *event)
+int register_trace_event(struct trace_event *event)
{
unsigned key;
int ret = 0;
@@ -725,7 +725,7 @@ int register_ftrace_event(struct trace_event *event)
if (!event->type) {
struct list_head *list = NULL;
- if (next_event_type > FTRACE_MAX_EVENT) {
+ if (next_event_type > TRACE_EVENT_TYPE_MAX) {
event->type = trace_search_list(&list);
if (!event->type)
@@ -771,12 +771,12 @@ int register_ftrace_event(struct trace_event *event)
return ret;
}
-EXPORT_SYMBOL_GPL(register_ftrace_event);
+EXPORT_SYMBOL_GPL(register_trace_event);
/*
* Used by module code with the trace_event_sem held for write.
*/
-int __unregister_ftrace_event(struct trace_event *event)
+int __unregister_trace_event(struct trace_event *event)
{
hlist_del(&event->node);
list_del(&event->list);
@@ -784,18 +784,18 @@ int __unregister_ftrace_event(struct trace_event *event)
}
/**
- * unregister_ftrace_event - remove a no longer used event
+ * unregister_trace_event - remove a no longer used event
* @event: the event to remove
*/
-int unregister_ftrace_event(struct trace_event *event)
+int unregister_trace_event(struct trace_event *event)
{
down_write(&trace_event_sem);
- __unregister_ftrace_event(event);
+ __unregister_trace_event(event);
up_write(&trace_event_sem);
return 0;
}
-EXPORT_SYMBOL_GPL(unregister_ftrace_event);
+EXPORT_SYMBOL_GPL(unregister_trace_event);
/*
* Standard events
@@ -1243,7 +1243,7 @@ __init static int init_events(void)
for (i = 0; events[i]; i++) {
event = events[i];
- ret = register_ftrace_event(event);
+ ret = register_trace_event(event);
if (!ret) {
printk(KERN_WARNING "event %d failed to register\n",
event->type);
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 8ef2c40efb3c..4cbfe85b99c8 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -32,7 +32,7 @@ extern int
trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry);
/* used by module unregistering */
-extern int __unregister_ftrace_event(struct trace_event *event);
+extern int __unregister_trace_event(struct trace_event *event);
extern struct rw_semaphore trace_event_sem;
#define SEQ_PUT_FIELD(s, x) \
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index ab283e146b70..b98dee914542 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -272,8 +272,8 @@ struct probe_arg {
struct trace_probe {
unsigned int flags; /* For TP_FLAG_* */
- struct ftrace_event_class class;
- struct ftrace_event_call call;
+ struct trace_event_class class;
+ struct trace_event_call call;
struct list_head files;
ssize_t size; /* trace entry size */
unsigned int nr_args;
@@ -281,7 +281,7 @@ struct trace_probe {
};
struct event_file_link {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
struct list_head list;
};
@@ -314,7 +314,7 @@ static inline int is_good_name(const char *name)
}
static inline struct event_file_link *
-find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
+find_event_file_link(struct trace_probe *tp, struct trace_event_file *file)
{
struct event_file_link *link;
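
Because struct trace_probe embeds its trace_event_call (and, through it, the struct trace_event), the kprobe and uprobe print callbacks can recover the probe from the event they are handed, as print_kprobe_event() does above. The container_of() step, isolated as a sketch:

	static struct trace_probe *event_to_trace_probe(struct trace_event *event)
	{
		return container_of(event, struct trace_probe, call.event);
	}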
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index d6e1003724e9..9b33dd117f3f 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -369,7 +369,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
struct task_struct *next,
unsigned long flags, int pc)
{
- struct ftrace_event_call *call = &event_context_switch;
+ struct trace_event_call *call = &event_context_switch;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
@@ -397,7 +397,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
struct task_struct *curr,
unsigned long flags, int pc)
{
- struct ftrace_event_call *call = &event_wakeup;
+ struct trace_event_call *call = &event_wakeup;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index f97f6e3a676c..7d567a4b9fa7 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -13,13 +13,13 @@
static DEFINE_MUTEX(syscall_trace_lock);
-static int syscall_enter_register(struct ftrace_event_call *event,
+static int syscall_enter_register(struct trace_event_call *event,
enum trace_reg type, void *data);
-static int syscall_exit_register(struct ftrace_event_call *event,
+static int syscall_exit_register(struct trace_event_call *event,
enum trace_reg type, void *data);
static struct list_head *
-syscall_get_enter_fields(struct ftrace_event_call *call)
+syscall_get_enter_fields(struct trace_event_call *call)
{
struct syscall_metadata *entry = call->data;
@@ -219,7 +219,7 @@ __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
return pos;
}
-static int __init set_syscall_print_fmt(struct ftrace_event_call *call)
+static int __init set_syscall_print_fmt(struct trace_event_call *call)
{
char *print_fmt;
int len;
@@ -244,7 +244,7 @@ static int __init set_syscall_print_fmt(struct ftrace_event_call *call)
return 0;
}
-static void __init free_syscall_print_fmt(struct ftrace_event_call *call)
+static void __init free_syscall_print_fmt(struct trace_event_call *call)
{
struct syscall_metadata *entry = call->data;
@@ -252,7 +252,7 @@ static void __init free_syscall_print_fmt(struct ftrace_event_call *call)
kfree(call->print_fmt);
}
-static int __init syscall_enter_define_fields(struct ftrace_event_call *call)
+static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
struct syscall_trace_enter trace;
struct syscall_metadata *meta = call->data;
@@ -275,7 +275,7 @@ static int __init syscall_enter_define_fields(struct ftrace_event_call *call)
return ret;
}
-static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
+static int __init syscall_exit_define_fields(struct trace_event_call *call)
{
struct syscall_trace_exit trace;
int ret;
@@ -293,7 +293,7 @@ static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
struct trace_array *tr = data;
- struct ftrace_event_file *ftrace_file;
+ struct trace_event_file *trace_file;
struct syscall_trace_enter *entry;
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
@@ -308,11 +308,11 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
return;
/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
- ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
- if (!ftrace_file)
+ trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
+ if (!trace_file)
return;
- if (ftrace_trigger_soft_disabled(ftrace_file))
+ if (trace_trigger_soft_disabled(trace_file))
return;
sys_data = syscall_nr_to_meta(syscall_nr);
@@ -334,14 +334,14 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
entry->nr = syscall_nr;
syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
- event_trigger_unlock_commit(ftrace_file, buffer, event, entry,
+ event_trigger_unlock_commit(trace_file, buffer, event, entry,
irq_flags, pc);
}
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
struct trace_array *tr = data;
- struct ftrace_event_file *ftrace_file;
+ struct trace_event_file *trace_file;
struct syscall_trace_exit *entry;
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
@@ -355,11 +355,11 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
return;
/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
- ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
- if (!ftrace_file)
+ trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
+ if (!trace_file)
return;
- if (ftrace_trigger_soft_disabled(ftrace_file))
+ if (trace_trigger_soft_disabled(trace_file))
return;
sys_data = syscall_nr_to_meta(syscall_nr);
@@ -380,12 +380,12 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
entry->nr = syscall_nr;
entry->ret = syscall_get_return_value(current, regs);
- event_trigger_unlock_commit(ftrace_file, buffer, event, entry,
+ event_trigger_unlock_commit(trace_file, buffer, event, entry,
irq_flags, pc);
}
-static int reg_event_syscall_enter(struct ftrace_event_file *file,
- struct ftrace_event_call *call)
+static int reg_event_syscall_enter(struct trace_event_file *file,
+ struct trace_event_call *call)
{
struct trace_array *tr = file->tr;
int ret = 0;
@@ -405,8 +405,8 @@ static int reg_event_syscall_enter(struct ftrace_event_file *file,
return ret;
}
-static void unreg_event_syscall_enter(struct ftrace_event_file *file,
- struct ftrace_event_call *call)
+static void unreg_event_syscall_enter(struct trace_event_file *file,
+ struct trace_event_call *call)
{
struct trace_array *tr = file->tr;
int num;
@@ -422,8 +422,8 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
mutex_unlock(&syscall_trace_lock);
}
-static int reg_event_syscall_exit(struct ftrace_event_file *file,
- struct ftrace_event_call *call)
+static int reg_event_syscall_exit(struct trace_event_file *file,
+ struct trace_event_call *call)
{
struct trace_array *tr = file->tr;
int ret = 0;
@@ -443,8 +443,8 @@ static int reg_event_syscall_exit(struct ftrace_event_file *file,
return ret;
}
-static void unreg_event_syscall_exit(struct ftrace_event_file *file,
- struct ftrace_event_call *call)
+static void unreg_event_syscall_exit(struct trace_event_file *file,
+ struct trace_event_call *call)
{
struct trace_array *tr = file->tr;
int num;
@@ -460,7 +460,7 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
mutex_unlock(&syscall_trace_lock);
}
-static int __init init_syscall_trace(struct ftrace_event_call *call)
+static int __init init_syscall_trace(struct trace_event_call *call)
{
int id;
int num;
@@ -493,7 +493,7 @@ struct trace_event_functions exit_syscall_print_funcs = {
.trace = print_syscall_exit,
};
-struct ftrace_event_class __refdata event_class_syscall_enter = {
+struct trace_event_class __refdata event_class_syscall_enter = {
.system = "syscalls",
.reg = syscall_enter_register,
.define_fields = syscall_enter_define_fields,
@@ -501,7 +501,7 @@ struct ftrace_event_class __refdata event_class_syscall_enter = {
.raw_init = init_syscall_trace,
};
-struct ftrace_event_class __refdata event_class_syscall_exit = {
+struct trace_event_class __refdata event_class_syscall_exit = {
.system = "syscalls",
.reg = syscall_exit_register,
.define_fields = syscall_exit_define_fields,
@@ -584,7 +584,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
-static int perf_sysenter_enable(struct ftrace_event_call *call)
+static int perf_sysenter_enable(struct trace_event_call *call)
{
int ret = 0;
int num;
@@ -605,7 +605,7 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
return ret;
}
-static void perf_sysenter_disable(struct ftrace_event_call *call)
+static void perf_sysenter_disable(struct trace_event_call *call)
{
int num;
@@ -656,7 +656,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
-static int perf_sysexit_enable(struct ftrace_event_call *call)
+static int perf_sysexit_enable(struct trace_event_call *call)
{
int ret = 0;
int num;
@@ -677,7 +677,7 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
return ret;
}
-static void perf_sysexit_disable(struct ftrace_event_call *call)
+static void perf_sysexit_disable(struct trace_event_call *call)
{
int num;
@@ -693,10 +693,10 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
#endif /* CONFIG_PERF_EVENTS */
-static int syscall_enter_register(struct ftrace_event_call *event,
+static int syscall_enter_register(struct trace_event_call *event,
enum trace_reg type, void *data)
{
- struct ftrace_event_file *file = data;
+ struct trace_event_file *file = data;
switch (type) {
case TRACE_REG_REGISTER:
@@ -721,10 +721,10 @@ static int syscall_enter_register(struct ftrace_event_call *event,
return 0;
}
-static int syscall_exit_register(struct ftrace_event_call *event,
+static int syscall_exit_register(struct trace_event_call *event,
enum trace_reg type, void *data)
{
- struct ftrace_event_file *file = data;
+ struct trace_event_file *file = data;
switch (type) {
case TRACE_REG_REGISTER:
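
As with kprobe_register(), the opaque data argument of a class ->reg() callback now carries a struct trace_event_file * for the ftrace register/unregister cases. The dispatch skeleton, with hypothetical enable/disable helpers (sketch only):

	static int my_event_register(struct trace_event_call *event,
				     enum trace_reg type, void *data)
	{
		struct trace_event_file *file = data;	/* ftrace (un)register only */

		switch (type) {
		case TRACE_REG_REGISTER:
			return my_event_enable(event, file);	/* hypothetical */
		case TRACE_REG_UNREGISTER:
			my_event_disable(event, file);		/* hypothetical */
			return 0;
		default:
			return 0;
		}
	}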
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 6dd022c7b5bc..aa1ea7b36fa8 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -293,7 +293,7 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
struct trace_uprobe *tu;
list_for_each_entry(tu, &uprobe_list, list)
- if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 &&
+ if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
strcmp(tu->tp.call.class->system, group) == 0)
return tu;
@@ -323,7 +323,7 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
mutex_lock(&uprobe_lock);
/* register as an event */
- old_tu = find_probe_event(ftrace_event_name(&tu->tp.call),
+ old_tu = find_probe_event(trace_event_name(&tu->tp.call),
tu->tp.call.class->system);
if (old_tu) {
/* delete old event */
@@ -600,7 +600,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
int i;
seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
- ftrace_event_name(&tu->tp.call));
+ trace_event_name(&tu->tp.call));
seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);
for (i = 0; i < tu->tp.nr_args; i++)
@@ -651,7 +651,7 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
struct trace_uprobe *tu = v;
seq_printf(m, " %s %-44s %15lu\n", tu->filename,
- ftrace_event_name(&tu->tp.call), tu->nhit);
+ trace_event_name(&tu->tp.call), tu->nhit);
return 0;
}
@@ -770,26 +770,26 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
static void __uprobe_trace_func(struct trace_uprobe *tu,
unsigned long func, struct pt_regs *regs,
struct uprobe_cpu_buffer *ucb, int dsize,
- struct ftrace_event_file *ftrace_file)
+ struct trace_event_file *trace_file)
{
struct uprobe_trace_entry_head *entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
void *data;
int size, esize;
- struct ftrace_event_call *call = &tu->tp.call;
+ struct trace_event_call *call = &tu->tp.call;
- WARN_ON(call != ftrace_file->event_call);
+ WARN_ON(call != trace_file->event_call);
if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
return;
- if (ftrace_trigger_soft_disabled(ftrace_file))
+ if (trace_trigger_soft_disabled(trace_file))
return;
esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
size = esize + tu->tp.size + dsize;
- event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+ event = trace_event_buffer_lock_reserve(&buffer, trace_file,
call->event.type, size, 0, 0);
if (!event)
return;
@@ -806,7 +806,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
memcpy(data, ucb->buf, tu->tp.size + dsize);
- event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0);
+ event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}
/* uprobe handler */
@@ -853,12 +853,12 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
if (is_ret_probe(tu)) {
trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
- ftrace_event_name(&tu->tp.call),
+ trace_event_name(&tu->tp.call),
entry->vaddr[1], entry->vaddr[0]);
data = DATAOF_TRACE_ENTRY(entry, true);
} else {
trace_seq_printf(s, "%s: (0x%lx)",
- ftrace_event_name(&tu->tp.call),
+ trace_event_name(&tu->tp.call),
entry->vaddr[0]);
data = DATAOF_TRACE_ENTRY(entry, false);
}
@@ -881,7 +881,7 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self,
struct mm_struct *mm);
static int
-probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
+probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
filter_func_t filter)
{
bool enabled = trace_probe_is_enabled(&tu->tp);
@@ -938,7 +938,7 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
}
static void
-probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file)
+probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
if (!trace_probe_is_enabled(&tu->tp))
return;
@@ -967,7 +967,7 @@ probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file)
uprobe_buffer_disable();
}
-static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
+static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
int ret, i, size;
struct uprobe_trace_entry_head field;
@@ -1093,7 +1093,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
unsigned long func, struct pt_regs *regs,
struct uprobe_cpu_buffer *ucb, int dsize)
{
- struct ftrace_event_call *call = &tu->tp.call;
+ struct trace_event_call *call = &tu->tp.call;
struct uprobe_trace_entry_head *entry;
struct hlist_head *head;
void *data;
@@ -1159,11 +1159,11 @@ static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
#endif /* CONFIG_PERF_EVENTS */
static int
-trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
+trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
void *data)
{
struct trace_uprobe *tu = event->data;
- struct ftrace_event_file *file = data;
+ struct trace_event_file *file = data;
switch (type) {
case TRACE_REG_REGISTER:
@@ -1272,10 +1272,10 @@ static struct trace_event_functions uprobe_funcs = {
static int register_uprobe_event(struct trace_uprobe *tu)
{
- struct ftrace_event_call *call = &tu->tp.call;
+ struct trace_event_call *call = &tu->tp.call;
int ret;
- /* Initialize ftrace_event_call */
+ /* Initialize trace_event_call */
INIT_LIST_HEAD(&call->class->fields);
call->event.funcs = &uprobe_funcs;
call->class->define_fields = uprobe_event_define_fields;
@@ -1283,7 +1283,7 @@ static int register_uprobe_event(struct trace_uprobe *tu)
if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
return -ENOMEM;
- ret = register_ftrace_event(&call->event);
+ ret = register_trace_event(&call->event);
if (!ret) {
kfree(call->print_fmt);
return -ENODEV;
@@ -1295,9 +1295,9 @@ static int register_uprobe_event(struct trace_uprobe *tu)
if (ret) {
pr_info("Failed to register uprobe event: %s\n",
- ftrace_event_name(call));
+ trace_event_name(call));
kfree(call->print_fmt);
- unregister_ftrace_event(&call->event);
+ unregister_trace_event(&call->event);
}
return ret;
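
register_uprobe_event() and register_kprobe_event() share the same two-step pattern: register the output side with register_trace_event(), then expose the call with trace_add_event_call(), unwinding the first step if the second fails. Stripped to the pattern (a sketch, with the print_fmt allocation elided):

	static int my_register_probe_event(struct trace_event_call *call)
	{
		int ret;

		ret = register_trace_event(&call->event);
		if (!ret)
			return -ENODEV;	/* zero: no event type assigned */

		ret = trace_add_event_call(call);
		if (ret)
			unregister_trace_event(&call->event);

		return ret;
	}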
diff --git a/mm/debug.c b/mm/debug.c
index 3eb3ac2fcee7..76089ddf99ea 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#include <linux/memcontrol.h>
static const struct trace_print_flags pageflag_names[] = {
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 430b5d27828e..1bd593bbf7a5 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -55,10 +55,10 @@ void xs_init(pTHX)
INTERP my_perl;
-#define FTRACE_MAX_EVENT \
+#define TRACE_EVENT_TYPE_MAX \
((1 << (sizeof(unsigned short) * 8)) - 1)
-static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT);
+static DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX);
extern struct scripting_context *scripting_context;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 5544b8cdd1ee..ace2484985cb 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -44,10 +44,10 @@
PyMODINIT_FUNC initperf_trace_context(void);
-#define FTRACE_MAX_EVENT \
+#define TRACE_EVENT_TYPE_MAX \
((1 << (sizeof(unsigned short) * 8)) - 1)
-static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT);
+static DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX);
#define MAX_FIELDS 64
#define N_COMMON_FIELDS 7
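
The perf scripting engines carry a private copy of the limit, now under the kernel's new name: with 16-bit unsigned short event types, (1 << 16) - 1 = 65535, the same "65 thousand events" ceiling trace_search_list() checks in trace_output.c. The bitmap presumably marks each event type whose symbols have already been defined; an assumed usage pattern, not copied from the perf sources:

	#define TRACE_EVENT_TYPE_MAX \
		((1 << (sizeof(unsigned short) * 8)) - 1)	/* 65535 */

	static DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX);

	static bool event_already_defined(int type)
	{
		/* Returns the old bit value, setting the bit as a side effect. */
		return test_and_set_bit(type, events_defined);
	}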