path: root/include/linux/kernel.h
Diffstat (limited to 'include/linux/kernel.h')
-rw-r--r--   include/linux/kernel.h   97
1 file changed, 95 insertions(+), 2 deletions(-)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d6320a3e8def..f61039e8ee98 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -20,6 +20,98 @@
#include <asm/byteorder.h>
#include <asm/bug.h>
+
+struct ftrace_branch_data {
+ const char *func;
+ const char *file;
+ unsigned line;
+ union {
+ struct {
+ unsigned long correct;
+ unsigned long incorrect;
+ };
+ struct {
+ unsigned long miss;
+ unsigned long hit;
+ };
+ unsigned long miss_hit[2];
+ };
+};
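
The anonymous union gives three views of a single pair of counters: the
annotated-branch profiler reads them as correct/incorrect, the all-branch
profiler as miss/hit, and miss_hit[] lets an update be indexed by the branch
outcome. A minimal user-space check of the aliasing (C11 or GNU C for the
anonymous members; the struct is trimmed for illustration):

    #include <stdio.h>

    struct branch_data {
        union {
            struct { unsigned long correct, incorrect; };
            struct { unsigned long miss, hit; };
            unsigned long miss_hit[2];
        };
    };

    int main(void)
    {
        static struct branch_data d;    /* zero-initialized */

        d.miss_hit[0]++;    /* aliases d.correct and d.miss */
        d.miss_hit[1]++;    /* aliases d.incorrect and d.hit */
        printf("correct=%lu incorrect=%lu miss=%lu hit=%lu\n",
               d.correct, d.incorrect, d.miss, d.hit);
        return 0;           /* prints: correct=1 incorrect=1 miss=1 hit=1 */
    }
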
+
+/*
+ * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
+ * to disable branch tracing on a per-file basis.
+ * We currently do not profile modules.
+ */
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) \
+ && !defined(MODULE)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+
+#define likely_notrace(x) __builtin_expect(!!(x), 1)
+#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
+
+#define __branch_check__(x, expect) ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_annotated_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = likely_notrace(x); \
+ ftrace_likely_update(&______f, ______r, expect); \
+ ______r; \
+ })
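
Each likely()/unlikely() call site thus gets its own static record, and the
section attribute collects every record into one ELF section that the tracer
can later walk as an array. A user-space sketch of the same named-section
trick, relying on the GNU linker's automatic __start_<section> and
__stop_<section> symbols (all names below are illustrative, not kernel API):

    #include <stdio.h>

    struct rec { const char *func; unsigned line; };

    #define MARK() do {                                            \
            static struct rec ___r                                 \
                    __attribute__((used, section("demo_branch")))  \
                    = { __func__, __LINE__ };                      \
            (void)___r;                                            \
    } while (0)

    extern struct rec __start_demo_branch[], __stop_demo_branch[];

    static void f(void) { MARK(); }
    static void g(void) { MARK(); }

    int main(void)
    {
        f();
        g();
        for (struct rec *r = __start_demo_branch; r < __stop_demo_branch; r++)
            printf("%s:%u\n", r->func, r->line);
        return 0;
    }
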
+
+/*
+ * Use __builtin_constant_p(x) to skip profiling when the condition is a
+ * compile-time constant, since its outcome is always the same. This idea
+ * is taken from a similar patch written by Daniel Walker.
+ */
+#undef likely
+#define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+#undef unlikely
+#define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
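
The __builtin_constant_p() test keeps conditions that fold to a compile-time
constant (common after macro expansion, e.g. likely(1)) out of the
statistics, where they would only add a pointless record. A short
illustration; the volatile stops the compiler from folding the second case:

    #include <stdio.h>

    int main(void)
    {
        volatile int x = 0;

        printf("%d\n", __builtin_constant_p(42));       /* 1: constant */
        printf("%d\n", __builtin_constant_p(x + 1));    /* 0: runtime  */
        return 0;
    }
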
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+extern int sysctl_branch_profiling_enabled;
+#ifdef CONFIG_PROFILE_BRANCHES_PER_CPU
+extern void branch_profiler(struct ftrace_branch_data *data, int cond);
+#else
+static inline void branch_profiler(struct ftrace_branch_data *data, int cond)
+{
+ data->miss_hit[cond]++;
+}
+#endif
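
In the default flavour the update is a single indexed increment through the
miss_hit[] view of the union, so the profiler itself stays branch-free. A
standalone sketch of the pattern:

    #include <stdio.h>

    struct counters { unsigned long miss_hit[2]; };

    /* Equivalent to "if (cond) hit++; else miss++;", without a branch. */
    static void profile(struct counters *c, int cond)
    {
        c->miss_hit[!!cond]++;
    }

    int main(void)
    {
        static struct counters c;

        for (int i = 0; i < 10; i++)
            profile(&c, i % 3 == 0);
        printf("miss=%lu hit=%lu\n", c.miss_hit[0], c.miss_hit[1]);
        return 0;    /* prints: miss=6 hit=4 */
    }
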
+
+/*
+ * "Define 'is'", Bill Clinton
+ * "Define 'if'", Steven Rostedt
+ */
+#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
+#define __trace_if(cond) \
+ if ((!sysctl_branch_profiling_enabled || \
+ __builtin_constant_p((cond))) ? !!(cond) : \
+ ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = !!(cond); \
+ branch_profiler(&______f, ______r); \
+ ______r; \
+ }))
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+#endif /* CONFIG_TRACE_BRANCH_PROFILING */
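
With CONFIG_PROFILE_ALL_BRANCHES, every if in code that includes this header
is silently rewritten. Two details carry the trick: the macro takes
(cond, ...) and rejoins the arguments with ", ## __VA_ARGS__", so a condition
containing an unparenthesized comma still arrives as a single expression, and
the literal if inside __trace_if is not expanded again because the
preprocessor never re-expands a macro name within its own expansion. A
self-contained user-space version of the override, simplified (no sysctl
gate, no section bookkeeping; GNU C statement expressions):

    #include <stdio.h>

    static unsigned long hits, misses;

    #define if(cond, ...) __count_if( (cond , ## __VA_ARGS__) )
    #define __count_if(cond)                          \
            if (({ int _r = !!(cond);                 \
                   if (_r) hits++; else misses++;     \
                   _r; }))

    int main(void)
    {
        for (int i = 0; i < 10; i++)
            if (i & 1)
                continue;
        printf("hits=%lu misses=%lu\n", hits, misses);
        return 0;    /* prints: hits=5 misses=5 */
    }
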
+
extern const char linux_banner[];
extern const char linux_proc_banner[];
@@ -125,7 +217,7 @@ extern int _cond_resched(void);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
- void __might_sleep(char *file, int line);
+ void __might_sleep(char *file, int line, int preempt_offset);
/**
* might_sleep - annotation for functions that can sleep
*
@@ -137,8 +229,9 @@ extern int _cond_resched(void);
* supposed to.
*/
# define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
+ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
#else
+ static inline void __might_sleep(char *file, int line, int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
#endif
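
The new preempt_offset argument lets a caller that deliberately runs with a
known amount of atomicity, such as cond_resched_lock() with one spinlock
held, tell the debug check how much preempt_count to tolerate before
warning. A user-space model of the idea (preempt_count below stands in for
the kernel's per-task counter; all names are illustrative):

    #include <stdio.h>

    static int preempt_count;    /* models the kernel's counter */

    static void __might_sleep(const char *file, int line, int preempt_offset)
    {
        if (preempt_count - preempt_offset != 0)
            fprintf(stderr, "BUG: sleeping while atomic at %s:%d\n",
                    file, line);
    }

    int main(void)
    {
        preempt_count = 1;                      /* one "lock" held */
        __might_sleep(__FILE__, __LINE__, 0);   /* warns */
        __might_sleep(__FILE__, __LINE__, 1);   /* silent: lock declared */
        return 0;
    }
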