author     Daniel Vetter <daniel.vetter@ffwll.ch>    2015-11-23 09:04:05 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2015-11-23 09:04:05 +0100
commit     92907cbbef8625bb3998d1eb385fc88f23c97a3f (patch)
tree       15626ff9287e37c3cb81c7286d6db5a7fd77c854 /kernel/context_tracking.c
parent     15fbfccfe92c62ae8d1ecc647c44157ed01ac02e (diff)
parent     1ec218373b8ebda821aec00bb156a9c94fad9cd4 (diff)
Merge tag 'v4.4-rc2' into drm-intel-next-queued
Linux 4.4-rc2

Backmerge to get at

  commit 1b0e3a049efe471c399674fd954500ce97438d30
  Author: Imre Deak <imre.deak@intel.com>
  Date:   Thu Nov 5 23:04:11 2015 +0200

      drm/i915/skl: disable display side power well support for now

so that we can properly re-enable skl power wells in -next.

Conflicts are just adjacent lines changed, except for intel_fbdev.c where we
need to interleave the changes. Nothing nefarious.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'kernel/context_tracking.c')
-rw-r--r--  kernel/context_tracking.c  80
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 0a495ab35bc7..d8560ee3bab7 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -58,36 +58,13 @@ static void context_tracking_recursion_exit(void)
* instructions to execute won't use any RCU read side critical section
* because this function sets RCU in extended quiescent state.
*/
-void context_tracking_enter(enum ctx_state state)
+void __context_tracking_enter(enum ctx_state state)
{
- unsigned long flags;
-
- /*
- * Repeat the user_enter() check here because some archs may be calling
- * this from asm and if no CPU needs context tracking, they shouldn't
- * go further. Repeat the check here until they support the inline static
- * key check.
- */
- if (!context_tracking_is_enabled())
- return;
-
- /*
- * Some contexts may involve an exception occuring in an irq,
- * leading to that nesting:
- * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
- * This would mess up the dyntick_nesting count though. And rcu_irq_*()
- * helpers are enough to protect RCU uses inside the exception. So
- * just return immediately if we detect we are in an IRQ.
- */
- if (in_interrupt())
- return;
-
/* Kernel threads aren't supposed to go to userspace */
WARN_ON_ONCE(!current->mm);
- local_irq_save(flags);
if (!context_tracking_recursion_enter())
- goto out_irq_restore;
+ return;
if ( __this_cpu_read(context_tracking.state) != state) {
if (__this_cpu_read(context_tracking.active)) {
@@ -120,7 +97,27 @@ void context_tracking_enter(enum ctx_state state)
__this_cpu_write(context_tracking.state, state);
}
context_tracking_recursion_exit();
-out_irq_restore:
+}
+NOKPROBE_SYMBOL(__context_tracking_enter);
+EXPORT_SYMBOL_GPL(__context_tracking_enter);
+
+void context_tracking_enter(enum ctx_state state)
+{
+ unsigned long flags;
+
+ /*
+ * Some contexts may involve an exception occuring in an irq,
+ * leading to that nesting:
+ * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+ * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+ * helpers are enough to protect RCU uses inside the exception. So
+ * just return immediately if we detect we are in an IRQ.
+ */
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+ __context_tracking_enter(state);
local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_enter);
@@ -128,7 +125,7 @@ EXPORT_SYMBOL_GPL(context_tracking_enter);
void context_tracking_user_enter(void)
{
- context_tracking_enter(CONTEXT_USER);
+ user_enter();
}
NOKPROBE_SYMBOL(context_tracking_user_enter);
@@ -144,19 +141,10 @@ NOKPROBE_SYMBOL(context_tracking_user_enter);
* This call supports re-entrancy. This way it can be called from any exception
* handler without needing to know if we came from userspace or not.
*/
-void context_tracking_exit(enum ctx_state state)
+void __context_tracking_exit(enum ctx_state state)
{
- unsigned long flags;
-
- if (!context_tracking_is_enabled())
- return;
-
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
if (!context_tracking_recursion_enter())
- goto out_irq_restore;
+ return;
if (__this_cpu_read(context_tracking.state) == state) {
if (__this_cpu_read(context_tracking.active)) {
@@ -173,7 +161,19 @@ void context_tracking_exit(enum ctx_state state)
__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
}
context_tracking_recursion_exit();
-out_irq_restore:
+}
+NOKPROBE_SYMBOL(__context_tracking_exit);
+EXPORT_SYMBOL_GPL(__context_tracking_exit);
+
+void context_tracking_exit(enum ctx_state state)
+{
+ unsigned long flags;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+ __context_tracking_exit(state);
local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_exit);
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(context_tracking_exit);
void context_tracking_user_exit(void)
{
- context_tracking_exit(CONTEXT_USER);
+ user_exit();
}
NOKPROBE_SYMBOL(context_tracking_user_exit);
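
Editorial note (not part of the commit above): the hunks in this file split the
IRQ-safe wrappers context_tracking_enter()/context_tracking_exit() from new
inner helpers __context_tracking_enter()/__context_tracking_exit(). One likely
benefit is that a code path which already runs with interrupts disabled can
call the __ variants directly and skip the local_irq_save()/local_irq_restore()
round trip done by the wrappers. A minimal sketch of such a caller against the
post-patch API follows; the example_* function names are invented for
illustration and the exact header layout is assumed, not taken from this diff.

#include <linux/context_tracking.h>

/*
 * Hypothetical helpers, names chosen for illustration only: a path that
 * is already running with interrupts disabled can use the __ variants
 * directly, avoiding the irq save/restore that the wrappers perform.
 */
static void example_enter_user_irqs_off(void)
{
	/* Caller must guarantee interrupts are already disabled. */
	WARN_ON_ONCE(!irqs_disabled());

	if (context_tracking_is_enabled())
		__context_tracking_enter(CONTEXT_USER);
}

static void example_exit_user_irqs_off(void)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (context_tracking_is_enabled())
		__context_tracking_exit(CONTEXT_USER);
}

The IRQ-safe wrappers kept in this file still check in_interrupt() and wrap the
inner call in local_irq_save()/local_irq_restore(), so existing callers such as
user_enter()/user_exit() keep their previous behaviour.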