From de30a2b355ea85350ca2f58f3b9bf4e5bc007986 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 3 Jul 2006 00:24:42 -0700
Subject: [PATCH] lockdep: irqtrace subsystem, core

Accurate hard-IRQ-flags and softirq-flags state tracing.

This allows us to attach extra functionality to IRQ flags on/off events
(such as trace-on/off).

Signed-off-by: Ingo Molnar
Signed-off-by: Arjan van de Ven
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/softirq.c | 137 +++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 118 insertions(+), 19 deletions(-)

(limited to 'kernel/softirq.c')

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8f03e3b89b55..584609b6a66e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -61,6 +61,119 @@ static inline void wakeup_softirqd(void)
 		wake_up_process(tsk);
 }
 
+/*
+ * This one is for softirq.c-internal use,
+ * where hardirqs are disabled legitimately:
+ */
+static void __local_bh_disable(unsigned long ip)
+{
+	unsigned long flags;
+
+	WARN_ON_ONCE(in_irq());
+
+	raw_local_irq_save(flags);
+	add_preempt_count(SOFTIRQ_OFFSET);
+	/*
+	 * Were softirqs turned off above:
+	 */
+	if (softirq_count() == SOFTIRQ_OFFSET)
+		trace_softirqs_off(ip);
+	raw_local_irq_restore(flags);
+}
+
+void local_bh_disable(void)
+{
+	__local_bh_disable((unsigned long)__builtin_return_address(0));
+}
+
+EXPORT_SYMBOL(local_bh_disable);
+
+void __local_bh_enable(void)
+{
+	WARN_ON_ONCE(in_irq());
+
+	/*
+	 * softirqs should never be enabled by __local_bh_enable(),
+	 * it always nests inside local_bh_enable() sections:
+	 */
+	WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
+
+	sub_preempt_count(SOFTIRQ_OFFSET);
+}
+EXPORT_SYMBOL_GPL(__local_bh_enable);
+
+/*
+ * Special-case - softirqs can safely be enabled in
+ * cond_resched_softirq(), or by __do_softirq(),
+ * without processing still-pending softirqs:
+ */
+void _local_bh_enable(void)
+{
+	WARN_ON_ONCE(in_irq());
+	WARN_ON_ONCE(!irqs_disabled());
+
+	if (softirq_count() == SOFTIRQ_OFFSET)
+		trace_softirqs_on((unsigned long)__builtin_return_address(0));
+	sub_preempt_count(SOFTIRQ_OFFSET);
+}
+
+EXPORT_SYMBOL(_local_bh_enable);
+
+void local_bh_enable(void)
+{
+	unsigned long flags;
+
+	WARN_ON_ONCE(in_irq());
+	WARN_ON_ONCE(irqs_disabled());
+
+	local_irq_save(flags);
+	/*
+	 * Are softirqs going to be turned on now:
+	 */
+	if (softirq_count() == SOFTIRQ_OFFSET)
+		trace_softirqs_on((unsigned long)__builtin_return_address(0));
+	/*
+	 * Keep preemption disabled until we are done with
+	 * softirq processing:
+	 */
+	sub_preempt_count(SOFTIRQ_OFFSET - 1);
+
+	if (unlikely(!in_interrupt() && local_softirq_pending()))
+		do_softirq();
+
+	dec_preempt_count();
+	local_irq_restore(flags);
+	preempt_check_resched();
+}
+EXPORT_SYMBOL(local_bh_enable);
+
+void local_bh_enable_ip(unsigned long ip)
+{
+	unsigned long flags;
+
+	WARN_ON_ONCE(in_irq());
+
+	local_irq_save(flags);
+	/*
+	 * Are softirqs going to be turned on now:
+	 */
+	if (softirq_count() == SOFTIRQ_OFFSET)
+		trace_softirqs_on(ip);
+	/*
+	 * Keep preemption disabled until we are done with
+	 * softirq processing:
+	 */
+	sub_preempt_count(SOFTIRQ_OFFSET - 1);
+
+	if (unlikely(!in_interrupt() && local_softirq_pending()))
+		do_softirq();
+
+	dec_preempt_count();
+	local_irq_restore(flags);
+	preempt_check_resched();
+}
+EXPORT_SYMBOL(local_bh_enable_ip);
+
 /*
  * We restart softirq processing MAX_SOFTIRQ_RESTART times,
  * and we fall back to softirqd after that.
@@ -80,8 +193,9 @@ asmlinkage void __do_softirq(void)
 	int cpu;
 
 	pending = local_softirq_pending();
+	__local_bh_disable((unsigned long)__builtin_return_address(0));
+	trace_softirq_enter();
 
-	local_bh_disable();
 	cpu = smp_processor_id();
 restart:
 	/* Reset the pending bitmask before enabling irqs */
@@ -109,7 +223,8 @@ restart:
 	if (pending)
 		wakeup_softirqd();
 
-	__local_bh_enable();
+	trace_softirq_exit();
+	_local_bh_enable();
 }
 
 #ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -136,23 +251,6 @@ EXPORT_SYMBOL(do_softirq);
 
 #endif
 
-void local_bh_enable(void)
-{
-	WARN_ON(irqs_disabled());
-	/*
-	 * Keep preemption disabled until we are done with
-	 * softirq processing:
-	 */
-	sub_preempt_count(SOFTIRQ_OFFSET - 1);
-
-	if (unlikely(!in_interrupt() && local_softirq_pending()))
-		do_softirq();
-
-	dec_preempt_count();
-	preempt_check_resched();
-}
-EXPORT_SYMBOL(local_bh_enable);
-
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
 # define invoke_softirq()	__do_softirq()
 #else
@@ -165,6 +263,7 @@ EXPORT_SYMBOL(local_bh_enable);
 void irq_exit(void)
 {
 	account_system_vtime(current);
+	trace_hardirq_exit();
 	sub_preempt_count(IRQ_EXIT_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
--
cgit v1.2.3
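The central trick in the patch above is that softirq-disable sections nest, so the trace hooks must fire only on the outermost transition: __local_bh_disable() bumps the softirq part of preempt_count by SOFTIRQ_OFFSET and calls trace_softirqs_off() only when the count has just become exactly one unit, and the enable paths make the mirror check before dropping the count. A minimal user-space sketch of that check follows; it is a model, not kernel code, the constants only approximate the kernel's values, the model_bh_* names are invented, and printf() stands in for the trace hooks.

    /*
     * Stand-alone model (user-space C, not kernel code) of the edge-triggered
     * softirq-state tracing added above.  SOFTIRQ_OFFSET/SOFTIRQ_MASK only
     * approximate the kernel's constants, preempt_count is per-CPU in the
     * real kernel, and printf() stands in for trace_softirqs_off()/on().
     */
    #include <stdio.h>

    #define SOFTIRQ_OFFSET  0x100   /* one unit of softirq-disable nesting */
    #define SOFTIRQ_MASK    0xff00  /* bits of preempt_count that hold it */

    static unsigned int preempt_count;

    static unsigned int softirq_count(void)
    {
            return preempt_count & SOFTIRQ_MASK;
    }

    static void model_bh_disable(void)
    {
            preempt_count += SOFTIRQ_OFFSET;
            /* trace only the 0 -> 1 transition, as __local_bh_disable() does */
            if (softirq_count() == SOFTIRQ_OFFSET)
                    printf("trace_softirqs_off\n");
    }

    static void model_bh_enable(void)
    {
            /* trace only the 1 -> 0 transition, as local_bh_enable() does */
            if (softirq_count() == SOFTIRQ_OFFSET)
                    printf("trace_softirqs_on\n");
            /*
             * The real local_bh_enable() drops the count in two steps
             * (SOFTIRQ_OFFSET - 1, run pending softirqs, then the final
             * unit) so preemption stays disabled while softirqs run;
             * a single subtraction is enough for this model.
             */
            preempt_count -= SOFTIRQ_OFFSET;
    }

    int main(void)
    {
            model_bh_disable();     /* outermost: prints trace_softirqs_off */
            model_bh_disable();     /* nested: silent */
            model_bh_enable();      /* still nested: silent */
            model_bh_enable();      /* outermost: prints trace_softirqs_on */
            return 0;
    }

Running the model prints exactly one off/on pair around the nested calls, which is the property lockdep's irq-state tracking depends on.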
From 829035fd709119d9def124a6d40b94d317573e6f Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Mon, 3 Jul 2006 00:25:40 -0700
Subject: [PATCH] lockdep: irqtrace subsystem, move account_system_vtime() calls into kernel/softirq.c

At the moment, powerpc and s390 have their own versions of do_softirq which
include local_bh_disable() and __local_bh_enable() calls.  They end up
calling __do_softirq (in kernel/softirq.c) which also does
local_bh_disable/enable.

Apparently the two levels of disable/enable trigger a warning from some
validation code that Ingo is working on, and he would like to see the outer
level removed.  But to do that, we have to move the account_system_vtime
calls that are currently in the arch do_softirq() implementations for
powerpc and s390 into the generic __do_softirq() (this is a no-op for other
archs because account_system_vtime is defined to be an empty inline function
on all other archs).

This patch does that.

Signed-off-by: Paul Mackerras
Signed-off-by: Ingo Molnar
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/kernel/irq.c | 7 +------
 arch/s390/kernel/irq.c    | 8 --------
 kernel/softirq.c          | 4 ++++
 3 files changed, 5 insertions(+), 14 deletions(-)

(limited to 'kernel/softirq.c')

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 027728b95429..e3774f6b57cc 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -424,13 +424,8 @@ void do_softirq(void)
 
 	local_irq_save(flags);
 
-	if (local_softirq_pending()) {
-		account_system_vtime(current);
-		local_bh_disable();
+	if (local_softirq_pending())
 		do_softirq_onstack();
-		account_system_vtime(current);
-		_local_bh_enable();
-	}
 
 	local_irq_restore(flags);
 }
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index e347190d9aea..1eef50918615 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -69,10 +69,6 @@ asmlinkage void do_softirq(void)
 
 	local_irq_save(flags);
 
-	account_system_vtime(current);
-
-	local_bh_disable();
-
 	if (local_softirq_pending()) {
 		/* Get current stack pointer. */
 		asm volatile("la %0,0(15)" : "=a" (old));
@@ -95,10 +91,6 @@ asmlinkage void do_softirq(void)
 			__do_softirq();
 	}
 
-	account_system_vtime(current);
-
-	_local_bh_enable();
-
 	local_irq_restore(flags);
 }
 
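The powerpc and s390 hunks above delete the arch-local account_system_vtime() calls together with the outer local_bh_disable()/_local_bh_enable() pair, and the kernel/softirq.c hunk below re-adds the accounting calls in the generic path. The claim that this is free for every other architecture rests on account_system_vtime() collapsing to an empty inline there. The sketch below shows that pattern; it is illustrative, and the config symbol (CONFIG_VIRT_CPU_ACCOUNTING) and header placement are assumptions rather than text quoted from the 2006 tree.

    /*
     * Illustrative only -- not copied from the kernel headers.  Architectures
     * with precise CPU-time accounting (powerpc, s390) provide a real
     * account_system_vtime(); everyone else gets a no-op static inline that
     * the compiler discards, so calling it from generic code costs nothing.
     * CONFIG_VIRT_CPU_ACCOUNTING is assumed as the selector.
     */
    struct task_struct;

    #ifdef CONFIG_VIRT_CPU_ACCOUNTING
    extern void account_system_vtime(struct task_struct *tsk);
    #else
    static inline void account_system_vtime(struct task_struct *tsk)
    {
            /* no-op: compiles away where vtime accounting is not implemented */
    }
    #endif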
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 584609b6a66e..215541e26c1a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -193,6 +193,8 @@ asmlinkage void __do_softirq(void)
 	int cpu;
 
 	pending = local_softirq_pending();
+	account_system_vtime(current);
+
 	__local_bh_disable((unsigned long)__builtin_return_address(0));
 	trace_softirq_enter();
 
@@ -224,6 +226,8 @@ restart:
 		wakeup_softirqd();
 
 	trace_softirq_exit();
+
+	account_system_vtime(current);
 	_local_bh_enable();
 }
 
--
cgit v1.2.3