From 32b5d378f49485f3ae172eb0ac2bfc4ebbbdb060 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 21 Jun 2010 16:04:51 +0200 Subject: irq: cleanup irqfixup Make the following cleanups to irqfixup. * Define IRQFIXUP_{SPURIOUS|MISROUTED|POLL} and use them instead of hard coding 0, 1 and 2. * Add an inline note_interrupt() wrapper which checks noirqdebug and calls __note_interrupt() instead of checking noirqdebug from each caller. Signed-off-by: Tejun Heo --- include/linux/irq.h | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/irq.h b/include/linux/irq.h index c03243ad84b4..ec93be456b3d 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -324,8 +324,17 @@ static inline void generic_handle_irq(unsigned int irq) } /* Handling of unhandled and spurious interrupts: */ -extern void note_interrupt(unsigned int irq, struct irq_desc *desc, - irqreturn_t action_ret); +extern void __note_interrupt(unsigned int irq, struct irq_desc *desc, + irqreturn_t action_ret); + +static inline void note_interrupt(unsigned int irq, struct irq_desc *desc, + irqreturn_t action_ret) +{ + extern int noirqdebug; + + if (!noirqdebug) + __note_interrupt(irq, desc, action_ret); +} /* Resending of interrupts :*/ void check_irq_resend(struct irq_desc *desc, unsigned int irq); -- cgit v1.2.3 From 80f97e2b33c3e66c11b10abd2890506a3abeb320 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 21 Jun 2010 16:04:51 +0200 Subject: irq: make spurious poll timer per desc Currently there is a single timer for spurious IRQ polling and when it kicks in, it polls all the IRQs. Add irq_desc->poll_timer and use it for spurious polling such that only the failed IRQ is polled. This significantly reduces the cost of spurious polling and the polling interval is adjusted to 10ms. irq_poll_action_{added|removed}(), which are called from {setup|free}_irq() respectively, are added so that poll timer management is done inside spurious.c. The global polling function poll_spurious_irqs() is replaced with the per-IRQ polling function poll_irq() and try_one_irq() is changed to expect its callers to acquire desc->lock for upcoming extension of poll_irq(). This reduces the overhead of spurious handling and eases implementing further fine grained IRQ protection mechanisms on top.
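For reference, the new calling convention can be shown in a minimal sketch (illustrative only, not part of the patch; it assumes the kernel/irq internals visible in the diffs below). Callers of try_one_irq() now take desc->lock themselves, so the timer path and misrouted_irq() follow a single locking rule:

	/*
	 * Sketch: driving one poll under the new rule.  try_one_irq()
	 * no longer locks internally; the caller holds desc->lock.
	 */
	static void poll_irq_once(struct irq_desc *desc)
	{
		raw_spin_lock(&desc->lock);
		try_one_irq(desc->irq, desc);	/* runs with desc->lock held */
		raw_spin_unlock(&desc->lock);
	}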
Signed-off-by: Tejun Heo --- include/linux/irq.h | 5 +++ kernel/irq/internals.h | 6 ++++ kernel/irq/manage.c | 14 +++----- kernel/irq/spurious.c | 93 +++++++++++++++++++++++++++++++------------------- 4 files changed, 73 insertions(+), 45 deletions(-) (limited to 'include') diff --git a/include/linux/irq.h b/include/linux/irq.h index ec93be456b3d..50a77f92cf16 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -169,6 +170,7 @@ struct irq_2_iommu; * @pending_mask: pending rebalanced interrupts * @threads_active: number of irqaction threads currently running * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers + * @poll_timer: timer for IRQ polling * @dir: /proc/irq/ procfs entry * @name: flow handler name for /proc/interrupts output */ @@ -203,6 +205,9 @@ struct irq_desc { #endif atomic_t threads_active; wait_queue_head_t wait_for_threads; + + struct timer_list poll_timer; + #ifdef CONFIG_PROC_FS struct proc_dir_entry *dir; #endif diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 341f952904dc..088e5d6c9a4e 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -38,6 +38,12 @@ extern int irq_select_affinity_usr(unsigned int irq); extern void irq_set_thread_affinity(struct irq_desc *desc); +extern void poll_irq(unsigned long arg); +extern void irq_poll_action_added(struct irq_desc *desc, + struct irqaction *action); +extern void irq_poll_action_removed(struct irq_desc *desc, + struct irqaction *action); + /* Inline functions for support of irq chips on slow busses */ static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc) { diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 3164ba7ce151..cf9ab6507d21 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -752,6 +752,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) irq_chip_set_defaults(desc->chip); init_waitqueue_head(&desc->wait_for_threads); + setup_timer(&desc->poll_timer, poll_irq, (unsigned long)desc); /* Setup the type (level, edge polarity) if configured: */ if (new->flags & IRQF_TRIGGER_MASK) { @@ -804,17 +805,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) desc->irq_count = 0; desc->irqs_unhandled = 0; - /* - * Check whether we disabled the irq via the spurious handler - * before. Reenable it and give it another chance. - */ - if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { - desc->status &= ~IRQ_SPURIOUS_DISABLED; - __enable_irq(desc, irq, false); - } - raw_spin_unlock_irqrestore(&desc->lock, flags); + irq_poll_action_added(desc, new); + /* * Strictly no need to wake it up, but hung_task complains * when no hard interrupt wakes the thread up. 
@@ -930,6 +924,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) raw_spin_unlock_irqrestore(&desc->lock, flags); + irq_poll_action_removed(desc, action); + unregister_handler_proc(irq, action); /* Make sure it's not being used on another CPU: */ diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 5da60a2f1906..545f7300e588 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -12,22 +12,22 @@ #include #include #include -#include + +#include "internals.h" enum { /* irqfixup levels */ IRQFIXUP_SPURIOUS = 0, /* spurious storm detection */ IRQFIXUP_MISROUTED = 1, /* misrouted IRQ fixup */ IRQFIXUP_POLL = 2, /* enable polling by default */ + + /* IRQ polling common parameters */ + IRQ_POLL_INTV = HZ / 100, /* from the good ol' 100HZ tick */ }; int noirqdebug __read_mostly; static int irqfixup __read_mostly = IRQFIXUP_SPURIOUS; -#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) -static void poll_spurious_irqs(unsigned long dummy); -static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); - /* * Recovery handler for misrouted interrupts. */ @@ -36,7 +36,6 @@ static int try_one_irq(int irq, struct irq_desc *desc) struct irqaction *action; int ok = 0, work = 0; - raw_spin_lock(&desc->lock); /* Already running on another processor */ if (desc->status & IRQ_INPROGRESS) { /* @@ -45,7 +44,6 @@ static int try_one_irq(int irq, struct irq_desc *desc) */ if (desc->action && (desc->action->flags & IRQF_SHARED)) desc->status |= IRQ_PENDING; - raw_spin_unlock(&desc->lock); return ok; } /* Honour the normal IRQ locking */ @@ -88,7 +86,6 @@ static int try_one_irq(int irq, struct irq_desc *desc) */ if (work && desc->chip && desc->chip->end) desc->chip->end(irq); - raw_spin_unlock(&desc->lock); return ok; } @@ -105,39 +102,15 @@ static int misrouted_irq(int irq) if (i == irq) /* Already tried */ continue; + raw_spin_lock(&desc->lock); if (try_one_irq(i, desc)) ok = 1; + raw_spin_unlock(&desc->lock); } /* So the caller can adjust the irq error counts */ return ok; } -static void poll_spurious_irqs(unsigned long dummy) -{ - struct irq_desc *desc; - int i; - - for_each_irq_desc(i, desc) { - unsigned int status; - - if (!i) - continue; - - /* Racy but it doesn't matter */ - status = desc->status; - barrier(); - if (!(status & IRQ_SPURIOUS_DISABLED)) - continue; - - local_irq_disable(); - try_one_irq(i, desc); - local_irq_enable(); - } - - mod_timer(&poll_spurious_irq_timer, - jiffies + POLL_SPURIOUS_IRQ_INTERVAL); -} - /* * If 99,900 of the previous 100,000 interrupts have not been handled * then assume that the IRQ is stuck in some manner. Drop a diagnostic @@ -264,12 +237,60 @@ void __note_interrupt(unsigned int irq, struct irq_desc *desc, desc->depth++; desc->chip->disable(irq); - mod_timer(&poll_spurious_irq_timer, - jiffies + POLL_SPURIOUS_IRQ_INTERVAL); + mod_timer(&desc->poll_timer, jiffies + IRQ_POLL_INTV); } desc->irqs_unhandled = 0; } +/* + * IRQ poller. Called from desc->poll_timer. 
+ */ +void poll_irq(unsigned long arg) +{ + struct irq_desc *desc = (void *)arg; + + raw_spin_lock_irq(&desc->lock); + try_one_irq(desc->irq, desc); + raw_spin_unlock_irq(&desc->lock); + + mod_timer(&desc->poll_timer, jiffies + IRQ_POLL_INTV); +} + +void irq_poll_action_added(struct irq_desc *desc, struct irqaction *action) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&desc->lock, flags); + + /* if the interrupt was killed before, give it one more chance */ + if (desc->status & IRQ_SPURIOUS_DISABLED) { + desc->status &= ~IRQ_SPURIOUS_DISABLED; + __enable_irq(desc, desc->irq, false); + } + + raw_spin_unlock_irqrestore(&desc->lock, flags); +} + +void irq_poll_action_removed(struct irq_desc *desc, struct irqaction *action) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&desc->lock, flags); + + /* + * Make sure the timer is offline if no irqaction is left as + * the irq_desc will be reinitialized when the next irqaction + * is added. + */ + while (!desc->action && try_to_del_timer_sync(&desc->poll_timer) < 0) { + raw_spin_unlock_irqrestore(&desc->lock, flags); + cpu_relax(); + raw_spin_lock_irqsave(&desc->lock, flags); + } + + raw_spin_unlock_irqrestore(&desc->lock, flags); +} + int noirqdebug_setup(char *str) { noirqdebug = 1; -- cgit v1.2.3 From bb9ad129477a09f940ab8df78be71cd3dae72968 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 21 Jun 2010 16:04:52 +0200 Subject: irq: kill IRQF_IRQPOLL With irqpoll polling switched to desc->poll_timer, IRQF_IRQPOLL is no longer necessary. Drop it. Signed-off-by: Tejun Heo --- arch/arm/mach-aaec2000/core.c | 2 +- arch/arm/mach-at91/at91rm9200_time.c | 2 +- arch/arm/mach-at91/at91sam926x_time.c | 2 +- arch/arm/mach-bcmring/core.c | 2 +- arch/arm/mach-clps711x/time.c | 2 +- arch/arm/mach-cns3xxx/core.c | 2 +- arch/arm/mach-ebsa110/core.c | 2 +- arch/arm/mach-ep93xx/core.c | 2 +- arch/arm/mach-footbridge/dc21285-timer.c | 2 +- arch/arm/mach-footbridge/isa-timer.c | 2 +- arch/arm/mach-h720x/cpu-h7201.c | 2 +- arch/arm/mach-h720x/cpu-h7202.c | 2 +- arch/arm/mach-integrator/integrator_ap.c | 2 +- arch/arm/mach-ixp2000/core.c | 2 +- arch/arm/mach-ixp23xx/core.c | 2 +- arch/arm/mach-ixp4xx/common.c | 2 +- arch/arm/mach-lh7a40x/time.c | 2 +- arch/arm/mach-mmp/time.c | 2 +- arch/arm/mach-netx/time.c | 2 +- arch/arm/mach-ns9xxx/time-ns9360.c | 2 +- arch/arm/mach-nuc93x/time.c | 2 +- arch/arm/mach-omap1/time.c | 2 +- arch/arm/mach-omap1/timer32k.c | 2 +- arch/arm/mach-omap2/timer-gp.c | 2 +- arch/arm/mach-pnx4008/time.c | 2 +- arch/arm/mach-pxa/time.c | 2 +- arch/arm/mach-sa1100/time.c | 2 +- arch/arm/mach-shark/core.c | 2 +- arch/arm/mach-u300/timer.c | 2 +- arch/arm/mach-w90x900/time.c | 2 +- arch/arm/plat-iop/time.c | 2 +- arch/arm/plat-mxc/time.c | 2 +- arch/arm/plat-samsung/time.c | 2 +- arch/arm/plat-versatile/timer-sp.c | 2 +- arch/blackfin/kernel/time-ts.c | 6 ++---- arch/ia64/kernel/time.c | 2 +- arch/parisc/kernel/irq.c | 2 +- arch/x86/kernel/time.c | 2 +- drivers/clocksource/sh_cmt.c | 3 +-- drivers/clocksource/sh_mtu2.c | 3 +-- drivers/clocksource/sh_tmu.c | 3 +-- include/linux/interrupt.h | 4 ---- 42 files changed, 42 insertions(+), 51 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-aaec2000/core.c b/arch/arm/mach-aaec2000/core.c index 3ef68330452a..ac8753539c4f 100644 --- a/arch/arm/mach-aaec2000/core.c +++ b/arch/arm/mach-aaec2000/core.c @@ -139,7 +139,7 @@ aaec2000_timer_interrupt(int irq, void *dev_id) static struct irqaction aaec2000_timer_irq = { .name = "AAEC-2000 Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | 
IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = aaec2000_timer_interrupt, }; diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index 2500f41d8d2d..a4a00493ad9e 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c @@ -87,7 +87,7 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id) static struct irqaction at91rm9200_timer_irq = { .name = "at91_tick", - .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER, .handler = at91rm9200_timer_interrupt }; diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c index 608a63240b64..22a290bdce24 100644 --- a/arch/arm/mach-at91/at91sam926x_time.c +++ b/arch/arm/mach-at91/at91sam926x_time.c @@ -123,7 +123,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id) static struct irqaction at91sam926x_pit_irq = { .name = "at91_tick", - .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER, .handler = at91sam926x_pit_interrupt }; diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c index 72e405df0fb0..99e10ffec9a5 100644 --- a/arch/arm/mach-bcmring/core.c +++ b/arch/arm/mach-bcmring/core.c @@ -266,7 +266,7 @@ static irqreturn_t bcmring_timer_interrupt(int irq, void *dev_id) static struct irqaction bcmring_timer_irq = { .name = "bcmring Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = bcmring_timer_interrupt, }; diff --git a/arch/arm/mach-clps711x/time.c b/arch/arm/mach-clps711x/time.c index d581ef0bcd24..9dffaa60c8d1 100644 --- a/arch/arm/mach-clps711x/time.c +++ b/arch/arm/mach-clps711x/time.c @@ -56,7 +56,7 @@ p720t_timer_interrupt(int irq, void *dev_id) static struct irqaction clps711x_timer_irq = { .name = "CLPS711x Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = p720t_timer_interrupt, }; diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index 9ca4d581016f..9dcea9f3a84e 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c @@ -178,7 +178,7 @@ static irqreturn_t cns3xxx_timer_interrupt(int irq, void *dev_id) static struct irqaction cns3xxx_timer_irq = { .name = "timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = cns3xxx_timer_interrupt, }; diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c index c7bc7fbb11a6..efb77343365b 100644 --- a/arch/arm/mach-ebsa110/core.c +++ b/arch/arm/mach-ebsa110/core.c @@ -195,7 +195,7 @@ ebsa110_timer_interrupt(int irq, void *dev_id) static struct irqaction ebsa110_timer_irq = { .name = "EBSA110 Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = ebsa110_timer_interrupt, }; diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index 9092677f63eb..2910f6c6341b 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -132,7 +132,7 @@ static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id) static struct irqaction ep93xx_timer_irq = { .name = "ep93xx timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = ep93xx_timer_interrupt, }; diff --git a/arch/arm/mach-footbridge/dc21285-timer.c 
b/arch/arm/mach-footbridge/dc21285-timer.c index bc5e83fb5819..42b0bd787d6f 100644 --- a/arch/arm/mach-footbridge/dc21285-timer.c +++ b/arch/arm/mach-footbridge/dc21285-timer.c @@ -41,7 +41,7 @@ timer1_interrupt(int irq, void *dev_id) static struct irqaction footbridge_timer_irq = { .name = "Timer1 timer tick", .handler = timer1_interrupt, - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, }; /* diff --git a/arch/arm/mach-footbridge/isa-timer.c b/arch/arm/mach-footbridge/isa-timer.c index f488fa2082d7..ca1932e2a2c3 100644 --- a/arch/arm/mach-footbridge/isa-timer.c +++ b/arch/arm/mach-footbridge/isa-timer.c @@ -71,7 +71,7 @@ isa_timer_interrupt(int irq, void *dev_id) static struct irqaction isa_timer_irq = { .name = "ISA timer tick", .handler = isa_timer_interrupt, - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, }; static void __init isa_timer_init(void) diff --git a/arch/arm/mach-h720x/cpu-h7201.c b/arch/arm/mach-h720x/cpu-h7201.c index 24df2a349a98..be1db54c6870 100644 --- a/arch/arm/mach-h720x/cpu-h7201.c +++ b/arch/arm/mach-h720x/cpu-h7201.c @@ -37,7 +37,7 @@ h7201_timer_interrupt(int irq, void *dev_id) static struct irqaction h7201_timer_irq = { .name = "h7201 Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = h7201_timer_interrupt, }; diff --git a/arch/arm/mach-h720x/cpu-h7202.c b/arch/arm/mach-h720x/cpu-h7202.c index fd33a19c813a..e40deea5f5dc 100644 --- a/arch/arm/mach-h720x/cpu-h7202.c +++ b/arch/arm/mach-h720x/cpu-h7202.c @@ -166,7 +166,7 @@ static struct irq_chip h7202_timerx_chip = { static struct irqaction h7202_timer_irq = { .name = "h7202 Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = h7202_timer_interrupt, }; diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index 227cf4d05088..60f741e596a8 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c @@ -447,7 +447,7 @@ static struct clock_event_device integrator_clockevent = { static struct irqaction integrator_timer_irq = { .name = "timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = integrator_timer_interrupt, .dev_id = &integrator_clockevent, }; diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c index babb22597163..693275d1139d 100644 --- a/arch/arm/mach-ixp2000/core.c +++ b/arch/arm/mach-ixp2000/core.c @@ -213,7 +213,7 @@ static int ixp2000_timer_interrupt(int irq, void *dev_id) static struct irqaction ixp2000_timer_irq = { .name = "IXP2000 Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = ixp2000_timer_interrupt, }; diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c index aa4c4420ff3d..8967392b6a66 100644 --- a/arch/arm/mach-ixp23xx/core.c +++ b/arch/arm/mach-ixp23xx/core.c @@ -359,7 +359,7 @@ ixp23xx_timer_interrupt(int irq, void *dev_id) static struct irqaction ixp23xx_timer_irq = { .name = "IXP23xx Timer Tick", .handler = ixp23xx_timer_interrupt, - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, }; void __init ixp23xx_init_timer(void) diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 0bce09799d18..9a574fb42f62 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ 
b/arch/arm/mach-ixp4xx/common.c @@ -278,7 +278,7 @@ static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id) static struct irqaction ixp4xx_timer_irq = { .name = "timer1", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = ixp4xx_timer_interrupt, .dev_id = &clockevent_ixp4xx, }; diff --git a/arch/arm/mach-lh7a40x/time.c b/arch/arm/mach-lh7a40x/time.c index 4601e425bae3..841fe8c518c5 100644 --- a/arch/arm/mach-lh7a40x/time.c +++ b/arch/arm/mach-lh7a40x/time.c @@ -49,7 +49,7 @@ lh7a40x_timer_interrupt(int irq, void *dev_id) static struct irqaction lh7a40x_timer_irq = { .name = "LHA740x Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = lh7a40x_timer_interrupt, }; diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index cf75694e9687..3d6957aa36a8 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -177,7 +177,7 @@ static void __init timer_config(void) static struct irqaction timer_irq = { .name = "timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = timer_interrupt, .dev_id = &ckevt, }; diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c index 82801dbf0579..c0cc83697927 100644 --- a/arch/arm/mach-netx/time.c +++ b/arch/arm/mach-netx/time.c @@ -100,7 +100,7 @@ netx_timer_interrupt(int irq, void *dev_id) static struct irqaction netx_timer_irq = { .name = "NetX Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = netx_timer_interrupt, }; diff --git a/arch/arm/mach-ns9xxx/time-ns9360.c b/arch/arm/mach-ns9xxx/time-ns9360.c index 77281260358a..e60627f292d8 100644 --- a/arch/arm/mach-ns9xxx/time-ns9360.c +++ b/arch/arm/mach-ns9xxx/time-ns9360.c @@ -121,7 +121,7 @@ static irqreturn_t ns9360_clockevent_handler(int irq, void *dev_id) static struct irqaction ns9360_clockevent_action = { .name = "ns9360-timer" __stringify(TIMER_CLOCKEVENT), - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = ns9360_clockevent_handler, }; diff --git a/arch/arm/mach-nuc93x/time.c b/arch/arm/mach-nuc93x/time.c index 2f90f9dc6e30..8e0dbea8ec24 100644 --- a/arch/arm/mach-nuc93x/time.c +++ b/arch/arm/mach-nuc93x/time.c @@ -56,7 +56,7 @@ static irqreturn_t nuc93x_timer_interrupt(int irq, void *dev_id) static struct irqaction nuc93x_timer_irq = { .name = "nuc93x Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = nuc93x_timer_interrupt, }; diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index 1be6a214d88d..c62fa799fe78 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c @@ -157,7 +157,7 @@ static irqreturn_t omap_mpu_timer1_interrupt(int irq, void *dev_id) static struct irqaction omap_mpu_timer1_irq = { .name = "mpu_timer1", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = omap_mpu_timer1_interrupt, }; diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c index 20cfbcc6c60c..8ad901b566f4 100644 --- a/arch/arm/mach-omap1/timer32k.c +++ b/arch/arm/mach-omap1/timer32k.c @@ -156,7 +156,7 @@ static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id) static struct irqaction omap_32k_timer_irq = { .name = "32KHz timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | 
IRQF_TIMER, .handler = omap_32k_timer_interrupt, }; diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c index 74fbed8491f2..ddf9fae448b8 100644 --- a/arch/arm/mach-omap2/timer-gp.c +++ b/arch/arm/mach-omap2/timer-gp.c @@ -62,7 +62,7 @@ static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id) static struct irqaction omap2_gp_timer_irq = { .name = "gp timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = omap2_gp_timer_interrupt, }; diff --git a/arch/arm/mach-pnx4008/time.c b/arch/arm/mach-pnx4008/time.c index 0c8aad4bb0dc..1d5b2dae3183 100644 --- a/arch/arm/mach-pnx4008/time.c +++ b/arch/arm/mach-pnx4008/time.c @@ -80,7 +80,7 @@ static irqreturn_t pnx4008_timer_interrupt(int irq, void *dev_id) static struct irqaction pnx4008_timer_irq = { .name = "PNX4008 Tick Timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = pnx4008_timer_interrupt }; diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c index 293e40aeaf29..9fa6e1a0b96f 100644 --- a/arch/arm/mach-pxa/time.c +++ b/arch/arm/mach-pxa/time.c @@ -133,7 +133,7 @@ static struct clocksource cksrc_pxa_oscr0 = { static struct irqaction pxa_ost0_irq = { .name = "ost0", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = pxa_ost0_interrupt, .dev_id = &ckevt_pxa_osmr0, }; diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c index 74b6e0e570b6..7ec781d5249a 100644 --- a/arch/arm/mach-sa1100/time.c +++ b/arch/arm/mach-sa1100/time.c @@ -87,7 +87,7 @@ static struct clocksource cksrc_sa1100_oscr = { static struct irqaction sa1100_timer_irq = { .name = "ost0", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = sa1100_ost0_interrupt, .dev_id = &ckevt_sa1100_osmr0, }; diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c index 358d875ace14..d1d6ea5e9fcf 100644 --- a/arch/arm/mach-shark/core.c +++ b/arch/arm/mach-shark/core.c @@ -130,7 +130,7 @@ shark_timer_interrupt(int irq, void *dev_id) static struct irqaction shark_timer_irq = { .name = "Shark Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = shark_timer_interrupt, }; diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c index 26d26f5100fe..63db057de08e 100644 --- a/arch/arm/mach-u300/timer.c +++ b/arch/arm/mach-u300/timer.c @@ -326,7 +326,7 @@ static irqreturn_t u300_timer_interrupt(int irq, void *dev_id) static struct irqaction u300_timer_irq = { .name = "U300 Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = u300_timer_interrupt, }; diff --git a/arch/arm/mach-w90x900/time.c b/arch/arm/mach-w90x900/time.c index b80f769bc135..9cda8355e19d 100644 --- a/arch/arm/mach-w90x900/time.c +++ b/arch/arm/mach-w90x900/time.c @@ -111,7 +111,7 @@ static irqreturn_t nuc900_timer0_interrupt(int irq, void *dev_id) static struct irqaction nuc900_timer0_irq = { .name = "nuc900-timer0", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = nuc900_timer0_interrupt, }; diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c index 6c8a02ad98e3..e3201481741b 100644 --- a/arch/arm/plat-iop/time.c +++ b/arch/arm/plat-iop/time.c @@ -164,7 +164,7 @@ iop_timer_interrupt(int irq, void *dev_id) static struct irqaction iop_timer_irq = 
{ .name = "IOP Timer Tick", .handler = iop_timer_interrupt, - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .dev_id = &iop_clockevent, }; diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c index f9a1b059a76c..e5ca2bdaa92d 100644 --- a/arch/arm/plat-mxc/time.c +++ b/arch/arm/plat-mxc/time.c @@ -258,7 +258,7 @@ static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id) static struct irqaction mxc_timer_irq = { .name = "i.MX Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = mxc_timer_interrupt, }; diff --git a/arch/arm/plat-samsung/time.c b/arch/arm/plat-samsung/time.c index 2231d80ad817..133069ae6feb 100644 --- a/arch/arm/plat-samsung/time.c +++ b/arch/arm/plat-samsung/time.c @@ -138,7 +138,7 @@ s3c2410_timer_interrupt(int irq, void *dev_id) static struct irqaction s3c2410_timer_irq = { .name = "S3C2410 Timer Tick", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = s3c2410_timer_interrupt, }; diff --git a/arch/arm/plat-versatile/timer-sp.c b/arch/arm/plat-versatile/timer-sp.c index fb0d1c299718..62066b4b6af7 100644 --- a/arch/arm/plat-versatile/timer-sp.c +++ b/arch/arm/plat-versatile/timer-sp.c @@ -135,7 +135,7 @@ static struct clock_event_device sp804_clockevent = { static struct irqaction sp804_timer_irq = { .name = "timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER, .handler = sp804_timer_interrupt, .dev_id = &sp804_clockevent, }; diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index 8c9a43daf80f..6fefad4edd19 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c @@ -213,8 +213,7 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id) static struct irqaction gptmr0_irq = { .name = "Blackfin GPTimer0", - .flags = IRQF_DISABLED | IRQF_TIMER | \ - IRQF_IRQPOLL | IRQF_PERCPU, + .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU, .handler = bfin_gptmr0_interrupt, }; @@ -322,8 +321,7 @@ irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id) static struct irqaction coretmr_irq = { .name = "Blackfin CoreTimer", - .flags = IRQF_DISABLED | IRQF_TIMER | \ - IRQF_IRQPOLL | IRQF_PERCPU, + .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU, .handler = bfin_coretmr_interrupt, }; diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 653b3c46ea82..8b366c4cc632 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -411,7 +411,7 @@ static cycle_t itc_get_cycles(struct clocksource *cs) static struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_IRQPOLL, + .flags = IRQF_DISABLED, .name = "timer" }; diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index efbcee5d2220..24681d553e13 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -383,7 +383,7 @@ void do_cpu_irq_mask(struct pt_regs *regs) static struct irqaction timer_action = { .handler = timer_interrupt, .name = "timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL, + .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU, }; #ifdef CONFIG_SMP diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index fb5cc5e14cfa..476e2fe402b5 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -88,7 +88,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) static struct irqaction irq0 = { .handler = timer_interrupt, - .flags 
= IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER, + .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, .name = "timer" }; diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index f3d3898898ed..60b681cb0bf6 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -610,8 +610,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) p->irqaction.name = dev_name(&p->pdev->dev); p->irqaction.handler = sh_cmt_interrupt; p->irqaction.dev_id = p; - p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ - IRQF_IRQPOLL | IRQF_NOBALANCING; + p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_NOBALANCING; /* get hold of clock */ p->clk = clk_get(&p->pdev->dev, "cmt_fck"); diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index ef7a5be8a09f..9fe3507a453d 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -281,8 +281,7 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev) p->irqaction.handler = sh_mtu2_interrupt; p->irqaction.dev_id = p; p->irqaction.irq = irq; - p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ - IRQF_IRQPOLL | IRQF_NOBALANCING; + p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_NOBALANCING; /* get hold of clock */ p->clk = clk_get(&p->pdev->dev, "mtu2_fck"); diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index de715901b82a..75967f8e028e 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -387,8 +387,7 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev) p->irqaction.handler = sh_tmu_interrupt; p->irqaction.dev_id = p; p->irqaction.irq = irq; - p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ - IRQF_IRQPOLL | IRQF_NOBALANCING; + p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_NOBALANCING; /* get hold of clock */ p->clk = clk_get(&p->pdev->dev, "tmu_fck"); diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c2331138ca1b..61857f1bda6a 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -47,9 +47,6 @@ * IRQF_TIMER - Flag to mark this interrupt as timer interrupt * IRQF_PERCPU - Interrupt is per cpu * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing - * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is - * registered first in an shared interrupt is considered for - * performance reasons) * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. * Used by threaded interrupts which need to keep the * irq line disabled until the threaded handler has been run. @@ -61,7 +58,6 @@ #define IRQF_TIMER 0x00000200 #define IRQF_PERCPU 0x00000400 #define IRQF_NOBALANCING 0x00000800 -#define IRQF_IRQPOLL 0x00001000 #define IRQF_ONESHOT 0x00002000 /* -- cgit v1.2.3 From 45e1f61e3a8de3577a1c32f63c1af32b8141fa50 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 21 Jun 2010 16:04:52 +0200 Subject: irq: misc preparations for further changes * properly indent irqaction fields. 
* factor out print_irq_handlers() Signed-off-by: Tejun Heo --- include/linux/interrupt.h | 20 ++++++++++---------- kernel/irq/spurious.c | 28 ++++++++++++++++------------ 2 files changed, 26 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 61857f1bda6a..b20bd6500972 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -102,16 +102,16 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); * @thread_flags: flags related to @thread */ struct irqaction { - irq_handler_t handler; - unsigned long flags; - const char *name; - void *dev_id; - struct irqaction *next; - int irq; - struct proc_dir_entry *dir; - irq_handler_t thread_fn; - struct task_struct *thread; - unsigned long thread_flags; + irq_handler_t handler; + unsigned long flags; + const char *name; + void *dev_id; + struct irqaction *next; + int irq; + struct proc_dir_entry *dir; + irq_handler_t thread_fn; + struct task_struct *thread; + unsigned long thread_flags; }; extern irqreturn_t no_action(int cpl, void *dev_id); diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index fc18a13ac831..0bce0e397b35 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -28,6 +28,21 @@ enum { int noirqdebug __read_mostly; static int irqfixup __read_mostly = IRQFIXUP_SPURIOUS; +static void print_irq_handlers(struct irq_desc *desc) +{ + struct irqaction *action; + + printk(KERN_ERR "handlers:\n"); + + action = desc->action; + while (action) { + printk(KERN_ERR "[<%p>]", action->handler); + print_symbol(" (%s)", (unsigned long)action->handler); + printk("\n"); + action = action->next; + } +} + /* * Recovery handler for misrouted interrupts. */ @@ -126,8 +141,6 @@ static void __report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) { - struct irqaction *action; - if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) { printk(KERN_ERR "irq event %d: bogus return value %x\n", irq, action_ret); @@ -136,16 +149,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc, "the \"irqpoll\" option)\n", irq); } dump_stack(); - printk(KERN_ERR "handlers:\n"); - - action = desc->action; - while (action) { - printk(KERN_ERR "[<%p>]", action->handler); - print_symbol(" (%s)", - (unsigned long)action->handler); - printk("\n"); - action = action->next; - } + print_irq_handlers(desc); } static void -- cgit v1.2.3 From ad1c8a5a7d168d5623b86da0bfbc5ade0083aa66 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 21 Jun 2010 16:04:53 +0200 Subject: irq: improve spurious IRQ handling Currently, once spurious polling is enabled, it's never disabled and to avoid enabling it unnecessarily, the condition for kicking in is very conservative. Now that spurious polling is per-IRQ, it can be made more adaptive without adding overhead to the fast path. This patch improves spurious handling such that the spurious IRQ polling kicks in earlier and it disables itself after polling a certain number of times which is automatically adjusted according to whether and when a spurious IRQ happens again. This allows the system to work around temporary IRQ glitches without paying unnecessary long-term overhead.
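The adjustment rule is easiest to see in isolation. The following standalone sketch (plain C, with the IRQ_SPR_* constants from the diff below folded into macros; an illustration of the arithmetic, not kernel code) shows how the poll budget grows after a bad period and decays after good ones:

	#include <limits.h>

	#define POLL_CNT_MIN		10000U		/* IRQ_SPR_POLL_CNT_MIN */
	#define POLL_CNT_INF		UINT_MAX	/* poll indefinitely */
	#define POLL_CNT_INC_SHIFT	3		/* x8 after a bad period */
	#define POLL_CNT_DEC_SHIFT	1		/* /2 per good period */

	/* after a bad period: grow the poll budget, saturating at "infinite" */
	static unsigned int grow_poll_cnt(unsigned int cur)
	{
		unsigned int cnt = cur > POLL_CNT_MIN ? cur : POLL_CNT_MIN;
		unsigned int grown = cnt << POLL_CNT_INC_SHIFT;

		return grown < cnt ? POLL_CNT_INF : grown;	/* overflowed? */
	}

	/* after good periods: decay gently so a recent glitch isn't forgotten */
	static unsigned int shrink_poll_cnt(unsigned int cur, int good_periods)
	{
		int sft = POLL_CNT_DEC_SHIFT * good_periods;

		/* clamp like the patch: at least 1, at most a quarter of the bits */
		if (sft < 1)
			sft = 1;
		if (sft > (int)(CHAR_BIT * sizeof(int) / 4))
			sft = CHAR_BIT * sizeof(int) / 4;
		return cur >> sft;
	}

A glitchy IRQ is thus polled about 10000 times (~100 seconds at 10ms intervals) on its first activation, eight times longer after each relapse, while each good period halves the stored budget.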
Signed-off-by: Tejun Heo --- include/linux/irq.h | 19 +++- kernel/irq/chip.c | 2 - kernel/irq/internals.h | 2 +- kernel/irq/manage.c | 4 - kernel/irq/proc.c | 5 +- kernel/irq/spurious.c | 292 +++++++++++++++++++++++++++++++++++-------------- 6 files changed, 226 insertions(+), 98 deletions(-) (limited to 'include') diff --git a/include/linux/irq.h b/include/linux/irq.h index 50a77f92cf16..b2f73ba2ff65 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -145,6 +145,17 @@ struct irq_chip { struct timer_rand_state; struct irq_2_iommu; + +/* spurious IRQ tracking and handling */ +struct irq_spr { + unsigned long last_bad; /* when was the last bad? */ + unsigned long period_start; /* period start jiffies */ + unsigned int nr_samples; /* nr of irqs in this period */ + unsigned int nr_bad; /* nr of bad deliveries */ + unsigned int poll_cnt; /* nr to poll once activated */ + unsigned int poll_rem; /* how many polls are left? */ +}; + /** * struct irq_desc - interrupt descriptor * @irq: interrupt number for this descriptor @@ -161,15 +172,13 @@ struct irq_2_iommu; * @status: status information * @depth: disable-depth, for nested irq_disable() calls * @wake_depth: enable depth, for multiple set_irq_wake() callers - * @irq_count: stats field to detect stalled irqs - * @last_unhandled: aging timer for unhandled count - * @irqs_unhandled: stats field for spurious unhandled interrupts * @lock: locking for SMP * @affinity: IRQ affinity on SMP * @node: node index useful for balancing * @pending_mask: pending rebalanced interrupts * @threads_active: number of irqaction threads currently running * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers + * @spr: data for spurious IRQ handling * @poll_timer: timer for IRQ polling * @dir: /proc/irq/ procfs entry * @name: flow handler name for /proc/interrupts output @@ -191,9 +200,6 @@ struct irq_desc { unsigned int depth; /* nested irq disables */ unsigned int wake_depth; /* nested wake enables */ - unsigned int irq_count; /* For detecting broken IRQs */ - unsigned long last_unhandled; /* Aging timer for unhandled count */ - unsigned int irqs_unhandled; raw_spinlock_t lock; #ifdef CONFIG_SMP cpumask_var_t affinity; @@ -206,6 +212,7 @@ struct irq_desc { atomic_t threads_active; wait_queue_head_t wait_for_threads; + struct irq_spr spr; struct timer_list poll_timer; #ifdef CONFIG_PROC_FS diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index db26ff036911..45a87f57ade7 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -40,8 +40,6 @@ static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data) if (!keep_chip_data) desc->chip_data = NULL; desc->action = NULL; - desc->irq_count = 0; - desc->irqs_unhandled = 0; #ifdef CONFIG_SMP cpumask_setall(desc->affinity); #ifdef CONFIG_GENERIC_PENDING_IRQ diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 088e5d6c9a4e..1b24309a0404 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -68,7 +68,7 @@ static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc) static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) { printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", - irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); + irq, desc, desc->depth, desc->spr.nr_samples, desc->spr.nr_bad); printk("->handle_irq(): %p, ", desc->handle_irq); print_symbol("%s\n", (unsigned long)desc->handle_irq); printk("->chip(): %p, ", desc->chip); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 
cf9ab6507d21..5862bfcd5a43 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -801,10 +801,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) new->irq = irq; *old_ptr = new; - /* Reset broken irq detection when installing new handler */ - desc->irq_count = 0; - desc->irqs_unhandled = 0; - raw_spin_unlock_irqrestore(&desc->lock, flags); irq_poll_action_added(desc, new); diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 09a2ee540bd2..b072460a602f 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -205,10 +205,11 @@ static const struct file_operations irq_node_proc_fops = { static int irq_spurious_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long) m->private); + struct irq_spr *spr = &desc->spr; seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n", - desc->irq_count, desc->irqs_unhandled, - jiffies_to_msecs(desc->last_unhandled)); + spr->nr_samples, spr->nr_bad, + jiffies_to_msecs(spr->last_bad)); return 0; } diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 19f7b499a9b5..6ecf4cb9b901 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -25,6 +26,37 @@ enum { IRQ_POLL_INTV = HZ / 100, /* from the good ol' 100HZ tick */ IRQ_POLL_SLACK = HZ / 250, /* 1 tick slack w/ the popular 250HZ config */ + + /* + * Spurious IRQ handling parameters. + * + * As this per-IRQ spurious handling is cheaper than the + * previous system wide spurious handling, it can afford to + * use more responsive settings but these parameters are still + * pretty conservative. If ever necessary, making it more + * responsive shouldn't cause any problem. + * + * Spurious IRQs are monitored in segments of PERIOD_SAMPLES + * IRQs which can stretch PERIOD_DURATION at maximum. If + * there are less than PERIOD_SAMPLES IRQs per + * PERIOD_DURATION, the period is considered good. + * + * If >=BAD_THRESHOLD IRQs are bad ones, the period is + * considered bad and spurious IRQ handling kicks in - the IRQ + * is disabled and polled. The IRQ is given another shot + * after a certain number of IRQs are handled, which is at minimum + * POLL_CNT_MIN, increased by 1 << POLL_CNT_INC_SHIFT times + * after each bad period and decreased by a factor of + * 1 << POLL_CNT_DEC_SHIFT after each good one. + */ + IRQ_SPR_PERIOD_DURATION = 10 * HZ, + IRQ_SPR_PERIOD_SAMPLES = 10000, + IRQ_SPR_BAD_THRESHOLD = 9900, + IRQ_SPR_POLL_CNT_MIN = 10000, + IRQ_SPR_POLL_CNT_INF = UINT_MAX, + IRQ_SPR_POLL_CNT_INC_SHIFT = 3, + IRQ_SPR_POLL_CNT_DEC_SHIFT = 1, + IRQ_SPR_POLL_CNT_MAX_DEC_SHIFT = BITS_PER_BYTE * sizeof(int) / 4, }; int noirqdebug __read_mostly; @@ -77,8 +109,24 @@ static void irq_schedule_poll(struct irq_desc *desc, unsigned long intv) mod_timer(&desc->poll_timer, expires); } +/* start a new spurious handling period */ +static void irq_spr_new_period(struct irq_spr *spr) +{ + spr->period_start = jiffies; + spr->nr_samples = 0; + spr->nr_bad = 0; +} + +/* Reset spurious handling. After this, poll_timer will offline itself soon. */ +static void irq_spr_reset(struct irq_spr *spr) +{ + irq_spr_new_period(spr); + spr->poll_cnt = IRQ_SPR_POLL_CNT_MIN; + spr->poll_rem = 0; +} + /* - * Recovery handler for misrouted interrupts. + * Perform an actual poll.
*/ static int try_one_irq(int irq, struct irq_desc *desc) { @@ -161,91 +209,99 @@ static int misrouted_irq(int irq) } /* - * If 99,900 of the previous 100,000 interrupts have not been handled - * then assume that the IRQ is stuck in some manner. Drop a diagnostic - * and try to turn the IRQ off. - * - * (The other 100-of-100,000 interrupts may have been a correctly - * functioning device sharing an IRQ with the failing one) - * - * Called under desc->lock + * IRQ delivery notification function. Called after each IRQ delivery. */ - -static void -__report_bad_irq(unsigned int irq, struct irq_desc *desc, - irqreturn_t action_ret) -{ - if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) { - printk(KERN_ERR "irq event %d: bogus return value %x\n", - irq, action_ret); - } else { - printk(KERN_ERR "irq %d: nobody cared (try booting with " - "the \"irqpoll\" option)\n", irq); - } - dump_stack(); - print_irq_handlers(desc); -} - -static void -report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) -{ - static int count = 100; - - if (count > 0) { - count--; - __report_bad_irq(irq, desc, action_ret); - } -} - void __note_interrupt(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) { - if (unlikely(action_ret != IRQ_HANDLED)) { - /* - * If we are seeing only the odd spurious IRQ caused by - * bus asynchronicity then don't eventually trigger an error, - * otherwise the counter becomes a doomsday timer for otherwise - * working systems - */ - if (time_after(jiffies, desc->last_unhandled + HZ/10)) - desc->irqs_unhandled = 1; - else - desc->irqs_unhandled++; - desc->last_unhandled = jiffies; - if (unlikely(action_ret != IRQ_NONE)) - report_bad_irq(irq, desc, action_ret); - } + struct irq_spr *spr = &desc->spr; + unsigned long dur; + unsigned int cnt, abbr; + char unit = 'k'; - if (unlikely(irqfixup >= IRQFIXUP_MISROUTED && - action_ret == IRQ_NONE)) { - int ok = misrouted_irq(irq); - if (action_ret == IRQ_NONE) - desc->irqs_unhandled -= ok; + /* + * Account for unhandled interrupt. We don't care whether + * spurious accounting update races with irq open/close and + * gets some values wrong. Do it w/o locking. + */ + if (unlikely(action_ret != IRQ_HANDLED)) { + static int bogus_count = 100; + + spr->last_bad = jiffies - INITIAL_JIFFIES; + spr->nr_bad++; + if (likely(action_ret == IRQ_NONE)) { + if (unlikely(irqfixup >= IRQFIXUP_MISROUTED && + misrouted_irq(irq))) + spr->nr_bad--; + } else if (bogus_count > 0) { + bogus_count--; + printk(KERN_ERR "IRQ %u: bogus return value %x\n", + irq, action_ret); + dump_stack(); + print_irq_handlers(desc); + } } - desc->irq_count++; - if (likely(desc->irq_count < 100000)) + /* did we finish this spurious period? */ + spr->nr_samples++; + if (likely(spr->nr_samples < IRQ_SPR_PERIOD_SAMPLES)) return; - desc->irq_count = 0; - if (unlikely(desc->irqs_unhandled > 99900)) { + /* if so, was it a good one? */ + dur = jiffies - spr->period_start; + if (likely(spr->nr_bad < IRQ_SPR_BAD_THRESHOLD || + dur > IRQ_SPR_PERIOD_DURATION)) { /* - * The interrupt is stuck + * If longer than PERIOD_DURATION has passed, consider + * multiple good periods have happened. 
*/ - __report_bad_irq(irq, desc, action_ret); - /* - * Now kill the IRQ - */ - printk(KERN_EMERG "Disabling IRQ #%d\n", irq); - desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; - desc->depth++; - desc->chip->disable(irq); + int sft = IRQ_SPR_POLL_CNT_DEC_SHIFT * + (dur >> order_base_2(IRQ_SPR_PERIOD_DURATION)); - raw_spin_lock(&desc->lock); - irq_schedule_poll(desc, IRQ_POLL_INTV); - raw_spin_unlock(&desc->lock); + /* but don't kill poll_cnt at once */ + sft = clamp(sft, 1, IRQ_SPR_POLL_CNT_MAX_DEC_SHIFT); + + spr->poll_cnt >>= sft; + irq_spr_new_period(spr); + return; } - desc->irqs_unhandled = 0; + + /* + * It was a bad one, start polling. This is a slow path and + * we're gonna be changing states which require proper + * synchronization, grab desc->lock. + */ + raw_spin_lock(&desc->lock); + + irq_spr_new_period(spr); + + /* update spr_poll_cnt considering the lower and upper bounds */ + cnt = max_t(unsigned int, spr->poll_cnt, IRQ_SPR_POLL_CNT_MIN); + spr->poll_cnt = cnt << IRQ_SPR_POLL_CNT_INC_SHIFT; + if (spr->poll_cnt < cnt) /* did it overflow? */ + spr->poll_cnt = IRQ_SPR_POLL_CNT_INF; + + /* whine, plug IRQ and kick poll timer */ + abbr = cnt / 1000; + if (abbr > 1000) { + abbr /= 1000; + unit = 'm'; + } + printk(KERN_ERR "IRQ %u: too many spurious IRQs, disabling and " + "polling for %u%c %umsec intervals.\n", + desc->irq, abbr, unit, jiffies_to_msecs(IRQ_POLL_INTV)); + printk(KERN_ERR "IRQ %u: system performance may be affected\n", + desc->irq); + print_irq_handlers(desc); + + desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; + desc->depth++; + desc->chip->disable(desc->irq); + + spr->poll_rem = cnt; + irq_schedule_poll(desc, IRQ_POLL_INTV); + + raw_spin_unlock(&desc->lock); } /* @@ -254,48 +310,118 @@ void __note_interrupt(unsigned int irq, struct irq_desc *desc, void poll_irq(unsigned long arg) { struct irq_desc *desc = (void *)arg; + struct irq_spr *spr = &desc->spr; + unsigned long intv = MAX_JIFFY_OFFSET; + bool reenable_irq = false; raw_spin_lock_irq(&desc->lock); + + /* poll the IRQ */ try_one_irq(desc->irq, desc); - irq_schedule_poll(desc, IRQ_POLL_INTV); + + /* take care of spurious handling */ + if (spr->poll_rem) { + if (spr->poll_rem != IRQ_SPR_POLL_CNT_INF) + spr->poll_rem--; + if (spr->poll_rem) + intv = IRQ_POLL_INTV; + else + irq_spr_new_period(spr); + } + if (!spr->poll_rem) + reenable_irq = desc->status & IRQ_SPURIOUS_DISABLED; + + /* need to poll again? 
*/ + if (intv < MAX_JIFFY_OFFSET) + irq_schedule_poll(desc, intv); + + raw_spin_unlock_irq(&desc->lock); + + if (!reenable_irq) + return; + + /* need to do locking dance for chip_bus_lock() to reenable IRQ */ + chip_bus_lock(desc->irq, desc); + raw_spin_lock_irq(&desc->lock); + + /* make sure we haven't raced with anyone inbetween */ + if (!spr->poll_rem && (desc->status & IRQ_SPURIOUS_DISABLED)) { + printk(KERN_INFO "IRQ %u: spurious polling finished, " + "reenabling IRQ\n", desc->irq); + __enable_irq(desc, desc->irq, false); + desc->status &= ~IRQ_SPURIOUS_DISABLED; + } + raw_spin_unlock_irq(&desc->lock); + chip_bus_sync_unlock(desc->irq, desc); } void irq_poll_action_added(struct irq_desc *desc, struct irqaction *action) { + struct irq_spr *spr = &desc->spr; unsigned long flags; raw_spin_lock_irqsave(&desc->lock, flags); - /* if the interrupt was killed before, give it one more chance */ - if (desc->status & IRQ_SPURIOUS_DISABLED) { - desc->status &= ~IRQ_SPURIOUS_DISABLED; - __enable_irq(desc, desc->irq, false); - } - - if ((action->flags & IRQF_SHARED) && irqfixup >= IRQFIXUP_POLL) + if ((action->flags & IRQF_SHARED) && irqfixup >= IRQFIXUP_POLL) { + if (!spr->poll_rem) + printk(KERN_INFO "IRQ %u: starting IRQFIXUP_POLL\n", + desc->irq); + spr->poll_rem = IRQ_SPR_POLL_CNT_INF; irq_schedule_poll(desc, IRQ_POLL_INTV); + } else { + /* new irqaction registered, give the IRQ another chance */ + irq_spr_reset(spr); + } raw_spin_unlock_irqrestore(&desc->lock, flags); } void irq_poll_action_removed(struct irq_desc *desc, struct irqaction *action) { + bool irq_enabled = false, timer_killed = false; unsigned long flags; + int rc; raw_spin_lock_irqsave(&desc->lock, flags); + /* give the IRQ another chance */ + if (irqfixup < IRQFIXUP_POLL) + irq_spr_reset(&desc->spr); + /* * Make sure the timer is offline if no irqaction is left as * the irq_desc will be reinitialized when the next irqaction - * is added. + * is added; otherwise, the timer can be left alone. It will + * offline itself if no longer necessary. */ - while (!desc->action && try_to_del_timer_sync(&desc->poll_timer) < 0) { + while (!desc->action) { + rc = try_to_del_timer_sync(&desc->poll_timer); + if (rc >= 0) { + timer_killed = rc > 0; + break; + } raw_spin_unlock_irqrestore(&desc->lock, flags); cpu_relax(); raw_spin_lock_irqsave(&desc->lock, flags); } + /* + * If the timer was forcefully shut down, it might not have + * had the chance to reenable IRQ. Make sure it's enabled. + */ + if (timer_killed && (desc->status & IRQ_SPURIOUS_DISABLED)) { + __enable_irq(desc, desc->irq, false); + desc->status &= ~IRQ_SPURIOUS_DISABLED; + irq_enabled = true; + } + + if (timer_killed || irq_enabled) + printk(KERN_INFO "IRQ %u:%s%s%s\n", desc->irq, + timer_killed ? " polling stopped" : "", + timer_killed && irq_enabled ? " and" : "", + irq_enabled ? " IRQ reenabled" : ""); + raw_spin_unlock_irqrestore(&desc->lock, flags); } -- cgit v1.2.3 From 33c9ac0e6990053d9b684193b5cf0b45494138c5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 21 Jun 2010 16:04:53 +0200 Subject: irq: implement IRQ watching This patch implements IRQ watching, which is a simple polling mechanism drivers can use to work around lost and/or misrouted IRQs. IRQ watching is enabled by a driver calling watch_irq(irq, dev_id). After that, it polls the irqaction for a certain amount of time (1min) and keeps track of whether IRQ delivery is actually working. If the irqaction is serviced by poll, it's considered to be a possible indication of IRQ misdelivery.
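From a driver's perspective the interface is a single call after request_irq(). A hedged usage sketch (hypothetical foo_* driver, not from the patch; the handler is registered IRQF_SHARED because the poller only invokes shared handlers):

	/* hypothetical driver hook showing the intended call sequence */
	static int foo_setup_irq(struct foo_dev *fd, unsigned int irq)
	{
		int ret;

		ret = request_irq(irq, foo_interrupt, IRQF_SHARED, "foo", fd);
		if (ret)
			return ret;

		/* verify delivery for a while; the watch steps out on its
		   own once the IRQ looks healthy */
		watch_irq(irq, fd);	/* dev_id must match request_irq() */
		return 0;
	}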
The watch polling starts slowly at 1HZ and speeds up to 100HZ when it sees a possible bad delivery. After collecting stats for a while, it determines whether the IRQ delivery is working for the irqaction. If so, or if it can't be determined, the watch steps out. If not working, IRQ polling continues till the irqaction is unregistered. This can be used by drivers which don't know when to expect the next IRQ. Just by calling watch_irq() after the irqaction is registered or after a timeout, most IRQ misrouting problems can be worked around. Signed-off-by: Tejun Heo --- include/linux/interrupt.h | 12 +++ include/linux/irq.h | 2 + kernel/irq/handle.c | 1 + kernel/irq/spurious.c | 209 +++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 221 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index b20bd6500972..bc0cdbcd6261 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -88,6 +88,14 @@ enum { typedef irqreturn_t (*irq_handler_t)(int, void *); +struct irq_watch { + irqreturn_t last_ret; + unsigned int flags; + unsigned long started; + unsigned int nr_samples; + unsigned int nr_polled; +}; + /** * struct irqaction - per interrupt action descriptor * @handler: interrupt handler function @@ -100,6 +108,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); * @thread_fn: interupt handler function for threaded interrupts * @thread: thread pointer for threaded interrupts * @thread_flags: flags related to @thread + * @watch: data for irq watching */ struct irqaction { irq_handler_t handler; @@ -112,6 +121,7 @@ struct irqaction { irq_handler_t thread_fn; struct task_struct *thread; unsigned long thread_flags; + struct irq_watch watch; }; extern irqreturn_t no_action(int cpl, void *dev_id); @@ -184,6 +194,8 @@ devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); +extern void watch_irq(unsigned int irq, void *dev_id); + /* * On lockdep we dont want to enable hardirqs in hardirq * context.
Use local_irq_enable_in_hardirq() to annotate diff --git a/include/linux/irq.h b/include/linux/irq.h index b2f73ba2ff65..e31954f6781c 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -72,6 +72,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, #define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */ #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ +#define IRQ_CHECK_WATCHES 0x40000000 /* IRQ watch enabled */ #ifdef CONFIG_IRQ_PER_CPU # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) @@ -214,6 +215,7 @@ struct irq_desc { struct irq_spr spr; struct timer_list poll_timer; + bool poll_warned; #ifdef CONFIG_PROC_FS struct proc_dir_entry *dir; diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 3ae50bf16512..685c3b3cf465 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -416,6 +416,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) } retval |= ret; + action->watch.last_ret = ret; action = action->next; } while (action); diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 6ecf4cb9b901..6e0433d41848 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -23,10 +23,50 @@ enum { IRQFIXUP_POLL = 2, /* enable polling by default */ /* IRQ polling common parameters */ + IRQ_POLL_SLOW_INTV = 3 * HZ, /* not too slow for ppl, slow enough for machine */ IRQ_POLL_INTV = HZ / 100, /* from the good ol' 100HZ tick */ + IRQ_POLL_SLOW_SLACK = HZ, IRQ_POLL_SLACK = HZ / 250, /* 1 tick slack w/ the popular 250HZ config */ + /* + * IRQ watch parameters. + * + * As IRQ watching has much less information about what's + * going on, the parameters are more conservative. It will + * terminate unless it can reliably determine that IRQ + * delivery isn't working. + * + * IRQs are watched in timed intervals which is BASE_PERIOD + * long by default. Polling interval starts at BASE_INTV and + * grows upto SLOW_INTV if no bad delivery is detected. + * + * If a period contains zero sample and no bad delivery was + * seen since watch started, watch terminates. + * + * If a period contains >=1 but <MIN_SAMPLES deliveries, + * collected samples are inherited to the next period. + * + * If >=BAD_PCT% are bad, the + * irqaction is tagged bad and watched indefinitely. if + * BAD_PCT% > nr_bad >= WARY_PCT%, WARY_PERIOD is used instead + * of BASE_PERIOD and the whole process is restarted.
If + * <WARY_PCT% are bad, watch terminates. + */ }; int noirqdebug __read_mostly; static int irqfixup __read_mostly = IRQFIXUP_SPURIOUS; +static struct irqaction *find_irq_action(struct irq_desc *desc, void *dev_id) +{ + struct irqaction *act; + + for (act = desc->action; act; act = act->next) + if (act->dev_id == dev_id) + return act; + return NULL; +} + static void print_irq_handlers(struct irq_desc *desc) { struct irqaction *action; @@ -77,9 +127,25 @@ static void print_irq_handlers(struct irq_desc *desc) } } +static void warn_irq_poll(struct irq_desc *desc, struct irqaction *act) +{ + if (desc->poll_warned) + return; + + desc->poll_warned = true; + + printk(KERN_WARNING "IRQ %u: %s: can't verify IRQ, will keep polling\n", + desc->irq, act->name); + printk(KERN_WARNING "IRQ %u: %s: system performance may be affected\n", + desc->irq, act->name); +} + static unsigned long irq_poll_slack(unsigned long intv) { - return IRQ_POLL_SLACK; + if (intv >= IRQ_POLL_SLOW_INTV) + return IRQ_POLL_SLOW_SLACK; + else + return IRQ_POLL_SLACK; } /** @@ -109,6 +175,119 @@ static void irq_schedule_poll(struct irq_desc *desc, unsigned long intv) mod_timer(&desc->poll_timer, expires); } +/** + * irq_update_watch - IRQ handled, update watch state + * @desc: IRQ desc of interest + * @act: IRQ action of interest + * @via_poll: IRQ was handled via poll + * + * Called after IRQ is successfully delivered or polled. Updates + * watch state accordingly and determines which watch interval to use. + * + * CONTEXT: + * desc->lock + * + * RETURNS: + * Watch poll interval to use, MAX_JIFFY_OFFSET if watch polling isn't + * necessary. + */ +static unsigned long irq_update_watch(struct irq_desc *desc, + struct irqaction *act, bool via_poll) +{ + struct irq_watch *wat = &act->watch; + unsigned long period = wat->flags & IRQ_WAT_WARY ? + IRQ_WAT_WARY_PERIOD : IRQ_WAT_BASE_PERIOD; + + /* if not watching or already determined to be bad, it's easy */ + if (!(wat->flags & IRQ_WATCHING)) + return MAX_JIFFY_OFFSET; + if (wat->flags & IRQ_WAT_BAD) + return IRQ_POLL_INTV; + + /* don't expire watch period while spurious polling is in effect */ + if (desc->spr.poll_rem) { + wat->started = jiffies; + return IRQ_POLL_INTV; + } + + /* IRQ was handled, record whether it was a good or bad delivery */ + if (wat->last_ret == IRQ_HANDLED) { + wat->nr_samples++; + if (via_poll) { + wat->nr_polled++; + wat->flags |= IRQ_WAT_POLLED; + } + } + + /* is this watch period over? */ + if (time_after(jiffies, wat->started + period)) { + unsigned int wry_thr = wat->nr_samples * IRQ_WAT_WARY_PCT / 100; + unsigned int bad_thr = wat->nr_samples * IRQ_WAT_BAD_PCT / 100; + + if (wat->nr_samples >= IRQ_WAT_MIN_SAMPLES) { + /* have enough samples, determine what to do */ + if (wat->nr_polled <= wry_thr) + wat->flags &= ~IRQ_WATCHING; + else if (wat->nr_polled <= bad_thr) + wat->flags |= IRQ_WAT_WARY; + else { + warn_irq_poll(desc, act); + wat->flags |= IRQ_WAT_BAD; + } + wat->nr_samples = 0; + wat->nr_polled = 0; + } else if (!wat->nr_samples || !(wat->flags & IRQ_WAT_POLLED)) { + /* not sure but let's not hold onto it */ + wat->flags &= ~IRQ_WATCHING; + } + + wat->started = jiffies; + } + + if (!(wat->flags & IRQ_WATCHING)) + return MAX_JIFFY_OFFSET; + if (wat->flags & IRQ_WAT_POLLED) + return IRQ_POLL_INTV; + /* every delivery upto this point has been successful, grow interval */ + return clamp_t(unsigned long, jiffies - wat->started, + IRQ_WAT_BASE_INTV, IRQ_POLL_SLOW_INTV); +} + +/** + * watch_irq - watch an irqaction + * @irq: IRQ the irqaction to watch belongs to + * @dev_id: dev_id for the irqaction to watch + * + * LOCKING: + * Grabs and releases desc->lock.
+ */ +void watch_irq(unsigned int irq, void *dev_id) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *act; + unsigned long flags; + + if (WARN_ON_ONCE(!desc)) + return; + + raw_spin_lock_irqsave(&desc->lock, flags); + + act = find_irq_action(desc, dev_id); + if (!WARN_ON_ONCE(!act)) { + struct irq_watch *wat = &act->watch; + + wat->flags |= IRQ_WATCHING; + wat->started = jiffies; + wat->nr_samples = 0; + wat->nr_polled = 0; + desc->status |= IRQ_CHECK_WATCHES; + irq_schedule_poll(desc, IRQ_WAT_BASE_INTV); + } + + raw_spin_unlock_irqrestore(&desc->lock, flags); +} +EXPORT_SYMBOL_GPL(watch_irq); + /* start a new spurious handling period */ static void irq_spr_new_period(struct irq_spr *spr) { @@ -151,8 +330,9 @@ static int try_one_irq(int irq, struct irq_desc *desc) while (action) { /* Only shared IRQ handlers are safe to call */ if (action->flags & IRQF_SHARED) { - if (action->handler(irq, action->dev_id) == - IRQ_HANDLED) + action->watch.last_ret = + action->handler(irq, action->dev_id); + if (action->watch.last_ret == IRQ_HANDLED) ok = 1; } action = action->next; @@ -219,6 +399,24 @@ void __note_interrupt(unsigned int irq, struct irq_desc *desc, unsigned int cnt, abbr; char unit = 'k'; + /* first, take care of IRQ watches */ + if (unlikely(desc->status & IRQ_CHECK_WATCHES)) { + unsigned long intv = MAX_JIFFY_OFFSET; + struct irqaction *act; + + raw_spin_lock(&desc->lock); + + for (act = desc->action; act; act = act->next) + intv = min(intv, irq_update_watch(desc, act, false)); + + if (intv < MAX_JIFFY_OFFSET) + irq_schedule_poll(desc, intv); + else + desc->status &= ~IRQ_CHECK_WATCHES; + + raw_spin_unlock(&desc->lock); + } + /* * Account for unhandled interrupt. We don't care whether * spurious accounting update races with irq open/close and @@ -313,6 +511,7 @@ void poll_irq(unsigned long arg) struct irq_spr *spr = &desc->spr; unsigned long intv = MAX_JIFFY_OFFSET; bool reenable_irq = false; + struct irqaction *act; raw_spin_lock_irq(&desc->lock); @@ -331,6 +530,10 @@ void poll_irq(unsigned long arg) if (!spr->poll_rem) reenable_irq = desc->status & IRQ_SPURIOUS_DISABLED; + /* take care of watches */ + for (act = desc->action; act; act = act->next) + intv = min(irq_update_watch(desc, act, true), intv); + /* need to poll again? */ if (intv < MAX_JIFFY_OFFSET) irq_schedule_poll(desc, intv); -- cgit v1.2.3 From bb6439658c8e66819bf23b5b6069f397de1f3d36 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 21 Jun 2010 16:04:53 +0200 Subject: irq: implement IRQ expecting This patch implements IRQ expecting, which can be used when a driver expects the controller to raise an interrupt in the relatively immediate future. A driver needs to allocate an irq expect token using init_irq_expect() to use it. expect_irq() should be called when an operation which will be followed by an interrupt is started, and unexpect_irq() when the operation finishes or times out. This allows the IRQ subsystem to closely monitor the IRQ and react quickly if the expected IRQ doesn't happen for whatever reason. The [un]expect_irq() functions are fairly lightweight and any real driver which accesses a hardware controller should be able to use them for each operation without adding noticeable overhead. This is most useful for drivers which have to deal with hardware which is inherently unreliable in dealing with interrupts.
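For illustration, the intended calling pattern looks roughly like the sketch below (a hypothetical foo driver with invented names; only init_irq_expect(), expect_irq() and unexpect_irq() come from this patch, and error handling is trimmed):

#include <linux/interrupt.h>

struct foo_dev {
	unsigned int		irq;
	struct irq_expect	*exp;	/* NULL is fine: calls become noops */
};

static int foo_init(struct foo_dev *fd)
{
	/*
	 * One token per irqaction; may return NULL (e.g. noirqdebug).
	 * @dev_id must match the dev_id the irqaction was registered
	 * with (here fd), as the token is tied to that irqaction.
	 */
	fd->exp = init_irq_expect(fd->irq, fd);
	return 0;
}

static void foo_submit(struct foo_dev *fd)
{
	expect_irq(fd->exp);		/* an IRQ should arrive shortly */
	/* ... write the command to the hardware ... */
}

static void foo_complete(struct foo_dev *fd)
{
	unexpect_irq(fd->exp, false);	/* operation completed */
}

static void foo_timeout(struct foo_dev *fd)
{
	unexpect_irq(fd->exp, true);	/* the expected IRQ never arrived */
}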
Signed-off-by: Tejun Heo --- include/linux/interrupt.h | 7 ++ include/linux/irq.h | 1 + kernel/irq/spurious.c | 276 +++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 281 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index bc0cdbcd6261..8bbd9dce0c70 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -88,6 +88,8 @@ enum { typedef irqreturn_t (*irq_handler_t)(int, void *); +struct irq_expect; + struct irq_watch { irqreturn_t last_ret; unsigned int flags; @@ -109,6 +111,7 @@ struct irq_watch { * @thread: thread pointer for threaded interrupts * @thread_flags: flags related to @thread * @watch: data for irq watching + * @expects: data for irq expecting */ struct irqaction { irq_handler_t handler; @@ -122,6 +125,7 @@ struct irqaction { struct task_struct *thread; unsigned long thread_flags; struct irq_watch watch; + struct irq_expect *expects; }; extern irqreturn_t no_action(int cpl, void *dev_id); @@ -194,6 +198,9 @@ devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); +extern struct irq_expect *init_irq_expect(unsigned int irq, void *dev_id); +extern void expect_irq(struct irq_expect *exp); +extern void unexpect_irq(struct irq_expect *exp, bool timedout); extern void watch_irq(unsigned int irq, void *dev_id); /* diff --git a/include/linux/irq.h b/include/linux/irq.h index e31954f6781c..98530ef9068e 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -72,6 +72,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, #define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */ #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ +#define IRQ_IN_POLLING 0x20000000 /* IRQ polling in progress */ #define IRQ_CHECK_WATCHES 0x40000000 /* IRQ watch enabled */ #ifdef CONFIG_IRQ_PER_CPU diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 6e0433d41848..465031dbda12 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "internals.h" @@ -25,9 +26,43 @@ enum { /* IRQ polling common parameters */ IRQ_POLL_SLOW_INTV = 3 * HZ, /* not too slow for ppl, slow enough for machine */ IRQ_POLL_INTV = HZ / 100, /* from the good ol' 100HZ tick */ + IRQ_POLL_QUICK_INTV = HZ / 1000, /* on every tick, basically */ IRQ_POLL_SLOW_SLACK = HZ, IRQ_POLL_SLACK = HZ / 250, /* 1 tick slack w/ the popular 250HZ config */ + IRQ_POLL_QUICK_SLACK = HZ / 10000, /* no slack, basically */ + + /* + * IRQ expect parameters. + * + * Because IRQ expecting is tightly coupled with the actual + * activity of the controller, we can be slightly aggressive + * and try to minimize the effect of lost interrupts. + * + * An irqaction must accumulate VERIFY_GOAL good deliveries, + * where one bad delivery (delivered by polling) costs + * BAD_FACTOR good ones, before reaching the verified state. + * + * QUICK_SAMPLES IRQ deliveries are examined and if + * >=QUICK_THRESHOLD of them are polled on the first poll, the + * IRQ is considered to be quick and QUICK_INTV is used + * instead. + * + * Keep QUICK_SAMPLES much higher than VERIFY_GOAL so that + * quick polling doesn't interact with the initial + * verification attempt (quicker polling increases the chance + * of polled deliveries).
+ */ + IRQ_EXP_BAD_FACTOR = 10, + IRQ_EXP_VERIFY_GOAL = 256, + IRQ_EXP_QUICK_SAMPLES = IRQ_EXP_VERIFY_GOAL * 4, + IRQ_EXP_QUICK_THRESHOLD = IRQ_EXP_QUICK_SAMPLES * 8 / 10, + + /* IRQ expect flags */ + IRQ_EXPECTING = (1 << 0), /* expecting in progress */ + IRQ_EXP_VERIFIED = (1 << 1), /* delivery verified, use slow interval */ + IRQ_EXP_QUICK = (1 << 2), /* quick polling enabled */ + IRQ_EXP_WARNED = (1 << 3), /* already whined */ /* * IRQ watch parameters. @@ -99,6 +134,18 @@ enum { IRQ_SPR_POLL_CNT_MAX_DEC_SHIFT = BITS_PER_BYTE * sizeof(int) / 4, }; +struct irq_expect { + struct irq_expect *next; + struct irq_desc *desc; /* the associated IRQ desc */ + struct irqaction *act; /* the associated IRQ action */ + + unsigned int flags; /* IRQ_EXP_* flags */ + unsigned int nr_samples; /* nr of collected samples in this period */ + unsigned int nr_quick; /* nr of polls completed after single attempt */ + unsigned int nr_good; /* nr of good IRQ deliveries */ + unsigned long started; /* when this period started */ +}; + int noirqdebug __read_mostly; static int irqfixup __read_mostly = IRQFIXUP_SPURIOUS; @@ -144,8 +191,10 @@ static unsigned long irq_poll_slack(unsigned long intv) { if (intv >= IRQ_POLL_SLOW_INTV) return IRQ_POLL_SLOW_SLACK; - else + else if (intv >= IRQ_POLL_INTV) return IRQ_POLL_SLACK; + else + return IRQ_POLL_QUICK_SLACK; } /** @@ -175,6 +224,206 @@ static void irq_schedule_poll(struct irq_desc *desc, unsigned long intv) mod_timer(&desc->poll_timer, expires); } +static unsigned long irq_exp_intv(struct irq_expect *exp) +{ + if (!(exp->flags & IRQ_EXPECTING)) + return MAX_JIFFY_OFFSET; + if (exp->flags & IRQ_EXP_VERIFIED) + return IRQ_POLL_SLOW_INTV; + if (exp->flags & IRQ_EXP_QUICK) + return IRQ_POLL_QUICK_INTV; + return IRQ_POLL_INTV; +} + +/** + * init_irq_expect - initialize IRQ expecting + * @irq: IRQ to expect + * @dev_id: dev_id of the irqaction to expect + * + * Initializes IRQ expecting and returns expect token to use. This + * function can be called multiple times for the same irqaction and + * each token can be used independently. + * + * CONTEXT: + * Does GFP_KERNEL allocation. + * + * RETURNS: + * irq_expect token to use on success, %NULL on failure. + */ +struct irq_expect *init_irq_expect(unsigned int irq, void *dev_id) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *act; + struct irq_expect *exp; + unsigned long flags; + + if (noirqdebug || WARN_ON_ONCE(!desc)) + return NULL; + + exp = kzalloc(sizeof(*exp), GFP_KERNEL); + if (!exp) { + printk(KERN_WARNING "IRQ %u: failed to initialize IRQ expect, " + "allocation failed\n", irq); + return NULL; + } + + exp->desc = desc; + + raw_spin_lock_irqsave(&desc->lock, flags); + + act = find_irq_action(desc, dev_id); + if (!WARN_ON_ONCE(!act)) { + exp->act = act; + exp->next = act->expects; + act->expects = exp; + } else { + kfree(exp); + exp = NULL; + } + + raw_spin_unlock_irqrestore(&desc->lock, flags); + + return exp; +} +EXPORT_SYMBOL_GPL(init_irq_expect); + +/** + * expect_irq - expect IRQ + * @exp: expect token acquired from init_irq_expect(), %NULL is allowed + * + * Tell IRQ subsystem to expect an IRQ. The IRQ might be polled until + * unexpect_irq() is called on @exp. If @exp is %NULL, this function + * becomes noop. + * + * This function is fairly cheap and drivers can call it for each + * interrupt driven operation without adding noticeable overhead in + * most cases. + * + * CONTEXT: + * Don't care. The caller is responsible for ensuring + * [un]expect_irq() calls don't overlap. 
Overlapping may lead to + * unexpected polling behaviors but won't directly cause a failure. + */ +void expect_irq(struct irq_expect *exp) +{ + struct irq_desc *desc; + unsigned long intv, deadline; + unsigned long flags; + + /* @exp is NULL if noirqdebug */ + if (unlikely(!exp)) + return; + + desc = exp->desc; + exp->flags |= IRQ_EXPECTING; + + /* + * Paired with mb in poll_irq(). Either we see timer pending + * cleared or poll_irq() sees IRQ_EXPECTING. + */ + smp_mb(); + + exp->started = jiffies; + intv = irq_exp_intv(exp); + deadline = exp->started + intv + irq_poll_slack(intv); + + /* + * poll_timer is never explicitly killed unless there's no + * action left on the irq; also, while it's online, timer + * duration is only shortened, which means that if we see + * ->expires in the future and not later than our deadline, + * the timer is guaranteed to fire before it. + */ + if (!timer_pending(&desc->poll_timer) || + time_after_eq(jiffies, desc->poll_timer.expires) || + time_before(deadline, desc->poll_timer.expires)) { + raw_spin_lock_irqsave(&desc->lock, flags); + irq_schedule_poll(desc, intv); + raw_spin_unlock_irqrestore(&desc->lock, flags); + } +} +EXPORT_SYMBOL_GPL(expect_irq); + +/** + * unexpect_irq - unexpect IRQ + * @exp: expect token acquired from init_irq_expect(), %NULL is allowed + * @timedout: did the IRQ time out? + * + * Tell IRQ subsystem to stop expecting an IRQ. Set @timedout to + * %true if the expected IRQ never arrived. If @exp is %NULL, this + * function becomes noop. + * + * This function is fairly cheap and drivers can call it for each + * interrupt driven operation without adding noticeable overhead in + * most cases. + * + * CONTEXT: + * Don't care. The caller is responsible for ensuring + * [un]expect_irq() calls don't overlap. Overlapping may lead to + * unexpected polling behaviors but won't directly cause a failure. + */ +void unexpect_irq(struct irq_expect *exp, bool timedout) +{ + struct irq_desc *desc; + + /* @exp is NULL if noirqdebug */ + if (unlikely(!exp) || (!(exp->flags & IRQ_EXPECTING) && !timedout)) + return; + + desc = exp->desc; + exp->flags &= ~IRQ_EXPECTING; + + /* successful completion from IRQ? */ + if (likely(!(desc->status & IRQ_IN_POLLING) && !timedout)) { + /* + * IRQ seems a bit more trustworthy. Allow nr_good to + * increase till VERIFY_GOAL + BAD_FACTOR - 1 so that + * single successful delivery can recover verified + * state after an accidental polling hit. + */ + if (unlikely(exp->nr_good < + IRQ_EXP_VERIFY_GOAL + IRQ_EXP_BAD_FACTOR - 1) && + ++exp->nr_good >= IRQ_EXP_VERIFY_GOAL) { + exp->flags |= IRQ_EXP_VERIFIED; + exp->nr_samples = 0; + exp->nr_quick = 0; + } + return; + } + + /* timedout or polled */ + if (timedout) { + exp->nr_good = 0; + } else { + exp->nr_good -= min_t(unsigned int, + exp->nr_good, IRQ_EXP_BAD_FACTOR); + + if (time_before_eq(jiffies, exp->started + IRQ_POLL_INTV)) + exp->nr_quick++; + + if (++exp->nr_samples >= IRQ_EXP_QUICK_SAMPLES) { + /* + * Use quick sampling checkpoints as warning + * checkpoints too.
+ */ + if (!(exp->flags & IRQ_EXP_WARNED) && + !desc->spr.poll_rem) { + warn_irq_poll(desc, exp->act); + exp->flags |= IRQ_EXP_WARNED; + } + + exp->flags &= ~IRQ_EXP_QUICK; + if (exp->nr_quick >= IRQ_EXP_QUICK_THRESHOLD) + exp->flags |= IRQ_EXP_QUICK; + exp->nr_samples = 0; + exp->nr_quick = 0; + } + } + + exp->flags &= ~IRQ_EXP_VERIFIED; +} +EXPORT_SYMBOL_GPL(unexpect_irq); + /** * irq_update_watch - IRQ handled, update watch state * @desc: IRQ desc of interest @@ -512,11 +761,14 @@ void poll_irq(unsigned long arg) unsigned long intv = MAX_JIFFY_OFFSET; bool reenable_irq = false; struct irqaction *act; + struct irq_expect *exp; raw_spin_lock_irq(&desc->lock); /* poll the IRQ */ + desc->status |= IRQ_IN_POLLING; try_one_irq(desc->irq, desc); + desc->status &= ~IRQ_IN_POLLING; /* take care of spurious handling */ if (spr->poll_rem) { @@ -530,9 +782,19 @@ if (!spr->poll_rem) reenable_irq = desc->status & IRQ_SPURIOUS_DISABLED; - /* take care of watches */ - for (act = desc->action; act; act = act->next) + /* + * Paired with mb in expect_irq() so that either they see + * timer pending cleared or irq_exp_intv() below sees + * IRQ_EXPECTING. + */ + smp_mb(); + + /* take care of expects and watches */ + for (act = desc->action; act; act = act->next) { intv = min(irq_update_watch(desc, act, true), intv); + for (exp = act->expects; exp; exp = exp->next) + intv = min(irq_exp_intv(exp), intv); + } /* need to poll again? */ if (intv < MAX_JIFFY_OFFSET) @@ -583,6 +845,7 @@ void irq_poll_action_added(struct irq_desc *desc, struct irqaction *action) void irq_poll_action_removed(struct irq_desc *desc, struct irqaction *action) { bool irq_enabled = false, timer_killed = false; + struct irq_expect *exp, *next; unsigned long flags; int rc; @@ -625,6 +888,13 @@ void irq_poll_action_removed(struct irq_desc *desc, struct irqaction *action) timer_killed && irq_enabled ? " and" : "", irq_enabled ? " IRQ reenabled" : ""); + /* free expect tokens */ + for (exp = action->expects; exp; exp = next) { + next = exp->next; + kfree(exp); + } + action->expects = NULL; + raw_spin_unlock_irqrestore(&desc->lock, flags); } -- cgit v1.2.3 From fa7cd37f808c487f722d7a2ce9c0923a323922df Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 16:46:22 +0200 Subject: libata: use IRQ expecting Legacy ATA is very susceptible to IRQ delivery problems in both directions - lost and spurious interrupts. In traditional PATA, the IRQ line is ultimately out of the controller and driver's control. Even relatively new SATA controllers share this problem as many still emulate the traditional IDE interface, which doesn't have a reliable way to indicate interrupt pending state, and there is also an issue regarding the interpretation of nIEN on both sides of the cable. Controllers with a native interface have fewer problems compared to the ones which use SFF but they are still affected by IRQ misrouting or broken MSI implementations. IRQ delivery problems on ATA are particularly nasty because ATA commonly hosts installation and/or booting. Most of these problems can be worked around by using the new IRQ expecting mechanism without adding any noticeable overhead. In ATA, almost all operations are initiated by the host and the controller signals progress or completion using an IRQ. IRQ expecting can easily be added in the libata core and applied to all libata drivers.
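One property worth spelling out for ATA, where an occasional polled completion is unavoidable: verified state recovers almost immediately after an isolated poll hit. The user-space sketch below mirrors the nr_good bookkeeping of unexpect_irq() from the previous patch (constants taken from that patch; simplified in that the real code keeps IRQ_EXP_VERIFIED as a flag rather than recomputing it):

#include <stdio.h>

enum { VERIFY_GOAL = 256, BAD_FACTOR = 10 };	/* IRQ_EXP_* values */

static unsigned int nr_good;

static void good_delivery(void)
{
	if (nr_good < VERIFY_GOAL + BAD_FACTOR - 1)	/* saturate at 265 */
		nr_good++;
}

static void polled_delivery(void)
{
	nr_good -= nr_good < BAD_FACTOR ? nr_good : BAD_FACTOR;
}

int main(void)
{
	int i;

	for (i = 0; i < 1000; i++)	/* long healthy run: saturates at 265 */
		good_delivery();
	polled_delivery();		/* one accidental poll hit: 255 */
	printf("verified: %s\n", nr_good >= VERIFY_GOAL ? "yes" : "no");
	good_delivery();		/* a single good IRQ: 256 again */
	printf("verified: %s\n", nr_good >= VERIFY_GOAL ? "yes" : "no");
	return 0;
}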
Signed-off-by: Tejun Heo Acked-by: Jeff Garzik --- drivers/ata/libata-core.c | 55 +++++++++++++++++++++++++++++++---------------- drivers/ata/libata-eh.c | 4 +++- drivers/ata/libata-sff.c | 37 +++++++++++++++---------------- include/linux/libata.h | 2 ++ 4 files changed, 60 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 649efa20d174..5ca7b71c6e68 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4958,22 +4958,7 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc) dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; } -/** - * ata_qc_complete - Complete an active ATA command - * @qc: Command to complete - * - * Indicate to the mid and upper layers that an ATA command has - * completed, with either an ok or not-ok status. - * - * Refrain from calling this function multiple times when - * successfully completing multiple NCQ commands. - * ata_qc_complete_multiple() should be used instead, which will - * properly update IRQ expect state. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_qc_complete(struct ata_queued_cmd *qc) +static void ata_qc_complete_raw(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; @@ -5051,6 +5036,27 @@ void ata_qc_complete(struct ata_queued_cmd *qc) } } +/** + * ata_qc_complete - Complete an active ATA command + * @qc: Command to complete + * + * Indicate to the mid and upper layers that an ATA command has + * completed, with either an ok or not-ok status. + * + * Refrain from calling this function multiple times when + * successfully completing multiple NCQ commands. + * ata_qc_complete_multiple() should be used instead, which will + * properly update IRQ expect state. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_qc_complete(struct ata_queued_cmd *qc) +{ + unexpect_irq(qc->ap->irq_expect, false); + ata_qc_complete_raw(qc); +} + /** * ata_qc_complete_multiple - Complete multiple qcs successfully * @ap: port in question @@ -5076,6 +5082,8 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) int nr_done = 0; u32 done_mask; + unexpect_irq(ap->irq_expect, false); + done_mask = ap->qc_active ^ qc_active; if (unlikely(done_mask & qc_active)) { @@ -5090,12 +5098,15 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) qc = ata_qc_from_tag(ap, tag); if (qc) { - ata_qc_complete(qc); + ata_qc_complete_raw(qc); nr_done++; } done_mask &= ~(1 << tag); } + if (ap->qc_active) + expect_irq(ap->irq_expect); + return nr_done; } @@ -5162,6 +5173,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc) qc->err_mask |= ap->ops->qc_issue(qc); if (unlikely(qc->err_mask)) goto err; + expect_irq(ap->irq_expect); return; sg_err: @@ -6194,8 +6206,13 @@ int ata_host_activate(struct ata_host *host, int irq, if (rc) return rc; - for (i = 0; i < host->n_ports; i++) - ata_port_desc(host->ports[i], "irq %d", irq); + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + if (!ata_port_is_dummy(ap)) + ap->irq_expect = init_irq_expect(irq, host); + ata_port_desc(ap, "irq %d%s", irq, ap->irq_expect ? "+" : ""); + } rc = ata_host_register(host, sht); /* if failed, just free the IRQ and leave ports alone */ diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index f77a67303f8b..f1ae3ec09f5f 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -619,8 +619,10 @@ void ata_scsi_error(struct Scsi_Host *host) * handler doesn't diddle with those qcs. This must * be done atomically w.r.t. 
setting QCFLAG_FAILED. */ - if (nr_timedout) + if (nr_timedout) { + unexpect_irq(ap->irq_expect, true); __ata_port_freeze(ap); + } spin_unlock_irqrestore(ap->lock, flags); diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index efa4a18cfb9d..cc96f367ccef 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -2388,7 +2388,8 @@ int ata_pci_sff_activate_host(struct ata_host *host, struct device *dev = host->dev; struct pci_dev *pdev = to_pci_dev(dev); const char *drv_name = dev_driver_string(host->dev); - int legacy_mode = 0, rc; + struct ata_port *ap[2] = { host->ports[0], host->ports[1] }; + int legacy_mode = 0, i, rc; rc = ata_host_start(host); if (rc) @@ -2422,29 +2423,29 @@ int ata_pci_sff_activate_host(struct ata_host *host, if (rc) goto out; - ata_port_desc(host->ports[0], "irq %d", pdev->irq); - ata_port_desc(host->ports[1], "irq %d", pdev->irq); + for (i = 0; i < 2; i++) { + if (!ata_port_is_dummy(ap[i])) + ap[i]->irq_expect = + init_irq_expect(pdev->irq, host); + ata_port_desc(ap[i], "irq %d%s", + pdev->irq, ap[i]->irq_expect ? "+" : ""); + } } else if (legacy_mode) { - if (!ata_port_is_dummy(host->ports[0])) { - rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev), - irq_handler, IRQF_SHARED, - drv_name, host); - if (rc) - goto out; + unsigned int irqs[2] = { ATA_PRIMARY_IRQ(pdev), + ATA_SECONDARY_IRQ(pdev) }; - ata_port_desc(host->ports[0], "irq %d", - ATA_PRIMARY_IRQ(pdev)); - } + for (i = 0; i < 2; i++) { + if (ata_port_is_dummy(ap[i])) + continue; - if (!ata_port_is_dummy(host->ports[1])) { - rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev), - irq_handler, IRQF_SHARED, - drv_name, host); + rc = devm_request_irq(dev, irqs[i], irq_handler, + IRQF_SHARED, drv_name, host); if (rc) goto out; - ata_port_desc(host->ports[1], "irq %d", - ATA_SECONDARY_IRQ(pdev)); + ap[i]->irq_expect = init_irq_expect(irqs[i], host); + ata_port_desc(ap[i], "irq %d%s", + irqs[i], ap[i]->irq_expect ? "+" : ""); } } diff --git a/include/linux/libata.h b/include/linux/libata.h index b85f3ff34d7d..3f5f159c8e62 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -751,6 +751,8 @@ struct ata_port { struct ata_host *host; struct device *dev; + struct irq_expect *irq_expect; /* for irq expecting */ + struct delayed_work hotplug_task; struct work_struct scsi_rescan_task; -- cgit v1.2.3
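For completeness, the watch interface added earlier in the series is simpler still to use from a driver. A hypothetical driver that distrusts its board's IRQ routing could do the following (foo_* names invented; request_irq() is the stock API and watch_irq() is the export added above; the handler is registered IRQF_SHARED so the poller may safely invoke it):

#include <linux/interrupt.h>

struct foo_dev {
	unsigned int irq;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	/* ... ack the hardware and handle the event ... */
	return IRQ_HANDLED;
}

static int foo_probe(struct foo_dev *fd)
{
	int rc;

	rc = request_irq(fd->irq, foo_irq, IRQF_SHARED, "foo", fd);
	if (rc)
		return rc;

	/*
	 * Delivery on this hardware is suspect: ask the core to watch
	 * the irqaction.  The watch terminates by itself once enough
	 * good deliveries are seen, so calling it unconditionally is
	 * safe.
	 */
	watch_irq(fd->irq, fd);
	return 0;
}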