From 40c8cefaaf12734327db7199a56e60058d98e7b6 Mon Sep 17 00:00:00 2001
From: Ira Snyder
Date: Fri, 6 Jan 2012 12:34:07 +0000
Subject: powerpc: Fix kernel log of oops/panic instruction dump

A kernel oops/panic prints an instruction dump showing several
instructions before and after the instruction which caused the
oops/panic. The code intended that the faulting instruction be enclosed
in angle brackets; however, a bug caused the faulting instruction to be
interpreted by printk() as the message log level. To fix this, the
KERN_CONT log level is added before the actual text of the printed
message.

=== Before the patch ===

[ 1081.587266] Instruction dump:
[ 1081.590236] 7c000110 7c0000f8 5400077c 552907f6 7d290378 992b0003 4e800020 38000001
[ 1081.598034] 3d20c03a 9009a114 7c0004ac 39200000
[ 1081.602500] 4e800020 3803ffd0 2b800009

<4>[ 1081.587266] Instruction dump:
<4>[ 1081.590236] 7c000110 7c0000f8 5400077c 552907f6 7d290378 992b0003 4e800020 38000001
<4>[ 1081.598034] 3d20c03a 9009a114 7c0004ac 39200000
<98090000>[ 1081.602500] 4e800020 3803ffd0 2b800009

=== After the patch ===

[ 51.385216] Instruction dump:
[ 51.388186] 7c000110 7c0000f8 5400077c 552907f6 7d290378 992b0003 4e800020 38000001
[ 51.395986] 3d20c03a 9009a114 7c0004ac 39200000 <98090000> 4e800020 3803ffd0 2b800009

<4>[ 51.385216] Instruction dump:
<4>[ 51.388186] 7c000110 7c0000f8 5400077c 552907f6 7d290378 992b0003 4e800020 38000001
<4>[ 51.395986] 3d20c03a 9009a114 7c0004ac 39200000 <98090000> 4e800020 3803ffd0 2b800009

Signed-off-by: Ira W. Snyder
Cc: Paul Mackerras
Cc: Benjamin Herrenschmidt
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/process.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ebe5766781aa..d817ab018486 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -566,12 +566,12 @@ static void show_instructions(struct pt_regs *regs)
 		 */
 		if (!__kernel_text_address(pc) ||
 		    __get_user(instr, (unsigned int __user *)pc)) {
-			printk("XXXXXXXX ");
+			printk(KERN_CONT "XXXXXXXX ");
 		} else {
 			if (regs->nip == pc)
-				printk("<%08x> ", instr);
+				printk(KERN_CONT "<%08x> ", instr);
 			else
-				printk("%08x ", instr);
+				printk(KERN_CONT "%08x ", instr);
 		}

 		pc += sizeof(int);
--
cgit v1.2.3

From fe1952fc0afb9a2e4c79f103c08aef5d13db1873 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Thu, 1 Mar 2012 12:45:27 +1100
Subject: powerpc: Rework runlatch code

This moves the inlines into system.h and changes the runlatch code to
use the thread local flags (non-atomic) rather than the TIF flags
(atomic) to keep track of the latch state. The code to turn it back on
in an asynchronous interrupt is now simplified and partially inlined.
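The distinction matters because TIF flags live in a word that other CPUs
may update concurrently, so they need set_bit()/clear_bit() atomics,
while local_flags is only ever touched by the owning thread. A minimal
user-space sketch of the pattern follows; the struct layout and the flag
value are illustrative stand-ins rather than the kernel's definitions,
and only test_thread_local_flags() mirrors the helper this patch adds:

/*
 * Illustrative sketch (not kernel code): a thread-local, non-atomic
 * flag word. The names below are stand-ins for the kernel's own.
 */
#include <stdio.h>

#define _TLF_RUNLATCH	0x4	/* hypothetical bit value */

struct thread_info {
	unsigned long local_flags;	/* only touched by the owning thread */
};

/* Non-atomic: a plain load/or/store, no lock prefix or larx/stcx. loop */
static void set_local_flag(struct thread_info *ti, unsigned long flag)
{
	ti->local_flags |= flag;
}

/* Mirrors the test_thread_local_flags() helper added by this patch */
static int test_local_flag(struct thread_info *ti, unsigned long flag)
{
	return (ti->local_flags & flag) != 0;
}

int main(void)
{
	struct thread_info ti = { 0 };

	set_local_flag(&ti, _TLF_RUNLATCH);
	printf("runlatch flag set: %d\n", test_local_flag(&ti, _TLF_RUNLATCH));
	return 0;
}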
Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/exception-64s.h | 50 +++++++++++++++++--------------- arch/powerpc/include/asm/reg.h | 22 ++------------ arch/powerpc/include/asm/system.h | 38 ++++++++++++++++++++++++ arch/powerpc/include/asm/thread_info.h | 9 +++++- arch/powerpc/kernel/exceptions-64s.S | 3 ++ arch/powerpc/kernel/process.c | 24 +++++++-------- 6 files changed, 89 insertions(+), 57 deletions(-) (limited to 'arch/powerpc/kernel/process.c') diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 041b2227967d..bdc9eebd1d44 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -284,35 +284,39 @@ label##_hv: \ rlwimi r11,r12,0,MSR_EE; \ mtmsrd r11,1 -#define STD_EXCEPTION_COMMON(trap, label, hdlr) \ - .align 7; \ - .globl label##_common; \ -label##_common: \ - EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ - DISABLE_INTS; \ - bl .save_nvgprs; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - bl hdlr; \ - b .ret_from_except +#define ADD_NVGPRS \ + bl .save_nvgprs + +#define RUNLATCH_ON \ +BEGIN_FTR_SECTION \ + clrrdi r3,r1,THREAD_SHIFT; \ + ld r4,TI_LOCAL_FLAGS(r3); \ + andi. r0,r4,_TLF_RUNLATCH; \ + beql ppc64_runlatch_on_trampoline; \ +END_FTR_SECTION_IFSET(CPU_FTR_CTRL) + +#define EXCEPTION_COMMON(trap, label, hdlr, ret, additions) \ + .align 7; \ + .globl label##_common; \ +label##_common: \ + EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ + additions; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + bl hdlr; \ + b ret + +#define STD_EXCEPTION_COMMON(trap, label, hdlr) \ + EXCEPTION_COMMON(trap, label, hdlr, ret_from_except, \ + ADD_NVGPRS;DISABLE_INTS) /* * Like STD_EXCEPTION_COMMON, but for exceptions that can occur * in the idle task and therefore need the special idle handling * (finish nap and runlatch) */ -#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \ - .align 7; \ - .globl label##_common; \ -label##_common: \ - EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ - FINISH_NAP; \ - DISABLE_INTS; \ -BEGIN_FTR_SECTION \ - bl .ppc64_runlatch_on; \ -END_FTR_SECTION_IFSET(CPU_FTR_CTRL) \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - bl hdlr; \ - b .ret_from_except_lite +#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \ + EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \ + FINISH_NAP;RUNLATCH_ON;DISABLE_INTS) /* * When the idle code in power4_idle puts the CPU into NAP mode, diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 7fdc2c0b7fa0..b1a215eabef6 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1079,30 +1079,12 @@ #define proc_trap() asm volatile("trap") -#ifdef CONFIG_PPC64 - -extern void ppc64_runlatch_on(void); -extern void __ppc64_runlatch_off(void); - -#define ppc64_runlatch_off() \ - do { \ - if (cpu_has_feature(CPU_FTR_CTRL) && \ - test_thread_flag(TIF_RUNLATCH)) \ - __ppc64_runlatch_off(); \ - } while (0) +#define __get_SP() ({unsigned long sp; \ + asm volatile("mr %0,1": "=r" (sp)); sp;}) extern unsigned long scom970_read(unsigned int address); extern void scom970_write(unsigned int address, unsigned long value); -#else -#define ppc64_runlatch_on() -#define ppc64_runlatch_off() - -#endif /* CONFIG_PPC64 */ - -#define __get_SP() ({unsigned long sp; \ - asm volatile("mr %0,1": "=r" (sp)); sp;}) - struct pt_regs; extern void ppc_save_regs(struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index c377457d1b89..a02883d5af43 100644 --- 
a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h @@ -550,5 +550,43 @@ extern void reloc_got2(unsigned long); extern struct dentry *powerpc_debugfs_root; +#ifdef CONFIG_PPC64 + +extern void __ppc64_runlatch_on(void); +extern void __ppc64_runlatch_off(void); + +/* + * We manually hard enable-disable, this is called + * in the idle loop and we don't want to mess up + * with soft-disable/enable & interrupt replay. + */ +#define ppc64_runlatch_off() \ + do { \ + if (cpu_has_feature(CPU_FTR_CTRL) && \ + test_thread_local_flags(_TLF_RUNLATCH)) { \ + unsigned long msr = mfmsr(); \ + __hard_irq_disable(); \ + __ppc64_runlatch_off(); \ + if (msr & MSR_EE) \ + __hard_irq_enable(); \ + } \ + } while (0) + +#define ppc64_runlatch_on() \ + do { \ + if (cpu_has_feature(CPU_FTR_CTRL) && \ + !test_thread_local_flags(_TLF_RUNLATCH)) { \ + unsigned long msr = mfmsr(); \ + __hard_irq_disable(); \ + __ppc64_runlatch_on(); \ + if (msr & MSR_EE) \ + __hard_irq_enable(); \ + } \ + } while (0) +#else +#define ppc64_runlatch_on() +#define ppc64_runlatch_off() +#endif /* CONFIG_PPC64 */ + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_SYSTEM_H */ diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 964714940961..4a741c7efd02 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -110,7 +110,6 @@ static inline struct thread_info *current_thread_info(void) #define TIF_NOERROR 12 /* Force successful syscall return */ #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ -#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<flags); } +static inline bool test_thread_local_flags(unsigned int flags) +{ + struct thread_info *ti = current_thread_info(); + return (ti->local_flags & flags) != 0; +} + #ifdef CONFIG_PPC64 #define is_32bit_task() (test_thread_flag(TIF_32BIT)) #else diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 2240d4ecec02..3af80e82830b 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -483,6 +483,9 @@ machine_check_common: system_call_entry: b system_call_common +ppc64_runlatch_on_trampoline: + b .__ppc64_runlatch_on + /* * Here we have detected that the kernel stack pointer is bad. 
 * R9 contains the saved CR, r13 points to the paca,

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index d817ab018486..bf80a1d5f8fe 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1220,34 +1220,32 @@ void dump_stack(void)
 EXPORT_SYMBOL(dump_stack);

 #ifdef CONFIG_PPC64
-void ppc64_runlatch_on(void)
+/* Called with hard IRQs off */
+void __ppc64_runlatch_on(void)
 {
+	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;

-	if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
-		HMT_medium();
-
-		ctrl = mfspr(SPRN_CTRLF);
-		ctrl |= CTRL_RUNLATCH;
-		mtspr(SPRN_CTRLT, ctrl);
+	ctrl = mfspr(SPRN_CTRLF);
+	ctrl |= CTRL_RUNLATCH;
+	mtspr(SPRN_CTRLT, ctrl);

-		set_thread_flag(TIF_RUNLATCH);
-	}
+	ti->local_flags |= _TLF_RUNLATCH;
 }

+/* Called with hard IRQs off */
 void __ppc64_runlatch_off(void)
 {
+	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;

-	HMT_medium();
-
-	clear_thread_flag(TIF_RUNLATCH);
+	ti->local_flags &= ~_TLF_RUNLATCH;

 	ctrl = mfspr(SPRN_CTRLF);
 	ctrl &= ~CTRL_RUNLATCH;
 	mtspr(SPRN_CTRLT, ctrl);
 }
-#endif
+#endif /* CONFIG_PPC64 */

 #if THREAD_SHIFT < PAGE_SHIFT
--
cgit v1.2.3

From 7230c5644188cd9e3fb380cc97dde00c464a3ba7 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 6 Mar 2012 18:27:59 +1100
Subject: powerpc: Rework lazy-interrupt handling

The current implementation of lazy interrupt handling has some issues
that this tries to address.

We don't do the various workarounds we need to do when re-enabling
interrupts in some cases such as when returning from an interrupt,
and thus we may still lose or get delayed decrementer or doorbell
interrupts.

The current scheme also makes it much harder to handle the external
"edge" interrupts provided by some BookE processors when using the
EPR facility (External Proxy) and the Freescale Hypervisor.

Additionally, we tend to keep interrupts hard disabled in a number
of cases, such as decrementer interrupts, external interrupts, or
when a masked decrementer interrupt is pending. This is sub-optimal.

This is an attempt at fixing it all in one go by reworking the way
we do the lazy interrupt disabling from the ground up.

The base idea is to replace the "hard_enabled" field with an
"irq_happened" field in which we store a bit mask of what interrupt
occurred while soft-disabled.

When re-enabling, either via arch_local_irq_restore() or when returning
from an interrupt, we can now decide what to do by testing bits in that
field.

We then implement replaying of the missed interrupts either by
re-using the existing exception frame (in the exception exit case) or
via the creation of a new one from an assembly trampoline (in the
arch_local_irq_enable case).

This removes the need to play with the decrementer to try to create
fake interrupts, among others.

In addition, this adds a few refinements:

 - We no longer hard disable decrementer interrupts that occur
while soft-disabled. We now simply bump the decrementer back to max
(on BookS) or leave it stopped (on BookE) and continue with hard
interrupts enabled, which means that we'll potentially get better
sample quality from performance monitor interrupts.

 - Timer, decrementer and doorbell interrupts now hard-enable
shortly after removing the source of the interrupt, which means
they no longer run entirely hard disabled. Again, this will improve
perf sample quality.
 - On Book3E 64-bit, we now make the performance monitor interrupt
act as an NMI like Book3S (the necessary C code for that to work
appears to already be present in the FSL perf code, notably calling
nmi_enter instead of irq_enter). (This also fixes a bug where BookE
perfmon interrupts could clobber r14 ... oops)

 - We could make "masked" decrementer interrupts act as NMIs when
doing timer-based perf sampling to improve the sample quality.

Signed-off-by: Benjamin Herrenschmidt
---

v2: - Add hard-enable to decrementer, timer and doorbells
    - Fix CR clobber in masked irq handling on BookE
    - Make embedded perf interrupt act as an NMI
    - Add a PACA_HAPPENED_EE_EDGE for use by FSL if they want to
      retrigger an interrupt without preventing hard-enable

v3: - Fix or vs. ori bug on Book3E
    - Fix enabling of interrupts for some exceptions on Book3E

v4: - Fix resend of doorbells on return from interrupt on Book3E

v5: - Rebased on top of my latest series, which involves some
      significant rework of some aspects of the patch.

v6: - 32-bit compile fix
    - more compile fixes with various .config combos
    - factor out the asm code to soft-disable interrupts
    - remove the C wrapper around preempt_schedule_irq

v7: - Fix a bug with hard irq state tracking on native power7
---
 arch/powerpc/include/asm/exception-64s.h        |  34 ++--
 arch/powerpc/include/asm/hw_irq.h               |  49 +++++-
 arch/powerpc/include/asm/irqflags.h             |  37 ++--
 arch/powerpc/include/asm/paca.h                 |   2 +-
 arch/powerpc/kernel/asm-offsets.c               |   2 +-
 arch/powerpc/kernel/dbell.c                     |   2 +
 arch/powerpc/kernel/entry_64.S                  | 153 ++++++++++++----
 arch/powerpc/kernel/exceptions-64e.S            | 221 ++++++++++++++++--------
 arch/powerpc/kernel/exceptions-64s.S            | 150 +++++++---------
 arch/powerpc/kernel/head_64.S                   |  24 ++-
 arch/powerpc/kernel/idle.c                      |   6 +-
 arch/powerpc/kernel/idle_book3e.S               |  25 +--
 arch/powerpc/kernel/idle_power4.S               |  24 ++-
 arch/powerpc/kernel/idle_power7.S               |  23 ++-
 arch/powerpc/kernel/irq.c                       | 204 +++++++++++++-------
 arch/powerpc/kernel/process.c                   |   3 +
 arch/powerpc/kernel/time.c                      |   8 +-
 arch/powerpc/platforms/pseries/processor_idle.c |  18 +-
 arch/powerpc/xmon/xmon.c                        |   4 +-
 19 files changed, 647 insertions(+), 342 deletions(-)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 70354af0740e..548da3aa0a30 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -232,23 +232,30 @@ label##_hv:						\
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,	\
 				 EXC_HV, KVMTEST, vec)

-#define __SOFTEN_TEST(h)					\
+/* This associates vector numbers with bits in paca->irq_happened */
+#define SOFTEN_VALUE_0x500	PACA_IRQ_EE
+#define SOFTEN_VALUE_0x502	PACA_IRQ_EE
+#define SOFTEN_VALUE_0x900	PACA_IRQ_DEC
+#define SOFTEN_VALUE_0x982	PACA_IRQ_DEC
+
+#define __SOFTEN_TEST(h, vec)					\
 	lbz	r10,PACASOFTIRQEN(r13);				\
 	cmpwi	r10,0;						\
+	li	r10,SOFTEN_VALUE_##vec;				\
 	beq	masked_##h##interrupt
-#define _SOFTEN_TEST(h)	__SOFTEN_TEST(h)
+#define _SOFTEN_TEST(h, vec)	__SOFTEN_TEST(h, vec)

 #define SOFTEN_TEST_PR(vec)					\
 	KVMTEST_PR(vec);					\
-	_SOFTEN_TEST(EXC_STD)
+	_SOFTEN_TEST(EXC_STD, vec)

 #define SOFTEN_TEST_HV(vec)					\
 	KVMTEST(vec);						\
-	_SOFTEN_TEST(EXC_HV)
+	_SOFTEN_TEST(EXC_HV, vec)

 #define SOFTEN_TEST_HV_201(vec)					\
 	KVMTEST(vec);						\
-	_SOFTEN_TEST(EXC_STD)
+	_SOFTEN_TEST(EXC_STD, vec)

 #define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)	\
 	HMT_MEDIUM;						\
@@ -279,22 +286,7 @@ label##_hv:						\
  */

 /* Exception addition: Hard disable interrupts */
-#ifdef CONFIG_TRACE_IRQFLAGS
-#define DISABLE_INTS \ - lbz r10,PACASOFTIRQEN(r13); \ - li r11,0; \ - cmpwi cr0,r10,0; \ - stb r11,PACAHARDIRQEN(r13); \ - beq 44f; \ - stb r11,PACASOFTIRQEN(r13); \ - TRACE_DISABLE_INTS; \ -44: -#else -#define DISABLE_INTS \ - li r11,0; \ - stb r11,PACASOFTIRQEN(r13); \ - stb r11,PACAHARDIRQEN(r13) -#endif /* CONFIG_TRACE_IRQFLAGS */ +#define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11) /* Exception addition: Keep interrupt state */ #define ENABLE_INTS \ diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 6c6fa955baa7..51010bfc792e 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -11,6 +11,27 @@ #include #include +#ifdef CONFIG_PPC64 + +/* + * PACA flags in paca->irq_happened. + * + * This bits are set when interrupts occur while soft-disabled + * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS + * is set whenever we manually hard disable. + */ +#define PACA_IRQ_HARD_DIS 0x01 +#define PACA_IRQ_DBELL 0x02 +#define PACA_IRQ_EE 0x04 +#define PACA_IRQ_DEC 0x08 /* Or FIT */ +#define PACA_IRQ_EE_EDGE 0x10 /* BookE only */ + +#endif /* CONFIG_PPC64 */ + +#ifndef __ASSEMBLY__ + +extern void __replay_interrupt(unsigned int vector); + extern void timer_interrupt(struct pt_regs *); #ifdef CONFIG_PPC64 @@ -42,7 +63,6 @@ static inline unsigned long arch_local_irq_disable(void) } extern void arch_local_irq_restore(unsigned long); -extern void iseries_handle_interrupts(void); static inline void arch_local_irq_enable(void) { @@ -72,12 +92,24 @@ static inline bool arch_irqs_disabled(void) #define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1) #endif -#define hard_irq_disable() \ - do { \ - __hard_irq_disable(); \ - get_paca()->soft_enabled = 0; \ - get_paca()->hard_enabled = 0; \ - } while(0) +static inline void hard_irq_disable(void) +{ + __hard_irq_disable(); + get_paca()->soft_enabled = 0; + get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; +} + +/* + * This is called by asynchronous interrupts to conditionally + * re-enable hard interrupts when soft-disabled after having + * cleared the source of the interrupt + */ +static inline void may_hard_irq_enable(void) +{ + get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS; + if (!(get_paca()->irq_happened & PACA_IRQ_EE)) + __hard_irq_enable(); +} static inline bool arch_irq_disabled_regs(struct pt_regs *regs) { @@ -149,6 +181,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs) return !(regs->msr & MSR_EE); } +static inline void may_hard_irq_enable(void) { } + #endif /* CONFIG_PPC64 */ #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST @@ -159,5 +193,6 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs) */ struct irq_chip; +#endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HW_IRQ_H */ diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h index b0b06d85788d..6f9b6e23dc5a 100644 --- a/arch/powerpc/include/asm/irqflags.h +++ b/arch/powerpc/include/asm/irqflags.h @@ -39,24 +39,31 @@ #define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) #define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) -#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \ - cmpdi en,0; \ - bne 95f; \ - stb en,PACASOFTIRQEN(r13); \ - TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) \ - b skip; \ -95: TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) \ - li en,1; -#define TRACE_AND_RESTORE_IRQ(en) \ - TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f); \ - stb en,PACASOFTIRQEN(r13); \ -96: +/* + * This is used by assembly 
code to soft-disable interrupts + */ +#define SOFT_DISABLE_INTS(__rA, __rB) \ + lbz __rA,PACASOFTIRQEN(r13); \ + lbz __rB,PACAIRQHAPPENED(r13); \ + cmpwi cr0,__rA,0; \ + li __rA,0; \ + ori __rB,__rB,PACA_IRQ_HARD_DIS; \ + stb __rB,PACAIRQHAPPENED(r13); \ + beq 44f; \ + stb __rA,PACASOFTIRQEN(r13); \ + TRACE_DISABLE_INTS; \ +44: + #else #define TRACE_ENABLE_INTS #define TRACE_DISABLE_INTS -#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) -#define TRACE_AND_RESTORE_IRQ(en) \ - stb en,PACASOFTIRQEN(r13) + +#define SOFT_DISABLE_INTS(__rA, __rB) \ + lbz __rA,PACAIRQHAPPENED(r13); \ + li __rB,0; \ + ori __rA,__rA,PACA_IRQ_HARD_DIS; \ + stb __rB,PACASOFTIRQEN(r13); \ + stb __rA,PACAIRQHAPPENED(r13) #endif #endif diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 269c05a36d91..daf813fea91f 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -132,7 +132,7 @@ struct paca_struct { u64 saved_msr; /* MSR saved here by enter_rtas */ u16 trap_save; /* Used when bad stack is encountered */ u8 soft_enabled; /* irq soft-enable flag */ - u8 hard_enabled; /* set if irqs are enabled in MSR */ + u8 irq_happened; /* irq happened while soft-disabled */ u8 io_sync; /* writel() needs spin_unlock sync */ u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ u8 nap_state_lost; /* NV GPR values lost in power7_idle */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 04caee7d9bc1..cdd0d264415f 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -147,7 +147,7 @@ int main(void) DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase)); DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); - DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); + DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened)); DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); #ifdef CONFIG_PPC_MM_SLICES DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index 2cc451aaaca7..5b25c8060fd6 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c @@ -37,6 +37,8 @@ void doorbell_exception(struct pt_regs *regs) irq_enter(); + may_hard_irq_enable(); + smp_ipi_demux(); irq_exit(); diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index c513beb78b3b..f8a7a1a1a9f4 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -32,6 +32,7 @@ #include #include #include +#include /* * System calls. @@ -583,18 +584,72 @@ _GLOBAL(ret_from_except_lite) bne do_work #endif /* !CONFIG_PREEMPT */ + .globl fast_exc_return_irq +fast_exc_return_irq: restore: + /* + * This is the main kernel exit path, we first check if we + * have to change our interrupt state. + */ ld r5,SOFTE(r1) - TRACE_AND_RESTORE_IRQ(r5); + lbz r6,PACASOFTIRQEN(r13) + cmpwi cr1,r5,0 + cmpw cr0,r5,r6 + beq cr0,4f + + /* We do, handle disable first, which is easy */ + bne cr1,3f; + li r0,0 + stb r0,PACASOFTIRQEN(r13); + TRACE_DISABLE_INTS + b 4f - /* extract EE bit and use it to restore paca->hard_enabled */ - ld r3,_MSR(r1) - rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */ - stb r4,PACAHARDIRQEN(r13) +3: /* + * We are about to soft-enable interrupts (we are hard disabled + * at this point). We check if there's anything that needs to + * be replayed first. 
+ */ + lbz r0,PACAIRQHAPPENED(r13) + cmpwi cr0,r0,0 + bne- restore_check_irq_replay + + /* + * Get here when nothing happened while soft-disabled, just + * soft-enable and move-on. We will hard-enable as a side + * effect of rfi + */ +restore_no_replay: + TRACE_ENABLE_INTS + li r0,1 + stb r0,PACASOFTIRQEN(r13); + /* + * Final return path. BookE is handled in a different file + */ +4: #ifdef CONFIG_PPC_BOOK3E b .exception_return_book3e #else + /* + * Clear the reservation. If we know the CPU tracks the address of + * the reservation then we can potentially save some cycles and use + * a larx. On POWER6 and POWER7 this is significantly faster. + */ +BEGIN_FTR_SECTION + stdcx. r0,0,r1 /* to clear the reservation */ +FTR_SECTION_ELSE + ldarx r4,0,r1 +ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) + + /* + * Some code path such as load_up_fpu or altivec return directly + * here. They run entirely hard disabled and do not alter the + * interrupt state. They also don't use lwarx/stwcx. and thus + * are known not to leave dangling reservations. + */ + .globl fast_exception_return +fast_exception_return: + ld r3,_MSR(r1) ld r4,_CTR(r1) ld r0,_LINK(r1) mtctr r4 @@ -607,17 +662,6 @@ restore: andi. r0,r3,MSR_RI beq- unrecov_restore - /* - * Clear the reservation. If we know the CPU tracks the address of - * the reservation then we can potentially save some cycles and use - * a larx. On POWER6 and POWER7 this is significantly faster. - */ -BEGIN_FTR_SECTION - stdcx. r0,0,r1 /* to clear the reservation */ -FTR_SECTION_ELSE - ldarx r4,0,r1 -ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) - /* * Clear RI before restoring r13. If we are returning to * userspace and we take an exception after restoring r13, @@ -629,7 +673,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) /* * r13 is our per cpu area, only restore it if we are returning to - * userspace + * userspace the value stored in the stack frame may belong to + * another CPU. */ andi. r0,r3,MSR_PR beq 1f @@ -654,6 +699,55 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) #endif /* CONFIG_PPC_BOOK3E */ + /* + * Something did happen, check if a re-emit is needed + * (this also clears paca->irq_happened) + */ +restore_check_irq_replay: + /* XXX: We could implement a fast path here where we check + * for irq_happened being just 0x01, in which case we can + * clear it and return. That means that we would potentially + * miss a decrementer having wrapped all the way around. + * + * Still, this might be useful for things like hash_page + */ + bl .__check_irq_replay + cmpwi cr0,r3,0 + beq restore_no_replay + + /* + * We need to re-emit an interrupt. We do so by re-using our + * existing exception frame. We first change the trap value, + * but we need to ensure we preserve the low nibble of it + */ + ld r4,_TRAP(r1) + clrldi r4,r4,60 + or r4,r4,r3 + std r4,_TRAP(r1) + + /* + * Then find the right handler and call it. Interrupts are + * still soft-disabled and we keep them that way. + */ + cmpwi cr0,r3,0x500 + bne 1f + addi r3,r1,STACK_FRAME_OVERHEAD; + bl .do_IRQ + b .ret_from_except +1: cmpwi cr0,r3,0x900 + bne 1f + addi r3,r1,STACK_FRAME_OVERHEAD; + bl .timer_interrupt + b .ret_from_except +#ifdef CONFIG_PPC_BOOK3E +1: cmpwi cr0,r3,0x280 + bne 1f + addi r3,r1,STACK_FRAME_OVERHEAD; + bl .doorbell_exception + b .ret_from_except +#endif /* CONFIG_PPC_BOOK3E */ +1: b .ret_from_except /* What else to do here ? */ + do_work: #ifdef CONFIG_PREEMPT andi. r0,r3,MSR_PR /* Returning to user mode? 
*/ @@ -666,18 +760,11 @@ do_work: crandc eq,cr1*4+eq,eq bne restore - /* Here we are preempting the current task. - * - * Ensure interrupts are soft-disabled. We also properly mark - * the PACA to reflect the fact that they are hard-disabled - * and trace the change + /* + * Here we are preempting the current task. We want to make + * sure we are soft-disabled first */ - li r0,0 - stb r0,PACASOFTIRQEN(r13) - stb r0,PACAHARDIRQEN(r13) - TRACE_DISABLE_INTS - - /* Call the scheduler with soft IRQs off */ + SOFT_DISABLE_INTS(r3,r4) 1: bl .preempt_schedule_irq /* Hard-disable interrupts again (and update PACA) */ @@ -687,8 +774,8 @@ do_work: ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */ mtmsrd r10,1 #endif /* CONFIG_PPC_BOOK3E */ - li r0,0 - stb r0,PACAHARDIRQEN(r13) + li r0,PACA_IRQ_HARD_DIS + stb r0,PACAIRQHAPPENED(r13) /* Re-test flags and eventually loop */ clrrdi r9,r1,THREAD_SHIFT @@ -710,14 +797,12 @@ user_work: andi. r0,r4,_TIF_NEED_RESCHED beq 1f - li r5,1 - TRACE_AND_RESTORE_IRQ(r5); + bl .restore_interrupts bl .schedule b .ret_from_except_lite 1: bl .save_nvgprs - li r5,1 - TRACE_AND_RESTORE_IRQ(r5); + bl .restore_interrupts addi r3,r1,STACK_FRAME_OVERHEAD bl .do_notify_resume b .ret_from_except diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index c4c34665c221..7215cc2495df 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -24,6 +24,7 @@ #include #include #include +#include /* XXX This will ultimately add space for a special exception save * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc... @@ -77,59 +78,55 @@ #define SPRN_MC_SRR1 SPRN_MCSRR1 #define NORMAL_EXCEPTION_PROLOG(n, addition) \ - EXCEPTION_PROLOG(n, GEN, addition##_GEN) + EXCEPTION_PROLOG(n, GEN, addition##_GEN(n)) #define CRIT_EXCEPTION_PROLOG(n, addition) \ - EXCEPTION_PROLOG(n, CRIT, addition##_CRIT) + EXCEPTION_PROLOG(n, CRIT, addition##_CRIT(n)) #define DBG_EXCEPTION_PROLOG(n, addition) \ - EXCEPTION_PROLOG(n, DBG, addition##_DBG) + EXCEPTION_PROLOG(n, DBG, addition##_DBG(n)) #define MC_EXCEPTION_PROLOG(n, addition) \ - EXCEPTION_PROLOG(n, MC, addition##_MC) + EXCEPTION_PROLOG(n, MC, addition##_MC(n)) /* Variants of the "addition" argument for the prolog */ -#define PROLOG_ADDITION_NONE_GEN -#define PROLOG_ADDITION_NONE_CRIT -#define PROLOG_ADDITION_NONE_DBG -#define PROLOG_ADDITION_NONE_MC +#define PROLOG_ADDITION_NONE_GEN(n) +#define PROLOG_ADDITION_NONE_CRIT(n) +#define PROLOG_ADDITION_NONE_DBG(n) +#define PROLOG_ADDITION_NONE_MC(n) -#define PROLOG_ADDITION_MASKABLE_GEN \ +#define PROLOG_ADDITION_MASKABLE_GEN(n) \ lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? 
*/ \ cmpwi cr0,r11,0; /* yes -> go out of line */ \ - beq masked_interrupt_book3e; + beq masked_interrupt_book3e_##n -#define PROLOG_ADDITION_2REGS_GEN \ +#define PROLOG_ADDITION_2REGS_GEN(n) \ std r14,PACA_EXGEN+EX_R14(r13); \ std r15,PACA_EXGEN+EX_R15(r13) -#define PROLOG_ADDITION_1REG_GEN \ +#define PROLOG_ADDITION_1REG_GEN(n) \ std r14,PACA_EXGEN+EX_R14(r13); -#define PROLOG_ADDITION_2REGS_CRIT \ +#define PROLOG_ADDITION_2REGS_CRIT(n) \ std r14,PACA_EXCRIT+EX_R14(r13); \ std r15,PACA_EXCRIT+EX_R15(r13) -#define PROLOG_ADDITION_2REGS_DBG \ +#define PROLOG_ADDITION_2REGS_DBG(n) \ std r14,PACA_EXDBG+EX_R14(r13); \ std r15,PACA_EXDBG+EX_R15(r13) -#define PROLOG_ADDITION_2REGS_MC \ +#define PROLOG_ADDITION_2REGS_MC(n) \ std r14,PACA_EXMC+EX_R14(r13); \ std r15,PACA_EXMC+EX_R15(r13) -#define PROLOG_ADDITION_DOORBELL_GEN \ - lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ - cmpwi cr0,r11,0; /* yes -> go out of line */ \ - beq masked_doorbell_book3e - /* Core exception code for all exceptions except TLB misses. * XXX: Needs to make SPRN_SPRG_GEN depend on exception type */ #define EXCEPTION_COMMON(n, excf, ints) \ +exc_##n##_common: \ std r0,GPR0(r1); /* save r0 in stackframe */ \ std r2,GPR2(r1); /* save r2 in stackframe */ \ SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ @@ -167,20 +164,21 @@ std r0,RESULT(r1); /* clear regs->result */ \ ints; -/* Variants for the "ints" argument */ +/* Variants for the "ints" argument. This one does nothing when we want + * to keep interrupts in their original state + */ #define INTS_KEEP -#define INTS_DISABLE_SOFT \ - stb r0,PACASOFTIRQEN(r13); /* mark interrupts soft-disabled */ \ - TRACE_DISABLE_INTS; -#define INTS_DISABLE_HARD \ - stb r0,PACAHARDIRQEN(r13); /* and hard disabled */ -#define INTS_DISABLE_ALL \ - INTS_DISABLE_SOFT \ - INTS_DISABLE_HARD - -/* This is called by exceptions that used INTS_KEEP (that is did not clear - * neither soft nor hard IRQ indicators in the PACA. This will restore MSR:EE - * to it's previous value + +/* This second version is meant for exceptions that don't immediately + * hard-enable. We set a bit in paca->irq_happened to ensure that + * a subsequent call to arch_local_irq_restore() will properly + * hard-enable and avoid the fast-path + */ +#define INTS_DISABLE SOFT_DISABLE_INTS(r3,r4) + +/* This is called by exceptions that used INTS_KEEP (that did not touch + * irq indicators in the PACA). 
This will restore MSR:EE to it's previous + * value * * XXX In the long run, we may want to open-code it in order to separate the * load from the wrtee, thus limiting the latency caused by the dependency @@ -238,7 +236,7 @@ exc_##n##_bad_stack: \ #define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \ START_EXCEPTION(label); \ NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \ - EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE_ALL) \ + EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \ ack(r8); \ CHECK_NAPPING(); \ addi r3,r1,STACK_FRAME_OVERHEAD; \ @@ -289,7 +287,7 @@ interrupt_end_book3e: /* Critical Input Interrupt */ START_EXCEPTION(critical_input); CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE) -// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE_ALL) +// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE) // bl special_reg_save_crit // CHECK_NAPPING(); // addi r3,r1,STACK_FRAME_OVERHEAD @@ -300,7 +298,7 @@ interrupt_end_book3e: /* Machine Check Interrupt */ START_EXCEPTION(machine_check); CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE) -// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE_ALL) +// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE) // bl special_reg_save_mc // addi r3,r1,STACK_FRAME_OVERHEAD // CHECK_NAPPING(); @@ -313,7 +311,7 @@ interrupt_end_book3e: NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS) mfspr r14,SPRN_DEAR mfspr r15,SPRN_ESR - EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE_ALL) + EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE) b storage_fault_common /* Instruction Storage Interrupt */ @@ -321,7 +319,7 @@ interrupt_end_book3e: NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS) li r15,0 mr r14,r10 - EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE_ALL) + EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE) b storage_fault_common /* External Input Interrupt */ @@ -339,12 +337,11 @@ interrupt_end_book3e: START_EXCEPTION(program); NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG) mfspr r14,SPRN_ESR - EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE_SOFT) + EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE) std r14,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD ld r14,PACA_EXGEN+EX_R14(r13) bl .save_nvgprs - INTS_RESTORE_HARD bl .program_check_exception b .ret_from_except @@ -358,7 +355,7 @@ interrupt_end_book3e: beq- 1f bl .load_up_fpu b fast_exception_return -1: INTS_DISABLE_ALL +1: INTS_DISABLE bl .save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD bl .kernel_fp_unavailable_exception @@ -373,7 +370,7 @@ interrupt_end_book3e: /* Watchdog Timer Interrupt */ START_EXCEPTION(watchdog); CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE) -// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE_ALL) +// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE) // bl special_reg_save_crit // CHECK_NAPPING(); // addi r3,r1,STACK_FRAME_OVERHEAD @@ -392,7 +389,7 @@ interrupt_end_book3e: /* Auxiliary Processor Unavailable Interrupt */ START_EXCEPTION(ap_unavailable); NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) - EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE_ALL) + EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE) bl .save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD bl .unknown_exception @@ -450,7 +447,7 @@ interrupt_end_book3e: mfspr r15,SPRN_SPRG_CRIT_SCRATCH mtspr SPRN_SPRG_GEN_SCRATCH,r15 mfspr r14,SPRN_DBSR - EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE_ALL) + EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE) std r14,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD mr r4,r14 @@ -465,7 +462,7 @@ kernel_dbg_exc: /* Debug exception as a debug 
interrupt*/ START_EXCEPTION(debug_debug); - DBG_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS) + DBG_EXCEPTION_PROLOG(0xd08, PROLOG_ADDITION_2REGS) /* * If there is a single step or branch-taken exception in an @@ -515,7 +512,7 @@ kernel_dbg_exc: mfspr r15,SPRN_SPRG_DBG_SCRATCH mtspr SPRN_SPRG_GEN_SCRATCH,r15 mfspr r14,SPRN_DBSR - EXCEPTION_COMMON(0xd00, PACA_EXDBG, INTS_DISABLE_ALL) + EXCEPTION_COMMON(0xd08, PACA_EXDBG, INTS_DISABLE) std r14,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD mr r4,r14 @@ -525,21 +522,20 @@ kernel_dbg_exc: bl .DebugException b .ret_from_except - MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE) - -/* Doorbell interrupt */ - START_EXCEPTION(doorbell) - NORMAL_EXCEPTION_PROLOG(0x2070, PROLOG_ADDITION_DOORBELL) - EXCEPTION_COMMON(0x2070, PACA_EXGEN, INTS_DISABLE_ALL) - CHECK_NAPPING() + START_EXCEPTION(perfmon); + NORMAL_EXCEPTION_PROLOG(0x260, PROLOG_ADDITION_NONE) + EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE) addi r3,r1,STACK_FRAME_OVERHEAD - bl .doorbell_exception + bl .performance_monitor_exception b .ret_from_except_lite +/* Doorbell interrupt */ + MASKABLE_EXCEPTION(0x280, doorbell, .doorbell_exception, ACK_NONE) + /* Doorbell critical Interrupt */ START_EXCEPTION(doorbell_crit); - CRIT_EXCEPTION_PROLOG(0x2080, PROLOG_ADDITION_NONE) -// EXCEPTION_COMMON(0x2080, PACA_EXCRIT, INTS_DISABLE_ALL) + CRIT_EXCEPTION_PROLOG(0x2a0, PROLOG_ADDITION_NONE) +// EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE) // bl special_reg_save_crit // CHECK_NAPPING(); // addi r3,r1,STACK_FRAME_OVERHEAD @@ -547,36 +543,114 @@ kernel_dbg_exc: // b ret_from_crit_except b . +/* Guest Doorbell */ MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE) - MASKABLE_EXCEPTION(0x2e0, guest_doorbell_crit, .unknown_exception, ACK_NONE) - MASKABLE_EXCEPTION(0x310, hypercall, .unknown_exception, ACK_NONE) - MASKABLE_EXCEPTION(0x320, ehpriv, .unknown_exception, ACK_NONE) +/* Guest Doorbell critical Interrupt */ + START_EXCEPTION(guest_doorbell_crit); + CRIT_EXCEPTION_PROLOG(0x2e0, PROLOG_ADDITION_NONE) +// EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE) +// bl special_reg_save_crit +// CHECK_NAPPING(); +// addi r3,r1,STACK_FRAME_OVERHEAD +// bl .guest_doorbell_critical_exception +// b ret_from_crit_except + b . + +/* Hypervisor call */ + START_EXCEPTION(hypercall); + NORMAL_EXCEPTION_PROLOG(0x310, PROLOG_ADDITION_NONE) + EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP) + addi r3,r1,STACK_FRAME_OVERHEAD + bl .save_nvgprs + INTS_RESTORE_HARD + bl .unknown_exception + b .ret_from_except + +/* Embedded Hypervisor priviledged */ + START_EXCEPTION(ehpriv); + NORMAL_EXCEPTION_PROLOG(0x320, PROLOG_ADDITION_NONE) + EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP) + addi r3,r1,STACK_FRAME_OVERHEAD + bl .save_nvgprs + INTS_RESTORE_HARD + bl .unknown_exception + b .ret_from_except /* - * An interrupt came in while soft-disabled; clear EE in SRR1, - * clear paca->hard_enabled and return. 
+ * An interrupt came in while soft-disabled; We mark paca->irq_happened + * accordingly and if the interrupt is level sensitive, we hard disable */ -masked_doorbell_book3e: - mtcr r10 - /* Resend the doorbell to fire again when ints enabled */ - mfspr r10,SPRN_PIR - PPC_MSGSND(r10) - b masked_interrupt_book3e_common -masked_interrupt_book3e: +masked_interrupt_book3e_0x500: + /* XXX When adding support for EPR, use PACA_IRQ_EE_EDGE */ + li r11,PACA_IRQ_EE + b masked_interrupt_book3e_full_mask + +masked_interrupt_book3e_0x900: + ACK_DEC(r11); + li r11,PACA_IRQ_DEC + b masked_interrupt_book3e_no_mask +masked_interrupt_book3e_0x980: + ACK_FIT(r11); + li r11,PACA_IRQ_DEC + b masked_interrupt_book3e_no_mask +masked_interrupt_book3e_0x280: +masked_interrupt_book3e_0x2c0: + li r11,PACA_IRQ_DBELL + b masked_interrupt_book3e_no_mask + +masked_interrupt_book3e_no_mask: mtcr r10 -masked_interrupt_book3e_common: - stb r11,PACAHARDIRQEN(r13) + lbz r10,PACAIRQHAPPENED(r13) + or r10,r10,r11 + stb r10,PACAIRQHAPPENED(r13) + b 1f +masked_interrupt_book3e_full_mask: + mtcr r10 + lbz r10,PACAIRQHAPPENED(r13) + or r10,r10,r11 + stb r10,PACAIRQHAPPENED(r13) mfspr r10,SPRN_SRR1 rldicl r11,r10,48,1 /* clear MSR_EE */ rotldi r10,r11,16 mtspr SPRN_SRR1,r10 - ld r10,PACA_EXGEN+EX_R10(r13); /* restore registers */ +1: ld r10,PACA_EXGEN+EX_R10(r13); ld r11,PACA_EXGEN+EX_R11(r13); mfspr r13,SPRN_SPRG_GEN_SCRATCH; rfi b . +/* + * Called from arch_local_irq_enable when an interrupt needs + * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280 + * to indicate the kind of interrupt. MSR:EE is already off. + * We generate a stackframe like if a real interrupt had happened. + * + * Note: While MSR:EE is off, we need to make sure that _MSR + * in the generated frame has EE set to 1 or the exception + * handler will not properly re-enable them. + */ +_GLOBAL(__replay_interrupt) + /* We are going to jump to the exception common code which + * will retrieve various register values from the PACA which + * we don't give a damn about. + */ + mflr r10 + mfmsr r11 + mfcr r4 + mtspr SPRN_SPRG_GEN_SCRATCH,r13; + std r1,PACA_EXGEN+EX_R1(r13); + stw r4,PACA_EXGEN+EX_CR(r13); + ori r11,r11,MSR_EE + subi r1,r1,INT_FRAME_SIZE; + cmpwi cr0,r3,0x500 + beq exc_0x500_common + cmpwi cr0,r3,0x900 + beq exc_0x900_common + cmpwi cr0,r3,0x280 + beq exc_0x280_common + blr + /* * This is called from 0x300 and 0x400 handlers after the prologs with @@ -679,6 +753,8 @@ BAD_STACK_TRAMPOLINE(0x000) BAD_STACK_TRAMPOLINE(0x100) BAD_STACK_TRAMPOLINE(0x200) BAD_STACK_TRAMPOLINE(0x260) +BAD_STACK_TRAMPOLINE(0x280) +BAD_STACK_TRAMPOLINE(0x2a0) BAD_STACK_TRAMPOLINE(0x2c0) BAD_STACK_TRAMPOLINE(0x2e0) BAD_STACK_TRAMPOLINE(0x300) @@ -696,11 +772,10 @@ BAD_STACK_TRAMPOLINE(0xa00) BAD_STACK_TRAMPOLINE(0xb00) BAD_STACK_TRAMPOLINE(0xc00) BAD_STACK_TRAMPOLINE(0xd00) +BAD_STACK_TRAMPOLINE(0xd08) BAD_STACK_TRAMPOLINE(0xe00) BAD_STACK_TRAMPOLINE(0xf00) BAD_STACK_TRAMPOLINE(0xf20) -BAD_STACK_TRAMPOLINE(0x2070) -BAD_STACK_TRAMPOLINE(0x2080) .globl bad_stack_book3e bad_stack_book3e: diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 02448ea58ad3..2d0868a4e2f0 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -12,6 +12,7 @@ * */ +#include #include #include @@ -356,34 +357,60 @@ do_stab_bolted_pSeries: KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40) /* - * An interrupt came in while soft-disabled; clear EE in SRR1, - * clear paca->hard_enabled and return. 
+ * An interrupt came in while soft-disabled. We set paca->irq_happened, + * then, if it was a decrementer interrupt, we bump the dec to max and + * and return, else we hard disable and return. This is called with + * r10 containing the value to OR to the paca field. */ -masked_interrupt: - stb r10,PACAHARDIRQEN(r13) - mtcrf 0x80,r9 - ld r9,PACA_EXGEN+EX_R9(r13) - mfspr r10,SPRN_SRR1 - rldicl r10,r10,48,1 /* clear MSR_EE */ - rotldi r10,r10,16 - mtspr SPRN_SRR1,r10 - ld r10,PACA_EXGEN+EX_R10(r13) - GET_SCRATCH0(r13) - rfid +#define MASKED_INTERRUPT(_H) \ +masked_##_H##interrupt: \ + std r11,PACA_EXGEN+EX_R11(r13); \ + lbz r11,PACAIRQHAPPENED(r13); \ + or r11,r11,r10; \ + stb r11,PACAIRQHAPPENED(r13); \ + andi. r10,r10,PACA_IRQ_DEC; \ + beq 1f; \ + lis r10,0x7fff; \ + ori r10,r10,0xffff; \ + mtspr SPRN_DEC,r10; \ + b 2f; \ +1: mfspr r10,SPRN_##_H##SRR1; \ + rldicl r10,r10,48,1; /* clear MSR_EE */ \ + rotldi r10,r10,16; \ + mtspr SPRN_##_H##SRR1,r10; \ +2: mtcrf 0x80,r9; \ + ld r9,PACA_EXGEN+EX_R9(r13); \ + ld r10,PACA_EXGEN+EX_R10(r13); \ + ld r11,PACA_EXGEN+EX_R11(r13); \ + GET_SCRATCH0(r13); \ + ##_H##rfid; \ b . + + MASKED_INTERRUPT() + MASKED_INTERRUPT(H) -masked_Hinterrupt: - stb r10,PACAHARDIRQEN(r13) - mtcrf 0x80,r9 - ld r9,PACA_EXGEN+EX_R9(r13) - mfspr r10,SPRN_HSRR1 - rldicl r10,r10,48,1 /* clear MSR_EE */ - rotldi r10,r10,16 - mtspr SPRN_HSRR1,r10 - ld r10,PACA_EXGEN+EX_R10(r13) - GET_SCRATCH0(r13) - hrfid - b . +/* + * Called from arch_local_irq_enable when an interrupt needs + * to be resent. r3 contains 0x500 or 0x900 to indicate which + * kind of interrupt. MSR:EE is already off. We generate a + * stackframe like if a real interrupt had happened. + * + * Note: While MSR:EE is off, we need to make sure that _MSR + * in the generated frame has EE set to 1 or the exception + * handler will not properly re-enable them. + */ +_GLOBAL(__replay_interrupt) + /* We are going to jump to the exception common code which + * will retrieve various register values from the PACA which + * we don't give a damn about, so we don't bother storing them. + */ + mfmsr r12 + mflr r11 + mfcr r9 + ori r12,r12,MSR_EE + andi. r3,r3,0x0800 + bne decrementer_common + b hardware_interrupt_common #ifdef CONFIG_PPC_PSERIES /* @@ -793,7 +820,8 @@ vsx_unavailable_common: EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN) #ifdef CONFIG_VSX BEGIN_FTR_SECTION - bne .load_up_vsx + beq 1f + b .load_up_vsx 1: END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif @@ -807,65 +835,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) .globl __end_handlers __end_handlers: -/* - * Return from an exception with minimal checks. - * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. - * If interrupts have been enabled, or anything has been - * done that might have changed the scheduling status of - * any task or sent any task a signal, you should use - * ret_from_except or ret_from_except_lite instead of this. - */ -fast_exc_return_irq: /* restores irq state too */ - ld r3,SOFTE(r1) - TRACE_AND_RESTORE_IRQ(r3); - ld r12,_MSR(r1) - rldicl r4,r12,49,63 /* get MSR_EE to LSB */ - stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */ - b 1f - - .globl fast_exception_return -fast_exception_return: - ld r12,_MSR(r1) -1: ld r11,_NIP(r1) - andi. r3,r12,MSR_RI /* check if RI is set */ - beq- unrecov_fer - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING - andi. 
r3,r12,MSR_PR - beq 2f - ACCOUNT_CPU_USER_EXIT(r3, r4) -2: -#endif - - ld r3,_CCR(r1) - ld r4,_LINK(r1) - ld r5,_CTR(r1) - ld r6,_XER(r1) - mtcr r3 - mtlr r4 - mtctr r5 - mtxer r6 - REST_GPR(0, r1) - REST_8GPRS(2, r1) - - ld r10,PACAKMSR(r13) - clrrdi r10,r10,2 /* clear RI */ - mtmsrd r10,1 - - mtspr SPRN_SRR1,r12 - mtspr SPRN_SRR0,r11 - REST_4GPRS(10, r1) - ld r1,GPR1(r1) - rfid - b . /* prevent speculative execution */ - -unrecov_fer: - bl .save_nvgprs -1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception - b 1b - - /* * Hash table stuff */ @@ -905,19 +874,16 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) * r4 contains the required access permissions * r5 contains the trap number * - * at return r3 = 0 for success + * at return r3 = 0 for success, 1 for page fault, negative for error */ bl .hash_page /* build HPTE if possible */ cmpdi r3,0 /* see if hash_page succeeded */ - /* - * Here we have interrupts hard-disabled, so it is sufficient - * to restore paca->{soft,hard}_enable and get out. - */ + /* Success */ beq fast_exc_return_irq /* Return from exception on success */ - /* For a hash failure, we don't bother re-enabling interrupts */ - ble- 13f + /* Error */ + blt- 13f /* Here we have a page fault that hash_page can't handle. */ handle_page_fault: diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 40759fbfb171..58bddee8e1e8 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -38,6 +38,7 @@ #include #include #include +#include /* The physical memory is laid out such that the secondary processor * spin code sits at 0x0000...0x00ff. On server, the vectors follow @@ -550,7 +551,8 @@ _GLOBAL(pmac_secondary_start) */ li r0,0 stb r0,PACASOFTIRQEN(r13) - stb r0,PACAHARDIRQEN(r13) + li r0,PACA_IRQ_HARD_DIS + stb r0,PACAIRQHAPPENED(r13) /* Create a temp kernel stack for use before relocation is on. */ ld r1,PACAEMERGSP(r13) @@ -601,9 +603,12 @@ __secondary_start: li r7,0 mtlr r7 - /* Mark interrupts both hard and soft disabled */ - stb r7,PACAHARDIRQEN(r13) + /* Mark interrupts soft and hard disabled (they might be enabled + * in the PACA when doing hotplug) + */ stb r7,PACASOFTIRQEN(r13) + li r0,PACA_IRQ_HARD_DIS + stb r0,PACAIRQHAPPENED(r13) /* enable MMU and jump to start_secondary */ LOAD_REG_ADDR(r3, .start_secondary_prolog) @@ -750,13 +755,18 @@ _INIT_GLOBAL(start_here_common) /* Load the TOC (virtual address) */ ld r2,PACATOC(r13) + /* Do more system initializations in virtual mode */ bl .setup_system - /* Load up the kernel context */ -5: li r5,0 - stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */ - stb r5,PACAHARDIRQEN(r13) /* Hard Disabled on others */ + /* Mark interrupts soft and hard disabled (they might be enabled + * in the PACA when doing hotplug) + */ + li r0,0 + stb r0,PACASOFTIRQEN(r13) + li r0,PACA_IRQ_HARD_DIS + stb r0,PACAIRQHAPPENED(r13) + /* Generic kernel entry */ bl .start_kernel /* Not reached */ diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 0a48bf5db6c8..8f7a2b62863d 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -84,7 +84,11 @@ void cpu_idle(void) start_critical_timings(); - local_irq_enable(); + /* Some power_save functions return with + * interrupts enabled, some don't. 
+ */ + if (irqs_disabled()) + local_irq_enable(); set_thread_flag(TIF_POLLING_NRFLAG); } else { diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S index 16c002d6bdf1..ff007b59448d 100644 --- a/arch/powerpc/kernel/idle_book3e.S +++ b/arch/powerpc/kernel/idle_book3e.S @@ -29,43 +29,30 @@ _GLOBAL(book3e_idle) wrteei 0 /* Now check if an interrupt came in while we were soft disabled - * since we may otherwise lose it (doorbells etc...). We know - * that since PACAHARDIRQEN will have been cleared in that case. + * since we may otherwise lose it (doorbells etc...). */ - lbz r3,PACAHARDIRQEN(r13) + lbz r3,PACAIRQHAPPENED(r13) cmpwi cr0,r3,0 - beqlr + bnelr - /* Now we are going to mark ourselves as soft and hard enables in + /* Now we are going to mark ourselves as soft and hard enabled in * order to be able to take interrupts while asleep. We inform lockdep * of that. We don't actually turn interrupts on just yet tho. */ #ifdef CONFIG_TRACE_IRQFLAGS stdu r1,-128(r1) bl .trace_hardirqs_on + addi r1,r1,128 #endif li r0,1 stb r0,PACASOFTIRQEN(r13) - stb r0,PACAHARDIRQEN(r13) /* Interrupts will make use return to LR, so get something we want * in there */ bl 1f - /* Hard disable interrupts again */ - wrteei 0 - - /* Mark them off again in the PACA as well */ - li r0,0 - stb r0,PACASOFTIRQEN(r13) - stb r0,PACAHARDIRQEN(r13) - - /* Tell lockdep about it */ -#ifdef CONFIG_TRACE_IRQFLAGS - bl .trace_hardirqs_off - addi r1,r1,128 -#endif + /* And return (interrupts are on) */ ld r0,16(r1) mtlr r0 blr diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index ba3195478600..d8cdba4c28b2 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -14,6 +14,7 @@ #include #include #include +#include #undef DEBUG @@ -29,14 +30,31 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) cmpwi 0,r4,0 beqlr - /* Go to NAP now */ + /* Hard disable interrupts */ mfmsr r7 rldicl r0,r7,48,1 rotldi r0,r0,16 - mtmsrd r0,1 /* hard-disable interrupts */ + mtmsrd r0,1 + + /* Check if something happened while soft-disabled */ + lbz r0,PACAIRQHAPPENED(r13) + cmpwi cr0,r0,0 + bnelr + + /* Soft-enable interrupts */ +#ifdef CONFIG_TRACE_IRQFLAGS + mflr r0 + std r0,16(r1) + stdu r1,-128(r1) + bl .trace_hardirqs_on + addi r1,r1,128 + ld r0,16(r1) + mtlr r0 +#endif /* CONFIG_TRACE_IRQFLAGS */ + + TRACE_ENABLE_INTS li r0,1 stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ - stb r0,PACAHARDIRQEN(r13) BEGIN_FTR_SECTION DSSALL sync diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index fcdff198da4b..0cdc9a392839 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -1,5 +1,5 @@ /* - * This file contains the power_save function for 970-family CPUs. + * This file contains the power_save function for Power7 CPUs. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -15,6 +15,7 @@ #include #include #include +#include #undef DEBUG @@ -51,9 +52,25 @@ _GLOBAL(power7_idle) rldicl r9,r9,48,1 rotldi r9,r9,16 mtmsrd r9,1 /* hard-disable interrupts */ + + /* Check if something happened while soft-disabled */ + lbz r0,PACAIRQHAPPENED(r13) + cmpwi cr0,r0,0 + beq 1f + addi r1,r1,INT_FRAME_SIZE + ld r0,16(r1) + mtlr r0 + blr + +1: /* We mark irqs hard disabled as this is the state we'll + * be in when returning and we need to tell arch_local_irq_restore() + * about it + */ + li r0,PACA_IRQ_HARD_DIS + stb r0,PACAIRQHAPPENED(r13) + + /* We haven't lost state ... yet */ li r0,0 - stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ - stb r0,PACAHARDIRQEN(r13) stb r0,PACA_NAPSTATELOST(r13) /* Continue saving state */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 9b6e80668cfb..eb804e15b29b 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -95,14 +95,14 @@ extern int tau_interrupts(int); int distribute_irqs = 1; -static inline notrace unsigned long get_hard_enabled(void) +static inline notrace unsigned long get_irq_happened(void) { - unsigned long enabled; + unsigned long happened; __asm__ __volatile__("lbz %0,%1(13)" - : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled))); + : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened))); - return enabled; + return happened; } static inline notrace void set_soft_enabled(unsigned long enable) @@ -111,88 +111,167 @@ static inline notrace void set_soft_enabled(unsigned long enable) : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); } -static inline notrace void decrementer_check_overflow(void) +static inline notrace int decrementer_check_overflow(void) { - u64 now = get_tb_or_rtc(); - u64 *next_tb; - - preempt_disable(); - next_tb = &__get_cpu_var(decrementers_next_tb); - + u64 now = get_tb_or_rtc(); + u64 *next_tb = &__get_cpu_var(decrementers_next_tb); + if (now >= *next_tb) set_dec(1); - preempt_enable(); + return now >= *next_tb; } -notrace void arch_local_irq_restore(unsigned long en) +/* This is called whenever we are re-enabling interrupts + * and returns either 0 (nothing to do) or 500/900 if there's + * either an EE or a DEC to generate. + * + * This is called in two contexts: From arch_local_irq_restore() + * before soft-enabling interrupts, and from the exception exit + * path when returning from an interrupt from a soft-disabled to + * a soft enabled context. In both case we have interrupts hard + * disabled. + * + * We take care of only clearing the bits we handled in the + * PACA irq_happened field since we can only re-emit one at a + * time and we don't want to "lose" one. + */ +notrace unsigned int __check_irq_replay(void) { /* - * get_paca()->soft_enabled = en; - * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1? - * That was allowed before, and in such a case we do need to take care - * that gcc will set soft_enabled directly via r13, not choose to use - * an intermediate register, lest we're preempted to a different cpu. 
+ * We use local_paca rather than get_paca() to avoid all + * the debug_smp_processor_id() business in this low level + * function */ - set_soft_enabled(en); - if (!en) - return; + unsigned char happened = local_paca->irq_happened; -#ifdef CONFIG_PPC_STD_MMU_64 - if (firmware_has_feature(FW_FEATURE_ISERIES)) { - /* - * Do we need to disable preemption here? Not really: in the - * unlikely event that we're preempted to a different cpu in - * between getting r13, loading its lppaca_ptr, and loading - * its any_int, we might call iseries_handle_interrupts without - * an interrupt pending on the new cpu, but that's no disaster, - * is it? And the business of preempting us off the old cpu - * would itself involve a local_irq_restore which handles the - * interrupt to that cpu. - * - * But use "local_paca->lppaca_ptr" instead of "get_lppaca()" - * to avoid any preemption checking added into get_paca(). - */ - if (local_paca->lppaca_ptr->int_dword.any_int) - iseries_handle_interrupts(); + /* Clear bit 0 which we wouldn't clear otherwise */ + local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; + + /* + * Force the delivery of pending soft-disabled interrupts on PS3. + * Any HV call will have this side effect. + */ + if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { + u64 tmp, tmp2; + lv1_get_version_info(&tmp, &tmp2); } -#endif /* CONFIG_PPC_STD_MMU_64 */ /* - * if (get_paca()->hard_enabled) return; - * But again we need to take care that gcc gets hard_enabled directly - * via r13, not choose to use an intermediate register, lest we're - * preempted to a different cpu in between the two instructions. + * We may have missed a decrementer interrupt. We check the + * decrementer itself rather than the paca irq_happened field + * in case we also had a rollover while hard disabled + */ + local_paca->irq_happened &= ~PACA_IRQ_DEC; + if (decrementer_check_overflow()) + return 0x900; + + /* Finally check if an external interrupt happened */ + local_paca->irq_happened &= ~PACA_IRQ_EE; + if (happened & PACA_IRQ_EE) + return 0x500; + +#ifdef CONFIG_PPC_BOOK3E + /* Finally check if an EPR external interrupt happened + * this bit is typically set if we need to handle another + * "edge" interrupt from within the MPIC "EPR" handler + */ + local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE; + if (happened & PACA_IRQ_EE_EDGE) + return 0x500; + + local_paca->irq_happened &= ~PACA_IRQ_DBELL; + if (happened & PACA_IRQ_DBELL) + return 0x280; +#endif /* CONFIG_PPC_BOOK3E */ + + /* There should be nothing left ! */ + BUG_ON(local_paca->irq_happened != 0); + + return 0; +} + +notrace void arch_local_irq_restore(unsigned long en) +{ + unsigned char irq_happened; + unsigned int replay; + + /* Write the new soft-enabled value */ + set_soft_enabled(en); + if (!en) + return; + /* + * From this point onward, we can take interrupts, preempt, + * etc... unless we got hard-disabled. We check if an event + * happened. If none happened, we know we can just return. + * + * We may have preempted before the check below, in which case + * we are checking the "new" CPU instead of the old one. This + * is only a problem if an event happened on the "old" CPU. + * + * External interrupt events on non-iseries will have caused + * interrupts to be hard-disabled, so there is no problem, we + * cannot have preempted. + * + * That leaves us with EEs on iSeries or decrementer interrupts, + * which I decided to safely ignore. 
The preemption would have + * itself been the result of an interrupt, and on return from it we + * will have checked for pending events on the old CPU. */ - if (get_hard_enabled()) + irq_happened = get_irq_happened(); + if (!irq_happened) return; /* - * Need to hard-enable interrupts here. Since currently disabled, - * no need to take further asm precautions against preemption; but - * use local_paca instead of get_paca() to avoid preemption checking. + * We need to hard disable to get a trusted value from + * __check_irq_replay(). We also need to soft-disable + * again to avoid warnings in there due to the use of + * per-cpu variables. + * + * We know that if the value in irq_happened is exactly 0x01 + * then we are already hard disabled (there are other less + * common cases that we'll ignore for now), so we skip the + * (expensive) mtmsrd. */ - local_paca->hard_enabled = en; + if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) + __hard_irq_disable(); + set_soft_enabled(0); /* - * Trigger the decrementer if we have a pending event. Some processors - * only trigger on edge transitions of the sign bit. We might also - * have disabled interrupts long enough that the decrementer wrapped - * to positive. + * Check if anything needs to be re-emitted. We haven't + * soft-enabled yet to avoid warnings in decrementer_check_overflow + * accessing per-cpu variables. */ - decrementer_check_overflow(); + replay = __check_irq_replay(); + + /* We can soft-enable now */ + set_soft_enabled(1); /* - * Force the delivery of pending soft-disabled interrupts on PS3. - * Any HV call will have this side effect. + * And replay if we have to. This will return with interrupts + * hard-enabled. */ - if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { - u64 tmp, tmp2; - lv1_get_version_info(&tmp, &tmp2); + if (replay) { + __replay_interrupt(replay); + return; } + /* Finally, let's ensure we are hard enabled */ __hard_irq_enable(); } EXPORT_SYMBOL(arch_local_irq_restore); + +/* + * This is specifically called by assembly code to re-enable interrupts + * if they are currently disabled. This is typically called before + * schedule() or do_signal() when returning to userspace. We do it + * in C to avoid the burden of dealing with lockdep etc... + */ +void restore_interrupts(void) +{ + if (irqs_disabled()) + local_irq_enable(); +} + #endif /* CONFIG_PPC64 */ int arch_show_interrupts(struct seq_file *p, int prec) @@ -360,8 +439,17 @@ void do_IRQ(struct pt_regs *regs) check_stack_overflow(); + /* + * Query the platform PIC for the interrupt & ack it. 
+ * + * This will typically lower the interrupt line to the CPU + */ irq = ppc_md.get_irq(); + /* We can hard enable interrupts now */ + may_hard_irq_enable(); + + /* And finally process it */ if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) handle_one_irq(irq); else if (irq != NO_IRQ_IGNORE) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index bf80a1d5f8fe..e40707032ac3 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -647,6 +647,9 @@ void show_regs(struct pt_regs * regs) printk("MSR: "REG" ", regs->msr); printbits(regs->msr, msr_bits); printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); +#ifdef CONFIG_PPC64 + printk("SOFTE: %ld\n", regs->softe); +#endif trap = TRAP(regs); if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) printk("CFAR: "REG"\n", regs->orig_gpr3); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 567dd7c3ac2a..f81c81b92f0e 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -259,7 +259,6 @@ void accumulate_stolen_time(void) u64 sst, ust; u8 save_soft_enabled = local_paca->soft_enabled; - u8 save_hard_enabled = local_paca->hard_enabled; /* We are called early in the exception entry, before * soft/hard_enabled are sync'ed to the expected state @@ -268,7 +267,6 @@ void accumulate_stolen_time(void) * complain */ local_paca->soft_enabled = 0; - local_paca->hard_enabled = 0; sst = scan_dispatch_log(local_paca->starttime_user); ust = scan_dispatch_log(local_paca->starttime); @@ -277,7 +275,6 @@ void accumulate_stolen_time(void) local_paca->stolen_time += ust + sst; local_paca->soft_enabled = save_soft_enabled; - local_paca->hard_enabled = save_hard_enabled; } static inline u64 calculate_stolen_time(u64 stop_tb) @@ -580,6 +577,11 @@ void timer_interrupt(struct pt_regs * regs) if (!cpu_online(smp_processor_id())) return; + /* Conditionally hard-enable interrupts now that the DEC has been + * bumped to its maximum value + */ + may_hard_irq_enable(); + trace_timer_interrupt_entry(regs); __get_cpu_var(irq_stat).timer_irqs++; diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c index 085fd3f45ad2..a12e95af6933 100644 --- a/arch/powerpc/platforms/pseries/processor_idle.c +++ b/arch/powerpc/platforms/pseries/processor_idle.c @@ -96,6 +96,20 @@ out: return index; } +static void check_and_cede_processor(void) +{ + /* + * Interrupts are soft-disabled at this point, + * but not hard disabled. So an interrupt might have + * occurred before entering NAP, and would be potentially + * lost (edge events, decrementer events, etc...) unless + * we first hard disable then check. + */ + hard_irq_disable(); + if (get_paca()->irq_happened == 0) + cede_processor(); +} + static int dedicated_cede_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) @@ -108,7 +122,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev, ppc64_runlatch_off(); HMT_medium(); - cede_processor(); + check_and_cede_processor(); get_lppaca()->donate_dedicated_cpu = 0; dev->last_residency = @@ -132,7 +146,7 @@ static int shared_cede_loop(struct cpuidle_device *dev, * processor. When returning here, external interrupts * are enabled. 
*/ - cede_processor(); + check_and_cede_processor(); dev->last_residency = (int)idle_loop_epilog(in_purr, kt_before); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 63846ebd3276..974a47b3c9b8 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1437,8 +1437,8 @@ static void excprint(struct pt_regs *fp) printf(" current = 0x%lx\n", current); #ifdef CONFIG_PPC64 - printf(" paca = 0x%lx\t softe: %d\t harde: %d\n", - local_paca, local_paca->soft_enabled, local_paca->hard_enabled); + printf(" paca = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n", + local_paca, local_paca->soft_enabled, local_paca->irq_happened); #endif if (current) { printf(" pid = %ld, comm = %s\n", -- cgit v1.2.3 From ae3a197e3d0bfe3f4bf1693723e82dc018c096f3 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 28 Mar 2012 18:30:02 +0100 Subject: Disintegrate asm/system.h for PowerPC Disintegrate asm/system.h for PowerPC. Signed-off-by: David Howells Acked-by: Benjamin Herrenschmidt cc: linuxppc-dev@lists.ozlabs.org --- arch/powerpc/include/asm/atomic.h | 8 +- arch/powerpc/include/asm/auxvec.h | 2 + arch/powerpc/include/asm/barrier.h | 68 +++ arch/powerpc/include/asm/bug.h | 11 + arch/powerpc/include/asm/cache.h | 16 + arch/powerpc/include/asm/cmpxchg.h | 309 +++++++++++ arch/powerpc/include/asm/debug.h | 56 ++ arch/powerpc/include/asm/dma.h | 1 - arch/powerpc/include/asm/exec.h | 9 + arch/powerpc/include/asm/hw_breakpoint.h | 2 +- arch/powerpc/include/asm/processor.h | 30 ++ arch/powerpc/include/asm/reg_booke.h | 5 + arch/powerpc/include/asm/rtas.h | 2 + arch/powerpc/include/asm/runlatch.h | 45 ++ arch/powerpc/include/asm/setup.h | 24 +- arch/powerpc/include/asm/switch_to.h | 65 +++ arch/powerpc/include/asm/system.h | 598 +--------------------- arch/powerpc/kernel/align.c | 2 +- arch/powerpc/kernel/cputable.c | 1 + arch/powerpc/kernel/crash.c | 2 +- arch/powerpc/kernel/idle.c | 2 +- arch/powerpc/kernel/irq.c | 2 +- arch/powerpc/kernel/kprobes.c | 1 - arch/powerpc/kernel/lparcfg.c | 1 - arch/powerpc/kernel/ppc_ksyms.c | 2 +- arch/powerpc/kernel/process.c | 4 +- arch/powerpc/kernel/prom.c | 1 - arch/powerpc/kernel/prom_init.c | 1 - arch/powerpc/kernel/ptrace.c | 2 +- arch/powerpc/kernel/ptrace32.c | 2 +- arch/powerpc/kernel/rtas.c | 1 - arch/powerpc/kernel/setup-common.c | 1 - arch/powerpc/kernel/setup_32.c | 1 - arch/powerpc/kernel/setup_64.c | 1 - arch/powerpc/kernel/signal.c | 1 + arch/powerpc/kernel/signal_32.c | 1 + arch/powerpc/kernel/signal_64.c | 1 + arch/powerpc/kernel/smp.c | 2 +- arch/powerpc/kernel/softemu8xx.c | 1 - arch/powerpc/kernel/swsusp.c | 2 +- arch/powerpc/kernel/swsusp_64.c | 1 - arch/powerpc/kernel/sys_ppc32.c | 1 + arch/powerpc/kernel/sysfs.c | 1 - arch/powerpc/kernel/traps.c | 3 +- arch/powerpc/kernel/vdso.c | 1 - arch/powerpc/kvm/book3s_hv.c | 1 + arch/powerpc/lib/alloc.c | 2 +- arch/powerpc/lib/copyuser_power7_vmx.c | 1 + arch/powerpc/mm/44x_mmu.c | 1 - arch/powerpc/mm/fault.c | 2 +- arch/powerpc/mm/hash_utils_64.c | 1 - arch/powerpc/mm/init_32.c | 1 - arch/powerpc/mm/init_64.c | 1 - arch/powerpc/mm/numa.c | 2 +- arch/powerpc/mm/pgtable_32.c | 1 + arch/powerpc/mm/pgtable_64.c | 1 - arch/powerpc/oprofile/common.c | 1 - arch/powerpc/oprofile/op_model_7450.c | 1 - arch/powerpc/oprofile/op_model_cell.c | 1 - arch/powerpc/oprofile/op_model_fsl_emb.c | 1 - arch/powerpc/oprofile/op_model_power4.c | 1 - arch/powerpc/oprofile/op_model_rs64.c | 1 - arch/powerpc/platforms/52xx/lite5200_pm.c | 1 + arch/powerpc/platforms/82xx/pq2.c | 1 - 
arch/powerpc/platforms/83xx/km83xx.c | 1 - arch/powerpc/platforms/83xx/mpc832x_mds.c | 1 - arch/powerpc/platforms/83xx/mpc834x_itx.c | 1 - arch/powerpc/platforms/83xx/mpc834x_mds.c | 1 - arch/powerpc/platforms/83xx/mpc836x_mds.c | 1 - arch/powerpc/platforms/83xx/sbc834x.c | 1 - arch/powerpc/platforms/83xx/suspend.c | 1 + arch/powerpc/platforms/85xx/corenet_ds.c | 1 - arch/powerpc/platforms/85xx/ge_imp3a.c | 1 - arch/powerpc/platforms/85xx/ksi8560.c | 1 - arch/powerpc/platforms/85xx/mpc8536_ds.c | 1 - arch/powerpc/platforms/85xx/mpc85xx_ads.c | 1 - arch/powerpc/platforms/85xx/mpc85xx_cds.c | 1 - arch/powerpc/platforms/85xx/mpc85xx_ds.c | 1 - arch/powerpc/platforms/85xx/mpc85xx_mds.c | 1 - arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 1 - arch/powerpc/platforms/85xx/p1010rdb.c | 1 - arch/powerpc/platforms/85xx/p1023_rds.c | 1 - arch/powerpc/platforms/85xx/p2041_rdb.c | 1 - arch/powerpc/platforms/85xx/p3041_ds.c | 1 - arch/powerpc/platforms/85xx/p4080_ds.c | 1 - arch/powerpc/platforms/85xx/p5020_ds.c | 1 - arch/powerpc/platforms/85xx/sbc8548.c | 1 - arch/powerpc/platforms/85xx/sbc8560.c | 1 - arch/powerpc/platforms/85xx/socrates.c | 1 - arch/powerpc/platforms/85xx/stx_gp3.c | 1 - arch/powerpc/platforms/85xx/tqm85xx.c | 1 - arch/powerpc/platforms/85xx/xes_mpc85xx.c | 1 - arch/powerpc/platforms/86xx/gef_ppc9a.c | 1 - arch/powerpc/platforms/86xx/gef_sbc310.c | 1 - arch/powerpc/platforms/86xx/gef_sbc610.c | 1 - arch/powerpc/platforms/86xx/mpc8610_hpcd.c | 1 - arch/powerpc/platforms/86xx/mpc86xx_hpcn.c | 1 - arch/powerpc/platforms/86xx/pic.c | 1 - arch/powerpc/platforms/86xx/sbc8641d.c | 1 - arch/powerpc/platforms/8xx/mpc86xads_setup.c | 1 - arch/powerpc/platforms/8xx/mpc885ads_setup.c | 1 - arch/powerpc/platforms/8xx/tqm8xx_setup.c | 1 - arch/powerpc/platforms/cell/smp.c | 1 - arch/powerpc/platforms/embedded6xx/c2k.c | 1 - arch/powerpc/platforms/embedded6xx/holly.c | 1 - arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | 1 - arch/powerpc/platforms/embedded6xx/prpmc2800.c | 1 - arch/powerpc/platforms/embedded6xx/storcenter.c | 1 - arch/powerpc/platforms/fsl_uli1575.c | 1 - arch/powerpc/platforms/maple/setup.c | 1 - arch/powerpc/platforms/maple/time.c | 1 - arch/powerpc/platforms/pasemi/setup.c | 2 +- arch/powerpc/platforms/powermac/bootx_init.c | 1 + arch/powerpc/platforms/powermac/cpufreq_32.c | 2 +- arch/powerpc/platforms/powermac/nvram.c | 1 - arch/powerpc/platforms/powermac/setup.c | 1 - arch/powerpc/platforms/powermac/time.c | 1 - arch/powerpc/platforms/powernv/smp.c | 1 - arch/powerpc/platforms/ps3/mm.c | 1 + arch/powerpc/platforms/pseries/dtl.c | 2 +- arch/powerpc/platforms/pseries/hotplug-cpu.c | 1 - arch/powerpc/platforms/pseries/processor_idle.c | 2 +- arch/powerpc/platforms/pseries/ras.c | 1 - arch/powerpc/platforms/pseries/smp.c | 1 - arch/powerpc/platforms/wsp/chroma.c | 1 - arch/powerpc/platforms/wsp/psr2.c | 1 - arch/powerpc/platforms/wsp/wsp_pci.c | 1 + arch/powerpc/sysdev/cpm_common.c | 1 - arch/powerpc/sysdev/fsl_soc.c | 1 - arch/powerpc/sysdev/msi_bitmap.c | 1 + arch/powerpc/sysdev/tsi108_dev.c | 1 - arch/powerpc/xmon/xmon.c | 1 + lib/raid6/altivec.uc | 1 + 133 files changed, 687 insertions(+), 701 deletions(-) create mode 100644 arch/powerpc/include/asm/barrier.h create mode 100644 arch/powerpc/include/asm/cmpxchg.h create mode 100644 arch/powerpc/include/asm/debug.h create mode 100644 arch/powerpc/include/asm/exec.h create mode 100644 arch/powerpc/include/asm/runlatch.h create mode 100644 arch/powerpc/include/asm/switch_to.h (limited to 'arch/powerpc/kernel/process.c') diff 
--git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 14174e838ad9..da29032ae38f 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -5,13 +5,9 @@ * PowerPC atomic operations */ -#include - #ifdef __KERNEL__ -#include -#include -#include -#include +#include +#include #define ATOMIC_INIT(i) { (i) } diff --git a/arch/powerpc/include/asm/auxvec.h b/arch/powerpc/include/asm/auxvec.h index 19a099b62cd6..ce17d2c9eb4e 100644 --- a/arch/powerpc/include/asm/auxvec.h +++ b/arch/powerpc/include/asm/auxvec.h @@ -16,4 +16,6 @@ */ #define AT_SYSINFO_EHDR 33 +#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */ + #endif diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h new file mode 100644 index 000000000000..ae782254e731 --- /dev/null +++ b/arch/powerpc/include/asm/barrier.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 1999 Cort Dougan + */ +#ifndef _ASM_POWERPC_BARRIER_H +#define _ASM_POWERPC_BARRIER_H + +/* + * Memory barrier. + * The sync instruction guarantees that all memory accesses initiated + * by this processor have been performed (with respect to all other + * mechanisms that access memory). The eieio instruction is a barrier + * providing an ordering (separately) for (a) cacheable stores and (b) + * loads and stores to non-cacheable memory (e.g. I/O devices). + * + * mb() prevents loads and stores being reordered across this point. + * rmb() prevents loads being reordered across this point. + * wmb() prevents stores being reordered across this point. + * read_barrier_depends() prevents data-dependent loads being reordered + * across this point (nop on PPC). + * + * *mb() variants without smp_ prefix must order all types of memory + * operations with one another. sync is the only instruction sufficient + * to do this. + * + * For the smp_ barriers, ordering is for cacheable memory operations + * only. We have to use the sync instruction for smp_mb(), since lwsync + * doesn't order loads with respect to previous stores. Lwsync can be + * used for smp_rmb() and smp_wmb(). + * + * However, on CPUs that don't support lwsync, lwsync actually maps to a + * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio. + */ +#define mb() __asm__ __volatile__ ("sync" : : : "memory") +#define rmb() __asm__ __volatile__ ("sync" : : : "memory") +#define wmb() __asm__ __volatile__ ("sync" : : : "memory") +#define read_barrier_depends() do { } while(0) + +#define set_mb(var, value) do { var = value; mb(); } while (0) + +#ifdef CONFIG_SMP + +#ifdef __SUBARCH_HAS_LWSYNC +# define SMPWMB LWSYNC +#else +# define SMPWMB eieio +#endif + +#define smp_mb() mb() +#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") +#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") +#define smp_read_barrier_depends() read_barrier_depends() +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) +#endif /* CONFIG_SMP */ + +/* + * This is a barrier which prevents following instructions from being + * started until the value of the argument x is known. For example, if + * x is a variable loaded from memory, this prevents following + * instructions from being executed until the load has been performed. 
+ */ +#define data_barrier(x) \ + asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); + +#endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index 065c590c991d..3eb53d741070 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -126,5 +126,16 @@ #include +#ifndef __ASSEMBLY__ + +struct pt_regs; +extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); +extern void bad_page_fault(struct pt_regs *, unsigned long, int); +extern void _exception(int, struct pt_regs *, int, unsigned long); +extern void die(const char *, struct pt_regs *, long); +extern void print_backtrace(unsigned long *); + +#endif /* !__ASSEMBLY__ */ + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_BUG_H */ diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index 4b509411ad8a..9e495c9a6a88 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -42,8 +42,24 @@ extern struct ppc64_caches ppc64_caches; #endif /* __powerpc64__ && ! __ASSEMBLY__ */ #if !defined(__ASSEMBLY__) + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) + +#ifdef CONFIG_6xx +extern long _get_L2CR(void); +extern long _get_L3CR(void); +extern void _set_L2CR(unsigned long); +extern void _set_L3CR(unsigned long); +#else +#define _get_L2CR() 0L +#define _get_L3CR() 0L +#define _set_L2CR(val) do { } while(0) +#define _set_L3CR(val) do { } while(0) #endif +extern void cacheable_memzero(void *p, unsigned int nb); +extern void *cacheable_memcpy(void *, const void *, unsigned int); + +#endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_CACHE_H */ diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h new file mode 100644 index 000000000000..e245aab7f191 --- /dev/null +++ b/arch/powerpc/include/asm/cmpxchg.h @@ -0,0 +1,309 @@ +#ifndef _ASM_POWERPC_CMPXCHG_H_ +#define _ASM_POWERPC_CMPXCHG_H_ + +#ifdef __KERNEL__ +#include +#include +#include + +/* + * Atomic exchange + * + * Changes the memory location '*ptr' to be val and returns + * the previous value stored there. + */ +static __always_inline unsigned long +__xchg_u32(volatile void *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__( + PPC_RELEASE_BARRIER +"1: lwarx %0,0,%2 \n" + PPC405_ERR77(0,%2) +" stwcx. %3,0,%2 \n\ + bne- 1b" + PPC_ACQUIRE_BARRIER + : "=&r" (prev), "+m" (*(volatile unsigned int *)p) + : "r" (p), "r" (val) + : "cc", "memory"); + + return prev; +} + +/* + * Atomic exchange + * + * Changes the memory location '*ptr' to be val and returns + * the previous value stored there. + */ +static __always_inline unsigned long +__xchg_u32_local(volatile void *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__( +"1: lwarx %0,0,%2 \n" + PPC405_ERR77(0,%2) +" stwcx. %3,0,%2 \n\ + bne- 1b" + : "=&r" (prev), "+m" (*(volatile unsigned int *)p) + : "r" (p), "r" (val) + : "cc", "memory"); + + return prev; +} + +#ifdef CONFIG_PPC64 +static __always_inline unsigned long +__xchg_u64(volatile void *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__( + PPC_RELEASE_BARRIER +"1: ldarx %0,0,%2 \n" + PPC405_ERR77(0,%2) +" stdcx. 
%3,0,%2 \n\ + bne- 1b" + PPC_ACQUIRE_BARRIER + : "=&r" (prev), "+m" (*(volatile unsigned long *)p) + : "r" (p), "r" (val) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__xchg_u64_local(volatile void *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__( +"1: ldarx %0,0,%2 \n" + PPC405_ERR77(0,%2) +" stdcx. %3,0,%2 \n\ + bne- 1b" + : "=&r" (prev), "+m" (*(volatile unsigned long *)p) + : "r" (p), "r" (val) + : "cc", "memory"); + + return prev; +} +#endif + +/* + * This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid xchg(). + */ +extern void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +__xchg(volatile void *ptr, unsigned long x, unsigned int size) +{ + switch (size) { + case 4: + return __xchg_u32(ptr, x); +#ifdef CONFIG_PPC64 + case 8: + return __xchg_u64(ptr, x); +#endif + } + __xchg_called_with_bad_pointer(); + return x; +} + +static __always_inline unsigned long +__xchg_local(volatile void *ptr, unsigned long x, unsigned int size) +{ + switch (size) { + case 4: + return __xchg_u32_local(ptr, x); +#ifdef CONFIG_PPC64 + case 8: + return __xchg_u64_local(ptr, x); +#endif + } + __xchg_called_with_bad_pointer(); + return x; +} +#define xchg(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ + }) + +#define xchg_local(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg_local((ptr), \ + (unsigned long)_x_, sizeof(*(ptr))); \ + }) + +/* + * Compare and exchange - if *p == old, set it to new, + * and return the old value of *p. + */ +#define __HAVE_ARCH_CMPXCHG 1 + +static __always_inline unsigned long +__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) +{ + unsigned int prev; + + __asm__ __volatile__ ( + PPC_RELEASE_BARRIER +"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ + cmpw 0,%0,%3\n\ + bne- 2f\n" + PPC405_ERR77(0,%2) +" stwcx. %4,0,%2\n\ + bne- 1b" + PPC_ACQUIRE_BARRIER + "\n\ +2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old, + unsigned long new) +{ + unsigned int prev; + + __asm__ __volatile__ ( +"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ + cmpw 0,%0,%3\n\ + bne- 2f\n" + PPC405_ERR77(0,%2) +" stwcx. %4,0,%2\n\ + bne- 1b" + "\n\ +2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} + +#ifdef CONFIG_PPC64 +static __always_inline unsigned long +__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) +{ + unsigned long prev; + + __asm__ __volatile__ ( + PPC_RELEASE_BARRIER +"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ + cmpd 0,%0,%3\n\ + bne- 2f\n\ + stdcx. %4,0,%2\n\ + bne- 1b" + PPC_ACQUIRE_BARRIER + "\n\ +2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old, + unsigned long new) +{ + unsigned long prev; + + __asm__ __volatile__ ( +"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ + cmpd 0,%0,%3\n\ + bne- 2f\n\ + stdcx. %4,0,%2\n\ + bne- 1b" + "\n\ +2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} +#endif + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid cmpxchg(). 
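(a valid constant size lets the compiler drop the statically dead call; any other size leaves an unresolvable reference, so the link fails)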
*/ +extern void __cmpxchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, + unsigned int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32(ptr, old, new); +#ifdef CONFIG_PPC64 + case 8: + return __cmpxchg_u64(ptr, old, new); +#endif + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +static __always_inline unsigned long +__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, + unsigned int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32_local(ptr, old, new); +#ifdef CONFIG_PPC64 + case 8: + return __cmpxchg_u64_local(ptr, old, new); +#endif + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define cmpxchg(ptr, o, n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ + }) + + +#define cmpxchg_local(ptr, o, n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ + }) + +#ifdef CONFIG_PPC64 +#define cmpxchg64(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg((ptr), (o), (n)); \ + }) +#define cmpxchg64_local(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg_local((ptr), (o), (n)); \ + }) +#else +#include +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#endif + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_CMPXCHG_H_ */ diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h new file mode 100644 index 000000000000..716d2f089eb6 --- /dev/null +++ b/arch/powerpc/include/asm/debug.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 1999 Cort Dougan + */ +#ifndef _ASM_POWERPC_DEBUG_H +#define _ASM_POWERPC_DEBUG_H + +struct pt_regs; + +extern struct dentry *powerpc_debugfs_root; + +#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) + +extern int (*__debugger)(struct pt_regs *regs); +extern int (*__debugger_ipi)(struct pt_regs *regs); +extern int (*__debugger_bpt)(struct pt_regs *regs); +extern int (*__debugger_sstep)(struct pt_regs *regs); +extern int (*__debugger_iabr_match)(struct pt_regs *regs); +extern int (*__debugger_dabr_match)(struct pt_regs *regs); +extern int (*__debugger_fault_handler)(struct pt_regs *regs); + +#define DEBUGGER_BOILERPLATE(__NAME) \ +static inline int __NAME(struct pt_regs *regs) \ +{ \ + if (unlikely(__ ## __NAME)) \ + return __ ## __NAME(regs); \ + return 0; \ +} + +DEBUGGER_BOILERPLATE(debugger) +DEBUGGER_BOILERPLATE(debugger_ipi) +DEBUGGER_BOILERPLATE(debugger_bpt) +DEBUGGER_BOILERPLATE(debugger_sstep) +DEBUGGER_BOILERPLATE(debugger_iabr_match) +DEBUGGER_BOILERPLATE(debugger_dabr_match) +DEBUGGER_BOILERPLATE(debugger_fault_handler) + +#else +static inline int debugger(struct pt_regs *regs) { return 0; } +static inline int debugger_ipi(struct pt_regs *regs) { return 0; } +static inline int debugger_bpt(struct pt_regs *regs) { return 0; } +static inline int debugger_sstep(struct pt_regs *regs) { return 0; } +static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; } +static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; } +static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } +#endif + +extern int set_dabr(unsigned long dabr); +#ifdef CONFIG_PPC_ADV_DEBUG_REGS +extern void do_send_trap(struct pt_regs *regs, unsigned 
long address, + unsigned long error_code, int signal_code, int brkpt); +#else +extern void do_dabr(struct pt_regs *regs, unsigned long address, + unsigned long error_code); +#endif + +#endif /* _ASM_POWERPC_DEBUG_H */ diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h index adadb9943610..f6813e919bb2 100644 --- a/arch/powerpc/include/asm/dma.h +++ b/arch/powerpc/include/asm/dma.h @@ -24,7 +24,6 @@ #include #include -#include #ifndef MAX_DMA_CHANNELS #define MAX_DMA_CHANNELS 8 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h new file mode 100644 index 000000000000..8196e9c7d7e8 --- /dev/null +++ b/arch/powerpc/include/asm/exec.h @@ -0,0 +1,9 @@ +/* + * Copyright (C) 1999 Cort Dougan + */ +#ifndef _ASM_POWERPC_EXEC_H +#define _ASM_POWERPC_EXEC_H + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* _ASM_POWERPC_EXEC_H */ diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 80fd4d2b4a62..be04330af751 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -35,7 +35,7 @@ struct arch_hw_breakpoint { #include #include -#include +#include struct perf_event; struct pmu; diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index b585bff1a022..8e2d0371fe1e 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -385,6 +385,36 @@ static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) extern unsigned long cpuidle_disable; enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; +extern int powersave_nap; /* set if nap mode can be used in idle loop */ +void cpu_idle_wait(void); + +#ifdef CONFIG_PSERIES_IDLE +extern void update_smt_snooze_delay(int snooze); +extern int pseries_notify_cpuidle_add_cpu(int cpu); +#else +static inline void update_smt_snooze_delay(int snooze) {} +static inline int pseries_notify_cpuidle_add_cpu(int cpu) { return 0; } +#endif + +extern void flush_instruction_cache(void); +extern void hard_reset_now(void); +extern void poweroff_now(void); +extern int fix_alignment(struct pt_regs *); +extern void cvt_fd(float *from, double *to); +extern void cvt_df(double *from, float *to); +extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val); + +#ifdef CONFIG_PPC64 +/* + * We handle most unaligned accesses in hardware. On the other hand + * unaligned DMA can be very expensive on some ppc64 IO chips (it does + * powers of 2 writes until it reaches sufficient alignment). + * + * Based on this we disable the IP header alignment in network drivers. 
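+ * (With NET_IP_ALIGN at 0, netdev_alloc_skb_ip_align() reserves no extra receive headroom, so DMA stays naturally aligned and the CPU absorbs the misaligned IP header instead.)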
+ */ +#define NET_IP_ALIGN 0 +#endif + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PROCESSOR_H */ diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 8a97aa7289d3..b86faa9107da 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -15,6 +15,11 @@ #ifndef __ASM_POWERPC_REG_BOOKE_H__ #define __ASM_POWERPC_REG_BOOKE_H__ +#ifdef CONFIG_BOOKE_WDT +extern u32 booke_wdt_enabled; +extern u32 booke_wdt_period; +#endif /* CONFIG_BOOKE_WDT */ + /* Machine State Register (MSR) Fields */ #define MSR_GS (1<<28) /* Guest state */ #define MSR_UCLE (1<<26) /* User-mode cache lock enable */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 01c143bb77ae..9d6c37a11eed 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -325,5 +325,7 @@ static inline int page_is_rtas_user_buf(unsigned long pfn) static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;} #endif +extern int call_rtas(const char *, int, int, unsigned long *, ...); + #endif /* __KERNEL__ */ #endif /* _POWERPC_RTAS_H */ diff --git a/arch/powerpc/include/asm/runlatch.h b/arch/powerpc/include/asm/runlatch.h new file mode 100644 index 000000000000..54e9b963876e --- /dev/null +++ b/arch/powerpc/include/asm/runlatch.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 1999 Cort Dougan + */ +#ifndef _ASM_POWERPC_RUNLATCH_H +#define _ASM_POWERPC_RUNLATCH_H + +#ifdef CONFIG_PPC64 + +extern void __ppc64_runlatch_on(void); +extern void __ppc64_runlatch_off(void); + +/* + * We manually hard enable-disable, this is called + * in the idle loop and we don't want to mess up + * with soft-disable/enable & interrupt replay. + */ +#define ppc64_runlatch_off() \ + do { \ + if (cpu_has_feature(CPU_FTR_CTRL) && \ + test_thread_local_flags(_TLF_RUNLATCH)) { \ + unsigned long msr = mfmsr(); \ + __hard_irq_disable(); \ + __ppc64_runlatch_off(); \ + if (msr & MSR_EE) \ + __hard_irq_enable(); \ + } \ + } while (0) + +#define ppc64_runlatch_on() \ + do { \ + if (cpu_has_feature(CPU_FTR_CTRL) && \ + !test_thread_local_flags(_TLF_RUNLATCH)) { \ + unsigned long msr = mfmsr(); \ + __hard_irq_disable(); \ + __ppc64_runlatch_on(); \ + if (msr & MSR_EE) \ + __hard_irq_enable(); \ + } \ + } while (0) +#else +#define ppc64_runlatch_on() +#define ppc64_runlatch_off() +#endif /* CONFIG_PPC64 */ + +#endif /* _ASM_POWERPC_RUNLATCH_H */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 186e0fb835bd..d084ce195fc3 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -5,6 +5,28 @@ #ifndef __ASSEMBLY__ extern void ppc_printk_progress(char *s, unsigned short hex); -#endif + +extern unsigned int rtas_data; +extern int mem_init_done; /* set on boot once kmalloc can be called */ +extern int init_bootmem_done; /* set once bootmem is available */ +extern phys_addr_t memory_limit; +extern unsigned long klimit; +extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); + +extern void via_cuda_init(void); +extern void read_rtc_time(void); +extern void pmac_find_display(void); + +struct device_node; +extern void note_scsi_host(struct device_node *, void *); + +/* Used in very early kernel initialization. 
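(i.e. before relocation fixups have been applied, while the kernel may still be running at an address other than the one it was linked at)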
*/ +extern unsigned long reloc_offset(void); +extern unsigned long add_reloc_offset(unsigned long); +extern void reloc_got2(unsigned long); + +#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) + +#endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h new file mode 100644 index 000000000000..caf82d0a00de --- /dev/null +++ b/arch/powerpc/include/asm/switch_to.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 1999 Cort Dougan + */ +#ifndef _ASM_POWERPC_SWITCH_TO_H +#define _ASM_POWERPC_SWITCH_TO_H + +struct thread_struct; +struct task_struct; +struct pt_regs; + +extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *); +#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) + +struct thread_struct; +extern struct task_struct *_switch(struct thread_struct *prev, + struct thread_struct *next); + +extern void giveup_fpu(struct task_struct *); +extern void disable_kernel_fp(void); +extern void enable_kernel_fp(void); +extern void flush_fp_to_thread(struct task_struct *); +extern void enable_kernel_altivec(void); +extern void giveup_altivec(struct task_struct *); +extern void load_up_altivec(struct task_struct *); +extern int emulate_altivec(struct pt_regs *); +extern void __giveup_vsx(struct task_struct *); +extern void giveup_vsx(struct task_struct *); +extern void enable_kernel_spe(void); +extern void giveup_spe(struct task_struct *); +extern void load_up_spe(struct task_struct *); + +#ifndef CONFIG_SMP +extern void discard_lazy_cpu_state(void); +#else +static inline void discard_lazy_cpu_state(void) +{ +} +#endif + +#ifdef CONFIG_ALTIVEC +extern void flush_altivec_to_thread(struct task_struct *); +#else +static inline void flush_altivec_to_thread(struct task_struct *t) +{ +} +#endif + +#ifdef CONFIG_VSX +extern void flush_vsx_to_thread(struct task_struct *); +#else +static inline void flush_vsx_to_thread(struct task_struct *t) +{ +} +#endif + +#ifdef CONFIG_SPE +extern void flush_spe_to_thread(struct task_struct *); +#else +static inline void flush_spe_to_thread(struct task_struct *t) +{ +} +#endif + +#endif /* _ASM_POWERPC_SWITCH_TO_H */ diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index a02883d5af43..502c1e0275af 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h @@ -1,592 +1,6 @@ -/* - * Copyright (C) 1999 Cort Dougan - */ -#ifndef _ASM_POWERPC_SYSTEM_H -#define _ASM_POWERPC_SYSTEM_H - -#include -#include - -#include - -/* - * Memory barrier. - * The sync instruction guarantees that all memory accesses initiated - * by this processor have been performed (with respect to all other - * mechanisms that access memory). The eieio instruction is a barrier - * providing an ordering (separately) for (a) cacheable stores and (b) - * loads and stores to non-cacheable memory (e.g. I/O devices). - * - * mb() prevents loads and stores being reordered across this point. - * rmb() prevents loads being reordered across this point. - * wmb() prevents stores being reordered across this point. - * read_barrier_depends() prevents data-dependent loads being reordered - * across this point (nop on PPC). - * - * *mb() variants without smp_ prefix must order all types of memory - * operations with one another. sync is the only instruction sufficient - * to do this. - * - * For the smp_ barriers, ordering is for cacheable memory operations - * only. 
We have to use the sync instruction for smp_mb(), since lwsync - * doesn't order loads with respect to previous stores. Lwsync can be - * used for smp_rmb() and smp_wmb(). - * - * However, on CPUs that don't support lwsync, lwsync actually maps to a - * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio. - */ -#define mb() __asm__ __volatile__ ("sync" : : : "memory") -#define rmb() __asm__ __volatile__ ("sync" : : : "memory") -#define wmb() __asm__ __volatile__ ("sync" : : : "memory") -#define read_barrier_depends() do { } while(0) - -#define set_mb(var, value) do { var = value; mb(); } while (0) - -#ifdef __KERNEL__ -#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */ -#ifdef CONFIG_SMP - -#ifdef __SUBARCH_HAS_LWSYNC -# define SMPWMB LWSYNC -#else -# define SMPWMB eieio -#endif - -#define smp_mb() mb() -#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") -#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#endif /* CONFIG_SMP */ - -/* - * This is a barrier which prevents following instructions from being - * started until the value of the argument x is known. For example, if - * x is a variable loaded from memory, this prevents following - * instructions from being executed until the load has been performed. - */ -#define data_barrier(x) \ - asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); - -struct task_struct; -struct pt_regs; - -#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) - -extern int (*__debugger)(struct pt_regs *regs); -extern int (*__debugger_ipi)(struct pt_regs *regs); -extern int (*__debugger_bpt)(struct pt_regs *regs); -extern int (*__debugger_sstep)(struct pt_regs *regs); -extern int (*__debugger_iabr_match)(struct pt_regs *regs); -extern int (*__debugger_dabr_match)(struct pt_regs *regs); -extern int (*__debugger_fault_handler)(struct pt_regs *regs); - -#define DEBUGGER_BOILERPLATE(__NAME) \ -static inline int __NAME(struct pt_regs *regs) \ -{ \ - if (unlikely(__ ## __NAME)) \ - return __ ## __NAME(regs); \ - return 0; \ -} - -DEBUGGER_BOILERPLATE(debugger) -DEBUGGER_BOILERPLATE(debugger_ipi) -DEBUGGER_BOILERPLATE(debugger_bpt) -DEBUGGER_BOILERPLATE(debugger_sstep) -DEBUGGER_BOILERPLATE(debugger_iabr_match) -DEBUGGER_BOILERPLATE(debugger_dabr_match) -DEBUGGER_BOILERPLATE(debugger_fault_handler) - -#else -static inline int debugger(struct pt_regs *regs) { return 0; } -static inline int debugger_ipi(struct pt_regs *regs) { return 0; } -static inline int debugger_bpt(struct pt_regs *regs) { return 0; } -static inline int debugger_sstep(struct pt_regs *regs) { return 0; } -static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; } -static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; } -static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } -#endif - -extern int set_dabr(unsigned long dabr); -#ifdef CONFIG_PPC_ADV_DEBUG_REGS -extern void do_send_trap(struct pt_regs *regs, unsigned long address, - unsigned long error_code, int signal_code, int brkpt); -#else -extern void do_dabr(struct pt_regs *regs, unsigned long address, - unsigned long error_code); -#endif -extern void print_backtrace(unsigned long *); -extern void flush_instruction_cache(void); -extern void hard_reset_now(void); -extern void poweroff_now(void); - -#ifdef CONFIG_6xx 
-extern long _get_L2CR(void); -extern long _get_L3CR(void); -extern void _set_L2CR(unsigned long); -extern void _set_L3CR(unsigned long); -#else -#define _get_L2CR() 0L -#define _get_L3CR() 0L -#define _set_L2CR(val) do { } while(0) -#define _set_L3CR(val) do { } while(0) -#endif - -extern void via_cuda_init(void); -extern void read_rtc_time(void); -extern void pmac_find_display(void); -extern void giveup_fpu(struct task_struct *); -extern void disable_kernel_fp(void); -extern void enable_kernel_fp(void); -extern void flush_fp_to_thread(struct task_struct *); -extern void enable_kernel_altivec(void); -extern void giveup_altivec(struct task_struct *); -extern void load_up_altivec(struct task_struct *); -extern int emulate_altivec(struct pt_regs *); -extern void __giveup_vsx(struct task_struct *); -extern void giveup_vsx(struct task_struct *); -extern void enable_kernel_spe(void); -extern void giveup_spe(struct task_struct *); -extern void load_up_spe(struct task_struct *); -extern int fix_alignment(struct pt_regs *); -extern void cvt_fd(float *from, double *to); -extern void cvt_df(double *from, float *to); - -#ifndef CONFIG_SMP -extern void discard_lazy_cpu_state(void); -#else -static inline void discard_lazy_cpu_state(void) -{ -} -#endif - -#ifdef CONFIG_ALTIVEC -extern void flush_altivec_to_thread(struct task_struct *); -#else -static inline void flush_altivec_to_thread(struct task_struct *t) -{ -} -#endif - -#ifdef CONFIG_VSX -extern void flush_vsx_to_thread(struct task_struct *); -#else -static inline void flush_vsx_to_thread(struct task_struct *t) -{ -} -#endif - -#ifdef CONFIG_SPE -extern void flush_spe_to_thread(struct task_struct *); -#else -static inline void flush_spe_to_thread(struct task_struct *t) -{ -} -#endif - -extern int call_rtas(const char *, int, int, unsigned long *, ...); -extern void cacheable_memzero(void *p, unsigned int nb); -extern void *cacheable_memcpy(void *, const void *, unsigned int); -extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); -extern void bad_page_fault(struct pt_regs *, unsigned long, int); -extern void _exception(int, struct pt_regs *, int, unsigned long); -extern void die(const char *, struct pt_regs *, long); -extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val); - -#ifdef CONFIG_BOOKE_WDT -extern u32 booke_wdt_enabled; -extern u32 booke_wdt_period; -#endif /* CONFIG_BOOKE_WDT */ - -struct device_node; -extern void note_scsi_host(struct device_node *, void *); - -extern struct task_struct *__switch_to(struct task_struct *, - struct task_struct *); -#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) - -struct thread_struct; -extern struct task_struct *_switch(struct thread_struct *prev, - struct thread_struct *next); - -extern unsigned int rtas_data; -extern int mem_init_done; /* set on boot once kmalloc can be called */ -extern int init_bootmem_done; /* set once bootmem is available */ -extern phys_addr_t memory_limit; -extern unsigned long klimit; -extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); - -extern int powersave_nap; /* set if nap mode can be used in idle loop */ -void cpu_idle_wait(void); - -#ifdef CONFIG_PSERIES_IDLE -extern void update_smt_snooze_delay(int snooze); -extern int pseries_notify_cpuidle_add_cpu(int cpu); -#else -static inline void update_smt_snooze_delay(int snooze) {} -static inline int pseries_notify_cpuidle_add_cpu(int cpu) { return 0; } -#endif - -/* - * Atomic exchange - * - * Changes the memory location '*ptr' to be val and returns - * 
the previous value stored there. - */ -static __always_inline unsigned long -__xchg_u32(volatile void *p, unsigned long val) -{ - unsigned long prev; - - __asm__ __volatile__( - PPC_RELEASE_BARRIER -"1: lwarx %0,0,%2 \n" - PPC405_ERR77(0,%2) -" stwcx. %3,0,%2 \n\ - bne- 1b" - PPC_ACQUIRE_BARRIER - : "=&r" (prev), "+m" (*(volatile unsigned int *)p) - : "r" (p), "r" (val) - : "cc", "memory"); - - return prev; -} - -/* - * Atomic exchange - * - * Changes the memory location '*ptr' to be val and returns - * the previous value stored there. - */ -static __always_inline unsigned long -__xchg_u32_local(volatile void *p, unsigned long val) -{ - unsigned long prev; - - __asm__ __volatile__( -"1: lwarx %0,0,%2 \n" - PPC405_ERR77(0,%2) -" stwcx. %3,0,%2 \n\ - bne- 1b" - : "=&r" (prev), "+m" (*(volatile unsigned int *)p) - : "r" (p), "r" (val) - : "cc", "memory"); - - return prev; -} - -#ifdef CONFIG_PPC64 -static __always_inline unsigned long -__xchg_u64(volatile void *p, unsigned long val) -{ - unsigned long prev; - - __asm__ __volatile__( - PPC_RELEASE_BARRIER -"1: ldarx %0,0,%2 \n" - PPC405_ERR77(0,%2) -" stdcx. %3,0,%2 \n\ - bne- 1b" - PPC_ACQUIRE_BARRIER - : "=&r" (prev), "+m" (*(volatile unsigned long *)p) - : "r" (p), "r" (val) - : "cc", "memory"); - - return prev; -} - -static __always_inline unsigned long -__xchg_u64_local(volatile void *p, unsigned long val) -{ - unsigned long prev; - - __asm__ __volatile__( -"1: ldarx %0,0,%2 \n" - PPC405_ERR77(0,%2) -" stdcx. %3,0,%2 \n\ - bne- 1b" - : "=&r" (prev), "+m" (*(volatile unsigned long *)p) - : "r" (p), "r" (val) - : "cc", "memory"); - - return prev; -} -#endif - -/* - * This function doesn't exist, so you'll get a linker error - * if something tries to do an invalid xchg(). - */ -extern void __xchg_called_with_bad_pointer(void); - -static __always_inline unsigned long -__xchg(volatile void *ptr, unsigned long x, unsigned int size) -{ - switch (size) { - case 4: - return __xchg_u32(ptr, x); -#ifdef CONFIG_PPC64 - case 8: - return __xchg_u64(ptr, x); -#endif - } - __xchg_called_with_bad_pointer(); - return x; -} - -static __always_inline unsigned long -__xchg_local(volatile void *ptr, unsigned long x, unsigned int size) -{ - switch (size) { - case 4: - return __xchg_u32_local(ptr, x); -#ifdef CONFIG_PPC64 - case 8: - return __xchg_u64_local(ptr, x); -#endif - } - __xchg_called_with_bad_pointer(); - return x; -} -#define xchg(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ - }) - -#define xchg_local(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_local((ptr), \ - (unsigned long)_x_, sizeof(*(ptr))); \ - }) - -/* - * Compare and exchange - if *p == old, set it to new, - * and return the old value of *p. - */ -#define __HAVE_ARCH_CMPXCHG 1 - -static __always_inline unsigned long -__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) -{ - unsigned int prev; - - __asm__ __volatile__ ( - PPC_RELEASE_BARRIER -"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ - cmpw 0,%0,%3\n\ - bne- 2f\n" - PPC405_ERR77(0,%2) -" stwcx. 
%4,0,%2\n\ - bne- 1b" - PPC_ACQUIRE_BARRIER - "\n\ -2:" - : "=&r" (prev), "+m" (*p) - : "r" (p), "r" (old), "r" (new) - : "cc", "memory"); - - return prev; -} - -static __always_inline unsigned long -__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old, - unsigned long new) -{ - unsigned int prev; - - __asm__ __volatile__ ( -"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ - cmpw 0,%0,%3\n\ - bne- 2f\n" - PPC405_ERR77(0,%2) -" stwcx. %4,0,%2\n\ - bne- 1b" - "\n\ -2:" - : "=&r" (prev), "+m" (*p) - : "r" (p), "r" (old), "r" (new) - : "cc", "memory"); - - return prev; -} - -#ifdef CONFIG_PPC64 -static __always_inline unsigned long -__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) -{ - unsigned long prev; - - __asm__ __volatile__ ( - PPC_RELEASE_BARRIER -"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ - cmpd 0,%0,%3\n\ - bne- 2f\n\ - stdcx. %4,0,%2\n\ - bne- 1b" - PPC_ACQUIRE_BARRIER - "\n\ -2:" - : "=&r" (prev), "+m" (*p) - : "r" (p), "r" (old), "r" (new) - : "cc", "memory"); - - return prev; -} - -static __always_inline unsigned long -__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old, - unsigned long new) -{ - unsigned long prev; - - __asm__ __volatile__ ( -"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ - cmpd 0,%0,%3\n\ - bne- 2f\n\ - stdcx. %4,0,%2\n\ - bne- 1b" - "\n\ -2:" - : "=&r" (prev), "+m" (*p) - : "r" (p), "r" (old), "r" (new) - : "cc", "memory"); - - return prev; -} -#endif - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid cmpxchg(). */ -extern void __cmpxchg_called_with_bad_pointer(void); - -static __always_inline unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, - unsigned int size) -{ - switch (size) { - case 4: - return __cmpxchg_u32(ptr, old, new); -#ifdef CONFIG_PPC64 - case 8: - return __cmpxchg_u64(ptr, old, new); -#endif - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -static __always_inline unsigned long -__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, - unsigned int size) -{ - switch (size) { - case 4: - return __cmpxchg_u32_local(ptr, old, new); -#ifdef CONFIG_PPC64 - case 8: - return __cmpxchg_u64_local(ptr, old, new); -#endif - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ - }) - - -#define cmpxchg_local(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ - }) - -#ifdef CONFIG_PPC64 -/* - * We handle most unaligned accesses in hardware. On the other hand - * unaligned DMA can be very expensive on some ppc64 IO chips (it does - * powers of 2 writes until it reaches sufficient alignment). - * - * Based on this we disable the IP header alignment in network drivers. - */ -#define NET_IP_ALIGN 0 - -#define cmpxchg64(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg((ptr), (o), (n)); \ - }) -#define cmpxchg64_local(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg_local((ptr), (o), (n)); \ - }) -#else -#include -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -#endif - -extern unsigned long arch_align_stack(unsigned long sp); - -/* Used in very early kernel initialization. 
*/ -extern unsigned long reloc_offset(void); -extern unsigned long add_reloc_offset(unsigned long); -extern void reloc_got2(unsigned long); - -#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) - -extern struct dentry *powerpc_debugfs_root; - -#ifdef CONFIG_PPC64 - -extern void __ppc64_runlatch_on(void); -extern void __ppc64_runlatch_off(void); - -/* - * We manually hard enable-disable, this is called - * in the idle loop and we don't want to mess up - * with soft-disable/enable & interrupt replay. - */ -#define ppc64_runlatch_off() \ - do { \ - if (cpu_has_feature(CPU_FTR_CTRL) && \ - test_thread_local_flags(_TLF_RUNLATCH)) { \ - unsigned long msr = mfmsr(); \ - __hard_irq_disable(); \ - __ppc64_runlatch_off(); \ - if (msr & MSR_EE) \ - __hard_irq_enable(); \ - } \ - } while (0) - -#define ppc64_runlatch_on() \ - do { \ - if (cpu_has_feature(CPU_FTR_CTRL) && \ - !test_thread_local_flags(_TLF_RUNLATCH)) { \ - unsigned long msr = mfmsr(); \ - __hard_irq_disable(); \ - __ppc64_runlatch_on(); \ - if (msr & MSR_EE) \ - __hard_irq_enable(); \ - } \ - } while (0) -#else -#define ppc64_runlatch_on() -#define ppc64_runlatch_off() -#endif /* CONFIG_PPC64 */ - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_SYSTEM_H */ +/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */ +#include +#include +#include +#include +#include diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 8184ee97e484..ee5b690a0bed 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -21,10 +21,10 @@ #include #include #include -#include #include #include #include +#include struct aligninfo { unsigned char len; diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 138ae183c440..455faa389876 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -20,6 +20,7 @@ #include #include /* for PTRRELOC on ARCH=ppc */ #include +#include struct cpu_spec* cur_cpu_spec = NULL; EXPORT_SYMBOL(cur_cpu_spec); diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index abef75176c07..fdcd8f551aff 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -27,8 +27,8 @@ #include #include #include -#include #include +#include /* * The primary CPU waits a while for all secondary CPUs to enter. 
This is to diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index e8e821146f38..6d2209ac0c44 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -26,11 +26,11 @@ #include #include -#include #include #include #include #include +#include #include #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index a3d128e94cff..2c5635dce05f 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -57,7 +57,6 @@ #include #include -#include #include #include #include @@ -67,6 +66,7 @@ #include #include #include +#include #ifdef CONFIG_PPC64 #include diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index bc47352deb1f..e88c64331819 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -35,7 +35,6 @@ #include #include #include -#include #ifdef CONFIG_PPC_ADV_DEBUG_REGS #define MSR_SINGLESTEP (MSR_DE) diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index ac12bd80ad95..f5725bce9ed2 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index d3114a71dd32..786a2700ec2d 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -43,6 +42,7 @@ #include #include #include +#include #ifdef CONFIG_PPC32 extern void transfer_to_handler(void); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index e40707032ac3..f88698c0f332 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -41,14 +41,16 @@ #include #include -#include #include #include #include #include #include #include +#include #include +#include +#include #ifdef CONFIG_PPC64 #include #endif diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 89e850af3dd6..f191bf02943a 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -41,7 +41,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index e2d599048142..b2aae219b4b2 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 5b43325402bc..8d8e028893be 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #define CREATE_TRACE_POINTS #include diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c index 69c4be917d07..469349d14a97 100644 --- a/arch/powerpc/kernel/ptrace32.c +++ b/arch/powerpc/kernel/ptrace32.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include /* * does not yet catch signals sent when the child dies. 
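The include churn above follows one pattern: each file stops relying on asm/system.h and pulls in only the split-out header it actually needs. A minimal sketch of what a consumer looks like after this series (the file and function are hypothetical; only the header and the smp_wmb() semantics come from the new barrier.h shown earlier):

#include <asm/barrier.h>

/*
 * Publish data for another CPU: the write barrier keeps the payload
 * store ordered before the flag store (LWSYNC where the subarch has
 * it, eieio otherwise, per barrier.h). A reader pairs this with
 * smp_rmb() between loading the flag and loading the data.
 */
static void publish_sketch(int *flag, int *data)
{
	*data = 42;
	smp_wmb();
	*flag = 1;
}
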
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 9f843cdfee9e..3dc679930d00 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index b0ebdeab9494..afd4f051f3f2 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -51,7 +51,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index ac7610815113..9825f29d1faf 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4cb8f1e9d044..6d639b2d24c6 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -52,7 +52,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 7006b7f4267a..651c5963662b 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "signal.h" diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index e061ef5dd449..45eb998557f8 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -43,6 +43,7 @@ #include #include #include +#include #ifdef CONFIG_PPC64 #include "ppc32.h" #include diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index a50b5ec281dc..2692efdb154e 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "signal.h" diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 46695febc09f..d9f94410fd7f 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -43,12 +43,12 @@ #include #include #include -#include #include #include #ifdef CONFIG_PPC64 #include #endif +#include #ifdef DEBUG #include diff --git a/arch/powerpc/kernel/softemu8xx.c b/arch/powerpc/kernel/softemu8xx.c index af0e8290b4fc..29b2f81dd709 100644 --- a/arch/powerpc/kernel/softemu8xx.c +++ b/arch/powerpc/kernel/softemu8xx.c @@ -26,7 +26,6 @@ #include #include -#include #include /* Eventually we may need a look-up table, but this works for now. 
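Likewise, code that used the atomic primitives formerly living in asm/system.h now gets them from asm/cmpxchg.h. As that header documents, cmpxchg() returns the value that was previously in *ptr, so success is detected by comparing the return value with the expected old value. A small sketch of the usual retry loop (the function is hypothetical; cmpxchg() is the macro from the new header):

#include <asm/cmpxchg.h>

/* Atomically add n to *ctr without a lock, retrying on contention. */
static unsigned int cmpxchg_add_sketch(unsigned int *ctr, unsigned int n)
{
	unsigned int old, prev;

	do {
		old = *ctr;
		/* returns the prior value; equal to old only if we won */
		prev = cmpxchg(ctr, old, old + n);
	} while (prev != old);

	return old + n;
}
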
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c
index 641f9adc6205..eae33e10b65f 100644
--- a/arch/powerpc/kernel/swsusp.c
+++ b/arch/powerpc/kernel/swsusp.c
@@ -10,9 +10,9 @@
  */
 #include
-#include
 #include
 #include
+#include
 void save_processor_state(void)
 {
diff --git a/arch/powerpc/kernel/swsusp_64.c b/arch/powerpc/kernel/swsusp_64.c
index 168e88480223..0e899e47c325 100644
--- a/arch/powerpc/kernel/swsusp_64.c
+++ b/arch/powerpc/kernel/swsusp_64.c
@@ -6,7 +6,6 @@
  * GPLv2
  */
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 4e5bf1edc0f2..81c570633ead 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -50,6 +50,7 @@
 #include
 #include
 #include
+#include
 asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 0c683d376b1c..3529446c2abd 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,7 +17,6 @@
 #include
 #include
 #include
-#include
 #include "cacheinfo.h"
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a750409ccc4e..6aa0c663e247 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -39,7 +39,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -58,6 +57,8 @@
 #include
 #include
 #include
+#include
+#include
 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
 int (*__debugger)(struct pt_regs *regs) __read_mostly;
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 7d14bb697d40..1ce1ec410a12 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -24,7 +24,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index a7267167a550..ab4267760b7d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -45,6 +45,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index 13b676c20d12..da22c84a8fed 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -3,8 +3,8 @@
 #include
 #include
 #include
+#include
-#include
 void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
diff --git a/arch/powerpc/lib/copyuser_power7_vmx.c b/arch/powerpc/lib/copyuser_power7_vmx.c
index 6e1efadac48b..bf2654f2b68e 100644
--- a/arch/powerpc/lib/copyuser_power7_vmx.c
+++ b/arch/powerpc/lib/copyuser_power7_vmx.c
@@ -20,6 +20,7 @@
  */
 #include
 #include
+#include
 int enter_vmx_copy(void)
 {
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 388b95e1a009..2c9441ee6bb8 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -27,7 +27,6 @@
 #include
 #include
-#include
 #include
 #include
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 19f2f9498b27..08ffcf52a856 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -38,10 +38,10 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
+#include
 #include
 #include "icswx.h"
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 3e8c37a4e395..377e5cbedbbb 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -40,7 +40,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 6157be2a7049..01e2db97a210 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -45,7 +45,6 @@
 #include
 #include
 #include
-#include
 #include
 #include "mmu_decl.h"
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index e94b57fb79a0..620b7acd2fdf 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -61,7 +61,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3feefc3842a8..b6edbb3b4a54 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -24,11 +24,11 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
+#include
 static int numa_enabled = 1;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 0907f92ce309..6c856fb8c15b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include
 #include "mmu_decl.h"
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ad36ede469cc..249a0631c4db 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -51,7 +51,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 6f01624f317f..4f51025f5b00 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -18,7 +18,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c
index f8d36f940e88..ff617246d128 100644
--- a/arch/powerpc/oprofile/op_model_7450.c
+++ b/arch/powerpc/oprofile/op_model_7450.c
@@ -19,7 +19,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index cb515cff745c..b9589c19ccda 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -34,7 +34,6 @@
 #include
 #include
 #include
-#include
 #include
 #include "../platforms/cell/interrupt.h"
diff --git a/arch/powerpc/oprofile/op_model_fsl_emb.c b/arch/powerpc/oprofile/op_model_fsl_emb.c
index d4e6507277b5..ccc1daa33aed 100644
--- a/arch/powerpc/oprofile/op_model_fsl_emb.c
+++ b/arch/powerpc/oprofile/op_model_fsl_emb.c
@@ -17,7 +17,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index e6bec74be131..95ae77dec3f6 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -14,7 +14,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c
index a20afe45d936..9b801b8c8c5a 100644
--- a/arch/powerpc/oprofile/op_model_rs64.c
+++ b/arch/powerpc/oprofile/op_model_rs64.c
@@ -11,7 +11,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index eda0fc2a3914..870b70f5d1bd 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include
 /* defined in lite5200_sleep.S and only used here */
 extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar);
diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c
index d111b024eafd..fb94d10e5a4d 100644
--- a/arch/powerpc/platforms/82xx/pq2.c
+++ b/arch/powerpc/platforms/82xx/pq2.c
@@ -17,7 +17,6 @@
 #include
 #include
 #include
-#include
 #include
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index 65eb792a0d00..a266ba876863 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -27,7 +27,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index e36bc611dd6e..d440435e055c 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -26,7 +26,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 39849dd1b5bb..a494fa57bdf9 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -25,7 +25,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index 5828d8e97c37..553e793a4a93 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -25,7 +25,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index ad8e4bcd7d55..1b1f6c8a1a12 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -33,7 +33,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c
index 8a81d7640b1f..26cb3e934722 100644
--- a/arch/powerpc/platforms/83xx/sbc834x.c
+++ b/arch/powerpc/platforms/83xx/sbc834x.c
@@ -27,7 +27,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index edf66870d978..1a046715e461 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index df69e99e511c..dd3617c531d7 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -18,7 +18,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index d50056f424f6..18014629416d 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -24,7 +24,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index 60120e55da41..3dc1bda3ddc3 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -20,7 +20,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index f58872688d8f..585bd22b1406 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -19,7 +19,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index d19f675cb369..29ee8fcd75a2 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -19,7 +19,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index ab5f0bf19454..11156fb53d83 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -27,7 +27,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 6e23e3e34bd9..1fd91e9e0ffb 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -22,7 +22,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index f33662b46b8d..3754ddc00af7 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -35,7 +35,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index db214cd4c822..9848f9e39853 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -18,7 +18,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index d8bd6563d9ca..dbaf44354f0d 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -16,7 +16,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c
index 6b07398e4369..2990e8b13dc9 100644
--- a/arch/powerpc/platforms/85xx/p1023_rds.c
+++ b/arch/powerpc/platforms/85xx/p1023_rds.c
@@ -22,7 +22,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/p2041_rdb.c b/arch/powerpc/platforms/85xx/p2041_rdb.c
index eda6ed5683e1..6541fa2630c0 100644
--- a/arch/powerpc/platforms/85xx/p2041_rdb.c
+++ b/arch/powerpc/platforms/85xx/p2041_rdb.c
@@ -16,7 +16,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/p3041_ds.c b/arch/powerpc/platforms/85xx/p3041_ds.c
index 96d99a374dcf..f238efa75891 100644
--- a/arch/powerpc/platforms/85xx/p3041_ds.c
+++ b/arch/powerpc/platforms/85xx/p3041_ds.c
@@ -18,7 +18,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/p4080_ds.c b/arch/powerpc/platforms/85xx/p4080_ds.c
index d1b21d7663e3..c92417dc6574 100644
--- a/arch/powerpc/platforms/85xx/p4080_ds.c
+++ b/arch/powerpc/platforms/85xx/p4080_ds.c
@@ -17,7 +17,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c
index e8cba5004fd8..17bef15a85ed 100644
--- a/arch/powerpc/platforms/85xx/p5020_ds.c
+++ b/arch/powerpc/platforms/85xx/p5020_ds.c
@@ -18,7 +18,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index 1677b8a22677..cd3a66bdb54b 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -30,7 +30,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index 3c3bbcc27566..b1be632ede43 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -21,7 +21,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index b71919217756..b9c6daa07b66 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -29,7 +29,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 27ca3a7b04ab..e0508002b086 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -28,7 +28,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index d7504cefe016..4d786c25d3e5 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -26,7 +26,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 503c21596c63..41c687550ea7 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -21,7 +21,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index ed58b6cfd60c..1fca663f1b25 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -24,7 +24,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 710db69bd523..14e0e576bcbd 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -24,7 +24,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index 4a13d2f4ac20..1638f43599f0 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -24,7 +24,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 13fa9a6403e6..bbc615206c67 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -25,7 +25,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 569262ca499a..3755e61d7ecf 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -21,7 +21,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index 22cc3571ae19..9982f57c98b9 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -12,7 +12,6 @@
 #include
 #include
-#include
 #include
 #include
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
index 51c8f331b671..e7007d0d949e 100644
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ b/arch/powerpc/platforms/86xx/sbc8641d.c
@@ -21,7 +21,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
index caaec29796b7..866feff83c91 100644
--- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
@@ -19,7 +19,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index 45ed6cdc1310..5d98398c2f5e 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -32,7 +32,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
index 528e00ddef31..8d21ab70e06c 100644
--- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
@@ -35,7 +35,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index 4a255cf8cd17..49a65e2dfc71 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -38,7 +38,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
diff --git a/arch/powerpc/platforms/embedded6xx/c2k.c b/arch/powerpc/platforms/embedded6xx/c2k.c
index 8cab5731850f..ebd3963fdf91 100644
--- a/arch/powerpc/platforms/embedded6xx/c2k.c
+++ b/arch/powerpc/platforms/embedded6xx/c2k.c
@@ -23,7 +23,6 @@
 #include
 #include
-#include
 #include
 #include
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index ab51b21b4bd7..8c305c7c8977 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -28,7 +28,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index 74ccce36baed..beeaf4a173e1 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -32,7 +32,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/embedded6xx/prpmc2800.c b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
index 670035f49a69..d455f08bea53 100644
--- a/arch/powerpc/platforms/embedded6xx/prpmc2800.c
+++ b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
@@ -17,7 +17,6 @@
 #include
 #include
-#include
 #include
 #include
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index e0ed3c71d69b..c458b60d14c4 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -16,7 +16,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c
index 8b0c2082a783..64fde058e545 100644
--- a/arch/powerpc/platforms/fsl_uli1575.c
+++ b/arch/powerpc/platforms/fsl_uli1575.c
@@ -15,7 +15,6 @@
 #include
 #include
-#include
 #include
 #define ULI_PIRQA 0x08
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 3b7545a51aa9..cb1b0b35a0c6 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -47,7 +47,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c
index eac569dee27c..b4a369dac3a8 100644
--- a/arch/powerpc/platforms/maple/time.c
+++ b/arch/powerpc/platforms/maple/time.c
@@ -27,7 +27,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index e777ad471a48..2ed9212d7d59 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -32,13 +32,13 @@
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index 84d7fd9bcc69..3e91ef538114 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 #undef DEBUG
 #define SET_BOOT_BAT
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 1fc386a23f18..64171198535c 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -33,9 +33,9 @@
 #include
 #include
 #include
-#include
 #include
 #include
+#include
 /* WARNING !!! This will cause calibrate_delay() to be called,
  * but this is an __init function ! So you MUST go edit
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index da18b26dcc6f..014d06e6d46b 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -23,7 +23,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 970ea1de4298..141f8899a633 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -57,7 +57,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index 11c9fce43b5b..8680bb69795d 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -26,7 +26,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 17210c526c52..3ef46254c35b 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -25,7 +25,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 8bd6ba542691..de2aea421707 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include
 #include "platform.h"
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 0e8656370063..a7648543c59e 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -25,10 +25,10 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
+#include
 #include "plpar_wrappers.h"
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index c986d08d0807..64c97d8ac0c5 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -23,7 +23,6 @@
 #include
 #include /* for idle_task_exit */
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index a12e95af6933..41a34bc4a9a2 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -14,9 +14,9 @@
 #include
 #include
-#include
 #include
 #include
+#include
 #include "plpar_wrappers.h"
 #include "pseries.h"
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 086d2ae4e06a..9e248ef6cc67 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -37,7 +37,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index eadba9521a35..e16bb8d48550 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -37,7 +37,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/platforms/wsp/chroma.c b/arch/powerpc/platforms/wsp/chroma.c
index ca6fa26f6e63..8ef53bc2e70e 100644
--- a/arch/powerpc/platforms/wsp/chroma.c
+++ b/arch/powerpc/platforms/wsp/chroma.c
@@ -17,7 +17,6 @@
 #include
 #include
-#include
 #include
 #include "ics.h"
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
index 0c1ae06d0be1..508ec8282b96 100644
--- a/arch/powerpc/platforms/wsp/psr2.c
+++ b/arch/powerpc/platforms/wsp/psr2.c
@@ -17,7 +17,6 @@
 #include
 #include
-#include
 #include
 #include "ics.h"
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
index d24b3acf858e..386879412e91 100644
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include "wsp.h"
 #include "wsp_pci.h"
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index bf6c7cc0a6af..4dd534194ae8 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -26,7 +26,6 @@
 #include
 #include
-#include
 #include
 #include
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index e8f385fbf549..c449dbd1c938 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -31,7 +31,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index 5287e95cec3a..0968b66b4cf9 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num)
 {
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index 2370e1c63379..1fd0717ade02 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -22,7 +22,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 68a9cbbab450..0f3ab06d2222 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include
 #ifdef CONFIG_PPC64
 #include
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index 2654d5c854be..69f3a9ff870f 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -30,6 +30,7 @@
 #ifdef __KERNEL__
 # include
 # include
+# include
 #endif
 /*
-- cgit v1.2.3