From 4fa20439a80c008d33f2865b0db94dcb5da467e2 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 1 Aug 2011 17:25:06 -0400 Subject: ARM: clean up idle handlers Let's factor out the need_resched() check instead of having it duplicated in every pm_idle implementation to avoid inconsistencies (omap2_pm_idle is missing it already). The forceful re-enablement of IRQs after pm_idle has returned can go. The warning certainly doesn't trigger for existing users. To get rid of the pm_idle calling convention oddity, let's introduce arm_pm_idle() allowing local_irq_enable() to be factored out of SoC-specific implementations. The default pm_idle function becomes a wrapper for arm_pm_idle and it takes care of enabling IRQs closer to where they are initially disabled. And finally, move the comment explaining why IRQs are turned off to a more appropriate location. Signed-off-by: Nicolas Pitre Acked-and-tested-by: Jamie Iles --- arch/arm/kernel/process.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'arch/arm/kernel/process.c') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 971d65c253a9..ba9e7ef92bec 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -181,12 +181,16 @@ void cpu_idle_wait(void) EXPORT_SYMBOL_GPL(cpu_idle_wait); /* - * This is our default idle handler. We need to disable - * interrupts here to ensure we don't miss a wakeup call. + * This is our default idle handler. */ + +void (*arm_pm_idle)(void); + static void default_idle(void) { - if (!need_resched()) + if (arm_pm_idle) + arm_pm_idle(); + else arch_idle(); local_irq_enable(); } @@ -215,6 +219,10 @@ void cpu_idle(void) cpu_die(); #endif + /* + * We need to disable interrupts here + * to ensure we don't miss a wakeup call. + */ local_irq_disable(); #ifdef CONFIG_PL310_ERRATA_769419 wmb(); @@ -222,19 +230,18 @@ void cpu_idle(void) if (hlt_counter) { local_irq_enable(); cpu_relax(); - } else { + } else if (!need_resched()) { stop_critical_timings(); if (cpuidle_idle_call()) pm_idle(); start_critical_timings(); /* - * This will eventually be removed - pm_idle - * functions should always return with IRQs - * enabled. + * pm_idle functions must always + * return with IRQs enabled. */ WARN_ON(irqs_disabled()); + } else local_irq_enable(); - } } leds_event(led_idle_end); rcu_idle_exit(); -- cgit v1.2.3 From ae940913030386884f259eb4d95ac4d93b57144f Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 19 Dec 2011 03:03:58 -0500 Subject: ARM: substitute arch_idle() Now that all implementations of arch_idle() are equivalent to cpu_do_idle() we can just use the latter directly and stop including mach/system.h. 
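For reference, once both of these patches are applied the default idle path reduces to roughly the following. This is a sketch assembled from the hunks in the two commits, not a verbatim copy of the file:

void (*arm_pm_idle)(void);	/* optional SoC-specific idle hook */

static void default_idle(void)
{
	if (arm_pm_idle)
		arm_pm_idle();		/* SoC hook, entered with IRQs disabled by cpu_idle() */
	else
		cpu_do_idle();		/* generic per-CPU idle, e.g. WFI */
	local_irq_enable();		/* re-enable IRQs close to where they were disabled */
}

The need_resched() check now lives once in the cpu_idle() loop itself, so individual idle implementations no longer have to duplicate it.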
Signed-off-by: Nicolas Pitre Acked-by: H Hartley Sweeten Acked-and-tested-by: Jamie Iles Acked-by: Tony Lindgren Tested-by: Stephen Warren --- arch/arm/kernel/process.c | 4 +--- arch/arm/mach-mxs/pm.c | 3 +-- arch/arm/mach-omap2/pm44xx.c | 4 ++-- 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/arm/kernel/process.c') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index ba9e7ef92bec..008e7ce766a7 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -61,8 +61,6 @@ extern void setup_mm_for_reboot(void); static volatile int hlt_counter; -#include - void disable_hlt(void) { hlt_counter++; @@ -191,7 +189,7 @@ static void default_idle(void) if (arm_pm_idle) arm_pm_idle(); else - arch_idle(); + cpu_do_idle(); local_irq_enable(); } diff --git a/arch/arm/mach-mxs/pm.c b/arch/arm/mach-mxs/pm.c index fb042da29bda..a9b4bbcdafb4 100644 --- a/arch/arm/mach-mxs/pm.c +++ b/arch/arm/mach-mxs/pm.c @@ -15,13 +15,12 @@ #include #include #include -#include static int mxs_suspend_enter(suspend_state_t state) { switch (state) { case PM_SUSPEND_MEM: - arch_idle(); + cpu_do_idle(); break; default: diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index 62d4f36c57a6..c840689df24a 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -173,7 +173,7 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused) * omap_default_idle - OMAP4 default ilde routine.' * * Implements OMAP4 memory, IO ordering requirements which can't be addressed - * with default arch_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and + * with default cpu_do_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and * by secondary CPU with CONFIG_CPUIDLE. */ static void omap_default_idle(void) @@ -253,7 +253,7 @@ static int __init omap4_pm_init(void) suspend_set_ops(&omap_pm_ops); #endif /* CONFIG_SUSPEND */ - /* Overwrite the default arch_idle() */ + /* Overwrite the default cpu_do_idle() */ arm_pm_idle = omap_default_idle; omap4_idle_init(); -- cgit v1.2.3 From bd2f55361f18347e890d52ff9cfd8895455ec11b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 21 Mar 2011 12:33:18 +0100 Subject: sched/rt: Use schedule_preempt_disabled() Coccinelle based conversion. 
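For context, the helper substituted throughout these files is defined in the scheduler core by a separate patch that is not shown here; a sketch of its definition:

/*
 * schedule_preempt_disabled() is called with preemption disabled and
 * returns with preemption disabled again, folding the repeated
 * enable/schedule/disable sequence into a single helper.
 */
void __sched schedule_preempt_disabled(void)
{
	preempt_enable_no_resched();
	schedule();
	preempt_disable();
}

Each conversion below therefore preserves the caller's invariant that preemption is disabled on both sides of the scheduling point.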
Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-24swm5zut3h9c4a6s46x8rws@git.kernel.org Signed-off-by: Ingo Molnar --- arch/arm/kernel/process.c | 4 +--- arch/avr32/kernel/process.c | 4 +--- arch/blackfin/kernel/process.c | 4 +--- arch/cris/kernel/process.c | 4 +--- arch/frv/kernel/process.c | 4 +--- arch/h8300/kernel/process.c | 4 +--- arch/ia64/kernel/process.c | 4 +--- arch/m32r/kernel/process.c | 4 +--- arch/m68k/kernel/process_mm.c | 4 +--- arch/m68k/kernel/process_no.c | 4 +--- arch/microblaze/kernel/process.c | 4 +--- arch/mips/kernel/process.c | 4 +--- arch/mn10300/kernel/process.c | 4 +--- arch/parisc/kernel/process.c | 4 +--- arch/powerpc/kernel/idle.c | 8 ++++---- arch/powerpc/platforms/iseries/setup.c | 8 ++------ arch/s390/kernel/process.c | 4 +--- arch/score/kernel/process.c | 4 +--- arch/sh/kernel/idle.c | 4 +--- arch/sparc/kernel/process_32.c | 8 ++------ arch/sparc/kernel/process_64.c | 10 ++++------ arch/tile/kernel/process.c | 4 +--- arch/x86/kernel/process_32.c | 4 +--- arch/x86/kernel/process_64.c | 4 +--- arch/xtensa/kernel/process.c | 4 +--- init/main.c | 5 +---- kernel/mutex.c | 4 +--- kernel/softirq.c | 4 +--- 28 files changed, 36 insertions(+), 95 deletions(-) (limited to 'arch/arm/kernel/process.c') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 971d65c253a9..c2ae3cd331fe 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -239,9 +239,7 @@ void cpu_idle(void) leds_event(led_idle_end); rcu_idle_exit(); tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index ea3395750324..92c5af98a6f7 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c @@ -40,9 +40,7 @@ void cpu_idle(void) cpu_idle_sleep(); rcu_idle_exit(); tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 8dd0416673cb..a80a643f3691 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c @@ -94,9 +94,7 @@ void cpu_idle(void) idle(); rcu_idle_exit(); tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c index aa585e4e979e..d8f50ff6fadd 100644 --- a/arch/cris/kernel/process.c +++ b/arch/cris/kernel/process.c @@ -115,9 +115,7 @@ void cpu_idle (void) idle = default_idle; idle(); } - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c index 3901df1213c0..29cc49783787 100644 --- a/arch/frv/kernel/process.c +++ b/arch/frv/kernel/process.c @@ -92,9 +92,7 @@ void cpu_idle(void) idle(); } - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index 933bd388efb2..1a173b35f475 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c @@ -81,9 +81,7 @@ void cpu_idle(void) while (1) { while (!need_resched()) idle(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 6d33c5cc94f0..9dc52b63fc87 
100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -330,9 +330,7 @@ cpu_idle (void) normal_xtp(); #endif } - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); check_pgt_cache(); if (cpu_is_offline(cpu)) play_dead(); diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index 422bea9f1dbc..3a4a32b27208 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c @@ -90,9 +90,7 @@ void cpu_idle (void) idle(); } - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/m68k/kernel/process_mm.c b/arch/m68k/kernel/process_mm.c index 099283ee1a8f..fe4186b5fc32 100644 --- a/arch/m68k/kernel/process_mm.c +++ b/arch/m68k/kernel/process_mm.c @@ -78,9 +78,7 @@ void cpu_idle(void) while (1) { while (!need_resched()) idle(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/m68k/kernel/process_no.c b/arch/m68k/kernel/process_no.c index 5e1078cabe0e..f7fe6c348595 100644 --- a/arch/m68k/kernel/process_no.c +++ b/arch/m68k/kernel/process_no.c @@ -73,9 +73,7 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { idle(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 7dcb5bfffb75..9155f7d92669 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -110,9 +110,7 @@ void cpu_idle(void) rcu_idle_exit(); tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); check_pgt_cache(); } } diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 7955409051c4..61f1cb45a1d5 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -80,9 +80,7 @@ void __noreturn cpu_idle(void) #endif rcu_idle_exit(); tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index 28eec3102535..cac401d37f75 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c @@ -123,9 +123,7 @@ void cpu_idle(void) idle(); } - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 62c60b87d039..d4b94b395c16 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -71,9 +71,7 @@ void cpu_idle(void) while (1) { while (!need_resched()) barrier(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); check_pgt_cache(); } } diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 0a48bf5db6c8..65035141552b 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -101,11 +101,11 @@ void cpu_idle(void) ppc64_runlatch_on(); rcu_idle_exit(); tick_nohz_idle_exit(); - preempt_enable_no_resched(); - if (cpu_should_die()) + if (cpu_should_die()) { + preempt_enable_no_resched(); cpu_die(); - schedule(); - preempt_disable(); + } + schedule_preempt_disabled(); } } diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index 8fc62586a973..a5fbf4cb6329 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ 
b/arch/powerpc/platforms/iseries/setup.c @@ -584,9 +584,7 @@ static void iseries_shared_idle(void) if (hvlpevent_is_pending()) process_iSeries_events(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } @@ -615,9 +613,7 @@ static void iseries_dedicated_idle(void) ppc64_runlatch_on(); rcu_idle_exit(); tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index e795933eb2cb..7618085b4164 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -97,9 +97,7 @@ void cpu_idle(void) tick_nohz_idle_exit(); if (test_thread_flag(TIF_MCCK_PENDING)) s390_handle_mcck(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c index 25d08030a883..2707023c7563 100644 --- a/arch/score/kernel/process.c +++ b/arch/score/kernel/process.c @@ -53,9 +53,7 @@ void __noreturn cpu_idle(void) while (!need_resched()) barrier(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 406508d4ce74..7e4892826563 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c @@ -114,9 +114,7 @@ void cpu_idle(void) rcu_idle_exit(); tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index f793742eec2b..935fdbcd88c2 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -113,9 +113,7 @@ void cpu_idle(void) while (!need_resched()) cpu_relax(); } - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); check_pgt_cache(); } } @@ -138,9 +136,7 @@ void cpu_idle(void) while (!need_resched()) cpu_relax(); } - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); check_pgt_cache(); } } diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 39d8b05201a2..ab9a29268213 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -104,15 +104,13 @@ void cpu_idle(void) rcu_idle_exit(); tick_nohz_idle_exit(); - preempt_enable_no_resched(); - #ifdef CONFIG_HOTPLUG_CPU - if (cpu_is_offline(cpu)) + if (cpu_is_offline(cpu)) { + preempt_enable_no_resched(); cpu_play_dead(); + } #endif - - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 4c1ac6e5347a..6ae495ef2b99 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -108,9 +108,7 @@ void cpu_idle(void) } rcu_idle_exit(); tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index c08d1ff12b7c..49888fefe794 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -119,9 +119,7 @@ void cpu_idle(void) } rcu_idle_exit(); tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index cfa5c90c01db..e34257c70c28 100644 --- a/arch/x86/kernel/process_64.c 
+++ b/arch/x86/kernel/process_64.c @@ -156,9 +156,7 @@ void cpu_idle(void) } tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 47041e7c088c..2c9004770c4e 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -113,9 +113,7 @@ void cpu_idle(void) while (1) { while (!need_resched()) platform_idle(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } } diff --git a/init/main.c b/init/main.c index ff49a6dacfbb..4990f7ec776a 100644 --- a/init/main.c +++ b/init/main.c @@ -374,11 +374,8 @@ static noinline void __init_refok rest_init(void) * at least once to get things moving: */ init_idle_bootup_task(current); - preempt_enable_no_resched(); - schedule(); - + schedule_preempt_disabled(); /* Call into cpu_idle with preempt disabled */ - preempt_disable(); cpu_idle(); } diff --git a/kernel/mutex.c b/kernel/mutex.c index 89096dd8786f..a307cc9c9526 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -240,9 +240,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, /* didn't get the lock, go to sleep: */ spin_unlock_mutex(&lock->wait_lock, flags); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); spin_lock_mutex(&lock->wait_lock, flags); } diff --git a/kernel/softirq.c b/kernel/softirq.c index 4eb3a0fa351e..79b524767a24 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -744,9 +744,7 @@ static int run_ksoftirqd(void * __bind_cpu) while (!kthread_should_stop()) { preempt_disable(); if (!local_softirq_pending()) { - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + schedule_preempt_disabled(); } __set_current_state(TASK_RUNNING); -- cgit v1.2.3 From 909af768e88867016f427264ae39d27a57b6a8ed Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 23 Mar 2012 15:02:51 -0700 Subject: coredump: remove VM_ALWAYSDUMP flag The motivation for this patchset was that I was looking at a way for a qemu-kvm process, to exclude the guest memory from its core dump, which can be quite large. There are already a number of filter flags in /proc//coredump_filter, however, these allow one to specify 'types' of kernel memory, not specific address ranges (which is needed in this case). Since there are no more vma flags available, the first patch eliminates the need for the 'VM_ALWAYSDUMP' flag. The flag is used internally by the kernel to mark vdso and vsyscall pages. However, it is simple enough to check if a vma covers a vdso or vsyscall page without the need for this flag. The second patch then replaces the 'VM_ALWAYSDUMP' flag with a new 'VM_NODUMP' flag, which can be set by userspace using new madvise flags: 'MADV_DONTDUMP', and unset via 'MADV_DODUMP'. The core dump filters continue to work the same as before unless 'MADV_DONTDUMP' is set on the region. The qemu code which implements this features is at: http://people.redhat.com/~jbaron/qemu-dump/qemu-dump.patch In my testing the qemu core dump shrunk from 383MB -> 13MB with this patch. I also believe that the 'MADV_DONTDUMP' flag might be useful for security sensitive apps, which might want to select which areas are dumped. This patch: The VM_ALWAYSDUMP flag is currently used by the coredump code to indicate that a vma is part of a vsyscall or vdso section. 
However, we can determine if a vma is in one these sections by checking it against the gate_vma and checking for a non-NULL return value from arch_vma_name(). Thus, freeing a valuable vma bit. Signed-off-by: Jason Baron Acked-by: Roland McGrath Cc: Chris Metcalf Cc: Avi Kivity Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/kernel/process.c | 3 +-- arch/hexagon/kernel/vdso.c | 3 +-- arch/mips/kernel/vdso.c | 3 +-- arch/powerpc/kernel/vdso.c | 10 ++-------- arch/s390/kernel/vdso.c | 10 ++-------- arch/sh/kernel/vsyscall/vsyscall.c | 3 +-- arch/tile/mm/elf.c | 8 +------- arch/unicore32/kernel/process.c | 2 +- arch/x86/um/mem_32.c | 8 -------- arch/x86/um/vdso/vma.c | 3 +-- arch/x86/vdso/vdso32-setup.c | 17 ++--------------- arch/x86/vdso/vma.c | 3 +-- fs/binfmt_elf.c | 27 +++++++++++++++++++++++++-- include/linux/mm.h | 1 - mm/memory.c | 8 +------- 15 files changed, 40 insertions(+), 69 deletions(-) (limited to 'arch/arm/kernel/process.c') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index c2ae3cd331fe..219e4efee1a6 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -533,8 +533,7 @@ int vectors_user_mapping(void) struct mm_struct *mm = current->mm; return install_special_mapping(mm, 0xffff0000, PAGE_SIZE, VM_READ | VM_EXEC | - VM_MAYREAD | VM_MAYEXEC | - VM_ALWAYSDUMP | VM_RESERVED, + VM_MAYREAD | VM_MAYEXEC | VM_RESERVED, NULL); } diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c index 16277c33308a..f212a453b527 100644 --- a/arch/hexagon/kernel/vdso.c +++ b/arch/hexagon/kernel/vdso.c @@ -78,8 +78,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) /* MAYWRITE to allow gdb to COW and set breakpoints. */ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE, VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| - VM_ALWAYSDUMP, + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, &vdso_page); if (ret) diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index e5cdfd603f8f..0f1af58b036a 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -88,8 +88,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ret = install_special_mapping(mm, addr, PAGE_SIZE, VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| - VM_ALWAYSDUMP, + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, &vdso_page); if (ret) diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 7d14bb697d40..d36ee1055f88 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -263,17 +263,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * the "data" page of the vDSO or you'll stop getting kernel updates * and your nice userland gettimeofday will be totally dead. * It's fine to use that for setting breakpoints in the vDSO code - * pages though - * - * Make sure the vDSO gets into every core dump. - * Dumping its contents makes post-mortem fully interpretable later - * without matching up the same kernel and hardware config to see - * what PC values meant. + * pages though. 
*/ rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| - VM_ALWAYSDUMP, + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdso_pagelist); if (rc) { current->mm->context.vdso_base = 0; diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index e704a9965f90..9c80138206b0 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -241,17 +241,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * on the "data" page of the vDSO or you'll stop getting kernel * updates and your nice userland gettimeofday will be totally dead. * It's fine to use that for setting breakpoints in the vDSO code - * pages though - * - * Make sure the vDSO gets into every core dump. - * Dumping its contents makes post-mortem fully interpretable later - * without matching up the same kernel and hardware config to see - * what PC values meant. + * pages though. */ rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| - VM_ALWAYSDUMP, + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdso_pagelist); if (rc) current->mm->context.vdso_base = 0; diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c index 1d6d51a1ce79..5ca579720a09 100644 --- a/arch/sh/kernel/vsyscall/vsyscall.c +++ b/arch/sh/kernel/vsyscall/vsyscall.c @@ -73,8 +73,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ret = install_special_mapping(mm, addr, PAGE_SIZE, VM_READ | VM_EXEC | - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | - VM_ALWAYSDUMP, + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, syscall_pages); if (unlikely(ret)) goto up_fail; diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c index 55e58e93bfc5..1a00fb64fc88 100644 --- a/arch/tile/mm/elf.c +++ b/arch/tile/mm/elf.c @@ -117,17 +117,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, /* * MAYWRITE to allow gdb to COW and set breakpoints - * - * Make sure the vDSO gets into every core dump. Dumping its - * contents makes post-mortem fully interpretable later - * without matching up the same kernel and hardware config to - * see what PC values meant. */ vdso_base = VDSO_BASE; retval = install_special_mapping(mm, vdso_base, PAGE_SIZE, VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| - VM_ALWAYSDUMP, + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdso_pages); #ifndef __tilegx__ diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index 52edc2b62873..432b4291f37b 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c @@ -381,7 +381,7 @@ int vectors_user_mapping(void) return install_special_mapping(mm, 0xffff0000, PAGE_SIZE, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC | - VM_ALWAYSDUMP | VM_RESERVED, + VM_RESERVED, NULL); } diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c index 639900a6fde9..f40281e5d6a2 100644 --- a/arch/x86/um/mem_32.c +++ b/arch/x86/um/mem_32.c @@ -23,14 +23,6 @@ static int __init gate_vma_init(void) gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; gate_vma.vm_page_prot = __P101; - /* - * Make sure the vDSO gets into every core dump. - * Dumping its contents makes post-mortem fully interpretable later - * without matching up the same kernel and hardware config to see - * what PC values meant. 
- */ - gate_vma.vm_flags |= VM_ALWAYSDUMP; - return 0; } __initcall(gate_vma_init); diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c index 91f4ec9a0a56..af91901babb8 100644 --- a/arch/x86/um/vdso/vma.c +++ b/arch/x86/um/vdso/vma.c @@ -64,8 +64,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE, VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| - VM_ALWAYSDUMP, + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdsop); up_write(&mm->mmap_sem); diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 468d591dde31..a944020fa859 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c @@ -250,13 +250,7 @@ static int __init gate_vma_init(void) gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; gate_vma.vm_page_prot = __P101; - /* - * Make sure the vDSO gets into every core dump. - * Dumping its contents makes post-mortem fully interpretable later - * without matching up the same kernel and hardware config to see - * what PC values meant. - */ - gate_vma.vm_flags |= VM_ALWAYSDUMP; + return 0; } @@ -343,17 +337,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) if (compat_uses_vma || !compat) { /* * MAYWRITE to allow gdb to COW and set breakpoints - * - * Make sure the vDSO gets into every core dump. - * Dumping its contents makes post-mortem fully - * interpretable later without matching up the same - * kernel and hardware config to see what PC values - * meant. */ ret = install_special_mapping(mm, addr, PAGE_SIZE, VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| - VM_ALWAYSDUMP, + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdso32_pages); if (ret) diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 153407c35b75..17e18279649f 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c @@ -124,8 +124,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ret = install_special_mapping(mm, addr, vdso_size, VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| - VM_ALWAYSDUMP, + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdso_pages); if (ret) { current->mm->context.vdso = NULL; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 81878b78c9d4..b64be5b5ac21 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1092,6 +1092,29 @@ out: * Jeremy Fitzhardinge */ +/* + * The purpose of always_dump_vma() is to make sure that special kernel mappings + * that are useful for post-mortem analysis are included in every core dump. + * In that way we ensure that the core dump is fully interpretable later + * without matching up the same kernel and hardware config to see what PC values + * meant. These special mappings include - vDSO, vsyscall, and other + * architecture specific mappings + */ +static bool always_dump_vma(struct vm_area_struct *vma) +{ + /* Any vsyscall mappings? */ + if (vma == get_gate_vma(vma->vm_mm)) + return true; + /* + * arch_vma_name() returns non-NULL for special architecture mappings, + * such as vDSO sections. + */ + if (arch_vma_name(vma)) + return true; + + return false; +} + /* * Decide what to dump of a segment, part, all or none. */ @@ -1100,8 +1123,8 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, { #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type)) - /* The vma can be set up to tell us the answer directly. 
*/ - if (vma->vm_flags & VM_ALWAYSDUMP) + /* always dump the vdso and vsyscall sections */ + if (always_dump_vma(vma)) goto whole; /* Hugetlb memory check */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 7330742e7973..2de2ddba51d4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -111,7 +111,6 @@ extern unsigned int kobjsize(const void *objp); #define VM_HUGEPAGE 0x01000000 /* MADV_HUGEPAGE marked this vma */ #endif #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */ -#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */ #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ diff --git a/mm/memory.c b/mm/memory.c index 3416b6e018d6..6105f475fa86 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3623,13 +3623,7 @@ static int __init gate_vma_init(void) gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; gate_vma.vm_page_prot = __P101; - /* - * Make sure the vDSO gets into every core dump. - * Dumping its contents makes post-mortem fully interpretable later - * without matching up the same kernel and hardware config to see - * what PC values meant. - */ - gate_vma.vm_flags |= VM_ALWAYSDUMP; + return 0; } __initcall(gate_vma_init); -- cgit v1.2.3 From f9d4861fc32b995b1616775614459b8f266c803c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 20 Jan 2012 12:01:13 +0100 Subject: ARM: 7294/1: vectors: use gate_vma for vectors user mapping The current user mapping for the vectors page is inserted as a `horrible hack vma' into each task via arch_setup_additional_pages. This causes problems with the MM subsystem and vm_normal_page, as described here: https://lkml.org/lkml/2012/1/14/55 Following the suggestion from Hugh in the above thread, this patch uses the gate_vma for the vectors user mapping, therefore consolidating the horrible hack VMAs into one. Acked-and-Tested-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/elf.h | 4 ---- arch/arm/include/asm/mmu_context.h | 29 +---------------------------- arch/arm/include/asm/page.h | 2 ++ arch/arm/kernel/process.c | 38 ++++++++++++++++++++++++++++---------- 4 files changed, 31 insertions(+), 42 deletions(-) (limited to 'arch/arm/kernel/process.c') diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 0e9ce8d9686e..38050b1c4800 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -130,8 +130,4 @@ struct mm_struct; extern unsigned long arch_randomize_brk(struct mm_struct *mm); #define arch_randomize_brk arch_randomize_brk -extern int vectors_user_mapping(void); -#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping() -#define ARCH_HAS_SETUP_ADDITIONAL_PAGES - #endif diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 71605d9f8e42..a0b3cac0547c 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -18,6 +18,7 @@ #include #include #include +#include void __check_kvm_seq(struct mm_struct *mm); @@ -133,32 +134,4 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, #define deactivate_mm(tsk,mm) do { } while (0) #define activate_mm(prev,next) switch_mm(prev, next, NULL) -/* - * We are inserting a "fake" vma for the user-accessible vector page so - * gdb and friends can get to it through ptrace and /proc//mem. 
- * But we also want to remove it before the generic code gets to see it - * during process exit or the unmapping of it would cause total havoc. - * (the macro is used as remove_vma() is static to mm/mmap.c) - */ -#define arch_exit_mmap(mm) \ -do { \ - struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \ - if (high_vma) { \ - BUG_ON(high_vma->vm_next); /* it should be last */ \ - if (high_vma->vm_prev) \ - high_vma->vm_prev->vm_next = NULL; \ - else \ - mm->mmap = NULL; \ - rb_erase(&high_vma->vm_rb, &mm->mm_rb); \ - mm->mmap_cache = NULL; \ - mm->map_count--; \ - remove_vma(high_vma); \ - } \ -} while (0) - -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) -{ -} - #endif diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 97b440c25c58..5838361c48b3 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -151,6 +151,8 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); +#define __HAVE_ARCH_GATE_AREA 1 + #ifdef CONFIG_ARM_LPAE #include #else diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 971d65c253a9..e11b523db332 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -526,22 +526,40 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) #ifdef CONFIG_MMU /* * The vectors page is always readable from user space for the - * atomic helpers and the signal restart code. Let's declare a mapping - * for it so it is visible through ptrace and /proc//mem. + * atomic helpers and the signal restart code. Insert it into the + * gate_vma so that it is visible through ptrace and /proc//mem. */ +static struct vm_area_struct gate_vma; -int vectors_user_mapping(void) +static int __init gate_vma_init(void) { - struct mm_struct *mm = current->mm; - return install_special_mapping(mm, 0xffff0000, PAGE_SIZE, - VM_READ | VM_EXEC | - VM_MAYREAD | VM_MAYEXEC | - VM_ALWAYSDUMP | VM_RESERVED, - NULL); + gate_vma.vm_start = 0xffff0000; + gate_vma.vm_end = 0xffff0000 + PAGE_SIZE; + gate_vma.vm_page_prot = PAGE_READONLY_EXEC; + gate_vma.vm_flags = VM_READ | VM_EXEC | + VM_MAYREAD | VM_MAYEXEC | + VM_ALWAYSDUMP; + return 0; +} +arch_initcall(gate_vma_init); + +struct vm_area_struct *get_gate_vma(struct mm_struct *mm) +{ + return &gate_vma; +} + +int in_gate_area(struct mm_struct *mm, unsigned long addr) +{ + return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end); +} + +int in_gate_area_no_mm(unsigned long addr) +{ + return in_gate_area(NULL, addr); } const char *arch_vma_name(struct vm_area_struct *vma) { - return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL; + return (vma == &gate_vma) ? "[vectors]" : NULL; } #endif -- cgit v1.2.3 From 9f97da78bf018206fb623cd351d454af2f105fe0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 28 Mar 2012 18:30:01 +0100 Subject: Disintegrate asm/system.h for ARM Disintegrate asm/system.h for ARM. 
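In practice the disintegration means that code which previously pulled in everything through asm/system.h now includes only the focused headers it needs. The example below is illustrative only: the header names come from the files created by this patch, but which of them a given file requires depends on what it uses, and the per-header comments are a rough guide rather than complete contents.

/* before */
#include <asm/system.h>

/* after */
#include <asm/barrier.h>	/* mb(), rmb(), wmb(), dsb(), dmb(), ... */
#include <asm/cmpxchg.h>	/* xchg(), cmpxchg(), cmpxchg64() */
#include <asm/switch_to.h>	/* switch_to() */
#include <asm/system_info.h>	/* system_rev, cpu_architecture(), ... */
#include <asm/system_misc.h>	/* arm_pm_restart, soft_restart(), ... */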
Signed-off-by: David Howells cc: Russell King cc: linux-arm-kernel@lists.infradead.org --- arch/arm/common/via82c505.c | 1 - arch/arm/include/asm/atomic.h | 4 +- arch/arm/include/asm/barrier.h | 69 ++++ arch/arm/include/asm/bitops.h | 2 +- arch/arm/include/asm/bug.h | 30 ++ arch/arm/include/asm/cmpxchg.h | 295 +++++++++++++++++ arch/arm/include/asm/compiler.h | 15 + arch/arm/include/asm/cp15.h | 2 +- arch/arm/include/asm/div64.h | 2 +- arch/arm/include/asm/dma.h | 1 - arch/arm/include/asm/domain.h | 4 + arch/arm/include/asm/exec.h | 6 + arch/arm/include/asm/io.h | 2 +- arch/arm/include/asm/mmu.h | 7 + arch/arm/include/asm/processor.h | 3 +- arch/arm/include/asm/switch_to.h | 18 + arch/arm/include/asm/system.h | 474 +-------------------------- arch/arm/include/asm/system_info.h | 27 ++ arch/arm/include/asm/system_misc.h | 28 ++ arch/arm/include/asm/uaccess.h | 2 +- arch/arm/kernel/armksyms.c | 1 - arch/arm/kernel/elf.c | 1 + arch/arm/kernel/entry-armv.S | 2 +- arch/arm/kernel/hw_breakpoint.c | 1 - arch/arm/kernel/irq.c | 1 - arch/arm/kernel/kprobes-common.c | 1 + arch/arm/kernel/machine_kexec.c | 2 +- arch/arm/kernel/process.c | 1 - arch/arm/kernel/ptrace.c | 1 - arch/arm/kernel/setup.c | 2 + arch/arm/kernel/sleep.S | 1 - arch/arm/kernel/tcm.c | 1 + arch/arm/kernel/thumbee.c | 1 + arch/arm/kernel/traps.c | 2 +- arch/arm/mach-at91/at91cap9.c | 2 + arch/arm/mach-at91/at91rm9200.c | 1 + arch/arm/mach-at91/at91sam9260.c | 1 + arch/arm/mach-at91/at91sam9261.c | 1 + arch/arm/mach-at91/at91sam9263.c | 1 + arch/arm/mach-at91/at91sam9g45.c | 1 + arch/arm/mach-at91/at91sam9rl.c | 1 + arch/arm/mach-at91/include/mach/system_rev.h | 2 + arch/arm/mach-clps711x/common.c | 1 + arch/arm/mach-clps711x/p720t-leds.c | 1 - arch/arm/mach-davinci/board-da850-evm.c | 1 + arch/arm/mach-ebsa110/core.c | 2 +- arch/arm/mach-ebsa110/leds.c | 1 - arch/arm/mach-footbridge/common.c | 1 + arch/arm/mach-footbridge/dc21285-timer.c | 1 + arch/arm/mach-footbridge/dc21285.c | 1 - arch/arm/mach-footbridge/ebsa285-leds.c | 1 - arch/arm/mach-footbridge/netwinder-hw.c | 1 + arch/arm/mach-footbridge/netwinder-leds.c | 1 - arch/arm/mach-imx/dma-v1.c | 1 - arch/arm/mach-imx/mach-imx6q.c | 1 + arch/arm/mach-imx/mach-mx51_efikamx.c | 1 + arch/arm/mach-imx/mach-mx51_efikasb.c | 1 + arch/arm/mach-integrator/core.c | 1 - arch/arm/mach-integrator/leds.c | 1 - arch/arm/mach-integrator/pci.c | 1 - arch/arm/mach-integrator/pci_v3.c | 1 - arch/arm/mach-iop33x/uart.c | 1 - arch/arm/mach-ixp2000/core.c | 1 - arch/arm/mach-ixp2000/enp2611.c | 1 - arch/arm/mach-ixp2000/ixdp2400.c | 1 - arch/arm/mach-ixp2000/ixdp2800.c | 1 - arch/arm/mach-ixp2000/ixdp2x00.c | 1 - arch/arm/mach-ixp2000/ixdp2x01.c | 1 - arch/arm/mach-ixp2000/pci.c | 1 - arch/arm/mach-ixp23xx/core.c | 1 - arch/arm/mach-ixp23xx/espresso.c | 1 - arch/arm/mach-ixp23xx/ixdp2351.c | 1 - arch/arm/mach-ixp23xx/pci.c | 1 - arch/arm/mach-ixp23xx/roadrunner.c | 1 - arch/arm/mach-ixp4xx/common-pci.c | 1 - arch/arm/mach-ixp4xx/goramo_mlr.c | 1 - arch/arm/mach-ks8695/time.c | 1 + arch/arm/mach-mmp/common.c | 1 + arch/arm/mach-mmp/pxa168.c | 1 + arch/arm/mach-msm/board-sapphire.c | 1 - arch/arm/mach-mxs/system.c | 2 +- arch/arm/mach-omap1/id.c | 1 + arch/arm/mach-omap1/leds-h2p2-debug.c | 1 - arch/arm/mach-omap1/leds-innovator.c | 1 - arch/arm/mach-omap1/leds-osk.c | 1 - arch/arm/mach-omap1/mux.c | 1 - arch/arm/mach-omap1/time.c | 1 - arch/arm/mach-omap1/timer32k.c | 1 - arch/arm/mach-omap2/board-omap3touchbook.c | 1 + arch/arm/mach-omap2/board-rx51-peripherals.c | 1 + arch/arm/mach-omap2/mux.c | 1 
- arch/arm/mach-omap2/omap-mpuss-lowpower.c | 1 - arch/arm/mach-omap2/pm24xx.c | 1 + arch/arm/mach-omap2/pm34xx.c | 1 + arch/arm/mach-omap2/pm44xx.c | 1 + arch/arm/mach-omap2/sleep44xx.S | 1 - arch/arm/mach-orion5x/common.c | 1 + arch/arm/mach-orion5x/dns323-setup.c | 1 + arch/arm/mach-orion5x/ls-chl-setup.c | 1 - arch/arm/mach-orion5x/ls_hgl-setup.c | 1 - arch/arm/mach-orion5x/lsmini-setup.c | 1 - arch/arm/mach-pnx4008/core.c | 2 +- arch/arm/mach-pnx4008/dma.c | 1 - arch/arm/mach-pnx4008/irq.c | 1 - arch/arm/mach-pnx4008/time.c | 1 - arch/arm/mach-pxa/cm-x300.c | 1 + arch/arm/mach-pxa/colibri-pxa3xx.c | 1 + arch/arm/mach-pxa/corgi.c | 1 - arch/arm/mach-pxa/generic.c | 1 - arch/arm/mach-pxa/leds-idp.c | 1 - arch/arm/mach-pxa/leds-lubbock.c | 1 - arch/arm/mach-pxa/leds-mainstone.c | 1 - arch/arm/mach-pxa/magician.c | 1 + arch/arm/mach-pxa/poodle.c | 1 - arch/arm/mach-pxa/reset.c | 1 + arch/arm/mach-pxa/viper.c | 1 + arch/arm/mach-pxa/zeus.c | 1 + arch/arm/mach-realview/core.c | 1 - arch/arm/mach-rpc/riscpc.c | 1 + arch/arm/mach-s3c2410/s3c2410.c | 1 + arch/arm/mach-s3c2412/s3c2412.c | 1 + arch/arm/mach-s3c2416/s3c2416.c | 1 + arch/arm/mach-s3c2440/s3c244x.c | 1 + arch/arm/mach-s3c2443/s3c2443.c | 1 + arch/arm/mach-s3c64xx/common.c | 1 + arch/arm/mach-s5p64x0/common.c | 1 + arch/arm/mach-s5pc100/common.c | 1 + arch/arm/mach-sa1100/dma.c | 1 - arch/arm/mach-sa1100/generic.c | 2 +- arch/arm/mach-sa1100/leds-assabet.c | 1 - arch/arm/mach-sa1100/leds-badge4.c | 1 - arch/arm/mach-sa1100/leds-cerf.c | 1 - arch/arm/mach-sa1100/leds-hackkit.c | 1 - arch/arm/mach-sa1100/leds-lart.c | 1 - arch/arm/mach-sa1100/pm.c | 1 - arch/arm/mach-shark/leds.c | 1 - arch/arm/mach-shmobile/cpuidle.c | 1 - arch/arm/mach-shmobile/include/mach/system.h | 2 + arch/arm/mach-shmobile/pm-r8a7779.c | 1 - arch/arm/mach-shmobile/pm-sh7372.c | 1 - arch/arm/mach-shmobile/suspend.c | 2 +- arch/arm/mach-tegra/cpu-tegra.c | 1 - arch/arm/mach-versatile/core.c | 1 - arch/arm/mach-versatile/pci.c | 1 - arch/arm/mach-w90x900/cpu.c | 1 + arch/arm/mm/alignment.c | 1 + arch/arm/mm/fault.c | 3 +- arch/arm/mm/flush.c | 1 - arch/arm/mm/idmap.c | 1 + arch/arm/mm/ioremap.c | 1 + arch/arm/mm/mmu.c | 1 + arch/arm/mm/proc-fa526.S | 1 - arch/arm/nwfpe/fpa11.c | 1 - arch/arm/plat-iop/i2c.c | 1 - arch/arm/plat-iop/pci.c | 1 - arch/arm/plat-iop/restart.c | 1 + arch/arm/plat-mxc/system.c | 2 +- arch/arm/plat-omap/debug-leds.c | 1 - arch/arm/plat-omap/dma.c | 1 - arch/arm/plat-omap/mux.c | 1 - arch/arm/plat-pxa/dma.c | 1 - arch/arm/plat-s3c24xx/cpu.c | 1 + arch/arm/plat-s3c24xx/dma.c | 1 - arch/arm/plat-samsung/cpu.c | 1 - arch/arm/plat-samsung/time.c | 1 - arch/arm/plat-spear/restart.c | 1 + arch/arm/vfp/vfpmodule.c | 1 + drivers/net/ethernet/8390/etherh.c | 1 + drivers/tty/serial/21285.c | 1 + drivers/watchdog/wdt285.c | 1 + 170 files changed, 592 insertions(+), 565 deletions(-) create mode 100644 arch/arm/include/asm/barrier.h create mode 100644 arch/arm/include/asm/cmpxchg.h create mode 100644 arch/arm/include/asm/compiler.h create mode 100644 arch/arm/include/asm/exec.h create mode 100644 arch/arm/include/asm/switch_to.h create mode 100644 arch/arm/include/asm/system_info.h create mode 100644 arch/arm/include/asm/system_misc.h (limited to 'arch/arm/kernel/process.c') diff --git a/arch/arm/common/via82c505.c b/arch/arm/common/via82c505.c index 67dd2affc57a..1171a5010aea 100644 --- a/arch/arm/common/via82c505.c +++ b/arch/arm/common/via82c505.c @@ -6,7 +6,6 @@ #include #include -#include #include diff --git a/arch/arm/include/asm/atomic.h 
b/arch/arm/include/asm/atomic.h index 86976d034382..68374ba6a943 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -13,7 +13,9 @@ #include #include -#include +#include +#include +#include #define ATOMIC_INIT(i) { (i) } diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h new file mode 100644 index 000000000000..44f4a09ff37b --- /dev/null +++ b/arch/arm/include/asm/barrier.h @@ -0,0 +1,69 @@ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#ifndef __ASSEMBLY__ + +#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); + +#if __LINUX_ARM_ARCH__ >= 7 || \ + (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) +#define sev() __asm__ __volatile__ ("sev" : : : "memory") +#define wfe() __asm__ __volatile__ ("wfe" : : : "memory") +#define wfi() __asm__ __volatile__ ("wfi" : : : "memory") +#endif + +#if __LINUX_ARM_ARCH__ >= 7 +#define isb() __asm__ __volatile__ ("isb" : : : "memory") +#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") +#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") +#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 +#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ + : : "r" (0) : "memory") +#elif defined(CONFIG_CPU_FA526) +#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb() __asm__ __volatile__ ("" : : : "memory") +#else +#define isb() __asm__ __volatile__ ("" : : : "memory") +#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb() __asm__ __volatile__ ("" : : : "memory") +#endif + +#ifdef CONFIG_ARCH_HAS_BARRIERS +#include +#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) +#include +#define mb() do { dsb(); outer_sync(); } while (0) +#define rmb() dsb() +#define wmb() mb() +#else +#include +#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#endif + +#ifndef CONFIG_SMP +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#else +#define smp_mb() dmb() +#define smp_rmb() dmb() +#define smp_wmb() dmb() +#endif + +#define read_barrier_depends() do { } while(0) +#define smp_read_barrier_depends() do { } while(0) + +#define set_mb(var, value) do { var = value; smp_mb(); } while (0) + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_BARRIER_H */ diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index f7419ef9c8f9..e691ec91e4d3 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -24,7 +24,7 @@ #endif #include -#include +#include #define smp_mb__before_clear_bit() smp_mb() #define smp_mb__after_clear_bit() smp_mb() diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index fac79dceb736..7af5c6c3653a 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -1,6 +1,7 @@ #ifndef _ASMARM_BUG_H #define _ASMARM_BUG_H +#include #ifdef CONFIG_BUG @@ -57,4 +58,33 @@ do { \ #include +struct pt_regs; +void die(const char *msg, struct pt_regs *regs, int err); + +struct siginfo; +void 
arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, + unsigned long err, unsigned long trap); + +#ifdef CONFIG_ARM_LPAE +#define FAULT_CODE_ALIGNMENT 33 +#define FAULT_CODE_DEBUG 34 +#else +#define FAULT_CODE_ALIGNMENT 1 +#define FAULT_CODE_DEBUG 2 +#endif + +void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, + struct pt_regs *), + int sig, int code, const char *name); + +void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, + struct pt_regs *), + int sig, int code, const char *name); + +extern asmlinkage void c_backtrace(unsigned long fp, int pmode); + +struct mm_struct; +extern void show_pte(struct mm_struct *mm, unsigned long addr); +extern void __show_regs(struct pt_regs *); + #endif diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h new file mode 100644 index 000000000000..d41d7cbf0ada --- /dev/null +++ b/arch/arm/include/asm/cmpxchg.h @@ -0,0 +1,295 @@ +#ifndef __ASM_ARM_CMPXCHG_H +#define __ASM_ARM_CMPXCHG_H + +#include +#include + +#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) +/* + * On the StrongARM, "swp" is terminally broken since it bypasses the + * cache totally. This means that the cache becomes inconsistent, and, + * since we use normal loads/stores as well, this is really bad. + * Typically, this causes oopsen in filp_close, but could have other, + * more disastrous effects. There are two work-arounds: + * 1. Disable interrupts and emulate the atomic swap + * 2. Clean the cache, perform atomic swap, flush the cache + * + * We choose (1) since its the "easiest" to achieve here and is not + * dependent on the processor type. + * + * NOTE that this solution won't work on an SMP system, so explcitly + * forbid it here. + */ +#define swp_is_buggy +#endif + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + extern void __bad_xchg(volatile void *, int); + unsigned long ret; +#ifdef swp_is_buggy + unsigned long flags; +#endif +#if __LINUX_ARM_ARCH__ >= 6 + unsigned int tmp; +#endif + + smp_mb(); + + switch (size) { +#if __LINUX_ARM_ARCH__ >= 6 + case 1: + asm volatile("@ __xchg1\n" + "1: ldrexb %0, [%3]\n" + " strexb %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("@ __xchg4\n" + "1: ldrex %0, [%3]\n" + " strex %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; +#elif defined(swp_is_buggy) +#ifdef CONFIG_SMP +#error SMP is not supported on this platform +#endif + case 1: + raw_local_irq_save(flags); + ret = *(volatile unsigned char *)ptr; + *(volatile unsigned char *)ptr = x; + raw_local_irq_restore(flags); + break; + + case 4: + raw_local_irq_save(flags); + ret = *(volatile unsigned long *)ptr; + *(volatile unsigned long *)ptr = x; + raw_local_irq_restore(flags); + break; +#else + case 1: + asm volatile("@ __xchg1\n" + " swpb %0, %1, [%2]" + : "=&r" (ret) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("@ __xchg4\n" + " swp %0, %1, [%2]" + : "=&r" (ret) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; +#endif + default: + __bad_xchg(ptr, size), ret = 0; + break; + } + smp_mb(); + + return ret; +} + +#define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +#include + +#if __LINUX_ARM_ARCH__ < 6 +/* min ARCH < ARMv6 */ + +#ifdef CONFIG_SMP +#error "SMP is not supported on this platform" +#endif + +/* 
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. + */ +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#ifndef CONFIG_SMP +#include +#endif + +#else /* min ARCH >= ARMv6 */ + +extern void __bad_cmpxchg(volatile void *ptr, int size); + +/* + * cmpxchg only support 32-bits operands on ARMv6. + */ + +static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long oldval, res; + + switch (size) { +#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ + case 1: + do { + asm volatile("@ __cmpxchg1\n" + " ldrexb %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexbeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; + case 2: + do { + asm volatile("@ __cmpxchg1\n" + " ldrexh %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexheq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; +#endif + case 4: + do { + asm volatile("@ __cmpxchg4\n" + " ldrex %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; + default: + __bad_cmpxchg(ptr, size); + oldval = 0; + } + + return oldval; +} + +static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long ret; + + smp_mb(); + ret = __cmpxchg(ptr, old, new, size); + smp_mb(); + + return ret; +} + +#define cmpxchg(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) + +static inline unsigned long __cmpxchg_local(volatile void *ptr, + unsigned long old, + unsigned long new, int size) +{ + unsigned long ret; + + switch (size) { +#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */ + case 1: + case 2: + ret = __cmpxchg_local_generic(ptr, old, new, size); + break; +#endif + default: + ret = __cmpxchg(ptr, old, new, size); + } + + return ret; +} + +#define cmpxchg_local(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) + +#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ + +/* + * Note : ARMv7-M (currently unsupported by Linux) does not support + * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should + * not be allowed to use __cmpxchg64. 
+ */ +static inline unsigned long long __cmpxchg64(volatile void *ptr, + unsigned long long old, + unsigned long long new) +{ + register unsigned long long oldval asm("r0"); + register unsigned long long __old asm("r2") = old; + register unsigned long long __new asm("r4") = new; + unsigned long res; + + do { + asm volatile( + " @ __cmpxchg8\n" + " ldrexd %1, %H1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " teqeq %H1, %H3\n" + " strexdeq %0, %4, %H4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (__old), "r" (__new) + : "memory", "cc"); + } while (res); + + return oldval; +} + +static inline unsigned long long __cmpxchg64_mb(volatile void *ptr, + unsigned long long old, + unsigned long long new) +{ + unsigned long long ret; + + smp_mb(); + ret = __cmpxchg64(ptr, old, new); + smp_mb(); + + return ret; +} + +#define cmpxchg64(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \ + (unsigned long long)(o), \ + (unsigned long long)(n))) + +#define cmpxchg64_local(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg64((ptr), \ + (unsigned long long)(o), \ + (unsigned long long)(n))) + +#else /* min ARCH = ARMv6 */ + +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#endif + +#endif /* __LINUX_ARM_ARCH__ >= 6 */ + +#endif /* __ASM_ARM_CMPXCHG_H */ diff --git a/arch/arm/include/asm/compiler.h b/arch/arm/include/asm/compiler.h new file mode 100644 index 000000000000..8155db2f7fa1 --- /dev/null +++ b/arch/arm/include/asm/compiler.h @@ -0,0 +1,15 @@ +#ifndef __ASM_ARM_COMPILER_H +#define __ASM_ARM_COMPILER_H + +/* + * This is used to ensure the compiler did actually allocate the register we + * asked it for some inline assembly sequences. Apparently we can't trust + * the compiler from one version to another so a bit of paranoia won't hurt. + * This string is meant to be concatenated with the inline asm string and + * will cause compilation to stop on mismatch. + * (for details, see gcc PR 15089) + */ +#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" + + +#endif /* __ASM_ARM_COMPILER_H */ diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index 3dabd8dd4049..5ef4d8015a60 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -1,7 +1,7 @@ #ifndef __ASM_ARM_CP15_H #define __ASM_ARM_CP15_H -#include +#include /* * CR1 bits (CP#15 CR1) diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h index d3f0a9eee9f6..fe92ccf1d0b0 100644 --- a/arch/arm/include/asm/div64.h +++ b/arch/arm/include/asm/div64.h @@ -1,8 +1,8 @@ #ifndef __ASM_ARM_DIV64 #define __ASM_ARM_DIV64 -#include #include +#include /* * The semantics of do_div() are: diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 69a5b0b6455c..5694a0d6576b 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -19,7 +19,6 @@ * It should not be re-used except for that purpose. 
*/ #include -#include #include #include diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index b5dc173d336f..3d2220498abc 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -10,6 +10,10 @@ #ifndef __ASM_PROC_DOMAIN_H #define __ASM_PROC_DOMAIN_H +#ifndef __ASSEMBLY__ +#include +#endif + /* * Domain numbers * diff --git a/arch/arm/include/asm/exec.h b/arch/arm/include/asm/exec.h new file mode 100644 index 000000000000..7c4fbef72b3a --- /dev/null +++ b/arch/arm/include/asm/exec.h @@ -0,0 +1,6 @@ +#ifndef __ASM_ARM_EXEC_H +#define __ASM_ARM_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* __ASM_ARM_EXEC_H */ diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 9275828feb3d..bae7eb6011d2 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -26,7 +26,6 @@ #include #include #include -#include #include /* @@ -99,6 +98,7 @@ static inline void __iomem *__typesafe_io(unsigned long addr) /* IO barriers */ #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE +#include #define __iormb() rmb() #define __iowmb() wmb() #else diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index 14965658a923..b8e580a297e4 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -34,4 +34,11 @@ typedef struct { #endif +/* + * switch_mm() may do a full cache flush over the context switch, + * so enable interrupts over the context switch to avoid high + * latency. + */ +#define __ARCH_WANT_INTERRUPTS_ON_CTXSW + #endif diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index cb8d638924fd..f4d7f56ee51f 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -22,7 +22,6 @@ #include #include #include -#include #ifdef __KERNEL__ #define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \ @@ -90,6 +89,8 @@ unsigned long get_wchan(struct task_struct *p); #define cpu_relax() barrier() #endif +void cpu_idle_wait(void); + /* * Create a new kernel thread */ diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h new file mode 100644 index 000000000000..fa09e6b49bf1 --- /dev/null +++ b/arch/arm/include/asm/switch_to.h @@ -0,0 +1,18 @@ +#ifndef __ASM_ARM_SWITCH_TO_H +#define __ASM_ARM_SWITCH_TO_H + +#include + +/* + * switch_to(prev, next) should switch from task `prev' to `next' + * `prev' will never be the same as `next'. schedule() itself + * contains the memory barrier to tell GCC not to cache `current'. + */ +extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *); + +#define switch_to(prev,next,last) \ +do { \ + last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ +} while (0) + +#endif /* __ASM_ARM_SWITCH_TO_H */ diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 774c41e8addf..74542c52f9be 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -1,466 +1,8 @@ -#ifndef __ASM_ARM_SYSTEM_H -#define __ASM_ARM_SYSTEM_H - -#ifdef __KERNEL__ - -#define CPU_ARCH_UNKNOWN 0 -#define CPU_ARCH_ARMv3 1 -#define CPU_ARCH_ARMv4 2 -#define CPU_ARCH_ARMv4T 3 -#define CPU_ARCH_ARMv5 4 -#define CPU_ARCH_ARMv5T 5 -#define CPU_ARCH_ARMv5TE 6 -#define CPU_ARCH_ARMv5TEJ 7 -#define CPU_ARCH_ARMv6 8 -#define CPU_ARCH_ARMv7 9 - -/* - * This is used to ensure the compiler did actually allocate the register we - * asked it for some inline assembly sequences. 
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 69a5b0b6455c..5694a0d6576b 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -19,7 +19,6 @@
  * It should not be re-used except for that purpose.
  */
 
 #include
-#include
 #include
 #include
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index b5dc173d336f..3d2220498abc 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -10,6 +10,10 @@
 #ifndef __ASM_PROC_DOMAIN_H
 #define __ASM_PROC_DOMAIN_H
 
+#ifndef __ASSEMBLY__
+#include
+#endif
+
 /*
  * Domain numbers
  *
diff --git a/arch/arm/include/asm/exec.h b/arch/arm/include/asm/exec.h
new file mode 100644
index 000000000000..7c4fbef72b3a
--- /dev/null
+++ b/arch/arm/include/asm/exec.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_ARM_EXEC_H
+#define __ASM_ARM_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* __ASM_ARM_EXEC_H */
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 9275828feb3d..bae7eb6011d2 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -26,7 +26,6 @@
 #include
 #include
 #include
-#include
 #include
 
 /*
@@ -99,6 +98,7 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
 
 /* IO barriers */
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#include
 #define __iormb()		rmb()
 #define __iowmb()		wmb()
 #else
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 14965658a923..b8e580a297e4 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -34,4 +34,11 @@ typedef struct {
 
 #endif
 
+/*
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
+ */
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
+
 #endif
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index cb8d638924fd..f4d7f56ee51f 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -22,7 +22,6 @@
 #include
 #include
 #include
-#include
 
 #ifdef __KERNEL__
 #define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -90,6 +89,8 @@ unsigned long get_wchan(struct task_struct *p);
 #define cpu_relax()			barrier()
 #endif
 
+void cpu_idle_wait(void);
+
 /*
  * Create a new kernel thread
  */
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
new file mode 100644
index 000000000000..fa09e6b49bf1
--- /dev/null
+++ b/arch/arm/include/asm/switch_to.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_ARM_SWITCH_TO_H
+#define __ASM_ARM_SWITCH_TO_H
+
+#include
+
+/*
+ * switch_to(prev, next) should switch from task `prev' to `next'
+ * `prev' will never be the same as `next'.  schedule() itself
+ * contains the memory barrier to tell GCC not to cache `current'.
+ */
+extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
+
+#define switch_to(prev,next,last)					\
+do {									\
+	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
+} while (0)
+
+#endif /* __ASM_ARM_SWITCH_TO_H */
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 774c41e8addf..74542c52f9be 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -1,466 +1,8 @@
-#ifndef __ASM_ARM_SYSTEM_H
-#define __ASM_ARM_SYSTEM_H
-
-#ifdef __KERNEL__
-
-#define CPU_ARCH_UNKNOWN	0
-#define CPU_ARCH_ARMv3		1
-#define CPU_ARCH_ARMv4		2
-#define CPU_ARCH_ARMv4T		3
-#define CPU_ARCH_ARMv5		4
-#define CPU_ARCH_ARMv5T		5
-#define CPU_ARCH_ARMv5TE	6
-#define CPU_ARCH_ARMv5TEJ	7
-#define CPU_ARCH_ARMv6		8
-#define CPU_ARCH_ARMv7		9
-
-/*
- * This is used to ensure the compiler did actually allocate the register we
- * asked it for some inline assembly sequences. Apparently we can't trust
- * the compiler from one version to another so a bit of paranoia won't hurt.
- * This string is meant to be concatenated with the inline asm string and
- * will cause compilation to stop on mismatch.
- * (for details, see gcc PR 15089)
- */
-#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
-
-#ifndef __ASSEMBLY__
-
-#include
-#include
-#include
-
-#include
-
-struct thread_info;
-struct task_struct;
-
-/* information about the system we're running on */
-extern unsigned int system_rev;
-extern unsigned int system_serial_low;
-extern unsigned int system_serial_high;
-extern unsigned int mem_fclk_21285;
-
-struct pt_regs;
-
-void die(const char *msg, struct pt_regs *regs, int err);
-
-struct siginfo;
-void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
-		unsigned long err, unsigned long trap);
-
-#ifdef CONFIG_ARM_LPAE
-#define FAULT_CODE_ALIGNMENT	33
-#define FAULT_CODE_DEBUG	34
-#else
-#define FAULT_CODE_ALIGNMENT	1
-#define FAULT_CODE_DEBUG	2
-#endif
-
-void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
-				       struct pt_regs *),
-		     int sig, int code, const char *name);
-
-void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
-				       struct pt_regs *),
-		     int sig, int code, const char *name);
-
-#define xchg(ptr,x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
-
-struct mm_struct;
-extern void show_pte(struct mm_struct *mm, unsigned long addr);
-extern void __show_regs(struct pt_regs *);
-
-extern int __pure cpu_architecture(void);
-extern void cpu_init(void);
-
-void soft_restart(unsigned long);
-extern void (*arm_pm_restart)(char str, const char *cmd);
-
-#define UDBG_UNDEFINED	(1 << 0)
-#define UDBG_SYSCALL	(1 << 1)
-#define UDBG_BADABORT	(1 << 2)
-#define UDBG_SEGV	(1 << 3)
-#define UDBG_BUS	(1 << 4)
-
-extern unsigned int user_debug;
-
-#if __LINUX_ARM_ARCH__ >= 7 ||		\
-	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
-#define sev()	__asm__ __volatile__ ("sev" : : : "memory")
-#define wfe()	__asm__ __volatile__ ("wfe" : : : "memory")
-#define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
-#endif
-
-#if __LINUX_ARM_ARCH__ >= 7
-#define isb() __asm__ __volatile__ ("isb" : : : "memory")
-#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
-#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
-				    : : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
-				    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
-				    : : "r" (0) : "memory")
-#elif defined(CONFIG_CPU_FA526)
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
-				    : : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
-				    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
-#else
-#define isb() __asm__ __volatile__ ("" : : : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
-				    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
-#endif
-
-#ifdef CONFIG_ARCH_HAS_BARRIERS
-#include
-#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb()		do { dsb(); outer_sync(); } while (0)
-#define rmb()		dsb()
-#define wmb()		mb()
-#else
-#include
-#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#endif
-
-#ifndef CONFIG_SMP
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#else
-#define smp_mb()	dmb()
-#define smp_rmb()	dmb()
-#define smp_wmb()	dmb()
-#endif
-
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
-#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
-#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
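/*
 * Usage sketch for the smp_*() barriers defined above (hypothetical
 * example code, not part of the patch): the store-side smp_wmb()
 * pairs with the load-side smp_rmb() so that a reader who observes
 * 'ready' also observes 'payload'.
 */
static int payload;
static int ready;

static void publish(int v)
{
	payload = v;
	smp_wmb();	/* order the payload store before the ready store */
	ready = 1;
}

static int try_consume(int *out)
{
	if (!ready)
		return 0;
	smp_rmb();	/* order the ready load before the payload load */
	*out = payload;
	return 1;
}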
-
-/*
- * switch_mm() may do a full cache flush over the context switch,
- * so enable interrupts over the context switch to avoid high
- * latency.
- */
-#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
-
-/*
- * switch_to(prev, next) should switch from task `prev' to `next'
- * `prev' will never be the same as `next'.  schedule() itself
- * contains the memory barrier to tell GCC not to cache `current'.
- */
-extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
-
-#define switch_to(prev,next,last)					\
-do {									\
-	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
-} while (0)
-
-#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
-/*
- * On the StrongARM, "swp" is terminally broken since it bypasses the
- * cache totally.  This means that the cache becomes inconsistent, and,
- * since we use normal loads/stores as well, this is really bad.
- * Typically, this causes oopsen in filp_close, but could have other,
- * more disastrous effects.  There are two work-arounds:
- *  1. Disable interrupts and emulate the atomic swap
- *  2. Clean the cache, perform atomic swap, flush the cache
- *
- * We choose (1) since it's the "easiest" to achieve here and is not
- * dependent on the processor type.
- *
- * NOTE that this solution won't work on an SMP system, so explicitly
- * forbid it here.
- */
-#define swp_is_buggy
-#endif
-
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-{
-	extern void __bad_xchg(volatile void *, int);
-	unsigned long ret;
-#ifdef swp_is_buggy
-	unsigned long flags;
-#endif
-#if __LINUX_ARM_ARCH__ >= 6
-	unsigned int tmp;
-#endif
-
-	smp_mb();
-
-	switch (size) {
-#if __LINUX_ARM_ARCH__ >= 6
-	case 1:
-		asm volatile("@	__xchg1\n"
-		"1:	ldrexb	%0, [%3]\n"
-		"	strexb	%1, %2, [%3]\n"
-		"	teq	%1, #0\n"
-		"	bne	1b"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
-		break;
-	case 4:
-		asm volatile("@	__xchg4\n"
-		"1:	ldrex	%0, [%3]\n"
-		"	strex	%1, %2, [%3]\n"
-		"	teq	%1, #0\n"
-		"	bne	1b"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
-		break;
-#elif defined(swp_is_buggy)
-#ifdef CONFIG_SMP
-#error SMP is not supported on this platform
-#endif
-	case 1:
-		raw_local_irq_save(flags);
-		ret = *(volatile unsigned char *)ptr;
-		*(volatile unsigned char *)ptr = x;
-		raw_local_irq_restore(flags);
-		break;
-
-	case 4:
-		raw_local_irq_save(flags);
-		ret = *(volatile unsigned long *)ptr;
-		*(volatile unsigned long *)ptr = x;
-		raw_local_irq_restore(flags);
-		break;
-#else
-	case 1:
-		asm volatile("@	__xchg1\n"
-		"	swpb	%0, %1, [%2]"
-			: "=&r" (ret)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
-		break;
-	case 4:
-		asm volatile("@	__xchg4\n"
-		"	swp	%0, %1, [%2]"
-			: "=&r" (ret)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
-		break;
-#endif
-	default:
-		__bad_xchg(ptr, size), ret = 0;
-		break;
-	}
-	smp_mb();
-
-	return ret;
-}
-
-extern void disable_hlt(void);
-extern void enable_hlt(void);
-
-void cpu_idle_wait(void);
-
-#include
-
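/*
 * Usage sketch for the __xchg()-backed xchg() macro above (hypothetical
 * code, not part of the patch): hand a pointer off so that exactly one
 * caller observes it.
 */
static void *pending_work;

static void *grab_pending(void)
{
	/* atomically fetch the old pointer and leave NULL behind */
	return xchg(&pending_work, NULL);
}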
-#if __LINUX_ARM_ARCH__ < 6
-/* min ARCH < ARMv6 */
-
-#ifdef CONFIG_SMP
-#error "SMP is not supported on this platform"
-#endif
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#ifndef CONFIG_SMP
-#include
-#endif
-
-#else	/* min ARCH >= ARMv6 */
-
-extern void __bad_cmpxchg(volatile void *ptr, int size);
-
-/*
- * cmpxchg only supports 32-bit operands on ARMv6.
- */
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long oldval, res;
-
-	switch (size) {
-#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
-	case 1:
-		do {
-			asm volatile("@ __cmpxchg1\n"
-			"	ldrexb	%1, [%2]\n"
-			"	mov	%0, #0\n"
-			"	teq	%1, %3\n"
-			"	strexbeq %0, %4, [%2]\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
-				: "memory", "cc");
-		} while (res);
-		break;
-	case 2:
-		do {
-			asm volatile("@ __cmpxchg2\n"
-			"	ldrexh	%1, [%2]\n"
-			"	mov	%0, #0\n"
-			"	teq	%1, %3\n"
-			"	strexheq %0, %4, [%2]\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
-				: "memory", "cc");
-		} while (res);
-		break;
-#endif
-	case 4:
-		do {
-			asm volatile("@ __cmpxchg4\n"
-			"	ldrex	%1, [%2]\n"
-			"	mov	%0, #0\n"
-			"	teq	%1, %3\n"
-			"	strexeq %0, %4, [%2]\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
-				: "memory", "cc");
-		} while (res);
-		break;
-	default:
-		__bad_cmpxchg(ptr, size);
-		oldval = 0;
-	}
-
-	return oldval;
-}
-
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-					 unsigned long new, int size)
-{
-	unsigned long ret;
-
-	smp_mb();
-	ret = __cmpxchg(ptr, old, new, size);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
-					  (unsigned long)(o),		\
-					  (unsigned long)(n),		\
-					  sizeof(*(ptr))))
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
-{
-	unsigned long ret;
-
-	switch (size) {
-#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
-	case 1:
-	case 2:
-		ret = __cmpxchg_local_generic(ptr, old, new, size);
-		break;
-#endif
-	default:
-		ret = __cmpxchg(ptr, old, new, size);
-	}
-
-	return ret;
-}
-
-#define cmpxchg_local(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
-				       (unsigned long)(o),		\
-				       (unsigned long)(n),		\
-				       sizeof(*(ptr))))
-
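/*
 * Usage sketch for cmpxchg() as defined above (hypothetical code, not
 * part of the patch): cmpxchg() returns the value it actually found at
 * the location, so comparing against the expected old value tells the
 * caller whether it won the race.
 */
static unsigned int claimed;

static int claim_once(void)
{
	/* only the caller that observes the 0 -> 1 transition wins */
	return cmpxchg(&claimed, 0, 1) == 0;
}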
-#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
-
-/*
- * Note : ARMv7-M (currently unsupported by Linux) does not support
- * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
- * not be allowed to use __cmpxchg64.
- */
-static inline unsigned long long __cmpxchg64(volatile void *ptr,
-					     unsigned long long old,
-					     unsigned long long new)
-{
-	register unsigned long long oldval asm("r0");
-	register unsigned long long __old asm("r2") = old;
-	register unsigned long long __new asm("r4") = new;
-	unsigned long res;
-
-	do {
-		asm volatile(
-		"	@ __cmpxchg8\n"
-		"	ldrexd	%1, %H1, [%2]\n"
-		"	mov	%0, #0\n"
-		"	teq	%1, %3\n"
-		"	teqeq	%H1, %H3\n"
-		"	strexdeq %0, %4, %H4, [%2]\n"
-			: "=&r" (res), "=&r" (oldval)
-			: "r" (ptr), "Ir" (__old), "r" (__new)
-			: "memory", "cc");
-	} while (res);
-
-	return oldval;
-}
-
-static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
-						unsigned long long old,
-						unsigned long long new)
-{
-	unsigned long long ret;
-
-	smp_mb();
-	ret = __cmpxchg64(ptr, old, new);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg64(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
-					(unsigned long long)(o),	\
-					(unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr,o,n)					\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
-					(unsigned long long)(o),	\
-					(unsigned long long)(n)))
-
-#else /* min ARCH = ARMv6 */
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#endif
-
-#endif /* __LINUX_ARM_ARCH__ >= 6 */
-
-#endif /* __ASSEMBLY__ */
-
-#define arch_align_stack(x) (x)
-
-#endif /* __KERNEL__ */
-
-#endif
+/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h
new file mode 100644
index 000000000000..dfd386d0c022
--- /dev/null
+++ b/arch/arm/include/asm/system_info.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_ARM_SYSTEM_INFO_H
+#define __ASM_ARM_SYSTEM_INFO_H
+
+#define CPU_ARCH_UNKNOWN	0
+#define CPU_ARCH_ARMv3		1
+#define CPU_ARCH_ARMv4		2
+#define CPU_ARCH_ARMv4T		3
+#define CPU_ARCH_ARMv5		4
+#define CPU_ARCH_ARMv5T		5
+#define CPU_ARCH_ARMv5TE	6
+#define CPU_ARCH_ARMv5TEJ	7
+#define CPU_ARCH_ARMv6		8
+#define CPU_ARCH_ARMv7		9
+
+#ifndef __ASSEMBLY__
+
+/* information about the system we're running on */
+extern unsigned int system_rev;
+extern unsigned int system_serial_low;
+extern unsigned int system_serial_high;
+extern unsigned int mem_fclk_21285;
+
+extern int __pure cpu_architecture(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_ARM_SYSTEM_INFO_H */
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
new file mode 100644
index 000000000000..9e65b23be145
--- /dev/null
+++ b/arch/arm/include/asm/system_misc.h
@@ -0,0 +1,28 @@
+#ifndef __ASM_ARM_SYSTEM_MISC_H
+#define __ASM_ARM_SYSTEM_MISC_H
+
+#ifndef __ASSEMBLY__
+
+#include
+#include
+#include
+
+extern void cpu_init(void);
+
+void soft_restart(unsigned long);
+extern void (*arm_pm_restart)(char str, const char *cmd);
+
+#define UDBG_UNDEFINED	(1 << 0)
+#define UDBG_SYSCALL	(1 << 1)
+#define UDBG_BADABORT	(1 << 2)
+#define UDBG_SEGV	(1 << 3)
+#define UDBG_BUS	(1 << 4)
+
+extern unsigned int user_debug;
+
+extern void disable_hlt(void);
+extern void enable_hlt(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_ARM_SYSTEM_MISC_H */
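A sketch of how the two new headers are meant to be consumed (hypothetical board code; mach_restart_sketch and mach_init_sketch are invented names): system_info.h serves runtime CPU identification, system_misc.h the odd system-level hooks such as the restart callback.

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <asm/system_info.h>
    #include <asm/system_misc.h>

    static void mach_restart_sketch(char mode, const char *cmd)
    {
    	/* board-specific reset sequence would go here */
    }

    static void __init mach_init_sketch(void)
    {
    	/* runtime CPU architecture check, from system_info.h */
    	if (cpu_architecture() >= CPU_ARCH_ARMv6)
    		pr_info("ARMv6+ core, board rev 0x%x\n", system_rev);

    	/* install the machine restart hook, from system_misc.h */
    	arm_pm_restart = mach_restart_sketch;
    }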
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 2958976d867b..71f6536d17ac 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -16,8 +16,8 @@
 #include
 #include
 #include
-#include
 #include
+#include
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5b0bce61eb69..b57c75e0b01f 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -18,7 +18,6 @@
 #include
 
 #include
-#include
 #include
 
 /*
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index ddba41d1fcf1..d0d1e83150c9 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include
 
 int elf_check_arch(const struct elf32_hdr *x)
 {
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index be16a48007b4..093a415902c1 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -24,7 +24,7 @@
 #include
 #include
 #include
-#include
+#include
 
 #include "entry-header.S"
 #include
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index d6a95ef9131d..ba386bd94107 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -34,7 +34,6 @@
 #include
 #include
 #include
-#include
 #include
 
 /* Breakpoint currently in use for each BRP. */
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 3efd82cc95f0..6a6a097edd61 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -36,7 +36,6 @@
 #include
 
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c
index a5394fb4e4e0..18a76282970e 100644
--- a/arch/arm/kernel/kprobes-common.c
+++ b/arch/arm/kernel/kprobes-common.c
@@ -13,6 +13,7 @@
 #include
 #include
+#include
 
 #include "kprobes.h"
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 764bd456d84f..56995983eed8 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -12,7 +12,7 @@
 #include
 #include
 #include
-#include
+#include
 
 extern const unsigned char relocate_new_kernel[];
 extern const unsigned int relocate_new_kernel_size;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c2ae3cd331fe..19917e89f13b 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -35,7 +35,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index ede6443c34d9..45956c9d0ef0 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -26,7 +26,6 @@
 #include
 
 #include
-#include
 #include
 
 #define REG_PC	15
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 615e992d738d..9e0fdb3a1988 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -50,6 +50,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 1f268bda4552..987dcf33415c 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -4,7 +4,6 @@
 #include
 #include
 #include
-#include
 	.text
 
 /*
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 01ec453bb924..30ae6bb4a310 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 #include "tcm.h"
 
 static struct gen_pool *tcm_pool;
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
index 9cb7aaca159f..aab899764053 100644
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -20,6 +20,7 @@
 #include
 #include
 
+#include
 #include
 
 /*
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f84dfe67724f..cd77743472a2 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -29,11 +29,11 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
+#include
 
 #include "signal.h"
diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c
index a42edc25a87e..fc460e96f1af 100644
--- a/arch/arm/mach-at91/at91cap9.c
+++ b/arch/arm/mach-at91/at91cap9.c
@@ -17,6 +17,8 @@
 #include
 #include
 #include
+#include
+#include
 
 #include
 #include
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 99c3174e24a2..413c027ab85d 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index d4036ba43612..79f571842c23 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index 023c2ff138df..62818b956aea 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index 75e876c258af..722ee08e5104 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 1cb6a96b1c1e..47e8fdbed9c4 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index d2c91a841cb8..7ae499359d36 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h
index ec164a4124c9..ef79a9aafc08 100644
--- a/arch/arm/mach-at91/include/mach/system_rev.h
+++ b/arch/arm/mach-at91/include/mach/system_rev.h
@@ -7,6 +7,8 @@
 #ifndef __ARCH_SYSTEM_REV_H__
 #define __ARCH_SYSTEM_REV_H__
 
+#include
+
 /*
  * board revision encoding
  * mach specific
diff --git a/arch/arm/mach-clps711x/common.c b/arch/arm/mach-clps711x/common.c
index ab1711b9b4d6..ac84f925c79e 100644
--- a/arch/arm/mach-clps711x/common.c
+++ b/arch/arm/mach-clps711x/common.c
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * This maps the generic CLPS711x registers
diff --git a/arch/arm/mach-clps711x/p720t-leds.c b/arch/arm/mach-clps711x/p720t-leds.c
index 15121446efc8..dd9a6cdbeb02 100644
--- a/arch/arm/mach-clps711x/p720t-leds.c
+++ b/arch/arm/mach-clps711x/p720t-leds.c
@@ -25,7 +25,6 @@
 #include
 #include
 
-#include
 #include
 #include
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index d5088900af6c..a70de24d1cbc 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -36,6 +36,7 @@
 #include
 #include
+#include
 
 #include
 #include
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index 294aad07f7a0..0c40e59af73a 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -22,7 +22,7 @@
 #include
 #include
 #include
-#include
+#include
 
 #include
 #include
diff --git a/arch/arm/mach-ebsa110/leds.c b/arch/arm/mach-ebsa110/leds.c
index 6a6ea57c2a4e..101a3204438b 100644
--- a/arch/arm/mach-ebsa110/leds.c
+++ b/arch/arm/mach-ebsa110/leds.c
@@ -17,7 +17,6 @@
 #include
 #include
 
-#include
 #include
 
 static spinlock_t leds_lock;
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 41978ee4f9d0..3e6aaa6361da 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index 121ad1d4fa39..3b54196447c7 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -14,6 +14,7 @@
 #include
 #include
+#include
 
 #include "common.h"
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index f685650c25d7..94a7087a6106 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -21,7 +21,6 @@
 #include