author    Stephen Rothwell <sfr@canb.auug.org.au>  2014-04-11 10:24:40 +1000
committer Stephen Rothwell <sfr@canb.auug.org.au>  2014-04-11 10:24:40 +1000
commit    055ab0a697394f127f32a112c99f97a6d430dae9 (patch)
tree      440c80e210b7c5d337169b503dd9df353b3c8b31 /arch
parent    4ba85265790ba3681deeaf73f018c0eb829a7341 (diff)
parent    c9d347e0277696c80496161cbef47393b850a0aa (diff)
Merge remote-tracking branch 'arm-current/fixes'
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/crash_dump.c    |  2
-rw-r--r--  arch/arm/kernel/process.c       |  2
-rw-r--r--  arch/arm/mach-vexpress/dcscb.c  | 13
-rw-r--r--  arch/arm/mm/dump.c              | 47
4 files changed, 43 insertions, 21 deletions
diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
index 90c50d4b43f7..5d1286d51154 100644
--- a/arch/arm/kernel/crash_dump.c
+++ b/arch/arm/kernel/crash_dump.c
@@ -39,7 +39,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!csize)
return 0;
- vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
if (!vaddr)
return -ENOMEM;
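
The crash_dump.c hunk reads as a truncation fix: with LPAE, physical addresses can exceed 32 bits while pfn is an unsigned long, so pfn << PAGE_SHIFT is evaluated in 32-bit arithmetic and silently drops the high bits, whereas __pfn_to_phys() widens to phys_addr_t before shifting. A minimal user-space sketch of the difference (PAGE_SHIFT, the sample pfn and the fixed-width types are illustrative, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on ARM */

int main(void)
{
	uint32_t pfn = 0x140000;	/* a page frame just above the 4 GiB mark */

	/* 32-bit arithmetic: the shift wraps and the upper bits are lost. */
	uint32_t truncated = pfn << PAGE_SHIFT;

	/* Widening before the shift, as __pfn_to_phys() does via phys_addr_t,
	 * preserves the full physical address. */
	uint64_t widened = (uint64_t)pfn << PAGE_SHIFT;

	printf("truncated: 0x%08x\n", (unsigned)truncated);		/* 0x40000000  */
	printf("widened:   0x%09llx\n", (unsigned long long)widened);	/* 0x140000000 */
	return 0;
}
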
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 806d287e3e53..be83fe9adec7 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -99,7 +99,7 @@ void soft_restart(unsigned long addr)
u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
/* Disable interrupts first */
- local_irq_disable();
+ raw_local_irq_disable();
local_fiq_disable();
/* Disable the L2 if we're the last man standing. */
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 14d499688736..788495d35cf9 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -137,11 +137,16 @@ static void dcscb_power_down(void)
v7_exit_coherency_flush(all);
/*
- * This is a harmless no-op. On platforms with a real
- * outer cache this might either be needed or not,
- * depending on where the outer cache sits.
+ * A full outer cache flush could be needed at this point
+ * on platforms with such a cache, depending on where the
+ * outer cache sits. In some cases the notion of a "last
+ * cluster standing" would need to be implemented if the
+ * outer cache is shared across clusters. In any case, when
+ * the outer cache needs flushing, there is no concurrent
+ * access to the cache controller to worry about and no
+ * special locking besides what is already provided by the
+ * MCPM state machinery is needed.
*/
- outer_flush_all();
/*
* Disable cluster-level coherency by masking
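
The rewritten dcscb.c comment alludes to a "last cluster standing" check that a platform with a cluster-shared outer cache would need before flushing. A hedged, self-contained sketch of that idea follows; the counter, helper names and cluster count are hypothetical, and in real code the decision would ride on the MCPM state machinery rather than a bare atomic:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int live_clusters = 2;	/* e.g. a two-cluster big.LITTLE board */

static void outer_flush_all_stub(void)
{
	/* stand-in for outer_flush_all() on a platform that needs it */
}

static void hypothetical_cluster_power_down(void)
{
	/* atomic_fetch_sub() returns the previous value: 1 means we were last */
	bool last_cluster_standing = atomic_fetch_sub(&live_clusters, 1) == 1;

	if (last_cluster_standing)
		outer_flush_all_stub();	/* flush only once no cluster still uses the cache */
}

int main(void)
{
	hypothetical_cluster_power_down();	/* first cluster down: no flush */
	hypothetical_cluster_power_down();	/* last cluster down: flush */
	return 0;
}
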
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index ef69152f9b52..c508f41a43bc 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -120,34 +120,51 @@ static const struct prot_bits pte_bits[] = {
};
static const struct prot_bits section_bits[] = {
-#ifndef CONFIG_ARM_LPAE
- /* These are approximate */
+#ifdef CONFIG_ARM_LPAE
+ {
+ .mask = PMD_SECT_USER,
+ .val = PMD_SECT_USER,
+ .set = "USR",
+ }, {
+ .mask = PMD_SECT_RDONLY,
+ .val = PMD_SECT_RDONLY,
+ .set = "ro",
+ .clear = "RW",
+#elif __LINUX_ARM_ARCH__ >= 6
{
- .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
- .val = 0,
+ .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .val = PMD_SECT_APX | PMD_SECT_AP_WRITE,
.set = " ro",
}, {
- .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_WRITE,
.set = " RW",
}, {
- .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_READ,
.set = "USR ro",
}, {
- .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.set = "USR RW",
-#else
+#else /* ARMv4/ARMv5 */
+ /* These are approximate */
{
- .mask = PMD_SECT_USER,
- .val = PMD_SECT_USER,
- .set = "USR",
+ .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .val = 0,
+ .set = " ro",
}, {
- .mask = PMD_SECT_RDONLY,
- .val = PMD_SECT_RDONLY,
- .set = "ro",
- .clear = "RW",
+ .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .val = PMD_SECT_AP_WRITE,
+ .set = " RW",
+ }, {
+ .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .val = PMD_SECT_AP_READ,
+ .set = "USR ro",
+ }, {
+ .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+ .set = "USR RW",
#endif
}, {
.mask = PMD_SECT_XN,
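
To read the dump.c hunk, note how the table is consumed: an entry applies when (flags & mask) == val, so folding PMD_SECT_APX into the mask is what lets a read-only kernel section (APX plus AP_WRITE) be told apart from a plain read/write one on ARMv6+. Below is a sketch of that matching loop, mirroring the shape of the decoder in arch/arm/mm/dump.c rather than quoting it; the bit values in main() are illustrative:

#include <stdio.h>

struct prot_bits {
	unsigned long mask;
	unsigned long val;
	const char *set;
	const char *clear;
};

/* Print the attribute string of every entry whose masked bits match. */
static void dump_prot(unsigned long flags, const struct prot_bits *bits, int num)
{
	for (int i = 0; i < num; i++, bits++) {
		const char *s = (flags & bits->mask) == bits->val ? bits->set
								  : bits->clear;
		if (s)
			printf("%s ", s);
	}
	printf("\n");
}

int main(void)
{
	/* Illustrative bit positions, roughly matching the 2-level hwdef layout. */
	enum { AP_WRITE = 1 << 10, AP_READ = 1 << 11, APX = 1 << 15 };

	static const struct prot_bits section_bits[] = {
		{ .mask = APX | AP_READ | AP_WRITE, .val = APX | AP_WRITE, .set = "    ro" },
		{ .mask = APX | AP_READ | AP_WRITE, .val = AP_WRITE,       .set = "    RW" },
	};

	/* A read-only kernel section (APX + AP_WRITE): with APX left out of the
	 * mask, as before the patch, it would have matched the "RW" entry. */
	dump_prot(APX | AP_WRITE, section_bits, 2);
	return 0;
}
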