Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile          |  1
-rw-r--r--  arch/arm/kernel/calls.S           |  1
-rw-r--r--  arch/arm/kernel/head.S            |  2
-rw-r--r--  arch/arm/kernel/iwmmxt.S          |  8
-rw-r--r--  arch/arm/kernel/machine_kexec.c   |  7
-rw-r--r--  arch/arm/kernel/pj4-cp0.c         | 42
-rw-r--r--  arch/arm/kernel/sys_oabi-compat.c |  6
-rw-r--r--  arch/arm/kernel/topology.c        | 80
8 files changed, 111 insertions(+), 36 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index a766bcbaf8ad..040619c32d68 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -79,6 +79,7 @@ obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
+obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 166e945de832..8f51bdcdacbb 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -391,6 +391,7 @@
CALL(sys_finit_module)
/* 380 */ CALL(sys_sched_setattr)
CALL(sys_sched_getattr)
+ CALL(sys_renameat2)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
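
Entry 382 in the table now points at sys_renameat2. A hypothetical userspace check, invoking the call through the raw syscall interface since glibc of this era ships no wrapper (__NR_renameat2 and the RENAME_NOREPLACE fallback are assumed to match updated kernel headers):

/* Hypothetical check for the newly wired-up renameat2() (entry 382 above). */
#include <stdio.h>
#include <fcntl.h>              /* AT_FDCWD */
#include <unistd.h>             /* syscall() */
#include <sys/syscall.h>        /* __NR_renameat2, if headers are new enough */

#ifndef RENAME_NOREPLACE
#define RENAME_NOREPLACE (1 << 0)       /* fail instead of replacing target */
#endif

int main(void)
{
        long ret = syscall(__NR_renameat2, AT_FDCWD, "old-name",
                           AT_FDCWD, "new-name", RENAME_NOREPLACE);

        if (ret != 0)
                perror("renameat2");
        return ret == 0 ? 0 : 1;
}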
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index f8c08839edf3..591d6e4a6492 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -587,7 +587,7 @@ __fixup_pv_table:
add r6, r6, r3 @ adjust __pv_phys_pfn_offset address
add r7, r7, r3 @ adjust __pv_offset address
mov r0, r8, lsr #12 @ convert to PFN
- str r0, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
+ str r0, [r6] @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
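
The store drops the #LOW_OFFSET displacement because __pv_phys_pfn_offset holds a page frame number, a single 32-bit word, rather than a 64-bit byte address. A C rendering of the conversion done by the `mov r0, r8, lsr #12` above:

/* Sketch: shifting the physical address down by PAGE_SHIFT yields a PFN
 * that fits one 32-bit word even when LPAE gives >32-bit phys addresses. */
#define PAGE_SHIFT 12

static inline unsigned long phys_to_pfn(unsigned long long phys)
{
        return (unsigned long)(phys >> PAGE_SHIFT); /* mov r0, r8, lsr #12 */
}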
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index a08783823b32..2452dd1bef53 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -19,12 +19,16 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
-#if defined(CONFIG_CPU_PJ4)
+#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
#define PJ4(code...) code
#define XSC(code...)
-#else
+#elif defined(CONFIG_CPU_MOHAWK) || \
+ defined(CONFIG_CPU_XSC3) || \
+ defined(CONFIG_CPU_XSCALE)
#define PJ4(code...)
#define XSC(code...) code
+#else
+#error "Unsupported iWMMXt architecture"
#endif
#define MMX_WR0 (0x00)
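
PJ4() and XSC() implement a conditional-code macro pattern: each expands to its (possibly comma-containing) argument list on the matching CPU family and to nothing elsewhere, and the new #error branch turns an unhandled configuration into a build failure instead of silently mis-assembling. A minimal C sketch of the same idea, with CONFIG_FAST_PATH as a made-up stand-in for the CONFIG_CPU_* symbols:

/* Minimal sketch of the PJ4()/XSC() pattern; CONFIG_FAST_PATH is a
 * hypothetical config symbol, not one from the kernel. */
#ifdef CONFIG_FAST_PATH
#define FAST(code...)   code            /* expands to its arguments */
#define SLOW(code...)                   /* expands to nothing */
#else
#define FAST(code...)
#define SLOW(code...)   code
#endif

static int scale16(int x)
{
        FAST(return x << 4;)            /* exactly one of these two lines */
        SLOW(return x * 16;)            /* survives preprocessing */
}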
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index f0d180d8b29f..8cf0996aa1a8 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -184,3 +184,10 @@ void machine_kexec(struct kimage *image)
soft_restart(reboot_entry_phys);
}
+
+void arch_crash_save_vmcoreinfo(void)
+{
+#ifdef CONFIG_ARM_LPAE
+ VMCOREINFO_CONFIG(ARM_LPAE);
+#endif
+}
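
arch_crash_save_vmcoreinfo() publishes configuration into the vmcoreinfo ELF note that post-mortem tools such as crash and makedumpfile read; they must know whether LPAE page tables were in use to walk a dump correctly. Roughly, per the kexec headers of this era, the macro appends a config line to that note:

/* VMCOREINFO_CONFIG(name) boils down to appending "CONFIG_<name>=y" to the
 * vmcoreinfo note, so the hunk above surfaces "CONFIG_ARM_LPAE=y". */
#define VMCOREINFO_CONFIG(name) \
        vmcoreinfo_append_str("CONFIG_%s=y\n", #name)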
diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
index fc7208636284..8153e36b2491 100644
--- a/arch/arm/kernel/pj4-cp0.c
+++ b/arch/arm/kernel/pj4-cp0.c
@@ -45,7 +45,7 @@ static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
return NOTIFY_DONE;
}
-static struct notifier_block iwmmxt_notifier_block = {
+static struct notifier_block __maybe_unused iwmmxt_notifier_block = {
.notifier_call = iwmmxt_do,
};
@@ -72,6 +72,33 @@ static void __init pj4_cp_access_write(u32 value)
: "=r" (temp) : "r" (value));
}
+static int __init pj4_get_iwmmxt_version(void)
+{
+ u32 cp_access, wcid;
+
+ cp_access = pj4_cp_access_read();
+ pj4_cp_access_write(cp_access | 0xf);
+
+ /* check if coprocessor 0 and 1 are available */
+ if ((pj4_cp_access_read() & 0xf) != 0xf) {
+ pj4_cp_access_write(cp_access);
+ return -ENODEV;
+ }
+
+ /* read iWMMXt coprocessor id register p1, c0 */
+ __asm__ __volatile__ ("mrc p1, 0, %0, c0, c0, 0\n" : "=r" (wcid));
+
+ pj4_cp_access_write(cp_access);
+
+ /* iWMMXt v1 */
+ if ((wcid & 0xffffff00) == 0x56051000)
+ return 1;
+ /* iWMMXt v2 */
+ if ((wcid & 0xffffff00) == 0x56052000)
+ return 2;
+
+ return -EINVAL;
+}
/*
* Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
@@ -79,17 +106,26 @@ static void __init pj4_cp_access_write(u32 value)
*/
static int __init pj4_cp0_init(void)
{
- u32 cp_access;
+ u32 __maybe_unused cp_access;
+ int vers;
if (!cpu_is_pj4())
return 0;
+ vers = pj4_get_iwmmxt_version();
+ if (vers < 0)
+ return 0;
+
+#ifndef CONFIG_IWMMXT
+ pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n");
+#else
cp_access = pj4_cp_access_read() & ~0xf;
pj4_cp_access_write(cp_access);
- printk(KERN_INFO "PJ4 iWMMXt coprocessor enabled.\n");
+ pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers);
elf_hwcap |= HWCAP_IWMMXT;
thread_register_notifier(&iwmmxt_notifier_block);
+#endif
return 0;
}
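
Once pj4_cp0_init() sets HWCAP_IWMMXT in elf_hwcap, userspace can detect the unit through the auxiliary vector. A hypothetical probe (the fallback define mirrors the value in ARM's <asm/hwcap.h>):

/* Hypothetical userspace probe for the HWCAP_IWMMXT bit set above. */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_IWMMXT
#define HWCAP_IWMMXT    (1 << 9)        /* from ARM's <asm/hwcap.h> */
#endif

int main(void)
{
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("iWMMXt %savailable\n", (hwcap & HWCAP_IWMMXT) ? "" : "not ");
        return 0;
}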
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 702bd329d9d0..e90a3148f385 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -203,9 +203,9 @@ asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
int ret;
switch (cmd) {
- case F_GETLKP:
- case F_SETLKP:
- case F_SETLKPW:
+ case F_OFD_GETLK:
+ case F_OFD_SETLK:
+ case F_OFD_SETLKW:
case F_GETLK64:
case F_SETLK64:
case F_SETLKW64:
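
The rename tracks the upstream switch from the provisional F_*LKP names to the final F_OFD_* constants for open-file-description locks, which the OABI shim now passes straight through. A hypothetical userspace use, assuming glibc headers new enough to define F_OFD_SETLK under _GNU_SOURCE:

/* Hypothetical example of the F_OFD_SETLK command forwarded above; unlike
 * classic POSIX locks, an OFD lock is owned by the open file description,
 * so it survives fork() and is shared by dup()ed descriptors. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct flock fl = {
                .l_type   = F_WRLCK,    /* exclusive write lock */
                .l_whence = SEEK_SET,
                .l_start  = 0,
                .l_len    = 0,          /* 0 = through end of file */
        };
        int fd = open("lockfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
                perror("fcntl(F_OFD_SETLK)");
        close(fd);
        return 0;
}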
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 0bc94b1fd1ae..d42a7db22236 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -26,30 +26,30 @@
#include <asm/topology.h>
/*
- * cpu power scale management
+ * cpu capacity scale management
*/
/*
- * cpu power table
+ * cpu capacity table
* This per cpu data structure describes the relative capacity of each core.
* On a heterogeneous system, cores don't have the same computation capacity
- * and we reflect that difference in the cpu_power field so the scheduler can
- * take this difference into account during load balance. A per cpu structure
- * is preferred because each CPU updates its own cpu_power field during the
- * load balance except for idle cores. One idle core is selected to run the
- * rebalance_domains for all idle cores and the cpu_power can be updated
- * during this sequence.
+ * and we reflect that difference in the cpu_capacity field so the scheduler
+ * can take this difference into account during load balance. A per cpu
+ * structure is preferred because each CPU updates its own cpu_capacity field
+ * during the load balance except for idle cores. One idle core is selected
+ * to run the rebalance_domains for all idle cores and the cpu_capacity can be
+ * updated during this sequence.
*/
static DEFINE_PER_CPU(unsigned long, cpu_scale);
-unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
return per_cpu(cpu_scale, cpu);
}
-static void set_power_scale(unsigned int cpu, unsigned long power)
+static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
- per_cpu(cpu_scale, cpu) = power;
+ per_cpu(cpu_scale, cpu) = capacity;
}
#ifdef CONFIG_OF
@@ -62,11 +62,11 @@ struct cpu_efficiency {
* Table of relative efficiency of each processor
* The efficiency value must fit in 20 bits and the final
* cpu_scale value must be in the range
- * 0 < cpu_scale < 3*SCHED_POWER_SCALE/2
+ * 0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
* in order to return at most 1 when DIV_ROUND_CLOSEST
* is used to compute the capacity of a CPU.
* Processors that are not defined in the table,
- * use the default SCHED_POWER_SCALE value for cpu_scale.
+ * use the default SCHED_CAPACITY_SCALE value for cpu_scale.
*/
static const struct cpu_efficiency table_efficiency[] = {
{"arm,cortex-a15", 3891},
@@ -83,9 +83,9 @@ static unsigned long middle_capacity = 1;
* Iterate over all CPUs' descriptors in the DT and compute the efficiency
* (as per table_efficiency). Also calculate a middle efficiency
* as close as possible to (max{eff_i} - min{eff_i}) / 2
- * This is later used to scale the cpu_power field such that an
- * 'average' CPU is of middle power. Also see the comments near
- * table_efficiency[] and update_cpu_power().
+ * This is later used to scale the cpu_capacity field such that an
+ * 'average' CPU is of middle capacity. Also see the comments near
+ * table_efficiency[] and update_cpu_capacity().
*/
static void __init parse_dt_topology(void)
{
@@ -141,15 +141,15 @@ static void __init parse_dt_topology(void)
* cpu_scale because all CPUs have the same capacity. Otherwise, we
* compute a middle_capacity factor that will ensure that the capacity
* of an 'average' CPU of the system will be as close as possible to
- * SCHED_POWER_SCALE, which is the default value, but with the
+ * SCHED_CAPACITY_SCALE, which is the default value, but with the
* constraint explained near table_efficiency[].
*/
if (4*max_capacity < (3*(max_capacity + min_capacity)))
middle_capacity = (min_capacity + max_capacity)
- >> (SCHED_POWER_SHIFT+1);
+ >> (SCHED_CAPACITY_SHIFT+1);
else
middle_capacity = ((max_capacity / 3)
- >> (SCHED_POWER_SHIFT-1)) + 1;
+ >> (SCHED_CAPACITY_SHIFT-1)) + 1;
}
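
The two branches pick middle_capacity so that an 'average' core ends up at SCHED_CAPACITY_SCALE while the largest stays at or below 3/2 of it. A standalone sketch of the same arithmetic with made-up capacities (SCHED_CAPACITY_SHIFT is 10, i.e. a scale of 1024):

/* Standalone sketch of the middle_capacity selection above; the two input
 * capacities are hypothetical DT-derived values, not real hardware data. */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT    10      /* SCHED_CAPACITY_SCALE == 1024 */

int main(void)
{
        unsigned long min_capacity = 2048UL << 10;      /* made-up small core */
        unsigned long max_capacity = 3891UL << 10;      /* made-up big core */
        unsigned long middle_capacity;

        if (4 * max_capacity < 3 * (max_capacity + min_capacity))
                /* narrow spread: centre the scale on the average capacity */
                middle_capacity = (min_capacity + max_capacity)
                                  >> (SCHED_CAPACITY_SHIFT + 1);
        else
                /* wide spread: clamp so max <= 3*SCHED_CAPACITY_SCALE/2 */
                middle_capacity = ((max_capacity / 3)
                                  >> (SCHED_CAPACITY_SHIFT - 1)) + 1;

        /* with these inputs the first branch fires: middle_capacity == 2969 */
        printf("middle_capacity = %lu\n", middle_capacity);
        return 0;
}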
@@ -158,20 +158,20 @@ static void __init parse_dt_topology(void)
* boot. The update of all CPUs takes O(n^2) for a heterogeneous system but the
* function returns directly for an SMP system.
*/
-static void update_cpu_power(unsigned int cpu)
+static void update_cpu_capacity(unsigned int cpu)
{
if (!cpu_capacity(cpu))
return;
- set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+ set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
- printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
- cpu, arch_scale_freq_power(NULL, cpu));
+ printk(KERN_INFO "CPU%u: update cpu_capacity %lu\n",
+ cpu, arch_scale_freq_capacity(NULL, cpu));
}
#else
static inline void parse_dt_topology(void) {}
-static inline void update_cpu_power(unsigned int cpuid) {}
+static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif
/*
@@ -185,6 +185,15 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
return &cpu_topology[cpu].core_sibling;
}
+/*
+ * The current assumption is that we can power gate each core independently.
+ * This will be superseded by DT binding once available.
+ */
+const struct cpumask *cpu_corepower_mask(int cpu)
+{
+ return &cpu_topology[cpu].thread_sibling;
+}
+
static void update_siblings_masks(unsigned int cpuid)
{
struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -258,7 +267,7 @@ void store_cpu_topology(unsigned int cpuid)
update_siblings_masks(cpuid);
- update_cpu_power(cpuid);
+ update_cpu_capacity(cpuid);
printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
cpuid, cpu_topology[cpuid].thread_id,
@@ -266,6 +275,20 @@ void store_cpu_topology(unsigned int cpuid)
cpu_topology[cpuid].socket_id, mpidr);
}
+static inline const int cpu_corepower_flags(void)
+{
+ return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
+}
+
+static struct sched_domain_topology_level arm_topology[] = {
+#ifdef CONFIG_SCHED_MC
+ { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
+ { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+#endif
+ { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+ { NULL, },
+};
+
/*
* init_cpu_topology is called at boot when only one cpu is running
* which prevents simultaneous write access to the cpu_topology array
@@ -274,7 +297,7 @@ void __init init_cpu_topology(void)
{
unsigned int cpu;
- /* init core mask and power*/
+ /* init core mask and capacity */
for_each_possible_cpu(cpu) {
struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
@@ -284,9 +307,12 @@ void __init init_cpu_topology(void)
cpumask_clear(&cpu_topo->core_sibling);
cpumask_clear(&cpu_topo->thread_sibling);
- set_power_scale(cpu, SCHED_POWER_SCALE);
+ set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);
}
smp_wmb();
parse_dt_topology();
+
+ /* Set scheduler topology descriptor */
+ set_sched_topology(arm_topology);
}
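
For reference, the arm_topology[] initializers above map onto the sched_domain_topology_level layout of this kernel generation, roughly as sketched below; the field layout is recalled from <linux/sched.h> and should be treated as an assumption:

/* Rough shape of the structure arm_topology[] fills in. SD_INIT_NAME()
 * sets .name under CONFIG_SCHED_DEBUG; an entry with only two initializers,
 * like the DIE level, leaves sd_flags NULL. Layout is an assumption. */
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef const int (*sched_domain_flags_f)(void);

struct sched_domain_topology_level {
        sched_domain_mask_f     mask;           /* e.g. cpu_coregroup_mask */
        sched_domain_flags_f    sd_flags;       /* e.g. cpu_corepower_flags */
        int                     flags;
        int                     numa_level;
        struct sd_data          data;
#ifdef CONFIG_SCHED_DEBUG
        char                    *name;          /* "GMC", "MC", "DIE" */
#endif
};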