author     Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 10:10:48 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 10:10:48 -0700
commit     c9ef713993ba168b38d1a97ea0ab00874f1da022 (patch)
tree       4eb3ea0e68af4f4ecfb0ca8ed7aea160b36bee30 /arch/arm64/kernel
parent     87c1f0f8c9442c86cbb343b9324bef8312029d7d (diff)
parent     16c85a1fd73eade2ae290d759924c09b4595f504 (diff)
Merge tag 'arm64-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64
Pull arm64 update from Catalin Marinas:
 "Main features:

   - Versatile Express SoC (model) support - DT files and Kconfig entries
     (there are no arch/arm64/mach-* directories).  The bulk of the code has
     already been moved to drivers/ as part of the ARM SoC clean-up.

   - Basic multi-cluster support (CPU logical map initialised from the DT)

   - Simple earlyprintk support for UART 8250/16550 and FastModel console
     output

   - Optimised kernel library bitops and string functions.

   - Automatic initialisation of the irqchip and clocks via DT"

* tag 'arm64-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64: (26 commits)
  arm64: Use acquire/release semantics instead of explicit DMB
  arm64: klib: bitops: fix unpredictable stxr usage
  arm64: vexpress: Enable ARMv8 RTSM model (SoC) support
  arm64: vexpress: Add dts files for the ARMv8 RTSM models
  arm64: Survive invalid cpu enable-methods
  arm64: mm: Correct show_pte behaviour
  arm64: Fix compat types affecting struct compat_stat
  arm64: Execute DSB during thread switching for TLB/cache maintenance
  arm64: compiling issue, need add include/asm/vga.h file
  arm64: smp: honour #address-size when parsing CPU reg property
  arm64: Define cmpxchg64 and cmpxchg64_local for outside use
  arm64: Define readq and writeq for driver module using
  arm64: Fix task tracing
  arm64: add explicit symbols to ESR_EL1 decoding
  arm64: Use irqchip_init() for interrupt controller initialisation
  arm64: psci: Use the MPIDR values from cpu_logical_map for cpu ids.
  arm64: klib: Optimised atomic bitops
  arm64: klib: Optimised string functions
  arm64: klib: Optimised memory functions
  arm64: head: match all affinity levels in the pen of the secondaries
  ...
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c     21
-rw-r--r--  arch/arm64/kernel/early_printk.c   35
-rw-r--r--  arch/arm64/kernel/entry.S          53
-rw-r--r--  arch/arm64/kernel/head.S            4
-rw-r--r--  arch/arm64/kernel/irq.c            19
-rw-r--r--  arch/arm64/kernel/process.c         8
-rw-r--r--  arch/arm64/kernel/setup.c          12
-rw-r--r--  arch/arm64/kernel/smp.c           113
-rw-r--r--  arch/arm64/kernel/smp_psci.c        5
9 files changed, 209 insertions, 61 deletions
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index aa3e948f7885..7df1aad29b67 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -39,10 +39,21 @@ EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
- /* bitops */
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(__atomic_hash);
-#endif
-
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
+
+ /* string / mem functions */
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memchr);
+
+ /* atomic bitops */
+EXPORT_SYMBOL(set_bit);
+EXPORT_SYMBOL(test_and_set_bit);
+EXPORT_SYMBOL(clear_bit);
+EXPORT_SYMBOL(test_and_clear_bit);
+EXPORT_SYMBOL(change_bit);
+EXPORT_SYMBOL(test_and_change_bit);
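Note: these EXPORT_SYMBOL() entries make the new optimised string/memory routines and atomic bitops visible to loadable modules. A minimal, hypothetical module sketch (not part of this diff) that would link against them:

/* Hypothetical module relying on the exports added above. */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>

static unsigned long demo_flags;

static int __init export_demo_init(void)
{
    char buf[32];

    memset(buf, 0, sizeof(buf));    /* may resolve to the exported memset */
    set_bit(0, &demo_flags);        /* exported atomic bitop */
    return 0;
}

static void __exit export_demo_exit(void)
{
    clear_bit(0, &demo_flags);
}

module_init(export_demo_init);
module_exit(export_demo_exit);
MODULE_LICENSE("GPL");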
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
index 7e320a2edb9b..ac974f48a7a2 100644
--- a/arch/arm64/kernel/early_printk.c
+++ b/arch/arm64/kernel/early_printk.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/amba/serial.h>
+#include <linux/serial_reg.h>
static void __iomem *early_base;
static void (*printch)(char ch);
@@ -40,6 +41,37 @@ static void pl011_printch(char ch)
;
}
+/*
+ * Semihosting-based debug console
+ */
+static void smh_printch(char ch)
+{
+ asm volatile("mov x1, %0\n"
+ "mov x0, #3\n"
+ "hlt 0xf000\n"
+ : : "r" (&ch) : "x0", "x1", "memory");
+}
+
+/*
+ * 8250/16550 (8-bit aligned registers) single character TX.
+ */
+static void uart8250_8bit_printch(char ch)
+{
+ while (!(readb_relaxed(early_base + UART_LSR) & UART_LSR_THRE))
+ ;
+ writeb_relaxed(ch, early_base + UART_TX);
+}
+
+/*
+ * 8250/16550 (32-bit aligned registers) single character TX.
+ */
+static void uart8250_32bit_printch(char ch)
+{
+ while (!(readl_relaxed(early_base + (UART_LSR << 2)) & UART_LSR_THRE))
+ ;
+ writel_relaxed(ch, early_base + (UART_TX << 2));
+}
+
struct earlycon_match {
const char *name;
void (*printch)(char ch);
@@ -47,6 +79,9 @@ struct earlycon_match {
static const struct earlycon_match earlycon_match[] __initconst = {
{ .name = "pl011", .printch = pl011_printch, },
+ { .name = "smh", .printch = smh_printch, },
+ { .name = "uart8250-8bit", .printch = uart8250_8bit_printch, },
+ { .name = "uart8250-32bit", .printch = uart8250_32bit_printch, },
{}
};
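Note: with the new entries in the match table, the early console is selected via the earlyprintk= kernel parameter, which this file parses as a backend name optionally followed by the MMIO base (syntax from memory; treat it as an assumption). The addresses below are placeholders rather than values taken from this patch:

earlyprintk=pl011,0x1c090000
earlyprintk=uart8250-32bit,0x1c021000
earlyprintk=smh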
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 514d6098dbee..c7e047049f2c 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -24,6 +24,7 @@
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
+#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>
@@ -239,18 +240,18 @@ ENDPROC(el1_error_invalid)
el1_sync:
kernel_entry 1
mrs x1, esr_el1 // read the syndrome register
- lsr x24, x1, #26 // exception class
- cmp x24, #0x25 // data abort in EL1
+ lsr x24, x1, #ESR_EL1_EC_SHIFT // exception class
+ cmp x24, #ESR_EL1_EC_DABT_EL1 // data abort in EL1
b.eq el1_da
- cmp x24, #0x18 // configurable trap
+ cmp x24, #ESR_EL1_EC_SYS64 // configurable trap
b.eq el1_undef
- cmp x24, #0x26 // stack alignment exception
+ cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception
b.eq el1_sp_pc
- cmp x24, #0x22 // pc alignment exception
+ cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception
b.eq el1_sp_pc
- cmp x24, #0x00 // unknown exception in EL1
+ cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL1
b.eq el1_undef
- cmp x24, #0x30 // debug exception in EL1
+ cmp x24, #ESR_EL1_EC_BREAKPT_EL1 // debug exception in EL1
b.ge el1_dbg
b el1_inv
el1_da:
@@ -346,27 +347,27 @@ el1_preempt:
el0_sync:
kernel_entry 0
mrs x25, esr_el1 // read the syndrome register
- lsr x24, x25, #26 // exception class
- cmp x24, #0x15 // SVC in 64-bit state
+ lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
+ cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state
b.eq el0_svc
adr lr, ret_from_exception
- cmp x24, #0x24 // data abort in EL0
+ cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0
b.eq el0_da
- cmp x24, #0x20 // instruction abort in EL0
+ cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0
b.eq el0_ia
- cmp x24, #0x07 // FP/ASIMD access
+ cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
- cmp x24, #0x2c // FP/ASIMD exception
+ cmp x24, #ESR_EL1_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
- cmp x24, #0x18 // configurable trap
+ cmp x24, #ESR_EL1_EC_SYS64 // configurable trap
b.eq el0_undef
- cmp x24, #0x26 // stack alignment exception
+ cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception
b.eq el0_sp_pc
- cmp x24, #0x22 // pc alignment exception
+ cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception
b.eq el0_sp_pc
- cmp x24, #0x00 // unknown exception in EL0
+ cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
- cmp x24, #0x30 // debug exception in EL0
+ cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0
b.ge el0_dbg
b el0_inv
@@ -375,21 +376,21 @@ el0_sync:
el0_sync_compat:
kernel_entry 0, 32
mrs x25, esr_el1 // read the syndrome register
- lsr x24, x25, #26 // exception class
- cmp x24, #0x11 // SVC in 32-bit state
+ lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
+ cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state
b.eq el0_svc_compat
adr lr, ret_from_exception
- cmp x24, #0x24 // data abort in EL0
+ cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0
b.eq el0_da
- cmp x24, #0x20 // instruction abort in EL0
+ cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0
b.eq el0_ia
- cmp x24, #0x07 // FP/ASIMD access
+ cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
- cmp x24, #0x28 // FP/ASIMD exception
+ cmp x24, #ESR_EL1_EC_FP_EXC32 // FP/ASIMD exception
b.eq el0_fpsimd_exc
- cmp x24, #0x00 // unknown exception in EL0
+ cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
- cmp x24, #0x30 // debug exception in EL0
+ cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0
b.ge el0_dbg
b el0_inv
el0_svc_compat:
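Note: the raw exception-class numbers are replaced by the ESR_EL1_EC_* constants from the new <asm/esr.h>. A standalone C sketch of the same decoding, using only the shift and values already visible in the old comments above (illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define ESR_EL1_EC_SHIFT    26      /* EC field lives in ESR_EL1[31:26] */
#define ESR_EL1_EC_UNKNOWN  0x00
#define ESR_EL1_EC_SVC64    0x15
#define ESR_EL1_EC_DABT_EL0 0x24
#define ESR_EL1_EC_DABT_EL1 0x25

int main(void)
{
    uint64_t esr = 0x96000045;      /* hypothetical syndrome value */
    unsigned int ec = esr >> ESR_EL1_EC_SHIFT;

    if (ec == ESR_EL1_EC_DABT_EL1)
        printf("data abort taken from EL1\n");
    return 0;
}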
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0a0a49756826..53dcae49e729 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -26,6 +26,7 @@
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
+#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable-hwdef.h>
@@ -229,7 +230,8 @@ ENTRY(secondary_holding_pen)
bl __calc_phys_offset // x24=phys offset
bl el2_setup // Drop to EL1
mrs x0, mpidr_el1
- and x0, x0, #15 // CPU number
+ ldr x1, =MPIDR_HWID_BITMASK
+ and x0, x0, x1
adr x1, 1b
ldp x2, x3, [x1]
sub x1, x1, x2
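Note: secondary_holding_pen now masks MPIDR_EL1 with MPIDR_HWID_BITMASK (hence the new <asm/cputype.h> include) instead of just the low four bits, so secondaries on any cluster can match their pen release value. A small C sketch of the masking; the mask value is my recollection of the header and should be treated as an assumption:

#include <stdint.h>

/* Assumed value of MPIDR_HWID_BITMASK: affinity fields Aff3..Aff0 only. */
#define MPIDR_HWID_BITMASK  0xff00ffffffULL

static uint64_t mpidr_to_hwid(uint64_t mpidr)
{
    /* drop the U/MT/reserved bits, keep all four affinity levels */
    return mpidr & MPIDR_HWID_BITMASK;
}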
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 0373c6609eaf..ecb3354292ed 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -25,7 +25,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/init.h>
-#include <linux/of_irq.h>
+#include <linux/irqchip.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
@@ -67,18 +67,17 @@ void handle_IRQ(unsigned int irq, struct pt_regs *regs)
set_irq_regs(old_regs);
}
-/*
- * Interrupt controllers supported by the kernel.
- */
-static const struct of_device_id intctrl_of_match[] __initconst = {
- /* IRQ controllers { .compatible, .data } info to go here */
- {}
-};
+void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
+{
+ if (handle_arch_irq)
+ return;
+
+ handle_arch_irq = handle_irq;
+}
void __init init_IRQ(void)
{
- of_irq_init(intctrl_of_match);
-
+ irqchip_init();
if (!handle_arch_irq)
panic("No interrupt controller found.");
}
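Note: init_IRQ() now defers controller probing to the generic irqchip layer, and a driver installs its top-level handler through the new set_handle_irq(). A hedged sketch of how an interrupt-controller driver would plug in; the driver and compatible names are made up, and only set_handle_irq()'s signature comes from the hunk above:

#include <linux/of.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#include "irqchip.h"    /* IRQCHIP_DECLARE(); private drivers/irqchip header in this era (assumption) */

/* declared by the arch code added above; header location assumed */
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));

static void my_intc_handle_irq(struct pt_regs *regs)
{
    /* ack the controller here, then dispatch via handle_IRQ(irq, regs) */
}

static int __init my_intc_of_init(struct device_node *node,
                                  struct device_node *parent)
{
    set_handle_irq(my_intc_handle_irq);
    return 0;
}
IRQCHIP_DECLARE(my_intc, "vendor,my-intc", my_intc_of_init);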
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 83a0ad5936a5..6f3822f98dcd 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -278,11 +278,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
fpsimd_thread_switch(next);
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
+ contextidr_thread_switch(next);
+
+ /*
+ * Complete any pending TLB or cache maintenance on this CPU in case
+ * the thread migrates to a different CPU.
+ */
+ dsb();
/* the actual thread switch */
last = cpu_switch_to(prev, next);
- contextidr_thread_switch(next);
return last;
}
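Note: the DSB is issued before cpu_switch_to() so that TLB or cache maintenance started by the outgoing thread on this CPU completes before the thread can resume elsewhere; contextidr_thread_switch() also moves ahead of the actual switch. For reference, a sketch of what dsb() expands to on arm64 (assumption; the exact macro lives in <asm/barrier.h>):

/* Assumed shape of the arm64 dsb() macro (illustrative). */
#define dsb()   asm volatile("dsb sy" : : : "memory")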
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 113db863f832..6a9a53292590 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -32,6 +32,7 @@
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
+#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
@@ -46,6 +47,7 @@
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/setup.h>
+#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
@@ -240,6 +242,8 @@ static void __init request_standard_resources(void)
}
}
+u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+
void __init setup_arch(char **cmdline_p)
{
setup_processor();
@@ -264,6 +268,7 @@ void __init setup_arch(char **cmdline_p)
psci_init();
+ cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
#ifdef CONFIG_SMP
smp_init_cpus();
#endif
@@ -277,6 +282,13 @@ void __init setup_arch(char **cmdline_p)
#endif
}
+static int __init arm64_of_clk_init(void)
+{
+ of_clk_init(NULL);
+ return 0;
+}
+arch_initcall(arm64_of_clk_init);
+
static DEFINE_PER_CPU(struct cpu, cpu_data);
static int __init topology_init(void)
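Note: the new arch_initcall runs of_clk_init(NULL), so clock providers declared in the device tree register themselves without any machine-specific code. A hedged sketch of a provider that such a scan would pick up; the names and the fixed rate are illustrative, and the clk calls are the common <linux/clk-provider.h> helpers as I recall them from this era:

#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/err.h>

static void __init my_osc_setup(struct device_node *node)
{
    struct clk *clk;

    /* hypothetical 24 MHz fixed-rate clock described by a DT node */
    clk = clk_register_fixed_rate(NULL, node->name, NULL, CLK_IS_ROOT,
                                  24000000);
    if (!IS_ERR(clk))
        of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
CLK_OF_DECLARE(my_osc, "vendor,my-osc", my_osc_setup);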
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 261445c4666f..5d54e3717bf8 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -43,6 +43,7 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
+#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
@@ -53,7 +54,7 @@
* where to place its SVC stack
*/
struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = -1;
+volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
enum ipi_msg_type {
IPI_RESCHEDULE,
@@ -70,7 +71,7 @@ static DEFINE_RAW_SPINLOCK(boot_lock);
* in coherency or not. This is necessary for the hotplug code to work
* reliably.
*/
-static void __cpuinit write_pen_release(int val)
+static void __cpuinit write_pen_release(u64 val)
{
void *start = (void *)&secondary_holding_pen_release;
unsigned long size = sizeof(secondary_holding_pen_release);
@@ -96,7 +97,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
/*
* Update the pen release flag.
*/
- write_pen_release(cpu);
+ write_pen_release(cpu_logical_map(cpu));
/*
* Send an event, causing the secondaries to read pen_release.
@@ -105,7 +106,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
- if (secondary_holding_pen_release == -1UL)
+ if (secondary_holding_pen_release == INVALID_HWID)
break;
udelay(10);
}
@@ -116,7 +117,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
*/
raw_spin_unlock(&boot_lock);
- return secondary_holding_pen_release != -1 ? -ENOSYS : 0;
+ return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
}
static DECLARE_COMPLETION(cpu_running);
@@ -190,7 +191,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* Let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
- write_pen_release(-1);
+ write_pen_release(INVALID_HWID);
/*
* Synchronise with the boot thread.
@@ -244,11 +245,11 @@ static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
{
- const struct smp_enable_ops *ops = enable_ops[0];
+ const struct smp_enable_ops **ops = enable_ops;
- while (ops) {
- if (!strcmp(name, ops->name))
- return ops;
+ while (*ops) {
+ if (!strcmp(name, (*ops)->name))
+ return *ops;
ops++;
}
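Note: the old code initialised the cursor to enable_ops[0] and then advanced that element pointer, so it walked past the first ops structure instead of the table and never reached the NULL sentinel; the fix iterates over the array of pointers itself. The same pattern in isolation (type and variable names illustrative):

#include <stddef.h>
#include <string.h>

struct enable_ops { const char *name; };

static const struct enable_ops spin_table = { "spin-table" };
static const struct enable_ops psci       = { "psci" };

/* NULL-terminated table of pointers, like enable_ops[] above */
static const struct enable_ops *table[] = { &spin_table, &psci, NULL };

static const struct enable_ops *get_ops(const char *name)
{
    const struct enable_ops **ops;

    for (ops = table; *ops; ops++)      /* stop at the NULL sentinel */
        if (!strcmp(name, (*ops)->name))
            return *ops;
    return NULL;
}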
@@ -257,15 +258,80 @@ static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
}
/*
- * Enumerate the possible CPU set from the device tree.
+ * Enumerate the possible CPU set from the device tree and build the
+ * cpu logical map array containing MPIDR values related to logical
+ * cpus. Assumes that cpu_logical_map(0) has already been initialized.
*/
void __init smp_init_cpus(void)
{
const char *enable_method;
struct device_node *dn = NULL;
- int cpu = 0;
+ int i, cpu = 1;
+ bool bootcpu_valid = false;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
+ const u32 *cell;
+ u64 hwid;
+
+ /*
+ * A cpu node with missing "reg" property is
+ * considered invalid to build a cpu_logical_map
+ * entry.
+ */
+ cell = of_get_property(dn, "reg", NULL);
+ if (!cell) {
+ pr_err("%s: missing reg property\n", dn->full_name);
+ goto next;
+ }
+ hwid = of_read_number(cell, of_n_addr_cells(dn));
+
+ /*
+ * Non affinity bits must be set to 0 in the DT
+ */
+ if (hwid & ~MPIDR_HWID_BITMASK) {
+ pr_err("%s: invalid reg property\n", dn->full_name);
+ goto next;
+ }
+
+ /*
+ * Duplicate MPIDRs are a recipe for disaster. Scan
+ * all initialized entries and check for
+ * duplicates. If any is found just ignore the cpu.
+ * cpu_logical_map was initialized to INVALID_HWID to
+ * avoid matching valid MPIDR values.
+ */
+ for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
+ if (cpu_logical_map(i) == hwid) {
+ pr_err("%s: duplicate cpu reg properties in the DT\n",
+ dn->full_name);
+ goto next;
+ }
+ }
+
+ /*
+ * The numbering scheme requires that the boot CPU
+ * must be assigned logical id 0. Record it so that
+ * the logical map built from DT is validated and can
+ * be used.
+ */
+ if (hwid == cpu_logical_map(0)) {
+ if (bootcpu_valid) {
+ pr_err("%s: duplicate boot cpu reg property in DT\n",
+ dn->full_name);
+ goto next;
+ }
+
+ bootcpu_valid = true;
+
+ /*
+ * cpu_logical_map has already been
+ * initialized and the boot cpu doesn't need
+ * the enable-method so continue without
+ * incrementing cpu.
+ */
+ continue;
+ }
+
if (cpu >= NR_CPUS)
goto next;
@@ -274,22 +340,24 @@ void __init smp_init_cpus(void)
*/
enable_method = of_get_property(dn, "enable-method", NULL);
if (!enable_method) {
- pr_err("CPU %d: missing enable-method property\n", cpu);
+ pr_err("%s: missing enable-method property\n",
+ dn->full_name);
goto next;
}
smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
if (!smp_enable_ops[cpu]) {
- pr_err("CPU %d: invalid enable-method property: %s\n",
- cpu, enable_method);
+ pr_err("%s: invalid enable-method property: %s\n",
+ dn->full_name, enable_method);
goto next;
}
if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
goto next;
- set_cpu_possible(cpu, true);
+ pr_debug("cpu logical map 0x%llx\n", hwid);
+ cpu_logical_map(cpu) = hwid;
next:
cpu++;
}
@@ -298,6 +366,19 @@ next:
if (cpu > NR_CPUS)
pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
cpu, NR_CPUS);
+
+ if (!bootcpu_valid) {
+ pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
+ return;
+ }
+
+ /*
+ * All the cpus that made it to the cpu_logical_map have been
+ * validated so set them as possible cpus.
+ */
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpu_logical_map(i) != INVALID_HWID)
+ set_cpu_possible(i, true);
}
void __init smp_prepare_cpus(unsigned int max_cpus)
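Note: smp_init_cpus() now reads each cpu node's "reg" with of_read_number(), honouring #address-cells, and stores the result in cpu_logical_map[]. A simplified sketch of how multi-cell values combine; the real of_read_number() also applies be32_to_cpu() to each big-endian DT cell, which is omitted here:

#include <stdint.h>

/*
 * Simplified model of of_read_number(): concatenate 'ncells' 32-bit
 * cells into a single 64-bit value.
 */
static uint64_t read_cells(const uint32_t *cell, int ncells)
{
    uint64_t r = 0;

    while (ncells--)
        r = (r << 32) | *cell++;
    return r;
}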
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
index 112091684c22..0c533301be77 100644
--- a/arch/arm64/kernel/smp_psci.c
+++ b/arch/arm64/kernel/smp_psci.c
@@ -21,6 +21,7 @@
#include <linux/smp.h>
#include <asm/psci.h>
+#include <asm/smp_plat.h>
static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
{
@@ -36,7 +37,7 @@ static int __init smp_psci_prepare_cpu(int cpu)
return -ENODEV;
}
- err = psci_ops.cpu_on(cpu, __pa(secondary_holding_pen));
+ err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen));
if (err) {
pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
return err;
@@ -47,6 +48,6 @@ static int __init smp_psci_prepare_cpu(int cpu)
const struct smp_enable_ops smp_psci_ops __initconst = {
.name = "psci",
- .init_cpu = smp_psci_init_cpu,
+ .init_cpu = smp_psci_init_cpu,
.prepare_cpu = smp_psci_prepare_cpu,
};
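Note: PSCI's CPU_ON identifies the target core by MPIDR, so the call now passes cpu_logical_map(cpu) rather than the Linux logical index. A minimal sketch of the call shape; the psci_ops.cpu_on() signature (target MPIDR, physical entry point) is assumed from <asm/psci.h> at this point in the tree:

#include <asm/psci.h>
#include <asm/smp_plat.h>

/* firmware addresses CPUs by MPIDR, not by Linux logical id */
static int psci_kick_cpu(int cpu, unsigned long pen_pa)
{
    return psci_ops.cpu_on(cpu_logical_map(cpu), pen_pa);
}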