Diffstat (limited to 'arch/mips/kernel/smp.c')
-rw-r--r--  arch/mips/kernel/smp.c | 56 ++++++++++++++++++++++++++++++++++----------------------
1 file changed, 34 insertions(+), 22 deletions(-)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 770d4d1516cb..d84b9066b465 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -42,13 +42,13 @@
 #include <asm/processor.h>
 #include <asm/idle.h>
 #include <asm/r4k-timer.h>
-#include <asm/mips-cpc.h>
+#include <asm/mips-cps.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
 #include <asm/setup.h>
 #include <asm/maar.h>
 
-int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
+int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
 EXPORT_SYMBOL(__cpu_number_map);
 
 int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
@@ -66,6 +66,7 @@ EXPORT_SYMBOL(cpu_sibling_map);
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
 
+static DECLARE_COMPLETION(cpu_starting);
 static DECLARE_COMPLETION(cpu_running);
 
 /*
@@ -96,8 +97,7 @@ static inline void set_cpu_sibling_map(int cpu)
 
 	if (smp_num_siblings > 1) {
 		for_each_cpu(i, &cpu_sibling_setup_map) {
-			if (cpu_data[cpu].package == cpu_data[i].package &&
-			    cpu_data[cpu].core == cpu_data[i].core) {
+			if (cpus_are_siblings(cpu, i)) {
 				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
 				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
 			}
@@ -134,8 +134,7 @@ void calculate_cpu_foreign_map(void)
 	for_each_online_cpu(i) {
 		core_present = 0;
 		for_each_cpu(k, &temp_foreign_map)
-			if (cpu_data[i].package == cpu_data[k].package &&
-			    cpu_data[i].core == cpu_data[k].core)
+			if (cpus_are_siblings(i, k))
 				core_present = 1;
 		if (!core_present)
 			cpumask_set_cpu(i, &temp_foreign_map);
@@ -146,10 +145,10 @@ void calculate_cpu_foreign_map(void)
 			   &temp_foreign_map, &cpu_sibling_map[i]);
 }
 
-struct plat_smp_ops *mp_ops;
+const struct plat_smp_ops *mp_ops;
 EXPORT_SYMBOL(mp_ops);
 
-void register_smp_ops(struct plat_smp_ops *ops)
+void register_smp_ops(const struct plat_smp_ops *ops)
 {
 	if (mp_ops)
 		printk(KERN_WARNING "Overriding previously set SMP ops\n");
@@ -186,13 +185,13 @@ void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 
 	if (mips_cpc_present()) {
 		for_each_cpu(cpu, mask) {
-			core = cpu_data[cpu].core;
-
-			if (core == current_cpu_data.core)
+			if (cpus_are_siblings(cpu, smp_processor_id()))
 				continue;
 
+			core = cpu_core(&cpu_data[cpu]);
+
 			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
-				mips_cm_lock_other(core, 0);
+				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 				mips_cpc_lock_other(core);
 				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
 				mips_cpc_unlock_other();
@@ -376,9 +375,12 @@ asmlinkage void start_secondary(void)
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 
-	complete(&cpu_running);
+	/* Notify boot CPU that we're starting & ready to sync counters */
+	complete(&cpu_starting);
+
 	synchronise_count_slave(cpu);
 
+	/* The CPU is running and counters synchronised, now mark it online */
 	set_cpu_online(cpu, true);
 
 	set_cpu_sibling_map(cpu);
@@ -387,6 +389,12 @@ asmlinkage void start_secondary(void)
 	calculate_cpu_foreign_map();
 
 	/*
+	 * Notify boot CPU that we're up & online and it can safely return
+	 * from __cpu_up
+	 */
+	complete(&cpu_running);
+
+	/*
 	 * irq will be enabled in ->smp_finish(), enabling it too early
 	 * is dangerous.
 	 */
@@ -441,19 +449,23 @@ void smp_prepare_boot_cpu(void)
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-	mp_ops->boot_secondary(cpu, tidle);
+	int err;
 
-	/*
-	 * We must check for timeout here, as the CPU will not be marked
-	 * online until the counters are synchronised.
-	 */
-	if (!wait_for_completion_timeout(&cpu_running,
+	err = mp_ops->boot_secondary(cpu, tidle);
+	if (err)
+		return err;
+
+	/* Wait for CPU to start and be ready to sync counters */
+	if (!wait_for_completion_timeout(&cpu_starting,
 					 msecs_to_jiffies(1000))) {
 		pr_crit("CPU%u: failed to start\n", cpu);
 		return -EIO;
 	}
 
 	synchronise_count_master(cpu);
+
+	/* Wait for CPU to finish startup & mark itself online before return */
+	wait_for_completion(&cpu_running);
+
 	return 0;
 }
@@ -648,12 +660,12 @@ EXPORT_SYMBOL(flush_tlb_one);
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 
 static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
-static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
+static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);
 
 void tick_broadcast(const struct cpumask *mask)
 {
 	atomic_t *count;
-	struct call_single_data *csd;
+	call_single_data_t *csd;
 	int cpu;
 
 	for_each_cpu(cpu, mask) {
@@ -674,7 +686,7 @@ static void tick_broadcast_callee(void *info)
 
 static int __init tick_broadcast_init(void)
 {
-	struct call_single_data *csd;
+	call_single_data_t *csd;
 	int cpu;
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
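The heart of this change is the split of the single cpu_running completion into a
two-phase handshake: the secondary CPU signals cpu_starting as soon as it is ready
to synchronise counters, the boot CPU then runs synchronise_count_master() in
parallel with the secondary's synchronise_count_slave(), and only after the
secondary has marked itself online does it signal cpu_running, so __cpu_up() can
no longer return before the CPU appears in the online mask. Below is a minimal
userspace sketch of that handshake, with POSIX threads standing in for the
kernel's completion API; the completion stand-in and the boot/secondary roles are
illustrative inventions, not kernel code.

/*
 * Userspace analogue of the cpu_starting/cpu_running handshake above.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

#define DECLARE_COMPLETION(name)					\
	struct completion name = { PTHREAD_MUTEX_INITIALIZER,		\
				   PTHREAD_COND_INITIALIZER, 0 }

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Plays the role of start_secondary() on the incoming CPU. */
static void *secondary_cpu(void *unused)
{
	(void)unused;
	complete(&cpu_starting);	/* starting & ready to sync counters */
	/* ... synchronise_count_slave() would run here ... */
	printf("secondary: counters synced, marking self online\n");
	complete(&cpu_running);		/* online; boot CPU may return */
	return NULL;
}

/* Plays the role of __cpu_up() on the boot CPU. */
int main(void)
{
	pthread_t secondary;

	pthread_create(&secondary, NULL, secondary_cpu, NULL);

	/* Phase 1: wait until the secondary is alive and ready to sync. */
	wait_for_completion(&cpu_starting);
	/*
	 * ... synchronise_count_master() would run here, in parallel with
	 * the secondary's slave-side sync ...
	 */

	/* Phase 2: do not return until the secondary is actually online. */
	wait_for_completion(&cpu_running);
	printf("boot: secondary reported online\n");

	pthread_join(secondary, NULL);
	return 0;
}

Note that the kernel keeps the 1000 ms timeout only on the cpu_starting wait: if
the secondary never signals it, __cpu_up() fails with -EIO, while the later
wait_for_completion(&cpu_running) can safely be unbounded because by then the CPU
is known to be executing.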