 arch/riscv/kernel/traps_misaligned.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 592b1a28e897..34b4a4e9dfca 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -627,6 +627,10 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
 {
 	int cpu;
 
+	/*
+	 * While being documented as very slow, schedule_on_each_cpu() is used since
+	 * kernel_vector_begin() expects irqs to be enabled or it will panic()
+	 */
 	schedule_on_each_cpu(check_vector_unaligned_access_emulated);
 
 	for_each_online_cpu(cpu)
@@ -647,7 +651,7 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
 
 static bool unaligned_ctl __read_mostly;
 
-static void check_unaligned_access_emulated(struct work_struct *work __always_unused)
+static void check_unaligned_access_emulated(void *arg __always_unused)
 {
 	int cpu = smp_processor_id();
 	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
@@ -688,7 +692,7 @@ bool __init check_unaligned_access_emulated_all_cpus(void)
 	 * accesses emulated since tasks requesting such control can run on any
 	 * CPU.
 	 */
-	schedule_on_each_cpu(check_unaligned_access_emulated);
+	on_each_cpu(check_unaligned_access_emulated, NULL, 1);
 
 	for_each_online_cpu(cpu)
 		if (per_cpu(misaligned_access_speed, cpu)
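
The patch hinges on the different execution contexts of the two cross-CPU helpers: schedule_on_each_cpu() runs its callback as a work item in process context (irqs enabled, may sleep, documented as very slow), while on_each_cpu() runs it from IPI context (irqs disabled, must not sleep) and is much cheaper. The vector probe must keep schedule_on_each_cpu() because kernel_vector_begin() panics when irqs are disabled; the scalar probe has no such constraint, so it can switch to on_each_cpu(), which is also why its callback signature changes from work_func_t to smp_call_func_t. Below is a minimal sketch of the contrast; it is a hypothetical standalone module, not part of the patch, and probe_fast()/probe_slow() are invented names:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch contrasting the two cross-CPU helpers above. */
#include <linux/module.h>
#include <linux/smp.h>        /* on_each_cpu(), smp_processor_id() */
#include <linux/workqueue.h>  /* schedule_on_each_cpu() */

/*
 * on_each_cpu() invokes its callback from IPI context with irqs
 * disabled, hence the smp_call_func_t signature void (*)(void *).
 * The callback must not sleep.
 */
static void probe_fast(void *unused)
{
	pr_info("probed cpu %d with irqs disabled\n", smp_processor_id());
}

/*
 * schedule_on_each_cpu() queues one work item on every CPU's bound
 * workqueue and flushes them all before returning; the callback runs
 * in process context with irqs enabled (as kernel_vector_begin()
 * requires), hence the work_func_t signature.
 */
static void probe_slow(struct work_struct *unused)
{
	pr_info("probed cpu %d in process context\n", smp_processor_id());
}

static int __init probe_init(void)
{
	/* wait=1: return only after every online CPU has run probe_fast() */
	on_each_cpu(probe_fast, NULL, 1);
	/* Much slower: allocates, queues and flushes a work item per CPU */
	return schedule_on_each_cpu(probe_slow);
}
module_init(probe_init);
MODULE_LICENSE("GPL");

Passing wait=1 to on_each_cpu() keeps the call synchronous, mirroring schedule_on_each_cpu(), so the for_each_online_cpu() check in the patch still sees every CPU's probe result.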