Diffstat (limited to 'arch/loongarch/kernel')
-rw-r--r--  arch/loongarch/kernel/acpi.c          |  22
-rw-r--r--  arch/loongarch/kernel/efi.c           |   6
-rw-r--r--  arch/loongarch/kernel/head.S          |  11
-rw-r--r--  arch/loongarch/kernel/hw_breakpoint.c |   2
-rw-r--r--  arch/loongarch/kernel/kprobes.c       |   4
-rw-r--r--  arch/loongarch/kernel/paravirt.c      | 151
-rw-r--r--  arch/loongarch/kernel/ptrace.c        |   3
-rw-r--r--  arch/loongarch/kernel/relocate.c      |  52
-rw-r--r--  arch/loongarch/kernel/setup.c         |   4
-rw-r--r--  arch/loongarch/kernel/smp.c           |  21
-rw-r--r--  arch/loongarch/kernel/syscall.c       |  22
-rw-r--r--  arch/loongarch/kernel/time.c          |   2
-rw-r--r--  arch/loongarch/kernel/vmlinux.lds.S   |   8
13 files changed, 280 insertions(+), 28 deletions(-)
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 5cf59c617126..929a497c987e 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -57,15 +57,22 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 	return ioremap_cache(phys, size);
 }
 
+static int cpu_enumerated = 0;
+
 #ifdef CONFIG_SMP
 static int set_processor_mask(u32 id, u32 flags)
 {
-
+	int nr_cpus;
 	int cpu, cpuid = id;
 
-	if (num_processors >= nr_cpu_ids) {
-		pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
-			" processor 0x%x ignored.\n", nr_cpu_ids, cpuid);
+	if (!cpu_enumerated)
+		nr_cpus = NR_CPUS;
+	else
+		nr_cpus = nr_cpu_ids;
+
+	if (num_processors >= nr_cpus) {
+		pr_warn(PREFIX "nr_cpus limit of %i reached."
+			" processor 0x%x ignored.\n", nr_cpus, cpuid);
 
 		return -ENODEV;
@@ -73,11 +80,13 @@ static int set_processor_mask(u32 id, u32 flags)
 	if (cpuid == loongson_sysconf.boot_cpu_id)
 		cpu = 0;
 	else
-		cpu = cpumask_next_zero(-1, cpu_present_mask);
+		cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
+
+	if (!cpu_enumerated)
+		set_cpu_possible(cpu, true);
 
 	if (flags & ACPI_MADT_ENABLED) {
 		num_processors++;
-		set_cpu_possible(cpu, true);
 		set_cpu_present(cpu, true);
 		__cpu_number_map[cpuid] = cpu;
 		__cpu_logical_map[cpu] = cpuid;
@@ -138,6 +147,7 @@ static void __init acpi_process_madt(void)
 	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
 			acpi_parse_eio_master, MAX_IO_PICS);
 
+	cpu_enumerated = 1;
 	loongson_sysconf.nr_cpus = num_processors;
 }
 
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
index 000825406c1f..2bf86aeda874 100644
--- a/arch/loongarch/kernel/efi.c
+++ b/arch/loongarch/kernel/efi.c
@@ -66,6 +66,12 @@ void __init efi_runtime_init(void)
 	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 }
 
+bool efi_poweroff_required(void)
+{
+	return efi_enabled(EFI_RUNTIME_SERVICES) &&
+	       (acpi_gbl_reduced_hardware || acpi_no_s5);
+}
+
 unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;
 
 #if defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON)
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index 4677ea8fa8e9..506a99a5bbc7 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -44,11 +44,7 @@ SYM_DATA(kernel_fsize, .long _kernel_fsize);
 SYM_CODE_START(kernel_entry)			# kernel entry point
 
 	/* Config direct window and set PG */
-	li.d		t0, CSR_DMW0_INIT	# UC, PLV0, 0x8000 xxxx xxxx xxxx
-	csrwr		t0, LOONGARCH_CSR_DMWIN0
-	li.d		t0, CSR_DMW1_INIT	# CA, PLV0, 0x9000 xxxx xxxx xxxx
-	csrwr		t0, LOONGARCH_CSR_DMWIN1
-
+	SETUP_DMWINS	t0
 	JUMP_VIRT_ADDR	t0, t1
 
 	/* Enable PG */
@@ -124,11 +120,8 @@ SYM_CODE_END(kernel_entry)
  * function after setting up the stack and tp registers.
  */
 SYM_CODE_START(smpboot_entry)
-	li.d		t0, CSR_DMW0_INIT	# UC, PLV0
-	csrwr		t0, LOONGARCH_CSR_DMWIN0
-	li.d		t0, CSR_DMW1_INIT	# CA, PLV0
-	csrwr		t0, LOONGARCH_CSR_DMWIN1
+	SETUP_DMWINS	t0
 	JUMP_VIRT_ADDR	t0, t1
 
 #ifdef CONFIG_PAGE_SIZE_4KB
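The acpi.c hunks above split CPU accounting into two phases: while the MADT is still being parsed, the cap is the compile-time NR_CPUS, so disabled-but-present processors can still be marked possible for later hotplug; once enumeration finishes (cpu_enumerated = 1), the runtime nr_cpu_ids limit applies. A minimal standalone sketch of that two-phase check, with simplified, hypothetical names (limit(), add_processor()), not kernel code:

#include <stdio.h>

#define NR_CPUS 64			/* compile-time cap */

static int cpu_enumerated;
static int num_processors;
static int nr_cpu_ids = 4;		/* runtime cap, e.g. "nr_cpus=4" */

static int limit(void)			/* the nr_cpus selection in set_processor_mask() */
{
	return cpu_enumerated ? nr_cpu_ids : NR_CPUS;
}

static int add_processor(unsigned int id)
{
	if (num_processors >= limit()) {
		fprintf(stderr, "nr_cpus limit of %d reached, processor 0x%x ignored\n",
			limit(), id);
		return -1;
	}
	num_processors++;
	return 0;
}

int main(void)
{
	for (unsigned int id = 0; id < 8; id++)
		add_processor(id);	/* all accepted: NR_CPUS applies while enumerating */

	cpu_enumerated = 1;		/* as set at the end of acpi_process_madt() */
	add_processor(8);		/* rejected: nr_cpu_ids (4) now applies */
	printf("%d processors enumerated\n", num_processors);
	return 0;
}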
diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
index 621ad7634df7..a6e4b605bfa8 100644
--- a/arch/loongarch/kernel/hw_breakpoint.c
+++ b/arch/loongarch/kernel/hw_breakpoint.c
@@ -221,7 +221,7 @@ static int hw_breakpoint_control(struct perf_event *bp,
 		}
 		enable = csr_read64(LOONGARCH_CSR_CRMD);
 		csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
-		if (bp->hw.target)
+		if (bp->hw.target && test_tsk_thread_flag(bp->hw.target, TIF_LOAD_WATCH))
 			regs->csr_prmd |= CSR_PRMD_PWE;
 		break;
 	case HW_BREAKPOINT_UNINSTALL:
diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
index 17b040bd6067..8ba391cfabb0 100644
--- a/arch/loongarch/kernel/kprobes.c
+++ b/arch/loongarch/kernel/kprobes.c
@@ -4,8 +4,8 @@
 #include <linux/preempt.h>
 #include <asm/break.h>
 
-#define KPROBE_BP_INSN		larch_insn_gen_break(BRK_KPROBE_BP)
-#define KPROBE_SSTEPBP_INSN	larch_insn_gen_break(BRK_KPROBE_SSTEPBP)
+#define KPROBE_BP_INSN		__emit_break(BRK_KPROBE_BP)
+#define KPROBE_SSTEPBP_INSN	__emit_break(BRK_KPROBE_SSTEPBP)
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe);
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index 1633ed4f692f..9c9b75b76f62 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -2,13 +2,17 @@
 #include <linux/export.h>
 #include <linux/types.h>
 #include <linux/interrupt.h>
+#include <linux/irq_work.h>
 #include <linux/jump_label.h>
 #include <linux/kvm_para.h>
+#include <linux/reboot.h>
 #include <linux/static_call.h>
 #include <asm/paravirt.h>
 
+static int has_steal_clock;
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
+static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
 
 static u64 native_steal_clock(int cpu)
 {
@@ -17,6 +21,34 @@ static u64 native_steal_clock(int cpu)
 
 DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
 
+static bool steal_acc = true;
+
+static int __init parse_no_stealacc(char *arg)
+{
+	steal_acc = false;
+	return 0;
+}
+early_param("no-steal-acc", parse_no_stealacc);
+
+static u64 paravt_steal_clock(int cpu)
+{
+	int version;
+	u64 steal;
+	struct kvm_steal_time *src;
+
+	src = &per_cpu(steal_time, cpu);
+	do {
+		version = src->version;
+		virt_rmb(); /* Make sure that the version is read before the steal */
+		steal = src->steal;
+		virt_rmb(); /* Make sure that the steal is read before the next version */
+	} while ((version & 1) || (version != src->version));
+
+	return steal;
+}
+
 #ifdef CONFIG_SMP
 static void pv_send_ipi_single(int cpu, unsigned int action)
 {
@@ -97,6 +129,11 @@ static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
 		info->ipi_irqs[IPI_CALL_FUNCTION]++;
 	}
 
+	if (action & SMP_IRQ_WORK) {
+		irq_work_run();
+		info->ipi_irqs[IPI_IRQ_WORK]++;
+	}
+
 	return IRQ_HANDLED;
 }
 
@@ -149,3 +186,117 @@ int __init pv_ipi_init(void)
 
 	return 0;
 }
+
+static int pv_enable_steal_time(void)
+{
+	int cpu = smp_processor_id();
+	unsigned long addr;
+	struct kvm_steal_time *st;
+
+	if (!has_steal_clock)
+		return -EPERM;
+
+	st = &per_cpu(steal_time, cpu);
+	addr = per_cpu_ptr_to_phys(st);
+
+	/* The whole structure kvm_steal_time should be in one page */
+	if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
+		pr_warn("Illegal PV steal time addr %lx\n", addr);
+		return -EFAULT;
+	}
+
+	addr |= KVM_STEAL_PHYS_VALID;
+	kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr);
+
+	return 0;
+}
+
+static void pv_disable_steal_time(void)
+{
+	if (has_steal_clock)
+		kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0);
+}
+
+#ifdef CONFIG_SMP
+static int pv_time_cpu_online(unsigned int cpu)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	pv_enable_steal_time();
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+static int pv_time_cpu_down_prepare(unsigned int cpu)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	pv_disable_steal_time();
+	local_irq_restore(flags);
+
+	return 0;
+}
+#endif
+
+static void pv_cpu_reboot(void *unused)
+{
+	pv_disable_steal_time();
+}
+
+static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused)
+{
+	on_each_cpu(pv_cpu_reboot, NULL, 1);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block pv_reboot_nb = {
+	.notifier_call = pv_reboot_notify,
+};
+
+int __init pv_time_init(void)
+{
+	int r, feature;
+
+	if (!cpu_has_hypervisor)
+		return 0;
+	if (!kvm_para_available())
+		return 0;
+
+	feature = read_cpucfg(CPUCFG_KVM_FEATURE);
+	if (!(feature & KVM_FEATURE_STEAL_TIME))
+		return 0;
+
+	has_steal_clock = 1;
+	r = pv_enable_steal_time();
+	if (r < 0) {
+		has_steal_clock = 0;
+		return 0;
+	}
+	register_reboot_notifier(&pv_reboot_nb);
+
+#ifdef CONFIG_SMP
+	r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+				      "loongarch/pv_time:online",
+				      pv_time_cpu_online, pv_time_cpu_down_prepare);
+	if (r < 0) {
+		has_steal_clock = 0;
+		pr_err("Failed to install cpu hotplug callbacks\n");
+		return r;
+	}
+#endif
+
+	static_call_update(pv_steal_clock, paravt_steal_clock);
+
+	static_key_slow_inc(&paravirt_steal_enabled);
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+	if (steal_acc)
+		static_key_slow_inc(&paravirt_steal_rq_enabled);
+#endif
+
+	pr_info("Using paravirt steal-time\n");
+
+	return 0;
+}
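paravt_steal_clock() above uses a seqcount-style handshake over the shared kvm_steal_time record: the hypervisor bumps version to an odd value before updating steal and back to an even value afterwards, so the guest retries whenever it observes an odd or changed version. A user-space model of both sides of that protocol; the struct and function names here are made up for illustration:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct steal_time_model {
	_Atomic uint32_t version;
	_Atomic uint64_t steal;
};

static uint64_t read_steal(struct steal_time_model *src)	/* guest side */
{
	uint32_t version;
	uint64_t steal;

	do {
		version = atomic_load_explicit(&src->version, memory_order_acquire);
		steal = atomic_load_explicit(&src->steal, memory_order_acquire);
	} while ((version & 1) ||
		 version != atomic_load_explicit(&src->version, memory_order_acquire));

	return steal;
}

static void write_steal(struct steal_time_model *dst, uint64_t val)	/* host side */
{
	atomic_fetch_add(&dst->version, 1);	/* odd: update in progress */
	atomic_store_explicit(&dst->steal, val, memory_order_release);
	atomic_fetch_add(&dst->version, 1);	/* even: record consistent again */
}

int main(void)
{
	static struct steal_time_model st;

	write_steal(&st, 12345);
	printf("steal = %llu\n", (unsigned long long)read_steal(&st));
	return 0;
}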
pr_warn("Illegal PV steal time addr %lx\n", addr); + return -EFAULT; + } + + addr |= KVM_STEAL_PHYS_VALID; + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr); + + return 0; +} + +static void pv_disable_steal_time(void) +{ + if (has_steal_clock) + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0); +} + +#ifdef CONFIG_SMP +static int pv_time_cpu_online(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_enable_steal_time(); + local_irq_restore(flags); + + return 0; +} + +static int pv_time_cpu_down_prepare(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_disable_steal_time(); + local_irq_restore(flags); + + return 0; +} +#endif + +static void pv_cpu_reboot(void *unused) +{ + pv_disable_steal_time(); +} + +static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) +{ + on_each_cpu(pv_cpu_reboot, NULL, 1); + return NOTIFY_DONE; +} + +static struct notifier_block pv_reboot_nb = { + .notifier_call = pv_reboot_notify, +}; + +int __init pv_time_init(void) +{ + int r, feature; + + if (!cpu_has_hypervisor) + return 0; + if (!kvm_para_available()) + return 0; + + feature = read_cpucfg(CPUCFG_KVM_FEATURE); + if (!(feature & KVM_FEATURE_STEAL_TIME)) + return 0; + + has_steal_clock = 1; + r = pv_enable_steal_time(); + if (r < 0) { + has_steal_clock = 0; + return 0; + } + register_reboot_notifier(&pv_reboot_nb); + +#ifdef CONFIG_SMP + r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "loongarch/pv_time:online", + pv_time_cpu_online, pv_time_cpu_down_prepare); + if (r < 0) { + has_steal_clock = 0; + pr_err("Failed to install cpu hotplug callbacks\n"); + return r; + } +#endif + + static_call_update(pv_steal_clock, paravt_steal_clock); + + static_key_slow_inc(¶virt_steal_enabled); +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING + if (steal_acc) + static_key_slow_inc(¶virt_steal_rq_enabled); +#endif + + pr_info("Using paravirt steal-time\n"); + + return 0; +} diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index 200109de1971..19dc6eff45cc 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -589,6 +589,7 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type, struct perf_event *bp; struct perf_event_attr attr; struct arch_hw_breakpoint_ctrl ctrl; + struct thread_info *ti = task_thread_info(tsk); bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); if (IS_ERR(bp)) @@ -613,8 +614,10 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type, if (err) return err; attr.disabled = 0; + set_ti_thread_flag(ti, TIF_LOAD_WATCH); } else { attr.disabled = 1; + clear_ti_thread_flag(ti, TIF_LOAD_WATCH); } return modify_user_hw_breakpoint(bp, &attr); diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index 1acfa704c8d0..50c469067f3a 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -13,6 +13,7 @@ #include <asm/bootinfo.h> #include <asm/early_ioremap.h> #include <asm/inst.h> +#include <asm/io.h> #include <asm/sections.h> #include <asm/setup.h> @@ -34,11 +35,27 @@ static inline void __init relocate_relative(void) if (rela->r_info != R_LARCH_RELATIVE) continue; - if (relocated_addr >= VMLINUX_LOAD_ADDRESS) - relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr); - + relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr); *(Elf64_Addr *)RELOCATED(addr) = relocated_addr; } + +#ifdef CONFIG_RELR + u64 *addr = NULL; + u64 *relr = (u64 *)&__relr_dyn_begin; + u64 *relr_end = (u64 
*)&__relr_dyn_end; + + for ( ; relr < relr_end; relr++) { + if ((*relr & 1) == 0) { + addr = (u64 *)(*relr + reloc_offset); + *addr++ += reloc_offset; + } else { + for (u64 *p = addr, r = *relr >> 1; r; p++, r >>= 1) + if (r & 1) + *p += reloc_offset; + addr += 63; + } + } +#endif } static inline void __init relocate_absolute(long random_offset) @@ -123,6 +140,32 @@ static inline __init bool kaslr_disabled(void) if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) return true; +#ifdef CONFIG_HIBERNATION + str = strstr(builtin_cmdline, "nohibernate"); + if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' ')) + return false; + + str = strstr(boot_command_line, "nohibernate"); + if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) + return false; + + str = strstr(builtin_cmdline, "noresume"); + if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' ')) + return false; + + str = strstr(boot_command_line, "noresume"); + if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) + return false; + + str = strstr(builtin_cmdline, "resume="); + if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' ')) + return true; + + str = strstr(boot_command_line, "resume="); + if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) + return true; +#endif + return false; } @@ -170,7 +213,7 @@ unsigned long __init relocate_kernel(void) unsigned long kernel_length; unsigned long random_offset = 0; void *location_new = _text; /* Default to original kernel start */ - char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */ + char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */ strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE); @@ -182,6 +225,7 @@ unsigned long __init relocate_kernel(void) random_offset = (unsigned long)location_new - (unsigned long)(_text); #endif reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS; + early_memunmap(cmdline, COMMAND_LINE_SIZE); if (random_offset) { kernel_length = (long)(_end) - (long)(_text); diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 3d048f1be143..0f0740f0be27 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -576,8 +576,10 @@ static void __init prefill_possible_map(void) for (i = 0; i < possible; i++) set_cpu_possible(i, true); - for (; i < NR_CPUS; i++) + for (; i < NR_CPUS; i++) { + set_cpu_present(i, false); set_cpu_possible(i, false); + } set_nr_cpu_ids(possible); } diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 1436d2465939..ca405ab86aae 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -13,6 +13,7 @@ #include <linux/cpumask.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/irq_work.h> #include <linux/profile.h> #include <linux/seq_file.h> #include <linux/smp.h> @@ -70,6 +71,7 @@ static DEFINE_PER_CPU(int, cpu_state); static const char *ipi_types[NR_IPI] __tracepoint_string = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNCTION] = "Function call interrupts", + [IPI_IRQ_WORK] = "IRQ work interrupts", }; void show_ipi_list(struct seq_file *p, int prec) @@ -217,6 +219,13 @@ void arch_smp_send_reschedule(int cpu) } EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); +#ifdef CONFIG_IRQ_WORK +void arch_irq_work_raise(void) +{ + 
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index ec17cd5163b7..ba5d0930a74f 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -9,11 +9,14 @@
 #include <linux/entry-common.h>
 #include <linux/errno.h>
 #include <linux/linkage.h>
+#include <linux/objtool.h>
+#include <linux/randomize_kstack.h>
 #include <linux/syscalls.h>
 #include <linux/unistd.h>
 
 #include <asm/asm.h>
 #include <asm/exception.h>
+#include <asm/loongarch.h>
 #include <asm/signal.h>
 #include <asm/switch_to.h>
 #include <asm-generic/syscalls.h>
@@ -39,7 +42,7 @@ void *sys_call_table[__NR_syscalls] = {
 typedef long (*sys_call_fn)(unsigned long, unsigned long,
 	unsigned long, unsigned long, unsigned long, unsigned long);
 
-void noinstr do_syscall(struct pt_regs *regs)
+void noinstr __no_stack_protector do_syscall(struct pt_regs *regs)
 {
 	unsigned long nr;
 	sys_call_fn syscall_fn;
@@ -55,11 +58,28 @@ void noinstr do_syscall(struct pt_regs *regs)
 
 	nr = syscall_enter_from_user_mode(regs, nr);
 
+	add_random_kstack_offset();
+
 	if (nr < NR_syscalls) {
 		syscall_fn = sys_call_table[nr];
 		regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
 					   regs->regs[7], regs->regs[8], regs->regs[9]);
 	}
 
+	/*
+	 * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
+	 * bits. The actual entropy will be further reduced by the compiler
+	 * when applying stack alignment constraints: 16-bytes (i.e. 4-bits)
+	 * aligned, which will remove the 4 low bits from any entropy chosen
+	 * here.
+	 *
+	 * The resulting 6 bits of entropy is seen in SP[9:4].
+	 */
+	choose_random_kstack_offset(drdtime());
+
 	syscall_exit_to_user_mode(regs);
 }
+
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+STACK_FRAME_NON_STANDARD(do_syscall);
+#endif
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index fd5354f9be7c..46d7d40c87e3 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -15,6 +15,7 @@
 
 #include <asm/cpu-features.h>
 #include <asm/loongarch.h>
+#include <asm/paravirt.h>
 #include <asm/time.h>
 
 u64 cpu_clock_freq;
@@ -214,4 +215,5 @@ void __init time_init(void)
 
 	constant_clockevent_init();
 	constant_clocksource_init();
+	pv_time_init();
 }
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index 3c7595342730..08ea921cdec1 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -113,6 +113,14 @@ SECTIONS
 		__rela_dyn_end = .;
 	}
 
+#ifdef CONFIG_RELR
+	.relr.dyn : ALIGN(8) {
+		__relr_dyn_begin = .;
+		*(.relr.dyn)
+		__relr_dyn_end = .;
+	}
+#endif
+
 	.data.rel : { *(.data.rel*) }
 
 #ifdef CONFIG_RELOCATABLE
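The comment added to syscall.c spells out the entropy budget of the randomized kernel stack offset: the raw drdtime() sample is capped by KSTACK_OFFSET_MAX() at 10 bits, and the compiler's 16-byte stack alignment then discards the low 4 bits, leaving 6 usable bits in SP[9:4]. A small arithmetic model of that masking; the macro below is a stand-in, not the kernel's definition from include/linux/randomize_kstack.h:

#include <stdint.h>
#include <stdio.h>

#define KSTACK_OFFSET_MAX_MODEL(x)	((x) & 0x3ff)	/* keep 10 bits */

int main(void)
{
	uint64_t fake_drdtime = 0xdeadbeef;	/* stands in for drdtime() */
	uint32_t offset = KSTACK_OFFSET_MAX_MODEL(fake_drdtime);
	uint32_t aligned = offset & ~0xfU;	/* 16-byte stack alignment */

	printf("raw: %u, after alignment: %u (one of %u possible offsets)\n",
	       offset, aligned, 1U << 6);
	return 0;
}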