Diffstat (limited to 'arch')
1130 files changed, 23478 insertions, 19441 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index 4790a5f23d9f..2bb30673d8e6 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -124,8 +124,8 @@ config HAVE_64BIT_ALIGNED_ACCESS accesses are required to be 64 bit aligned in this way even though it is not a 64 bit architecture. - See Documentation/unaligned-memory-access.txt for more - information on the topic of unaligned memory accesses. + See Documentation/core-api/unaligned-memory-access.rst for + more information on the topic of unaligned memory accesses. config HAVE_EFFICIENT_UNALIGNED_ACCESS bool @@ -295,6 +295,10 @@ config ARCH_32BIT_OFF_T still support 32-bit off_t. This option is enabled for all such architectures explicitly. +# Selected by 64 bit architectures which have a 32 bit f_tinode in struct ustat +config ARCH_32BIT_USTAT_F_TINODE + bool + config HAVE_ASM_MODVERSIONS bool help @@ -599,6 +603,96 @@ config SHADOW_CALL_STACK reading and writing arbitrary memory may be able to locate them and hijack control flow by modifying the stacks. +config LTO + bool + help + Selected if the kernel will be built using the compiler's LTO feature. + +config LTO_CLANG + bool + select LTO + help + Selected if the kernel will be built using Clang's LTO feature. + +config ARCH_SUPPORTS_LTO_CLANG + bool + help + An architecture should select this option if it supports: + - compiling with Clang, + - compiling inline assembly with Clang's integrated assembler, + - and linking with LLD. + +config ARCH_SUPPORTS_LTO_CLANG_THIN + bool + help + An architecture should select this option if it can support Clang's + ThinLTO mode. + +config HAS_LTO_CLANG + def_bool y + # Clang >= 11: https://github.com/ClangBuiltLinux/linux/issues/510 + depends on CC_IS_CLANG && CLANG_VERSION >= 110000 && LD_IS_LLD + depends on $(success,test $(LLVM) -eq 1) + depends on $(success,test $(LLVM_IAS) -eq 1) + depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm) + depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm) + depends on ARCH_SUPPORTS_LTO_CLANG + depends on !FTRACE_MCOUNT_USE_RECORDMCOUNT + depends on !KASAN + depends on !GCOV_KERNEL + help + The compiler and Kconfig options support building with Clang's + LTO. + +choice + prompt "Link Time Optimization (LTO)" + default LTO_NONE + help + This option enables Link Time Optimization (LTO), which allows the + compiler to optimize binaries globally. + + If unsure, select LTO_NONE. Note that LTO is very resource-intensive, + so it's disabled by default. + +config LTO_NONE + bool "None" + help + Build the kernel normally, without Link Time Optimization (LTO). + +config LTO_CLANG_FULL + bool "Clang Full LTO (EXPERIMENTAL)" + depends on HAS_LTO_CLANG + depends on !COMPILE_TEST + select LTO_CLANG + help + This option enables Clang's full Link Time Optimization (LTO), which + allows the compiler to optimize the kernel globally. If you enable + this option, the compiler generates LLVM bitcode instead of ELF + object files, and the actual compilation from bitcode happens at + the LTO link step, which may take several minutes depending on the + kernel configuration. More information can be found in LLVM's + documentation: + + https://llvm.org/docs/LinkTimeOptimization.html + + During link time, this option can use a large amount of RAM, and + may take much longer than the ThinLTO option.
+ +config LTO_CLANG_THIN + bool "Clang ThinLTO (EXPERIMENTAL)" + depends on HAS_LTO_CLANG && ARCH_SUPPORTS_LTO_CLANG_THIN + select LTO_CLANG + help + This option enables Clang's ThinLTO, which allows for parallel + optimization and faster incremental compiles compared to the + CONFIG_LTO_CLANG_FULL option. More information can be found + in Clang's documentation: + + https://clang.llvm.org/docs/ThinLTO.html + + If unsure, say Y. +endchoice + config HAVE_ARCH_WITHIN_STACK_FRAMES bool help @@ -727,6 +821,12 @@ config HAVE_IRQ_EXIT_ON_IRQ_STACK This spares a stack switch and improves cache usage on softirq processing. +config HAVE_SOFTIRQ_ON_OWN_STACK + bool + help + Architecture provides a function to run __do_softirq() on a + separate stack. + config PGTABLE_LEVELS int default 2 diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index a401c1481a11..5998106faa60 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -2,6 +2,7 @@ config ALPHA bool default y + select ARCH_32BIT_USTAT_F_TINODE select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select ARCH_NO_PREEMPT diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig index 6293675db164..724c4075df40 100644 --- a/arch/alpha/configs/defconfig +++ b/arch/alpha/configs/defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 6c71554206cc..5112ab996394 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -249,7 +249,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childti->pcb.ksp = (unsigned long) childstack; childti->pcb.flags = 1; /* set FEN, clear everything else */ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(childstack, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs)); diff --git a/arch/alpha/kernel/syscalls/Makefile b/arch/alpha/kernel/syscalls/Makefile index 659faefdcb1d..285aaba832d9 100644 --- a/arch/alpha/kernel/syscalls/Makefile +++ b/arch/alpha/kernel/syscalls/Makefile @@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') -syscall := $(srctree)/$(src)/syscall.tbl +syscall := $(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh systbl := $(srctree)/$(src)/syscalltbl.sh @@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_abi_$(basetarget))' \ '$(systbl_offset_$(basetarget))' -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -$(kapi)/syscall_table.h: $(syscall) $(systbl) +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h kapisyshdr-y += syscall_table.h -targets += $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl index a6617067dbe6..02f0244e005c 100644 --- a/arch/alpha/kernel/syscalls/syscall.tbl +++ b/arch/alpha/kernel/syscalls/syscall.tbl @@ -481,3 +481,4 @@ 549 common faccessat2
sys_faccessat2 550 common process_madvise sys_process_madvise 551 common epoll_pwait2 sys_epoll_pwait2 +552 common mount_setattr sys_mount_setattr diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 37f724ad5e39..d838d0d57696 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -191,7 +191,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childksp[0] = 0; /* fp */ childksp[1] = (unsigned long)ret_from_fork; /* blink */ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(c_regs, 0, sizeof(struct pt_regs)); c_callee->r13 = kthread_arg; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 6d0ed2888935..853aab5ab327 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -23,6 +23,7 @@ config ARM select ARCH_HAS_TEARDOWN_DMA_OPS if MMU select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_CUSTOM_GPIO_H + select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_KEEP_MEMBLOCK select ARCH_MIGHT_HAVE_PC_PARPORT @@ -1856,9 +1857,10 @@ config AUTO_ZRELADDR help ZRELADDR is the physical address where the decompressed kernel image will be placed. If AUTO_ZRELADDR is selected, the address - will be determined at run-time by masking the current IP with - 0xf8000000. This assumes the zImage being placed in the first 128MB - from start of memory. + will be determined at run-time, either by masking the current IP + with 0xf8000000, or, if invalid, from the DTB passed in r2. + This assumes the zImage is placed in the first 128MB from + the start of memory. config EFI_STUB bool diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index c36c5d4c6e9c..9e0b5e7f12af 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -1158,10 +1158,9 @@ choice Say Y here if you want kernel low-level debugging support on ST SPEAr13xx based platforms. - config STIH41X_DEBUG_ASC2 + config DEBUG_STIH41X_ASC2 bool "Use StiH415/416 ASC2 UART for low-level debug" depends on ARCH_STI - select DEBUG_STI_UART help Say Y here if you want kernel low-level debugging support on STiH415/416 based platforms like b2000, which has @@ -1169,10 +1168,9 @@ choice If unsure, say N. - config STIH41X_DEBUG_SBC_ASC1 + config DEBUG_STIH41X_SBC_ASC1 bool "Use StiH415/416 SBC ASC1 UART for low-level debug" depends on ARCH_STI - select DEBUG_STI_UART help Say Y here if you want kernel low-level debugging support on STiH415/416 based platforms like b2020, which has @@ -1180,6 +1178,16 @@ choice If unsure, say N. + config DEBUG_STIH418_SBC_ASC0 + bool "Use StiH418 SBC ASC0 UART for low-level debug" + depends on ARCH_STI + help + Say Y here if you want kernel low-level debugging support + on STiH418 based platforms which have the default UART wired + up to SBC ASC0. + + If unsure, say N.
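As an aside on the AUTO_ZRELADDR change above: a minimal C sketch of the derivation the new help text describes, under the assumption that everything except fdt_check_mem_start() (which this series adds in arch/arm/boot/compressed/fdt_check_mem_start.c) is hypothetical glue rather than kernel code:

    #include <stdint.h>

    /* Added later in this diff: returns mem_start if the DTB confirms it,
     * otherwise the lowest usable RAM address from the DTB, rounded up to
     * 2 MiB for phys/virt patching. */
    uint32_t fdt_check_mem_start(uint32_t mem_start, const void *fdt);

    /* Hypothetical helper mirroring what head.S now does before adding
     * TEXT_OFFSET: mask the current PC down to a 128 MiB boundary, then
     * let the DTB override the guess when it turns out to be invalid. */
    static uint32_t derive_mem_start(uint32_t pc, const void *fdt)
    {
            uint32_t mem_start = pc & 0xf8000000; /* zImage assumed in first 128 MB */

            return fdt_check_mem_start(mem_start, fdt);
    }
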
+ config STM32F4_DEBUG_UART bool "Use STM32F4 UART for low-level debug" depends on MACH_STM32F429 || MACH_STM32F469 @@ -1484,10 +1492,6 @@ config DEBUG_TEGRA_UART bool depends on ARCH_TEGRA -config DEBUG_STI_UART - bool - depends on ARCH_STI - config DEBUG_STM32_UART bool depends on ARCH_STM32 @@ -1546,7 +1550,9 @@ config DEBUG_LL_INCLUDE default "debug/renesas-scif.S" if DEBUG_RMOBILE_SCIFA4 default "debug/s3c24xx.S" if DEBUG_S3C24XX_UART || DEBUG_S3C64XX_UART default "debug/s5pv210.S" if DEBUG_S5PV210_UART - default "debug/sti.S" if DEBUG_STI_UART + default "debug/sti.S" if DEBUG_STIH41X_ASC2 + default "debug/sti.S" if DEBUG_STIH41X_SBC_ASC1 + default "debug/sti.S" if DEBUG_STIH418_SBC_ASC0 default "debug/stm32.S" if DEBUG_STM32_UART default "debug/tegra.S" if DEBUG_TEGRA_UART default "debug/ux500.S" if DEBUG_UX500_UART @@ -1579,6 +1585,7 @@ config DEBUG_UART_PHYS default 0x02531000 if DEBUG_KEYSTONE_UART1 default 0x03010fe0 if ARCH_RPC default 0x07000000 if DEBUG_SUN9I_UART0 + default 0x09530000 if DEBUG_STIH418_SBC_ASC0 default 0x10009000 if DEBUG_REALVIEW_STD_PORT || \ DEBUG_VEXPRESS_UART0_CA9 default 0x1010c000 if DEBUG_REALVIEW_PB1176_PORT @@ -1671,7 +1678,9 @@ config DEBUG_UART_PHYS default 0xfc00c000 if DEBUG_AT91_SAMA5D4_USART3 default 0xfcb00000 if DEBUG_HI3620_UART default 0xfd883000 if DEBUG_ALPINE_UART0 + default 0xfe531000 if DEBUG_STIH41X_SBC_ASC1 default 0xfe800000 if ARCH_IOP32X + default 0xfed32000 if DEBUG_STIH41X_ASC2 default 0xff690000 if DEBUG_RK32_UART2 default 0xffc02000 if DEBUG_SOCFPGA_UART0 default 0xffc02100 if DEBUG_SOCFPGA_ARRIA10_UART1 @@ -1699,7 +1708,9 @@ config DEBUG_UART_PHYS DEBUG_S3C64XX_UART || \ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \ DEBUG_DIGICOLOR_UA0 || \ - DEBUG_AT91_UART || DEBUG_STM32_UART + DEBUG_AT91_UART || DEBUG_STM32_UART || \ + DEBUG_STIH41X_ASC2 || DEBUG_STIH41X_SBC_ASC1 || \ + DEBUG_STIH418_SBC_ASC0 config DEBUG_UART_VIRT hex "Virtual base address of debug UART" @@ -1744,6 +1755,7 @@ config DEBUG_UART_VIRT default 0xf8090000 if DEBUG_VEXPRESS_UART0_RS1 default 0xf8ffee00 if DEBUG_AT91_SAM9263_DBGU default 0xf8fff200 if DEBUG_AT91_RM9200_DBGU + default 0xf9530000 if DEBUG_STIH418_SBC_ASC0 default 0xf9e09000 if DEBUG_AM33XXUART1 default 0xfa020000 if DEBUG_OMAP4UART3 || DEBUG_TI81XXUART1 default 0xfa022000 if DEBUG_TI81XXUART2 @@ -1762,7 +1774,9 @@ config DEBUG_UART_VIRT default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT default 0xfcfe8600 if DEBUG_BCM63XX_UART default 0xfd000000 if DEBUG_SPEAR3XX || DEBUG_SPEAR13XX + default 0xfd531000 if DEBUG_STIH41X_SBC_ASC1 default 0xfd883000 if DEBUG_ALPINE_UART0 + default 0xfdd32000 if DEBUG_STIH41X_ASC2 default 0xfe010000 if STM32MP1_DEBUG_UART default 0xfe017000 if DEBUG_MMP_UART2 default 0xfe018000 if DEBUG_MMP_UART3 @@ -1803,7 +1817,9 @@ config DEBUG_UART_VIRT DEBUG_S3C64XX_UART || \ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \ DEBUG_DIGICOLOR_UA0 || \ - DEBUG_AT91_UART || DEBUG_STM32_UART + DEBUG_AT91_UART || DEBUG_STM32_UART || \ + DEBUG_STIH41X_ASC2 || DEBUG_STIH41X_SBC_ASC1 || \ + DEBUG_STIH418_SBC_ASC0 config DEBUG_UART_8250_SHIFT int "Register offset shift for the 8250 debug UART" @@ -1837,7 +1853,7 @@ config DEBUG_UNCOMPRESS depends on ARCH_MULTIPLATFORM || PLAT_SAMSUNG || ARM_SINGLE_ARMV7M depends on DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \ (!DEBUG_TEGRA_UART || !ZBOOT_ROM) && \ - !DEBUG_BRCMSTB_UART + !DEBUG_BRCMSTB_UART && !DEBUG_SEMIHOSTING help This option influences the normal decompressor output for multiplatform kernels. 
Normally, multiplatform kernels disable diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index fb521efcc6c2..fd94e27ba4fa 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -87,10 +87,13 @@ libfdt_objs := fdt_rw.o fdt_ro.o fdt_wip.o fdt.o ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y) OBJS += $(libfdt_objs) atags_to_fdt.o endif +ifeq ($(CONFIG_USE_OF),y) +OBJS += $(libfdt_objs) fdt_check_mem_start.o +endif # -fstack-protector-strong triggers protection checks in this code, # but it is being used too early to link to meaningful stack_chk logic. -$(foreach o, $(libfdt_objs) atags_to_fdt.o, \ +$(foreach o, $(libfdt_objs) atags_to_fdt.o fdt_check_mem_start.o, \ $(eval CFLAGS_$(o) := -I $(srctree)/scripts/dtc/libfdt -fno-stack-protector)) # These were previously generated C files. When you are building the kernel diff --git a/arch/arm/boot/compressed/fdt_check_mem_start.c b/arch/arm/boot/compressed/fdt_check_mem_start.c new file mode 100644 index 000000000000..62450d824c3c --- /dev/null +++ b/arch/arm/boot/compressed/fdt_check_mem_start.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/kernel.h> +#include <linux/libfdt.h> +#include <linux/sizes.h> + +static const void *get_prop(const void *fdt, const char *node_path, + const char *property, int minlen) +{ + const void *prop; + int offset, len; + + offset = fdt_path_offset(fdt, node_path); + if (offset < 0) + return NULL; + + prop = fdt_getprop(fdt, offset, property, &len); + if (!prop || len < minlen) + return NULL; + + return prop; +} + +static uint32_t get_cells(const void *fdt, const char *name) +{ + const fdt32_t *prop = get_prop(fdt, "/", name, sizeof(fdt32_t)); + + if (!prop) { + /* default */ + return 1; + } + + return fdt32_ld(prop); +} + +static uint64_t get_val(const fdt32_t *cells, uint32_t ncells) +{ + uint64_t r; + + r = fdt32_ld(cells); + if (ncells > 1) + r = (r << 32) | fdt32_ld(cells + 1); + + return r; +} + +/* + * Check the start of physical memory + * + * Traditionally, the start address of physical memory is obtained by masking + * the program counter. However, this does require that this address is a + * multiple of 128 MiB, precluding booting Linux on platforms where this + * requirement is not fulfilled. + * Hence validate the calculated address against the memory information in the + * DTB, and, if out-of-range, replace it by the real start address. + * To preserve backwards compatibility (systems reserving a block of memory + * at the start of physical memory, kdump, ...), the traditional method is + * always used if it yields a valid address. 
+ * + * Return value: start address of physical memory to use + */ +uint32_t fdt_check_mem_start(uint32_t mem_start, const void *fdt) +{ + uint32_t addr_cells, size_cells, base; + uint32_t fdt_mem_start = 0xffffffff; + const fdt32_t *reg, *endp; + uint64_t size, end; + const char *type; + int offset, len; + + if (!fdt) + return mem_start; + + if (fdt_magic(fdt) != FDT_MAGIC) + return mem_start; + + /* There may be multiple cells on LPAE platforms */ + addr_cells = get_cells(fdt, "#address-cells"); + size_cells = get_cells(fdt, "#size-cells"); + if (addr_cells > 2 || size_cells > 2) + return mem_start; + + /* Walk all memory nodes and regions */ + for (offset = fdt_next_node(fdt, -1, NULL); offset >= 0; + offset = fdt_next_node(fdt, offset, NULL)) { + type = fdt_getprop(fdt, offset, "device_type", NULL); + if (!type || strcmp(type, "memory")) + continue; + + reg = fdt_getprop(fdt, offset, "linux,usable-memory", &len); + if (!reg) + reg = fdt_getprop(fdt, offset, "reg", &len); + if (!reg) + continue; + + for (endp = reg + (len / sizeof(fdt32_t)); + endp - reg >= addr_cells + size_cells; + reg += addr_cells + size_cells) { + size = get_val(reg + addr_cells, size_cells); + if (!size) + continue; + + if (addr_cells > 1 && fdt32_ld(reg)) { + /* Outside 32-bit address space, skipping */ + continue; + } + + base = fdt32_ld(reg + addr_cells - 1); + end = base + size; + if (mem_start >= base && mem_start < end) { + /* Calculated address is valid, use it */ + return mem_start; + } + + if (base < fdt_mem_start) + fdt_mem_start = base; + } + } + + if (fdt_mem_start == 0xffffffff) { + /* No usable memory found, falling back to default */ + return mem_start; + } + + /* + * The calculated address is not usable. + * Use the lowest usable physical memory address from the DTB instead, + * and make sure this is a multiple of 2 MiB for phys/virt patching. + */ + return round_up(fdt_mem_start, SZ_2M); +} diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index d9cce7238a36..b1cb1972361b 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -174,10 +174,7 @@ .macro be32tocpu, val, tmp #ifndef __ARMEB__ /* convert to little endian */ - eor \tmp, \val, \val, ror #16 - bic \tmp, \tmp, #0x00ff0000 - mov \val, \val, ror #8 - eor \val, \val, \tmp, lsr #8 + rev_l \val, \tmp #endif .endm @@ -282,10 +279,40 @@ not_angel: * are already placing their zImage in (eg) the top 64MB * of this range. */ - mov r4, pc - and r4, r4, #0xf8000000 + mov r0, pc + and r0, r0, #0xf8000000 +#ifdef CONFIG_USE_OF + adr r1, LC1 +#ifdef CONFIG_ARM_APPENDED_DTB + /* + * Look for an appended DTB. If found, we cannot use it to + * validate the calculated start of physical memory, as its + * memory nodes may need to be augmented by ATAGS stored at + * an offset from the same start of physical memory. + */ + ldr r2, [r1, #4] @ get &_edata + add r2, r2, r1 @ relocate it + ldr r2, [r2] @ get DTB signature + ldr r3, =OF_DT_MAGIC + cmp r2, r3 @ do we have a DTB there? + beq 1f @ if yes, skip validation +#endif /* CONFIG_ARM_APPENDED_DTB */ + + /* + * Make sure we have some stack before calling C code. + * No GOT fixup has occurred yet, but none of the code we're + * about to call uses any global variables. + */ + ldr sp, [r1] @ get stack location + add sp, sp, r1 @ apply relocation + + /* Validate calculated start against passed DTB */ + mov r1, r8 + bl fdt_check_mem_start +1: +#endif /* CONFIG_USE_OF */ /* Determine final kernel image address. 
*/ - add r4, r4, #TEXT_OFFSET + add r4, r0, #TEXT_OFFSET #else ldr r4, =zreladdr #endif @@ -1164,9 +1191,9 @@ __armv4_mmu_cache_off: __armv7_mmu_cache_off: mrc p15, 0, r0, c1, c0 #ifdef CONFIG_MMU - bic r0, r0, #0x000d + bic r0, r0, #0x0005 #else - bic r0, r0, #0x000c + bic r0, r0, #0x0004 #endif mcr p15, 0, r0, c1, c0 @ turn MMU and cache off mov r0, #0 diff --git a/arch/arm/boot/dts/cros-ec-keyboard.dtsi b/arch/arm/boot/dts/cros-ec-keyboard.dtsi index 165c5bcd510e..55c4744fa7e7 100644 --- a/arch/arm/boot/dts/cros-ec-keyboard.dtsi +++ b/arch/arm/boot/dts/cros-ec-keyboard.dtsi @@ -6,103 +6,18 @@ */ #include <dt-bindings/input/input.h> +#include <dt-bindings/input/cros-ec-keyboard.h> &cros_ec { - keyboard-controller { + keyboard_controller: keyboard-controller { compatible = "google,cros-ec-keyb"; keypad,num-rows = <8>; keypad,num-columns = <13>; google,needs-ghost-filter; linux,keymap = < - MATRIX_KEY(0x00, 0x01, KEY_LEFTMETA) - MATRIX_KEY(0x00, 0x02, KEY_F1) - MATRIX_KEY(0x00, 0x03, KEY_B) - MATRIX_KEY(0x00, 0x04, KEY_F10) - MATRIX_KEY(0x00, 0x05, KEY_RO) - MATRIX_KEY(0x00, 0x06, KEY_N) - MATRIX_KEY(0x00, 0x08, KEY_EQUAL) - MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT) - - MATRIX_KEY(0x01, 0x01, KEY_ESC) - MATRIX_KEY(0x01, 0x02, KEY_F4) - MATRIX_KEY(0x01, 0x03, KEY_G) - MATRIX_KEY(0x01, 0x04, KEY_F7) - MATRIX_KEY(0x01, 0x06, KEY_H) - MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE) - MATRIX_KEY(0x01, 0x09, KEY_F9) - MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE) - MATRIX_KEY(0x01, 0x0c, KEY_HENKAN) - - MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL) - MATRIX_KEY(0x02, 0x01, KEY_TAB) - MATRIX_KEY(0x02, 0x02, KEY_F3) - MATRIX_KEY(0x02, 0x03, KEY_T) - MATRIX_KEY(0x02, 0x04, KEY_F6) - MATRIX_KEY(0x02, 0x05, KEY_RIGHTBRACE) - MATRIX_KEY(0x02, 0x06, KEY_Y) - MATRIX_KEY(0x02, 0x07, KEY_102ND) - MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE) - MATRIX_KEY(0x02, 0x09, KEY_F8) - MATRIX_KEY(0x02, 0x0a, KEY_YEN) - - MATRIX_KEY(0x03, 0x00, KEY_LEFTMETA) - MATRIX_KEY(0x03, 0x01, KEY_GRAVE) - MATRIX_KEY(0x03, 0x02, KEY_F2) - MATRIX_KEY(0x03, 0x03, KEY_5) - MATRIX_KEY(0x03, 0x04, KEY_F5) - MATRIX_KEY(0x03, 0x06, KEY_6) - MATRIX_KEY(0x03, 0x08, KEY_MINUS) - MATRIX_KEY(0x03, 0x09, KEY_F13) - MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH) - MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN) - - MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL) - MATRIX_KEY(0x04, 0x01, KEY_A) - MATRIX_KEY(0x04, 0x02, KEY_D) - MATRIX_KEY(0x04, 0x03, KEY_F) - MATRIX_KEY(0x04, 0x04, KEY_S) - MATRIX_KEY(0x04, 0x05, KEY_K) - MATRIX_KEY(0x04, 0x06, KEY_J) - MATRIX_KEY(0x04, 0x08, KEY_SEMICOLON) - MATRIX_KEY(0x04, 0x09, KEY_L) - MATRIX_KEY(0x04, 0x0a, KEY_BACKSLASH) - MATRIX_KEY(0x04, 0x0b, KEY_ENTER) - - MATRIX_KEY(0x05, 0x01, KEY_Z) - MATRIX_KEY(0x05, 0x02, KEY_C) - MATRIX_KEY(0x05, 0x03, KEY_V) - MATRIX_KEY(0x05, 0x04, KEY_X) - MATRIX_KEY(0x05, 0x05, KEY_COMMA) - MATRIX_KEY(0x05, 0x06, KEY_M) - MATRIX_KEY(0x05, 0x07, KEY_LEFTSHIFT) - MATRIX_KEY(0x05, 0x08, KEY_SLASH) - MATRIX_KEY(0x05, 0x09, KEY_DOT) - MATRIX_KEY(0x05, 0x0b, KEY_SPACE) - - MATRIX_KEY(0x06, 0x01, KEY_1) - MATRIX_KEY(0x06, 0x02, KEY_3) - MATRIX_KEY(0x06, 0x03, KEY_4) - MATRIX_KEY(0x06, 0x04, KEY_2) - MATRIX_KEY(0x06, 0x05, KEY_8) - MATRIX_KEY(0x06, 0x06, KEY_7) - MATRIX_KEY(0x06, 0x08, KEY_0) - MATRIX_KEY(0x06, 0x09, KEY_9) - MATRIX_KEY(0x06, 0x0a, KEY_LEFTALT) - MATRIX_KEY(0x06, 0x0b, KEY_DOWN) - MATRIX_KEY(0x06, 0x0c, KEY_RIGHT) - - MATRIX_KEY(0x07, 0x01, KEY_Q) - MATRIX_KEY(0x07, 0x02, KEY_E) - MATRIX_KEY(0x07, 0x03, KEY_R) - MATRIX_KEY(0x07, 0x04, KEY_W) - MATRIX_KEY(0x07, 0x05, KEY_I) - MATRIX_KEY(0x07, 0x06, KEY_U) - MATRIX_KEY(0x07, 0x07, 
KEY_RIGHTSHIFT) - MATRIX_KEY(0x07, 0x08, KEY_P) - MATRIX_KEY(0x07, 0x09, KEY_O) - MATRIX_KEY(0x07, 0x0b, KEY_UP) - MATRIX_KEY(0x07, 0x0c, KEY_LEFT) + CROS_STD_TOP_ROW_KEYMAP + CROS_STD_MAIN_KEYMAP >; }; }; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index 5f1a8bd13880..e025b7c9a357 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -518,6 +518,9 @@ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>, <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>; clock-names = "fck", "sys_clk"; + + #address-cells = <1>; + #size-cells = <0>; }; }; @@ -550,6 +553,9 @@ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>, <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>; clock-names = "fck", "sys_clk"; + + #address-cells = <1>; + #size-cells = <0>; }; }; diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c index 62f241b09fe3..e45f4e4e06b6 100644 --- a/arch/arm/common/locomo.c +++ b/arch/arm/common/locomo.c @@ -838,11 +838,10 @@ static int locomo_bus_remove(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); - int ret = 0; if (drv->remove) - ret = drv->remove(ldev); - return ret; + drv->remove(ldev); + return 0; } struct bus_type locomo_bus_type = { diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index f89c1ea327a2..ff5e0d04cb89 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -1368,11 +1368,11 @@ static int sa1111_bus_remove(struct device *dev) { struct sa1111_dev *sadev = to_sa1111_device(dev); struct sa1111_driver *drv = SA1111_DRV(dev->driver); - int ret = 0; if (drv->remove) - ret = drv->remove(sadev); - return ret; + drv->remove(sadev); + + return 0; } struct bus_type sa1111_bus_type = { diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig index 1ef2bc4c7f69..383c632eba7b 100644 --- a/arch/arm/configs/bcm2835_defconfig +++ b/arch/arm/configs/bcm2835_defconfig @@ -176,7 +176,6 @@ CONFIG_BOOT_PRINTK_DELAY=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y # CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_UNUSED_SYMBOLS=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_LOCKUP_DETECTOR=y CONFIG_SCHED_TRACER=y diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig index a9c6f32a9b1c..ca32446b187f 100644 --- a/arch/arm/configs/mxs_defconfig +++ b/arch/arm/configs/mxs_defconfig @@ -164,7 +164,6 @@ CONFIG_FONTS=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_FRAME_WARN=2048 -CONFIG_UNUSED_SYMBOLS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_SOFTLOCKUP_DETECTOR=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index b26ef4866a35..f250bf1cc022 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -356,8 +356,8 @@ CONFIG_DRM_OMAP=m CONFIG_OMAP5_DSS_HDMI=y CONFIG_OMAP2_DSS_SDI=y CONFIG_OMAP2_DSS_DSI=y -CONFIG_DRM_OMAP_PANEL_DSI_CM=m CONFIG_DRM_TILCDC=m +CONFIG_DRM_PANEL_DSI_CM=m CONFIG_DRM_PANEL_SIMPLE=m CONFIG_DRM_PANEL_LG_LB035Q02=m CONFIG_DRM_PANEL_NEC_NL8048HL11=m diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index c9bf2df85cb9..2b575792363e 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -62,6 +62,25 @@ config CRYPTO_SHA512_ARM SHA-512 secure hash standard (DFIPS 180-2) implemented using optimized ARM assembler and NEON, when available. 
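The crypto hunks that follow register the new BLAKE2s/BLAKE2b implementations as shash algorithms under the names aliased below (e.g. "blake2b-256"). For orientation, a minimal user-space sketch of exercising such a hash through the AF_ALG interface, assuming CONFIG_CRYPTO_USER_API_HASH is enabled; error handling is omitted for brevity:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
            /* Bind to the kernel's "blake2b-256" hash; the NEON driver
             * added in this diff registers under this name with a higher
             * cra_priority than the generic implementation. */
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "hash",
                    .salg_name   = "blake2b-256",
            };
            unsigned char digest[32];  /* 256-bit digest */
            int tfmfd, opfd;

            tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
            bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
            opfd = accept(tfmfd, NULL, 0);

            write(opfd, "hello", 5);            /* feed the message */
            read(opfd, digest, sizeof(digest)); /* read back the digest */

            for (size_t i = 0; i < sizeof(digest); i++)
                    printf("%02x", digest[i]);
            printf("\n");

            close(opfd);
            close(tfmfd);
            return 0;
    }
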
+config CRYPTO_BLAKE2S_ARM + tristate "BLAKE2s digest algorithm (ARM)" + select CRYPTO_ARCH_HAVE_LIB_BLAKE2S + help + BLAKE2s digest algorithm optimized with ARM scalar instructions. This + is faster than the generic implementations of BLAKE2s and BLAKE2b, but + slower than the NEON implementation of BLAKE2b. (There is no NEON + implementation of BLAKE2s, since NEON doesn't really help with it.) + +config CRYPTO_BLAKE2B_NEON + tristate "BLAKE2b digest algorithm (ARM NEON)" + depends on KERNEL_MODE_NEON + select CRYPTO_BLAKE2B + help + BLAKE2b digest algorithm optimized with ARM NEON instructions. + On ARM processors that have NEON support but not the ARMv8 + Crypto Extensions, typically this BLAKE2b implementation is + much faster than SHA-2 and slightly faster than SHA-1. + config CRYPTO_AES_ARM tristate "Scalar AES cipher for ARM" select CRYPTO_ALGAPI diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index b745c17d356f..8f26c454ea12 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -9,6 +9,8 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o +obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += blake2s-arm.o +obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o @@ -29,6 +31,8 @@ sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y) sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y) +blake2s-arm-y := blake2s-core.o blake2s-glue.o +blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c index f70af1d0514b..5c6cd3c63cbc 100644 --- a/arch/arm/crypto/aes-neonbs-glue.c +++ b/arch/arm/crypto/aes-neonbs-glue.c @@ -9,6 +9,7 @@ #include <asm/simd.h> #include <crypto/aes.h> #include <crypto/ctr.h> +#include <crypto/internal/cipher.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> @@ -23,6 +24,8 @@ MODULE_ALIAS_CRYPTO("cbc(aes)-all"); MODULE_ALIAS_CRYPTO("ctr(aes)"); MODULE_ALIAS_CRYPTO("xts(aes)"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); + asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds); asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], diff --git a/arch/arm/crypto/blake2b-neon-core.S b/arch/arm/crypto/blake2b-neon-core.S new file mode 100644 index 000000000000..0406a186377f --- /dev/null +++ b/arch/arm/crypto/blake2b-neon-core.S @@ -0,0 +1,347 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * BLAKE2b digest algorithm, NEON accelerated + * + * Copyright 2020 Google LLC + * + * Author: Eric Biggers <ebiggers@google.com> + */ + +#include <linux/linkage.h> + + .text + .fpu neon + + // The arguments to blake2b_compress_neon() + STATE .req r0 + BLOCK .req r1 + NBLOCKS .req r2 + INC .req r3 + + // Pointers to the rotation tables + ROR24_TABLE .req r4 + ROR16_TABLE .req r5 + + // The original stack pointer + ORIG_SP .req r6 + + // NEON registers which contain the message words of the current block. 
+ // M_0-M_3 are occasionally used for other purposes too. + M_0 .req d16 + M_1 .req d17 + M_2 .req d18 + M_3 .req d19 + M_4 .req d20 + M_5 .req d21 + M_6 .req d22 + M_7 .req d23 + M_8 .req d24 + M_9 .req d25 + M_10 .req d26 + M_11 .req d27 + M_12 .req d28 + M_13 .req d29 + M_14 .req d30 + M_15 .req d31 + + .align 4 + // Tables for computing ror64(x, 24) and ror64(x, 16) using the vtbl.8 + // instruction. This is the most efficient way to implement these + // rotation amounts with NEON. (On Cortex-A53 it's the same speed as + // vshr.u64 + vsli.u64, while on Cortex-A7 it's faster.) +.Lror24_table: + .byte 3, 4, 5, 6, 7, 0, 1, 2 +.Lror16_table: + .byte 2, 3, 4, 5, 6, 7, 0, 1 + // The BLAKE2b initialization vector +.Lblake2b_IV: + .quad 0x6a09e667f3bcc908, 0xbb67ae8584caa73b + .quad 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1 + .quad 0x510e527fade682d1, 0x9b05688c2b3e6c1f + .quad 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179 + +// Execute one round of BLAKE2b by updating the state matrix v[0..15] in the +// NEON registers q0-q7. The message block is in q8..q15 (M_0-M_15). The stack +// pointer points to a 32-byte aligned buffer containing a copy of q8 and q9 +// (M_0-M_3), so that they can be reloaded if they are used as temporary +// registers. The macro arguments s0-s15 give the order in which the message +// words are used in this round. 'final' is 1 if this is the final round. +.macro _blake2b_round s0, s1, s2, s3, s4, s5, s6, s7, \ + s8, s9, s10, s11, s12, s13, s14, s15, final=0 + + // Mix the columns: + // (v[0], v[4], v[8], v[12]), (v[1], v[5], v[9], v[13]), + // (v[2], v[6], v[10], v[14]), and (v[3], v[7], v[11], v[15]). + + // a += b + m[blake2b_sigma[r][2*i + 0]]; + vadd.u64 q0, q0, q2 + vadd.u64 q1, q1, q3 + vadd.u64 d0, d0, M_\s0 + vadd.u64 d1, d1, M_\s2 + vadd.u64 d2, d2, M_\s4 + vadd.u64 d3, d3, M_\s6 + + // d = ror64(d ^ a, 32); + veor q6, q6, q0 + veor q7, q7, q1 + vrev64.32 q6, q6 + vrev64.32 q7, q7 + + // c += d; + vadd.u64 q4, q4, q6 + vadd.u64 q5, q5, q7 + + // b = ror64(b ^ c, 24); + vld1.8 {M_0}, [ROR24_TABLE, :64] + veor q2, q2, q4 + veor q3, q3, q5 + vtbl.8 d4, {d4}, M_0 + vtbl.8 d5, {d5}, M_0 + vtbl.8 d6, {d6}, M_0 + vtbl.8 d7, {d7}, M_0 + + // a += b + m[blake2b_sigma[r][2*i + 1]]; + // + // M_0 got clobbered above, so we have to reload it if any of the four + // message words this step needs happens to be M_0. Otherwise we don't + // need to reload it here, as it will just get clobbered again below. +.if \s1 == 0 || \s3 == 0 || \s5 == 0 || \s7 == 0 + vld1.8 {M_0}, [sp, :64] +.endif + vadd.u64 q0, q0, q2 + vadd.u64 q1, q1, q3 + vadd.u64 d0, d0, M_\s1 + vadd.u64 d1, d1, M_\s3 + vadd.u64 d2, d2, M_\s5 + vadd.u64 d3, d3, M_\s7 + + // d = ror64(d ^ a, 16); + vld1.8 {M_0}, [ROR16_TABLE, :64] + veor q6, q6, q0 + veor q7, q7, q1 + vtbl.8 d12, {d12}, M_0 + vtbl.8 d13, {d13}, M_0 + vtbl.8 d14, {d14}, M_0 + vtbl.8 d15, {d15}, M_0 + + // c += d; + vadd.u64 q4, q4, q6 + vadd.u64 q5, q5, q7 + + // b = ror64(b ^ c, 63); + // + // This rotation amount isn't a multiple of 8, so it has to be + // implemented using a pair of shifts, which requires temporary + // registers. Use q8-q9 (M_0-M_3) for this, and reload them afterwards. + veor q8, q2, q4 + veor q9, q3, q5 + vshr.u64 q2, q8, #63 + vshr.u64 q3, q9, #63 + vsli.u64 q2, q8, #1 + vsli.u64 q3, q9, #1 + vld1.8 {q8-q9}, [sp, :256] + + // Mix the diagonals: + // (v[0], v[5], v[10], v[15]), (v[1], v[6], v[11], v[12]), + // (v[2], v[7], v[8], v[13]), and (v[3], v[4], v[9], v[14]). 
+ // + // There are two possible ways to do this: use 'vext' instructions to + // shift the rows of the matrix so that the diagonals become columns, + // and undo it afterwards; or just use 64-bit operations on 'd' + // registers instead of 128-bit operations on 'q' registers. We use the + // latter approach, as it performs much better on Cortex-A7. + + // a += b + m[blake2b_sigma[r][2*i + 0]]; + vadd.u64 d0, d0, d5 + vadd.u64 d1, d1, d6 + vadd.u64 d2, d2, d7 + vadd.u64 d3, d3, d4 + vadd.u64 d0, d0, M_\s8 + vadd.u64 d1, d1, M_\s10 + vadd.u64 d2, d2, M_\s12 + vadd.u64 d3, d3, M_\s14 + + // d = ror64(d ^ a, 32); + veor d15, d15, d0 + veor d12, d12, d1 + veor d13, d13, d2 + veor d14, d14, d3 + vrev64.32 d15, d15 + vrev64.32 d12, d12 + vrev64.32 d13, d13 + vrev64.32 d14, d14 + + // c += d; + vadd.u64 d10, d10, d15 + vadd.u64 d11, d11, d12 + vadd.u64 d8, d8, d13 + vadd.u64 d9, d9, d14 + + // b = ror64(b ^ c, 24); + vld1.8 {M_0}, [ROR24_TABLE, :64] + veor d5, d5, d10 + veor d6, d6, d11 + veor d7, d7, d8 + veor d4, d4, d9 + vtbl.8 d5, {d5}, M_0 + vtbl.8 d6, {d6}, M_0 + vtbl.8 d7, {d7}, M_0 + vtbl.8 d4, {d4}, M_0 + + // a += b + m[blake2b_sigma[r][2*i + 1]]; +.if \s9 == 0 || \s11 == 0 || \s13 == 0 || \s15 == 0 + vld1.8 {M_0}, [sp, :64] +.endif + vadd.u64 d0, d0, d5 + vadd.u64 d1, d1, d6 + vadd.u64 d2, d2, d7 + vadd.u64 d3, d3, d4 + vadd.u64 d0, d0, M_\s9 + vadd.u64 d1, d1, M_\s11 + vadd.u64 d2, d2, M_\s13 + vadd.u64 d3, d3, M_\s15 + + // d = ror64(d ^ a, 16); + vld1.8 {M_0}, [ROR16_TABLE, :64] + veor d15, d15, d0 + veor d12, d12, d1 + veor d13, d13, d2 + veor d14, d14, d3 + vtbl.8 d12, {d12}, M_0 + vtbl.8 d13, {d13}, M_0 + vtbl.8 d14, {d14}, M_0 + vtbl.8 d15, {d15}, M_0 + + // c += d; + vadd.u64 d10, d10, d15 + vadd.u64 d11, d11, d12 + vadd.u64 d8, d8, d13 + vadd.u64 d9, d9, d14 + + // b = ror64(b ^ c, 63); + veor d16, d4, d9 + veor d17, d5, d10 + veor d18, d6, d11 + veor d19, d7, d8 + vshr.u64 q2, q8, #63 + vshr.u64 q3, q9, #63 + vsli.u64 q2, q8, #1 + vsli.u64 q3, q9, #1 + // Reloading q8-q9 can be skipped on the final round. +.if ! \final + vld1.8 {q8-q9}, [sp, :256] +.endif +.endm + +// +// void blake2b_compress_neon(struct blake2b_state *state, +// const u8 *block, size_t nblocks, u32 inc); +// +// Only the first three fields of struct blake2b_state are used: +// u64 h[8]; (inout) +// u64 t[2]; (inout) +// u64 f[2]; (in) +// + .align 5 +ENTRY(blake2b_compress_neon) + push {r4-r10} + + // Allocate a 32-byte stack buffer that is 32-byte aligned. + mov ORIG_SP, sp + sub ip, sp, #32 + bic ip, ip, #31 + mov sp, ip + + adr ROR24_TABLE, .Lror24_table + adr ROR16_TABLE, .Lror16_table + + mov ip, STATE + vld1.64 {q0-q1}, [ip]! // Load h[0..3] + vld1.64 {q2-q3}, [ip]! // Load h[4..7] +.Lnext_block: + adr r10, .Lblake2b_IV + vld1.64 {q14-q15}, [ip] // Load t[0..1] and f[0..1] + vld1.64 {q4-q5}, [r10]! // Load IV[0..3] + vmov r7, r8, d28 // Copy t[0] to (r7, r8) + vld1.64 {q6-q7}, [r10] // Load IV[4..7] + adds r7, r7, INC // Increment counter + bcs .Lslow_inc_ctr + vmov.i32 d28[0], r7 + vst1.64 {d28}, [ip] // Update t[0] +.Linc_ctr_done: + + // Load the next message block and finish initializing the state matrix + // 'v'. Fortunately, there are exactly enough NEON registers to fit the + // entire state matrix in q0-q7 and the entire message block in q8-15. + // + // However, _blake2b_round also needs some extra registers for rotates, + // so we have to spill some registers. It's better to spill the message + // registers than the state registers, as the message doesn't change. 
+ // Therefore we store a copy of the first 32 bytes of the message block + // (q8-q9) in an aligned buffer on the stack so that they can be + // reloaded when needed. (We could just reload directly from the + // message buffer, but it's faster to use aligned loads.) + vld1.8 {q8-q9}, [BLOCK]! + veor q6, q6, q14 // v[12..13] = IV[4..5] ^ t[0..1] + vld1.8 {q10-q11}, [BLOCK]! + veor q7, q7, q15 // v[14..15] = IV[6..7] ^ f[0..1] + vld1.8 {q12-q13}, [BLOCK]! + vst1.8 {q8-q9}, [sp, :256] + mov ip, STATE + vld1.8 {q14-q15}, [BLOCK]! + + // Execute the rounds. Each round is provided the order in which it + // needs to use the message words. + _blake2b_round 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 + _blake2b_round 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 + _blake2b_round 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 + _blake2b_round 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 + _blake2b_round 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 + _blake2b_round 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 + _blake2b_round 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 + _blake2b_round 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 + _blake2b_round 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 + _blake2b_round 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 + _blake2b_round 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 + _blake2b_round 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 \ + final=1 + + // Fold the final state matrix into the hash chaining value: + // + // for (i = 0; i < 8; i++) + // h[i] ^= v[i] ^ v[i + 8]; + // + vld1.64 {q8-q9}, [ip]! // Load old h[0..3] + veor q0, q0, q4 // v[0..1] ^= v[8..9] + veor q1, q1, q5 // v[2..3] ^= v[10..11] + vld1.64 {q10-q11}, [ip] // Load old h[4..7] + veor q2, q2, q6 // v[4..5] ^= v[12..13] + veor q3, q3, q7 // v[6..7] ^= v[14..15] + veor q0, q0, q8 // v[0..1] ^= h[0..1] + veor q1, q1, q9 // v[2..3] ^= h[2..3] + mov ip, STATE + subs NBLOCKS, NBLOCKS, #1 // nblocks-- + vst1.64 {q0-q1}, [ip]! // Store new h[0..3] + veor q2, q2, q10 // v[4..5] ^= h[4..5] + veor q3, q3, q11 // v[6..7] ^= h[6..7] + vst1.64 {q2-q3}, [ip]! // Store new h[4..7] + + // Advance to the next block, if there is one. + bne .Lnext_block // nblocks != 0? + + mov sp, ORIG_SP + pop {r4-r10} + mov pc, lr + +.Lslow_inc_ctr: + // Handle the case where the counter overflowed its low 32 bits, by + // carrying the overflow bit into the full 128-bit counter. 
+ vmov r9, r10, d29 + adcs r8, r8, #0 + adcs r9, r9, #0 + adc r10, r10, #0 + vmov d28, r7, r8 + vmov d29, r9, r10 + vst1.64 {q14}, [ip] // Update t[0] and t[1] + b .Linc_ctr_done +ENDPROC(blake2b_compress_neon) diff --git a/arch/arm/crypto/blake2b-neon-glue.c b/arch/arm/crypto/blake2b-neon-glue.c new file mode 100644 index 000000000000..34d73200e7fa --- /dev/null +++ b/arch/arm/crypto/blake2b-neon-glue.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * BLAKE2b digest algorithm, NEON accelerated + * + * Copyright 2020 Google LLC + */ + +#include <crypto/internal/blake2b.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/simd.h> + +#include <linux/module.h> +#include <linux/sizes.h> + +#include <asm/neon.h> +#include <asm/simd.h> + +asmlinkage void blake2b_compress_neon(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc); + +static void blake2b_compress_arch(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc) +{ + if (!crypto_simd_usable()) { + blake2b_compress_generic(state, block, nblocks, inc); + return; + } + + do { + const size_t blocks = min_t(size_t, nblocks, + SZ_4K / BLAKE2B_BLOCK_SIZE); + + kernel_neon_begin(); + blake2b_compress_neon(state, block, blocks, inc); + kernel_neon_end(); + + nblocks -= blocks; + block += blocks * BLAKE2B_BLOCK_SIZE; + } while (nblocks); +} + +static int crypto_blake2b_update_neon(struct shash_desc *desc, + const u8 *in, unsigned int inlen) +{ + return crypto_blake2b_update(desc, in, inlen, blake2b_compress_arch); +} + +static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out) +{ + return crypto_blake2b_final(desc, out, blake2b_compress_arch); +} + +#define BLAKE2B_ALG(name, driver_name, digest_size) \ + { \ + .base.cra_name = name, \ + .base.cra_driver_name = driver_name, \ + .base.cra_priority = 200, \ + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ + .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \ + .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \ + .base.cra_module = THIS_MODULE, \ + .digestsize = digest_size, \ + .setkey = crypto_blake2b_setkey, \ + .init = crypto_blake2b_init, \ + .update = crypto_blake2b_update_neon, \ + .final = crypto_blake2b_final_neon, \ + .descsize = sizeof(struct blake2b_state), \ + } + +static struct shash_alg blake2b_neon_algs[] = { + BLAKE2B_ALG("blake2b-160", "blake2b-160-neon", BLAKE2B_160_HASH_SIZE), + BLAKE2B_ALG("blake2b-256", "blake2b-256-neon", BLAKE2B_256_HASH_SIZE), + BLAKE2B_ALG("blake2b-384", "blake2b-384-neon", BLAKE2B_384_HASH_SIZE), + BLAKE2B_ALG("blake2b-512", "blake2b-512-neon", BLAKE2B_512_HASH_SIZE), +}; + +static int __init blake2b_neon_mod_init(void) +{ + if (!(elf_hwcap & HWCAP_NEON)) + return -ENODEV; + + return crypto_register_shashes(blake2b_neon_algs, + ARRAY_SIZE(blake2b_neon_algs)); +} + +static void __exit blake2b_neon_mod_exit(void) +{ + return crypto_unregister_shashes(blake2b_neon_algs, + ARRAY_SIZE(blake2b_neon_algs)); +} + +module_init(blake2b_neon_mod_init); +module_exit(blake2b_neon_mod_exit); + +MODULE_DESCRIPTION("BLAKE2b digest algorithm, NEON accelerated"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); +MODULE_ALIAS_CRYPTO("blake2b-160"); +MODULE_ALIAS_CRYPTO("blake2b-160-neon"); +MODULE_ALIAS_CRYPTO("blake2b-256"); +MODULE_ALIAS_CRYPTO("blake2b-256-neon"); +MODULE_ALIAS_CRYPTO("blake2b-384"); +MODULE_ALIAS_CRYPTO("blake2b-384-neon"); +MODULE_ALIAS_CRYPTO("blake2b-512"); +MODULE_ALIAS_CRYPTO("blake2b-512-neon"); diff --git a/arch/arm/crypto/blake2s-core.S 
b/arch/arm/crypto/blake2s-core.S new file mode 100644 index 000000000000..bed897e9a181 --- /dev/null +++ b/arch/arm/crypto/blake2s-core.S @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * BLAKE2s digest algorithm, ARM scalar implementation + * + * Copyright 2020 Google LLC + * + * Author: Eric Biggers <ebiggers@google.com> + */ + +#include <linux/linkage.h> + + // Registers used to hold message words temporarily. There aren't + // enough ARM registers to hold the whole message block, so we have to + // load the words on-demand. + M_0 .req r12 + M_1 .req r14 + +// The BLAKE2s initialization vector +.Lblake2s_IV: + .word 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A + .word 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 + +.macro __ldrd a, b, src, offset +#if __LINUX_ARM_ARCH__ >= 6 + ldrd \a, \b, [\src, #\offset] +#else + ldr \a, [\src, #\offset] + ldr \b, [\src, #\offset + 4] +#endif +.endm + +.macro __strd a, b, dst, offset +#if __LINUX_ARM_ARCH__ >= 6 + strd \a, \b, [\dst, #\offset] +#else + str \a, [\dst, #\offset] + str \b, [\dst, #\offset + 4] +#endif +.endm + +// Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals. +// (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two +// columns/diagonals. s0-s1 are the word offsets to the message words the first +// column/diagonal needs, and likewise s2-s3 for the second column/diagonal. +// M_0 and M_1 are free to use, and the message block can be found at sp + 32. +// +// Note that to save instructions, the rotations don't happen when the +// pseudocode says they should, but rather they are delayed until the values are +// used. See the comment above _blake2s_round(). +.macro _blake2s_quarterround a0, b0, c0, d0, a1, b1, c1, d1, s0, s1, s2, s3 + + ldr M_0, [sp, #32 + 4 * \s0] + ldr M_1, [sp, #32 + 4 * \s2] + + // a += b + m[blake2s_sigma[r][2*i + 0]]; + add \a0, \a0, \b0, ror #brot + add \a1, \a1, \b1, ror #brot + add \a0, \a0, M_0 + add \a1, \a1, M_1 + + // d = ror32(d ^ a, 16); + eor \d0, \a0, \d0, ror #drot + eor \d1, \a1, \d1, ror #drot + + // c += d; + add \c0, \c0, \d0, ror #16 + add \c1, \c1, \d1, ror #16 + + // b = ror32(b ^ c, 12); + eor \b0, \c0, \b0, ror #brot + eor \b1, \c1, \b1, ror #brot + + ldr M_0, [sp, #32 + 4 * \s1] + ldr M_1, [sp, #32 + 4 * \s3] + + // a += b + m[blake2s_sigma[r][2*i + 1]]; + add \a0, \a0, \b0, ror #12 + add \a1, \a1, \b1, ror #12 + add \a0, \a0, M_0 + add \a1, \a1, M_1 + + // d = ror32(d ^ a, 8); + eor \d0, \a0, \d0, ror#16 + eor \d1, \a1, \d1, ror#16 + + // c += d; + add \c0, \c0, \d0, ror#8 + add \c1, \c1, \d1, ror#8 + + // b = ror32(b ^ c, 7); + eor \b0, \c0, \b0, ror#12 + eor \b1, \c1, \b1, ror#12 +.endm + +// Execute one round of BLAKE2s by updating the state matrix v[0..15]. v[0..9] +// are in r0..r9. The stack pointer points to 8 bytes of scratch space for +// spilling v[8..9], then to v[9..15], then to the message block. r10-r12 and +// r14 are free to use. The macro arguments s0-s15 give the order in which the +// message words are used in this round. +// +// All rotates are performed using the implicit rotate operand accepted by the +// 'add' and 'eor' instructions. This is faster than using explicit rotate +// instructions. To make this work, we allow the values in the second and last +// rows of the BLAKE2s state matrix (rows 'b' and 'd') to temporarily have the +// wrong rotation amount. The rotation amount is then fixed up just in time +// when the values are used. 
'brot' is the number of bits the values in row 'b' +// need to be rotated right to arrive at the correct values, and 'drot' +// similarly for row 'd'. (brot, drot) start out as (0, 0) but we make it such +// that they end up as (7, 8) after every round. +.macro _blake2s_round s0, s1, s2, s3, s4, s5, s6, s7, \ + s8, s9, s10, s11, s12, s13, s14, s15 + + // Mix first two columns: + // (v[0], v[4], v[8], v[12]) and (v[1], v[5], v[9], v[13]). + __ldrd r10, r11, sp, 16 // load v[12] and v[13] + _blake2s_quarterround r0, r4, r8, r10, r1, r5, r9, r11, \ + \s0, \s1, \s2, \s3 + __strd r8, r9, sp, 0 + __strd r10, r11, sp, 16 + + // Mix second two columns: + // (v[2], v[6], v[10], v[14]) and (v[3], v[7], v[11], v[15]). + __ldrd r8, r9, sp, 8 // load v[10] and v[11] + __ldrd r10, r11, sp, 24 // load v[14] and v[15] + _blake2s_quarterround r2, r6, r8, r10, r3, r7, r9, r11, \ + \s4, \s5, \s6, \s7 + str r10, [sp, #24] // store v[14] + // v[10], v[11], and v[15] are used below, so no need to store them yet. + + .set brot, 7 + .set drot, 8 + + // Mix first two diagonals: + // (v[0], v[5], v[10], v[15]) and (v[1], v[6], v[11], v[12]). + ldr r10, [sp, #16] // load v[12] + _blake2s_quarterround r0, r5, r8, r11, r1, r6, r9, r10, \ + \s8, \s9, \s10, \s11 + __strd r8, r9, sp, 8 + str r11, [sp, #28] + str r10, [sp, #16] + + // Mix second two diagonals: + // (v[2], v[7], v[8], v[13]) and (v[3], v[4], v[9], v[14]). + __ldrd r8, r9, sp, 0 // load v[8] and v[9] + __ldrd r10, r11, sp, 20 // load v[13] and v[14] + _blake2s_quarterround r2, r7, r8, r10, r3, r4, r9, r11, \ + \s12, \s13, \s14, \s15 + __strd r10, r11, sp, 20 +.endm + +// +// void blake2s_compress_arch(struct blake2s_state *state, +// const u8 *block, size_t nblocks, u32 inc); +// +// Only the first three fields of struct blake2s_state are used: +// u32 h[8]; (inout) +// u32 t[2]; (inout) +// u32 f[2]; (in) +// + .align 5 +ENTRY(blake2s_compress_arch) + push {r0-r2,r4-r11,lr} // keep this an even number + +.Lnext_block: + // r0 is 'state' + // r1 is 'block' + // r3 is 'inc' + + // Load and increment the counter t[0..1]. + __ldrd r10, r11, r0, 32 + adds r10, r10, r3 + adc r11, r11, #0 + __strd r10, r11, r0, 32 + + // _blake2s_round is very short on registers, so copy the message block + // to the stack to save a register during the rounds. This also has the + // advantage that misalignment only needs to be dealt with in one place. + sub sp, sp, #64 + mov r12, sp + tst r1, #3 + bne .Lcopy_block_misaligned + ldmia r1!, {r2-r9} + stmia r12!, {r2-r9} + ldmia r1!, {r2-r9} + stmia r12, {r2-r9} +.Lcopy_block_done: + str r1, [sp, #68] // Update message pointer + + // Calculate v[8..15]. Push v[9..15] onto the stack, and leave space + // for spilling v[8..9]. Leave v[8..9] in r8-r9. + mov r14, r0 // r14 = state + adr r12, .Lblake2s_IV + ldmia r12!, {r8-r9} // load IV[0..1] + __ldrd r0, r1, r14, 40 // load f[0..1] + ldm r12, {r2-r7} // load IV[3..7] + eor r4, r4, r10 // v[12] = IV[4] ^ t[0] + eor r5, r5, r11 // v[13] = IV[5] ^ t[1] + eor r6, r6, r0 // v[14] = IV[6] ^ f[0] + eor r7, r7, r1 // v[15] = IV[7] ^ f[1] + push {r2-r7} // push v[9..15] + sub sp, sp, #8 // leave space for v[8..9] + + // Load h[0..7] == v[0..7]. + ldm r14, {r0-r7} + + // Execute the rounds. Each round is provided the order in which it + // needs to use the message words. 
+ .set brot, 0 + .set drot, 0 + _blake2s_round 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 + _blake2s_round 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 + _blake2s_round 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 + _blake2s_round 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 + _blake2s_round 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 + _blake2s_round 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 + _blake2s_round 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 + _blake2s_round 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 + _blake2s_round 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 + _blake2s_round 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 + + // Fold the final state matrix into the hash chaining value: + // + // for (i = 0; i < 8; i++) + // h[i] ^= v[i] ^ v[i + 8]; + // + ldr r14, [sp, #96] // r14 = &h[0] + add sp, sp, #8 // v[8..9] are already loaded. + pop {r10-r11} // load v[10..11] + eor r0, r0, r8 + eor r1, r1, r9 + eor r2, r2, r10 + eor r3, r3, r11 + ldm r14, {r8-r11} // load h[0..3] + eor r0, r0, r8 + eor r1, r1, r9 + eor r2, r2, r10 + eor r3, r3, r11 + stmia r14!, {r0-r3} // store new h[0..3] + ldm r14, {r0-r3} // load old h[4..7] + pop {r8-r11} // load v[12..15] + eor r0, r0, r4, ror #brot + eor r1, r1, r5, ror #brot + eor r2, r2, r6, ror #brot + eor r3, r3, r7, ror #brot + eor r0, r0, r8, ror #drot + eor r1, r1, r9, ror #drot + eor r2, r2, r10, ror #drot + eor r3, r3, r11, ror #drot + add sp, sp, #64 // skip copy of message block + stm r14, {r0-r3} // store new h[4..7] + + // Advance to the next block, if there is one. Note that if there are + // multiple blocks, then 'inc' (the counter increment amount) must be + // 64. So we can simply set it to 64 without re-loading it. + ldm sp, {r0, r1, r2} // load (state, block, nblocks) + mov r3, #64 // set 'inc' + subs r2, r2, #1 // nblocks-- + str r2, [sp, #8] + bne .Lnext_block // nblocks != 0? + + pop {r0-r2,r4-r11,pc} + + // The next message block (pointed to by r1) isn't 4-byte aligned, so it + // can't be loaded using ldmia. Copy it to the stack buffer (pointed to + // by r12) using an alternative method. r2-r9 are free to use. 
+.Lcopy_block_misaligned: + mov r2, #64 +1: +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + ldr r3, [r1], #4 +#else + ldrb r3, [r1, #0] + ldrb r4, [r1, #1] + ldrb r5, [r1, #2] + ldrb r6, [r1, #3] + add r1, r1, #4 + orr r3, r3, r4, lsl #8 + orr r3, r3, r5, lsl #16 + orr r3, r3, r6, lsl #24 +#endif + subs r2, r2, #4 + str r3, [r12], #4 + bne 1b + b .Lcopy_block_done +ENDPROC(blake2s_compress_arch) diff --git a/arch/arm/crypto/blake2s-glue.c b/arch/arm/crypto/blake2s-glue.c new file mode 100644 index 000000000000..f2cc1e5fc9ec --- /dev/null +++ b/arch/arm/crypto/blake2s-glue.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * BLAKE2s digest algorithm, ARM scalar implementation + * + * Copyright 2020 Google LLC + */ + +#include <crypto/internal/blake2s.h> +#include <crypto/internal/hash.h> + +#include <linux/module.h> + +/* defined in blake2s-core.S */ +EXPORT_SYMBOL(blake2s_compress_arch); + +static int crypto_blake2s_update_arm(struct shash_desc *desc, + const u8 *in, unsigned int inlen) +{ + return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch); +} + +static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out) +{ + return crypto_blake2s_final(desc, out, blake2s_compress_arch); +} + +#define BLAKE2S_ALG(name, driver_name, digest_size) \ + { \ + .base.cra_name = name, \ + .base.cra_driver_name = driver_name, \ + .base.cra_priority = 200, \ + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ + .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ + .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ + .base.cra_module = THIS_MODULE, \ + .digestsize = digest_size, \ + .setkey = crypto_blake2s_setkey, \ + .init = crypto_blake2s_init, \ + .update = crypto_blake2s_update_arm, \ + .final = crypto_blake2s_final_arm, \ + .descsize = sizeof(struct blake2s_state), \ + } + +static struct shash_alg blake2s_arm_algs[] = { + BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE), + BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE), + BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE), + BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE), +}; + +static int __init blake2s_arm_mod_init(void) +{ + return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? 
+ crypto_register_shashes(blake2s_arm_algs, + ARRAY_SIZE(blake2s_arm_algs)) : 0; +} + +static void __exit blake2s_arm_mod_exit(void) +{ + if (IS_REACHABLE(CONFIG_CRYPTO_HASH)) + crypto_unregister_shashes(blake2s_arm_algs, + ARRAY_SIZE(blake2s_arm_algs)); +} + +module_init(blake2s_arm_mod_init); +module_exit(blake2s_arm_mod_exit); + +MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); +MODULE_ALIAS_CRYPTO("blake2s-128"); +MODULE_ALIAS_CRYPTO("blake2s-128-arm"); +MODULE_ALIAS_CRYPTO("blake2s-160"); +MODULE_ALIAS_CRYPTO("blake2s-160-arm"); +MODULE_ALIAS_CRYPTO("blake2s-224"); +MODULE_ALIAS_CRYPTO("blake2s-224-arm"); +MODULE_ALIAS_CRYPTO("blake2s-256"); +MODULE_ALIAS_CRYPTO("blake2s-256-arm"); diff --git a/arch/arm/include/asm/archrandom.h b/arch/arm/include/asm/archrandom.h new file mode 100644 index 000000000000..a8e84ca5c2ee --- /dev/null +++ b/arch/arm/include/asm/archrandom.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARCHRANDOM_H +#define _ASM_ARCHRANDOM_H + +static inline bool __init smccc_probe_trng(void) +{ + return false; +} + +#endif /* _ASM_ARCHRANDOM_H */ diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 6ed30421f697..e2b1fd558bf3 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -578,4 +578,21 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) __adldst_l str, \src, \sym, \tmp, \cond .endm + /* + * rev_l - byte-swap a 32-bit value + * + * @val: source/destination register + * @tmp: scratch register + */ + .macro rev_l, val:req, tmp:req + .if __LINUX_ARM_ARCH__ < 6 + eor \tmp, \val, \val, ror #16 + bic \tmp, \tmp, #0x00ff0000 + mov \val, \val, ror #8 + eor \val, \val, \tmp, lsr #8 + .else + rev \val, \val + .endif + .endm + #endif /* __ASM_ASSEMBLER_H__ */ diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h index f8712e3c29cf..246a3de25931 100644 --- a/arch/arm/include/asm/hardware/locomo.h +++ b/arch/arm/include/asm/hardware/locomo.h @@ -188,7 +188,7 @@ struct locomo_driver { struct device_driver drv; unsigned int devid; int (*probe)(struct locomo_dev *); - int (*remove)(struct locomo_dev *); + void (*remove)(struct locomo_dev *); }; #define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv) diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h index d134b9a5ff94..2e70db6f22ea 100644 --- a/arch/arm/include/asm/hardware/sa1111.h +++ b/arch/arm/include/asm/hardware/sa1111.h @@ -403,7 +403,7 @@ struct sa1111_driver { struct device_driver drv; unsigned int devid; int (*probe)(struct sa1111_dev *); - int (*remove)(struct sa1111_dev *); + void (*remove)(struct sa1111_dev *); }; #define SA1111_DRV(_d) container_of((_d), struct sa1111_driver, drv) diff --git a/arch/arm/include/debug/sti.S b/arch/arm/include/debug/sti.S index 72d052511890..dc796ac2ac57 100644 --- a/arch/arm/include/debug/sti.S +++ b/arch/arm/include/debug/sti.S @@ -6,28 +6,6 @@ * Copyright (C) 2013 STMicroelectronics (R&D) Limited. 
*/ -#define STIH41X_COMMS_BASE 0xfed00000 -#define STIH41X_ASC2_BASE (STIH41X_COMMS_BASE+0x32000) - -#define STIH41X_SBC_LPM_BASE 0xfe400000 -#define STIH41X_SBC_COMMS_BASE (STIH41X_SBC_LPM_BASE + 0x100000) -#define STIH41X_SBC_ASC1_BASE (STIH41X_SBC_COMMS_BASE + 0x31000) - - -#define VIRT_ADDRESS(x) (x - 0x1000000) - -#if IS_ENABLED(CONFIG_STIH41X_DEBUG_ASC2) -#define DEBUG_LL_UART_BASE STIH41X_ASC2_BASE -#endif - -#if IS_ENABLED(CONFIG_STIH41X_DEBUG_SBC_ASC1) -#define DEBUG_LL_UART_BASE STIH41X_SBC_ASC1_BASE -#endif - -#ifndef DEBUG_LL_UART_BASE -#error "DEBUG UART is not Configured" -#endif - #define ASC_TX_BUF_OFF 0x04 #define ASC_CTRL_OFF 0x0c #define ASC_STA_OFF 0x14 @@ -37,8 +15,8 @@ .macro addruart, rp, rv, tmp - ldr \rp, =DEBUG_LL_UART_BASE @ physical base - ldr \rv, =VIRT_ADDRESS(DEBUG_LL_UART_BASE) @ virt base + ldr \rp, =CONFIG_DEBUG_UART_PHYS @ physical base + ldr \rv, =CONFIG_DEBUG_UART_VIRT @ virt base .endm .macro senduart,rd,rx diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index ee3aee69e444..5199a2bb4111 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -243,7 +243,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, thread->cpu_domain = get_domain(); #endif - if (likely(!(p->flags & PF_KTHREAD))) { + if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) { *childregs = *current_pt_regs(); childregs->ARM_r0 = 0; if (stack_start) diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 5c48eb4fd0e5..74679240a9d8 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -540,12 +540,9 @@ void show_ipi_list(struct seq_file *p, int prec) unsigned int cpu, i; for (i = 0; i < NR_IPI; i++) { - unsigned int irq; - if (!ipi_desc[i]) continue; - irq = irq_desc_get_irq(ipi_desc[i]); seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); for_each_online_cpu(cpu) diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 0203e545bbc8..075a2e0ed2c1 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -248,6 +248,7 @@ struct oabi_epoll_event { __u64 data; } __attribute__ ((packed,aligned(4))); +#ifdef CONFIG_EPOLL asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd, struct oabi_epoll_event __user *event) { @@ -298,6 +299,20 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, kfree(kbuf); return err ? -EFAULT : ret; } +#else +asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd, + struct oabi_epoll_event __user *event) +{ + return -EINVAL; +} + +asmlinkage long sys_oabi_epoll_wait(int epfd, + struct oabi_epoll_event __user *events, + int maxevents, int timeout) +{ + return -EINVAL; +} +#endif struct oabi_sembuf { unsigned short sem_num; diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 2d76e2c6c99e..2b004cc4f95e 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -85,7 +85,6 @@ void imx_anatop_pre_suspend(void); void imx_anatop_post_resume(void); int imx6_set_lpm(enum mxc_cpu_pwr_mode mode); void imx6_set_int_mem_clk_lpm(bool enable); -void imx6sl_set_wait_clk(bool enter); int imx_mmdc_get_ddr_type(void); int imx7ulp_set_lpm(enum ulp_cpu_pwr_mode mode); diff --git a/arch/arm/mach-imx/cpuidle-imx6sl.c b/arch/arm/mach-imx/cpuidle-imx6sl.c index 4521e5352bf6..b86ffbeb28e4 100644 --- a/arch/arm/mach-imx/cpuidle-imx6sl.c +++ b/arch/arm/mach-imx/cpuidle-imx6sl.c @@ -3,6 +3,7 @@ * Copyright (C) 2014 Freescale Semiconductor, Inc. 
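Why the OABI epoll wrappers above must translate events one at a time: under EABI the 64-bit data member forces 8-byte struct alignment (with 4 bytes of implicit padding after events), whereas the packed, 4-byte-aligned OABI layout is 12 bytes, so the kernel's native event array cannot simply be copied out to an OABI user buffer. A standalone sketch of the mismatch:

    #include <stdint.h>
    #include <stdio.h>

    struct eabi_epoll_event {               /* natural alignment is 8 */
            uint32_t events;                /* followed by 4 bytes of padding */
            uint64_t data;
    };

    struct oabi_epoll_event {               /* layout from the patch above */
            uint32_t events;
            uint64_t data;
    } __attribute__((packed, aligned(4)));

    int main(void)
    {
            /* prints "16 12" with an ARM EABI compiler */
            printf("%zu %zu\n", sizeof(struct eabi_epoll_event),
                   sizeof(struct oabi_epoll_event));
            return 0;
    }

The new #else stubs simply return -EINVAL when CONFIG_EPOLL is disabled, so the OABI syscall table can reference the handlers unconditionally.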
*/ +#include <linux/clk/imx.h> #include <linux/cpuidle.h> #include <linux/module.h> #include <asm/cpuidle.h> diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index 40c74b4c4d73..9244437cb1b9 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c @@ -4,6 +4,7 @@ * Copyright 2011 Linaro Ltd. */ +#include <linux/clk/imx.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig index f7211b57b1e7..165c184801e1 100644 --- a/arch/arm/mach-ixp4xx/Kconfig +++ b/arch/arm/mach-ixp4xx/Kconfig @@ -13,7 +13,6 @@ config MACH_IXP4XX_OF select I2C select I2C_IOP3XX select PCI - select TIMER_OF select USE_OF help Say 'Y' here to support Device Tree-based IXP4xx platforms. diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c index 524d6093e0c7..09b8495f3fd9 100644 --- a/arch/arm/mach-pxa/devices.c +++ b/arch/arm/mach-pxa/devices.c @@ -4,6 +4,7 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/clkdev.h> +#include <linux/clk-provider.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/spi/pxa2xx_spi.h> @@ -634,6 +635,13 @@ static struct platform_device pxa27x_device_camera = { void __init pxa_set_camera_info(struct pxacamera_platform_data *info) { + struct clk *mclk; + + /* Register a fixed-rate clock for camera sensors. */ + mclk = clk_register_fixed_rate(NULL, "pxa_camera_clk", NULL, 0, + info->mclk_10khz * 10000); + if (!IS_ERR(mclk)) + clkdev_create(mclk, "mclk", NULL); pxa_register_device(&pxa27x_device_camera, info); } diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c index d3af80317f2d..a79f296e81e0 100644 --- a/arch/arm/mach-pxa/mioa701.c +++ b/arch/arm/mach-pxa/mioa701.c @@ -577,7 +577,6 @@ static struct platform_device power_dev = { static struct wm97xx_batt_pdata mioa701_battery_data = { .batt_aux = WM97XX_AUX_ID1, .temp_aux = -1, - .charge_gpio = -1, .min_voltage = 0xc00, .max_voltage = 0xfc0, .batt_tech = POWER_SUPPLY_TECHNOLOGY_LION, diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c index 0d246a1aebbc..6230381a7ca0 100644 --- a/arch/arm/mach-pxa/palm27x.c +++ b/arch/arm/mach-pxa/palm27x.c @@ -212,7 +212,6 @@ void __init palm27x_irda_init(int pwdn) static struct wm97xx_batt_pdata palm27x_batt_pdata = { .batt_aux = WM97XX_AUX_ID3, .temp_aux = WM97XX_AUX_ID2, - .charge_gpio = -1, .batt_mult = 1000, .batt_div = 414, .temp_mult = 1, diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c index e3bcf58b4e63..a2b10db4aacc 100644 --- a/arch/arm/mach-pxa/palmte2.c +++ b/arch/arm/mach-pxa/palmte2.c @@ -273,7 +273,6 @@ static struct platform_device power_supply = { static struct wm97xx_batt_pdata palmte2_batt_pdata = { .batt_aux = WM97XX_AUX_ID3, .temp_aux = WM97XX_AUX_ID2, - .charge_gpio = -1, .max_voltage = PALMTE2_BAT_MAX_VOLTAGE, .min_voltage = PALMTE2_BAT_MIN_VOLTAGE, .batt_mult = 1000, diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c index 21fd76bb09cd..8e74fbb0a96e 100644 --- a/arch/arm/mach-pxa/z2.c +++ b/arch/arm/mach-pxa/z2.c @@ -20,7 +20,6 @@ #include <linux/spi/spi.h> #include <linux/spi/pxa2xx_spi.h> #include <linux/spi/libertas_spi.h> -#include <linux/spi/lms283gf05.h> #include <linux/power_supply.h> #include <linux/mtd/physmap.h> #include <linux/gpio.h> @@ -488,7 +487,6 @@ static struct z2_battery_info batt_chip_info = { .batt_I2C_bus = 0, .batt_I2C_addr = 0x55, .batt_I2C_reg = 2, - .charge_gpio = GPIO0_ZIPITZ2_AC_DETECT, 
.min_voltage = 3475000, .max_voltage = 4190000, .batt_div = 59, @@ -497,9 +495,19 @@ static struct z2_battery_info batt_chip_info = { .batt_name = "Z2", }; +static struct gpiod_lookup_table z2_battery_gpio_table = { + .dev_id = "aer915", + .table = { + GPIO_LOOKUP("gpio-pxa", GPIO0_ZIPITZ2_AC_DETECT, + NULL, GPIO_ACTIVE_HIGH), + { }, + }, +}; + static struct i2c_board_info __initdata z2_i2c_board_info[] = { { I2C_BOARD_INFO("aer915", 0x55), + .dev_name = "aer915", .platform_data = &batt_chip_info, }, { I2C_BOARD_INFO("wm8750", 0x1b), @@ -510,6 +518,7 @@ static struct i2c_board_info __initdata z2_i2c_board_info[] = { static void __init z2_i2c_init(void) { pxa_set_i2c_info(NULL); + gpiod_add_lookup_table(&z2_battery_gpio_table); i2c_register_board_info(0, ARRAY_AND_SIZE(z2_i2c_board_info)); } #else @@ -578,8 +587,13 @@ static struct pxa2xx_spi_chip lms283_chip_info = { .gpio_cs = GPIO88_ZIPITZ2_LCD_CS, }; -static const struct lms283gf05_pdata lms283_pdata = { - .reset_gpio = GPIO19_ZIPITZ2_LCD_RESET, +static struct gpiod_lookup_table lms283_gpio_table = { + .dev_id = "spi2.0", /* SPI bus 2 chip select 0 */ + .table = { + GPIO_LOOKUP("gpio-pxa", GPIO19_ZIPITZ2_LCD_RESET, + "reset", GPIO_ACTIVE_LOW), + { }, + }, }; static struct spi_board_info spi_board_info[] __initdata = { @@ -595,7 +609,6 @@ static struct spi_board_info spi_board_info[] __initdata = { { .modalias = "lms283gf05", .controller_data = &lms283_chip_info, - .platform_data = &lms283_pdata, .max_speed_hz = 400000, .bus_num = 2, .chip_select = 0, @@ -615,6 +628,7 @@ static void __init z2_spi_init(void) { pxa2xx_set_spi_info(1, &pxa_ssp1_master_info); pxa2xx_set_spi_info(2, &pxa_ssp2_master_info); + gpiod_add_lookup_table(&lms283_gpio_table); spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info)); } #else diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c index d4e89a02c8c8..14c33ed05318 100644 --- a/arch/arm/mach-sa1100/collie.c +++ b/arch/arm/mach-sa1100/collie.c @@ -224,18 +224,12 @@ static int collie_uart_probe(struct locomo_dev *dev) return 0; } -static int collie_uart_remove(struct locomo_dev *dev) -{ - return 0; -} - static struct locomo_driver collie_uart_driver = { .drv = { .name = "collie_uart", }, .devid = LOCOMO_DEVID_UART, .probe = collie_uart_probe, - .remove = collie_uart_remove, }; static int __init collie_uart_init(void) diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h index 25b4c5e66e39..8ec2b92dca19 100644 --- a/arch/arm/mach-spear/generic.h +++ b/arch/arm/mach-spear/generic.h @@ -43,16 +43,4 @@ void spear13xx_cpu_die(unsigned int cpu); extern const struct smp_operations spear13xx_smp_ops; -#ifdef CONFIG_MACH_SPEAR1310 -void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base); -#else -static inline void spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base) {} -#endif - -#ifdef CONFIG_MACH_SPEAR1340 -void __init spear1340_clk_init(void __iomem *misc_base); -#else -static inline void spear1340_clk_init(void __iomem *misc_base) {} -#endif - #endif /* __MACH_GENERIC_H */ diff --git a/arch/arm/mach-spear/spear13xx.c b/arch/arm/mach-spear/spear13xx.c index 31c43cabf362..74d1ca2a529a 100644 --- a/arch/arm/mach-spear/spear13xx.c +++ b/arch/arm/mach-spear/spear13xx.c @@ -15,6 +15,7 @@ #include <linux/amba/pl022.h> #include <linux/clk.h> +#include <linux/clk/spear.h> #include <linux/clocksource.h> #include <linux/err.h> #include <linux/of.h> diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index c18d23a5e5f1..93ff0097f00b 
100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -19,6 +19,10 @@ #include <asm/ptdump.h> static struct addr_marker address_markers[] = { +#ifdef CONFIG_KASAN + { KASAN_SHADOW_START, "Kasan shadow start"}, + { KASAN_SHADOW_END, "Kasan shadow end"}, +#endif { MODULES_VADDR, "Modules" }, { PAGE_OFFSET, "Kernel Mapping" }, { 0, "vmalloc() Area" }, @@ -429,8 +433,11 @@ static void ptdump_initialize(void) if (pg_level[i].bits[j].nx_bit) pg_level[i].nx_bit = &pg_level[i].bits[j]; } - +#ifdef CONFIG_KASAN + address_markers[4].start_address = VMALLOC_START; +#else address_markers[2].start_address = VMALLOC_START; +#endif } static struct ptdump_info kernel_ptdump_info = { diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index c06ebfbc48c4..a25b660c3017 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -18,7 +18,6 @@ #include <asm/cp15.h> #include <asm/cputype.h> #include <asm/cachetype.h> -#include <asm/fixmap.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp_plat.h> diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile index 27d8beb7c941..3654f979851b 100644 --- a/arch/arm/tools/Makefile +++ b/arch/arm/tools/Makefile @@ -11,7 +11,7 @@ uapi := $(gen)/uapi/asm syshdr := $(srctree)/$(src)/syscallhdr.sh sysnr := $(srctree)/$(src)/syscallnr.sh systbl := $(srctree)/$(src)/syscalltbl.sh -syscall := $(srctree)/$(src)/syscall.tbl +syscall := $(src)/syscall.tbl gen-y := $(gen)/calls-oabi.S gen-y += $(gen)/calls-eabi.S diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index 20e1170e2e0a..dcc1191291a2 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -455,3 +455,4 @@ 439 common faccessat2 sys_faccessat2 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile index b558bee0e1f6..7c9e395b77f7 100644 --- a/arch/arm/vdso/Makefile +++ b/arch/arm/vdso/Makefile @@ -23,7 +23,6 @@ ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ -T obj-$(CONFIG_VDSO) += vdso.o -extra-$(CONFIG_VDSO) += vdso.lds CPPFLAGS_vdso.lds += -P -C -U$(ARCH) CFLAGS_REMOVE_vdso.o = -pg diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c index e52950a43f2e..acb464547a54 100644 --- a/arch/arm/xen/p2m.c +++ b/arch/arm/xen/p2m.c @@ -93,10 +93,39 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, int i; for (i = 0; i < count; i++) { + struct gnttab_unmap_grant_ref unmap; + int rc; + if (map_ops[i].status) continue; - set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, - map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT); + if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, + map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) + continue; + + /* + * Signal an error for this slot. This in turn requires + * immediate unmapping. + */ + map_ops[i].status = GNTST_general_error; + unmap.host_addr = map_ops[i].host_addr, + unmap.handle = map_ops[i].handle; + map_ops[i].handle = ~0; + if (map_ops[i].flags & GNTMAP_device_map) + unmap.dev_bus_addr = map_ops[i].dev_bus_addr; + else + unmap.dev_bus_addr = 0; + + /* + * Pre-populate the status field, to be recognizable in + * the log message below. 
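One note on the mm/dump.c hunk above: the "vmalloc() Area" marker has no compile-time start address, so it is patched at runtime by array index, and the two KASAN shadow markers shift that index from 2 to 4. A sketch of how a named index would make the relationship explicit (VMALLOC_MARKER_IDX is hypothetical, for illustration only):

    /* address_markers[] layout:
     *   with KASAN:    [0] shadow start  [1] shadow end  [2] Modules
     *                  [3] Kernel Mapping  [4] vmalloc() Area
     *   without KASAN: [0] Modules  [1] Kernel Mapping  [2] vmalloc() Area
     */
    #ifdef CONFIG_KASAN
    #define VMALLOC_MARKER_IDX      4
    #else
    #define VMALLOC_MARKER_IDX      2
    #endif

            /* equivalent to the #ifdef pair in the hunk above */
            address_markers[VMALLOC_MARKER_IDX].start_address = VMALLOC_START;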
+ */ + unmap.status = 1; + + rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, + &unmap, 1); + if (rc || unmap.status != GNTST_okay) + pr_err_once("gnttab unmap failed: rc=%d st=%d\n", + rc, unmap.status); } return 0; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 52bc8f6206b7..1f212b47a48a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -73,6 +73,8 @@ config ARM64 select ARCH_SUPPORTS_DEBUG_PAGEALLOC select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK + select ARCH_SUPPORTS_LTO_CLANG if CPU_LITTLE_ENDIAN + select ARCH_SUPPORTS_LTO_CLANG_THIN select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG) select ARCH_SUPPORTS_NUMA_BALANCING @@ -138,6 +140,7 @@ config ARM64 select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48) select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE) + select HAVE_ARCH_KFENCE select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT @@ -162,6 +165,8 @@ config ARM64 select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS \ if $(cc-option,-fpatchable-function-entry=2) + select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \ + if DYNAMIC_FTRACE_WITH_REGS select HAVE_EFFICIENT_UNALIGNED_ACCESS select HAVE_FAST_GUP select HAVE_FTRACE_MCOUNT_RECORD @@ -522,7 +527,7 @@ config ARM64_ERRATUM_1024718 help This option adds a workaround for ARM Cortex-A55 Erratum 1024718. - Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect + Affected Cortex-A55 cores (all revisions) could cause incorrect update of the hardware dirty bit when the DBM/AP bits are updated without a break-before-make. The workaround is to disable the usage of hardware DBM locally on the affected cores. CPUs not affected by @@ -952,8 +957,9 @@ choice that is selected here. config CPU_BIG_ENDIAN - bool "Build big-endian kernel" - help + bool "Build big-endian kernel" + depends on !LD_IS_LLD || LLD_VERSION >= 130000 + help Say Y if you plan on running a kernel with a big-endian userspace. config CPU_LITTLE_ENDIAN @@ -993,6 +999,7 @@ config HOTPLUG_CPU # Common NUMA Features config NUMA bool "NUMA Memory Allocation and Scheduler Support" + select GENERIC_ARCH_NUMA select ACPI_NUMA if ACPI select OF_NUMA help @@ -1132,6 +1139,10 @@ config CRASH_DUMP For more details see Documentation/admin-guide/kdump/kdump.rst +config TRANS_TABLE + def_bool y + depends on HIBERNATION + config XEN_DOM0 def_bool y depends on XEN @@ -1476,7 +1487,7 @@ config ARM64_PTR_AUTH depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC # Modern compilers insert a .note.gnu.property section note for PAC # which is only understood by binutils starting with version 2.33.1. - depends on LD_IS_LLD || LD_VERSION >= 233010000 || (CC_IS_GCC && GCC_VERSION < 90100) + depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100) depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) help diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 90309208bb28..5b84aec31ed3 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -188,10 +188,12 @@ ifeq ($(KBUILD_EXTMOD),) # this hack. 
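Regarding the FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY select added to the arm64 Kconfig above: with -fpatchable-function-entry=2 the compiler plants two NOPs at each function entry and records their locations in a __patchable_function_entries section, so ftrace can take its mcount table straight from the compiler output instead of post-processing objects with recordmcount. The per-function equivalent of the flag, as an illustrative sketch:

    /* GCC/Clang attribute form of -fpatchable-function-entry=2 */
    __attribute__((patchable_function_entry(2)))
    void traced_fn(void)
    {
            /* Entry starts with two NOPs; DYNAMIC_FTRACE_WITH_REGS can
             * later patch them into a branch to the ftrace trampoline. */
    }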
prepare: vdso_prepare vdso_prepare: prepare0 - $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h - $(if $(CONFIG_COMPAT_VDSO),$(Q)$(MAKE) \ - $(build)=arch/arm64/kernel/vdso32 \ - include/generated/vdso32-offsets.h) + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso \ + include/generated/vdso-offsets.h arch/arm64/kernel/vdso/vdso.so +ifdef CONFIG_COMPAT_VDSO + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 \ + include/generated/vdso32-offsets.h arch/arm64/kernel/vdso32/vdso.so +endif endif define archhelp diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts index 26a4a5a6871e..2f0528d01299 100644 --- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts +++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts @@ -232,7 +232,7 @@ &apps_rsc { pm8009-rpmh-regulators { - compatible = "qcom,pm8009-rpmh-regulators"; + compatible = "qcom,pm8009-1-rpmh-regulators"; qcom,pmic-id = "f"; vdd-s1-supply = <&vph_pwr>; @@ -241,6 +241,13 @@ vdd-l5-l6-supply = <&vreg_bob>; vdd-l7-supply = <&vreg_s4a_1p8>; + vreg_s2f_0p95: smps2 { + regulator-name = "vreg_s2f_0p95"; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <952000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>; + }; + vreg_l1f_1p1: ldo1 { regulator-name = "vreg_l1f_1p1"; regulator-min-microvolt = <1104000>; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 43c4280bfc4f..d612f633b771 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -770,7 +770,8 @@ CONFIG_SND_SOC_LPASS_VA_MACRO=m CONFIG_SND_SIMPLE_CARD=m CONFIG_SND_AUDIO_GRAPH_CARD=m CONFIG_HID_MULTITOUCH=m -CONFIG_I2C_HID=m +CONFIG_I2C_HID_ACPI=m +CONFIG_I2C_HID_OF=m CONFIG_USB_CONN_GPIO=m CONFIG_USB=y CONFIG_USB_OTG=y diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index 34b8a89197be..17e735931a0c 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -24,6 +24,7 @@ #ifdef USE_V8_CRYPTO_EXTENSIONS #define MODE "ce" #define PRIO 300 +#define STRIDE 5 #define aes_expandkey ce_aes_expandkey #define aes_ecb_encrypt ce_aes_ecb_encrypt #define aes_ecb_decrypt ce_aes_ecb_decrypt @@ -41,6 +42,7 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions"); #else #define MODE "neon" #define PRIO 200 +#define STRIDE 4 #define aes_ecb_encrypt neon_aes_ecb_encrypt #define aes_ecb_decrypt neon_aes_ecb_decrypt #define aes_cbc_encrypt neon_aes_cbc_encrypt @@ -55,7 +57,7 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions"); #define aes_mac_update neon_aes_mac_update MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON"); #endif -#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS) +#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS) MODULE_ALIAS_CRYPTO("ecb(aes)"); MODULE_ALIAS_CRYPTO("cbc(aes)"); MODULE_ALIAS_CRYPTO("ctr(aes)"); @@ -87,7 +89,7 @@ asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int bytes, u8 const iv[]); asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[], - int rounds, int blocks, u8 ctr[]); + int rounds, int bytes, u8 ctr[], u8 finalbuf[]); asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds, int bytes, u32 const rk2[], u8 iv[], @@ -103,9 +105,9 @@ asmlinkage void aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds, int blocks, u8 iv[], u32 const rk2[]); -asmlinkage void aes_mac_update(u8 const in[], u32 const 
rk[], int rounds, - int blocks, u8 dg[], int enc_before, - int enc_after); +asmlinkage int aes_mac_update(u8 const in[], u32 const rk[], int rounds, + int blocks, u8 dg[], int enc_before, + int enc_after); struct crypto_aes_xts_ctx { struct crypto_aes_ctx key1; @@ -448,34 +450,36 @@ static int ctr_encrypt(struct skcipher_request *req) struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); int err, rounds = 6 + ctx->key_length / 4; struct skcipher_walk walk; - int blocks; err = skcipher_walk_virt(&walk, req, false); - while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { - kernel_neon_begin(); - aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, - ctx->key_enc, rounds, blocks, walk.iv); - kernel_neon_end(); - err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); - } - if (walk.nbytes) { - u8 __aligned(8) tail[AES_BLOCK_SIZE]; + while (walk.nbytes > 0) { + const u8 *src = walk.src.virt.addr; unsigned int nbytes = walk.nbytes; - u8 *tdst = walk.dst.virt.addr; - u8 *tsrc = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + u8 buf[AES_BLOCK_SIZE]; + unsigned int tail; - /* - * Tell aes_ctr_encrypt() to process a tail block. - */ - blocks = -1; + if (unlikely(nbytes < AES_BLOCK_SIZE)) + src = memcpy(buf, src, nbytes); + else if (nbytes < walk.total) + nbytes &= ~(AES_BLOCK_SIZE - 1); kernel_neon_begin(); - aes_ctr_encrypt(tail, NULL, ctx->key_enc, rounds, - blocks, walk.iv); + aes_ctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes, + walk.iv, buf); kernel_neon_end(); - crypto_xor_cpy(tdst, tsrc, tail, nbytes); - err = skcipher_walk_done(&walk, 0); + + tail = nbytes % (STRIDE * AES_BLOCK_SIZE); + if (tail > 0 && tail < AES_BLOCK_SIZE) + /* + * The final partial block could not be returned using + * an overlapping store, so it was passed via buf[] + * instead. + */ + memcpy(dst + nbytes - tail, buf, tail); + + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } return err; @@ -650,7 +654,7 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req) } static struct skcipher_alg aes_algs[] = { { -#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS) +#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS) .base = { .cra_name = "__ecb(aes)", .cra_driver_name = "__ecb-aes-" MODE, @@ -852,10 +856,17 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks, int rounds = 6 + ctx->key_length / 4; if (crypto_simd_usable()) { - kernel_neon_begin(); - aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before, - enc_after); - kernel_neon_end(); + int rem; + + do { + kernel_neon_begin(); + rem = aes_mac_update(in, ctx->key_enc, rounds, blocks, + dg, enc_before, enc_after); + kernel_neon_end(); + in += (blocks - rem) * AES_BLOCK_SIZE; + blocks = rem; + enc_before = 0; + } while (blocks); } else { if (enc_before) aes_encrypt(ctx, dg, dg); diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index cf618d8f6cec..bbdb54702aa7 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -321,42 +321,76 @@ AES_FUNC_END(aes_cbc_cts_decrypt) /* * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, - * int blocks, u8 ctr[]) + * int bytes, u8 ctr[], u8 finalbuf[]) */ AES_FUNC_START(aes_ctr_encrypt) stp x29, x30, [sp, #-16]! mov x29, sp - enc_prepare w3, x2, x6 + enc_prepare w3, x2, x12 ld1 {vctr.16b}, [x5] - umov x6, vctr.d[1] /* keep swabbed ctr in reg */ - rev x6, x6 - cmn w6, w4 /* 32 bit overflow? 
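The CTR refactor above changes aes_ctr_encrypt() from block-granular to byte-granular: the NEON code writes the final, possibly partial stride using overlapping 16-byte stores, which only works when at least one full block of output exists. Anything shorter is returned through the new finalbuf argument and copied into place by the C side. Condensed, the tail handling looks like this (excerpted shape; variable names as in the patch):

    tail = nbytes % (STRIDE * AES_BLOCK_SIZE);
    if (tail > 0 && tail < AES_BLOCK_SIZE)
            /* Less than one block of output in this stride: an
             * overlapping store was impossible, so the asm wrote the
             * ciphertext to buf[] and we copy it out here. */
            memcpy(dst + nbytes - tail, buf, tail);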
*/ - bcs .Lctrloop + umov x12, vctr.d[1] /* keep swabbed ctr in reg */ + rev x12, x12 + .LctrloopNx: - subs w4, w4, #MAX_STRIDE - bmi .Lctr1x - add w7, w6, #1 + add w7, w4, #15 + sub w4, w4, #MAX_STRIDE << 4 + lsr w7, w7, #4 + mov w8, #MAX_STRIDE + cmp w7, w8 + csel w7, w7, w8, lt + adds x12, x12, x7 + mov v0.16b, vctr.16b - add w8, w6, #2 mov v1.16b, vctr.16b - add w9, w6, #3 mov v2.16b, vctr.16b - add w9, w6, #3 - rev w7, w7 mov v3.16b, vctr.16b - rev w8, w8 ST5( mov v4.16b, vctr.16b ) - mov v1.s[3], w7 - rev w9, w9 -ST5( add w10, w6, #4 ) - mov v2.s[3], w8 -ST5( rev w10, w10 ) - mov v3.s[3], w9 -ST5( mov v4.s[3], w10 ) - ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */ + bcs 0f + + .subsection 1 + /* apply carry to outgoing counter */ +0: umov x8, vctr.d[0] + rev x8, x8 + add x8, x8, #1 + rev x8, x8 + ins vctr.d[0], x8 + + /* apply carry to N counter blocks for N := x12 */ + adr x16, 1f + sub x16, x16, x12, lsl #3 + br x16 + hint 34 // bti c + mov v0.d[0], vctr.d[0] + hint 34 // bti c + mov v1.d[0], vctr.d[0] + hint 34 // bti c + mov v2.d[0], vctr.d[0] + hint 34 // bti c + mov v3.d[0], vctr.d[0] +ST5( hint 34 ) +ST5( mov v4.d[0], vctr.d[0] ) +1: b 2f + .previous + +2: rev x7, x12 + ins vctr.d[1], x7 + sub x7, x12, #MAX_STRIDE - 1 + sub x8, x12, #MAX_STRIDE - 2 + sub x9, x12, #MAX_STRIDE - 3 + rev x7, x7 + rev x8, x8 + mov v1.d[1], x7 + rev x9, x9 +ST5( sub x10, x12, #MAX_STRIDE - 4 ) + mov v2.d[1], x8 +ST5( rev x10, x10 ) + mov v3.d[1], x9 +ST5( mov v4.d[1], x10 ) + tbnz w4, #31, .Lctrtail + ld1 {v5.16b-v7.16b}, [x1], #48 ST4( bl aes_encrypt_block4x ) ST5( bl aes_encrypt_block5x ) eor v0.16b, v5.16b, v0.16b @@ -368,47 +402,72 @@ ST5( ld1 {v5.16b-v6.16b}, [x1], #32 ) ST5( eor v4.16b, v6.16b, v4.16b ) st1 {v0.16b-v3.16b}, [x0], #64 ST5( st1 {v4.16b}, [x0], #16 ) - add x6, x6, #MAX_STRIDE - rev x7, x6 - ins vctr.d[1], x7 cbz w4, .Lctrout b .LctrloopNx -.Lctr1x: - adds w4, w4, #MAX_STRIDE - beq .Lctrout -.Lctrloop: - mov v0.16b, vctr.16b - encrypt_block v0, w3, x2, x8, w7 - - adds x6, x6, #1 /* increment BE ctr */ - rev x7, x6 - ins vctr.d[1], x7 - bcs .Lctrcarry /* overflow? */ - -.Lctrcarrydone: - subs w4, w4, #1 - bmi .Lctrtailblock /* blocks <0 means tail block */ - ld1 {v3.16b}, [x1], #16 - eor v3.16b, v0.16b, v3.16b - st1 {v3.16b}, [x0], #16 - bne .Lctrloop .Lctrout: st1 {vctr.16b}, [x5] /* return next CTR value */ ldp x29, x30, [sp], #16 ret -.Lctrtailblock: - st1 {v0.16b}, [x0] +.Lctrtail: + /* XOR up to MAX_STRIDE * 16 - 1 bytes of in/output with v0 ... 
v3/v4 */ + mov x16, #16 + ands x13, x4, #0xf + csel x13, x13, x16, ne + +ST5( cmp w4, #64 - (MAX_STRIDE << 4) ) +ST5( csel x14, x16, xzr, gt ) + cmp w4, #48 - (MAX_STRIDE << 4) + csel x15, x16, xzr, gt + cmp w4, #32 - (MAX_STRIDE << 4) + csel x16, x16, xzr, gt + cmp w4, #16 - (MAX_STRIDE << 4) + ble .Lctrtail1x + + adr_l x12, .Lcts_permute_table + add x12, x12, x13 + +ST5( ld1 {v5.16b}, [x1], x14 ) + ld1 {v6.16b}, [x1], x15 + ld1 {v7.16b}, [x1], x16 + +ST4( bl aes_encrypt_block4x ) +ST5( bl aes_encrypt_block5x ) + + ld1 {v8.16b}, [x1], x13 + ld1 {v9.16b}, [x1] + ld1 {v10.16b}, [x12] + +ST4( eor v6.16b, v6.16b, v0.16b ) +ST4( eor v7.16b, v7.16b, v1.16b ) +ST4( tbl v3.16b, {v3.16b}, v10.16b ) +ST4( eor v8.16b, v8.16b, v2.16b ) +ST4( eor v9.16b, v9.16b, v3.16b ) + +ST5( eor v5.16b, v5.16b, v0.16b ) +ST5( eor v6.16b, v6.16b, v1.16b ) +ST5( tbl v4.16b, {v4.16b}, v10.16b ) +ST5( eor v7.16b, v7.16b, v2.16b ) +ST5( eor v8.16b, v8.16b, v3.16b ) +ST5( eor v9.16b, v9.16b, v4.16b ) + +ST5( st1 {v5.16b}, [x0], x14 ) + st1 {v6.16b}, [x0], x15 + st1 {v7.16b}, [x0], x16 + add x13, x13, x0 + st1 {v9.16b}, [x13] // overlapping stores + st1 {v8.16b}, [x0] b .Lctrout -.Lctrcarry: - umov x7, vctr.d[0] /* load upper word of ctr */ - rev x7, x7 /* ... to handle the carry */ - add x7, x7, #1 - rev x7, x7 - ins vctr.d[0], x7 - b .Lctrcarrydone +.Lctrtail1x: + csel x0, x0, x6, eq // use finalbuf if less than a full block + ld1 {v5.16b}, [x1] +ST5( mov v3.16b, v4.16b ) + encrypt_block v3, w3, x2, x8, w7 + eor v5.16b, v5.16b, v3.16b + st1 {v5.16b}, [x0] + b .Lctrout AES_FUNC_END(aes_ctr_encrypt) @@ -619,61 +678,47 @@ AES_FUNC_END(aes_xts_decrypt) * int blocks, u8 dg[], int enc_before, int enc_after) */ AES_FUNC_START(aes_mac_update) - frame_push 6 - - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - mov x23, x4 - mov x24, x6 - - ld1 {v0.16b}, [x23] /* get dg */ + ld1 {v0.16b}, [x4] /* get dg */ enc_prepare w2, x1, x7 cbz w5, .Lmacloop4x encrypt_block v0, w2, x1, x7, w8 .Lmacloop4x: - subs w22, w22, #4 + subs w3, w3, #4 bmi .Lmac1x - ld1 {v1.16b-v4.16b}, [x19], #64 /* get next pt block */ + ld1 {v1.16b-v4.16b}, [x0], #64 /* get next pt block */ eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */ - encrypt_block v0, w21, x20, x7, w8 + encrypt_block v0, w2, x1, x7, w8 eor v0.16b, v0.16b, v2.16b - encrypt_block v0, w21, x20, x7, w8 + encrypt_block v0, w2, x1, x7, w8 eor v0.16b, v0.16b, v3.16b - encrypt_block v0, w21, x20, x7, w8 + encrypt_block v0, w2, x1, x7, w8 eor v0.16b, v0.16b, v4.16b - cmp w22, wzr - csinv x5, x24, xzr, eq + cmp w3, wzr + csinv x5, x6, xzr, eq cbz w5, .Lmacout - encrypt_block v0, w21, x20, x7, w8 - st1 {v0.16b}, [x23] /* return dg */ - cond_yield_neon .Lmacrestart + encrypt_block v0, w2, x1, x7, w8 + st1 {v0.16b}, [x4] /* return dg */ + cond_yield .Lmacout, x7 b .Lmacloop4x .Lmac1x: - add w22, w22, #4 + add w3, w3, #4 .Lmacloop: - cbz w22, .Lmacout - ld1 {v1.16b}, [x19], #16 /* get next pt block */ + cbz w3, .Lmacout + ld1 {v1.16b}, [x0], #16 /* get next pt block */ eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */ - subs w22, w22, #1 - csinv x5, x24, xzr, eq + subs w3, w3, #1 + csinv x5, x6, xzr, eq cbz w5, .Lmacout .Lmacenc: - encrypt_block v0, w21, x20, x7, w8 + encrypt_block v0, w2, x1, x7, w8 b .Lmacloop .Lmacout: - st1 {v0.16b}, [x23] /* return dg */ - frame_pop + st1 {v0.16b}, [x4] /* return dg */ + mov w0, w3 ret - -.Lmacrestart: - ld1 {v0.16b}, [x23] /* get dg */ - enc_prepare w21, x20, x0 - b .Lmacloop4x AES_FUNC_END(aes_mac_update) diff --git a/arch/arm64/crypto/aes-neonbs-core.S 
b/arch/arm64/crypto/aes-neonbs-core.S index 63a52ad9a75c..a3405b8c344b 100644 --- a/arch/arm64/crypto/aes-neonbs-core.S +++ b/arch/arm64/crypto/aes-neonbs-core.S @@ -613,7 +613,6 @@ SYM_FUNC_END(aesbs_decrypt8) st1 {\o7\().16b}, [x19], #16 cbz x23, 1f - cond_yield_neon b 99b 1: frame_pop @@ -715,7 +714,6 @@ SYM_FUNC_START(aesbs_cbc_decrypt) 1: st1 {v24.16b}, [x24] // store IV cbz x23, 2f - cond_yield_neon b 99b 2: frame_pop @@ -801,7 +799,7 @@ SYM_FUNC_END(__xts_crypt8) mov x23, x4 mov x24, x5 -0: movi v30.2s, #0x1 + movi v30.2s, #0x1 movi v25.2s, #0x87 uzp1 v30.4s, v30.4s, v25.4s ld1 {v25.16b}, [x24] @@ -846,7 +844,6 @@ SYM_FUNC_END(__xts_crypt8) cbz x23, 1f st1 {v25.16b}, [x24] - cond_yield_neon 0b b 99b 1: st1 {v25.16b}, [x24] @@ -889,7 +886,7 @@ SYM_FUNC_START(aesbs_ctr_encrypt) cset x26, ne add x23, x23, x26 // do one extra block if final -98: ldp x7, x8, [x24] + ldp x7, x8, [x24] ld1 {v0.16b}, [x24] CPU_LE( rev x7, x7 ) CPU_LE( rev x8, x8 ) @@ -967,7 +964,6 @@ CPU_LE( rev x8, x8 ) st1 {v0.16b}, [x24] cbz x23, .Lctr_done - cond_yield_neon 98b b 99b .Lctr_done: diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S index 111d9c9abddd..dce6dcebfca1 100644 --- a/arch/arm64/crypto/crct10dif-ce-core.S +++ b/arch/arm64/crypto/crct10dif-ce-core.S @@ -68,10 +68,10 @@ .text .arch armv8-a+crypto - init_crc .req w19 - buf .req x20 - len .req x21 - fold_consts_ptr .req x22 + init_crc .req w0 + buf .req x1 + len .req x2 + fold_consts_ptr .req x3 fold_consts .req v10 @@ -257,12 +257,6 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) .endm .macro crc_t10dif_pmull, p - frame_push 4, 128 - - mov init_crc, w0 - mov buf, x1 - mov len, x2 - __pmull_init_\p // For sizes less than 256 bytes, we can't fold 128 bytes at a time. @@ -317,26 +311,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) fold_32_bytes \p, v6, v7 subs len, len, #128 - b.lt .Lfold_128_bytes_loop_done_\@ - - if_will_cond_yield_neon - stp q0, q1, [sp, #.Lframe_local_offset] - stp q2, q3, [sp, #.Lframe_local_offset + 32] - stp q4, q5, [sp, #.Lframe_local_offset + 64] - stp q6, q7, [sp, #.Lframe_local_offset + 96] - do_cond_yield_neon - ldp q0, q1, [sp, #.Lframe_local_offset] - ldp q2, q3, [sp, #.Lframe_local_offset + 32] - ldp q4, q5, [sp, #.Lframe_local_offset + 64] - ldp q6, q7, [sp, #.Lframe_local_offset + 96] - ld1 {fold_consts.2d}, [fold_consts_ptr] - __pmull_init_\p - __pmull_pre_\p fold_consts - endif_yield_neon - - b .Lfold_128_bytes_loop_\@ - -.Lfold_128_bytes_loop_done_\@: + b.ge .Lfold_128_bytes_loop_\@ // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7. @@ -453,7 +428,9 @@ CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0. umov w0, v0.h[0] - frame_pop + .ifc \p, p8 + ldp x29, x30, [sp], #16 + .endif ret .Lless_than_256_bytes_\@: @@ -489,7 +466,9 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) // Assumes len >= 16. // SYM_FUNC_START(crc_t10dif_pmull_p8) - crc_t10dif_pmull p8 + stp x29, x30, [sp, #-16]! 
+ mov x29, sp + crc_t10dif_pmull p8 SYM_FUNC_END(crc_t10dif_pmull_p8) .align 5 diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c index ccc3f6067742..09eb1456aed4 100644 --- a/arch/arm64/crypto/crct10dif-ce-glue.c +++ b/arch/arm64/crypto/crct10dif-ce-glue.c @@ -37,9 +37,18 @@ static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data, u16 *crc = shash_desc_ctx(desc); if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) { - kernel_neon_begin(); - *crc = crc_t10dif_pmull_p8(*crc, data, length); - kernel_neon_end(); + do { + unsigned int chunk = length; + + if (chunk > SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE) + chunk = SZ_4K; + + kernel_neon_begin(); + *crc = crc_t10dif_pmull_p8(*crc, data, chunk); + kernel_neon_end(); + data += chunk; + length -= chunk; + } while (length); } else { *crc = crc_t10dif_generic(*crc, data, length); } @@ -53,9 +62,18 @@ static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data, u16 *crc = shash_desc_ctx(desc); if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) { - kernel_neon_begin(); - *crc = crc_t10dif_pmull_p64(*crc, data, length); - kernel_neon_end(); + do { + unsigned int chunk = length; + + if (chunk > SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE) + chunk = SZ_4K; + + kernel_neon_begin(); + *crc = crc_t10dif_pmull_p64(*crc, data, chunk); + kernel_neon_end(); + data += chunk; + length -= chunk; + } while (length); } else { *crc = crc_t10dif_generic(*crc, data, length); } diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S index 92d0d2753e81..8c02bbc2684e 100644 --- a/arch/arm64/crypto/sha1-ce-core.S +++ b/arch/arm64/crypto/sha1-ce-core.S @@ -62,40 +62,34 @@ .endm /* - * void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, - * int blocks) + * int sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, + * int blocks) */ SYM_FUNC_START(sha1_ce_transform) - frame_push 3 - - mov x19, x0 - mov x20, x1 - mov x21, x2 - /* load round constants */ -0: loadrc k0.4s, 0x5a827999, w6 + loadrc k0.4s, 0x5a827999, w6 loadrc k1.4s, 0x6ed9eba1, w6 loadrc k2.4s, 0x8f1bbcdc, w6 loadrc k3.4s, 0xca62c1d6, w6 /* load state */ - ld1 {dgav.4s}, [x19] - ldr dgb, [x19, #16] + ld1 {dgav.4s}, [x0] + ldr dgb, [x0, #16] /* load sha1_ce_state::finalize */ ldr_l w4, sha1_ce_offsetof_finalize, x4 - ldr w4, [x19, x4] + ldr w4, [x0, x4] /* load input */ -1: ld1 {v8.4s-v11.4s}, [x20], #64 - sub w21, w21, #1 +0: ld1 {v8.4s-v11.4s}, [x1], #64 + sub w2, w2, #1 CPU_LE( rev32 v8.16b, v8.16b ) CPU_LE( rev32 v9.16b, v9.16b ) CPU_LE( rev32 v10.16b, v10.16b ) CPU_LE( rev32 v11.16b, v11.16b ) -2: add t0.4s, v8.4s, k0.4s +1: add t0.4s, v8.4s, k0.4s mov dg0v.16b, dgav.16b add_update c, ev, k0, 8, 9, 10, 11, dgb @@ -126,25 +120,18 @@ CPU_LE( rev32 v11.16b, v11.16b ) add dgbv.2s, dgbv.2s, dg1v.2s add dgav.4s, dgav.4s, dg0v.4s - cbz w21, 3f - - if_will_cond_yield_neon - st1 {dgav.4s}, [x19] - str dgb, [x19, #16] - do_cond_yield_neon + cbz w2, 2f + cond_yield 3f, x5 b 0b - endif_yield_neon - - b 1b /* * Final block: add padding and total bit count. * Skip if the input size was not a round multiple of the block size, * the padding is handled by the C code in that case. 
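Both pmull variants above now bound the time spent with preemption disabled from the C side, replacing the removed if_will_cond_yield_neon machinery that had to spill q0-q7 across every yield point. Each kernel_neon_begin()/kernel_neon_end() section processes at most about 4 KiB; the CRC_T10DIF_PMULL_CHUNK_SIZE (16-byte) slack in the comparison keeps a split from ever leaving a tail shorter than the minimum the assembly accepts. The generic shape of the loop (a sketch; process_chunk() is a hypothetical stand-in for the pmull routines):

    while (length) {
            unsigned int chunk = length;

            /* only split when the remainder exceeds 4K + 16 bytes */
            if (chunk > SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE)
                    chunk = SZ_4K;

            kernel_neon_begin();
            crc = process_chunk(crc, data, chunk);  /* hypothetical primitive */
            kernel_neon_end();

            data   += chunk;
            length -= chunk;
    }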
*/ -3: cbz x4, 4f +2: cbz x4, 3f ldr_l w4, sha1_ce_offsetof_count, x4 - ldr x4, [x19, x4] + ldr x4, [x0, x4] movi v9.2d, #0 mov x8, #0x80000000 movi v10.2d, #0 @@ -153,11 +140,11 @@ CPU_LE( rev32 v11.16b, v11.16b ) mov x4, #0 mov v11.d[0], xzr mov v11.d[1], x7 - b 2b + b 1b /* store new state */ -4: st1 {dgav.4s}, [x19] - str dgb, [x19, #16] - frame_pop +3: st1 {dgav.4s}, [x0] + str dgb, [x0, #16] + mov w0, w2 ret SYM_FUNC_END(sha1_ce_transform) diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index c93121bcfdeb..71fa4f1122d7 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -19,6 +19,7 @@ MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("sha1"); struct sha1_ce_state { struct sha1_state sst; @@ -28,14 +29,22 @@ struct sha1_ce_state { extern const u32 sha1_ce_offsetof_count; extern const u32 sha1_ce_offsetof_finalize; -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, - int blocks); +asmlinkage int sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, + int blocks); static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src, int blocks) { - sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst), src, - blocks); + while (blocks) { + int rem; + + kernel_neon_begin(); + rem = sha1_ce_transform(container_of(sst, struct sha1_ce_state, + sst), src, blocks); + kernel_neon_end(); + src += (blocks - rem) * SHA1_BLOCK_SIZE; + blocks = rem; + } } const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count); @@ -50,9 +59,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data, return crypto_sha1_update(desc, data, len); sctx->finalize = 0; - kernel_neon_begin(); sha1_base_do_update(desc, data, len, __sha1_ce_transform); - kernel_neon_end(); return 0; } @@ -72,11 +79,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, */ sctx->finalize = finalize; - kernel_neon_begin(); sha1_base_do_update(desc, data, len, __sha1_ce_transform); if (!finalize) sha1_base_do_finalize(desc, __sha1_ce_transform); - kernel_neon_end(); return sha1_base_finish(desc, out); } @@ -88,9 +93,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out) return crypto_sha1_finup(desc, NULL, 0, out); sctx->finalize = 0; - kernel_neon_begin(); sha1_base_do_finalize(desc, __sha1_ce_transform); - kernel_neon_end(); return sha1_base_finish(desc, out); } diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S index 3f9d0f326987..6cdea7d56059 100644 --- a/arch/arm64/crypto/sha2-ce-core.S +++ b/arch/arm64/crypto/sha2-ce-core.S @@ -76,36 +76,30 @@ */ .text SYM_FUNC_START(sha2_ce_transform) - frame_push 3 - - mov x19, x0 - mov x20, x1 - mov x21, x2 - /* load round constants */ -0: adr_l x8, .Lsha2_rcon + adr_l x8, .Lsha2_rcon ld1 { v0.4s- v3.4s}, [x8], #64 ld1 { v4.4s- v7.4s}, [x8], #64 ld1 { v8.4s-v11.4s}, [x8], #64 ld1 {v12.4s-v15.4s}, [x8] /* load state */ - ld1 {dgav.4s, dgbv.4s}, [x19] + ld1 {dgav.4s, dgbv.4s}, [x0] /* load sha256_ce_state::finalize */ ldr_l w4, sha256_ce_offsetof_finalize, x4 - ldr w4, [x19, x4] + ldr w4, [x0, x4] /* load input */ -1: ld1 {v16.4s-v19.4s}, [x20], #64 - sub w21, w21, #1 +0: ld1 {v16.4s-v19.4s}, [x1], #64 + sub w2, w2, #1 CPU_LE( rev32 v16.16b, v16.16b ) CPU_LE( rev32 v17.16b, v17.16b ) CPU_LE( rev32 v18.16b, v18.16b ) CPU_LE( rev32 v19.16b, v19.16b ) -2: add t0.4s, v16.4s, v0.4s +1: add 
t0.4s, v16.4s, v0.4s mov dg0v.16b, dgav.16b mov dg1v.16b, dgbv.16b @@ -134,24 +128,18 @@ CPU_LE( rev32 v19.16b, v19.16b ) add dgbv.4s, dgbv.4s, dg1v.4s /* handled all input blocks? */ - cbz w21, 3f - - if_will_cond_yield_neon - st1 {dgav.4s, dgbv.4s}, [x19] - do_cond_yield_neon + cbz w2, 2f + cond_yield 3f, x5 b 0b - endif_yield_neon - - b 1b /* * Final block: add padding and total bit count. * Skip if the input size was not a round multiple of the block size, * the padding is handled by the C code in that case. */ -3: cbz x4, 4f +2: cbz x4, 3f ldr_l w4, sha256_ce_offsetof_count, x4 - ldr x4, [x19, x4] + ldr x4, [x0, x4] movi v17.2d, #0 mov x8, #0x80000000 movi v18.2d, #0 @@ -160,10 +148,10 @@ CPU_LE( rev32 v19.16b, v19.16b ) mov x4, #0 mov v19.d[0], xzr mov v19.d[1], x7 - b 2b + b 1b /* store new state */ -4: st1 {dgav.4s, dgbv.4s}, [x19] - frame_pop +3: st1 {dgav.4s, dgbv.4s}, [x0] + mov w0, w2 ret SYM_FUNC_END(sha2_ce_transform) diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index 31ba3da5e61b..c57a6119fefc 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -19,6 +19,8 @@ MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("sha224"); +MODULE_ALIAS_CRYPTO("sha256"); struct sha256_ce_state { struct sha256_state sst; @@ -28,14 +30,22 @@ struct sha256_ce_state { extern const u32 sha256_ce_offsetof_count; extern const u32 sha256_ce_offsetof_finalize; -asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, - int blocks); +asmlinkage int sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, + int blocks); static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src, int blocks) { - sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst), src, - blocks); + while (blocks) { + int rem; + + kernel_neon_begin(); + rem = sha2_ce_transform(container_of(sst, struct sha256_ce_state, + sst), src, blocks); + kernel_neon_end(); + src += (blocks - rem) * SHA256_BLOCK_SIZE; + blocks = rem; + } } const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state, @@ -61,9 +71,7 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data, __sha256_block_data_order); sctx->finalize = 0; - kernel_neon_begin(); sha256_base_do_update(desc, data, len, __sha2_ce_transform); - kernel_neon_end(); return 0; } @@ -88,11 +96,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, */ sctx->finalize = finalize; - kernel_neon_begin(); sha256_base_do_update(desc, data, len, __sha2_ce_transform); if (!finalize) sha256_base_do_finalize(desc, __sha2_ce_transform); - kernel_neon_end(); return sha256_base_finish(desc, out); } @@ -106,9 +112,7 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out) } sctx->finalize = 0; - kernel_neon_begin(); sha256_base_do_finalize(desc, __sha2_ce_transform); - kernel_neon_end(); return sha256_base_finish(desc, out); } diff --git a/arch/arm64/crypto/sha3-ce-core.S b/arch/arm64/crypto/sha3-ce-core.S index 1cfb768df350..6f5208414fe3 100644 --- a/arch/arm64/crypto/sha3-ce-core.S +++ b/arch/arm64/crypto/sha3-ce-core.S @@ -37,20 +37,13 @@ .endm /* - * sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size) + * int sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size) */ .text SYM_FUNC_START(sha3_ce_transform) - frame_push 4 - - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - 
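The sha1 and sha2 conversions above (and the sha3/sha512 ones that follow) all move to one calling convention: the assembly may stop early when cond_yield fires and returns the number of unprocessed blocks, and the C wrapper loops, re-entering a fresh NEON section until everything is consumed. Consolidated, the wrapper pattern is (a sketch; the xxx_ names stand in for any of the converted routines):

    static void __xxx_ce_transform(struct xxx_state *sst, const u8 *src,
                                   int blocks)
    {
            while (blocks) {
                    int rem;

                    kernel_neon_begin();
                    rem = xxx_ce_transform(sst, src, blocks); /* blocks left */
                    kernel_neon_end();

                    /* advance past what the assembly actually consumed */
                    src += (blocks - rem) * XXX_BLOCK_SIZE;
                    blocks = rem;
            }
    }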
-0: /* load state */ - add x8, x19, #32 - ld1 { v0.1d- v3.1d}, [x19] + /* load state */ + add x8, x0, #32 + ld1 { v0.1d- v3.1d}, [x0] ld1 { v4.1d- v7.1d}, [x8], #32 ld1 { v8.1d-v11.1d}, [x8], #32 ld1 {v12.1d-v15.1d}, [x8], #32 @@ -58,13 +51,13 @@ SYM_FUNC_START(sha3_ce_transform) ld1 {v20.1d-v23.1d}, [x8], #32 ld1 {v24.1d}, [x8] -1: sub w21, w21, #1 +0: sub w2, w2, #1 mov w8, #24 adr_l x9, .Lsha3_rcon /* load input */ - ld1 {v25.8b-v28.8b}, [x20], #32 - ld1 {v29.8b-v31.8b}, [x20], #24 + ld1 {v25.8b-v28.8b}, [x1], #32 + ld1 {v29.8b-v31.8b}, [x1], #24 eor v0.8b, v0.8b, v25.8b eor v1.8b, v1.8b, v26.8b eor v2.8b, v2.8b, v27.8b @@ -73,10 +66,10 @@ SYM_FUNC_START(sha3_ce_transform) eor v5.8b, v5.8b, v30.8b eor v6.8b, v6.8b, v31.8b - tbnz x22, #6, 3f // SHA3-512 + tbnz x3, #6, 2f // SHA3-512 - ld1 {v25.8b-v28.8b}, [x20], #32 - ld1 {v29.8b-v30.8b}, [x20], #16 + ld1 {v25.8b-v28.8b}, [x1], #32 + ld1 {v29.8b-v30.8b}, [x1], #16 eor v7.8b, v7.8b, v25.8b eor v8.8b, v8.8b, v26.8b eor v9.8b, v9.8b, v27.8b @@ -84,34 +77,34 @@ SYM_FUNC_START(sha3_ce_transform) eor v11.8b, v11.8b, v29.8b eor v12.8b, v12.8b, v30.8b - tbnz x22, #4, 2f // SHA3-384 or SHA3-224 + tbnz x3, #4, 1f // SHA3-384 or SHA3-224 // SHA3-256 - ld1 {v25.8b-v28.8b}, [x20], #32 + ld1 {v25.8b-v28.8b}, [x1], #32 eor v13.8b, v13.8b, v25.8b eor v14.8b, v14.8b, v26.8b eor v15.8b, v15.8b, v27.8b eor v16.8b, v16.8b, v28.8b - b 4f + b 3f -2: tbz x22, #2, 4f // bit 2 cleared? SHA-384 +1: tbz x3, #2, 3f // bit 2 cleared? SHA-384 // SHA3-224 - ld1 {v25.8b-v28.8b}, [x20], #32 - ld1 {v29.8b}, [x20], #8 + ld1 {v25.8b-v28.8b}, [x1], #32 + ld1 {v29.8b}, [x1], #8 eor v13.8b, v13.8b, v25.8b eor v14.8b, v14.8b, v26.8b eor v15.8b, v15.8b, v27.8b eor v16.8b, v16.8b, v28.8b eor v17.8b, v17.8b, v29.8b - b 4f + b 3f // SHA3-512 -3: ld1 {v25.8b-v26.8b}, [x20], #16 +2: ld1 {v25.8b-v26.8b}, [x1], #16 eor v7.8b, v7.8b, v25.8b eor v8.8b, v8.8b, v26.8b -4: sub w8, w8, #1 +3: sub w8, w8, #1 eor3 v29.16b, v4.16b, v9.16b, v14.16b eor3 v26.16b, v1.16b, v6.16b, v11.16b @@ -190,33 +183,19 @@ SYM_FUNC_START(sha3_ce_transform) eor v0.16b, v0.16b, v31.16b - cbnz w8, 4b - cbz w21, 5f - - if_will_cond_yield_neon - add x8, x19, #32 - st1 { v0.1d- v3.1d}, [x19] - st1 { v4.1d- v7.1d}, [x8], #32 - st1 { v8.1d-v11.1d}, [x8], #32 - st1 {v12.1d-v15.1d}, [x8], #32 - st1 {v16.1d-v19.1d}, [x8], #32 - st1 {v20.1d-v23.1d}, [x8], #32 - st1 {v24.1d}, [x8] - do_cond_yield_neon - b 0b - endif_yield_neon - - b 1b + cbnz w8, 3b + cond_yield 3f, x8 + cbnz w2, 0b /* save state */ -5: st1 { v0.1d- v3.1d}, [x19], #32 - st1 { v4.1d- v7.1d}, [x19], #32 - st1 { v8.1d-v11.1d}, [x19], #32 - st1 {v12.1d-v15.1d}, [x19], #32 - st1 {v16.1d-v19.1d}, [x19], #32 - st1 {v20.1d-v23.1d}, [x19], #32 - st1 {v24.1d}, [x19] - frame_pop +3: st1 { v0.1d- v3.1d}, [x0], #32 + st1 { v4.1d- v7.1d}, [x0], #32 + st1 { v8.1d-v11.1d}, [x0], #32 + st1 {v12.1d-v15.1d}, [x0], #32 + st1 {v16.1d-v19.1d}, [x0], #32 + st1 {v20.1d-v23.1d}, [x0], #32 + st1 {v24.1d}, [x0] + mov w0, w2 ret SYM_FUNC_END(sha3_ce_transform) diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c index e5a2936f0886..8c65cecf560a 100644 --- a/arch/arm64/crypto/sha3-ce-glue.c +++ b/arch/arm64/crypto/sha3-ce-glue.c @@ -23,9 +23,13 @@ MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("sha3-224"); +MODULE_ALIAS_CRYPTO("sha3-256"); +MODULE_ALIAS_CRYPTO("sha3-384"); +MODULE_ALIAS_CRYPTO("sha3-512"); -asmlinkage void 
sha3_ce_transform(u64 *st, const u8 *data, int blocks, - int md_len); +asmlinkage int sha3_ce_transform(u64 *st, const u8 *data, int blocks, + int md_len); static int sha3_update(struct shash_desc *desc, const u8 *data, unsigned int len) @@ -55,11 +59,15 @@ static int sha3_update(struct shash_desc *desc, const u8 *data, blocks = len / sctx->rsiz; len %= sctx->rsiz; - if (blocks) { + while (blocks) { + int rem; + kernel_neon_begin(); - sha3_ce_transform(sctx->st, data, blocks, digest_size); + rem = sha3_ce_transform(sctx->st, data, blocks, + digest_size); kernel_neon_end(); - data += blocks * sctx->rsiz; + data += (blocks - rem) * sctx->rsiz; + blocks = rem; } } diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S index cde606c0323e..d6e7f6c95fa6 100644 --- a/arch/arm64/crypto/sha512-ce-core.S +++ b/arch/arm64/crypto/sha512-ce-core.S @@ -107,23 +107,17 @@ */ .text SYM_FUNC_START(sha512_ce_transform) - frame_push 3 - - mov x19, x0 - mov x20, x1 - mov x21, x2 - /* load state */ -0: ld1 {v8.2d-v11.2d}, [x19] + ld1 {v8.2d-v11.2d}, [x0] /* load first 4 round constants */ adr_l x3, .Lsha512_rcon ld1 {v20.2d-v23.2d}, [x3], #64 /* load input */ -1: ld1 {v12.2d-v15.2d}, [x20], #64 - ld1 {v16.2d-v19.2d}, [x20], #64 - sub w21, w21, #1 +0: ld1 {v12.2d-v15.2d}, [x1], #64 + ld1 {v16.2d-v19.2d}, [x1], #64 + sub w2, w2, #1 CPU_LE( rev64 v12.16b, v12.16b ) CPU_LE( rev64 v13.16b, v13.16b ) @@ -201,19 +195,12 @@ CPU_LE( rev64 v19.16b, v19.16b ) add v10.2d, v10.2d, v2.2d add v11.2d, v11.2d, v3.2d + cond_yield 3f, x4 /* handled all input blocks? */ - cbz w21, 3f - - if_will_cond_yield_neon - st1 {v8.2d-v11.2d}, [x19] - do_cond_yield_neon - b 0b - endif_yield_neon - - b 1b + cbnz w2, 0b /* store new state */ -3: st1 {v8.2d-v11.2d}, [x19] - frame_pop +3: st1 {v8.2d-v11.2d}, [x0] + mov w0, w2 ret SYM_FUNC_END(sha512_ce_transform) diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c index faa83f6cf376..e62a094a9d52 100644 --- a/arch/arm64/crypto/sha512-ce-glue.c +++ b/arch/arm64/crypto/sha512-ce-glue.c @@ -23,12 +23,28 @@ MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("sha384"); +MODULE_ALIAS_CRYPTO("sha512"); -asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src, - int blocks); +asmlinkage int sha512_ce_transform(struct sha512_state *sst, u8 const *src, + int blocks); asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks); +static void __sha512_ce_transform(struct sha512_state *sst, u8 const *src, + int blocks) +{ + while (blocks) { + int rem; + + kernel_neon_begin(); + rem = sha512_ce_transform(sst, src, blocks); + kernel_neon_end(); + src += (blocks - rem) * SHA512_BLOCK_SIZE; + blocks = rem; + } +} + static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src, int blocks) { @@ -38,45 +54,30 @@ static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src, static int sha512_ce_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - if (!crypto_simd_usable()) - return sha512_base_do_update(desc, data, len, - __sha512_block_data_order); - - kernel_neon_begin(); - sha512_base_do_update(desc, data, len, sha512_ce_transform); - kernel_neon_end(); + sha512_block_fn *fn = crypto_simd_usable() ? 
__sha512_ce_transform + : __sha512_block_data_order; + sha512_base_do_update(desc, data, len, fn); return 0; } static int sha512_ce_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - if (!crypto_simd_usable()) { - if (len) - sha512_base_do_update(desc, data, len, - __sha512_block_data_order); - sha512_base_do_finalize(desc, __sha512_block_data_order); - return sha512_base_finish(desc, out); - } + sha512_block_fn *fn = crypto_simd_usable() ? __sha512_ce_transform + : __sha512_block_data_order; - kernel_neon_begin(); - sha512_base_do_update(desc, data, len, sha512_ce_transform); - sha512_base_do_finalize(desc, sha512_ce_transform); - kernel_neon_end(); + sha512_base_do_update(desc, data, len, fn); + sha512_base_do_finalize(desc, fn); return sha512_base_finish(desc, out); } static int sha512_ce_final(struct shash_desc *desc, u8 *out) { - if (!crypto_simd_usable()) { - sha512_base_do_finalize(desc, __sha512_block_data_order); - return sha512_base_finish(desc, out); - } + sha512_block_fn *fn = crypto_simd_usable() ? __sha512_ce_transform + : __sha512_block_data_order; - kernel_neon_begin(); - sha512_base_do_finalize(desc, sha512_ce_transform); - kernel_neon_end(); + sha512_base_do_finalize(desc, fn); return sha512_base_finish(desc, out); } diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h index ffb1a40d5475..09e43272ccb0 100644 --- a/arch/arm64/include/asm/archrandom.h +++ b/arch/arm64/include/asm/archrandom.h @@ -4,10 +4,26 @@ #ifdef CONFIG_ARCH_RANDOM +#include <linux/arm-smccc.h> #include <linux/bug.h> #include <linux/kernel.h> #include <asm/cpufeature.h> +#define ARM_SMCCC_TRNG_MIN_VERSION 0x10000UL + +extern bool smccc_trng_available; + +static inline bool __init smccc_probe_trng(void) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_VERSION, &res); + if ((s32)res.a0 < 0) + return false; + + return res.a0 >= ARM_SMCCC_TRNG_MIN_VERSION; +} + static inline bool __arm64_rndr(unsigned long *v) { bool ok; @@ -38,26 +54,55 @@ static inline bool __must_check arch_get_random_int(unsigned int *v) static inline bool __must_check arch_get_random_seed_long(unsigned long *v) { + struct arm_smccc_res res; + + /* + * We prefer the SMCCC call, since its semantics (return actual + * hardware backed entropy) is closer to the idea behind this + * function here than what even the RNDRSS register provides + * (the output of a pseudo RNG freshly seeded by a TRNG). + */ + if (smccc_trng_available) { + arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res); + if ((int)res.a0 >= 0) { + *v = res.a3; + return true; + } + } + /* * Only support the generic interface after we have detected * the system wide capability, avoiding complexity with the * cpufeature code and with potential scheduling between CPUs * with and without the feature. 
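The archrandom changes above establish a clear preference order for seed material, condensed here as a sketch (smccc_trng_rnd64() is a hypothetical wrapper, not a real kernel helper; the real code invokes ARM_SMCCC_TRNG_RND64 directly):

    /* 1. SMCCC TRNG call  - firmware-mediated, true hardware entropy
     * 2. RNDR register    - CPU PRNG, reseeded from a TRNG
     * 3. give up          - the caller blends in other entropy sources */
    static bool get_seed_sketch(unsigned long *v)
    {
            if (smccc_trng_available && smccc_trng_rnd64(v))  /* hypothetical */
                    return true;
            if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
                    return true;
            return false;
    }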
*/ - if (!cpus_have_const_cap(ARM64_HAS_RNG)) - return false; + if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v)) + return true; - return __arm64_rndr(v); + return false; } - static inline bool __must_check arch_get_random_seed_int(unsigned int *v) { + struct arm_smccc_res res; unsigned long val; - bool ok = arch_get_random_seed_long(&val); - *v = val; - return ok; + if (smccc_trng_available) { + arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 32, &res); + if ((int)res.a0 >= 0) { + *v = res.a3 & GENMASK(31, 0); + return true; + } + } + + if (cpus_have_const_cap(ARM64_HAS_RNG)) { + if (__arm64_rndr(&val)) { + *v = val; + return true; + } + } + + return false; } static inline bool __init __early_cpu_has_rndr(void) @@ -72,12 +117,29 @@ arch_get_random_seed_long_early(unsigned long *v) { WARN_ON(system_state != SYSTEM_BOOTING); - if (!__early_cpu_has_rndr()) - return false; + if (smccc_trng_available) { + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res); + if ((int)res.a0 >= 0) { + *v = res.a3; + return true; + } + } - return __arm64_rndr(v); + if (__early_cpu_has_rndr() && __arm64_rndr(v)) + return true; + + return false; } #define arch_get_random_seed_long_early arch_get_random_seed_long_early +#else /* !CONFIG_ARCH_RANDOM */ + +static inline bool __init smccc_probe_trng(void) +{ + return false; +} + #endif /* CONFIG_ARCH_RANDOM */ #endif /* _ASM_ARCHRANDOM_H */ diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index 9990059be106..ccedf548dac9 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -15,10 +15,10 @@ .macro __uaccess_ttbr0_disable, tmp1 mrs \tmp1, ttbr1_el1 // swapper_pg_dir bic \tmp1, \tmp1, #TTBR_ASID_MASK - sub \tmp1, \tmp1, #PAGE_SIZE // reserved_pg_dir just before swapper_pg_dir + sub \tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET // reserved_pg_dir msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 isb - add \tmp1, \tmp1, #PAGE_SIZE + add \tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET msr ttbr1_el1, \tmp1 // set reserved ASID isb .endm diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index bf125c591116..ca31594d3d6c 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -676,6 +676,23 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .endm /* + * Set SCTLR_EL1 to the passed value, and invalidate the local icache + * in the process. This is called when setting the MMU on. + */ +.macro set_sctlr_el1, reg + msr sctlr_el1, \reg + isb + /* + * Invalidate the local I-cache so that any instructions fetched + * speculatively from the PoC are discarded, since they may have + * been dynamically patched at the PoU. + */ + ic iallu + dsb nsh + isb +.endm + +/* * Check whether to yield to another runnable task from kernel mode NEON code * (which runs with preemption disabled). * @@ -745,6 +762,22 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .Lyield_out_\@ : .endm + /* + * Check whether preempt-disabled code should yield as soon as it + * is able. This is the case if re-enabling preemption a single + * time results in a preempt count of zero, and the TIF_NEED_RESCHED + * flag is set. 
(Note that the latter is stored negated in the + * top word of the thread_info::preempt_count field) + */ + .macro cond_yield, lbl:req, tmp:req +#ifdef CONFIG_PREEMPTION + get_current_task \tmp + ldr \tmp, [\tmp, #TSK_TI_PREEMPT] + sub \tmp, \tmp, #PREEMPT_DISABLE_OFFSET + cbz \tmp, \lbl +#endif + .endm + /* * This macro emits a program property note section identifying * architecture features which require special handling, mainly for diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 77cbbe3625f2..a074459f8f2f 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -6,7 +6,6 @@ #define __ASM_CACHE_H #include <asm/cputype.h> -#include <asm/mte-kasan.h> #define CTR_L1IP_SHIFT 14 #define CTR_L1IP_MASK 3 diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 45217f21f1fe..52e5c1623224 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -30,11 +30,6 @@ * the implementation assumes non-aliasing VIPT D-cache and (aliasing) * VIPT I-cache. * - * flush_cache_mm(mm) - * - * Clean and invalidate all user space cache entries - * before a change of page tables. - * * flush_icache_range(start, end) * * Ensure coherency between the I-cache and the D-cache in the diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 9a555809b89c..61177bac49fa 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -63,6 +63,11 @@ struct arm64_ftr_bits { s64 safe_val; /* safe value for FTR_EXACT features */ }; +struct arm64_ftr_override { + u64 val; + u64 mask; +}; + /* * @arm64_ftr_reg - Feature register * @strict_mask Bits which should match across all CPUs for sanity. @@ -74,6 +79,7 @@ struct arm64_ftr_reg { u64 user_mask; u64 sys_val; u64 user_val; + struct arm64_ftr_override *override; const struct arm64_ftr_bits *ftr_bits; }; @@ -600,6 +606,7 @@ void __init setup_cpu_features(void); void check_local_cpu_capabilities(void); u64 read_sanitised_ftr_reg(u32 id); +u64 __read_sysreg_by_encoding(u32 sys_id); static inline bool cpu_supports_mixed_endian_el0(void) { @@ -811,6 +818,10 @@ static inline unsigned int get_vmid_bits(u64 mmfr1) return 8; } +extern struct arm64_ftr_override id_aa64mmfr1_override; +extern struct arm64_ftr_override id_aa64pfr1_override; +extern struct arm64_ftr_override id_aa64isar1_override; + u32 get_kvm_ipa_limit(void); void dump_cpu_features(void); diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index a7f5a1bbc8ac..d77d358f9395 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -32,46 +32,39 @@ * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in * EL2. 
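The single sub/cbz in the cond_yield macro above works because arm64 keeps the preempt count and the negated TIF_NEED_RESCHED state in one 64-bit thread_info field, so one load tests both conditions. Roughly, in C (a sketch of the predicate, not real kernel code):

    /* thread_info::preempt_count, 64-bit view (little-endian):
     *   low 32 bits:  preemption count
     *   high 32 bits: zero iff a reschedule is pending (stored negated)
     * So (count64 - PREEMPT_DISABLE_OFFSET) == 0 exactly when the caller
     * holds the only preemption reference AND need_resched is set - the
     * only situation in which yielding achieves anything. */
    static bool should_yield_sketch(u64 count64)
    {
            return count64 - PREEMPT_DISABLE_OFFSET == 0;
    }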
*/ -.macro __init_el2_timers mode -.ifeqs "\mode", "nvhe" +.macro __init_el2_timers mrs x0, cnthctl_el2 orr x0, x0, #3 // Enable EL1 physical timers msr cnthctl_el2, x0 -.endif msr cntvoff_el2, xzr // Clear virtual offset .endm -.macro __init_el2_debug mode +.macro __init_el2_debug mrs x1, id_aa64dfr0_el1 sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4 cmp x0, #1 - b.lt 1f // Skip if no PMU present + b.lt .Lskip_pmu_\@ // Skip if no PMU present mrs x0, pmcr_el0 // Disable debug access traps ubfx x0, x0, #11, #5 // to EL2 and allow access to -1: +.Lskip_pmu_\@: csel x2, xzr, x0, lt // all PMU counters from EL1 /* Statistical profiling */ ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4 - cbz x0, 3f // Skip if SPE not present + cbz x0, .Lskip_spe_\@ // Skip if SPE not present -.ifeqs "\mode", "nvhe" mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2, and x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT) - cbnz x0, 2f // then permit sampling of physical + cbnz x0, .Lskip_spe_el2_\@ // then permit sampling of physical mov x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \ 1 << SYS_PMSCR_EL2_PA_SHIFT) msr_s SYS_PMSCR_EL2, x0 // addresses and physical counter -2: +.Lskip_spe_el2_\@: mov x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT) orr x2, x2, x0 // If we don't have VHE, then // use EL1&0 translation. -.else - orr x2, x2, #MDCR_EL2_TPMS // For VHE, use EL2 translation - // and disable access from EL1 -.endif -3: +.Lskip_spe_\@: msr mdcr_el2, x2 // Configure debug traps .endm @@ -79,9 +72,9 @@ .macro __init_el2_lor mrs x1, id_aa64mmfr1_el1 ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4 - cbz x0, 1f + cbz x0, .Lskip_lor_\@ msr_s SYS_LORC_EL1, xzr -1: +.Lskip_lor_\@: .endm /* Stage-2 translation */ @@ -93,7 +86,7 @@ .macro __init_el2_gicv3 mrs x0, id_aa64pfr0_el1 ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4 - cbz x0, 1f + cbz x0, .Lskip_gicv3_\@ mrs_s x0, SYS_ICC_SRE_EL2 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 @@ -103,7 +96,7 @@ mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back, tbz x0, #0, 1f // and check that it sticks msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults -1: +.Lskip_gicv3_\@: .endm .macro __init_el2_hstr @@ -128,14 +121,14 @@ .macro __init_el2_nvhe_sve mrs x1, id_aa64pfr0_el1 ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4 - cbz x1, 1f + cbz x1, .Lskip_sve_\@ bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps msr cptr_el2, x0 // Disable copro. traps to EL2 isb mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector msr_s SYS_ZCR_EL2, x1 // length for EL1. -1: +.Lskip_sve_\@: .endm .macro __init_el2_nvhe_prepare_eret @@ -145,37 +138,24 @@ /** * Initialize EL2 registers to sane values. This should be called early on all - * cores that were booted in EL2. + * cores that were booted in EL2. Note that everything gets initialised as + * if VHE was not available. The kernel context will be upgraded to VHE + * if possible later on in the boot process. * * Regs: x0, x1 and x2 are clobbered. */ -.macro init_el2_state mode -.ifnes "\mode", "vhe" -.ifnes "\mode", "nvhe" -.error "Invalid 'mode' argument" -.endif -.endif - +.macro init_el2_state __init_el2_sctlr - __init_el2_timers \mode - __init_el2_debug \mode + __init_el2_timers + __init_el2_debug __init_el2_lor __init_el2_stage2 __init_el2_gicv3 __init_el2_hstr - - /* - * When VHE is not in use, early init of EL2 needs to be done here. - * When VHE _is_ in use, EL1 will not be used in the host and - * requires no configuration, and all non-hyp-specific EL2 setup - * will be done via the _EL1 system register aliases in __cpu_setup.
- */ -.ifeqs "\mode", "nvhe" __init_el2_nvhe_idregs __init_el2_nvhe_cptr __init_el2_nvhe_sve __init_el2_nvhe_prepare_eret -.endif .endm #endif /* __ARM_KVM_INIT_H__ */ diff --git a/arch/arm64/include/asm/hyp_image.h b/arch/arm64/include/asm/hyp_image.h index daa1a1da539e..737ded6b6d0d 100644 --- a/arch/arm64/include/asm/hyp_image.h +++ b/arch/arm64/include/asm/hyp_image.h @@ -7,6 +7,9 @@ #ifndef __ARM64_HYP_IMAGE_H__ #define __ARM64_HYP_IMAGE_H__ +#define __HYP_CONCAT(a, b) a ## b +#define HYP_CONCAT(a, b) __HYP_CONCAT(a, b) + /* * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_, * to separate it from the kernel proper. @@ -21,9 +24,31 @@ */ #define HYP_SECTION_NAME(NAME) .hyp##NAME +/* Symbol defined at the beginning of each hyp section. */ +#define HYP_SECTION_SYMBOL_NAME(NAME) \ + HYP_CONCAT(__hyp_section_, HYP_SECTION_NAME(NAME)) + +/* + * Helper to generate linker script statements starting a hyp section. + * + * A symbol with a well-known name is defined at the first byte. This + * is used as a base for hyp relocations (see gen-hyprel.c). It must + * be defined inside the section so the linker of `vmlinux` cannot + * separate it from the section data. + */ +#define BEGIN_HYP_SECTION(NAME) \ + HYP_SECTION_NAME(NAME) : { \ + HYP_SECTION_SYMBOL_NAME(NAME) = .; + +/* Helper to generate linker script statements ending a hyp section. */ +#define END_HYP_SECTION \ + } + /* Defines an ELF hyp section from input section @NAME and its subsections. */ -#define HYP_SECTION(NAME) \ - HYP_SECTION_NAME(NAME) : { *(NAME NAME##.*) } +#define HYP_SECTION(NAME) \ + BEGIN_HYP_SECTION(NAME) \ + *(NAME NAME##.*) \ + END_HYP_SECTION /* * Defines a linker script alias of a kernel-proper symbol referenced by diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h index 0aaf9044cd6a..12d5f47f7dbe 100644 --- a/arch/arm64/include/asm/kasan.h +++ b/arch/arm64/include/asm/kasan.h @@ -6,6 +6,7 @@ #include <linux/linkage.h> #include <asm/memory.h> +#include <asm/mte-kasan.h> #include <asm/pgtable-types.h> #define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag) diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index d24b527e8c00..9befcd87e9a8 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -90,18 +90,19 @@ static inline void crash_prepare_suspend(void) {} static inline void crash_post_resume(void) {} #endif -#ifdef CONFIG_KEXEC_FILE #define ARCH_HAS_KIMAGE_ARCH struct kimage_arch { void *dtb; - unsigned long dtb_mem; + phys_addr_t dtb_mem; + phys_addr_t kern_reloc; /* Core ELF header buffer */ void *elf_headers; unsigned long elf_headers_mem; unsigned long elf_headers_sz; }; +#ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops kexec_image_ops; struct kimage; diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h new file mode 100644 index 000000000000..d061176d57ea --- /dev/null +++ b/arch/arm64/include/asm/kfence.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arm64 KFENCE support. + * + * Copyright (C) 2020, Google LLC. 
+ */ + +#ifndef __ASM_KFENCE_H +#define __ASM_KFENCE_H + +#include <asm/cacheflush.h> + +static inline bool arch_kfence_init_pool(void) { return true; } + +static inline bool kfence_protect_page(unsigned long addr, bool protect) +{ + set_memory_valid(addr, 1, !protect); + + return true; +} + +#endif /* __ASM_KFENCE_H */ diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 8a33d83ea843..22d933e9b59e 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -199,32 +199,6 @@ extern void __vgic_v3_init_lrs(void); extern u32 __kvm_get_mdcr_el2(void); -#if defined(GCC_VERSION) && GCC_VERSION < 50000 -#define SYM_CONSTRAINT "i" -#else -#define SYM_CONSTRAINT "S" -#endif - -/* - * Obtain the PC-relative address of a kernel symbol - * s: symbol - * - * The goal of this macro is to return a symbol's address based on a - * PC-relative computation, as opposed to a loading the VA from a - * constant pool or something similar. This works well for HYP, as an - * absolute VA is guaranteed to be wrong. Only use this if trying to - * obtain the address of a symbol (i.e. not something you obtained by - * following a pointer). - */ -#define hyp_symbol_addr(s) \ - ({ \ - typeof(s) *addr; \ - asm("adrp %0, %1\n" \ - "add %0, %0, :lo12:%1\n" \ - : "=r" (addr) : SYM_CONSTRAINT (&s)); \ - addr; \ - }) - #define __KVM_EXTABLE(from, to) \ " .pushsection __kvm_ex_table, \"a\"\n" \ " .align 3\n" \ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8fcfab0c2567..3d10e6527f7d 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -30,7 +30,6 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED -#define KVM_USER_MEM_SLOTS 512 #define KVM_HALT_POLL_NS_DEFAULT 500000 #include <kvm/arm_vgic.h> @@ -771,4 +770,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); #define kvm_vcpu_has_pmu(vcpu) \ (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features)) +int kvm_trng_call(struct kvm_vcpu *vcpu); + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index e52d82aeadca..90873851f677 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -73,49 +73,39 @@ alternative_cb_end .endm /* - * Convert a kernel image address to a PA - * reg: kernel address to be converted in place + * Convert a hypervisor VA to a PA + * reg: hypervisor address to be converted in place * tmp: temporary register - * - * The actual code generation takes place in kvm_get_kimage_voffset, and - * the instructions below are only there to reserve the space and - * perform the register allocation (kvm_get_kimage_voffset uses the - * specific registers encoded in the instructions). 
*/ -.macro kimg_pa reg, tmp -alternative_cb kvm_get_kimage_voffset - movz \tmp, #0 - movk \tmp, #0, lsl #16 - movk \tmp, #0, lsl #32 - movk \tmp, #0, lsl #48 -alternative_cb_end - - /* reg = __pa(reg) */ - sub \reg, \reg, \tmp +.macro hyp_pa reg, tmp + ldr_l \tmp, hyp_physvirt_offset + add \reg, \reg, \tmp .endm /* - * Convert a kernel image address to a hyp VA - * reg: kernel address to be converted in place + * Convert a hypervisor VA to a kernel image address + * reg: hypervisor address to be converted in place * tmp: temporary register * * The actual code generation takes place in kvm_get_kimage_voffset, and * the instructions below are only there to reserve the space and - * perform the register allocation (kvm_update_kimg_phys_offset uses the + * perform the register allocation (kvm_get_kimage_voffset uses the * specific registers encoded in the instructions). */ -.macro kimg_hyp_va reg, tmp -alternative_cb kvm_update_kimg_phys_offset +.macro hyp_kimg_va reg, tmp + /* Convert hyp VA -> PA. */ + hyp_pa \reg, \tmp + + /* Load kimage_voffset. */ +alternative_cb kvm_get_kimage_voffset movz \tmp, #0 movk \tmp, #0, lsl #16 movk \tmp, #0, lsl #32 movk \tmp, #0, lsl #48 alternative_cb_end - sub \reg, \reg, \tmp - mov_q \tmp, PAGE_OFFSET - orr \reg, \reg, \tmp - kern_hyp_va \reg + /* Convert PA -> kimg VA. */ + add \reg, \reg, \tmp .endm #else @@ -129,6 +119,7 @@ alternative_cb_end void kvm_update_va_mask(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); void kvm_compute_layout(void); +void kvm_apply_hyp_relocations(void); static __always_inline unsigned long __kern_hyp_va(unsigned long v) { @@ -144,24 +135,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v)))) -static __always_inline unsigned long __kimg_hyp_va(unsigned long v) -{ - unsigned long offset; - - asm volatile(ALTERNATIVE_CB("movz %0, #0\n" - "movk %0, #0, lsl #16\n" - "movk %0, #0, lsl #32\n" - "movk %0, #0, lsl #48\n", - kvm_update_kimg_phys_offset) - : "=r" (offset)); - - return __kern_hyp_va((v - offset) | PAGE_OFFSET); -} - -#define kimg_fn_hyp_va(v) ((typeof(*v))(__kimg_hyp_va((unsigned long)(v)))) - -#define kimg_fn_ptr(x) (typeof(x) **)(x) - /* * We currently support using a VM-specified IPA size. For backward * compatibility, the default IPA size is fixed to 40bits. diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 52ab38db04c7..8886d43cfb11 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -157,6 +157,11 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt); * If device attributes are not explicitly requested in @prot, then the * mapping will be normal, cacheable. * + * Note that the update of a valid leaf PTE in this function will be aborted + * if it tries to recreate the exact same mapping or only change the access + * permissions. Instead, the vCPU will exit from the guest one more time if + * still needed and then go through the path of relaxing permissions. + * * Note that this function will both coalesce existing table entries and split * existing block mappings, relying on page-faults to fault back areas outside * of the new mapping lazily. 
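A note on the hyp_pa/hyp_kimg_va macros introduced above: the conversion is now pure offset arithmetic. The following is a minimal, illustrative C sketch of that arithmetic only; the offset values here are invented for the example, whereas in the kernel hyp_physvirt_offset is loaded from memory and kimage_voffset is patched in at runtime through the kvm_get_kimage_voffset callback:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical values, for illustration only. */
static const uint64_t hyp_physvirt_offset = 0x44000000ULL;
static const uint64_t kimage_voffset = 0xffff80000a000000ULL;

/* Mirrors the hyp_pa macro: hyp VA -> PA. */
static uint64_t hyp_pa(uint64_t hyp_va)
{
	return hyp_va + hyp_physvirt_offset;
}

/* Mirrors the hyp_kimg_va macro: hyp VA -> PA -> kernel image VA. */
static uint64_t hyp_kimg_va(uint64_t hyp_va)
{
	return hyp_pa(hyp_va) + kimage_voffset;
}

int main(void)
{
	uint64_t va = 0x40210000ULL; /* arbitrary hyp VA */

	printf("PA: 0x%llx, kimg VA: 0x%llx\n",
	       (unsigned long long)hyp_pa(va),
	       (unsigned long long)hyp_kimg_va(va));
	return 0;
}

Two plain additions replace the old __kimg_hyp_va masking sequence (sub/orr with PAGE_OFFSET plus kern_hyp_va), which is what allows the removed macros above to go away.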
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index ff4732785c32..c759faf7a1ff 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -159,6 +159,18 @@ #define IOREMAP_MAX_ORDER (PMD_SHIFT) #endif +/* + * Open-coded (swapper_pg_dir - reserved_pg_dir) as this cannot be calculated + * until link time. + */ +#define RESERVED_SWAPPER_OFFSET (PAGE_SIZE) + +/* + * Open-coded (swapper_pg_dir - tramp_pg_dir) as this cannot be calculated + * until link time. + */ +#define TRAMP_SWAPPER_OFFSET (2 * PAGE_SIZE) + #ifndef __ASSEMBLY__ #include <linux/bitops.h> @@ -232,6 +244,7 @@ static inline const void *__tag_set(const void *addr, u8 tag) #ifdef CONFIG_KASAN_HW_TAGS #define arch_enable_tagging() mte_enable_kernel() +#define arch_set_tagging_report_once(state) mte_set_report_once(state) #define arch_init_tags(max_tag) mte_init_tags(max_tag) #define arch_get_random_tag() mte_get_random_tag() #define arch_get_mem_tag(addr) mte_get_mem_tag(addr) diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 0b3079fd28eb..70ce8c1d2b07 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -81,16 +81,15 @@ static inline bool __cpu_uses_extended_idmap_level(void) } /* - * Set TCR.T0SZ to its default value (based on VA_BITS) + * Ensure TCR.T0SZ is set to the provided value. */ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz) { - unsigned long tcr; + unsigned long tcr = read_sysreg(tcr_el1); - if (!__cpu_uses_extended_idmap()) + if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz) return; - tcr = read_sysreg(tcr_el1); tcr &= ~TCR_T0SZ_MASK; tcr |= t0sz << TCR_T0SZ_OFFSET; write_sysreg(tcr, tcr_el1); diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h index 691f15af788e..810045628c66 100644 --- a/arch/arm64/include/asm/module.lds.h +++ b/arch/arm64/include/asm/module.lds.h @@ -1,7 +1,7 @@ #ifdef CONFIG_ARM64_MODULE_PLTS SECTIONS { - .plt (NOLOAD) : { BYTE(0) } - .init.plt (NOLOAD) : { BYTE(0) } - .text.ftrace_trampoline (NOLOAD) : { BYTE(0) } + .plt 0 (NOLOAD) : { BYTE(0) } + .init.plt 0 (NOLOAD) : { BYTE(0) } + .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) } } #endif diff --git a/arch/arm64/include/asm/mte-def.h b/arch/arm64/include/asm/mte-def.h index 2d73a1612f09..cf241b0f0a42 100644 --- a/arch/arm64/include/asm/mte-def.h +++ b/arch/arm64/include/asm/mte-def.h @@ -11,4 +11,6 @@ #define MTE_TAG_SIZE 4 #define MTE_TAG_MASK GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT) +#define __MTE_PREAMBLE ARM64_ASM_PREAMBLE ".arch_extension memtag\n" + #endif /* __ASM_MTE_DEF_H */ diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h index 26349a4b5e2e..7ab500e2ad17 100644 --- a/arch/arm64/include/asm/mte-kasan.h +++ b/arch/arm64/include/asm/mte-kasan.h @@ -11,11 +11,14 @@ #include <linux/types.h> +#ifdef CONFIG_ARM64_MTE + /* - * The functions below are meant to be used only for the - * KASAN_HW_TAGS interface defined in asm/memory.h. + * These functions are meant to be only used from KASAN runtime through + * the arch_*() interface defined in asm/memory.h. + * These functions don't include system_supports_mte() checks, + * as KASAN only calls them when MTE is supported and enabled. 
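+ *
+ * Purely as an illustrative sketch (not a real call site), the arch_*()
+ * hooks from asm/memory.h expand to a sequence along the lines of:
+ *
+ *	tag = mte_get_random_tag();
+ *	addr = __tag_set(addr, tag);
+ *	mte_set_mem_tag_range(addr, size, tag);
+ *
+ * after which mte_get_mem_tag(addr) returns that same tag.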
*/ -#ifdef CONFIG_ARM64_MTE static inline u8 mte_get_ptr_tag(void *ptr) { @@ -25,13 +28,61 @@ static inline u8 mte_get_ptr_tag(void *ptr) return tag; } -u8 mte_get_mem_tag(void *addr); -u8 mte_get_random_tag(void); -void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag); +/* Get allocation tag for the address. */ +static inline u8 mte_get_mem_tag(void *addr) +{ + asm(__MTE_PREAMBLE "ldg %0, [%0]" + : "+r" (addr)); + + return mte_get_ptr_tag(addr); +} + +/* Generate a random tag. */ +static inline u8 mte_get_random_tag(void) +{ + void *addr; + + asm(__MTE_PREAMBLE "irg %0, %0" + : "=r" (addr)); + + return mte_get_ptr_tag(addr); +} + +/* + * Assign allocation tags for a region of memory based on the pointer tag. + * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and + * size must be non-zero and MTE_GRANULE_SIZE aligned. + */ +static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag) +{ + u64 curr, end; + + if (!size) + return; + + curr = (u64)__tag_set(addr, tag); + end = curr + size; + + do { + /* + * 'asm volatile' is required to prevent the compiler from + * moving the statement outside of the loop. + */ + asm volatile(__MTE_PREAMBLE "stg %0, [%0]" + : + : "r" (curr) + : "memory"); + + curr += MTE_GRANULE_SIZE; + } while (curr != end); +} void mte_enable_kernel(void); void mte_init_tags(u64 max_tag); +void mte_set_report_once(bool state); +bool mte_report_once(void); + #else /* CONFIG_ARM64_MTE */ static inline u8 mte_get_ptr_tag(void *ptr) @@ -43,13 +94,14 @@ static inline u8 mte_get_mem_tag(void *addr) { return 0xFF; } + static inline u8 mte_get_random_tag(void) { return 0xFF; } -static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag) + +static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag) { - return addr; } static inline void mte_enable_kernel(void) @@ -60,6 +112,15 @@ static inline void mte_init_tags(u64 max_tag) { } +static inline void mte_set_report_once(bool state) +{ +} + +static inline bool mte_report_once(void) +{ + return false; +} + #endif /* CONFIG_ARM64_MTE */ #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index d02aff9f493d..9b557a457f24 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -8,8 +8,6 @@ #include <asm/compiler.h> #include <asm/mte-def.h> -#define __MTE_PREAMBLE ARM64_ASM_PREAMBLE ".arch_extension memtag\n" - #ifndef __ASSEMBLY__ #include <linux/bitfield.h> diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h index dd870390d639..8c8cf4297cc3 100644 --- a/arch/arm64/include/asm/numa.h +++ b/arch/arm64/include/asm/numa.h @@ -3,52 +3,6 @@ #define __ASM_NUMA_H #include <asm/topology.h> - -#ifdef CONFIG_NUMA - -#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2) - -int __node_distance(int from, int to); -#define node_distance(a, b) __node_distance(a, b) - -extern nodemask_t numa_nodes_parsed __initdata; - -extern bool numa_off; - -/* Mappings between node number and cpus on that node. */ -extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; -void numa_clear_node(unsigned int cpu); - -#ifdef CONFIG_DEBUG_PER_CPU_MAPS -const struct cpumask *cpumask_of_node(int node); -#else -/* Returns a pointer to the cpumask of CPUs on Node 'node'. 
*/ -static inline const struct cpumask *cpumask_of_node(int node) -{ - if (node == NUMA_NO_NODE) - return cpu_all_mask; - - return node_to_cpumask_map[node]; -} -#endif - -void __init arm64_numa_init(void); -int __init numa_add_memblk(int nodeid, u64 start, u64 end); -void __init numa_set_distance(int from, int to, int distance); -void __init numa_free_distance(void); -void __init early_map_cpu_to_node(unsigned int cpu, int nid); -void numa_store_cpu_info(unsigned int cpu); -void numa_add_cpu(unsigned int cpu); -void numa_remove_cpu(unsigned int cpu); - -#else /* CONFIG_NUMA */ - -static inline void numa_store_cpu_info(unsigned int cpu) { } -static inline void numa_add_cpu(unsigned int cpu) { } -static inline void numa_remove_cpu(unsigned int cpu) { } -static inline void arm64_numa_init(void) { } -static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { } - -#endif /* CONFIG_NUMA */ +#include <asm-generic/numa.h> #endif /* __ASM_NUMA_H */ diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 501562793ce2..e17b96d0e4b5 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -980,7 +980,17 @@ static inline bool arch_faults_on_old_pte(void) return !cpu_has_hw_af(); } -#define arch_faults_on_old_pte arch_faults_on_old_pte +#define arch_faults_on_old_pte arch_faults_on_old_pte + +/* + * Experimentally, it's cheap to set the access flag in hardware and we + * benefit from prefaulting mappings as 'old' to start with. + */ +static inline bool arch_wants_old_prefaulted_pte(void) +{ + return !arch_faults_on_old_pte(); +} +#define arch_wants_old_prefaulted_pte arch_wants_old_prefaulted_pte #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index c6b4f0603024..b112a11e9302 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -76,6 +76,15 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) return ptrauth_clear_pac(ptr); } +static __always_inline void ptrauth_enable(void) +{ + if (!system_supports_address_auth()) + return; + sysreg_clear_set(sctlr_el1, 0, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | + SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)); + isb(); +} + #define ptrauth_thread_init_user(tsk) \ ptrauth_keys_init_user(&(tsk)->thread.keys_user) #define ptrauth_thread_init_kernel(tsk) \ @@ -84,6 +93,7 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel) #else /* CONFIG_ARM64_PTR_AUTH */ +#define ptrauth_enable() #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) #define ptrauth_strip_insn_pac(lr) (lr) #define ptrauth_thread_init_user(tsk) diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index 8ff579361731..2f36b16a5b5d 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -11,7 +11,8 @@ extern char __alt_instructions[], __alt_instructions_end[]; extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[]; extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; extern char __hyp_text_start[], __hyp_text_end[]; -extern char __hyp_data_ro_after_init_start[], __hyp_data_ro_after_init_end[]; +extern char __hyp_rodata_start[], __hyp_rodata_end[]; +extern char __hyp_reloc_begin[], __hyp_reloc_end[]; extern char __idmap_text_start[], __idmap_text_end[]; extern char __initdata_begin[], __initdata_end[]; extern char __inittext_begin[], __inittext_end[]; diff --git 
a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h new file mode 100644 index 000000000000..d3320618ed14 --- /dev/null +++ b/arch/arm64/include/asm/setup.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef __ARM64_ASM_SETUP_H +#define __ARM64_ASM_SETUP_H + +#include <uapi/asm/setup.h> + +void *get_early_fdt_ptr(void); +void early_fdt_map(u64 dt_phys); + +#endif diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h index 1f43fcc79738..eb4a75d720ed 100644 --- a/arch/arm64/include/asm/sparsemem.h +++ b/arch/arm64/include/asm/sparsemem.h @@ -7,7 +7,26 @@ #ifdef CONFIG_SPARSEMEM #define MAX_PHYSMEM_BITS CONFIG_ARM64_PA_BITS -#define SECTION_SIZE_BITS 30 -#endif + +/* + * Section size must be at least 512MB for 64K base + * page size config. Otherwise it will be less than + * (MAX_ORDER - 1) and the build process will fail. + */ +#ifdef CONFIG_ARM64_64K_PAGES +#define SECTION_SIZE_BITS 29 + +#else + +/* + * Section size must be at least 128MB for 4K base + * page size config. Otherwise PMD based huge page + * entries could not be created for vmemmap mappings. + * 16K follows 4K for simplicity. + */ +#define SECTION_SIZE_BITS 27 +#endif /* CONFIG_ARM64_64K_PAGES */ + +#endif /* CONFIG_SPARSEMEM*/ #endif diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 9083d6992603..0525c0b089ed 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -5,8 +5,8 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H -#include <asm/qrwlock.h> #include <asm/qspinlock.h> +#include <asm/qrwlock.h> /* See include/linux/spinlock.h */ #define smp_mb__after_spinlock() smp_mb() diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h index 7263e0bac680..33f1bb453150 100644 --- a/arch/arm64/include/asm/stackprotector.h +++ b/arch/arm64/include/asm/stackprotector.h @@ -41,6 +41,7 @@ static __always_inline void boot_init_stack_canary(void) #endif ptrauth_thread_init_kernel(current); ptrauth_thread_switch_kernel(current); + ptrauth_enable(); } #endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 8b5e7e5c3cc8..dfd4edbfe360 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -191,6 +191,7 @@ #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) #define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0) +#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) #define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0) #define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1) @@ -291,7 +292,11 @@ #define SYS_PMSFCR_EL1_ST_SHIFT 18 #define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) -#define SYS_PMSEVFR_EL1_RES0 0x0000ffff00ff0f55UL +#define SYS_PMSEVFR_EL1_RES0_8_2 \ + (GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\ + BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0)) +#define SYS_PMSEVFR_EL1_RES0_8_3 \ + (SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) #define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) #define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0 @@ -471,6 +476,7 @@ #define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0) #define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) +#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) #define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) @@ -829,6 +835,7 @@ #define ID_AA64MMFR2_CNP_SHIFT 0 /* id_aa64dfr0 */ +#define ID_AA64DFR0_TRACE_FILT_SHIFT 40 #define ID_AA64DFR0_DOUBLELOCK_SHIFT 36 #define 
ID_AA64DFR0_PMSVER_SHIFT 32 #define ID_AA64DFR0_CTX_CMPS_SHIFT 28 @@ -844,9 +851,15 @@ #define ID_AA64DFR0_PMUVER_8_5 0x6 #define ID_AA64DFR0_PMUVER_IMP_DEF 0xf +#define ID_AA64DFR0_PMSVER_8_2 0x1 +#define ID_AA64DFR0_PMSVER_8_3 0x2 + #define ID_DFR0_PERFMON_SHIFT 24 +#define ID_DFR0_PERFMON_8_0 0x3 #define ID_DFR0_PERFMON_8_1 0x4 +#define ID_DFR0_PERFMON_8_4 0x5 +#define ID_DFR0_PERFMON_8_5 0x6 #define ID_ISAR4_SWP_FRAC_SHIFT 28 #define ID_ISAR4_PSR_M_SHIFT 24 @@ -1003,6 +1016,14 @@ /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ #define SYS_MPIDR_SAFE_VAL (BIT(31)) +#define TRFCR_ELx_TS_SHIFT 5 +#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT) +#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT) +#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT) +#define TRFCR_EL2_CX BIT(3) +#define TRFCR_ELx_ExTRE BIT(1) +#define TRFCR_ELx_E0TRE BIT(0) + #ifdef __ASSEMBLY__ .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 diff --git a/arch/arm64/include/asm/trans_pgd.h b/arch/arm64/include/asm/trans_pgd.h new file mode 100644 index 000000000000..5d08e5adf3d5 --- /dev/null +++ b/arch/arm64/include/asm/trans_pgd.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright (c) 2020, Microsoft Corporation. + * Pavel Tatashin <pasha.tatashin@soleen.com> + */ + +#ifndef _ASM_TRANS_TABLE_H +#define _ASM_TRANS_TABLE_H + +#include <linux/bits.h> +#include <linux/types.h> +#include <asm/pgtable-types.h> + +/* + * trans_alloc_page + * - Allocator that should return exactly one zeroed page, if this + * allocator fails, trans_pgd_create_copy() and trans_pgd_map_page() + * return -ENOMEM error. + * + * trans_alloc_arg + * - Passed to trans_alloc_page as an argument + */ + +struct trans_pgd_info { + void * (*trans_alloc_page)(void *arg); + void *trans_alloc_arg; +}; + +int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **trans_pgd, + unsigned long start, unsigned long end); + +int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd, + void *page, unsigned long dst_addr, pgprot_t pgprot); + +int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0, + unsigned long *t0sz, void *page); + +#endif /* _ASM_TRANS_TABLE_H */ diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index f0fe0cc6abe0..0deb88467111 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -87,7 +87,7 @@ static inline void __uaccess_ttbr0_disable(void) ttbr = read_sysreg(ttbr1_el1); ttbr &= ~TTBR_ASID_MASK; /* reserved_pg_dir placed before swapper_pg_dir */ - write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1); + write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1); isb(); /* Set reserved ASID */ write_sysreg(ttbr, ttbr1_el1); diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 86a9d7b3eabe..949788f5ba40 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -38,7 +38,7 @@ #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) #define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) -#define __NR_compat_syscalls 442 +#define __NR_compat_syscalls 443 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index cccfbbefbf95..3d874f624056 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -891,6 +891,8 @@ __SYSCALL(__NR_faccessat2, sys_faccessat2) 
__SYSCALL(__NR_process_madvise, sys_process_madvise) #define __NR_epoll_pwait2 441 __SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2) +#define __NR_mount_setattr 442 +__SYSCALL(__NR_mount_setattr, sys_mount_setattr) /* * Please add new compat syscalls above this comment and update diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index ee6a48df89d9..7379f35ae2c6 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -35,8 +35,13 @@ */ #define HVC_RESET_VECTORS 2 +/* + * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible + */ +#define HVC_VHE_RESTART 3 + /* Max number of HYP stub hypercalls */ -#define HVC_STUB_HCALL_NR 3 +#define HVC_STUB_HCALL_NR 4 /* Error returned when an invalid stub number is passed into x0 */ #define HVC_STUB_ERR 0xbadca11 diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 86364ab6f13f..ed65576ce710 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -17,7 +17,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ return_address.o cpuinfo.o cpu_errata.o \ cpufeature.o alternative.o cacheinfo.o \ smp.o smp_spin_table.o topology.o smccc-call.o \ - syscall.o proton-pack.o + syscall.o proton-pack.o idreg-override.o targets += efi-entry.o @@ -59,9 +59,10 @@ obj-$(CONFIG_CRASH_CORE) += crash_core.o obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o obj-$(CONFIG_ARM64_MTE) += mte.o +obj-y += vdso-wrap.o +obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o -obj-y += vdso/ probes/ -obj-$(CONFIG_COMPAT_VDSO) += vdso32/ +obj-y += probes/ head-y := head.o extra-y += $(head-y) vmlinux.lds diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c index 7ff800045434..fdfecf0991ce 100644 --- a/arch/arm64/kernel/acpi_numa.c +++ b/arch/arm64/kernel/acpi_numa.c @@ -118,15 +118,3 @@ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) node_set(node, numa_nodes_parsed); } -int __init arm64_acpi_numa_init(void) -{ - int ret; - - ret = acpi_numa_init(); - if (ret) { - pr_info("Failed to initialise from firmware\n"); - return ret; - } - - return srat_disabled() ? 
-EINVAL : 0; -} diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index a57cffb752e8..1184c44ea2c7 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -17,7 +17,7 @@ #include <asm/sections.h> #include <linux/stop_machine.h> -#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f) +#define __ALT_PTR(a, f) ((void *)&(a)->f + (a)->f) #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 301784463587..a36e2fc330d4 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -99,6 +99,9 @@ int main(void) DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack)); DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task)); BLANK(); + DEFINE(FTR_OVR_VAL_OFFSET, offsetof(struct arm64_ftr_override, val)); + DEFINE(FTR_OVR_MASK_OFFSET, offsetof(struct arm64_ftr_override, mask)); + BLANK(); #ifdef CONFIG_KVM DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1)); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index a63428301f42..506a1cd37973 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -107,8 +107,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap) } #ifdef CONFIG_ARM64_ERRATUM_1463225 -DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); - static bool has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry, int scope) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 3e6331b64932..066030717a4c 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -352,9 +352,12 @@ static const struct arm64_ftr_bits ftr_ctr[] = { ARM64_FTR_END, }; +static struct arm64_ftr_override __ro_after_init no_override = { }; + struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = { .name = "SYS_CTR_EL0", - .ftr_bits = ftr_ctr + .ftr_bits = ftr_ctr, + .override = &no_override, }; static const struct arm64_ftr_bits ftr_id_mmfr0[] = { @@ -544,13 +547,20 @@ static const struct arm64_ftr_bits ftr_raz[] = { ARM64_FTR_END, }; -#define ARM64_FTR_REG(id, table) { \ - .sys_id = id, \ - .reg = &(struct arm64_ftr_reg){ \ - .name = #id, \ - .ftr_bits = &((table)[0]), \ +#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) { \ + .sys_id = id, \ + .reg = &(struct arm64_ftr_reg){ \ + .name = #id, \ + .override = (ovr), \ + .ftr_bits = &((table)[0]), \ }} +#define ARM64_FTR_REG(id, table) ARM64_FTR_REG_OVERRIDE(id, table, &no_override) + +struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override; +struct arm64_ftr_override __ro_after_init id_aa64pfr1_override; +struct arm64_ftr_override __ro_after_init id_aa64isar1_override; + static const struct __ftr_reg_entry { u32 sys_id; struct arm64_ftr_reg *reg; @@ -585,7 +595,8 @@ static const struct __ftr_reg_entry { /* Op1 = 0, CRn = 0, CRm = 4 */ ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0), - ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1), + ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1, + &id_aa64pfr1_override), ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0), /* Op1 = 0, CRn = 0, CRm = 5 */ @@ -594,11 +605,13 @@ static const struct __ftr_reg_entry { /* Op1 = 0, CRn = 0, CRm = 6 */ ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0), - ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1), + 
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1, + &id_aa64isar1_override), /* Op1 = 0, CRn = 0, CRm = 7 */ ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0), - ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1), + ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1, + &id_aa64mmfr1_override), ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2), /* Op1 = 0, CRn = 1, CRm = 2 */ @@ -770,6 +783,33 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new) for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { u64 ftr_mask = arm64_ftr_mask(ftrp); s64 ftr_new = arm64_ftr_value(ftrp, new); + s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val); + + if ((ftr_mask & reg->override->mask) == ftr_mask) { + s64 tmp = arm64_ftr_safe_value(ftrp, ftr_ovr, ftr_new); + char *str = NULL; + + if (ftr_ovr != tmp) { + /* Unsafe, remove the override */ + reg->override->mask &= ~ftr_mask; + reg->override->val &= ~ftr_mask; + tmp = ftr_ovr; + str = "ignoring override"; + } else if (ftr_new != tmp) { + /* Override was valid */ + ftr_new = tmp; + str = "forced"; + } else if (ftr_ovr == tmp) { + /* Override was the safe value */ + str = "already set"; + } + + if (str) + pr_warn("%s[%d:%d]: %s to %llx\n", + reg->name, + ftrp->shift + ftrp->width - 1, + ftrp->shift, str, tmp); + } val = arm64_ftr_set_value(ftrp, val, ftr_new); @@ -1115,14 +1155,17 @@ u64 read_sanitised_ftr_reg(u32 id) EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg); #define read_sysreg_case(r) \ - case r: return read_sysreg_s(r) + case r: val = read_sysreg_s(r); break; /* * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated. * Read the system register on the current CPU */ -static u64 __read_sysreg_by_encoding(u32 sys_id) +u64 __read_sysreg_by_encoding(u32 sys_id) { + struct arm64_ftr_reg *regp; + u64 val; + switch (sys_id) { read_sysreg_case(SYS_ID_PFR0_EL1); read_sysreg_case(SYS_ID_PFR1_EL1); @@ -1165,6 +1208,14 @@ static u64 __read_sysreg_by_encoding(u32 sys_id) BUG(); return 0; } + + regp = get_arm64_ftr_reg(sys_id); + if (regp) { + val &= ~regp->override->mask; + val |= (regp->override->val & regp->override->mask); + } + + return val; } #include <linux/irqchip/arm-gic-v3.h> @@ -1455,7 +1506,7 @@ static bool cpu_has_broken_dbm(void) /* List of CPUs which have broken DBM support. 
*/ static const struct midr_range cpus[] = { #ifdef CONFIG_ARM64_ERRATUM_1024718 - MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), /* Kryo4xx Silver (rdpe => r1p0) */ MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe), #endif diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 5346953e4382..9d3588450473 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -109,6 +109,55 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs) exit_to_kernel_mode(regs); } +#ifdef CONFIG_ARM64_ERRATUM_1463225 +static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); + +static void cortex_a76_erratum_1463225_svc_handler(void) +{ + u32 reg, val; + + if (!unlikely(test_thread_flag(TIF_SINGLESTEP))) + return; + + if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225))) + return; + + __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1); + reg = read_sysreg(mdscr_el1); + val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE; + write_sysreg(val, mdscr_el1); + asm volatile("msr daifclr, #8"); + isb(); + + /* We will have taken a single-step exception by this point */ + + write_sysreg(reg, mdscr_el1); + __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0); +} + +static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) +{ + if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa)) + return false; + + /* + * We've taken a dummy step exception from the kernel to ensure + * that interrupts are re-enabled on the syscall path. Return back + * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions + * masked so that we can safely restore the mdscr and get on with + * handling the syscall. + */ + regs->pstate |= PSR_D_BIT; + return true; +} +#else /* CONFIG_ARM64_ERRATUM_1463225 */ +static void cortex_a76_erratum_1463225_svc_handler(void) { } +static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) +{ + return false; +} +#endif /* CONFIG_ARM64_ERRATUM_1463225 */ + static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr) { unsigned long far = read_sysreg(far_el1); @@ -186,7 +235,8 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr) gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); arm64_enter_el1_dbg(regs); - do_debug_exception(far, esr, regs); + if (!cortex_a76_erratum_1463225_debug_handler(regs)) + do_debug_exception(far, esr, regs); arm64_exit_el1_dbg(regs); } @@ -362,6 +412,7 @@ static void noinstr el0_svc(struct pt_regs *regs) gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); enter_from_user_mode(); + cortex_a76_erratum_1463225_svc_handler(); do_el0_svc(regs); } @@ -439,6 +490,7 @@ static void noinstr el0_svc_compat(struct pt_regs *regs) gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); enter_from_user_mode(); + cortex_a76_erratum_1463225_svc_handler(); do_el0_svc_compat(regs); } diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index c9bae73f2621..a31a0a713c85 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -261,16 +261,16 @@ alternative_else_nop_endif stp lr, x21, [sp, #S_LR] /* - * In order to be able to dump the contents of struct pt_regs at the - * time the exception was taken (in case we attempt to walk the call - * stack later), chain it together with the stack frames. + * For exceptions from EL0, terminate the callchain here. + * For exceptions from EL1, create a synthetic frame record so the + * interrupted code shows up in the backtrace. 
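+ * (In the EL1 case, the synthetic record pairs the interrupted x29 with
+ * x22, which at this point in kernel_entry holds the saved ELR of the
+ * interrupted context, so the unwinder sees an ordinary {fp, lr} frame.)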
*/ .if \el == 0 - stp xzr, xzr, [sp, #S_STACKFRAME] + mov x29, xzr .else stp x29, x22, [sp, #S_STACKFRAME] - .endif add x29, sp, #S_STACKFRAME + .endif #ifdef CONFIG_ARM64_SW_TTBR0_PAN alternative_if_not ARM64_HAS_PAN @@ -805,7 +805,7 @@ SYM_CODE_END(ret_to_user) // Move from tramp_pg_dir to swapper_pg_dir .macro tramp_map_kernel, tmp mrs \tmp, ttbr1_el1 - add \tmp, \tmp, #(2 * PAGE_SIZE) + add \tmp, \tmp, #TRAMP_SWAPPER_OFFSET bic \tmp, \tmp, #USER_ASID_FLAG msr ttbr1_el1, \tmp #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 @@ -825,7 +825,7 @@ alternative_else_nop_endif // Move from swapper_pg_dir to tramp_pg_dir .macro tramp_unmap_kernel, tmp mrs \tmp, ttbr1_el1 - sub \tmp, \tmp, #(2 * PAGE_SIZE) + sub \tmp, \tmp, #TRAMP_SWAPPER_OFFSET orr \tmp, \tmp, #USER_ASID_FLAG msr ttbr1_el1, \tmp /* diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a0dc987724ed..66b0e0b66e31 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -404,10 +404,6 @@ SYM_FUNC_START_LOCAL(__primary_switched) adr_l x5, init_task msr sp_el0, x5 // Save thread_info -#ifdef CONFIG_ARM64_PTR_AUTH - __ptrauth_keys_init_cpu x5, x6, x7, x8 -#endif - adr_l x8, vectors // load VBAR_EL1 with virtual msr vbar_el1, x8 // vector table address isb @@ -436,10 +432,12 @@ SYM_FUNC_START_LOCAL(__primary_switched) #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) bl kasan_early_init #endif + mov x0, x21 // pass FDT address in x0 + bl early_fdt_map // Try mapping the FDT early + bl init_feature_override // Parse cpu feature overrides #ifdef CONFIG_RANDOMIZE_BASE tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized? b.ne 0f - mov x0, x21 // pass FDT address in x0 bl kaslr_early_init // parse FDT for KASLR options cbz x0, 0f // KASLR disabled? just proceed orr x23, x23, x0 // record KASLR offset @@ -447,6 +445,7 @@ SYM_FUNC_START_LOCAL(__primary_switched) ret // to __primary_switch() 0: #endif + bl switch_to_vhe // Prefer VHE if possible add sp, sp, #16 mov x29, #0 mov x30, #0 @@ -478,13 +477,14 @@ EXPORT_SYMBOL(kimage_vaddr) * booted in EL1 or EL2 respectively. */ SYM_FUNC_START(init_kernel_el) + mov_q x0, INIT_SCTLR_EL1_MMU_OFF + msr sctlr_el1, x0 + mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 b.eq init_el2 SYM_INNER_LABEL(init_el1, SYM_L_LOCAL) - mov_q x0, INIT_SCTLR_EL1_MMU_OFF - msr sctlr_el1, x0 isb mov_q x0, INIT_PSTATE_EL1 msr spsr_el1, x0 @@ -493,50 +493,11 @@ SYM_INNER_LABEL(init_el1, SYM_L_LOCAL) eret SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) -#ifdef CONFIG_ARM64_VHE - /* - * Check for VHE being present. x2 being non-zero indicates that we - * do have VHE, and that the kernel is intended to run at EL2. - */ - mrs x2, id_aa64mmfr1_el1 - ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4 -#else - mov x2, xzr -#endif - cbz x2, init_el2_nvhe - - /* - * When VHE _is_ in use, EL1 will not be used in the host and - * requires no configuration, and all non-hyp-specific EL2 setup - * will be done via the _EL1 system register aliases in __cpu_setup. - */ - mov_q x0, HCR_HOST_VHE_FLAGS - msr hcr_el2, x0 - isb - - init_el2_state vhe - - isb - - mov_q x0, INIT_PSTATE_EL2 - msr spsr_el2, x0 - msr elr_el2, lr - mov w0, #BOOT_CPU_MODE_EL2 - eret - -SYM_INNER_LABEL(init_el2_nvhe, SYM_L_LOCAL) - /* - * When VHE is not in use, early init of EL2 and EL1 needs to be - * done here. 
- */ - mov_q x0, INIT_SCTLR_EL1_MMU_OFF - msr sctlr_el1, x0 - mov_q x0, HCR_HOST_NVHE_FLAGS msr hcr_el2, x0 isb - init_el2_state nvhe + init_el2_state /* Hypervisor stub */ adr_l x0, __hyp_stub_vectors @@ -623,6 +584,7 @@ SYM_FUNC_START_LOCAL(secondary_startup) /* * Common entry point for secondary CPUs. */ + bl switch_to_vhe bl __cpu_secondary_check52bitva bl __cpu_setup // initialise processor adrp x1, swapper_pg_dir @@ -703,16 +665,9 @@ SYM_FUNC_START(__enable_mmu) offset_ttbr1 x1, x3 msr ttbr1_el1, x1 // load TTBR1 isb - msr sctlr_el1, x0 - isb - /* - * Invalidate the local I-cache so that any instructions fetched - * speculatively from the PoC are discarded, since they may have - * been dynamically patched at the PoU. - */ - ic iallu - dsb nsh - isb + + set_sctlr_el1 x0 + ret SYM_FUNC_END(__enable_mmu) @@ -882,13 +837,10 @@ SYM_FUNC_START_LOCAL(__primary_switch) tlbi vmalle1 // Remove any stale TLB entries dsb nsh - - msr sctlr_el1, x19 // re-enable the MMU - isb - ic iallu // flush instructions fetched - dsb nsh // via old mapping isb + set_sctlr_el1 x19 // re-enable the MMU + bl __relocate_kernel #endif #endif diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 9c9f47e9f7f4..b1cef371df2b 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -16,7 +16,6 @@ #define pr_fmt(x) "hibernate: " x #include <linux/cpu.h> #include <linux/kvm_host.h> -#include <linux/mm.h> #include <linux/pm.h> #include <linux/sched.h> #include <linux/suspend.h> @@ -31,13 +30,12 @@ #include <asm/memory.h> #include <asm/mmu_context.h> #include <asm/mte.h> -#include <asm/pgalloc.h> -#include <asm/pgtable-hwdef.h> #include <asm/sections.h> #include <asm/smp.h> #include <asm/smp_plat.h> #include <asm/suspend.h> #include <asm/sysreg.h> +#include <asm/trans_pgd.h> #include <asm/virt.h> /* @@ -178,52 +176,9 @@ int arch_hibernation_header_restore(void *addr) } EXPORT_SYMBOL(arch_hibernation_header_restore); -static int trans_pgd_map_page(pgd_t *trans_pgd, void *page, - unsigned long dst_addr, - pgprot_t pgprot) +static void *hibernate_page_alloc(void *arg) { - pgd_t *pgdp; - p4d_t *p4dp; - pud_t *pudp; - pmd_t *pmdp; - pte_t *ptep; - - pgdp = pgd_offset_pgd(trans_pgd, dst_addr); - if (pgd_none(READ_ONCE(*pgdp))) { - pudp = (void *)get_safe_page(GFP_ATOMIC); - if (!pudp) - return -ENOMEM; - pgd_populate(&init_mm, pgdp, pudp); - } - - p4dp = p4d_offset(pgdp, dst_addr); - if (p4d_none(READ_ONCE(*p4dp))) { - pudp = (void *)get_safe_page(GFP_ATOMIC); - if (!pudp) - return -ENOMEM; - p4d_populate(&init_mm, p4dp, pudp); - } - - pudp = pud_offset(p4dp, dst_addr); - if (pud_none(READ_ONCE(*pudp))) { - pmdp = (void *)get_safe_page(GFP_ATOMIC); - if (!pmdp) - return -ENOMEM; - pud_populate(&init_mm, pudp, pmdp); - } - - pmdp = pmd_offset(pudp, dst_addr); - if (pmd_none(READ_ONCE(*pmdp))) { - ptep = (void *)get_safe_page(GFP_ATOMIC); - if (!ptep) - return -ENOMEM; - pmd_populate_kernel(&init_mm, pmdp, ptep); - } - - ptep = pte_offset_kernel(pmdp, dst_addr); - set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC)); - - return 0; + return (void *)get_safe_page((__force gfp_t)(unsigned long)arg); } /* @@ -239,11 +194,16 @@ static int trans_pgd_map_page(pgd_t *trans_pgd, void *page, * page system. 
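 * (All the temporary page tables used here come from hibernation-safe
 * pages: the trans_pgd_info below plugs hibernate_page_alloc(), i.e.
 * get_safe_page(), in as the allocator, so none of the copies can land
 * in pages the resume image is about to overwrite.)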
*/ static int create_safe_exec_page(void *src_start, size_t length, - unsigned long dst_addr, phys_addr_t *phys_dst_addr) { + struct trans_pgd_info trans_info = { + .trans_alloc_page = hibernate_page_alloc, + .trans_alloc_arg = (__force void *)GFP_ATOMIC, + }; + void *page = (void *)get_safe_page(GFP_ATOMIC); - pgd_t *trans_pgd; + phys_addr_t trans_ttbr0; + unsigned long t0sz; int rc; if (!page) @@ -251,13 +211,7 @@ static int create_safe_exec_page(void *src_start, size_t length, memcpy(page, src_start, length); __flush_icache_range((unsigned long)page, (unsigned long)page + length); - - trans_pgd = (void *)get_safe_page(GFP_ATOMIC); - if (!trans_pgd) - return -ENOMEM; - - rc = trans_pgd_map_page(trans_pgd, page, dst_addr, - PAGE_KERNEL_EXEC); + rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page); if (rc) return rc; @@ -270,12 +224,15 @@ static int create_safe_exec_page(void *src_start, size_t length, * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI * runtime services), while for a userspace-driven test_resume cycle it * points to userspace page tables (and we must point it at a zero page - * ourselves). Elsewhere we only (un)install the idmap with preemption - * disabled, so T0SZ should be as required regardless. + * ourselves). + * + * We change T0SZ as part of installing the idmap. This is undone by + * cpu_uninstall_idmap() in __cpu_suspend_exit(). */ cpu_set_reserved_ttbr0(); local_flush_tlb_all(); - write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1); + __cpu_set_tcr_t0sz(t0sz); + write_sysreg(trans_ttbr0, ttbr0_el1); isb(); *phys_dst_addr = virt_to_phys(page); @@ -462,182 +419,6 @@ int swsusp_arch_suspend(void) return ret; } -static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr) -{ - pte_t pte = READ_ONCE(*src_ptep); - - if (pte_valid(pte)) { - /* - * Resume will overwrite areas that may be marked - * read only (code, rodata). Clear the RDONLY bit from - * the temporary mappings we use during restore. - */ - set_pte(dst_ptep, pte_mkwrite(pte)); - } else if (debug_pagealloc_enabled() && !pte_none(pte)) { - /* - * debug_pagealloc will removed the PTE_VALID bit if - * the page isn't in use by the resume kernel. It may have - * been in use by the original kernel, in which case we need - * to put it back in our copy to do the restore. - * - * Before marking this entry valid, check the pfn should - * be mapped. 
- */ - BUG_ON(!pfn_valid(pte_pfn(pte))); - - set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte))); - } -} - -static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start, - unsigned long end) -{ - pte_t *src_ptep; - pte_t *dst_ptep; - unsigned long addr = start; - - dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC); - if (!dst_ptep) - return -ENOMEM; - pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep); - dst_ptep = pte_offset_kernel(dst_pmdp, start); - - src_ptep = pte_offset_kernel(src_pmdp, start); - do { - _copy_pte(dst_ptep, src_ptep, addr); - } while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end); - - return 0; -} - -static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start, - unsigned long end) -{ - pmd_t *src_pmdp; - pmd_t *dst_pmdp; - unsigned long next; - unsigned long addr = start; - - if (pud_none(READ_ONCE(*dst_pudp))) { - dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC); - if (!dst_pmdp) - return -ENOMEM; - pud_populate(&init_mm, dst_pudp, dst_pmdp); - } - dst_pmdp = pmd_offset(dst_pudp, start); - - src_pmdp = pmd_offset(src_pudp, start); - do { - pmd_t pmd = READ_ONCE(*src_pmdp); - - next = pmd_addr_end(addr, end); - if (pmd_none(pmd)) - continue; - if (pmd_table(pmd)) { - if (copy_pte(dst_pmdp, src_pmdp, addr, next)) - return -ENOMEM; - } else { - set_pmd(dst_pmdp, - __pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY)); - } - } while (dst_pmdp++, src_pmdp++, addr = next, addr != end); - - return 0; -} - -static int copy_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start, - unsigned long end) -{ - pud_t *dst_pudp; - pud_t *src_pudp; - unsigned long next; - unsigned long addr = start; - - if (p4d_none(READ_ONCE(*dst_p4dp))) { - dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC); - if (!dst_pudp) - return -ENOMEM; - p4d_populate(&init_mm, dst_p4dp, dst_pudp); - } - dst_pudp = pud_offset(dst_p4dp, start); - - src_pudp = pud_offset(src_p4dp, start); - do { - pud_t pud = READ_ONCE(*src_pudp); - - next = pud_addr_end(addr, end); - if (pud_none(pud)) - continue; - if (pud_table(pud)) { - if (copy_pmd(dst_pudp, src_pudp, addr, next)) - return -ENOMEM; - } else { - set_pud(dst_pudp, - __pud(pud_val(pud) & ~PUD_SECT_RDONLY)); - } - } while (dst_pudp++, src_pudp++, addr = next, addr != end); - - return 0; -} - -static int copy_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start, - unsigned long end) -{ - p4d_t *dst_p4dp; - p4d_t *src_p4dp; - unsigned long next; - unsigned long addr = start; - - dst_p4dp = p4d_offset(dst_pgdp, start); - src_p4dp = p4d_offset(src_pgdp, start); - do { - next = p4d_addr_end(addr, end); - if (p4d_none(READ_ONCE(*src_p4dp))) - continue; - if (copy_pud(dst_p4dp, src_p4dp, addr, next)) - return -ENOMEM; - } while (dst_p4dp++, src_p4dp++, addr = next, addr != end); - - return 0; -} - -static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start, - unsigned long end) -{ - unsigned long next; - unsigned long addr = start; - pgd_t *src_pgdp = pgd_offset_k(start); - - dst_pgdp = pgd_offset_pgd(dst_pgdp, start); - do { - next = pgd_addr_end(addr, end); - if (pgd_none(READ_ONCE(*src_pgdp))) - continue; - if (copy_p4d(dst_pgdp, src_pgdp, addr, next)) - return -ENOMEM; - } while (dst_pgdp++, src_pgdp++, addr = next, addr != end); - - return 0; -} - -static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start, - unsigned long end) -{ - int rc; - pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC); - - if (!trans_pgd) { - pr_err("Failed to allocate memory for temporary page tables.\n"); - return -ENOMEM; - } - - rc = 
copy_page_tables(trans_pgd, start, end); - if (!rc) - *dst_pgdp = trans_pgd; - - return rc; -} - /* * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit(). * @@ -650,16 +431,20 @@ int swsusp_arch_resume(void) void *zero_page; size_t exit_size; pgd_t *tmp_pg_dir; - phys_addr_t phys_hibernate_exit; void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *, void *, phys_addr_t, phys_addr_t); + struct trans_pgd_info trans_info = { + .trans_alloc_page = hibernate_page_alloc, + .trans_alloc_arg = (void *)GFP_ATOMIC, + }; /* * Restoring the memory image will overwrite the ttbr1 page tables. * Create a second copy of just the linear map, and use this when * restoring. */ - rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END); + rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET, + PAGE_END); if (rc) return rc; @@ -673,19 +458,13 @@ int swsusp_arch_resume(void) return -ENOMEM; } - /* - * Locate the exit code in the bottom-but-one page, so that *NULL - * still has disastrous affects. - */ - hibernate_exit = (void *)PAGE_SIZE; exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start; /* * Copy swsusp_arch_suspend_exit() to a safe page. This will generate * a new set of ttbr0 page tables and load them. */ rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size, - (unsigned long)hibernate_exit, - &phys_hibernate_exit); + (phys_addr_t *)&hibernate_exit); if (rc) { pr_err("Failed to create safe executable page for hibernate_exit code.\n"); return rc; @@ -704,7 +483,7 @@ int swsusp_arch_resume(void) * We can skip this step if we booted at EL1, or are running with VHE. */ if (el2_reset_needed()) { - phys_addr_t el2_vectors = phys_hibernate_exit; /* base */ + phys_addr_t el2_vectors = (phys_addr_t)hibernate_exit; el2_vectors += hibernate_el2_vectors - __hibernate_exit_text_start; /* offset */ diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index 160f5881a0b7..5eccbd62fec8 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -8,9 +8,9 @@ #include <linux/init.h> #include <linux/linkage.h> -#include <linux/irqchip/arm-gic-v3.h> #include <asm/assembler.h> +#include <asm/el2_setup.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/ptrace.h> @@ -47,10 +47,13 @@ SYM_CODE_END(__hyp_stub_vectors) SYM_CODE_START_LOCAL(el1_sync) cmp x0, #HVC_SET_VECTORS - b.ne 2f + b.ne 1f msr vbar_el2, x1 b 9f +1: cmp x0, #HVC_VHE_RESTART + b.eq mutate_to_vhe + 2: cmp x0, #HVC_SOFT_RESTART b.ne 3f mov x0, x2 @@ -70,6 +73,102 @@ SYM_CODE_START_LOCAL(el1_sync) eret SYM_CODE_END(el1_sync) +// nVHE? No way! Give me the real thing! +SYM_CODE_START_LOCAL(mutate_to_vhe) + // Sanity check: MMU *must* be off + mrs x1, sctlr_el2 + tbnz x1, #0, 1f + + // Needs to be VHE capable, obviously + mrs x1, id_aa64mmfr1_el1 + ubfx x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4 + cbz x1, 1f + + // Check whether VHE is disabled from the command line + adr_l x1, id_aa64mmfr1_override + ldr x2, [x1, FTR_OVR_VAL_OFFSET] + ldr x1, [x1, FTR_OVR_MASK_OFFSET] + ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4 + ubfx x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4 + cmp x1, xzr + and x2, x2, x1 + csinv x2, x2, xzr, ne + cbnz x2, 2f + +1: mov_q x0, HVC_STUB_ERR + eret +2: + // Engage the VHE magic! 
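+	// Setting HCR_EL2.E2H (included in HCR_HOST_VHE_FLAGS) switches
+	// the register layout: from this point on, plain *_EL1 accesses
+	// performed at EL2 are redirected to the EL2 registers, while the
+	// *_EL12 aliases used below still reach the real EL1 copies.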
+	mov_q	x0, HCR_HOST_VHE_FLAGS
+	msr	hcr_el2, x0
+	isb
+
+	// Use the EL1 allocated stack, per-cpu offset
+	mrs	x0, sp_el1
+	mov	sp, x0
+	mrs	x0, tpidr_el1
+	msr	tpidr_el2, x0
+
+	// FP configuration, vectors
+	mrs_s	x0, SYS_CPACR_EL12
+	msr	cpacr_el1, x0
+	mrs_s	x0, SYS_VBAR_EL12
+	msr	vbar_el1, x0
+
+	// Use EL2 translations for SPE and disable access from EL1
+	mrs	x0, mdcr_el2
+	bic	x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+	msr	mdcr_el2, x0
+
+	// Transfer the MM state from EL1 to EL2
+	mrs_s	x0, SYS_TCR_EL12
+	msr	tcr_el1, x0
+	mrs_s	x0, SYS_TTBR0_EL12
+	msr	ttbr0_el1, x0
+	mrs_s	x0, SYS_TTBR1_EL12
+	msr	ttbr1_el1, x0
+	mrs_s	x0, SYS_MAIR_EL12
+	msr	mair_el1, x0
+	isb
+
+	// Hack the exception return to stay at EL2
+	mrs	x0, spsr_el1
+	and	x0, x0, #~PSR_MODE_MASK
+	mov	x1, #PSR_MODE_EL2h
+	orr	x0, x0, x1
+	msr	spsr_el1, x0
+
+	b	enter_vhe
+SYM_CODE_END(mutate_to_vhe)
+
+	// At the point where we reach enter_vhe(), we run with
+	// the MMU off (which is enforced by mutate_to_vhe()).
+	// We thus need to be in the idmap, or everything will
+	// explode when enabling the MMU.
+
+	.pushsection	.idmap.text, "ax"
+
+SYM_CODE_START_LOCAL(enter_vhe)
+	// Invalidate TLBs before enabling the MMU
+	tlbi	vmalle1
+	dsb	nsh
+	isb
+
+	// Enable the EL2 S1 MMU, as set up from EL1
+	mrs_s	x0, SYS_SCTLR_EL12
+	set_sctlr_el1	x0
+
+	// Disable the EL1 S1 MMU for good measure
+	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
+	msr_s	SYS_SCTLR_EL12, x0
+
+	mov	x0, xzr
+
+	eret
+SYM_CODE_END(enter_vhe)
+
+	.popsection
+
 .macro invalid_vector	label
SYM_CODE_START_LOCAL(\label)
	b \label
@@ -85,6 +184,8 @@ SYM_CODE_END(\label)
	invalid_vector	el1_fiq_invalid
	invalid_vector	el1_error_invalid

+	.popsection
+
 /*
  * __hyp_set_vectors: Call this after boot to set the initial hypervisor
  * vectors as part of hypervisor installation.
On an SMP system, this should @@ -118,3 +219,27 @@ SYM_FUNC_START(__hyp_reset_vectors) hvc #0 ret SYM_FUNC_END(__hyp_reset_vectors) + +/* + * Entry point to switch to VHE if deemed capable + */ +SYM_FUNC_START(switch_to_vhe) +#ifdef CONFIG_ARM64_VHE + // Need to have booted at EL2 + adr_l x1, __boot_cpu_mode + ldr w0, [x1] + cmp w0, #BOOT_CPU_MODE_EL2 + b.ne 1f + + // and still be at EL1 + mrs x0, CurrentEL + cmp x0, #CurrentEL_EL1 + b.ne 1f + + // Turn the world upside down + mov x0, #HVC_VHE_RESTART + hvc #0 +1: +#endif + ret +SYM_FUNC_END(switch_to_vhe) diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c new file mode 100644 index 000000000000..dffb16682330 --- /dev/null +++ b/arch/arm64/kernel/idreg-override.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Early cpufeature override framework + * + * Copyright (C) 2020 Google LLC + * Author: Marc Zyngier <maz@kernel.org> + */ + +#include <linux/ctype.h> +#include <linux/kernel.h> +#include <linux/libfdt.h> + +#include <asm/cacheflush.h> +#include <asm/cpufeature.h> +#include <asm/setup.h> + +#define FTR_DESC_NAME_LEN 20 +#define FTR_DESC_FIELD_LEN 10 +#define FTR_ALIAS_NAME_LEN 30 +#define FTR_ALIAS_OPTION_LEN 80 + +struct ftr_set_desc { + char name[FTR_DESC_NAME_LEN]; + struct arm64_ftr_override *override; + struct { + char name[FTR_DESC_FIELD_LEN]; + u8 shift; + } fields[]; +}; + +static const struct ftr_set_desc mmfr1 __initconst = { + .name = "id_aa64mmfr1", + .override = &id_aa64mmfr1_override, + .fields = { + { "vh", ID_AA64MMFR1_VHE_SHIFT }, + {} + }, +}; + +static const struct ftr_set_desc pfr1 __initconst = { + .name = "id_aa64pfr1", + .override = &id_aa64pfr1_override, + .fields = { + { "bt", ID_AA64PFR1_BT_SHIFT }, + {} + }, +}; + +static const struct ftr_set_desc isar1 __initconst = { + .name = "id_aa64isar1", + .override = &id_aa64isar1_override, + .fields = { + { "gpi", ID_AA64ISAR1_GPI_SHIFT }, + { "gpa", ID_AA64ISAR1_GPA_SHIFT }, + { "api", ID_AA64ISAR1_API_SHIFT }, + { "apa", ID_AA64ISAR1_APA_SHIFT }, + {} + }, +}; + +extern struct arm64_ftr_override kaslr_feature_override; + +static const struct ftr_set_desc kaslr __initconst = { + .name = "kaslr", +#ifdef CONFIG_RANDOMIZE_BASE + .override = &kaslr_feature_override, +#endif + .fields = { + { "disabled", 0 }, + {} + }, +}; + +static const struct ftr_set_desc * const regs[] __initconst = { + &mmfr1, + &pfr1, + &isar1, + &kaslr, +}; + +static const struct { + char alias[FTR_ALIAS_NAME_LEN]; + char feature[FTR_ALIAS_OPTION_LEN]; +} aliases[] __initconst = { + { "kvm-arm.mode=nvhe", "id_aa64mmfr1.vh=0" }, + { "kvm-arm.mode=protected", "id_aa64mmfr1.vh=0" }, + { "arm64.nobti", "id_aa64pfr1.bt=0" }, + { "arm64.nopauth", + "id_aa64isar1.gpi=0 id_aa64isar1.gpa=0 " + "id_aa64isar1.api=0 id_aa64isar1.apa=0" }, + { "nokaslr", "kaslr.disabled=1" }, +}; + +static int __init find_field(const char *cmdline, + const struct ftr_set_desc *reg, int f, u64 *v) +{ + char opt[FTR_DESC_NAME_LEN + FTR_DESC_FIELD_LEN + 2]; + int len; + + len = snprintf(opt, ARRAY_SIZE(opt), "%s.%s=", + reg->name, reg->fields[f].name); + + if (!parameqn(cmdline, opt, len)) + return -1; + + return kstrtou64(cmdline + len, 0, v); +} + +static void __init match_options(const char *cmdline) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + int f; + + if (!regs[i]->override) + continue; + + for (f = 0; strlen(regs[i]->fields[f].name); f++) { + u64 shift = regs[i]->fields[f].shift; + u64 mask = 0xfUL << shift; + u64 v; + + if (find_field(cmdline, regs[i], f, 
&v)) + continue; + + regs[i]->override->val &= ~mask; + regs[i]->override->val |= (v << shift) & mask; + regs[i]->override->mask |= mask; + + return; + } + } +} + +static __init void __parse_cmdline(const char *cmdline, bool parse_aliases) +{ + do { + char buf[256]; + size_t len; + int i; + + cmdline = skip_spaces(cmdline); + + for (len = 0; cmdline[len] && !isspace(cmdline[len]); len++); + if (!len) + return; + + len = min(len, ARRAY_SIZE(buf) - 1); + strncpy(buf, cmdline, len); + buf[len] = 0; + + if (strcmp(buf, "--") == 0) + return; + + cmdline += len; + + match_options(buf); + + for (i = 0; parse_aliases && i < ARRAY_SIZE(aliases); i++) + if (parameq(buf, aliases[i].alias)) + __parse_cmdline(aliases[i].feature, false); + } while (1); +} + +static __init void parse_cmdline(void) +{ + if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) { + const u8 *prop; + void *fdt; + int node; + + fdt = get_early_fdt_ptr(); + if (!fdt) + goto out; + + node = fdt_path_offset(fdt, "/chosen"); + if (node < 0) + goto out; + + prop = fdt_getprop(fdt, node, "bootargs", NULL); + if (!prop) + goto out; + + __parse_cmdline(prop, true); + + if (!IS_ENABLED(CONFIG_CMDLINE_EXTEND)) + return; + } + +out: + __parse_cmdline(CONFIG_CMDLINE, true); +} + +/* Keep checkers quiet */ +void init_feature_override(void); + +asmlinkage void __init init_feature_override(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + if (regs[i]->override) { + regs[i]->override->val = 0; + regs[i]->override->mask = 0; + } + } + + parse_cmdline(); + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + if (regs[i]->override) + __flush_dcache_area(regs[i]->override, + sizeof(*regs[i]->override)); + } +} diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index f676243abac6..23f1a557bd9f 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -64,7 +64,6 @@ __efistub__ctype = _ctype; /* Alternative callbacks for init-time patching of nVHE hyp code. */ KVM_NVHE_ALIAS(kvm_patch_vector_branch); KVM_NVHE_ALIAS(kvm_update_va_mask); -KVM_NVHE_ALIAS(kvm_update_kimg_phys_offset); KVM_NVHE_ALIAS(kvm_get_kimage_voffset); /* Global kernel state accessed by nVHE hyp code. */ diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 1c74c45b9494..27f8939deb1b 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -19,6 +19,7 @@ #include <asm/memory.h> #include <asm/mmu.h> #include <asm/sections.h> +#include <asm/setup.h> enum kaslr_status { KASLR_ENABLED, @@ -50,39 +51,7 @@ static __init u64 get_kaslr_seed(void *fdt) return ret; } -static __init bool cmdline_contains_nokaslr(const u8 *cmdline) -{ - const u8 *str; - - str = strstr(cmdline, "nokaslr"); - return str == cmdline || (str > cmdline && *(str - 1) == ' '); -} - -static __init bool is_kaslr_disabled_cmdline(void *fdt) -{ - if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) { - int node; - const u8 *prop; - - node = fdt_path_offset(fdt, "/chosen"); - if (node < 0) - goto out; - - prop = fdt_getprop(fdt, node, "bootargs", NULL); - if (!prop) - goto out; - - if (cmdline_contains_nokaslr(prop)) - return true; - - if (IS_ENABLED(CONFIG_CMDLINE_EXTEND)) - goto out; - - return false; - } -out: - return cmdline_contains_nokaslr(CONFIG_CMDLINE); -} +struct arm64_ftr_override kaslr_feature_override __initdata; /* * This routine will be executed with the kernel mapped at its default virtual @@ -92,12 +61,11 @@ out: * containing function pointers) to be reinitialized, and zero-initialized * .bss variables will be reset to 0. 
*/ -u64 __init kaslr_early_init(u64 dt_phys) +u64 __init kaslr_early_init(void) { void *fdt; u64 seed, offset, mask, module_range; unsigned long raw; - int size; /* * Set a reasonable default for module_alloc_base in case @@ -111,8 +79,7 @@ u64 __init kaslr_early_init(u64 dt_phys) * and proceed with KASLR disabled. We will make another * attempt at mapping the FDT in setup_machine() */ - early_fixmap_init(); - fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); + fdt = get_early_fdt_ptr(); if (!fdt) { kaslr_status = KASLR_DISABLED_FDT_REMAP; return 0; @@ -127,7 +94,7 @@ u64 __init kaslr_early_init(u64 dt_phys) * Check if 'nokaslr' appears on the command line, and * return 0 if that is the case. */ - if (is_kaslr_disabled_cmdline(fdt)) { + if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) { kaslr_status = KASLR_DISABLED_CMDLINE; return 0; } diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index a0b144cfaea7..90a335c74442 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -42,6 +42,7 @@ static void _kexec_image_info(const char *func, int line, pr_debug(" start: %lx\n", kimage->start); pr_debug(" head: %lx\n", kimage->head); pr_debug(" nr_segments: %lu\n", kimage->nr_segments); + pr_debug(" kern_reloc: %pa\n", &kimage->arch.kern_reloc); for (i = 0; i < kimage->nr_segments; i++) { pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n", @@ -58,6 +59,23 @@ void machine_kexec_cleanup(struct kimage *kimage) /* Empty routine needed to avoid build errors. */ } +int machine_kexec_post_load(struct kimage *kimage) +{ + void *reloc_code = page_to_virt(kimage->control_code_page); + + memcpy(reloc_code, arm64_relocate_new_kernel, + arm64_relocate_new_kernel_size); + kimage->arch.kern_reloc = __pa(reloc_code); + kexec_image_info(kimage); + + /* Flush the reloc_code in preparation for its execution. */ + __flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size); + flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code + + arm64_relocate_new_kernel_size); + + return 0; +} + /** * machine_kexec_prepare - Prepare for a kexec reboot. * @@ -67,8 +85,6 @@ void machine_kexec_cleanup(struct kimage *kimage) */ int machine_kexec_prepare(struct kimage *kimage) { - kexec_image_info(kimage); - if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) { pr_err("Can't kexec: CPUs are stuck in the kernel.\n"); return -EBUSY; @@ -143,8 +159,6 @@ static void kexec_segment_flush(const struct kimage *kimage) */ void machine_kexec(struct kimage *kimage) { - phys_addr_t reboot_code_buffer_phys; - void *reboot_code_buffer; bool in_kexec_crash = (kimage == kexec_crash_image); bool stuck_cpus = cpus_are_stuck_in_kernel(); @@ -155,31 +169,6 @@ void machine_kexec(struct kimage *kimage) WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()), "Some CPUs may be stale, kdump will be unreliable.\n"); - reboot_code_buffer_phys = page_to_phys(kimage->control_code_page); - reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys); - - kexec_image_info(kimage); - - /* - * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use - * after the kernel is shut down. - */ - memcpy(reboot_code_buffer, arm64_relocate_new_kernel, - arm64_relocate_new_kernel_size); - - /* Flush the reboot_code_buffer in preparation for its execution. 
*/ - __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size); - - /* - * Although we've killed off the secondary CPUs, we don't update - * the online mask if we're handling a crash kernel and consequently - * need to avoid flush_icache_range(), which will attempt to IPI - * the offline CPUs. Therefore, we must use the __* variant here. - */ - __flush_icache_range((uintptr_t)reboot_code_buffer, - (uintptr_t)reboot_code_buffer + - arm64_relocate_new_kernel_size); - /* Flush the kimage list and its buffers. */ kexec_list_flush(kimage); @@ -193,7 +182,7 @@ void machine_kexec(struct kimage *kimage) /* * cpu_soft_restart will shutdown the MMU, disable data caches, then - * transfer control to the reboot_code_buffer which contains a copy of + * transfer control to the kern_reloc which contains a copy of * the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel * uses physical addressing to relocate the new image to its final * position and transfers control to the image entry point when the @@ -203,12 +192,8 @@ void machine_kexec(struct kimage *kimage) * userspace (kexec-tools). * In kexec_file case, the kernel starts directly without purgatory. */ - cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, -#ifdef CONFIG_KEXEC_FILE - kimage->arch.dtb_mem); -#else - 0); -#endif + cpu_soft_restart(kimage->arch.kern_reloc, kimage->head, kimage->start, + kimage->arch.dtb_mem); BUG(); /* Should never get here. */ } diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 03210f644790..0cde47a63beb 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -182,8 +182,10 @@ static int create_dtb(struct kimage *image, /* duplicate a device tree blob */ ret = fdt_open_into(initial_boot_params, buf, buf_size); - if (ret) + if (ret) { + vfree(buf); return -EINVAL; + } ret = setup_dtb(image, initrd_load_addr, initrd_len, cmdline, buf); diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index 2e224435c024..e53493d8b208 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -131,7 +131,7 @@ u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs, } #endif -#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b)) +#define cmp_3way(a, b) ((a) < (b) ? 
-1 : (a) > (b)) static int cmp_rela(const void *a, const void *b) { diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 80b62fe49dcf..b3c70a612c7a 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -19,12 +19,13 @@ #include <asm/barrier.h> #include <asm/cpufeature.h> #include <asm/mte.h> -#include <asm/mte-kasan.h> #include <asm/ptrace.h> #include <asm/sysreg.h> u64 gcr_kernel_excl __ro_after_init; +static bool report_fault_once = true; + static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap) { pte_t old_pte = READ_ONCE(*ptep); @@ -86,51 +87,6 @@ int memcmp_pages(struct page *page1, struct page *page2) return ret; } -u8 mte_get_mem_tag(void *addr) -{ - if (!system_supports_mte()) - return 0xFF; - - asm(__MTE_PREAMBLE "ldg %0, [%0]" - : "+r" (addr)); - - return mte_get_ptr_tag(addr); -} - -u8 mte_get_random_tag(void) -{ - void *addr; - - if (!system_supports_mte()) - return 0xFF; - - asm(__MTE_PREAMBLE "irg %0, %0" - : "+r" (addr)); - - return mte_get_ptr_tag(addr); -} - -void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag) -{ - void *ptr = addr; - - if ((!system_supports_mte()) || (size == 0)) - return addr; - - /* Make sure that size is MTE granule aligned. */ - WARN_ON(size & (MTE_GRANULE_SIZE - 1)); - - /* Make sure that the address is MTE granule aligned. */ - WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1)); - - tag = 0xF0 | tag; - ptr = (void *)__tag_set(ptr, tag); - - mte_assign_mem_tag_range(ptr, size); - - return ptr; -} - void mte_init_tags(u64 max_tag) { static bool gcr_kernel_excl_initialized; @@ -158,6 +114,16 @@ void mte_enable_kernel(void) isb(); } +void mte_set_report_once(bool state) +{ + WRITE_ONCE(report_fault_once, state); +} + +bool mte_report_once(void) +{ + return READ_ONCE(report_fault_once); +} + static void update_sctlr_el1_tcf0(u64 tcf0) { /* ISB required for the kernel uaccess routines */ diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 3605f77ad4df..7d2318f80955 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -280,7 +280,7 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj, return 0; } -static struct attribute_group armv8_pmuv3_events_attr_group = { +static const struct attribute_group armv8_pmuv3_events_attr_group = { .name = "events", .attrs = armv8_pmuv3_event_attrs, .is_visible = armv8pmu_event_attr_is_visible, @@ -300,7 +300,7 @@ static struct attribute *armv8_pmuv3_format_attrs[] = { NULL, }; -static struct attribute_group armv8_pmuv3_format_attr_group = { +static const struct attribute_group armv8_pmuv3_format_attr_group = { .name = "format", .attrs = armv8_pmuv3_format_attrs, }; @@ -322,7 +322,7 @@ static struct attribute *armv8_pmuv3_caps_attrs[] = { NULL, }; -static struct attribute_group armv8_pmuv3_caps_attr_group = { +static const struct attribute_group armv8_pmuv3_caps_attr_group = { .name = "caps", .attrs = armv8_pmuv3_caps_attrs, }; @@ -810,7 +810,7 @@ static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc, { int idx; - for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx ++) { + for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) { if (!test_and_set_bit(idx, cpuc->used_mask)) return idx; } @@ -1188,6 +1188,12 @@ static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu) armv8_pmuv3_map_event); } +static int armv8_a78_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a78", + armv8_pmuv3_map_event); +} + static int armv8_e1_pmu_init(struct 
arm_pmu *cpu_pmu) { return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_e1", @@ -1225,6 +1231,7 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = { {.compatible = "arm,cortex-a75-pmu", .data = armv8_a75_pmu_init}, {.compatible = "arm,cortex-a76-pmu", .data = armv8_a76_pmu_init}, {.compatible = "arm,cortex-a77-pmu", .data = armv8_a77_pmu_init}, + {.compatible = "arm,cortex-a78-pmu", .data = armv8_a78_pmu_init}, {.compatible = "arm,neoverse-e1-pmu", .data = armv8_e1_pmu_init}, {.compatible = "arm,neoverse-n1-pmu", .data = armv8_n1_pmu_init}, {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init}, diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c index a412d8edbcd2..2c247634552b 100644 --- a/arch/arm64/kernel/probes/uprobes.c +++ b/arch/arm64/kernel/probes/uprobes.c @@ -38,7 +38,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, /* TODO: Currently we do not support AARCH32 instruction probing */ if (mm->context.flags & MMCF_AARCH32) - return -ENOTSUPP; + return -EOPNOTSUPP; else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE)) return -EINVAL; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 6616486a58fe..325c83b1a24d 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -304,7 +304,7 @@ void __show_regs(struct pt_regs *regs) } } -void show_regs(struct pt_regs * regs) +void show_regs(struct pt_regs *regs) { __show_regs(regs); dump_backtrace(regs, NULL, KERN_DEFAULT); @@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, ptrauth_thread_init_kernel(p); - if (likely(!(p->flags & PF_KTHREAD))) { + if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) { *childregs = *current_pt_regs(); childregs->regs[0] = 0; @@ -587,7 +587,7 @@ unsigned long get_wchan(struct task_struct *p) ret = frame.pc; goto out; } - } while (count ++ < 16); + } while (count++ < 16); out: put_task_stack(p); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 8ac487c84e37..170f42fd6101 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -194,6 +194,7 @@ static void ptrace_hbptriggered(struct perf_event *bp, } arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger, desc); + return; } #endif arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc); @@ -1796,7 +1797,7 @@ int syscall_trace_enter(struct pt_regs *regs) if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) { tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); - if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU)) + if (flags & _TIF_SYSCALL_EMU) return NO_SYSCALL; } diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index 84eec95ec06c..b78ea5de97a4 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -17,28 +17,24 @@ /* * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it. * - * The memory that the old kernel occupies may be overwritten when coping the + * The memory that the old kernel occupies may be overwritten when copying the * new image to its final location. To assure that the * arm64_relocate_new_kernel routine which does that copy is not overwritten, * all code and data needed by arm64_relocate_new_kernel must be between the * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. 
The * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec - * control_code_page, a special page which has been set up to be preserved - * during the copy operation. + * safe memory that has been set up to be preserved during the copy operation. */ SYM_CODE_START(arm64_relocate_new_kernel) - /* Setup the list loop variables. */ mov x18, x2 /* x18 = dtb address */ mov x17, x1 /* x17 = kimage_start */ mov x16, x0 /* x16 = kimage_head */ - raw_dcache_line_size x15, x0 /* x15 = dcache line size */ mov x14, xzr /* x14 = entry ptr */ mov x13, xzr /* x13 = copy dest */ - /* Check if the new image needs relocation. */ tbnz x16, IND_DONE_BIT, .Ldone - + raw_dcache_line_size x15, x1 /* x15 = dcache line size */ .Lloop: and x12, x16, PAGE_MASK /* x12 = addr */ @@ -47,44 +43,28 @@ SYM_CODE_START(arm64_relocate_new_kernel) tbz x16, IND_SOURCE_BIT, .Ltest_indirection /* Invalidate dest page to PoC. */ - mov x0, x13 - add x20, x0, #PAGE_SIZE + mov x2, x13 + add x20, x2, #PAGE_SIZE sub x1, x15, #1 - bic x0, x0, x1 -2: dc ivac, x0 - add x0, x0, x15 - cmp x0, x20 + bic x2, x2, x1 +2: dc ivac, x2 + add x2, x2, x15 + cmp x2, x20 b.lo 2b dsb sy - mov x20, x13 - mov x21, x12 - copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7 - - /* dest += PAGE_SIZE */ - add x13, x13, PAGE_SIZE + copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8 b .Lnext - .Ltest_indirection: tbz x16, IND_INDIRECTION_BIT, .Ltest_destination - - /* ptr = addr */ - mov x14, x12 + mov x14, x12 /* ptr = addr */ b .Lnext - .Ltest_destination: tbz x16, IND_DESTINATION_BIT, .Lnext - - /* dest = addr */ - mov x13, x12 - + mov x13, x12 /* dest = addr */ .Lnext: - /* entry = *ptr++ */ - ldr x16, [x14], #8 - - /* while (!(entry & DONE)) */ - tbz x16, IND_DONE_BIT, .Lloop - + ldr x16, [x14], #8 /* entry = *ptr++ */ + tbz x16, IND_DONE_BIT, .Lloop /* while (!(entry & DONE)) */ .Ldone: /* wait for writes from copy_page to finish */ dsb nsh diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index c18aacde8bb0..61845c0821d9 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -168,6 +168,21 @@ static void __init smp_build_mpidr_hash(void) pr_warn("Large number of MPIDR hash buckets detected\n"); } +static void *early_fdt_ptr __initdata; + +void __init *get_early_fdt_ptr(void) +{ + return early_fdt_ptr; +} + +asmlinkage void __init early_fdt_map(u64 dt_phys) +{ + int fdt_size; + + early_fixmap_init(); + early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL); +} + static void __init setup_machine_fdt(phys_addr_t dt_phys) { int size; diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 6bdef7362c0e..5bfd9b87f85d 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -100,6 +100,7 @@ SYM_FUNC_END(__cpu_suspend_enter) .pushsection ".idmap.text", "awx" SYM_CODE_START(cpu_resume) bl init_kernel_el + bl switch_to_vhe bl __cpu_setup /* enable the MMU early - so we can access sleep_save_stash by va */ adrp x1, swapper_pg_dir diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index ad00f99ee9b0..357590beaabb 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -434,8 +434,10 @@ static void __init hyp_mode_check(void) "CPU: CPUs started in inconsistent modes"); else pr_info("CPU: All CPU(s) started at EL1\n"); - if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) + if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) { kvm_compute_layout(); + kvm_apply_hyp_relocations(); + } } void __init smp_cpus_done(unsigned int max_cpus) 
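The kvm_apply_hyp_relocations() call added above consumes the .hyp.reloc table that the gen-hyprel tool (introduced further down in this diff) emits and that the new HYPERVISOR_RELOC_SECTION in vmlinux.lds.S brackets with __hyp_reloc_begin/__hyp_reloc_end. Its definition is not among the hunks shown here, so the following is only a minimal sketch of such a walk, assuming a hypothetical early_kern_hyp_va() conversion helper:

#include <linux/init.h>
#include <linux/types.h>

/* Assumed conversion helper; the kernel's actual one may differ. */
extern u64 early_kern_hyp_va(u64 kimg_va);

extern s32 __hyp_reloc_begin[], __hyp_reloc_end[];

/* Sketch only, not the patch's implementation. */
static void __init apply_hyp_relocations_sketch(void)
{
	s32 *rel;

	for (rel = __hyp_reloc_begin; rel < __hyp_reloc_end; ++rel) {
		/* Each entry is a PREL32 offset to a location holding a kernel VA. */
		u64 *pos = (u64 *)((char *)rel + *rel);

		/* Rewrite that kernel VA in place as a hyp VA. */
		*pos = early_kern_hyp_va(*pos);
	}
}

Per the gen-hyprel comments below, the underlying ABS64 words also receive dynamic relocations when CONFIG_RELOCATABLE=y, so by the time such a walk runs the stored VAs are already KASLR-adjusted.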
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index fa56af1a59c3..ad20981dfda4 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -44,6 +44,10 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) unsigned long fp = frame->fp; struct stack_info info; + /* Terminal record; nothing to unwind */ + if (!fp) + return -ENOENT; + if (fp & 0xf) return -EINVAL; @@ -104,15 +108,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) frame->pc = ptrauth_strip_insn_pac(frame->pc); - /* - * Frames created upon entry from EL0 have NULL FP and PC values, so - * don't bother reporting these. Frames created by __noreturn functions - * might have a valid FP even if PC is bogus, so only terminate where - * both are NULL. - */ - if (!frame->fp && !frame->pc) - return -EINVAL; - return 0; } NOKPROBE_SYMBOL(unwind_frame); diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index a67b37a7a47e..d7564891ffe1 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -119,7 +119,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) if (!ret) ret = -EOPNOTSUPP; } else { - __cpu_suspend_exit(); + RCU_NONIDLE(__cpu_suspend_exit()); } unpause_graph_tracing(); diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index c2877c332f2d..b9cf12b271d7 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -65,35 +65,6 @@ static inline bool has_syscall_work(unsigned long flags) int syscall_trace_enter(struct pt_regs *regs); void syscall_trace_exit(struct pt_regs *regs); -#ifdef CONFIG_ARM64_ERRATUM_1463225 -DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); - -static void cortex_a76_erratum_1463225_svc_handler(void) -{ - u32 reg, val; - - if (!unlikely(test_thread_flag(TIF_SINGLESTEP))) - return; - - if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225))) - return; - - __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1); - reg = read_sysreg(mdscr_el1); - val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE; - write_sysreg(val, mdscr_el1); - asm volatile("msr daifclr, #8"); - isb(); - - /* We will have taken a single-step exception by this point */ - - write_sysreg(reg, mdscr_el1); - __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0); -} -#else -static void cortex_a76_erratum_1463225_svc_handler(void) { } -#endif /* CONFIG_ARM64_ERRATUM_1463225 */ - static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, const syscall_fn_t syscall_table[]) { @@ -120,7 +91,6 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, * (Similarly for HVC and SMC elsewhere.) 
*/ - cortex_a76_erratum_1463225_svc_handler(); local_daif_restore(DAIF_PROCCTX); if (flags & _TIF_MTE_ASYNC_FAULT) { diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index f6faa697e83e..e08a4126453a 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -199,76 +199,38 @@ static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate) return 0; } -static inline bool -enable_policy_freq_counters(int cpu, cpumask_var_t valid_cpus) -{ - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - - if (!policy) { - pr_debug("CPU%d: No cpufreq policy found.\n", cpu); - return false; - } - - if (cpumask_subset(policy->related_cpus, valid_cpus)) - cpumask_or(amu_fie_cpus, policy->related_cpus, - amu_fie_cpus); - - cpufreq_cpu_put(policy); - - return true; -} - static DEFINE_STATIC_KEY_FALSE(amu_fie_key); #define amu_freq_invariant() static_branch_unlikely(&amu_fie_key) -static int __init init_amu_fie(void) +static void amu_fie_setup(const struct cpumask *cpus) { - bool invariance_status = topology_scale_freq_invariant(); - cpumask_var_t valid_cpus; - bool have_policy = false; - int ret = 0; + bool invariant; int cpu; - if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL)) - return -ENOMEM; - - if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) { - ret = -ENOMEM; - goto free_valid_mask; - } + /* We are already set since the last insmod of cpufreq driver */ + if (unlikely(cpumask_subset(cpus, amu_fie_cpus))) + return; - for_each_present_cpu(cpu) { + for_each_cpu(cpu, cpus) { if (!freq_counters_valid(cpu) || freq_inv_set_max_ratio(cpu, cpufreq_get_hw_max_freq(cpu) * 1000, arch_timer_get_rate())) - continue; - - cpumask_set_cpu(cpu, valid_cpus); - have_policy |= enable_policy_freq_counters(cpu, valid_cpus); + return; } - /* - * If we are not restricted by cpufreq policies, we only enable - * the use of the AMU feature for FIE if all CPUs support AMU. - * Otherwise, enable_policy_freq_counters has already enabled - * policy cpus. - */ - if (!have_policy && cpumask_equal(valid_cpus, cpu_present_mask)) - cpumask_or(amu_fie_cpus, amu_fie_cpus, valid_cpus); + cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus); - if (!cpumask_empty(amu_fie_cpus)) { - pr_info("CPUs[%*pbl]: counters will be used for FIE.", - cpumask_pr_args(amu_fie_cpus)); - static_branch_enable(&amu_fie_key); - } + invariant = topology_scale_freq_invariant(); - /* - * If the system is not fully invariant after AMU init, disable - * partial use of counters for frequency invariance. - */ - if (!topology_scale_freq_invariant()) - static_branch_disable(&amu_fie_key); + /* We aren't fully invariant yet */ + if (!invariant && !cpumask_equal(amu_fie_cpus, cpu_present_mask)) + return; + + static_branch_enable(&amu_fie_key); + + pr_debug("CPUs[%*pbl]: counters will be used for FIE.", + cpumask_pr_args(cpus)); /* * Task scheduler behavior depends on frequency invariance support, @@ -276,15 +238,50 @@ static int __init init_amu_fie(void) * a result of counter initialisation and use, retrigger the build of * scheduling domains to ensure the information is propagated properly. 
*/ - if (invariance_status != topology_scale_freq_invariant()) + if (!invariant) rebuild_sched_domains_energy(); +} + +static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_policy *policy = data; + + if (val == CPUFREQ_CREATE_POLICY) + amu_fie_setup(policy->related_cpus); + + /* + * We don't need to handle CPUFREQ_REMOVE_POLICY event as the AMU + * counters don't have any dependency on cpufreq driver once we have + * initialized AMU support and enabled invariance. The AMU counters will + * keep on working just fine in the absence of the cpufreq driver, and + * for the CPUs for which there are no counters available, the last set + * value of freq_scale will remain valid as that is the frequency those + * CPUs are running at. + */ + + return 0; +} + +static struct notifier_block init_amu_fie_notifier = { + .notifier_call = init_amu_fie_callback, +}; + +static int __init init_amu_fie(void) +{ + int ret; + + if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) + return -ENOMEM; -free_valid_mask: - free_cpumask_var(valid_cpus); + ret = cpufreq_register_notifier(&init_amu_fie_notifier, + CPUFREQ_POLICY_NOTIFIER); + if (ret) + free_cpumask_var(amu_fie_cpus); return ret; } -late_initcall_sync(init_amu_fie); +core_initcall(init_amu_fie); bool arch_freq_counters_available(const struct cpumask *cpus) { diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 6895ce777e7f..a05d34f0e82a 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -45,7 +45,7 @@ #include <asm/system_misc.h> #include <asm/sysreg.h> -static const char *handler[]= { +static const char *handler[] = { "Synchronous Abort", "IRQ", "FIQ", diff --git a/arch/arm64/kernel/vdso/vdso.S b/arch/arm64/kernel/vdso-wrap.S index c4b1990bf2be..c4b1990bf2be 100644 --- a/arch/arm64/kernel/vdso/vdso.S +++ b/arch/arm64/kernel/vdso-wrap.S diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index cd9c3fa25902..945e6bb326e3 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -29,7 +29,8 @@ ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18 ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO -CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) +CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) \ + $(CC_FLAGS_LTO) KASAN_SANITIZE := n UBSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y @@ -44,7 +45,6 @@ endif # Disable gcov profiling for VDSO code GCOV_PROFILE := n -obj-y += vdso.o targets += vdso.lds CPPFLAGS_vdso.lds += -P -C -U$(ARCH) diff --git a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh index 8b806eacd0a6..0387209d65b1 100755 --- a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh +++ b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh @@ -13,4 +13,4 @@ LC_ALL=C sed -n -e 's/^00*/0/' -e \ -'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p' +'s/^\([0-9a-fA-F]*\) . 
VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2 0x\1/p' diff --git a/arch/arm64/kernel/vdso32/vdso.S b/arch/arm64/kernel/vdso32-wrap.S index e72ac7bc4c04..e72ac7bc4c04 100644 --- a/arch/arm64/kernel/vdso32/vdso.S +++ b/arch/arm64/kernel/vdso32-wrap.S diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index a1e0f91e6cea..789ad420f16b 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -155,7 +155,6 @@ c-obj-vdso-gettimeofday := $(addprefix $(obj)/, $(c-obj-vdso-gettimeofday)) asm-obj-vdso := $(addprefix $(obj)/, $(asm-obj-vdso)) obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso) -obj-y += vdso.o targets += vdso.lds CPPFLAGS_vdso.lds += -P -C -U$(ARCH) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 4c0b0c89ad59..7eea7888bb02 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -31,10 +31,11 @@ jiffies = jiffies_64; __stop___kvm_ex_table = .; #define HYPERVISOR_DATA_SECTIONS \ - HYP_SECTION_NAME(.data..ro_after_init) : { \ - __hyp_data_ro_after_init_start = .; \ + HYP_SECTION_NAME(.rodata) : { \ + __hyp_rodata_start = .; \ *(HYP_SECTION_NAME(.data..ro_after_init)) \ - __hyp_data_ro_after_init_end = .; \ + *(HYP_SECTION_NAME(.rodata)) \ + __hyp_rodata_end = .; \ } #define HYPERVISOR_PERCPU_SECTION \ @@ -42,10 +43,19 @@ jiffies = jiffies_64; HYP_SECTION_NAME(.data..percpu) : { \ *(HYP_SECTION_NAME(.data..percpu)) \ } + +#define HYPERVISOR_RELOC_SECTION \ + .hyp.reloc : ALIGN(4) { \ + __hyp_reloc_begin = .; \ + *(.hyp.reloc) \ + __hyp_reloc_end = .; \ + } + #else /* CONFIG_KVM */ #define HYPERVISOR_EXTABLE #define HYPERVISOR_DATA_SECTIONS #define HYPERVISOR_PERCPU_SECTION +#define HYPERVISOR_RELOC_SECTION #endif #define HYPERVISOR_TEXT \ @@ -216,6 +226,8 @@ SECTIONS PERCPU_SECTION(L1_CACHE_BYTES) HYPERVISOR_PERCPU_SECTION + HYPERVISOR_RELOC_SECTION + .rela.dyn : ALIGN(8) { *(.rela .rela*) } @@ -316,3 +328,11 @@ ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, * If padding is applied before .head.text, virt<->phys conversions will fail. 
*/ ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned") + +ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET, + "RESERVED_SWAPPER_OFFSET is wrong!") + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET, + "TRAMP_SWAPPER_OFFSET is wrong!") +#endif diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 13b017284bf9..589921392cb1 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -16,7 +16,7 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \ inject_fault.o va_layout.o handle_exit.o \ guest.o debug.o reset.o sys_regs.o \ vgic-sys-reg-v3.o fpsimd.o pmu.o \ - arch_timer.o \ + arch_timer.o trng.o\ vgic/vgic.o vgic/vgic-init.o \ vgic/vgic-irqfd.o vgic/vgic-v2.o \ vgic/vgic-v3.o vgic/vgic-v4.o \ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index fe60d25c000e..fc4c95dd2d26 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1750,11 +1750,10 @@ static int init_hyp_mode(void) goto out_err; } - err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_ro_after_init_start), - kvm_ksym_ref(__hyp_data_ro_after_init_end), - PAGE_HYP_RO); + err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start), + kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO); if (err) { - kvm_err("Cannot map .hyp.data..ro_after_init section\n"); + kvm_err("Cannot map .hyp.rodata section\n"); goto out_err; } @@ -1967,6 +1966,9 @@ static int __init early_kvm_mode_cfg(char *arg) return 0; } + if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) + return 0; + return -EINVAL; } early_param("kvm-arm.mode", early_kvm_mode_cfg); diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index d179056e1af8..5f49df4ffdd8 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -119,7 +119,7 @@ el2_error: .macro invalid_vector label, target = __guest_exit_panic .align 2 -SYM_CODE_START(\label) +SYM_CODE_START_LOCAL(\label) b \target SYM_CODE_END(\label) .endm diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 84473574c2e7..54f4860cd87c 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -505,8 +505,8 @@ static inline void __kvm_unexpected_el2_exception(void) struct exception_table_entry *entry, *end; unsigned long elr_el2 = read_sysreg(elr_el2); - entry = hyp_symbol_addr(__start___kvm_ex_table); - end = hyp_symbol_addr(__stop___kvm_ex_table); + entry = &__start___kvm_ex_table; + end = &__stop___kvm_ex_table; while (entry < end) { addr = (unsigned long)&entry->insn + entry->insn; diff --git a/arch/arm64/kvm/hyp/nvhe/.gitignore b/arch/arm64/kvm/hyp/nvhe/.gitignore index 695d73d0249e..5b6c43cc96f8 100644 --- a/arch/arm64/kvm/hyp/nvhe/.gitignore +++ b/arch/arm64/kvm/hyp/nvhe/.gitignore @@ -1,2 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only +gen-hyprel hyp.lds +hyp-reloc.S diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index 1f1e351c5fe2..a6707df4f6c0 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -3,8 +3,11 @@ # Makefile for Kernel-based Virtual Machine module, HYP/nVHE part # -asflags-y := -D__KVM_NVHE_HYPERVISOR__ -ccflags-y := -D__KVM_NVHE_HYPERVISOR__ +asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS +ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS + +hostprogs := gen-hyprel +HOST_EXTRACFLAGS += -I$(objtree)/include obj-y := timer-sr.o sysreg-sr.o debug-sr.o 
switch.o tlb.o hyp-init.o host.o \ hyp-main.o hyp-smp.o psci-relay.o @@ -19,7 +22,7 @@ obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y)) obj-y := kvm_nvhe.o -extra-y := $(hyp-obj) kvm_nvhe.tmp.o hyp.lds +extra-y := $(hyp-obj) kvm_nvhe.tmp.o kvm_nvhe.rel.o hyp.lds hyp-reloc.S hyp-reloc.o # 1) Compile all source files to `.nvhe.o` object files. The file extension # avoids file name clashes for files shared with VHE. @@ -42,11 +45,31 @@ LDFLAGS_kvm_nvhe.tmp.o := -r -T $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE $(call if_changed,ld) -# 4) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'. +# 4) Generate list of hyp code/data positions that need to be relocated at +# runtime. Because the hypervisor is part of the kernel binary, relocations +# produce a kernel VA. We enumerate relocations targeting hyp at build time +# and convert the kernel VAs at those positions to hyp VAs. +$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel + $(call if_changed,hyprel) + +# 5) Compile hyp-reloc.S and link it into the existing partially linked object. +# The object file now contains a section with pointers to hyp positions that +# will contain kernel VAs at runtime. These pointers have relocations on them +# so that they get updated as the hyp object is linked into `vmlinux`. +LDFLAGS_kvm_nvhe.rel.o := -r +$(obj)/kvm_nvhe.rel.o: $(obj)/kvm_nvhe.tmp.o $(obj)/hyp-reloc.o FORCE + $(call if_changed,ld) + +# 6) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'. # Prefixes names of ELF symbols with '__kvm_nvhe_'. -$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.tmp.o FORCE +$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.rel.o FORCE $(call if_changed,hypcopy) +# The HYPREL command calls `gen-hyprel` to generate an assembly file with +# a list of relocations targeting hyp code/data. +quiet_cmd_hyprel = HYPREL $@ + cmd_hyprel = $(obj)/gen-hyprel $< > $@ + # The HYPCOPY command uses `objcopy` to prefix all ELF symbol names # to avoid clashes with VHE code/data. quiet_cmd_hypcopy = HYPCOPY $@ diff --git a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c new file mode 100644 index 000000000000..ead02c6a7628 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c @@ -0,0 +1,438 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 - Google LLC + * Author: David Brazdil <dbrazdil@google.com> + * + * Generates relocation information used by the kernel to convert + * absolute addresses in hyp data from kernel VAs to hyp VAs. + * + * This is necessary because hyp code is linked into the same binary + * as the kernel but executes under different memory mappings. + * If the compiler used absolute addressing, those addresses need to + * be converted before they are used by hyp code. + * + * The input of this program is the relocatable ELF object containing + * all hyp code/data, not yet linked into vmlinux. Hyp section names + * should have been prefixed with `.hyp` at this point. + * + * The output (printed to stdout) is an assembly file containing + * an array of 32-bit integers and static relocations that instruct + * the linker of `vmlinux` to populate the array entries with offsets + * to positions in the kernel binary containing VAs used by hyp code. + * + * Note that dynamic relocations could be used for the same purpose. + * However, those are only generated if CONFIG_RELOCATABLE=y. 
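+ *
+ * Once vmlinux is linked, each 32-bit entry E in the array therefore
+ * satisfies "&E + E == address of a kernel VA used by hyp", which is
+ * what lets the kernel walk the array at boot and rewrite each such
+ * VA as its hyp VA equivalent.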
+ */ + +#include <elf.h> +#include <endian.h> +#include <errno.h> +#include <fcntl.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mman.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> + +#include <generated/autoconf.h> + +#define HYP_SECTION_PREFIX ".hyp" +#define HYP_RELOC_SECTION ".hyp.reloc" +#define HYP_SECTION_SYMBOL_PREFIX "__hyp_section_" + +/* + * AArch64 relocation type constants. + * Included in case these are not defined in the host toolchain. + */ +#ifndef R_AARCH64_ABS64 +#define R_AARCH64_ABS64 257 +#endif +#ifndef R_AARCH64_LD_PREL_LO19 +#define R_AARCH64_LD_PREL_LO19 273 +#endif +#ifndef R_AARCH64_ADR_PREL_LO21 +#define R_AARCH64_ADR_PREL_LO21 274 +#endif +#ifndef R_AARCH64_ADR_PREL_PG_HI21 +#define R_AARCH64_ADR_PREL_PG_HI21 275 +#endif +#ifndef R_AARCH64_ADR_PREL_PG_HI21_NC +#define R_AARCH64_ADR_PREL_PG_HI21_NC 276 +#endif +#ifndef R_AARCH64_ADD_ABS_LO12_NC +#define R_AARCH64_ADD_ABS_LO12_NC 277 +#endif +#ifndef R_AARCH64_LDST8_ABS_LO12_NC +#define R_AARCH64_LDST8_ABS_LO12_NC 278 +#endif +#ifndef R_AARCH64_TSTBR14 +#define R_AARCH64_TSTBR14 279 +#endif +#ifndef R_AARCH64_CONDBR19 +#define R_AARCH64_CONDBR19 280 +#endif +#ifndef R_AARCH64_JUMP26 +#define R_AARCH64_JUMP26 282 +#endif +#ifndef R_AARCH64_CALL26 +#define R_AARCH64_CALL26 283 +#endif +#ifndef R_AARCH64_LDST16_ABS_LO12_NC +#define R_AARCH64_LDST16_ABS_LO12_NC 284 +#endif +#ifndef R_AARCH64_LDST32_ABS_LO12_NC +#define R_AARCH64_LDST32_ABS_LO12_NC 285 +#endif +#ifndef R_AARCH64_LDST64_ABS_LO12_NC +#define R_AARCH64_LDST64_ABS_LO12_NC 286 +#endif +#ifndef R_AARCH64_MOVW_PREL_G0 +#define R_AARCH64_MOVW_PREL_G0 287 +#endif +#ifndef R_AARCH64_MOVW_PREL_G0_NC +#define R_AARCH64_MOVW_PREL_G0_NC 288 +#endif +#ifndef R_AARCH64_MOVW_PREL_G1 +#define R_AARCH64_MOVW_PREL_G1 289 +#endif +#ifndef R_AARCH64_MOVW_PREL_G1_NC +#define R_AARCH64_MOVW_PREL_G1_NC 290 +#endif +#ifndef R_AARCH64_MOVW_PREL_G2 +#define R_AARCH64_MOVW_PREL_G2 291 +#endif +#ifndef R_AARCH64_MOVW_PREL_G2_NC +#define R_AARCH64_MOVW_PREL_G2_NC 292 +#endif +#ifndef R_AARCH64_MOVW_PREL_G3 +#define R_AARCH64_MOVW_PREL_G3 293 +#endif +#ifndef R_AARCH64_LDST128_ABS_LO12_NC +#define R_AARCH64_LDST128_ABS_LO12_NC 299 +#endif + +/* Global state of the processed ELF. */ +static struct { + const char *path; + char *begin; + size_t size; + Elf64_Ehdr *ehdr; + Elf64_Shdr *sh_table; + const char *sh_string; +} elf; + +#if defined(CONFIG_CPU_LITTLE_ENDIAN) + +#define elf16toh(x) le16toh(x) +#define elf32toh(x) le32toh(x) +#define elf64toh(x) le64toh(x) + +#define ELFENDIAN ELFDATA2LSB + +#elif defined(CONFIG_CPU_BIG_ENDIAN) + +#define elf16toh(x) be16toh(x) +#define elf32toh(x) be32toh(x) +#define elf64toh(x) be64toh(x) + +#define ELFENDIAN ELFDATA2MSB + +#else + +#error PDP-endian sadly unsupported... + +#endif + +#define fatal_error(fmt, ...) 
\ + ({ \ + fprintf(stderr, "error: %s: " fmt "\n", \ + elf.path, ## __VA_ARGS__); \ + exit(EXIT_FAILURE); \ + __builtin_unreachable(); \ + }) + +#define fatal_perror(msg) \ + ({ \ + fprintf(stderr, "error: %s: " msg ": %s\n", \ + elf.path, strerror(errno)); \ + exit(EXIT_FAILURE); \ + __builtin_unreachable(); \ + }) + +#define assert_op(lhs, rhs, fmt, op) \ + ({ \ + typeof(lhs) _lhs = (lhs); \ + typeof(rhs) _rhs = (rhs); \ + \ + if (!(_lhs op _rhs)) { \ + fatal_error("assertion " #lhs " " #op " " #rhs \ + " failed (lhs=" fmt ", rhs=" fmt \ + ", line=%d)", _lhs, _rhs, __LINE__); \ + } \ + }) + +#define assert_eq(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, ==) +#define assert_ne(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, !=) +#define assert_lt(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, <) +#define assert_ge(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, >=) + +/* + * Return a pointer of a given type at a given offset from + * the beginning of the ELF file. + */ +#define elf_ptr(type, off) ((type *)(elf.begin + (off))) + +/* Iterate over all sections in the ELF. */ +#define for_each_section(var) \ + for (var = elf.sh_table; var < elf.sh_table + elf16toh(elf.ehdr->e_shnum); ++var) + +/* Iterate over all Elf64_Rela relocations in a given section. */ +#define for_each_rela(shdr, var) \ + for (var = elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset)); \ + var < elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset) + elf64toh(shdr->sh_size)); var++) + +/* True if a string starts with a given prefix. */ +static inline bool starts_with(const char *str, const char *prefix) +{ + return memcmp(str, prefix, strlen(prefix)) == 0; +} + +/* Returns a string containing the name of a given section. */ +static inline const char *section_name(Elf64_Shdr *shdr) +{ + return elf.sh_string + elf32toh(shdr->sh_name); +} + +/* Returns a pointer to the first byte of section data. */ +static inline const char *section_begin(Elf64_Shdr *shdr) +{ + return elf_ptr(char, elf64toh(shdr->sh_offset)); +} + +/* Find a section by its offset from the beginning of the file. */ +static inline Elf64_Shdr *section_by_off(Elf64_Off off) +{ + assert_ne(off, 0UL, "%lu"); + return elf_ptr(Elf64_Shdr, off); +} + +/* Find a section by its index. */ +static inline Elf64_Shdr *section_by_idx(uint16_t idx) +{ + assert_ne(idx, SHN_UNDEF, "%u"); + return &elf.sh_table[idx]; +} + +/* + * Memory-map the given ELF file, perform sanity checks, and + * populate global state. + */ +static void init_elf(const char *path) +{ + int fd, ret; + struct stat stat; + + /* Store path in the global struct for error printing. */ + elf.path = path; + + /* Open the ELF file. */ + fd = open(path, O_RDONLY); + if (fd < 0) + fatal_perror("Could not open ELF file"); + + /* Get status of ELF file to obtain its size. */ + ret = fstat(fd, &stat); + if (ret < 0) { + close(fd); + fatal_perror("Could not get status of ELF file"); + } + + /* mmap() the entire ELF file read-only at an arbitrary address. */ + elf.begin = mmap(0, stat.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + if (elf.begin == MAP_FAILED) { + close(fd); + fatal_perror("Could not mmap ELF file"); + } + + /* mmap() was successful, close the FD. */ + close(fd); + + /* Get pointer to the ELF header. */ + assert_ge(stat.st_size, sizeof(*elf.ehdr), "%lu"); + elf.ehdr = elf_ptr(Elf64_Ehdr, 0); + + /* Check the ELF magic. 
*/ + assert_eq(elf.ehdr->e_ident[EI_MAG0], ELFMAG0, "0x%x"); + assert_eq(elf.ehdr->e_ident[EI_MAG1], ELFMAG1, "0x%x"); + assert_eq(elf.ehdr->e_ident[EI_MAG2], ELFMAG2, "0x%x"); + assert_eq(elf.ehdr->e_ident[EI_MAG3], ELFMAG3, "0x%x"); + + /* Sanity check that this is an ELF64 relocatable object for AArch64. */ + assert_eq(elf.ehdr->e_ident[EI_CLASS], ELFCLASS64, "%u"); + assert_eq(elf.ehdr->e_ident[EI_DATA], ELFENDIAN, "%u"); + assert_eq(elf16toh(elf.ehdr->e_type), ET_REL, "%u"); + assert_eq(elf16toh(elf.ehdr->e_machine), EM_AARCH64, "%u"); + + /* Populate fields of the global struct. */ + elf.sh_table = section_by_off(elf64toh(elf.ehdr->e_shoff)); + elf.sh_string = section_begin(section_by_idx(elf16toh(elf.ehdr->e_shstrndx))); +} + +/* Print the prologue of the output ASM file. */ +static void emit_prologue(void) +{ + printf(".data\n" + ".pushsection " HYP_RELOC_SECTION ", \"a\"\n"); +} + +/* Print ASM statements needed as a prologue to a processed hyp section. */ +static void emit_section_prologue(const char *sh_orig_name) +{ + /* Declare the hyp section symbol. */ + printf(".global %s%s\n", HYP_SECTION_SYMBOL_PREFIX, sh_orig_name); +} + +/* + * Print ASM statements to create a hyp relocation entry for a given + * R_AARCH64_ABS64 relocation. + * + * The linker of vmlinux will populate the position given by `rela` with + * an absolute 64-bit kernel VA. If the kernel is relocatable, it will + * also generate a dynamic relocation entry so that the kernel can shift + * the address at runtime for KASLR. + * + * Emit a 32-bit offset from the current address to the position given + * by `rela`. This way the kernel can iterate over all kernel VAs used + * by hyp at runtime and convert them to hyp VAs. However, that offset + * will not be known until linking of `vmlinux`, so emit a PREL32 + * relocation referencing a symbol that the hyp linker script put at + * the beginning of the relocated section + the offset from `rela`. + */ +static void emit_rela_abs64(Elf64_Rela *rela, const char *sh_orig_name) +{ + /* Offset of this reloc from the beginning of HYP_RELOC_SECTION. */ + static size_t reloc_offset; + + /* Create storage for the 32-bit offset. */ + printf(".word 0\n"); + + /* + * Create a PREL32 relocation which instructs the linker of `vmlinux` + * to insert offset to position <base> + <offset>, where <base> is + * a symbol at the beginning of the relocated section, and <offset> + * is `rela->r_offset`. + */ + printf(".reloc %lu, R_AARCH64_PREL32, %s%s + 0x%lx\n", + reloc_offset, HYP_SECTION_SYMBOL_PREFIX, sh_orig_name, + elf64toh(rela->r_offset)); + + reloc_offset += 4; +} + +/* Print the epilogue of the output ASM file. */ +static void emit_epilogue(void) +{ + printf(".popsection\n"); +} + +/* + * Iterate over all RELA relocations in a given section and emit + * hyp relocation data for all absolute addresses in hyp code/data. + * + * Static relocations that generate PC-relative-addressing are ignored. + * Failure is reported for unexpected relocation types. + */ +static void emit_rela_section(Elf64_Shdr *sh_rela) +{ + Elf64_Shdr *sh_orig = &elf.sh_table[elf32toh(sh_rela->sh_info)]; + const char *sh_orig_name = section_name(sh_orig); + Elf64_Rela *rela; + + /* Skip all non-hyp sections. */ + if (!starts_with(sh_orig_name, HYP_SECTION_PREFIX)) + return; + + emit_section_prologue(sh_orig_name); + + for_each_rela(sh_rela, rela) { + uint32_t type = (uint32_t)elf64toh(rela->r_info); + + /* Check that rela points inside the relocated section. 
*/
+		assert_lt(elf64toh(rela->r_offset), elf64toh(sh_orig->sh_size), "0x%lx");
+
+		switch (type) {
+		/*
+		 * Data relocations to generate absolute addressing.
+		 * Emit a hyp relocation.
+		 */
+		case R_AARCH64_ABS64:
+			emit_rela_abs64(rela, sh_orig_name);
+			break;
+		/* Allow relocations to generate PC-relative addressing. */
+		case R_AARCH64_LD_PREL_LO19:
+		case R_AARCH64_ADR_PREL_LO21:
+		case R_AARCH64_ADR_PREL_PG_HI21:
+		case R_AARCH64_ADR_PREL_PG_HI21_NC:
+		case R_AARCH64_ADD_ABS_LO12_NC:
+		case R_AARCH64_LDST8_ABS_LO12_NC:
+		case R_AARCH64_LDST16_ABS_LO12_NC:
+		case R_AARCH64_LDST32_ABS_LO12_NC:
+		case R_AARCH64_LDST64_ABS_LO12_NC:
+		case R_AARCH64_LDST128_ABS_LO12_NC:
+			break;
+		/* Allow relative relocations for control-flow instructions. */
+		case R_AARCH64_TSTBR14:
+		case R_AARCH64_CONDBR19:
+		case R_AARCH64_JUMP26:
+		case R_AARCH64_CALL26:
+			break;
+		/* Allow group relocations to create PC-relative offset inline. */
+		case R_AARCH64_MOVW_PREL_G0:
+		case R_AARCH64_MOVW_PREL_G0_NC:
+		case R_AARCH64_MOVW_PREL_G1:
+		case R_AARCH64_MOVW_PREL_G1_NC:
+		case R_AARCH64_MOVW_PREL_G2:
+		case R_AARCH64_MOVW_PREL_G2_NC:
+		case R_AARCH64_MOVW_PREL_G3:
+			break;
+		default:
+			fatal_error("Unexpected RELA type %u", type);
+		}
+	}
+}
+
+/* Iterate over all sections and emit hyp relocation data for RELA sections. */
+static void emit_all_relocs(void)
+{
+	Elf64_Shdr *shdr;
+
+	for_each_section(shdr) {
+		switch (elf32toh(shdr->sh_type)) {
+		case SHT_REL:
+			fatal_error("Unexpected SHT_REL section \"%s\"",
+				section_name(shdr));
+		case SHT_RELA:
+			emit_rela_section(shdr);
+			break;
+		}
+	}
+}
+
+int main(int argc, const char **argv)
+{
+	if (argc != 2) {
+		fprintf(stderr, "Usage: %s <elf_input>\n", argv[0]);
+		return EXIT_FAILURE;
+	}
+
+	init_elf(argv[1]);
+
+	emit_prologue();
+	emit_all_relocs();
+	emit_epilogue();
+
+	return EXIT_SUCCESS;
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index a820dfdc9c25..6585a7cbbc56 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -74,27 +74,28 @@ SYM_FUNC_END(__host_enter)
 * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
-	/* Load the format arguments into x1-7 */
-	mov	x6, x3
-	get_vcpu_ptr x7, x3
-
-	mrs	x3, esr_el2
-	mrs	x4, far_el2
-	mrs	x5, hpfar_el2
-
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
+	hyp_kimg_va lr, x6
	msr	elr_el2, lr

-	/*
-	 * Set the panic format string and enter the host, conditionally
-	 * restoring the host context.
-	 */
+	/* Set the panic format string. Use the now-free LR as scratch. */
+	ldr	lr, =__hyp_panic_string
+	hyp_kimg_va lr, x6
+
+	/* Load the format arguments into x1-7. */
+	mov	x6, x3
+	get_vcpu_ptr x7, x3
+	mrs	x3, esr_el2
+	mrs	x4, far_el2
+	mrs	x5, hpfar_el2
+
+	/* Enter the host, conditionally restoring the host context. */
	cmp	x0, xzr
-	ldr	x0, =__hyp_panic_string
+	mov	x0, lr
	b.eq	__host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)
@@ -124,7 +125,7 @@ SYM_FUNC_END(__hyp_do_panic)
 * Preserve x0-x4, which may contain stub parameters.
*/ ldr x5, =__kvm_handle_stub_hvc - kimg_pa x5, x6 + hyp_pa x5, x6 br x5 .L__vect_end\@: .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index b17bf19217f1..c631e29fb001 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -18,7 +18,7 @@ #include <asm/virt.h> .text - .pushsection .hyp.idmap.text, "ax" + .pushsection .idmap.text, "ax" .align 11 @@ -57,17 +57,10 @@ __do_hyp_init: cmp x0, #HVC_STUB_HCALL_NR b.lo __kvm_handle_stub_hvc - // We only actively check bits [24:31], and everything - // else has to be zero, which we check at build time. -#if (KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) & 0xFFFFFFFF00FFFFFF) -#error Unexpected __KVM_HOST_SMCCC_FUNC___kvm_hyp_init value -#endif + mov x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) + cmp x0, x3 + b.eq 1f - ror x0, x0, #24 - eor x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 24) & 0xF) - ror x0, x0, #4 - eor x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 28) & 0xF) - cbz x0, 1f mov x0, #SMCCC_RET_NOT_SUPPORTED eret @@ -141,7 +134,6 @@ alternative_else_nop_endif /* Set the host vector */ ldr x0, =__kvm_hyp_host_vector - kimg_hyp_va x0, x1 msr vbar_el2, x0 ret @@ -191,7 +183,7 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu) 2: msr SPsel, #1 // We want to use SP_EL{1,2} /* Initialize EL2 CPU state to sane values. */ - init_el2_state nvhe // Clobbers x0..x2 + init_el2_state // Clobbers x0..x2 /* Enable MMU, set vectors and stack. */ mov x0, x28 @@ -200,7 +192,6 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu) /* Leave idmap. */ mov x0, x29 ldr x1, =kvm_host_psci_cpu_entry - kimg_hyp_va x1, x2 br x1 SYM_CODE_END(__kvm_hyp_init_cpu) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index a906f9e2ff34..f012f8665ecc 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -108,9 +108,9 @@ static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt) typedef void (*hcall_t)(struct kvm_cpu_context *); -#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = kimg_fn_ptr(handle_##x) +#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x -static const hcall_t *host_hcall[] = { +static const hcall_t host_hcall[] = { HANDLE_FUNC(__kvm_vcpu_run), HANDLE_FUNC(__kvm_flush_vm_context), HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa), @@ -130,7 +130,6 @@ static const hcall_t *host_hcall[] = { static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(unsigned long, id, host_ctxt, 0); - const hcall_t *kfn; hcall_t hfn; id -= KVM_HOST_SMCCC_ID(0); @@ -138,13 +137,11 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) if (unlikely(id >= ARRAY_SIZE(host_hcall))) goto inval; - kfn = host_hcall[id]; - if (unlikely(!kfn)) + hfn = host_hcall[id]; + if (unlikely(!hfn)) goto inval; cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS; - - hfn = kimg_fn_hyp_va(kfn); hfn(host_ctxt); return; diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c index 2997aa156d8e..879559057dee 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c @@ -33,8 +33,8 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu) if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base)) hyp_panic(); - cpu_base_array = (unsigned long *)hyp_symbol_addr(kvm_arm_hyp_percpu_base); + cpu_base_array = (unsigned long *)&kvm_arm_hyp_percpu_base; this_cpu_base = kern_hyp_va(cpu_base_array[cpu]); - elf_base = (unsigned 
long)hyp_symbol_addr(__per_cpu_start); + elf_base = (unsigned long)&__per_cpu_start; return this_cpu_base - elf_base; } diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S index 1206d0d754d5..cd119d82d8e3 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S @@ -12,14 +12,17 @@ #include <asm/memory.h> SECTIONS { + HYP_SECTION(.idmap.text) HYP_SECTION(.text) + HYP_SECTION(.data..ro_after_init) + HYP_SECTION(.rodata) + /* * .hyp..data..percpu needs to be page aligned to maintain the same * alignment for when linking into vmlinux. */ . = ALIGN(PAGE_SIZE); - HYP_SECTION_NAME(.data..percpu) : { + BEGIN_HYP_SECTION(.data..percpu) PERCPU_INPUT(L1_CACHE_BYTES) - } - HYP_SECTION(.data..ro_after_init) + END_HYP_SECTION } diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c index 8e7128cb7667..63de71c0481e 100644 --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c @@ -128,8 +128,8 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt) if (cpu_id == INVALID_CPU_ID) return PSCI_RET_INVALID_PARAMS; - boot_args = per_cpu_ptr(hyp_symbol_addr(cpu_on_args), cpu_id); - init_params = per_cpu_ptr(hyp_symbol_addr(kvm_init_params), cpu_id); + boot_args = per_cpu_ptr(&cpu_on_args, cpu_id); + init_params = per_cpu_ptr(&kvm_init_params, cpu_id); /* Check if the target CPU is already being booted. */ if (!try_acquire_boot_args(boot_args)) @@ -140,7 +140,7 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt) wmb(); ret = psci_call(func_id, mpidr, - __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_entry)), + __hyp_pa(&kvm_hyp_cpu_entry), __hyp_pa(init_params)); /* If successful, the lock will be released by the target CPU. */ @@ -159,8 +159,8 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) struct psci_boot_args *boot_args; struct kvm_nvhe_init_params *init_params; - boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args)); - init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params)); + boot_args = this_cpu_ptr(&suspend_args); + init_params = this_cpu_ptr(&kvm_init_params); /* * No need to acquire a lock before writing to boot_args because a core @@ -174,7 +174,7 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) * point if it is a deep sleep state. */ return psci_call(func_id, power_state, - __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)), + __hyp_pa(&kvm_hyp_cpu_resume), __hyp_pa(init_params)); } @@ -186,8 +186,8 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) struct psci_boot_args *boot_args; struct kvm_nvhe_init_params *init_params; - boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args)); - init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params)); + boot_args = this_cpu_ptr(&suspend_args); + init_params = this_cpu_ptr(&kvm_init_params); /* * No need to acquire a lock before writing to boot_args because a core @@ -198,7 +198,7 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) /* Will only return on error. 
*/ return psci_call(func_id, - __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)), + __hyp_pa(&kvm_hyp_cpu_resume), __hyp_pa(init_params), 0); } @@ -207,12 +207,12 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on) struct psci_boot_args *boot_args; struct kvm_cpu_context *host_ctxt; - host_ctxt = &this_cpu_ptr(hyp_symbol_addr(kvm_host_data))->host_ctxt; + host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; if (is_cpu_on) - boot_args = this_cpu_ptr(hyp_symbol_addr(cpu_on_args)); + boot_args = this_cpu_ptr(&cpu_on_args); else - boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args)); + boot_args = this_cpu_ptr(&suspend_args); cpu_reg(host_ctxt, 0) = boot_args->r0; write_sysreg_el2(boot_args->pc, SYS_ELR); diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index bdf8e55ed308..4d177ce1d536 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -45,6 +45,10 @@ #define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54) +#define KVM_PTE_LEAF_ATTR_S2_PERMS (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \ + KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \ + KVM_PTE_LEAF_ATTR_HI_S2_XN) + struct kvm_pgtable_walk_data { struct kvm_pgtable *pgt; struct kvm_pgtable_walker *walker; @@ -170,10 +174,9 @@ static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp) smp_store_release(ptep, pte); } -static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr, - u32 level) +static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level) { - kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(pa); + kvm_pte_t pte = kvm_phys_to_pte(pa); u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE : KVM_PTE_TYPE_BLOCK; @@ -181,12 +184,7 @@ static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr, pte |= FIELD_PREP(KVM_PTE_TYPE, type); pte |= KVM_PTE_VALID; - /* Tolerate KVM recreating the exact same mapping. */ - if (kvm_pte_valid(old)) - return old == pte; - - smp_store_release(ptep, pte); - return true; + return pte; } static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr, @@ -341,12 +339,17 @@ static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot, static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct hyp_map_data *data) { + kvm_pte_t new, old = *ptep; u64 granule = kvm_granule_size(level), phys = data->phys; if (!kvm_block_mapping_supported(addr, end, phys, level)) return false; - WARN_ON(!kvm_set_valid_leaf_pte(ptep, phys, data->attr, level)); + /* Tolerate KVM recreating the exact same mapping */ + new = kvm_init_valid_leaf_pte(phys, data->attr, level); + if (old != new && !WARN_ON(kvm_pte_valid(old))) + smp_store_release(ptep, new); + data->phys += granule; return true; } @@ -461,34 +464,41 @@ static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot, return 0; } -static bool stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level, - kvm_pte_t *ptep, - struct stage2_map_data *data) +static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level, + kvm_pte_t *ptep, + struct stage2_map_data *data) { + kvm_pte_t new, old = *ptep; u64 granule = kvm_granule_size(level), phys = data->phys; + struct page *page = virt_to_page(ptep); if (!kvm_block_mapping_supported(addr, end, phys, level)) - return false; + return -E2BIG; - /* - * If the PTE was already valid, drop the refcount on the table - * early, as it will be bumped-up again in stage2_map_walk_leaf(). - * This ensures that the refcount stays constant across a valid to - * valid PTE update. 
- */ - if (kvm_pte_valid(*ptep)) - put_page(virt_to_page(ptep)); + new = kvm_init_valid_leaf_pte(phys, data->attr, level); + if (kvm_pte_valid(old)) { + /* + * Skip updating the PTE if we are trying to recreate the exact + * same mapping or only change the access permissions. Instead, + * the vCPU will exit one more time from guest if still needed + * and then go through the path of relaxing permissions. + */ + if (!((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS))) + return -EAGAIN; - if (kvm_set_valid_leaf_pte(ptep, phys, data->attr, level)) - goto out; + /* + * There's an existing different valid leaf entry, so perform + * break-before-make. + */ + kvm_set_invalid_pte(ptep); + kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level); + put_page(page); + } - /* There's an existing valid leaf entry, so perform break-before-make */ - kvm_set_invalid_pte(ptep); - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level); - kvm_set_valid_leaf_pte(ptep, phys, data->attr, level); -out: + smp_store_release(ptep, new); + get_page(page); data->phys += granule; - return true; + return 0; } static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level, @@ -516,6 +526,7 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level, static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct stage2_map_data *data) { + int ret; kvm_pte_t *childp, pte = *ptep; struct page *page = virt_to_page(ptep); @@ -526,8 +537,9 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, return 0; } - if (stage2_map_walker_try_leaf(addr, end, level, ptep, data)) - goto out_get_page; + ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data); + if (ret != -E2BIG) + return ret; if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1)) return -EINVAL; @@ -551,9 +563,8 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, } kvm_set_table_pte(ptep, childp); - -out_get_page: get_page(page); + return 0; } diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c index 8f0585640241..87a54375bd6e 100644 --- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c +++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c @@ -64,7 +64,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) } rd = kvm_vcpu_dabt_get_rd(vcpu); - addr = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va; + addr = kvm_vgic_global_state.vcpu_hyp_va; addr += fault_ipa - vgic->vgic_cpu_base; if (kvm_vcpu_dabt_iswrite(vcpu)) { diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 25ea4ecb6449..ead21b98b620 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -71,6 +71,12 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) if (gpa != GPA_INVALID) val = gpa; break; + case ARM_SMCCC_TRNG_VERSION: + case ARM_SMCCC_TRNG_FEATURES: + case ARM_SMCCC_TRNG_GET_UUID: + case ARM_SMCCC_TRNG_RND32: + case ARM_SMCCC_TRNG_RND64: + return kvm_trng_call(vcpu); default: return kvm_psci_call(vcpu); } diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 7d2257cc5438..77cb2d28f2a4 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -879,11 +879,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (vma_pagesize == PAGE_SIZE && !force_pte) vma_pagesize = transparent_hugepage_adjust(memslot, hva, &pfn, &fault_ipa); - if (writable) { + if (writable) prot |= KVM_PGTABLE_PROT_W; - kvm_set_pfn_dirty(pfn); - mark_page_dirty(kvm, gfn); - } if (fault_status != FSC_PERM && 
!device) clean_dcache_guest_page(pfn, vma_pagesize); @@ -911,11 +908,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, memcache); } + /* Mark the page dirty only if the fault is handled successfully */ + if (writable && !ret) { + kvm_set_pfn_dirty(pfn); + mark_page_dirty(kvm, gfn); + } + out_unlock: spin_unlock(&kvm->mmu_lock); kvm_set_pfn_accessed(pfn); kvm_release_pfn_clean(pfn); - return ret; + return ret != -EAGAIN ? ret : 0; } /* Resolve the access fault by making the page young again. */ diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 247422ac78a9..e9ec08b0b070 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -23,11 +23,11 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc); static u32 kvm_pmu_event_mask(struct kvm *kvm) { switch (kvm->arch.pmuver) { - case 1: /* ARMv8.0 */ + case ID_AA64DFR0_PMUVER_8_0: return GENMASK(9, 0); - case 4: /* ARMv8.1 */ - case 5: /* ARMv8.4 */ - case 6: /* ARMv8.5 */ + case ID_AA64DFR0_PMUVER_8_1: + case ID_AA64DFR0_PMUVER_8_4: + case ID_AA64DFR0_PMUVER_8_5: return GENMASK(15, 0); default: /* Shouldn't be here, just for sanity */ WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver); @@ -795,6 +795,12 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) base = 0; } else { val = read_sysreg(pmceid1_el0); + /* + * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled + * as RAZ + */ + if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4) + val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); base = 32; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 7c4f79532406..4f2f1e3145de 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -9,6 +9,7 @@ * Christoffer Dall <c.dall@virtualopensystems.com> */ +#include <linux/bitfield.h> #include <linux/bsearch.h> #include <linux/kvm_host.h> #include <linux/mm.h> @@ -700,14 +701,18 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { - u64 pmceid; + u64 pmceid, mask, shift; BUG_ON(p->is_write); if (pmu_access_el0_disabled(vcpu)) return false; + get_access_mask(r, &mask, &shift); + pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1)); + pmceid &= mask; + pmceid >>= shift; p->regval = pmceid; @@ -1021,6 +1026,8 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu, return true; } +#define FEATURE(x) (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT)) + /* Read a sanitised cpufeature ID register by sys_reg_desc */ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r, bool raz) @@ -1028,36 +1035,41 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, u32 id = reg_to_encoding(r); u64 val = raz ? 
0 : read_sanitised_ftr_reg(id); - if (id == SYS_ID_AA64PFR0_EL1) { + switch (id) { + case SYS_ID_AA64PFR0_EL1: if (!vcpu_has_sve(vcpu)) - val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); - val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT); - val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT); - val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT); - val &= ~(0xfUL << ID_AA64PFR0_CSV3_SHIFT); - val |= ((u64)vcpu->kvm->arch.pfr0_csv3 << ID_AA64PFR0_CSV3_SHIFT); - } else if (id == SYS_ID_AA64PFR1_EL1) { - val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT); - } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) { - val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) | - (0xfUL << ID_AA64ISAR1_API_SHIFT) | - (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | - (0xfUL << ID_AA64ISAR1_GPI_SHIFT)); - } else if (id == SYS_ID_AA64DFR0_EL1) { - u64 cap = 0; - - /* Limit guests to PMUv3 for ARMv8.1 */ - if (kvm_vcpu_has_pmu(vcpu)) - cap = ID_AA64DFR0_PMUVER_8_1; - + val &= ~FEATURE(ID_AA64PFR0_SVE); + val &= ~FEATURE(ID_AA64PFR0_AMU); + val &= ~FEATURE(ID_AA64PFR0_CSV2); + val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2); + val &= ~FEATURE(ID_AA64PFR0_CSV3); + val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3); + break; + case SYS_ID_AA64PFR1_EL1: + val &= ~FEATURE(ID_AA64PFR1_MTE); + break; + case SYS_ID_AA64ISAR1_EL1: + if (!vcpu_has_ptrauth(vcpu)) + val &= ~(FEATURE(ID_AA64ISAR1_APA) | + FEATURE(ID_AA64ISAR1_API) | + FEATURE(ID_AA64ISAR1_GPA) | + FEATURE(ID_AA64ISAR1_GPI)); + break; + case SYS_ID_AA64DFR0_EL1: + /* Limit debug to ARMv8.0 */ + val &= ~FEATURE(ID_AA64DFR0_DEBUGVER); + val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6); + /* Limit guests to PMUv3 for ARMv8.4 */ val = cpuid_feature_cap_perfmon_field(val, - ID_AA64DFR0_PMUVER_SHIFT, - cap); - } else if (id == SYS_ID_DFR0_EL1) { - /* Limit guests to PMUv3 for ARMv8.1 */ + ID_AA64DFR0_PMUVER_SHIFT, + kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0); + break; + case SYS_ID_DFR0_EL1: + /* Limit guests to PMUv3 for ARMv8.4 */ val = cpuid_feature_cap_perfmon_field(val, - ID_DFR0_PERFMON_SHIFT, - ID_DFR0_PERFMON_8_1); + ID_DFR0_PERFMON_SHIFT, + kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0); + break; } return val; @@ -1493,6 +1505,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { .access = access_pminten, .reg = PMINTENSET_EL1 }, { PMU_SYS_REG(SYS_PMINTENCLR_EL1), .access = access_pminten, .reg = PMINTENSET_EL1 }, + { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi }, { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, @@ -1720,7 +1733,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 }, }; -static bool trap_dbgidr(struct kvm_vcpu *vcpu, +static bool trap_dbgdidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { @@ -1734,7 +1747,7 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu, p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) - | (6 << 16) | (el3 << 14) | (el3 << 12)); + | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); return true; } } @@ -1767,8 +1780,8 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu, * guest. Revisit this one day, would this principle change. 
*/ static const struct sys_reg_desc cp14_regs[] = { - /* DBGIDR */ - { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr }, + /* DBGDIDR */ + { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr }, /* DBGDTRRXext */ { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, @@ -1918,8 +1931,8 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs }, { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc }, { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, - { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, - { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, + { AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, + { AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper }, { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, @@ -1927,6 +1940,10 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs }, + { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid }, + { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid }, + /* PMMIR */ + { Op1( 0), CRn( 9), CRm(14), Op2( 6), trap_raz_wi }, /* PRRR/MAIR0 */ { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 }, diff --git a/arch/arm64/kvm/trng.c b/arch/arm64/kvm/trng.c new file mode 100644 index 000000000000..99bdd7103c9c --- /dev/null +++ b/arch/arm64/kvm/trng.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2020 Arm Ltd. + +#include <linux/arm-smccc.h> +#include <linux/kvm_host.h> + +#include <asm/kvm_emulate.h> + +#include <kvm/arm_hypercalls.h> + +#define ARM_SMCCC_TRNG_VERSION_1_0 0x10000UL + +/* Those values are deliberately separate from the generic SMCCC definitions. 
*/ +#define TRNG_SUCCESS 0UL +#define TRNG_NOT_SUPPORTED ((unsigned long)-1) +#define TRNG_INVALID_PARAMETER ((unsigned long)-2) +#define TRNG_NO_ENTROPY ((unsigned long)-3) + +#define TRNG_MAX_BITS64 192 + +static const uuid_t arm_smc_trng_uuid __aligned(4) = UUID_INIT( + 0x0d21e000, 0x4384, 0x11eb, 0x80, 0x70, 0x52, 0x44, 0x55, 0x4e, 0x5a, 0x4c); + +static int kvm_trng_do_rnd(struct kvm_vcpu *vcpu, int size) +{ + DECLARE_BITMAP(bits, TRNG_MAX_BITS64); + u32 num_bits = smccc_get_arg1(vcpu); + int i; + + if (num_bits > 3 * size) { + smccc_set_retval(vcpu, TRNG_INVALID_PARAMETER, 0, 0, 0); + return 1; + } + + /* get as many bits as we need to fulfil the request */ + for (i = 0; i < DIV_ROUND_UP(num_bits, BITS_PER_LONG); i++) + bits[i] = get_random_long(); + + bitmap_clear(bits, num_bits, TRNG_MAX_BITS64 - num_bits); + + if (size == 32) + smccc_set_retval(vcpu, TRNG_SUCCESS, lower_32_bits(bits[1]), + upper_32_bits(bits[0]), lower_32_bits(bits[0])); + else + smccc_set_retval(vcpu, TRNG_SUCCESS, bits[2], bits[1], bits[0]); + + memzero_explicit(bits, sizeof(bits)); + return 1; +} + +int kvm_trng_call(struct kvm_vcpu *vcpu) +{ + const __le32 *u = (__le32 *)arm_smc_trng_uuid.b; + u32 func_id = smccc_get_function(vcpu); + unsigned long val = TRNG_NOT_SUPPORTED; + int size = 64; + + switch (func_id) { + case ARM_SMCCC_TRNG_VERSION: + val = ARM_SMCCC_TRNG_VERSION_1_0; + break; + case ARM_SMCCC_TRNG_FEATURES: + switch (smccc_get_arg1(vcpu)) { + case ARM_SMCCC_TRNG_VERSION: + case ARM_SMCCC_TRNG_FEATURES: + case ARM_SMCCC_TRNG_GET_UUID: + case ARM_SMCCC_TRNG_RND32: + case ARM_SMCCC_TRNG_RND64: + val = TRNG_SUCCESS; + } + break; + case ARM_SMCCC_TRNG_GET_UUID: + smccc_set_retval(vcpu, le32_to_cpu(u[0]), le32_to_cpu(u[1]), + le32_to_cpu(u[2]), le32_to_cpu(u[3])); + return 1; + case ARM_SMCCC_TRNG_RND32: + size = 32; + fallthrough; + case ARM_SMCCC_TRNG_RND64: + return kvm_trng_do_rnd(vcpu, size); + } + + smccc_set_retval(vcpu, val, 0, 0, 0); + return 1; +} diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c index 70fcd6a12fe1..978301392d67 100644 --- a/arch/arm64/kvm/va_layout.c +++ b/arch/arm64/kvm/va_layout.c @@ -81,6 +81,34 @@ __init void kvm_compute_layout(void) init_hyp_physvirt_offset(); } +/* + * The .hyp.reloc ELF section contains a list of kimg positions that + * contains kimg VAs but will be accessed only in hyp execution context. + * Convert them to hyp VAs. See gen-hyprel.c for more details. + */ +__init void kvm_apply_hyp_relocations(void) +{ + int32_t *rel; + int32_t *begin = (int32_t *)__hyp_reloc_begin; + int32_t *end = (int32_t *)__hyp_reloc_end; + + for (rel = begin; rel < end; ++rel) { + uintptr_t *ptr, kimg_va; + + /* + * Each entry contains a 32-bit relative offset from itself + * to a kimg VA position. + */ + ptr = (uintptr_t *)lm_alias((char *)rel + *rel); + + /* Read the kimg VA value at the relocation address. */ + kimg_va = *ptr; + + /* Convert to hyp VA and store back to the relocation address. 
*/ + *ptr = __early_kern_hyp_va((uintptr_t)lm_alias(kimg_va)); + } +} + static u32 compute_instruction(int n, u32 rd, u32 rn) { u32 insn = AARCH64_BREAK_FAULT; @@ -255,12 +283,6 @@ static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst *updptr++ = cpu_to_le32(insn); } -void kvm_update_kimg_phys_offset(struct alt_instr *alt, - __le32 *origptr, __le32 *updptr, int nr_inst) -{ - generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst); -} - void kvm_get_kimage_voffset(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst) { diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S index 9e1a12e10053..351537c12f36 100644 --- a/arch/arm64/lib/mte.S +++ b/arch/arm64/lib/mte.S @@ -149,19 +149,3 @@ SYM_FUNC_START(mte_restore_page_tags) ret SYM_FUNC_END(mte_restore_page_tags) - -/* - * Assign allocation tags for a region of memory based on the pointer tag - * x0 - source pointer - * x1 - size - * - * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and - * size must be non-zero and MTE_GRANULE_SIZE aligned. - */ -SYM_FUNC_START(mte_assign_mem_tag_range) -1: stg x0, [x0] - add x0, x0, #MTE_GRANULE_SIZE - subs x1, x1, #MTE_GRANULE_SIZE - b.gt 1b - ret -SYM_FUNC_END(mte_assign_mem_tag_range) diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 5ead3c3de3b6..f188c9092696 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_PTDUMP_CORE) += ptdump.o obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o -obj-$(CONFIG_NUMA) += numa.o +obj-$(CONFIG_TRANS_TABLE) += trans_pgd.o obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o obj-$(CONFIG_ARM64_MTE) += mteswap.o KASAN_SANITIZE_physaddr.o += n diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 35d75c60e2b8..f37d4e3830b7 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -10,6 +10,7 @@ #include <linux/acpi.h> #include <linux/bitfield.h> #include <linux/extable.h> +#include <linux/kfence.h> #include <linux/signal.h> #include <linux/mm.h> #include <linux/hardirq.h> @@ -302,12 +303,24 @@ static void die_kernel_fault(const char *msg, unsigned long addr, static void report_tag_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - bool is_write = ((esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT) != 0; + static bool reported; + bool is_write; + + if (READ_ONCE(reported)) + return; + + /* + * This is used for KASAN tests and assumes that no MTE faults + * happened before running the tests. + */ + if (mte_report_once()) + WRITE_ONCE(reported, true); /* * SAS bits aren't set for all faults reported in EL1, so we can't * find out access size. */ + is_write = !!(esr & ESR_ELx_WNR); kasan_report(addr, 0, is_write, regs->pc); } #else @@ -319,12 +332,8 @@ static inline void report_tag_fault(unsigned long addr, unsigned int esr, static void do_tag_recovery(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - static bool reported; - if (!READ_ONCE(reported)) { - report_tag_fault(addr, esr, regs); - WRITE_ONCE(reported, true); - } + report_tag_fault(addr, esr, regs); /* * Disable MTE Tag Checking on the local CPU for the current EL. 
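
The fixup loop above is easier to follow in isolation. Below is a minimal stand-alone sketch of the same self-relative scheme used by kvm_apply_hyp_relocations(); the helper names and the to_hyp_va callback are illustrative only, not kernel API:

#include <stdint.h>

/* Each .hyp.reloc-style entry stores a signed 32-bit offset from the entry
 * itself to the word that needs fixing up, so the table holds no absolute
 * addresses and is itself position-independent. */
static uintptr_t *reloc_target(const int32_t *rel)
{
	return (uintptr_t *)((char *)rel + *rel);
}

/* Walk the table, read the kimg VA stored at each target and overwrite it
 * with the converted hyp VA, mirroring the kernel loop (minus lm_alias()). */
static void apply_relocs(int32_t *begin, int32_t *end,
			 uintptr_t (*to_hyp_va)(uintptr_t))
{
	int32_t *rel;

	for (rel = begin; rel < end; ++rel) {
		uintptr_t *ptr = reloc_target(rel);

		*ptr = to_hyp_va(*ptr);
	}
}
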
@@ -381,6 +390,9 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, } else if (addr < PAGE_SIZE) { msg = "NULL pointer dereference"; } else { + if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs)) + return; + msg = "paging request"; } @@ -564,7 +576,7 @@ retry: mmap_read_lock(mm); } else { /* - * The above down_read_trylock() might have succeeded in which + * The above mmap_read_trylock() might have succeeded in which * case, we'll have missed the might_sleep() from down_read(). */ might_sleep(); @@ -875,44 +887,12 @@ static void debug_exception_exit(struct pt_regs *regs) } NOKPROBE_SYMBOL(debug_exception_exit); -#ifdef CONFIG_ARM64_ERRATUM_1463225 -DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); - -static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) -{ - if (user_mode(regs)) - return 0; - - if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa)) - return 0; - - /* - * We've taken a dummy step exception from the kernel to ensure - * that interrupts are re-enabled on the syscall path. Return back - * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions - * masked so that we can safely restore the mdscr and get on with - * handling the syscall. - */ - regs->pstate |= PSR_D_BIT; - return 1; -} -#else -static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) -{ - return 0; -} -#endif /* CONFIG_ARM64_ERRATUM_1463225 */ -NOKPROBE_SYMBOL(cortex_a76_erratum_1463225_debug_handler); - void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, struct pt_regs *regs) { const struct fault_info *inf = esr_to_debug_fault_info(esr); unsigned long pc = instruction_pointer(regs); - if (cortex_a76_erratum_1463225_debug_handler(regs)) - return; - debug_exception_enter(regs); if (user_mode(regs) && !is_ttbr0_addr(pc)) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 709d98fea90c..0ace5e68efba 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -416,10 +416,10 @@ void __init bootmem_init(void) max_pfn = max_low_pfn = max; min_low_pfn = min; - arm64_numa_init(); + arch_numa_init(); /* - * must be done after arm64_numa_init() which calls numa_init() to + * must be done after arch_numa_init() which calls numa_init() to * initialize node_online_map that gets used in hugetlb_cma_reserve() * while allocating required CMA size across online nodes. */ diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 07937b49cb88..a38f54cd638c 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -5,20 +5,11 @@ * Copyright (C) 2012 ARM Ltd. */ -#include <linux/elf.h> -#include <linux/fs.h> -#include <linux/memblock.h> -#include <linux/mm.h> -#include <linux/mman.h> -#include <linux/export.h> -#include <linux/shm.h> -#include <linux/sched/signal.h> -#include <linux/sched/mm.h> #include <linux/io.h> -#include <linux/personality.h> -#include <linux/random.h> +#include <linux/memblock.h> +#include <linux/types.h> -#include <asm/cputype.h> +#include <asm/page.h> /* * You really shouldn't be using read() or write() on /dev/mem. 
This might go diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index ae0c3d023824..3802cfbdd20d 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -628,7 +628,7 @@ static bool arm64_early_this_cpu_has_bti(void) if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) return false; - pfr1 = read_sysreg_s(SYS_ID_AA64PFR1_EL1); + pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1); return cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_BT_SHIFT); } @@ -1094,6 +1094,7 @@ static void free_empty_tables(unsigned long addr, unsigned long end, int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { + WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); return vmemmap_populate_basepages(start, end, node, altmap); } #else /* !ARM64_SWAPPER_USES_SECTION_MAPS */ @@ -1107,6 +1108,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, pud_t *pudp; pmd_t *pmdp; + WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); do { next = pmd_addr_end(addr, end); @@ -1153,7 +1155,7 @@ void vmemmap_free(unsigned long start, unsigned long end, } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ -static inline pud_t * fixmap_pud(unsigned long addr) +static inline pud_t *fixmap_pud(unsigned long addr) { pgd_t *pgdp = pgd_offset_k(addr); p4d_t *p4dp = p4d_offset(pgdp, addr); @@ -1164,7 +1166,7 @@ static inline pud_t * fixmap_pud(unsigned long addr) return pud_offset_kimg(p4dp, addr); } -static inline pmd_t * fixmap_pmd(unsigned long addr) +static inline pmd_t *fixmap_pmd(unsigned long addr) { pud_t *pudp = fixmap_pud(addr); pud_t pud = READ_ONCE(*pudp); @@ -1174,7 +1176,7 @@ static inline pmd_t * fixmap_pmd(unsigned long addr) return pmd_offset_kimg(pudp, addr); } -static inline pte_t * fixmap_pte(unsigned long addr) +static inline pte_t *fixmap_pte(unsigned long addr) { return &bm_pte[pte_index(addr)]; } @@ -1442,16 +1444,19 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) free_empty_tables(start, end, PAGE_OFFSET, PAGE_END); } -static bool inside_linear_region(u64 start, u64 size) +struct range arch_get_mappable_range(void) { + struct range mhp_range; + /* * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)] * accommodating both its ends but excluding PAGE_END. Max physical * range which can be mapped inside this linear mapping range, must * also be derived from its end points. */ - return start >= __pa(_PAGE_OFFSET(vabits_actual)) && - (start + size - 1) <= __pa(PAGE_END - 1); + mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual)); + mhp_range.end = __pa(PAGE_END - 1); + return mhp_range; } int arch_add_memory(int nid, u64 start, u64 size, @@ -1459,12 +1464,14 @@ int arch_add_memory(int nid, u64 start, u64 size, { int ret, flags = 0; - if (!inside_linear_region(start, size)) { - pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size); - return -EINVAL; - } + VM_BUG_ON(!mhp_range_allowed(start, size, true)); - if (rodata_full || debug_pagealloc_enabled()) + /* + * KFENCE requires linear map to be mapped at page granularity, so that + * it is possible to protect/unprotect single pages in the KFENCE pool. 
+ */ + if (rodata_full || debug_pagealloc_enabled() || + IS_ENABLED(CONFIG_KFENCE)) flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c deleted file mode 100644 index a8303bc6b62a..000000000000 --- a/arch/arm64/mm/numa.c +++ /dev/null @@ -1,464 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * NUMA support, based on the x86 implementation. - * - * Copyright (C) 2015 Cavium Inc. - * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com> - */ - -#define pr_fmt(fmt) "NUMA: " fmt - -#include <linux/acpi.h> -#include <linux/memblock.h> -#include <linux/module.h> -#include <linux/of.h> - -#include <asm/acpi.h> -#include <asm/sections.h> - -struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; -EXPORT_SYMBOL(node_data); -nodemask_t numa_nodes_parsed __initdata; -static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE }; - -static int numa_distance_cnt; -static u8 *numa_distance; -bool numa_off; - -static __init int numa_parse_early_param(char *opt) -{ - if (!opt) - return -EINVAL; - if (str_has_prefix(opt, "off")) - numa_off = true; - - return 0; -} -early_param("numa", numa_parse_early_param); - -cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; -EXPORT_SYMBOL(node_to_cpumask_map); - -#ifdef CONFIG_DEBUG_PER_CPU_MAPS - -/* - * Returns a pointer to the bitmask of CPUs on Node 'node'. - */ -const struct cpumask *cpumask_of_node(int node) -{ - - if (node == NUMA_NO_NODE) - return cpu_all_mask; - - if (WARN_ON(node < 0 || node >= nr_node_ids)) - return cpu_none_mask; - - if (WARN_ON(node_to_cpumask_map[node] == NULL)) - return cpu_online_mask; - - return node_to_cpumask_map[node]; -} -EXPORT_SYMBOL(cpumask_of_node); - -#endif - -static void numa_update_cpu(unsigned int cpu, bool remove) -{ - int nid = cpu_to_node(cpu); - - if (nid == NUMA_NO_NODE) - return; - - if (remove) - cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]); - else - cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); -} - -void numa_add_cpu(unsigned int cpu) -{ - numa_update_cpu(cpu, false); -} - -void numa_remove_cpu(unsigned int cpu) -{ - numa_update_cpu(cpu, true); -} - -void numa_clear_node(unsigned int cpu) -{ - numa_remove_cpu(cpu); - set_cpu_numa_node(cpu, NUMA_NO_NODE); -} - -/* - * Allocate node_to_cpumask_map based on number of available nodes - * Requires node_possible_map to be valid. - * - * Note: cpumask_of_node() is not valid until after this is done. - * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) - */ -static void __init setup_node_to_cpumask_map(void) -{ - int node; - - /* setup nr_node_ids if not done yet */ - if (nr_node_ids == MAX_NUMNODES) - setup_nr_node_ids(); - - /* allocate and clear the mapping */ - for (node = 0; node < nr_node_ids; node++) { - alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); - cpumask_clear(node_to_cpumask_map[node]); - } - - /* cpumask_of_node() will now work */ - pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); -} - -/* - * Set the cpu to node and mem mapping - */ -void numa_store_cpu_info(unsigned int cpu) -{ - set_cpu_numa_node(cpu, cpu_to_node_map[cpu]); -} - -void __init early_map_cpu_to_node(unsigned int cpu, int nid) -{ - /* fallback to node 0 */ - if (nid < 0 || nid >= MAX_NUMNODES || numa_off) - nid = 0; - - cpu_to_node_map[cpu] = nid; - - /* - * We should set the numa node of cpu0 as soon as possible, because it - * has already been set up online before. cpu_to_node(0) will soon be - * called. 
- */ - if (!cpu) - set_cpu_numa_node(cpu, nid); -} - -#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; -EXPORT_SYMBOL(__per_cpu_offset); - -static int __init early_cpu_to_node(int cpu) -{ - return cpu_to_node_map[cpu]; -} - -static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) -{ - return node_distance(early_cpu_to_node(from), early_cpu_to_node(to)); -} - -static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, - size_t align) -{ - int nid = early_cpu_to_node(cpu); - - return memblock_alloc_try_nid(size, align, - __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid); -} - -static void __init pcpu_fc_free(void *ptr, size_t size) -{ - memblock_free_early(__pa(ptr), size); -} - -void __init setup_per_cpu_areas(void) -{ - unsigned long delta; - unsigned int cpu; - int rc; - - /* - * Always reserve area for module percpu variables. That's - * what the legacy allocator did. - */ - rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, - PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, - pcpu_cpu_distance, - pcpu_fc_alloc, pcpu_fc_free); - if (rc < 0) - panic("Failed to initialize percpu areas."); - - delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; - for_each_possible_cpu(cpu) - __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; -} -#endif - -/** - * numa_add_memblk() - Set node id to memblk - * @nid: NUMA node ID of the new memblk - * @start: Start address of the new memblk - * @end: End address of the new memblk - * - * RETURNS: - * 0 on success, -errno on failure. - */ -int __init numa_add_memblk(int nid, u64 start, u64 end) -{ - int ret; - - ret = memblock_set_node(start, (end - start), &memblock.memory, nid); - if (ret < 0) { - pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n", - start, (end - 1), nid); - return ret; - } - - node_set(nid, numa_nodes_parsed); - return ret; -} - -/* - * Initialize NODE_DATA for a node on the local memory - */ -static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) -{ - const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); - u64 nd_pa; - void *nd; - int tnid; - - if (start_pfn >= end_pfn) - pr_info("Initmem setup node %d [<memory-less node>]\n", nid); - - nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); - if (!nd_pa) - panic("Cannot allocate %zu bytes for node %d data\n", - nd_size, nid); - - nd = __va(nd_pa); - - /* report and initialize */ - pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n", - nd_pa, nd_pa + nd_size - 1); - tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); - if (tnid != nid) - pr_info("NODE_DATA(%d) on node %d\n", nid, tnid); - - node_data[nid] = nd; - memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); - NODE_DATA(nid)->node_id = nid; - NODE_DATA(nid)->node_start_pfn = start_pfn; - NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; -} - -/* - * numa_free_distance - * - * The current table is freed. - */ -void __init numa_free_distance(void) -{ - size_t size; - - if (!numa_distance) - return; - - size = numa_distance_cnt * numa_distance_cnt * - sizeof(numa_distance[0]); - - memblock_free(__pa(numa_distance), size); - numa_distance_cnt = 0; - numa_distance = NULL; -} - -/* - * Create a new NUMA distance table. 
- */ -static int __init numa_alloc_distance(void) -{ - size_t size; - u64 phys; - int i, j; - - size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]); - phys = memblock_find_in_range(0, PFN_PHYS(max_pfn), - size, PAGE_SIZE); - if (WARN_ON(!phys)) - return -ENOMEM; - - memblock_reserve(phys, size); - - numa_distance = __va(phys); - numa_distance_cnt = nr_node_ids; - - /* fill with the default distances */ - for (i = 0; i < numa_distance_cnt; i++) - for (j = 0; j < numa_distance_cnt; j++) - numa_distance[i * numa_distance_cnt + j] = i == j ? - LOCAL_DISTANCE : REMOTE_DISTANCE; - - pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt); - - return 0; -} - -/** - * numa_set_distance() - Set inter node NUMA distance from node to node. - * @from: the 'from' node to set distance - * @to: the 'to' node to set distance - * @distance: NUMA distance - * - * Set the distance from node @from to @to to @distance. - * If distance table doesn't exist, a warning is printed. - * - * If @from or @to is higher than the highest known node or lower than zero - * or @distance doesn't make sense, the call is ignored. - */ -void __init numa_set_distance(int from, int to, int distance) -{ - if (!numa_distance) { - pr_warn_once("Warning: distance table not allocated yet\n"); - return; - } - - if (from >= numa_distance_cnt || to >= numa_distance_cnt || - from < 0 || to < 0) { - pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n", - from, to, distance); - return; - } - - if ((u8)distance != distance || - (from == to && distance != LOCAL_DISTANCE)) { - pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", - from, to, distance); - return; - } - - numa_distance[from * numa_distance_cnt + to] = distance; -} - -/* - * Return NUMA distance @from to @to - */ -int __node_distance(int from, int to) -{ - if (from >= numa_distance_cnt || to >= numa_distance_cnt) - return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; - return numa_distance[from * numa_distance_cnt + to]; -} -EXPORT_SYMBOL(__node_distance); - -static int __init numa_register_nodes(void) -{ - int nid; - struct memblock_region *mblk; - - /* Check that valid nid is set to memblks */ - for_each_mem_region(mblk) { - int mblk_nid = memblock_get_region_node(mblk); - - if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) { - pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n", - mblk_nid, mblk->base, - mblk->base + mblk->size - 1); - return -EINVAL; - } - } - - /* Finally register nodes. 
*/ - for_each_node_mask(nid, numa_nodes_parsed) { - unsigned long start_pfn, end_pfn; - - get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); - setup_node_data(nid, start_pfn, end_pfn); - node_set_online(nid); - } - - /* Setup online nodes to actual nodes*/ - node_possible_map = numa_nodes_parsed; - - return 0; -} - -static int __init numa_init(int (*init_func)(void)) -{ - int ret; - - nodes_clear(numa_nodes_parsed); - nodes_clear(node_possible_map); - nodes_clear(node_online_map); - - ret = numa_alloc_distance(); - if (ret < 0) - return ret; - - ret = init_func(); - if (ret < 0) - goto out_free_distance; - - if (nodes_empty(numa_nodes_parsed)) { - pr_info("No NUMA configuration found\n"); - ret = -EINVAL; - goto out_free_distance; - } - - ret = numa_register_nodes(); - if (ret < 0) - goto out_free_distance; - - setup_node_to_cpumask_map(); - - return 0; -out_free_distance: - numa_free_distance(); - return ret; -} - -/** - * dummy_numa_init() - Fallback dummy NUMA init - * - * Used if there's no underlying NUMA architecture, NUMA initialization - * fails, or NUMA is disabled on the command line. - * - * Must online at least one node (node 0) and add memory blocks that cover all - * allowed memory. It is unlikely that this function fails. - * - * Return: 0 on success, -errno on failure. - */ -static int __init dummy_numa_init(void) -{ - phys_addr_t start = memblock_start_of_DRAM(); - phys_addr_t end = memblock_end_of_DRAM(); - int ret; - - if (numa_off) - pr_info("NUMA disabled\n"); /* Forced off on command line. */ - pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n", start, end - 1); - - ret = numa_add_memblk(0, start, end); - if (ret) { - pr_err("NUMA init failed\n"); - return ret; - } - - numa_off = true; - return 0; -} - -/** - * arm64_numa_init() - Initialize NUMA - * - * Try each configured NUMA initialization method until one succeeds. The - * last fallback is dummy single node config encompassing whole memory. - */ -void __init arm64_numa_init(void) -{ - if (!numa_off) { - if (!acpi_disabled && !numa_init(arm64_acpi_numa_init)) - return; - if (acpi_disabled && !numa_init(of_numa_init)) - return; - } - - numa_init(dummy_numa_init); -} diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 1f7ee8c8b7b8..c967bfd30d2b 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -291,17 +291,7 @@ skip_pgd: /* We're done: fire up the MMU again */ mrs x17, sctlr_el1 orr x17, x17, #SCTLR_ELx_M - msr sctlr_el1, x17 - isb - - /* - * Invalidate the local I-cache so that any instructions fetched - * speculatively from the PoC are discarded, since they may have - * been dynamically patched at the PoU. - */ - ic iallu - dsb nsh - isb + set_sctlr_el1 x17 /* Set the flag to zero to indicate that we're all done */ str wzr, [flag_ptr] @@ -464,8 +454,8 @@ SYM_FUNC_START(__cpu_setup) #endif msr mair_el1, x5 /* - * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for - * both user and kernel. + * Set/prepare TCR and TTBR. TCR_EL1.T1SZ gets further + * adjusted if the kernel is compiled with 52bit VA support. 
 */
	mov_q	x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 04137a8f3d2d..0e050d76b83a 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -324,6 +324,7 @@ void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
	st = (struct pg_state){
		.seq = s,
		.marker = info->markers,
+		.level = -1,
		.ptdump = {
			.note_page = note_page,
			.range = (struct ptdump_range[]){
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
new file mode 100644
index 000000000000..527f0a39c3da
--- /dev/null
+++ b/arch/arm64/mm/trans_pgd.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Transitional page tables for kexec and hibernate
+ *
+ * This file is derived from: arch/arm64/kernel/hibernate.c
+ *
+ * Copyright (c) 2020, Microsoft Corporation.
+ * Pavel Tatashin <pasha.tatashin@soleen.com>
+ *
+ */
+
+/*
+ * Transitional tables are used while the system is transferring from one
+ * world to another, such as during hibernate restore and kexec reboots.
+ * During these phases one cannot rely on the page tables not being
+ * overwritten, because hibernate and kexec can overwrite the current page
+ * tables during the transition.
+ */
+
+#include <asm/trans_pgd.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <linux/suspend.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+
+static void *trans_alloc(struct trans_pgd_info *info)
+{
+	return info->trans_alloc_page(info->trans_alloc_arg);
+}
+
+static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
+{
+	pte_t pte = READ_ONCE(*src_ptep);
+
+	if (pte_valid(pte)) {
+		/*
+		 * Resume will overwrite areas that may be marked
+		 * read only (code, rodata). Clear the RDONLY bit from
+		 * the temporary mappings we use during restore.
+		 */
+		set_pte(dst_ptep, pte_mkwrite(pte));
+	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
+		/*
+		 * debug_pagealloc will have removed the PTE_VALID bit if
+		 * the page isn't in use by the resume kernel. It may have
+		 * been in use by the original kernel, in which case we need
+		 * to put it back in our copy to do the restore.
+		 *
+		 * Before marking this entry valid, check that the pfn should
+		 * be mapped.
+ */ + BUG_ON(!pfn_valid(pte_pfn(pte))); + + set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte))); + } +} + +static int copy_pte(struct trans_pgd_info *info, pmd_t *dst_pmdp, + pmd_t *src_pmdp, unsigned long start, unsigned long end) +{ + pte_t *src_ptep; + pte_t *dst_ptep; + unsigned long addr = start; + + dst_ptep = trans_alloc(info); + if (!dst_ptep) + return -ENOMEM; + pmd_populate_kernel(NULL, dst_pmdp, dst_ptep); + dst_ptep = pte_offset_kernel(dst_pmdp, start); + + src_ptep = pte_offset_kernel(src_pmdp, start); + do { + _copy_pte(dst_ptep, src_ptep, addr); + } while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end); + + return 0; +} + +static int copy_pmd(struct trans_pgd_info *info, pud_t *dst_pudp, + pud_t *src_pudp, unsigned long start, unsigned long end) +{ + pmd_t *src_pmdp; + pmd_t *dst_pmdp; + unsigned long next; + unsigned long addr = start; + + if (pud_none(READ_ONCE(*dst_pudp))) { + dst_pmdp = trans_alloc(info); + if (!dst_pmdp) + return -ENOMEM; + pud_populate(NULL, dst_pudp, dst_pmdp); + } + dst_pmdp = pmd_offset(dst_pudp, start); + + src_pmdp = pmd_offset(src_pudp, start); + do { + pmd_t pmd = READ_ONCE(*src_pmdp); + + next = pmd_addr_end(addr, end); + if (pmd_none(pmd)) + continue; + if (pmd_table(pmd)) { + if (copy_pte(info, dst_pmdp, src_pmdp, addr, next)) + return -ENOMEM; + } else { + set_pmd(dst_pmdp, + __pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY)); + } + } while (dst_pmdp++, src_pmdp++, addr = next, addr != end); + + return 0; +} + +static int copy_pud(struct trans_pgd_info *info, p4d_t *dst_p4dp, + p4d_t *src_p4dp, unsigned long start, + unsigned long end) +{ + pud_t *dst_pudp; + pud_t *src_pudp; + unsigned long next; + unsigned long addr = start; + + if (p4d_none(READ_ONCE(*dst_p4dp))) { + dst_pudp = trans_alloc(info); + if (!dst_pudp) + return -ENOMEM; + p4d_populate(NULL, dst_p4dp, dst_pudp); + } + dst_pudp = pud_offset(dst_p4dp, start); + + src_pudp = pud_offset(src_p4dp, start); + do { + pud_t pud = READ_ONCE(*src_pudp); + + next = pud_addr_end(addr, end); + if (pud_none(pud)) + continue; + if (pud_table(pud)) { + if (copy_pmd(info, dst_pudp, src_pudp, addr, next)) + return -ENOMEM; + } else { + set_pud(dst_pudp, + __pud(pud_val(pud) & ~PUD_SECT_RDONLY)); + } + } while (dst_pudp++, src_pudp++, addr = next, addr != end); + + return 0; +} + +static int copy_p4d(struct trans_pgd_info *info, pgd_t *dst_pgdp, + pgd_t *src_pgdp, unsigned long start, + unsigned long end) +{ + p4d_t *dst_p4dp; + p4d_t *src_p4dp; + unsigned long next; + unsigned long addr = start; + + dst_p4dp = p4d_offset(dst_pgdp, start); + src_p4dp = p4d_offset(src_pgdp, start); + do { + next = p4d_addr_end(addr, end); + if (p4d_none(READ_ONCE(*src_p4dp))) + continue; + if (copy_pud(info, dst_p4dp, src_p4dp, addr, next)) + return -ENOMEM; + } while (dst_p4dp++, src_p4dp++, addr = next, addr != end); + + return 0; +} + +static int copy_page_tables(struct trans_pgd_info *info, pgd_t *dst_pgdp, + unsigned long start, unsigned long end) +{ + unsigned long next; + unsigned long addr = start; + pgd_t *src_pgdp = pgd_offset_k(start); + + dst_pgdp = pgd_offset_pgd(dst_pgdp, start); + do { + next = pgd_addr_end(addr, end); + if (pgd_none(READ_ONCE(*src_pgdp))) + continue; + if (copy_p4d(info, dst_pgdp, src_pgdp, addr, next)) + return -ENOMEM; + } while (dst_pgdp++, src_pgdp++, addr = next, addr != end); + + return 0; +} + +/* + * Create trans_pgd and copy linear map. + * info: contains allocator and its argument + * dst_pgdp: new page table that is created, and to which map is copied. 
+ * start: Start of the interval (inclusive).
+ * end: End of the interval (exclusive).
+ *
+ * Returns 0 on success, and -ENOMEM on failure.
+ */
+int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **dst_pgdp,
+			  unsigned long start, unsigned long end)
+{
+	int rc;
+	pgd_t *trans_pgd = trans_alloc(info);
+
+	if (!trans_pgd) {
+		pr_err("Failed to allocate memory for temporary page tables.\n");
+		return -ENOMEM;
+	}
+
+	rc = copy_page_tables(info, trans_pgd, start, end);
+	if (!rc)
+		*dst_pgdp = trans_pgd;
+
+	return rc;
+}
+
+/*
+ * Add map entry to trans_pgd for a base-size page at PTE level.
+ * info: contains allocator and its argument
+ * trans_pgd: page table in which new map is added.
+ * page: page to be mapped.
+ * dst_addr: new VA address for the page
+ * pgprot: protection for the page.
+ *
+ * Returns 0 on success, and -ENOMEM on failure.
+ */
+int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
+		       void *page, unsigned long dst_addr, pgprot_t pgprot)
+{
+	pgd_t *pgdp;
+	p4d_t *p4dp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
+	if (pgd_none(READ_ONCE(*pgdp))) {
+		p4dp = trans_alloc(info);
+		if (!p4dp)
+			return -ENOMEM;
+		pgd_populate(NULL, pgdp, p4dp);
+	}
+
+	p4dp = p4d_offset(pgdp, dst_addr);
+	if (p4d_none(READ_ONCE(*p4dp))) {
+		pudp = trans_alloc(info);
+		if (!pudp)
+			return -ENOMEM;
+		p4d_populate(NULL, p4dp, pudp);
+	}
+
+	pudp = pud_offset(p4dp, dst_addr);
+	if (pud_none(READ_ONCE(*pudp))) {
+		pmdp = trans_alloc(info);
+		if (!pmdp)
+			return -ENOMEM;
+		pud_populate(NULL, pudp, pmdp);
+	}
+
+	pmdp = pmd_offset(pudp, dst_addr);
+	if (pmd_none(READ_ONCE(*pmdp))) {
+		ptep = trans_alloc(info);
+		if (!ptep)
+			return -ENOMEM;
+		pmd_populate_kernel(NULL, pmdp, ptep);
+	}
+
+	ptep = pte_offset_kernel(pmdp, dst_addr);
+	set_pte(ptep, pfn_pte(virt_to_pfn(page), pgprot));
+
+	return 0;
+}
+
+/*
+ * The page we want to idmap may be outside the range covered by VA_BITS that
+ * can be built using the kernel's p?d_populate() helpers. As a one-off, for a
+ * single page, we build these page tables bottom up and just assume that will
+ * need the maximum T0SZ.
+ *
+ * Returns 0 on success, and -ENOMEM on failure.
+ * On success, trans_ttbr0 contains the page table with the idmapped page, and
+ * t0sz is set to the maximum T0SZ for this page.
+ */
+int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
+			 unsigned long *t0sz, void *page)
+{
+	phys_addr_t dst_addr = virt_to_phys(page);
+	unsigned long pfn = __phys_to_pfn(dst_addr);
+	int max_msb = (dst_addr & GENMASK(52, 48)) ?
51 : 47; + int bits_mapped = PAGE_SHIFT - 4; + unsigned long level_mask, prev_level_entry, *levels[4]; + int this_level, index, level_lsb, level_msb; + + dst_addr &= PAGE_MASK; + prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_EXEC)); + + for (this_level = 3; this_level >= 0; this_level--) { + levels[this_level] = trans_alloc(info); + if (!levels[this_level]) + return -ENOMEM; + + level_lsb = ARM64_HW_PGTABLE_LEVEL_SHIFT(this_level); + level_msb = min(level_lsb + bits_mapped, max_msb); + level_mask = GENMASK_ULL(level_msb, level_lsb); + + index = (dst_addr & level_mask) >> level_lsb; + *(levels[this_level] + index) = prev_level_entry; + + pfn = virt_to_pfn(levels[this_level]); + prev_level_entry = pte_val(pfn_pte(pfn, + __pgprot(PMD_TYPE_TABLE))); + + if (level_msb == max_msb) + break; + } + + *trans_ttbr0 = phys_to_ttbr(__pfn_to_phys(pfn)); + *t0sz = TCR_T0SZ(max_msb + 1); + + return 0; +} diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 89dd2fcf38fa..34e91224adc3 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -7,7 +7,7 @@ config CSKY select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_USE_BUILTIN_BSWAP - select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2 + select ARCH_USE_QUEUED_RWLOCKS select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT select COMMON_CLK @@ -35,6 +35,9 @@ config CSKY select GENERIC_IRQ_MULTI_HANDLER select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD + select GENERIC_TIME_VSYSCALL + select GENERIC_VDSO_32 + select GENERIC_GETTIMEOFDAY select GX6605S_TIMER if CPU_CK610 select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_AUDITSYSCALL @@ -43,11 +46,14 @@ config CSKY select HAVE_CONTEXT_TRACKING select HAVE_VIRT_CPU_ACCOUNTING_GEN select HAVE_DEBUG_BUGVERBOSE + select HAVE_DEBUG_KMEMLEAK select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_GENERIC_VDSO select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_ERROR_INJECTION + select HAVE_FUTEX_CMPXCHG if FUTEX && SMP select HAVE_FTRACE_MCOUNT_RECORD select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZO @@ -192,6 +198,22 @@ config CPU_CK860 endchoice choice + prompt "PAGE OFFSET" + default PAGE_OFFSET_80000000 + +config PAGE_OFFSET_80000000 + bool "PAGE OFFSET 2G (user:kernel = 2:2)" + +config PAGE_OFFSET_A0000000 + bool "PAGE OFFSET 2.5G (user:kernel = 2.5:1.5)" +endchoice + +config PAGE_OFFSET + hex + default 0x80000000 if PAGE_OFFSET_80000000 + default 0xa0000000 if PAGE_OFFSET_A0000000 +choice + prompt "C-SKY PMU type" depends on PERF_EVENTS depends on CPU_CK807 || CPU_CK810 || CPU_CK860 diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h index d3e04208d53c..6cab7afae962 100644 --- a/arch/csky/abiv1/inc/abi/cacheflush.h +++ b/arch/csky/abiv1/inc/abi/cacheflush.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ABI_CSKY_CACHEFLUSH_H #define __ABI_CSKY_CACHEFLUSH_H diff --git a/arch/csky/abiv1/inc/abi/ckmmu.h b/arch/csky/abiv1/inc/abi/ckmmu.h index ba8eb5870835..416b30c57983 100644 --- a/arch/csky/abiv1/inc/abi/ckmmu.h +++ b/arch/csky/abiv1/inc/abi/ckmmu.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
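
As an aside on the abiv1 ckmmu.h changes that follow: setup_pgd() stores __pa(pgd) in cpcr29 with BIT(0) set as a valid flag, and get_pgd() masks that bit back off before converting to a virtual address. A hedged stand-alone sketch of the flag round-trip (names hypothetical, not kernel API):

#include <assert.h>
#include <stdint.h>

#define PGD_VALID 0x1UL			/* BIT(0): valid flag in the register */

static uintptr_t pack_pgd(uintptr_t pa)
{
	return pa | PGD_VALID;		/* what setup_pgd() writes to cpcr29 */
}

static uintptr_t unpack_pgd(uintptr_t val)
{
	return val & ~PGD_VALID;	/* what get_pgd() reads back */
}

int main(void)
{
	uintptr_t pa = 0x8000f000;	/* page-aligned, so bit 0 is free */

	assert(unpack_pgd(pack_pgd(pa)) == pa);
	return 0;
}
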
#ifndef __ASM_CSKY_CKMMUV1_H #define __ASM_CSKY_CKMMUV1_H @@ -89,13 +88,14 @@ static inline void tlb_invalid_indexed(void) cpwcr("cpcr8", 0x02000000); } -static inline void setup_pgd(unsigned long pgd, bool kernel) +static inline void setup_pgd(pgd_t *pgd, int asid) { - cpwcr("cpcr29", pgd | BIT(0)); + cpwcr("cpcr29", __pa(pgd) | BIT(0)); + write_mmu_entryhi(asid); } -static inline unsigned long get_pgd(void) +static inline pgd_t *get_pgd(void) { - return cprcr("cpcr29") & ~BIT(0); + return __va(cprcr("cpcr29") & ~BIT(0)); } #endif /* __ASM_CSKY_CKMMUV1_H */ diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h index 13c23e2c707c..b6a2109b895e 100644 --- a/arch/csky/abiv1/inc/abi/entry.h +++ b/arch/csky/abiv1/inc/abi/entry.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_ENTRY_H #define __ASM_CSKY_ENTRY_H diff --git a/arch/csky/abiv1/inc/abi/page.h b/arch/csky/abiv1/inc/abi/page.h index c864519117c7..2d2159933b76 100644 --- a/arch/csky/abiv1/inc/abi/page.h +++ b/arch/csky/abiv1/inc/abi/page.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <asm/shmparam.h> diff --git a/arch/csky/abiv1/inc/abi/pgtable-bits.h b/arch/csky/abiv1/inc/abi/pgtable-bits.h index d605445aad9a..752c8b3f9194 100644 --- a/arch/csky/abiv1/inc/abi/pgtable-bits.h +++ b/arch/csky/abiv1/inc/abi/pgtable-bits.h @@ -1,37 +1,49 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_PGTABLE_BITS_H #define __ASM_CSKY_PGTABLE_BITS_H /* implemented in software */ -#define _PAGE_ACCESSED (1<<3) -#define PAGE_ACCESSED_BIT (3) - +#define _PAGE_PRESENT (1<<0) #define _PAGE_READ (1<<1) #define _PAGE_WRITE (1<<2) -#define _PAGE_PRESENT (1<<0) - +#define _PAGE_ACCESSED (1<<3) #define _PAGE_MODIFIED (1<<4) -#define PAGE_MODIFIED_BIT (4) /* implemented in hardware */ #define _PAGE_GLOBAL (1<<6) - #define _PAGE_VALID (1<<7) -#define PAGE_VALID_BIT (7) - #define _PAGE_DIRTY (1<<8) -#define PAGE_DIRTY_BIT (8) #define _PAGE_CACHE (3<<9) #define _PAGE_UNCACHE (2<<9) #define _PAGE_SO _PAGE_UNCACHE - #define _CACHE_MASK (7<<9) -#define _CACHE_CACHED (_PAGE_VALID | _PAGE_CACHE) -#define _CACHE_UNCACHED (_PAGE_VALID | _PAGE_UNCACHE) +#define _CACHE_CACHED _PAGE_CACHE +#define _CACHE_UNCACHED _PAGE_UNCACHE + +#define _PAGE_PROT_NONE _PAGE_READ + +/* + * Encode and decode a swap entry + * + * Format of swap PTE: + * bit 0: _PAGE_PRESENT (zero) + * bit 1: _PAGE_READ (zero) + * bit 2 - 5: swap type[0 - 3] + * bit 6: _PAGE_GLOBAL (zero) + * bit 7: _PAGE_VALID (zero) + * bit 8: swap type[4] + * bit 9 - 31: swap offset + */ +#define __swp_type(x) ((((x).val >> 2) & 0xf) | \ + (((x).val >> 4) & 0x10)) +#define __swp_offset(x) ((x).val >> 9) +#define __swp_entry(type, offset) ((swp_entry_t) { \ + ((type & 0xf) << 2) | \ + ((type & 0x10) << 4) | \ + ((offset) << 9)}) #define HAVE_ARCH_UNMAPPED_AREA diff --git a/arch/csky/abiv1/inc/abi/reg_ops.h b/arch/csky/abiv1/inc/abi/reg_ops.h index a153bd3918f7..abd01a243388 100644 --- a/arch/csky/abiv1/inc/abi/reg_ops.h +++ b/arch/csky/abiv1/inc/abi/reg_ops.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
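
The swap-entry encoding added to pgtable-bits.h above splits the five type bits around the hardware-defined _PAGE_GLOBAL and _PAGE_VALID bits: type[0:3] occupy bits 2-5, type[4] sits in bit 8, and the offset starts at bit 9. A hedged user-space round-trip check of that layout (the three macros are copied from the patch; the rest is illustrative):

#include <assert.h>

typedef struct { unsigned long val; } swp_entry_t;

#define __swp_type(x)	((((x).val >> 2) & 0xf) | \
			 (((x).val >> 4) & 0x10))
#define __swp_offset(x)	((x).val >> 9)
#define __swp_entry(type, offset) ((swp_entry_t) { \
	((type & 0xf) << 2) | \
	((type & 0x10) << 4) | \
	((offset) << 9)})

int main(void)
{
	/* type 0x13 exercises both halves of the split field */
	swp_entry_t e = __swp_entry(0x13, 0x1234);

	assert(__swp_type(e) == 0x13);		/* type[4] and type[0:3] reassemble */
	assert(__swp_offset(e) == 0x1234);	/* offset is untouched */
	return 0;
}
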
#ifndef __ABI_REG_OPS_H #define __ABI_REG_OPS_H diff --git a/arch/csky/abiv1/inc/abi/regdef.h b/arch/csky/abiv1/inc/abi/regdef.h index 104707fbdcc1..7b386fd67070 100644 --- a/arch/csky/abiv1/inc/abi/regdef.h +++ b/arch/csky/abiv1/inc/abi/regdef.h @@ -1,10 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_REGDEF_H #define __ASM_CSKY_REGDEF_H +#ifdef __ASSEMBLY__ #define syscallid r1 +#else +#define syscallid "r1" +#endif + #define regs_syscallid(regs) regs->regs[9] #define regs_fp(regs) regs->regs[2] diff --git a/arch/csky/abiv1/inc/abi/string.h b/arch/csky/abiv1/inc/abi/string.h index 0cd43384f8d2..9d95594b0feb 100644 --- a/arch/csky/abiv1/inc/abi/string.h +++ b/arch/csky/abiv1/inc/abi/string.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ABI_CSKY_STRING_H #define __ABI_CSKY_STRING_H diff --git a/arch/csky/abiv1/inc/abi/switch_context.h b/arch/csky/abiv1/inc/abi/switch_context.h index 17c82686498e..ec73fd7c9f87 100644 --- a/arch/csky/abiv1/inc/abi/switch_context.h +++ b/arch/csky/abiv1/inc/abi/switch_context.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ABI_CSKY_PTRACE_H #define __ABI_CSKY_PTRACE_H diff --git a/arch/csky/abiv1/inc/abi/vdso.h b/arch/csky/abiv1/inc/abi/vdso.h index 14352f524f1d..9e6d0a2fdd2b 100644 --- a/arch/csky/abiv1/inc/abi/vdso.h +++ b/arch/csky/abiv1/inc/abi/vdso.h @@ -1,17 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#include <linux/uaccess.h> +#ifndef __ABI_CSKY_VDSO_H +#define __ABI_CSKY_VDSO_H -static inline int setup_vdso_page(unsigned short *ptr) -{ - int err = 0; +/* movi r1, 127; addi r1, (139 - 127) */ +#define SET_SYSCALL_ID .long 0x20b167f1 - /* movi r1, 127 */ - err |= __put_user(0x67f1, ptr + 0); - /* addi r1, (139 - 127) */ - err |= __put_user(0x20b1, ptr + 1); - /* trap 0 */ - err |= __put_user(0x0008, ptr + 2); - - return err; -} +#endif /* __ABI_CSKY_VDSO_H */ diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c index 790f1ebfba44..39c51399dd81 100644 --- a/arch/csky/abiv2/cacheflush.c +++ b/arch/csky/abiv2/cacheflush.c @@ -12,6 +12,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, unsigned long addr; struct page *page; + if (!pfn_valid(pte_pfn(*pte))) + return; + page = pfn_to_page(pte_pfn(*pte)); if (page == ZERO_PAGE(0)) return; diff --git a/arch/csky/abiv2/inc/abi/ckmmu.h b/arch/csky/abiv2/inc/abi/ckmmu.h index 73ded7c72482..64215f2380f1 100644 --- a/arch/csky/abiv2/inc/abi/ckmmu.h +++ b/arch/csky/abiv2/inc/abi/ckmmu.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
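The abiv1 SET_SYSCALL_ID word above packs the two 16-bit opcodes that the deleted setup_vdso_page() used to write one halfword at a time. Because the halfwords are stored in memory order while .long emits a little-endian 32-bit constant, the first instruction lands in the low 16 bits of the word. A host-side check (standalone, opcode values taken from the deleted code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint16_t movi_r1_127 = 0x67f1;  /* movi r1, 127         */
        uint16_t addi_r1_12  = 0x20b1;  /* addi r1, (139 - 127) */

        /* first halfword goes in the low 16 bits of the .long */
        uint32_t word = (uint32_t)addi_r1_12 << 16 | movi_r1_127;

        assert(word == 0x20b167f1);
        return 0;
}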
#ifndef __ASM_CSKY_CKMMUV2_H #define __ASM_CSKY_CKMMUV2_H @@ -78,8 +77,13 @@ static inline void tlb_read(void) static inline void tlb_invalid_all(void) { #ifdef CONFIG_CPU_HAS_TLBI - asm volatile("tlbi.alls\n":::"memory"); sync_is(); + asm volatile( + "tlbi.alls \n" + "sync.i \n" + : + : + : "memory"); #else mtcr("cr<8, 15>", 0x04000000); #endif @@ -88,8 +92,13 @@ static inline void tlb_invalid_all(void) static inline void local_tlb_invalid_all(void) { #ifdef CONFIG_CPU_HAS_TLBI - asm volatile("tlbi.all\n":::"memory"); sync_is(); + asm volatile( + "tlbi.all \n" + "sync.i \n" + : + : + : "memory"); #else tlb_invalid_all(); #endif @@ -100,16 +109,31 @@ static inline void tlb_invalid_indexed(void) mtcr("cr<8, 15>", 0x02000000); } -static inline void setup_pgd(unsigned long pgd, bool kernel) +#define NOP32 ".long 0x4820c400\n" + +static inline void setup_pgd(pgd_t *pgd, int asid) { - if (kernel) - mtcr("cr<28, 15>", pgd | BIT(0)); - else - mtcr("cr<29, 15>", pgd | BIT(0)); +#ifdef CONFIG_CPU_HAS_TLBI + sync_is(); +#else + mb(); +#endif + asm volatile( +#ifdef CONFIG_CPU_HAS_TLBI + "mtcr %1, cr<28, 15> \n" +#endif + "mtcr %1, cr<29, 15> \n" + "mtcr %0, cr< 4, 15> \n" + ".rept 64 \n" + NOP32 + ".endr \n" + : + :"r"(asid), "r"(__pa(pgd) | BIT(0)) + :"memory"); } -static inline unsigned long get_pgd(void) +static inline pgd_t *get_pgd(void) { - return mfcr("cr<29, 15>") & ~BIT(0); + return __va(mfcr("cr<29, 15>") & ~BIT(0)); } #endif /* __ASM_CSKY_CKMMUV2_H */ diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h index bedcc6f06bba..cca63e699b58 100644 --- a/arch/csky/abiv2/inc/abi/entry.h +++ b/arch/csky/abiv2/inc/abi/entry.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_ENTRY_H #define __ASM_CSKY_ENTRY_H @@ -26,6 +25,9 @@ stw tls, (sp, 0) stw lr, (sp, 4) + RD_MEH lr + WR_MEH lr + mfcr lr, epc movi tls, \epc_inc add lr, tls @@ -231,6 +233,16 @@ mtcr \rx, cr<8, 15> .endm +#ifdef CONFIG_PAGE_OFFSET_80000000 +#define MSA_SET cr<30, 15> +#define MSA_CLR cr<31, 15> +#endif + +#ifdef CONFIG_PAGE_OFFSET_A0000000 +#define MSA_SET cr<31, 15> +#define MSA_CLR cr<30, 15> +#endif + .macro SETUP_MMU /* Init psr and enable ee */ lrw r6, DEFAULT_PSR_VALUE @@ -281,15 +293,15 @@ * 31 - 29 | 28 - 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 * BA Reserved SH WA B SO SEC C D V */ - mfcr r6, cr<30, 15> /* Get MSA0 */ + mfcr r6, MSA_SET /* Get MSA */ 2: lsri r6, 29 lsli r6, 29 addi r6, 0x1ce - mtcr r6, cr<30, 15> /* Set MSA0 */ + mtcr r6, MSA_SET /* Set MSA */ movi r6, 0 - mtcr r6, cr<31, 15> /* Clr MSA1 */ + mtcr r6, MSA_CLR /* Clr MSA */ /* enable MMU */ mfcr r6, cr18 diff --git a/arch/csky/abiv2/inc/abi/fpu.h b/arch/csky/abiv2/inc/abi/fpu.h index 09e2700a3693..aabb79355013 100644 --- a/arch/csky/abiv2/inc/abi/fpu.h +++ b/arch/csky/abiv2/inc/abi/fpu.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_FPU_H #define __ASM_CSKY_FPU_H diff --git a/arch/csky/abiv2/inc/abi/page.h b/arch/csky/abiv2/inc/abi/page.h index 0a70cb553dca..cf005f13cd15 100644 --- a/arch/csky/abiv2/inc/abi/page.h +++ b/arch/csky/abiv2/inc/abi/page.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
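The SETUP_MMU sequence in the entry.h hunk above (lsri r6, 29; lsli r6, 29; addi r6, 0x1ce) keeps only the base-address field of the MSA register and then sets the attribute bits. A host-side decode (standalone sketch; the bit names are read off the layout comment in that hunk and are an assumption, not verified against the manual):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t msa = 0xa0001234u;  /* hypothetical old register value */

        msa = (msa >> 29) << 29;     /* lsri/lsli 29: keep BA, bits 31:29 */
        msa += 0x1ce;                /* addi 0x1ce: set attribute bits    */

        assert(msa == (0xa0000000u | 0x1ce));
        /* 0x1ce sets bits 8,7,6,3,2,1: per the comment, SH|WA|B|C|D|V */
        assert(0x1ce == (1u<<8 | 1u<<7 | 1u<<6 | 1u<<3 | 1u<<2 | 1u<<1));
        return 0;
}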
static inline void clear_user_page(void *addr, unsigned long vaddr, struct page *page) diff --git a/arch/csky/abiv2/inc/abi/pgtable-bits.h b/arch/csky/abiv2/inc/abi/pgtable-bits.h index 137f7932c83b..7e7f389f546f 100644 --- a/arch/csky/abiv2/inc/abi/pgtable-bits.h +++ b/arch/csky/abiv2/inc/abi/pgtable-bits.h @@ -1,37 +1,48 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_PGTABLE_BITS_H #define __ASM_CSKY_PGTABLE_BITS_H /* implemented in software */ #define _PAGE_ACCESSED (1<<7) -#define PAGE_ACCESSED_BIT (7) - #define _PAGE_READ (1<<8) #define _PAGE_WRITE (1<<9) #define _PAGE_PRESENT (1<<10) - #define _PAGE_MODIFIED (1<<11) -#define PAGE_MODIFIED_BIT (11) /* implemented in hardware */ #define _PAGE_GLOBAL (1<<0) - #define _PAGE_VALID (1<<1) -#define PAGE_VALID_BIT (1) - #define _PAGE_DIRTY (1<<2) -#define PAGE_DIRTY_BIT (2) #define _PAGE_SO (1<<5) #define _PAGE_BUF (1<<6) - #define _PAGE_CACHE (1<<3) - #define _CACHE_MASK _PAGE_CACHE -#define _CACHE_CACHED (_PAGE_VALID | _PAGE_CACHE | _PAGE_BUF) -#define _CACHE_UNCACHED (_PAGE_VALID) +#define _CACHE_CACHED (_PAGE_CACHE | _PAGE_BUF) +#define _CACHE_UNCACHED (0) + +#define _PAGE_PROT_NONE _PAGE_WRITE + +/* + * Encode and decode a swap entry + * + * Format of swap PTE: + * bit 0: _PAGE_GLOBAL (zero) + * bit 1: _PAGE_VALID (zero) + * bit 2 - 6: swap type + * bit 7 - 8: swap offset[0 - 1] + * bit 9: _PAGE_WRITE (zero) + * bit 10: _PAGE_PRESENT (zero) + * bit 11 - 31: swap offset[2 - 22] + */ +#define __swp_type(x) (((x).val >> 2) & 0x1f) +#define __swp_offset(x) ((((x).val >> 7) & 0x3) | \ + (((x).val >> 9) & 0x7ffffc)) +#define __swp_entry(type, offset) ((swp_entry_t) { \ + ((type & 0x1f) << 2) | \ + ((offset & 0x3) << 7) | \ + ((offset & 0x7ffffc) << 9)}) #endif /* __ASM_CSKY_PGTABLE_BITS_H */ diff --git a/arch/csky/abiv2/inc/abi/reg_ops.h b/arch/csky/abiv2/inc/abi/reg_ops.h index ae82c3f26a6b..49ba18a64751 100644 --- a/arch/csky/abiv2/inc/abi/reg_ops.h +++ b/arch/csky/abiv2/inc/abi/reg_ops.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ABI_REG_OPS_H #define __ABI_REG_OPS_H diff --git a/arch/csky/abiv2/inc/abi/regdef.h b/arch/csky/abiv2/inc/abi/regdef.h index d7328bbc1ce7..0933addbc27b 100644 --- a/arch/csky/abiv2/inc/abi/regdef.h +++ b/arch/csky/abiv2/inc/abi/regdef.h @@ -1,10 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_REGDEF_H #define __ASM_CSKY_REGDEF_H +#ifdef __ASSEMBLY__ #define syscallid r7 +#else +#define syscallid "r7" +#endif + #define regs_syscallid(regs) regs->regs[3] #define regs_fp(regs) regs->regs[4] diff --git a/arch/csky/abiv2/inc/abi/switch_context.h b/arch/csky/abiv2/inc/abi/switch_context.h index 73a81245a3b3..5dd5c3f4ee7e 100644 --- a/arch/csky/abiv2/inc/abi/switch_context.h +++ b/arch/csky/abiv2/inc/abi/switch_context.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
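The same round-trip check for the abiv2 swap-PTE layout documented above (host-side, hypothetical values). Here the type field is 5 bits wide, which is exactly what the MAX_SWAPFILES_CHECK() added above pins down, and the offset is split around the software _PAGE_WRITE/_PAGE_PRESENT bits:

#include <assert.h>

/* abiv2: type in bits 2-6, offset[0-1] in bits 7-8,
 * offset[2-22] in bits 11-31 */
static unsigned long swp_entry(unsigned type, unsigned long offset)
{
        return (type & 0x1f) << 2 | (offset & 0x3) << 7 |
               (offset & 0x7ffffc) << 9;
}

static unsigned swp_type(unsigned long val)
{
        return val >> 2 & 0x1f;
}

static unsigned long swp_offset(unsigned long val)
{
        return (val >> 7 & 0x3) | (val >> 9 & 0x7ffffc);
}

int main(void)
{
        unsigned long e = swp_entry(0x1f, 0x123457);

        assert(swp_type(e) == 0x1f);
        assert(swp_offset(e) == 0x123457);
        return 0;
}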
#ifndef __ABI_CSKY_PTRACE_H #define __ABI_CSKY_PTRACE_H diff --git a/arch/csky/abiv2/inc/abi/vdso.h b/arch/csky/abiv2/inc/abi/vdso.h index b60d4a070326..40fd10d893ff 100644 --- a/arch/csky/abiv2/inc/abi/vdso.h +++ b/arch/csky/abiv2/inc/abi/vdso.h @@ -3,21 +3,7 @@ #ifndef __ABI_CSKY_VDSO_H #define __ABI_CSKY_VDSO_H -#include <linux/uaccess.h> +/* movi r7, 173 */ +#define SET_SYSCALL_ID .long 0x008bea07 -static inline int setup_vdso_page(unsigned short *ptr) -{ - int err = 0; - - /* movi r7, 173 */ - err |= __put_user(0xea07, ptr); - err |= __put_user(0x008b, ptr+1); - - /* trap 0 */ - err |= __put_user(0xc000, ptr+2); - err |= __put_user(0x2020, ptr+3); - - return err; -} - -#endif /* __ABI_CSKY_STRING_H */ +#endif /* __ABI_CSKY_VDSO_H */ diff --git a/arch/csky/abiv2/sysdep.h b/arch/csky/abiv2/sysdep.h index bbbedfd34777..61abe9201c50 100644 --- a/arch/csky/abiv2/sysdep.h +++ b/arch/csky/abiv2/sysdep.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __SYSDEP_H #define __SYSDEP_H diff --git a/arch/csky/include/asm/addrspace.h b/arch/csky/include/asm/addrspace.h index d1c2ede692ed..6fc05d44536c 100644 --- a/arch/csky/include/asm/addrspace.h +++ b/arch/csky/include/asm/addrspace.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_ADDRSPACE_H #define __ASM_CSKY_ADDRSPACE_H diff --git a/arch/csky/include/asm/atomic.h b/arch/csky/include/asm/atomic.h deleted file mode 100644 index e369d73b13e3..000000000000 --- a/arch/csky/include/asm/atomic.h +++ /dev/null @@ -1,212 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef __ASM_CSKY_ATOMIC_H -#define __ASM_CSKY_ATOMIC_H - -#include <linux/version.h> -#include <asm/cmpxchg.h> -#include <asm/barrier.h> - -#ifdef CONFIG_CPU_HAS_LDSTEX - -#define __atomic_add_unless __atomic_add_unless -static inline int __atomic_add_unless(atomic_t *v, int a, int u) -{ - unsigned long tmp, ret; - - smp_mb(); - - asm volatile ( - "1: ldex.w %0, (%3) \n" - " mov %1, %0 \n" - " cmpne %0, %4 \n" - " bf 2f \n" - " add %0, %2 \n" - " stex.w %0, (%3) \n" - " bez %0, 1b \n" - "2: \n" - : "=&r" (tmp), "=&r" (ret) - : "r" (a), "r"(&v->counter), "r"(u) - : "memory"); - - if (ret != u) - smp_mb(); - - return ret; -} - -#define ATOMIC_OP(op, c_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ -{ \ - unsigned long tmp; \ - \ - asm volatile ( \ - "1: ldex.w %0, (%2) \n" \ - " " #op " %0, %1 \n" \ - " stex.w %0, (%2) \n" \ - " bez %0, 1b \n" \ - : "=&r" (tmp) \ - : "r" (i), "r"(&v->counter) \ - : "memory"); \ -} - -#define ATOMIC_OP_RETURN(op, c_op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ -{ \ - unsigned long tmp, ret; \ - \ - smp_mb(); \ - asm volatile ( \ - "1: ldex.w %0, (%3) \n" \ - " " #op " %0, %2 \n" \ - " mov %1, %0 \n" \ - " stex.w %0, (%3) \n" \ - " bez %0, 1b \n" \ - : "=&r" (tmp), "=&r" (ret) \ - : "r" (i), "r"(&v->counter) \ - : "memory"); \ - smp_mb(); \ - \ - return ret; \ -} - -#define ATOMIC_FETCH_OP(op, c_op) \ -static inline int atomic_fetch_##op(int i, atomic_t *v) \ -{ \ - unsigned long tmp, ret; \ - \ - smp_mb(); \ - asm volatile ( \ - "1: ldex.w %0, (%3) \n" \ - " mov %1, %0 \n" \ - " " #op " %0, %2 \n" \ - " stex.w %0, (%3) \n" \ - " bez %0, 1b \n" \ - : "=&r" (tmp), "=&r" (ret) \ - : "r" (i), "r"(&v->counter) \ - : "memory"); \ - smp_mb(); \ - \ - return ret; \ -} - -#else /* CONFIG_CPU_HAS_LDSTEX */ - -#include <linux/irqflags.h> - -#define __atomic_add_unless 
__atomic_add_unless -static inline int __atomic_add_unless(atomic_t *v, int a, int u) -{ - unsigned long tmp, ret, flags; - - raw_local_irq_save(flags); - - asm volatile ( - " ldw %0, (%3) \n" - " mov %1, %0 \n" - " cmpne %0, %4 \n" - " bf 2f \n" - " add %0, %2 \n" - " stw %0, (%3) \n" - "2: \n" - : "=&r" (tmp), "=&r" (ret) - : "r" (a), "r"(&v->counter), "r"(u) - : "memory"); - - raw_local_irq_restore(flags); - - return ret; -} - -#define ATOMIC_OP(op, c_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ -{ \ - unsigned long tmp, flags; \ - \ - raw_local_irq_save(flags); \ - \ - asm volatile ( \ - " ldw %0, (%2) \n" \ - " " #op " %0, %1 \n" \ - " stw %0, (%2) \n" \ - : "=&r" (tmp) \ - : "r" (i), "r"(&v->counter) \ - : "memory"); \ - \ - raw_local_irq_restore(flags); \ -} - -#define ATOMIC_OP_RETURN(op, c_op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ -{ \ - unsigned long tmp, ret, flags; \ - \ - raw_local_irq_save(flags); \ - \ - asm volatile ( \ - " ldw %0, (%3) \n" \ - " " #op " %0, %2 \n" \ - " stw %0, (%3) \n" \ - " mov %1, %0 \n" \ - : "=&r" (tmp), "=&r" (ret) \ - : "r" (i), "r"(&v->counter) \ - : "memory"); \ - \ - raw_local_irq_restore(flags); \ - \ - return ret; \ -} - -#define ATOMIC_FETCH_OP(op, c_op) \ -static inline int atomic_fetch_##op(int i, atomic_t *v) \ -{ \ - unsigned long tmp, ret, flags; \ - \ - raw_local_irq_save(flags); \ - \ - asm volatile ( \ - " ldw %0, (%3) \n" \ - " mov %1, %0 \n" \ - " " #op " %0, %2 \n" \ - " stw %0, (%3) \n" \ - : "=&r" (tmp), "=&r" (ret) \ - : "r" (i), "r"(&v->counter) \ - : "memory"); \ - \ - raw_local_irq_restore(flags); \ - \ - return ret; \ -} - -#endif /* CONFIG_CPU_HAS_LDSTEX */ - -#define atomic_add_return atomic_add_return -ATOMIC_OP_RETURN(add, +) -#define atomic_sub_return atomic_sub_return -ATOMIC_OP_RETURN(sub, -) - -#define atomic_fetch_add atomic_fetch_add -ATOMIC_FETCH_OP(add, +) -#define atomic_fetch_sub atomic_fetch_sub -ATOMIC_FETCH_OP(sub, -) -#define atomic_fetch_and atomic_fetch_and -ATOMIC_FETCH_OP(and, &) -#define atomic_fetch_or atomic_fetch_or -ATOMIC_FETCH_OP(or, |) -#define atomic_fetch_xor atomic_fetch_xor -ATOMIC_FETCH_OP(xor, ^) - -#define atomic_and atomic_and -ATOMIC_OP(and, &) -#define atomic_or atomic_or -ATOMIC_OP(or, |) -#define atomic_xor atomic_xor -ATOMIC_OP(xor, ^) - -#undef ATOMIC_FETCH_OP -#undef ATOMIC_OP_RETURN -#undef ATOMIC_OP - -#include <asm-generic/atomic.h> - -#endif /* __ASM_CSKY_ATOMIC_H */ diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h index a430e7fddf35..84fc600c8b45 100644 --- a/arch/csky/include/asm/barrier.h +++ b/arch/csky/include/asm/barrier.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
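The barrier.h rework that follows emits the bar.<before><after>s forms as raw .long words rather than mnemonics (presumably so older assemblers still build the file). A host-side sketch of the encoding, derived from the bit-layout comment in that hunk; 32-bit C-SKY opcodes are stored as two little-endian halfwords, so the .long constant has its 16-bit halves swapped:

#include <assert.h>
#include <stdint.h>

/* |31|30 26|25 21|20 16|15 10|9 5|4 0|
 *  1 10000 00000 00000 100001 00001 0 bw br aw ar   (shareable "s" form) */
static uint32_t bar_word(unsigned bw, unsigned br, unsigned aw, unsigned ar)
{
        uint32_t insn = 0xc0008420u | bw << 3 | br << 2 | aw << 1 | ar;

        /* swap the halves: first halfword lands in the low 16 bits */
        return insn << 16 | insn >> 16;
}

int main(void)
{
        assert(bar_word(0, 0, 0, 1) == 0x8421c000u);  /* __bar_ar()     */
        assert(bar_word(1, 1, 0, 0) == 0x842cc000u);  /* __bar_brw()    */
        assert(bar_word(1, 1, 1, 1) == 0x842fc000u);  /* __bar_brwarw() */
        return 0;
}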
#ifndef __ASM_CSKY_BARRIER_H #define __ASM_CSKY_BARRIER_H @@ -8,6 +7,61 @@ #define nop() asm volatile ("nop\n":::"memory") +#ifdef CONFIG_SMP + +/* + * bar.brwarws: ordering barrier for all load/store instructions + * before/after + * + * |31|30 26|25 21|20 16|15 10|9 5|4 0| + * 1 10000 00000 00000 100001 00001 0 bw br aw ar + * + * b: before + * a: after + * r: read + * w: write + * + * Here are all combinations: + * + * bar.brw + * bar.br + * bar.bw + * bar.arw + * bar.ar + * bar.aw + * bar.brwarw + * bar.brarw + * bar.bwarw + * bar.brwar + * bar.brwaw + * bar.brar + * bar.bwaw + */ +#define __bar_brw() asm volatile (".long 0x842cc000\n":::"memory") +#define __bar_br() asm volatile (".long 0x8424c000\n":::"memory") +#define __bar_bw() asm volatile (".long 0x8428c000\n":::"memory") +#define __bar_arw() asm volatile (".long 0x8423c000\n":::"memory") +#define __bar_ar() asm volatile (".long 0x8421c000\n":::"memory") +#define __bar_aw() asm volatile (".long 0x8422c000\n":::"memory") +#define __bar_brwarw() asm volatile (".long 0x842fc000\n":::"memory") +#define __bar_brarw() asm volatile (".long 0x8427c000\n":::"memory") +#define __bar_bwarw() asm volatile (".long 0x842bc000\n":::"memory") +#define __bar_brwar() asm volatile (".long 0x842dc000\n":::"memory") +#define __bar_brwaw() asm volatile (".long 0x842ec000\n":::"memory") +#define __bar_brar() asm volatile (".long 0x8425c000\n":::"memory") +#define __bar_bwaw() asm volatile (".long 0x842ac000\n":::"memory") + +#define __smp_mb() __bar_brwarw() +#define __smp_rmb() __bar_brar() +#define __smp_wmb() __bar_bwaw() + +#define ACQUIRE_FENCE ".long 0x8427c000\n" +#define __smp_acquire_fence() __bar_brarw() +#define __smp_release_fence() __bar_brwaw() + +#endif /* CONFIG_SMP */ + /* * sync: completion barrier, all sync.xx instructions * guarantee the last response received by bus transaction * made by ld/st instructions * * sync.s: inherit from sync, but also shareable to other cores * sync.i: inherit from sync, but also flush cpu pipeline * sync.is: the same as sync.i + sync.s - * - * bar.brwarw: ordering barrier for all load/store instructions before it - * bar.brwarws: ordering barrier for all load/store instructions before it - * and shareable to other cores - * bar.brar: ordering barrier for all load instructions before it - * bar.brars: ordering barrier for all load instructions before it - * and shareable to other cores - * bar.bwaw: ordering barrier for all store instructions before it - * bar.bwaws: ordering barrier for all store instructions before it - * and shareable to other cores */ +#define mb() asm volatile ("sync\n":::"memory") #ifdef CONFIG_CPU_HAS_CACHEV2 -#define mb() asm volatile ("sync.s\n":::"memory") - -#ifdef CONFIG_SMP -#define __smp_mb() asm volatile ("bar.brwarws\n":::"memory") -#define __smp_rmb() asm volatile ("bar.brars\n":::"memory") -#define __smp_wmb() asm volatile ("bar.bwaws\n":::"memory") -#endif /* CONFIG_SMP */ - -#define sync_is() asm volatile ("sync.is\n":::"memory") - -#else /* !CONFIG_CPU_HAS_CACHEV2 */ -#define mb() asm volatile ("sync\n":::"memory") +/* + * Using three sync.is to prevent speculative PTW + */ +#define sync_is() asm volatile ("sync.is\nsync.is\nsync.is\n":::"memory") #endif #include <asm-generic/barrier.h> diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h index 43b9838bff63..91818787d860 100644 --- a/arch/csky/include/asm/bitops.h +++ b/arch/csky/include/asm/bitops.h @@ -1,5 +1,4 @@ /*
SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_BITOPS_H #define __ASM_CSKY_BITOPS_H diff --git a/arch/csky/include/asm/bug.h b/arch/csky/include/asm/bug.h index 33ebd16b9c78..03f1a5f9184a 100644 --- a/arch/csky/include/asm/bug.h +++ b/arch/csky/include/asm/bug.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_BUG_H #define __ASM_CSKY_BUG_H @@ -21,6 +20,8 @@ do { \ struct pt_regs; void die(struct pt_regs *regs, const char *str); +void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr); + void show_regs(struct pt_regs *regs); void show_code(struct pt_regs *regs); diff --git a/arch/csky/include/asm/cacheflush.h b/arch/csky/include/asm/cacheflush.h index f0b8f25429a2..d0f9eafe8988 100644 --- a/arch/csky/include/asm/cacheflush.h +++ b/arch/csky/include/asm/cacheflush.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_CACHEFLUSH_H #define __ASM_CSKY_CACHEFLUSH_H diff --git a/arch/csky/include/asm/checksum.h b/arch/csky/include/asm/checksum.h index 7685824291b1..aa12ef4b9080 100644 --- a/arch/csky/include/asm/checksum.h +++ b/arch/csky/include/asm/checksum.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_CHECKSUM_H #define __ASM_CSKY_CHECKSUM_H diff --git a/arch/csky/include/asm/clocksource.h b/arch/csky/include/asm/clocksource.h new file mode 100644 index 000000000000..54da0e49efa1 --- /dev/null +++ b/arch/csky/include/asm/clocksource.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_VDSO_CSKY_CLOCKSOURCE_H +#define __ASM_VDSO_CSKY_CLOCKSOURCE_H + +#include <asm/vdso/clocksource.h> + +#endif diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h index 89224530a0ee..dabc8e46ce7b 100644 --- a/arch/csky/include/asm/cmpxchg.h +++ b/arch/csky/include/asm/cmpxchg.h @@ -3,12 +3,12 @@ #ifndef __ASM_CSKY_CMPXCHG_H #define __ASM_CSKY_CMPXCHG_H -#ifdef CONFIG_CPU_HAS_LDSTEX +#ifdef CONFIG_SMP #include <asm/barrier.h> extern void __bad_xchg(void); -#define __xchg(new, ptr, size) \ +#define __xchg_relaxed(new, ptr, size) \ ({ \ __typeof__(ptr) __ptr = (ptr); \ __typeof__(new) __new = (new); \ @@ -16,7 +16,6 @@ extern void __bad_xchg(void); unsigned long tmp; \ switch (size) { \ case 4: \ - smp_mb(); \ asm volatile ( \ "1: ldex.w %0, (%3) \n" \ " mov %1, %2 \n" \ @@ -25,7 +24,6 @@ extern void __bad_xchg(void); : "=&r" (__ret), "=&r" (tmp) \ : "r" (__new), "r"(__ptr) \ :); \ - smp_mb(); \ break; \ default: \ __bad_xchg(); \ @@ -33,9 +31,10 @@ extern void __bad_xchg(void); __ret; \ }) -#define xchg(ptr, x) (__xchg((x), (ptr), sizeof(*(ptr)))) +#define xchg_relaxed(ptr, x) \ + (__xchg_relaxed((x), (ptr), sizeof(*(ptr)))) -#define __cmpxchg(ptr, old, new, size) \ +#define __cmpxchg_relaxed(ptr, old, new, size) \ ({ \ __typeof__(ptr) __ptr = (ptr); \ __typeof__(new) __new = (new); \ @@ -44,7 +43,6 @@ extern void __bad_xchg(void); __typeof__(*(ptr)) __ret; \ switch (size) { \ case 4: \ - smp_mb(); \ asm volatile ( \ "1: ldex.w %0, (%3) \n" \ " cmpne %0, %4 \n" \ @@ -56,7 +54,6 @@ extern void __bad_xchg(void); : "=&r" (__ret), "=&r" (__tmp) \ : "r" (__new), "r"(__ptr), "r"(__old) \ :); \ - smp_mb(); \ break; \ default: \ __bad_xchg(); \ @@ -64,8 +61,18 @@ extern void __bad_xchg(void); __ret; \ }) -#define cmpxchg(ptr, o, n) \ - 
(__cmpxchg((ptr), (o), (n), sizeof(*(ptr)))) +#define cmpxchg_relaxed(ptr, o, n) \ + (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) + +#define cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __smp_release_fence(); \ + __ret = cmpxchg_relaxed(ptr, o, n); \ + __smp_acquire_fence(); \ + __ret; \ +}) + #else #include <asm-generic/cmpxchg.h> #endif diff --git a/arch/csky/include/asm/elf.h b/arch/csky/include/asm/elf.h index eb2cc5a673b5..48b83e283ed4 100644 --- a/arch/csky/include/asm/elf.h +++ b/arch/csky/include/asm/elf.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_ELF_H #define __ASM_CSKY_ELF_H diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h index 4b589cc20900..49a77cbbe2a9 100644 --- a/arch/csky/include/asm/fixmap.h +++ b/arch/csky/include/asm/fixmap.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_FIXMAP_H #define __ASM_CSKY_FIXMAP_H diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h index fae72b0b1374..9b86341731b6 100644 --- a/arch/csky/include/asm/ftrace.h +++ b/arch/csky/include/asm/ftrace.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_FTRACE_H #define __ASM_CSKY_FTRACE_H diff --git a/arch/csky/include/asm/futex.h b/arch/csky/include/asm/futex.h new file mode 100644 index 000000000000..6cfd312723fa --- /dev/null +++ b/arch/csky/include/asm/futex.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_FUTEX_H +#define __ASM_CSKY_FUTEX_H + +#ifndef CONFIG_SMP +#include <asm-generic/futex.h> +#else +#include <linux/atomic.h> +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <linux/errno.h> + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +{ \ + u32 tmp; \ + \ + __atomic_pre_full_fence(); \ + \ + __asm__ __volatile__ ( \ + "1: ldex.w %[ov], %[u] \n" \ + " "insn" \n" \ + "2: stex.w %[t], %[u] \n" \ + " bez %[t], 1b \n" \ + " br 4f \n" \ + "3: mov %[r], %[e] \n" \ + "4: \n" \ + " .section __ex_table,\"a\" \n" \ + " .balign 4 \n" \ + " .long 1b, 3b \n" \ + " .long 2b, 3b \n" \ + " .previous \n" \ + : [r] "+r" (ret), [ov] "=&r" (oldval), \ + [u] "+m" (*uaddr), [t] "=&r" (tmp) \ + : [op] "Jr" (oparg), [e] "jr" (-EFAULT) \ + : "memory"); \ + \ + __atomic_post_full_fence(); \ +} + +static inline int +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +{ + int oldval = 0, ret = 0; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("mov %[t], %[ov]", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("add %[t], %[ov], %[op]", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or %[t], %[ov], %[op]", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("and %[t], %[ov], %[op]", + ret, oldval, uaddr, ~oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor %[t], %[ov], %[op]", + ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + if (!ret) + *oval = oldval; + + return ret; +} + + + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0; + u32 val, tmp; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __atomic_pre_full_fence(); + + 
__asm__ __volatile__ ( + "1: ldex.w %[v], %[u] \n" + " cmpne %[v], %[ov] \n" + " bt 4f \n" + " mov %[t], %[nv] \n" + "2: stex.w %[t], %[u] \n" + " bez %[t], 1b \n" + " br 4f \n" + "3: mov %[r], %[e] \n" + "4: \n" + " .section __ex_table,\"a\" \n" + " .balign 4 \n" + " .long 1b, 3b \n" + " .long 2b, 3b \n" + " .previous \n" + : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), + [t] "=&r" (tmp) + : [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "Jr" (-EFAULT) + : "memory"); + + __atomic_post_full_fence(); + + *uval = val; + return ret; +} + +#endif /* CONFIG_SMP */ +#endif /* __ASM_CSKY_FUTEX_H */ diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h index 1f4ed3f4c0d9..1ed810effb3d 100644 --- a/arch/csky/include/asm/highmem.h +++ b/arch/csky/include/asm/highmem.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_HIGHMEM_H #define __ASM_CSKY_HIGHMEM_H diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h index e909587f24c5..f82654053dc0 100644 --- a/arch/csky/include/asm/io.h +++ b/arch/csky/include/asm/io.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_IO_H #define __ASM_CSKY_IO_H diff --git a/arch/csky/include/asm/memory.h b/arch/csky/include/asm/memory.h index a65c6759f537..d12179801ae3 100644 --- a/arch/csky/include/asm/memory.h +++ b/arch/csky/include/asm/memory.h @@ -10,7 +10,7 @@ #define FIXADDR_TOP _AC(0xffffc000, UL) #define PKMAP_BASE _AC(0xff800000, UL) -#define VMALLOC_START _AC(0xc0008000, UL) +#define VMALLOC_START (PAGE_OFFSET + LOWMEM_LIMIT + (PAGE_SIZE * 8)) #define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2)) #ifdef CONFIG_HAVE_TCM diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h index 26fbb1d15df0..d78321901d06 100644 --- a/arch/csky/include/asm/mmu.h +++ b/arch/csky/include/asm/mmu.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_MMU_H #define __ASM_CSKY_MMU_H diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h index b227d29393a8..95d99b30792c 100644 --- a/arch/csky/include/asm/mmu_context.h +++ b/arch/csky/include/asm/mmu_context.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_MMU_CONTEXT_H #define __ASM_CSKY_MMU_CONTEXT_H @@ -14,12 +13,6 @@ #include <linux/sched.h> #include <abi/ckmmu.h> -#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ - setup_pgd(__pa(pgd), false) - -#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \ - setup_pgd(__pa(pgd), true) - #define ASID_MASK ((1 << CONFIG_CPU_ASID_BITS) - 1) #define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK) @@ -36,8 +29,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, if (prev != next) check_and_switch_context(next, cpu); - TLBMISS_HANDLER_SETUP_PGD(next->pgd); - write_mmu_entryhi(next->context.asid.counter); + setup_pgd(next->pgd, next->context.asid.counter); flush_icache_deferred(next); } diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h index 9b98bf31d57c..3b91fc3cf36f 100644 --- a/arch/csky/include/asm/page.h +++ b/arch/csky/include/asm/page.h @@ -24,7 +24,7 @@ * address region. We use them mapping kernel 1GB direct-map address area and * for more than 1GB of memory we use highmem. 
*/ -#define PAGE_OFFSET 0x80000000 +#define PAGE_OFFSET CONFIG_PAGE_OFFSET #define SSEG_SIZE 0x20000000 #define LOWMEM_LIMIT (SSEG_SIZE * 2) diff --git a/arch/csky/include/asm/perf_event.h b/arch/csky/include/asm/perf_event.h index 572093e11001..249905d8a4e8 100644 --- a/arch/csky/include/asm/perf_event.h +++ b/arch/csky/include/asm/perf_event.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_PERF_EVENT_H #define __ASM_CSKY_PERF_EVENT_H diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h index d58d8146b729..cd211aabbefd 100644 --- a/arch/csky/include/asm/pgalloc.h +++ b/arch/csky/include/asm/pgalloc.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_PGALLOC_H #define __ASM_CSKY_PGALLOC_H @@ -71,7 +70,7 @@ do { \ } while (0) extern void pagetable_init(void); -extern void pre_mmu_init(void); +extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn); extern void pre_trap_init(void); #endif /* __ASM_CSKY_PGALLOC_H */ diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index 2002cb7f1053..0d60367b6bfa 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_PGTABLE_H #define __ASM_CSKY_PGTABLE_H @@ -14,7 +13,7 @@ #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) +#define USER_PTRS_PER_PGD (PAGE_OFFSET/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0UL /* @@ -34,23 +33,13 @@ #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) #define pte_clear(mm, addr, ptep) set_pte((ptep), \ - (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0))) + (((unsigned int) addr >= PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0))) #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) #define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ | pgprot_val(prot)) -#define __READABLE (_PAGE_READ | _PAGE_VALID | _PAGE_ACCESSED) -#define __WRITEABLE (_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED) - -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \ - _CACHE_MASK) - -#define __swp_type(x) (((x).val >> 4) & 0xff) -#define __swp_offset(x) ((x).val >> 12) -#define __swp_entry(type, offset) ((swp_entry_t) {((type) << 4) | \ - ((offset) << 12) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) @@ -59,41 +48,52 @@ pgprot_val(pgprot)) /* - * CSKY can't do page protection for execute, and considers that the same like - * read. Also, write permissions imply read permissions. This is the closest - * we can get by reasonable means.. + * C-SKY only has VALID and DIRTY bits in hardware. So we need to use the + * two bits to emulate PRESENT, READ, WRITE, EXEC, MODIFIED, ACCESSED.
*/ -#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHED) -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ +#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) + +#define PAGE_NONE __pgprot(_PAGE_PROT_NONE) +#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ | \ _CACHE_CACHED) -#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED) -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED) -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ - _PAGE_GLOBAL | _CACHE_CACHED) -#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ +#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE | \ _CACHE_CACHED) +#define PAGE_SHARED PAGE_WRITE + +#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \ + _PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \ + _PAGE_GLOBAL | \ + _CACHE_CACHED) + +#define _PAGE_IOREMAP (_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \ + _PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \ + _PAGE_GLOBAL | \ + _CACHE_UNCACHED | _PAGE_SO) + +#define _PAGE_CHG_MASK (~(unsigned long) \ + (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _CACHE_MASK | _PAGE_GLOBAL)) -#define _PAGE_IOREMAP \ - (_PAGE_PRESENT | __READABLE | __WRITEABLE | _PAGE_GLOBAL | \ - _CACHE_UNCACHED | _PAGE_SO) +#define MAX_SWAPFILES_CHECK() \ + BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5) #define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY +#define __P001 PAGE_READ +#define __P010 PAGE_READ +#define __P011 PAGE_READ +#define __P100 PAGE_READ +#define __P101 PAGE_READ +#define __P110 PAGE_READ +#define __P111 PAGE_READ #define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED +#define __S001 PAGE_READ +#define __S010 PAGE_WRITE +#define __S011 PAGE_WRITE +#define __S100 PAGE_READ +#define __S101 PAGE_READ +#define __S110 PAGE_WRITE +#define __S111 PAGE_WRITE extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index 4800f6563abb..9e933021fe8e 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_PROCESSOR_H #define __ASM_CSKY_PROCESSOR_H @@ -28,7 +27,7 @@ extern struct cpuinfo_csky cpu_data[]; * for a 64 bit kernel expandable to 8192EB, of which the current CSKY * implementations will "only" be able to use 1TB ... */ -#define TASK_SIZE 0x7fff8000UL +#define TASK_SIZE (PAGE_OFFSET - (PAGE_SIZE * 8)) #ifdef __KERNEL__ #define STACK_TOP TASK_SIZE diff --git a/arch/csky/include/asm/ptrace.h b/arch/csky/include/asm/ptrace.h index 91ceb1b454c9..4202aab6df42 100644 --- a/arch/csky/include/asm/ptrace.h +++ b/arch/csky/include/asm/ptrace.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
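A quick host-side check (standalone, not from the patch) that the two formulas introduced above reproduce the old hard-coded constants in the default 2:2 split, i.e. PAGE_OFFSET 0x80000000, SSEG_SIZE 0x20000000 and 4KB pages:

#include <assert.h>

int main(void)
{
        unsigned long page_offset = 0x80000000UL;
        unsigned long sseg_size   = 0x20000000UL;
        unsigned long page_size   = 4096UL;

        /* VMALLOC_START = PAGE_OFFSET + LOWMEM_LIMIT + 8 pages */
        assert(page_offset + 2 * sseg_size + 8 * page_size == 0xc0008000UL);
        /* TASK_SIZE = PAGE_OFFSET - 8 pages */
        assert(page_offset - 8 * page_size == 0x7fff8000UL);
        return 0;
}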
#ifndef __ASM_CSKY_PTRACE_H #define __ASM_CSKY_PTRACE_H diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h index 79ede9b1a646..589e8321dc14 100644 --- a/arch/csky/include/asm/segment.h +++ b/arch/csky/include/asm/segment.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_SEGMENT_H #define __ASM_CSKY_SEGMENT_H @@ -10,7 +9,7 @@ typedef struct { #define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF }) -#define USER_DS ((mm_segment_t) { 0x80000000UL }) +#define USER_DS ((mm_segment_t) { PAGE_OFFSET }) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) diff --git a/arch/csky/include/asm/shmparam.h b/arch/csky/include/asm/shmparam.h index efafe4c79fed..2fe6cea0dae9 100644 --- a/arch/csky/include/asm/shmparam.h +++ b/arch/csky/include/asm/shmparam.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_SHMPARAM_H #define __ASM_CSKY_SHMPARAM_H diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h index 7cf3f2b34cea..69f5aa249c5f 100644 --- a/arch/csky/include/asm/spinlock.h +++ b/arch/csky/include/asm/spinlock.h @@ -6,8 +6,6 @@ #include <linux/spinlock_types.h> #include <asm/barrier.h> -#ifdef CONFIG_QUEUED_RWLOCKS - /* * Ticket-based spin-locking. */ @@ -88,169 +86,4 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock) #include <asm/qrwlock.h> -/* See include/linux/spinlock.h */ -#define smp_mb__after_spinlock() smp_mb() - -#else /* CONFIG_QUEUED_RWLOCKS */ - -/* - * Test-and-set spin-locking. - */ -static inline void arch_spin_lock(arch_spinlock_t *lock) -{ - u32 *p = &lock->lock; - u32 tmp; - - asm volatile ( - "1: ldex.w %0, (%1) \n" - " bnez %0, 1b \n" - " movi %0, 1 \n" - " stex.w %0, (%1) \n" - " bez %0, 1b \n" - : "=&r" (tmp) - : "r"(p) - : "cc"); - smp_mb(); -} - -static inline void arch_spin_unlock(arch_spinlock_t *lock) -{ - smp_mb(); - WRITE_ONCE(lock->lock, 0); -} - -static inline int arch_spin_trylock(arch_spinlock_t *lock) -{ - u32 *p = &lock->lock; - u32 tmp; - - asm volatile ( - "1: ldex.w %0, (%1) \n" - " bnez %0, 2f \n" - " movi %0, 1 \n" - " stex.w %0, (%1) \n" - " bez %0, 1b \n" - " movi %0, 0 \n" - "2: \n" - : "=&r" (tmp) - : "r"(p) - : "cc"); - - if (!tmp) - smp_mb(); - - return !tmp; -} - -#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0) - -/* - * read lock/unlock/trylock - */ -static inline void arch_read_lock(arch_rwlock_t *lock) -{ - u32 *p = &lock->lock; - u32 tmp; - - asm volatile ( - "1: ldex.w %0, (%1) \n" - " blz %0, 1b \n" - " addi %0, 1 \n" - " stex.w %0, (%1) \n" - " bez %0, 1b \n" - : "=&r" (tmp) - : "r"(p) - : "cc"); - smp_mb(); -} - -static inline void arch_read_unlock(arch_rwlock_t *lock) -{ - u32 *p = &lock->lock; - u32 tmp; - - smp_mb(); - asm volatile ( - "1: ldex.w %0, (%1) \n" - " subi %0, 1 \n" - " stex.w %0, (%1) \n" - " bez %0, 1b \n" - : "=&r" (tmp) - : "r"(p) - : "cc"); -} - -static inline int arch_read_trylock(arch_rwlock_t *lock) -{ - u32 *p = &lock->lock; - u32 tmp; - - asm volatile ( - "1: ldex.w %0, (%1) \n" - " blz %0, 2f \n" - " addi %0, 1 \n" - " stex.w %0, (%1) \n" - " bez %0, 1b \n" - " movi %0, 0 \n" - "2: \n" - : "=&r" (tmp) - : "r"(p) - : "cc"); - - if (!tmp) - smp_mb(); - - return !tmp; -} - -/* - * write lock/unlock/trylock - */ -static inline void arch_write_lock(arch_rwlock_t *lock) 
-{ - u32 *p = &lock->lock; - u32 tmp; - - asm volatile ( - "1: ldex.w %0, (%1) \n" - " bnez %0, 1b \n" - " subi %0, 1 \n" - " stex.w %0, (%1) \n" - " bez %0, 1b \n" - : "=&r" (tmp) - : "r"(p) - : "cc"); - smp_mb(); -} - -static inline void arch_write_unlock(arch_rwlock_t *lock) -{ - smp_mb(); - WRITE_ONCE(lock->lock, 0); -} - -static inline int arch_write_trylock(arch_rwlock_t *lock) -{ - u32 *p = &lock->lock; - u32 tmp; - - asm volatile ( - "1: ldex.w %0, (%1) \n" - " bnez %0, 2f \n" - " subi %0, 1 \n" - " stex.w %0, (%1) \n" - " bez %0, 1b \n" - " movi %0, 0 \n" - "2: \n" - : "=&r" (tmp) - : "r"(p) - : "cc"); - - if (!tmp) - smp_mb(); - - return !tmp; -} - -#endif /* CONFIG_QUEUED_RWLOCKS */ #endif /* __ASM_CSKY_SPINLOCK_H */ diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h index 88b82438b182..8ff0f6ff3a00 100644 --- a/arch/csky/include/asm/spinlock_types.h +++ b/arch/csky/include/asm/spinlock_types.h @@ -22,16 +22,6 @@ typedef struct { #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } -#ifdef CONFIG_QUEUED_RWLOCKS #include <asm-generic/qrwlock_types.h> -#else /* CONFIG_NR_CPUS > 2 */ - -typedef struct { - u32 lock; -} arch_rwlock_t; - -#define __ARCH_RW_LOCK_UNLOCKED { 0 } - -#endif /* CONFIG_QUEUED_RWLOCKS */ #endif /* __ASM_CSKY_SPINLOCK_TYPES_H */ diff --git a/arch/csky/include/asm/string.h b/arch/csky/include/asm/string.h index 73142de18355..a0d81e9d6b8f 100644 --- a/arch/csky/include/asm/string.h +++ b/arch/csky/include/asm/string.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef _CSKY_STRING_MM_H_ #define _CSKY_STRING_MM_H_ diff --git a/arch/csky/include/asm/switch_to.h b/arch/csky/include/asm/switch_to.h index 35a39e88933d..731e466415e2 100644 --- a/arch/csky/include/asm/switch_to.h +++ b/arch/csky/include/asm/switch_to.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_SWITCH_TO_H #define __ASM_CSKY_SWITCH_TO_H diff --git a/arch/csky/include/asm/syscalls.h b/arch/csky/include/asm/syscalls.h index 5d48e5e0082e..ea9ce6138b9b 100644 --- a/arch/csky/include/asm/syscalls.h +++ b/arch/csky/include/asm/syscalls.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_SYSCALLS_H #define __ASM_CSKY_SYSCALLS_H diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h index 21456a3737c2..8c349a8f904d 100644 --- a/arch/csky/include/asm/thread_info.h +++ b/arch/csky/include/asm/thread_info.h @@ -1,12 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef _ASM_CSKY_THREAD_INFO_H #define _ASM_CSKY_THREAD_INFO_H #ifndef __ASSEMBLY__ -#include <linux/version.h> #include <asm/types.h> #include <asm/page.h> #include <asm/processor.h> diff --git a/arch/csky/include/asm/tlb.h b/arch/csky/include/asm/tlb.h index fdff9b8d70c8..3498e65f59f8 100644 --- a/arch/csky/include/asm/tlb.h +++ b/arch/csky/include/asm/tlb.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
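With the test-and-set variants deleted above, csky keeps only the ticket spinlock (plus generic qrwlocks). A hedged C11 model of the ticket idiom, not the kernel implementation itself: take a ticket, then spin until the owner counter reaches it, so waiters are served in FIFO order:

#include <stdatomic.h>

struct ticket_lock { atomic_uint next, owner; };

static void ticket_lock_acquire(struct ticket_lock *l)
{
        unsigned int me = atomic_fetch_add_explicit(&l->next, 1,
                                                    memory_order_relaxed);

        while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
                ; /* cpu_relax() in the kernel version */
}

static void ticket_lock_release(struct ticket_lock *l)
{
        atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

int main(void)
{
        struct ticket_lock l = { 0, 0 };

        ticket_lock_acquire(&l);
        ticket_lock_release(&l);
        return 0;
}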
#ifndef __ASM_CSKY_TLB_H #define __ASM_CSKY_TLB_H diff --git a/arch/csky/include/asm/tlbflush.h b/arch/csky/include/asm/tlbflush.h index 6845b0667703..407160b4fde7 100644 --- a/arch/csky/include/asm/tlbflush.h +++ b/arch/csky/include/asm/tlbflush.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_TLBFLUSH_H #define __ASM_TLBFLUSH_H diff --git a/arch/csky/include/asm/traps.h b/arch/csky/include/asm/traps.h index 1c081805b962..421a4195e2fe 100644 --- a/arch/csky/include/asm/traps.h +++ b/arch/csky/include/asm/traps.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_TRAPS_H #define __ASM_CSKY_TRAPS_H diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h index 1633ffe5ae15..3dec272e1fa3 100644 --- a/arch/csky/include/asm/uaccess.h +++ b/arch/csky/include/asm/uaccess.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_UACCESS_H #define __ASM_CSKY_UACCESS_H diff --git a/arch/csky/include/asm/unistd.h b/arch/csky/include/asm/unistd.h index da7a18295615..9cf97de9a26d 100644 --- a/arch/csky/include/asm/unistd.h +++ b/arch/csky/include/asm/unistd.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <uapi/asm/unistd.h> diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h index d963d691f3a1..eb5142f9c564 100644 --- a/arch/csky/include/asm/vdso.h +++ b/arch/csky/include/asm/vdso.h @@ -3,10 +3,25 @@ #ifndef __ASM_CSKY_VDSO_H #define __ASM_CSKY_VDSO_H -#include <abi/vdso.h> +#include <linux/types.h> -struct csky_vdso { - unsigned short rt_signal_retcode[4]; +#ifndef GENERIC_TIME_VSYSCALL +struct vdso_data { }; +#endif + +/* + * The VDSO symbols are mapped into Linux so we can just use regular symbol + * addressing to get their offsets in userspace. The symbols are mapped at an + * offset of 0, but since the linker must support setting weak undefined + * symbols to the absolute address 0 it also happens to support other low + * addresses even when the code model suggests those low addresses would not + * otherwise be available.
+ */ +#define VDSO_SYMBOL(base, name) \ +({ \ + extern const char __vdso_##name[]; \ + (void __user *)((unsigned long)(base) + __vdso_##name); \ +}) #endif /* __ASM_CSKY_VDSO_H */ diff --git a/arch/csky/include/asm/vdso/clocksource.h b/arch/csky/include/asm/vdso/clocksource.h new file mode 100644 index 000000000000..dfca7b4724b7 --- /dev/null +++ b/arch/csky/include/asm/vdso/clocksource.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_VDSO_CSKY_CLOCKSOURCE_H +#define __ASM_VDSO_CSKY_CLOCKSOURCE_H + +#define VDSO_ARCH_CLOCKMODES \ + VDSO_CLOCKMODE_ARCHTIMER + +#endif /* __ASM_VDSO_CSKY_CLOCKSOURCE_H */ diff --git a/arch/csky/include/asm/vdso/gettimeofday.h b/arch/csky/include/asm/vdso/gettimeofday.h new file mode 100644 index 000000000000..6c4f1446944f --- /dev/null +++ b/arch/csky/include/asm/vdso/gettimeofday.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_VDSO_CSKY_GETTIMEOFDAY_H +#define __ASM_VDSO_CSKY_GETTIMEOFDAY_H + +#ifndef __ASSEMBLY__ + +#include <asm/barrier.h> +#include <asm/unistd.h> +#include <abi/regdef.h> +#include <uapi/linux/time.h> + +#define VDSO_HAS_CLOCK_GETRES 1 + +static __always_inline +int gettimeofday_fallback(struct __kernel_old_timeval *_tv, + struct timezone *_tz) +{ + register struct __kernel_old_timeval *tv asm("a0") = _tv; + register struct timezone *tz asm("a1") = _tz; + register long ret asm("a0"); + register long nr asm(syscallid) = __NR_gettimeofday; + + asm volatile ("trap 0\n" + : "=r" (ret) + : "r"(tv), "r"(tz), "r"(nr) + : "memory"); + + return ret; +} + +static __always_inline +long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct __kernel_timespec *ts asm("a1") = _ts; + register long ret asm("a0"); + register long nr asm(syscallid) = __NR_clock_gettime64; + + asm volatile ("trap 0\n" + : "=r" (ret) + : "r"(clkid), "r"(ts), "r"(nr) + : "memory"); + + return ret; +} + +static __always_inline +long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct old_timespec32 *ts asm("a1") = _ts; + register long ret asm("a0"); + register long nr asm(syscallid) = __NR_clock_gettime; + + asm volatile ("trap 0\n" + : "=r" (ret) + : "r"(clkid), "r"(ts), "r"(nr) + : "memory"); + + return ret; +} + +static __always_inline +int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct __kernel_timespec *ts asm("a1") = _ts; + register long ret asm("a0"); + register long nr asm(syscallid) = __NR_clock_getres_time64; + + asm volatile ("trap 0\n" + : "=r" (ret) + : "r"(clkid), "r"(ts), "r"(nr) + : "memory"); + + return ret; +} + +static __always_inline +int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct old_timespec32 *ts asm("a1") = _ts; + register long ret asm("a0"); + register long nr asm(syscallid) = __NR_clock_getres; + + asm volatile ("trap 0\n" + : "=r" (ret) + : "r"(clkid), "r"(ts), "r"(nr) + : "memory"); + + return ret; +} + +uint64_t csky_pmu_read_cc(void); +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, + const struct vdso_data *vd) +{ +#ifdef CONFIG_CSKY_PMU_V1 + return csky_pmu_read_cc(); +#else + return 0; +#endif +} + +static __always_inline const struct vdso_data *__arch_get_vdso_data(void) +{ + return _vdso_data; +} + +#endif /* 
!__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_CSKY_GETTIMEOFDAY_H */ diff --git a/arch/csky/include/asm/vdso/processor.h b/arch/csky/include/asm/vdso/processor.h new file mode 100644 index 000000000000..39a6b561d0cc --- /dev/null +++ b/arch/csky/include/asm/vdso/processor.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_VDSO_CSKY_PROCESSOR_H +#define __ASM_VDSO_CSKY_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +#define cpu_relax() barrier() + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_VDSO_CSKY_PROCESSOR_H */ diff --git a/arch/csky/include/asm/vdso/vsyscall.h b/arch/csky/include/asm/vdso/vsyscall.h new file mode 100644 index 000000000000..c276211a7c4d --- /dev/null +++ b/arch/csky/include/asm/vdso/vsyscall.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_VDSO_CSKY_VSYSCALL_H +#define __ASM_VDSO_CSKY_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include <vdso/datapage.h> + +extern struct vdso_data *vdso_data; + +static __always_inline struct vdso_data *__csky_get_k_vdso_data(void) +{ + return vdso_data; +} +#define __arch_get_k_vdso_data __csky_get_k_vdso_data + +#include <asm-generic/vdso/vsyscall.h> + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_CSKY_VSYSCALL_H */ diff --git a/arch/csky/include/uapi/asm/byteorder.h b/arch/csky/include/uapi/asm/byteorder.h index d150cd664873..1aedd513b65a 100644 --- a/arch/csky/include/uapi/asm/byteorder.h +++ b/arch/csky/include/uapi/asm/byteorder.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_BYTEORDER_H #define __ASM_CSKY_BYTEORDER_H diff --git a/arch/csky/include/uapi/asm/perf_regs.h b/arch/csky/include/uapi/asm/perf_regs.h index 49d4e147a559..d0a8ac6a1b77 100644 --- a/arch/csky/include/uapi/asm/perf_regs.h +++ b/arch/csky/include/uapi/asm/perf_regs.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. #ifndef _ASM_CSKY_PERF_REGS_H #define _ASM_CSKY_PERF_REGS_H diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h index 66b2268e324e..3be9c14334a6 100644 --- a/arch/csky/include/uapi/asm/ptrace.h +++ b/arch/csky/include/uapi/asm/ptrace.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef _CSKY_PTRACE_H #define _CSKY_PTRACE_H diff --git a/arch/csky/include/uapi/asm/sigcontext.h b/arch/csky/include/uapi/asm/sigcontext.h index 670c020f2cb8..859afb602477 100644 --- a/arch/csky/include/uapi/asm/sigcontext.h +++ b/arch/csky/include/uapi/asm/sigcontext.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_SIGCONTEXT_H #define __ASM_CSKY_SIGCONTEXT_H diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h index ba4018929733..7ff6a2466af1 100644 --- a/arch/csky/include/uapi/asm/unistd.h +++ b/arch/csky/include/uapi/asm/unistd.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
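The vsyscall glue above exports vdso_data so the generic vDSO can publish time to userspace. Readers consume the page with a sequence-count retry loop; a hedged, heavily simplified model of that protocol (the real reader lives in lib/vdso and uses the generic vdso_data layout, not this toy struct):

#include <stdatomic.h>

struct vdso_time { atomic_uint seq; unsigned long sec, nsec; };

static void vdso_read(struct vdso_time *vd,
                      unsigned long *sec, unsigned long *nsec)
{
        unsigned int seq;

        do {
                /* odd count: an update is in progress, wait it out */
                while ((seq = atomic_load_explicit(&vd->seq,
                                memory_order_acquire)) & 1)
                        ;
                *sec  = vd->sec;
                *nsec = vd->nsec;
                atomic_thread_fence(memory_order_acquire);
                /* retry if the writer bumped the count mid-read */
        } while (seq != atomic_load_explicit(&vd->seq,
                                memory_order_relaxed));
}

int main(void)
{
        struct vdso_time t = { 0, 1, 2 };
        unsigned long s, ns;

        vdso_read(&t, &s, &ns);
        return 0;
}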
#define __ARCH_WANT_STAT64 #define __ARCH_WANT_NEW_STAT diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile index 37f37c0e934a..6c0f36010ed0 100644 --- a/arch/csky/kernel/Makefile +++ b/arch/csky/kernel/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only extra-y := head.o vmlinux.lds -obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o +obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o vdso/ obj-y += power.o syscall.o syscall_table.o setup.o obj-y += process.o cpu-probe.o ptrace.o stacktrace.o obj-y += probes/ diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S index 3821ef9b7567..e73e548f7855 100644 --- a/arch/csky/kernel/atomic.S +++ b/arch/csky/kernel/atomic.S @@ -14,6 +14,10 @@ */ ENTRY(csky_cmpxchg) USPTOKSP + + RD_MEH a3 + WR_MEH a3 + mfcr a3, epc addi a3, TRAP0_SIZE @@ -36,11 +40,11 @@ ENTRY(csky_cmpxchg) 2: sync.is #else -1: +GLOBAL(csky_cmpxchg_ldw) ldw a3, (a2) cmpne a0, a3 bt16 3f -2: +GLOBAL(csky_cmpxchg_stw) stw a1, (a2) 3: #endif @@ -55,19 +59,3 @@ ENTRY(csky_cmpxchg) KSPTOUSP rte END(csky_cmpxchg) - -#ifndef CONFIG_CPU_HAS_LDSTEX -/* - * Called from tlbmodified exception - */ -ENTRY(csky_cmpxchg_fixup) - mfcr a0, epc - lrw a1, 2b - cmpne a1, a0 - bt 1f - subi a1, (2b - 1b) - stw a1, (sp, LSAVE_PC) -1: - rts -END(csky_cmpxchg_fixup) -#endif diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S index 5a5cabd076e1..c1bd7a6b4ab6 100644 --- a/arch/csky/kernel/entry.S +++ b/arch/csky/kernel/entry.S @@ -13,10 +13,6 @@ #include <asm/page.h> #include <asm/thread_info.h> -#define PTE_INDX_MSK 0xffc -#define PTE_INDX_SHIFT 10 -#define _PGDIR_SHIFT 22 - .macro zero_fp #ifdef CONFIG_STACKTRACE movi r8, 0 @@ -41,108 +37,15 @@ #endif .endm -.macro tlbop_begin name, val0, val1, val2 -ENTRY(csky_\name) - mtcr a3, ss2 - mtcr r6, ss3 - mtcr a2, ss4 - - RD_PGDR r6 - RD_MEH a3 -#ifdef CONFIG_CPU_HAS_TLBI - tlbi.vaas a3 - sync.is - - btsti a3, 31 - bf 1f - RD_PGDR_K r6 -1: -#else - bgeni a2, 31 - WR_MCIR a2 - bgeni a2, 25 - WR_MCIR a2 -#endif - bclri r6, 0 - lrw a2, va_pa_offset - ld.w a2, (a2, 0) - subu r6, a2 - bseti r6, 31 - - mov a2, a3 - lsri a2, _PGDIR_SHIFT - lsli a2, 2 - addu r6, a2 - ldw r6, (r6) - - lrw a2, va_pa_offset - ld.w a2, (a2, 0) - subu r6, a2 - bseti r6, 31 - - lsri a3, PTE_INDX_SHIFT - lrw a2, PTE_INDX_MSK - and a3, a2 - addu r6, a3 - ldw a3, (r6) - - movi a2, (_PAGE_PRESENT | \val0) - and a3, a2 - cmpne a3, a2 - bt \name - - /* First read/write the page, just update the flags */ - ldw a3, (r6) - bgeni a2, PAGE_VALID_BIT - bseti a2, PAGE_ACCESSED_BIT - bseti a2, \val1 - bseti a2, \val2 - or a3, a2 - stw a3, (r6) - - /* Some cpu tlb-hardrefill bypass the cache */ -#ifdef CONFIG_CPU_NEED_TLBSYNC - movi a2, 0x22 - bseti a2, 6 - mtcr r6, cr22 - mtcr a2, cr17 - sync -#endif - - mfcr a3, ss2 - mfcr r6, ss3 - mfcr a2, ss4 - rte -\name: - mfcr a3, ss2 - mfcr r6, ss3 - mfcr a2, ss4 +.text +ENTRY(csky_pagefault) SAVE_ALL 0 -.endm -.macro tlbop_end is_write zero_fp context_tracking - RD_MEH a2 - psrset ee, ie + psrset ee mov a0, sp - movi a1, \is_write jbsr do_page_fault jmpi ret_from_exception -.endm - -.text - -tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT -tlbop_end 0 - -tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT -tlbop_end 1 - -tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT -#ifndef CONFIG_CPU_HAS_LDSTEX -jbsr csky_cmpxchg_fixup -#endif -tlbop_end 1 ENTRY(csky_systemcall) SAVE_ALL TRAP0_SIZE @@ -314,6 +217,9 @@ ENTRY(csky_trap) 
ENTRY(csky_get_tls) USPTOKSP + RD_MEH a0 + WR_MEH a0 + /* increase epc for continue */ mfcr a0, epc addi a0, TRAP0_SIZE diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S index 17ed9d250480..7e3e4f15b052 100644 --- a/arch/csky/kernel/head.S +++ b/arch/csky/kernel/head.S @@ -21,10 +21,16 @@ END(_start) ENTRY(_start_smp_secondary) SETUP_MMU - /* copy msa1 from CPU0 */ - lrw r6, secondary_msa1 +#ifdef CONFIG_PAGE_OFFSET_80000000 + lrw r6, secondary_msa1 ld.w r6, (r6, 0) mtcr r6, cr<31, 15> +#endif + + lrw r6, secondary_pgd + ld.w r6, (r6, 0) + mtcr r6, cr<28, 15> + mtcr r6, cr<29, 15> /* set stack point */ lrw r6, secondary_stack diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c index 1a29f1157449..e5f18420ce64 100644 --- a/arch/csky/kernel/perf_event.c +++ b/arch/csky/kernel/perf_event.c @@ -87,7 +87,7 @@ static int csky_pmu_irq; }) /* cycle counter */ -static uint64_t csky_pmu_read_cc(void) +uint64_t csky_pmu_read_cc(void) { uint32_t lo, hi, tmp; uint64_t result; @@ -1319,7 +1319,7 @@ int csky_pmu_device_probe(struct platform_device *pdev, pr_notice("[perf] PMU request irq fail!\n"); } - ret = cpuhp_setup_state(CPUHP_AP_PERF_ONLINE, "AP_PERF_ONLINE", + ret = cpuhp_setup_state(CPUHP_AP_PERF_CSKY_ONLINE, "AP_PERF_ONLINE", csky_pmu_starting_cpu, csky_pmu_dying_cpu); if (ret) { diff --git a/arch/csky/kernel/probes/simulate-insn.c b/arch/csky/kernel/probes/simulate-insn.c index 4e464fed52ec..d6e8d092c9b7 100644 --- a/arch/csky/kernel/probes/simulate-insn.c +++ b/arch/csky/kernel/probes/simulate-insn.c @@ -274,9 +274,9 @@ void __kprobes simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs) { unsigned long tmp = opcode & 0x1f; - unsigned long val; + long val; - csky_insn_reg_get_val(regs, tmp, &val); + csky_insn_reg_get_val(regs, tmp, (unsigned long *)&val); val -= 1; @@ -286,7 +286,7 @@ simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs) } else instruction_pointer_set(regs, addr + 4); - csky_insn_reg_set_val(regs, tmp, val); + csky_insn_reg_set_val(regs, tmp, (unsigned long)val); } void __kprobes @@ -297,13 +297,11 @@ simulate_bhsz32(u32 opcode, long addr, struct pt_regs *regs) csky_insn_reg_get_val(regs, tmp, &val); - if (val >= 0) { + if ((long) val >= 0) { instruction_pointer_set(regs, addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); } else instruction_pointer_set(regs, addr + 4); - - csky_insn_reg_set_val(regs, tmp, val); } void __kprobes @@ -314,13 +312,11 @@ simulate_bhz32(u32 opcode, long addr, struct pt_regs *regs) csky_insn_reg_get_val(regs, tmp, &val); - if (val > 0) { + if ((long) val > 0) { instruction_pointer_set(regs, addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); } else instruction_pointer_set(regs, addr + 4); - - csky_insn_reg_set_val(regs, tmp, val); } void __kprobes @@ -331,13 +327,11 @@ simulate_blsz32(u32 opcode, long addr, struct pt_regs *regs) csky_insn_reg_get_val(regs, tmp, &val); - if (val <= 0) { + if ((long) val <= 0) { instruction_pointer_set(regs, addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); } else instruction_pointer_set(regs, addr + 4); - - csky_insn_reg_set_val(regs, tmp, val); } void __kprobes @@ -348,13 +342,11 @@ simulate_blz32(u32 opcode, long addr, struct pt_regs *regs) csky_insn_reg_get_val(regs, tmp, &val); - if (val < 0) { + if ((long) val < 0) { instruction_pointer_set(regs, addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); } else instruction_pointer_set(regs, addr + 4); - - csky_insn_reg_set_val(regs, tmp, val); } void __kprobes diff --git 
a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c index 69af6bc87e64..3d0ca22cd0e2 100644 --- a/arch/csky/kernel/process.c +++ b/arch/csky/kernel/process.c @@ -49,7 +49,7 @@ int copy_thread(unsigned long clone_flags, /* setup thread.sp for switch_to !!! */ p->thread.sp = (unsigned long)childstack; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); childstack->r15 = (unsigned long) ret_from_kernel_thread; childstack->r10 = kthread_arg; diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c index d822144906ac..0105ac81b432 100644 --- a/arch/csky/kernel/ptrace.c +++ b/arch/csky/kernel/ptrace.c @@ -22,6 +22,7 @@ #include <asm/asm-offsets.h> #include <abi/regdef.h> +#include <abi/ckmmu.h> #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> @@ -83,7 +84,7 @@ static int gpr_get(struct task_struct *target, /* Abiv1 regs->tls is fake and we need sync here. */ regs->tls = task_thread_info(target)->tp_value; - return membuf_write(&to, regs, sizeof(regs)); + return membuf_write(&to, regs, sizeof(*regs)); } static int gpr_set(struct task_struct *target, @@ -343,6 +344,124 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs) trace_sys_exit(regs, syscall_get_return_value(current, regs)); } +#ifdef CONFIG_CPU_CK860 +static void show_iutlb(void) +{ + int entry, i; + unsigned long flags; + unsigned long oldpid; + unsigned long entryhi[16], entrylo0[16], entrylo1[16]; + + oldpid = read_mmu_entryhi(); + + entry = 0x8000; + + local_irq_save(flags); + + for (i = 0; i < 16; i++) { + write_mmu_index(entry); + tlb_read(); + entryhi[i] = read_mmu_entryhi(); + entrylo0[i] = read_mmu_entrylo0(); + entrylo1[i] = read_mmu_entrylo1(); + + entry++; + } + + local_irq_restore(flags); + + write_mmu_entryhi(oldpid); + + printk("\n\n\n"); + for (i = 0; i < 16; i++) + printk("iutlb[%d]: entryhi - 0x%lx; entrylo0 - 0x%lx;" + " entrylo1 - 0x%lx\n", + i, entryhi[i], entrylo0[i], entrylo1[i]); + printk("\n\n\n"); +} + +static void show_dutlb(void) +{ + int entry, i; + unsigned long flags; + unsigned long oldpid; + unsigned long entryhi[16], entrylo0[16], entrylo1[16]; + + oldpid = read_mmu_entryhi(); + + entry = 0x4000; + + local_irq_save(flags); + + for (i = 0; i < 16; i++) { + write_mmu_index(entry); + tlb_read(); + entryhi[i] = read_mmu_entryhi(); + entrylo0[i] = read_mmu_entrylo0(); + entrylo1[i] = read_mmu_entrylo1(); + + entry++; + } + + local_irq_restore(flags); + + write_mmu_entryhi(oldpid); + + printk("\n\n\n"); + for (i = 0; i < 16; i++) + printk("dutlb[%d]: entryhi - 0x%lx; entrylo0 - 0x%lx;" + " entrylo1 - 0x%lx\n", + i, entryhi[i], entrylo0[i], entrylo1[i]); + printk("\n\n\n"); +} + +static unsigned long entryhi[1024], entrylo0[1024], entrylo1[1024]; +static void show_jtlb(void) +{ + int entry; + unsigned long flags; + unsigned long oldpid; + + oldpid = read_mmu_entryhi(); + + entry = 0; + + local_irq_save(flags); + while (entry < 1024) { + write_mmu_index(entry); + tlb_read(); + entryhi[entry] = read_mmu_entryhi(); + entrylo0[entry] = read_mmu_entrylo0(); + entrylo1[entry] = read_mmu_entrylo1(); + + entry++; + } + local_irq_restore(flags); + + write_mmu_entryhi(oldpid); + + printk("\n\n\n"); + + for (entry = 0; entry < 1024; entry++) + printk("jtlb[%x]: entryhi - 0x%lx; entrylo0 - 0x%lx;" + " entrylo1 - 0x%lx\n", + entry, entryhi[entry], entrylo0[entry], entrylo1[entry]); + printk("\n\n\n"); +} + +static void show_tlb(void) +{ + show_iutlb(); + show_dutlb(); + show_jtlb(); +} +#else 
+static void show_tlb(void) +{ + return; +} +#endif + void show_regs(struct pt_regs *fp) { pr_info("\nCURRENT PROCESS:\n\n"); @@ -363,9 +482,10 @@ void show_regs(struct pt_regs *fp) pr_info("PC: 0x%08lx (%pS)\n", (long)fp->pc, (void *)fp->pc); pr_info("LR: 0x%08lx (%pS)\n", (long)fp->lr, (void *)fp->lr); - pr_info("SP: 0x%08lx\n", (long)fp); - pr_info("orig_a0: 0x%08lx\n", fp->orig_a0); + pr_info("SP: 0x%08lx\n", (long)fp->usp); pr_info("PSR: 0x%08lx\n", (long)fp->sr); + pr_info("orig_a0: 0x%08lx\n", fp->orig_a0); + pr_info("PT_REGS: 0x%08lx\n", (long)fp); pr_info(" a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n", fp->a0, fp->a1, fp->a2, fp->a3); @@ -395,5 +515,7 @@ void show_regs(struct pt_regs *fp) fp->regs[8], fp->regs[9]); #endif + show_tlb(); + return; } diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c index e4cab16056d6..e93bc6f74432 100644 --- a/arch/csky/kernel/setup.c +++ b/arch/csky/kernel/setup.c @@ -45,13 +45,17 @@ static void __init csky_memblock_init(void) if (size >= lowmem_size) { max_low_pfn = min_low_pfn + lowmem_size; +#ifdef CONFIG_PAGE_OFFSET_80000000 write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE); +#endif } else if (size > sseg_size) { max_low_pfn = min_low_pfn + sseg_size; } max_zone_pfn[ZONE_NORMAL] = max_low_pfn; + mmu_init(min_low_pfn, max_low_pfn); + #ifdef CONFIG_HIGHMEM max_zone_pfn[ZONE_HIGHMEM] = max_pfn; @@ -101,16 +105,26 @@ void __init setup_arch(char **cmdline_p) unsigned long va_pa_offset; EXPORT_SYMBOL(va_pa_offset); +static inline unsigned long read_mmu_msa(void) +{ +#ifdef CONFIG_PAGE_OFFSET_80000000 + return read_mmu_msa0(); +#endif + +#ifdef CONFIG_PAGE_OFFSET_A0000000 + return read_mmu_msa1(); +#endif +} + asmlinkage __visible void __init csky_start(unsigned int unused, void *dtb_start) { /* Clean up bss section */ memset(__bss_start, 0, __bss_stop - __bss_start); - va_pa_offset = read_mmu_msa0() & ~(SSEG_SIZE - 1); + va_pa_offset = read_mmu_msa() & ~(SSEG_SIZE - 1); pre_trap_init(); - pre_mmu_init(); if (dtb_start == NULL) early_init_dt_scan(__dtb_start); diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c index 37ea64ed3c12..312f046d452d 100644 --- a/arch/csky/kernel/signal.c +++ b/arch/csky/kernel/signal.c @@ -134,7 +134,6 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe *frame; int err = 0; - struct csky_vdso *vdso = current->mm->context.vdso; frame = get_sigframe(ksig, regs, sizeof(*frame)); if (!access_ok(frame, sizeof(*frame))) @@ -152,7 +151,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) return -EFAULT; /* Set up to return from userspace. */ - regs->lr = (unsigned long)(vdso->rt_signal_retcode); + regs->lr = (unsigned long)VDSO_SYMBOL( + current->mm->context.vdso, rt_sigreturn); /* * Set up registers for signal handler. 
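With this signal.c hunk, setup_rt_frame() points the signal trampoline at the __vdso_rt_sigreturn entry inside the real vDSO image rather than the old single-page rt_signal_retcode blob. The VDSO_SYMBOL() helper itself is not part of the hunk; a plausible definition, modeled on the RISC-V port that this vDSO rework closely follows (the macro body below is an assumption, not quoted from the patch):

        /*
         * Assumed shape of VDSO_SYMBOL(): vdso-syms.S (generated at build
         * time, see the vdso/ Makefile below) defines one absolute symbol
         * per exported entry whose value is its offset into vdso.so, so
         * the lookup reduces to base + offset.
         */
        #define VDSO_SYMBOL(base, name)                                 \
        ({                                                              \
                extern const char __vdso_##name[];                      \
                (void __user *)((unsigned long)(base) +                 \
                                (unsigned long)__vdso_##name);          \
        })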
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index 041d0de6a1b6..0f9f5eef9338 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c @@ -203,8 +203,8 @@ volatile unsigned int secondary_hint; volatile unsigned int secondary_hint2; volatile unsigned int secondary_ccr; volatile unsigned int secondary_stack; - -unsigned long secondary_msa1; +volatile unsigned int secondary_msa1; +volatile unsigned int secondary_pgd; int __cpu_up(unsigned int cpu, struct task_struct *tidle) { @@ -216,6 +216,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) secondary_hint2 = mfcr("cr<21, 1>"); secondary_ccr = mfcr("cr18"); secondary_msa1 = read_mmu_msa1(); + secondary_pgd = mfcr("cr<29, 15>"); /* * Because other CPUs are in reset status, we must flush data @@ -262,8 +263,6 @@ void csky_start_secondary(void) flush_tlb_all(); write_mmu_pagemask(0); - TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); - TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir); #ifdef CONFIG_CPU_HAS_FPU init_fpu(); diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c index 959a917c989d..e5fbf8653a21 100644 --- a/arch/csky/kernel/traps.c +++ b/arch/csky/kernel/traps.c @@ -39,9 +39,7 @@ asmlinkage void csky_cmpxchg(void); asmlinkage void csky_get_tls(void); asmlinkage void csky_irq(void); -asmlinkage void csky_tlbinvalidl(void); -asmlinkage void csky_tlbinvalids(void); -asmlinkage void csky_tlbmodified(void); +asmlinkage void csky_pagefault(void); /* Defined in head.S */ asmlinkage void _start_smp_secondary(void); @@ -66,9 +64,9 @@ void __init trap_init(void) VEC_INIT(VEC_TRAP3, csky_get_tls); /* setup MMU TLB exception */ - VEC_INIT(VEC_TLBINVALIDL, csky_tlbinvalidl); - VEC_INIT(VEC_TLBINVALIDS, csky_tlbinvalids); - VEC_INIT(VEC_TLBMODIFIED, csky_tlbmodified); + VEC_INIT(VEC_TLBINVALIDL, csky_pagefault); + VEC_INIT(VEC_TLBINVALIDS, csky_pagefault); + VEC_INIT(VEC_TLBMODIFIED, csky_pagefault); #ifdef CONFIG_CPU_HAS_FPU init_fpu(); diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c index abc3dbc658d4..16c20d64d165 100644 --- a/arch/csky/kernel/vdso.c +++ b/arch/csky/kernel/vdso.c @@ -1,86 +1,107 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. -#include <linux/kernel.h> -#include <linux/err.h> -#include <linux/sched.h> -#include <linux/mm.h> -#include <linux/init.h> #include <linux/binfmts.h> #include <linux/elf.h> -#include <linux/vmalloc.h> -#include <linux/unistd.h> -#include <linux/uaccess.h> +#include <linux/err.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <asm/page.h> +#ifdef GENERIC_TIME_VSYSCALL +#include <vdso/datapage.h> +#else #include <asm/vdso.h> -#include <asm/cacheflush.h> +#endif -static struct page *vdso_page; +extern char vdso_start[], vdso_end[]; -static int __init init_vdso(void) -{ - struct csky_vdso *vdso; - int err = 0; +static unsigned int vdso_pages; +static struct page **vdso_pagelist; - vdso_page = alloc_page(GFP_KERNEL); - if (!vdso_page) - panic("Cannot allocate vdso"); +/* + * The vDSO data page. 
+ */ +static union { + struct vdso_data data; + u8 page[PAGE_SIZE]; +} vdso_data_store __page_aligned_data; +struct vdso_data *vdso_data = &vdso_data_store.data; - vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL); - if (!vdso) - panic("Cannot map vdso"); - - clear_page(vdso); +static int __init vdso_init(void) +{ + unsigned int i; - err = setup_vdso_page(vdso->rt_signal_retcode); - if (err) - panic("Cannot set signal return code, err: %x.", err); + vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; + vdso_pagelist = + kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL); + if (unlikely(vdso_pagelist == NULL)) { + pr_err("vdso: pagelist allocation failed\n"); + return -ENOMEM; + } - dcache_wb_range((unsigned long)vdso, (unsigned long)vdso + 16); + for (i = 0; i < vdso_pages; i++) { + struct page *pg; - vunmap(vdso); + pg = virt_to_page(vdso_start + (i << PAGE_SHIFT)); + vdso_pagelist[i] = pg; + } + vdso_pagelist[i] = virt_to_page(vdso_data); return 0; } -subsys_initcall(init_vdso); +arch_initcall(vdso_init); -int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp) { - int ret; - unsigned long addr; struct mm_struct *mm = current->mm; + unsigned long vdso_base, vdso_len; + int ret; - mmap_write_lock(mm); + vdso_len = (vdso_pages + 1) << PAGE_SHIFT; - addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0); - if (IS_ERR_VALUE(addr)) { - ret = addr; - goto up_fail; + mmap_write_lock(mm); + vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0); + if (IS_ERR_VALUE(vdso_base)) { + ret = vdso_base; + goto end; } - ret = install_special_mapping( - mm, - addr, - PAGE_SIZE, - VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, - &vdso_page); - if (ret) - goto up_fail; + /* + * Put vDSO base into mm struct. We need to do this before calling + * install_special_mapping or the perf counter mmap tracking code + * will fail to recognise it as a vDSO (since arch_vma_name fails). 
+ */ + mm->context.vdso = (void *)vdso_base; - mm->context.vdso = (void *)addr; + ret = + install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, + (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC), + vdso_pagelist); -up_fail: + if (unlikely(ret)) { + mm->context.vdso = NULL; + goto end; + } + + vdso_base += (vdso_pages << PAGE_SHIFT); + ret = install_special_mapping(mm, vdso_base, PAGE_SIZE, + (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]); + + if (unlikely(ret)) + mm->context.vdso = NULL; +end: mmap_write_unlock(mm); return ret; } const char *arch_vma_name(struct vm_area_struct *vma) { - if (vma->vm_mm == NULL) - return NULL; - - if (vma->vm_start == (long)vma->vm_mm->context.vdso) + if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso)) return "[vdso]"; - else - return NULL; + if (vma->vm_mm && (vma->vm_start == + (long)vma->vm_mm->context.vdso + PAGE_SIZE)) + return "[vdso_data]"; + return NULL; } diff --git a/arch/x86/platform/sfi/Makefile b/arch/csky/kernel/vdso/.gitignore index 4eba24c2af67..3a19def868ec 100644 --- a/arch/x86/platform/sfi/Makefile +++ b/arch/csky/kernel/vdso/.gitignore @@ -1,2 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_SFI) += sfi.o +vdso.lds +*.tmp +vdso-syms.S diff --git a/arch/csky/kernel/vdso/Makefile b/arch/csky/kernel/vdso/Makefile new file mode 100644 index 000000000000..0b6909f10667 --- /dev/null +++ b/arch/csky/kernel/vdso/Makefile @@ -0,0 +1,72 @@ +# SPDX-License-Identifier: GPL-2.0-only + +# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before +# the inclusion of generic Makefile. +ARCH_REL_TYPE_ABS := R_CKCORE_ADDR32|R_CKCORE_JUMP_SLOT +include $(srctree)/lib/vdso/Makefile + +# Symbols present in the vdso +vdso-syms += rt_sigreturn +vdso-syms += vgettimeofday + +# Files to link into the vdso +obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o + +ifneq ($(c-gettimeofday-y),) + CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) +endif + +ccflags-y := -fno-stack-protector -DBUILD_VDSO32 + +# Build rules +targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o +obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) + +obj-y += vdso.o vdso-syms.o +CPPFLAGS_vdso.lds += -P -C -U$(ARCH) + +# Disable gcov profiling for VDSO code +GCOV_PROFILE := n +KCOV_INSTRUMENT := n + +# Force dependency +$(obj)/vdso.o: $(obj)/vdso.so + +SYSCFLAGS_vdso.so.dbg = $(c_flags) +$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,vdsold) +SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ + -Wl,--build-id=sha1 -Wl,--hash-style=both + +$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE + $(call if_changed,so2s) + +# strip rule for the .so file +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + +# actual build commands +# The DSO images are built using a special linker script +# Make sure only to export the intended __vdso_xxx symbol offsets. +quiet_cmd_vdsold = VDSOLD $@ + cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \ + -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \ + $(CROSS_COMPILE)objcopy \ + $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ + rm $@.tmp + +# Extracts symbol offsets from the VDSO, converting them into an assembly file +# that contains the same symbols at the same offsets. 
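so2s.sh (added below) does that extraction with a single sed expression: an `nm -D` line of the form `00000400 T __vdso_rt_sigreturn@@LINUX_5.10` (offset illustrative) becomes `.global __vdso_rt_sigreturn` plus `.set __vdso_rt_sigreturn,0x400`, which is exactly what the kernel-side VDSO_SYMBOL() lookup needs. A quick userspace smoke test for the finished mapping; this is a sketch, and it assumes the port advertises the vDSO through AT_SYSINFO_EHDR in the ELF auxiliary vector, which these hunks do not show:

        #include <stdio.h>
        #include <sys/auxv.h>

        /*
         * After arch_setup_additional_pages() runs, the process should show
         * "[vdso]" and "[vdso_data]" in /proc/self/maps and a non-zero
         * AT_SYSINFO_EHDR pointing at the vDSO's ELF header.
         */
        int main(void)
        {
                unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

                printf("vDSO ELF header mapped at %#lx\n", vdso);
                return vdso ? 0 : 1;
        }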
+quiet_cmd_so2s = SO2S $@ + cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@ + +# install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso.so: $(obj)/vdso.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + +vdso_install: vdso.so diff --git a/arch/csky/kernel/vdso/note.S b/arch/csky/kernel/vdso/note.S new file mode 100644 index 000000000000..2a956c942211 --- /dev/null +++ b/arch/csky/kernel/vdso/note.S @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. + * Here we can supply some information useful to userland. + */ + +#include <linux/elfnote.h> +#include <linux/version.h> + +ELFNOTE_START(Linux, 0, "a") + .long LINUX_VERSION_CODE +ELFNOTE_END diff --git a/arch/csky/kernel/vdso/rt_sigreturn.S b/arch/csky/kernel/vdso/rt_sigreturn.S new file mode 100644 index 000000000000..0a6bd1216118 --- /dev/null +++ b/arch/csky/kernel/vdso/rt_sigreturn.S @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#include <linux/linkage.h> +#include <asm/unistd.h> +#include <abi/vdso.h> + + .text +ENTRY(__vdso_rt_sigreturn) + .cfi_startproc + .cfi_signal_frame + SET_SYSCALL_ID + trap 0 + .cfi_endproc +ENDPROC(__vdso_rt_sigreturn) diff --git a/arch/csky/kernel/vdso/so2s.sh b/arch/csky/kernel/vdso/so2s.sh new file mode 100755 index 000000000000..69da3d529c6d --- /dev/null +++ b/arch/csky/kernel/vdso/so2s.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ + +sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_5.10\)*!.global \2\n.set \2,0x\1!' \ +| grep '^\.' diff --git a/arch/csky/kernel/vdso/vdso.S b/arch/csky/kernel/vdso/vdso.S new file mode 100644 index 000000000000..5162ca069494 --- /dev/null +++ b/arch/csky/kernel/vdso/vdso.S @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#include <linux/init.h> +#include <linux/linkage.h> +#include <asm/page.h> + + __PAGE_ALIGNED_DATA + + .globl vdso_start, vdso_end + .balign PAGE_SIZE +vdso_start: + .incbin "arch/csky/kernel/vdso/vdso.so" + .balign PAGE_SIZE +vdso_end: + + .previous diff --git a/arch/csky/kernel/vdso/vdso.lds.S b/arch/csky/kernel/vdso/vdso.lds.S new file mode 100644 index 000000000000..590a6c79fff7 --- /dev/null +++ b/arch/csky/kernel/vdso/vdso.lds.S @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#include <asm/page.h> + +OUTPUT_ARCH(csky) + +SECTIONS +{ + PROVIDE(_vdso_data = . + PAGE_SIZE); + . = SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + .dynamic : { *(.dynamic) } :text :dynamic + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + + . 
= 0x800; + .text : { *(.text .text.*) } :text + + .data : { + *(.got.plt) *(.got) + *(.data .data.* .gnu.linkonce.d.*) + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + } +} + +PHDRS +{ + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + +VERSION +{ + LINUX_5.10 { + global: + __vdso_rt_sigreturn; + __vdso_clock_gettime; + __vdso_clock_gettime64; + __vdso_gettimeofday; + __vdso_clock_getres; + local: *; + }; +} diff --git a/arch/csky/kernel/vdso/vgettimeofday.c b/arch/csky/kernel/vdso/vgettimeofday.c new file mode 100644 index 000000000000..da491832c098 --- /dev/null +++ b/arch/csky/kernel/vdso/vgettimeofday.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/time.h> +#include <linux/types.h> + +int __vdso_clock_gettime(clockid_t clock, + struct old_timespec32 *ts) +{ + return __cvdso_clock_gettime32(clock, ts); +} + +int __vdso_clock_gettime64(clockid_t clock, + struct __kernel_timespec *ts) +{ + return __cvdso_clock_gettime(clock, ts); +} + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return __cvdso_gettimeofday(tv, tz); +} + +int __vdso_clock_getres(clockid_t clock_id, + struct old_timespec32 *res) +{ + return __cvdso_clock_getres_time32(clock_id, res); +} diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S index f03033e17c29..e8b1a4a49798 100644 --- a/arch/csky/kernel/vmlinux.lds.S +++ b/arch/csky/kernel/vmlinux.lds.S @@ -33,6 +33,7 @@ SECTIONS .text : AT(ADDR(.text) - LOAD_OFFSET) { _text = .; + VBR_BASE IRQENTRY_TEXT SOFTIRQENTRY_TEXT TEXT_TEXT @@ -104,7 +105,6 @@ SECTIONS EXCEPTION_TABLE(L1_CACHE_BYTES) BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES) - VBR_BASE _end = . ; STABS_DEBUG diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index 081b178b41b1..1482de56f4f7 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -1,29 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. -#include <linux/signal.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/interrupt.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/types.h> -#include <linux/ptrace.h> -#include <linux/mman.h> -#include <linux/mm.h> -#include <linux/smp.h> -#include <linux/version.h> -#include <linux/vt_kern.h> #include <linux/extable.h> -#include <linux/uaccess.h> -#include <linux/perf_event.h> #include <linux/kprobes.h> - -#include <asm/hardirq.h> -#include <asm/mmu_context.h> -#include <asm/traps.h> -#include <asm/page.h> +#include <linux/mmu_context.h> +#include <linux/perf_event.h> int fixup_exception(struct pt_regs *regs) { @@ -39,180 +20,287 @@ int fixup_exception(struct pt_regs *regs) return 0; } -/* - * This routine handles page faults. It determines the address, - * and the problem, and then passes it off to one of the appropriate - * routines. 
- */ -asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, - unsigned long mmu_meh) +static inline bool is_write(struct pt_regs *regs) { - struct vm_area_struct *vma = NULL; - struct task_struct *tsk = current; - struct mm_struct *mm = tsk->mm; - int si_code; - int fault; - unsigned long address = mmu_meh & PAGE_MASK; + switch (trap_no(regs)) { + case VEC_TLBINVALIDS: + return true; + case VEC_TLBMODIFIED: + return true; + } - if (kprobe_page_fault(regs, tsk->thread.trap_no)) + return false; +} + +#ifdef CONFIG_CPU_HAS_LDSTEX +static inline void csky_cmpxchg_fixup(struct pt_regs *regs) +{ + return; +} +#else +extern unsigned long csky_cmpxchg_ldw; +extern unsigned long csky_cmpxchg_stw; +static inline void csky_cmpxchg_fixup(struct pt_regs *regs) +{ + if (trap_no(regs) != VEC_TLBMODIFIED) return; - si_code = SEGV_MAPERR; + if (instruction_pointer(regs) == csky_cmpxchg_stw) + instruction_pointer_set(regs, csky_cmpxchg_ldw); + return; +} +#endif + +static inline void no_context(struct pt_regs *regs, unsigned long addr) +{ + current->thread.trap_no = trap_no(regs); + + /* Are we prepared to handle this kernel fault? */ + if (fixup_exception(regs)) + return; -#ifndef CONFIG_CPU_HAS_TLBI /* - * We fault-in kernel-space virtual memory on-demand. The - * 'reference' page table is init_mm.pgd. - * - * NOTE! We MUST NOT take any locks for this case. We may - * be in an interrupt or a critical region, and should - * only copy the information from the master page table, - * nothing more. + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. */ - if (unlikely(address >= VMALLOC_START) && - unlikely(address <= VMALLOC_END)) { - /* - * Synchronize this task's top level page-table - * with the 'reference' page table. - * - * Do _not_ use "tsk" here. We might be inside - * an interrupt in the middle of a task switch.. - */ - int offset = pgd_index(address); - pgd_t *pgd, *pgd_k; - pud_t *pud, *pud_k; - pmd_t *pmd, *pmd_k; - pte_t *pte_k; + bust_spinlocks(1); + pr_alert("Unable to handle kernel paging request at virtual " + "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc); + die(regs, "Oops"); + do_exit(SIGKILL); +} - unsigned long pgd_base; +static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) +{ + current->thread.trap_no = trap_no(regs); - pgd_base = (unsigned long)__va(get_pgd()); - pgd = (pgd_t *)pgd_base + offset; - pgd_k = init_mm.pgd + offset; + if (fault & VM_FAULT_OOM) { + /* + * We ran out of memory, call the OOM killer, and return the userspace + * (which will retry the fault, or kill us if we got oom-killed). + */ + if (!user_mode(regs)) { + no_context(regs, addr); + return; + } + pagefault_out_of_memory(); + return; + } else if (fault & VM_FAULT_SIGBUS) { + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) { + no_context(regs, addr); + return; + } + do_trap(regs, SIGBUS, BUS_ADRERR, addr); + return; + } + BUG(); +} - if (!pgd_present(*pgd_k)) - goto no_context; - set_pgd(pgd, *pgd_k); +static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr) +{ + /* + * Something tried to access memory that isn't in our memory map. + * Fix it, but check if it's kernel or user first. 
+ */ + mmap_read_unlock(mm); + /* User mode accesses just cause a SIGSEGV */ + if (user_mode(regs)) { + do_trap(regs, SIGSEGV, code, addr); + return; + } - pud = (pud_t *)pgd; - pud_k = (pud_t *)pgd_k; - if (!pud_present(*pud_k)) - goto no_context; + no_context(regs, addr); +} - pmd = pmd_offset(pud, address); - pmd_k = pmd_offset(pud_k, address); - if (!pmd_present(*pmd_k)) - goto no_context; - set_pmd(pmd, *pmd_k); +static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr) +{ + pgd_t *pgd, *pgd_k; + pud_t *pud, *pud_k; + pmd_t *pmd, *pmd_k; + pte_t *pte_k; + int offset; - pte_k = pte_offset_kernel(pmd_k, address); - if (!pte_present(*pte_k)) - goto no_context; + /* User mode accesses just cause a SIGSEGV */ + if (user_mode(regs)) { + do_trap(regs, SIGSEGV, code, addr); return; } -#endif - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* - * If we're in an interrupt or have no user - * context, we must not take the fault.. + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Do _not_ use "tsk" here. We might be inside + * an interrupt in the middle of a task switch.. */ - if (in_atomic() || !mm) - goto bad_area_nosemaphore; + offset = pgd_index(addr); - mmap_read_lock(mm); - vma = find_vma(mm, address); - if (!vma) - goto bad_area; - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; - /* - * Ok, we have a good vm_area for this memory access, so - * we can handle it.. - */ -good_area: - si_code = SEGV_ACCERR; + pgd = get_pgd() + offset; + pgd_k = init_mm.pgd + offset; - if (write) { + if (!pgd_present(*pgd_k)) { + no_context(regs, addr); + return; + } + set_pgd(pgd, *pgd_k); + + pud = (pud_t *)pgd; + pud_k = (pud_t *)pgd_k; + if (!pud_present(*pud_k)) { + no_context(regs, addr); + return; + } + + pmd = pmd_offset(pud, addr); + pmd_k = pmd_offset(pud_k, addr); + if (!pmd_present(*pmd_k)) { + no_context(regs, addr); + return; + } + set_pmd(pmd, *pmd_k); + + pte_k = pte_offset_kernel(pmd_k, addr); + if (!pte_present(*pte_k)) { + no_context(regs, addr); + return; + } + + flush_tlb_one(addr); +} + +static inline bool access_error(struct pt_regs *regs, struct vm_area_struct *vma) +{ + if (is_write(regs)) { if (!(vma->vm_flags & VM_WRITE)) - goto bad_area; + return true; } else { if (unlikely(!vma_is_accessible(vma))) - goto bad_area; + return true; } + return false; +} + +/* + * This routine handles page faults. It determines the address and the + * problem, and then passes it off to one of the appropriate routines. + */ +asmlinkage void do_page_fault(struct pt_regs *regs) +{ + struct task_struct *tsk; + struct vm_area_struct *vma; + struct mm_struct *mm; + unsigned long addr = read_mmu_entryhi() & PAGE_MASK; + unsigned int flags = FAULT_FLAG_DEFAULT; + int code = SEGV_MAPERR; + vm_fault_t fault; + + tsk = current; + mm = tsk->mm; + + csky_cmpxchg_fixup(regs); + + if (kprobe_page_fault(regs, tsk->thread.trap_no)) + return; /* - * If for any reason at all we couldn't handle the fault, - * make sure we exit gracefully rather than endlessly redo - * the fault. + * Fault-in kernel-space virtual memory on-demand. + * The 'reference' page table is init_mm.pgd. + * + * NOTE! We MUST NOT take any locks for this case. We may + * be in an interrupt or a critical region, and should + * only copy the information from the master page table, + * nothing more. */ - fault = handle_mm_fault(vma, address, write ? 
FAULT_FLAG_WRITE : 0, - regs); - if (unlikely(fault & VM_FAULT_ERROR)) { - if (fault & VM_FAULT_OOM) - goto out_of_memory; - else if (fault & VM_FAULT_SIGBUS) - goto do_sigbus; - else if (fault & VM_FAULT_SIGSEGV) - goto bad_area; - BUG(); + if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) { + vmalloc_fault(regs, code, addr); + return; } - mmap_read_unlock(mm); - return; + + /* Enable interrupts if they were enabled in the parent context. */ + if (likely(regs->sr & BIT(6))) + local_irq_enable(); /* - * Something tried to access memory that isn't in our memory map.. - * Fix it, but check if it's kernel or user first.. + * If we're in an interrupt, have no user context, or are running + * in an atomic region, then we must not take the fault. */ -bad_area: - mmap_read_unlock(mm); - -bad_area_nosemaphore: - /* User mode accesses just cause a SIGSEGV */ - if (user_mode(regs)) { - tsk->thread.trap_no = trap_no(regs); - force_sig_fault(SIGSEGV, si_code, (void __user *)address); + if (unlikely(faulthandler_disabled() || !mm)) { + no_context(regs, addr); return; } -no_context: - tsk->thread.trap_no = trap_no(regs); + if (user_mode(regs)) + flags |= FAULT_FLAG_USER; - /* Are we prepared to handle this kernel fault? */ - if (fixup_exception(regs)) + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); + + if (is_write(regs)) + flags |= FAULT_FLAG_WRITE; +retry: + mmap_read_lock(mm); + vma = find_vma(mm, addr); + if (unlikely(!vma)) { + bad_area(regs, mm, code, addr); + return; + } + if (likely(vma->vm_start <= addr)) + goto good_area; + if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { + bad_area(regs, mm, code, addr); return; + } + if (unlikely(expand_stack(vma, addr))) { + bad_area(regs, mm, code, addr); + return; + } /* - * Oops. The kernel tried to access some bad page. We'll have to - * terminate things with extreme prejudice. + * Ok, we have a good vm_area for this memory access, so + * we can handle it. */ - bust_spinlocks(1); - pr_alert("Unable to handle kernel paging request at virtual " - "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc); - die(regs, "Oops"); +good_area: + code = SEGV_ACCERR; + + if (unlikely(access_error(regs, vma))) { + bad_area(regs, mm, code, addr); + return; + } -out_of_memory: - tsk->thread.trap_no = trap_no(regs); + /* + * If for any reason at all we could not handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ + fault = handle_mm_fault(vma, addr, flags, regs); /* - * We ran out of memory, call the OOM killer, and return the userspace - * (which will retry the fault, or kill us if we got oom-killed). + * If we need to retry but a fatal signal is pending, handle the + * signal first. We do not need to release the mmap_lock because it + * would already be released in __lock_page_or_retry in mm/filemap.c. */ - pagefault_out_of_memory(); - return; + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + no_context(regs, addr); + return; + } -do_sigbus: - tsk->thread.trap_no = trap_no(regs); + if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) { + flags |= FAULT_FLAG_TRIED; - mmap_read_unlock(mm); + /* + * No need to mmap_read_unlock(mm) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + goto retry; + } - /* Kernel mode? 
Handle exceptions or die */ - if (!user_mode(regs)) - goto no_context; + mmap_read_unlock(mm); - force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address); + if (unlikely(fault & VM_FAULT_ERROR)) { + mm_fault_error(regs, addr, fault); + return; + } + return; } diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index af627128314f..894050a8ce09 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -28,9 +28,15 @@ #include <asm/mmu_context.h> #include <asm/sections.h> #include <asm/tlb.h> +#include <asm/cacheflush.h> + +#define PTRS_KERN_TABLE \ + ((PTRS_PER_PGD - USER_PTRS_PER_PGD) * PTRS_PER_PTE) pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; +pte_t kernel_pte_tables[PTRS_KERN_TABLE] __page_aligned_bss; + EXPORT_SYMBOL(invalid_pte_table); unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; @@ -80,9 +86,9 @@ void __init mem_init(void) #ifdef CONFIG_HIGHMEM unsigned long tmp; - max_mapnr = highend_pfn; + set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET); #else - max_mapnr = max_low_pfn; + set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); #endif high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); @@ -104,24 +110,9 @@ void __init mem_init(void) mem_init_print_info(NULL); } -extern char __init_begin[], __init_end[]; - void free_initmem(void) { - unsigned long addr; - - addr = (unsigned long) &__init_begin; - - while (addr < (unsigned long) &__init_end) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages_inc(); - addr += PAGE_SIZE; - } - - pr_info("Freeing unused kernel memory: %dk freed\n", - ((unsigned int)&__init_end - (unsigned int)&__init_begin) >> 10); + free_initmem_default(-1); } void pgd_init(unsigned long *p) @@ -130,20 +121,35 @@ void pgd_init(unsigned long *p) for (i = 0; i < PTRS_PER_PGD; i++) p[i] = __pa(invalid_pte_table); + + flush_tlb_all(); + local_icache_inv_all(NULL); } -void __init pre_mmu_init(void) +void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn) { - /* - * Setup page-table and enable TLB-hardrefill - */ + int i; + + for (i = 0; i < USER_PTRS_PER_PGD; i++) + swapper_pg_dir[i].pgd = __pa(invalid_pte_table); + + for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) + swapper_pg_dir[i].pgd = + __pa(kernel_pte_tables + (PTRS_PER_PTE * (i - USER_PTRS_PER_PGD))); + + for (i = 0; i < PTRS_KERN_TABLE; i++) + set_pte(&kernel_pte_tables[i], __pte(_PAGE_GLOBAL)); + + for (i = min_pfn; i < max_pfn; i++) + set_pte(&kernel_pte_tables[i - PFN_DOWN(va_pa_offset)], pfn_pte(i, PAGE_KERNEL)); + flush_tlb_all(); - pgd_init((unsigned long *)swapper_pg_dir); - TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); - TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir); + local_icache_inv_all(NULL); /* Setup page mask to 4k */ write_mmu_pagemask(0); + + setup_pgd(swapper_pg_dir, 0); } void __init fixrange_init(unsigned long start, unsigned long end, diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c index ed1512381112..9234c5e5ceaf 100644 --- a/arch/csky/mm/tlb.c +++ b/arch/csky/mm/tlb.c @@ -24,7 +24,13 @@ void flush_tlb_all(void) void flush_tlb_mm(struct mm_struct *mm) { #ifdef CONFIG_CPU_HAS_TLBI - asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm))); + sync_is(); + asm volatile( + "tlbi.asids %0 \n" + "sync.i \n" + : + : "r" (cpu_asid(mm)) + : "memory"); #else tlb_invalid_all(); #endif @@ -53,11 +59,17 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, end &= TLB_ENTRY_SIZE_MASK; #ifdef 
CONFIG_CPU_HAS_TLBI + sync_is(); while (start < end) { - asm volatile("tlbi.vas %0"::"r"(start | newpid)); + asm volatile( + "tlbi.vas %0 \n" + : + : "r" (start | newpid) + : "memory"); + start += 2*PAGE_SIZE; } - sync_is(); + asm volatile("sync.i\n"); #else { unsigned long flags, oldpid; @@ -87,11 +99,17 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) end &= TLB_ENTRY_SIZE_MASK; #ifdef CONFIG_CPU_HAS_TLBI + sync_is(); while (start < end) { - asm volatile("tlbi.vaas %0"::"r"(start)); + asm volatile( + "tlbi.vaas %0 \n" + : + : "r" (start) + : "memory"); + start += 2*PAGE_SIZE; } - sync_is(); + asm volatile("sync.i\n"); #else { unsigned long flags, oldpid; @@ -121,8 +139,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) addr &= TLB_ENTRY_SIZE_MASK; #ifdef CONFIG_CPU_HAS_TLBI - asm volatile("tlbi.vas %0"::"r"(addr | newpid)); sync_is(); + asm volatile( + "tlbi.vas %0 \n" + "sync.i \n" + : + : "r" (addr | newpid) + : "memory"); #else { int oldpid, idx; @@ -147,8 +170,13 @@ void flush_tlb_one(unsigned long addr) addr &= TLB_ENTRY_SIZE_MASK; #ifdef CONFIG_CPU_HAS_TLBI - asm volatile("tlbi.vaas %0"::"r"(addr)); sync_is(); + asm volatile( + "tlbi.vaas %0 \n" + "sync.i \n" + : + : "r" (addr) + : "memory"); #else { int oldpid, idx; diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index bc1364db58fe..46b1342ce515 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c @@ -112,7 +112,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); childregs->retpc = (unsigned long) ret_from_kernel_thread; childregs->er4 = topstk; /* arg */ diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig index e324f65f41e7..f19ae2ab0aaa 100644 --- a/arch/hexagon/configs/comet_defconfig +++ b/arch/hexagon/configs/comet_defconfig @@ -1,7 +1,6 @@ CONFIG_SMP=y CONFIG_DEFAULT_MMAP_MIN_ADDR=0 CONFIG_HZ_100=y -CONFIG_EXPERIMENTAL=y CONFIG_CROSS_COMPILE="hexagon-" CONFIG_LOCALVERSION="-smp" # CONFIG_LOCALVERSION_AUTO is not set diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index 6a980cba7b29..c61165c99ae0 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c @@ -73,7 +73,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, sizeof(*ss)); ss->lr = (unsigned long)ret_from_fork; p->thread.switch_sp = ss; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); /* r24 <- fn, r25 <- arg */ ss->r24 = usp; diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index f3328a29e881..467b7e7f967c 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -14,7 +14,6 @@ KBUILD_DEFCONFIG := generic_defconfig NM := $(CROSS_COMPILE)nm -B -READELF := $(CROSS_COMPILE)readelf CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ @@ -85,9 +84,3 @@ define archhelp echo ' install - Install compressed kernel image' echo '* unwcheck - Check vmlinux for invalid unwind info' endef - -archprepare: make_nr_irqs_h -PHONY += make_nr_irqs_h - -make_nr_irqs_h: - $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h index 5acf52e90872..0eccf33dfe8b 100644 --- 
a/arch/ia64/include/asm/irq.h +++ b/arch/ia64/include/asm/irq.h @@ -14,7 +14,9 @@ #include <linux/types.h> #include <linux/cpumask.h> -#include <generated/nr-irqs.h> +#include <asm/native/irq.h> + +#define NR_IRQS IA64_NATIVE_NR_IRQS static __inline__ int irq_canonicalize (int irq) diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h index 726df17f1b51..05805249296c 100644 --- a/arch/ia64/include/asm/mca.h +++ b/arch/ia64/include/asm/mca.h @@ -14,13 +14,10 @@ #if !defined(__ASSEMBLY__) -#include <linux/interrupt.h> +#include <linux/percpu.h> +#include <linux/threads.h> #include <linux/types.h> - -#include <asm/param.h> -#include <asm/sal.h> -#include <asm/processor.h> -#include <asm/mca_asm.h> +#include <asm/ptrace.h> #define IA64_MCA_RENDEZ_TIMEOUT (20 * 1000) /* value in milliseconds - 20 seconds */ @@ -83,7 +80,7 @@ struct ia64_sal_os_state { /* common */ unsigned long sal_ra; /* Return address in SAL, physical */ unsigned long sal_gp; /* GP of the SAL - physical */ - pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */ + struct pal_min_state_area *pal_min_state; /* from R17. physical in asm, virtual in C */ /* Previous values of IA64_KR(CURRENT) and IA64_KR(CURRENT_STACK). * Note: if the MCA/INIT recovery code wants to resume to a new context * then it must change these values to reflect the new kernel stack. diff --git a/arch/ia64/include/asm/pal.h b/arch/ia64/include/asm/pal.h index f9d2b3b2dfad..b1d87955e8cc 100644 --- a/arch/ia64/include/asm/pal.h +++ b/arch/ia64/include/asm/pal.h @@ -750,7 +750,7 @@ typedef union pal_mc_error_info_u { * for PAL. */ -typedef struct pal_min_state_area_s { +struct pal_min_state_area { u64 pmsa_nat_bits; /* nat bits for saved GRs */ u64 pmsa_gr[15]; /* GR1 - GR15 */ u64 pmsa_bank0_gr[16]; /* GR16 - GR31 */ @@ -766,7 +766,7 @@ typedef struct pal_min_state_area_s { u64 pmsa_xfs; /* previous ifs */ u64 pmsa_br1; /* branch register 1 */ u64 pmsa_reserved[70]; /* pal_min_state_area should total to 1KB */ -} pal_min_state_area_t; +}; struct ia64_pal_retval { diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 779b6972aa84..9b4efe89e62d 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -517,12 +517,6 @@ extern struct page *zero_page_memmap_ptr; __changed; \ }) #endif - -# ifdef CONFIG_VIRTUAL_MEM_MAP - /* arch mem_map init routine is needed due to holes in a virtual mem_map */ - extern void memmap_init (unsigned long size, int nid, unsigned long zone, - unsigned long start_pfn); -# endif /* CONFIG_VIRTUAL_MEM_MAP */ # endif /* !__ASSEMBLY__ */ /* diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h index 08f5b6aaed73..78f4f7b40435 100644 --- a/arch/ia64/include/asm/sal.h +++ b/arch/ia64/include/asm/sal.h @@ -385,7 +385,7 @@ typedef struct sal_processor_static_info { fr : 1, reserved : 58; } valid; - pal_min_state_area_t min_state_area; + struct pal_min_state_area min_state_area; u64 br[8]; u64 cr[128]; u64 ar[128]; diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index c89bd5f8cbf8..78717819131c 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -47,8 +47,3 @@ CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 # The gate DSO image is built using a special linker script. 
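On ia64 the build-time generation of nr-irqs.h is retired: NR_IRQS used to be computed as the maximum of several per-platform candidates in an extra asm-offsets pass, but with the paravirt variants long gone only IA64_NATIVE_NR_IRQS remains, so a direct define suffices and the archprepare rule plus nr-irqs.c (deleted further down) can go. The trick the deleted file used is worth noting: the size of a union of char arrays is a compile-time max(), usable where ordinary expressions are awkward. In miniature, with illustrative values:

        #include <stdio.h>

        #define FOO_NR_IRQS     256
        #define BAR_NR_IRQS     1024

        /*
         * sizeof(union) equals the largest member's size, evaluated
         * entirely at compile time; this is how nr-irqs.c fed a constant
         * NR_IRQS to the kbuild offsets machinery.
         */
        union nr_irqs_max {
                char foo[FOO_NR_IRQS];
                char bar[BAR_NR_IRQS];
        };

        int main(void)
        {
                printf("NR_IRQS = %zu\n", sizeof(union nr_irqs_max));
                return 0;
        }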
include $(src)/Makefile.gate - -include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s FORCE - $(call filechk,offsets,__ASM_NR_IRQS_H__) - -targets += nr-irqs.s diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index fb0deb8a4221..be3b90fef2e9 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -245,23 +245,23 @@ void foo(void) BLANK(); DEFINE(IA64_PMSA_GR_OFFSET, - offsetof (struct pal_min_state_area_s, pmsa_gr)); + offsetof(struct pal_min_state_area, pmsa_gr)); DEFINE(IA64_PMSA_BANK1_GR_OFFSET, - offsetof (struct pal_min_state_area_s, pmsa_bank1_gr)); + offsetof(struct pal_min_state_area, pmsa_bank1_gr)); DEFINE(IA64_PMSA_PR_OFFSET, - offsetof (struct pal_min_state_area_s, pmsa_pr)); + offsetof(struct pal_min_state_area, pmsa_pr)); DEFINE(IA64_PMSA_BR0_OFFSET, - offsetof (struct pal_min_state_area_s, pmsa_br0)); + offsetof(struct pal_min_state_area, pmsa_br0)); DEFINE(IA64_PMSA_RSC_OFFSET, - offsetof (struct pal_min_state_area_s, pmsa_rsc)); + offsetof(struct pal_min_state_area, pmsa_rsc)); DEFINE(IA64_PMSA_IIP_OFFSET, - offsetof (struct pal_min_state_area_s, pmsa_iip)); + offsetof(struct pal_min_state_area, pmsa_iip)); DEFINE(IA64_PMSA_IPSR_OFFSET, - offsetof (struct pal_min_state_area_s, pmsa_ipsr)); + offsetof(struct pal_min_state_area, pmsa_ipsr)); DEFINE(IA64_PMSA_IFS_OFFSET, - offsetof (struct pal_min_state_area_s, pmsa_ifs)); + offsetof(struct pal_min_state_area, pmsa_ifs)); DEFINE(IA64_PMSA_XIP_OFFSET, - offsetof (struct pal_min_state_area_s, pmsa_xip)); + offsetof(struct pal_min_state_area, pmsa_xip)); BLANK(); /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index 4f47741005d2..76730f34685c 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -15,6 +15,7 @@ #include <linux/memblock.h> #include <linux/kexec.h> #include <linux/elfcore.h> +#include <linux/reboot.h> #include <linux/sysctl.h> #include <linux/init.h> #include <linux/kdebug.h> diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index dd7fd750bb93..c5fe21de46a8 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -40,6 +40,7 @@ #include <asm/meminit.h> #include <asm/processor.h> #include <asm/mca.h> +#include <asm/sal.h> #include <asm/setup.h> #include <asm/tlbflush.h> diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 0fea266b4d39..d4cae2fc69ca 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -97,6 +97,7 @@ #include <asm/ptrace.h> #include <asm/sal.h> #include <asm/mca.h> +#include <asm/mca_asm.h> #include <asm/kexec.h> #include <asm/irq.h> @@ -895,7 +896,7 @@ static void finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos, unsigned long *nat) { - const pal_min_state_area_t *ms = sos->pal_min_state; + const struct pal_min_state_area *ms = sos->pal_min_state; const u64 *bank; /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use @@ -971,7 +972,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, char *p; ia64_va va; extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */ - const pal_min_state_area_t *ms = sos->pal_min_state; + const struct pal_min_state_area *ms = sos->pal_min_state; struct task_struct *previous_current; struct pt_regs *old_regs; struct switch_stack *old_sw; diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 4d0ab323dee8..36a69b4e6169 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -496,7 
+496,7 @@ recover_from_read_error(slidx_table_t *slidx, struct ia64_sal_os_state *sos) { u64 target_identifier; - pal_min_state_area_t *pmsa; + struct pal_min_state_area *pmsa; struct ia64_psr *psr1, *psr2; ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t*)mca_handler_bhhook; diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c deleted file mode 100644 index f2633b22d3be..000000000000 --- a/arch/ia64/kernel/nr-irqs.c +++ /dev/null @@ -1,22 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * calculate - * NR_IRQS = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, FOO_NR_IRQS...) - * depending on config. - * This must be calculated before processing asm-offset.c. - */ - -#define ASM_OFFSETS_C 1 - -#include <linux/kbuild.h> -#include <linux/threads.h> -#include <asm/native/irq.h> - -void foo(void) -{ - union paravirt_nr_irqs_max { - char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS]; - }; - - DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max)); -} diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 4ebbfa076a26..7e1a1525e202 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -338,7 +338,7 @@ copy_thread(unsigned long clone_flags, unsigned long user_stack_base, ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { if (unlikely(!user_stack_base)) { /* fork_idle() called us */ return 0; diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index e67b22fc3c60..c1b299760bf7 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -341,7 +341,8 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) * need to push through a forced SIGSEGV. */ while (1) { - get_signal(&ksig); + if (!get_signal(&ksig)) + break; /* * get_signal() may have run a debugger (via notify_parent()) diff --git a/arch/ia64/kernel/syscalls/Makefile b/arch/ia64/kernel/syscalls/Makefile index 813a58cba39c..bf4bda0f63eb 100644 --- a/arch/ia64/kernel/syscalls/Makefile +++ b/arch/ia64/kernel/syscalls/Makefile @@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') -syscall := $(srctree)/$(src)/syscall.tbl +syscall := $(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh systbl := $(srctree)/$(src)/syscalltbl.sh @@ -22,19 +22,20 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_offset_$(basetarget))' syshdr_offset_unistd_64 := __NR_Linux -$(uapi)/unistd_64.h: $(syscall) $(syshdr) +$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) systbl_offset_syscall_table := 1024 -$(kapi)/syscall_table.h: $(syscall) $(systbl) +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_64.h kapisyshdr-y += syscall_table.h -targets += $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index bfc00f2bd437..d89231166e19 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -362,3 +362,4 @@ 439 common faccessat2 sys_faccessat2 440 common process_madvise 
sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index b19f47a5a305..16d0d7d22657 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -536,18 +536,20 @@ virtual_memmap_init(u64 start, u64 end, void *arg) / sizeof(struct page)); if (map_start < map_end) - memmap_init_zone((unsigned long)(map_end - map_start), + memmap_init_range((unsigned long)(map_end - map_start), args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end), MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); return 0; } -void __meminit -memmap_init (unsigned long size, int nid, unsigned long zone, - unsigned long start_pfn) +void __meminit memmap_init_zone(struct zone *zone) { + int nid = zone_to_nid(zone), zone_id = zone_idx(zone); + unsigned long start_pfn = zone->zone_start_pfn; + unsigned long size = zone->spanned_pages; + if (!vmem_map) { - memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size, + memmap_init_range(size, nid, zone_id, start_pfn, start_pfn + size, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); } else { struct page *start; @@ -557,7 +559,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone, args.start = start; args.end = start + size; args.nid = nid; - args.zone = zone; + args.zone = zone_id; efi_memmap_walk(virtual_memmap_init, &args); } diff --git a/arch/m68k/coldfire/clk.c b/arch/m68k/coldfire/clk.c index 7bc666e482eb..076a9caa9557 100644 --- a/arch/m68k/coldfire/clk.c +++ b/arch/m68k/coldfire/clk.c @@ -90,6 +90,10 @@ EXPORT_SYMBOL(clk_get); int clk_enable(struct clk *clk) { unsigned long flags; + + if (!clk) + return -EINVAL; + spin_lock_irqsave(&clk_lock, flags); if ((clk->enabled++ == 0) && clk->clk_ops) clk->clk_ops->enable(clk); diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 19b40b6bc4b7..786656090c50 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -128,6 +128,7 @@ CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m @@ -655,7 +656,9 @@ CONFIG_FIND_BIT_BENCHMARK=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_SYSCTL=m CONFIG_BITFIELD_KUNIT=m +CONFIG_RESOURCE_KUNIT_TEST=m CONFIG_LINEAR_RANGES_TEST=m +CONFIG_CMDLINE_KUNIT_TEST=m CONFIG_BITS_TEST=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 07516abe0489..9bb12be4a38e 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -124,6 +124,7 @@ CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m @@ -611,7 +612,9 @@ CONFIG_FIND_BIT_BENCHMARK=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_SYSCTL=m CONFIG_BITFIELD_KUNIT=m +CONFIG_RESOURCE_KUNIT_TEST=m CONFIG_LINEAR_RANGES_TEST=m +CONFIG_CMDLINE_KUNIT_TEST=m CONFIG_BITS_TEST=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index cc901c4e9492..413232626d9d 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -131,6 +131,7 @@ CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m CONFIG_NF_FLOW_TABLE_INET=m 
CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m @@ -633,7 +634,9 @@ CONFIG_FIND_BIT_BENCHMARK=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_SYSCTL=m CONFIG_BITFIELD_KUNIT=m +CONFIG_RESOURCE_KUNIT_TEST=m CONFIG_LINEAR_RANGES_TEST=m +CONFIG_CMDLINE_KUNIT_TEST=m CONFIG_BITS_TEST=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index fc9a94aa7d6b..819cc70b06d8 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -121,6 +121,7 @@ CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m @@ -604,7 +605,9 @@ CONFIG_FIND_BIT_BENCHMARK=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_SYSCTL=m CONFIG_BITFIELD_KUNIT=m +CONFIG_RESOURCE_KUNIT_TEST=m CONFIG_LINEAR_RANGES_TEST=m +CONFIG_CMDLINE_KUNIT_TEST=m CONFIG_BITS_TEST=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 260f1206c810..8f8d5968713b 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -123,6 +123,7 @@ CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m @@ -613,7 +614,9 @@ CONFIG_FIND_BIT_BENCHMARK=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_SYSCTL=m CONFIG_BITFIELD_KUNIT=m +CONFIG_RESOURCE_KUNIT_TEST=m CONFIG_LINEAR_RANGES_TEST=m +CONFIG_CMDLINE_KUNIT_TEST=m CONFIG_BITS_TEST=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index f6d50b3fe8c2..bf15e6c1c939 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -122,6 +122,7 @@ CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m @@ -636,7 +637,9 @@ CONFIG_FIND_BIT_BENCHMARK=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_SYSCTL=m CONFIG_BITFIELD_KUNIT=m +CONFIG_RESOURCE_KUNIT_TEST=m CONFIG_LINEAR_RANGES_TEST=m +CONFIG_CMDLINE_KUNIT_TEST=m CONFIG_BITS_TEST=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index fbe000ca0003..5466d48fcd9d 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -142,6 +142,7 @@ CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m @@ -722,7 +723,9 @@ CONFIG_FIND_BIT_BENCHMARK=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_SYSCTL=m CONFIG_BITFIELD_KUNIT=m +CONFIG_RESOURCE_KUNIT_TEST=m CONFIG_LINEAR_RANGES_TEST=m +CONFIG_CMDLINE_KUNIT_TEST=m CONFIG_BITS_TEST=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 25ca836a5701..93c305918838 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -120,6 +120,7 @@ CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m @@ -603,7 +604,9 @@ 
CONFIG_FIND_BIT_BENCHMARK=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_SYSCTL=m CONFIG_BITFIELD_KUNIT=m +CONFIG_RESOURCE_KUNIT_TEST=m CONFIG_LINEAR_RANGES_TEST=m +CONFIG_CMDLINE_KUNIT_TEST=m CONFIG_BITS_TEST=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 5794e43a2acb..cacd6c617f69 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -121,6 +121,7 @@ CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m @@ -604,7 +605,9 @@ CONFIG_FIND_BIT_BENCHMARK=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_SYSCTL=m CONFIG_BITFIELD_KUNIT=m +CONFIG_RESOURCE_KUNIT_TEST=m CONFIG_LINEAR_RANGES_TEST=m +CONFIG_CMDLINE_KUNIT_TEST=m CONFIG_BITS_TEST=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index dbfb18938e11..3ae421cb24a4 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -122,6 +122,7 @@ CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m @@ -622,7 +623,9 @@ CONFIG_FIND_BIT_BENCHMARK=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_SYSCTL=m CONFIG_BITFIELD_KUNIT=m +CONFIG_RESOURCE_KUNIT_TEST=m CONFIG_LINEAR_RANGES_TEST=m +CONFIG_CMDLINE_KUNIT_TEST=m CONFIG_BITS_TEST=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index e6afbeee7c4a..6da97e28c48e 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -118,6 +118,7 @@ CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m @@ -605,7 +606,9 @@ CONFIG_FIND_BIT_BENCHMARK=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_SYSCTL=m CONFIG_BITFIELD_KUNIT=m +CONFIG_RESOURCE_KUNIT_TEST=m CONFIG_LINEAR_RANGES_TEST=m +CONFIG_CMDLINE_KUNIT_TEST=m CONFIG_BITS_TEST=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 5340507a9fff..f54481bb789a 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -118,6 +118,7 @@ CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m @@ -605,7 +606,9 @@ CONFIG_FIND_BIT_BENCHMARK=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_SYSCTL=m CONFIG_BITFIELD_KUNIT=m +CONFIG_RESOURCE_KUNIT_TEST=m CONFIG_LINEAR_RANGES_TEST=m +CONFIG_CMDLINE_KUNIT_TEST=m CONFIG_BITS_TEST=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h index 7f5912af2a52..9e8f0cc30a2c 100644 --- a/arch/m68k/include/asm/page_mm.h +++ b/arch/m68k/include/asm/page_mm.h @@ -171,7 +171,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void) #include <asm-generic/memory_model.h> #endif -#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory) +#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned 
long)high_memory) #define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn)) #endif /* __ASSEMBLY__ */ diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h index 6bbe52025de3..8d0f862ee9d7 100644 --- a/arch/m68k/include/asm/page_no.h +++ b/arch/m68k/include/asm/page_no.h @@ -30,8 +30,8 @@ extern unsigned long memory_end; #define page_to_pfn(page) virt_to_pfn(page_to_virt(page)) #define pfn_valid(pfn) ((pfn) < max_mapnr) -#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ - ((void *)(kaddr) < (void *)memory_end)) +#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET) && \ + ((unsigned long)(kaddr) < memory_end)) #endif /* __ASSEMBLY__ */ diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 08359a6e058f..da83cc83e791 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -157,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, */ p->thread.fs = get_fs().seg; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(frame, 0, sizeof(struct fork_frame)); frame->regs.sr = PS_S; diff --git a/arch/m68k/kernel/syscalls/Makefile b/arch/m68k/kernel/syscalls/Makefile index 659faefdcb1d..285aaba832d9 100644 --- a/arch/m68k/kernel/syscalls/Makefile +++ b/arch/m68k/kernel/syscalls/Makefile @@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') -syscall := $(srctree)/$(src)/syscall.tbl +syscall := $(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh systbl := $(srctree)/$(src)/syscalltbl.sh @@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_abi_$(basetarget))' \ '$(systbl_offset_$(basetarget))' -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -$(kapi)/syscall_table.h: $(syscall) $(systbl) +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h kapisyshdr-y += syscall_table.h -targets += $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl index 7fe4e45c864c..72bde6707dd3 100644 --- a/arch/m68k/kernel/syscalls/syscall.tbl +++ b/arch/m68k/kernel/syscalls/syscall.tbl @@ -441,3 +441,4 @@ 439 common faccessat2 sys_faccessat2 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 25a5a3fb14aa..0660f47012bc 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -38,7 +38,6 @@ config MICROBLAZE select OF_EARLY_FLATTREE select PCI_DOMAINS_GENERIC if PCI select PCI_SYSCALL if PCI - select TRACING_SUPPORT select VIRT_TO_BUS select CPU_NO_EFFICIENT_FFS select MMU_GATHER_NO_RANGE diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c index 9f12e3c2bb42..e5db3a57b9e3 100644 --- a/arch/microblaze/kernel/module.c +++ b/arch/microblaze/kernel/module.c @@ -24,9 +24,6 @@ int 
apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, Elf32_Sym *sym; unsigned long int *location; unsigned long int value; -#if __GNUC__ < 4 - unsigned long int old_value; -#endif pr_debug("Applying add relocation section %u to %u\n", relsec, sechdrs[relsec].sh_info); @@ -49,40 +46,17 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, */ case R_MICROBLAZE_32: -#if __GNUC__ < 4 - old_value = *location; - *location = value + old_value; - - pr_debug("R_MICROBLAZE_32 (%08lx->%08lx)\n", - old_value, value); -#else *location = value; -#endif break; case R_MICROBLAZE_64: -#if __GNUC__ < 4 - /* Split relocs only required/used pre gcc4.1.1 */ - old_value = ((location[0] & 0x0000FFFF) << 16) | - (location[1] & 0x0000FFFF); - value += old_value; -#endif location[0] = (location[0] & 0xFFFF0000) | (value >> 16); location[1] = (location[1] & 0xFFFF0000) | (value & 0xFFFF); -#if __GNUC__ < 4 - pr_debug("R_MICROBLAZE_64 (%08lx->%08lx)\n", - old_value, value); -#endif break; case R_MICROBLAZE_64_PCREL: -#if __GNUC__ < 4 - old_value = (location[0] & 0xFFFF) << 16 | - (location[1] & 0xFFFF); - value -= old_value; -#endif value -= (unsigned long int)(location) + 4; location[0] = (location[0] & 0xFFFF0000) | (value >> 16); diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 657c2beb665e..62aa237180b6 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -59,7 +59,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct pt_regs *childregs = task_pt_regs(p); struct thread_info *ti = task_thread_info(p); - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* if we're creating a new kernel thread then just zeroing all * the registers. 
That's OK for a brand new thread.*/ memset(childregs, 0, sizeof(struct pt_regs)); diff --git a/arch/microblaze/kernel/syscalls/Makefile b/arch/microblaze/kernel/syscalls/Makefile index 659faefdcb1d..285aaba832d9 100644 --- a/arch/microblaze/kernel/syscalls/Makefile +++ b/arch/microblaze/kernel/syscalls/Makefile @@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') -syscall := $(srctree)/$(src)/syscall.tbl +syscall := $(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh systbl := $(srctree)/$(src)/syscalltbl.sh @@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_abi_$(basetarget))' \ '$(systbl_offset_$(basetarget))' -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -$(kapi)/syscall_table.h: $(syscall) $(systbl) +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h kapisyshdr-y += syscall_table.h -targets += $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl index a522adf194ab..d603a5ec9338 100644 --- a/arch/microblaze/kernel/syscalls/syscall.tbl +++ b/arch/microblaze/kernel/syscalls/syscall.tbl @@ -447,3 +447,4 @@ 439 common faccessat2 sys_faccessat2 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index df07b3d06cd6..fb31747ec092 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -45,7 +45,7 @@ SECTIONS { _etext = . ; } - . = ALIGN (4) ; + . = ALIGN (8) ; __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) { _fdt_start = . 
; /* place for fdt blob */ *(__fdt_blob) ; /* Any link-placed DTB */ diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms index 5483e38b5dc7..e4f6e49417a9 100644 --- a/arch/mips/Kbuild.platforms +++ b/arch/mips/Kbuild.platforms @@ -18,6 +18,7 @@ platform-$(CONFIG_MACH_LOONGSON2EF) += loongson2ef/ platform-$(CONFIG_MACH_LOONGSON32) += loongson32/ platform-$(CONFIG_MACH_LOONGSON64) += loongson64/ platform-$(CONFIG_MIPS_MALTA) += mti-malta/ +platform-$(CONFIG_MACH_NINTENDO64) += n64/ platform-$(CONFIG_NLM_COMMON) += netlogic/ platform-$(CONFIG_PIC32MZDA) += pic32/ platform-$(CONFIG_MACH_PISTACHIO) += pistachio/ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 465bc5425d4c..d89efba3d8a4 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -18,6 +18,7 @@ config MIPS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_IPC_PARSE_VERSION + select ARCH_WANT_LD_ORPHAN_WARN select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1) @@ -42,7 +43,7 @@ config MIPS select HANDLE_DOMAIN_IRQ select HAVE_ARCH_COMPILER_H select HAVE_ARCH_JUMP_LABEL - select HAVE_ARCH_KGDB + select HAVE_ARCH_KGDB if MIPS_FP_SUPPORT select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT select HAVE_ARCH_SECCOMP_FILTER @@ -75,6 +76,8 @@ config MIPS select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RSEQ select HAVE_SPARSE_SYSCALL_NR @@ -123,6 +126,7 @@ choice config MIPS_GENERIC_KERNEL bool "Generic board-agnostic MIPS kernel" + select ARCH_HAS_SETUP_DMA_OPS select MIPS_GENERIC select BOOT_RAW select BUILTIN_DTB @@ -132,7 +136,7 @@ config MIPS_GENERIC_KERNEL select CPU_MIPSR2_IRQ_EI select CPU_MIPSR2_IRQ_VI select CSRC_R4K - select DMA_PERDEV_COHERENT + select DMA_NONCOHERENT select HAVE_PCI select IRQ_MIPS_CPU select MIPS_AUTO_PFN_OFFSET @@ -181,7 +185,7 @@ config MIPS_ALCHEMY select CEVT_R4K select CSRC_R4K select IRQ_MIPS_CPU - select DMA_MAYBE_COHERENT # Au1000,1500,1100 aren't, rest is + select DMA_NONCOHERENT # Au1000,1500,1100 aren't, rest is select MIPS_FIXUP_BIGPHYS_ADDR if PCI select SYS_HAS_CPU_MIPS32_R1 select SYS_SUPPORTS_32BIT_KERNEL @@ -408,6 +412,7 @@ config MACH_JAZZ select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_100HZ + select SYS_SUPPORTS_LITTLE_ENDIAN help This is a family of machines based on the MIPS R4030 chipset which was used by several vendors to build RISC/os and Windows NT workstations.
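The microblaze vmlinux.lds.S hunk above bumps the FDT blob's alignment from 4 to 8 bytes: the devicetree specification requires a loaded blob to sit on an 8-byte boundary, since the header's 64-bit fields are accessed with naturally aligned loads. A minimal sketch of the guard early boot code can apply before trusting a candidate blob; validate_fdt() is an illustrative helper name, not a symbol from this series:

	#include <linux/libfdt.h>

	/* Accept a candidate DTB only if it is 8-byte aligned, as the
	 * devicetree spec demands, and carries a valid header/magic. */
	static void *validate_fdt(void *blob)
	{
		if (!blob || ((unsigned long)blob & 0x7))
			return NULL;	/* misaligned: unusable */
		if (fdt_check_header(blob))
			return NULL;	/* bad magic, version or size */
		return blob;
	}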
@@ -491,8 +496,6 @@ config MACH_LOONGSON64 select SYS_SUPPORTS_ZBOOT select SYS_SUPPORTS_RELOCATABLE select ZONE_DMA32 - select NUMA - select SMP select COMMON_CLK select USE_OF select BUILTIN_DTB @@ -546,7 +549,7 @@ config MIPS_MALTA select CLKSRC_MIPS_GIC select COMMON_CLK select CSRC_R4K - select DMA_MAYBE_COHERENT + select DMA_NONCOHERENT select GENERIC_ISA_DMA select HAVE_PCSPKR_PLATFORM select HAVE_PCI @@ -608,6 +611,18 @@ config MACH_VR41XX select SYS_SUPPORTS_MIPS16 select GPIOLIB +config MACH_NINTENDO64 + bool "Nintendo 64 console" + select CEVT_R4K + select CSRC_R4K + select SYS_HAS_CPU_R4300 + select SYS_SUPPORTS_BIG_ENDIAN + select SYS_SUPPORTS_ZBOOT + select SYS_SUPPORTS_32BIT_KERNEL + select SYS_SUPPORTS_64BIT_KERNEL + select DMA_NONCOHERENT + select IRQ_MIPS_CPU + config RALINK bool "Ralink based machines" select CEVT_R4K @@ -627,6 +642,27 @@ config RALINK select ARCH_HAS_RESET_CONTROLLER select RESET_CONTROLLER +config MACH_REALTEK_RTL + bool "Realtek RTL838x/RTL839x based machines" + select MIPS_GENERIC + select DMA_NONCOHERENT + select IRQ_MIPS_CPU + select CSRC_R4K + select CEVT_R4K + select SYS_HAS_CPU_MIPS32_R1 + select SYS_HAS_CPU_MIPS32_R2 + select SYS_SUPPORTS_BIG_ENDIAN + select SYS_SUPPORTS_32BIT_KERNEL + select SYS_SUPPORTS_MIPS16 + select SYS_SUPPORTS_MULTITHREADING + select SYS_SUPPORTS_VPE_LOADER + select SYS_HAS_EARLY_PRINTK + select SYS_HAS_EARLY_PRINTK_8250 + select USE_GENERIC_EARLY_PRINTK_8250 + select BOOT_RAW + select PINCTRL + select USE_OF + config SGI_IP22 bool "SGI IP22 (Indy/Indigo2)" select ARC_MEMORY @@ -1127,11 +1163,6 @@ config FW_CFE config ARCH_SUPPORTS_UPROBES bool -config DMA_MAYBE_COHERENT - select ARCH_HAS_DMA_COHERENCE_H - select DMA_NONCOHERENT - bool - config DMA_PERDEV_COHERENT bool select ARCH_HAS_SETUP_DMA_OPS @@ -1258,9 +1289,6 @@ config SYS_SUPPORTS_HUGETLBFS config MIPS_HUGE_TLB_SUPPORT def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE -config IRQ_CPU_RM7K - bool - config IRQ_MSP_SLP bool @@ -1664,6 +1692,15 @@ config CPU_VR41XX kernel built with this option will not run on any other type of processor or vice versa. +config CPU_R4300 + bool "R4300" + depends on SYS_HAS_CPU_R4300 + select CPU_SUPPORTS_32BIT_KERNEL + select CPU_SUPPORTS_64BIT_KERNEL + select CPU_HAS_LOAD_STORE_LR + help + MIPS Technologies R4300-series processors. + config CPU_R4X00 bool "R4x00" depends on SYS_HAS_CPU_R4X00 @@ -1998,6 +2035,9 @@ config SYS_HAS_CPU_TX39XX config SYS_HAS_CPU_VR41XX bool +config SYS_HAS_CPU_R4300 + bool + config SYS_HAS_CPU_R4X00 bool @@ -2758,6 +2798,7 @@ config ARCH_SPARSEMEM_ENABLE config NUMA bool "NUMA Support" depends on SYS_SUPPORTS_NUMA + select SMP help Say Y to compile the kernel to support NUMA (Non-Uniform Memory Access). This option improves performance on systems with more diff --git a/arch/mips/Makefile b/arch/mips/Makefile index f62a6d951d3c..e71d587af49c 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -136,11 +136,31 @@ cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ # cflags-y += -fno-stack-check +# binutils from v2.35 when built with --enable-mips-fix-loongson3-llsc=yes, +# supports an -mfix-loongson3-llsc flag which emits a sync prior to each ll +# instruction to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h +# for a description). +# +# We disable this in order to prevent the assembler meddling with the +# instruction that labels refer to, ie. 
if we label an ll instruction: +# +# 1: ll v0, 0(a0) +# +# ...then with the assembler fix applied the label may actually point at a sync +# instruction inserted by the assembler, and if we were using the label in an +# exception table the table would no longer contain the address of the ll +# instruction. +# +# Avoid this by explicitly disabling that assembler behaviour. +# +cflags-y += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,) + # # CPU-dependent compiler/assembler options for optimization. # cflags-$(CONFIG_CPU_R3000) += -march=r3000 cflags-$(CONFIG_CPU_TX39XX) += -march=r3900 +cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c index d910c0a64de9..b13d8adf3be4 100644 --- a/arch/mips/alchemy/common/prom.c +++ b/arch/mips/alchemy/common/prom.c @@ -143,7 +143,3 @@ int __init prom_get_ethernet_addr(char *ethernet_addr) return 0; } - -void __init prom_free_prom_memory(void) -{ -} diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c index 0f60efe0481e..2388d68786f4 100644 --- a/arch/mips/alchemy/common/setup.c +++ b/arch/mips/alchemy/common/setup.c @@ -28,8 +28,8 @@ #include <linux/init.h> #include <linux/ioport.h> #include <linux/mm.h> +#include <linux/dma-map-ops.h> /* for dma_default_coherent */ -#include <asm/dma-coherence.h> #include <asm/mipsregs.h> #include <au1000.h> @@ -37,6 +37,23 @@ extern void __init board_setup(void); extern void __init alchemy_set_lpj(void); +static bool alchemy_dma_coherent(void) +{ + switch (alchemy_get_cputype()) { + case ALCHEMY_CPU_AU1000: + case ALCHEMY_CPU_AU1500: + case ALCHEMY_CPU_AU1100: + return false; + case ALCHEMY_CPU_AU1200: + /* Au1200 AB USB does not support coherent memory */ + if ((read_c0_prid() & PRID_REV_MASK) == 0) + return false; + return true; + default: + return true; + } +} + void __init plat_mem_setup(void) { alchemy_set_lpj(); @@ -48,20 +65,7 @@ void __init plat_mem_setup(void) /* Clear to obtain best system bus performance */ clear_c0_config(1 << 19); /* Clear Config[OD] */ - hw_coherentio = 0; - coherentio = IO_COHERENCE_ENABLED; - switch (alchemy_get_cputype()) { - case ALCHEMY_CPU_AU1000: - case ALCHEMY_CPU_AU1500: - case ALCHEMY_CPU_AU1100: - coherentio = IO_COHERENCE_DISABLED; - break; - case ALCHEMY_CPU_AU1200: - /* Au1200 AB USB does not support coherent memory */ - if (0 == (read_c0_prid() & PRID_REV_MASK)) - coherentio = IO_COHERENCE_DISABLED; - break; - } + dma_default_coherent = alchemy_dma_coherent(); board_setup(); /* board specific setup */ diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c index 787716c5e946..ce8024c1a54e 100644 --- a/arch/mips/ar7/memory.c +++ b/arch/mips/ar7/memory.c @@ -49,8 +49,3 @@ void __init prom_meminit(void) pages = memsize() >> PAGE_SHIFT; memblock_add(PHYS_OFFSET, pages << PAGE_SHIFT); } - -void __init prom_free_prom_memory(void) -{ - /* Nothing to free */ -} diff --git a/arch/mips/ath25/prom.c b/arch/mips/ath25/prom.c index edf82be8870d..4466e14feaa4 100644 --- a/arch/mips/ath25/prom.c +++ b/arch/mips/ath25/prom.c @@ -20,7 +20,3 @@ void __init prom_init(void) { } - -void __init prom_free_prom_memory(void) -{ -} diff --git a/arch/mips/ath79/prom.c b/arch/mips/ath79/prom.c index 25724b4e97fd..cc6dc5600677 100644 --- a/arch/mips/ath79/prom.c +++ b/arch/mips/ath79/prom.c @@ -32,8 +32,3 @@ void __init 
prom_init(void) } #endif } - -void __init prom_free_prom_memory(void) -{ - /* We do not have to prom memory to free */ -} diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 7e7bf9c2ad26..891f495c4c3c 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -213,16 +213,17 @@ unsigned int get_c0_compare_int(void) void __init plat_mem_setup(void) { - unsigned long fdt_start; + void *dtb; set_io_port_base(KSEG1); /* Get the position of the FDT passed by the bootloader */ - fdt_start = fw_getenvl("fdt_start"); - if (fdt_start) - __dt_setup_arch((void *)KSEG0ADDR(fdt_start)); - else if (fw_passed_dtb) - __dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb)); + dtb = (void *)fw_getenvl("fdt_start"); + if (dtb == NULL) + dtb = get_fdt(); + + if (dtb) + __dt_setup_arch((void *)KSEG0ADDR(dtb)); ath79_reset_base = ioremap(AR71XX_RESET_BASE, AR71XX_RESET_SIZE); diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c index 3e2a8166377f..0a63721d0fbf 100644 --- a/arch/mips/bcm47xx/prom.c +++ b/arch/mips/bcm47xx/prom.c @@ -113,10 +113,6 @@ void __init prom_init(void) setup_8250_early_printk_port(CKSEG1ADDR(BCM47XX_SERIAL_ADDR), 0, 0); } -void __init prom_free_prom_memory(void) -{ -} - #if defined(CONFIG_BCM47XX_BCMA) && defined(CONFIG_HIGHMEM) #define EXTVBASE 0xc0000000 diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c index df69eaa453a1..c3a2ea62c5c3 100644 --- a/arch/mips/bcm63xx/prom.c +++ b/arch/mips/bcm63xx/prom.c @@ -94,7 +94,3 @@ void __init prom_init(void) */ } } - -void __init prom_free_prom_memory(void) -{ -} diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c index 19308df5f577..31bcfa4e08b9 100644 --- a/arch/mips/bmips/setup.c +++ b/arch/mips/bmips/setup.c @@ -129,10 +129,6 @@ void __init prom_init(void) register_bmips_smp_ops(); } -void __init prom_free_prom_memory(void) -{ -} - const char *get_system_type(void) { return "Generic BMIPS kernel"; @@ -165,11 +161,10 @@ void __init plat_mem_setup(void) /* intended to somewhat resemble ARM; see Documentation/arm/booting.rst */ if (fw_arg0 == 0 && fw_arg1 == 0xffffffff) dtb = phys_to_virt(fw_arg2); - else if (fw_passed_dtb) /* UHI interface or appended dtb */ - dtb = (void *)fw_passed_dtb; - else if (__dtb_start != __dtb_end) - dtb = (void *)__dtb_start; else + dtb = get_fdt(); + + if (!dtb) panic("no dtb found"); __dt_setup_arch(dtb); @@ -201,4 +196,4 @@ static int __init plat_dev_init(void) return 0; } -device_initcall(plat_dev_init); +arch_initcall(plat_dev_init); diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 47cd9dc7454a..f93f72bcba97 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -37,6 +37,7 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \ # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 
KCOV_INSTRUMENT := n GCOV_PROFILE := n +UBSAN_SANITIZE := n # decompressor objects (linked with vmlinuz) vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c index e3946b06e840..3d70d15ada28 100644 --- a/arch/mips/boot/compressed/decompress.c +++ b/arch/mips/boot/compressed/decompress.c @@ -14,6 +14,7 @@ #include <asm/addrspace.h> #include <asm/unaligned.h> +#include <asm-generic/vmlinux.lds.h> /* * These two variables specify the free mem region @@ -120,6 +121,13 @@ void decompress_kernel(unsigned long boot_heap_start) /* last four bytes is always image size in little endian */ image_size = get_unaligned_le32((void *)&__image_end - 4); + /* The device tree's address must be properly aligned */ + image_size = ALIGN(image_size, STRUCT_ALIGNMENT); + + puts("Copy device tree to address "); + puthex(VMLINUX_LOAD_ADDRESS_ULL + image_size); + puts("\n"); + /* copy dtb to where the booted kernel will expect it */ memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size, __appended_dtb, dtb_size); diff --git a/arch/mips/boot/compressed/head.S b/arch/mips/boot/compressed/head.S index 409cb483a9ff..5795d0af1e1b 100644 --- a/arch/mips/boot/compressed/head.S +++ b/arch/mips/boot/compressed/head.S @@ -15,10 +15,7 @@ #include <asm/asm.h> #include <asm/regdef.h> - .set noreorder - .cprestore LEAF(start) -start: /* Save boot rom start args */ move s0, a0 move s1, a1 @@ -29,27 +26,26 @@ start: PTR_LA a0, _edata PTR_LA a2, _end 1: sw zero, 0(a0) + addiu a0, a0, 4 bne a2, a0, 1b - addiu a0, a0, 4 PTR_LA a0, (.heap) /* heap address */ PTR_LA sp, (.stack + 8192) /* stack address */ - PTR_LA ra, 2f - PTR_LA k0, decompress_kernel - jr k0 - nop + PTR_LA t9, decompress_kernel + jalr t9 + 2: move a0, s0 move a1, s1 move a2, s2 move a3, s3 - PTR_LI k0, KERNEL_ENTRY - jr k0 - nop + PTR_LI t9, KERNEL_ENTRY + jalr t9 + 3: b 3b - nop + END(start) .comm .heap,BOOT_HEAP_SIZE,4 diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile index 0259238d7a2e..60bd7d2a9ad8 100644 --- a/arch/mips/boot/dts/Makefile +++ b/arch/mips/boot/dts/Makefile @@ -14,6 +14,7 @@ subdir-$(CONFIG_FIT_IMAGE_FDT_NI169445) += ni subdir-$(CONFIG_MACH_PIC32) += pic32 subdir-$(CONFIG_ATH79) += qca subdir-$(CONFIG_RALINK) += ralink +subdir-$(CONFIG_MACH_REALTEK_RTL) += realtek subdir-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += xilfpga obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y)) diff --git a/arch/mips/boot/dts/realtek/Makefile b/arch/mips/boot/dts/realtek/Makefile new file mode 100644 index 000000000000..fba4e93187a6 --- /dev/null +++ b/arch/mips/boot/dts/realtek/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +dtb-y += cisco_sg220-26.dtb diff --git a/arch/mips/boot/dts/realtek/cisco_sg220-26.dts b/arch/mips/boot/dts/realtek/cisco_sg220-26.dts new file mode 100644 index 000000000000..1cdbb09297ef --- /dev/null +++ b/arch/mips/boot/dts/realtek/cisco_sg220-26.dts @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause + +/dts-v1/; + +#include "rtl83xx.dtsi" +#include "rtl838x.dtsi" + +/ { + model = "Cisco SG220-26"; + compatible = "cisco,sg220-26", "realtek,rtl8382-soc"; + + chosen { + stdout-path = "serial0:9600n8"; + bootargs = "earlycon console=ttyS0,9600"; + }; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x8000000>; + }; +}; + +&uart0 { + status = "okay"; +}; diff --git a/arch/mips/boot/dts/realtek/rtl838x.dtsi b/arch/mips/boot/dts/realtek/rtl838x.dtsi new file mode 100644 index 
000000000000..6cc4ff5c0d19 --- /dev/null +++ b/arch/mips/boot/dts/realtek/rtl838x.dtsi @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause + +/ { + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + compatible = "mips,mips4KEc"; + reg = <0>; + clocks = <&baseclk 0>; + clock-names = "cpu"; + }; + }; + + baseclk: baseclk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <500000000>; + }; +}; diff --git a/arch/mips/boot/dts/realtek/rtl83xx.dtsi b/arch/mips/boot/dts/realtek/rtl83xx.dtsi new file mode 100644 index 000000000000..de65a111b626 --- /dev/null +++ b/arch/mips/boot/dts/realtek/rtl83xx.dtsi @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause + +/ { + #address-cells = <1>; + #size-cells = <1>; + + aliases { + serial0 = &uart0; + serial1 = &uart1; + }; + + cpuintc: cpuintc { + compatible = "mti,cpu-interrupt-controller"; + #address-cells = <0>; + #interrupt-cells = <1>; + interrupt-controller; + }; + + soc: soc { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x18000000 0x10000>; + + uart0: uart@2000 { + compatible = "ns16550a"; + reg = <0x2000 0x100>; + + clock-frequency = <200000000>; + + interrupt-parent = <&cpuintc>; + interrupts = <31>; + + reg-io-width = <1>; + reg-shift = <2>; + fifo-size = <1>; + no-loopback-test; + + status = "disabled"; + }; + + uart1: uart@2100 { + compatible = "ns16550a"; + reg = <0x2100 0x100>; + + clock-frequency = <200000000>; + + interrupt-parent = <&cpuintc>; + interrupts = <30>; + + reg-io-width = <1>; + reg-shift = <2>; + fifo-size = <1>; + no-loopback-test; + + status = "disabled"; + }; + }; +}; diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 982826ba0ef7..ce4e2806159b 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -1149,12 +1149,15 @@ void __init device_tree_init(void) bool do_prune; bool fill_mac; - if (fw_passed_dtb) { - fdt = (void *)fw_passed_dtb; +#ifdef CONFIG_MIPS_ELF_APPENDED_DTB + if (!fdt_check_header(&__appended_dtb)) { + fdt = &__appended_dtb; do_prune = false; fill_mac = true; pr_info("Using appended Device Tree.\n"); - } else if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) { + } else +#endif + if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) { fdt = phys_to_virt(octeon_bootinfo->fdt_addr); if (fdt_check_header(fdt)) panic("Corrupt Device Tree passed to kernel."); diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c index 46581e686882..2e099d55a564 100644 --- a/arch/mips/cobalt/setup.c +++ b/arch/mips/cobalt/setup.c @@ -117,8 +117,3 @@ void __init prom_init(void) setup_8250_early_printk_port(CKSEG1ADDR(0x1c800000), 0, 0); } - -void __init prom_free_prom_memory(void) -{ - /* Nothing to do! 
*/ -} diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index 9c5fadef38cb..0e79f81217bc 100644 --- a/arch/mips/configs/loongson3_defconfig +++ b/arch/mips/configs/loongson3_defconfig @@ -31,6 +31,8 @@ CONFIG_PERF_EVENTS=y CONFIG_MACH_LOONGSON64=y CONFIG_CPU_HAS_MSA=y CONFIG_NR_CPUS=16 +CONFIG_NUMA=y +CONFIG_SMP=y CONFIG_HZ_256=y CONFIG_KEXEC=y CONFIG_MIPS32_O32=y diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig index 72a211d2d556..32c290611723 100644 --- a/arch/mips/configs/nlm_xlp_defconfig +++ b/arch/mips/configs/nlm_xlp_defconfig @@ -549,7 +549,6 @@ CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_FRAME_WARN=1024 -CONFIG_UNUSED_SYMBOLS=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DETECT_HUNG_TASK=y CONFIG_SCHEDSTATS=y diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig index 4ecb157e56d4..bf9b9244929e 100644 --- a/arch/mips/configs/nlm_xlr_defconfig +++ b/arch/mips/configs/nlm_xlr_defconfig @@ -500,7 +500,6 @@ CONFIG_CRC7=m CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y # CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_UNUSED_SYMBOLS=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DETECT_HUNG_TASK=y CONFIG_SCHEDSTATS=y diff --git a/arch/mips/crypto/Makefile b/arch/mips/crypto/Makefile index 8e1deaf00e0c..5e4105cccf9f 100644 --- a/arch/mips/crypto/Makefile +++ b/arch/mips/crypto/Makefile @@ -12,8 +12,8 @@ AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o poly1305-mips-y := poly1305-core.o poly1305-glue.o -perlasm-flavour-$(CONFIG_CPU_MIPS32) := o32 -perlasm-flavour-$(CONFIG_CPU_MIPS64) := 64 +perlasm-flavour-$(CONFIG_32BIT) := o32 +perlasm-flavour-$(CONFIG_64BIT) := 64 quiet_cmd_perlasm = PERLASM $@ cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@) diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c index 37625ae5e35d..ef5fc1ca1b5d 100644 --- a/arch/mips/fw/arc/memory.c +++ b/arch/mips/fw/arc/memory.c @@ -173,7 +173,7 @@ void __weak __init prom_cleanup(void) { } -void __weak __init prom_free_prom_memory(void) +void __init prom_free_prom_memory(void) { int i; diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c index 8f6730376a42..74975e115950 100644 --- a/arch/mips/fw/sni/sniprom.c +++ b/arch/mips/fw/sni/sniprom.c @@ -87,10 +87,6 @@ void *prom_get_hwconf(void) return (void *)CKSEG1ADDR(hwconf); } -void __init prom_free_prom_memory(void) -{ -} - /* * /proc/cpuinfo system type * diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c index 66a19337d2ab..1842cddd8356 100644 --- a/arch/mips/generic/init.c +++ b/arch/mips/generic/init.c @@ -39,14 +39,13 @@ void __init *plat_get_fdt(void) /* Already set up */ return (void *)fdt; - if (fw_passed_dtb && !fdt_check_header((void *)fw_passed_dtb)) { + fdt = (void *)get_fdt(); + if (fdt && !fdt_check_header(fdt)) { /* * We have been provided with the appropriate device tree for * the board. Make use of it & search for any machine struct * based upon the root compatible string. 
*/ - fdt = (void *)fw_passed_dtb; - for_each_mips_machine(check_mach) { match = mips_machine_is_compatible(check_mach, fdt); if (match) { @@ -202,7 +201,3 @@ void __init arch_init_irq(void) irqchip_init(); } - -void __init prom_free_prom_memory(void) -{ -} diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 95b4fa7bd0d1..8f6fe69674b7 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -4,6 +4,10 @@ generated-y += syscall_table_32_o32.h generated-y += syscall_table_64_n32.h generated-y += syscall_table_64_n64.h generated-y += syscall_table_64_o32.h +generated-y += unistd_nr_n32.h +generated-y += unistd_nr_n64.h +generated-y += unistd_nr_o32.h + generic-y += export.h generic-y += kvm_para.h generic-y += mcs_spinlock.h diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h index 3682d1a0bb80..ea4b62ece336 100644 --- a/arch/mips/include/asm/asm.h +++ b/arch/mips/include/asm/asm.h @@ -20,10 +20,27 @@ #include <asm/sgidefs.h> #include <asm/asm-eva.h> +#ifndef __VDSO__ +/* + * Emit CFI data in .debug_frame sections, not .eh_frame sections. + * We don't do DWARF unwinding at runtime, so only the offline DWARF + * information is useful to anyone. Note we should change this if we + * ever decide to enable DWARF unwinding at runtime. + */ +#define CFI_SECTIONS .cfi_sections .debug_frame +#else + /* + * For the vDSO, emit both runtime unwind information and debug + * symbols for the .dbg file. + */ +#define CFI_SECTIONS +#endif + /* * LEAF - declare leaf routine */ #define LEAF(symbol) \ + CFI_SECTIONS; \ .globl symbol; \ .align 2; \ .type symbol, @function; \ @@ -36,6 +53,7 @@ symbol: .frame sp, 0, ra; \ * NESTED - declare nested routine entry point */ #define NESTED(symbol, framesize, rpc) \ + CFI_SECTIONS; \ .globl symbol; \ .align 2; \ .type symbol, @function; \ diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index f904084fcb1f..27ad76791539 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -248,7 +248,7 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \ * bltz that can branch to code outside of the LL/SC loop. As \ * such, we don't need to emit another barrier here. \ */ \ - if (!__SYNC_loongson3_war) \ + if (__SYNC_loongson3_war == 0) \ smp_mb__after_atomic(); \ \ return result; \ diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index a74769940fbd..dc2a6234dd3c 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -26,7 +26,7 @@ #include <asm/war.h> #define __bit_op(mem, insn, inputs...) do { \ - unsigned long temp; \ + unsigned long __temp; \ \ asm volatile( \ " .set push \n" \ @@ -37,13 +37,13 @@ " " __SC "%0, %1 \n" \ " " __SC_BEQZ "%0, 1b \n" \ " .set pop \n" \ - : "=&r"(temp), "+" GCC_OFF_SMALL_ASM()(mem) \ + : "=&r"(__temp), "+" GCC_OFF_SMALL_ASM()(mem) \ : inputs \ : __LLSC_CLOBBER); \ } while (0) #define __test_bit_op(mem, ll_dst, insn, inputs...) 
({ \ - unsigned long orig, temp; \ + unsigned long __orig, __temp; \ \ asm volatile( \ " .set push \n" \ @@ -54,12 +54,12 @@ " " __SC "%1, %2 \n" \ " " __SC_BEQZ "%1, 1b \n" \ " .set pop \n" \ - : "=&r"(orig), "=&r"(temp), \ + : "=&r"(__orig), "=&r"(__temp), \ "+" GCC_OFF_SMALL_ASM()(mem) \ : inputs \ : __LLSC_CLOBBER); \ \ - orig; \ + __orig; \ }) /* @@ -435,7 +435,7 @@ static inline int fls(unsigned int x) * * This is defined the same way as * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). + * differs in spirit from the below ffz (man ffs). */ static inline int ffs(int word) { diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h index aa03b1237155..5be10ece3ef0 100644 --- a/arch/mips/include/asm/bootinfo.h +++ b/arch/mips/include/asm/bootinfo.h @@ -112,7 +112,27 @@ extern char arcs_cmdline[COMMAND_LINE_SIZE]; extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; #ifdef CONFIG_USE_OF -extern unsigned long fw_passed_dtb; +#include <linux/libfdt.h> +#include <linux/of_fdt.h> + +extern char __appended_dtb[]; + +static inline void *get_fdt(void) +{ + if (IS_ENABLED(CONFIG_MIPS_RAW_APPENDED_DTB) || + IS_ENABLED(CONFIG_MIPS_ELF_APPENDED_DTB)) + if (fdt_magic(&__appended_dtb) == FDT_MAGIC) + return &__appended_dtb; + + if (fw_arg0 == -2) /* UHI interface */ + return (void *)fw_arg1; + + if (IS_ENABLED(CONFIG_BUILTIN_DTB)) + if (&__dtb_start != &__dtb_end) + return &__dtb_start; + + return NULL; +} #endif /* diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index 5f80c28f5253..1e6c1354f245 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -130,6 +130,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum) { + unsigned long tmp = (__force unsigned long)sum; + __asm__( " .set push # csum_tcpudp_nofold\n" " .set noat \n" @@ -157,7 +159,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, " addu %0, $1 \n" #endif " .set pop" - : "=r" (sum) + : "=r" (tmp) : "0" ((__force unsigned long)daddr), "r" ((__force unsigned long)saddr), #ifdef __MIPSEL__ @@ -167,7 +169,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, #endif "r" ((__force unsigned long)sum)); - return sum; + return (__force __wsum)tmp; } #define csum_tcpudp_nofold csum_tcpudp_nofold diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 5b0b3a6777ea..ed8f3f3c4304 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -99,7 +99,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size) * contains a completion barrier prior to the LL, so we don't \ * need to emit an extra one here. \ */ \ - if (!__SYNC_loongson3_war) \ + if (__SYNC_loongson3_war == 0) \ smp_mb__before_llsc(); \ \ __res = (__typeof__(*(ptr))) \ @@ -191,7 +191,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old, * contains a completion barrier prior to the LL, so we don't \ * need to emit an extra one here. \ */ \ - if (!__SYNC_loongson3_war) \ + if (__SYNC_loongson3_war == 0) \ smp_mb__before_llsc(); \ \ __res = cmpxchg_local((ptr), (old), (new)); \ @@ -201,7 +201,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old, * contains a completion barrier after the SC, so we don't \ * need to emit an extra one here. 
\ */ \ - if (!__SYNC_loongson3_war) \ + if (__SYNC_loongson3_war == 0) \ smp_llsc_mb(); \ \ __res; \ diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index 3288cef4b168..2be5d7b5de68 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -122,6 +122,11 @@ static inline int __pure __get_cpu_type(const int cpu_type) case CPU_VR4181A: #endif +#ifdef CONFIG_SYS_HAS_CPU_R4300 + case CPU_R4300: + case CPU_R4310: +#endif + #ifdef CONFIG_SYS_HAS_CPU_R4X00 case CPU_R4000PC: case CPU_R4000SC: diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index c9222cc2244f..9e6211e6d76b 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -302,7 +302,7 @@ enum cpu_type_enum { /* * R4000 class processors */ - CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, + CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, CPU_R4300, CPU_R4310, CPU_R4400PC, CPU_R4400SC, CPU_R4400MC, CPU_R4600, CPU_R4640, CPU_R4650, CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R10000, CPU_R12000, CPU_R14000, CPU_R16000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h deleted file mode 100644 index 5eaa1fcc878a..000000000000 --- a/arch/mips/include/asm/dma-coherence.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org> - * - */ -#ifndef __ASM_DMA_COHERENCE_H -#define __ASM_DMA_COHERENCE_H - -enum coherent_io_user_state { - IO_COHERENCE_DEFAULT, - IO_COHERENCE_ENABLED, - IO_COHERENCE_DISABLED, -}; - -#if defined(CONFIG_DMA_PERDEV_COHERENT) -/* Don't provide (hw_)coherentio to avoid misuse */ -#elif defined(CONFIG_DMA_MAYBE_COHERENT) -extern enum coherent_io_user_state coherentio; -extern int hw_coherentio; - -static inline bool dev_is_dma_coherent(struct device *dev) -{ - return coherentio == IO_COHERENCE_ENABLED || - (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio); -} -#else -#ifdef CONFIG_DMA_NONCOHERENT -#define coherentio IO_COHERENCE_DISABLED -#else -#define coherentio IO_COHERENCE_ENABLED -#endif -#define hw_coherentio 0 -#endif /* CONFIG_DMA_MAYBE_COHERENT */ - -#endif diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h index 22912f78401c..2f98ced30263 100644 --- a/arch/mips/include/asm/inst.h +++ b/arch/mips/include/asm/inst.h @@ -65,11 +65,11 @@ #define I_FR_SFT 21 #define MIPSInst_FR(x) ((MIPSInst(x) & 0x03e00000) >> I_FR_SFT) -#define I_FMA_FUNC_SFT 2 -#define MIPSInst_FMA_FUNC(x) ((MIPSInst(x) & 0x0000003c) >> I_FMA_FUNC_SFT) +#define I_FMA_FUNC_SFT 3 +#define MIPSInst_FMA_FUNC(x) ((MIPSInst(x) & 0x00000038) >> I_FMA_FUNC_SFT) #define I_FMA_FFMT_SFT 0 -#define MIPSInst_FMA_FFMT(x) (MIPSInst(x) & 0x00000003) +#define MIPSInst_FMA_FFMT(x) (MIPSInst(x) & 0x00000007) typedef unsigned int mips_instruction; diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index c5d351786416..f021de661c3a 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -20,6 +20,7 @@ #define IRQ_STACK_SIZE THREAD_SIZE #define IRQ_STACK_START (IRQ_STACK_SIZE - 16) +extern void __init init_IRQ(void); extern void *irq_stack[NR_CPUS]; /* diff --git a/arch/mips/include/asm/irq_cpu.h b/arch/mips/include/asm/irq_cpu.h index 8d321180b5c2..83d7331ab215 100644 --- a/arch/mips/include/asm/irq_cpu.h +++ 
b/arch/mips/include/asm/irq_cpu.h @@ -10,8 +10,6 @@ #define _ASM_IRQ_CPU_H extern void mips_cpu_irq_init(void); -extern void rm7k_cpu_irq_init(void); -extern void rm9k_cpu_irq_init(void); #ifdef CONFIG_IRQ_DOMAIN struct device_node; diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 24f3d0f9996b..3a5612e7304c 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -83,7 +83,6 @@ #define KVM_MAX_VCPUS 16 -#define KVM_USER_MEM_SLOTS 16 /* memory slots that does not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 0 diff --git a/arch/mips/include/asm/mach-generic/irq.h b/arch/mips/include/asm/mach-generic/irq.h index 079889ced4f3..4249af4bef84 100644 --- a/arch/mips/include/asm/mach-generic/irq.h +++ b/arch/mips/include/asm/mach-generic/irq.h @@ -28,12 +28,6 @@ #endif /* CONFIG_I8259 */ #endif -#ifdef CONFIG_IRQ_CPU_RM7K -#ifndef RM7K_CPU_IRQ_BASE -#define RM7K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+8) -#endif -#endif - #endif /* CONFIG_IRQ_MIPS_CPU */ #endif /* __ASM_MACH_GENERIC_IRQ_H */ diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h index fde1b75c45ea..ac1c20e172a2 100644 --- a/arch/mips/include/asm/mach-loongson64/loongson.h +++ b/arch/mips/include/asm/mach-loongson64/loongson.h @@ -23,8 +23,8 @@ extern u32 memsize, highmemsize; extern const struct plat_smp_ops loongson3_smp_ops; /* loongson-specific command line, env and memory initialization */ -extern void __init prom_init_memory(void); extern void __init prom_init_env(void); +extern void __init szmem(unsigned int node); extern void *loongson_fdt_blob; /* irq operation functions */ diff --git a/arch/mips/include/asm/mach-n64/irq.h b/arch/mips/include/asm/mach-n64/irq.h new file mode 100644 index 000000000000..7e260fcb2a51 --- /dev/null +++ b/arch/mips/include/asm/mach-n64/irq.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_MACH_N64_IRQ_H +#define __ASM_MACH_N64_IRQ_H + +#define NR_IRQS 8 + +#include <asm/mach-generic/irq.h> + +#endif /* __ASM_MACH_N64_IRQ_H */ diff --git a/arch/mips/include/asm/mach-n64/kmalloc.h b/arch/mips/include/asm/mach-n64/kmalloc.h new file mode 100644 index 000000000000..e8b8d0b19571 --- /dev/null +++ b/arch/mips/include/asm/mach-n64/kmalloc.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_MACH_N64_KMALLOC_H +#define __ASM_MACH_N64_KMALLOC_H + +/* The default of 128 bytes wastes too much, use 32 (the largest cacheline, I) */ +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES + +#endif /* __ASM_MACH_N64_KMALLOC_H */ diff --git a/arch/mips/include/asm/mach-pistachio/irq.h b/arch/mips/include/asm/mach-pistachio/irq.h deleted file mode 100644 index 74ac016503ad..000000000000 --- a/arch/mips/include/asm/mach-pistachio/irq.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Pistachio IRQ setup - * - * Copyright (C) 2014 Google, Inc. 
- */ - -#ifndef __ASM_MACH_PISTACHIO_IRQ_H -#define __ASM_MACH_PISTACHIO_IRQ_H - -#define NR_IRQS 256 - -#include <asm/mach-generic/irq.h> - -#endif /* __ASM_MACH_PISTACHIO_IRQ_H */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index a0e8ae5497b6..9c8099a6ffed 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1085,6 +1085,10 @@ #define CVMVMCONF_RMMUSIZEM1_S 0 #define CVMVMCONF_RMMUSIZEM1 (_U64CAST_(0xff) << CVMVMCONF_RMMUSIZEM1_S) +/* Debug register field definitions */ +#define MIPS_DEBUG_DBP_SHIFT 1 +#define MIPS_DEBUG_DBP (_ULCAST_(1) << MIPS_DEBUG_DBP_SHIFT) + /* * Coprocessor 1 (FPU) register names */ diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h index 08d48f37c046..7e714aefc76d 100644 --- a/arch/mips/include/asm/octeon/octeon.h +++ b/arch/mips/include/asm/octeon/octeon.h @@ -282,7 +282,6 @@ union octeon_cvmemctl { extern void octeon_check_cpu_bist(void); int octeon_prune_device_tree(void); -extern const char __appended_dtb; extern const char __dtb_octeon_3xxx_begin; extern const char __dtb_octeon_68xx_begin; diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 6a77bc4a6eec..65acab9c41f9 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -202,14 +202,13 @@ static inline unsigned long ___pa(unsigned long x) /* * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad * (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The - * discussion can be found in lkml posting - * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is - * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html + * discussion can be found in + * https://lore.kernel.org/lkml/a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com * * It is unclear if the misscompilations mentioned in - * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one - * until GCC 3.x has been retired before we can apply - * https://patchwork.linux-mips.org/patch/1541/ + * https://lore.kernel.org/lkml/1281303490-390-1-git-send-email-namhyung@gmail.com + * also affect MIPS so we keep this one until GCC 3.x has been retired + * before we can apply https://patchwork.linux-mips.org/patch/1541/ */ #ifndef __pa_symbol @@ -255,6 +254,12 @@ extern bool __virt_addr_valid(const volatile void *kaddr); #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC +extern unsigned long __kaslr_offset; +static inline unsigned long kaslr_offset(void) +{ + return __kaslr_offset; +} + #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 4f9c37616d42..804889b70965 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -64,6 +64,7 @@ struct vm_area_struct; #define __S111 __pgprot(0) extern unsigned long _page_cachable_default; +extern void __update_cache(unsigned long address, pte_t pte); /* * ZERO_PAGE is a global shared page that is always zero; used @@ -94,31 +95,31 @@ extern void paging_init(void); #define htw_stop() \ do { \ - unsigned long flags; \ + unsigned long __flags; \ \ if (cpu_has_htw) { \ - local_irq_save(flags); \ + local_irq_save(__flags); \ if(!raw_current_cpu_data.htw_seq++) { \ write_c0_pwctl(read_c0_pwctl() & \ ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ back_to_back_c0_hazard(); \ } \ - local_irq_restore(flags); \ + local_irq_restore(__flags); \ } \ } while(0) #define 
htw_start() \ do { \ - unsigned long flags; \ + unsigned long __flags; \ \ if (cpu_has_htw) { \ - local_irq_save(flags); \ + local_irq_save(__flags); \ if (!--raw_current_cpu_data.htw_seq) { \ write_c0_pwctl(read_c0_pwctl() | \ (1 << MIPS_PWCTL_PWEN_SHIFT)); \ back_to_back_c0_hazard(); \ } \ - local_irq_restore(flags); \ + local_irq_restore(__flags); \ } \ } while(0) @@ -224,7 +225,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { - extern void __update_cache(unsigned long address, pte_t pte); if (!pte_present(pteval)) goto cache_sync_done; diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index 1e76774b36dd..daf3cf244ea9 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -53,7 +53,7 @@ struct pt_regs { static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) { - return regs->regs[31]; + return regs->regs[29]; } static inline void instruction_pointer_set(struct pt_regs *regs, diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 15ab16f99f28..af3788589ee6 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -23,7 +23,6 @@ #include <asm/mipsmtregs.h> #include <asm/mmzone.h> #include <asm/unroll.h> -#include <linux/uaccess.h> /* for uaccess_kernel() */ extern void (*r4k_blast_dcache)(void); extern void (*r4k_blast_icache)(void); @@ -102,14 +101,17 @@ static inline void flush_scache_line(unsigned long addr) cache_op(Hit_Writeback_Inv_SD, addr); } -#define protected_cache_op(op,addr) \ +#ifdef CONFIG_EVA + +#define protected_cache_op(op, addr) \ ({ \ int __err = 0; \ __asm__ __volatile__( \ " .set push \n" \ " .set noreorder \n" \ - " .set "MIPS_ISA_ARCH_LEVEL" \n" \ - "1: cache %1, (%2) \n" \ + " .set mips0 \n" \ + " .set eva \n" \ + "1: cachee %1, (%2) \n" \ "2: .insn \n" \ " .set pop \n" \ " .section .fixup,\"ax\" \n" \ @@ -123,17 +125,16 @@ static inline void flush_scache_line(unsigned long addr) : "i" (op), "r" (addr), "i" (-EFAULT)); \ __err; \ }) +#else - -#define protected_cachee_op(op,addr) \ +#define protected_cache_op(op, addr) \ ({ \ int __err = 0; \ __asm__ __volatile__( \ " .set push \n" \ " .set noreorder \n" \ - " .set mips0 \n" \ - " .set eva \n" \ - "1: cachee %1, (%2) \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ + "1: cache %1, (%2) \n" \ "2: .insn \n" \ " .set pop \n" \ " .section .fixup,\"ax\" \n" \ @@ -147,6 +148,7 @@ static inline void flush_scache_line(unsigned long addr) : "i" (op), "r" (addr), "i" (-EFAULT)); \ __err; \ }) +#endif /* * The next two are for badland addresses like signal trampolines. 
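The asm/ptrace.h hunk above is a genuine bug fix rather than churn: pt_regs stores the MIPS general-purpose registers by index, and $29 is the stack pointer while $31 is the return address, so kernel_stack_pointer() used to hand back ra instead of sp. A small illustration of the convention; stack_pointer_of() and the two macros are hypothetical names for exposition, not kernel definitions:

	/* GPR numbering as stored in pt_regs->regs[0..31] on MIPS. */
	#define MIPS_GPR_SP	29	/* $sp, stack pointer */
	#define MIPS_GPR_RA	31	/* $ra, return address */

	static inline unsigned long stack_pointer_of(const struct pt_regs *regs)
	{
		/* Before the fix this read regs->regs[31], i.e. $ra. */
		return regs->regs[MIPS_GPR_SP];
	}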
@@ -158,11 +160,7 @@ static inline int protected_flush_icache_line(unsigned long addr) return protected_cache_op(Hit_Invalidate_I_Loongson2, addr); default: -#ifdef CONFIG_EVA - return protected_cachee_op(Hit_Invalidate_I, addr); -#else return protected_cache_op(Hit_Invalidate_I, addr); -#endif } } @@ -174,20 +172,12 @@ static inline int protected_flush_icache_line(unsigned long addr) */ static inline int protected_writeback_dcache_line(unsigned long addr) { -#ifdef CONFIG_EVA - return protected_cachee_op(Hit_Writeback_Inv_D, addr); -#else return protected_cache_op(Hit_Writeback_Inv_D, addr); -#endif } static inline int protected_writeback_scache_line(unsigned long addr) { -#ifdef CONFIG_EVA - return protected_cachee_op(Hit_Writeback_Inv_SD, addr); -#else return protected_cache_op(Hit_Writeback_Inv_SD, addr); -#endif } /* @@ -307,43 +297,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, } \ } -#ifndef CONFIG_EVA - __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, ) __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, ) - -#else - -#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \ -static inline void protected_blast_##pfx##cache##_range(unsigned long start,\ - unsigned long end) \ -{ \ - unsigned long lsize = cpu_##desc##_line_size(); \ - unsigned long addr = start & ~(lsize - 1); \ - unsigned long aend = (end - 1) & ~(lsize - 1); \ - \ - if (!uaccess_kernel()) { \ - while (1) { \ - protected_cachee_op(hitop, addr); \ - if (addr == aend) \ - break; \ - addr += lsize; \ - } \ - } else { \ - while (1) { \ - protected_cache_op(hitop, addr); \ - if (addr == aend) \ - break; \ - addr += lsize; \ - } \ - \ - } \ -} - -__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D) -__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I) - -#endif __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, ) __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \ protected_, loongson2_) diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 8a88eb265516..6ce2117e49f6 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -10,7 +10,6 @@ #define _ASM_SPINLOCK_H #include <asm/processor.h> -#include <asm/qrwlock.h> #include <asm-generic/qspinlock_types.h> @@ -27,5 +26,6 @@ static inline void queued_spin_unlock(struct qspinlock *lock) } #include <asm/qspinlock.h> +#include <asm/qrwlock.h> #endif /* _ASM_SPINLOCK_H */ diff --git a/arch/mips/include/asm/spram.h b/arch/mips/include/asm/spram.h index 63cb90fd4148..373f2a5d495d 100644 --- a/arch/mips/include/asm/spram.h +++ b/arch/mips/include/asm/spram.h @@ -5,7 +5,7 @@ #if defined(CONFIG_MIPS_SPRAM) extern __init void spram_config(void); #else -static inline void spram_config(void) { }; +static inline void spram_config(void) { } #endif /* CONFIG_MIPS_SPRAM */ #endif /* _MIPS_SPRAM_H */ diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h index 6a0864bb604d..b710e76c9c65 100644 --- a/arch/mips/include/asm/traps.h +++ b/arch/mips/include/asm/traps.h @@ -24,6 +24,10 @@ extern void (*board_ebase_setup)(void); extern void (*board_cache_error_setup)(void); extern int register_nmi_notifier(struct notifier_block *nb); +extern void reserve_exception_space(phys_addr_t addr, unsigned long size); +extern char except_vec_nmi[]; + +#define VECTORSPACING 0x100 /* for EI/VI mode */ #define nmi_notifier(fn, pri) \ ({ \ diff --git a/arch/mips/include/asm/vermagic.h 
b/arch/mips/include/asm/vermagic.h index 4d2dae0c7c57..371c1873df0d 100644 --- a/arch/mips/include/asm/vermagic.h +++ b/arch/mips/include/asm/vermagic.h @@ -26,6 +26,8 @@ #define MODULE_PROC_FAMILY "TX39XX " #elif defined CONFIG_CPU_VR41XX #define MODULE_PROC_FAMILY "VR41XX " +#elif defined CONFIG_CPU_R4300 +#define MODULE_PROC_FAMILY "R4300 " #elif defined CONFIG_CPU_R4X00 #define MODULE_PROC_FAMILY "R4X00 " #elif defined CONFIG_CPU_TX49XX diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h index 80e70dbd1f64..baa949a744cb 100644 --- a/arch/mips/include/asm/vpe.h +++ b/arch/mips/include/asm/vpe.h @@ -26,7 +26,6 @@ #endif #define MAX_VPES 16 -#define VPE_PATH_MAX 256 static inline int aprp_cpu_index(void) { @@ -62,7 +61,6 @@ struct vpe { unsigned long len; char *pbuffer; unsigned long plen; - char cwd[VPE_PATH_MAX]; unsigned long __start; @@ -111,7 +109,6 @@ extern const struct file_operations vpe_fops; int vpe_notify(int index, struct vpe_notifications *notify); void *vpe_get_shared(int index); -char *vpe_getcwd(int index); struct vpe *get_vpe(int minor); struct tc *get_tc(int index); diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild index 6db08385d3d8..fdb9c5412cd9 100644 --- a/arch/mips/include/uapi/asm/Kbuild +++ b/arch/mips/include/uapi/asm/Kbuild @@ -2,8 +2,5 @@ generated-y += unistd_n32.h generated-y += unistd_n64.h generated-y += unistd_o32.h -generated-y += unistd_nr_n32.h -generated-y += unistd_nr_n64.h -generated-y += unistd_nr_o32.h generic-y += kvm_para.h diff --git a/arch/mips/include/uapi/asm/perf_regs.h b/arch/mips/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000000..d0f4ecd616cf --- /dev/null +++ b/arch/mips/include/uapi/asm/perf_regs.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_MIPS_PERF_REGS_H +#define _ASM_MIPS_PERF_REGS_H + +enum perf_event_mips_regs { + PERF_REG_MIPS_PC, + PERF_REG_MIPS_R1, + PERF_REG_MIPS_R2, + PERF_REG_MIPS_R3, + PERF_REG_MIPS_R4, + PERF_REG_MIPS_R5, + PERF_REG_MIPS_R6, + PERF_REG_MIPS_R7, + PERF_REG_MIPS_R8, + PERF_REG_MIPS_R9, + PERF_REG_MIPS_R10, + PERF_REG_MIPS_R11, + PERF_REG_MIPS_R12, + PERF_REG_MIPS_R13, + PERF_REG_MIPS_R14, + PERF_REG_MIPS_R15, + PERF_REG_MIPS_R16, + PERF_REG_MIPS_R17, + PERF_REG_MIPS_R18, + PERF_REG_MIPS_R19, + PERF_REG_MIPS_R20, + PERF_REG_MIPS_R21, + PERF_REG_MIPS_R22, + PERF_REG_MIPS_R23, + PERF_REG_MIPS_R24, + PERF_REG_MIPS_R25, + PERF_REG_MIPS_R26, + PERF_REG_MIPS_R27, + PERF_REG_MIPS_R28, + PERF_REG_MIPS_R29, + PERF_REG_MIPS_R30, + PERF_REG_MIPS_R31, + PERF_REG_MIPS_MAX = PERF_REG_MIPS_R31 + 1, +}; +#endif /* _ASM_MIPS_PERF_REGS_H */ diff --git a/arch/mips/jazz/Kconfig b/arch/mips/jazz/Kconfig index 06838f80a5d7..42932ca98db9 100644 --- a/arch/mips/jazz/Kconfig +++ b/arch/mips/jazz/Kconfig @@ -3,7 +3,6 @@ config ACER_PICA_61 bool "Support for Acer PICA 1 chipset" depends on MACH_JAZZ select DMA_NONCOHERENT - select SYS_SUPPORTS_LITTLE_ENDIAN help This is a machine with a R4400 133/150 MHz CPU. To compile a Linux kernel that runs on these, say Y here. For details about Linux on @@ -15,7 +14,6 @@ config MIPS_MAGNUM_4000 depends on MACH_JAZZ select DMA_NONCOHERENT select SYS_SUPPORTS_BIG_ENDIAN - select SYS_SUPPORTS_LITTLE_ENDIAN help This is a machine with a R4000 100 MHz CPU. To compile a Linux kernel that runs on these, say Y here. 
For details about Linux on @@ -26,7 +24,6 @@ config OLIVETTI_M700 bool "Support for Olivetti M700-10" depends on MACH_JAZZ select DMA_NONCOHERENT - select SYS_SUPPORTS_LITTLE_ENDIAN help This is a machine with a R4000 100 MHz CPU. To compile a Linux kernel that runs on these, say Y here. For details about Linux on diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 943eaeef73e9..b4a57f1de772 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -71,7 +71,6 @@ obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o -obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o obj-$(CONFIG_MIPS_MSC) += irq-msc01.o obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o @@ -104,7 +103,7 @@ obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) -obj-$(CONFIG_PERF_EVENTS) += perf_event.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c index 47312c529410..53d8ea7d36e6 100644 --- a/arch/mips/kernel/cacheinfo.c +++ b/arch/mips/kernel/cacheinfo.c @@ -35,6 +35,11 @@ static int __init_cache_level(unsigned int cpu) leaves += (c->icache.waysize) ? 2 : 1; + if (c->vcache.waysize) { + levels++; + leaves++; + } + if (c->scache.waysize) { levels++; leaves++; @@ -74,25 +79,36 @@ static int __populate_cache_leaves(unsigned int cpu) struct cpuinfo_mips *c = ¤t_cpu_data; struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf = this_cpu_ci->info_list; + int level = 1; if (c->icache.waysize) { - /* L1 caches are per core */ + /* I/D caches are per core */ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); - populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA); + populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA); fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); - populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST); + populate_cache(icache, this_leaf, level, CACHE_TYPE_INST); + level++; } else { - populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED); + populate_cache(dcache, this_leaf, level, CACHE_TYPE_UNIFIED); + level++; + } + + if (c->vcache.waysize) { + /* Vcache is per core as well */ + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); + populate_cache(vcache, this_leaf, level, CACHE_TYPE_UNIFIED); + level++; } if (c->scache.waysize) { - /* L2 cache is per cluster */ + /* Scache is per cluster */ fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map); - populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED); + populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED); + level++; } if (c->tcache.waysize) - populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED); + populate_cache(tcache, this_leaf, level, CACHE_TYPE_UNIFIED); this_cpu_ci->cpu_map_populated = true; diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index 5709469c21ff..d761ead2e7fe 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c @@ -193,7 +193,7 @@ void __init txx9_clockevent_init(unsigned long baseaddr, int irq, cd->min_delta_ns = clockevent_delta2ns(0xf, cd); cd->min_delta_ticks = 0xf; cd->irq = irq; - cd->cpumask = cpumask_of(0), + cd->cpumask = cpumask_of(0); 
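The cevt-txx9 hunk just above swaps a stray comma for a semicolon. The old code still compiled and behaved correctly, because assignment binds tighter than the comma operator: together with the following call, the statement parsed as (cd->cpumask = cpumask_of(0)), (clockevents_register_device(cd)); that is, one expression statement whose operands are evaluated in order, so the fix is purely for readability. A standalone sketch of that parse, with made-up values unrelated to the driver:

	#include <stdio.h>

	int main(void)
	{
		int a = 0, b = 0;

		/* Parses as (a = 1), (b = 2): the comma operator just
		 * sequences the two assignments, same effect as ';'. */
		a = 1, b = 2;
		printf("a=%d b=%d\n", a, b);
		return 0;
	}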
clockevents_register_device(cd); if (request_irq(irq, txx9tmr_interrupt, IRQF_PERCPU | IRQF_TIMER, "txx9tmr", &txx9_clock_event_device)) diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 4db7ff055c9f..975343240148 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -91,7 +91,6 @@ .set pop .endm -.section .text.cps-vec .balign 0x1000 LEAF(mips_cps_core_entry) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index e6853697a056..b71892064f27 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -26,6 +26,7 @@ #include <asm/elf.h> #include <asm/pgtable-bits.h> #include <asm/spram.h> +#include <asm/traps.h> #include <linux/uaccess.h> #include "fpu-probe.h" @@ -1154,6 +1155,15 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) break; } break; + case PRID_IMP_R4300: + c->cputype = CPU_R4300; + __cpu_name[cpu] = "R4300"; + set_isa(c, MIPS_CPU_ISA_III); + c->fpu_msk31 |= FPU_CSR_CONDX; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + c->tlbsize = 32; + break; case PRID_IMP_R4600: c->cputype = CPU_R4600; __cpu_name[cpu] = "R4600"; @@ -1619,6 +1629,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_BMIPS3300; __cpu_name[cpu] = "Broadcom BMIPS3300"; set_elf_platform(cpu, "bmips3300"); + reserve_exception_space(0x400, VECTORSPACING * 64); break; case PRID_IMP_BMIPS43XX: { int rev = c->processor_id & PRID_REV_MASK; @@ -1629,6 +1640,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "Broadcom BMIPS4380"; set_elf_platform(cpu, "bmips4380"); c->options |= MIPS_CPU_RIXI; + reserve_exception_space(0x400, VECTORSPACING * 64); } else { c->cputype = CPU_BMIPS4350; __cpu_name[cpu] = "Broadcom BMIPS4350"; @@ -1645,6 +1657,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "Broadcom BMIPS5000"; set_elf_platform(cpu, "bmips5000"); c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI; + reserve_exception_space(0x1000, VECTORSPACING * 64); break; } } @@ -1830,16 +1843,17 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) */ case PRID_COMP_INGENIC_D0: c->isa_level &= ~MIPS_CPU_ISA_M32R2; - break; + fallthrough; /* * The config0 register in the XBurst CPUs with a processor ID of - * PRID_COMP_INGENIC_D1 has an abandoned huge page tlb mode, this - * mode is not compatible with the MIPS standard, it will cause - * tlbmiss and into an infinite loop (line 21 in the tlb-funcs.S) - * when starting the init process. After chip reset, the default - * is HPTLB mode, Write 0xa9000000 to cp0 register 5 sel 4 to - * switch back to VTLB mode to prevent getting stuck. + * PRID_COMP_INGENIC_D0 or PRID_COMP_INGENIC_D1 has an abandoned + * huge page TLB mode. This mode is not compatible with the MIPS + * standard and will cause a TLB miss and fall into an infinite + * loop (line 21 in tlb-funcs.S) when starting the init process. + * After chip reset, the default is HPTLB mode; write 0xa9000000 + * to CP0 register 5 sel 4 to switch back to VTLB mode to prevent + * getting stuck.
*/ case PRID_COMP_INGENIC_D1: write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS); @@ -2123,6 +2137,8 @@ void cpu_probe(void) if (cpu == 0) __ua_limit = ~((1ull << cpu_vmbits) - 1); #endif + + reserve_exception_space(0, 0x1000); } void cpu_report(void) diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c index abdbbe8c5a43..af654771918c 100644 --- a/arch/mips/kernel/cpu-r3k-probe.c +++ b/arch/mips/kernel/cpu-r3k-probe.c @@ -21,6 +21,7 @@ #include <asm/fpu.h> #include <asm/mipsregs.h> #include <asm/elf.h> +#include <asm/traps.h> #include "fpu-probe.h" @@ -158,6 +159,8 @@ void cpu_probe(void) cpu_set_fpu_opts(c); else cpu_set_nofpu_opts(c); + + reserve_exception_space(0, 0x400); } void cpu_report(void) diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c index 01b2bd95ba1f..2e50f55185a6 100644 --- a/arch/mips/kernel/crash_dump.c +++ b/arch/mips/kernel/crash_dump.c @@ -1,11 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/highmem.h> -#include <linux/memblock.h> #include <linux/crash_dump.h> -#include <linux/uaccess.h> -#include <linux/slab.h> - -static void *kdump_buf_page; /** * copy_oldmem_page - copy one page from "oldmem" @@ -19,10 +14,6 @@ static void *kdump_buf_page; * * Copy a page from "oldmem". For this page, there is no pte mapped * in the current kernel. - * - * Calling copy_to_user() in atomic context is not desirable. Hence first - * copying the data to a pre-allocated kernel page and then copying to user - * space in non-atomic context. */ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, unsigned long offset, int userbuf) @@ -32,36 +23,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, if (!csize) return 0; - vaddr = kmap_atomic_pfn(pfn); + vaddr = kmap_local_pfn(pfn); if (!userbuf) { - memcpy(buf, (vaddr + offset), csize); - kunmap_atomic(vaddr); + memcpy(buf, vaddr + offset, csize); } else { - if (!kdump_buf_page) { - pr_warn("Kdump: Kdump buffer page not allocated\n"); - - return -EFAULT; - } - copy_page(kdump_buf_page, vaddr); - kunmap_atomic(vaddr); - if (copy_to_user(buf, (kdump_buf_page + offset), csize)) - return -EFAULT; + if (copy_to_user(buf, vaddr + offset, csize)) + csize = -EFAULT; } - return csize; -} - -static int __init kdump_buf_page_init(void) -{ - int ret = 0; + kunmap_local(vaddr); - kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!kdump_buf_page) { - pr_warn("Kdump: Failed to allocate kdump buffer page\n"); - ret = -ENOMEM; - } - - return ret; + return csize; } -arch_initcall(kdump_buf_page_init); diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index f57e68f40a34..666b9969c1bd 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -73,7 +73,6 @@ static inline void ftrace_dyn_arch_init_insns(void) static int ftrace_modify_code(unsigned long ip, unsigned int new_code) { int faulted; - mm_segment_t old_fs; /* *(unsigned int *)ip = new_code; */ safe_store_code(new_code, ip, faulted); @@ -81,10 +80,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) if (unlikely(faulted)) return -EFAULT; - old_fs = get_fs(); - set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); - set_fs(old_fs); return 0; } diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index bcce32a3de10..743d75927b71 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -349,8 +349,8 @@ NESTED(ejtag_debug_handler, PT_SIZE, sp) MTC0 k0, CP0_DESAVE mfc0 k0, CP0_DEBUG - sll k0, k0, 30 # Check for SDBBP. 
- bgez k0, ejtag_return + andi k0, k0, MIPS_DEBUG_DBP # Check for SDBBP. + beqz k0, ejtag_return #ifdef CONFIG_SMP 1: PTR_LA k0, ejtag_debug_buffer_spinlock diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index 61b73580b877..b825ed4476c7 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -93,33 +93,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point jr t0 0: -#ifdef CONFIG_USE_OF -#if defined(CONFIG_MIPS_RAW_APPENDED_DTB) || \ - defined(CONFIG_MIPS_ELF_APPENDED_DTB) - - PTR_LA t2, __appended_dtb - -#ifdef CONFIG_CPU_BIG_ENDIAN - li t1, 0xd00dfeed -#else /* !CONFIG_CPU_BIG_ENDIAN */ - li t1, 0xedfe0dd0 -#endif /* !CONFIG_CPU_BIG_ENDIAN */ - lw t0, (t2) - beq t0, t1, dtb_found -#endif /* CONFIG_MIPS_RAW_APPENDED_DTB || CONFIG_MIPS_ELF_APPENDED_DTB */ - li t1, -2 - move t2, a1 - beq a0, t1, dtb_found - -#ifdef CONFIG_BUILTIN_DTB - PTR_LA t2, __dtb_start - PTR_LA t1, __dtb_end - bne t1, t2, dtb_found -#endif /* CONFIG_BUILTIN_DTB */ - - li t2, 0 -dtb_found: -#endif /* CONFIG_USE_OF */ PTR_LA t0, __bss_start # clear .bss LONG_S zero, (t0) PTR_LA t1, __bss_stop - LONGSIZE @@ -133,10 +106,6 @@ dtb_found: LONG_S a2, fw_arg2 LONG_S a3, fw_arg3 -#ifdef CONFIG_USE_OF - LONG_S t2, fw_passed_dtb -#endif - MTC0 zero, CP0_CONTEXT # clear context register #ifdef CONFIG_64BIT MTC0 zero, CP0_XCONTEXT diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 18e69ebf5691..1aca3b4db904 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -151,6 +151,7 @@ void __init check_wait(void) cpu_wait = r39xx_wait; break; case CPU_R4200: +/* case CPU_R4300: */ case CPU_R4600: case CPU_R4640: case CPU_R4650: diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c deleted file mode 100644 index e1a497f639d7..000000000000 --- a/arch/mips/kernel/irq-rm7000.c +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2003 Ralf Baechle - * - * Handler for RM7000 extended interrupts. These are a non-standard - * feature so we handle them separately from standard interrupts. 
- */ -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/kernel.h> - -#include <asm/irq_cpu.h> -#include <asm/mipsregs.h> - -static inline void unmask_rm7k_irq(struct irq_data *d) -{ - set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE)); -} - -static inline void mask_rm7k_irq(struct irq_data *d) -{ - clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE)); -} - -static struct irq_chip rm7k_irq_controller = { - .name = "RM7000", - .irq_ack = mask_rm7k_irq, - .irq_mask = mask_rm7k_irq, - .irq_mask_ack = mask_rm7k_irq, - .irq_unmask = unmask_rm7k_irq, - .irq_eoi = unmask_rm7k_irq -}; - -void __init rm7k_cpu_irq_init(void) -{ - int base = RM7K_CPU_IRQ_BASE; - int i; - - clear_c0_intcontrol(0x00000f00); /* Mask all */ - - for (i = base; i < base + 4; i++) - irq_set_chip_and_handler(i, &rm7k_irq_controller, - handle_percpu_irq); -} diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index ea781b29f7f1..09a2d7bb9eef 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -32,7 +32,6 @@ #include <asm/cacheflush.h> #include <asm/processor.h> #include <asm/sigcontext.h> -#include <linux/uaccess.h> #include <asm/irq_regs.h> static struct hard_trap_info { @@ -208,18 +207,6 @@ void arch_kgdb_breakpoint(void) ".set\treorder"); } -void kgdb_call_nmi_hook(void *ignored) -{ - mm_segment_t old_fs; - - old_fs = get_fs(); - set_fs(KERNEL_DS); - - kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); - - set_fs(old_fs); -} - static int compute_signal(int tt) { struct hard_trap_info *ht; @@ -302,7 +289,6 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, struct die_args *args = (struct die_args *)ptr; struct pt_regs *regs = args->regs; int trap = (regs->cp0_cause & 0x7c) >> 2; - mm_segment_t old_fs; #ifdef CONFIG_KPROBES /* @@ -317,17 +303,11 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, if (user_mode(regs)) return NOTIFY_DONE; - /* Kernel mode. 
Set correct address limit */ - old_fs = get_fs(); - set_fs(KERNEL_DS); - if (atomic_read(&kgdb_active) != -1) kgdb_nmicallback(smp_processor_id(), regs); - if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) { - set_fs(old_fs); + if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) return NOTIFY_DONE; - } if (atomic_read(&kgdb_setting_breakpoint)) if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst)) @@ -337,7 +317,6 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, local_irq_enable(); __flush_cache_all(); - set_fs(old_fs); return NOTIFY_STOP; } diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index 3c0c3d1260c1..14f46d17500a 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -40,22 +40,13 @@ void *module_alloc(unsigned long size) } #endif -static int apply_r_mips_none(struct module *me, u32 *location, - u32 base, Elf_Addr v, bool rela) -{ - return 0; -} - -static int apply_r_mips_32(struct module *me, u32 *location, - u32 base, Elf_Addr v, bool rela) +static void apply_r_mips_32(u32 *location, u32 base, Elf_Addr v) { *location = base + v; - - return 0; } -static int apply_r_mips_26(struct module *me, u32 *location, - u32 base, Elf_Addr v, bool rela) +static int apply_r_mips_26(struct module *me, u32 *location, u32 base, + Elf_Addr v) { if (v % 4) { pr_err("module %s: dangerous R_MIPS_26 relocation\n", @@ -75,8 +66,8 @@ static int apply_r_mips_26(struct module *me, u32 *location, return 0; } -static int apply_r_mips_hi16(struct module *me, u32 *location, - u32 base, Elf_Addr v, bool rela) +static int apply_r_mips_hi16(struct module *me, u32 *location, Elf_Addr v, + bool rela) { struct mips_hi16 *n; @@ -217,26 +208,25 @@ static int apply_r_mips_pc(struct module *me, u32 *location, u32 base, return 0; } -static int apply_r_mips_pc16(struct module *me, u32 *location, - u32 base, Elf_Addr v, bool rela) +static int apply_r_mips_pc16(struct module *me, u32 *location, u32 base, + Elf_Addr v) { return apply_r_mips_pc(me, location, base, v, 16); } -static int apply_r_mips_pc21(struct module *me, u32 *location, - u32 base, Elf_Addr v, bool rela) +static int apply_r_mips_pc21(struct module *me, u32 *location, u32 base, + Elf_Addr v) { return apply_r_mips_pc(me, location, base, v, 21); } -static int apply_r_mips_pc26(struct module *me, u32 *location, - u32 base, Elf_Addr v, bool rela) +static int apply_r_mips_pc26(struct module *me, u32 *location, u32 base, + Elf_Addr v) { return apply_r_mips_pc(me, location, base, v, 26); } -static int apply_r_mips_64(struct module *me, u32 *location, - u32 base, Elf_Addr v, bool rela) +static int apply_r_mips_64(u32 *location, Elf_Addr v, bool rela) { if (WARN_ON(!rela)) return -EINVAL; @@ -246,8 +236,7 @@ static int apply_r_mips_64(struct module *me, u32 *location, return 0; } -static int apply_r_mips_higher(struct module *me, u32 *location, - u32 base, Elf_Addr v, bool rela) +static int apply_r_mips_higher(u32 *location, Elf_Addr v, bool rela) { if (WARN_ON(!rela)) return -EINVAL; @@ -258,8 +247,7 @@ static int apply_r_mips_higher(struct module *me, u32 *location, return 0; } -static int apply_r_mips_highest(struct module *me, u32 *location, - u32 base, Elf_Addr v, bool rela) +static int apply_r_mips_highest(u32 *location, Elf_Addr v, bool rela) { if (WARN_ON(!rela)) return -EINVAL; @@ -272,12 +260,14 @@ static int apply_r_mips_highest(struct module *me, u32 *location, /** * reloc_handler() - Apply a particular relocation to a module + * @type: type of the 
relocation to apply * @me: the module to apply the reloc to * @location: the address at which the reloc is to be applied * @base: the existing value at location for REL-style; 0 for RELA-style * @v: the value of the reloc, with addend for RELA-style + * @rela: whether this is a RELA (true) or REL (false) relocation * - * Each implemented reloc_handler function applies a particular type of + * Each implemented relocation function applies a particular type of * relocation to the module @me. Relocs that may be found in either REL or RELA * variants can be handled by making use of the @base & @v parameters which are * set to values which abstract the difference away from the particular reloc @@ -285,23 +275,40 @@ static int apply_r_mips_highest(struct module *me, u32 *location, * * Return: 0 upon success, else -ERRNO */ -typedef int (*reloc_handler)(struct module *me, u32 *location, - u32 base, Elf_Addr v, bool rela); +static int reloc_handler(u32 type, struct module *me, u32 *location, u32 base, + Elf_Addr v, bool rela) +{ + switch (type) { + case R_MIPS_NONE: + break; + case R_MIPS_32: + apply_r_mips_32(location, base, v); + break; + case R_MIPS_26: + return apply_r_mips_26(me, location, base, v); + case R_MIPS_HI16: + return apply_r_mips_hi16(me, location, v, rela); + case R_MIPS_LO16: + return apply_r_mips_lo16(me, location, base, v, rela); + case R_MIPS_PC16: + return apply_r_mips_pc16(me, location, base, v); + case R_MIPS_PC21_S2: + return apply_r_mips_pc21(me, location, base, v); + case R_MIPS_PC26_S2: + return apply_r_mips_pc26(me, location, base, v); + case R_MIPS_64: + return apply_r_mips_64(location, v, rela); + case R_MIPS_HIGHER: + return apply_r_mips_higher(location, v, rela); + case R_MIPS_HIGHEST: + return apply_r_mips_highest(location, v, rela); + default: + pr_err("%s: Unknown relocation type %u\n", me->name, type); + return -EINVAL; + } -/* The handlers for known reloc types */ -static reloc_handler reloc_handlers[] = { - [R_MIPS_NONE] = apply_r_mips_none, - [R_MIPS_32] = apply_r_mips_32, - [R_MIPS_26] = apply_r_mips_26, - [R_MIPS_HI16] = apply_r_mips_hi16, - [R_MIPS_LO16] = apply_r_mips_lo16, - [R_MIPS_PC16] = apply_r_mips_pc16, - [R_MIPS_64] = apply_r_mips_64, - [R_MIPS_HIGHER] = apply_r_mips_higher, - [R_MIPS_HIGHEST] = apply_r_mips_highest, - [R_MIPS_PC21_S2] = apply_r_mips_pc21, - [R_MIPS_PC26_S2] = apply_r_mips_pc26, -}; + return 0; +} static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, @@ -311,7 +318,6 @@ static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab, Elf_Mips_Rel *rel; Elf_Mips_Rela *rela; } r; - reloc_handler handler; Elf_Sym *sym; u32 *location, base; unsigned int i, type; @@ -343,17 +349,6 @@ static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab, } type = ELF_MIPS_R_TYPE(*r.rel); - if (type < ARRAY_SIZE(reloc_handlers)) - handler = reloc_handlers[type]; - else - handler = NULL; - - if (!handler) { - pr_err("%s: Unknown relocation type %u\n", - me->name, type); - err = -EINVAL; - goto out; - } if (rela) { v = sym->st_value + r.rela->r_addend; @@ -365,7 +360,7 @@ static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab, r.rel = &r.rel[1]; } - err = handler(me, location, base, v, rela); + err = reloc_handler(type, me, location, base, v, rela); if (err) goto out; } diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 011eb6bbf81a..22e22c2de1c9 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ 
b/arch/mips/kernel/perf_event_mipsxx.c @@ -1919,19 +1919,22 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config) { - unsigned int raw_id = config & 0xff; - unsigned int base_id = raw_id & 0x7f; + unsigned int base_id = config & 0x7f; + unsigned int event_max; raw_event.cntr_mask = CNTR_ALL; raw_event.event_id = base_id; - if (current_cpu_type() == CPU_CAVIUM_OCTEON2) { - if (base_id > 0x42) - return ERR_PTR(-EOPNOTSUPP); - } else { - if (base_id > 0x3a) - return ERR_PTR(-EOPNOTSUPP); + if (current_cpu_type() == CPU_CAVIUM_OCTEON3) + event_max = 0x5f; + else if (current_cpu_type() == CPU_CAVIUM_OCTEON2) + event_max = 0x42; + else + event_max = 0x3a; + + if (base_id > event_max) { + return ERR_PTR(-EOPNOTSUPP); } switch (base_id) { @@ -1941,7 +1944,7 @@ static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config) case 0x1f: case 0x2f: case 0x34: - case 0x3b ... 0x3f: + case 0x3e ... 0x3f: return ERR_PTR(-EOPNOTSUPP); default: break; @@ -2077,6 +2080,7 @@ init_hw_perf_events(void) case CPU_CAVIUM_OCTEON: case CPU_CAVIUM_OCTEON_PLUS: case CPU_CAVIUM_OCTEON2: + case CPU_CAVIUM_OCTEON3: mipspmu.name = "octeon"; mipspmu.general_event_map = &octeon_event_map; mipspmu.cache_event_map = &octeon_cache_map; diff --git a/arch/mips/kernel/perf_regs.c b/arch/mips/kernel/perf_regs.c new file mode 100644 index 000000000000..e686780d1647 --- /dev/null +++ b/arch/mips/kernel/perf_regs.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Some parts derived from x86 version of this file. + * + * Copyright (C) 2013 Cavium, Inc. + */ + +#include <linux/perf_event.h> + +#include <asm/ptrace.h> + +#ifdef CONFIG_32BIT +u64 perf_reg_abi(struct task_struct *tsk) +{ + return PERF_SAMPLE_REGS_ABI_32; +} +#else /* Must be CONFIG_64BIT */ +u64 perf_reg_abi(struct task_struct *tsk) +{ + if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS)) + return PERF_SAMPLE_REGS_ABI_32; + else + return PERF_SAMPLE_REGS_ABI_64; +} +#endif /* CONFIG_32BIT */ + +int perf_reg_validate(u64 mask) +{ + if (!mask) + return -EINVAL; + if (mask & ~((1ull << PERF_REG_MIPS_MAX) - 1)) + return -EINVAL; + return 0; +} + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + long v; + + switch (idx) { + case PERF_REG_MIPS_PC: + v = regs->cp0_epc; + break; + case PERF_REG_MIPS_R1 ... PERF_REG_MIPS_R25: + v = regs->regs[idx - PERF_REG_MIPS_R1 + 1]; + break; + case PERF_REG_MIPS_R28 ... PERF_REG_MIPS_R31: + v = regs->regs[idx - PERF_REG_MIPS_R28 + 28]; + break; + + default: + WARN_ON_ONCE(1); + return 0; + } + + return (s64)v; /* Sign extend if 32-bit. */ +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index d7e288f3a1e7..7efa0d1a4c2b 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -9,50 +9,35 @@ * Copyright (C) 2004 Thiemo Seufer * Copyright (C) 2013 Imagination Technologies Ltd. 
*/ +#include <linux/cpu.h> #include <linux/errno.h> -#include <linux/sched.h> -#include <linux/sched/debug.h> -#include <linux/sched/task.h> -#include <linux/sched/task_stack.h> -#include <linux/tick.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include <linux/unistd.h> -#include <linux/export.h> -#include <linux/ptrace.h> -#include <linux/mman.h> -#include <linux/personality.h> -#include <linux/sys.h> #include <linux/init.h> -#include <linux/completion.h> #include <linux/kallsyms.h> -#include <linux/random.h> -#include <linux/prctl.h> +#include <linux/kernel.h> #include <linux/nmi.h> -#include <linux/cpu.h> +#include <linux/personality.h> +#include <linux/prctl.h> +#include <linux/random.h> +#include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <asm/abi.h> #include <asm/asm.h> -#include <asm/bootinfo.h> -#include <asm/cpu.h> #include <asm/dsemul.h> #include <asm/dsp.h> +#include <asm/exec.h> #include <asm/fpu.h> +#include <asm/inst.h> #include <asm/irq.h> -#include <asm/mips-cps.h> +#include <asm/irq_regs.h> +#include <asm/isadep.h> #include <asm/msa.h> +#include <asm/mips-cps.h> #include <asm/mipsregs.h> #include <asm/processor.h> #include <asm/reg.h> -#include <linux/uaccess.h> -#include <asm/io.h> -#include <asm/elf.h> -#include <asm/isadep.h> -#include <asm/inst.h> #include <asm/stacktrace.h> -#include <asm/irq_regs.h> -#include <asm/exec.h> #ifdef CONFIG_HOTPLUG_CPU void arch_cpu_idle_dead(void) @@ -135,7 +120,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, /* Put the stack after the struct pt_regs. */ childksp = (unsigned long) childregs; p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ unsigned long status = p->thread.cp0_status; memset(childregs, 0, sizeof(struct pt_regs)); @@ -205,6 +190,36 @@ struct mips_frame_info { #define J_TARGET(pc,target) \ (((unsigned long)(pc) & 0xf0000000) | ((target) << 2)) +static inline int is_jr_ra_ins(union mips_instruction *ip) +{ +#ifdef CONFIG_CPU_MICROMIPS + /* + * jr16 ra + * jr ra + */ + if (mm_insn_16bit(ip->word >> 16)) { + if (ip->mm16_r5_format.opcode == mm_pool16c_op && + ip->mm16_r5_format.rt == mm_jr16_op && + ip->mm16_r5_format.imm == 31) + return 1; + return 0; + } + + if (ip->r_format.opcode == mm_pool32a_op && + ip->r_format.func == mm_pool32axf_op && + ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op && + ip->r_format.rt == 31) + return 1; + return 0; +#else + if (ip->r_format.opcode == spec_op && + ip->r_format.func == jr_op && + ip->r_format.rs == 31) + return 1; + return 0; +#endif +} + static inline int is_ra_save_ins(union mips_instruction *ip, int *poff) { #ifdef CONFIG_CPU_MICROMIPS @@ -390,10 +405,8 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size) static int get_frame_info(struct mips_frame_info *info) { bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); - union mips_instruction insn, *ip; - const unsigned int max_insns = 128; + union mips_instruction insn, *ip, *ip_end; unsigned int last_insn_size = 0; - unsigned int i; bool saw_jump = false; info->pc_offset = -1; @@ -403,7 +416,9 @@ static int get_frame_info(struct mips_frame_info *info) if (!ip) goto err; - for (i = 0; i < max_insns; i++) { + ip_end = (void *)ip + (info->func_size ? 
info->func_size : 512); + + while (ip < ip_end) { ip = (void *)ip + last_insn_size; if (is_mmips && mm_insn_16bit(ip->halfword[0])) { @@ -417,7 +432,9 @@ static int get_frame_info(struct mips_frame_info *info) last_insn_size = 4; } - if (!info->frame_size) { + if (is_jr_ra_ins(ip)) { + break; + } else if (!info->frame_size) { is_sp_move_ins(&insn, &info->frame_size); continue; } else if (!saw_jump && is_jump_ins(ip)) { diff --git a/arch/mips/kernel/r4k-bugs64.c b/arch/mips/kernel/r4k-bugs64.c index 1ff19f1ea5ca..35729c9e6cfa 100644 --- a/arch/mips/kernel/r4k-bugs64.c +++ b/arch/mips/kernel/r4k-bugs64.c @@ -18,7 +18,7 @@ static char bug64hit[] __initdata = "reliable operation impossible!\n%s"; static char nowar[] __initdata = - "Please report to <linux-mips@linux-mips.org>."; + "Please report to <linux-mips@vger.kernel.org>."; static char r4kwar[] __initdata = "Enable CPU_R4000_WORKAROUNDS to rectify."; static char daddiwar[] __initdata = diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c index 0e365b7c742d..499a5357c09f 100644 --- a/arch/mips/kernel/relocate.c +++ b/arch/mips/kernel/relocate.c @@ -70,18 +70,14 @@ static void __init sync_icache(void *kbase, unsigned long kernel_length) __sync(); } -static int __init apply_r_mips_64_rel(u32 *loc_orig, u32 *loc_new, long offset) +static void __init apply_r_mips_64_rel(u32 *loc_new, long offset) { *(u64 *)loc_new += offset; - - return 0; } -static int __init apply_r_mips_32_rel(u32 *loc_orig, u32 *loc_new, long offset) +static void __init apply_r_mips_32_rel(u32 *loc_new, long offset) { *loc_new += offset; - - return 0; } static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset) @@ -114,7 +110,8 @@ static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset) } -static int __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new, long offset) +static void __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new, + long offset) { unsigned long insn = *loc_orig; unsigned long target = (insn & 0xffff) << 16; /* high 16bits of target */ @@ -122,17 +119,33 @@ static int __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new, long offset target += offset; *loc_new = (insn & ~0xffff) | ((target >> 16) & 0xffff); - return 0; } -static int (*reloc_handlers_rel[]) (u32 *, u32 *, long) __initdata = { - [R_MIPS_64] = apply_r_mips_64_rel, - [R_MIPS_32] = apply_r_mips_32_rel, - [R_MIPS_26] = apply_r_mips_26_rel, - [R_MIPS_HI16] = apply_r_mips_hi16_rel, -}; +static int __init reloc_handler(u32 type, u32 *loc_orig, u32 *loc_new, + long offset) +{ + switch (type) { + case R_MIPS_64: + apply_r_mips_64_rel(loc_new, offset); + break; + case R_MIPS_32: + apply_r_mips_32_rel(loc_new, offset); + break; + case R_MIPS_26: + return apply_r_mips_26_rel(loc_orig, loc_new, offset); + case R_MIPS_HI16: + apply_r_mips_hi16_rel(loc_orig, loc_new, offset); + break; + default: + pr_err("Unhandled relocation type %d at 0x%pK\n", type, + loc_orig); + return -ENOEXEC; + } + + return 0; +} -int __init do_relocations(void *kbase_old, void *kbase_new, long offset) +static int __init do_relocations(void *kbase_old, void *kbase_new, long offset) { u32 *r; u32 *loc_orig; @@ -149,14 +162,7 @@ int __init do_relocations(void *kbase_old, void *kbase_new, long offset) loc_orig = kbase_old + ((*r & 0x00ffffff) << 2); loc_new = RELOCATED(loc_orig); - if (reloc_handlers_rel[type] == NULL) { - /* Unsupported relocation */ - pr_err("Unhandled relocation type %d at 0x%pK\n", - type, loc_orig); - return -ENOEXEC; - } - - res = 
reloc_handlers_rel[type](loc_orig, loc_new, offset); + res = reloc_handler(type, loc_orig, loc_new, offset); if (res) return res; } @@ -300,6 +306,13 @@ static inline int __init relocation_addr_valid(void *loc_new) return 1; } +static inline void __init update_kaslr_offset(unsigned long *addr, long offset) +{ + unsigned long *new_addr = (unsigned long *)RELOCATED(addr); + + *new_addr = (unsigned long)offset; +} + #if defined(CONFIG_USE_OF) void __weak *plat_get_fdt(void) { @@ -410,6 +423,9 @@ void *__init relocate_kernel(void) /* Return the new kernel's entry point */ kernel_entry = RELOCATED(start_kernel); + + /* Error may occur before, so keep it at last */ + update_kaslr_offset(&__kaslr_offset, offset); } out: return kernel_entry; @@ -418,15 +434,11 @@ out: /* * Show relocation information on panic. */ -void show_kernel_relocation(const char *level) +static void show_kernel_relocation(const char *level) { - unsigned long offset; - - offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS); - - if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) { + if (__kaslr_offset > 0) { printk(level); - pr_cont("Kernel relocated by 0x%pK\n", (void *)offset); + pr_cont("Kernel relocated by 0x%pK\n", (void *)__kaslr_offset); pr_cont(" .text @ 0x%pK\n", _text); pr_cont(" .data @ 0x%pK\n", _sdata); pr_cont(" .bss @ 0x%pK\n", __bss_start); diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 7e1f8e277437..279be0153f8b 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -27,8 +27,8 @@ #include <linux/dma-map-ops.h> #include <linux/decompress/generic.h> #include <linux/of_fdt.h> -#include <linux/of_reserved_mem.h> #include <linux/dmi.h> +#include <linux/crash_dump.h> #include <asm/addrspace.h> #include <asm/bootinfo.h> @@ -37,7 +37,6 @@ #include <asm/cdmm.h> #include <asm/cpu.h> #include <asm/debug.h> -#include <asm/dma-coherence.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp-ops.h> @@ -84,6 +83,9 @@ static struct resource code_resource = { .name = "Kernel code", }; static struct resource data_resource = { .name = "Kernel data", }; static struct resource bss_resource = { .name = "Kernel bss", }; +unsigned long __kaslr_offset __ro_after_init; +EXPORT_SYMBOL(__kaslr_offset); + static void *detect_magic __initdata = detect_memory_region; #ifdef CONFIG_MIPS_AUTO_PFN_OFFSET @@ -404,34 +406,32 @@ static int __init early_parse_memmap(char *p) } early_param("memmap", early_parse_memmap); -#ifdef CONFIG_PROC_VMCORE -static unsigned long setup_elfcorehdr, setup_elfcorehdr_size; -static int __init early_parse_elfcorehdr(char *p) +static void __init mips_reserve_vmcore(void) { +#ifdef CONFIG_PROC_VMCORE phys_addr_t start, end; u64 i; - setup_elfcorehdr = memparse(p, &p); - - for_each_mem_range(i, &start, &end) { - if (setup_elfcorehdr >= start && setup_elfcorehdr < end) { - /* - * Reserve from the elf core header to the end of - * the memory segment, that should all be kdump - * reserved memory. - */ - setup_elfcorehdr_size = end - setup_elfcorehdr; - break; + if (!elfcorehdr_size) { + for_each_mem_range(i, &start, &end) { + if (elfcorehdr_addr >= start && elfcorehdr_addr < end) { + /* + * Reserve from the elf core header to the end of + * the memory segment, that should all be kdump + * reserved memory. + */ + elfcorehdr_size = end - elfcorehdr_addr; + break; + } } } - /* - * If we don't find it in the memory map, then we shouldn't - * have to worry about it, as the new kernel won't use it. 
- */ - return 0; -} -early_param("elfcorehdr", early_parse_elfcorehdr); + + pr_info("Reserving %ldKB of memory at %ldKB for kdump\n", + (unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10); + + memblock_reserve(elfcorehdr_addr, elfcorehdr_size); #endif +} #ifdef CONFIG_KEXEC @@ -653,13 +653,7 @@ static void __init arch_mem_init(char **cmdline_p) */ memblock_set_current_limit(PFN_PHYS(max_low_pfn)); -#ifdef CONFIG_PROC_VMCORE - if (setup_elfcorehdr && setup_elfcorehdr_size) { - printk(KERN_INFO "kdump reserved memory at %lx-%lx\n", - setup_elfcorehdr, setup_elfcorehdr_size); - memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size); - } -#endif + mips_reserve_vmcore(); mips_parse_crashkernel(); #ifdef CONFIG_KEXEC @@ -686,8 +680,6 @@ static void __init arch_mem_init(char **cmdline_p) memblock_reserve(__pa_symbol(&__nosave_begin), __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin)); - fdt_init_reserved_mem(); - early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn)); } @@ -792,10 +784,6 @@ void __init setup_arch(char **cmdline_p) unsigned long kernelsp[NR_CPUS]; unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; -#ifdef CONFIG_USE_OF -unsigned long fw_passed_dtb; -#endif - #ifdef CONFIG_DEBUG_FS struct dentry *mips_debugfs_dir; static int __init debugfs_mips(void) @@ -806,15 +794,10 @@ static int __init debugfs_mips(void) arch_initcall(debugfs_mips); #endif -#ifdef CONFIG_DMA_MAYBE_COHERENT -/* User defined DMA coherency from command line. */ -enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT; -EXPORT_SYMBOL_GPL(coherentio); -int hw_coherentio; /* Actual hardware supported DMA coherency setting. */ - +#ifdef CONFIG_DMA_NONCOHERENT static int __init setcoherentio(char *str) { - coherentio = IO_COHERENCE_ENABLED; + dma_default_coherent = true; pr_info("Hardware DMA cache coherency (command line)\n"); return 0; } @@ -822,7 +805,7 @@ early_param("coherentio", setcoherentio); static int __init setnocoherentio(char *str) { - coherentio = IO_COHERENCE_DISABLED; + dma_default_coherent = false; pr_info("Software DMA cache coherency (command line)\n"); return 0; } diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 8b027c72b8ef..bcd6a944b839 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -451,9 +451,6 @@ static int cps_cpu_disable(void) unsigned cpu = smp_processor_id(); struct core_boot_config *core_cfg; - if (!cpu) - return -EBUSY; - if (!cps_pm_support_state(CPS_PM_POWER_GATED)) return -EINVAL; diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 74b9102fd06e..ef86fbad8546 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -59,7 +59,7 @@ static DECLARE_COMPLETION(cpu_starting); static DECLARE_COMPLETION(cpu_running); /* - * A logcal cpu mask containing only one VPE per core to + * A logical cpu mask containing only one VPE per core to * reduce the number of IPIs on large MT systems. */ cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly; @@ -510,8 +510,8 @@ static inline void smp_on_each_tlb(void (*func) (void *info), void *info) * address spaces, a new context is obtained on the current cpu, and tlb * context on other cpus are invalidated to force a new context allocation * at switch_mm time, should the mm ever be used on other cpus. For - * multithreaded address spaces, intercpu interrupts have to be sent. - * Another case where intercpu interrupts are required is when the target + * multithreaded address spaces, inter-CPU interrupts have to be sent.
+ * Another case where inter-CPU interrupts are required is when the target * mm might be active on another cpu (eg debuggers doing the flushes on * behalf of debugees, kswapd stealing pages from another process etc). * Kanoj 07/00. diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile index 6efb2f6889a7..51f8b805f2ed 100644 --- a/arch/mips/kernel/syscalls/Makefile +++ b/arch/mips/kernel/syscalls/Makefile @@ -5,9 +5,9 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') -syscalln32 := $(srctree)/$(src)/syscall_n32.tbl -syscalln64 := $(srctree)/$(src)/syscall_n64.tbl -syscallo32 := $(srctree)/$(src)/syscall_o32.tbl +syscalln32 := $(src)/syscall_n32.tbl +syscalln64 := $(src)/syscall_n64.tbl +syscallo32 := $(src)/syscall_o32.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh sysnr := $(srctree)/$(src)/syscallnr.sh systbl := $(srctree)/$(src)/syscalltbl.sh @@ -31,66 +31,67 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_offset_$(basetarget))' syshdr_offset_unistd_n32 := __NR_Linux -$(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) +$(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_offset_unistd_n64 := __NR_Linux -$(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) +$(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_offset_unistd_o32 := __NR_Linux -$(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) +$(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) FORCE $(call if_changed,syshdr) sysnr_pfx_unistd_nr_n32 := N32 sysnr_offset_unistd_nr_n32 := 6000 -$(uapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr) +$(kapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr) FORCE $(call if_changed,sysnr) sysnr_pfx_unistd_nr_n64 := 64 sysnr_offset_unistd_nr_n64 := 5000 -$(uapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr) +$(kapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr) FORCE $(call if_changed,sysnr) sysnr_pfx_unistd_nr_o32 := O32 sysnr_offset_unistd_nr_o32 := 4000 -$(uapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) +$(kapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) FORCE $(call if_changed,sysnr) systbl_abi_syscall_table_32_o32 := 32_o32 systbl_offset_syscall_table_32_o32 := 4000 -$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) +$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) FORCE $(call if_changed,systbl) systbl_abi_syscall_table_64_n32 := 64_n32 systbl_offset_syscall_table_64_n32 := 6000 -$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl) +$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl) FORCE $(call if_changed,systbl) systbl_abi_syscall_table_64_n64 := 64_n64 systbl_offset_syscall_table_64_n64 := 5000 -$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl) +$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl) FORCE $(call if_changed,systbl) systbl_abi_syscall_table_64_o32 := 64_o32 systbl_offset_syscall_table_64_o32 := 4000 -$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl) +$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_n32.h \ unistd_n64.h \ - unistd_o32.h \ - unistd_nr_n32.h \ - unistd_nr_n64.h \ - unistd_nr_o32.h + unistd_o32.h kapisyshdr-y += syscall_table_32_o32.h \ syscall_table_64_n32.h \ syscall_table_64_n64.h \ - syscall_table_64_o32.h + syscall_table_64_o32.h \ + unistd_nr_n32.h \ + unistd_nr_n64.h \ + unistd_nr_o32.h -targets += $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix 
$(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index 0f03ad223f33..8fd8c1790941 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -380,3 +380,4 @@ 439 n32 faccessat2 sys_faccessat2 440 n32 process_madvise sys_process_madvise 441 n32 epoll_pwait2 compat_sys_epoll_pwait2 +442 n32 mount_setattr sys_mount_setattr diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index 91649690b52f..169f21438065 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -356,3 +356,4 @@ 439 n64 faccessat2 sys_faccessat2 440 n64 process_madvise sys_process_madvise 441 n64 epoll_pwait2 sys_epoll_pwait2 +442 n64 mount_setattr sys_mount_setattr diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 4bad0c40aed6..090d29ca80ff 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -429,3 +429,4 @@ 439 o32 faccessat2 sys_faccessat2 440 o32 process_madvise sys_process_madvise 441 o32 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 o32 mount_setattr sys_mount_setattr diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e0352958e2f7..808b8b61ded1 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -2009,13 +2009,16 @@ void __noreturn nmi_exception_handler(struct pt_regs *regs) nmi_exit(); } -#define VECTORSPACING 0x100 /* for EI/VI mode */ - unsigned long ebase; EXPORT_SYMBOL_GPL(ebase); unsigned long exception_handlers[32]; unsigned long vi_handlers[64]; +void reserve_exception_space(phys_addr_t addr, unsigned long size) +{ + memblock_reserve(addr, size); +} + void __init *set_except_vector(int n, void *addr) { unsigned long handler = (unsigned long) addr; @@ -2367,10 +2370,7 @@ void __init trap_init(void) if (!cpu_has_mips_r2_r6) { ebase = CAC_BASE; - ebase_pa = virt_to_phys((void *)ebase); vec_size = 0x400; - - memblock_reserve(ebase_pa, vec_size); } else { if (cpu_has_veic || cpu_has_vint) vec_size = 0x200 + VECTORSPACING*64; diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 5e97e9d02f98..1234834cc4c4 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -66,9 +66,10 @@ SECTIONS KPROBES_TEXT IRQENTRY_TEXT SOFTIRQENTRY_TEXT - *(.text.*) *(.fixup) *(.gnu.warning) + . = ALIGN(16); + *(.got) /* Global offset table */ } :text = 0 _etext = .; /* End of text section */ @@ -90,6 +91,7 @@ SECTIONS INIT_TASK_DATA(THREAD_SIZE) NOSAVE_DATA + PAGE_ALIGNED_DATA(PAGE_SIZE) CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) DATA_DATA @@ -137,7 +139,13 @@ SECTIONS PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT) #endif + .rel.dyn : ALIGN(8) { + *(.rel) + *(.rel*) + } + #ifdef CONFIG_MIPS_ELF_APPENDED_DTB + STRUCT_ALIGN(); .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) { *(.appended_dtb) KEEP(*(.appended_dtb)) @@ -165,6 +173,11 @@ SECTIONS #endif #ifdef CONFIG_MIPS_RAW_APPENDED_DTB + .fill : { + FILL(0); + BYTE(0); + . = ALIGN(8); + } __appended_dtb = .; /* leave space for appended DTB */ . 
+= 0x100000; @@ -220,9 +233,9 @@ SECTIONS /* ABI crap starts here */ *(.MIPS.abiflags) *(.MIPS.options) + *(.gnu.attributes) *(.options) *(.pdr) *(.reginfo) - *(.eh_frame) } } diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c index 9268ebc0f61e..e673603e11e5 100644 --- a/arch/mips/kernel/vpe-cmp.c +++ b/arch/mips/kernel/vpe-cmp.c @@ -117,8 +117,8 @@ int __init vpe_module_init(void) } device_initialize(&vpe_device); - vpe_device.class = &vpe_class, - vpe_device.parent = NULL, + vpe_device.class = &vpe_class; + vpe_device.parent = NULL; dev_set_name(&vpe_device, "vpe_sp"); vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR); err = device_add(&vpe_device); diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c index 2e003b11a098..bad6b0891b2b 100644 --- a/arch/mips/kernel/vpe-mt.c +++ b/arch/mips/kernel/vpe-mt.c @@ -365,8 +365,8 @@ int __init vpe_module_init(void) } device_initialize(&vpe_device); - vpe_device.class = &vpe_class, - vpe_device.parent = NULL, + vpe_device.class = &vpe_class; + vpe_device.parent = NULL; dev_set_name(&vpe_device, "vpe1"); vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR); err = device_add(&vpe_device); diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index d0d832ab3d3b..13294972707b 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -746,28 +746,12 @@ static int vpe_elfload(struct vpe *v) return 0; } -static int getcwd(char *buff, int size) -{ - mm_segment_t old_fs; - int ret; - - old_fs = get_fs(); - set_fs(KERNEL_DS); - - ret = sys_getcwd(buff, size); - - set_fs(old_fs); - - return ret; -} - /* checks VPE is unused and gets ready to load program */ static int vpe_open(struct inode *inode, struct file *filp) { enum vpe_state state; struct vpe_notifications *notifier; struct vpe *v; - int ret; if (VPE_MODULE_MINOR != iminor(inode)) { /* assume only 1 device at the moment. 
*/ @@ -803,12 +787,6 @@ static int vpe_open(struct inode *inode, struct file *filp) v->plen = P_SIZE; v->load_addr = NULL; v->len = 0; - - v->cwd[0] = 0; - ret = getcwd(v->cwd, VPE_PATH_MAX); - if (ret < 0) - pr_warn("VPE loader: open, getcwd returned %d\n", ret); - v->shared_ptr = NULL; v->__start = 0; @@ -915,17 +893,6 @@ int vpe_notify(int index, struct vpe_notifications *notify) } EXPORT_SYMBOL(vpe_notify); -char *vpe_getcwd(int index) -{ - struct vpe *v = get_vpe(index); - - if (v == NULL) - return NULL; - - return v->cwd; -} -EXPORT_SYMBOL(vpe_getcwd); - module_init(vpe_module_init); module_exit(vpe_module_exit); MODULE_DESCRIPTION("MIPS VPE Loader"); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 3d6a7f5827b1..58a8812e2fa5 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -148,7 +148,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) default: /* Unsupported KVM type */ return -EINVAL; - }; + } /* Allocate page table to map GPA -> RPA */ kvm->arch.gpa_mm.pgd = kvm_pgd_alloc(); diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index df8eed3875f6..acfbdc01b0ac 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -8,6 +8,7 @@ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/sched.h> +#include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/of_platform.h> #include <linux/of_address.h> @@ -302,7 +303,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc) generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq)); /* if this is a EBU irq, we need to ack it or get a deadlock */ - if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) + if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0) ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, LTQ_EBU_PCC_ISTAT); } @@ -422,12 +423,9 @@ unsigned int get_c0_compare_int(void) return CP0_LEGACY_COMPARE_IRQ; } -static const struct of_device_id of_irq_ids[] __initconst = { - { .compatible = "lantiq,icu", .data = icu_of_init }, - {}, -}; +IRQCHIP_DECLARE(lantiq_icu, "lantiq,icu", icu_of_init); void __init arch_init_irq(void) { - of_irq_init(of_irq_ids); + irqchip_init(); } diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index 51a218f04fe0..bc9f58fcbdf9 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -44,10 +44,6 @@ int ltq_soc_type(void) return soc_info.type; } -void __init prom_free_prom_memory(void) -{ -} - static void __init prom_init_cmdline(void) { int argc = fw_arg0; @@ -77,11 +73,8 @@ void __init plat_mem_setup(void) set_io_port_base((unsigned long) KSEG1); - if (fw_passed_dtb) /* UHI interface */ - dtb = (void *)fw_passed_dtb; - else if (__dtb_start != __dtb_end) - dtb = (void *)__dtb_start; - else + dtb = get_fdt(); + if (dtb == NULL) panic("no dtb found"); /* diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c index 210f5a95ecb1..a9cb28813f0b 100644 --- a/arch/mips/lib/iomap-pci.c +++ b/arch/mips/lib/iomap-pci.c @@ -32,7 +32,7 @@ void __iomem *__pci_ioport_map(struct pci_dev *dev, sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number); printk(KERN_WARNING "io_map_base of root PCI bus %s unset. 
" "Trying to continue but you better\nfix this issue or " - "report it to linux-mips@linux-mips.org or your " + "report it to linux-mips@vger.kernel.org or your " "vendor.\n", name); #ifdef CONFIG_PCI_DOMAINS panic("To avoid data corruption io_map_base MUST be set with " diff --git a/arch/mips/loongson2ef/common/init.c b/arch/mips/loongson2ef/common/init.c index ce3f02f75e2a..7797359359e4 100644 --- a/arch/mips/loongson2ef/common/init.c +++ b/arch/mips/loongson2ef/common/init.c @@ -19,7 +19,6 @@ unsigned long __maybe_unused _loongson_addrwincfg_base; static void __init mips_nmi_setup(void) { void *base; - extern char except_vec_nmi[]; base = (void *)(CAC_BASE + 0x380); memcpy(base, except_vec_nmi, 0x80); @@ -46,7 +45,3 @@ void __init prom_init(void) prom_init_uart_base(); board_nmi_handler_setup = mips_nmi_setup; } - -void __init prom_free_prom_memory(void) -{ -} diff --git a/arch/mips/loongson2ef/common/mem.c b/arch/mips/loongson2ef/common/mem.c index 057d58bb470e..fceb3ee47eb0 100644 --- a/arch/mips/loongson2ef/common/mem.c +++ b/arch/mips/loongson2ef/common/mem.c @@ -41,14 +41,3 @@ void __init prom_init_memory(void) memblock_add(LOONGSON_HIGHMEM_START, highmemsize << 20); #endif /* !CONFIG_64BIT */ } - -/* override of arch/mips/mm/cache.c: __uncached_access */ -int __uncached_access(struct file *file, unsigned long addr) -{ - if (file->f_flags & O_DSYNC) - return 1; - - return addr >= __pa(high_memory) || - ((addr >= LOONGSON_MMIO_MEM_START) && - (addr < LOONGSON_MMIO_MEM_END)); -} diff --git a/arch/mips/loongson32/common/prom.c b/arch/mips/loongson32/common/prom.c index c133b5adf34e..fc580a22748e 100644 --- a/arch/mips/loongson32/common/prom.c +++ b/arch/mips/loongson32/common/prom.c @@ -36,10 +36,6 @@ void __init prom_init(void) setup_8250_early_printk_port((unsigned long)uart_base, 0, 0); } -void __init prom_free_prom_memory(void) -{ -} - void __init plat_mem_setup(void) { memblock_add(0x0, (memsize << 20)); diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform index ec42c5085905..3e660d6d3c2b 100644 --- a/arch/mips/loongson64/Platform +++ b/arch/mips/loongson64/Platform @@ -6,28 +6,6 @@ cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap # -# Some versions of binutils, not currently mainline as of 2019/02/04, support -# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction -# to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h for a -# description). -# -# We disable this in order to prevent the assembler meddling with the -# instruction that labels refer to, ie. if we label an ll instruction: -# -# 1: ll v0, 0(a0) -# -# ...then with the assembler fix applied the label may actually point at a sync -# instruction inserted by the assembler, and if we were using the label in an -# exception table the table would no longer contain the address of the ll -# instruction. -# -# Avoid this by explicitly disabling that assembler behaviour. If upstream -# binutils does not merge support for the flag then we can revisit & remove -# this later - for now it ensures vendor toolchains don't cause problems. -# -cflags-$(CONFIG_CPU_LOONGSON64) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,) - -# # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a # as MIPS64 R2; older versions as just R1. 
This leaves the possibility open # that GCC might generate R2 code for -march=loongson3a which then is rejected @@ -35,7 +13,7 @@ cflags-$(CONFIG_CPU_LOONGSON64) += $(call as-option,-Wa$(comma)-mno-fix-loongson # can't easily be used safely within the kbuild framework. # ifeq ($(call cc-ifversion, -ge, 0409, y), y) - ifeq ($(call ld-ifversion, -ge, 225000000, y), y) + ifeq ($(call ld-ifversion, -ge, 22500, y), y) cflags-$(CONFIG_CPU_LOONGSON64) += \ $(call cc-option,-march=loongson3a -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) else diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c index ed75f7971261..cfa788bca871 100644 --- a/arch/mips/loongson64/init.c +++ b/arch/mips/loongson64/init.c @@ -25,7 +25,6 @@ u32 node_id_offset; static void __init mips_nmi_setup(void) { void *base; - extern char except_vec_nmi[]; base = (void *)(CAC_BASE + 0x380); memcpy(base, except_vec_nmi, 0x80); @@ -47,6 +46,51 @@ void virtual_early_config(void) node_id_offset = 44; } +void __init szmem(unsigned int node) +{ + u32 i, mem_type; + static unsigned long num_physpages; + u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size; + + /* Parse memory information and activate */ + for (i = 0; i < loongson_memmap->nr_map; i++) { + node_id = loongson_memmap->map[i].node_id; + if (node_id != node) + continue; + + mem_type = loongson_memmap->map[i].mem_type; + mem_size = loongson_memmap->map[i].mem_size; + mem_start = loongson_memmap->map[i].mem_start; + + switch (mem_type) { + case SYSTEM_RAM_LOW: + case SYSTEM_RAM_HIGH: + start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; + node_psize = (mem_size << 20) >> PAGE_SHIFT; + end_pfn = start_pfn + node_psize; + num_physpages += node_psize; + pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", + (u32)node_id, mem_type, mem_start, mem_size); + pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", + start_pfn, end_pfn, num_physpages); + memblock_add_node(PFN_PHYS(start_pfn), PFN_PHYS(node_psize), node); + break; + case SYSTEM_RAM_RESERVED: + pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", + (u32)node_id, mem_type, mem_start, mem_size); + memblock_reserve(((node_id << 44) + mem_start), mem_size << 20); + break; + } + } +} + +#ifndef CONFIG_NUMA +static void __init prom_init_memory(void) +{ + szmem(0); +} +#endif + void __init prom_init(void) { fw_init_cmdline(); @@ -57,7 +101,11 @@ void __init prom_init(void) loongson_sysconf.early_config(); +#ifdef CONFIG_NUMA prom_init_numa_memory(); +#else + prom_init_memory(); +#endif /* Hardcode to CPU UART 0 */ setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024); @@ -66,10 +114,6 @@ void __init prom_init(void) board_nmi_handler_setup = mips_nmi_setup; } -void __init prom_free_prom_memory(void) -{ -} - static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_t hw_start, resource_size_t size) { diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c index c6f0c48384f8..a8f57bf01285 100644 --- a/arch/mips/loongson64/numa.c +++ b/arch/mips/loongson64/numa.c @@ -25,6 +25,7 @@ #include <asm/time.h> #include <asm/wbflush.h> #include <boot_param.h> +#include <loongson.h> static struct pglist_data prealloc__node_data[MAX_NUMNODES]; unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES]; @@ -81,57 +82,6 @@ static void __init init_topology_matrix(void) } } -static void __init szmem(unsigned int node) -{ - u32 i, mem_type; - static unsigned long num_physpages; - u64 node_id, node_psize, start_pfn, 
end_pfn, mem_start, mem_size; - - /* Parse memory information and activate */ - for (i = 0; i < loongson_memmap->nr_map; i++) { - node_id = loongson_memmap->map[i].node_id; - if (node_id != node) - continue; - - mem_type = loongson_memmap->map[i].mem_type; - mem_size = loongson_memmap->map[i].mem_size; - mem_start = loongson_memmap->map[i].mem_start; - - switch (mem_type) { - case SYSTEM_RAM_LOW: - start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; - node_psize = (mem_size << 20) >> PAGE_SHIFT; - end_pfn = start_pfn + node_psize; - num_physpages += node_psize; - pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", - (u32)node_id, mem_type, mem_start, mem_size); - pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", - start_pfn, end_pfn, num_physpages); - memblock_add_node(PFN_PHYS(start_pfn), - PFN_PHYS(node_psize), node); - break; - case SYSTEM_RAM_HIGH: - start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; - node_psize = (mem_size << 20) >> PAGE_SHIFT; - end_pfn = start_pfn + node_psize; - num_physpages += node_psize; - pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", - (u32)node_id, mem_type, mem_start, mem_size); - pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", - start_pfn, end_pfn, num_physpages); - memblock_add_node(PFN_PHYS(start_pfn), - PFN_PHYS(node_psize), node); - break; - case SYSTEM_RAM_RESERVED: - pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", - (u32)node_id, mem_type, mem_start, mem_size); - memblock_reserve(((node_id << 44) + mem_start), - mem_size << 20); - break; - } - } -} - static void __init node_mem_init(unsigned int node) { unsigned long node_addrspace_offset; diff --git a/arch/mips/loongson64/smp.c b/arch/mips/loongson64/smp.c index b8c1fc3158fd..6acde65f601b 100644 --- a/arch/mips/loongson64/smp.c +++ b/arch/mips/loongson64/smp.c @@ -483,7 +483,8 @@ static void __init loongson3_smp_setup(void) init_cpu_possible(cpu_none_mask); /* For unified kernel, NR_CPUS is the maximum possible value, - * loongson_sysconf.nr_cpus is the really present value */ + * loongson_sysconf.nr_cpus is the really present value + */ while (i < loongson_sysconf.nr_cpus) { if (loongson_sysconf.reserved_cpus_mask & (1<<i)) { /* Reserved physical CPU cores */ @@ -492,6 +493,8 @@ static void __init loongson3_smp_setup(void) __cpu_number_map[i] = num; __cpu_logical_map[num] = i; set_cpu_possible(num, true); + /* Loongson processors are always grouped by 4 */ + cpu_set_cluster(&cpu_data[num], i / 4); num++; } i++; @@ -567,7 +570,8 @@ static void loongson3_cpu_die(unsigned int cpu) /* To shutdown a core in Loongson 3, the target core should go to CKSEG1 and * flush all L1 entries at first. Then, another core (usually Core 0) can * safely disable the clock of the target core. 
loongson3_play_dead() is - * called via CKSEG1 (uncached and unmmaped) */ + * called via CKSEG1 (uncached and unmapped) + */ static void loongson3_type1_play_dead(int *state_addr) { register int val; diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 4f976d687ab0..74b09e801c3a 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -19,6 +19,7 @@ #include <linux/mm.h> #include <linux/export.h> #include <linux/bitops.h> +#include <linux/dma-map-ops.h> /* for dma_default_coherent */ #include <asm/bcache.h> #include <asm/bootinfo.h> @@ -35,7 +36,6 @@ #include <asm/war.h> #include <asm/cacheflush.h> /* for run_uncached() */ #include <asm/traps.h> -#include <asm/dma-coherence.h> #include <asm/mips-cps.h> /* @@ -1164,6 +1164,7 @@ static void probe_pcache(void) case CPU_R4400PC: case CPU_R4400SC: case CPU_R4400MC: + case CPU_R4300: icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c->icache.ways = 1; @@ -1593,7 +1594,7 @@ static int probe_scache(void) return 1; } -static void __init loongson2_sc_init(void) +static void loongson2_sc_init(void) { struct cpuinfo_mips *c = &current_cpu_data; @@ -1913,15 +1914,11 @@ void r4k_cache_init(void) __local_flush_icache_user_range = local_r4k_flush_icache_user_range; #ifdef CONFIG_DMA_NONCOHERENT -#ifdef CONFIG_DMA_MAYBE_COHERENT - if (coherentio == IO_COHERENCE_ENABLED || - (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) { + if (dma_default_coherent) { _dma_cache_wback_inv = (void *)cache_noop; _dma_cache_wback = (void *)cache_noop; _dma_cache_inv = (void *)cache_noop; - } else -#endif /* CONFIG_DMA_MAYBE_COHERENT */ - { + } else { _dma_cache_wback_inv = r4k_dma_cache_wback_inv; _dma_cache_wback = r4k_dma_cache_wback_inv; _dma_cache_inv = r4k_dma_cache_inv; diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 23b16bfd97b2..7719d632df8d 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -21,6 +21,7 @@ #include <asm/cpu.h> #include <asm/cpu-features.h> #include <asm/setup.h> +#include <asm/pgtable.h> /* Cache operations. 
*/ void (*flush_cache_all)(void); @@ -156,29 +157,31 @@ unsigned long _page_cachable_default; EXPORT_SYMBOL(_page_cachable_default); #define PM(p) __pgprot(_page_cachable_default | (p)) +#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p)) static inline void setup_protection_map(void) { protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); - protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); - protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[4] = PM(_PAGE_PRESENT); - protection_map[5] = PM(_PAGE_PRESENT); - protection_map[6] = PM(_PAGE_PRESENT); - protection_map[7] = PM(_PAGE_PRESENT); + protection_map[1] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC); + protection_map[2] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); + protection_map[3] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC); + protection_map[4] = PVA(_PAGE_PRESENT); + protection_map[5] = PVA(_PAGE_PRESENT); + protection_map[6] = PVA(_PAGE_PRESENT); + protection_map[7] = PVA(_PAGE_PRESENT); protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); - protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | + protection_map[9] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC); + protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); - protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); - protection_map[12] = PM(_PAGE_PRESENT); - protection_map[13] = PM(_PAGE_PRESENT); - protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE); - protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE); + protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); + protection_map[12] = PVA(_PAGE_PRESENT); + protection_map[13] = PVA(_PAGE_PRESENT); + protection_map[14] = PVA(_PAGE_PRESENT | _PAGE_WRITE); + protection_map[15] = PVA(_PAGE_PRESENT | _PAGE_WRITE); } +#undef PVA #undef PM void cpu_cache_init(void) @@ -207,11 +210,3 @@ void cpu_cache_init(void) setup_protection_map(); } - -int __weak __uncached_access(struct file *file, unsigned long addr) -{ - if (file->f_flags & O_DSYNC) - return 1; - - return addr >= __pa(high_memory); -} diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index 38d3d9143b47..212f3ce75a6b 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -10,7 +10,6 @@ #include <asm/cache.h> #include <asm/cpu-type.h> -#include <asm/dma-coherence.h> #include <asm/io.h> /* @@ -136,7 +135,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, } #endif -#ifdef CONFIG_DMA_PERDEV_COHERENT +#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 7c871b14e74a..e7abda9c013f 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -156,8 +156,11 @@ good_area: */ fault = handle_mm_fault(vma, address, flags, regs); - if (fault_signal_pending(fault, regs)) + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + goto no_context; return; + } if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index bc80893e5c0f..5cb73bf74a8b 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -495,6 +495,11 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end) void (*free_init_pages_eva)(void *begin, void *end) = NULL; 
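A single __weak default, added in the hunk just below, is what lets the empty per-platform prom_free_prom_memory() stubs be deleted throughout the rest of this series: any platform that still needs the hook supplies a strong definition, and the linker picks it over the weak one. A minimal user-space sketch of that mechanism, assuming plain GCC/Clang attribute semantics (the kernel's __weak macro expands to the same attribute):

/* weak_default.c - a strong definition of this symbol anywhere else in
 * the link silently replaces the weak default below. */
#include <stdio.h>

__attribute__((weak)) void prom_free_prom_memory(void)
{
	printf("default: nothing to free\n");	/* used when no override exists */
}

int main(void)
{
	prom_free_prom_memory();
	return 0;
}
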
+void __weak __init prom_free_prom_memory(void) +{ + /* nothing to do */ +} + void __ref free_initmem(void) { prom_free_prom_memory(); diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c index bd4b0656add3..61891af25019 100644 --- a/arch/mips/mm/pgtable-32.c +++ b/arch/mips/mm/pgtable-32.c @@ -45,7 +45,6 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { *pmdp = pmd; - flush_tlb_all(); } #endif /* defined(CONFIG_TRANSPARENT_HUGEPAGE) */ diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c index 183ff9f9c026..7536f7804c44 100644 --- a/arch/mips/mm/pgtable-64.c +++ b/arch/mips/mm/pgtable-64.c @@ -100,7 +100,6 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { *pmdp = pmd; - flush_tlb_all(); } void __init pagetable_init(void) diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index a7521b8f7658..0fb1db8a8ef7 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -549,6 +549,7 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l, tlbw(p); break; + case CPU_R4300: case CPU_5KC: case CPU_TX49XX: case CPU_PR4450: diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 893af377aacc..b03cac5fdc02 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c @@ -90,7 +90,6 @@ static void __init console_config(void) static void __init mips_nmi_setup(void) { void *base; - extern char except_vec_nmi[]; base = cpu_has_veic ? (void *)(CAC_BASE + 0xa80) : diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c index 7c25a0a2345c..952018812885 100644 --- a/arch/mips/mti-malta/malta-memory.c +++ b/arch/mips/mti-malta/malta-memory.c @@ -37,10 +37,6 @@ void __init fw_meminit(void) free_init_pages_eva = eva ? 
free_init_pages_eva_malta : NULL; } -void __init prom_free_prom_memory(void) -{ -} - phys_addr_t mips_cdmm_phys_base(void) { /* This address is "typically unused" */ diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c index e1fb8b534944..21cb3ac1237b 100644 --- a/arch/mips/mti-malta/malta-setup.c +++ b/arch/mips/mti-malta/malta-setup.c @@ -13,8 +13,8 @@ #include <linux/pci.h> #include <linux/screen_info.h> #include <linux/time.h> +#include <linux/dma-map-ops.h> /* for dma_default_coherent */ -#include <asm/dma-coherence.h> #include <asm/fw/fw.h> #include <asm/mips-cps.h> #include <asm/mips-boards/generic.h> @@ -90,16 +90,15 @@ static void __init fd_activate(void) } #endif -static int __init plat_enable_iocoherency(void) +static void __init plat_setup_iocoherency(void) { - int supported = 0; u32 cfg; if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) { if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; pr_info("Enabled Bonito CPU coherency\n"); - supported = 1; + dma_default_coherent = true; } if (strstr(fw_getcmdline(), "iobcuncached")) { BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN; @@ -118,29 +117,16 @@ static int __init plat_enable_iocoherency(void) /* Nothing special needs to be done to enable coherency */ pr_info("CMP IOCU detected\n"); cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0)); - if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) { + if (cfg & ROCIT_CONFIG_GEN0_PCI_IOCU) + dma_default_coherent = true; + else pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n"); - return 0; - } - supported = 1; } - hw_coherentio = supported; - return supported; -} -static void __init plat_setup_iocoherency(void) -{ - if (plat_enable_iocoherency()) { - if (coherentio == IO_COHERENCE_DISABLED) - pr_info("Hardware DMA cache coherency disabled\n"); - else - pr_info("Hardware DMA cache coherency enabled\n"); - } else { - if (coherentio == IO_COHERENCE_ENABLED) - pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n"); - else - pr_info("Software DMA cache coherency enabled\n"); - } + if (dma_default_coherent) + pr_info("Hardware DMA cache coherency enabled\n"); + else + pr_info("Software DMA cache coherency enabled\n"); } static void __init pci_clock_check(void) diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 7efcfe0c9cd4..567720374d57 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -138,7 +138,7 @@ int get_c0_fdc_int(void) case CPU_INTERAPTIV: case CPU_PROAPTIV: return -1; - }; + } if (cpu_has_veic) return -1; diff --git a/arch/mips/n64/Makefile b/arch/mips/n64/Makefile new file mode 100644 index 000000000000..b64a05ae218e --- /dev/null +++ b/arch/mips/n64/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Nintendo 64 +# + +obj-y := init.o irq.o diff --git a/arch/mips/n64/Platform b/arch/mips/n64/Platform new file mode 100644 index 000000000000..24647831356c --- /dev/null +++ b/arch/mips/n64/Platform @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Nintendo 64 +# + +cflags-$(CONFIG_MACH_NINTENDO64) += -I$(srctree)/arch/mips/include/asm/mach-n64 +load-$(CONFIG_MACH_NINTENDO64) += 0xffffffff80101000 diff --git a/arch/mips/n64/init.c b/arch/mips/n64/init.c new file mode 100644 index 000000000000..dfbd864f4667 --- /dev/null +++ b/arch/mips/n64/init.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Nintendo 64 init. 
+ * + * Copyright (C) 2021 Lauri Kasanen + */ +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/irq.h> +#include <linux/memblock.h> +#include <linux/platform_device.h> +#include <linux/platform_data/simplefb.h> +#include <linux/string.h> + +#include <asm/bootinfo.h> +#include <asm/fw/fw.h> +#include <asm/time.h> + +#define IO_MEM_RESOURCE_START 0UL +#define IO_MEM_RESOURCE_END 0x1fffffffUL + +/* + * System-specific irq names for clarity + */ +#define MIPS_CPU_IRQ(x) (MIPS_CPU_IRQ_BASE + (x)) +#define MIPS_SOFTINT0_IRQ MIPS_CPU_IRQ(0) +#define MIPS_SOFTINT1_IRQ MIPS_CPU_IRQ(1) +#define RCP_IRQ MIPS_CPU_IRQ(2) +#define CART_IRQ MIPS_CPU_IRQ(3) +#define PRENMI_IRQ MIPS_CPU_IRQ(4) +#define RDBR_IRQ MIPS_CPU_IRQ(5) +#define RDBW_IRQ MIPS_CPU_IRQ(6) +#define TIMER_IRQ MIPS_CPU_IRQ(7) + +static void __init iomem_resource_init(void) +{ + iomem_resource.start = IO_MEM_RESOURCE_START; + iomem_resource.end = IO_MEM_RESOURCE_END; +} + +const char *get_system_type(void) +{ + return "Nintendo 64"; +} + +void __init prom_init(void) +{ + fw_init_cmdline(); +} + +#define W 320 +#define H 240 +#define REG_BASE ((u32 *) CKSEG1ADDR(0x4400000)) + +static void __init n64rdp_write_reg(const u8 reg, const u32 value) +{ + __raw_writel(value, REG_BASE + reg); +} + +#undef REG_BASE + +static const u32 ntsc_320[] __initconst = { + 0x00013212, 0x00000000, 0x00000140, 0x00000200, + 0x00000000, 0x03e52239, 0x0000020d, 0x00000c15, + 0x0c150c15, 0x006c02ec, 0x002501ff, 0x000e0204, + 0x00000200, 0x00000400 +}; + +#define MI_REG_BASE 0x4300000 +#define NUM_MI_REGS 4 +#define AI_REG_BASE 0x4500000 +#define NUM_AI_REGS 6 +#define PI_REG_BASE 0x4600000 +#define NUM_PI_REGS 5 +#define SI_REG_BASE 0x4800000 +#define NUM_SI_REGS 7 + +static int __init n64_platform_init(void) +{ + static const char simplefb_resname[] = "FB"; + static const struct simplefb_platform_data mode = { + .width = W, + .height = H, + .stride = W * 2, + .format = "r5g5b5a1" + }; + struct resource res[3]; + void *orig; + unsigned long phys; + unsigned i; + + memset(res, 0, sizeof(struct resource) * 3); + res[0].flags = IORESOURCE_MEM; + res[0].start = MI_REG_BASE; + res[0].end = MI_REG_BASE + NUM_MI_REGS * 4 - 1; + + res[1].flags = IORESOURCE_MEM; + res[1].start = AI_REG_BASE; + res[1].end = AI_REG_BASE + NUM_AI_REGS * 4 - 1; + + res[2].flags = IORESOURCE_IRQ; + res[2].start = RCP_IRQ; + res[2].end = RCP_IRQ; + + platform_device_register_simple("n64audio", -1, res, 3); + + memset(&res[0], 0, sizeof(res[0])); + res[0].flags = IORESOURCE_MEM; + res[0].start = PI_REG_BASE; + res[0].end = PI_REG_BASE + NUM_PI_REGS * 4 - 1; + + platform_device_register_simple("n64cart", -1, res, 1); + + memset(&res[0], 0, sizeof(res[0])); + res[0].flags = IORESOURCE_MEM; + res[0].start = SI_REG_BASE; + res[0].end = SI_REG_BASE + NUM_SI_REGS * 4 - 1; + + platform_device_register_simple("n64joy", -1, res, 1); + + /* The framebuffer needs 64-byte alignment */ + orig = kzalloc(W * H * 2 + 63, GFP_DMA | GFP_KERNEL); + if (!orig) + return -ENOMEM; + phys = virt_to_phys(orig); + phys += 63; + phys &= ~63; + + for (i = 0; i < ARRAY_SIZE(ntsc_320); i++) { + if (i == 1) + n64rdp_write_reg(i, phys); + else + n64rdp_write_reg(i, ntsc_320[i]); + } + + /* setup IORESOURCE_MEM as framebuffer memory */ + memset(&res[0], 0, sizeof(res[0])); + res[0].flags = IORESOURCE_MEM; + res[0].name = simplefb_resname; + res[0].start = phys; + res[0].end = phys + W * H * 2 - 1; + + platform_device_register_resndata(NULL, "simple-framebuffer", 0, + &res[0], 1, &mode, sizeof(mode)); + + return 
0; +} + +#undef W +#undef H + +arch_initcall(n64_platform_init); + +void __init plat_mem_setup(void) +{ + iomem_resource_init(); + memblock_add(0x0, 8 * 1024 * 1024); /* Bootloader blocks the 4mb config */ +} + +void __init plat_time_init(void) +{ + /* 93.75 MHz cpu, count register runs at half rate */ + mips_hpt_frequency = 93750000 / 2; +} diff --git a/arch/mips/n64/irq.c b/arch/mips/n64/irq.c new file mode 100644 index 000000000000..1861e962db42 --- /dev/null +++ b/arch/mips/n64/irq.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * N64 IRQ + * + * Copyright (C) 2021 Lauri Kasanen + */ +#include <linux/export.h> +#include <linux/interrupt.h> +#include <linux/irq.h> + +#include <asm/irq_cpu.h> + +void __init arch_init_irq(void) +{ + mips_cpu_irq_init(); +} diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c index 9adc0c1b4ffc..9fbaa1e5b340 100644 --- a/arch/mips/netlogic/xlp/setup.c +++ b/arch/mips/netlogic/xlp/setup.c @@ -130,11 +130,6 @@ const char *get_system_type(void) } } -void __init prom_free_prom_memory(void) -{ - /* Nothing yet */ -} - void xlp_mmu_init(void) { u32 conf4; diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c index 627e88101316..aa83d691df0f 100644 --- a/arch/mips/netlogic/xlr/setup.c +++ b/arch/mips/netlogic/xlr/setup.c @@ -89,11 +89,6 @@ unsigned int nlm_get_cpu_frequency(void) return (unsigned int)nlm_prom_info.cpu_frequency; } -void __init prom_free_prom_memory(void) -{ - /* Nothing yet */ -} - void nlm_percpu_init(int hwcpuid) { if (hwcpuid % 4 == 0) diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index 7285b5667568..1c722dd0c130 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c @@ -17,8 +17,8 @@ #include <linux/init.h> #include <linux/syscore_ops.h> #include <linux/vmalloc.h> +#include <linux/dma-map-ops.h> /* for dma_default_coherent */ -#include <asm/dma-coherence.h> #include <asm/mach-au1x00/au1000.h> #include <asm/tlbmisc.h> @@ -429,9 +429,8 @@ static int alchemy_pci_probe(struct platform_device *pdev) ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io; /* Au1500 revisions older than AD have borked coherent PCI */ - if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) && - (read_c0_prid() < 0x01030202) && - (coherentio == IO_COHERENCE_DISABLED)) { + if (alchemy_get_cputype() == ALCHEMY_CPU_AU1500 && + read_c0_prid() < 0x01030202 && !dma_default_coherent) { val = __raw_readl(ctx->regs + PCI_REG_CONFIG); val |= PCI_CONFIG_NC; __raw_writel(val, ctx->regs + PCI_REG_CONFIG); diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c index 0b15730cef88..f741b8c528e4 100644 --- a/arch/mips/pci/pci-ar2315.c +++ b/arch/mips/pci/pci-ar2315.c @@ -483,11 +483,11 @@ static int ar2315_pci_probe(struct platform_device *pdev) apc->io_res.name = "AR2315 IO space"; apc->io_res.start = 0; apc->io_res.end = 0; - apc->io_res.flags = IORESOURCE_IO, + apc->io_res.flags = IORESOURCE_IO; apc->pci_ctrl.pci_ops = &ar2315_pci_ops; - apc->pci_ctrl.mem_resource = &apc->mem_res, - apc->pci_ctrl.io_resource = &apc->io_res, + apc->pci_ctrl.mem_resource = &apc->mem_res; + apc->pci_ctrl.io_resource = &apc->io_res; register_pci_controller(&apc->pci_ctrl); diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c index 50f376f058f4..764f2d022fae 100644 --- a/arch/mips/pic32/pic32mzda/init.c +++ b/arch/mips/pic32/pic32mzda/init.c @@ -21,24 +21,11 @@ const char *get_system_type(void) return "PIC32MZDA"; } -static ulong get_fdtaddr(void) -{ - ulong 
ftaddr = 0; - - if (fw_passed_dtb && !fw_arg2 && !fw_arg3) - return (ulong)fw_passed_dtb; - - if (__dtb_start < __dtb_end) - ftaddr = (ulong)__dtb_start; - - return ftaddr; -} - void __init plat_mem_setup(void) { void *dtb; - dtb = (void *)get_fdtaddr(); + dtb = get_fdt(); if (!dtb) { pr_err("pic32: no DTB found.\n"); return; @@ -91,10 +78,6 @@ void __init prom_init(void) pic32_init_cmdline((int)fw_arg0, (char **)fw_arg1); } -void __init prom_free_prom_memory(void) -{ -} - void __init device_tree_init(void) { if (!initial_boot_params) diff --git a/arch/mips/pistachio/Platform b/arch/mips/pistachio/Platform index f73a1a929965..c59de86dbddf 100644 --- a/arch/mips/pistachio/Platform +++ b/arch/mips/pistachio/Platform @@ -1,8 +1,6 @@ # # IMG Pistachio SoC # -cflags-$(CONFIG_MACH_PISTACHIO) += \ - -I$(srctree)/arch/mips/include/asm/mach-pistachio load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000 zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000 all-$(CONFIG_MACH_PISTACHIO) := uImage.gz diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c index 558995ed6fe8..e0bacfc3c6b4 100644 --- a/arch/mips/pistachio/init.c +++ b/arch/mips/pistachio/init.c @@ -13,7 +13,6 @@ #include <linux/of_fdt.h> #include <asm/cacheflush.h> -#include <asm/dma-coherence.h> #include <asm/fw/fw.h> #include <asm/mips-boards/generic.h> #include <asm/mips-cps.h> @@ -83,7 +82,6 @@ phys_addr_t mips_cdmm_phys_base(void) static void __init mips_nmi_setup(void) { void *base; - extern char except_vec_nmi[]; base = cpu_has_veic ? (void *)(CAC_BASE + 0xa80) : @@ -118,10 +116,6 @@ void __init prom_init(void) pr_info("SoC Type: %s\n", get_system_type()); } -void __init prom_free_prom_memory(void) -{ -} - void __init device_tree_init(void) { if (!initial_boot_params) diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index cbae9d23ab7f..8286c3521476 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -64,20 +64,15 @@ static int __init early_init_dt_find_memory(unsigned long node, void __init plat_mem_setup(void) { - void *dtb = NULL; + void *dtb; set_io_port_base(KSEG1); /* * Load the builtin devicetree. This causes the chosen node to be - * parsed resulting in our memory appearing. fw_passed_dtb is used - * by CONFIG_MIPS_APPENDED_RAW_DTB as well. + * parsed resulting in our memory appearing. 
*/ - if (fw_passed_dtb) - dtb = (void *)fw_passed_dtb; - else if (__dtb_start != __dtb_end) - dtb = (void *)__dtb_start; - + dtb = get_fdt(); __dt_setup_arch(dtb); of_scan_flat_dt(early_init_dt_find_memory, NULL); diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c index 02e7878dc427..25728def3503 100644 --- a/arch/mips/ralink/prom.c +++ b/arch/mips/ralink/prom.c @@ -66,7 +66,3 @@ void __init prom_init(void) prom_init_cmdline(); } - -void __init prom_free_prom_memory(void) -{ -} diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c index 8126f1260407..274d33078c5e 100644 --- a/arch/mips/ralink/reset.c +++ b/arch/mips/ralink/reset.c @@ -27,7 +27,7 @@ static int ralink_assert_device(struct reset_controller_dev *rcdev, { u32 val; - if (id < 8) + if (id == 0) return -1; val = rt_sysc_r32(SYSC_REG_RESET_CTRL); @@ -42,7 +42,7 @@ static int ralink_deassert_device(struct reset_controller_dev *rcdev, { u32 val; - if (id < 8) + if (id == 0) return -1; val = rt_sysc_r32(SYSC_REG_RESET_CTRL); diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c index a9d1f2019dc3..23ad8dd9aa5e 100644 --- a/arch/mips/rb532/prom.c +++ b/arch/mips/rb532/prom.c @@ -34,11 +34,6 @@ static struct resource ddr_reg[] = { } }; -void __init prom_free_prom_memory(void) -{ - /* No prom memory to free */ -} - static inline int match_tag(char *arg, const char *tag) { return strncmp(arg, tag, strlen(tag)) == 0; diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index d411e0a90a5b..87bb6945ec25 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -404,11 +404,6 @@ void __init prom_meminit(void) } } -void __init prom_free_prom_memory(void) -{ - /* We got nothing to free here ... */ -} - extern void setup_zero_pages(void); void __init paging_init(void) diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c index 1bbd5bfb5458..e21ea1de05e3 100644 --- a/arch/mips/sgi-ip32/ip32-irq.c +++ b/arch/mips/sgi-ip32/ip32-irq.c @@ -343,7 +343,7 @@ static void ip32_unknown_interrupt(void) printk("Register dump:\n"); show_regs(get_irq_regs()); - printk("Please mail this report to linux-mips@linux-mips.org\n"); + printk("Please mail this report to linux-mips@vger.kernel.org\n"); printk("Spinning..."); while(1) ; } diff --git a/arch/mips/sgi-ip32/ip32-memory.c b/arch/mips/sgi-ip32/ip32-memory.c index 0f53fed39da6..3fc8d0a0bdfa 100644 --- a/arch/mips/sgi-ip32/ip32-memory.c +++ b/arch/mips/sgi-ip32/ip32-memory.c @@ -40,8 +40,3 @@ void __init prom_meminit(void) memblock_add(base, size); } } - - -void __init prom_free_prom_memory(void) -{ -} diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c index 89f7fca45152..a3323f8dcc1b 100644 --- a/arch/mips/sibyte/common/cfe.c +++ b/arch/mips/sibyte/common/cfe.c @@ -316,11 +316,6 @@ void __init prom_init(void) #endif } -void __init prom_free_prom_memory(void) -{ - /* Not sure what I'm supposed to do here. 
Nothing, I think */ -} - void prom_putchar(char c) { int ret; diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c index 6d0fd0e055b4..42ba1e97dff0 100644 --- a/arch/mips/txx9/generic/setup.c +++ b/arch/mips/txx9/generic/setup.c @@ -340,10 +340,6 @@ void __init prom_init(void) txx9_board_vec->prom_init(); } -void __init prom_free_prom_memory(void) -{ -} - const char *get_system_type(void) { return txx9_system_type; diff --git a/arch/mips/vdso/Kconfig b/arch/mips/vdso/Kconfig index 7aec721398d5..a665f6108cb5 100644 --- a/arch/mips/vdso/Kconfig +++ b/arch/mips/vdso/Kconfig @@ -12,7 +12,7 @@ # the lack of relocations. As such, we disable the VDSO for microMIPS builds. config MIPS_LD_CAN_LINK_VDSO - def_bool LD_VERSION >= 225000000 || LD_IS_LLD + def_bool LD_VERSION >= 22500 || LD_IS_LLD config MIPS_DISABLE_VDSO def_bool CPU_MICROMIPS || (!CPU_MIPSR6 && !MIPS_LD_CAN_LINK_VDSO) diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 5810cc12bc1d..2131d3fd7333 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -16,16 +16,13 @@ ccflags-vdso := \ $(filter -march=%,$(KBUILD_CFLAGS)) \ $(filter -m%-float,$(KBUILD_CFLAGS)) \ $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \ + $(CLANG_FLAGS) \ -D__VDSO__ ifndef CONFIG_64BIT ccflags-vdso += -DBUILD_VDSO32 endif -ifdef CONFIG_CC_IS_CLANG -ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS)) -endif - # # The -fno-jump-tables flag only prevents the compiler from generating # jump tables but does not prevent the compiler from emitting absolute diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c index ca53ac3060ef..628dddf79a05 100644 --- a/arch/mips/vr41xx/common/init.c +++ b/arch/mips/vr41xx/common/init.c @@ -58,7 +58,3 @@ void __init prom_init(void) strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); } } - -void __init prom_free_prom_memory(void) -{ -} diff --git a/arch/nds32/configs/defconfig b/arch/nds32/configs/defconfig index 40313a635075..f9a89cf00aa6 100644 --- a/arch/nds32/configs/defconfig +++ b/arch/nds32/configs/defconfig @@ -1,4 +1,3 @@ -CONFIG_CROSS_COMPILE="nds32le-linux-" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_HIGH_RES_TIMERS=y diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c index e01ad5d17224..c1327e552ec6 100644 --- a/arch/nds32/kernel/process.c +++ b/arch/nds32/kernel/process.c @@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); /* kernel thread fn */ p->thread.cpu_context.r6 = stack_start; diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c index c356e484dcab..af82e996f412 100644 --- a/arch/nds32/kernel/setup.c +++ b/arch/nds32/kernel/setup.c @@ -52,7 +52,7 @@ EXPORT_SYMBOL(elf_hwcap); /* * The following string table, must sync with HWCAP_xx bitmask, - * which is defined in <asm/procinfo.h> + * which is defined above */ static const char *hwcap_str[] = { "mfusr_pc", diff --git a/arch/nds32/kernel/time.c b/arch/nds32/kernel/time.c index ac9d78ce3a81..574a3d0a8539 100644 --- a/arch/nds32/kernel/time.c +++ b/arch/nds32/kernel/time.c @@ -2,7 +2,7 @@ // Copyright (C) 2005-2017 Andes Technology Corporation #include <linux/clocksource.h> -#include <linux/clk-provider.h> +#include <linux/of_clk.h> void __init time_init(void) { diff --git a/arch/nds32/kernel/traps.c 
b/arch/nds32/kernel/traps.c index 6a9772ba7392..ee0d9ae192a5 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -25,17 +25,8 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr); void dump_mem(const char *lvl, unsigned long bottom, unsigned long top) { unsigned long first; - mm_segment_t fs; int i; - /* - * We need to switch to kernel mode so that we can use __get_user - * to safely read from kernel space. Note that we now dump the - * code first, just in case the backtrace kills us. - */ - fs = get_fs(); - set_fs(KERNEL_DS); - pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top); for (first = bottom & ~31; first < top; first += 32) { @@ -48,7 +39,9 @@ void dump_mem(const char *lvl, unsigned long bottom, unsigned long top) for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { if (p >= bottom && p < top) { unsigned long val; - if (__get_user(val, (unsigned long *)p) == 0) + + if (get_kernel_nofault(val, + (unsigned long *)p) == 0) sprintf(str + i * 9, " %08lx", val); else sprintf(str + i * 9, " ????????"); @@ -56,46 +49,10 @@ void dump_mem(const char *lvl, unsigned long bottom, unsigned long top) } pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str); } - - set_fs(fs); } EXPORT_SYMBOL(dump_mem); -static void dump_instr(struct pt_regs *regs) -{ - unsigned long addr = instruction_pointer(regs); - mm_segment_t fs; - char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; - int i; - - return; - /* - * We need to switch to kernel mode so that we can use __get_user - * to safely read from kernel space. Note that we now dump the - * code first, just in case the backtrace kills us. - */ - fs = get_fs(); - set_fs(KERNEL_DS); - - pr_emerg("Code: "); - for (i = -4; i < 1; i++) { - unsigned int val, bad; - - bad = __get_user(val, &((u32 *) addr)[i]); - - if (!bad) { - p += sprintf(p, i == 0 ? 
"(%08x) " : "%08x ", val); - } else { - p += sprintf(p, "bad PC value"); - break; - } - } - pr_emerg("Code: %s\n", str); - - set_fs(fs); -} - #define LOOP_TIMES (100) static void __dump(struct task_struct *tsk, unsigned long *base_reg, const char *loglvl) @@ -179,7 +136,6 @@ void die(const char *str, struct pt_regs *regs, int err) if (!user_mode(regs) || in_interrupt()) { dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK); - dump_instr(regs); dump_stack(); } diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S index da8442450e46..0794cd7803df 100644 --- a/arch/nios2/kernel/entry.S +++ b/arch/nios2/kernel/entry.S @@ -389,7 +389,10 @@ ENTRY(ret_from_interrupt) */ ENTRY(sys_clone) SAVE_SWITCH_STACK + subi sp, sp, 4 /* make space for tls pointer */ + stw r8, 0(sp) /* pass tls pointer (r8) via stack (5th argument) */ call nios2_clone + addi sp, sp, 4 RESTORE_SWITCH_STACK ret diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c index 50b4eb19a6cc..c5f916ca6845 100644 --- a/arch/nios2/kernel/process.c +++ b/arch/nios2/kernel/process.c @@ -109,7 +109,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct switch_stack *childstack = ((struct switch_stack *)childregs) - 1; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childstack, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs)); diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c index 3c6e3c813a0b..d2f21957e99c 100644 --- a/arch/nios2/kernel/setup.c +++ b/arch/nios2/kernel/setup.c @@ -32,8 +32,6 @@ EXPORT_SYMBOL(memory_start); unsigned long memory_end; EXPORT_SYMBOL(memory_end); -unsigned long memory_size; - static struct pt_regs fake_regs = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; @@ -141,16 +139,22 @@ asmlinkage void __init nios2_boot_init(unsigned r4, unsigned r5, unsigned r6, parse_early_param(); } +static void __init find_limits(unsigned long *min, unsigned long *max_low, + unsigned long *max_high) +{ + *max_low = PFN_DOWN(memblock_get_current_limit()); + *min = PFN_UP(memblock_start_of_DRAM()); + *max_high = PFN_DOWN(memblock_end_of_DRAM()); +} + void __init setup_arch(char **cmdline_p) { int dram_start; console_verbose(); - dram_start = memblock_start_of_DRAM(); - memory_size = memblock_phys_mem_size(); - memory_start = PAGE_ALIGN((unsigned long)__pa(_end)); - memory_end = (unsigned long) CONFIG_NIOS2_MEM_BASE + memory_size; + memory_start = memblock_start_of_DRAM(); + memory_end = memblock_end_of_DRAM(); init_mm.start_code = (unsigned long) _stext; init_mm.end_code = (unsigned long) _etext; @@ -161,11 +165,10 @@ void __init setup_arch(char **cmdline_p) /* Keep a copy of command line */ *cmdline_p = boot_command_line; - min_low_pfn = PFN_UP(memory_start); - max_low_pfn = PFN_DOWN(memory_end); + find_limits(&min_low_pfn, &max_low_pfn, &max_pfn); max_mapnr = max_low_pfn; - memblock_reserve(dram_start, memory_start - dram_start); + memblock_reserve(__pa_symbol(_stext), _end - _stext); #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) { memblock_reserve(virt_to_phys((void *)initrd_start), diff --git a/arch/nios2/kernel/sys_nios2.c b/arch/nios2/kernel/sys_nios2.c index cd390ec4f88b..b1ca85699952 100644 --- a/arch/nios2/kernel/sys_nios2.c +++ b/arch/nios2/kernel/sys_nios2.c @@ -22,6 +22,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, unsigned int op) { struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; if (len == 0) 
return 0; @@ -34,16 +35,22 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, if (addr + len < addr) return -EFAULT; + if (mmap_read_lock_killable(mm)) + return -EINTR; + /* * Verify that the specified address region actually belongs * to this process. */ - vma = find_vma(current->mm, addr); - if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) + vma = find_vma(mm, addr); + if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) { + mmap_read_unlock(mm); return -EFAULT; + } flush_cache_range(vma, addr, addr + len); + mmap_read_unlock(mm); return 0; } diff --git a/arch/openrisc/Kbuild b/arch/openrisc/Kbuild new file mode 100644 index 000000000000..4234b4c03e72 --- /dev/null +++ b/arch/openrisc/Kbuild @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y += lib/ kernel/ mm/ +obj-y += boot/dts/ diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile index bf10141c7426..410e7abfac69 100644 --- a/arch/openrisc/Makefile +++ b/arch/openrisc/Makefile @@ -24,6 +24,10 @@ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ +all: vmlinux.bin + +boot := arch/$(ARCH)/boot + ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y) KBUILD_CFLAGS += $(call cc-option,-mhard-mul) else @@ -38,14 +42,13 @@ endif head-y := arch/openrisc/kernel/head.o -core-y += arch/openrisc/lib/ \ - arch/openrisc/kernel/ \ - arch/openrisc/mm/ +core-y += arch/openrisc/ libs-y += $(LIBGCC) -ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""' -BUILTIN_DTB := y -else -BUILTIN_DTB := n -endif -core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ +PHONY += vmlinux.bin + +vmlinux.bin: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +archclean: + $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/openrisc/boot/.gitignore b/arch/openrisc/boot/.gitignore new file mode 100644 index 000000000000..007d6fea3145 --- /dev/null +++ b/arch/openrisc/boot/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +vmlinux.bin diff --git a/arch/openrisc/boot/Makefile b/arch/openrisc/boot/Makefile new file mode 100644 index 000000000000..5b28538f4dd1 --- /dev/null +++ b/arch/openrisc/boot/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for bootable kernel images +# + +targets += vmlinux.bin + +OBJCOPYFLAGS_vmlinux.bin := -O binary +$(obj)/vmlinux.bin: vmlinux FORCE + $(call if_changed,objcopy) diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index 3c98728cce24..eb62429681fc 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -34,6 +34,7 @@ #include <linux/init_task.h> #include <linux/mqueue.h> #include <linux/fs.h> +#include <linux/reboot.h> #include <linux/uaccess.h> #include <asm/io.h> @@ -49,10 +50,16 @@ */ struct thread_info *current_thread_info_set[NR_CPUS] = { &init_thread_info, }; -void machine_restart(void) +void machine_restart(char *cmd) { - printk(KERN_INFO "*** MACHINE RESTART ***\n"); - __asm__("l.nop 1"); + do_kernel_restart(cmd); + + /* Give a grace period for failure to restart of 1s */ + mdelay(1000); + + /* Whoops - the platform was unable to reboot. Tell the user! 
*/ + pr_emerg("Reboot failed -- System halted\n"); + while (1); } /* @@ -167,7 +174,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, sp -= sizeof(struct pt_regs); kregs = (struct pt_regs *)sp; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(kregs, 0, sizeof(struct pt_regs)); kregs->gpr[20] = usp; /* fn, kernel thread */ kregs->gpr[22] = arg; diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c index 29c82ef2e207..48e1092a64de 100644 --- a/arch/openrisc/kernel/smp.c +++ b/arch/openrisc/kernel/smp.c @@ -16,6 +16,7 @@ #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/irq.h> +#include <linux/of.h> #include <asm/cpuinfo.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> @@ -60,22 +61,32 @@ void __init smp_prepare_boot_cpu(void) void __init smp_init_cpus(void) { - int i; + struct device_node *cpu; + u32 cpu_id; - for (i = 0; i < NR_CPUS; i++) - set_cpu_possible(i, true); + for_each_of_cpu_node(cpu) { + if (of_property_read_u32(cpu, "reg", &cpu_id)) { + pr_warn("%s missing reg property", cpu->full_name); + continue; + } + + if (cpu_id < NR_CPUS) + set_cpu_possible(cpu_id, true); + } } void __init smp_prepare_cpus(unsigned int max_cpus) { - int i; + unsigned int cpu; /* * Initialise the present map, which describes the set of CPUs * actually populated at the present time. */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); + for_each_possible_cpu(cpu) { + if (cpu < max_cpus) + set_cpu_present(cpu, true); + } } void __init smp_cpus_done(unsigned int max_cpus) diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index e7f611c2719d..afc3b8d03572 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -34,6 +34,7 @@ config PARISC select GENERIC_SMP_IDLE_THREAD select GENERIC_CPU_DEVICES select GENERIC_STRNCPY_FROM_USER + select GENERIC_LIB_DEVMEM_IS_ALLOWED select SYSCTL_ARCH_UNALIGN_ALLOW select SYSCTL_EXCEPTION_TRACE select HAVE_MOD_ARCH_SPECIFIC @@ -60,8 +61,10 @@ config PARISC select HAVE_KRETPROBES select HAVE_DYNAMIC_FTRACE if $(cc-option,-fpatchable-function-entry=1,1) select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE + select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE select HAVE_KPROBES_ON_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS select SET_FS help @@ -200,9 +203,12 @@ config PREFETCH def_bool y depends on PA8X00 || PA7200 +config PARISC_HUGE_KERNEL + def_bool y if !MODULES || UBSAN || FTRACE || COMPILE_TEST + config MLONGCALLS - def_bool y if !MODULES || UBSAN || FTRACE - bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE + def_bool y if PARISC_HUGE_KERNEL + bool "Enable the -mlong-calls compiler option for big kernels" if !PARISC_HUGE_KERNEL depends on PA8X00 help If you configure the kernel to include many drivers built-in instead @@ -310,6 +316,16 @@ config IRQSTACKS for handling hard and soft interrupts. This can help avoid overflowing the process kernel stacks. +config TLB_PTLOCK + bool "Use page table locks in TLB fault handler" + depends on SMP + default n + help + Select this option to enable page table locking in the TLB + fault handler. This ensures that page table entries are + updated consistently on SMP machines at the expense of some + loss in performance. 
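The TLB_PTLOCK help text above is the summary; the mechanism shows up in the parisc hunks further down, where switch_mm_irqs_off() stores the physical address of the mm's page_table_lock in control register cr28 and the entry.S miss handlers take that lock with ldcw. Since ldcw only operates on a 16-byte-aligned word, the lock address is first passed through __ldcw_align(). A hedged stand-alone sketch of that rounding, not the kernel's definition:

/* align16.c - illustrative stand-in for __ldcw_align(): the lock storage
 * is over-sized and the 16-byte-aligned word inside it is used. */
#include <stdint.h>
#include <stdio.h>

static uint32_t *ldcw_align(void *p)
{
	uintptr_t a = ((uintptr_t)p + 15) & ~(uintptr_t)15;
	return (uint32_t *)a;
}

int main(void)
{
	uint32_t lock[7];	/* 28 bytes: an aligned word always fits */
	printf("raw %p, aligned %p\n", (void *)lock, (void *)ldcw_align(lock));
	return 0;
}
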
+ config HOTPLUG_CPU bool default y if SMP diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig index 3cbcfad5f724..7611d48c599e 100644 --- a/arch/parisc/configs/generic-32bit_defconfig +++ b/arch/parisc/configs/generic-32bit_defconfig @@ -22,7 +22,6 @@ CONFIG_PCI_LBA=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_UNUSED_SYMBOLS=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig index 8f81fcbf04c4..53054b81461a 100644 --- a/arch/parisc/configs/generic-64bit_defconfig +++ b/arch/parisc/configs/generic-64bit_defconfig @@ -31,7 +31,6 @@ CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y -CONFIG_UNUSED_SYMBOLS=y CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BINFMT_MISC=m # CONFIG_COMPACTION is not set diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index 8f33085ff1bd..1a609d38f667 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -179,7 +179,7 @@ static __inline__ void __user *arch_compat_alloc_user_space(long len) static inline int __is_compat_task(struct task_struct *t) { - return test_ti_thread_flag(task_thread_info(t), TIF_32BIT); + return test_tsk_thread_flag(t, TIF_32BIT); } static inline int is_compat_task(void) diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index fad29aa6f45f..1e4fbd0fd944 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h @@ -12,10 +12,6 @@ #include <linux/threads.h> #include <linux/irq.h> -#ifdef CONFIG_IRQSTACKS -#define __ARCH_HAS_DO_SOFTIRQ -#endif - typedef struct { unsigned int __softirq_pending; unsigned int kernel_stack_usage; diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index 45e20d38dc59..8a11b8cf4719 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h @@ -321,4 +321,6 @@ extern void iowrite64be(u64 val, void __iomem *addr); */ #define xlate_dev_kmem_ptr(p) p +extern int devmem_is_allowed(unsigned long pfn); + #endif diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h index 46f8c22c5977..726257648d9f 100644 --- a/arch/parisc/include/asm/mmu_context.h +++ b/arch/parisc/include/asm/mmu_context.h @@ -5,6 +5,7 @@ #include <linux/mm.h> #include <linux/sched.h> #include <linux/atomic.h> +#include <linux/spinlock.h> #include <asm-generic/mm_hooks.h> /* on PA-RISC, we actually have enough contexts to justify an allocator @@ -50,6 +51,12 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { if (prev != next) { +#ifdef CONFIG_TLB_PTLOCK + /* put physical address of page_table_lock in cr28 (tr4) + for TLB faults */ + spinlock_t *pgd_lock = &next->page_table_lock; + mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28); +#endif mtctl(__pa(next->pgd), 25); load_context(next->context); } diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index 6b3f6740a6a6..d00313d1274e 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -112,7 +112,7 @@ extern int npmem_ranges; #else #define BITS_PER_PTE_ENTRY 2 #define BITS_PER_PMD_ENTRY 2 -#define BITS_PER_PGD_ENTRY BITS_PER_PMD_ENTRY +#define BITS_PER_PGD_ENTRY 2 #endif #define PGD_ENTRY_SIZE (1UL << 
BITS_PER_PGD_ENTRY) #define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY) diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index a6482b2ce0ea..dda557085311 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -15,47 +15,23 @@ #define __HAVE_ARCH_PGD_FREE #include <asm-generic/pgalloc.h> -/* Allocate the top level pgd (page directory) - * - * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we - * allocate the first pmd adjacent to the pgd. This means that we can - * subtract a constant offset to get to it. The pmd and pgd sizes are - * arranged so that a single pmd covers 4GB (giving a full 64-bit - * process access to 8TB) so our lookups are effectively L2 for the - * first 4GB of the kernel (i.e. for all ILP32 processes and all the - * kernel for machines with under 4GB of memory) */ +/* Allocate the top level pgd (page directory) */ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, - PGD_ALLOC_ORDER); - pgd_t *actual_pgd = pgd; + pgd_t *pgd; - if (likely(pgd != NULL)) { - memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); -#if CONFIG_PGTABLE_LEVELS == 3 - actual_pgd += PTRS_PER_PGD; - /* Populate first pmd with allocated memory. We mark it - * with PxD_FLAG_ATTACHED as a signal to the system that this - * pmd entry may not be cleared. */ - set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT | - PxD_FLAG_VALID | - PxD_FLAG_ATTACHED) - + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT))); - /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as - * a signal that this pmd may not be freed */ - set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED)); -#endif - } - spin_lock_init(pgd_spinlock(actual_pgd)); - return actual_pgd; + pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + if (unlikely(pgd == NULL)) + return NULL; + + memset(pgd, 0, PAGE_SIZE << PGD_ORDER); + + return pgd; } static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { -#if CONFIG_PGTABLE_LEVELS == 3 - pgd -= PTRS_PER_PGD; -#endif - free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); + free_pages((unsigned long)pgd, PGD_ORDER); } #if CONFIG_PGTABLE_LEVELS == 3 @@ -70,41 +46,25 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) { - return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER); + pmd_t *pmd; + + pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER); + if (likely(pmd)) + memset ((void *)pmd, 0, PAGE_SIZE << PMD_ORDER); + return pmd; } static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { - if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) { - /* - * This is the permanent pmd attached to the pgd; - * cannot free it. - * Increment the counter to compensate for the decrement - * done by generic mm code. 
- */ - mm_inc_nr_pmds(mm); - return; - } free_pages((unsigned long)pmd, PMD_ORDER); } - #endif static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { -#if CONFIG_PGTABLE_LEVELS == 3 - /* preserve the gateway marker if this is the beginning of - * the permanent pmd */ - if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) - set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | - PxD_FLAG_VALID | - PxD_FLAG_ATTACHED) - + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT))); - else -#endif - set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID) - + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT))); + set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID) + + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT))); } #define pmd_populate(mm, pmd, pte_page) \ diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 75cf84070fc9..39017210dbf0 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -23,8 +23,6 @@ #include <asm/processor.h> #include <asm/cache.h> -static inline spinlock_t *pgd_spinlock(pgd_t *); - /* * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel * memory. For the return value to be meaningful, ADDR must be >= @@ -42,12 +40,8 @@ static inline spinlock_t *pgd_spinlock(pgd_t *); /* This is for the serialization of PxTLB broadcasts. At least on the N class * systems, only one PxTLB inter processor broadcast can be active at any one - * time on the Merced bus. - - * PTE updates are protected by locks in the PMD. - */ + * time on the Merced bus. */ extern spinlock_t pa_tlb_flush_lock; -extern spinlock_t pa_swapper_pg_lock; #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) extern int pa_serialize_tlb_flushes; #else @@ -86,18 +80,16 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) * within a page table are directly modified. Thus, the following * hook is made available. 
*/ -#define set_pte(pteptr, pteval) \ - do{ \ - *(pteptr) = (pteval); \ - } while(0) +#define set_pte(pteptr, pteval) \ + do { \ + *(pteptr) = (pteval); \ + barrier(); \ + } while(0) -#define set_pte_at(mm, addr, ptep, pteval) \ - do { \ - unsigned long flags; \ - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\ - set_pte(ptep, pteval); \ - purge_tlb_entries(mm, addr); \ - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\ +#define set_pte_at(mm, addr, pteptr, pteval) \ + do { \ + *(pteptr) = (pteval); \ + purge_tlb_entries(mm, addr); \ } while (0) #endif /* !__ASSEMBLY__ */ @@ -120,12 +112,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) #if CONFIG_PGTABLE_LEVELS == 3 -#define PGD_ORDER 1 /* Number of pages per pgd */ -#define PMD_ORDER 1 /* Number of pages per pmd */ -#define PGD_ALLOC_ORDER (2 + 1) /* first pgd contains pmd */ +#define PMD_ORDER 1 +#define PGD_ORDER 0 #else -#define PGD_ORDER 1 /* Number of pages per pgd */ -#define PGD_ALLOC_ORDER (PGD_ORDER + 1) +#define PGD_ORDER 1 #endif /* Definitions for 3rd level (we use PLD here for Page Lower directory @@ -240,11 +230,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) * able to effectively address 40/42/44-bits of physical address space * depending on 4k/16k/64k PAGE_SIZE */ #define _PxD_PRESENT_BIT 31 -#define _PxD_ATTACHED_BIT 30 -#define _PxD_VALID_BIT 29 +#define _PxD_VALID_BIT 30 #define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT)) -#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT)) #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) #define PxD_FLAG_MASK (0xf) #define PxD_FLAG_SHIFT (4) @@ -326,23 +314,10 @@ extern unsigned long *empty_zero_page; #define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK) #define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) -#if CONFIG_PGTABLE_LEVELS == 3 -/* The first entry of the permanent pmd is not there if it contains - * the gateway marker */ -#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED) -#else #define pmd_none(x) (!pmd_val(x)) -#endif #define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID)) #define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT) static inline void pmd_clear(pmd_t *pmd) { -#if CONFIG_PGTABLE_LEVELS == 3 - if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) - /* This is the entry pointing to the permanent pmd - * attached to the pgd; cannot clear it */ - set_pmd(pmd, __pmd(PxD_FLAG_ATTACHED)); - else -#endif set_pmd(pmd, __pmd(0)); } @@ -358,12 +333,6 @@ static inline void pmd_clear(pmd_t *pmd) { #define pud_bad(x) (!(pud_flag(x) & PxD_FLAG_VALID)) #define pud_present(x) (pud_flag(x) & PxD_FLAG_PRESENT) static inline void pud_clear(pud_t *pud) { -#if CONFIG_PGTABLE_LEVELS == 3 - if(pud_flag(*pud) & PxD_FLAG_ATTACHED) - /* This is the permanent pmd attached to the pud; cannot - * free it */ - return; -#endif set_pud(pud, __pud(0)); } #endif @@ -456,32 +425,18 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) - -static inline spinlock_t *pgd_spinlock(pgd_t *pgd) -{ - if (unlikely(pgd == swapper_pg_dir)) - return &pa_swapper_pg_lock; - return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1))); -} - - static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; 
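The hunk continuing below strips the pgd-spinlock dance out of ptep_test_and_clear_young(): the young bit is now cleared through set_pte_at(), which pairs the PTE store with a TLB purge, and serialization against the TLB miss handlers comes from the page table lock when TLB_PTLOCK is enabled. A loose user-space analogue of the test-and-clear step, with a C11 atomic standing in for the PTE accessors (an illustrative assumption, not the kernel API):

/* young_bit.c - atomically clear an "accessed" bit and report whether it
 * was set, mirroring the semantics of ptep_test_and_clear_young(); the
 * real code additionally purges the TLB entry. */
#include <stdatomic.h>
#include <stdio.h>

#define PAGE_ACCESSED 0x2u

int main(void)
{
	_Atomic unsigned int pte = 0x3u;	/* present | accessed */
	unsigned int old = atomic_fetch_and(&pte, ~PAGE_ACCESSED);

	printf("was young: %u, pte now 0x%x\n",
	       (old & PAGE_ACCESSED) ? 1u : 0u, atomic_load(&pte));
	return 0;
}
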
- unsigned long flags; if (!pte_young(*ptep)) return 0; - spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags); pte = *ptep; if (!pte_young(pte)) { - spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags); return 0; } - set_pte(ptep, pte_mkold(pte)); - purge_tlb_entries(vma->vm_mm, addr); - spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); return 1; } @@ -489,24 +444,16 @@ struct mm_struct; static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t old_pte; - unsigned long flags; - spin_lock_irqsave(pgd_spinlock(mm->pgd), flags); old_pte = *ptep; - set_pte(ptep, __pte(0)); - purge_tlb_entries(mm, addr); - spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags); + set_pte_at(mm, addr, ptep, __pte(0)); return old_pte; } static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long flags; - spin_lock_irqsave(pgd_spinlock(mm->pgd), flags); - set_pte(ptep, pte_wrprotect(*ptep)); - purge_tlb_entries(mm, addr); - spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags); + set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep)); } #define pte_same(A,B) (pte_val(A) == pte_val(B)) diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 305768a40773..cd2cc1b1648c 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -268,7 +268,6 @@ int main(void) DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD); DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD); DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE); - DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER)); DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT)); DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT); DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 4d37cc9cba37..9f939afe6b88 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -35,10 +35,9 @@ .level 2.0 #endif - .import pa_tlb_lock,data - .macro load_pa_tlb_lock reg - mfctl %cr25,\reg - addil L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg + /* Get aligned page_table_lock address for this mm from cr28/tr4 */ + .macro get_ptl reg + mfctl %cr28,\reg .endm /* space_to_prot macro creates a prot id from a space id */ @@ -407,7 +406,9 @@ # endif #endif dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ +#if CONFIG_PGTABLE_LEVELS < 3 copy %r0,\pte +#endif ldw,s \index(\pmd),\pmd bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ @@ -417,38 +418,23 @@ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */ .endm - /* Look up PTE in a 3-Level scheme. - * - * Here we implement a Hybrid L2/L3 scheme: we allocate the - * first pmd adjacent to the pgd. This means that we can - * subtract a constant offset to get to it. The pmd and pgd - * sizes are arranged so that a single pmd covers 4GB (giving - * a full LP64 process access to 8TB) so our lookups are - * effectively L2 for the first 4GB of the kernel (i.e. for - * all ILP32 processes and all the kernel for machines with - * under 4GB of memory) */ + /* Look up PTE in a 3-Level scheme. */ .macro L3_ptep pgd,pte,index,va,fault -#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. 
with 16kb page size */ +#if CONFIG_PGTABLE_LEVELS == 3 + copy %r0,\pte extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 ldw,s \index(\pgd),\pgd - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 - shld \pgd,PxD_VALUE_SHIFT,\index - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 - copy \index,\pgd - extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 - ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd + shld \pgd,PxD_VALUE_SHIFT,\pgd #endif L2_ptep \pgd,\pte,\index,\va,\fault .endm - /* Acquire pa_tlb_lock lock and check page is present. */ - .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault -#ifdef CONFIG_SMP + /* Acquire page_table_lock and check page is present. */ + .macro ptl_lock spc,ptp,pte,tmp,tmp1,fault +#ifdef CONFIG_TLB_PTLOCK 98: cmpib,COND(=),n 0,\spc,2f - load_pa_tlb_lock \tmp + get_ptl \tmp 1: LDCW 0(\tmp),\tmp1 cmpib,COND(=) 0,\tmp1,1b nop @@ -463,26 +449,26 @@ 3: .endm - /* Release pa_tlb_lock lock without reloading lock address. + /* Release page_table_lock without reloading lock address. Note that the values in the register spc are limited to NR_SPACE_IDS (262144). Thus, the stw instruction always stores a nonzero value even when register spc is 64 bits. We use an ordered store to ensure all prior accesses are performed prior to releasing the lock. */ - .macro tlb_unlock0 spc,tmp -#ifdef CONFIG_SMP + .macro ptl_unlock0 spc,tmp +#ifdef CONFIG_TLB_PTLOCK 98: or,COND(=) %r0,\spc,%r0 stw,ma \spc,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif .endm - /* Release pa_tlb_lock lock. */ - .macro tlb_unlock1 spc,tmp -#ifdef CONFIG_SMP -98: load_pa_tlb_lock \tmp + /* Release page_table_lock. 
*/ + .macro ptl_unlock1 spc,tmp +#ifdef CONFIG_TLB_PTLOCK +98: get_ptl \tmp + ptl_unlock0 \spc,\tmp 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) - tlb_unlock0 \spc,\tmp #endif .endm @@ -1165,14 +1151,14 @@ dtlb_miss_20w: L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w - tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w + ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1191,14 +1177,14 @@ nadtlb_miss_20w: L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w - tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w + ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1219,7 +1205,7 @@ dtlb_miss_11: L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 - tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11 + ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11 update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1232,7 +1218,7 @@ dtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1252,7 +1238,7 @@ nadtlb_miss_11: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 - tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11 + ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11 update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1265,7 +1251,7 @@ nadtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1285,7 +1271,7 @@ dtlb_miss_20: L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 - tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 + ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1294,7 +1280,7 @@ dtlb_miss_20: idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1313,7 +1299,7 @@ nadtlb_miss_20: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 - tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 + ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1322,7 +1308,7 @@ nadtlb_miss_20: idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1422,14 +1408,14 @@ itlb_miss_20w: L3_ptep ptp,pte,t0,va,itlb_fault - tlb_lock spc,ptp,pte,t0,t1,itlb_fault + ptl_lock spc,ptp,pte,t0,t1,itlb_fault update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1446,14 +1432,14 @@ naitlb_miss_20w: L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w - tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w + ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1474,7 +1460,7 @@ itlb_miss_11: L2_ptep ptp,pte,t0,va,itlb_fault - tlb_lock spc,ptp,pte,t0,t1,itlb_fault + ptl_lock spc,ptp,pte,t0,t1,itlb_fault update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1487,7 +1473,7 @@ itlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1498,7 +1484,7 @@ naitlb_miss_11: L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 - tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11 + ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11 update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1511,7 +1497,7 @@ naitlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1532,7 +1518,7 @@ 
itlb_miss_20: L2_ptep ptp,pte,t0,va,itlb_fault - tlb_lock spc,ptp,pte,t0,t1,itlb_fault + ptl_lock spc,ptp,pte,t0,t1,itlb_fault update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1541,7 +1527,7 @@ itlb_miss_20: iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1552,7 +1538,7 @@ naitlb_miss_20: L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 - tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 + ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1561,7 +1547,7 @@ naitlb_miss_20: iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1584,14 +1570,14 @@ dbit_trap_20w: L3_ptep ptp,pte,t0,va,dbit_fault - tlb_lock spc,ptp,pte,t0,t1,dbit_fault + ptl_lock spc,ptp,pte,t0,t1,dbit_fault update_dirty ptp,pte,t1 make_insert_tlb spc,pte,prot,t1 idtlbt pte,prot - tlb_unlock0 spc,t0 + ptl_unlock0 spc,t0 rfir nop #else @@ -1604,7 +1590,7 @@ dbit_trap_11: L2_ptep ptp,pte,t0,va,dbit_fault - tlb_lock spc,ptp,pte,t0,t1,dbit_fault + ptl_lock spc,ptp,pte,t0,t1,dbit_fault update_dirty ptp,pte,t1 make_insert_tlb_11 spc,pte,prot @@ -1617,7 +1603,7 @@ dbit_trap_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock0 spc,t0 + ptl_unlock0 spc,t0 rfir nop @@ -1628,7 +1614,7 @@ dbit_trap_20: L2_ptep ptp,pte,t0,va,dbit_fault - tlb_lock spc,ptp,pte,t0,t1,dbit_fault + ptl_lock spc,ptp,pte,t0,t1,dbit_fault update_dirty ptp,pte,t1 make_insert_tlb spc,pte,prot,t1 @@ -1637,7 +1623,7 @@ dbit_trap_20: idtlbt pte,prot - tlb_unlock0 spc,t0 + ptl_unlock0 spc,t0 rfir nop #endif diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S index 81de5e2b391c..c2981401775c 100644 --- a/arch/parisc/kernel/hpmc.S +++ b/arch/parisc/kernel/hpmc.S @@ -289,13 +289,3 @@ os_hpmc_6: b . nop .align 16 /* make function length multiple of 16 bytes */ -.os_hpmc_end: - - - __INITRODATA -.globl os_hpmc_size - .align 4 - .type os_hpmc_size, @object - .size os_hpmc_size, 4 -os_hpmc_size: - .word .os_hpmc_end-.os_hpmc diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 49cd6d2caefb..0d46b19dc4d3 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -17,6 +17,7 @@ #include <linux/types.h> #include <asm/io.h> +#include <asm/softirq_stack.h> #include <asm/smp.h> #include <asm/ldcw.h> @@ -373,7 +374,11 @@ static inline int eirr_to_irq(unsigned long eirr) /* * IRQ STACK - used for irq handler */ +#ifdef CONFIG_64BIT +#define IRQ_STACK_SIZE (4096 << 4) /* 64k irq stack size */ +#else #define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */ +#endif union irq_stack_union { unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index a92a23d6acd9..b144fbe29bc1 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -200,7 +200,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, extern void * const ret_from_kernel_thread; extern void * const child_return; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(cregs, 0, sizeof(struct pt_regs)); if (!usp) /* idle thread */ @@ -260,6 +260,8 @@ get_wchan(struct task_struct *p) do { if (unwind_once(&info) < 0) return 0; + if (p->state == TASK_RUNNING) + return 0; ip = info.ip; if (!in_sched_functions(ip)) return ip; diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2127974982df..65de6c4c9354 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c 
@@ -567,8 +567,6 @@ static const struct user_regset_view user_parisc_native_view = { }; #ifdef CONFIG_64BIT -#include <linux/compat.h> - static int gpr32_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) diff --git a/arch/parisc/kernel/syscalls/Makefile b/arch/parisc/kernel/syscalls/Makefile index c22a21c39f30..283f64407b07 100644 --- a/arch/parisc/kernel/syscalls/Makefile +++ b/arch/parisc/kernel/syscalls/Makefile @@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') -syscall := $(srctree)/$(src)/syscall.tbl +syscall := $(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh systbl := $(srctree)/$(src)/syscalltbl.sh @@ -22,24 +22,24 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_offset_$(basetarget))' syshdr_abis_unistd_32 := common,32 -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abis_unistd_64 := common,64 -$(uapi)/unistd_64.h: $(syscall) $(syshdr) +$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) systbl_abis_syscall_table_32 := common,32 -$(kapi)/syscall_table_32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_64 := common,64 -$(kapi)/syscall_table_64.h: $(syscall) $(systbl) +$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_c32 := common,32 systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h unistd_64.h @@ -47,9 +47,10 @@ kapisyshdr-y += syscall_table_32.h \ syscall_table_64.h \ syscall_table_c32.h -targets += $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 6bcc31966b44..271a92519683 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -439,3 +439,4 @@ 439 common faccessat2 sys_faccessat2 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index a52c7abf2ca4..8d8441d4562a 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -798,14 +798,13 @@ void notrace handle_interruption(int code, struct pt_regs *regs) void __init initialize_ivt(const void *iva) { - extern u32 os_hpmc_size; extern const u32 os_hpmc[]; int i; u32 check = 0; u32 *ivap; u32 *hpmcp; - u32 length, instr; + u32 instr; if (strcmp((const char *)iva, "cows can fly")) panic("IVT invalid"); @@ -836,18 +835,14 @@ void __init initialize_ivt(const void *iva) /* Setup IVA and compute checksum for HPMC handler */ ivap[6] = (u32)__pa(os_hpmc); - length = os_hpmc_size; - ivap[7] = length; hpmcp = (u32 *)os_hpmc; - for (i=0; i<length/4; i++) - check += *hpmcp++; - for (i=0; i<8; i++) check += ivap[i]; ivap[5] = -check; + pr_debug("initialize_ivt: IVA[6] = 
0x%08x\n", ivap[6]); } diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index d7ba014a7fbb..43652de5f139 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -142,24 +142,17 @@ static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { - unsigned long flags; - - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); __set_huge_pte_at(mm, addr, ptep, entry); - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); } pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long flags; pte_t entry; - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); entry = *ptep; __set_huge_pte_at(mm, addr, ptep, __pte(0)); - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); return entry; } @@ -168,29 +161,23 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long flags; pte_t old_pte; - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); old_pte = *ptep; __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); } int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) { - unsigned long flags; int changed; struct mm_struct *mm = vma->vm_mm; - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); changed = !pte_same(*ptep, pte); if (changed) { __set_huge_pte_at(mm, addr, ptep, pte); } - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); return changed; } diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 3ec633b11b54..9ca4e4ff6895 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -37,11 +37,6 @@ extern int data_start; extern void parisc_kernel_start(void); /* Kernel entry point in head.S */ #if CONFIG_PGTABLE_LEVELS == 3 -/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout - * with the first pmd adjacent to the pgd and below it. gcc doesn't actually - * guarantee that global objects will be laid out in memory in the same order - * as the order of declaration, so put these in different sections and use - * the linker script to order them. 
*/ pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE))); #endif @@ -559,6 +554,11 @@ void __init mem_init(void) BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t)); BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD > BITS_PER_LONG); +#if CONFIG_PGTABLE_LEVELS == 3 + BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD); +#else + BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD); +#endif high_memory = __va((max_pfn << PAGE_SHIFT)); set_max_mapnr(max_low_pfn); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 9141f03060ce..386ae12d8523 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -196,7 +196,6 @@ config PPC select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13) select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2) select HAVE_CONTEXT_TRACKING if PPC64 - select HAVE_TIF_NOHZ if PPC64 select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_STACKOVERFLOW select HAVE_DYNAMIC_FTRACE @@ -236,6 +235,7 @@ config PPC select MMU_GATHER_PAGE_SIZE select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN + select HAVE_SOFTIRQ_ON_OWN_STACK select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING select HAVE_IRQ_TIME_ACCOUNTING @@ -503,18 +503,14 @@ config HOTPLUG_CPU Say N if you are unsure. config PPC_QUEUED_SPINLOCKS - bool "Queued spinlocks" + bool "Queued spinlocks" if EXPERT depends on SMP + default PPC_BOOK3S_64 help Say Y here to use queued spinlocks which give better scalability and fairness on large SMP and NUMA systems without harming single threaded performance. - This option is currently experimental, the code is more complex and - less tested so it defaults to "N" for the moment. - - If unsure, say "N". - config ARCH_CPU_PROBE_RELEASE def_bool y depends on HOTPLUG_CPU @@ -718,18 +714,6 @@ config ARCH_MEMORY_PROBE def_bool y depends on MEMORY_HOTPLUG -config STDBINUTILS - bool "Using standard binutils settings" - depends on 44x - default y - help - Turning this option off allows you to select 256KB PAGE_SIZE on 44x. - Note, that kernel will be able to run only those applications, - which had been compiled using binutils later than 2.17.50.0.3 with - '-zmax-page-size' set to 256K (the default is 64K). Or, if using - the older binutils, you can patch them with a trivial patch, which - changes the ELF_MAXPAGESIZE definition from 0x10000 to 0x40000. - choice prompt "Page size" default PPC_4K_PAGES @@ -769,17 +753,15 @@ config PPC_64K_PAGES select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64 config PPC_256K_PAGES - bool "256k page size" - depends on 44x && !STDBINUTILS + bool "256k page size (Requires non-standard binutils settings)" + depends on 44x && !PPC_47x help Make the page size 256k. - As the ELF standard only requires alignment to support page - sizes up to 64k, you will need to compile all of your user - space applications with a non-standard binutils settings - (see the STDBINUTILS description for details). - - Say N unless you know what you are doing. + The kernel will only be able to run applications that have been + compiled with '-zmax-page-size' set to 256K (the default is 64K) using + binutils later than 2.17.50.0.3, or by patching the ELF_MAXPAGESIZE + definition from 0x10000 to 0x40000 in older versions. 
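As a concrete illustration of the help text above, userspace for a 256k-page kernel would be linked with the larger maximum page size, e.g. (the cross-compiler name here is illustrative, not part of this patch):

	powerpc-linux-gnu-gcc -Wl,-z,max-page-size=0x40000 -o app app.c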
endchoice diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index b88900f4832f..ae084357994e 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -88,6 +88,7 @@ config PPC_IRQ_SOFT_MASK_DEBUG config XMON bool "Include xmon kernel debugger" depends on DEBUG_KERNEL + select CONSOLE_POLL if SERIAL_CPM_CONSOLE help Include in-kernel hooks for the xmon kernel monitor/debugger. Unless you are intending to debug the kernel, say N here. diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index b959fdaec713..5f8544cf724a 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -65,7 +65,7 @@ UTS_MACHINE := $(subst $(space),,$(machine-y)) ifdef CONFIG_PPC32 KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o else -ifeq ($(call ld-ifversion, -ge, 225000000, y),y) +ifeq ($(call ld-ifversion, -ge, 22500, y),y) # Have the linker provide sfpr if possible. # There is a corresponding test in arch/powerpc/lib/Makefile KBUILD_LDFLAGS_MODULE += --save-restore-funcs diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig index 72b8f93a9bdd..4bc549c6edc5 100644 --- a/arch/powerpc/configs/44x/akebono_defconfig +++ b/arch/powerpc/configs/44x/akebono_defconfig @@ -20,6 +20,7 @@ CONFIG_IRQ_ALL_CPUS=y # CONFIG_COMPACTION is not set # CONFIG_SUSPEND is not set CONFIG_NET=y +CONFIG_NETDEVICES=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y @@ -40,7 +41,9 @@ CONFIG_BLK_DEV_RAM_SIZE=35000 # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y # CONFIG_SATA_PMP is not set +CONFIG_SATA_AHCI_PLATFORM=y # CONFIG_ATA_SFF is not set # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_ADAPTEC is not set @@ -97,6 +100,8 @@ CONFIG_USB_OHCI_HCD=y # CONFIG_USB_OHCI_HCD_PCI is not set CONFIG_USB_STORAGE=y CONFIG_MMC=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_M41T80=y CONFIG_EXT2_FS=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 10c055eaebf0..6677ac0da45a 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -1071,7 +1071,6 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_DEBUG_INFO=y -CONFIG_UNUSED_SYMBOLS=y CONFIG_HEADERS_INSTALL=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c index a6e650a97d8f..ffedea7e4bef 100644 --- a/arch/powerpc/crypto/sha256-spe-glue.c +++ b/arch/powerpc/crypto/sha256-spe-glue.c @@ -129,7 +129,7 @@ static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data, src += bytes; len -= bytes; - }; + } memcpy((char *)sctx->buf, src, len); return 0; diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index d0b832cbbec8..939f3c94c8f3 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -56,35 +56,6 @@ int exit_vmx_usercopy(void); int enter_vmx_ops(void); void *exit_vmx_ops(void *dest); -/* Traps */ -long machine_check_early(struct pt_regs *regs); -long hmi_exception_realmode(struct pt_regs *regs); -void SMIException(struct pt_regs *regs); -void handle_hmi_exception(struct pt_regs *regs); -void instruction_breakpoint_exception(struct pt_regs *regs); -void RunModeException(struct pt_regs *regs); -void single_step_exception(struct pt_regs *regs); -void program_check_exception(struct pt_regs *regs); -void 
alignment_exception(struct pt_regs *regs); -void StackOverflow(struct pt_regs *regs); -void stack_overflow_exception(struct pt_regs *regs); -void kernel_fp_unavailable_exception(struct pt_regs *regs); -void altivec_unavailable_exception(struct pt_regs *regs); -void vsx_unavailable_exception(struct pt_regs *regs); -void fp_unavailable_tm(struct pt_regs *regs); -void altivec_unavailable_tm(struct pt_regs *regs); -void vsx_unavailable_tm(struct pt_regs *regs); -void facility_unavailable_exception(struct pt_regs *regs); -void TAUException(struct pt_regs *regs); -void altivec_assist_exception(struct pt_regs *regs); -void unrecoverable_exception(struct pt_regs *regs); -void kernel_bad_stack(struct pt_regs *regs); -void system_reset_exception(struct pt_regs *regs); -void machine_check_exception(struct pt_regs *regs); -void emulation_assist_interrupt(struct pt_regs *regs); -long do_slb_fault(struct pt_regs *regs, unsigned long ea); -void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err); - /* signals, syscalls and interrupts */ long sys_swapcontext(struct ucontext __user *old_ctx, struct ucontext __user *new_ctx, diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index a0117a9d5b06..73bc5d2c431d 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -95,12 +95,12 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end) addr &= 0xf0000000; /* align addr to start of segment */ barrier(); /* make sure thread.kuap is updated before playing with SRs */ while (addr < end) { - mtsrin(sr, addr); + mtsr(sr, addr); sr += 0x111; /* next VSID */ sr &= 0xf0ffffff; /* clear VSID overflow */ addr += 0x10000000; /* address of next segment */ } - isync(); /* Context sync required after mtsrin() */ + isync(); /* Context sync required after mtsr() */ } static __always_inline void allow_user_access(void __user *to, const void __user *from, @@ -122,7 +122,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user end = min(addr + size, TASK_SIZE); current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf); - kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end); /* Clear Ks */ + kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */ } static __always_inline void prevent_user_access(void __user *to, const void __user *from, @@ -151,7 +151,7 @@ static __always_inline void prevent_user_access(void __user *to, const void __us } current->thread.kuap = 0; - kuap_update_sr(mfsrin(addr) | SR_KS, addr, end); /* set Ks */ + kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* set Ks */ } static inline unsigned long prevent_user_access_return(void) diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h index 685c589e723f..b85f8e114a9c 100644 --- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h @@ -94,7 +94,7 @@ typedef struct { } mm_context_t; void update_bats(void); -static inline void cleanup_cpu_mmu_context(void) { }; +static inline void cleanup_cpu_mmu_context(void) { } /* patch sites */ extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2; diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h index 7d1ef7b9754e..8bd905050896 100644 --- a/arch/powerpc/include/asm/book3s/64/kup.h +++ b/arch/powerpc/include/asm/book3s/64/kup.h @@ -339,7 +339,7 @@ static inline unsigned long get_kuap(void) * This has no 
effect in terms of actually blocking things on hash, * so it doesn't break anything. */ - if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) + if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) return AMR_KUAP_BLOCKED; return mfspr(SPRN_AMR); @@ -347,7 +347,7 @@ static inline unsigned long get_kuap(void) static inline void set_kuap(unsigned long value) { - if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) + if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) return; /* diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 066b1d34c7bc..f911bdb68d8b 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -454,6 +454,8 @@ static inline unsigned long hpt_hash(unsigned long vpn, #define HPTE_NOHPTE_UPDATE 0x2 #define HPTE_USE_KERNEL_KEY 0x4 +long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long pa, + unsigned long rflags, unsigned long vflags, int psize, int ssize); extern int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, unsigned long flags, int ssize, int subpage_prot); @@ -467,6 +469,8 @@ extern int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long flags); extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap, unsigned long dsisr); +void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc); +int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr, unsigned long msr); int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, unsigned long flags, int ssize, unsigned int shift, unsigned int mmu_psize); @@ -521,6 +525,7 @@ void slb_dump_contents(struct slb_entry *slb_ptr); extern void slb_vmalloc_update(void); extern void slb_set_size(u16 size); +void preload_new_slb_context(unsigned long start, unsigned long sp); #endif /* __ASSEMBLY__ */ /* diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 995bbcdd0ef8..eace8c3f7b0a 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -239,7 +239,7 @@ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base, #ifdef CONFIG_PPC_PSERIES extern void radix_init_pseries(void); #else -static inline void radix_init_pseries(void) { }; +static inline void radix_init_pseries(void) { } #endif #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index a39886681629..058601efbc8a 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -388,11 +388,28 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ ({ \ - int __r; \ - __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ - __r; \ + __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ }) +/* + * On Book3S CPUs, clearing the accessed bit without a TLB flush + * doesn't cause data corruption. [ It could cause incorrect + * page aging and the (mistaken) reclaim of hot pages, but the + * chance of that should be relatively low. ] + * + * So as a performance optimization don't flush the TLB when + * clearing the accessed bit, it will eventually be flushed by + * a context switch or a VM operation anyway.
[ In the rare + * event of it not getting flushed for a long time the delay + * shouldn't really matter because there's no real memory + * pressure for swapout to react to. ] + */ +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +#define ptep_clear_flush_young ptep_test_and_clear_young + +#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +#define pmdp_clear_flush_young pmdp_test_and_clear_young + static inline int __pte_write(pte_t pte) { return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE)); diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index 94439e0cefc9..8b33601cdb9d 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -35,7 +35,7 @@ extern void radix__flush_pwc_lpid(unsigned int lpid); extern void radix__flush_all_lpid(unsigned int lpid); extern void radix__flush_all_lpid_guest(unsigned int lpid); #else -static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); }; +static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); } static inline void radix__flush_tlb_lpid_page(unsigned int lpid, unsigned long addr, unsigned long page_size) diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index dcb5c3839d2f..215973b4cb26 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h @@ -31,7 +31,7 @@ static inline void tlbiel_all(void) hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL); } #else -static inline void tlbiel_all(void) { BUG(); }; +static inline void tlbiel_all(void) { BUG(); } #endif static inline void tlbiel_all_lpid(bool radix) diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index 464f8ca8a5c9..d1635ffbb179 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -111,12 +111,15 @@ #ifndef __ASSEMBLY__ struct pt_regs; -extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); -extern void bad_page_fault(struct pt_regs *, unsigned long, int); -void __bad_page_fault(struct pt_regs *regs, unsigned long address, int sig); +long do_page_fault(struct pt_regs *); +long hash__do_page_fault(struct pt_regs *); +void bad_page_fault(struct pt_regs *, int); +void __bad_page_fault(struct pt_regs *regs, int sig); +void do_bad_page_fault_segv(struct pt_regs *regs); extern void _exception(int, struct pt_regs *, int, unsigned long); extern void _exception_pkey(struct pt_regs *, unsigned long, int); extern void die(const char *, struct pt_regs *, long); +void die_mce(const char *str, struct pt_regs *regs, long err); extern bool die_will_crash(void); extern void panic_flush_kmsg_start(void); extern void panic_flush_kmsg_end(void); diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index 138e46d8c04e..f63495109f63 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -8,6 +8,12 @@ #include <asm/cputable.h> #include <asm/cpu_has_feature.h> +/* + * This flag is used to indicate that the page pointed to by a pte is clean + * and does not require cleaning before returning it to the user. 
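+ *
+ * A minimal usage sketch (the same pattern appears in
+ * kvmppc_mmu_flush_icache() later in this diff):
+ *
+ *	if (!test_bit(PG_dcache_clean, &page->flags)) {
+ *		flush_dcache_icache_page(page);
+ *		set_bit(PG_dcache_clean, &page->flags);
+ *	}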
+ */ +#define PG_dcache_clean PG_arch_1 + #ifdef CONFIG_PPC_BOOK3S_64 /* * Book3s has no ptesync after setting a pte, so without this ptesync it's diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index ed75d1c318e3..504f7fe6711a 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -87,6 +87,17 @@ static notrace inline void account_cpu_user_exit(void) acct->starttime_user = tb; } +static notrace inline void account_stolen_time(void) +{ +#ifdef CONFIG_PPC_SPLPAR + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { + struct lppaca *lp = local_paca->lppaca_ptr; + + if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx))) + accumulate_stolen_time(); + } +#endif +} #endif /* __KERNEL__ */ #else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ @@ -96,5 +107,8 @@ static inline void account_cpu_user_entry(void) static inline void account_cpu_user_exit(void) { } +static notrace inline void account_stolen_time(void) +{ +} #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #endif /* __POWERPC_CPUTIME_H */ diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h index 7141ccea8c94..a92059964579 100644 --- a/arch/powerpc/include/asm/dcr-native.h +++ b/arch/powerpc/include/asm/dcr-native.h @@ -53,8 +53,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val) #define mfdcr(rn) \ ({unsigned int rval; \ if (__builtin_constant_p(rn) && rn < 1024) \ - asm volatile("mfdcr %0," __stringify(rn) \ - : "=r" (rval)); \ + asm volatile("mfdcr %0, %1" : "=r" (rval) \ + : "n" (rn)); \ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ rval = mfdcrx(rn); \ else \ @@ -64,8 +64,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val) #define mtdcr(rn, v) \ do { \ if (__builtin_constant_p(rn) && rn < 1024) \ - asm volatile("mtdcr " __stringify(rn) ",%0" \ - : : "r" (v)); \ + asm volatile("mtdcr %0, %1" \ + : : "n" (rn), "r" (v)); \ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ mtdcrx(rn, v); \ else \ diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h index ec57daf87f40..86a14736c76c 100644 --- a/arch/powerpc/include/asm/debug.h +++ b/arch/powerpc/include/asm/debug.h @@ -50,10 +50,6 @@ bool ppc_breakpoint_available(void); #ifdef CONFIG_PPC_ADV_DEBUG_REGS extern void do_send_trap(struct pt_regs *regs, unsigned long address, unsigned long error_code, int brkpt); -#else - -extern void do_break(struct pt_regs *regs, unsigned long address, - unsigned long error_code); #endif #endif /* _ASM_POWERPC_DEBUG_H */ diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index aa6a5ef5d483..7604673787d6 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h @@ -137,7 +137,7 @@ extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup; #ifdef CONFIG_PPC_PSERIES void pseries_probe_fw_features(void); #else -static inline void pseries_probe_fw_features(void) { }; +static inline void pseries_probe_fw_features(void) { } #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 013165e62618..f18c543bc01d 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -17,8 +17,6 @@ extern bool hugetlb_disabled; void hugetlbpage_init_default(void); -void flush_dcache_icache_hugepage(struct page *page); - int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len); diff --git 
a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index c98f5141e3fc..ed6086d57b22 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -535,9 +535,12 @@ struct h_cpu_char_result { u64 behaviour; }; -/* Register state for entering a nested guest with H_ENTER_NESTED */ +/* + * Register state for entering a nested guest with H_ENTER_NESTED. + * New member must be added at the end. + */ struct hv_guest_state { - u64 version; /* version of this structure layout */ + u64 version; /* version of this structure layout, must be first */ u32 lpid; u32 vcpu_token; /* These registers are hypervisor privileged (at least for writing) */ @@ -566,10 +569,26 @@ struct hv_guest_state { u64 pidr; u64 cfar; u64 ppr; + /* Version 1 ends here */ + u64 dawr1; + u64 dawrx1; + /* Version 2 ends here */ }; /* Latest version of hv_guest_state structure */ -#define HV_GUEST_STATE_VERSION 1 +#define HV_GUEST_STATE_VERSION 2 + +static inline int hv_guest_state_size(unsigned int version) +{ + switch (version) { + case 1: + return offsetofend(struct hv_guest_state, ppr); + case 2: + return offsetofend(struct hv_guest_state, dawrx1); + default: + return -1; + } +} /* * From the document "H_GetPerformanceCounterInfo Interface" v1.07 diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 0363734ff56e..56a98936a6a9 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -38,6 +38,8 @@ #define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE) #endif +#endif /* CONFIG_PPC64 */ + /* * flags for paca->irq_soft_mask */ @@ -46,18 +48,56 @@ #define IRQS_PMI_DISABLED 2 #define IRQS_ALL_DISABLED (IRQS_DISABLED | IRQS_PMI_DISABLED) -#endif /* CONFIG_PPC64 */ - #ifndef __ASSEMBLY__ -extern void replay_system_reset(void); -extern void replay_soft_interrupts(void); +static inline void __hard_irq_enable(void) +{ + if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x)) + wrtee(MSR_EE); + else if (IS_ENABLED(CONFIG_PPC_8xx)) + wrtspr(SPRN_EIE); + else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) + __mtmsrd(MSR_EE | MSR_RI, 1); + else + mtmsr(mfmsr() | MSR_EE); +} -extern void timer_interrupt(struct pt_regs *); -extern void timer_broadcast_interrupt(void); -extern void performance_monitor_exception(struct pt_regs *regs); -extern void WatchdogException(struct pt_regs *regs); -extern void unknown_exception(struct pt_regs *regs); +static inline void __hard_irq_disable(void) +{ + if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x)) + wrtee(0); + else if (IS_ENABLED(CONFIG_PPC_8xx)) + wrtspr(SPRN_EID); + else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) + __mtmsrd(MSR_RI, 1); + else + mtmsr(mfmsr() & ~MSR_EE); +} + +static inline void __hard_EE_RI_disable(void) +{ + if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x)) + wrtee(0); + else if (IS_ENABLED(CONFIG_PPC_8xx)) + wrtspr(SPRN_NRI); + else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) + __mtmsrd(0, 1); + else + mtmsr(mfmsr() & ~(MSR_EE | MSR_RI)); +} + +static inline void __hard_RI_enable(void) +{ + if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x)) + return; + + if (IS_ENABLED(CONFIG_PPC_8xx)) + wrtspr(SPRN_EID); + else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) + __mtmsrd(MSR_RI, 1); + else + mtmsr(mfmsr() | MSR_RI); +} #ifdef CONFIG_PPC64 #include <asm/paca.h> @@ -221,18 +261,6 @@ static inline bool arch_irqs_disabled(void) #endif /* CONFIG_PPC_BOOK3S */ -#ifdef CONFIG_PPC_BOOK3E -#define __hard_irq_enable() wrtee(MSR_EE) -#define __hard_irq_disable() wrtee(0) -#define 
__hard_EE_RI_disable() wrtee(0) -#define __hard_RI_enable() do { } while (0) -#else -#define __hard_irq_enable() __mtmsrd(MSR_EE|MSR_RI, 1) -#define __hard_irq_disable() __mtmsrd(MSR_RI, 1) -#define __hard_EE_RI_disable() __mtmsrd(0, 1) -#define __hard_RI_enable() __mtmsrd(MSR_RI, 1) -#endif - #define hard_irq_disable() do { \ unsigned long flags; \ __hard_irq_disable(); \ @@ -296,8 +324,17 @@ extern void irq_set_pending_from_srr1(unsigned long srr1); extern void force_external_irq_replay(void); +static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val) +{ + regs->softe = val; +} #else /* CONFIG_PPC64 */ +static inline notrace unsigned long irq_soft_mask_return(void) +{ + return 0; +} + static inline unsigned long arch_local_save_flags(void) { return mfmsr(); @@ -327,22 +364,12 @@ static inline unsigned long arch_local_irq_save(void) static inline void arch_local_irq_disable(void) { - if (IS_ENABLED(CONFIG_BOOKE)) - wrtee(0); - else if (IS_ENABLED(CONFIG_PPC_8xx)) - wrtspr(SPRN_EID); - else - mtmsr(mfmsr() & ~MSR_EE); + __hard_irq_disable(); } static inline void arch_local_irq_enable(void) { - if (IS_ENABLED(CONFIG_BOOKE)) - wrtee(MSR_EE); - else if (IS_ENABLED(CONFIG_PPC_8xx)) - wrtspr(SPRN_EIE); - else - mtmsr(mfmsr() | MSR_EE); + __hard_irq_enable(); } static inline bool arch_irqs_disabled_flags(unsigned long flags) @@ -364,6 +391,9 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs) static inline void may_hard_irq_enable(void) { } +static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val) +{ +} #endif /* CONFIG_PPC64 */ #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h new file mode 100644 index 000000000000..aedfba29e43a --- /dev/null +++ b/arch/powerpc/include/asm/interrupt.h @@ -0,0 +1,449 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_POWERPC_INTERRUPT_H +#define _ASM_POWERPC_INTERRUPT_H + +#include <linux/context_tracking.h> +#include <linux/hardirq.h> +#include <asm/cputime.h> +#include <asm/ftrace.h> +#include <asm/kprobes.h> +#include <asm/runlatch.h> + +struct interrupt_state { +#ifdef CONFIG_PPC_BOOK3E_64 + enum ctx_state ctx_state; +#endif +}; + +static inline void booke_restore_dbcr0(void) +{ +#ifdef CONFIG_PPC_ADV_DEBUG_REGS + unsigned long dbcr0 = current->thread.debug.dbcr0; + + if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) { + mtspr(SPRN_DBSR, -1); + mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]); + } +#endif +} + +static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state) +{ + /* + * Book3E reconciles irq soft mask in asm + */ +#ifdef CONFIG_PPC_BOOK3S_64 + if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED) + trace_hardirqs_off(); + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + + if (user_mode(regs)) { + CT_WARN_ON(ct_state() != CONTEXT_USER); + user_exit_irqoff(); + + account_cpu_user_entry(); + account_stolen_time(); + } else { + /* + * CT_WARN_ON comes here via program_check_exception, + * so avoid recursion. 
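+ * (0x700 is the program check vector: a WARN fires a trap instruction,
+ * which itself raises a program check, hence the check for that trap
+ * number just below.)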
+ */ + if (TRAP(regs) != 0x700) + CT_WARN_ON(ct_state() != CONTEXT_KERNEL); + } +#endif + +#ifdef CONFIG_PPC_BOOK3E_64 + state->ctx_state = exception_enter(); + if (user_mode(regs)) + account_cpu_user_entry(); +#endif +} + +/* + * Care should be taken to note that interrupt_exit_prepare and + * interrupt_async_exit_prepare do not necessarily return immediately to + * regs context (e.g., if regs is usermode, we don't necessarily return to + * user mode). Other interrupts might be taken between here and return, + * context switch / preemption may occur in the exit path after this, or a + * signal may be delivered, etc. + * + * The real interrupt exit code is platform specific, e.g., + * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s. + * + * However interrupt_nmi_exit_prepare does return directly to regs, because + * NMIs do not do "exit work" or replay soft-masked interrupts. + */ +static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state) +{ +#ifdef CONFIG_PPC_BOOK3E_64 + exception_exit(state->ctx_state); +#endif + + /* + * Book3S exits to user via interrupt_exit_user_prepare(), which does + * context tracking, which is a cleaner way to handle PREEMPT=y + * and avoid context entry/exit in e.g., preempt_schedule_irq(), + * which is likely to be where the core code wants to end up. + * + * The above comment explains why we can't do the + * + * if (user_mode(regs)) + * user_exit_irqoff(); + * + * sequence here. + */ +} + +static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state) +{ +#ifdef CONFIG_PPC_BOOK3S_64 + if (cpu_has_feature(CPU_FTR_CTRL) && + !test_thread_local_flags(_TLF_RUNLATCH)) + __ppc64_runlatch_on(); +#endif + + interrupt_enter_prepare(regs, state); + irq_enter(); +} + +static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state) +{ + irq_exit(); + interrupt_exit_prepare(regs, state); +} + +struct interrupt_nmi_state { +#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC_BOOK3S_64 + u8 irq_soft_mask; + u8 irq_happened; +#endif + u8 ftrace_enabled; +#endif +}; + +static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state) +{ +#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC_BOOK3S_64 + state->irq_soft_mask = local_paca->irq_soft_mask; + state->irq_happened = local_paca->irq_happened; + + /* + * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does + * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile + * because that goes through irq tracing which we don't want in NMI. + */ + local_paca->irq_soft_mask = IRQS_ALL_DISABLED; + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + + /* Don't do any per-CPU operations until interrupt state is fixed */ +#endif + /* Allow DEC and PMI to be traced when they are soft-NMI */ + if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) { + state->ftrace_enabled = this_cpu_get_ftrace_enabled(); + this_cpu_set_ftrace_enabled(0); + } +#endif + + /* + * Do not use nmi_enter() for pseries hash guest taking a real-mode + * NMI because not everything it touches is within the RMA limit.
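+ * Concretely, the condition below takes the nmi_enter() path in every
+ * case except a hash (non-radix) LPAR guest with translation off
+ * (MSR[DR] clear), i.e. a real-mode NMI on pseries.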
+ */ + if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || + !firmware_has_feature(FW_FEATURE_LPAR) || + radix_enabled() || (mfmsr() & MSR_DR)) + nmi_enter(); +} + +static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state) +{ + if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || + !firmware_has_feature(FW_FEATURE_LPAR) || + radix_enabled() || (mfmsr() & MSR_DR)) + nmi_exit(); + +#ifdef CONFIG_PPC64 + if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) + this_cpu_set_ftrace_enabled(state->ftrace_enabled); + +#ifdef CONFIG_PPC_BOOK3S_64 + /* Check we didn't change the pending interrupt mask. */ + WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened); + local_paca->irq_happened = state->irq_happened; + local_paca->irq_soft_mask = state->irq_soft_mask; +#endif +#endif +} + +/* + * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each + * function definition. The reason for this is the noinstr section is placed + * after the main text section, i.e., very far away from the interrupt entry + * asm. That creates problems with fitting linker stubs when building large + * kernels. + */ +#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address + +/** + * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function + * @func: Function name of the entry point + * @returns: Returns a value back to asm caller + */ +#define DECLARE_INTERRUPT_HANDLER_RAW(func) \ + __visible long func(struct pt_regs *regs) + +/** + * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function + * @func: Function name of the entry point + * @returns: Returns a value back to asm caller + * + * @func is called from ASM entry code. + * + * This is a plain function which does no tracing, reconciling, etc. + * The macro is written so it acts as function definition. Append the + * body with a pair of curly brackets. + * + * raw interrupt handlers must not enable or disable interrupts, or + * schedule, tracing and instrumentation (ftrace, lockdep, etc) would + * not be advisable either, although may be possible in a pinch, the + * trace will look odd at least. + * + * A raw handler may call one of the other interrupt handler functions + * to be converted into that interrupt context without these restrictions. + * + * On PPC64, _RAW handlers may return with fast_interrupt_return. + * + * Specific handlers may have additional restrictions. + */ +#define DEFINE_INTERRUPT_HANDLER_RAW(func) \ +static __always_inline long ____##func(struct pt_regs *regs); \ + \ +interrupt_handler long func(struct pt_regs *regs) \ +{ \ + long ret; \ + \ + ret = ____##func (regs); \ + \ + return ret; \ +} \ +NOKPROBE_SYMBOL(func); \ + \ +static __always_inline long ____##func(struct pt_regs *regs) + +/** + * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function + * @func: Function name of the entry point + */ +#define DECLARE_INTERRUPT_HANDLER(func) \ + __visible void func(struct pt_regs *regs) + +/** + * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function + * @func: Function name of the entry point + * + * @func is called from ASM entry code. + * + * The macro is written so it acts as function definition. Append the + * body with a pair of curly brackets. 
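+ *
+ * For example, a usage sketch (do_break is declared with
+ * DECLARE_INTERRUPT_HANDLER under the process.c section later in this
+ * file):
+ *
+ *	DEFINE_INTERRUPT_HANDLER(do_break)
+ *	{
+ *		... handler body ...
+ *	}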
+ */ +#define DEFINE_INTERRUPT_HANDLER(func) \ +static __always_inline void ____##func(struct pt_regs *regs); \ + \ +interrupt_handler void func(struct pt_regs *regs) \ +{ \ + struct interrupt_state state; \ + \ + interrupt_enter_prepare(regs, &state); \ + \ + ____##func (regs); \ + \ + interrupt_exit_prepare(regs, &state); \ +} \ +NOKPROBE_SYMBOL(func); \ + \ +static __always_inline void ____##func(struct pt_regs *regs) + +/** + * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function + * @func: Function name of the entry point + * @returns: Returns a value back to asm caller + */ +#define DECLARE_INTERRUPT_HANDLER_RET(func) \ + __visible long func(struct pt_regs *regs) + +/** + * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function + * @func: Function name of the entry point + * @returns: Returns a value back to asm caller + * + * @func is called from ASM entry code. + * + * The macro is written so it acts as function definition. Append the + * body with a pair of curly brackets. + */ +#define DEFINE_INTERRUPT_HANDLER_RET(func) \ +static __always_inline long ____##func(struct pt_regs *regs); \ + \ +interrupt_handler long func(struct pt_regs *regs) \ +{ \ + struct interrupt_state state; \ + long ret; \ + \ + interrupt_enter_prepare(regs, &state); \ + \ + ret = ____##func (regs); \ + \ + interrupt_exit_prepare(regs, &state); \ + \ + return ret; \ +} \ +NOKPROBE_SYMBOL(func); \ + \ +static __always_inline long ____##func(struct pt_regs *regs) + +/** + * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function + * @func: Function name of the entry point + */ +#define DECLARE_INTERRUPT_HANDLER_ASYNC(func) \ + __visible void func(struct pt_regs *regs) + +/** + * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function + * @func: Function name of the entry point + * + * @func is called from ASM entry code. + * + * The macro is written so it acts as function definition. Append the + * body with a pair of curly brackets. + */ +#define DEFINE_INTERRUPT_HANDLER_ASYNC(func) \ +static __always_inline void ____##func(struct pt_regs *regs); \ + \ +interrupt_handler void func(struct pt_regs *regs) \ +{ \ + struct interrupt_state state; \ + \ + interrupt_async_enter_prepare(regs, &state); \ + \ + ____##func (regs); \ + \ + interrupt_async_exit_prepare(regs, &state); \ +} \ +NOKPROBE_SYMBOL(func); \ + \ +static __always_inline void ____##func(struct pt_regs *regs) + +/** + * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function + * @func: Function name of the entry point + * @returns: Returns a value back to asm caller + */ +#define DECLARE_INTERRUPT_HANDLER_NMI(func) \ + __visible long func(struct pt_regs *regs) + +/** + * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function + * @func: Function name of the entry point + * @returns: Returns a value back to asm caller + * + * @func is called from ASM entry code. + * + * The macro is written so it acts as function definition. Append the + * body with a pair of curly brackets. 
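+ *
+ * For example, a usage sketch (machine_check_early is declared with
+ * DECLARE_INTERRUPT_HANDLER_NMI under the mce.c section later in this
+ * file; the long return value is passed back to the asm caller):
+ *
+ *	DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
+ *	{
+ *		long handled = 0;
+ *		... early machine check handling ...
+ *		return handled;
+ *	}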
+ */ +#define DEFINE_INTERRUPT_HANDLER_NMI(func) \ +static __always_inline long ____##func(struct pt_regs *regs); \ + \ +interrupt_handler long func(struct pt_regs *regs) \ +{ \ + struct interrupt_nmi_state state; \ + long ret; \ + \ + interrupt_nmi_enter_prepare(regs, &state); \ + \ + ret = ____##func (regs); \ + \ + interrupt_nmi_exit_prepare(regs, &state); \ + \ + return ret; \ +} \ +NOKPROBE_SYMBOL(func); \ + \ +static __always_inline long ____##func(struct pt_regs *regs) + + +/* Interrupt handlers */ +/* kernel/traps.c */ +DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception); +#ifdef CONFIG_PPC_BOOK3S_64 +DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception); +#else +DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception); +#endif +DECLARE_INTERRUPT_HANDLER(SMIException); +DECLARE_INTERRUPT_HANDLER(handle_hmi_exception); +DECLARE_INTERRUPT_HANDLER(unknown_exception); +DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception); +DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception); +DECLARE_INTERRUPT_HANDLER(RunModeException); +DECLARE_INTERRUPT_HANDLER(single_step_exception); +DECLARE_INTERRUPT_HANDLER(program_check_exception); +DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt); +DECLARE_INTERRUPT_HANDLER(alignment_exception); +DECLARE_INTERRUPT_HANDLER(StackOverflow); +DECLARE_INTERRUPT_HANDLER(stack_overflow_exception); +DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception); +DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception); +DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception); +DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception); +DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm); +DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm); +DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm); +DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi); +DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async); +DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception); +DECLARE_INTERRUPT_HANDLER(DebugException); +DECLARE_INTERRUPT_HANDLER(altivec_assist_exception); +DECLARE_INTERRUPT_HANDLER(CacheLockingException); +DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException); +DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException); +DECLARE_INTERRUPT_HANDLER(unrecoverable_exception); +DECLARE_INTERRUPT_HANDLER(WatchdogException); +DECLARE_INTERRUPT_HANDLER(kernel_bad_stack); + +/* slb.c */ +DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault); +DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault); + +/* hash_utils.c */ +DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault); + +/* fault.c */ +DECLARE_INTERRUPT_HANDLER_RET(do_page_fault); +DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv); + +/* process.c */ +DECLARE_INTERRUPT_HANDLER(do_break); + +/* time.c */ +DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt); + +/* mce.c */ +DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early); +DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode); + +DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException); + +void replay_system_reset(void); +void replay_soft_interrupts(void); + +static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs) +{ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); +} + +#endif /* _ASM_POWERPC_INTERRUPT_H */ diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index 4f983ca4030a..f3f264e441a7 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -37,8 +37,6 @@ extern int distribute_irqs; struct pt_regs; -#define __ARCH_HAS_DO_SOFTIRQ - #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) /* * Per-cpu stacks 
for handling critical, debug and machine check diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 55d6ede30c19..9ab344d29a54 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -136,6 +136,7 @@ int load_crashdump_segments_ppc64(struct kimage *image, int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, const void *fdt, unsigned long kernel_load_addr, unsigned long fdt_load_addr); +unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image); int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, unsigned long initrd_load_addr, unsigned long initrd_len, const char *cmdline); diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index bf221a2a523e..7ec21af49a45 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -91,6 +91,7 @@ static __always_inline void setup_kup(void) static inline void allow_read_from_user(const void __user *from, unsigned long size) { + barrier_nospec(); allow_user_access(NULL, from, size, KUAP_READ); } @@ -102,6 +103,7 @@ static inline void allow_write_to_user(void __user *to, unsigned long size) static inline void allow_read_write_user(void __user *to, const void __user *from, unsigned long size) { + barrier_nospec(); allow_user_access(to, from, size, KUAP_READ_WRITE); } diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index d32ec9ae73bd..2f5f919f6cd3 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -277,6 +277,13 @@ extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd); extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu); extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu); +long kvmppc_read_intr(void); +void kvmppc_bad_interrupt(struct pt_regs *regs); +void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip); +void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip); +void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr); +void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags); + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu); void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 078f4648ea27..b6d31bff5209 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -74,16 +74,6 @@ struct kvm_split_mode { u8 do_nap; u8 napped[MAX_SMT_THREADS]; struct kvmppc_vcore *vc[MAX_SUBCORES]; - /* Bits for changing lpcr on P9 */ - unsigned long lpcr_req; - unsigned long lpidr_req; - unsigned long host_lpcr; - u32 do_set; - u32 do_restore; - union { - u32 allphases; - u8 phase[4]; - } lpcr_sync; }; /* @@ -110,7 +100,6 @@ struct kvmppc_host_state { u8 hwthread_state; u8 host_ipi; u8 ptid; /* thread number within subcore when split */ - u8 tid; /* thread number within whole core */ u8 fake_suspend; struct kvm_vcpu *kvm_vcpu; struct kvmppc_vcore *kvm_vcore; diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index d67a470e95a3..05fb00d37609 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -28,7 +28,6 @@ #define KVM_MAX_VCPUS NR_CPUS #define KVM_MAX_VCORES NR_CPUS -#define KVM_USER_MEM_SLOTS 512 #include <asm/cputhreads.h> @@ -307,6 +306,7 @@ struct kvm_arch { u8 svm_enabled; bool threads_indep; bool nested_enable; + bool dawr1_enabled; pgd_t *pgtable; 
u64 process_table; struct dentry *debugfs_dir; @@ -584,8 +584,10 @@ struct kvm_vcpu_arch { u32 ctrl; u32 dabrx; ulong dabr; - ulong dawr; - ulong dawrx; + ulong dawr0; + ulong dawrx0; + ulong dawr1; + ulong dawrx1; ulong ciabr; ulong cfar; ulong ppr; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 0a056c64c317..8aacd76bb702 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -314,6 +314,8 @@ struct kvmppc_ops { int size); int (*enable_svm)(struct kvm *kvm); int (*svm_off)(struct kvm *kvm); + int (*enable_dawr1)(struct kvm *kvm); + bool (*hash_v3_possible)(void); }; extern struct kvmppc_ops *kvmppc_hv_ops; @@ -627,9 +629,9 @@ extern int h_ipi_redirect; static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap( struct kvm *kvm) { return NULL; } -static inline void kvmppc_alloc_host_rm_ops(void) {}; -static inline void kvmppc_free_host_rm_ops(void) {}; -static inline void kvmppc_free_pimap(struct kvm *kvm) {}; +static inline void kvmppc_alloc_host_rm_ops(void) {} +static inline void kvmppc_free_host_rm_ops(void) {} +static inline void kvmppc_free_pimap(struct kvm *kvm) {} static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall) { return 0; } static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu) @@ -881,9 +883,9 @@ static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn) /* Clear i-cache for new pages */ page = pfn_to_page(pfn); - if (!test_bit(PG_arch_1, &page->flags)) { + if (!test_bit(PG_dcache_clean, &page->flags)) { flush_dcache_icache_page(page); - set_bit(PG_arch_1, &page->flags); + set_bit(PG_dcache_clean, &page->flags); } } diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index cf6ebbc16cb4..764f2732a821 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -59,6 +59,9 @@ struct machdep_calls { int (*pcibios_root_bridge_prepare)(struct pci_host_bridge *bridge); + /* finds all the pci_controllers present at boot */ + void (*discover_phbs)(void); + /* To setup PHBs when using automatic OF platform driver for PCI */ int (*pci_setup_phb)(struct pci_controller *host); diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h index e6c27ae843dc..331d944280b8 100644 --- a/arch/powerpc/include/asm/mce.h +++ b/arch/powerpc/include/asm/mce.h @@ -204,7 +204,18 @@ struct mce_error_info { bool ignore_event; }; -#define MAX_MC_EVT 100 +#define MAX_MC_EVT 10 + +struct mce_info { + int mce_nest_count; + struct machine_check_event mce_event[MAX_MC_EVT]; + /* Queue for delayed MCE events. */ + int mce_queue_count; + struct machine_check_event mce_event_queue[MAX_MC_EVT]; + /* Queue for delayed MCE UE events. 
*/ + int mce_ue_count; + struct machine_check_event mce_ue_event_queue[MAX_MC_EVT]; +}; /* Release flags for get_mce_event() */ #define MCE_EVENT_RELEASE true @@ -234,4 +245,11 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs); long __machine_check_early_realmode_p9(struct pt_regs *regs); long __machine_check_early_realmode_p10(struct pt_regs *regs); #endif /* CONFIG_PPC_BOOK3S_64 */ + +#ifdef CONFIG_PPC_BOOK3S_64 +void mce_init(void); +#else +static inline void mce_init(void) { }; +#endif /* CONFIG_PPC_BOOK3S_64 */ + #endif /* __ASM_PPC64_MCE_H__ */ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 80b27f5d9648..607168b1aef4 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -228,7 +228,7 @@ enum { #define MMU_FTRS_ALWAYS 0 #endif -static inline bool early_mmu_has_feature(unsigned long feature) +static __always_inline bool early_mmu_has_feature(unsigned long feature) { if (MMU_FTRS_ALWAYS & feature) return true; @@ -286,7 +286,7 @@ static inline void mmu_feature_keys_init(void) } -static inline bool mmu_has_feature(unsigned long feature) +static __always_inline bool mmu_has_feature(unsigned long feature) { return early_mmu_has_feature(feature); } diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index d5821834dba9..652ce85f9410 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -282,9 +282,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, } #define pkey_mm_init(mm) -#define thread_pkey_regs_save(thread) -#define thread_pkey_regs_restore(new_thread, old_thread) -#define thread_pkey_regs_init(thread) #define arch_dup_pkeys(oldmm, mm) static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags) diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h index 84b4cfe73edd..160abcb8e9fa 100644 --- a/arch/powerpc/include/asm/nmi.h +++ b/arch/powerpc/include/asm/nmi.h @@ -4,6 +4,7 @@ #ifdef CONFIG_PPC_WATCHDOG extern void arch_touch_nmi_watchdog(void); +long soft_nmi_interrupt(struct pt_regs *regs); #else static inline void arch_touch_nmi_watchdog(void) {} #endif diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 9454d29ff4b4..ec18ac818e3a 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -29,6 +29,7 @@ #include <asm/hmi.h> #include <asm/cpuidle.h> #include <asm/atomic.h> +#include <asm/mce.h> #include <asm-generic/mmiowb_types.h> @@ -108,8 +109,7 @@ struct paca_struct { */ /* used for most interrupts/exceptions */ u64 exgen[EX_SIZE] __attribute__((aligned(0x80))); - u64 exslb[EX_SIZE]; /* used for SLB/segment table misses - * on the linear mapping */ + /* SLB related definitions */ u16 vmalloc_sllp; u8 slb_cache_ptr; @@ -273,6 +273,9 @@ struct paca_struct { #ifdef CONFIG_MMIOWB struct mmiowb_state mmiowb_state; #endif +#ifdef CONFIG_PPC_BOOK3S_64 + struct mce_info *mce_info; +#endif /* CONFIG_PPC_BOOK3S_64 */ } ____cacheline_aligned; extern void copy_mm_to_paca(struct mm_struct *mm); @@ -285,9 +288,9 @@ extern void free_unused_pacas(void); #else /* CONFIG_PPC64 */ -static inline void allocate_paca_ptrs(void) { }; -static inline void allocate_paca(int cpu) { }; -static inline void free_unused_pacas(void) { }; +static inline void allocate_paca_ptrs(void) { } +static inline void allocate_paca(int cpu) { } +static inline void free_unused_pacas(void) { } #endif /* CONFIG_PPC64 */ diff --git 
a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h index edc08f04aef7..5d1726bb28e7 100644 --- a/arch/powerpc/include/asm/paravirt.h +++ b/arch/powerpc/include/asm/paravirt.h @@ -10,6 +10,7 @@ #endif #ifdef CONFIG_PPC_SPLPAR +#include <linux/smp.h> #include <asm/kvm_guest.h> #include <asm/cputhreads.h> diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h index daec64d41b44..164e910bf654 100644 --- a/arch/powerpc/include/asm/perf_event.h +++ b/arch/powerpc/include/asm/perf_event.h @@ -14,6 +14,7 @@ #include <asm/perf_event_server.h> #else static inline bool is_sier_available(void) { return false; } +static inline unsigned long get_pmcs_ext_regs(int idx) { return 0; } #endif #ifdef CONFIG_FSL_EMB_PERF_EVENT @@ -40,6 +41,7 @@ static inline bool is_sier_available(void) { return false; } /* To support perf_regs sier update */ extern bool is_sier_available(void); +extern unsigned long get_pmcs_ext_regs(int idx); /* To define perf extended regs mask value */ extern u64 PERF_REG_EXTENDED_MASK; #define PERF_REG_EXTENDED_MASK PERF_REG_EXTENDED_MASK diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 3b7baba01c92..00e7e671bb4b 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -36,9 +36,9 @@ struct power_pmu { unsigned long test_adder; int (*compute_mmcr)(u64 events[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, - struct perf_event *pevents[]); + struct perf_event *pevents[], u32 flags); int (*get_constraint)(u64 event_id, unsigned long *mskp, - unsigned long *valp); + unsigned long *valp, u64 event_config1); int (*get_alternatives)(u64 event_id, unsigned int flags, u64 alt[]); void (*get_mem_data_src)(union perf_mem_data_src *dsrc, @@ -83,6 +83,7 @@ struct power_pmu { #define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */ #define PPMU_ARCH_31 0x00000200 /* Has MMCR3, SIER2 and SIER3 */ #define PPMU_P10_DD1 0x00000400 /* Is power10 DD1 processor version */ +#define PPMU_HAS_ATTR_CONFIG1 0x00000800 /* Using config1 attribute */ /* * Values for flags to get_alternatives() diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index f7613f43c9cf..4eed82172e33 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -162,6 +162,9 @@ static inline bool is_ioremap_addr(const void *x) return addr >= IOREMAP_BASE && addr < IOREMAP_END; } + +struct seq_file; +void arch_report_meminfo(struct seq_file *m); #endif /* CONFIG_PPC64 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h index a7951049e129..59a2c7dbc78f 100644 --- a/arch/powerpc/include/asm/pkeys.h +++ b/arch/powerpc/include/asm/pkeys.h @@ -169,10 +169,4 @@ static inline bool arch_pkeys_enabled(void) } extern void pkey_mm_init(struct mm_struct *mm); -extern bool arch_supports_pkeys(int cap); -extern unsigned int arch_usable_pkeys(void); -extern void thread_pkey_regs_save(struct thread_struct *thread); -extern void thread_pkey_regs_restore(struct thread_struct *new_thread, - struct thread_struct *old_thread); -extern void thread_pkey_regs_init(struct thread_struct *thread); #endif /*_ASM_POWERPC_KEYS_H */ diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h index 7f4be5a05eb3..2b9edbf6e929 100644 --- a/arch/powerpc/include/asm/ppc-pci.h +++ b/arch/powerpc/include/asm/ppc-pci.h @@ -13,10 +13,6 @@ extern unsigned 
long isa_io_base; -extern void pci_setup_phb_io(struct pci_controller *hose, int primary); -extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary); - - extern struct list_head hose_list; extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */ @@ -32,9 +28,6 @@ struct pci_dn; void *pci_traverse_device_nodes(struct device_node *start, void *(*fn)(struct device_node *, void *), void *data); -void *traverse_pci_dn(struct pci_dn *root, - void *(*fn)(struct pci_dn *, void *), - void *data); extern void pci_devs_phb_init_dynamic(struct pci_controller *phb); /* From rtas_pci.h */ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index cc1bca571332..3dceb64fc9af 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -25,7 +25,6 @@ #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) #define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb) -#define ACCOUNT_STOLEN_TIME #else #define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) \ MFTB(ra); /* get timebase */ \ @@ -44,29 +43,6 @@ PPC_LL ra, ACCOUNT_SYSTEM_TIME(ptr); \ add ra,ra,rb; /* add on to system time */ \ PPC_STL ra, ACCOUNT_SYSTEM_TIME(ptr) - -#ifdef CONFIG_PPC_SPLPAR -#define ACCOUNT_STOLEN_TIME \ -BEGIN_FW_FTR_SECTION; \ - beq 33f; \ - /* from user - see if there are any DTL entries to process */ \ - ld r10,PACALPPACAPTR(r13); /* get ptr to VPA */ \ - ld r11,PACA_DTL_RIDX(r13); /* get log read index */ \ - addi r10,r10,LPPACA_DTLIDX; \ - LDX_BE r10,0,r10; /* get log write index */ \ - cmpd cr1,r11,r10; \ - beq+ cr1,33f; \ - bl accumulate_stolen_time; \ - ld r12,_MSR(r1); \ - andi. r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \ -33: \ -END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) - -#else /* CONFIG_PPC_SPLPAR */ -#define ACCOUNT_STOLEN_TIME - -#endif /* CONFIG_PPC_SPLPAR */ - #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ /* diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 58f9dc060a7b..975ba260006a 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -70,6 +70,9 @@ struct pt_regs }; #endif + +#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)) + #ifdef __powerpc64__ /* @@ -229,6 +232,11 @@ static inline bool trap_is_scv(struct pt_regs *regs) return (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x3000); } +static inline bool trap_is_unsupported_scv(struct pt_regs *regs) +{ + return IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x7ff0; +} + static inline bool trap_is_syscall(struct pt_regs *regs) { return (trap_is_scv(regs) || TRAP(regs) == 0xc00); diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e40a921d78f9..da103e92c112 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1375,6 +1375,7 @@ #define mtmsr(v) asm volatile("mtmsr %0" : \ : "r" ((unsigned long)(v)) \ : "memory") +#define __mtmsrd(v, l) BUILD_BUG() #define __MTMSR "mtmsr" #endif @@ -1413,13 +1414,24 @@ static inline void msr_check_and_clear(unsigned long bits) } #ifdef CONFIG_PPC32 -#define mfsrin(v) ({unsigned int rval; \ - asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \ - rval;}) +static inline u32 mfsr(u32 idx) +{ + u32 val; + + if (__builtin_constant_p(idx)) + asm volatile("mfsr %0, %1" : "=r" (val): "i" (idx >> 28)); + else + asm volatile("mfsrin %0, %1" : "=r" (val): "r" (idx)); -static inline void mtsrin(u32 val, u32 idx) + return val; +} + +static inline void 
mtsr(u32 val, u32 idx) { - asm volatile("mtsrin %0, %1" : : "r" (val), "r" (idx)); + if (__builtin_constant_p(idx)) + asm volatile("mtsr %1, %0" : : "r" (val), "i" (idx >> 28)); + else + asm volatile("mtsrin %0, %1" : : "r" (val), "r" (idx)); } #endif diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 262782f08fd4..17b8dcd9a40d 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -691,6 +691,9 @@ #define mttmr(rn, v) asm volatile(MTTMR(rn, %0) : \ : "r" ((unsigned long)(v)) \ : "memory") + +extern unsigned long global_dbcr0[]; + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_POWERPC_REG_BOOKE_H__ */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 332e1000ca0f..658448ca5b8a 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -369,7 +369,7 @@ void rtas_initialize(void); #else static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;} static inline void pSeries_coalesce_init(void) { } -static inline void rtas_initialize(void) { }; +static inline void rtas_initialize(void) { } #endif extern int call_rtas(const char *, int, int, unsigned long *, ...); diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index a466749703f1..e89bfebd4e00 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -58,7 +58,7 @@ void do_rfi_flush_fixups(enum l1d_flush_type types); #ifdef CONFIG_PPC_BARRIER_NOSPEC void setup_barrier_nospec(void); #else -static inline void setup_barrier_nospec(void) { }; +static inline void setup_barrier_nospec(void) { } #endif void do_uaccess_flush_fixups(enum l1d_flush_type types); void do_entry_flush_fixups(enum l1d_flush_type types); @@ -68,13 +68,13 @@ extern bool barrier_nospec_enabled; #ifdef CONFIG_PPC_BARRIER_NOSPEC void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); #else -static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }; +static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { } #endif #ifdef CONFIG_PPC_FSL_BOOK3E void setup_spectre_v2(void); #else -static inline void setup_spectre_v2(void) {}; +static inline void setup_spectre_v2(void) {} #endif void do_btb_flush_fixups(void); diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h index 9c3c30534333..5b862de29dff 100644 --- a/arch/powerpc/include/asm/simple_spinlock.h +++ b/arch/powerpc/include/asm/simple_spinlock.h @@ -90,8 +90,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) void splpar_spin_yield(arch_spinlock_t *lock); void splpar_rw_yield(arch_rwlock_t *lock); #else /* SPLPAR */ -static inline void splpar_spin_yield(arch_spinlock_t *lock) {}; -static inline void splpar_rw_yield(arch_rwlock_t *lock) {}; +static inline void splpar_spin_yield(arch_spinlock_t *lock) {} +static inline void splpar_rw_yield(arch_rwlock_t *lock) {} #endif static inline void spin_yield(arch_spinlock_t *lock) diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index c4e2d53acd2b..7a13bc20f0a0 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -236,7 +236,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys) #if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)) extern void smp_release_cpus(void); #else -static inline void smp_release_cpus(void) { }; +static inline void 
smp_release_cpus(void) { } #endif extern int smt_enabled_at_boot; diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 3d8a47af7a25..386d576673a1 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -94,7 +94,6 @@ void arch_setup_new_exec(void); #define TIF_PATCH_PENDING 6 /* pending live patching update */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SINGLESTEP 8 /* singlestepping active */ -#define TIF_NOHZ 9 /* in adaptive nohz mode */ #define TIF_SECCOMP 10 /* secure computing */ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ #define TIF_NOERROR 12 /* Force successful syscall return */ @@ -128,11 +127,10 @@ void arch_setup_new_exec(void); #define _TIF_UPROBE (1<<TIF_UPROBE) #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) -#define _TIF_NOHZ (1<<TIF_NOHZ) #define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU) #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ - _TIF_NOHZ | _TIF_SYSCALL_EMU) + _TIF_SYSCALL_EMU) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index 8f789b597bae..8dd3cdb25338 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -102,6 +102,8 @@ DECLARE_PER_CPU(u64, decrementers_next_tb); /* Convert timebase ticks to nanoseconds */ unsigned long long tb_to_ns(unsigned long long tb_ticks); +void timer_broadcast_interrupt(void); + /* SPLPAR */ void accumulate_stolen_time(void); diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 501c9a79038c..78e2a3990eab 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -52,8 +52,6 @@ static inline bool __access_ok(unsigned long addr, unsigned long size) __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true) #define __put_user(x, ptr) \ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) -#define __put_user_goto(x, ptr, label) \ - __put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label) #define __get_user_allowed(x, ptr) \ __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false) @@ -110,22 +108,18 @@ static inline bool __access_ok(unsigned long addr, unsigned long size) extern long __put_user_bad(void); -#define __put_user_size_allowed(x, ptr, size, retval) \ +#define __put_user_size(x, ptr, size, retval) \ do { \ __label__ __pu_failed; \ \ retval = 0; \ + allow_write_to_user(ptr, size); \ __put_user_size_goto(x, ptr, size, __pu_failed); \ + prevent_write_to_user(ptr, size); \ break; \ \ __pu_failed: \ retval = -EFAULT; \ -} while (0) - -#define __put_user_size(x, ptr, size, retval) \ -do { \ - allow_write_to_user(ptr, size); \ - __put_user_size_allowed(x, ptr, size, retval); \ prevent_write_to_user(ptr, size); \ } while (0) @@ -213,11 +207,9 @@ do { \ } \ } while (0) -#define __put_user_nocheck_goto(x, ptr, size, label) \ +#define __unsafe_put_user_goto(x, ptr, size, label) \ do { \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - if (!is_kernel_addr((unsigned long)__pu_addr)) \ - might_fault(); \ __chk_user_ptr(ptr); \ __put_user_size_goto((x), __pu_addr, (size), label); \ } while (0) @@ -313,9 +305,8 @@ do { \ __typeof__(size) __gu_size = (size); \ \ __chk_user_ptr(__gu_addr); \ - if 
(!is_kernel_addr((unsigned long)__gu_addr)) \ + if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \ might_fault(); \ - barrier_nospec(); \ if (do_allow) \ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ else \ @@ -333,10 +324,8 @@ do { \ __typeof__(size) __gu_size = (size); \ \ might_fault(); \ - if (access_ok(__gu_addr, __gu_size)) { \ - barrier_nospec(); \ + if (access_ok(__gu_addr, __gu_size)) \ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ - } \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ \ __gu_err; \ @@ -350,7 +339,6 @@ do { \ __typeof__(size) __gu_size = (size); \ \ __chk_user_ptr(__gu_addr); \ - barrier_nospec(); \ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ \ @@ -395,7 +383,6 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) { unsigned long ret; - barrier_nospec(); allow_read_write_user(to, from, n); ret = __copy_tofrom_user(to, from, n); prevent_read_write_user(to, from, n); @@ -407,32 +394,7 @@ static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long ret; - if (__builtin_constant_p(n) && (n <= 8)) { - ret = 1; - - switch (n) { - case 1: - barrier_nospec(); - __get_user_size(*(u8 *)to, from, 1, ret); - break; - case 2: - barrier_nospec(); - __get_user_size(*(u16 *)to, from, 2, ret); - break; - case 4: - barrier_nospec(); - __get_user_size(*(u32 *)to, from, 4, ret); - break; - case 8: - barrier_nospec(); - __get_user_size(*(u64 *)to, from, 8, ret); - break; - } - if (ret == 0) - return 0; - } - barrier_nospec(); allow_read_from_user(from, n); ret = __copy_tofrom_user((__force void __user *)to, from, n); prevent_read_from_user(from, n); @@ -440,39 +402,12 @@ static inline unsigned long raw_copy_from_user(void *to, } static inline unsigned long -raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n) -{ - if (__builtin_constant_p(n) && (n <= 8)) { - unsigned long ret = 1; - - switch (n) { - case 1: - __put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret); - break; - case 2: - __put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret); - break; - case 4: - __put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret); - break; - case 8: - __put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret); - break; - } - if (ret == 0) - return 0; - } - - return __copy_tofrom_user(to, (__force const void __user *)from, n); -} - -static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { unsigned long ret; allow_write_to_user(to, n); - ret = raw_copy_to_user_allowed(to, from, n); + ret = __copy_tofrom_user(to, (__force const void __user *)from, n); prevent_write_to_user(to, n); return ret; } @@ -508,6 +443,9 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t { if (unlikely(!access_ok(ptr, len))) return false; + + might_fault(); + allow_read_write_user((void __user *)ptr, ptr, len); return true; } @@ -521,6 +459,9 @@ user_read_access_begin(const void __user *ptr, size_t len) { if (unlikely(!access_ok(ptr, len))) return false; + + might_fault(); + allow_read_from_user(ptr, len); return true; } @@ -532,6 +473,9 @@ user_write_access_begin(const void __user *ptr, size_t len) { if (unlikely(!access_ok(ptr, len))) return false; + + might_fault(); + allow_write_to_user((void __user *)ptr, len); return true; } @@ -540,7 +484,8 @@ user_write_access_begin(const void __user *ptr, size_t len) #define 
unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e) -#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e) +#define unsafe_put_user(x, p, e) \ + __unsafe_put_user_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e) #define unsafe_copy_to_user(d, s, l, e) \ do { \ @@ -550,17 +495,17 @@ do { \ int _i; \ \ for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long)) \ - __put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\ + unsafe_put_user(*(long*)(_src + _i), (long __user *)(_dst + _i), e); \ if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) { \ - __put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \ + unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \ _i += 4; \ } \ if (_len & 2) { \ - __put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \ + unsafe_put_user(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \ _i += 2; \ } \ if (_len & 1) \ - __put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\ + unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \ } while (0) #define HAVE_GET_KERNEL_NOFAULT diff --git a/arch/powerpc/include/asm/vdso/timebase.h b/arch/powerpc/include/asm/vdso/timebase.h index 881f655caa0a..891c9d5eaabe 100644 --- a/arch/powerpc/include/asm/vdso/timebase.h +++ b/arch/powerpc/include/asm/vdso/timebase.h @@ -43,12 +43,6 @@ #define mttbl(v) asm volatile("mttbl %0":: "r"(v)) #define mttbu(v) asm volatile("mttbu %0":: "r"(v)) -/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */ -static inline unsigned long get_tbl(void) -{ - return mftb(); -} - static __always_inline u64 get_tb(void) { unsigned int tbhi, tblo, tbhi2; diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h index 0cf52746531b..721c0d6715ac 100644 --- a/arch/powerpc/include/asm/vio.h +++ b/arch/powerpc/include/asm/vio.h @@ -113,7 +113,7 @@ struct vio_driver { const char *name; const struct vio_device_id *id_table; int (*probe)(struct vio_dev *dev, const struct vio_device_id *id); - int (*remove)(struct vio_dev *dev); + void (*remove)(struct vio_dev *dev); /* A driver must have a get_desired_dma() function to * be loaded in a CMO environment if it uses DMA. 
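The uaccess rework above folds might_fault() and the KUAP allow/prevent window into user_access_begin() and its read/write variants, so batched unsafe_get_user()/unsafe_put_user() calls run with the window already open and fault straight to a label. A minimal sketch of the resulting calling convention (the helper and its arguments are illustrative; user_write_access_begin()/user_write_access_end() and unsafe_put_user() are the interfaces shown in the hunks):

	static int put_two_words(u32 __user *uptr, u32 a, u32 b)
	{
		/* One access_ok() + might_fault() + window open covers both stores. */
		if (!user_write_access_begin(uptr, 2 * sizeof(u32)))
			return -EFAULT;
		unsafe_put_user(a, &uptr[0], efault);
		unsafe_put_user(b, &uptr[1], efault);
		user_write_access_end();
		return 0;

	efault:	/* a faulting store branches here with the window still open */
		user_write_access_end();
		return -EFAULT;
	}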
*/ diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h index 454a7fc6113b..68bfb2361f03 100644 --- a/arch/powerpc/include/asm/xmon.h +++ b/arch/powerpc/include/asm/xmon.h @@ -17,8 +17,8 @@ struct pt_regs; extern int xmon(struct pt_regs *excp); extern irqreturn_t xmon_irq(int, void *); #else -static inline void xmon_setup(void) { }; -static inline void xmon_register_spus(struct list_head *list) { }; +static inline void xmon_setup(void) { } +static inline void xmon_register_spus(struct list_head *list) { } #endif #if defined(CONFIG_XMON) && defined(CONFIG_SMP) diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index c3af3f324c5a..9f18fa090f1f 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -644,6 +644,8 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_MMCR3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc1) #define KVM_REG_PPC_SIER2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc2) #define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3) +#define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4) +#define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h index bdf5f10f8b9f..578b3ee86105 100644 --- a/arch/powerpc/include/uapi/asm/perf_regs.h +++ b/arch/powerpc/include/uapi/asm/perf_regs.h @@ -55,17 +55,33 @@ enum perf_event_powerpc_regs { PERF_REG_POWERPC_MMCR3, PERF_REG_POWERPC_SIER2, PERF_REG_POWERPC_SIER3, + PERF_REG_POWERPC_PMC1, + PERF_REG_POWERPC_PMC2, + PERF_REG_POWERPC_PMC3, + PERF_REG_POWERPC_PMC4, + PERF_REG_POWERPC_PMC5, + PERF_REG_POWERPC_PMC6, /* Max regs without the extended regs */ PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1, }; #define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1) -/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300 */ -#define PERF_REG_PMU_MASK_300 (((1ULL << (PERF_REG_POWERPC_MMCR2 + 1)) - 1) - PERF_REG_PMU_MASK) -/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31 */ -#define PERF_REG_PMU_MASK_31 (((1ULL << (PERF_REG_POWERPC_SIER3 + 1)) - 1) - PERF_REG_PMU_MASK) +/* Exclude MMCR3, SIER2, SIER3 for CPU_FTR_ARCH_300 */ +#define PERF_EXCLUDE_REG_EXT_300 (7ULL << PERF_REG_POWERPC_MMCR3) -#define PERF_REG_MAX_ISA_300 (PERF_REG_POWERPC_MMCR2 + 1) -#define PERF_REG_MAX_ISA_31 (PERF_REG_POWERPC_SIER3 + 1) +/* + * PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300 + * includes 9 SPRS from MMCR0 to PMC6 excluding the + * unsupported SPRS in PERF_EXCLUDE_REG_EXT_300. + */ +#define PERF_REG_PMU_MASK_300 ((0xfffULL << PERF_REG_POWERPC_MMCR0) - PERF_EXCLUDE_REG_EXT_300) + +/* + * PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31 + * includes 12 SPRs from MMCR0 to PMC6. 
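The replacement masks above are easiest to verify by counting enum slots: MMCR0 through PMC6 spans 12 consecutive entries, so 0xfff (twelve set bits) shifted up to MMCR0 covers exactly that range, and the ISA 3.0 variant subtracts the three slots (MMCR3, SIER2, SIER3) it cannot expose. A compile-time cross-check along these lines (the placement of the assertions is illustrative):

	/* MMCR0..MMCR2, MMCR3, SIER2, SIER3, PMC1..PMC6: 12 contiguous slots */
	BUILD_BUG_ON(PERF_REG_POWERPC_PMC6 - PERF_REG_POWERPC_MMCR0 + 1 != 12);
	/* The ISA 3.1 mask is the ISA 3.0 mask plus the three excluded regs. */
	BUILD_BUG_ON(PERF_REG_PMU_MASK_31 !=
		     (PERF_REG_PMU_MASK_300 | PERF_EXCLUDE_REG_EXT_300));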
+ */ +#define PERF_REG_PMU_MASK_31 (0xfffULL << PERF_REG_POWERPC_MMCR0) + +#define PERF_REG_EXTENDED_MAX (PERF_REG_POWERPC_PMC6 + 1) #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 79ee7750937d..6084fa499aa3 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -46,10 +46,10 @@ obj-y := cputable.o syscalls.o \ prom.o traps.o setup-common.o \ udbg.o misc.o io.o misc_$(BITS).o \ of_platform.o prom_parse.o firmware.o \ - hw_breakpoint_constraints.o + hw_breakpoint_constraints.o interrupt.o obj-y += ptrace/ obj-$(CONFIG_PPC64) += setup_64.o \ - paca.o nvram_64.o note.o syscall_64.o + paca.o nvram_64.o note.o obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o obj-$(CONFIG_VDSO32) += vdso32_wrapper.o obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index b12d7c049bfe..f3a662201a9f 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -255,7 +255,6 @@ int main(void) #endif /* CONFIG_PPC_MM_SLICES */ OFFSET(PACA_EXGEN, paca_struct, exgen); OFFSET(PACA_EXMC, paca_struct, exmc); - OFFSET(PACA_EXSLB, paca_struct, exslb); OFFSET(PACA_EXNMI, paca_struct, exnmi); #ifdef CONFIG_PPC_PSERIES OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr); @@ -309,7 +308,7 @@ int main(void) /* Interrupt register frame */ DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); - DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); + DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS); STACK_PT_REGS_OFFSET(GPR0, gpr[0]); STACK_PT_REGS_OFFSET(GPR1, gpr[1]); STACK_PT_REGS_OFFSET(GPR2, gpr[2]); @@ -526,8 +525,10 @@ int main(void) OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl); OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr); OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx); - OFFSET(VCPU_DAWR, kvm_vcpu, arch.dawr); - OFFSET(VCPU_DAWRX, kvm_vcpu, arch.dawrx); + OFFSET(VCPU_DAWR0, kvm_vcpu, arch.dawr0); + OFFSET(VCPU_DAWRX0, kvm_vcpu, arch.dawrx0); + OFFSET(VCPU_DAWR1, kvm_vcpu, arch.dawr1); + OFFSET(VCPU_DAWRX1, kvm_vcpu, arch.dawrx1); OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr); OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags); OFFSET(VCPU_DEC, kvm_vcpu, arch.dec); @@ -668,7 +669,6 @@ int main(void) HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr); HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi); HSTATE_FIELD(HSTATE_PTID, ptid); - HSTATE_FIELD(HSTATE_TID, tid); HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend); HSTATE_FIELD(HSTATE_MMCR0, host_mmcr[0]); HSTATE_FIELD(HSTATE_MMCR1, host_mmcr[1]); @@ -698,8 +698,6 @@ int main(void) OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar); OFFSET(KVM_SPLIT_DO_NAP, kvm_split_mode, do_nap); OFFSET(KVM_SPLIT_NAPPED, kvm_split_mode, napped); - OFFSET(KVM_SPLIT_DO_SET, kvm_split_mode, do_set); - OFFSET(KVM_SPLIT_DO_RESTORE, kvm_split_mode, do_restore); #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index 52680cf07c9d..5545c9cd17c1 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c @@ -12,17 +12,17 @@ #include <linux/hardirq.h> #include <asm/dbell.h> +#include <asm/interrupt.h> #include <asm/irq_regs.h> #include <asm/kvm_ppc.h> #include <asm/trace.h> #ifdef CONFIG_SMP -void doorbell_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception) { struct pt_regs *old_regs = set_irq_regs(regs); - irq_enter(); trace_doorbell_entry(regs); ppc_msgsync(); @@ -35,13 +35,12 @@ void 
doorbell_exception(struct pt_regs *regs) smp_ipi_demux_relaxed(); /* already performed the barrier */ trace_doorbell_exit(regs); - irq_exit(); + set_irq_regs(old_regs); } #else /* CONFIG_SMP */ -void doorbell_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception) { printk(KERN_WARNING "Received doorbell on non-smp system\n"); } #endif /* CONFIG_SMP */ - diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 813713c9120c..cd60bc1c8701 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1596,6 +1596,35 @@ static int proc_eeh_show(struct seq_file *m, void *v) } #ifdef CONFIG_DEBUG_FS + + +static struct pci_dev *eeh_debug_lookup_pdev(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + uint32_t domain, bus, dev, fn; + struct pci_dev *pdev; + char buf[20]; + int ret; + + memset(buf, 0, sizeof(buf)); + ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count); + if (!ret) + return ERR_PTR(-EFAULT); + + ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn); + if (ret != 4) { + pr_err("%s: expected 4 args, got %d\n", __func__, ret); + return ERR_PTR(-EINVAL); + } + + pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn); + if (!pdev) + return ERR_PTR(-ENODEV); + + return pdev; +} + static int eeh_enable_dbgfs_set(void *data, u64 val) { if (val) @@ -1688,26 +1717,13 @@ static ssize_t eeh_dev_check_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos) { - uint32_t domain, bus, dev, fn; struct pci_dev *pdev; struct eeh_dev *edev; - char buf[20]; int ret; - memset(buf, 0, sizeof(buf)); - ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count); - if (!ret) - return -EFAULT; - - ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn); - if (ret != 4) { - pr_err("%s: expected 4 args, got %d\n", __func__, ret); - return -EINVAL; - } - - pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn); - if (!pdev) - return -ENODEV; + pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); edev = pci_dev_to_eeh_dev(pdev); if (!edev) { @@ -1717,8 +1733,8 @@ static ssize_t eeh_dev_check_write(struct file *filp, } ret = eeh_dev_check_failure(edev); - pci_info(pdev, "eeh_dev_check_failure(%04x:%02x:%02x.%01x) = %d\n", - domain, bus, dev, fn, ret); + pci_info(pdev, "eeh_dev_check_failure(%s) = %d\n", + pci_name(pdev), ret); pci_dev_put(pdev); @@ -1829,25 +1845,12 @@ static ssize_t eeh_dev_break_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos) { - uint32_t domain, bus, dev, fn; struct pci_dev *pdev; - char buf[20]; int ret; - memset(buf, 0, sizeof(buf)); - ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count); - if (!ret) - return -EFAULT; - - ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn); - if (ret != 4) { - pr_err("%s: expected 4 args, got %d\n", __func__, ret); - return -EINVAL; - } - - pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn); - if (!pdev) - return -ENODEV; + pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); ret = eeh_debugfs_break_device(pdev); pci_dev_put(pdev); @@ -1865,6 +1868,53 @@ static const struct file_operations eeh_dev_break_fops = { .read = eeh_debugfs_dev_usage, }; +static ssize_t eeh_dev_can_recover(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct pci_driver *drv; + struct pci_dev *pdev; + 
size_t ret; + + pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + /* + * In order for error recovery to work the driver needs to implement + * .error_detected(), so it can quiesce IO to the device, and + * .slot_reset() so it can re-initialise the device after a reset. + * + * Ideally they'd implement .resume() too, but some drivers which + * we need to support (notably IPR) don't so I guess we can tolerate + * that. + * + * .mmio_enabled() is mostly there as a work-around for devices which + * take forever to re-init after a hot reset. Implementing that is + * strictly optional. + */ + drv = pci_dev_driver(pdev); + if (drv && + drv->err_handler && + drv->err_handler->error_detected && + drv->err_handler->slot_reset) { + ret = count; + } else { + ret = -EOPNOTSUPP; + } + + pci_dev_put(pdev); + + return ret; +} + +static const struct file_operations eeh_dev_can_recover_fops = { + .open = simple_open, + .llseek = no_llseek, + .write = eeh_dev_can_recover, + .read = eeh_debugfs_dev_usage, +}; + #endif static int __init eeh_init_proc(void) @@ -1889,6 +1939,9 @@ static int __init eeh_init_proc(void) debugfs_create_file_unsafe("eeh_force_recover", 0600, powerpc_debugfs_root, NULL, &eeh_force_recover_fops); + debugfs_create_file_unsafe("eeh_dev_can_recover", 0600, + powerpc_debugfs_root, NULL, + &eeh_dev_can_recover_fops); eeh_cache_debugfs_init(); #endif } diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 1c9b0ccc2172..78c430b7f9d9 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -175,14 +175,11 @@ transfer_to_handler: addi r11,r11,global_dbcr0@l #ifdef CONFIG_SMP lwz r9,TASK_CPU(r2) - slwi r9,r9,3 + slwi r9,r9,2 add r11,r11,r9 #endif lwz r12,0(r11) mtspr SPRN_DBCR0,r12 - lwz r12,4(r11) - addi r12,r12,-1 - stw r12,4(r11) #endif b 3f @@ -276,8 +273,7 @@ reenable_mmu: * We save a bunch of GPRs, * r3 can be different from GPR3(r1) at this point, r9 and r11 * contains the old MSR and handler address respectively, - * r4 & r5 can contain page fault arguments that need to be passed - * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left + * r0, r4-r8, r12, CCR, CTR, XER etc... are left * clobbered as they aren't useful past this point. */ @@ -285,15 +281,11 @@ reenable_mmu: stw r9,8(r1) stw r11,12(r1) stw r3,16(r1) - stw r4,20(r1) - stw r5,24(r1) /* If we are disabling interrupts (normal case), simply log it with * lockdep */ 1: bl trace_hardirqs_off - lwz r5,24(r1) - lwz r4,20(r1) lwz r3,16(r1) lwz r11,12(r1) lwz r9,8(r1) @@ -334,132 +326,29 @@ stack_ovf: _ASM_NOKPROBE_SYMBOL(stack_ovf) #endif -#ifdef CONFIG_TRACE_IRQFLAGS -trace_syscall_entry_irq_off: - /* - * Syscall shouldn't happen while interrupts are disabled, - * so let's do a warning here. - */ -0: trap - EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING - bl trace_hardirqs_on - - /* Now enable for real */ - LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE) - mtmsr r10 - - REST_GPR(0, r1) - REST_4GPRS(3, r1) - REST_2GPRS(7, r1) - b DoSyscall -#endif /* CONFIG_TRACE_IRQFLAGS */ - .globl transfer_to_syscall transfer_to_syscall: -#ifdef CONFIG_TRACE_IRQFLAGS - andi. r12,r9,MSR_EE - beq- trace_syscall_entry_irq_off -#endif /* CONFIG_TRACE_IRQFLAGS */ - -/* - * Handle a system call. 
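The eeh_dev_check and eeh_dev_break writers above now share eeh_debug_lookup_pdev(), which parses a domain:bus:device.function string from the debugfs write and returns a referenced pci_dev (callers drop it with pci_dev_put()), and the new eeh_dev_can_recover file reuses it to report whether the bound driver implements the mandatory recovery callbacks. Assuming the usual debugfs mount point, the new file would be exercised with a plain PCI address, e.g.:

	echo 0000:01:00.0 > /sys/kernel/debug/powerpc/eeh_dev_can_recover

The write returns the byte count only when both .error_detected() and .slot_reset() are present, and -EOPNOTSUPP otherwise, so the shell command's exit status doubles as the recoverability probe.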
- */ - .stabs "arch/powerpc/kernel/",N_SO,0,0,0f - .stabs "entry_32.S",N_SO,0,0,0f -0: - -_GLOBAL(DoSyscall) - stw r3,ORIG_GPR3(r1) - li r12,0 - stw r12,RESULT(r1) -#ifdef CONFIG_TRACE_IRQFLAGS - /* Make sure interrupts are enabled */ - mfmsr r11 - andi. r12,r11,MSR_EE - /* We came in with interrupts disabled, we WARN and mark them enabled - * for lockdep now */ -0: tweqi r12, 0 - EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING -#endif /* CONFIG_TRACE_IRQFLAGS */ - lwz r11,TI_FLAGS(r2) - andi. r11,r11,_TIF_SYSCALL_DOTRACE - bne- syscall_dotrace -syscall_dotrace_cont: - cmplwi 0,r0,NR_syscalls - lis r10,sys_call_table@h - ori r10,r10,sys_call_table@l - slwi r0,r0,2 - bge- 66f + SAVE_NVGPRS(r1) +#ifdef CONFIG_PPC_BOOK3S_32 + kuep_lock r11, r12 +#endif - barrier_nospec_asm - /* - * Prevent the load of the handler below (based on the user-passed - * system call number) being speculatively executed until the test - * against NR_syscalls and branch to .66f above has - * committed. - */ + /* Calling convention has r9 = orig r0, r10 = regs */ + addi r10,r1,STACK_FRAME_OVERHEAD + mr r9,r0 + stw r10,THREAD+PT_REGS(r2) + bl system_call_exception - lwzx r10,r10,r0 /* Fetch system call handler [ptr] */ - mtlr r10 - addi r9,r1,STACK_FRAME_OVERHEAD - PPC440EP_ERR42 - blrl /* Call handler */ - .globl ret_from_syscall ret_from_syscall: -#ifdef CONFIG_DEBUG_RSEQ - /* Check whether the syscall is issued inside a restartable sequence */ - stw r3,GPR3(r1) - addi r3,r1,STACK_FRAME_OVERHEAD - bl rseq_syscall - lwz r3,GPR3(r1) -#endif - mr r6,r3 - /* disable interrupts so current_thread_info()->flags can't change */ - LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) /* doesn't include MSR_EE */ - /* Note: We don't bother telling lockdep about it */ - mtmsr r10 - lwz r9,TI_FLAGS(r2) - li r8,-MAX_ERRNO - andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) - bne- syscall_exit_work - cmplw 0,r3,r8 - blt+ syscall_exit_cont - lwz r11,_CCR(r1) /* Load CR */ - neg r3,r3 - oris r11,r11,0x1000 /* Set SO bit in CR */ - stw r11,_CCR(r1) -syscall_exit_cont: - lwz r8,_MSR(r1) -#ifdef CONFIG_TRACE_IRQFLAGS - /* If we are going to return from the syscall with interrupts - * off, we trace that here. It shouldn't normally happen. - */ - andi. r10,r8,MSR_EE - bne+ 1f - stw r3,GPR3(r1) - bl trace_hardirqs_off - lwz r3,GPR3(r1) -1: -#endif /* CONFIG_TRACE_IRQFLAGS */ -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - /* If the process has its own DBCR0 value, load it up. The internal - debug mode bit tells us that dbcr0 should be loaded. */ - lwz r0,THREAD+THREAD_DBCR0(r2) - andis. r10,r0,DBCR0_IDM@h - bnel- load_dbcr0 -#endif + addi r4,r1,STACK_FRAME_OVERHEAD + li r5,0 + bl syscall_exit_prepare #ifdef CONFIG_PPC_47x lis r4,icache_44x_need_flush@ha lwz r5,icache_44x_need_flush@l(r4) cmplwi cr0,r5,0 bne- 2f #endif /* CONFIG_PPC_47x */ -1: -BEGIN_FTR_SECTION - lwarx r7,0,r1 -END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) - stwcx. 
r0,0,r1 /* to clear the reservation */ - ACCOUNT_CPU_USER_EXIT(r2, r5, r7) #ifdef CONFIG_PPC_BOOK3S_32 kuep_unlock r5, r7 #endif @@ -467,21 +356,36 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) lwz r4,_LINK(r1) lwz r5,_CCR(r1) mtlr r4 - mtcr r5 lwz r7,_NIP(r1) - lwz r2,GPR2(r1) - lwz r1,GPR1(r1) + lwz r8,_MSR(r1) + cmpwi r3,0 + lwz r3,GPR3(r1) syscall_exit_finish: -#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) - mtspr SPRN_NRI, r0 -#endif mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r8 + + bne 3f + mtcr r5 + +1: lwz r2,GPR2(r1) + lwz r1,GPR1(r1) rfi #ifdef CONFIG_40x b . /* Prevent prefetch past rfi */ #endif -_ASM_NOKPROBE_SYMBOL(syscall_exit_finish) + +3: mtcr r5 + lwz r4,_CTR(r1) + lwz r5,_XER(r1) + REST_NVGPRS(r1) + mtctr r4 + mtxer r5 + lwz r0,GPR0(r1) + lwz r3,GPR3(r1) + REST_8GPRS(4,r1) + lwz r12,GPR12(r1) + b 1b + #ifdef CONFIG_44x 2: li r7,0 iccci r0,r0 @@ -489,9 +393,6 @@ _ASM_NOKPROBE_SYMBOL(syscall_exit_finish) b 1b #endif /* CONFIG_44x */ -66: li r3,-ENOSYS - b ret_from_syscall - .globl ret_from_fork ret_from_fork: REST_NVGPRS(r1) @@ -510,157 +411,6 @@ ret_from_kernel_thread: li r3,0 b ret_from_syscall -/* Traced system call support */ -syscall_dotrace: - SAVE_NVGPRS(r1) - li r0,0xc00 - stw r0,_TRAP(r1) - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_syscall_trace_enter - /* - * Restore argument registers possibly just changed. - * We use the return value of do_syscall_trace_enter - * for call number to look up in the table (r0). - */ - mr r0,r3 - lwz r3,GPR3(r1) - lwz r4,GPR4(r1) - lwz r5,GPR5(r1) - lwz r6,GPR6(r1) - lwz r7,GPR7(r1) - lwz r8,GPR8(r1) - REST_NVGPRS(r1) - - cmplwi r0,NR_syscalls - /* Return code is already in r3 thanks to do_syscall_trace_enter() */ - bge- ret_from_syscall - b syscall_dotrace_cont - -syscall_exit_work: - andi. r0,r9,_TIF_RESTOREALL - beq+ 0f - REST_NVGPRS(r1) - b 2f -0: cmplw 0,r3,r8 - blt+ 1f - andi. r0,r9,_TIF_NOERROR - bne- 1f - lwz r11,_CCR(r1) /* Load CR */ - neg r3,r3 - oris r11,r11,0x1000 /* Set SO bit in CR */ - stw r11,_CCR(r1) - -1: stw r6,RESULT(r1) /* Save result */ - stw r3,GPR3(r1) /* Update return value */ -2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) - beq 4f - - /* Clear per-syscall TIF flags if any are set. */ - - li r11,_TIF_PERSYSCALL_MASK - addi r12,r2,TI_FLAGS -3: lwarx r8,0,r12 - andc r8,r8,r11 - stwcx. r8,0,r12 - bne- 3b - -4: /* Anything which requires enabling interrupts? */ - andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP) - beq ret_from_except - - /* Re-enable interrupts. There is no need to trace that with - * lockdep as we are supposed to have IRQs on at this point - */ - ori r10,r10,MSR_EE - mtmsr r10 - - /* Save NVGPRS if they're not saved already */ - lwz r4,_TRAP(r1) - andi. r4,r4,1 - beq 5f - SAVE_NVGPRS(r1) - li r4,0xc00 - stw r4,_TRAP(r1) -5: - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_syscall_trace_leave - b ret_from_except_full - - /* - * System call was called from kernel. We get here with SRR1 in r9. - * Mark the exception as recoverable once we have retrieved SRR0, - * trap a warning and return ENOSYS with CR[SO] set. - */ - .globl ret_from_kernel_syscall -ret_from_kernel_syscall: - mfspr r9, SPRN_SRR0 - mfspr r10, SPRN_SRR1 -#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE) - LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR)) - mtmsr r11 -#endif - -0: trap - EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING - - li r3, ENOSYS - crset so -#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) - mtspr SPRN_NRI, r0 -#endif - mtspr SPRN_SRR0, r9 - mtspr SPRN_SRR1, r10 - rfi -#ifdef CONFIG_40x - b . 
/* Prevent prefetch past rfi */ -#endif -_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall) - -/* - * The fork/clone functions need to copy the full register set into - * the child process. Therefore we need to save all the nonvolatile - * registers (r13 - r31) before calling the C code. - */ - .globl ppc_fork -ppc_fork: - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,_TRAP(r1) /* register set saved */ - b sys_fork - - .globl ppc_vfork -ppc_vfork: - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,_TRAP(r1) /* register set saved */ - b sys_vfork - - .globl ppc_clone -ppc_clone: - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,_TRAP(r1) /* register set saved */ - b sys_clone - - .globl ppc_clone3 -ppc_clone3: - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,_TRAP(r1) /* register set saved */ - b sys_clone3 - - .globl ppc_swapcontext -ppc_swapcontext: - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,_TRAP(r1) /* register set saved */ - b sys_swapcontext - /* * Top-level page fault handling. * This is in assembler because if do_page_fault tells us that @@ -670,10 +420,6 @@ ppc_swapcontext: .globl handle_page_fault handle_page_fault: addi r3,r1,STACK_FRAME_OVERHEAD -#ifdef CONFIG_PPC_BOOK3S_32 - andis. r0,r5,DSISR_DABRMATCH@h - bne- handle_dabr_fault -#endif bl do_page_fault cmpwi r3,0 beq+ ret_from_except @@ -681,23 +427,11 @@ handle_page_fault: lwz r0,_TRAP(r1) clrrwi r0,r0,1 stw r0,_TRAP(r1) - mr r5,r3 + mr r4,r3 /* err arg for bad_page_fault */ addi r3,r1,STACK_FRAME_OVERHEAD - lwz r4,_DAR(r1) bl __bad_page_fault b ret_from_except_full -#ifdef CONFIG_PPC_BOOK3S_32 - /* We have a data breakpoint exception - handle it */ -handle_dabr_fault: - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - clrrwi r0,r0,1 - stw r0,_TRAP(r1) - bl do_break - b ret_from_except_full -#endif - /* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. 
Then the state @@ -1237,14 +971,11 @@ load_dbcr0: addi r11,r11,global_dbcr0@l #ifdef CONFIG_SMP lwz r9,TASK_CPU(r2) - slwi r9,r9,3 + slwi r9,r9,2 add r11,r11,r9 #endif stw r10,0(r11) mtspr SPRN_DBCR0,r0 - lwz r10,4(r11) - addi r10,r10,1 - stw r10,4(r11) li r11,-1 mtspr SPRN_DBSR,r11 /* clear all pending debug events */ blr @@ -1253,7 +984,7 @@ load_dbcr0: .align 4 .global global_dbcr0 global_dbcr0: - .space 8*NR_CPUS + .space 4*NR_CPUS .previous #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 33ddfeef4fe9..6c4d9e276c4d 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -108,7 +108,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) li r11,\trapnr std r11,_TRAP(r1) std r12,_CCR(r1) - std r3,ORIG_GPR3(r1) addi r10,r1,STACK_FRAME_OVERHEAD ld r11,exception_marker@toc(r2) std r11,-16(r10) /* "regshere" marker */ @@ -226,6 +225,12 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate) #endif .balign IFETCH_ALIGN_BYTES + .globl system_call_common_real +system_call_common_real: + ld r10,PACAKMSR(r13) /* get MSR value for kernel */ + mtmsrd r10 + + .balign IFETCH_ALIGN_BYTES .globl system_call_common system_call_common: _ASM_NOKPROBE_SYMBOL(system_call_common) @@ -278,7 +283,6 @@ END_BTB_FLUSH_SECTION std r10,_LINK(r1) std r11,_TRAP(r1) std r12,_CCR(r1) - std r3,ORIG_GPR3(r1) addi r10,r1,STACK_FRAME_OVERHEAD ld r11,exception_marker@toc(r2) std r11,-16(r10) /* "regshere" marker */ diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 74d07dc0bb48..e8eb9992a270 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -398,7 +398,6 @@ exc_##n##_common: \ std r10,_NIP(r1); /* save SRR0 to stackframe */ \ std r11,_MSR(r1); /* save SRR1 to stackframe */ \ beq 2f; /* if from kernel mode */ \ - ACCOUNT_CPU_USER_ENTRY(r13,r10,r11);/* accounting (uses cr0+eq) */ \ 2: ld r3,excf+EX_R10(r13); /* get back r10 */ \ ld r4,excf+EX_R11(r13); /* get back r11 */ \ mfspr r5,scratch; /* get back r13 */ \ @@ -791,7 +790,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) EXCEPTION_COMMON_CRIT(0xd00) std r14,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - mr r4,r14 ld r14,PACA_EXCRIT+EX_R14(r13) ld r15,PACA_EXCRIT+EX_R15(r13) bl save_nvgprs @@ -864,7 +862,6 @@ kernel_dbg_exc: INTS_DISABLE std r14,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - mr r4,r14 ld r14,PACA_EXDBG+EX_R14(r13) ld r15,PACA_EXDBG+EX_R15(r13) bl save_nvgprs @@ -1011,8 +1008,6 @@ storage_fault_common: std r14,_DAR(r1) std r15,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - mr r4,r14 - mr r5,r15 ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) bl do_page_fault @@ -1020,9 +1015,8 @@ storage_fault_common: bne- 1f b ret_from_except_lite 1: bl save_nvgprs - mr r5,r3 + mr r4,r3 addi r3,r1,STACK_FRAME_OVERHEAD - ld r4,_DAR(r1) bl __bad_page_fault b ret_from_except diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 6e53f7638737..60d3051a8bc8 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -139,7 +139,6 @@ name: #define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */ #define ISTACK .L_ISTACK_\name\() /* Set regular kernel stack */ #define __ISTACK(name) .L_ISTACK_ ## name -#define IRECONCILE .L_IRECONCILE_\name\() /* Do RECONCILE_IRQ_STATE */ #define IKUAP .L_IKUAP_\name\() /* Do KUAP lock */ #define INT_DEFINE_BEGIN(n) \ @@ -203,9 +202,6 @@ do_define_int n .ifndef ISTACK ISTACK=1 .endif - 
.ifndef IRECONCILE - IRECONCILE=1 - .endif .ifndef IKUAP IKUAP=1 .endif @@ -581,7 +577,6 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real) kuap_save_amr_and_lock r9, r10, cr1, cr0 .endif beq 101f /* if from kernel mode */ - ACCOUNT_CPU_USER_ENTRY(r13, r9, r10) BEGIN_FTR_SECTION ld r9,IAREA+EX_PPR(r13) /* Read PPR from paca */ std r9,_PPR(r1) @@ -649,14 +644,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) ld r11,exception_marker@toc(r2) std r10,RESULT(r1) /* clear regs->result */ std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */ - - .if ISTACK - ACCOUNT_STOLEN_TIME - .endif - - .if IRECONCILE - RECONCILE_IRQ_STATE(r10, r11) - .endif .endm /* @@ -705,14 +692,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) ld r1,GPR1(r1) .endm -#define RUNLATCH_ON \ -BEGIN_FTR_SECTION \ - ld r3, PACA_THREAD_INFO(r13); \ - ld r4,TI_LOCAL_FLAGS(r3); \ - andi. r0,r4,_TLF_RUNLATCH; \ - beql ppc64_runlatch_on_trampoline; \ -END_FTR_SECTION_IFSET(CPU_FTR_CTRL) - /* * When the idle code in power4_idle puts the CPU into NAP mode, * it has to do so in a loop, and relies on the external interrupt @@ -935,7 +914,6 @@ INT_DEFINE_BEGIN(system_reset) */ ISET_RI=0 ISTACK=0 - IRECONCILE=0 IKVM_REAL=1 INT_DEFINE_END(system_reset) @@ -1022,20 +1000,6 @@ EXC_COMMON_BEGIN(system_reset_common) ld r1,PACA_NMI_EMERG_SP(r13) subi r1,r1,INT_FRAME_SIZE __GEN_COMMON_BODY system_reset - /* - * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does - * the right thing. We do not want to reconcile because that goes - * through irq tracing which we don't want in NMI. - * - * Save PACAIRQHAPPENED to RESULT (otherwise unused), and set HARD_DIS - * as we are running with MSR[EE]=0. - */ - li r10,IRQS_ALL_DISABLED - stb r10,PACAIRQSOFTMASK(r13) - lbz r10,PACAIRQHAPPENED(r13) - std r10,RESULT(r1) - ori r10,r10,PACA_IRQ_HARD_DIS - stb r10,PACAIRQHAPPENED(r13) addi r3,r1,STACK_FRAME_OVERHEAD bl system_reset_exception @@ -1051,14 +1015,6 @@ EXC_COMMON_BEGIN(system_reset_common) subi r10,r10,1 sth r10,PACA_IN_NMI(r13) - /* - * Restore soft mask settings. - */ - ld r10,RESULT(r1) - stb r10,PACAIRQHAPPENED(r13) - ld r10,SOFTE(r1) - stb r10,PACAIRQSOFTMASK(r13) - kuap_kernel_restore r9, r10 EXCEPTION_RESTORE_REGS RFI_TO_USER_OR_KERNEL @@ -1123,7 +1079,6 @@ INT_DEFINE_BEGIN(machine_check_early) ISTACK=0 IDAR=1 IDSISR=1 - IRECONCILE=0 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */ INT_DEFINE_END(machine_check_early) @@ -1205,30 +1160,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) li r10,MSR_RI mtmsrd r10,1 - /* - * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see - * system_reset_common) - */ - li r10,IRQS_ALL_DISABLED - stb r10,PACAIRQSOFTMASK(r13) - lbz r10,PACAIRQHAPPENED(r13) - std r10,RESULT(r1) - ori r10,r10,PACA_IRQ_HARD_DIS - stb r10,PACAIRQHAPPENED(r13) - addi r3,r1,STACK_FRAME_OVERHEAD bl machine_check_early std r3,RESULT(r1) /* Save result */ ld r12,_MSR(r1) - /* - * Restore soft mask settings. - */ - ld r10,RESULT(r1) - stb r10,PACAIRQHAPPENED(r13) - ld r10,SOFTE(r1) - stb r10,PACAIRQSOFTMASK(r13) - #ifdef CONFIG_PPC_P7_NAP /* * Check if thread was in power saving mode. We come here when any @@ -1401,14 +1337,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) * * Handling: * - Hash MMU - * Go to do_hash_page first to see if the HPT can be filled from an entry in - * the Linux page table. Hash faults can hit in kernel mode in a fairly + * Go to do_hash_fault, which attempts to fill the HPT from an entry in the + * Linux page table. 
Hash faults can hit in kernel mode in a fairly * arbitrary state (e.g., interrupts disabled, locks held) when accessing * "non-bolted" regions, e.g., vmalloc space. However these should always be - * backed by Linux page tables. + * backed by Linux page table entries. * - * If none is found, do a Linux page fault. Linux page faults can happen in - * kernel mode due to user copy operations of course. + * If no entry is found the Linux page fault handler is invoked (by + * do_hash_fault). Linux page faults can happen in kernel mode due to user + * copy operations of course. * * KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest * MMU context, which may cause a DSI in the host, which must go to the @@ -1437,15 +1374,24 @@ EXC_VIRT_BEGIN(data_access, 0x4300, 0x80) EXC_VIRT_END(data_access, 0x4300, 0x80) EXC_COMMON_BEGIN(data_access_common) GEN_COMMON data_access - ld r4,_DAR(r1) - ld r5,_DSISR(r1) + ld r4,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD + andis. r0,r4,DSISR_DABRMATCH@h + bne- 1f BEGIN_MMU_FTR_SECTION - ld r6,_MSR(r1) - li r3,0x300 - b do_hash_page /* Try to handle as hpte fault */ + bl do_hash_fault MMU_FTR_SECTION_ELSE - b handle_page_fault + bl do_page_fault ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) + b interrupt_return + +1: bl do_break + /* + * do_break() may have changed the NV GPRS while handling a breakpoint. + * If so, we need to restore them with their updated values. + */ + REST_NVGPRS(r1) + b interrupt_return GEN_KVM data_access @@ -1466,14 +1412,9 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) * on user-handler data structures. * * KVM: Same as 0x300, DSLB must test for KVM guest. - * - * A dedicated save area EXSLB is used (XXX: but it actually need not be - * these days, we could use EXGEN). */ INT_DEFINE_BEGIN(data_access_slb) IVEC=0x380 - IAREA=PACA_EXSLB - IRECONCILE=0 IDAR=1 IKVM_SKIP=1 IKVM_REAL=1 @@ -1487,10 +1428,9 @@ EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80) EXC_VIRT_END(data_access_slb, 0x4380, 0x80) EXC_COMMON_BEGIN(data_access_slb_common) GEN_COMMON data_access_slb - ld r4,_DAR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD BEGIN_MMU_FTR_SECTION /* HPT case, do SLB fault */ + addi r3,r1,STACK_FRAME_OVERHEAD bl do_slb_fault cmpdi r3,0 bne- 1f @@ -1501,9 +1441,6 @@ MMU_FTR_SECTION_ELSE li r3,-EFAULT ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) std r3,RESULT(r1) - RECONCILE_IRQ_STATE(r10, r11) - ld r4,_DAR(r1) - ld r5,RESULT(r1) addi r3,r1,STACK_FRAME_OVERHEAD bl do_bad_slb_fault b interrupt_return @@ -1538,15 +1475,13 @@ EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80) EXC_VIRT_END(instruction_access, 0x4400, 0x80) EXC_COMMON_BEGIN(instruction_access_common) GEN_COMMON instruction_access - ld r4,_DAR(r1) - ld r5,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD BEGIN_MMU_FTR_SECTION - ld r6,_MSR(r1) - li r3,0x400 - b do_hash_page /* Try to handle as hpte fault */ + bl do_hash_fault MMU_FTR_SECTION_ELSE - b handle_page_fault + bl do_page_fault ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) + b interrupt_return GEN_KVM instruction_access @@ -1562,8 +1497,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) */ INT_DEFINE_BEGIN(instruction_access_slb) IVEC=0x480 - IAREA=PACA_EXSLB - IRECONCILE=0 IISIDE=1 IDAR=1 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE @@ -1579,10 +1512,9 @@ EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80) EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80) EXC_COMMON_BEGIN(instruction_access_slb_common) GEN_COMMON instruction_access_slb - ld r4,_DAR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD BEGIN_MMU_FTR_SECTION /* HPT 
case, do SLB fault */ + addi r3,r1,STACK_FRAME_OVERHEAD bl do_slb_fault cmpdi r3,0 bne- 1f @@ -1593,9 +1525,6 @@ MMU_FTR_SECTION_ELSE li r3,-EFAULT ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) std r3,RESULT(r1) - RECONCILE_IRQ_STATE(r10, r11) - ld r4,_DAR(r1) - ld r5,RESULT(r1) addi r3,r1,STACK_FRAME_OVERHEAD bl do_bad_slb_fault b interrupt_return @@ -1643,7 +1572,6 @@ EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100) EXC_COMMON_BEGIN(hardware_interrupt_common) GEN_COMMON hardware_interrupt FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD bl do_IRQ b interrupt_return @@ -1697,6 +1625,51 @@ INT_DEFINE_BEGIN(program_check) INT_DEFINE_END(program_check) EXC_REAL_BEGIN(program_check, 0x700, 0x100) + +#ifdef CONFIG_CPU_LITTLE_ENDIAN + /* + * There's a short window during boot where although the kernel is + * running little endian, any exceptions will cause the CPU to switch + * back to big endian. For example a WARN() boils down to a trap + * instruction, which will cause a program check, and we end up here but + * with the CPU in big endian mode. The first instruction of the program + * check handler (in GEN_INT_ENTRY below) is an mtsprg, which when + * executed in the wrong endian is an lhzu with a ~3GB displacement from + * r3. The content of r3 is random, so that is a load from some random + * location, and depending on the system can easily lead to a checkstop, + * or an infinitely recursive page fault. + * + * So to handle that case we have a trampoline here that can detect we + * are in the wrong endian and flip us back to the correct endian. We + * can't flip MSR[LE] using mtmsr, so we have to use rfid. That requires + * backing up SRR0/1 as well as a GPR. To do that we use SPRG0/2/3, as + * SPRG1 is already used for the paca. SPRG3 is user readable, but this + * trampoline is only active very early in boot, and SPRG3 will be + * reinitialised in vdso_getcpu_init() before userspace starts. + */ +BEGIN_FTR_SECTION + tdi 0,0,0x48 // Trap never, or in reverse endian: b . 
+ 8 + b 1f // Skip trampoline if endian is correct + .long 0xa643707d // mtsprg 0, r11 Backup r11 + .long 0xa6027a7d // mfsrr0 r11 + .long 0xa643727d // mtsprg 2, r11 Backup SRR0 in SPRG2 + .long 0xa6027b7d // mfsrr1 r11 + .long 0xa643737d // mtsprg 3, r11 Backup SRR1 in SPRG3 + .long 0xa600607d // mfmsr r11 + .long 0x01006b69 // xori r11, r11, 1 Invert MSR[LE] + .long 0xa6037b7d // mtsrr1 r11 + .long 0x34076039 // li r11, 0x734 + .long 0xa6037a7d // mtsrr0 r11 + .long 0x2400004c // rfid + mfsprg r11, 3 + mtsrr1 r11 // Restore SRR1 + mfsprg r11, 2 + mtsrr0 r11 // Restore SRR0 + mfsprg r11, 0 // Restore r11 +1: +END_FTR_SECTION(0, 1) // nop out after boot +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + GEN_INT_ENTRY program_check, virt=0 EXC_REAL_END(program_check, 0x700, 0x100) EXC_VIRT_BEGIN(program_check, 0x4700, 0x100) @@ -1755,7 +1728,6 @@ EXC_COMMON_BEGIN(program_check_common) */ INT_DEFINE_BEGIN(fp_unavailable) IVEC=0x800 - IRECONCILE=0 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE IKVM_REAL=1 #endif @@ -1770,7 +1742,6 @@ EXC_VIRT_END(fp_unavailable, 0x4800, 0x100) EXC_COMMON_BEGIN(fp_unavailable_common) GEN_COMMON fp_unavailable bne 1f /* if from user, just load it up */ - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl kernel_fp_unavailable_exception 0: trap @@ -1789,7 +1760,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) b fast_interrupt_return #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl fp_unavailable_tm b interrupt_return @@ -1832,7 +1802,6 @@ EXC_VIRT_END(decrementer, 0x4900, 0x80) EXC_COMMON_BEGIN(decrementer_common) GEN_COMMON decrementer FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD bl timer_interrupt b interrupt_return @@ -1854,7 +1823,6 @@ INT_DEFINE_BEGIN(hdecrementer) IVEC=0x980 IHSRR=1 ISTACK=0 - IRECONCILE=0 IKVM_REAL=1 IKVM_VIRT=1 INT_DEFINE_END(hdecrementer) @@ -1919,12 +1887,11 @@ EXC_VIRT_END(doorbell_super, 0x4a00, 0x100) EXC_COMMON_BEGIN(doorbell_super_common) GEN_COMMON doorbell_super FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_PPC_DOORBELL bl doorbell_exception #else - bl unknown_exception + bl unknown_async_exception #endif b interrupt_return @@ -2001,12 +1968,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) HMT_MEDIUM .if ! \virt - __LOAD_HANDLER(r10, system_call_common) - mtspr SPRN_SRR0,r10 - ld r10,PACAKMSR(r13) - mtspr SPRN_SRR1,r10 - RFI_TO_KERNEL - b . 
/* prevent speculative execution */ + __LOAD_HANDLER(r10, system_call_common_real) + mtctr r10 + bctr .else li r10,MSR_RI mtmsrd r10,1 /* Set RI (EE=0) */ @@ -2137,9 +2101,7 @@ EXC_COMMON_BEGIN(h_data_storage_common) GEN_COMMON h_data_storage addi r3,r1,STACK_FRAME_OVERHEAD BEGIN_MMU_FTR_SECTION - ld r4,_DAR(r1) - li r5,SIGSEGV - bl bad_page_fault + bl do_bad_page_fault_segv MMU_FTR_SECTION_ELSE bl unknown_exception ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX) @@ -2230,7 +2192,6 @@ INT_DEFINE_BEGIN(hmi_exception_early) IHSRR=1 IREALMODE_COMMON=1 ISTACK=0 - IRECONCILE=0 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */ IKVM_REAL=1 INT_DEFINE_END(hmi_exception_early) @@ -2277,7 +2238,6 @@ EXC_COMMON_BEGIN(hmi_exception_early_common) EXC_COMMON_BEGIN(hmi_exception_common) GEN_COMMON hmi_exception FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD bl handle_hmi_exception b interrupt_return @@ -2307,12 +2267,11 @@ EXC_VIRT_END(h_doorbell, 0x4e80, 0x20) EXC_COMMON_BEGIN(h_doorbell_common) GEN_COMMON h_doorbell FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_PPC_DOORBELL bl doorbell_exception #else - bl unknown_exception + bl unknown_async_exception #endif b interrupt_return @@ -2341,7 +2300,6 @@ EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20) EXC_COMMON_BEGIN(h_virt_irq_common) GEN_COMMON h_virt_irq FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD bl do_IRQ b interrupt_return @@ -2388,7 +2346,6 @@ EXC_VIRT_END(performance_monitor, 0x4f00, 0x20) EXC_COMMON_BEGIN(performance_monitor_common) GEN_COMMON performance_monitor FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD bl performance_monitor_exception b interrupt_return @@ -2404,7 +2361,6 @@ EXC_COMMON_BEGIN(performance_monitor_common) */ INT_DEFINE_BEGIN(altivec_unavailable) IVEC=0xf20 - IRECONCILE=0 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE IKVM_REAL=1 #endif @@ -2434,7 +2390,6 @@ BEGIN_FTR_SECTION b fast_interrupt_return #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl altivec_unavailable_tm b interrupt_return @@ -2442,7 +2397,6 @@ BEGIN_FTR_SECTION 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl altivec_unavailable_exception b interrupt_return @@ -2458,7 +2412,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) */ INT_DEFINE_BEGIN(vsx_unavailable) IVEC=0xf40 - IRECONCILE=0 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE IKVM_REAL=1 #endif @@ -2487,7 +2440,6 @@ BEGIN_FTR_SECTION b load_up_vsx #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl vsx_unavailable_tm b interrupt_return @@ -2495,7 +2447,6 @@ BEGIN_FTR_SECTION 1: END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl vsx_unavailable_exception b interrupt_return @@ -2830,7 +2781,6 @@ EXC_VIRT_NONE(0x5800, 0x100) INT_DEFINE_BEGIN(soft_nmi) IVEC=0x900 ISTACK=0 - IRECONCILE=0 /* Soft-NMI may fire under local_irq_disable */ INT_DEFINE_END(soft_nmi) /* @@ -2849,17 +2799,6 @@ EXC_COMMON_BEGIN(soft_nmi_common) subi r1,r1,INT_FRAME_SIZE __GEN_COMMON_BODY soft_nmi - /* - * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see - * system_reset_common) - */ - li r10,IRQS_ALL_DISABLED - stb r10,PACAIRQSOFTMASK(r13) - lbz r10,PACAIRQHAPPENED(r13) - std r10,RESULT(r1) - ori r10,r10,PACA_IRQ_HARD_DIS - stb r10,PACAIRQHAPPENED(r13) - addi 
r3,r1,STACK_FRAME_OVERHEAD bl soft_nmi_interrupt @@ -2867,14 +2806,6 @@ EXC_COMMON_BEGIN(soft_nmi_common) li r9,0 mtmsrd r9,1 - /* - * Restore soft mask settings. - */ - ld r10,RESULT(r1) - stb r10,PACAIRQHAPPENED(r13) - ld r10,SOFTE(r1) - stb r10,PACAIRQSOFTMASK(r13) - kuap_kernel_restore r9, r10 EXCEPTION_RESTORE_REGS hsrr=0 RFI_TO_KERNEL @@ -3148,9 +3079,6 @@ kvmppc_skip_Hinterrupt: * come here. */ -EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline) - b __ppc64_runlatch_on - USE_FIXED_SECTION(virt_trampolines) /* * All code below __end_interrupts is treated as soft-masked. If @@ -3221,99 +3149,3 @@ disable_machine_check: RFI_TO_KERNEL 1: mtlr r0 blr - -/* - * Hash table stuff - */ - .balign IFETCH_ALIGN_BYTES -do_hash_page: -#ifdef CONFIG_PPC_BOOK3S_64 - lis r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h - ori r0,r0,DSISR_BAD_FAULT_64S@l - and. r0,r5,r0 /* weird error? */ - bne- handle_page_fault /* if not, try to insert a HPTE */ - - /* - * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then - * don't call hash_page, just fail the fault. This is required to - * prevent re-entrancy problems in the hash code, namely perf - * interrupts hitting while something holds H_PAGE_BUSY, and taking a - * hash fault. See the comment in hash_preload(). - */ - ld r11, PACA_THREAD_INFO(r13) - lwz r0,TI_PREEMPT(r11) - andis. r0,r0,NMI_MASK@h - bne 77f - - /* - * r3 contains the trap number - * r4 contains the faulting address - * r5 contains dsisr - * r6 msr - * - * at return r3 = 0 for success, 1 for page fault, negative for error - */ - bl __hash_page /* build HPTE if possible */ - cmpdi r3,0 /* see if __hash_page succeeded */ - - /* Success */ - beq interrupt_return /* Return from exception on success */ - - /* Error */ - blt- 13f - - /* Reload DAR/DSISR into r4/r5 for the DABR check below */ - ld r4,_DAR(r1) - ld r5,_DSISR(r1) -#endif /* CONFIG_PPC_BOOK3S_64 */ - -/* Here we have a page fault that hash_page can't handle. */ -handle_page_fault: -11: andis. r0,r5,DSISR_DABRMATCH@h - bne- handle_dabr_fault - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_page_fault - cmpdi r3,0 - beq+ interrupt_return - mr r5,r3 - addi r3,r1,STACK_FRAME_OVERHEAD - ld r4,_DAR(r1) - bl __bad_page_fault - b interrupt_return - -/* We have a data breakpoint exception - handle it */ -handle_dabr_fault: - ld r4,_DAR(r1) - ld r5,_DSISR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_break - /* - * do_break() may have changed the NV GPRS while handling a breakpoint. - * If so, we need to restore them with their updated values. - */ - REST_NVGPRS(r1) - b interrupt_return - - -#ifdef CONFIG_PPC_BOOK3S_64 -/* We have a page fault that hash_page could handle but HV refused - * the PTE insertion - */ -13: mr r5,r3 - addi r3,r1,STACK_FRAME_OVERHEAD - ld r4,_DAR(r1) - bl low_hash_fault - b interrupt_return -#endif - -/* - * We come here as a result of a DSI at a point where we don't want - * to call hash_page, such as when we are accessing memory (possibly - * user memory) inside a PMU interrupt that occurred while interrupts - * were soft-disabled. We want to invoke the exception handler for - * the access, or panic if there isn't a handler. 
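The reverse-endian trick in the 0x700 trampoline added earlier in this file can be verified directly from the opcode fields: the same four bytes decode as a never-firing trap in the correct endian and as a forward branch in the wrong one. A small userspace check (a sketch; the encodings below are derived from the ISA D-form and I-form layouts, not copied from the diff):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* tdi 0,0,0x48: primary opcode 2, TO=0, RA=0, SI=0x0048 */
        uint32_t tdi = 0x08000048;
        /* b .+8: primary opcode 18, LI=8, AA=0, LK=0 */
        uint32_t b8 = 0x48000008;

        /* Fetched with the wrong MSR[LE], the instruction's bytes are
         * read in reverse order, so "trap never" turns into a branch
         * over the next word: "Trap never, or in reverse endian:
         * b . + 8", as the trampoline comment puts it. */
        assert(__builtin_bswap32(tdi) == b8);
        return 0;
}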
- */ -77: addi r3,r1,STACK_FRAME_OVERHEAD - li r5,SIGSEGV - bl bad_page_fault - b interrupt_return diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h index a2f72c966baf..5d4706c14572 100644 --- a/arch/powerpc/kernel/head_32.h +++ b/arch/powerpc/kernel/head_32.h @@ -47,7 +47,7 @@ lwz r1,TASK_STACK-THREAD(r1) addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE 1: - mtcrf 0x7f, r1 + mtcrf 0x3f, r1 bt 32 - THREAD_ALIGN_SHIFT, stack_overflow #else subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */ @@ -116,114 +116,44 @@ .endm .macro SYSCALL_ENTRY trapno - mfspr r12,SPRN_SPRG_THREAD mfspr r9, SPRN_SRR1 -#ifdef CONFIG_VMAP_STACK - mfspr r11, SPRN_SRR0 - mtctr r11 - andi. r11, r9, MSR_PR + mfspr r10, SPRN_SRR0 + LOAD_REG_IMMEDIATE(r11, MSR_KERNEL) /* can take exceptions */ + lis r12, 1f@h + ori r12, r12, 1f@l + mtspr SPRN_SRR1, r11 + mtspr SPRN_SRR0, r12 + mfspr r12,SPRN_SPRG_THREAD mr r11, r1 lwz r1,TASK_STACK-THREAD(r12) - beq- 99f - addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE - li r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */ - mtmsr r10 - isync tovirt(r12, r12) + addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE + rfi +1: stw r11,GPR1(r1) stw r11,0(r1) mr r11, r1 -#else - andi. r11, r9, MSR_PR - lwz r11,TASK_STACK-THREAD(r12) - beq- 99f - addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE - tophys(r11, r11) - stw r1,GPR1(r11) - stw r1,0(r11) - tovirt(r1, r11) /* set new kernel sp */ -#endif + stw r10,_NIP(r11) mflr r10 stw r10, _LINK(r11) -#ifdef CONFIG_VMAP_STACK - mfctr r10 -#else - mfspr r10,SPRN_SRR0 -#endif - stw r10,_NIP(r11) mfcr r10 rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */ stw r10,_CCR(r11) /* save registers */ #ifdef CONFIG_40x rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */ -#else -#ifdef CONFIG_VMAP_STACK - LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~MSR_IR) /* can take exceptions */ -#else - LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */ -#endif - mtmsr r10 /* (except for mach check in rtas) */ #endif lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */ stw r2,GPR2(r11) addi r10,r10,STACK_FRAME_REGS_MARKER@l stw r9,_MSR(r11) - li r2, \trapno + 1 + li r2, \trapno stw r10,8(r11) stw r2,_TRAP(r11) SAVE_GPR(0, r11) SAVE_4GPRS(3, r11) SAVE_2GPRS(7, r11) - addi r11,r1,STACK_FRAME_OVERHEAD addi r2,r12,-THREAD - stw r11,PT_REGS(r12) -#if defined(CONFIG_40x) - /* Check to see if the dbcr0 register is set up to debug. Use the - internal debug mode bit to do this. */ - lwz r12,THREAD_DBCR0(r12) - andis. r12,r12,DBCR0_IDM@h -#endif - ACCOUNT_CPU_USER_ENTRY(r2, r11, r12) -#if defined(CONFIG_40x) - beq+ 3f - /* From user and task is ptraced - load up global dbcr0 */ - li r12,-1 /* clear all pending debug events */ - mtspr SPRN_DBSR,r12 - lis r11,global_dbcr0@ha - tophys(r11,r11) - addi r11,r11,global_dbcr0@l - lwz r12,0(r11) - mtspr SPRN_DBCR0,r12 - lwz r12,4(r11) - addi r12,r12,-1 - stw r12,4(r11) -#endif - -3: - tovirt_novmstack r2, r2 /* set r2 to current */ - lis r11, transfer_to_syscall@h - ori r11, r11, transfer_to_syscall@l -#ifdef CONFIG_TRACE_IRQFLAGS - /* - * If MSR is changing we need to keep interrupts disabled at this point - * otherwise we might risk taking an interrupt before we tell lockdep - * they are enabled. 
- */ - LOAD_REG_IMMEDIATE(r10, MSR_KERNEL) - rlwimi r10, r9, 0, MSR_EE -#else - LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE) -#endif -#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) - mtspr SPRN_NRI, r0 -#endif - mtspr SPRN_SRR1,r10 - mtspr SPRN_SRR0,r11 - rfi /* jump to handler, enable MMU */ -#ifdef CONFIG_40x - b . /* Prevent prefetch past rfi */ -#endif -99: b ret_from_kernel_syscall + b transfer_to_syscall /* jump to handler */ .endm .macro save_dar_dsisr_on_stack reg1, reg2, sp diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index a1ae00689e0f..24724a7dad49 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -179,9 +179,9 @@ _ENTRY(saved_ksp_limit) */ START_EXCEPTION(0x0300, DataStorage) EXCEPTION_PROLOG - mfspr r5, SPRN_ESR /* Grab the ESR, save it, pass arg3 */ + mfspr r5, SPRN_ESR /* Grab the ESR, save it */ stw r5, _ESR(r11) - mfspr r4, SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ + mfspr r4, SPRN_DEAR /* Grab the DEAR, save it */ stw r4, _DEAR(r11) EXC_XFER_LITE(0x300, handle_page_fault) @@ -191,9 +191,9 @@ _ENTRY(saved_ksp_limit) */ START_EXCEPTION(0x0400, InstructionAccess) EXCEPTION_PROLOG - mr r4,r12 /* Pass SRR0 as arg2 */ - stw r4, _DEAR(r11) - li r5,0 /* Pass zero as arg3 */ + li r5,0 + stw r5, _ESR(r11) /* Zero ESR */ + stw r12, _DEAR(r11) /* SRR0 as DEAR */ EXC_XFER_LITE(0x400, handle_page_fault) /* 0x0500 - External Interrupt Exception */ @@ -476,6 +476,7 @@ _ENTRY(saved_ksp_limit) /* continue normal handling for a critical exception... */ 2: mfspr r4,SPRN_DBSR + stw r4,_ESR(r11) /* DebugException takes DBSR in _ESR */ addi r3,r1,STACK_FRAME_OVERHEAD EXC_XFER_TEMPLATE(DebugException, 0x2002, \ (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 8e36718f3167..813fa305c33b 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -376,7 +376,7 @@ interrupt_base: /* Load the next available TLB index */ lwz r13,tlb_44x_index@l(r10) - bne 2f /* Bail if permission mismach */ + bne 2f /* Bail if permission mismatch */ /* Increment, rollover, and store TLB index */ addi r13,r13,1 @@ -471,7 +471,7 @@ interrupt_base: /* Load the next available TLB index */ lwz r13,tlb_44x_index@l(r10) - bne 2f /* Bail if permission mismach */ + bne 2f /* Bail if permission mismatch */ /* Increment, rollover, and store TLB index */ addi r13,r13,1 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 52702f3db6df..46dff3f9c31f 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -165,7 +165,7 @@ SystemCall: /* On the MPC8xx, this is a software emulation interrupt. It occurs * for all unimplemented and illegal instructions. */ - EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD) + EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD) . = 0x1100 /* @@ -312,14 +312,14 @@ DataStoreTLBMiss: . = 0x1300 InstructionTLBError: EXCEPTION_PROLOG - mr r4,r12 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ andis. r10,r9,SRR1_ISI_NOPT@h beq+ .Litlbie - tlbie r4 + tlbie r12 /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */ .Litlbie: - stw r4, _DAR(r11) + stw r12, _DAR(r11) + stw r5, _DSISR(r11) EXC_XFER_LITE(0x400, handle_page_fault) /* This is the data TLB error on the MPC8xx. 
This could be due to @@ -364,10 +364,9 @@ do_databreakpoint: addi r3,r1,STACK_FRAME_OVERHEAD mfspr r4,SPRN_BAR stw r4,_DAR(r11) -#ifdef CONFIG_VMAP_STACK - lwz r5,_DSISR(r11) -#else +#ifndef CONFIG_VMAP_STACK mfspr r5,SPRN_DSISR + stw r5,_DSISR(r11) #endif EXC_XFER_STD(0x1c00, do_break) diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S index 858fbc8b19f3..565e84e20a72 100644 --- a/arch/powerpc/kernel/head_book3s_32.S +++ b/arch/powerpc/kernel/head_book3s_32.S @@ -238,8 +238,8 @@ __secondary_hold_acknowledge: /* System reset */ /* core99 pmac starts the secondary here by changing the vector, and - putting it back to what it was (unknown_exception) when done. */ - EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD) + putting it back to what it was (unknown_async_exception) when done. */ + EXCEPTION(0x100, Reset, unknown_async_exception, EXC_XFER_STD) /* Machine check */ /* @@ -278,12 +278,6 @@ MachineCheck: 7: EXCEPTION_PROLOG_2 addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_PPC_CHRP -#ifdef CONFIG_VMAP_STACK - mfspr r4, SPRN_SPRG_THREAD - tovirt(r4, r4) - lwz r4, RTAS_SP(r4) - cmpwi cr1, r4, 0 -#endif beq cr1, machine_check_tramp twi 31, 0, 0 #else @@ -295,6 +289,7 @@ MachineCheck: DO_KVM 0x300 DataAccess: #ifdef CONFIG_VMAP_STACK +#ifdef CONFIG_PPC_BOOK3S_604 BEGIN_MMU_FTR_SECTION mtspr SPRN_SPRG_SCRATCH2,r10 mfspr r10, SPRN_SPRG_THREAD @@ -311,12 +306,14 @@ BEGIN_MMU_FTR_SECTION MMU_FTR_SECTION_ELSE b 1f ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) +#endif 1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1 EXCEPTION_PROLOG_1 b handle_page_fault_tramp_1 #else /* CONFIG_VMAP_STACK */ EXCEPTION_PROLOG handle_dar_dsisr=1 get_and_save_dar_dsisr_on_stack r4, r5, r11 +#ifdef CONFIG_PPC_BOOK3S_604 BEGIN_MMU_FTR_SECTION andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h bne handle_page_fault_tramp_2 /* if not, try to put a PTE */ @@ -324,8 +321,11 @@ BEGIN_MMU_FTR_SECTION bl hash_page b handle_page_fault_tramp_1 MMU_FTR_SECTION_ELSE +#endif b handle_page_fault_tramp_2 +#ifdef CONFIG_PPC_BOOK3S_604 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) +#endif #endif /* CONFIG_VMAP_STACK */ /* Instruction access exception. */ @@ -341,12 +341,14 @@ InstructionAccess: mfspr r11, SPRN_SRR1 /* check whether user or kernel */ stw r11, SRR1(r10) mfcr r10 +#ifdef CONFIG_PPC_BOOK3S_604 BEGIN_MMU_FTR_SECTION andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */ bne hash_page_isi .Lhash_page_isi_cont: mfspr r11, SPRN_SRR1 /* check whether user or kernel */ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) +#endif andi. r11, r11, MSR_PR EXCEPTION_PROLOG_1 @@ -357,13 +359,15 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) beq 1f /* if so, try to put a PTE */ li r3,0 /* into the hash table */ mr r4,r12 /* SRR0 is fault address */ +#ifdef CONFIG_PPC_BOOK3S_604 BEGIN_MMU_FTR_SECTION bl hash_page END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) +#endif #endif /* CONFIG_VMAP_STACK */ -1: mr r4,r12 andis.
r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ - stw r4, _DAR(r11) + stw r5, _DSISR(r11) + stw r12, _DAR(r11) EXC_XFER_LITE(0x400, handle_page_fault) /* External interrupt */ @@ -453,11 +457,12 @@ InstructionTLBMiss: cmplw 0,r1,r3 #endif mfspr r2, SPRN_SDR1 - li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC + li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER rlwinm r2, r2, 28, 0xfffff000 #ifdef CONFIG_MODULES bgt- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ + li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ #endif 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ @@ -516,10 +521,11 @@ DataLoadTLBMiss: lis r1, TASK_SIZE@h /* check if kernel address */ cmplw 0,r1,r3 mfspr r2, SPRN_SDR1 - li r1, _PAGE_PRESENT | _PAGE_ACCESSED + li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER rlwinm r2, r2, 28, 0xfffff000 bgt- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ + li r1, _PAGE_PRESENT | _PAGE_ACCESSED addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ @@ -593,10 +599,11 @@ DataStoreTLBMiss: lis r1, TASK_SIZE@h /* check if kernel address */ cmplw 0,r1,r3 mfspr r2, SPRN_SDR1 - li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED + li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER rlwinm r2, r2, 28, 0xfffff000 bgt- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ + li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ @@ -640,7 +647,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) #endif #ifndef CONFIG_TAU_INT -#define TAUException unknown_exception +#define TAUException unknown_async_exception #endif EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD) @@ -685,13 +692,16 @@ handle_page_fault_tramp_1: #ifdef CONFIG_VMAP_STACK EXCEPTION_PROLOG_2 handle_dar_dsisr=1 #endif - lwz r4, _DAR(r11) lwz r5, _DSISR(r11) /* fall through */ handle_page_fault_tramp_2: + andis. r0, r5, DSISR_DABRMATCH@h + bne- 1f EXC_XFER_LITE(0x300, handle_page_fault) +1: EXC_XFER_STD(0x300, do_break) #ifdef CONFIG_VMAP_STACK +#ifdef CONFIG_PPC_BOOK3S_604 .macro save_regs_thread thread stw r0, THR0(\thread) stw r3, THR3(\thread) @@ -763,6 +773,7 @@ fast_hash_page_return: mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r10, SPRN_SPRG_SCRATCH0 rfi +#endif /* CONFIG_PPC_BOOK3S_604 */ stack_overflow: vmap_stack_overflow_exception diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 74e230c200fb..47857795f50a 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -106,10 +106,8 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) #endif mfspr r9, SPRN_SRR1 BOOKE_CLEAR_BTB(r11) - andi. 
r11, r9, MSR_PR lwz r11, TASK_STACK - THREAD(r10) rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */ - beq- 99f ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE) stw r12, _CCR(r11) /* save various registers */ mflr r12 @@ -124,60 +122,15 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) stw r2,GPR2(r11) addi r12, r12, STACK_FRAME_REGS_MARKER@l stw r9,_MSR(r11) - li r2, \trapno + 1 + li r2, \trapno stw r12, 8(r11) stw r2,_TRAP(r11) SAVE_GPR(0, r11) SAVE_4GPRS(3, r11) SAVE_2GPRS(7, r11) - addi r11,r1,STACK_FRAME_OVERHEAD addi r2,r10,-THREAD - stw r11,PT_REGS(r10) - /* Check to see if the dbcr0 register is set up to debug. Use the - internal debug mode bit to do this. */ - lwz r12,THREAD_DBCR0(r10) - andis. r12,r12,DBCR0_IDM@h - ACCOUNT_CPU_USER_ENTRY(r2, r11, r12) - beq+ 3f - /* From user and task is ptraced - load up global dbcr0 */ - li r12,-1 /* clear all pending debug events */ - mtspr SPRN_DBSR,r12 - lis r11,global_dbcr0@ha - tophys(r11,r11) - addi r11,r11,global_dbcr0@l -#ifdef CONFIG_SMP - lwz r10, TASK_CPU(r2) - slwi r10, r10, 3 - add r11, r11, r10 -#endif - lwz r12,0(r11) - mtspr SPRN_DBCR0,r12 - lwz r12,4(r11) - addi r12,r12,-1 - stw r12,4(r11) - -3: - tovirt(r2, r2) /* set r2 to current */ - lis r11, transfer_to_syscall@h - ori r11, r11, transfer_to_syscall@l -#ifdef CONFIG_TRACE_IRQFLAGS - /* - * If MSR is changing we need to keep interrupts disabled at this point - * otherwise we might risk taking an interrupt before we tell lockdep - * they are enabled. - */ - lis r10, MSR_KERNEL@h - ori r10, r10, MSR_KERNEL@l - rlwimi r10, r9, 0, MSR_EE -#else - lis r10, (MSR_KERNEL | MSR_EE)@h - ori r10, r10, (MSR_KERNEL | MSR_EE)@l -#endif - mtspr SPRN_SRR1,r10 - mtspr SPRN_SRR0,r11 - rfi /* jump to handler, enable MMU */ -99: b ret_from_kernel_syscall + b transfer_to_syscall /* jump to handler */ .endm /* To handle the additional exception priority levels on 40x and Book-E @@ -406,6 +359,7 @@ label: \ /* continue normal handling for a debug exception... */ \ 2: mfspr r4,SPRN_DBSR; \ + stw r4,_ESR(r11); /* DebugException takes DBSR in _ESR */\ addi r3,r1,STACK_FRAME_OVERHEAD; \ EXC_XFER_TEMPLATE(DebugException, 0x2008, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), debug_transfer_to_handler, ret_from_debug_exc) @@ -459,6 +413,7 @@ label: \ /* continue normal handling for a critical exception... */ \ 2: mfspr r4,SPRN_DBSR; \ + stw r4,_ESR(r11); /* DebugException takes DBSR in _ESR */\ addi r3,r1,STACK_FRAME_OVERHEAD; \ EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), crit_transfer_to_handler, ret_from_crit_exc) @@ -476,9 +431,7 @@ label: NORMAL_EXCEPTION_PROLOG(INST_STORAGE); \ mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ stw r5,_ESR(r11); \ - mr r4,r12; /* Pass SRR0 as arg2 */ \ - stw r4, _DEAR(r11); \ - li r5,0; /* Pass zero as arg3 */ \ + stw r12, _DEAR(r11); /* Pass SRR0 as arg2 */ \ EXC_XFER_LITE(0x0400, handle_page_fault) #define ALIGNMENT_EXCEPTION \ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index fdd4d274c245..3f4a40cccef5 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -364,12 +364,12 @@ interrupt_base: /* Data Storage Interrupt */ START_EXCEPTION(DataStorage) NORMAL_EXCEPTION_PROLOG(DATA_STORAGE) - mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ + mfspr r5,SPRN_ESR /* Grab the ESR, save it */ stw r5,_ESR(r11) - mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ + mfspr r4,SPRN_DEAR /* Grab the DEAR, save it */ + stw r4, _DEAR(r11) andis. 
r10,r5,(ESR_ILK|ESR_DLK)@h bne 1f - stw r4, _DEAR(r11) EXC_XFER_LITE(0x0300, handle_page_fault) 1: addi r3,r1,STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 22f249b6f58d..f9e6d83e6720 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -52,28 +52,32 @@ _GLOBAL(isa300_idle_stop_mayloss) std r1,PACAR1(r13) mflr r4 mfcr r5 - /* use stack red zone rather than a new frame for saving regs */ - std r2,-8*0(r1) - std r14,-8*1(r1) - std r15,-8*2(r1) - std r16,-8*3(r1) - std r17,-8*4(r1) - std r18,-8*5(r1) - std r19,-8*6(r1) - std r20,-8*7(r1) - std r21,-8*8(r1) - std r22,-8*9(r1) - std r23,-8*10(r1) - std r24,-8*11(r1) - std r25,-8*12(r1) - std r26,-8*13(r1) - std r27,-8*14(r1) - std r28,-8*15(r1) - std r29,-8*16(r1) - std r30,-8*17(r1) - std r31,-8*18(r1) - std r4,-8*19(r1) - std r5,-8*20(r1) + /* + * Use the stack red zone rather than a new frame for saving regs since + * in the case of no GPR loss the wakeup code branches directly back to + * the caller without deallocating the stack frame first. + */ + std r2,-8*1(r1) + std r14,-8*2(r1) + std r15,-8*3(r1) + std r16,-8*4(r1) + std r17,-8*5(r1) + std r18,-8*6(r1) + std r19,-8*7(r1) + std r20,-8*8(r1) + std r21,-8*9(r1) + std r22,-8*10(r1) + std r23,-8*11(r1) + std r24,-8*12(r1) + std r25,-8*13(r1) + std r26,-8*14(r1) + std r27,-8*15(r1) + std r28,-8*16(r1) + std r29,-8*17(r1) + std r30,-8*18(r1) + std r31,-8*19(r1) + std r4,-8*20(r1) + std r5,-8*21(r1) /* 168 bytes */ PPC_STOP b . /* catch bugs */ @@ -89,8 +93,8 @@ _GLOBAL(isa300_idle_stop_mayloss) */ _GLOBAL(idle_return_gpr_loss) ld r1,PACAR1(r13) - ld r4,-8*19(r1) - ld r5,-8*20(r1) + ld r4,-8*20(r1) + ld r5,-8*21(r1) mtlr r4 mtcr r5 /* @@ -98,25 +102,25 @@ _GLOBAL(idle_return_gpr_loss) * from PACATOC. This could be avoided for that less common case * if KVM saved its r2. */ - ld r2,-8*0(r1) - ld r14,-8*1(r1) - ld r15,-8*2(r1) - ld r16,-8*3(r1) - ld r17,-8*4(r1) - ld r18,-8*5(r1) - ld r19,-8*6(r1) - ld r20,-8*7(r1) - ld r21,-8*8(r1) - ld r22,-8*9(r1) - ld r23,-8*10(r1) - ld r24,-8*11(r1) - ld r25,-8*12(r1) - ld r26,-8*13(r1) - ld r27,-8*14(r1) - ld r28,-8*15(r1) - ld r29,-8*16(r1) - ld r30,-8*17(r1) - ld r31,-8*18(r1) + ld r2,-8*1(r1) + ld r14,-8*2(r1) + ld r15,-8*3(r1) + ld r16,-8*4(r1) + ld r17,-8*5(r1) + ld r18,-8*6(r1) + ld r19,-8*7(r1) + ld r20,-8*8(r1) + ld r21,-8*9(r1) + ld r22,-8*10(r1) + ld r23,-8*11(r1) + ld r24,-8*12(r1) + ld r25,-8*13(r1) + ld r26,-8*14(r1) + ld r27,-8*15(r1) + ld r28,-8*16(r1) + ld r29,-8*17(r1) + ld r30,-8*18(r1) + ld r31,-8*19(r1) blr /* @@ -154,28 +158,32 @@ _GLOBAL(isa206_idle_insn_mayloss) std r1,PACAR1(r13) mflr r4 mfcr r5 - /* use stack red zone rather than a new frame for saving regs */ - std r2,-8*0(r1) - std r14,-8*1(r1) - std r15,-8*2(r1) - std r16,-8*3(r1) - std r17,-8*4(r1) - std r18,-8*5(r1) - std r19,-8*6(r1) - std r20,-8*7(r1) - std r21,-8*8(r1) - std r22,-8*9(r1) - std r23,-8*10(r1) - std r24,-8*11(r1) - std r25,-8*12(r1) - std r26,-8*13(r1) - std r27,-8*14(r1) - std r28,-8*15(r1) - std r29,-8*16(r1) - std r30,-8*17(r1) - std r31,-8*18(r1) - std r4,-8*19(r1) - std r5,-8*20(r1) + /* + * Use the stack red zone rather than a new frame for saving regs since + * in the case of no GPR loss the wakeup code branches directly back to + * the caller without deallocating the stack frame first. 
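Two things happen in the idle save/restore hunks above: the new comment records why the red zone is used at all, and every offset moves down one slot, evidently because -8*0(r1) is just 0(r1), the back-chain word the ABI keeps at the stack pointer, which the old code was silently overwriting. An illustrative view of the corrected layout (the struct is only a reading aid for the offsets, not a type the kernel defines):

#include <linux/types.h>

/* Red-zone save area below r1 after the fix. Offsets are negative;
 * the first usable slot is -8(r1). 21 slots * 8 bytes = 168 bytes,
 * matching the "168 bytes" note in the code. */
struct idle_redzone_layout {
        u64 r2;          /* -8*1(r1)                    */
        u64 gpr[18];     /* r14..r31 at -8*2..-8*19(r1) */
        u64 lr;          /* -8*20(r1), saved via r4     */
        u64 cr;          /* -8*21(r1), saved via r5     */
};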
+ */ + std r2,-8*1(r1) + std r14,-8*2(r1) + std r15,-8*3(r1) + std r16,-8*4(r1) + std r17,-8*5(r1) + std r18,-8*6(r1) + std r19,-8*7(r1) + std r20,-8*8(r1) + std r21,-8*9(r1) + std r22,-8*10(r1) + std r23,-8*11(r1) + std r24,-8*12(r1) + std r25,-8*13(r1) + std r26,-8*14(r1) + std r27,-8*15(r1) + std r28,-8*16(r1) + std r29,-8*17(r1) + std r30,-8*18(r1) + std r31,-8*19(r1) + std r4,-8*20(r1) + std r5,-8*21(r1) cmpwi r3,PNV_THREAD_NAP bne 1f IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP) diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/interrupt.c index 7c85ed04a164..2ef3c4051bb9 100644 --- a/arch/powerpc/kernel/syscall_64.c +++ b/arch/powerpc/kernel/interrupt.c @@ -1,10 +1,15 @@ // SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/context_tracking.h> #include <linux/err.h> +#include <linux/compat.h> + #include <asm/asm-prototypes.h> #include <asm/kup.h> #include <asm/cputime.h> +#include <asm/interrupt.h> #include <asm/hw_irq.h> +#include <asm/interrupt.h> #include <asm/kprobes.h> #include <asm/paca.h> #include <asm/ptrace.h> @@ -24,16 +29,21 @@ notrace long system_call_exception(long r3, long r4, long r5, { syscall_fn f; + regs->orig_gpr3 = r3; + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED); + CT_WARN_ON(ct_state() == CONTEXT_KERNEL); + user_exit_irqoff(); + trace_hardirqs_off(); /* finish reconciling */ - if (IS_ENABLED(CONFIG_PPC_BOOK3S)) + if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x)) BUG_ON(!(regs->msr & MSR_RI)); BUG_ON(!(regs->msr & MSR_PR)); BUG_ON(!FULL_REGS(regs)); - BUG_ON(regs->softe != IRQS_ENABLED); + BUG_ON(arch_irq_disabled_regs(regs)); #ifdef CONFIG_PPC_PKEY if (mmu_has_feature(MMU_FTR_PKEY)) { @@ -59,19 +69,15 @@ notrace long system_call_exception(long r3, long r4, long r5, isync(); } else #endif +#ifdef CONFIG_PPC64 kuap_check_amr(); +#endif - account_cpu_user_entry(); + booke_restore_dbcr0(); -#ifdef CONFIG_PPC_SPLPAR - if (IS_ENABLED(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && - firmware_has_feature(FW_FEATURE_SPLPAR)) { - struct lppaca *lp = local_paca->lppaca_ptr; + account_cpu_user_entry(); - if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx))) - accumulate_stolen_time(); - } -#endif + account_stolen_time(); /* * This is not required for the syscall exit path, but makes the @@ -79,12 +85,12 @@ notrace long system_call_exception(long r3, long r4, long r5, * frame, or if the unwinder was taught the first stack frame always * returns to user with IRQS_ENABLED, this store could be avoided! */ - regs->softe = IRQS_ENABLED; + irq_soft_mask_regs_set_state(regs, IRQS_ENABLED); local_irq_enable(); if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) { - if (unlikely(regs->trap == 0x7ff0)) { + if (unlikely(trap_is_unsupported_scv(regs))) { /* Unsupported scv vector */ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); return regs->gpr[3]; @@ -107,7 +113,7 @@ notrace long system_call_exception(long r3, long r4, long r5, r8 = regs->gpr[8]; } else if (unlikely(r0 >= NR_syscalls)) { - if (unlikely(regs->trap == 0x7ff0)) { + if (unlikely(trap_is_unsupported_scv(regs))) { /* Unsupported scv vector */ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); return regs->gpr[3]; @@ -118,7 +124,7 @@ notrace long system_call_exception(long r3, long r4, long r5, /* May be faster to do array_index_nospec? 
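On the question the nearby comment leaves open: array_index_nospec() would clamp the syscall number under speculation instead of stalling with barrier_nospec(). A hedged sketch of that alternative (the helper name is hypothetical; syscall_fn, sys_call_table and NR_syscalls are the names this file already uses):

#include <linux/nospec.h>

/* Hypothetical alternative to barrier_nospec(): under mis-speculation
 * array_index_nospec() forces the index to 0, so no out-of-bounds
 * table entry can be fetched speculatively. */
static syscall_fn syscall_fn_nospec(unsigned long r0)
{
        if (r0 >= NR_syscalls)
                return NULL;
        r0 = array_index_nospec(r0, NR_syscalls);
        return (syscall_fn)sys_call_table[r0];
}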
*/ barrier_nospec(); - if (unlikely(is_32bit_task())) { + if (unlikely(is_compat_task())) { f = (void *)compat_sys_call_table[r0]; r3 &= 0x00000000ffffffffULL; @@ -138,8 +144,12 @@ notrace long system_call_exception(long r3, long r4, long r5, /* * local irqs must be disabled. Returns false if the caller must re-enable * them, check for new work, and try again. + * + * This should be called with local irqs disabled, but if they were previously + * enabled when the interrupt handler returns (indicating a process-context / + * synchronous interrupt) then irqs_enabled should be true. */ -static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri) +static notrace __always_inline bool __prep_irq_for_enabled_exit(bool clear_ri) { /* This must be done with RI=1 because tracing may touch vmaps */ trace_hardirqs_on(); @@ -149,6 +159,7 @@ static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri) __hard_EE_RI_disable(); else __hard_irq_disable(); +#ifdef CONFIG_PPC64 if (unlikely(lazy_irq_pending_nocheck())) { /* Took an interrupt, may have more exit work to do. */ if (clear_ri) @@ -160,10 +171,63 @@ static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri) } local_paca->irq_happened = 0; irq_soft_mask_set(IRQS_ENABLED); - +#endif return true; } +static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri, bool irqs_enabled) +{ + if (__prep_irq_for_enabled_exit(clear_ri)) + return true; + + /* + * Must replay pending soft-masked interrupts now. Don't just + * local_irq_enable(); local_irq_disable(); because if we are + * returning from an asynchronous interrupt here, another one + * might hit after irqs are enabled, and it would exit via this + * same path allowing another to fire, and so on unbounded. + * + * If interrupts were enabled when this interrupt exited, + * indicating a process context (synchronous) interrupt, + * local_irq_enable/disable can be used, which will enable + * interrupts rather than keeping them masked (unclear how + * much benefit this is over just replaying for all cases, + * because we immediately disable again, so all we're really + * doing is allowing hard interrupts to execute directly for + * a very small time, rather than being masked and replayed). + */ + if (irqs_enabled) { + local_irq_enable(); + local_irq_disable(); + } else { + replay_soft_interrupts(); + } + + return false; +} + +static notrace void booke_load_dbcr0(void) +{ +#ifdef CONFIG_PPC_ADV_DEBUG_REGS + unsigned long dbcr0 = current->thread.debug.dbcr0; + + if (likely(!(dbcr0 & DBCR0_IDM))) + return; + + /* + * Check to see if the dbcr0 register is set up to debug. + * Use the internal debug mode bit to do this. + */ + mtmsr(mfmsr() & ~MSR_DE); + if (IS_ENABLED(CONFIG_PPC32)) { + isync(); + global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0); + } + mtspr(SPRN_DBCR0, dbcr0); + mtspr(SPRN_DBSR, -1); +#endif +} + /* * This should be called after a syscall returns, with r3 the return value * from the syscall.
If this function returns non-zero, the system call @@ -177,20 +241,24 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv) { - unsigned long *ti_flagsp = &current_thread_info()->flags; unsigned long ti_flags; unsigned long ret = 0; + bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv; + CT_WARN_ON(ct_state() == CONTEXT_USER); + +#ifdef CONFIG_PPC64 kuap_check_amr(); +#endif regs->result = r3; /* Check whether the syscall is issued inside a restartable sequence */ rseq_syscall(regs); - ti_flags = *ti_flagsp; + ti_flags = current_thread_info()->flags; - if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && !scv) { + if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) { if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) { r3 = -r3; regs->ccr |= 0x10000000; /* Set SO bit in CR */ @@ -202,7 +270,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, ret = _TIF_RESTOREALL; else regs->gpr[3] = r3; - clear_bits(_TIF_PERSYSCALL_MASK, ti_flagsp); + clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags); } else { regs->gpr[3] = r3; } @@ -212,9 +280,10 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, ret |= _TIF_RESTOREALL; } -again: local_irq_disable(); - ti_flags = READ_ONCE(*ti_flagsp); + +again: + ti_flags = READ_ONCE(current_thread_info()->flags); while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) { local_irq_enable(); if (ti_flags & _TIF_NEED_RESCHED) { @@ -230,7 +299,7 @@ again: do_notify_resume(regs, ti_flags); } local_irq_disable(); - ti_flags = READ_ONCE(*ti_flagsp); + ti_flags = READ_ONCE(current_thread_info()->flags); } if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) { @@ -257,9 +326,13 @@ again: } } + user_enter_irqoff(); + /* scv need not set RI=0 because SRRs are not used */ - if (unlikely(!prep_irq_for_enabled_exit(!scv))) { + if (unlikely(!__prep_irq_for_enabled_exit(is_not_scv))) { + user_exit_irqoff(); local_irq_enable(); + local_irq_disable(); goto again; } @@ -267,9 +340,11 @@ again: local_paca->tm_scratch = regs->msr; #endif + booke_load_dbcr0(); + account_cpu_user_exit(); -#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */ +#ifdef CONFIG_PPC_BOOK3S_64 /* BOOK3E and ppc32 not using this */ /* * We do this at the end so that we do context switch with KERNEL AMR */ @@ -278,33 +353,32 @@ again: return ret; } -#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */ +#ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not yet using this */ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr) { -#ifdef CONFIG_PPC_BOOK3E - struct thread_struct *ts = &current->thread; -#endif - unsigned long *ti_flagsp = &current_thread_info()->flags; unsigned long ti_flags; unsigned long flags; unsigned long ret = 0; - if (IS_ENABLED(CONFIG_PPC_BOOK3S)) + if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x)) BUG_ON(!(regs->msr & MSR_RI)); BUG_ON(!(regs->msr & MSR_PR)); BUG_ON(!FULL_REGS(regs)); - BUG_ON(regs->softe != IRQS_ENABLED); + BUG_ON(arch_irq_disabled_regs(regs)); + CT_WARN_ON(ct_state() == CONTEXT_USER); /* * We don't need to restore AMR on the way back to userspace for KUAP. * AMR can only have been unlocked if we interrupted the kernel.
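The delicate part of the syscall-exit hunk above is the ordering around user_enter_irqoff(): context tracking may flip to user state only once no further kernel work can happen, and must be flipped back if the irqs-off exit attempt fails. The skeleton, pulled out of syscall_exit_prepare() with explanatory comments (illustrative only, not a real function in the file):

#include <linux/context_tracking.h>

static bool try_exit_to_user(bool is_not_scv)
{
        user_enter_irqoff();            /* ct_state() -> CONTEXT_USER */

        if (!__prep_irq_for_enabled_exit(is_not_scv)) {
                /* An interrupt became pending: undo the context-
                 * tracking transition before doing more kernel work,
                 * let the interrupt run, then retry from "again:". */
                user_exit_irqoff();     /* back to CONTEXT_KERNEL */
                local_irq_enable();
                local_irq_disable();
                return false;
        }
        return true;                    /* really returning to user */
}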
*/ +#ifdef CONFIG_PPC64 kuap_check_amr(); +#endif local_irq_save(flags); again: - ti_flags = READ_ONCE(*ti_flagsp); + ti_flags = READ_ONCE(current_thread_info()->flags); while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) { local_irq_enable(); /* returning to user: may enable */ if (ti_flags & _TIF_NEED_RESCHED) { @@ -315,7 +389,7 @@ again: do_notify_resume(regs, ti_flags); } local_irq_disable(); - ti_flags = READ_ONCE(*ti_flagsp); + ti_flags = READ_ONCE(current_thread_info()->flags); } if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) { @@ -336,23 +410,16 @@ again: } } - if (unlikely(!prep_irq_for_enabled_exit(true))) { + user_enter_irqoff(); + + if (unlikely(!__prep_irq_for_enabled_exit(true))) { + user_exit_irqoff(); local_irq_enable(); local_irq_disable(); goto again; } -#ifdef CONFIG_PPC_BOOK3E - if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) { - /* - * Check to see if the dbcr0 register is set up to debug. - * Use the internal debug mode bit to do this. - */ - mtmsr(mfmsr() & ~MSR_DE); - mtspr(SPRN_DBCR0, ts->debug.dbcr0); - mtspr(SPRN_DBSR, -1); - } -#endif + booke_load_dbcr0(); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM local_paca->tm_scratch = regs->msr; @@ -363,7 +430,9 @@ again: /* * We do this at the end so that we do context switch with KERNEL AMR */ +#ifdef CONFIG_PPC64 kuap_user_restore(regs); +#endif return ret; } @@ -372,56 +441,56 @@ void preempt_schedule_irq(void); notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr) { - unsigned long *ti_flagsp = &current_thread_info()->flags; unsigned long flags; unsigned long ret = 0; +#ifdef CONFIG_PPC64 unsigned long amr; +#endif - if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI))) + if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x) && + unlikely(!(regs->msr & MSR_RI))) unrecoverable_exception(regs); BUG_ON(regs->msr & MSR_PR); BUG_ON(!FULL_REGS(regs)); + /* + * CT_WARN_ON comes here via program_check_exception, + * so avoid recursion. + */ + if (TRAP(regs) != 0x700) + CT_WARN_ON(ct_state() == CONTEXT_USER); +#ifdef CONFIG_PPC64 amr = kuap_get_and_check_amr(); +#endif - if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) { - clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp); + if (unlikely(current_thread_info()->flags & _TIF_EMULATE_STACK_STORE)) { + clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags); ret = 1; } local_irq_save(flags); - if (regs->softe == IRQS_ENABLED) { + if (!arch_irq_disabled_regs(regs)) { /* Returning to a kernel context with local irqs enabled. */ WARN_ON_ONCE(!(regs->msr & MSR_EE)); again: if (IS_ENABLED(CONFIG_PREEMPT)) { /* Return to preemptible kernel context */ - if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) { + if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) { if (preempt_count() == 0) preempt_schedule_irq(); } } - if (unlikely(!prep_irq_for_enabled_exit(true))) { - /* - * Can't local_irq_restore to replay if we were in - * interrupt context. Must replay directly. - */ - if (irqs_disabled_flags(flags)) { - replay_soft_interrupts(); - } else { - local_irq_restore(flags); - local_irq_save(flags); - } - /* Took an interrupt, may have more exit work to do. */ + if (unlikely(!prep_irq_for_enabled_exit(true, !irqs_disabled_flags(flags)))) goto again; - } } else { /* Returning to a kernel context with local irqs disabled.
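The BUG_ON(regs->softe != IRQS_ENABLED) checks become BUG_ON(arch_irq_disabled_regs(regs)) so the same C builds on 32-bit, which has no softe field. A plausible sketch of the predicate; the 64-bit branch reflects how softe is used elsewhere in this diff, while the 32-bit branch is an assumption that MSR[EE] stands in for the missing soft mask:

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
#ifdef CONFIG_PPC64
        /* Lazy soft-masking: the interrupted context's soft-mask
         * state was saved in regs->softe. */
        return regs->softe & IRQS_DISABLED;
#else
        /* No soft mask on 32-bit: hard MSR[EE] is the only state. */
        return !(regs->msr & MSR_EE);
#endif
}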
*/ __hard_EE_RI_disable(); +#ifdef CONFIG_PPC64 if (regs->msr & MSR_EE) local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; +#endif } @@ -434,7 +503,9 @@ again: * which would cause Read-After-Write stalls. Hence, we take the AMR * value from the check above. */ +#ifdef CONFIG_PPC64 kuap_kernel_restore(regs, amr); +#endif return ret; } diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 5b69a6a72a0e..c00214a4355c 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -25,6 +25,7 @@ #include <linux/pci.h> #include <linux/iommu.h> #include <linux/sched.h> +#include <linux/debugfs.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/iommu.h> @@ -38,6 +39,47 @@ #define DBG(...) +#ifdef CONFIG_IOMMU_DEBUGFS +static int iommu_debugfs_weight_get(void *data, u64 *val) +{ + struct iommu_table *tbl = data; + *val = bitmap_weight(tbl->it_map, tbl->it_size); + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n"); + +static void iommu_debugfs_add(struct iommu_table *tbl) +{ + char name[10]; + struct dentry *liobn_entry; + + sprintf(name, "%08lx", tbl->it_index); + liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir); + + debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight); + debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size); + debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift); + debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start); + debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end); + debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels); + debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size); +} + +static void iommu_debugfs_del(struct iommu_table *tbl) +{ + char name[10]; + struct dentry *liobn_entry; + + sprintf(name, "%08lx", tbl->it_index); + liobn_entry = debugfs_lookup(name, iommu_debugfs_dir); + if (liobn_entry) + debugfs_remove(liobn_entry); +} +#else +static void iommu_debugfs_add(struct iommu_table *tbl){} +static void iommu_debugfs_del(struct iommu_table *tbl){} +#endif + static int novmerge; static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int); @@ -725,6 +767,8 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, welcomed = 1; } + iommu_debugfs_add(tbl); + return tbl; } @@ -744,6 +788,8 @@ static void iommu_table_free(struct kref *kref) return; } + iommu_debugfs_del(tbl); + iommu_table_release_pages(tbl); /* verify that table contains no entries */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index cc7a6271b6b4..d71fd10a1dd4 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -54,6 +54,7 @@ #include <linux/pgtable.h> #include <linux/uaccess.h> +#include <asm/interrupt.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/cache.h> @@ -65,6 +66,7 @@ #include <asm/livepatch.h> #include <asm/asm-prototypes.h> #include <asm/hw_irq.h> +#include <asm/softirq_stack.h> #ifdef CONFIG_PPC64 #include <asm/paca.h> @@ -269,6 +271,31 @@ again: } } +#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP) +static inline void replay_soft_interrupts_irqrestore(void) +{ + unsigned long kuap_state = get_kuap(); + + /* + * Check if anything calls local_irq_enable/restore() when KUAP is + * disabled (user access enabled). 
We handle that case here by saving + * and re-locking AMR but we shouldn't get here in the first place, + * hence the warning. + */ + kuap_check_amr(); + + if (kuap_state != AMR_KUAP_BLOCKED) + set_kuap(AMR_KUAP_BLOCKED); + + replay_soft_interrupts(); + + if (kuap_state != AMR_KUAP_BLOCKED) + set_kuap(kuap_state); +} +#else +#define replay_soft_interrupts_irqrestore() replay_soft_interrupts() +#endif + notrace void arch_local_irq_restore(unsigned long mask) { unsigned char irq_happened; @@ -332,7 +359,7 @@ notrace void arch_local_irq_restore(unsigned long mask) irq_soft_mask_set(IRQS_ALL_DISABLED); trace_hardirqs_off(); - replay_soft_interrupts(); + replay_soft_interrupts_irqrestore(); local_paca->irq_happened = 0; trace_hardirqs_on(); @@ -644,8 +671,6 @@ void __do_irq(struct pt_regs *regs) { unsigned int irq; - irq_enter(); - trace_irq_entry(regs); /* @@ -665,11 +690,9 @@ void __do_irq(struct pt_regs *regs) generic_handle_irq(irq); trace_irq_exit(regs); - - irq_exit(); } -void do_IRQ(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ) { struct pt_regs *old_regs = set_irq_regs(regs); void *cursp, *irqsp, *sirqsp; diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index 9f3e133b57b7..11f0cae086ed 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -17,22 +17,15 @@ #include <linux/irq_work.h> #include <linux/extable.h> #include <linux/ftrace.h> +#include <linux/memblock.h> +#include <asm/interrupt.h> #include <asm/machdep.h> #include <asm/mce.h> #include <asm/nmi.h> +#include <asm/asm-prototypes.h> -static DEFINE_PER_CPU(int, mce_nest_count); -static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event); - -/* Queue for delayed MCE events. */ -static DEFINE_PER_CPU(int, mce_queue_count); -static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue); - -/* Queue for delayed MCE UE events. */ -static DEFINE_PER_CPU(int, mce_ue_count); -static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], - mce_ue_event_queue); +#include "setup.h" static void machine_check_process_queued_event(struct irq_work *work); static void machine_check_ue_irq_work(struct irq_work *work); @@ -103,9 +96,10 @@ void save_mce_event(struct pt_regs *regs, long handled, struct mce_error_info *mce_err, uint64_t nip, uint64_t addr, uint64_t phys_addr) { - int index = __this_cpu_inc_return(mce_nest_count) - 1; - struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]); + int index = local_paca->mce_info->mce_nest_count++; + struct machine_check_event *mce; + mce = &local_paca->mce_info->mce_event[index]; /* * Return if we don't have enough space to log mce event. * mce_nest_count may go beyond MAX_MC_EVT but that's ok, @@ -191,7 +185,7 @@ void save_mce_event(struct pt_regs *regs, long handled, */ int get_mce_event(struct machine_check_event *mce, bool release) { - int index = __this_cpu_read(mce_nest_count) - 1; + int index = local_paca->mce_info->mce_nest_count - 1; struct machine_check_event *mc_evt; int ret = 0; @@ -201,7 +195,7 @@ int get_mce_event(struct machine_check_event *mce, bool release) /* Check if we have MCE info to process. */ if (index < MAX_MC_EVT) { - mc_evt = this_cpu_ptr(&mce_event[index]); + mc_evt = &local_paca->mce_info->mce_event[index]; /* Copy the event structure and release the original */ if (mce) *mce = *mc_evt; @@ -211,7 +205,7 @@ int get_mce_event(struct machine_check_event *mce, bool release) } /* Decrement the count to free the slot. 
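For the iommu debugfs hunk above: assuming iommu_debugfs_dir is a top-level "iommu" directory created elsewhere in the same series (the parent-directory setup is not part of this hunk, so it is sketched here from the debugfs API), each table registered through iommu_init_table() gains a per-LIOBN directory such as /sys/kernel/debug/iommu/<liobn>/weight:

#include <linux/debugfs.h>
#include <linux/init.h>

/* Assumed companion to the hunk above: create the parent directory
 * the per-table entries hang off. The name and initcall level are
 * guesses for illustration only. */
static struct dentry *iommu_debugfs_dir;

static int __init iommu_debugfs_init(void)
{
        iommu_debugfs_dir = debugfs_create_dir("iommu", NULL);
        return 0;
}
arch_initcall(iommu_debugfs_init);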
*/ if (release) - __this_cpu_dec(mce_nest_count); + local_paca->mce_info->mce_nest_count--; return ret; } @@ -233,13 +227,14 @@ static void machine_check_ue_event(struct machine_check_event *evt) { int index; - index = __this_cpu_inc_return(mce_ue_count) - 1; + index = local_paca->mce_info->mce_ue_count++; /* If queue is full, just return for now. */ if (index >= MAX_MC_EVT) { - __this_cpu_dec(mce_ue_count); + local_paca->mce_info->mce_ue_count--; return; } - memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt)); + memcpy(&local_paca->mce_info->mce_ue_event_queue[index], + evt, sizeof(*evt)); /* Queue work to process this event later. */ irq_work_queue(&mce_ue_event_irq_work); @@ -256,13 +251,14 @@ void machine_check_queue_event(void) if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) return; - index = __this_cpu_inc_return(mce_queue_count) - 1; + index = local_paca->mce_info->mce_queue_count++; /* If queue is full, just return for now. */ if (index >= MAX_MC_EVT) { - __this_cpu_dec(mce_queue_count); + local_paca->mce_info->mce_queue_count--; return; } - memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt)); + memcpy(&local_paca->mce_info->mce_event_queue[index], + &evt, sizeof(evt)); /* Queue irq work to process this event later. */ irq_work_queue(&mce_event_process_work); @@ -289,9 +285,9 @@ static void machine_process_ue_event(struct work_struct *work) int index; struct machine_check_event *evt; - while (__this_cpu_read(mce_ue_count) > 0) { - index = __this_cpu_read(mce_ue_count) - 1; - evt = this_cpu_ptr(&mce_ue_event_queue[index]); + while (local_paca->mce_info->mce_ue_count > 0) { + index = local_paca->mce_info->mce_ue_count - 1; + evt = &local_paca->mce_info->mce_ue_event_queue[index]; blocking_notifier_call_chain(&mce_notifier_list, 0, evt); #ifdef CONFIG_MEMORY_FAILURE /* @@ -304,7 +300,7 @@ static void machine_process_ue_event(struct work_struct *work) */ if (evt->error_type == MCE_ERROR_TYPE_UE) { if (evt->u.ue_error.ignore_event) { - __this_cpu_dec(mce_ue_count); + local_paca->mce_info->mce_ue_count--; continue; } @@ -320,7 +316,7 @@ static void machine_process_ue_event(struct work_struct *work) "was generated\n"); } #endif - __this_cpu_dec(mce_ue_count); + local_paca->mce_info->mce_ue_count--; } } /* @@ -338,17 +334,17 @@ static void machine_check_process_queued_event(struct irq_work *work) * For now just print it to console. * TODO: log this error event to FSP or nvram. */ - while (__this_cpu_read(mce_queue_count) > 0) { - index = __this_cpu_read(mce_queue_count) - 1; - evt = this_cpu_ptr(&mce_event_queue[index]); + while (local_paca->mce_info->mce_queue_count > 0) { + index = local_paca->mce_info->mce_queue_count - 1; + evt = &local_paca->mce_info->mce_event_queue[index]; if (evt->error_type == MCE_ERROR_TYPE_UE && evt->u.ue_error.ignore_event) { - __this_cpu_dec(mce_queue_count); + local_paca->mce_info->mce_queue_count--; continue; } machine_check_print_event_info(evt, false, false); - __this_cpu_dec(mce_queue_count); + local_paca->mce_info->mce_queue_count--; } } @@ -588,15 +584,9 @@ EXPORT_SYMBOL_GPL(machine_check_print_event_info); * * regs->nip and regs->msr contain srr0 and srr1.
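The thread running through these mce.c hunks: a machine check can arrive in real mode, where this_cpu_ptr() arithmetic on the per-cpu area's virtual address can be unsafe, while local_paca is always reachable through r13; mce_init() below accordingly bounds the memblock allocation so the buffers stay real-mode addressable. Condensed to the new access pattern (the helper is hypothetical):

/* Hypothetical condensation of save_mce_event()'s slot claim above:
 * all state hangs off local_paca, valid in real and virtual mode. */
static struct machine_check_event *mce_claim_slot(void)
{
        int index = local_paca->mce_info->mce_nest_count++;

        if (index >= MAX_MC_EVT)
                return NULL;    /* queue full; the count stays elevated,
                                   matching the existing overflow note */
        return &local_paca->mce_info->mce_event[index];
}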
*/ -long notrace machine_check_early(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early) { long handled = 0; - u8 ftrace_enabled = this_cpu_get_ftrace_enabled(); - - this_cpu_set_ftrace_enabled(0); - /* Do not use nmi_enter/exit for pseries hpte guest */ - if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR)) - nmi_enter(); hv_nmi_check_nonrecoverable(regs); @@ -606,11 +596,6 @@ long notrace machine_check_early(struct pt_regs *regs) if (ppc_md.machine_check_early) handled = ppc_md.machine_check_early(regs); - if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR)) - nmi_exit(); - - this_cpu_set_ftrace_enabled(ftrace_enabled); - return handled; } @@ -722,7 +707,7 @@ long hmi_handle_debugtrig(struct pt_regs *regs) /* * Return values: */ -long hmi_exception_realmode(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode) { int ret; @@ -741,3 +726,24 @@ long hmi_exception_realmode(struct pt_regs *regs) return 1; } + +void __init mce_init(void) +{ + struct mce_info *mce_info; + u64 limit; + int i; + + limit = min(ppc64_bolted_size(), ppc64_rma_size); + for_each_possible_cpu(i) { + mce_info = memblock_alloc_try_nid(sizeof(*mce_info), + __alignof__(*mce_info), + MEMBLOCK_LOW_LIMIT, + limit, cpu_to_node(i)); + if (!mce_info) + goto err; + paca_ptrs[i]->mce_info = mce_info; + } + return; +err: + panic("Failed to allocate memory for MCE event data\n"); +} diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 69bfe96884e2..7f7cdbeacd1a 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -142,29 +142,10 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op) } /* - * emulate_step() requires insn to be emulated as - * second parameter. Load register 'r4' with the - * instruction. - */ -void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr) -{ - /* addis r4,0,(insn)@h */ - patch_instruction((struct ppc_inst *)addr, - ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) | - ((val >> 16) & 0xffff))); - addr++; - - /* ori r4,r4,(insn)@l */ - patch_instruction((struct ppc_inst *)addr, - ppc_inst(PPC_INST_ORI | ___PPC_RA(4) | - ___PPC_RS(4) | (val & 0xffff))); -} - -/* * Generate instructions to load provided immediate 64-bit value * to register 'reg' and patch these instructions at 'addr'. 
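patch_imm64_load_insns(), defined just below, composes a 64-bit immediate with the standard five-instruction sequence; only the first step (lis reg,(op)@highest) is visible in the hunk, the remainder is the usual idiom and assumed here. The 16-bit fields it patches in:

#include <linux/types.h>

/* Split val into the @highest/@higher/@h/@l fields consumed by:
 *   lis reg,@highest ; ori reg,reg,@higher ;
 *   rldicr reg,reg,32,31 ; oris reg,reg,@h ; ori reg,reg,@l
 * (sequence assumed, see lead-in). */
static void imm64_fields(u64 val, u16 field[4])
{
        field[0] = (val >> 48) & 0xffff;        /* @highest */
        field[1] = (val >> 32) & 0xffff;        /* @higher  */
        field[2] = (val >> 16) & 0xffff;        /* @h       */
        field[3] = val & 0xffff;                /* @l       */
}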
*/ -void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr) +static void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr) { /* lis reg,(op)@highest */ patch_instruction((struct ppc_inst *)addr, diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 2b555997b295..001e90cd8948 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1699,3 +1699,13 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl); + + +static int __init discover_phbs(void) +{ + if (ppc_md.discover_phbs) + ppc_md.discover_phbs(); + + return 0; +} +core_initcall(discover_phbs); diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index e99b7c547d7e..61571ae23953 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -443,46 +443,6 @@ void *pci_traverse_device_nodes(struct device_node *start, } EXPORT_SYMBOL_GPL(pci_traverse_device_nodes); -static struct pci_dn *pci_dn_next_one(struct pci_dn *root, - struct pci_dn *pdn) -{ - struct list_head *next = pdn->child_list.next; - - if (next != &pdn->child_list) - return list_entry(next, struct pci_dn, list); - - while (1) { - if (pdn == root) - return NULL; - - next = pdn->list.next; - if (next != &pdn->parent->child_list) - break; - - pdn = pdn->parent; - } - - return list_entry(next, struct pci_dn, list); -} - -void *traverse_pci_dn(struct pci_dn *root, - void *(*fn)(struct pci_dn *, void *), - void *data) -{ - struct pci_dn *pdn = root; - void *ret; - - /* Only scan the child nodes */ - for (pdn = pci_dn_next_one(root, pdn); pdn; - pdn = pci_dn_next_one(root, pdn)) { - ret = fn(pdn, data); - if (ret) - return ret; - } - - return NULL; -} - static void *add_pdn(struct device_node *dn, void *data) { struct pci_controller *hose = data; @@ -521,28 +481,6 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb) pci_traverse_device_nodes(dn, add_pdn, phb); } -/** - * pci_devs_phb_init - Initialize phbs and pci devs under them. - * - * This routine walks over all phb's (pci-host bridges) on the - * system, and sets up assorted pci-related structures - * (including pci info in the device node structs) for each - * pci device found underneath. This routine runs once, - * early in the boot sequence. - */ -static int __init pci_devs_phb_init(void) -{ - struct pci_controller *phb, *tmp; - - /* This must be done first so the device nodes have valid pci info! 
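discover_phbs() runs as a core_initcall, i.e. before the PCI core begins scanning busses, and subsumes per-platform initcalls such as the pci_devs_phb_init() being removed from pci_dn.c here; platforms opt in through the new ppc_md.discover_phbs hook. A hedged sketch of a platform hookup (machine and function names invented for illustration):

/* In a platform's setup file -- all names here are illustrative. */
static void __init example_discover_phbs(void)
{
        /* walk the device tree and set up a pci_controller for each
         * PHB node, as the platform's own initcall did before */
}

define_machine(example) {
        .name           = "example",
        .discover_phbs  = example_discover_phbs,
};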
*/ - list_for_each_entry_safe(phb, tmp, &hose_list, list_node) - pci_devs_phb_init_dynamic(phb); - - return 0; -} - -core_initcall(pci_devs_phb_init); - static void pci_dev_pdn_setup(struct pci_dev *pdev) { struct pci_dn *pdn; diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index a66f435dabbf..3231c2df9e26 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -41,6 +41,7 @@ #include <linux/pkeys.h> #include <linux/seq_buf.h> +#include <asm/interrupt.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/mmu.h> @@ -659,11 +660,10 @@ static void do_break_handler(struct pt_regs *regs) } } -void do_break (struct pt_regs *regs, unsigned long address, - unsigned long error_code) +DEFINE_INTERRUPT_HANDLER(do_break) { current->thread.trap_nr = TRAP_HWBKPT; - if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, + if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr, 11, SIGSEGV) == NOTIFY_STOP) return; @@ -681,7 +681,7 @@ void do_break (struct pt_regs *regs, unsigned long address, do_break_handler(regs); /* Deliver the signal to userspace */ - force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address); + force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)regs->dar); } #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ @@ -1670,7 +1670,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, /* Copy registers */ sp -= sizeof(struct pt_regs); childregs = (struct pt_regs *) sp; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(childregs, 0, sizeof(struct pt_regs)); childregs->gpr[1] = sp + sizeof(struct pt_regs); @@ -2047,6 +2047,9 @@ static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p, unsigned long stack_page; unsigned long cpu = task_cpu(p); + if (!paca_ptrs) + return 0; + stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE; if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) return 1; @@ -2176,7 +2179,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack, * See if this is an exception frame. * We look for the "regshere" marker in the current frame. 
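do_break() above is one of many handlers this series moves behind wrapper macros, which is why its body now takes only regs and reads DAR/DSISR from the frame. The shape of the generator in asm/interrupt.h, approximately (a sketch of the macro from the same series, details elided):

#define DEFINE_INTERRUPT_HANDLER(func)                                  \
static __always_inline void ____##func(struct pt_regs *regs);          \
                                                                        \
void func(struct pt_regs *regs)                                         \
{                                                                       \
        struct interrupt_state state;                                   \
                                                                        \
        interrupt_enter_prepare(regs, &state); /* irq reconcile etc. */ \
        ____##func(regs);                      /* the body in the diff */\
        interrupt_exit_prepare(regs, &state);                           \
}                                                                       \
                                                                        \
static __always_inline void ____##func(struct pt_regs *regs)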
*/ - if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) + if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS) && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { struct pt_regs *regs = (struct pt_regs *) (sp + STACK_FRAME_OVERHEAD); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index ae3c41730367..9a4797d1d40d 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -707,7 +707,7 @@ static void __init save_fscr_to_task(void) init_task.thread.fscr = mfspr(SPRN_FSCR); } #else -static inline void save_fscr_to_task(void) {}; +static inline void save_fscr_to_task(void) {} #endif diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index e9d4eb6144e1..ccf77b985c8f 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1331,14 +1331,10 @@ static void __init prom_check_platform_support(void) if (prop_len > sizeof(vec)) prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n", prop_len); - prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", - &vec, sizeof(vec)); - for (i = 0; i < sizeof(vec); i += 2) { - prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 - , vec[i] - , vec[i + 1]); - prom_parse_platform_support(vec[i], vec[i + 1], - &supported); + prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec)); + for (i = 0; i < prop_len; i += 2) { + prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]); + prom_parse_platform_support(vec[i], vec[i + 1], &supported); } } diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index 3d44b73adb83..4f3d4ff3728c 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -262,8 +262,6 @@ long do_syscall_trace_enter(struct pt_regs *regs) { u32 flags; - user_exit(); - flags = READ_ONCE(current_thread_info()->flags) & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE); @@ -340,8 +338,6 @@ void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, step); - - user_enter(); } void __init pt_regs_check(void); diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 71f38e9248be..bee984b1887b 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -64,6 +64,7 @@ #include <asm/mmu_context.h> #include <asm/cpu_has_feature.h> #include <asm/kasan.h> +#include <asm/mce.h> #include "setup.h" @@ -237,18 +238,17 @@ static int show_cpuinfo(struct seq_file *m, void *v) maj = (pvr >> 8) & 0xFF; min = pvr & 0xFF; - seq_printf(m, "processor\t: %lu\n", cpu_id); - seq_printf(m, "cpu\t\t: "); + seq_printf(m, "processor\t: %lu\ncpu\t\t: ", cpu_id); if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name) - seq_printf(m, "%s", cur_cpu_spec->cpu_name); + seq_puts(m, cur_cpu_spec->cpu_name); else seq_printf(m, "unknown (%08x)", pvr); if (cpu_has_feature(CPU_FTR_ALTIVEC)) - seq_printf(m, ", altivec supported"); + seq_puts(m, ", altivec supported"); - seq_printf(m, "\n"); + seq_putc(m, '\n'); #ifdef CONFIG_TAU if (cpu_has_feature(CPU_FTR_TAU)) { @@ -327,7 +327,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ), (loops_per_jiffy / (5000 / HZ)) % 100); - seq_printf(m, "\n"); + seq_putc(m, '\n'); /* If this is the last cpu, print the summary */ if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) @@ 
-938,6 +938,7 @@ void __init setup_arch(char **cmdline_p) exc_lvl_early_init(); emergency_stack_init(); + mce_init(); smp_release_cpus(); initmem_init(); diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h index 2dd0d9cb5a20..84058bbc8fe9 100644 --- a/arch/powerpc/kernel/setup.h +++ b/arch/powerpc/kernel/setup.h @@ -14,31 +14,31 @@ void irqstack_early_init(void); #ifdef CONFIG_PPC32 void setup_power_save(void); #else -static inline void setup_power_save(void) { }; +static inline void setup_power_save(void) { } #endif #if defined(CONFIG_PPC64) && defined(CONFIG_SMP) void check_smt_enabled(void); #else -static inline void check_smt_enabled(void) { }; +static inline void check_smt_enabled(void) { } #endif #if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP) void setup_tlb_core_data(void); #else -static inline void setup_tlb_core_data(void) { }; +static inline void setup_tlb_core_data(void) { } #endif #if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_BOOKE) || defined(CONFIG_40x) void exc_lvl_early_init(void); #else -static inline void exc_lvl_early_init(void) { }; +static inline void exc_lvl_early_init(void) { } #endif #if defined(CONFIG_PPC64) || defined(CONFIG_VMAP_STACK) void emergency_stack_init(void); #else -static inline void emergency_stack_init(void) { }; +static inline void emergency_stack_init(void) { } #endif #ifdef CONFIG_PPC64 @@ -55,7 +55,7 @@ extern unsigned long spr_default_dscr; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE void kvm_cma_reserve(void); #else -static inline void kvm_cma_reserve(void) { }; +static inline void kvm_cma_reserve(void) { } #endif #ifdef CONFIG_TAU diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index c28e949cc222..560ed8b975e7 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -67,6 +67,7 @@ #include <asm/kup.h> #include <asm/early_ioremap.h> #include <asm/pgalloc.h> +#include <asm/asm-prototypes.h> #include "setup.h" @@ -258,7 +259,7 @@ static void cpu_ready_for_interrupts(void) unsigned long spr_default_dscr = 0; -void __init record_spr_defaults(void) +static void __init record_spr_defaults(void) { if (early_cpu_has_feature(CPU_FTR_DSCR)) spr_default_dscr = mfspr(SPRN_DSCR); @@ -1008,7 +1009,7 @@ void rfi_flush_enable(bool enable) rfi_flush = enable; } -void entry_flush_enable(bool enable) +static void entry_flush_enable(bool enable) { if (enable) { do_entry_flush_fixups(enabled_flush_types); @@ -1020,7 +1021,7 @@ void entry_flush_enable(bool enable) entry_flush = enable; } -void uaccess_flush_enable(bool enable) +static void uaccess_flush_enable(bool enable) { if (enable) { do_uaccess_flush_fixups(enabled_flush_types); diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 53782aa60ade..9ded046edb0e 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -282,8 +282,6 @@ static void do_signal(struct task_struct *tsk) void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { - user_exit(); - if (thread_info_flags & _TIF_UPROBE) uprobe_notify_resume(regs); @@ -299,8 +297,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) tracehook_notify_resume(regs); rseq_handle_notify_resume(NULL, regs); } - - user_enter(); } static unsigned long get_tm_stackpointer(struct task_struct *tsk) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 934cbdf6dd10..75ee918a120a 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -929,8 
+929,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, regs->gpr[3] = ksig->sig; regs->gpr[4] = (unsigned long) sc; regs->nip = (unsigned long)ksig->ka.sa.sa_handler; - /* enter the signal handler in big-endian mode */ + /* enter the signal handler in native-endian mode */ regs->msr &= ~MSR_LE; + regs->msr |= (MSR_KERNEL & MSR_LE); return 0; failed: diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 9e2246e80efd..5a4d59a1070d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -34,6 +34,7 @@ #include <linux/random.h> #include <linux/stackprotector.h> #include <linux/pgtable.h> +#include <linux/clockchips.h> #include <asm/ptrace.h> #include <linux/atomic.h> @@ -576,7 +577,7 @@ void tick_broadcast(const struct cpumask *mask) #endif #ifdef CONFIG_DEBUGGER -void debugger_ipi_callback(struct pt_regs *regs) +static void debugger_ipi_callback(struct pt_regs *regs) { debugger_ipi(regs); } diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index d36c6391eaf5..16ff0399a257 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -59,57 +59,64 @@ unsigned long compat_sys_mmap2(unsigned long addr, size_t len, /* * long long munging: * The 32 bit ABI passes long longs in an odd even register pair. + * High and low parts are swapped depending on endian mode, + * so define a macro (similar to mips linux32) to handle that. */ +#ifdef __LITTLE_ENDIAN__ +#define merge_64(low, high) ((u64)high << 32) | low +#else +#define merge_64(high, low) ((u64)high << 32) | low +#endif compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, - u32 reg6, u32 poshi, u32 poslo) + u32 reg6, u32 pos1, u32 pos2) { - return ksys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo); + return ksys_pread64(fd, ubuf, count, merge_64(pos1, pos2)); } compat_ssize_t compat_sys_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count, - u32 reg6, u32 poshi, u32 poslo) + u32 reg6, u32 pos1, u32 pos2) { - return ksys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo); + return ksys_pwrite64(fd, ubuf, count, merge_64(pos1, pos2)); } -compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count) +compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u32 count) { - return ksys_readahead(fd, ((loff_t)offhi << 32) | offlo, count); + return ksys_readahead(fd, merge_64(offset1, offset2), count); } asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4, - unsigned long high, unsigned long low) + unsigned long len1, unsigned long len2) { - return ksys_truncate(path, (high << 32) | low); + return ksys_truncate(path, merge_64(len1, len2)); } -asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo, - u32 lenhi, u32 lenlo) +asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, + u32 len1, u32 len2) { - return ksys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo, - ((loff_t)lenhi << 32) | lenlo); + return ksys_fallocate(fd, mode, ((loff_t)offset1 << 32) | offset2, + merge_64(len1, len2)); } -asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high, - unsigned long low) +asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, + unsigned long len2) { - return ksys_ftruncate(fd, (high << 32) | low); + return ksys_ftruncate(fd, merge_64(len1, len2)); } -long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 
offset_low, +long ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2, size_t len, int advice) { - return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low, len, + return ksys_fadvise64_64(fd, merge_64(offset1, offset2), len, advice); } asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags, - unsigned offset_hi, unsigned offset_lo, - unsigned nbytes_hi, unsigned nbytes_lo) + unsigned offset1, unsigned offset2, + unsigned nbytes1, unsigned nbytes2) { - loff_t offset = ((loff_t)offset_hi << 32) | offset_lo; - loff_t nbytes = ((loff_t)nbytes_hi << 32) | nbytes_lo; + loff_t offset = merge_64(offset1, offset2); + loff_t nbytes = merge_64(nbytes1, nbytes2); return ksys_sync_file_range(fd, offset, nbytes, flags); } diff --git a/arch/powerpc/kernel/syscalls/Makefile b/arch/powerpc/kernel/syscalls/Makefile index 27b48954808d..9e3be295dbba 100644 --- a/arch/powerpc/kernel/syscalls/Makefile +++ b/arch/powerpc/kernel/syscalls/Makefile @@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') -syscall := $(srctree)/$(src)/syscall.tbl +syscall := $(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh systbl := $(srctree)/$(src)/syscalltbl.sh @@ -22,31 +22,31 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_offset_$(basetarget))' syshdr_abis_unistd_32 := common,nospu,32 -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abis_unistd_64 := common,nospu,64 -$(uapi)/unistd_64.h: $(syscall) $(syshdr) +$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) systbl_abis_syscall_table_32 := common,nospu,32 systbl_abi_syscall_table_32 := 32 -$(kapi)/syscall_table_32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_64 := common,nospu,64 systbl_abi_syscall_table_64 := 64 -$(kapi)/syscall_table_64.h: $(syscall) $(systbl) +$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_c32 := common,nospu,32 systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_spu := common,spu systbl_abi_syscall_table_spu := spu -$(kapi)/syscall_table_spu.h: $(syscall) $(systbl) +$(kapi)/syscall_table_spu.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h unistd_64.h @@ -55,9 +55,10 @@ kapisyshdr-y += syscall_table_32.h \ syscall_table_c32.h \ syscall_table_spu.h -targets += $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index f744eb5cba88..0b2480cf3e47 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -9,9 +9,7 @@ # 0 nospu restart_syscall sys_restart_syscall 1 nospu exit sys_exit -2 32 fork ppc_fork sys_fork -2 64 fork sys_fork -2 spu fork sys_ni_syscall +2 nospu fork sys_fork 3 common read sys_read 4 common write sys_write 5 common open 
sys_open compat_sys_open @@ -160,9 +158,7 @@ 119 32 sigreturn sys_sigreturn compat_sys_sigreturn 119 64 sigreturn sys_ni_syscall 119 spu sigreturn sys_ni_syscall -120 32 clone ppc_clone sys_clone -120 64 clone sys_clone -120 spu clone sys_ni_syscall +120 nospu clone sys_clone 121 common setdomainname sys_setdomainname 122 common uname sys_newuname 123 common modify_ldt sys_ni_syscall @@ -244,9 +240,7 @@ 186 spu sendfile sys_sendfile64 187 common getpmsg sys_ni_syscall 188 common putpmsg sys_ni_syscall -189 32 vfork ppc_vfork sys_vfork -189 64 vfork sys_vfork -189 spu vfork sys_ni_syscall +189 nospu vfork sys_vfork 190 common ugetrlimit sys_getrlimit compat_sys_getrlimit 191 common readahead sys_readahead compat_sys_readahead 192 32 mmap2 sys_mmap2 compat_sys_mmap2 @@ -322,9 +316,7 @@ 248 32 clock_nanosleep sys_clock_nanosleep_time32 248 64 clock_nanosleep sys_clock_nanosleep 248 spu clock_nanosleep sys_clock_nanosleep -249 32 swapcontext ppc_swapcontext compat_sys_swapcontext -249 64 swapcontext sys_swapcontext -249 spu swapcontext sys_ni_syscall +249 nospu swapcontext sys_swapcontext compat_sys_swapcontext 250 common tgkill sys_tgkill 251 32 utimes sys_utimes_time32 251 64 utimes sys_utimes @@ -522,12 +514,11 @@ 432 common fsmount sys_fsmount 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open -435 32 clone3 ppc_clone3 sys_clone3 -435 64 clone3 sys_clone3 -435 spu clone3 sys_ni_syscall +435 nospu clone3 sys_clone3 436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c index 0b4694b8d248..6c31af7f4fa8 100644 --- a/arch/powerpc/kernel/tau_6xx.c +++ b/arch/powerpc/kernel/tau_6xx.c @@ -22,6 +22,7 @@ #include <linux/delay.h> #include <linux/workqueue.h> +#include <asm/interrupt.h> #include <asm/io.h> #include <asm/reg.h> #include <asm/nvram.h> @@ -100,16 +101,13 @@ static void TAUupdate(int cpu) * with interrupts disabled */ -void TAUException(struct pt_regs * regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(TAUException) { int cpu = smp_processor_id(); - irq_enter(); tau[cpu].interrupts++; TAUupdate(cpu); - - irq_exit(); } #endif /* CONFIG_TAU_INT */ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 67feb3524460..b67d93a609a2 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -53,9 +53,11 @@ #include <linux/of_clk.h> #include <linux/suspend.h> #include <linux/sched/cputime.h> +#include <linux/sched/clock.h> #include <linux/processor.h> #include <asm/trace.h> +#include <asm/interrupt.h> #include <asm/io.h> #include <asm/nvram.h> #include <asm/cache.h> @@ -570,7 +572,7 @@ void arch_irq_work_raise(void) * timer_interrupt - gets called when the decrementer overflows, * with interrupts disabled. 
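A quick aside on the long-long munging hunks in sys_ppc32.c above: under the 32-bit ABI a 64-bit argument arrives split across two consecutive GPRs, and which register holds the high half depends on the endian mode, which is exactly what the merge_64() parameter order absorbs. A standalone, hedged illustration follows: the split/merge harness is invented for the sketch, and __LITTLE_ENDIAN__ mirrors the kernel's reliance on the powerpc compiler's predefined macro.

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel macro (outer parentheses added): only the
 * parameter order changes with endianness, never the body. */
#ifdef __LITTLE_ENDIAN__
#define merge_64(low, high)	(((uint64_t)(high) << 32) | (low))
#else
#define merge_64(high, low)	(((uint64_t)(high) << 32) | (low))
#endif

int main(void)
{
	uint64_t pos = UINT64_C(0x100000000);	/* needs both halves */

	/* How a 32-bit caller splits 'pos' into the register pair: */
#ifdef __LITTLE_ENDIAN__
	uint32_t r1 = (uint32_t)pos, r2 = (uint32_t)(pos >> 32);
#else
	uint32_t r1 = (uint32_t)(pos >> 32), r2 = (uint32_t)pos;
#endif
	/* Either way, the kernel-side merge recovers the same value. */
	printf("%#llx\n", (unsigned long long)merge_64(r1, r2));
	return 0;
}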
*/ -void timer_interrupt(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt) { struct clock_event_device *evt = this_cpu_ptr(&decrementers); u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); @@ -609,7 +611,7 @@ void timer_interrupt(struct pt_regs *regs) #endif old_regs = set_irq_regs(regs); - irq_enter(); + trace_timer_interrupt_entry(regs); if (test_irq_work_pending()) { @@ -634,7 +636,7 @@ void timer_interrupt(struct pt_regs *regs) } trace_timer_interrupt_exit(regs); - irq_exit(); + set_irq_regs(old_regs); } EXPORT_SYMBOL(timer_interrupt); @@ -1030,6 +1032,7 @@ void __init time_init(void) tick_setup_hrtimer_broadcast(); of_clk_init(NULL); + enable_sched_clock_irqtime(); } /* diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 3ec7b443fe6b..1583fd1c6010 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -41,6 +41,7 @@ #include <asm/emulated_ops.h> #include <linux/uaccess.h> #include <asm/debugfs.h> +#include <asm/interrupt.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/rtas.h> @@ -342,8 +343,8 @@ static bool exception_common(int signr, struct pt_regs *regs, int code, show_signal_msg(signr, regs, code, addr); - if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) - local_irq_enable(); + if (arch_irqs_disabled()) + interrupt_cond_local_irq_enable(regs); current->thread.trap_nr = code; @@ -430,16 +431,10 @@ nonrecoverable: regs->msr &= ~MSR_RI; #endif } - -void system_reset_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception) { unsigned long hsrr0, hsrr1; bool saved_hsrrs = false; - u8 ftrace_enabled = this_cpu_get_ftrace_enabled(); - - this_cpu_set_ftrace_enabled(0); - - nmi_enter(); /* * System reset can interrupt code where HSRRs are live and MSR[RI]=1. @@ -503,19 +498,20 @@ out: die("Unrecoverable nested System Reset", regs, SIGABRT); #endif /* Must die if the interrupt is not recoverable */ - if (!(regs->msr & MSR_RI)) + if (!(regs->msr & MSR_RI)) { + /* For the reason explained in die_mce, nmi_exit before die */ + nmi_exit(); die("Unrecoverable System Reset", regs, SIGABRT); + } if (saved_hsrrs) { mtspr(SPRN_HSRR0, hsrr0); mtspr(SPRN_HSRR1, hsrr1); } - nmi_exit(); - - this_cpu_set_ftrace_enabled(ftrace_enabled); - /* What should we do here? We could issue a shutdown or hard reset. */ + + return 0; } /* @@ -788,23 +784,33 @@ int machine_check_generic(struct pt_regs *regs) } #endif /* everything else */ -void machine_check_exception(struct pt_regs *regs) +void die_mce(const char *str, struct pt_regs *regs, long err) { - int recover = 0; - /* - * BOOK3S_64 does not call this handler as a non-maskable interrupt - * (it uses its own early real-mode handler to handle the MCE proper - * and then raises irq_work to call this handler when interrupts are - * enabled). - * - * This is silly. The BOOK3S_64 should just call a different function - * rather than expecting semantics to magically change. Something - * like 'non_nmi_machine_check_exception()', perhaps? + * The machine check wants to kill the interrupted context, but + * do_exit() checks for in_interrupt() and panics in that case, so + * exit the irq/nmi before calling die. 
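The new comment above ("exit the irq/nmi before calling die") is the crux of the die_mce() refactor: die() can end in do_exit(), and do_exit() refuses to run from interrupt context. Trimmed and quoted from memory out of kernel/exit.c, so treat it as a sketch:

void do_exit(long code)
{
	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	/* ...normal task teardown continues... */
}

Calling nmi_exit() or irq_exit() first means in_interrupt() reads false by the time die() runs, so a fatal machine check kills the offending task instead of tripping this panic.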
*/ - const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64); + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) + irq_exit(); + else + nmi_exit(); + die(str, regs, err); +} - if (nmi) nmi_enter(); +/* + * BOOK3S_64 does not call this handler as a non-maskable interrupt + * (it uses its own early real-mode handler to handle the MCE proper + * and then raises irq_work to call this handler when interrupts are + * enabled). + */ +#ifdef CONFIG_PPC_BOOK3S_64 +DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception) +#else +DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception) +#endif +{ + int recover = 0; __this_cpu_inc(irq_stat.mce_exceptions); @@ -830,21 +836,21 @@ void machine_check_exception(struct pt_regs *regs) if (check_io_access(regs)) goto bail; - if (nmi) nmi_exit(); - - die("Machine check", regs, SIGBUS); + die_mce("Machine check", regs, SIGBUS); +bail: /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - die("Unrecoverable Machine check", regs, SIGBUS); + die_mce("Unrecoverable Machine check", regs, SIGBUS); +#ifdef CONFIG_PPC_BOOK3S_64 return; - -bail: - if (nmi) nmi_exit(); +#else + return 0; +#endif } -void SMIException(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */ { die("System Management Interrupt", regs, SIGABRT); } @@ -1030,12 +1036,11 @@ static void p9_hmi_special_emu(struct pt_regs *regs) } #endif /* CONFIG_VSX */ -void handle_hmi_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(handle_hmi_exception) { struct pt_regs *old_regs; old_regs = set_irq_regs(regs); - irq_enter(); #ifdef CONFIG_VSX /* Real mode flagged P9 special emu is needed */ @@ -1055,46 +1060,42 @@ void handle_hmi_exception(struct pt_regs *regs) if (ppc_md.handle_hmi_exception) ppc_md.handle_hmi_exception(regs); - irq_exit(); set_irq_regs(old_regs); } -void unknown_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(unknown_exception) { - enum ctx_state prev_state = exception_enter(); - printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", regs->nip, regs->msr, regs->trap); _exception(SIGTRAP, regs, TRAP_UNK, 0); - - exception_exit(prev_state); } -void instruction_breakpoint_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception) { - enum ctx_state prev_state = exception_enter(); + printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", + regs->nip, regs->msr, regs->trap); + _exception(SIGTRAP, regs, TRAP_UNK, 0); +} + +DEFINE_INTERRUPT_HANDLER(instruction_breakpoint_exception) +{ if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) - goto bail; + return; if (debugger_iabr_match(regs)) - goto bail; + return; _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); - -bail: - exception_exit(prev_state); } -void RunModeException(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(RunModeException) { _exception(SIGTRAP, regs, TRAP_UNK, 0); } -void single_step_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(single_step_exception) { - enum ctx_state prev_state = exception_enter(); - clear_single_step(regs); clear_br_trace(regs); @@ -1103,16 +1104,12 @@ void single_step_exception(struct pt_regs *regs) if (notify_die(DIE_SSTEP, "single_step", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) - goto bail; + return; if (debugger_sstep(regs)) - goto bail; + return; _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); - -bail: - exception_exit(prev_state); } -NOKPROBE_SYMBOL(single_step_exception); /* * After we have successfully emulated an instruction, we have to @@ -1436,9 +1433,8 @@ static int emulate_math(struct 
pt_regs *regs) static inline int emulate_math(struct pt_regs *regs) { return -1; } #endif -void program_check_exception(struct pt_regs *regs) +static void do_program_check(struct pt_regs *regs) { - enum ctx_state prev_state = exception_enter(); unsigned int reason = get_reason(regs); /* We can now get here via a FP Unavailable exception if the core @@ -1447,22 +1443,22 @@ void program_check_exception(struct pt_regs *regs) if (reason & REASON_FP) { /* IEEE FP exception */ parse_fpe(regs); - goto bail; + return; } if (reason & REASON_TRAP) { unsigned long bugaddr; /* Debugger is first in line to stop recursive faults in * rcu_lock, notify_die, or atomic_notifier_call_chain */ if (debugger_bpt(regs)) - goto bail; + return; if (kprobe_handler(regs)) - goto bail; + return; /* trap exception */ if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) - goto bail; + return; bugaddr = regs->nip; /* @@ -1474,10 +1470,10 @@ void program_check_exception(struct pt_regs *regs) if (!(regs->msr & MSR_PR) && /* not user-mode */ report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) { regs->nip += 4; - goto bail; + return; } _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); - goto bail; + return; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (reason & REASON_TM) { @@ -1498,7 +1494,7 @@ void program_check_exception(struct pt_regs *regs) */ if (user_mode(regs)) { _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); - goto bail; + return; } else { printk(KERN_EMERG "Unexpected TM Bad Thing exception " "at %lx (msr 0x%lx) tm_scratch=%llx\n", @@ -1518,9 +1514,7 @@ void program_check_exception(struct pt_regs *regs) if (!user_mode(regs)) goto sigill; - /* We restore the interrupt state now */ - if (!arch_irq_disabled_regs(regs)) - local_irq_enable(); + interrupt_cond_local_irq_enable(regs); /* (reason & REASON_ILLEGAL) would be the obvious thing here, * but there seems to be a hardware bug on the 405GP (RevD) @@ -1531,7 +1525,7 @@ void program_check_exception(struct pt_regs *regs) * pattern to occurrences etc. -dgibson 31/Mar/2003 */ if (!emulate_math(regs)) - goto bail; + return; /* Try to emulate it if we should. */ if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { @@ -1539,10 +1533,10 @@ void program_check_exception(struct pt_regs *regs) case 0: regs->nip += 4; emulate_single_step(regs); - goto bail; + return; case -EFAULT: _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); - goto bail; + return; } } @@ -1552,34 +1546,31 @@ sigill: else _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); -bail: - exception_exit(prev_state); } -NOKPROBE_SYMBOL(program_check_exception); + +DEFINE_INTERRUPT_HANDLER(program_check_exception) +{ + do_program_check(regs); +} /* * This occurs when running in hypervisor mode on POWER6 or later * and an illegal instruction is encountered. 
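Several hunks in this file swap the open-coded "We restore the interrupt state now" idiom for interrupt_cond_local_irq_enable(). The helper lives in the new <asm/interrupt.h>; recalled from memory, so a sketch, it is simply the old pattern under one name:

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}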
*/ -void emulation_assist_interrupt(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt) { regs->msr |= REASON_ILLEGAL; - program_check_exception(regs); + do_program_check(regs); } -NOKPROBE_SYMBOL(emulation_assist_interrupt); -void alignment_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(alignment_exception) { - enum ctx_state prev_state = exception_enter(); int sig, code, fixed = 0; unsigned long reason; - /* We restore the interrupt state now */ - if (!arch_irq_disabled_regs(regs)) - local_irq_enable(); + interrupt_cond_local_irq_enable(regs); reason = get_reason(regs); - if (reason & REASON_BOUNDARY) { sig = SIGBUS; code = BUS_ADRALN; @@ -1587,7 +1578,7 @@ void alignment_exception(struct pt_regs *regs) } if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) - goto bail; + return; /* we don't implement logging of alignment exceptions */ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) @@ -1597,7 +1588,7 @@ void alignment_exception(struct pt_regs *regs) /* skip over emulated instruction */ regs->nip += inst_length(reason); emulate_single_step(regs); - goto bail; + return; } /* Operand address was bad */ @@ -1612,13 +1603,10 @@ bad: if (user_mode(regs)) _exception(sig, regs, code, regs->dar); else - bad_page_fault(regs, regs->dar, sig); - -bail: - exception_exit(prev_state); + bad_page_fault(regs, sig); } -void StackOverflow(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(StackOverflow) { pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n", current->comm, task_pid_nr(current), regs->gpr[1]); @@ -1627,46 +1615,33 @@ void StackOverflow(struct pt_regs *regs) panic("kernel stack overflow"); } -void stack_overflow_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(stack_overflow_exception) { - enum ctx_state prev_state = exception_enter(); - die("Kernel stack overflow", regs, SIGSEGV); - - exception_exit(prev_state); } -void kernel_fp_unavailable_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception) { - enum ctx_state prev_state = exception_enter(); - printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " "%lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); - - exception_exit(prev_state); } -void altivec_unavailable_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(altivec_unavailable_exception) { - enum ctx_state prev_state = exception_enter(); - if (user_mode(regs)) { /* A user program has executed an altivec instruction, but this kernel doesn't support altivec. 
*/ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); - goto bail; + return; } printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " "%lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); - -bail: - exception_exit(prev_state); } -void vsx_unavailable_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(vsx_unavailable_exception) { if (user_mode(regs)) { /* A user program has executed a vsx instruction, @@ -1697,7 +1672,7 @@ static void tm_unavailable(struct pt_regs *regs) die("Unrecoverable TM Unavailable Exception", regs, SIGABRT); } -void facility_unavailable_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception) { static char *facility_strings[] = { [FSCR_FP_LG] = "FPU", @@ -1737,9 +1712,7 @@ void facility_unavailable_exception(struct pt_regs *regs) die("Unexpected facility unavailable exception", regs, SIGABRT); } - /* We restore the interrupt state now */ - if (!arch_irq_disabled_regs(regs)) - local_irq_enable(); + interrupt_cond_local_irq_enable(regs); if (status == FSCR_DSCR_LG) { /* @@ -1817,7 +1790,7 @@ out: #ifdef CONFIG_PPC_TRANSACTIONAL_MEM -void fp_unavailable_tm(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(fp_unavailable_tm) { /* Note: This does not handle any kind of FP laziness. */ @@ -1850,7 +1823,7 @@ void fp_unavailable_tm(struct pt_regs *regs) tm_recheckpoint(&current->thread); } -void altivec_unavailable_tm(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(altivec_unavailable_tm) { /* See the comments in fp_unavailable_tm(). This function operates * the same way. @@ -1865,7 +1838,7 @@ void altivec_unavailable_tm(struct pt_regs *regs) current->thread.used_vr = 1; } -void vsx_unavailable_tm(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(vsx_unavailable_tm) { /* See the comments in fp_unavailable_tm(). This works similarly, * though we're loading both FP and VEC registers in here. @@ -1890,11 +1863,40 @@ void vsx_unavailable_tm(struct pt_regs *regs) } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ -void performance_monitor_exception(struct pt_regs *regs) +#ifdef CONFIG_PPC64 +DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi); +DEFINE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi) { __this_cpu_inc(irq_stat.pmu_irqs); perf_irq(regs); + + return 0; +} +#endif + +DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async); +DEFINE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async) +{ + __this_cpu_inc(irq_stat.pmu_irqs); + + perf_irq(regs); +} + +DEFINE_INTERRUPT_HANDLER_RAW(performance_monitor_exception) +{ + /* + * On 64-bit, if perf interrupts hit in a local_irq_disable + * (soft-masked) region, we consider them as NMIs. This is required to + * prevent hash faults on user addresses when reading callchains (and + * looks better from an irq tracing perspective). 
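For readers outside powerpc: arch_irq_disabled_regs() tests the soft-mask state the interrupted context was in, not MSR[EE]. A rough 64-bit sketch, with the field and flag names recalled from asm/hw_irq.h and therefore an assumption:

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	/* softe snapshots the irq soft-mask at interrupt entry */
	return regs->softe & IRQS_DISABLED;
}

When that is true for a perf interrupt, the RAW dispatcher below routes it through the NMI flavour, so it cannot recurse into code that believed interrupts were off.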
+ */ + if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs))) + performance_monitor_exception_nmi(regs); + else + performance_monitor_exception_async(regs); + + return 0; } #ifdef CONFIG_PPC_ADV_DEBUG_REGS @@ -1957,8 +1959,10 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status) mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); } -void DebugException(struct pt_regs *regs, unsigned long debug_status) +DEFINE_INTERRUPT_HANDLER(DebugException) { + unsigned long debug_status = regs->dsisr; + current->thread.debug.dbsr = debug_status; /* Hack alert: On BookE, Branch Taken stops on the branch itself, while @@ -2024,11 +2028,10 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status) } else handle_debug(regs, debug_status); } -NOKPROBE_SYMBOL(DebugException); #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ #ifdef CONFIG_ALTIVEC -void altivec_assist_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(altivec_assist_exception) { int err; @@ -2062,9 +2065,10 @@ void altivec_assist_exception(struct pt_regs *regs) #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_FSL_BOOKE -void CacheLockingException(struct pt_regs *regs, unsigned long address, - unsigned long error_code) +DEFINE_INTERRUPT_HANDLER(CacheLockingException) { + unsigned long error_code = regs->dsisr; + /* We treat cache locking instructions from the user * as priv ops, in the future we could try to do * something smarter @@ -2076,7 +2080,7 @@ void CacheLockingException(struct pt_regs *regs, unsigned long address, #endif /* CONFIG_FSL_BOOKE */ #ifdef CONFIG_SPE -void SPEFloatingPointException(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException) { extern int do_spe_mathemu(struct pt_regs *regs); unsigned long spefscr; @@ -2084,9 +2088,7 @@ void SPEFloatingPointException(struct pt_regs *regs) int code = FPE_FLTUNK; int err; - /* We restore the interrupt state now */ - if (!arch_irq_disabled_regs(regs)) - local_irq_enable(); + interrupt_cond_local_irq_enable(regs); flush_spe_to_thread(current); @@ -2128,14 +2130,12 @@ void SPEFloatingPointException(struct pt_regs *regs) return; } -void SPEFloatingPointRoundException(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException) { extern int speround_handler(struct pt_regs *regs); int err; - /* We restore the interrupt state now */ - if (!arch_irq_disabled_regs(regs)) - local_irq_enable(); + interrupt_cond_local_irq_enable(regs); preempt_disable(); if (regs->msr & MSR_SPE) @@ -2170,13 +2170,12 @@ void SPEFloatingPointRoundException(struct pt_regs *regs) * in the MSR is 0. This indicates that SRR0/1 are live, and that * we therefore lost state by taking this exception. */ -void unrecoverable_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(unrecoverable_exception) { pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n", regs->trap, regs->nip, regs->msr); die("Unrecoverable exception", regs, SIGABRT); } -NOKPROBE_SYMBOL(unrecoverable_exception); #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x) /* @@ -2190,7 +2189,7 @@ void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) return; } -void WatchdogException(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(WatchdogException) /* XXX NMI? async? */ { printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); WatchdogHandler(regs); @@ -2201,13 +2200,12 @@ void WatchdogException(struct pt_regs *regs) * We enter here if we discover during exception entry that we are * running in supervisor mode with a userspace value in the stack pointer. 
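All of the handler conversions in this file lean on the DEFINE_INTERRUPT_HANDLER* family from <asm/interrupt.h>. A deliberately simplified sketch of the ASYNC flavour follows; the real macro additionally manages soft-mask reconciliation, context tracking and NMI accounting, so take this as the shape, not the implementation:

#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
	static void ____##func(struct pt_regs *regs);			\
									\
	void func(struct pt_regs *regs)					\
	{								\
		irq_enter();		/* generic async entry work */	\
		____##func(regs);	/* body supplied at use site */	\
		irq_exit();		/* generic async exit work */	\
	}								\
									\
	static void ____##func(struct pt_regs *regs)

This wrapper is why TAUException and timer_interrupt could drop their hand-rolled irq_enter()/irq_exit() pairs, and why handlers such as DebugException and CacheLockingException now take only regs and read their auxiliary status out of regs->dsisr instead of extra arguments.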
*/ -void kernel_bad_stack(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(kernel_bad_stack) { printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", regs->gpr[1], regs->nip); die("Bad kernel stack pointer", regs, SIGABRT); } -NOKPROBE_SYMBOL(kernel_bad_stack); void __init trap_init(void) { diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index af3c15a1d41e..c9a8f4781a10 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -26,7 +26,9 @@ #include <linux/delay.h> #include <linux/smp.h> +#include <asm/interrupt.h> #include <asm/paca.h> +#include <asm/nmi.h> /* * The powerpc watchdog ensures that each CPU is able to service timers. @@ -247,16 +249,17 @@ static void watchdog_timer_interrupt(int cpu) watchdog_smp_panic(cpu, tb); } -void soft_nmi_interrupt(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt) { unsigned long flags; int cpu = raw_smp_processor_id(); u64 tb; - if (!cpumask_test_cpu(cpu, &wd_cpus_enabled)) - return; + /* should only arrive from kernel, with irqs disabled */ + WARN_ON_ONCE(!arch_irq_disabled_regs(regs)); - nmi_enter(); + if (!cpumask_test_cpu(cpu, &wd_cpus_enabled)) + return 0; __this_cpu_inc(irq_stat.soft_nmi_irqs); @@ -265,7 +268,7 @@ void soft_nmi_interrupt(struct pt_regs *regs) wd_smp_lock(&flags); if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) { wd_smp_unlock(&flags); - goto out; + return 0; } set_cpu_stuck(cpu, tb); @@ -289,8 +292,7 @@ void soft_nmi_interrupt(struct pt_regs *regs) if (wd_panic_timeout_tb < 0x7fffffff) mtspr(SPRN_DEC, wd_panic_timeout_tb); -out: - nmi_exit(); + return 0; } static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c index d0e459bb2f05..9842e33533df 100644 --- a/arch/powerpc/kexec/elf_64.c +++ b/arch/powerpc/kexec/elf_64.c @@ -102,7 +102,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr); } - fdt_size = fdt_totalsize(initial_boot_params) * 2; + fdt_size = kexec_fdt_totalsize_ppc64(image); fdt = kmalloc(fdt_size, GFP_KERNEL); if (!fdt) { pr_err("Not enough memory for the device tree.\n"); diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index c69bcf9b547a..02b9e4d0dc40 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -21,6 +21,7 @@ #include <linux/memblock.h> #include <linux/slab.h> #include <linux/vmalloc.h> +#include <asm/setup.h> #include <asm/drmem.h> #include <asm/kexec_ranges.h> #include <asm/crashdump-ppc64.h> @@ -926,6 +927,40 @@ out: } /** + * kexec_fdt_totalsize_ppc64 - Return the estimated size needed to setup FDT + * for kexec/kdump kernel. + * @image: kexec image being loaded. + * + * Returns the estimated size needed for kexec/kdump kernel FDT. + */ +unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image) +{ + unsigned int fdt_size; + u64 usm_entries; + + /* + * The below estimate more than accounts for a typical kexec case where + * the additional space is to accommodate things like kexec cmdline, + * chosen node with properties for initrd start & end addresses and + * a property to indicate kexec boot.. + */ + fdt_size = fdt_totalsize(initial_boot_params) + (2 * COMMAND_LINE_SIZE); + if (image->type != KEXEC_TYPE_CRASH) + return fdt_size; + + /* + * For kdump kernel, also account for linux,usable-memory and + * linux,drconf-usable-memory properties. 
Get an approximation of the + * number of usable memory entries and use it for FDT size estimation. + */ + usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) + + (2 * (resource_size(&crashk_res) / drmem_lmb_size()))); + fdt_size += (unsigned int)(usm_entries * sizeof(u64)); + + return fdt_size; +} + +/** * setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel * being loaded. * @image: kexec image being loaded. diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 549591d9aaa2..e45644657d49 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -54,6 +54,7 @@ config KVM_BOOK3S_32 select KVM select KVM_BOOK3S_32_HANDLER select KVM_BOOK3S_PR_POSSIBLE + select PPC_FPU help Support running unmodified book3s_32 guest kernels in virtual machines on book3s_32 host processors. diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 38ea396a23d6..bb6773594cf8 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -27,6 +27,7 @@ #include <asm/cputable.h> #include <asm/pte-walk.h> +#include "book3s.h" #include "trace_hv.h" //#define DEBUG_RESIZE_HPT 1 @@ -590,7 +591,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu, } else { /* Call KVM generic code to do the slow-path check */ pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, - writing, &write_ok); + writing, &write_ok, NULL); if (is_error_noslot_pfn(pfn)) return -EFAULT; page = NULL; diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index bb35490400e9..e603de7ade52 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -822,7 +822,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, /* Call KVM generic code to do the slow-path check */ pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, - writing, upgrade_p); + writing, upgrade_p, NULL); if (is_error_noslot_pfn(pfn)) return -EFAULT; page = NULL; diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index b08cc15f31c7..fdb57be71aa6 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -61,10 +61,6 @@ #define SPRN_GQR6 918 #define SPRN_GQR7 919 -/* Book3S_32 defines mfsrin(v) - but that messes up our abstract * function pointers, so let's just disable the define. 
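To put the usm_entries estimate above into numbers, a back-of-the-envelope with purely illustrative figures (16 MB LMBs, 64 GB of RAM, a 1 GB crashkernel window):

u64 lmb_size    = 16ULL << 20;
u64 usm_entries = (64ULL << 30) / lmb_size		/* 4096 entries */
		+ 2 * ((1ULL << 30) / lmb_size);	/*  128 entries */
unsigned int extra = (unsigned int)(usm_entries * sizeof(u64));	/* 33792 bytes */

Even a generous estimate only adds a few tens of kilobytes on top of fdt_totalsize(), far tighter than the "fdt_totalsize() * 2" heuristic this series removes from elf_64.c above.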
*/ -#undef mfsrin - enum priv_level { PRIV_PROBLEM = 0, PRIV_SUPER = 1, diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 6f612d240392..13bad6bf4c95 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -53,6 +53,7 @@ #include <asm/cputable.h> #include <asm/cacheflush.h> #include <linux/uaccess.h> +#include <asm/interrupt.h> #include <asm/io.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> @@ -134,7 +135,7 @@ static inline bool nesting_enabled(struct kvm *kvm) } /* If set, the threads on each CPU core have to be in the same MMU mode */ -static bool no_mixing_hpt_and_radix; +static bool no_mixing_hpt_and_radix __read_mostly; static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); @@ -782,8 +783,24 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, return H_UNSUPPORTED_FLAG_START; if (value2 & DABRX_HYP) return H_P4; - vcpu->arch.dawr = value1; - vcpu->arch.dawrx = value2; + vcpu->arch.dawr0 = value1; + vcpu->arch.dawrx0 = value2; + return H_SUCCESS; + case H_SET_MODE_RESOURCE_SET_DAWR1: + if (!kvmppc_power8_compatible(vcpu)) + return H_P2; + if (!ppc_breakpoint_available()) + return H_P2; + if (!cpu_has_feature(CPU_FTR_DAWR1)) + return H_P2; + if (!vcpu->kvm->arch.dawr1_enabled) + return H_FUNCTION; + if (mflags) + return H_UNSUPPORTED_FLAG_START; + if (value2 & DABRX_HYP) + return H_P4; + vcpu->arch.dawr1 = value1; + vcpu->arch.dawrx1 = value2; return H_SUCCESS; case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: /* KVM does not support mflags=2 (AIL=2) */ @@ -1759,10 +1776,16 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, vcpu->arch.vcore->vtb); break; case KVM_REG_PPC_DAWR: - *val = get_reg_val(id, vcpu->arch.dawr); + *val = get_reg_val(id, vcpu->arch.dawr0); break; case KVM_REG_PPC_DAWRX: - *val = get_reg_val(id, vcpu->arch.dawrx); + *val = get_reg_val(id, vcpu->arch.dawrx0); + break; + case KVM_REG_PPC_DAWR1: + *val = get_reg_val(id, vcpu->arch.dawr1); + break; + case KVM_REG_PPC_DAWRX1: + *val = get_reg_val(id, vcpu->arch.dawrx1); break; case KVM_REG_PPC_CIABR: *val = get_reg_val(id, vcpu->arch.ciabr); @@ -1991,10 +2014,16 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, vcpu->arch.vcore->vtb = set_reg_val(id, *val); break; case KVM_REG_PPC_DAWR: - vcpu->arch.dawr = set_reg_val(id, *val); + vcpu->arch.dawr0 = set_reg_val(id, *val); break; case KVM_REG_PPC_DAWRX: - vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; + vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP; + break; + case KVM_REG_PPC_DAWR1: + vcpu->arch.dawr1 = set_reg_val(id, *val); + break; + case KVM_REG_PPC_DAWRX1: + vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP; break; case KVM_REG_PPC_CIABR: vcpu->arch.ciabr = set_reg_val(id, *val); @@ -2862,11 +2891,6 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm) return false; - /* Some POWER9 chips require all threads to be in the same MMU mode */ - if (no_mixing_hpt_and_radix && - kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm)) - return false; - if (n_threads < cip->max_subcore_threads) n_threads = cip->max_subcore_threads; if (!subcore_config_ok(cip->n_subcores + 1, n_threads)) @@ -2905,6 +2929,9 @@ static void prepare_threads(struct kvmppc_vcore *vc) for_each_runnable_thread(i, vcpu, vc) { if (signal_pending(vcpu->arch.run_task)) vcpu->arch.ret = -EINTR; + else if (no_mixing_hpt_and_radix && + kvm_is_radix(vc->kvm) != 
radix_enabled()) + vcpu->arch.ret = -EINVAL; else if (vcpu->arch.vpa.update_pending || vcpu->arch.slb_shadow.update_pending || vcpu->arch.dtl.update_pending) @@ -3110,7 +3137,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) int controlled_threads; int trap; bool is_power8; - bool hpt_on_radix; /* * Remove from the list any threads that have a signal pending @@ -3143,11 +3169,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) * this is a HPT guest on a radix host machine where the * CPU threads may not be in different MMU modes. */ - hpt_on_radix = no_mixing_hpt_and_radix && radix_enabled() && - !kvm_is_radix(vc->kvm); - if (((controlled_threads > 1) && - ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) || - (hpt_on_radix && vc->kvm->arch.threads_indep)) { + if ((controlled_threads > 1) && + ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { for_each_runnable_thread(i, vcpu, vc) { vcpu->arch.ret = -EBUSY; kvmppc_remove_runnable(vc, vcpu); @@ -3215,7 +3238,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S) && !cpu_has_feature(CPU_FTR_ARCH_300); - if (split > 1 || hpt_on_radix) { + if (split > 1) { sip = &split_info; memset(&split_info, 0, sizeof(split_info)); for (sub = 0; sub < core_info.n_subcores; ++sub) @@ -3237,13 +3260,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) split_info.subcore_size = subcore_size; } else { split_info.subcore_size = 1; - if (hpt_on_radix) { - /* Use the split_info for LPCR/LPIDR changes */ - split_info.lpcr_req = vc->lpcr; - split_info.lpidr_req = vc->kvm->arch.lpid; - split_info.host_lpcr = vc->kvm->arch.host_lpcr; - split_info.do_set = 1; - } } /* order writes to split_info before kvm_split_mode pointer */ @@ -3253,7 +3269,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) for (thr = 0; thr < controlled_threads; ++thr) { struct paca_struct *paca = paca_ptrs[pcpu + thr]; - paca->kvm_hstate.tid = thr; paca->kvm_hstate.napping = 0; paca->kvm_hstate.kvm_split_mode = sip; } @@ -3327,10 +3342,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) * When doing micro-threading, poke the inactive threads as well. * This gets them to the nap instruction after kvm_do_nap, * which reduces the time taken to unsplit later. - * For POWER9 HPT guest on radix host, we need all the secondary - * threads woken up so they can do the LPCR/LPIDR change. 
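Seen from userspace, the DAWR1 state added above travels over the ONE_REG interface. A hedged QEMU-style sketch: the function name is invented, error handling is elided, and KVM_REG_PPC_DAWR1/KVM_REG_PPC_DAWRX1 are the register IDs these hunks wire up.

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_second_watchpoint(int vcpu_fd, __u64 addr, __u64 ctrl)
{
	struct kvm_one_reg reg;

	reg.id   = KVM_REG_PPC_DAWR1;		/* watch address */
	reg.addr = (__u64)(unsigned long)&addr;
	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
		return -1;

	reg.id   = KVM_REG_PPC_DAWRX1;		/* match/control bits */
	reg.addr = (__u64)(unsigned long)&ctrl;
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

Note how kvmppc_h_set_mode() above gates the guest-visible hcall: it fails unless the host has CPU_FTR_DAWR1 and userspace has opted in by enabling dawr1_enabled.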
*/ - if (cmd_bit || hpt_on_radix) { + if (cmd_bit) { split_info.do_nap = 1; /* ask secondaries to nap when done */ for (thr = 1; thr < threads_per_subcore; ++thr) if (!(active & (1 << thr))) @@ -3391,24 +3404,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) cpu_relax(); ++loops; } - } else if (hpt_on_radix) { - /* Wait for all threads to have seen final sync */ - for (thr = 1; thr < controlled_threads; ++thr) { - struct paca_struct *paca = paca_ptrs[pcpu + thr]; - - while (paca->kvm_hstate.kvm_split_mode) { - HMT_low(); - barrier(); - } - HMT_medium(); - } + split_info.do_nap = 0; } - split_info.do_nap = 0; kvmppc_set_host_core(pcpu); + guest_exit_irqoff(); + local_irq_enable(); - guest_exit(); /* Let secondaries go back to the offline loop */ for (i = 0; i < controlled_threads; ++i) { @@ -3449,10 +3452,17 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, int trap; unsigned long host_hfscr = mfspr(SPRN_HFSCR); unsigned long host_ciabr = mfspr(SPRN_CIABR); - unsigned long host_dawr = mfspr(SPRN_DAWR0); - unsigned long host_dawrx = mfspr(SPRN_DAWRX0); + unsigned long host_dawr0 = mfspr(SPRN_DAWR0); + unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0); unsigned long host_psscr = mfspr(SPRN_PSSCR); unsigned long host_pidr = mfspr(SPRN_PID); + unsigned long host_dawr1 = 0; + unsigned long host_dawrx1 = 0; + + if (cpu_has_feature(CPU_FTR_DAWR1)) { + host_dawr1 = mfspr(SPRN_DAWR1); + host_dawrx1 = mfspr(SPRN_DAWRX1); + } /* * P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0, @@ -3489,8 +3499,12 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, mtspr(SPRN_SPURR, vcpu->arch.spurr); if (dawr_enabled()) { - mtspr(SPRN_DAWR0, vcpu->arch.dawr); - mtspr(SPRN_DAWRX0, vcpu->arch.dawrx); + mtspr(SPRN_DAWR0, vcpu->arch.dawr0); + mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0); + if (cpu_has_feature(CPU_FTR_DAWR1)) { + mtspr(SPRN_DAWR1, vcpu->arch.dawr1); + mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1); + } } mtspr(SPRN_CIABR, vcpu->arch.ciabr); mtspr(SPRN_IC, vcpu->arch.ic); @@ -3542,8 +3556,12 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG)); mtspr(SPRN_HFSCR, host_hfscr); mtspr(SPRN_CIABR, host_ciabr); - mtspr(SPRN_DAWR0, host_dawr); - mtspr(SPRN_DAWRX0, host_dawrx); + mtspr(SPRN_DAWR0, host_dawr0); + mtspr(SPRN_DAWRX0, host_dawrx0); + if (cpu_has_feature(CPU_FTR_DAWR1)) { + mtspr(SPRN_DAWR1, host_dawr1); + mtspr(SPRN_DAWRX1, host_dawrx1); + } mtspr(SPRN_PID, host_pidr); /* @@ -3595,6 +3613,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long host_tidr = mfspr(SPRN_TIDR); unsigned long host_iamr = mfspr(SPRN_IAMR); unsigned long host_amr = mfspr(SPRN_AMR); + unsigned long host_fscr = mfspr(SPRN_FSCR); s64 dec; u64 tb; int trap, save_pmu; @@ -3735,6 +3754,9 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, if (host_amr != vcpu->arch.amr) mtspr(SPRN_AMR, host_amr); + if (host_fscr != vcpu->arch.fscr) + mtspr(SPRN_FSCR, host_fscr); + msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX); store_fp_state(&vcpu->arch.fp); #ifdef CONFIG_ALTIVEC @@ -4173,7 +4195,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, kvmppc_clear_host_core(pcpu); - local_paca->kvm_hstate.tid = 0; local_paca->kvm_hstate.napping = 0; local_paca->kvm_hstate.kvm_split_mode = NULL; kvmppc_start_thread(vcpu, vc); @@ -4217,8 +4238,9 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, 
kvmppc_set_host_core(pcpu); + guest_exit_irqoff(); + local_irq_enable(); - guest_exit(); cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest); @@ -4358,15 +4380,11 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu) do { /* - * The early POWER9 chips that can't mix radix and HPT threads - * on the same core also need the workaround for the problem - * where the TLB would prefetch entries in the guest exit path - * for radix guests using the guest PIDR value and LPID 0. - * The workaround is in the old path (kvmppc_run_vcpu()) - * but not the new path (kvmhv_run_single_vcpu()). + * The TLB prefetch bug fixup is only in the kvmppc_run_vcpu + * path, which also handles hash and dependent threads mode. */ if (kvm->arch.threads_indep && kvm_is_radix(kvm) && - !no_mixing_hpt_and_radix) + !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) r = kvmhv_run_single_vcpu(vcpu, ~(u64)0, vcpu->arch.vcore->lpcr); else @@ -5599,6 +5617,26 @@ out: return ret; } +static int kvmhv_enable_dawr1(struct kvm *kvm) +{ + if (!cpu_has_feature(CPU_FTR_DAWR1)) + return -ENODEV; + + /* kvm == NULL means the caller is testing if the capability exists */ + if (kvm) + kvm->arch.dawr1_enabled = true; + return 0; +} + +static bool kvmppc_hash_v3_possible(void) +{ + if (radix_enabled() && no_mixing_hpt_and_radix) + return false; + + return cpu_has_feature(CPU_FTR_ARCH_300) && + cpu_has_feature(CPU_FTR_HVMODE); +} + static struct kvmppc_ops kvm_ops_hv = { .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, @@ -5642,6 +5680,8 @@ static struct kvmppc_ops kvm_ops_hv = { .store_to_eaddr = kvmhv_store_to_eaddr, .enable_svm = kvmhv_enable_svm, .svm_off = kvmhv_svm_off, + .enable_dawr1 = kvmhv_enable_dawr1, + .hash_v3_possible = kvmppc_hash_v3_possible, }; static int kvm_init_subcore_bitmap(void) diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 8053efdf7ea7..158d309b42a3 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -17,6 +17,7 @@ #include <asm/asm-prototypes.h> #include <asm/cputable.h> +#include <asm/interrupt.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/archrandom.h> @@ -277,8 +278,7 @@ void kvmhv_commence_exit(int trap) struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore; int ptid = local_paca->kvm_hstate.ptid; struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode; - int me, ee, i, t; - int cpu0; + int me, ee, i; /* Set our bit in the threads-exiting-guest map in the 0xff00 bits of vcore->entry_exit_map */ @@ -320,22 +320,6 @@ void kvmhv_commence_exit(int trap) if ((ee >> 8) == 0) kvmhv_interrupt_vcore(vc, ee); } - - /* - * On POWER9 when running a HPT guest on a radix host (sip != NULL), - * we have to interrupt inactive CPU threads to get them to - * restore the host LPCR value. - */ - if (sip->lpcr_req) { - if (cmpxchg(&sip->do_restore, 0, 1) == 0) { - vc = local_paca->kvm_hstate.kvm_vcore; - cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid; - for (t = 1; t < threads_per_core; ++t) { - if (sip->napped[t]) - kvmhv_rm_send_ipi(cpu0 + t); - } - } - } } struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv; @@ -667,95 +651,6 @@ void kvmppc_bad_interrupt(struct pt_regs *regs) panic("Bad KVM trap"); } -/* - * Functions used to switch LPCR HR and UPRT bits on all threads - * when entering and exiting HPT guests on a radix host. 
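The guest_exit_irqoff() + local_irq_enable() pairing that replaces bare guest_exit() in these hunks is about attribution: guest context tracking is left while interrupts are still hard-disabled, so an interrupt firing immediately afterwards is charged to the host rather than the guest. Condensed from include/linux/context_tracking.h with the vtime hooks elided, so a sketch:

static __always_inline void guest_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_exit(CONTEXT_GUEST);
	/* vtime accounting hooks elided in this sketch */
}

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}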
- */ - -#define PHASE_REALMODE 1 /* in real mode */ -#define PHASE_SET_LPCR 2 /* have set LPCR */ -#define PHASE_OUT_OF_GUEST 4 /* have finished executing in guest */ -#define PHASE_RESET_LPCR 8 /* have reset LPCR to host value */ - -#define ALL(p) (((p) << 24) | ((p) << 16) | ((p) << 8) | (p)) - -static void wait_for_sync(struct kvm_split_mode *sip, int phase) -{ - int thr = local_paca->kvm_hstate.tid; - - sip->lpcr_sync.phase[thr] |= phase; - phase = ALL(phase); - while ((sip->lpcr_sync.allphases & phase) != phase) { - HMT_low(); - barrier(); - } - HMT_medium(); -} - -void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip) -{ - int num_sets; - unsigned long rb, set; - - /* wait for every other thread to get to real mode */ - wait_for_sync(sip, PHASE_REALMODE); - - /* Set LPCR and LPIDR */ - mtspr(SPRN_LPCR, sip->lpcr_req); - mtspr(SPRN_LPID, sip->lpidr_req); - isync(); - - /* - * P10 will flush all the congruence class with a single tlbiel - */ - if (cpu_has_feature(CPU_FTR_ARCH_31)) - num_sets = 1; - else - num_sets = POWER9_TLB_SETS_RADIX; - - /* Invalidate the TLB on thread 0 */ - if (local_paca->kvm_hstate.tid == 0) { - sip->do_set = 0; - asm volatile("ptesync" : : : "memory"); - for (set = 0; set < num_sets; ++set) { - rb = TLBIEL_INVAL_SET_LPID + - (set << TLBIEL_INVAL_SET_SHIFT); - asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : : - "r" (rb), "r" (0)); - } - asm volatile("ptesync" : : : "memory"); - } - - /* indicate that we have done so and wait for others */ - wait_for_sync(sip, PHASE_SET_LPCR); - /* order read of sip->lpcr_sync.allphases vs. sip->do_set */ - smp_rmb(); -} - -/* - * Called when a thread that has been in the guest needs - * to reload the host LPCR value - but only on POWER9 when - * running a HPT guest on a radix host. - */ -void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip) -{ - /* we're out of the guest... */ - wait_for_sync(sip, PHASE_OUT_OF_GUEST); - - mtspr(SPRN_LPID, 0); - mtspr(SPRN_LPCR, sip->host_lpcr); - isync(); - - if (local_paca->kvm_hstate.tid == 0) { - sip->do_restore = 0; - smp_wmb(); /* order store of do_restore vs. 
phase */ - } - - wait_for_sync(sip, PHASE_RESET_LPCR); - smp_mb(); - local_paca->kvm_hstate.kvm_split_mode = NULL; -} - static void kvmppc_end_cede(struct kvm_vcpu *vcpu) { vcpu->arch.ceded = 0; diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 33b58549a9aa..0cd0e7aad588 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -33,8 +33,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) hr->dpdes = vc->dpdes; hr->hfscr = vcpu->arch.hfscr; hr->tb_offset = vc->tb_offset; - hr->dawr0 = vcpu->arch.dawr; - hr->dawrx0 = vcpu->arch.dawrx; + hr->dawr0 = vcpu->arch.dawr0; + hr->dawrx0 = vcpu->arch.dawrx0; hr->ciabr = vcpu->arch.ciabr; hr->purr = vcpu->arch.purr; hr->spurr = vcpu->arch.spurr; @@ -49,6 +49,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) hr->pidr = vcpu->arch.pid; hr->cfar = vcpu->arch.cfar; hr->ppr = vcpu->arch.ppr; + hr->dawr1 = vcpu->arch.dawr1; + hr->dawrx1 = vcpu->arch.dawrx1; } static void byteswap_pt_regs(struct pt_regs *regs) @@ -91,6 +93,8 @@ static void byteswap_hv_regs(struct hv_guest_state *hr) hr->pidr = swab64(hr->pidr); hr->cfar = swab64(hr->cfar); hr->ppr = swab64(hr->ppr); + hr->dawr1 = swab64(hr->dawr1); + hr->dawrx1 = swab64(hr->dawrx1); } static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap, @@ -138,6 +142,7 @@ static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) /* Don't let data address watchpoint match in hypervisor state */ hr->dawrx0 &= ~DAWRX_HYP; + hr->dawrx1 &= ~DAWRX_HYP; /* Don't let completed instruction address breakpt match in HV state */ if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) @@ -151,8 +156,8 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) vc->pcr = hr->pcr | PCR_MASK; vc->dpdes = hr->dpdes; vcpu->arch.hfscr = hr->hfscr; - vcpu->arch.dawr = hr->dawr0; - vcpu->arch.dawrx = hr->dawrx0; + vcpu->arch.dawr0 = hr->dawr0; + vcpu->arch.dawrx0 = hr->dawrx0; vcpu->arch.ciabr = hr->ciabr; vcpu->arch.purr = hr->purr; vcpu->arch.spurr = hr->spurr; @@ -167,6 +172,8 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) vcpu->arch.pid = hr->pidr; vcpu->arch.cfar = hr->cfar; vcpu->arch.ppr = hr->ppr; + vcpu->arch.dawr1 = hr->dawr1; + vcpu->arch.dawrx1 = hr->dawrx1; } void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, @@ -215,12 +222,51 @@ static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr) } } +static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu, + struct hv_guest_state *l2_hv, + struct pt_regs *l2_regs, + u64 hv_ptr, u64 regs_ptr) +{ + int size; + + if (kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv->version, + sizeof(l2_hv->version))) + return -1; + + if (kvmppc_need_byteswap(vcpu)) + l2_hv->version = swab64(l2_hv->version); + + size = hv_guest_state_size(l2_hv->version); + if (size < 0) + return -1; + + return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) || + kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs, + sizeof(struct pt_regs)); +} + +static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu, + struct hv_guest_state *l2_hv, + struct pt_regs *l2_regs, + u64 hv_ptr, u64 regs_ptr) +{ + int size; + + size = hv_guest_state_size(l2_hv->version); + if (size < 0) + return -1; + + return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) || + kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs, + sizeof(struct pt_regs)); +} + long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) { long int err, 
r; struct kvm_nested_guest *l2; struct pt_regs l2_regs, saved_l1_regs; - struct hv_guest_state l2_hv, saved_l1_hv; + struct hv_guest_state l2_hv = {0}, saved_l1_hv; struct kvmppc_vcore *vc = vcpu->arch.vcore; u64 hv_ptr, regs_ptr; u64 hdec_exp; @@ -235,17 +281,15 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) hv_ptr = kvmppc_get_gpr(vcpu, 4); regs_ptr = kvmppc_get_gpr(vcpu, 5); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); - err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv, - sizeof(struct hv_guest_state)) || - kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs, - sizeof(struct pt_regs)); + err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs, + hv_ptr, regs_ptr); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (err) return H_PARAMETER; if (kvmppc_need_byteswap(vcpu)) byteswap_hv_regs(&l2_hv); - if (l2_hv.version != HV_GUEST_STATE_VERSION) + if (l2_hv.version > HV_GUEST_STATE_VERSION) return H_P2; if (kvmppc_need_byteswap(vcpu)) @@ -325,10 +369,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) byteswap_pt_regs(&l2_regs); } vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); - err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv, - sizeof(struct hv_guest_state)) || - kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs, - sizeof(struct pt_regs)); + err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs, + hv_ptr, regs_ptr); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (err) return H_AUTHORITY; diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index cd9995ee8441..5e634db4809b 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -52,11 +52,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #define STACK_SLOT_PID (SFS-32) #define STACK_SLOT_IAMR (SFS-40) #define STACK_SLOT_CIABR (SFS-48) -#define STACK_SLOT_DAWR (SFS-56) -#define STACK_SLOT_DAWRX (SFS-64) +#define STACK_SLOT_DAWR0 (SFS-56) +#define STACK_SLOT_DAWRX0 (SFS-64) #define STACK_SLOT_HFSCR (SFS-72) #define STACK_SLOT_AMR (SFS-80) #define STACK_SLOT_UAMOR (SFS-88) +#define STACK_SLOT_DAWR1 (SFS-96) +#define STACK_SLOT_DAWRX1 (SFS-104) /* the following is used by the P9 short path */ #define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */ @@ -85,19 +87,6 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline) RFI_TO_KERNEL kvmppc_call_hv_entry: -BEGIN_FTR_SECTION - /* On P9, do LPCR setting, if necessary */ - ld r3, HSTATE_SPLIT_MODE(r13) - cmpdi r3, 0 - beq 46f - lwz r4, KVM_SPLIT_DO_SET(r3) - cmpwi r4, 0 - beq 46f - bl kvmhv_p9_set_lpcr - nop -46: -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) - ld r4, HSTATE_KVM_VCPU(r13) bl kvmppc_hv_entry @@ -361,11 +350,11 @@ kvm_secondary_got_guest: LOAD_REG_ADDR(r6, decrementer_max) ld r6, 0(r6) mtspr SPRN_HDEC, r6 +BEGIN_FTR_SECTION /* and set per-LPAR registers, if doing dynamic micro-threading */ ld r6, HSTATE_SPLIT_MODE(r13) cmpdi r6, 0 beq 63f -BEGIN_FTR_SECTION ld r0, KVM_SPLIT_RPR(r6) mtspr SPRN_RPR, r0 ld r0, KVM_SPLIT_PMMAR(r6) @@ -373,16 +362,7 @@ BEGIN_FTR_SECTION ld r0, KVM_SPLIT_LDBAR(r6) mtspr SPRN_LDBAR, r0 isync -FTR_SECTION_ELSE - /* On P9 we use the split_info for coordinating LPCR changes */ - lwz r4, KVM_SPLIT_DO_SET(r6) - cmpwi r4, 0 - beq 1f - mr r3, r6 - bl kvmhv_p9_set_lpcr - nop -1: -ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 63: /* Order load of vcpu after load of vcore */ lwsync @@ -452,19 +432,15 @@ kvm_no_guest: mtcr r5 blr -53: HMT_LOW +53: +BEGIN_FTR_SECTION + HMT_LOW ld r5, HSTATE_KVM_VCORE(r13) cmpdi r5, 0 bne 60f ld r3, 
HSTATE_SPLIT_MODE(r13) cmpdi r3, 0 beq kvm_no_guest - lwz r0, KVM_SPLIT_DO_SET(r3) - cmpwi r0, 0 - bne kvmhv_do_set - lwz r0, KVM_SPLIT_DO_RESTORE(r3) - cmpwi r0, 0 - bne kvmhv_do_restore lbz r0, KVM_SPLIT_DO_NAP(r3) cmpwi r0, 0 beq kvm_no_guest @@ -472,24 +448,19 @@ kvm_no_guest: b kvm_unsplit_nap 60: HMT_MEDIUM b kvm_secondary_got_guest +FTR_SECTION_ELSE + HMT_LOW + ld r5, HSTATE_KVM_VCORE(r13) + cmpdi r5, 0 + beq kvm_no_guest + HMT_MEDIUM + b kvm_secondary_got_guest +ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 54: li r0, KVM_HWTHREAD_IN_KVM stb r0, HSTATE_HWTHREAD_STATE(r13) b kvm_no_guest -kvmhv_do_set: - /* Set LPCR, LPIDR etc. on P9 */ - HMT_MEDIUM - bl kvmhv_p9_set_lpcr - nop - b kvm_no_guest - -kvmhv_do_restore: - HMT_MEDIUM - bl kvmhv_p9_restore_lpcr - nop - b kvm_no_guest - /* * Here the primary thread is trying to return the core to * whole-core mode, so we need to nap. @@ -527,7 +498,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) /* Set kvm_split_mode.napped[tid] = 1 */ ld r3, HSTATE_SPLIT_MODE(r13) li r0, 1 - lbz r4, HSTATE_TID(r13) + lhz r4, PACAPACAINDEX(r13) + clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */ addi r4, r4, KVM_SPLIT_NAPPED stbx r0, r3, r4 /* Check the do_nap flag again after setting napped[] */ @@ -711,10 +683,16 @@ BEGIN_FTR_SECTION mfspr r7, SPRN_DAWRX0 mfspr r8, SPRN_IAMR std r5, STACK_SLOT_CIABR(r1) - std r6, STACK_SLOT_DAWR(r1) - std r7, STACK_SLOT_DAWRX(r1) + std r6, STACK_SLOT_DAWR0(r1) + std r7, STACK_SLOT_DAWRX0(r1) std r8, STACK_SLOT_IAMR(r1) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) +BEGIN_FTR_SECTION + mfspr r6, SPRN_DAWR1 + mfspr r7, SPRN_DAWRX1 + std r6, STACK_SLOT_DAWR1(r1) + std r7, STACK_SLOT_DAWRX1(r1) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1) mfspr r5, SPRN_AMR std r5, STACK_SLOT_AMR(r1) @@ -801,10 +779,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) lbz r5, 0(r5) cmpdi r5, 0 beq 1f - ld r5, VCPU_DAWR(r4) - ld r6, VCPU_DAWRX(r4) + ld r5, VCPU_DAWR0(r4) + ld r6, VCPU_DAWRX0(r4) mtspr SPRN_DAWR0, r5 mtspr SPRN_DAWRX0, r6 +BEGIN_FTR_SECTION + ld r5, VCPU_DAWR1(r4) + ld r6, VCPU_DAWRX1(r4) + mtspr SPRN_DAWR1, r5 + mtspr SPRN_DAWRX1, r6 +END_FTR_SECTION_IFSET(CPU_FTR_DAWR1) 1: ld r7, VCPU_CIABR(r4) ld r8, VCPU_TAR(r4) @@ -918,15 +902,19 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) cmpdi r3, 512 /* 1 microsecond */ blt hdec_soon - /* For hash guest, clear out and reload the SLB */ ld r6, VCPU_KVM(r4) lbz r0, KVM_RADIX(r6) cmpwi r0, 0 bne 9f + + /* For hash guest, clear out and reload the SLB */ +BEGIN_MMU_FTR_SECTION + /* Radix host won't have populated the SLB, so no need to clear */ li r6, 0 slbmte r6, r6 - slbia + PPC_SLBIA(6) ptesync +END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */ lwz r5,VCPU_SLB_MAX(r4) @@ -1187,6 +1175,20 @@ EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9) mr r4, r3 b fast_guest_entry_c guest_exit_short_path: + /* + * Malicious or buggy radix guests may have inserted SLB entries + * (only 0..3 because radix always runs with UPRT=1), so these must + * be cleared here to avoid side-channels. slbmte is used rather + * than slbia, as it won't clear cached translations. 
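
As a reading aid for the four slbmte pairs that follow, here is an equivalent C rendering (a sketch only, PowerPC inline asm; clear_radix_guest_slb is a made-up name, not part of this patch):

/*
 * Invalidate SLB entries 0..3: RS = 0 leaves the valid bit clear, and
 * the low bits of RB select the entry index, mirroring the unrolled
 * li/slbmte sequence below.
 */
static inline void clear_radix_guest_slb(void)
{
	unsigned long index;

	for (index = 0; index < 4; index++)
		asm volatile("slbmte %0,%1"
			     : : "r" (0UL), "r" (index) : "memory");
}
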
+ */ + li r0,0 + slbmte r0,r0 + li r4,1 + slbmte r0,r4 + li r4,2 + slbmte r0,r4 + li r4,3 + slbmte r0,r4 li r0, KVM_GUEST_MODE_NONE stb r0, HSTATE_IN_GUEST(r13) @@ -1499,7 +1501,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ lbz r0, KVM_RADIX(r5) li r5, 0 cmpwi r0, 0 - bne 3f /* for radix, save 0 entries */ + bne 0f /* for radix, save 0 entries */ lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ mtctr r0 li r6,0 @@ -1518,13 +1520,13 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ /* Finally clear out the SLB */ li r0,0 slbmte r0,r0 - slbia + PPC_SLBIA(6) ptesync -3: stw r5,VCPU_SLB_MAX(r9) + stw r5,VCPU_SLB_MAX(r9) /* load host SLB entries */ BEGIN_MMU_FTR_SECTION - b 0f + b guest_bypass END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) ld r8,PACA_SLBSHADOWPTR(r13) @@ -1538,7 +1540,21 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) slbmte r6,r5 1: addi r8,r8,16 .endr -0: + b guest_bypass + +0: /* + * Sanitise radix guest SLB, see guest_exit_short_path comment. + * We clear vcpu->arch.slb_max to match earlier behaviour. + */ + li r0,0 + stw r0,VCPU_SLB_MAX(r9) + slbmte r0,r0 + li r4,1 + slbmte r0,r4 + li r4,2 + slbmte r0,r4 + li r4,3 + slbmte r0,r4 guest_bypass: stw r12, STACK_SLOT_TRAP(r1) @@ -1759,8 +1775,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* Restore host values of some registers */ BEGIN_FTR_SECTION ld r5, STACK_SLOT_CIABR(r1) - ld r6, STACK_SLOT_DAWR(r1) - ld r7, STACK_SLOT_DAWRX(r1) + ld r6, STACK_SLOT_DAWR0(r1) + ld r7, STACK_SLOT_DAWRX0(r1) mtspr SPRN_CIABR, r5 /* * If the DAWR doesn't work, it's ok to write these here as @@ -1770,6 +1786,12 @@ BEGIN_FTR_SECTION mtspr SPRN_DAWRX0, r7 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) BEGIN_FTR_SECTION + ld r6, STACK_SLOT_DAWR1(r1) + ld r7, STACK_SLOT_DAWRX1(r1) + mtspr SPRN_DAWR1, r6 + mtspr SPRN_DAWRX1, r7 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1) +BEGIN_FTR_SECTION ld r5, STACK_SLOT_TID(r1) ld r6, STACK_SLOT_PSSCR(r1) ld r7, STACK_SLOT_PID(r1) @@ -1938,24 +1960,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 19: lis r8,0x7fff /* MAX_INT@h */ mtspr SPRN_HDEC,r8 -16: -BEGIN_FTR_SECTION - /* On POWER9 with HPT-on-radix we need to wait for all other threads */ - ld r3, HSTATE_SPLIT_MODE(r13) - cmpdi r3, 0 - beq 47f - lwz r8, KVM_SPLIT_DO_RESTORE(r3) - cmpwi r8, 0 - beq 47f - bl kvmhv_p9_restore_lpcr - nop - b 48f -47: -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) - ld r8,KVM_HOST_LPCR(r4) +16: ld r8,KVM_HOST_LPCR(r4) mtspr SPRN_LPCR,r8 isync -48: + #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING /* Finish timing, if we have a vcpu */ ld r4, HSTATE_KVM_VCPU(r13) @@ -2574,8 +2582,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW rlwimi r5, r4, 2, DAWRX_WT clrrdi r4, r4, 3 - std r4, VCPU_DAWR(r3) - std r5, VCPU_DAWRX(r3) + std r4, VCPU_DAWR0(r3) + std r5, VCPU_DAWRX0(r3) /* * If came in through the real mode hcall handler then it is necessary * to write the registers since the return path won't. 
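
The CPU_FTR_DAWR1 feature sections above gate every touch of the second watchpoint pair. A minimal kernel-context C sketch of that pattern (save_dawr_state and the cpu_has_dawr1 flag are illustrative stand-ins for the feature-section machinery):

#include <asm/reg.h>	/* mfspr() and the SPRN_DAWR0/1, SPRN_DAWRX0/1 numbers */

struct dawr_state {
	unsigned long dawr0, dawrx0;
	unsigned long dawr1, dawrx1;
};

/* Save both watchpoint pairs, touching the second pair only when the
 * CPU advertises it (CPU_FTR_DAWR1 in the real feature sections). */
static void save_dawr_state(struct dawr_state *s, int cpu_has_dawr1)
{
	s->dawr0  = mfspr(SPRN_DAWR0);
	s->dawrx0 = mfspr(SPRN_DAWRX0);
	if (cpu_has_dawr1) {
		s->dawr1  = mfspr(SPRN_DAWR1);
		s->dawrx1 = mfspr(SPRN_DAWRX1);
	}
}
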
Otherwise it is @@ -2779,8 +2787,10 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) beq kvm_end_cede cmpwi r0, NAPPING_NOVCPU beq kvm_novcpu_wakeup +BEGIN_FTR_SECTION cmpwi r0, NAPPING_UNSPLIT beq kvm_unsplit_wakeup +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) twi 31,0,0 /* Nap state must not be zero */ 33: mr r4, r3 @@ -3343,13 +3353,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) mtspr SPRN_IAMR, r0 mtspr SPRN_CIABR, r0 mtspr SPRN_DAWRX0, r0 +BEGIN_FTR_SECTION + mtspr SPRN_DAWRX1, r0 +END_FTR_SECTION_IFSET(CPU_FTR_DAWR1) + + /* Clear hash and radix guest SLB, see guest_exit_short_path comment. */ + slbmte r0, r0 + PPC_SLBIA(6) BEGIN_MMU_FTR_SECTION b 4f END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) - slbmte r0, r0 - slbia ptesync ld r8, PACA_SLBSHADOWPTR(r13) .rept SLB_NUM_BOLTED diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 30dfeac731c6..e7219b6f5f9a 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1813,9 +1813,9 @@ int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, return -EINVAL; if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL) - state->asserted = 1; + state->asserted = true; else if (level == 0 || level == KVM_INTERRUPT_UNSET) { - state->asserted = 0; + state->asserted = false; return 0; } diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 288a9820ec01..7d5fe43f85c4 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -20,6 +20,7 @@ #include <asm/cputable.h> #include <linux/uaccess.h> +#include <asm/interrupt.h> #include <asm/kvm_ppc.h> #include <asm/cacheflush.h> #include <asm/dbell.h> @@ -698,7 +699,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); r = 1; - }; + } return r; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index cf52d26f49cd..a2a68a958fa0 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -611,8 +611,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = !!(hv_enabled && radix_enabled()); break; case KVM_CAP_PPC_MMU_HASH_V3: - r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) && - cpu_has_feature(CPU_FTR_HVMODE)); + r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible && + kvmppc_hv_ops->hash_v3_possible()); break; case KVM_CAP_PPC_NESTED_HV: r = !!(hv_enabled && kvmppc_hv_ops->enable_nested && @@ -678,6 +678,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = hv_enabled && kvmppc_hv_ops->enable_svm && !kvmppc_hv_ops->enable_svm(NULL); break; + case KVM_CAP_PPC_DAWR1: + r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 && + !kvmppc_hv_ops->enable_dawr1(NULL)); + break; #endif default: r = 0; @@ -1518,7 +1522,7 @@ int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, return emulated; } -int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) +static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; @@ -1536,7 +1540,7 @@ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) return result; } -int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) +static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; @@ -1554,7 +1558,7 @@ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) return result; } -int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) +static int 
kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; @@ -1572,7 +1576,7 @@ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) return result; } -int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) +static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; @@ -2187,6 +2191,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, break; r = kvm->arch.kvm_ops->enable_svm(kvm); break; + case KVM_CAP_PPC_DAWR1: + r = -EINVAL; + if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1) + break; + r = kvm->arch.kvm_ops->enable_dawr1(kvm); + break; #endif default: r = -EINVAL; diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 69a91b571845..d4efc182662a 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -31,7 +31,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o # 64-bit linker creates .sfpr on demand for final link (vmlinux), # so it is only needed for modules, and only for older linkers which # do not support --save-restore-funcs -ifeq ($(call ld-ifversion, -lt, 225000000, y),y) +ifeq ($(call ld-ifversion, -lt, 22500, y),y) extra-$(CONFIG_PPC64) += crtsavres.o endif diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c index 1550e0d2513a..eb2919ddf9b9 100644 --- a/arch/powerpc/lib/pmem.c +++ b/arch/powerpc/lib/pmem.c @@ -6,6 +6,7 @@ #include <linux/string.h> #include <linux/export.h> #include <linux/uaccess.h> +#include <linux/libnvdimm.h> #include <asm/cacheflush.h> diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index ede093e96234..c6aebc149d14 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -904,7 +904,7 @@ static nokprobe_inline int do_vsx_load(struct instruction_op *op, if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs)) return -EFAULT; - nr_vsx_regs = size / sizeof(__vector128); + nr_vsx_regs = max(1ul, size / sizeof(__vector128)); emulate_vsx_load(op, buf, mem, cross_endian); preempt_disable(); if (reg < 32) { @@ -951,7 +951,7 @@ static nokprobe_inline int do_vsx_store(struct instruction_op *op, if (!address_ok(regs, ea, size)) return -EFAULT; - nr_vsx_regs = size / sizeof(__vector128); + nr_vsx_regs = max(1ul, size / sizeof(__vector128)); preempt_disable(); if (reg < 32) { /* FP regs + extensions */ @@ -1306,9 +1306,11 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, if ((word & 0xfe2) == 2) op->type = SYSCALL; else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && - (word & 0xfe3) == 1) + (word & 0xfe3) == 1) { /* scv */ op->type = SYSCALL_VECTORED_0; - else + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; + } else op->type = UNKNOWN; return 0; #endif @@ -1412,7 +1414,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #ifdef __powerpc64__ case 1: if (!cpu_has_feature(CPU_FTR_ARCH_31)) - return -1; + goto unknown_opcode; prefix_r = GET_PREFIX_R(word); ra = GET_PREFIX_RA(suffix); @@ -1445,8 +1447,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #ifdef __powerpc64__ case 4: + /* + * There are very many instructions with this primary opcode + * introduced in the ISA as early as v2.03. 
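
The recurring change in sstep.c, shown above for primary opcode 4 and below for many more cases, replaces "return -1" with "goto unknown_opcode". A standalone model of the new behaviour (simplified stand-in types, not the real sstep.c interfaces):

#include <stdbool.h>

enum op_type_model { OP_UNKNOWN, OP_MADDHD };
struct op_model { enum op_type_model type; };

/* An instruction the CPU doesn't implement is now reported as UNKNOWN,
 * so the caller raises an illegal-instruction error, rather than
 * bailing out of the analysis with -1. */
static int analyse_one_model(bool cpu_has_isa_300, struct op_model *op)
{
	if (!cpu_has_isa_300)
		goto unknown_opcode;
	op->type = OP_MADDHD;
	return 0;

unknown_opcode:
	op->type = OP_UNKNOWN;
	return 0;
}
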
However, the ones + * we currently emulate were all introduced with ISA 3.0 + */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; switch (word & 0x3f) { case 48: /* maddhd */ @@ -1472,7 +1479,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, * There are other instructions from ISA 3.0 with the same * primary opcode which do not have emulation support yet. */ - return -1; + goto unknown_opcode; #endif case 7: /* mulli */ @@ -1532,6 +1539,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 19: if (((word >> 1) & 0x1f) == 2) { /* addpcis */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; imm = (short) (word & 0xffc1); /* d0 + d2 fields */ imm |= (word >> 15) & 0x3e; /* d1 field */ op->val = regs->nip + (imm << 16) + 4; @@ -1844,7 +1853,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #ifdef __powerpc64__ case 265: /* modud */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; op->val = regs->gpr[ra] % regs->gpr[rb]; goto compute_done; #endif @@ -1854,7 +1863,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 267: /* moduw */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; op->val = (unsigned int) regs->gpr[ra] % (unsigned int) regs->gpr[rb]; goto compute_done; @@ -1891,7 +1900,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #endif case 755: /* darn */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; switch (ra & 0x3) { case 0: /* 32-bit conditioned */ @@ -1909,18 +1918,18 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, goto compute_done; } - return -1; + goto unknown_opcode; #ifdef __powerpc64__ case 777: /* modsd */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; op->val = (long int) regs->gpr[ra] % (long int) regs->gpr[rb]; goto compute_done; #endif case 779: /* modsw */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; op->val = (int) regs->gpr[ra] % (int) regs->gpr[rb]; goto compute_done; @@ -1997,14 +2006,14 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #endif case 538: /* cnttzw */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; val = (unsigned int) regs->gpr[rd]; op->val = (val ? __builtin_ctz(val) : 32); goto logical_done; #ifdef __powerpc64__ case 570: /* cnttzd */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; val = regs->gpr[rd]; op->val = (val ? 
__builtin_ctzl(val) : 64); goto logical_done; @@ -2114,7 +2123,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 890: /* extswsli with sh_5 = 0 */ case 891: /* extswsli with sh_5 = 1 */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; op->type = COMPUTE + SETREG; sh = rb | ((word & 2) << 4); val = (signed int) regs->gpr[rd]; @@ -2441,6 +2450,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 268: /* lxvx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 16; @@ -2450,6 +2461,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 269: /* lxvl */ case 301: { /* lxvll */ int nb; + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->ea = ra ? regs->gpr[ra] : 0; nb = regs->gpr[rb] & 0xff; @@ -2470,13 +2483,15 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 333: /* lxvpx */ if (!cpu_has_feature(CPU_FTR_ARCH_31)) - return -1; + goto unknown_opcode; op->reg = VSX_REGISTER_XTP(rd); op->type = MKOP(LOAD_VSX, 0, 32); op->element_size = 32; break; case 364: /* lxvwsx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 4); op->element_size = 4; @@ -2484,6 +2499,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 396: /* stxvx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 16; @@ -2493,6 +2510,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 397: /* stxvl */ case 429: { /* stxvll */ int nb; + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->ea = ra ? 
regs->gpr[ra] : 0; nb = regs->gpr[rb] & 0xff; @@ -2506,7 +2525,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, } case 461: /* stxvpx */ if (!cpu_has_feature(CPU_FTR_ARCH_31)) - return -1; + goto unknown_opcode; op->reg = VSX_REGISTER_XTP(rd); op->type = MKOP(STORE_VSX, 0, 32); op->element_size = 32; @@ -2544,6 +2563,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 781: /* lxsibzx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 1); op->element_size = 8; @@ -2551,6 +2572,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 812: /* lxvh8x */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 2; @@ -2558,6 +2581,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 813: /* lxsihzx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 2); op->element_size = 8; @@ -2571,6 +2596,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 876: /* lxvb16x */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 1; @@ -2584,6 +2611,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 909: /* stxsibx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 1); op->element_size = 8; @@ -2591,6 +2620,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 940: /* stxvh8x */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 2; @@ -2598,6 +2629,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 941: /* stxsihx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 2); op->element_size = 8; @@ -2611,6 +2644,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 1004: /* stxvb16x */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 1; @@ -2719,12 +2754,16 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->type = MKOP(LOAD_FP, 0, 16); break; case 2: /* lxsd */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd + 32; op->type = MKOP(LOAD_VSX, 0, 8); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 3: /* lxssp */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd + 32; op->type = MKOP(LOAD_VSX, 0, 4); op->element_size = 8; @@ -2754,7 +2793,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #ifdef CONFIG_VSX case 6: if (!cpu_has_feature(CPU_FTR_ARCH_31)) - return -1; + goto unknown_opcode; op->ea = dqform_ea(word, regs); op->reg = VSX_REGISTER_XTP(rd); op->element_size = 32; @@ -2777,6 +2816,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 1: /* lxv */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->ea = 
dqform_ea(word, regs); if (word & 8) op->reg = rd + 32; @@ -2787,6 +2828,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 2: /* stxsd with LSB of DS field = 0 */ case 6: /* stxsd with LSB of DS field = 1 */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->ea = dsform_ea(word, regs); op->reg = rd + 32; op->type = MKOP(STORE_VSX, 0, 8); @@ -2796,6 +2839,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 3: /* stxssp with LSB of DS field = 0 */ case 7: /* stxssp with LSB of DS field = 1 */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->ea = dsform_ea(word, regs); op->reg = rd + 32; op->type = MKOP(STORE_VSX, 0, 4); @@ -2804,6 +2849,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 5: /* stxv */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->ea = dqform_ea(word, regs); if (word & 8) op->reg = rd + 32; @@ -2833,7 +2880,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 1: /* Prefixed instructions */ if (!cpu_has_feature(CPU_FTR_ARCH_31)) - return -1; + goto unknown_opcode; prefix_r = GET_PREFIX_R(word); ra = GET_PREFIX_RA(suffix); @@ -2972,6 +3019,20 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, } + if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) { + switch (GETTYPE(op->type)) { + case LOAD: + if (ra == rd) + goto unknown_opcode; + fallthrough; + case STORE: + case LOAD_FP: + case STORE_FP: + if (ra == 0) + goto unknown_opcode; + } + } + #ifdef CONFIG_VSX if ((GETTYPE(op->type) == LOAD_VSX || GETTYPE(op->type) == STORE_VSX) && @@ -2982,6 +3043,10 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, return 0; + unknown_opcode: + op->type = UNKNOWN; + return 0; + logical_done: if (word & 1) set_cr0(regs, op); diff --git a/arch/powerpc/mm/book3s32/Makefile b/arch/powerpc/mm/book3s32/Makefile index 3f972db17761..446d9de88ce4 100644 --- a/arch/powerpc/mm/book3s32/Makefile +++ b/arch/powerpc/mm/book3s32/Makefile @@ -6,4 +6,6 @@ ifdef CONFIG_KASAN CFLAGS_mmu.o += -DDISABLE_BRANCH_PROFILING endif -obj-y += mmu.o hash_low.o mmu_context.o tlb.o nohash_low.o +obj-y += mmu.o mmu_context.o +obj-$(CONFIG_PPC_BOOK3S_603) += nohash_low.o +obj-$(CONFIG_PPC_BOOK3S_604) += hash_low.o tlb.o diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 859e5bd603ac..d7eb266a3f7a 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -234,7 +234,7 @@ void mmu_mark_initmem_nx(void) if (is_module_segment(i << 28)) continue; - mtsrin(mfsrin(i << 28) | 0x10000000, i << 28); + mtsr(mfsr(i << 28) | 0x10000000, i << 28); } } diff --git a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c index b5e9fff8c217..a688e1324ae5 100644 --- a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c +++ b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c @@ -16,10 +16,6 @@ unsigned int hpage_shift; EXPORT_SYMBOL(hpage_shift); -extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn, - unsigned long pa, unsigned long rlags, - unsigned long vflags, int psize, int ssize); - int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, unsigned long flags, int ssize, unsigned int shift, unsigned int mmu_psize) diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 73b06adb6eeb..581b20a2feaf 100644 
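
The OP_IS_LOAD_STORE/UPDATE hunk added to analyse_instr() above rejects the ISA's invalid update forms. Its rule, condensed into a standalone predicate (illustrative names):

#include <stdbool.h>

/* Update-form loads must not use RA == 0 or RA == RT; update-form
 * stores (and FP loads/stores) must not use RA == 0. */
static bool update_form_is_valid(bool is_load, unsigned int ra, unsigned int rt)
{
	if (is_load && ra == rt)
		return false;
	return ra != 0;
}
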
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -38,6 +38,7 @@
 #include <linux/pgtable.h>
 #include <asm/debugfs.h>
+#include <asm/interrupt.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
@@ -1143,10 +1144,10 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 	page = pte_page(pte);
 
 	/* page is dirty */
-	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
+	if (!test_bit(PG_dcache_clean, &page->flags) && !PageReserved(page)) {
 		if (trap == 0x400) {
 			flush_dcache_icache_page(page);
-			set_bit(PG_arch_1, &page->flags);
+			set_bit(PG_dcache_clean, &page->flags);
 		} else
 			pp |= HPTE_R_N;
 	}
@@ -1288,7 +1289,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 		 unsigned long flags)
 {
 	bool is_thp;
-	enum ctx_state prev_state = exception_enter();
 	pgd_t *pgdir;
 	unsigned long vsid;
 	pte_t *ptep;
@@ -1490,7 +1490,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 	DBG_LOW(" -> rc=%d\n", rc);
 
 bail:
-	exception_exit(prev_state);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(hash_page_mm);
@@ -1512,16 +1511,22 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
 }
 EXPORT_SYMBOL_GPL(hash_page);
 
-int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr,
-		unsigned long msr)
+DECLARE_INTERRUPT_HANDLER_RET(__do_hash_fault);
+DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
 {
+	unsigned long ea = regs->dar;
+	unsigned long dsisr = regs->dsisr;
 	unsigned long access = _PAGE_PRESENT | _PAGE_READ;
 	unsigned long flags = 0;
-	struct mm_struct *mm = current->mm;
-	unsigned int region_id = get_region_id(ea);
+	struct mm_struct *mm;
+	unsigned int region_id;
+	long err;
 
+	region_id = get_region_id(ea);
 	if ((region_id == VMALLOC_REGION_ID) || (region_id == IO_REGION_ID))
 		mm = &init_mm;
+	else
+		mm = current->mm;
 
 	if (dsisr & DSISR_NOHPTE)
 		flags |= HPTE_NOHPTE_UPDATE;
@@ -1537,13 +1542,66 @@ int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr,
 	 * 2) user space access kernel space.
 	 */
 	access |= _PAGE_PRIVILEGED;
-	if ((msr & MSR_PR) || (region_id == USER_REGION_ID))
+	if (user_mode(regs) || (region_id == USER_REGION_ID))
 		access &= ~_PAGE_PRIVILEGED;
 
-	if (trap == 0x400)
+	if (regs->trap == 0x400)
 		access |= _PAGE_EXEC;
 
-	return hash_page_mm(mm, ea, access, trap, flags);
+	err = hash_page_mm(mm, ea, access, regs->trap, flags);
+	if (unlikely(err < 0)) {
+		// failed to insert a hash PTE due to a hypervisor error
+		if (user_mode(regs)) {
+			if (IS_ENABLED(CONFIG_PPC_SUBPAGE_PROT) && err == -2)
+				_exception(SIGSEGV, regs, SEGV_ACCERR, ea);
+			else
+				_exception(SIGBUS, regs, BUS_ADRERR, ea);
+		} else {
+			bad_page_fault(regs, SIGBUS);
+		}
+		err = 0;
+	}
+
+	return err;
+}
+
+/*
+ * The _RAW interrupt entry checks for the in_nmi() case before
+ * running the full handler.
+ */
+DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
+{
+	unsigned long dsisr = regs->dsisr;
+	long err;
+
+	if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT)))
+		goto page_fault;
+
+	/*
+	 * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
+	 * don't call hash_page, just fail the fault. This is required to
+	 * prevent re-entrancy problems in the hash code, namely perf
+	 * interrupts hitting while something holds H_PAGE_BUSY, and taking a
+	 * hash fault. See the comment in hash_preload().
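
DEFINE_INTERRUPT_HANDLER_RAW and friends come from the new asm/interrupt.h. As a rough mental model only (an assumption about the macro's shape, not its real definition; the real wrappers also do entry/exit bookkeeping), such a wrapper could expand along these lines:

struct pt_regs;

/* Guessed shape of a RAW-handler wrapper: forward-declare the body,
 * emit the externally visible entry point, then open the body. */
#define DEFINE_INTERRUPT_HANDLER_RAW_MODEL(func)			\
	static long ____##func(struct pt_regs *regs);			\
	long func(struct pt_regs *regs)					\
	{								\
		return ____##func(regs);				\
	}								\
	static long ____##func(struct pt_regs *regs)

Under that model, DEFINE_INTERRUPT_HANDLER_RAW_MODEL(do_hash_fault) { ... } reads as an ordinary function definition whose entry point does nothing extra, which is why raw handlers must avoid touching IRQ state themselves.
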
+ * + * We come here as a result of a DSI at a point where we don't want + * to call hash_page, such as when we are accessing memory (possibly + * user memory) inside a PMU interrupt that occurred while interrupts + * were soft-disabled. We want to invoke the exception handler for + * the access, or panic if there isn't a handler. + */ + if (unlikely(in_nmi())) { + do_bad_page_fault_segv(regs); + return 0; + } + + err = __do_hash_fault(regs); + if (err) { +page_fault: + err = hash__do_page_fault(regs); + } + + return err; } #ifdef CONFIG_PPC_MM_SLICES @@ -1843,27 +1901,6 @@ void flush_hash_range(unsigned long number, int local) } } -/* - * low_hash_fault is called when we the low level hash code failed - * to instert a PTE due to an hypervisor error - */ -void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) -{ - enum ctx_state prev_state = exception_enter(); - - if (user_mode(regs)) { -#ifdef CONFIG_PPC_SUBPAGE_PROT - if (rc == -2) - _exception(SIGSEGV, regs, SEGV_ACCERR, address); - else -#endif - _exception(SIGBUS, regs, BUS_ADRERR, address); - } else - bad_page_fault(regs, address, SIGBUS); - - exception_exit(prev_state); -} - long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long pa, unsigned long rflags, unsigned long vflags, int psize, int ssize) diff --git a/arch/powerpc/mm/book3s64/internal.h b/arch/powerpc/mm/book3s64/internal.h index c12d78ee42f5..5045048ce244 100644 --- a/arch/powerpc/mm/book3s64/internal.h +++ b/arch/powerpc/mm/book3s64/internal.h @@ -15,4 +15,6 @@ static inline bool stress_slb(void) void slb_setup_new_exec(void); +void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush); + #endif /* ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H */ diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c index 685d7bb3d26f..cd18e94d0843 100644 --- a/arch/powerpc/mm/book3s64/iommu_api.c +++ b/arch/powerpc/mm/book3s64/iommu_api.c @@ -129,7 +129,8 @@ good_exit: mutex_lock(&mem_list_mutex); - list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) { + list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next, + lockdep_is_held(&mem_list_mutex)) { /* Overlap? 
*/ if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) && (ua < (mem2->ua + @@ -289,6 +290,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, { struct mm_iommu_table_group_mem_t *mem, *ret = NULL; + rcu_read_lock(); list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { if ((mem->ua <= ua) && (ua + size <= mem->ua + @@ -297,6 +299,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, break; } } + rcu_read_unlock(); return ret; } @@ -327,7 +330,8 @@ struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm, mutex_lock(&mem_list_mutex); - list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { + list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next, + lockdep_is_held(&mem_list_mutex)) { if ((mem->ua == ua) && (mem->entries == entries)) { ret = mem; ++mem->used; @@ -421,6 +425,7 @@ bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa, struct mm_iommu_table_group_mem_t *mem; unsigned long end; + rcu_read_lock(); list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) continue; @@ -437,6 +442,7 @@ bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa, return true; } } + rcu_read_unlock(); return false; } diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 5b3a3bae21aa..9ffa65074cb0 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -20,6 +20,8 @@ #include <mm/mmu_decl.h> #include <trace/events/thp.h> +#include "internal.h" + unsigned long __pmd_frag_nr; EXPORT_SYMBOL(__pmd_frag_nr); unsigned long __pmd_frag_size_shift; @@ -79,10 +81,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); } -static void do_nothing(void *unused) +static void do_serialize(void *arg) { - + /* We've taken the IPI, so try to trim the mask while here */ + if (radix_enabled()) { + struct mm_struct *mm = arg; + exit_lazy_flush_tlb(mm, false); + } } + /* * Serialize against find_current_mm_pte which does lock-less * lookup in page tables with local interrupts disabled. 
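
The serialization trick used by serialize_against_pte_lookup() below relies on IPIs: a lockless page-table walker runs with interrupts disabled, so a waited-for IPI to every CPU in the mm's cpumask proves all such walkers have finished. A kernel-context sketch (serialize_model and do_serialize_model are illustrative names; the patch's do_serialize additionally trims the cpumask):

#include <linux/smp.h>
#include <linux/mm_types.h>

static void do_serialize_model(void *arg)
{
	/* Nothing to do: merely running here, with interrupts disabled
	 * for the duration of the IPI, proves the walkers are done. */
}

static void serialize_model(struct mm_struct *mm)
{
	smp_mb();	/* order the PTE update vs. the cpumask load */
	smp_call_function_many(mm_cpumask(mm), do_serialize_model, mm, 1);
}
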
For huge pages @@ -96,7 +103,7 @@ static void do_nothing(void *unused) void serialize_against_pte_lookup(struct mm_struct *mm) { smp_mb(); - smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1); + smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1); } /* diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c index fb66d154b26c..409e61210789 100644 --- a/arch/powerpc/mm/book3s64/radix_tlb.c +++ b/arch/powerpc/mm/book3s64/radix_tlb.c @@ -18,6 +18,8 @@ #include <asm/cputhreads.h> #include <asm/plpar_wrappers.h> +#include "internal.h" + #define RIC_FLUSH_TLB 0 #define RIC_FLUSH_PWC 1 #define RIC_FLUSH_ALL 2 @@ -627,15 +629,6 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd } EXPORT_SYMBOL(radix__local_flush_tlb_page); -static bool mm_is_singlethreaded(struct mm_struct *mm) -{ - if (atomic_read(&mm->context.copros) > 0) - return false; - if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm) - return true; - return false; -} - static bool mm_needs_flush_escalation(struct mm_struct *mm) { /* @@ -648,21 +641,24 @@ static bool mm_needs_flush_escalation(struct mm_struct *mm) return false; } -#ifdef CONFIG_SMP -static void do_exit_flush_lazy_tlb(void *arg) +/* + * If always_flush is true, then flush even if this CPU can't be removed + * from mm_cpumask. + */ +void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush) { - struct mm_struct *mm = arg; unsigned long pid = mm->context.id; + int cpu = smp_processor_id(); /* * A kthread could have done a mmget_not_zero() after the flushing CPU - * checked mm_is_singlethreaded, and be in the process of - * kthread_use_mm when interrupted here. In that case, current->mm will - * be set to mm, because kthread_use_mm() setting ->mm and switching to - * the mm is done with interrupts off. + * checked mm_cpumask, and be in the process of kthread_use_mm when + * interrupted here. In that case, current->mm will be set to mm, + * because kthread_use_mm() setting ->mm and switching to the mm is + * done with interrupts off. */ if (current->mm == mm) - goto out_flush; + goto out; if (current->active_mm == mm) { WARN_ON_ONCE(current->mm != NULL); @@ -673,11 +669,30 @@ static void do_exit_flush_lazy_tlb(void *arg) mmdrop(mm); } - atomic_dec(&mm->context.active_cpus); - cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm)); + /* + * This IPI may be initiated from any source including those not + * running the mm, so there may be a racing IPI that comes after + * this one which finds the cpumask already clear. Check and avoid + * underflowing the active_cpus count in that case. The race should + * not otherwise be a problem, but the TLB must be flushed because + * that's what the caller expects. 
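
The check-before-clear that follows avoids double-decrementing active_cpus when two trim IPIs race. The same idea in a self-contained C11 model (the atomics stand in for the cpumask bit and the active-CPU counter):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true if this caller cleared the bit, i.e. performed the real
 * trim and therefore must also flush; a racing second caller sees
 * false and leaves the counter alone. */
static bool trim_cpu_model(atomic_bool *cpu_in_mask, atomic_int *active_cpus)
{
	if (atomic_exchange(cpu_in_mask, false)) {
		atomic_fetch_sub(active_cpus, 1);
		return true;
	}
	return false;
}
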
+	 */
+	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+		atomic_dec(&mm->context.active_cpus);
+		cpumask_clear_cpu(cpu, mm_cpumask(mm));
+		always_flush = true;
+	}
+
+out:
+	if (always_flush)
+		_tlbiel_pid(pid, RIC_FLUSH_ALL);
+}
 
-out_flush:
-	_tlbiel_pid(pid, RIC_FLUSH_ALL);
+#ifdef CONFIG_SMP
+static void do_exit_flush_lazy_tlb(void *arg)
+{
+	struct mm_struct *mm = arg;
+	exit_lazy_flush_tlb(mm, true);
 }
 
 static void exit_flush_lazy_tlbs(struct mm_struct *mm)
@@ -693,9 +708,110 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
 			(void *)mm, 1);
 }
 
+#else /* CONFIG_SMP */
+static inline void exit_flush_lazy_tlbs(struct mm_struct *mm) { }
+#endif /* CONFIG_SMP */
+
+static DEFINE_PER_CPU(unsigned int, mm_cpumask_trim_clock);
+
+/*
+ * Interval between flushes at which we send out IPIs to check whether the
+ * mm_cpumask can be trimmed for the case where it's not a single-threaded
+ * process flushing its own mm. The intent is to reduce the cost of later
+ * flushes. Don't want this to be so low that it adds noticeable cost to TLB
+ * flushing, or so high that it doesn't help reduce global TLBIEs.
+ */
+static unsigned long tlb_mm_cpumask_trim_timer = 1073;
+
+static bool tick_and_test_trim_clock(void)
+{
+	if (__this_cpu_inc_return(mm_cpumask_trim_clock) ==
+			tlb_mm_cpumask_trim_timer) {
+		__this_cpu_write(mm_cpumask_trim_clock, 0);
+		return true;
+	}
+	return false;
+}
+
+enum tlb_flush_type {
+	FLUSH_TYPE_NONE,
+	FLUSH_TYPE_LOCAL,
+	FLUSH_TYPE_GLOBAL,
+};
+
+static enum tlb_flush_type flush_type_needed(struct mm_struct *mm, bool fullmm)
+{
+	int active_cpus = atomic_read(&mm->context.active_cpus);
+	int cpu = smp_processor_id();
+
+	if (active_cpus == 0)
+		return FLUSH_TYPE_NONE;
+	if (active_cpus == 1 && cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+		if (current->mm != mm) {
+			/*
+			 * Asynchronous flush sources may trim down to nothing
+			 * if the process is not running, so occasionally try
+			 * to trim.
+			 */
+			if (tick_and_test_trim_clock()) {
+				exit_lazy_flush_tlb(mm, true);
+				return FLUSH_TYPE_NONE;
+			}
+		}
+		return FLUSH_TYPE_LOCAL;
+	}
+
+	/* Coprocessors require TLBIE to invalidate nMMU. */
+	if (atomic_read(&mm->context.copros) > 0)
+		return FLUSH_TYPE_GLOBAL;
+
+	/*
+	 * In the fullmm case there's no point doing the exit_flush_lazy_tlbs
+	 * because the mm is being taken down anyway, and a TLBIE tends to
+	 * be faster than an IPI+TLBIEL.
+	 */
+	if (fullmm)
+		return FLUSH_TYPE_GLOBAL;
+
+	/*
+	 * If we are running the only thread of a single-threaded process,
+	 * then we should almost always be able to trim off the rest of the
+	 * CPU mask (except in the case of use_mm() races), so always try
+	 * trimming the mask.
+	 */
+	if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm) {
+		exit_flush_lazy_tlbs(mm);
+		/*
+		 * use_mm() race could prevent IPIs from being able to clear
+		 * the cpumask here, however those users are established
+		 * after our first check (and so after the PTEs are removed),
+		 * and the TLB still gets flushed by the IPI, so this CPU
+		 * will only require a local flush.
+		 */
+		return FLUSH_TYPE_LOCAL;
+	}
+
+	/*
+	 * Occasionally try to trim down the cpumask. It's possible this can
+	 * bring the mask to zero, which results in no flush.
+ */ + if (tick_and_test_trim_clock()) { + exit_flush_lazy_tlbs(mm); + if (current->mm == mm) + return FLUSH_TYPE_LOCAL; + if (cpumask_test_cpu(cpu, mm_cpumask(mm))) + exit_lazy_flush_tlb(mm, true); + return FLUSH_TYPE_NONE; + } + + return FLUSH_TYPE_GLOBAL; +} + +#ifdef CONFIG_SMP void radix__flush_tlb_mm(struct mm_struct *mm) { unsigned long pid; + enum tlb_flush_type type; pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) @@ -703,16 +819,15 @@ void radix__flush_tlb_mm(struct mm_struct *mm) preempt_disable(); /* - * Order loads of mm_cpumask vs previous stores to clear ptes before - * the invalidate. See barrier in switch_mm_irqs_off + * Order loads of mm_cpumask (in flush_type_needed) vs previous + * stores to clear ptes before the invalidate. See barrier in + * switch_mm_irqs_off */ smp_mb(); - if (!mm_is_thread_local(mm)) { - if (unlikely(mm_is_singlethreaded(mm))) { - exit_flush_lazy_tlbs(mm); - goto local; - } - + type = flush_type_needed(mm, false); + if (type == FLUSH_TYPE_LOCAL) { + _tlbiel_pid(pid, RIC_FLUSH_TLB); + } else if (type == FLUSH_TYPE_GLOBAL) { if (!mmu_has_feature(MMU_FTR_GTSE)) { unsigned long tgt = H_RPTI_TARGET_CMMU; @@ -728,9 +843,6 @@ void radix__flush_tlb_mm(struct mm_struct *mm) } else { _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB); } - } else { -local: - _tlbiel_pid(pid, RIC_FLUSH_TLB); } preempt_enable(); } @@ -739,6 +851,7 @@ EXPORT_SYMBOL(radix__flush_tlb_mm); static void __flush_all_mm(struct mm_struct *mm, bool fullmm) { unsigned long pid; + enum tlb_flush_type type; pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) @@ -746,13 +859,10 @@ static void __flush_all_mm(struct mm_struct *mm, bool fullmm) preempt_disable(); smp_mb(); /* see radix__flush_tlb_mm */ - if (!mm_is_thread_local(mm)) { - if (unlikely(mm_is_singlethreaded(mm))) { - if (!fullmm) { - exit_flush_lazy_tlbs(mm); - goto local; - } - } + type = flush_type_needed(mm, fullmm); + if (type == FLUSH_TYPE_LOCAL) { + _tlbiel_pid(pid, RIC_FLUSH_ALL); + } else if (type == FLUSH_TYPE_GLOBAL) { if (!mmu_has_feature(MMU_FTR_GTSE)) { unsigned long tgt = H_RPTI_TARGET_CMMU; unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | @@ -766,9 +876,6 @@ static void __flush_all_mm(struct mm_struct *mm, bool fullmm) _tlbie_pid(pid, RIC_FLUSH_ALL); else _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL); - } else { -local: - _tlbiel_pid(pid, RIC_FLUSH_ALL); } preempt_enable(); } @@ -783,6 +890,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, int psize) { unsigned long pid; + enum tlb_flush_type type; pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) @@ -790,11 +898,10 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, preempt_disable(); smp_mb(); /* see radix__flush_tlb_mm */ - if (!mm_is_thread_local(mm)) { - if (unlikely(mm_is_singlethreaded(mm))) { - exit_flush_lazy_tlbs(mm); - goto local; - } + type = flush_type_needed(mm, false); + if (type == FLUSH_TYPE_LOCAL) { + _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB); + } else if (type == FLUSH_TYPE_GLOBAL) { if (!mmu_has_feature(MMU_FTR_GTSE)) { unsigned long tgt, pg_sizes, size; @@ -811,9 +918,6 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB); else _tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB); - } else { -local: - _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB); } preempt_enable(); } @@ -828,8 +932,6 @@ void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) } 
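
flush_type_needed() and tick_and_test_trim_clock() above boil down to a small decision procedure; condensed into a runnable model (thread-local storage stands in for per-CPU data, names and constants are illustrative):

#include <stdbool.h>

enum flush_type_model { FT_NONE, FT_LOCAL, FT_GLOBAL };

#define TRIM_INTERVAL 1073		/* tlb_mm_cpumask_trim_timer */
static _Thread_local unsigned int trim_clock;

static bool tick_and_test_trim_clock_model(void)
{
	if (++trim_clock == TRIM_INTERVAL) {
		trim_clock = 0;
		return true;
	}
	return false;
}

static enum flush_type_model flush_type_model(int active_cpus,
					      bool this_cpu_in_mask)
{
	if (active_cpus == 0)
		return FT_NONE;		/* nobody has the mm cached */
	if (active_cpus == 1 && this_cpu_in_mask)
		return FT_LOCAL;	/* a local tlbiel suffices */
	return FT_GLOBAL;		/* broadcast tlbie or IPI+tlbiel */
}
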
EXPORT_SYMBOL(radix__flush_tlb_page); -#else /* CONFIG_SMP */ -static inline void exit_flush_lazy_tlbs(struct mm_struct *mm) { } #endif /* CONFIG_SMP */ static void do_tlbiel_kernel(void *info) @@ -893,7 +995,9 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm, unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift; unsigned long page_size = 1UL << page_shift; unsigned long nr_pages = (end - start) >> page_shift; - bool local, full; + bool fullmm = (end == TLB_FLUSH_ALL); + bool flush_pid; + enum tlb_flush_type type; pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) @@ -901,24 +1005,18 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm, preempt_disable(); smp_mb(); /* see radix__flush_tlb_mm */ - if (!mm_is_thread_local(mm)) { - if (unlikely(mm_is_singlethreaded(mm))) { - if (end != TLB_FLUSH_ALL) { - exit_flush_lazy_tlbs(mm); - goto is_local; - } - } - local = false; - full = (end == TLB_FLUSH_ALL || - nr_pages > tlb_single_page_flush_ceiling); - } else { -is_local: - local = true; - full = (end == TLB_FLUSH_ALL || - nr_pages > tlb_local_single_page_flush_ceiling); - } + type = flush_type_needed(mm, fullmm); + if (type == FLUSH_TYPE_NONE) + goto out; + + if (fullmm) + flush_pid = true; + else if (type == FLUSH_TYPE_GLOBAL) + flush_pid = nr_pages > tlb_single_page_flush_ceiling; + else + flush_pid = nr_pages > tlb_local_single_page_flush_ceiling; - if (!mmu_has_feature(MMU_FTR_GTSE) && !local) { + if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) { unsigned long tgt = H_RPTI_TARGET_CMMU; unsigned long pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize); @@ -928,8 +1026,8 @@ is_local: tgt |= H_RPTI_TARGET_NMMU; pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, pg_sizes, start, end); - } else if (full) { - if (local) { + } else if (flush_pid) { + if (type == FLUSH_TYPE_LOCAL) { _tlbiel_pid(pid, RIC_FLUSH_TLB); } else { if (cputlb_use_tlbie()) { @@ -952,7 +1050,7 @@ is_local: hflush = true; } - if (local) { + if (type == FLUSH_TYPE_LOCAL) { asm volatile("ptesync": : :"memory"); __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize); if (hflush) @@ -974,6 +1072,7 @@ is_local: hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, false); } } +out: preempt_enable(); } @@ -1085,32 +1184,30 @@ static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned int page_shift = mmu_psize_defs[psize].shift; unsigned long page_size = 1UL << page_shift; unsigned long nr_pages = (end - start) >> page_shift; - bool local, full; + bool fullmm = (end == TLB_FLUSH_ALL); + bool flush_pid; + enum tlb_flush_type type; pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) return; + fullmm = (end == TLB_FLUSH_ALL); + preempt_disable(); smp_mb(); /* see radix__flush_tlb_mm */ - if (!mm_is_thread_local(mm)) { - if (unlikely(mm_is_singlethreaded(mm))) { - if (end != TLB_FLUSH_ALL) { - exit_flush_lazy_tlbs(mm); - goto is_local; - } - } - local = false; - full = (end == TLB_FLUSH_ALL || - nr_pages > tlb_single_page_flush_ceiling); - } else { -is_local: - local = true; - full = (end == TLB_FLUSH_ALL || - nr_pages > tlb_local_single_page_flush_ceiling); - } + type = flush_type_needed(mm, fullmm); + if (type == FLUSH_TYPE_NONE) + goto out; - if (!mmu_has_feature(MMU_FTR_GTSE) && !local) { + if (fullmm) + flush_pid = true; + else if (type == FLUSH_TYPE_GLOBAL) + flush_pid = nr_pages > tlb_single_page_flush_ceiling; + else + flush_pid = nr_pages > tlb_local_single_page_flush_ceiling; + + if (!mmu_has_feature(MMU_FTR_GTSE) && 
type == FLUSH_TYPE_GLOBAL) { unsigned long tgt = H_RPTI_TARGET_CMMU; unsigned long type = H_RPTI_TYPE_TLB; unsigned long pg_sizes = psize_to_rpti_pgsize(psize); @@ -1120,8 +1217,8 @@ is_local: if (atomic_read(&mm->context.copros) > 0) tgt |= H_RPTI_TARGET_NMMU; pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end); - } else if (full) { - if (local) { + } else if (flush_pid) { + if (type == FLUSH_TYPE_LOCAL) { _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); } else { if (cputlb_use_tlbie()) { @@ -1137,7 +1234,7 @@ is_local: } } else { - if (local) + if (type == FLUSH_TYPE_LOCAL) _tlbiel_va_range(start, end, pid, page_size, psize, also_pwc); else if (cputlb_use_tlbie()) _tlbie_va_range(start, end, pid, page_size, psize, also_pwc); @@ -1145,6 +1242,7 @@ is_local: _tlbiel_va_range_multicast(mm, start, end, pid, page_size, psize, also_pwc); } +out: preempt_enable(); } @@ -1164,6 +1262,7 @@ static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr) { unsigned long pid, end; + enum tlb_flush_type type; pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) @@ -1180,11 +1279,10 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr) /* Otherwise first do the PWC, then iterate the pages. */ preempt_disable(); smp_mb(); /* see radix__flush_tlb_mm */ - if (!mm_is_thread_local(mm)) { - if (unlikely(mm_is_singlethreaded(mm))) { - exit_flush_lazy_tlbs(mm); - goto local; - } + type = flush_type_needed(mm, false); + if (type == FLUSH_TYPE_LOCAL) { + _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); + } else if (type == FLUSH_TYPE_GLOBAL) { if (!mmu_has_feature(MMU_FTR_GTSE)) { unsigned long tgt, type, pg_sizes; @@ -1202,9 +1300,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr) else _tlbiel_va_range_multicast(mm, addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); - } else { -local: - _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); } preempt_enable(); diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c index 584567970c11..c91bd85eb90e 100644 --- a/arch/powerpc/mm/book3s64/slb.c +++ b/arch/powerpc/mm/book3s64/slb.c @@ -10,6 +10,7 @@ */ #include <asm/asm-prototypes.h> +#include <asm/interrupt.h> #include <asm/mmu.h> #include <asm/mmu_context.h> #include <asm/paca.h> @@ -813,8 +814,9 @@ static long slb_allocate_user(struct mm_struct *mm, unsigned long ea) return slb_insert_entry(ea, context, flags, ssize, false); } -long do_slb_fault(struct pt_regs *regs, unsigned long ea) +DEFINE_INTERRUPT_HANDLER_RAW(do_slb_fault) { + unsigned long ea = regs->dar; unsigned long id = get_region_id(ea); /* IRQs are not reconciled here, so can't check irqs_disabled */ @@ -824,19 +826,21 @@ long do_slb_fault(struct pt_regs *regs, unsigned long ea) return -EINVAL; /* - * SLB kernel faults must be very careful not to touch anything - * that is not bolted. E.g., PACA and global variables are okay, - * mm->context stuff is not. - * - * SLB user faults can access all of kernel memory, but must be - * careful not to touch things like IRQ state because it is not - * "reconciled" here. The difficulty is that we must use - * fast_exception_return to return from kernel SLB faults without - * looking at possible non-bolted memory. 
We could test user vs - * kernel faults in the interrupt handler asm and do a full fault, - * reconcile, ret_from_except for user faults which would make them - * first class kernel code. But for performance it's probably nicer - * if they go via fast_exception_return too. + * SLB kernel faults must be very careful not to touch anything that is + * not bolted. E.g., PACA and global variables are okay, mm->context + * stuff is not. SLB user faults may access all of memory (and induce + * one recursive SLB kernel fault), so the kernel fault must not + * trample on the user fault state at those points. + */ + + /* + * This is a raw interrupt handler, for performance, so that + * fast_interrupt_return can be used. The handler must not touch local + * irq state, or schedule. We could test for usermode and upgrade to a + * normal process context (synchronous) interrupt for those, which + * would make them first-class kernel code and able to be traced and + * instrumented, although performance would suffer a bit, it would + * probably be a good tradeoff. */ if (id >= LINEAR_MAP_REGION_ID) { long err; @@ -865,13 +869,15 @@ long do_slb_fault(struct pt_regs *regs, unsigned long ea) } } -void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err) +DEFINE_INTERRUPT_HANDLER(do_bad_slb_fault) { + int err = regs->result; + if (err == -EFAULT) { if (user_mode(regs)) - _exception(SIGSEGV, regs, SEGV_BNDERR, ea); + _exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar); else - bad_page_fault(regs, ea, SIGSEGV); + bad_page_fault(regs, SIGSEGV); } else if (err == -EINVAL) { unrecoverable_exception(regs); } else { diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 8961b44f350c..bb368257b55c 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -34,6 +34,7 @@ #include <linux/uaccess.h> #include <asm/firmware.h> +#include <asm/interrupt.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/mmu_context.h> @@ -377,18 +378,16 @@ static void sanity_check_fault(bool is_write, bool is_user, /* * For 600- and 800-family processors, the error_code parameter is DSISR - * for a data fault, SRR1 for an instruction fault. For 400-family processors - * the error_code parameter is ESR for a data fault, 0 for an instruction - * fault. - * For 64-bit processors, the error_code parameter is - * - DSISR for a non-SLB data access fault, - * - SRR1 & 0x08000000 for a non-SLB instruction access fault - * - 0 any SLB fault. + * for a data fault, SRR1 for an instruction fault. + * For 400-family processors the error_code parameter is ESR for a data fault, + * 0 for an instruction fault. + * For 64-bit processors, the error_code parameter is DSISR for a data access + * fault, SRR1 & 0x08000000 for an instruction access fault. * * The return value is 0 if the fault was handled, or the signal * number if this is a kernel fault that can't be handled here. 
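
The fault path below now takes only pt_regs (the interrupt entry code has already stashed DAR/DSISR) and falls back to the exception table for unhandled kernel faults. Both steps in one kernel-context sketch (do_page_fault_model is a made-up name; ___do_page_fault is the function defined in this hunk):

#include <linux/extable.h>	/* search_exception_tables() */
#include <asm/ptrace.h>

static long do_page_fault_model(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;
	long err;

	err = ___do_page_fault(regs, regs->dar, regs->dsisr);
	if (!err)
		return 0;

	/* Kernel fault at a whitelisted instruction: jump to its fixup. */
	entry = search_exception_tables(regs->nip);
	if (entry) {
		instruction_pointer_set(regs, extable_fixup(entry));
		return 0;
	}
	return err;	/* genuinely bad access */
}
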
*/ -static int __do_page_fault(struct pt_regs *regs, unsigned long address, +static int ___do_page_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code) { struct vm_area_struct * vma; @@ -435,9 +434,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address, return bad_area_nosemaphore(regs, address); } - /* We restore the interrupt state now */ - if (!arch_irq_disabled_regs(regs)) - local_irq_enable(); + interrupt_cond_local_irq_enable(regs); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); @@ -540,34 +537,51 @@ retry: return 0; } -NOKPROBE_SYMBOL(__do_page_fault); +NOKPROBE_SYMBOL(___do_page_fault); -int do_page_fault(struct pt_regs *regs, unsigned long address, - unsigned long error_code) +static long __do_page_fault(struct pt_regs *regs) { const struct exception_table_entry *entry; - enum ctx_state prev_state = exception_enter(); - int rc = __do_page_fault(regs, address, error_code); - exception_exit(prev_state); - if (likely(!rc)) - return 0; + long err; + + err = ___do_page_fault(regs, regs->dar, regs->dsisr); + if (likely(!err)) + return err; entry = search_exception_tables(regs->nip); - if (unlikely(!entry)) - return rc; + if (likely(entry)) { + instruction_pointer_set(regs, extable_fixup(entry)); + return 0; + } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) { + __bad_page_fault(regs, err); + return 0; + } else { + /* 32 and 64e handle the bad page fault in asm */ + return err; + } +} +NOKPROBE_SYMBOL(__do_page_fault); - instruction_pointer_set(regs, extable_fixup(entry)); +DEFINE_INTERRUPT_HANDLER_RET(do_page_fault) +{ + return __do_page_fault(regs); +} - return 0; +#ifdef CONFIG_PPC_BOOK3S_64 +/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */ +long hash__do_page_fault(struct pt_regs *regs) +{ + return __do_page_fault(regs); } -NOKPROBE_SYMBOL(do_page_fault); +NOKPROBE_SYMBOL(hash__do_page_fault); +#endif /* * bad_page_fault is called when we have a bad access from the kernel. * It is called from the DSI and ISI handlers in head.S and from some * of the procedures in traps.c. 
*/ -void __bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) +void __bad_page_fault(struct pt_regs *regs, int sig) { int is_write = page_fault_is_write(regs->dsisr); @@ -605,7 +619,7 @@ void __bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) die("Kernel access of bad area", regs, sig); } -void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) +void bad_page_fault(struct pt_regs *regs, int sig) { const struct exception_table_entry *entry; @@ -614,5 +628,12 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) if (entry) instruction_pointer_set(regs, extable_fixup(entry)); else - __bad_page_fault(regs, address, sig); + __bad_page_fault(regs, sig); } + +#ifdef CONFIG_PPC_BOOK3S_64 +DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv) +{ + bad_page_fault(regs, SIGSEGV); +} +#endif diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 8b3cc4d688e8..d142b76d507d 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -217,7 +217,7 @@ void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_p } } -int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate) +static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate) { struct huge_bootmem_page *m; if (nr_gpages == 0) @@ -663,24 +663,6 @@ static int __init hugetlbpage_init(void) arch_initcall(hugetlbpage_init); -void flush_dcache_icache_hugepage(struct page *page) -{ - int i; - void *start; - - BUG_ON(!PageCompound(page)); - - for (i = 0; i < compound_nr(page); i++) { - if (!PageHighMem(page)) { - __flush_dcache_icache(page_address(page+i)); - } else { - start = kmap_atomic(page+i); - __flush_dcache_icache(start); - kunmap_atomic(start); - } - } -} - void __init gigantic_hugetlb_cma_reserve(void) { unsigned long order = 0; diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index afab328d0887..4e8ce6d85232 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -91,27 +91,6 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end) return -ENODEV; } -#define FLUSH_CHUNK_SIZE SZ_1G -/** - * flush_dcache_range_chunked(): Write any modified data cache blocks out to - * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE - * Does not invalidate the corresponding instruction cache blocks. 
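
Throughout this series PG_arch_1 is renamed to PG_dcache_clean, which better describes the lazy D/I-cache protocol it implements (see hash_utils.c above and mem.c/pgtable.c below). The protocol in miniature, as a standalone model rather than kernel code:

#include <stdbool.h>

struct page_model {
	bool dcache_clean;	/* stands in for the PG_dcache_clean bit */
};

/* The kernel dirtied the page: note it, but defer the costly flush. */
static void flush_dcache_page_model(struct page_model *pg)
{
	pg->dcache_clean = false;
}

/* First executable use: make the I-cache coherent, then mark clean. */
static void make_exec_coherent_model(struct page_model *pg)
{
	if (!pg->dcache_clean) {
		/* flush_dcache_icache_page() in the real code */
		pg->dcache_clean = true;
	}
}
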
- * - * @start: the start address - * @stop: the stop address (exclusive) - * @chunk: the max size of the chunks - */ -static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, - unsigned long chunk) -{ - unsigned long i; - - for (i = start; i < stop; i += chunk) { - flush_dcache_range(i, min(stop, i + chunk)); - cond_resched(); - } -} - int __ref arch_create_linear_mapping(int nid, u64 start, u64 size, struct mhp_params *params) { @@ -136,7 +115,6 @@ void __ref arch_remove_linear_mapping(u64 start, u64 size) /* Remove htab bolted mappings for this section of memory */ start = (unsigned long)__va(start); - flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE); mutex_lock(&linear_mapping_mutex); ret = remove_section_mapping(start, start + size); @@ -489,19 +467,35 @@ void flush_dcache_page(struct page *page) if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) return; /* avoid an atomic op if possible */ - if (test_bit(PG_arch_1, &page->flags)) - clear_bit(PG_arch_1, &page->flags); + if (test_bit(PG_dcache_clean, &page->flags)) + clear_bit(PG_dcache_clean, &page->flags); } EXPORT_SYMBOL(flush_dcache_page); -void flush_dcache_icache_page(struct page *page) +static void flush_dcache_icache_hugepage(struct page *page) { -#ifdef CONFIG_HUGETLB_PAGE - if (PageCompound(page)) { - flush_dcache_icache_hugepage(page); - return; + int i; + void *start; + + BUG_ON(!PageCompound(page)); + + for (i = 0; i < compound_nr(page); i++) { + if (!PageHighMem(page)) { + __flush_dcache_icache(page_address(page+i)); + } else { + start = kmap_atomic(page+i); + __flush_dcache_icache(start); + kunmap_atomic(start); + } } -#endif +} + +void flush_dcache_icache_page(struct page *page) +{ + + if (PageCompound(page)) + return flush_dcache_icache_hugepage(page); + #if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64) /* On 8xx there is no need to kmap since highmem is not supported */ __flush_dcache_icache(page_address(page)); diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 15555c95cebc..354611940118 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -26,6 +26,7 @@ #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/hugetlb.h> +#include <asm/pte-walk.h> static inline int is_exec_fault(void) { @@ -81,9 +82,9 @@ static pte_t set_pte_filter_hash(pte_t pte) struct page *pg = maybe_pte_to_page(pte); if (!pg) return pte; - if (!test_bit(PG_arch_1, &pg->flags)) { + if (!test_bit(PG_dcache_clean, &pg->flags)) { flush_dcache_icache_page(pg); - set_bit(PG_arch_1, &pg->flags); + set_bit(PG_dcache_clean, &pg->flags); } } return pte; @@ -116,13 +117,13 @@ static inline pte_t set_pte_filter(pte_t pte) return pte; /* If the page clean, we move on */ - if (test_bit(PG_arch_1, &pg->flags)) + if (test_bit(PG_dcache_clean, &pg->flags)) return pte; /* If it's an exec fault, we flush the cache and make it clean */ if (is_exec_fault()) { flush_dcache_icache_page(pg); - set_bit(PG_arch_1, &pg->flags); + set_bit(PG_dcache_clean, &pg->flags); return pte; } @@ -161,12 +162,12 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, goto bail; /* If the page is already clean, we move on */ - if (test_bit(PG_arch_1, &pg->flags)) + if (test_bit(PG_dcache_clean, &pg->flags)) goto bail; - /* Clean the page and set PG_arch_1 */ + /* Clean the page and set PG_dcache_clean */ flush_dcache_icache_page(pg); - set_bit(PG_arch_1, &pg->flags); + set_bit(PG_dcache_clean, &pg->flags); bail: return pte_mkexec(pte); diff --git 
a/arch/powerpc/mm/ptdump/segment_regs.c b/arch/powerpc/mm/ptdump/segment_regs.c index dde2fe8de4b2..565048a0c9be 100644 --- a/arch/powerpc/mm/ptdump/segment_regs.c +++ b/arch/powerpc/mm/ptdump/segment_regs.c @@ -10,7 +10,7 @@ static void seg_show(struct seq_file *m, int i) { - u32 val = mfsrin(i << 28); + u32 val = mfsr(i << 28); seq_printf(m, "0x%01x0000000-0x%01xfffffff ", i, i); seq_printf(m, "Kern key %d ", (val >> 30) & 1); diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 869d999a836e..766f064f00fb 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -54,6 +54,9 @@ struct cpu_hw_events { struct perf_branch_stack bhrb_stack; struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES]; u64 ic_init; + + /* Store the PMC values */ + unsigned long pmcs[MAX_HWEVENTS]; }; static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); @@ -110,10 +113,6 @@ static inline void perf_read_regs(struct pt_regs *regs) { regs->result = 0; } -static inline int perf_intr_is_nmi(struct pt_regs *regs) -{ - return 0; -} static inline int siar_valid(struct pt_regs *regs) { @@ -147,6 +146,17 @@ bool is_sier_available(void) return false; } +/* + * Return PMC value corresponding to the + * index passed. + */ +unsigned long get_pmcs_ext_regs(int idx) +{ + struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + + return cpuhw->pmcs[idx]; +} + static bool regs_use_siar(struct pt_regs *regs) { /* @@ -212,7 +222,7 @@ static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs * if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid) *addrp = mfspr(SPRN_SDAR); - if (is_kernel_addr(mfspr(SPRN_SDAR)) && perf_allow_kernel(&event->attr) != 0) + if (is_kernel_addr(mfspr(SPRN_SDAR)) && event->attr.exclude_kernel) *addrp = 0; } @@ -354,15 +364,6 @@ static inline void perf_read_regs(struct pt_regs *regs) } /* - * If interrupts were soft-disabled when a PMU interrupt occurs, treat - * it as an NMI. - */ -static inline int perf_intr_is_nmi(struct pt_regs *regs) -{ - return (regs->softe & IRQS_DISABLED); -} - -/* * On processors like P7+ that have the SIAR-Valid bit, marked instructions * must be sampled only if the SIAR-valid bit is set. * @@ -506,7 +507,7 @@ static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events * * addresses, hence include a check before filtering code */ if (!(ppmu->flags & PPMU_ARCH_31) && - is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0) + is_kernel_addr(addr) && event->attr.exclude_kernel) continue; /* Branches are read most recent first (ie. 
mfbhrb 0 is @@ -915,7 +916,7 @@ void perf_event_print_debug(void) */ static int power_check_constraints(struct cpu_hw_events *cpuhw, u64 event_id[], unsigned int cflags[], - int n_ev) + int n_ev, struct perf_event **event) { unsigned long mask, value, nv; unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS]; @@ -938,7 +939,7 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw, event_id[i] = cpuhw->alternatives[i][0]; } if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0], - &cpuhw->avalues[i][0])) + &cpuhw->avalues[i][0], event[i]->attr.config1)) return -1; } value = mask = 0; @@ -973,7 +974,8 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw, for (j = 1; j < n_alt[i]; ++j) ppmu->get_constraint(cpuhw->alternatives[i][j], &cpuhw->amasks[i][j], - &cpuhw->avalues[i][j]); + &cpuhw->avalues[i][j], + event[i]->attr.config1); } /* enumerate all possibilities and see if any will work */ @@ -1391,7 +1393,7 @@ static void power_pmu_enable(struct pmu *pmu) memset(&cpuhw->mmcr, 0, sizeof(cpuhw->mmcr)); if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, - &cpuhw->mmcr, cpuhw->event)) { + &cpuhw->mmcr, cpuhw->event, ppmu->flags)) { /* shouldn't ever get here */ printk(KERN_ERR "oops compute_mmcr failed\n"); goto out; @@ -1579,7 +1581,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags) if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) goto out; - if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) + if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1, cpuhw->event)) goto out; event->hw.config = cpuhw->events[n0]; @@ -1789,7 +1791,7 @@ static int power_pmu_commit_txn(struct pmu *pmu) n = cpuhw->n_events; if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) return -EAGAIN; - i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n); + i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n, cpuhw->event); if (i < 0) return -EAGAIN; @@ -2027,7 +2029,7 @@ static int power_pmu_event_init(struct perf_event *event) local_irq_save(irq_flags); cpuhw = this_cpu_ptr(&cpu_hw_events); - err = power_check_constraints(cpuhw, events, cflags, n + 1); + err = power_check_constraints(cpuhw, events, cflags, n + 1, ctrs); if (has_branch_stack(event)) { u64 bhrb_filter = -1; @@ -2149,7 +2151,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val, left += period; if (left <= 0) left = period; - record = siar_valid(regs); + + /* + * If address is not requested in the sample via + * PERF_SAMPLE_IP, just record that sample irrespective + * of SIAR valid check. + */ + if (event->attr.sample_type & PERF_SAMPLE_IP) + record = siar_valid(regs); + else + record = 1; + event->hw.last_period = event->hw.sample_period; } if (left < 0x80000000LL) @@ -2167,9 +2179,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, * MMCR2. Check attr.exclude_kernel and address to drop the sample in * these cases. */ - if (event->attr.exclude_kernel && record) - if (is_kernel_addr(mfspr(SPRN_SIAR))) - record = 0; + if (event->attr.exclude_kernel && + (event->attr.sample_type & PERF_SAMPLE_IP) && + is_kernel_addr(mfspr(SPRN_SIAR))) + record = 0; /* * Finally record data if requested. 
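The record_and_restart() changes above narrow when a sample is discarded: SIAR validity and the SIAR-based exclude_kernel check now only matter if the sample actually carries an instruction pointer. A condensed sketch of the resulting decision, reusing the surrounding helpers (siar_valid(), is_kernel_addr()) for illustration rather than quoting the literal function:

static bool keep_sample(struct perf_event *event, struct pt_regs *regs)
{
	bool wants_ip = event->attr.sample_type & PERF_SAMPLE_IP;

	/* Only insist on a valid SIAR when the sample will carry an IP. */
	if (wants_ip && !siar_valid(regs))
		return false;

	/* Likewise, only drop kernel hits when an IP is being recorded. */
	if (wants_ip && event->attr.exclude_kernel &&
	    is_kernel_addr(mfspr(SPRN_SIAR)))
		return false;

	return true;
}

Samples without PERF_SAMPLE_IP are therefore always recorded, instead of being silently dropped whenever SIAR happens to be invalid or to point into the kernel.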
@@ -2277,9 +2290,7 @@ static void __perf_event_interrupt(struct pt_regs *regs) int i, j; struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); struct perf_event *event; - unsigned long val[8]; int found, active; - int nmi; if (cpuhw->n_limited) freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), @@ -2287,26 +2298,14 @@ static void __perf_event_interrupt(struct pt_regs *regs) perf_read_regs(regs); - /* - * If perf interrupts hit in a local_irq_disable (soft-masked) region, - * we consider them as NMIs. This is required to prevent hash faults on - * user addresses when reading callchains. See the NMI test in - * do_hash_page. - */ - nmi = perf_intr_is_nmi(regs); - if (nmi) - nmi_enter(); - else - irq_enter(); - /* Read all the PMCs since we'll need them a bunch of times */ for (i = 0; i < ppmu->n_counter; ++i) - val[i] = read_pmc(i + 1); + cpuhw->pmcs[i] = read_pmc(i + 1); /* Try to find what caused the IRQ */ found = 0; for (i = 0; i < ppmu->n_counter; ++i) { - if (!pmc_overflow(val[i])) + if (!pmc_overflow(cpuhw->pmcs[i])) continue; if (is_limited_pmc(i + 1)) continue; /* these won't generate IRQs */ @@ -2321,7 +2320,7 @@ static void __perf_event_interrupt(struct pt_regs *regs) event = cpuhw->event[j]; if (event->hw.idx == (i + 1)) { active = 1; - record_and_restart(event, val[i], regs); + record_and_restart(event, cpuhw->pmcs[i], regs); break; } } @@ -2335,17 +2334,17 @@ static void __perf_event_interrupt(struct pt_regs *regs) event = cpuhw->event[i]; if (!event->hw.idx || is_limited_pmc(event->hw.idx)) continue; - if (pmc_overflow_power7(val[event->hw.idx - 1])) { + if (pmc_overflow_power7(cpuhw->pmcs[event->hw.idx - 1])) { /* event has overflowed in a buggy way*/ found = 1; record_and_restart(event, - val[event->hw.idx - 1], + cpuhw->pmcs[event->hw.idx - 1], regs); } } } - if (!found && !nmi && printk_ratelimit()) - printk(KERN_WARNING "Can't find PMC that caused IRQ\n"); + if (unlikely(!found) && !arch_irq_disabled_regs(regs)) + printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n"); /* * Reset MMCR0 to its normal value. This will set PMXE and @@ -2356,10 +2355,9 @@ static void __perf_event_interrupt(struct pt_regs *regs) */ write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0); - if (nmi) - nmi_exit(); - else - irq_exit(); + /* Clear the cpuhw->pmcs */ + memset(&cpuhw->pmcs, 0, sizeof(cpuhw->pmcs)); + } static void perf_event_interrupt(struct pt_regs *regs) diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c index e0e7e276bfd2..ee721f420a7b 100644 --- a/arch/powerpc/perf/core-fsl-emb.c +++ b/arch/powerpc/perf/core-fsl-emb.c @@ -31,19 +31,6 @@ static atomic_t num_events; /* Used to avoid races in calling reserve/release_pmc_hardware */ static DEFINE_MUTEX(pmc_reserve_mutex); -/* - * If interrupts were soft-disabled when a PMU interrupt occurs, treat - * it as an NMI. 
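Both PMU interrupt handlers (the book3s one above and the FSL embedded one below) lose the same hand-rolled NMI detection. The likely reason, inferred from the interrupt-wrapper work elsewhere in this series rather than stated in these hunks, is that context accounting now happens in the common interrupt entry code, so the handler body no longer calls nmi_enter()/irq_enter() itself. A hypothetical shape of such a handler under that assumption (DEFINE_INTERRUPT_HANDLER_ASYNC is the asm/interrupt.h wrapper; the exact declaration used for the PMI is not shown in this diff):

/*
 * Hypothetical sketch: the generated wrapper does the irq/nmi context
 * bookkeeping before this body runs, so the body stays plain.
 */
DEFINE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception)
{
	__perf_event_interrupt(regs);
}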
- */ -static inline int perf_intr_is_nmi(struct pt_regs *regs) -{ -#ifdef __powerpc64__ - return (regs->softe & IRQS_DISABLED); -#else - return 0; -#endif -} - static void perf_event_interrupt(struct pt_regs *regs); /* @@ -659,13 +646,6 @@ static void perf_event_interrupt(struct pt_regs *regs) struct perf_event *event; unsigned long val; int found = 0; - int nmi; - - nmi = perf_intr_is_nmi(regs); - if (nmi) - nmi_enter(); - else - irq_enter(); for (i = 0; i < ppmu->n_counter; ++i) { event = cpuhw->event[i]; @@ -690,11 +670,6 @@ static void perf_event_interrupt(struct pt_regs *regs) mtmsr(mfmsr() | MSR_PMM); mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE); isync(); - - if (nmi) - nmi_exit(); - else - irq_exit(); } void hw_perf_event_setup(int cpu) diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 6e7e820508df..e5eb33255066 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -764,6 +764,14 @@ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event, return ev_len; } +/* + * Return true in case of invalid or dummy events with names like RESERVED* + */ +static bool ignore_event(const char *name) +{ + return strncmp(name, "RESERVED", 8) == 0; +} + #define MAX_4K (SIZE_MAX / 4096) static int create_events_from_catalog(struct attribute ***events_, @@ -894,6 +902,10 @@ static int create_events_from_catalog(struct attribute ***events_, name = event_name(event, &nl); + if (ignore_event(name)) { + junk_events++; + continue; + } if (event->event_group_record_len == 0) { pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n", event_idx, nl, name); @@ -955,6 +967,9 @@ static int create_events_from_catalog(struct attribute ***events_, continue; name = event_name(event, &nl); + if (ignore_event(name)) + continue; + nonce = event_uniq_add(&ev_uniq, name, nl, event->domain); ct = event_data_to_attrs(event_idx, events + event_attr_ct, event, nonce); diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 6ab5b272090a..e4f577da33d8 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -108,12 +108,57 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) *mmcra |= MMCRA_SDAR_MODE_TLB; } +static u64 p10_thresh_cmp_val(u64 value) +{ + int exp = 0; + u64 result = value; + + if (!value) + return value; + + /* + * In case of P10, the thresh_cmp value is not part of the raw event code + * and is provided via the attr.config1 parameter. To program the threshold in MMCRA, + * take an 18 bit number N and shift right 2 places and increment + * the exponent E by 1 until the upper 10 bits of N are zero. + * Write E to the threshold exponent and write the lower 8 bits of N + * to the threshold mantissa. + * The max threshold that can be written is 261120. + */ + if (cpu_has_feature(CPU_FTR_ARCH_31)) { + if (value > 261120) + value = 261120; + while ((64 - __builtin_clzl(value)) > 8) { + exp++; + value >>= 2; + } + + /* + * Note that it is invalid to write a mantissa with the + * upper 2 bits of mantissa being zero, unless the + * exponent is also zero. + */ + if (!(value & 0xC0) && exp) + result = 0; + else + result = (exp << 8) | value; + } + return result; +} + static u64 thresh_cmp_val(u64 value) { + if (cpu_has_feature(CPU_FTR_ARCH_31)) + value = p10_thresh_cmp_val(value); + + /* + * Since the location of the threshold compare bits in MMCRA + * is different for p8, use a different shift value.
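A worked example of the p10_thresh_cmp_val() encoding above, with an illustrative input (this walk-through is not part of the patch):

/*
 * p10_thresh_cmp_val(1000): 0x3E8 needs 10 bits, so the loop shifts
 * right by 2 once and sets exp = 1, leaving 0xFA (8 bits).  The upper
 * two mantissa bits of 0xFA are set (0xFA & 0xC0 != 0), so the value
 * is accepted and the result is
 *
 *	(1 << 8) | 0xFA = 0x1FA
 *
 * i.e. exponent 1, mantissa 0xFA, for an effective threshold of
 * 0xFA << 2 = 1000 again.  Userspace supplies this 18-bit input via
 * the thresh_cmp field (config1:0-17) added in power10-pmu.c below.
 */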
+ */ if (cpu_has_feature(CPU_FTR_ARCH_300)) return value << p9_MMCRA_THR_CMP_SHIFT; - - return value << MMCRA_THR_CMP_SHIFT; + else + return value << MMCRA_THR_CMP_SHIFT; } static unsigned long combine_from_event(u64 event) @@ -141,13 +186,13 @@ static bool is_thresh_cmp_valid(u64 event) { unsigned int cmp, exp; + if (cpu_has_feature(CPU_FTR_ARCH_31)) + return p10_thresh_cmp_val(event) != 0; + /* * Check the mantissa upper two bits are not zero, unless the * exponent is also zero. See the THRESH_CMP_MANTISSA doc. - * Power10: thresh_cmp is replaced by l2_l3 event select. */ - if (cpu_has_feature(CPU_FTR_ARCH_31)) - return false; cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; exp = cmp >> 7; @@ -256,7 +301,7 @@ void isa207_get_mem_weight(u64 *weight) *weight = mantissa << (2 * exp); } -int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) +int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1) { unsigned int unit, pmc, cache, ebb; unsigned long mask, value; @@ -355,9 +400,11 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) } if (cpu_has_feature(CPU_FTR_ARCH_31)) { - if (event_is_threshold(event)) { + if (event_is_threshold(event) && is_thresh_cmp_valid(event_config1)) { mask |= CNST_THRESH_CTL_SEL_MASK; value |= CNST_THRESH_CTL_SEL_VAL(event >> EVENT_THRESH_SHIFT); + mask |= p10_CNST_THRESH_CMP_MASK; + value |= p10_CNST_THRESH_CMP_VAL(p10_thresh_cmp_val(event_config1)); } } else if (cpu_has_feature(CPU_FTR_ARCH_300)) { if (event_is_threshold(event) && is_thresh_cmp_valid(event)) { @@ -411,7 +458,7 @@ ebb_bhrb: int isa207_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, - struct perf_event *pevents[]) + struct perf_event *pevents[], u32 flags) { unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val; unsigned long mmcr3; @@ -504,6 +551,10 @@ int isa207_compute_mmcr(u64 event[], int n_ev, val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; mmcra |= thresh_cmp_val(val); + } else if (flags & PPMU_HAS_ATTR_CONFIG1) { + val = (pevents[i]->attr.config1 >> p10_EVENT_THR_CMP_SHIFT) & + p10_EVENT_THR_CMP_MASK; + mmcra |= thresh_cmp_val(val); } } diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index 454b32c31440..1af0e8c97ac7 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -105,6 +105,10 @@ #define p10_EVENT_RADIX_SCOPE_QUAL_MASK 0x1 #define p10_MMCR1_RADIX_SCOPE_QUAL_SHIFT 45 +/* Event Threshold Compare bit constant for power10 in config1 attribute */ +#define p10_EVENT_THR_CMP_SHIFT 0 +#define p10_EVENT_THR_CMP_MASK 0x3FFFFull + #define p10_EVENT_VALID_MASK \ ((p10_SDAR_MODE_MASK << p10_SDAR_MODE_SHIFT | \ (p10_EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \ @@ -124,8 +128,8 @@ * 60 56 52 48 44 40 36 32 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | * [ fab_match ] [ thresh_cmp ] [ thresh_ctl ] [ ] - * | - * thresh_sel -* + * | | + * [ thresh_cmp bits for p10] thresh_sel -* * * 28 24 20 16 12 8 4 0 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | @@ -152,6 +156,9 @@ #define CNST_THRESH_CTL_SEL_VAL(v) (((v) & 0x7ffull) << 32) #define CNST_THRESH_CTL_SEL_MASK CNST_THRESH_CTL_SEL_VAL(0x7ff) +#define p10_CNST_THRESH_CMP_VAL(v) (((v) & 0x7ffull) << 43) +#define p10_CNST_THRESH_CMP_MASK p10_CNST_THRESH_CMP_VAL(0x7ff) + #define CNST_EBB_VAL(v) (((v) & EVENT_EBB_MASK) << 24) #define CNST_EBB_MASK 
CNST_EBB_VAL(EVENT_EBB_MASK) @@ -262,10 +269,10 @@ #define PH(a, b) (P(LVL, HIT) | P(a, b)) #define PM(a, b) (P(LVL, MISS) | P(a, b)) -int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp); +int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1); int isa207_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, - struct perf_event *pevents[]); + struct perf_event *pevents[], u32 flags); void isa207_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr); int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags, const unsigned int ev_alt[][MAX_ALT]); diff --git a/arch/powerpc/perf/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c index 1919e9df9165..e39b15b79a83 100644 --- a/arch/powerpc/perf/mpc7450-pmu.c +++ b/arch/powerpc/perf/mpc7450-pmu.c @@ -148,7 +148,7 @@ static u32 classbits[N_CLASSES - 1][2] = { }; static int mpc7450_get_constraint(u64 event, unsigned long *maskp, - unsigned long *valp) + unsigned long *valp, u64 event_config1 __maybe_unused) { int pmc, class; u32 mask, value; @@ -258,7 +258,8 @@ static const u32 pmcsel_mask[N_COUNTER] = { */ static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, - struct perf_event *pevents[]) + struct perf_event *pevents[], + u32 flags __maybe_unused) { u8 event_index[N_CLASSES][N_COUNTER]; int n_classevent[N_CLASSES]; diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index 6f681b105eec..b931eed482c9 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c @@ -75,6 +75,8 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = { static u64 get_ext_regs_value(int idx) { switch (idx) { + case PERF_REG_POWERPC_PMC1 ... 
PERF_REG_POWERPC_PMC6: + return get_pmcs_ext_regs(idx - PERF_REG_POWERPC_PMC1); case PERF_REG_POWERPC_MMCR0: return mfspr(SPRN_MMCR0); case PERF_REG_POWERPC_MMCR1: @@ -95,13 +97,6 @@ static u64 get_ext_regs_value(int idx) u64 perf_reg_value(struct pt_regs *regs, int idx) { - u64 perf_reg_extended_max = PERF_REG_POWERPC_MAX; - - if (cpu_has_feature(CPU_FTR_ARCH_31)) - perf_reg_extended_max = PERF_REG_MAX_ISA_31; - else if (cpu_has_feature(CPU_FTR_ARCH_300)) - perf_reg_extended_max = PERF_REG_MAX_ISA_300; - if (idx == PERF_REG_POWERPC_SIER && (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) || IS_ENABLED(CONFIG_PPC32) || @@ -113,14 +108,14 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) IS_ENABLED(CONFIG_PPC32))) return 0; - if (idx >= PERF_REG_POWERPC_MAX && idx < perf_reg_extended_max) + if (idx >= PERF_REG_POWERPC_MAX && idx < PERF_REG_EXTENDED_MAX) return get_ext_regs_value(idx); /* * If the idx is referring to value beyond the * supported registers, return 0 with a warning */ - if (WARN_ON_ONCE(idx >= perf_reg_extended_max)) + if (WARN_ON_ONCE(idx >= PERF_REG_EXTENDED_MAX)) return 0; return regs_get_register(regs, pt_regs_offset[idx]); diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c index 79e0206ca454..a901c1348cad 100644 --- a/arch/powerpc/perf/power10-pmu.c +++ b/arch/powerpc/perf/power10-pmu.c @@ -216,6 +216,7 @@ PMU_FORMAT_ATTR(invert_bit, "config:47"); PMU_FORMAT_ATTR(src_mask, "config:48-53"); PMU_FORMAT_ATTR(src_match, "config:54-59"); PMU_FORMAT_ATTR(radix_scope, "config:9"); +PMU_FORMAT_ATTR(thresh_cmp, "config1:0-17"); static struct attribute *power10_pmu_format_attr[] = { &format_attr_event.attr, @@ -236,6 +237,7 @@ static struct attribute *power10_pmu_format_attr[] = { &format_attr_src_mask.attr, &format_attr_src_match.attr, &format_attr_radix_scope.attr, + &format_attr_thresh_cmp.attr, NULL, }; @@ -550,7 +552,7 @@ static struct power_pmu power10_pmu = { .get_mem_weight = isa207_get_mem_weight, .disable_pmc = isa207_disable_pmc, .flags = PPMU_HAS_SIER | PPMU_ARCH_207S | - PPMU_ARCH_31, + PPMU_ARCH_31 | PPMU_HAS_ATTR_CONFIG1, .n_generic = ARRAY_SIZE(power10_generic_events), .generic_events = power10_generic_events, .cache_events = &power10_cache_events, diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c index 3e64b4a1511f..18732267993a 100644 --- a/arch/powerpc/perf/power5+-pmu.c +++ b/arch/powerpc/perf/power5+-pmu.c @@ -132,7 +132,7 @@ static unsigned long unit_cons[PM_LASTUNIT+1][2] = { }; static int power5p_get_constraint(u64 event, unsigned long *maskp, - unsigned long *valp) + unsigned long *valp, u64 event_config1 __maybe_unused) { int pmc, byte, unit, sh; int bit, fmask; @@ -451,7 +451,8 @@ static int power5p_marked_instr_event(u64 event) static int power5p_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, - struct perf_event *pevents[]) + struct perf_event *pevents[], + u32 flags __maybe_unused) { unsigned long mmcr1 = 0; unsigned long mmcra = 0; diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c index 017bb19b73fb..cb611c1e7abe 100644 --- a/arch/powerpc/perf/power5-pmu.c +++ b/arch/powerpc/perf/power5-pmu.c @@ -136,7 +136,7 @@ static unsigned long unit_cons[PM_LASTUNIT+1][2] = { }; static int power5_get_constraint(u64 event, unsigned long *maskp, - unsigned long *valp) + unsigned long *valp, u64 event_config1 __maybe_unused) { int pmc, byte, unit, sh; int bit, fmask; @@ -382,7 +382,8 @@ static int power5_marked_instr_event(u64 event) static int 
power5_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, - struct perf_event *pevents[]) + struct perf_event *pevents[], + u32 flags __maybe_unused) { unsigned long mmcr1 = 0; unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c index 189974478e9f..69ef38216418 100644 --- a/arch/powerpc/perf/power6-pmu.c +++ b/arch/powerpc/perf/power6-pmu.c @@ -173,7 +173,8 @@ static int power6_marked_instr_event(u64 event) * Assign PMC numbers and compute MMCR1 value for a set of events */ static int p6_compute_mmcr(u64 event[], int n_ev, - unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[]) + unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[], + u32 flags __maybe_unused) { unsigned long mmcr1 = 0; unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; @@ -266,7 +267,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev, * 32-34 select field: nest (subunit) event selector */ static int p6_get_constraint(u64 event, unsigned long *maskp, - unsigned long *valp) + unsigned long *valp, u64 event_config1 __maybe_unused) { int pmc, byte, sh, subunit; unsigned long mask = 0, value = 0; diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index bacfab104a1a..894c17f9a762 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -81,7 +81,7 @@ enum { */ static int power7_get_constraint(u64 event, unsigned long *maskp, - unsigned long *valp) + unsigned long *valp, u64 event_config1 __maybe_unused) { int pmc, sh, unit; unsigned long mask = 0, value = 0; @@ -245,7 +245,8 @@ static int power7_marked_instr_event(u64 event) static int power7_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, - struct perf_event *pevents[]) + struct perf_event *pevents[], + u32 flags __maybe_unused) { unsigned long mmcr1 = 0; unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c index 7d78df97f272..1f8263785286 100644 --- a/arch/powerpc/perf/ppc970-pmu.c +++ b/arch/powerpc/perf/ppc970-pmu.c @@ -190,7 +190,7 @@ static unsigned long unit_cons[PM_LASTUNIT+1][2] = { }; static int p970_get_constraint(u64 event, unsigned long *maskp, - unsigned long *valp) + unsigned long *valp, u64 event_config1 __maybe_unused) { int pmc, byte, unit, sh, spcsel; unsigned long mask = 0, value = 0; @@ -256,7 +256,8 @@ static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[]) static int p970_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, - struct perf_event *pevents[]) + struct perf_event *pevents[], + u32 flags __maybe_unused) { unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0; unsigned int pmc, unit, byte, psel; diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 78ac6d67a935..7d41e9264510 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -206,17 +206,10 @@ config AKEBONO select PPC4xx_HSTA_MSI select I2C select I2C_IBM_IIC - select NETDEVICES - select ETHERNET - select NET_VENDOR_IBM select IBM_EMAC_EMAC4 if IBM_EMAC select USB if USB_SUPPORT select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD - select MMC_SDHCI - select MMC_SDHCI_PLTFM - select ATA - select SATA_AHCI_PLATFORM help This option enables support for the IBM Akebono (476gtr) evaluation 
board diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c index 6303fbfc4e4f..9d030c2e0004 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads.c @@ -24,21 +24,23 @@ static void __init mpc5121_ads_setup_arch(void) { -#ifdef CONFIG_PCI - struct device_node *np; -#endif printk(KERN_INFO "MPC5121 ADS board from Freescale Semiconductor\n"); /* * cpld regs are needed early */ mpc5121_ads_cpld_map(); + mpc512x_setup_arch(); +} + +static void __init mpc5121_ads_setup_pci(void) +{ #ifdef CONFIG_PCI + struct device_node *np; + for_each_compatible_node(np, "pci", "fsl,mpc5121-pci") mpc83xx_add_bridge(np); #endif - - mpc512x_setup_arch(); } static void __init mpc5121_ads_init_IRQ(void) @@ -64,6 +66,7 @@ define_machine(mpc5121_ads) { .name = "MPC5121 ADS", .probe = mpc5121_ads_probe, .setup_arch = mpc5121_ads_setup_arch, + .discover_phbs = mpc5121_ads_setup_pci, .init = mpc512x_init, .init_IRQ = mpc5121_ads_init_IRQ, .get_irq = ipic_get_irq, diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c index 4514a6f7458a..3b7d70d71692 100644 --- a/arch/powerpc/platforms/52xx/efika.c +++ b/arch/powerpc/platforms/52xx/efika.c @@ -185,8 +185,6 @@ static void __init efika_setup_arch(void) /* Map important registers from the internal memory map */ mpc52xx_map_common_devices(); - efika_pcisetup(); - #ifdef CONFIG_PM mpc52xx_suspend.board_suspend_prepare = efika_suspend_prepare; mpc52xx_pm_init(); @@ -218,6 +216,7 @@ define_machine(efika) .name = EFIKA_PLATFORM_NAME, .probe = efika_probe, .setup_arch = efika_setup_arch, + .discover_phbs = efika_pcisetup, .init = mpc52xx_declare_of_platform_devices, .show_cpuinfo = efika_show_cpuinfo, .init_IRQ = mpc52xx_init_irq, diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c index 3181aac08225..04cc97397095 100644 --- a/arch/powerpc/platforms/52xx/lite5200.c +++ b/arch/powerpc/platforms/52xx/lite5200.c @@ -165,8 +165,6 @@ static void __init lite5200_setup_arch(void) mpc52xx_suspend.board_resume_finish = lite5200_resume_finish; lite5200_pm_init(); #endif - - mpc52xx_setup_pci(); } static const char * const board[] __initconst = { @@ -187,6 +185,7 @@ define_machine(lite5200) { .name = "lite5200", .probe = lite5200_probe, .setup_arch = lite5200_setup_arch, + .discover_phbs = mpc52xx_setup_pci, .init = mpc52xx_declare_of_platform_devices, .init_IRQ = mpc52xx_init_irq, .get_irq = mpc52xx_get_irq, diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 07c5bc4ed0b5..efb8bdecbcc7 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -202,8 +202,6 @@ static void __init media5200_setup_arch(void) /* Some mpc5200 & mpc5200b related configuration */ mpc5200_setup_xlb_arbiter(); - mpc52xx_setup_pci(); - np = of_find_matching_node(NULL, mpc5200_gpio_ids); gpio = of_iomap(np, 0); of_node_put(np); @@ -244,6 +242,7 @@ define_machine(media5200_platform) { .name = "media5200-platform", .probe = media5200_probe, .setup_arch = media5200_setup_arch, + .discover_phbs = mpc52xx_setup_pci, .init = mpc52xx_declare_of_platform_devices, .init_IRQ = media5200_init_irq, .get_irq = mpc52xx_get_irq, diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c index 2d01e9b2e779..b9f5675b0a1d 100644 --- a/arch/powerpc/platforms/52xx/mpc5200_simple.c +++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c @@ -40,8 
+40,6 @@ static void __init mpc5200_simple_setup_arch(void) /* Some mpc5200 & mpc5200b related configuration */ mpc5200_setup_xlb_arbiter(); - - mpc52xx_setup_pci(); } /* list of the supported boards */ @@ -73,6 +71,7 @@ define_machine(mpc5200_simple_platform) { .name = "mpc5200-simple-platform", .probe = mpc5200_simple_probe, .setup_arch = mpc5200_simple_setup_arch, + .discover_phbs = mpc52xx_setup_pci, .init = mpc52xx_declare_of_platform_devices, .init_IRQ = mpc52xx_init_irq, .get_irq = mpc52xx_get_irq, diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index 05e19470d523..b91ebebd9ff2 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -229,7 +229,7 @@ static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id) int dma, write, poll_dma; spin_lock_irqsave(&lpbfifo.lock, flags); - ts = get_tbl(); + ts = mftb(); req = lpbfifo.req; if (!req) { @@ -307,7 +307,7 @@ static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id) if (irq != 0) /* don't increment on polled case */ req->irq_count++; - req->irq_ticks += get_tbl() - ts; + req->irq_ticks += mftb() - ts; spin_unlock_irqrestore(&lpbfifo.lock, flags); /* Spinlock is released; it is now safe to call the callback */ @@ -330,7 +330,7 @@ static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id) u32 ts; spin_lock_irqsave(&lpbfifo.lock, flags); - ts = get_tbl(); + ts = mftb(); req = lpbfifo.req; if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) { @@ -361,7 +361,7 @@ static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id) lpbfifo.req = NULL; /* Release the lock before calling out to the callback. */ - req->irq_ticks += get_tbl() - ts; + req->irq_ticks += mftb() - ts; spin_unlock_irqrestore(&lpbfifo.lock, flags); if (req->callback) diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c index 3fe1a6593280..0b5b9dec16d5 100644 --- a/arch/powerpc/platforms/82xx/mpc8272_ads.c +++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c @@ -171,7 +171,6 @@ static void __init mpc8272_ads_setup_arch(void) iounmap(bcsr); init_ioports(); - pq2_init_pci(); if (ppc_md.progress) ppc_md.progress("mpc8272_ads_setup_arch(), finish", 0); @@ -205,6 +204,7 @@ define_machine(mpc8272_ads) .name = "Freescale MPC8272 ADS", .probe = mpc8272_ads_probe, .setup_arch = mpc8272_ads_setup_arch, + .discover_phbs = pq2_init_pci, .init_IRQ = mpc8272_ads_pic_init, .get_irq = cpm2_get_irq, .calibrate_decr = generic_calibrate_decr, diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 096cc0d59fd8..f82f75a6085c 100644 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c @@ -123,20 +123,17 @@ int __init pq2ads_pci_init_irq(void) np = of_find_compatible_node(NULL, NULL, "fsl,pq2ads-pci-pic"); if (!np) { printk(KERN_ERR "No pci pic node in device tree.\n"); - of_node_put(np); goto out; } irq = irq_of_parse_and_map(np, 0); if (!irq) { printk(KERN_ERR "No interrupt in pci pic node.\n"); - of_node_put(np); - goto out; + goto out_put_node; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { - of_node_put(np); ret = -ENOMEM; goto out_unmap_irq; } @@ -161,17 +158,17 @@ int __init pq2ads_pci_init_irq(void) priv->host = host; irq_set_handler_data(irq, priv); irq_set_chained_handler(irq, pq2ads_pci_irq_demux); - - of_node_put(np); - return 0; + ret = 0; + goto out_put_node; out_unmap_regs: 
iounmap(priv->regs); out_free_kmalloc: kfree(priv); - of_node_put(np); out_unmap_irq: irq_dispose_mapping(irq); +out_put_node: + of_node_put(np); out: return ret; } diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c index a74082140718..ac9113d524af 100644 --- a/arch/powerpc/platforms/82xx/pq2fads.c +++ b/arch/powerpc/platforms/82xx/pq2fads.c @@ -150,8 +150,6 @@ static void __init pq2fads_setup_arch(void) /* Enable external IRQs */ clrbits32(&cpm2_immr->im_siu_conf.siu_82xx.sc_siumcr, 0x0c000000); - pq2_init_pci(); - if (ppc_md.progress) ppc_md.progress("pq2fads_setup_arch(), finish", 0); } @@ -184,6 +182,7 @@ define_machine(pq2fads) .name = "Freescale PQ2FADS", .probe = pq2fads_probe, .setup_arch = pq2fads_setup_arch, + .discover_phbs = pq2_init_pci, .init_IRQ = pq2fads_pic_init, .get_irq = cpm2_get_irq, .calibrate_decr = generic_calibrate_decr, diff --git a/arch/powerpc/platforms/83xx/asp834x.c b/arch/powerpc/platforms/83xx/asp834x.c index 28474876f41b..68061c2a57c1 100644 --- a/arch/powerpc/platforms/83xx/asp834x.c +++ b/arch/powerpc/platforms/83xx/asp834x.c @@ -44,6 +44,7 @@ define_machine(asp834x) { .name = "ASP8347E", .probe = asp834x_probe, .setup_arch = asp834x_setup_arch, + .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c index bcdc2c203ec9..108e1e4d2683 100644 --- a/arch/powerpc/platforms/83xx/km83xx.c +++ b/arch/powerpc/platforms/83xx/km83xx.c @@ -180,6 +180,7 @@ define_machine(mpc83xx_km) { .name = "mpc83xx-km-platform", .probe = mpc83xx_km_probe, .setup_arch = mpc83xx_km_setup_arch, + .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c index a952e91db3ee..3285dabcf923 100644 --- a/arch/powerpc/platforms/83xx/misc.c +++ b/arch/powerpc/platforms/83xx/misc.c @@ -132,8 +132,6 @@ void __init mpc83xx_setup_arch(void) setbat(-1, va, immrbase, immrsize, PAGE_KERNEL_NCG); update_bats(); } - - mpc83xx_setup_pci(); } int machine_check_83xx(struct pt_regs *regs) diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c index 51426e88ec67..956d4389effa 100644 --- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c @@ -48,6 +48,7 @@ define_machine(mpc830x_rdb) { .name = "MPC830x RDB", .probe = mpc830x_rdb_probe, .setup_arch = mpc830x_rdb_setup_arch, + .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c index 5ccd57a48492..3b578f080e3b 100644 --- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c @@ -48,6 +48,7 @@ define_machine(mpc831x_rdb) { .name = "MPC831x RDB", .probe = mpc831x_rdb_probe, .setup_arch = mpc831x_rdb_setup_arch, + .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c index 6fa5402ebf20..850d566ef900 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c @@ -101,6 +101,7 @@ define_machine(mpc832x_mds) { .name = "MPC832x 
MDS", .probe = mpc832x_sys_probe, .setup_arch = mpc832x_sys_setup_arch, + .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index 622c625d5ce4..b6133a237a70 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -219,6 +219,7 @@ define_machine(mpc832x_rdb) { .name = "MPC832x RDB", .probe = mpc832x_rdb_probe, .setup_arch = mpc832x_rdb_setup_arch, + .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c index ebfd139bca20..9630f3aa4d9c 100644 --- a/arch/powerpc/platforms/83xx/mpc834x_itx.c +++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c @@ -70,6 +70,7 @@ define_machine(mpc834x_itx) { .name = "MPC834x ITX", .probe = mpc834x_itx_probe, .setup_arch = mpc834x_itx_setup_arch, + .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c index 356228e35279..6d91bdce0a18 100644 --- a/arch/powerpc/platforms/83xx/mpc834x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c @@ -91,6 +91,7 @@ define_machine(mpc834x_mds) { .name = "MPC834x MDS", .probe = mpc834x_mds_probe, .setup_arch = mpc834x_mds_setup_arch, + .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c index 90d9cbfae659..da4cf52cb55b 100644 --- a/arch/powerpc/platforms/83xx/mpc836x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c @@ -201,6 +201,7 @@ define_machine(mpc836x_mds) { .name = "MPC836x MDS", .probe = mpc836x_mds_probe, .setup_arch = mpc836x_mds_setup_arch, + .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c index b4aac2cde849..3427ad0d9d38 100644 --- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c +++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c @@ -41,6 +41,7 @@ define_machine(mpc836x_rdk) { .name = "MPC836x RDK", .probe = mpc836x_rdk_probe, .setup_arch = mpc836x_rdk_setup_arch, + .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c index 9d3721c965be..f28d166ea7db 100644 --- a/arch/powerpc/platforms/83xx/mpc837x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c @@ -93,6 +93,7 @@ define_machine(mpc837x_mds) { .name = "MPC837x MDS", .probe = mpc837x_mds_probe, .setup_arch = mpc837x_mds_setup_arch, + .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c index 7c45f7ac2607..7fb7684c256b 100644 --- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c @@ -73,6 +73,7 @@ define_machine(mpc837x_rdb) { .name = "MPC837x RDB/WLAN", .probe = mpc837x_rdb_probe, .setup_arch = 
mpc837x_rdb_setup_arch, + .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h index f37d04332fc7..a30d30588cf6 100644 --- a/arch/powerpc/platforms/83xx/mpc83xx.h +++ b/arch/powerpc/platforms/83xx/mpc83xx.h @@ -76,7 +76,7 @@ extern void mpc83xx_ipic_init_IRQ(void); #ifdef CONFIG_PCI extern void mpc83xx_setup_pci(void); #else -#define mpc83xx_setup_pci() do {} while (0) +#define mpc83xx_setup_pci NULL #endif extern int mpc83xx_declare_of_platform_devices(void); diff --git a/arch/powerpc/platforms/8xx/machine_check.c b/arch/powerpc/platforms/8xx/machine_check.c index 88dedf38eccd..656365975895 100644 --- a/arch/powerpc/platforms/8xx/machine_check.c +++ b/arch/powerpc/platforms/8xx/machine_check.c @@ -26,7 +26,7 @@ int machine_check_8xx(struct pt_regs *regs) * to deal with that than having a wart in the mcheck handler. * -- BenH */ - bad_page_fault(regs, regs->dar, SIGBUS); + bad_page_fault(regs, SIGBUS); return 1; #else return 0; diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c index f5d0bf999759..9d252c554f7f 100644 --- a/arch/powerpc/platforms/amigaone/setup.c +++ b/arch/powerpc/platforms/amigaone/setup.c @@ -66,6 +66,12 @@ static int __init amigaone_add_bridge(struct device_node *dev) void __init amigaone_setup_arch(void) { + if (ppc_md.progress) + ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0); +} + +static void __init amigaone_discover_phbs(void) +{ struct device_node *np; int phb = -ENODEV; @@ -74,9 +80,6 @@ void __init amigaone_setup_arch(void) phb = amigaone_add_bridge(np); BUG_ON(phb != 0); - - if (ppc_md.progress) - ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0); } void __init amigaone_init_IRQ(void) @@ -159,6 +162,7 @@ define_machine(amigaone) { .name = "AmigaOne", .probe = amigaone_probe, .setup_arch = amigaone_setup_arch, + .discover_phbs = amigaone_discover_phbs, .show_cpuinfo = amigaone_show_cpuinfo, .init_IRQ = amigaone_init_IRQ, .restart = amigaone_restart, diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c index 9068edef71f7..5b9a7e9f144b 100644 --- a/arch/powerpc/platforms/cell/pervasive.c +++ b/arch/powerpc/platforms/cell/pervasive.c @@ -25,6 +25,7 @@ #include <asm/cpu_has_feature.h> #include "pervasive.h" +#include "ras.h" static void cbe_power_save(void) { diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h index c6fccad6caee..0da74ab10716 100644 --- a/arch/powerpc/platforms/cell/pervasive.h +++ b/arch/powerpc/platforms/cell/pervasive.h @@ -13,9 +13,6 @@ #define PERVASIVE_H extern void cbe_pervasive_init(void); -extern void cbe_system_error_exception(struct pt_regs *regs); -extern void cbe_maintenance_exception(struct pt_regs *regs); -extern void cbe_thermal_exception(struct pt_regs *regs); #ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON extern int cbe_sysreset_hack(void); diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 6ea480539419..4325c05bedd9 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c @@ -49,7 +49,7 @@ static void dump_fir(int cpu) } -void cbe_system_error_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(cbe_system_error_exception) { int cpu = smp_processor_id(); @@ -58,7 +58,7 @@ void cbe_system_error_exception(struct pt_regs *regs) dump_stack(); } -void cbe_maintenance_exception(struct 
pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(cbe_maintenance_exception) { int cpu = smp_processor_id(); @@ -70,7 +70,7 @@ void cbe_maintenance_exception(struct pt_regs *regs) dump_stack(); } -void cbe_thermal_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(cbe_thermal_exception) { int cpu = smp_processor_id(); diff --git a/arch/powerpc/platforms/cell/ras.h b/arch/powerpc/platforms/cell/ras.h index 6c2e6bc0062e..226dbd48efad 100644 --- a/arch/powerpc/platforms/cell/ras.h +++ b/arch/powerpc/platforms/cell/ras.h @@ -2,9 +2,12 @@ #ifndef RAS_H #define RAS_H -extern void cbe_system_error_exception(struct pt_regs *regs); -extern void cbe_maintenance_exception(struct pt_regs *regs); -extern void cbe_thermal_exception(struct pt_regs *regs); +#include <asm/interrupt.h> + +DECLARE_INTERRUPT_HANDLER(cbe_system_error_exception); +DECLARE_INTERRUPT_HANDLER(cbe_maintenance_exception); +DECLARE_INTERRUPT_HANDLER(cbe_thermal_exception); + extern void cbe_ras_init(void); #endif /* RAS_H */ diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 25390569e24c..b83a3670bd74 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -91,14 +91,15 @@ out: } static int -spufs_setattr(struct dentry *dentry, struct iattr *attr) +spufs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, + struct iattr *attr) { struct inode *inode = d_inode(dentry); if ((attr->ia_valid & ATTR_SIZE) && (attr->ia_size != inode->i_size)) return -EINVAL; - setattr_copy(inode, attr); + setattr_copy(&init_user_ns, inode, attr); mark_inode_dirty(inode); return 0; } diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index b2c2bf35b76c..8c421dc78b28 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c @@ -314,6 +314,14 @@ chrp_find_bridges(void) } } of_node_put(root); + + /* + * "Temporary" fixes for PCI devices. + * -- Geert + */ + hydra_init(); /* Mac I/O */ + + pci_create_OF_bus_map(); } /* SL82C105 IDE Control/Status Register */ diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index c45435aa5e36..3cfc382841e5 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -334,22 +334,11 @@ static void __init chrp_setup_arch(void) /* On pegasos, enable the L2 cache if not already done by OF */ pegasos_set_l2cr(); - /* Lookup PCI host bridges */ - chrp_find_bridges(); - - /* - * Temporary fixes for PCI devices. - * -- Geert - */ - hydra_init(); /* Mac I/O */ - /* * Fix the Super I/O configuration */ sio_init(); - pci_create_OF_bus_map(); - /* * Print the banner, then scroll down so boot progress * can be printed. 
-- Cort @@ -582,6 +571,7 @@ define_machine(chrp) { .name = "CHRP", .probe = chrp_probe, .setup_arch = chrp_setup_arch, + .discover_phbs = chrp_find_bridges, .init = chrp_init2, .show_cpuinfo = chrp_show_cpuinfo, .init_IRQ = chrp_init_IRQ, diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c index d8f2e2c737bb..53065d564161 100644 --- a/arch/powerpc/platforms/embedded6xx/holly.c +++ b/arch/powerpc/platforms/embedded6xx/holly.c @@ -108,15 +108,13 @@ static void holly_remap_bridge(void) tsi108_write_reg(TSI108_PCI_P2O_BAR2, 0x0); } -static void __init holly_setup_arch(void) +static void __init holly_init_pci(void) { struct device_node *np; if (ppc_md.progress) ppc_md.progress("holly_setup_arch():set_bridge", 0); - tsi108_csr_vir_base = get_vir_csrbase(); - /* setup PCI host bridge */ holly_remap_bridge(); @@ -127,6 +125,11 @@ static void __init holly_setup_arch(void) ppc_md.pci_exclude_device = holly_exclude_device; if (ppc_md.progress) ppc_md.progress("tsi108: resources set", 0x100); +} + +static void __init holly_setup_arch(void) +{ + tsi108_csr_vir_base = get_vir_csrbase(); printk(KERN_INFO "PPC750GX/CL Platform\n"); } @@ -259,6 +262,7 @@ define_machine(holly){ .name = "PPC750 GX/CL TSI", .probe = holly_probe, .setup_arch = holly_setup_arch, + .discover_phbs = holly_init_pci, .init_IRQ = holly_init_IRQ, .show_cpuinfo = holly_show_cpuinfo, .get_irq = mpic_get_irq, diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c index f514d5d28cd4..eb8342e7f84e 100644 --- a/arch/powerpc/platforms/embedded6xx/linkstation.c +++ b/arch/powerpc/platforms/embedded6xx/linkstation.c @@ -64,14 +64,17 @@ static int __init linkstation_add_bridge(struct device_node *dev) static void __init linkstation_setup_arch(void) { + printk(KERN_INFO "BUFFALO Network Attached Storage Series\n"); + printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n"); +} + +static void __init linkstation_setup_pci(void) +{ struct device_node *np; /* Lookup PCI host bridges */ for_each_compatible_node(np, "pci", "mpc10x-pci") linkstation_add_bridge(np); - - printk(KERN_INFO "BUFFALO Network Attached Storage Series\n"); - printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n"); } /* @@ -153,6 +156,7 @@ define_machine(linkstation){ .name = "Buffalo Linkstation", .probe = linkstation_probe, .setup_arch = linkstation_setup_arch, + .discover_phbs = linkstation_setup_pci, .init_IRQ = linkstation_init_IRQ, .show_cpuinfo = linkstation_show_cpuinfo, .get_irq = mpic_get_irq, diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index b95c3380d2b5..5565647dc879 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c @@ -58,16 +58,14 @@ int mpc7448_hpc2_exclude_device(struct pci_controller *hose, return PCIBIOS_SUCCESSFUL; } -static void __init mpc7448_hpc2_setup_arch(void) +static void __init mpc7448_hpc2_setup_pci(void) { +#ifdef CONFIG_PCI struct device_node *np; if (ppc_md.progress) - ppc_md.progress("mpc7448_hpc2_setup_arch():set_bridge", 0); - - tsi108_csr_vir_base = get_vir_csrbase(); + ppc_md.progress("mpc7448_hpc2_setup_pci():set_bridge", 0); /* setup PCI host bridge */ -#ifdef CONFIG_PCI for_each_compatible_node(np, "pci", "tsi108-pci") tsi108_setup_pci(np, MPC7448HPC2_PCI_CFG_PHYS, 0); @@ -75,6 +73,11 @@ static void __init mpc7448_hpc2_setup_arch(void) if (ppc_md.progress) ppc_md.progress("tsi108: resources set", 0x100); #endif 
+} + +static void __init mpc7448_hpc2_setup_arch(void) +{ + tsi108_csr_vir_base = get_vir_csrbase(); printk(KERN_INFO "MPC7448HPC2 (TAIGA) Platform\n"); printk(KERN_INFO @@ -181,6 +184,7 @@ define_machine(mpc7448_hpc2){ .name = "MPC7448 HPC2", .probe = mpc7448_hpc2_probe, .setup_arch = mpc7448_hpc2_setup_arch, + .discover_phbs = mpc7448_hpc2_setup_pci, .init_IRQ = mpc7448_hpc2_init_IRQ, .show_cpuinfo = mpc7448_hpc2_show_cpuinfo, .get_irq = mpic_get_irq, diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c index 1cd488daa0bf..c06a0490d157 100644 --- a/arch/powerpc/platforms/embedded6xx/mvme5100.c +++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c @@ -154,17 +154,19 @@ static const struct of_device_id mvme5100_of_bus_ids[] __initconst = { */ static void __init mvme5100_setup_arch(void) { - struct device_node *np; - if (ppc_md.progress) ppc_md.progress("mvme5100_setup_arch()", 0); - for_each_compatible_node(np, "pci", "hawk-pci") - mvme5100_add_bridge(np); - restart = ioremap(BOARD_MODRST_REG, 4); } +static void __init mvme5100_setup_pci(void) +{ + struct device_node *np; + + for_each_compatible_node(np, "pci", "hawk-pci") + mvme5100_add_bridge(np); +} static void mvme5100_show_cpuinfo(struct seq_file *m) { @@ -205,6 +207,7 @@ define_machine(mvme5100) { .name = "MVME5100", .probe = mvme5100_probe, .setup_arch = mvme5100_setup_arch, + .discover_phbs = mvme5100_setup_pci, .init_IRQ = mvme5100_pic_init, .show_cpuinfo = mvme5100_show_cpuinfo, .get_irq = mpic_get_irq, diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c index e346ddcef45e..e188b90f7016 100644 --- a/arch/powerpc/platforms/embedded6xx/storcenter.c +++ b/arch/powerpc/platforms/embedded6xx/storcenter.c @@ -66,13 +66,16 @@ static int __init storcenter_add_bridge(struct device_node *dev) static void __init storcenter_setup_arch(void) { + printk(KERN_INFO "IOMEGA StorCenter\n"); +} + +static void __init storcenter_setup_pci(void) +{ struct device_node *np; /* Lookup PCI host bridges */ for_each_compatible_node(np, "pci", "mpc10x-pci") storcenter_add_bridge(np); - - printk(KERN_INFO "IOMEGA StorCenter\n"); } /* @@ -117,6 +120,7 @@ define_machine(storcenter){ .name = "IOMEGA StorCenter", .probe = storcenter_probe, .setup_arch = storcenter_setup_arch, + .discover_phbs = storcenter_setup_pci, .init_IRQ = storcenter_init_IRQ, .get_irq = mpic_get_irq, .restart = storcenter_restart, diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index c86a66d5e998..a20b9576de22 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c @@ -536,6 +536,9 @@ static int __init maple_add_bridge(struct device_node *dev) /* Check for legacy IOs */ isa_bridge_find_early(hose); + /* create pci_dn's for DT nodes under this PHB */ + pci_devs_phb_init_dynamic(hose); + return 0; } diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index f7e66a2005b4..4e9ad5bf3efb 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c @@ -179,9 +179,6 @@ static void __init maple_setup_arch(void) #ifdef CONFIG_SMP smp_ops = &maple_smp_ops; #endif - /* Lookup PCI hosts */ - maple_pci_init(); - maple_use_rtas_reboot_and_halt_if_present(); printk(KERN_DEBUG "Using native/NAP idle loop\n"); @@ -351,6 +348,7 @@ define_machine(maple) { .name = "Maple", .probe = maple_probe, .setup_arch = maple_setup_arch, + .discover_phbs = maple_pci_init, 
.init_IRQ = maple_init_IRQ, .pci_irq_fixup = maple_pci_irq_fixup, .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index b612474f8f8e..376797eb7894 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -144,8 +144,6 @@ static void __init pas_setup_arch(void) /* Setup SMP callback */ smp_ops = &pas_smp_ops; #endif - /* Lookup PCI hosts */ - pas_pci_init(); /* Remap SDC register for doing reset */ /* XXXOJN This should maybe come out of the device tree */ @@ -446,6 +444,7 @@ define_machine(pasemi) { .name = "PA Semi PWRficient", .probe = pas_probe, .setup_arch = pas_setup_arch, + .discover_phbs = pas_pci_init, .init_IRQ = pas_init_IRQ, .get_irq = mpic_get_irq, .restart = pas_restart, diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index e35eaa9cf938..e9abe0f2e7f0 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -850,6 +850,10 @@ static int __init pmac_add_bridge(struct device_node *dev) /* Fixup "bus-range" OF property */ fixup_bus_range(dev); + /* create pci_dn's for DT nodes under this PHB */ + if (IS_ENABLED(CONFIG_PPC64)) + pci_devs_phb_init_dynamic(hose); + return 0; } diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 2e2cc0c75d87..86aee3f2483f 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -298,9 +298,6 @@ static void __init pmac_setup_arch(void) of_node_put(ic); } - /* Lookup PCI hosts */ - pmac_pci_init(); - #ifdef CONFIG_PPC32 ohare_init(); l2cr_init(); @@ -600,6 +597,7 @@ define_machine(powermac) { .name = "PowerMac", .probe = pmac_probe, .setup_arch = pmac_setup_arch, + .discover_phbs = pmac_pci_init, .show_cpuinfo = pmac_show_cpuinfo, .init_IRQ = pmac_pic_init, .get_irq = NULL, /* changed later */ diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index e6f461812856..999997d9e9a9 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -14,6 +14,7 @@ #include <asm/asm-prototypes.h> #include <asm/firmware.h> +#include <asm/interrupt.h> #include <asm/machdep.h> #include <asm/opal.h> #include <asm/cputhreads.h> diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 5fc9408bb0b3..019669eb21d2 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -19,6 +19,7 @@ #include <linux/numa.h> #include <asm/machdep.h> #include <asm/debugfs.h> +#include <asm/cacheflush.h> /* This enables us to keep track of the memory removed from each node. */ struct memtrace_entry { @@ -51,6 +52,27 @@ static const struct file_operations memtrace_fops = { .open = simple_open, }; +#define FLUSH_CHUNK_SIZE SZ_1G +/** + * flush_dcache_range_chunked(): Write any modified data cache blocks out to + * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE + * Does not invalidate the corresponding instruction cache blocks. 
+ * + * @start: the start address + * @stop: the stop address (exclusive) + * @chunk: the max size of the chunks + */ +static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, + unsigned long chunk) +{ + unsigned long i; + + for (i = start; i < stop; i += chunk) { + flush_dcache_range(i, min(stop, i + chunk)); + cond_resched(); + } +} + static void memtrace_clear_range(unsigned long start_pfn, unsigned long nr_pages) { @@ -62,6 +84,13 @@ static void memtrace_clear_range(unsigned long start_pfn, cond_resched(); clear_page(__va(PFN_PHYS(pfn))); } + /* + * Before we go ahead and use this range as cache inhibited range + * flush the cache. + */ + flush_dcache_range_chunked(PFN_PHYS(start_pfn), + PFN_PHYS(start_pfn + nr_pages), + FLUSH_CHUNK_SIZE); } static u64 memtrace_alloc_node(u32 nid, u64 size) diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index c61c3b62c8c6..303d7c775740 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -624,7 +624,7 @@ static int opal_recover_mce(struct pt_regs *regs, */ recovered = 0; } else { - die("Machine check", regs, SIGBUS); + die_mce("Machine check", regs, SIGBUS); recovered = 1; } } diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c index 8c739c94ed28..53172862d23b 100644 --- a/arch/powerpc/platforms/powernv/pci-cxl.c +++ b/arch/powerpc/platforms/powernv/pci-cxl.c @@ -150,25 +150,3 @@ int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq, return 0; } EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup); - -#if IS_MODULE(CONFIG_CXL) -static inline int get_cxl_module(void) -{ - struct module *cxl_module; - - mutex_lock(&module_mutex); - - cxl_module = find_module("cxl"); - if (cxl_module) - __module_get(cxl_module); - - mutex_unlock(&module_mutex); - - if (!cxl_module) - return -ENODEV; - - return 0; -} -#else -static inline int get_cxl_module(void) { return 0; } -#endif diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c index 5218f5da2737..30551bbd7988 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c @@ -380,6 +380,8 @@ void pnv_pci_unlink_table_and_group(struct iommu_table *tbl, /* Remove link to a group from table's list of attached groups */ found = false; + + rcu_read_lock(); list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) { if (tgl->table_group == table_group) { list_del_rcu(&tgl->next); @@ -388,6 +390,8 @@ void pnv_pci_unlink_table_and_group(struct iommu_table *tbl, break; } } + rcu_read_unlock(); + if (WARN_ON(!found)) return; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index c4f72cdc9b51..f0f901683a2f 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2402,9 +2402,6 @@ static void pnv_pci_ioda_create_dbgfs(void) list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { phb = hose->private_data; - /* Notify initialization of PHB done */ - phb->initialized = 1; - sprintf(name, "PCI%04x", hose->global_number); phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root); @@ -2601,17 +2598,8 @@ static resource_size_t pnv_pci_default_alignment(void) */ static bool pnv_pci_enable_device_hook(struct pci_dev *dev) { - struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); struct pci_dn *pdn; - /* The function is probably called while the PEs have - * not be created yet. 
For example, resource reassignment - * during PCI probe period. We just skip the check if - * PEs isn't ready. - */ - if (!phb->initialized) - return true; - pdn = pci_get_pdn(dev); if (!pdn || pdn->pe_number == IODA_INVALID_PE) { pci_err(dev, "pci_enable_device() blocked, no PE assigned.\n"); @@ -2623,14 +2611,9 @@ static bool pnv_pci_enable_device_hook(struct pci_dev *dev) static bool pnv_ocapi_enable_device_hook(struct pci_dev *dev) { - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; struct pci_dn *pdn; struct pnv_ioda_pe *pe; - if (!phb->initialized) - return true; - pdn = pci_get_pdn(dev); if (!pdn) return false; @@ -2938,7 +2921,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, phb_id = be64_to_cpup(prop64); pr_debug(" PHB-ID : 0x%016llx\n", phb_id); - phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES); + phb = kzalloc(sizeof(*phb), GFP_KERNEL); if (!phb) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(*phb)); @@ -2987,7 +2970,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, else phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE; - phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES); + phb->diag_data = kzalloc(phb->diag_data_size, GFP_KERNEL); if (!phb->diag_data) panic("%s: Failed to allocate %u bytes\n", __func__, phb->diag_data_size); @@ -3049,9 +3032,10 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, } pemap_off = size; size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe); - aux = memblock_alloc(size, SMP_CACHE_BYTES); + aux = kzalloc(size, GFP_KERNEL); if (!aux) panic("%s: Failed to allocate %lu bytes\n", __func__, size); + phb->ioda.pe_alloc = aux; phb->ioda.m64_segmap = aux + m64map_off; phb->ioda.m32_segmap = aux + m32map_off; @@ -3178,6 +3162,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, /* Remove M64 resource if we can't configure it successfully */ if (!phb->init_m64 || phb->init_m64(phb)) hose->mem_resources[1].flags = 0; + + /* create pci_dn's for DT nodes under this PHB */ + pci_devs_phb_init_dynamic(hose); } void __init pnv_pci_init_ioda2_phb(struct device_node *np) diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 739a0b3b72e1..36d22920f5a3 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -119,7 +119,6 @@ struct pnv_phb { int flags; void __iomem *regs; u64 regs_phys; - int initialized; spinlock_t lock; #ifdef CONFIG_DEBUG_FS diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 4426a109ec2f..aadf932c4e61 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -180,9 +180,6 @@ static void __init pnv_setup_arch(void) /* Initialize SMP */ pnv_smp_init(); - /* Setup PCI */ - pnv_pci_init(); - /* Setup RTC and NVRAM callbacks */ if (firmware_has_feature(FW_FEATURE_OPAL)) opal_nvram_init(); @@ -547,6 +544,7 @@ define_machine(powernv) { .init_IRQ = pnv_init_IRQ, .show_cpuinfo = pnv_show_cpuinfo, .get_proc_freq = pnv_get_proc_freq, + .discover_phbs = pnv_pci_init, .progress = pnv_progress, .machine_shutdown = pnv_shutdown, .power_save = NULL, diff --git a/arch/powerpc/platforms/powernv/subcore.h b/arch/powerpc/platforms/powernv/subcore.h index c8f574d1c04a..77feee8436d4 100644 --- a/arch/powerpc/platforms/powernv/subcore.h +++ b/arch/powerpc/platforms/powernv/subcore.h @@ -15,7 +15,7 @@ void split_core_secondary_loop(u8 *state); 
extern void update_subcore_sibling_mask(void); #else -static inline void update_subcore_sibling_mask(void) { }; +static inline void update_subcore_sibling_mask(void) { } #endif /* CONFIG_SMP */ #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c index 598e4cd563fb..b65256a63e87 100644 --- a/arch/powerpc/platforms/powernv/vas.c +++ b/arch/powerpc/platforms/powernv/vas.c @@ -28,12 +28,10 @@ static DEFINE_PER_CPU(int, cpu_vas_id); static int vas_irq_fault_window_setup(struct vas_instance *vinst) { - char devname[64]; int rc = 0; - snprintf(devname, sizeof(devname), "vas-%d", vinst->vas_id); rc = request_threaded_irq(vinst->virq, vas_fault_handler, - vas_fault_thread_fn, 0, devname, vinst); + vas_fault_thread_fn, 0, vinst->name, vinst); if (rc) { pr_err("VAS[%d]: Request IRQ(%d) failed with %d\n", @@ -80,6 +78,12 @@ static int init_vas_instance(struct platform_device *pdev) if (!vinst) return -ENOMEM; + vinst->name = kasprintf(GFP_KERNEL, "vas-%d", vasid); + if (!vinst->name) { + kfree(vinst); + return -ENOMEM; + } + INIT_LIST_HEAD(&vinst->node); ida_init(&vinst->ida); mutex_init(&vinst->mutex); @@ -162,6 +166,7 @@ static int init_vas_instance(struct platform_device *pdev) return 0; free_vinst: + kfree(vinst->name); kfree(vinst); return -ENODEV; diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h index 70f793e8f6cc..c7db3190baca 100644 --- a/arch/powerpc/platforms/powernv/vas.h +++ b/arch/powerpc/platforms/powernv/vas.h @@ -340,6 +340,7 @@ struct vas_instance { struct vas_window *rxwin[VAS_COP_TYPE_MAX]; struct vas_window *windows[VAS_WINDOWS_PER_CHIP]; + char *name; char *dbgname; struct dentry *dbgdir; }; diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 16e86ba8aa20..233503fcf8f0 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -127,7 +127,6 @@ void dlpar_free_cc_nodes(struct device_node *dn) #define NEXT_PROPERTY 3 #define PREV_PARENT 4 #define MORE_MEMORY 5 -#define CALL_AGAIN -2 #define ERR_CFG_USE -9003 struct device_node *dlpar_configure_connector(__be32 drc_index, @@ -168,6 +167,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index, spin_unlock(&rtas_data_buf_lock); + if (rtas_busy_delay(rc)) + continue; + switch (rc) { case COMPLETE: break; @@ -216,9 +218,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index, last_dn = last_dn->parent; break; - case CALL_AGAIN: - break; - case MORE_MEMORY: case ERR_CFG_USE: default: @@ -521,11 +520,8 @@ static ssize_t dlpar_store(struct class *class, struct class_attribute *attr, int rc; args = argbuf = kstrdup(buf, GFP_KERNEL); - if (!argbuf) { - pr_info("Could not allocate resources for DLPAR operation\n"); - kfree(argbuf); + if (!argbuf) return -ENOMEM; - } /* * Parse out the request from the user, this will be in the form: diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index cf024fa37bda..bc15200852b7 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -43,7 +43,7 @@ static int ibm_get_config_addr_info; static int ibm_get_config_addr_info2; static int ibm_configure_pe; -void pseries_pcibios_bus_add_device(struct pci_dev *pdev) +static void pseries_pcibios_bus_add_device(struct pci_dev *pdev) { struct pci_dn *pdn = pci_get_pdn(pdev); @@ -694,8 +694,7 @@ static int pseries_eeh_write_config(struct eeh_dev 
*edev, int where, int size, u } #ifdef CONFIG_PCI_IOV -int pseries_send_allow_unfreeze(struct pci_dn *pdn, - u16 *vf_pe_array, int cur_vfs) +static int pseries_send_allow_unfreeze(struct pci_dn *pdn, u16 *vf_pe_array, int cur_vfs) { int rc; int ibm_allow_unfreeze = rtas_token("ibm,open-sriov-allow-unfreeze"); diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index 8c6e509f6967..a15ab33646b3 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -355,12 +355,12 @@ static int ibmebus_bus_device_probe(struct device *dev) if (!drv->probe) return error; - of_dev_get(of_dev); + get_device(dev); if (of_driver_match_device(dev, dev->driver)) error = drv->probe(of_dev); if (error) - of_dev_put(of_dev); + put_device(dev); return error; } diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index b3ac2455faad..637300330507 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -4,6 +4,7 @@ * Copyright 2006-2007 Michael Ellerman, IBM Corp. */ +#include <linux/crash_dump.h> #include <linux/device.h> #include <linux/irq.h> #include <linux/msi.h> @@ -458,8 +459,28 @@ again: return hwirq; } - virq = irq_create_mapping_affinity(NULL, hwirq, - entry->affinity); + /* + * Depending on the number of online CPUs in the original + * kernel, it is likely for CPU #0 to be offline in a kdump + * kernel. The associated IRQs in the affinity mappings + * provided by irq_create_affinity_masks() are thus not + * started by irq_startup(), as per-design for managed IRQs. + * This can be a problem with multi-queue block devices driven + * by blk-mq : such a non-started IRQ is very likely paired + * with the single queue enforced by blk-mq during kdump (see + * blk_mq_alloc_tag_set()). This causes the device to remain + * silent and likely hangs the guest at some point. + * + * We don't really care for fine-grained affinity when doing + * kdump actually : simply ignore the pre-computed affinity + * masks in this case and let the default mask with all CPUs + * be used when creating the IRQ mappings. 
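+ * For example, if the pre-computed mask for a queue's IRQ contains only
+ * CPU #0 and the kdump kernel boots with CPU #0 offline, that managed
+ * IRQ is never started and the queue never sees its completions.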
+ */ + if (is_kdump_kernel()) + virq = irq_create_mapping(NULL, hwirq); + else + virq = irq_create_mapping_affinity(NULL, hwirq, + entry->affinity); if (!virq) { pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq); diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 72a4d4167849..1bffbd1c9a94 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -55,9 +55,8 @@ struct pe_map_bar_entry { __be32 reserved; /* Reserved Space */ }; -int pseries_send_map_pe(struct pci_dev *pdev, - u16 num_vfs, - struct pe_map_bar_entry *vf_pe_array) +static int pseries_send_map_pe(struct pci_dev *pdev, u16 num_vfs, + struct pe_map_bar_entry *vf_pe_array) { struct pci_dn *pdn; int rc; @@ -88,7 +87,7 @@ int pseries_send_map_pe(struct pci_dev *pdev, return rc; } -void pseries_set_pe_num(struct pci_dev *pdev, u16 vf_index, __be16 pe_num) +static void pseries_set_pe_num(struct pci_dev *pdev, u16 vf_index, __be16 pe_num) { struct pci_dn *pdn; @@ -102,7 +101,7 @@ void pseries_set_pe_num(struct pci_dev *pdev, u16 vf_index, __be16 pe_num) pdn->pe_num_map[vf_index]); } -int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs) +static int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs) { struct pci_dn *pdn; int i, rc, vf_index; @@ -146,7 +145,7 @@ int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs) return rc; } -int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) +static int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) { struct pci_dn *pdn; int rc; @@ -189,14 +188,14 @@ int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) return rc; } -int pseries_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) +static int pseries_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) { /* Allocate PCI data */ add_sriov_vf_pdns(pdev); return pseries_pci_sriov_enable(pdev, num_vfs); } -int pseries_pcibios_sriov_disable(struct pci_dev *pdev) +static int pseries_pcibios_sriov_disable(struct pci_dev *pdev) { struct pci_dn *pdn; diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 593840847cd3..4fe48c04c6c2 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -33,7 +33,7 @@ int smp_query_cpu_stopped(unsigned int pcpu); #define QCSS_HARDWARE_ERROR -1 #define QCSS_HARDWARE_BUSY -2 #else -static inline void smp_init_pseries(void) { }; +static inline void smp_init_pseries(void) { } #endif extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary); diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 149cec2212e6..f8b390a9d9fb 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -122,7 +122,7 @@ static inline u8 rtas_mc_error_sub_type(const struct pseries_mc_errorlog *mlog) * devices or systems (e.g. hugepages) that have not been initialized at the * subsys stage. */ -int __init init_ras_hotplug_IRQ(void) +static int __init init_ras_hotplug_IRQ(void) { struct device_node *np; @@ -315,12 +315,10 @@ static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id) /* Handle environmental and power warning (EPOW) interrupts. 
*/ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) { - int status; int state; int critical; - status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, - &state); + rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state); if (state > 3) critical = 1; /* Time Critical */ @@ -329,12 +327,9 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) spin_lock(&ras_log_buf_lock); - status = rtas_call(ras_check_exception_token, 6, 1, NULL, - RTAS_VECTOR_EXTERNAL_INTERRUPT, - virq_to_hw(irq), - RTAS_EPOW_WARNING, - critical, __pa(&ras_log_buf), - rtas_get_error_log_max()); + rtas_call(ras_check_exception_token, 6, 1, NULL, RTAS_VECTOR_EXTERNAL_INTERRUPT, + virq_to_hw(irq), RTAS_EPOW_WARNING, critical, __pa(&ras_log_buf), + rtas_get_error_log_max()); log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0); @@ -722,6 +717,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp) struct pseries_errorlog *pseries_log; struct pseries_mc_errorlog *mce_log = NULL; int disposition = rtas_error_disposition(errp); + unsigned long msr; u8 error_type; if (!rtas_error_extended(errp)) @@ -747,9 +743,21 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp) * SLB multihit is done by now. */ out: - mtmsr(mfmsr() | MSR_IR | MSR_DR); + msr = mfmsr(); + mtmsr(msr | MSR_IR | MSR_DR); + disposition = mce_handle_err_virtmode(regs, errp, mce_log, disposition); + + /* + * Queue irq work to log this rtas event later. + * irq_work_queue uses per-cpu variables, so do this in virt + * mode as well. + */ + irq_work_queue(&mce_errlog_process_work); + + mtmsr(msr); + return disposition; } @@ -813,7 +821,7 @@ static int recover_mce(struct pt_regs *regs, struct machine_check_event *evt) */ recovered = 0; } else { - die("Machine check", regs, SIGBUS); + die_mce("Machine check", regs, SIGBUS); recovered = 1; } } @@ -865,10 +873,8 @@ long pseries_machine_check_realmode(struct pt_regs *regs) * virtual mode. */ disposition = mce_handle_error(regs, errp); - fwnmi_release_errinfo(); - /* Queue irq work to log this rtas event later. 
*/ - irq_work_queue(&mce_errlog_process_work); + fwnmi_release_errinfo(); if (disposition == RTAS_DISP_FULLY_RECOVERED) return 1; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 090c13f6c881..46e1540abc22 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -463,7 +463,7 @@ void pseries_little_endian_exceptions(void) } #endif -static void __init find_and_init_phbs(void) +static void __init pSeries_discover_phbs(void) { struct device_node *node; struct pci_controller *phb; @@ -481,6 +481,9 @@ static void __init find_and_init_phbs(void) pci_process_bridge_OF_ranges(phb, node, 0); isa_bridge_find_early(phb); phb->controller_ops = pseries_pci_controller_ops; + + /* create pci_dn's for DT nodes under this PHB */ + pci_devs_phb_init_dynamic(phb); } of_node_put(root); @@ -607,8 +610,8 @@ enum get_iov_fw_value_index { WDW_SIZE = 3 /* Get Window Size */ }; -resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno, - enum get_iov_fw_value_index value) +static resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno, + enum get_iov_fw_value_index value) { const int *indexes; struct device_node *dn = pci_device_to_OF_node(dev); @@ -643,7 +646,7 @@ resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno, return ret; } -void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes) +static void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes) { struct resource *res; resource_size_t base, size; @@ -665,7 +668,7 @@ void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes) } } -void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes) +static void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes) { struct resource *res, *root, *conflict; resource_size_t base, size; @@ -786,7 +789,6 @@ static void __init pSeries_setup_arch(void) /* Find and initialize PCI host bridges */ init_pci_config_tokens(); - find_and_init_phbs(); of_reconfig_notifier_register(&pci_dn_reconfig_nb); pSeries_nvram_init(); @@ -1050,6 +1052,7 @@ define_machine(pseries) { .init_IRQ = pseries_init_irq, .show_cpuinfo = pSeries_show_cpuinfo, .log_error = pSeries_log_error, + .discover_phbs = pSeries_discover_phbs, .pcibios_fixup = pSeries_final_fixup, .restart = rtas_restart, .halt = rtas_halt, diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index b2797cfe4e2b..9cb4fc839fd5 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -1261,7 +1261,6 @@ static int vio_bus_remove(struct device *dev) struct vio_dev *viodev = to_vio_dev(dev); struct vio_driver *viodrv = to_vio_driver(dev->driver); struct device *devptr; - int ret = 1; /* * Hold a reference to the device after the remove function is called @@ -1270,13 +1269,13 @@ static int vio_bus_remove(struct device *dev) devptr = get_device(dev); if (viodrv->remove) - ret = viodrv->remove(viodev); + viodrv->remove(viodev); - if (!ret && firmware_has_feature(FW_FEATURE_CMO)) + if (firmware_has_feature(FW_FEATURE_CMO)) vio_cmo_bus_remove(viodev); put_device(devptr); - return ret; + return 0; } /** diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index dcd817ca2edf..3fe37495f63d 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1383,7 +1383,6 @@ static long check_bp_loc(unsigned long addr) return 1; } -#ifndef CONFIG_PPC_8xx static int find_free_data_bpt(void) { int i; @@ -1395,7 
+1394,6 @@ static int find_free_data_bpt(void) printf("Couldn't find free breakpoint register\n"); return -1; } -#endif static void print_data_bpts(void) { @@ -1435,7 +1433,6 @@ bpt_cmds(void) cmd = inchar(); switch (cmd) { -#ifndef CONFIG_PPC_8xx static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; int mode; case 'd': /* bd - hardware data breakpoint */ @@ -1497,7 +1494,6 @@ bpt_cmds(void) force_enable_xmon(); } break; -#endif case 'c': if (!scanhex(&a)) { @@ -3723,7 +3719,7 @@ void dump_segments(void) printf("sr0-15 ="); for (i = 0; i < 16; ++i) - printf(" %x", mfsrin(i << 28)); + printf(" %x", mfsr(i << 28)); printf("\n"); } #endif diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index e0a34eb5ed3b..85d626b8ce5e 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -57,6 +57,7 @@ config RISCV select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN if MMU && 64BIT + select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT select HAVE_ARCH_KGDB select HAVE_ARCH_KGDB_QXFER_PKT select HAVE_ARCH_MMAP_RND_BITS if MMU @@ -67,14 +68,19 @@ config RISCV select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS if MMU select HAVE_EBPF_JIT if MMU + select HAVE_FUNCTION_ERROR_INJECTION select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_GCC_PLUGINS select HAVE_GENERIC_VDSO if MMU && 64BIT select HAVE_IRQ_TIME_ACCOUNTING + select HAVE_KPROBES + select HAVE_KPROBES_ON_FTRACE + select HAVE_KRETPROBES select HAVE_PCI select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select IRQ_DOMAIN @@ -143,7 +149,7 @@ config PAGE_OFFSET default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB config ARCH_FLATMEM_ENABLE - def_bool y + def_bool !NUMA config ARCH_SPARSEMEM_ENABLE def_bool y @@ -156,6 +162,9 @@ config ARCH_SELECT_MEMORY_MODEL config ARCH_WANT_GENERAL_HUGETLB def_bool y +config ARCH_SUPPORTS_UPROBES + def_bool y + config SYS_SUPPORTS_HUGETLBFS depends on MMU def_bool y @@ -302,6 +311,36 @@ config TUNE_GENERIC endchoice +# Common NUMA Features +config NUMA + bool "NUMA Memory Allocation and Scheduler Support" + depends on SMP + select GENERIC_ARCH_NUMA + select OF_NUMA + select ARCH_SUPPORTS_NUMA_BALANCING + help + Enable NUMA (Non-Uniform Memory Access) support. + + The kernel will try to allocate memory used by a CPU on the + local memory of the CPU and add some more NUMA awareness to the kernel. + +config NODES_SHIFT + int "Maximum NUMA Nodes (as a power of 2)" + range 1 10 + default "2" + depends on NEED_MULTIPLE_NODES + help + Specify the maximum number of NUMA Nodes available on the target + system. Increases memory reserved to accommodate various tables. + +config USE_PERCPU_NUMA_NODE_ID + def_bool y + depends on NUMA + +config NEED_PER_CPU_EMBED_FIRST_CHUNK + def_bool y + depends on NUMA + config RISCV_ISA_C bool "Emit compressed instructions when building Linux" default y @@ -416,11 +455,17 @@ config EFI allow the kernel to be booted as an EFI application. This is only useful on systems that have UEFI firmware. 
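The CC_HAVE_STACKPROTECTOR_TLS and STACKPROTECTOR_PER_TASK options added just below move the stack-protector canary from the single global __stack_chk_guard to a per-task value: on RISC-V the tp register holds the current task pointer in the kernel, and -mstack-protector-guard=tls with -mstack-protector-guard-reg=tp plus a build-time -mstack-protector-guard-offset (the TSK_STACK_CANARY offset pulled from asm-offsets.h in the arch/riscv/Makefile hunk further down) make the compiler load the guard relative to tp. A minimal sketch of what the emitted prologue/epilogue checks amount to; this is illustrative C, not the kernel's code, and task_sketch/current_tp are made-up names:

	/*
	 * Sketch only: the real loads and compares are emitted by the
	 * compiler for every instrumented function, not written by hand.
	 */
	struct task_sketch {
		unsigned long stack_canary;	/* offset known at build time */
	};

	/* Assumption stated above: tp points at the current task while in
	 * the kernel, so declare it as a global register variable. */
	register struct task_sketch *current_tp __asm__("tp");

	void instrumented_function(void)
	{
		unsigned long canary = current_tp->stack_canary; /* prologue */

		/* ... function body with on-stack buffers ... */

		if (canary != current_tp->stack_canary)		 /* epilogue */
			__builtin_trap(); /* real code calls __stack_chk_fail() */
	}

The gain over a global guard is that each task can carry its own randomized canary, rather than every kernel stack sharing one value.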
+config CC_HAVE_STACKPROTECTOR_TLS + def_bool $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=tp -mstack-protector-guard-offset=0) + +config STACKPROTECTOR_PER_TASK + def_bool y + depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS + endmenu config BUILTIN_DTB def_bool n - depends on RISCV_M_MODE depends on OF menu "Power management options" diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs index 3284d5c291be..7efcece8896c 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -22,30 +22,41 @@ config SOC_VIRT help This enables support for QEMU Virt Machine. -config SOC_KENDRYTE - bool "Kendryte K210 SoC" +config SOC_CANAAN + bool "Canaan Kendryte K210 SoC" depends on !MMU select CLINT_TIMER if RISCV_M_MODE select SERIAL_SIFIVE if TTY select SERIAL_SIFIVE_CONSOLE if TTY select SIFIVE_PLIC + select ARCH_HAS_RESET_CONTROLLER + select PINCTRL help - This enables support for Kendryte K210 SoC platform hardware. + This enables support for Canaan Kendryte K210 SoC platform hardware. -config SOC_KENDRYTE_K210_DTB - def_bool y - depends on SOC_KENDRYTE_K210_DTB_BUILTIN +if SOC_CANAAN -config SOC_KENDRYTE_K210_DTB_BUILTIN - bool "Builtin device tree for the Kendryte K210" - depends on SOC_KENDRYTE +config SOC_CANAAN_K210_DTB_BUILTIN + bool "Builtin device tree for the Canaan Kendryte K210" + depends on SOC_CANAAN default y select OF select BUILTIN_DTB - select SOC_KENDRYTE_K210_DTB help - Builds a device tree for the Kendryte K210 into the Linux image. + Build a device tree for the Kendryte K210 into the Linux image. This option should be selected if no bootloader is being used. If unsure, say Y. +config SOC_CANAAN_K210_DTB_SOURCE + string "Source file for the Canaan Kendryte K210 builtin DTB" + depends on SOC_CANAAN + depends on SOC_CANAAN_K210_DTB_BUILTIN + default "k210_generic" + help + Base name (without suffix, relative to arch/riscv/boot/dts/canaan) + for the DTS file that will be used to produce the DTB linked into the + kernel. + +endif + endmenu diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 8c29e553ef7f..1368d943f1f3 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -12,6 +12,8 @@ OBJCOPYFLAGS := -O binary LDFLAGS_vmlinux := ifeq ($(CONFIG_DYNAMIC_FTRACE),y) LDFLAGS_vmlinux := --no-relax + KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY + CC_FLAGS_FTRACE := -fpatchable-function-entry=8 endif ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy) @@ -65,6 +67,16 @@ KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) # architectures. It's faster to have GCC emit only aligned accesses. 
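# ($(call cc-option,<flag>) expands to <flag> only if the target compiler
# accepts it, so toolchains that lack -mstrict-align still build.)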
KBUILD_CFLAGS += $(call cc-option,-mstrict-align) +ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y) +prepare: stack_protector_prepare +stack_protector_prepare: prepare0 + $(eval KBUILD_CFLAGS += -mstack-protector-guard=tls \ + -mstack-protector-guard-reg=tp \ + -mstack-protector-guard-offset=$(shell \ + awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \ + include/generated/asm-offsets.h)) +endif + # arch specific predefines for sparse CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS) @@ -83,7 +95,7 @@ PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ -ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_KENDRYTE),yy) +ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy) KBUILD_IMAGE := $(boot)/loader.bin else KBUILD_IMAGE := $(boot)/Image.gz diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile index ca1f8cbd78c0..7ffd502e3e7b 100644 --- a/arch/riscv/boot/dts/Makefile +++ b/arch/riscv/boot/dts/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 subdir-y += sifive -subdir-y += kendryte +subdir-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += canaan obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y)) diff --git a/arch/riscv/boot/dts/canaan/Makefile b/arch/riscv/boot/dts/canaan/Makefile new file mode 100644 index 000000000000..9ee7156c0c31 --- /dev/null +++ b/arch/riscv/boot/dts/canaan/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +ifneq ($(CONFIG_SOC_CANAAN_K210_DTB_SOURCE),"") +dtb-y += $(strip $(shell echo $(CONFIG_SOC_CANAAN_K210_DTB_SOURCE))).dtb +obj-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += $(addsuffix .o, $(dtb-y)) +endif diff --git a/arch/riscv/boot/dts/canaan/canaan_kd233.dts b/arch/riscv/boot/dts/canaan/canaan_kd233.dts new file mode 100644 index 000000000000..039b92abf046 --- /dev/null +++ b/arch/riscv/boot/dts/canaan/canaan_kd233.dts @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com> + * Copyright (C) 2020 Western Digital Corporation or its affiliates. 
+ */ + +/dts-v1/; + +#include "k210.dtsi" + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> + +/ { + model = "Kendryte KD233"; + compatible = "canaan,kendryte-kd233", "canaan,kendryte-k210"; + + chosen { + bootargs = "earlycon console=ttySIF0"; + stdout-path = "serial0:115200n8"; + }; + + gpio-leds { + compatible = "gpio-leds"; + + led0 { + gpios = <&gpio0 8 GPIO_ACTIVE_LOW>; + }; + + led1 { + gpios = <&gpio0 9 GPIO_ACTIVE_LOW>; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + key0 { + label = "KEY0"; + linux,code = <BTN_0>; + gpios = <&gpio0 10 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&fpioa { + pinctrl-0 = <&jtag_pinctrl>; + pinctrl-names = "default"; + status = "okay"; + + jtag_pinctrl: jtag-pinmux { + pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>, + <K210_FPIOA(1, K210_PCF_JTAG_TDI)>, + <K210_FPIOA(2, K210_PCF_JTAG_TMS)>, + <K210_FPIOA(3, K210_PCF_JTAG_TDO)>; + }; + + uarths_pinctrl: uarths-pinmux { + pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>, + <K210_FPIOA(5, K210_PCF_UARTHS_TX)>; + }; + + spi0_pinctrl: spi0-pinmux { + pinmux = <K210_FPIOA(6, K210_PCF_GPIOHS20)>, /* cs */ + <K210_FPIOA(7, K210_PCF_SPI0_SCLK)>, /* wr */ + <K210_FPIOA(8, K210_PCF_GPIOHS21)>; /* dc */ + }; + + dvp_pinctrl: dvp-pinmux { + pinmux = <K210_FPIOA(9, K210_PCF_SCCB_SCLK)>, + <K210_FPIOA(10, K210_PCF_SCCB_SDA)>, + <K210_FPIOA(11, K210_PCF_DVP_RST)>, + <K210_FPIOA(12, K210_PCF_DVP_VSYNC)>, + <K210_FPIOA(13, K210_PCF_DVP_PWDN)>, + <K210_FPIOA(14, K210_PCF_DVP_XCLK)>, + <K210_FPIOA(15, K210_PCF_DVP_PCLK)>, + <K210_FPIOA(17, K210_PCF_DVP_HSYNC)>; + }; + + gpiohs_pinctrl: gpiohs-pinmux { + pinmux = <K210_FPIOA(16, K210_PCF_GPIOHS0)>, + <K210_FPIOA(20, K210_PCF_GPIOHS4)>, /* Rot. dip sw line 8 */ + <K210_FPIOA(21, K210_PCF_GPIOHS5)>, /* Rot. dip sw line 4 */ + <K210_FPIOA(22, K210_PCF_GPIOHS6)>, /* Rot. dip sw line 2 */ + <K210_FPIOA(23, K210_PCF_GPIOHS7)>, /* Rot. 
dip sw line 1 */ + <K210_FPIOA(24, K210_PCF_GPIOHS8)>, + <K210_FPIOA(25, K210_PCF_GPIOHS9)>, + <K210_FPIOA(26, K210_PCF_GPIOHS10)>; + }; + + spi1_pinctrl: spi1-pinmux { + pinmux = <K210_FPIOA(29, K210_PCF_SPI1_SCLK)>, + <K210_FPIOA(30, K210_PCF_SPI1_D0)>, + <K210_FPIOA(31, K210_PCF_SPI1_D1)>, + <K210_FPIOA(32, K210_PCF_GPIOHS16)>; /* cs */ + }; + + i2s0_pinctrl: i2s0-pinmux { + pinmux = <K210_FPIOA(33, K210_PCF_I2S0_IN_D0)>, + <K210_FPIOA(34, K210_PCF_I2S0_WS)>, + <K210_FPIOA(35, K210_PCF_I2S0_SCLK)>; + }; +}; + +&uarths0 { + pinctrl-0 = <&uarths_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&gpio0 { + pinctrl-0 = <&gpiohs_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&i2s0 { + #sound-dai-cells = <1>; + pinctrl-0 = <&i2s0_pinctrl>; + pinctrl-names = "default"; +}; + +&spi0 { + pinctrl-0 = <&spi0_pinctrl>; + pinctrl-names = "default"; + num-cs = <1>; + cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>; + + panel@0 { + compatible = "ilitek,ili9341"; + reg = <0>; + dc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>; + spi-max-frequency = <15000000>; + status = "disabled"; + }; +}; + +&spi1 { + pinctrl-0 = <&spi1_pinctrl>; + pinctrl-names = "default"; + num-cs = <1>; + cs-gpios = <&gpio0 16 GPIO_ACTIVE_LOW>; + status = "okay"; + + slot@0 { + compatible = "mmc-spi-slot"; + reg = <0>; + voltage-ranges = <3300 3300>; + spi-max-frequency = <25000000>; + broken-cd; + }; +}; diff --git a/arch/riscv/boot/dts/canaan/k210.dtsi b/arch/riscv/boot/dts/canaan/k210.dtsi new file mode 100644 index 000000000000..5e8ca8142482 --- /dev/null +++ b/arch/riscv/boot/dts/canaan/k210.dtsi @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com> + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + */ +#include <dt-bindings/clock/k210-clk.h> +#include <dt-bindings/pinctrl/k210-fpioa.h> +#include <dt-bindings/reset/k210-rst.h> + +/ { + /* + * Although the K210 is a 64-bit CPU, the address bus is only 32-bits + * wide, and the upper half of all addresses is ignored. + */ + #address-cells = <1>; + #size-cells = <1>; + compatible = "canaan,kendryte-k210"; + + aliases { + serial0 = &uarths0; + serial1 = &uart1; + serial2 = &uart2; + serial3 = &uart3; + }; + + /* + * The K210 has an sv39 MMU following the privileged specification v1.9. + * Since this is a non-ratified draft specification, the kernel does not + * support it and the K210 support enabled only for the !MMU case. + * Be consistent with this by setting the CPUs MMU type to "none". 
+ */ + cpus { + #address-cells = <1>; + #size-cells = <0>; + timebase-frequency = <7800000>; + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "canaan,k210", "riscv"; + reg = <0>; + riscv,isa = "rv64imafdc"; + mmu-type = "riscv,none"; + i-cache-block-size = <64>; + i-cache-size = <0x8000>; + d-cache-block-size = <64>; + d-cache-size = <0x8000>; + cpu0_intc: interrupt-controller { + #interrupt-cells = <1>; + interrupt-controller; + compatible = "riscv,cpu-intc"; + }; + }; + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "canaan,k210", "riscv"; + reg = <1>; + riscv,isa = "rv64imafdc"; + mmu-type = "riscv,none"; + i-cache-block-size = <64>; + i-cache-size = <0x8000>; + d-cache-block-size = <64>; + d-cache-size = <0x8000>; + cpu1_intc: interrupt-controller { + #interrupt-cells = <1>; + interrupt-controller; + compatible = "riscv,cpu-intc"; + }; + }; + }; + + sram: memory@80000000 { + device_type = "memory"; + compatible = "canaan,k210-sram"; + reg = <0x80000000 0x400000>, + <0x80400000 0x200000>, + <0x80600000 0x200000>; + reg-names = "sram0", "sram1", "aisram"; + clocks = <&sysclk K210_CLK_SRAM0>, + <&sysclk K210_CLK_SRAM1>, + <&sysclk K210_CLK_AI>; + clock-names = "sram0", "sram1", "aisram"; + }; + + clocks { + in0: oscillator { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <26000000>; + }; + }; + + soc { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-bus"; + ranges; + interrupt-parent = <&plic0>; + + rom0: nvmem@1000 { + reg = <0x1000 0x1000>; + read-only; + }; + + clint0: timer@2000000 { + compatible = "canaan,k210-clint", "sifive,clint0"; + reg = <0x2000000 0xC000>; + interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7 + &cpu1_intc 3 &cpu1_intc 7>; + }; + + plic0: interrupt-controller@c000000 { + #interrupt-cells = <1>; + #address-cells = <0>; + compatible = "canaan,k210-plic", "sifive,plic-1.0.0"; + reg = <0xC000000 0x4000000>; + interrupt-controller; + interrupts-extended = <&cpu0_intc 11 &cpu1_intc 11>; + riscv,ndev = <65>; + }; + + uarths0: serial@38000000 { + compatible = "canaan,k210-uarths", "sifive,uart0"; + reg = <0x38000000 0x1000>; + interrupts = <33>; + clocks = <&sysclk K210_CLK_CPU>; + }; + + gpio0: gpio-controller@38001000 { + #interrupt-cells = <2>; + #gpio-cells = <2>; + compatible = "canaan,k210-gpiohs", "sifive,gpio0"; + reg = <0x38001000 0x1000>; + interrupt-controller; + interrupts = <34 35 36 37 38 39 40 41 + 42 43 44 45 46 47 48 49 + 50 51 52 53 54 55 56 57 + 58 59 60 61 62 63 64 65>; + gpio-controller; + ngpios = <32>; + }; + + dmac0: dma-controller@50000000 { + compatible = "snps,axi-dma-1.01a"; + reg = <0x50000000 0x1000>; + interrupts = <27 28 29 30 31 32>; + #dma-cells = <1>; + clocks = <&sysclk K210_CLK_DMA>, <&sysclk K210_CLK_DMA>; + clock-names = "core-clk", "cfgr-clk"; + resets = <&sysrst K210_RST_DMA>; + dma-channels = <6>; + snps,dma-masters = <2>; + snps,priority = <0 1 2 3 4 5>; + snps,data-width = <5>; + snps,block-size = <0x200000 0x200000 0x200000 + 0x200000 0x200000 0x200000>; + snps,axi-max-burst-len = <256>; + }; + + apb0: bus@50200000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-pm-bus"; + ranges; + clocks = <&sysclk K210_CLK_APB0>; + + gpio1: gpio@50200000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dw-apb-gpio"; + reg = <0x50200000 0x80>; + clocks = <&sysclk K210_CLK_APB0>, + <&sysclk K210_CLK_GPIO>; + clock-names = "bus", "db"; + resets = <&sysrst K210_RST_GPIO>; + + gpio1_0: gpio-port@0 { + #gpio-cells = <2>; + #interrupt-cells = <2>; + 
compatible = "snps,dw-apb-gpio-port"; + reg = <0>; + interrupt-controller; + interrupts = <23>; + gpio-controller; + ngpios = <8>; + }; + }; + + uart1: serial@50210000 { + compatible = "snps,dw-apb-uart"; + reg = <0x50210000 0x100>; + interrupts = <11>; + clocks = <&sysclk K210_CLK_UART1>, + <&sysclk K210_CLK_APB0>; + clock-names = "baudclk", "apb_pclk"; + resets = <&sysrst K210_RST_UART1>; + reg-io-width = <4>; + reg-shift = <2>; + dcd-override; + dsr-override; + cts-override; + ri-override; + }; + + uart2: serial@50220000 { + compatible = "snps,dw-apb-uart"; + reg = <0x50220000 0x100>; + interrupts = <12>; + clocks = <&sysclk K210_CLK_UART2>, + <&sysclk K210_CLK_APB0>; + clock-names = "baudclk", "apb_pclk"; + resets = <&sysrst K210_RST_UART2>; + reg-io-width = <4>; + reg-shift = <2>; + dcd-override; + dsr-override; + cts-override; + ri-override; + }; + + uart3: serial@50230000 { + compatible = "snps,dw-apb-uart"; + reg = <0x50230000 0x100>; + interrupts = <13>; + clocks = <&sysclk K210_CLK_UART3>, + <&sysclk K210_CLK_APB0>; + clock-names = "baudclk", "apb_pclk"; + resets = <&sysrst K210_RST_UART3>; + reg-io-width = <4>; + reg-shift = <2>; + dcd-override; + dsr-override; + cts-override; + ri-override; + }; + + spi2: spi@50240000 { + compatible = "canaan,k210-spi"; + spi-slave; + reg = <0x50240000 0x100>; + #address-cells = <0>; + #size-cells = <0>; + interrupts = <3>; + clocks = <&sysclk K210_CLK_SPI2>, + <&sysclk K210_CLK_APB0>; + clock-names = "ssi_clk", "pclk"; + resets = <&sysrst K210_RST_SPI2>; + spi-max-frequency = <25000000>; + }; + + i2s0: i2s@50250000 { + compatible = "snps,designware-i2s"; + reg = <0x50250000 0x200>; + interrupts = <5>; + clocks = <&sysclk K210_CLK_I2S0>; + clock-names = "i2sclk"; + resets = <&sysrst K210_RST_I2S0>; + }; + + i2s1: i2s@50260000 { + compatible = "snps,designware-i2s"; + reg = <0x50260000 0x200>; + interrupts = <6>; + clocks = <&sysclk K210_CLK_I2S1>; + clock-names = "i2sclk"; + resets = <&sysrst K210_RST_I2S1>; + }; + + i2s2: i2s@50270000 { + compatible = "snps,designware-i2s"; + reg = <0x50270000 0x200>; + interrupts = <7>; + clocks = <&sysclk K210_CLK_I2S2>; + clock-names = "i2sclk"; + resets = <&sysrst K210_RST_I2S2>; + }; + + i2c0: i2c@50280000 { + compatible = "snps,designware-i2c"; + reg = <0x50280000 0x100>; + interrupts = <8>; + clocks = <&sysclk K210_CLK_I2C0>, + <&sysclk K210_CLK_APB0>; + clock-names = "ref", "pclk"; + resets = <&sysrst K210_RST_I2C0>; + }; + + i2c1: i2c@50290000 { + compatible = "snps,designware-i2c"; + reg = <0x50290000 0x100>; + interrupts = <9>; + clocks = <&sysclk K210_CLK_I2C1>, + <&sysclk K210_CLK_APB0>; + clock-names = "ref", "pclk"; + resets = <&sysrst K210_RST_I2C1>; + }; + + i2c2: i2c@502a0000 { + compatible = "snps,designware-i2c"; + reg = <0x502A0000 0x100>; + interrupts = <10>; + clocks = <&sysclk K210_CLK_I2C2>, + <&sysclk K210_CLK_APB0>; + clock-names = "ref", "pclk"; + resets = <&sysrst K210_RST_I2C2>; + }; + + fpioa: pinmux@502b0000 { + compatible = "canaan,k210-fpioa"; + reg = <0x502B0000 0x100>; + clocks = <&sysclk K210_CLK_FPIOA>, + <&sysclk K210_CLK_APB0>; + clock-names = "ref", "pclk"; + resets = <&sysrst K210_RST_FPIOA>; + canaan,k210-sysctl-power = <&sysctl 108>; + }; + + timer0: timer@502d0000 { + compatible = "snps,dw-apb-timer"; + reg = <0x502D0000 0x100>; + interrupts = <14 15>; + clocks = <&sysclk K210_CLK_TIMER0>, + <&sysclk K210_CLK_APB0>; + clock-names = "timer", "pclk"; + resets = <&sysrst K210_RST_TIMER0>; + }; + + timer1: timer@502e0000 { + compatible = "snps,dw-apb-timer"; + reg = 
<0x502E0000 0x100>; + interrupts = <16 17>; + clocks = <&sysclk K210_CLK_TIMER1>, + <&sysclk K210_CLK_APB0>; + clock-names = "timer", "pclk"; + resets = <&sysrst K210_RST_TIMER1>; + }; + + timer2: timer@502f0000 { + compatible = "snps,dw-apb-timer"; + reg = <0x502F0000 0x100>; + interrupts = <18 19>; + clocks = <&sysclk K210_CLK_TIMER2>, + <&sysclk K210_CLK_APB0>; + clock-names = "timer", "pclk"; + resets = <&sysrst K210_RST_TIMER2>; + }; + }; + + apb1: bus@50400000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-pm-bus"; + ranges; + clocks = <&sysclk K210_CLK_APB1>; + + wdt0: watchdog@50400000 { + compatible = "snps,dw-wdt"; + reg = <0x50400000 0x100>; + interrupts = <21>; + clocks = <&sysclk K210_CLK_WDT0>, + <&sysclk K210_CLK_APB1>; + clock-names = "tclk", "pclk"; + resets = <&sysrst K210_RST_WDT0>; + }; + + wdt1: watchdog@50410000 { + compatible = "snps,dw-wdt"; + reg = <0x50410000 0x100>; + interrupts = <22>; + clocks = <&sysclk K210_CLK_WDT1>, + <&sysclk K210_CLK_APB1>; + clock-names = "tclk", "pclk"; + resets = <&sysrst K210_RST_WDT1>; + }; + + sysctl: syscon@50440000 { + compatible = "canaan,k210-sysctl", + "syscon", "simple-mfd"; + reg = <0x50440000 0x100>; + clocks = <&sysclk K210_CLK_APB1>; + clock-names = "pclk"; + + sysclk: clock-controller { + #clock-cells = <1>; + compatible = "canaan,k210-clk"; + clocks = <&in0>; + }; + + sysrst: reset-controller { + compatible = "canaan,k210-rst"; + #reset-cells = <1>; + }; + + reboot: syscon-reboot { + compatible = "syscon-reboot"; + regmap = <&sysctl>; + offset = <48>; + mask = <1>; + value = <1>; + }; + }; + }; + + apb2: bus@52000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-pm-bus"; + ranges; + clocks = <&sysclk K210_CLK_APB2>; + + spi0: spi@52000000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "canaan,k210-spi"; + reg = <0x52000000 0x100>; + interrupts = <1>; + clocks = <&sysclk K210_CLK_SPI0>, + <&sysclk K210_CLK_APB2>; + clock-names = "ssi_clk", "pclk"; + resets = <&sysrst K210_RST_SPI0>; + reset-names = "spi"; + spi-max-frequency = <25000000>; + num-cs = <4>; + reg-io-width = <4>; + }; + + spi1: spi@53000000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "canaan,k210-spi"; + reg = <0x53000000 0x100>; + interrupts = <2>; + clocks = <&sysclk K210_CLK_SPI1>, + <&sysclk K210_CLK_APB2>; + clock-names = "ssi_clk", "pclk"; + resets = <&sysrst K210_RST_SPI1>; + reset-names = "spi"; + spi-max-frequency = <25000000>; + num-cs = <4>; + reg-io-width = <4>; + }; + + spi3: spi@54000000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwc-ssi-1.01a"; + reg = <0x54000000 0x200>; + interrupts = <4>; + clocks = <&sysclk K210_CLK_SPI3>, + <&sysclk K210_CLK_APB2>; + clock-names = "ssi_clk", "pclk"; + resets = <&sysrst K210_RST_SPI3>; + reset-names = "spi"; + /* Could possibly go up to 200 MHz */ + spi-max-frequency = <100000000>; + num-cs = <4>; + reg-io-width = <4>; + }; + }; + }; +}; diff --git a/arch/riscv/boot/dts/canaan/k210_generic.dts b/arch/riscv/boot/dts/canaan/k210_generic.dts new file mode 100644 index 000000000000..396c8ca4d24d --- /dev/null +++ b/arch/riscv/boot/dts/canaan/k210_generic.dts @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com> + * Copyright (C) 2020 Western Digital Corporation or its affiliates. 
+ */ + +/dts-v1/; + +#include "k210.dtsi" + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> + +/ { + model = "Kendryte K210 generic"; + compatible = "canaan,kendryte-k210"; + + chosen { + bootargs = "earlycon console=ttySIF0"; + stdout-path = "serial0:115200n8"; + }; +}; + +&fpioa { + pinctrl-0 = <&jtag_pins>; + pinctrl-names = "default"; + status = "okay"; + + jtag_pins: jtag-pinmux { + pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>, + <K210_FPIOA(1, K210_PCF_JTAG_TDI)>, + <K210_FPIOA(2, K210_PCF_JTAG_TMS)>, + <K210_FPIOA(3, K210_PCF_JTAG_TDO)>; + }; + + uarths_pins: uarths-pinmux { + pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>, + <K210_FPIOA(5, K210_PCF_UARTHS_TX)>; + }; +}; + +&uarths0 { + pinctrl-0 = <&uarths_pins>; + pinctrl-names = "default"; + status = "okay"; +}; diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts new file mode 100644 index 000000000000..0bcaf35045e7 --- /dev/null +++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com> + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + */ + +/dts-v1/; + +#include "k210.dtsi" + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> +#include <dt-bindings/leds/common.h> + +/ { + model = "SiPeed MAIX BiT"; + compatible = "sipeed,maix-bit", "sipeed,maix-bitm", + "canaan,kendryte-k210"; + + chosen { + bootargs = "earlycon console=ttySIF0"; + stdout-path = "serial0:115200n8"; + }; + + gpio-leds { + compatible = "gpio-leds"; + + led0 { + color = <LED_COLOR_ID_GREEN>; + label = "green"; + gpios = <&gpio1_0 4 GPIO_ACTIVE_LOW>; + }; + + led1 { + color = <LED_COLOR_ID_RED>; + label = "red"; + gpios = <&gpio1_0 5 GPIO_ACTIVE_LOW>; + }; + + led2 { + color = <LED_COLOR_ID_BLUE>; + label = "blue"; + gpios = <&gpio1_0 6 GPIO_ACTIVE_LOW>; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + boot { + label = "BOOT"; + linux,code = <BTN_0>; + gpios = <&gpio0 0 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&fpioa { + pinctrl-names = "default"; + pinctrl-0 = <&jtag_pinctrl>; + status = "okay"; + + jtag_pinctrl: jtag-pinmux { + pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>, + <K210_FPIOA(1, K210_PCF_JTAG_TDI)>, + <K210_FPIOA(2, K210_PCF_JTAG_TMS)>, + <K210_FPIOA(3, K210_PCF_JTAG_TDO)>; + }; + + uarths_pinctrl: uarths-pinmux { + pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>, + <K210_FPIOA(5, K210_PCF_UARTHS_TX)>; + }; + + gpio_pinctrl: gpio-pinmux { + pinmux = <K210_FPIOA(8, K210_PCF_GPIO0)>, + <K210_FPIOA(9, K210_PCF_GPIO1)>, + <K210_FPIOA(10, K210_PCF_GPIO2)>, + <K210_FPIOA(11, K210_PCF_GPIO3)>, + <K210_FPIOA(12, K210_PCF_GPIO4)>, + <K210_FPIOA(13, K210_PCF_GPIO5)>, + <K210_FPIOA(14, K210_PCF_GPIO6)>, + <K210_FPIOA(15, K210_PCF_GPIO7)>; + }; + + gpiohs_pinctrl: gpiohs-pinmux { + pinmux = <K210_FPIOA(16, K210_PCF_GPIOHS0)>, + <K210_FPIOA(17, K210_PCF_GPIOHS1)>, + <K210_FPIOA(21, K210_PCF_GPIOHS5)>, + <K210_FPIOA(22, K210_PCF_GPIOHS6)>, + <K210_FPIOA(23, K210_PCF_GPIOHS7)>, + <K210_FPIOA(24, K210_PCF_GPIOHS8)>, + <K210_FPIOA(25, K210_PCF_GPIOHS9)>, + <K210_FPIOA(32, K210_PCF_GPIOHS16)>, + <K210_FPIOA(33, K210_PCF_GPIOHS17)>, + <K210_FPIOA(34, K210_PCF_GPIOHS18)>, + <K210_FPIOA(35, K210_PCF_GPIOHS19)>; + }; + + i2s0_pinctrl: i2s0-pinmux { + pinmux = <K210_FPIOA(18, K210_PCF_I2S0_SCLK)>, + <K210_FPIOA(19, K210_PCF_I2S0_WS)>, + <K210_FPIOA(20, K210_PCF_I2S0_IN_D0)>; + }; + + dvp_pinctrl: dvp-pinmux { + pinmux = <K210_FPIOA(40, 
K210_PCF_SCCB_SDA)>, + <K210_FPIOA(41, K210_PCF_SCCB_SCLK)>, + <K210_FPIOA(42, K210_PCF_DVP_RST)>, + <K210_FPIOA(43, K210_PCF_DVP_VSYNC)>, + <K210_FPIOA(44, K210_PCF_DVP_PWDN)>, + <K210_FPIOA(45, K210_PCF_DVP_HSYNC)>, + <K210_FPIOA(46, K210_PCF_DVP_XCLK)>, + <K210_FPIOA(47, K210_PCF_DVP_PCLK)>; + }; + + spi0_pinctrl: spi0-pinmux { + pinmux = <K210_FPIOA(36, K210_PCF_GPIOHS20)>, /* cs */ + <K210_FPIOA(37, K210_PCF_GPIOHS21)>, /* rst */ + <K210_FPIOA(38, K210_PCF_GPIOHS22)>, /* dc */ + <K210_FPIOA(39, K210_PCF_SPI0_SCLK)>; /* wr */ + }; + + spi1_pinctrl: spi1-pinmux { + pinmux = <K210_FPIOA(26, K210_PCF_SPI1_D1)>, + <K210_FPIOA(27, K210_PCF_SPI1_SCLK)>, + <K210_FPIOA(28, K210_PCF_SPI1_D0)>, + <K210_FPIOA(29, K210_PCF_GPIOHS13)>; /* cs */ + }; + + i2c1_pinctrl: i2c1-pinmux { + pinmux = <K210_FPIOA(30, K210_PCF_I2C1_SCLK)>, + <K210_FPIOA(31, K210_PCF_I2C1_SDA)>; + }; +}; + +&uarths0 { + pinctrl-0 = <&uarths_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&gpio0 { + pinctrl-0 = <&gpiohs_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&gpio1 { + pinctrl-0 = <&gpio_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&i2s0 { + #sound-dai-cells = <1>; + pinctrl-0 = <&i2s0_pinctrl>; + pinctrl-names = "default"; +}; + +&i2c1 { + pinctrl-0 = <&i2c1_pinctrl>; + pinctrl-names = "default"; + clock-frequency = <400000>; + status = "okay"; +}; + +&spi0 { + pinctrl-0 = <&spi0_pinctrl>; + pinctrl-names = "default"; + num-cs = <1>; + cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>; + + panel@0 { + compatible = "sitronix,st7789v"; + reg = <0>; + reset-gpios = <&gpio0 21 GPIO_ACTIVE_LOW>; + dc-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>; + spi-max-frequency = <15000000>; + spi-cs-high; + status = "disabled"; + }; +}; + +&spi1 { + pinctrl-0 = <&spi1_pinctrl>; + pinctrl-names = "default"; + num-cs = <1>; + cs-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>; + status = "okay"; + + slot@0 { + compatible = "mmc-spi-slot"; + reg = <0>; + voltage-ranges = <3300 3300>; + spi-max-frequency = <25000000>; + broken-cd; + }; +}; + +&spi3 { + spi-flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <50000000>; + m25p,fast-read; + broken-flash-reset; + }; +}; diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts new file mode 100644 index 000000000000..ac8a03f5867a --- /dev/null +++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com> + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + */ + +/dts-v1/; + +#include "k210.dtsi" + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> +#include <dt-bindings/leds/common.h> + +/ { + model = "SiPeed MAIX Dock"; + compatible = "sipeed,maix-dock-m1", "sipeed,maix-dock-m1w", + "canaan,kendryte-k210"; + + chosen { + bootargs = "earlycon console=ttySIF0"; + stdout-path = "serial0:115200n8"; + }; + + gpio-leds { + compatible = "gpio-leds"; + + /* + * Note: the board wiring drawing documents green on + * gpio #4, red on gpio #5 and blue on gpio #6. However, + * the board is actually wired differently as defined here. 
+ */ + led0 { + color = <LED_COLOR_ID_BLUE>; + label = "blue"; + gpios = <&gpio1_0 4 GPIO_ACTIVE_LOW>; + }; + + led1 { + color = <LED_COLOR_ID_GREEN>; + label = "green"; + gpios = <&gpio1_0 5 GPIO_ACTIVE_LOW>; + }; + + led2 { + color = <LED_COLOR_ID_RED>; + label = "red"; + gpios = <&gpio1_0 6 GPIO_ACTIVE_LOW>; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + boot { + label = "BOOT"; + linux,code = <BTN_0>; + gpios = <&gpio0 0 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&fpioa { + pinctrl-0 = <&jtag_pinctrl>; + pinctrl-names = "default"; + status = "okay"; + + jtag_pinctrl: jtag-pinmux { + pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>, + <K210_FPIOA(1, K210_PCF_JTAG_TDI)>, + <K210_FPIOA(2, K210_PCF_JTAG_TMS)>, + <K210_FPIOA(3, K210_PCF_JTAG_TDO)>; + }; + + uarths_pinctrl: uarths-pinmux { + pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>, + <K210_FPIOA(5, K210_PCF_UARTHS_TX)>; + }; + + gpio_pinctrl: gpio-pinmux { + pinmux = <K210_FPIOA(8, K210_PCF_GPIO0)>, + <K210_FPIOA(11, K210_PCF_GPIO3)>, + <K210_FPIOA(12, K210_PCF_GPIO4)>, + <K210_FPIOA(13, K210_PCF_GPIO5)>, + <K210_FPIOA(14, K210_PCF_GPIO6)>, + <K210_FPIOA(15, K210_PCF_GPIO7)>; + }; + + gpiohs_pinctrl: gpiohs-pinmux { + pinmux = <K210_FPIOA(16, K210_PCF_GPIOHS0)>, + <K210_FPIOA(17, K210_PCF_GPIOHS1)>, + <K210_FPIOA(21, K210_PCF_GPIOHS5)>, + <K210_FPIOA(22, K210_PCF_GPIOHS6)>, + <K210_FPIOA(23, K210_PCF_GPIOHS7)>, + <K210_FPIOA(24, K210_PCF_GPIOHS8)>, + <K210_FPIOA(25, K210_PCF_GPIOHS9)>, + <K210_FPIOA(32, K210_PCF_GPIOHS16)>, + <K210_FPIOA(33, K210_PCF_GPIOHS17)>, + <K210_FPIOA(34, K210_PCF_GPIOHS18)>, + <K210_FPIOA(35, K210_PCF_GPIOHS19)>; + }; + + i2s0_pinctrl: i2s0-pinmux { + pinmux = <K210_FPIOA(18, K210_PCF_I2S0_SCLK)>, + <K210_FPIOA(19, K210_PCF_I2S0_WS)>, + <K210_FPIOA(20, K210_PCF_I2S0_IN_D0)>; + }; + + dvp_pinctrl: dvp-pinmux { + pinmux = <K210_FPIOA(40, K210_PCF_SCCB_SDA)>, + <K210_FPIOA(41, K210_PCF_SCCB_SCLK)>, + <K210_FPIOA(42, K210_PCF_DVP_RST)>, + <K210_FPIOA(43, K210_PCF_DVP_VSYNC)>, + <K210_FPIOA(44, K210_PCF_DVP_PWDN)>, + <K210_FPIOA(45, K210_PCF_DVP_HSYNC)>, + <K210_FPIOA(46, K210_PCF_DVP_XCLK)>, + <K210_FPIOA(47, K210_PCF_DVP_PCLK)>; + }; + + spi0_pinctrl: spi0-pinmux { + pinmux = <K210_FPIOA(36, K210_PCF_GPIOHS20)>, /* cs */ + <K210_FPIOA(37, K210_PCF_GPIOHS21)>, /* rst */ + <K210_FPIOA(38, K210_PCF_GPIOHS22)>, /* dc */ + <K210_FPIOA(39, K210_PCF_SPI0_SCLK)>; /* wr */ + }; + + spi1_pinctrl: spi1-pinmux { + pinmux = <K210_FPIOA(26, K210_PCF_SPI1_D1)>, + <K210_FPIOA(27, K210_PCF_SPI1_SCLK)>, + <K210_FPIOA(28, K210_PCF_SPI1_D0)>, + <K210_FPIOA(29, K210_PCF_GPIOHS13)>; /* cs */ + }; + + i2c1_pinctrl: i2c1-pinmux { + pinmux = <K210_FPIOA(9, K210_PCF_I2C1_SCLK)>, + <K210_FPIOA(10, K210_PCF_I2C1_SDA)>; + }; +}; + +&uarths0 { + pinctrl-0 = <&uarths_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&gpio0 { + pinctrl-0 = <&gpiohs_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&gpio1 { + pinctrl-0 = <&gpio_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&i2s0 { + #sound-dai-cells = <1>; + pinctrl-0 = <&i2s0_pinctrl>; + pinctrl-names = "default"; +}; + +&i2c1 { + pinctrl-0 = <&i2c1_pinctrl>; + pinctrl-names = "default"; + clock-frequency = <400000>; + status = "okay"; +}; + +&spi0 { + pinctrl-0 = <&spi0_pinctrl>; + pinctrl-names = "default"; + num-cs = <1>; + cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>; + + panel@0 { + compatible = "sitronix,st7789v"; + reg = <0>; + reset-gpios = <&gpio0 21 GPIO_ACTIVE_LOW>; + dc-gpios = <&gpio0 22 0>; + spi-max-frequency = <15000000>; + status = 
"disabled"; + }; +}; + +&spi1 { + pinctrl-0 = <&spi1_pinctrl>; + pinctrl-names = "default"; + num-cs = <1>; + cs-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>; + status = "okay"; + + slot@0 { + compatible = "mmc-spi-slot"; + reg = <0>; + voltage-ranges = <3300 3300>; + spi-max-frequency = <25000000>; + broken-cd; + }; +}; + +&spi3 { + spi-flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <50000000>; + m25p,fast-read; + broken-flash-reset; + }; +}; diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts new file mode 100644 index 000000000000..623998194bc1 --- /dev/null +++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com> + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + */ + +/dts-v1/; + +#include "k210.dtsi" + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> +#include <dt-bindings/leds/common.h> + +/ { + model = "SiPeed MAIX GO"; + compatible = "sipeed,maix-go", "canaan,kendryte-k210"; + + chosen { + bootargs = "earlycon console=ttySIF0"; + stdout-path = "serial0:115200n8"; + }; + + gpio-leds { + compatible = "gpio-leds"; + + led0 { + color = <LED_COLOR_ID_GREEN>; + label = "green"; + gpios = <&gpio1_0 4 GPIO_ACTIVE_LOW>; + }; + + led1 { + color = <LED_COLOR_ID_RED>; + label = "red"; + gpios = <&gpio1_0 5 GPIO_ACTIVE_LOW>; + }; + + led2 { + color = <LED_COLOR_ID_BLUE>; + label = "blue"; + gpios = <&gpio1_0 6 GPIO_ACTIVE_LOW>; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + up { + label = "UP"; + linux,code = <BTN_1>; + gpios = <&gpio1_0 7 GPIO_ACTIVE_LOW>; + }; + + press { + label = "PRESS"; + linux,code = <BTN_0>; + gpios = <&gpio0 0 GPIO_ACTIVE_LOW>; + }; + + down { + label = "DOWN"; + linux,code = <BTN_2>; + gpios = <&gpio0 1 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&fpioa { + pinctrl-0 = <&jtag_pinctrl>; + pinctrl-names = "default"; + status = "okay"; + + jtag_pinctrl: jtag-pinmux { + pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>, + <K210_FPIOA(1, K210_PCF_JTAG_TDI)>, + <K210_FPIOA(2, K210_PCF_JTAG_TMS)>, + <K210_FPIOA(3, K210_PCF_JTAG_TDO)>; + }; + + uarths_pinctrl: uarths-pinmux { + pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>, + <K210_FPIOA(5, K210_PCF_UARTHS_TX)>; + }; + + gpio_pinctrl: gpio-pinmux { + pinmux = <K210_FPIOA(8, K210_PCF_GPIO0)>, + <K210_FPIOA(9, K210_PCF_GPIO1)>, + <K210_FPIOA(10, K210_PCF_GPIO2)>, + <K210_FPIOA(11, K210_PCF_GPIO3)>, + <K210_FPIOA(12, K210_PCF_GPIO4)>, + <K210_FPIOA(13, K210_PCF_GPIO5)>, + <K210_FPIOA(14, K210_PCF_GPIO6)>, + <K210_FPIOA(15, K210_PCF_GPIO7)>; + }; + + gpiohs_pinctrl: gpiohs-pinmux { + pinmux = <K210_FPIOA(16, K210_PCF_GPIOHS0)>, + <K210_FPIOA(17, K210_PCF_GPIOHS1)>, + <K210_FPIOA(21, K210_PCF_GPIOHS5)>, + <K210_FPIOA(22, K210_PCF_GPIOHS6)>, + <K210_FPIOA(23, K210_PCF_GPIOHS7)>, + <K210_FPIOA(24, K210_PCF_GPIOHS8)>, + <K210_FPIOA(25, K210_PCF_GPIOHS9)>, + <K210_FPIOA(32, K210_PCF_GPIOHS16)>, + <K210_FPIOA(33, K210_PCF_GPIOHS17)>, + <K210_FPIOA(34, K210_PCF_GPIOHS18)>, + <K210_FPIOA(35, K210_PCF_GPIOHS19)>; + }; + + i2s0_pinctrl: i2s0-pinmux { + pinmux = <K210_FPIOA(18, K210_PCF_I2S0_SCLK)>, + <K210_FPIOA(19, K210_PCF_I2S0_WS)>, + <K210_FPIOA(20, K210_PCF_I2S0_IN_D0)>; + }; + + dvp_pinctrl: dvp-pinmux { + pinmux = <K210_FPIOA(40, K210_PCF_SCCB_SDA)>, + <K210_FPIOA(41, K210_PCF_SCCB_SCLK)>, + <K210_FPIOA(42, K210_PCF_DVP_RST)>, + <K210_FPIOA(43, K210_PCF_DVP_VSYNC)>, + <K210_FPIOA(44, 
K210_PCF_DVP_PWDN)>, + <K210_FPIOA(45, K210_PCF_DVP_HSYNC)>, + <K210_FPIOA(46, K210_PCF_DVP_XCLK)>, + <K210_FPIOA(47, K210_PCF_DVP_PCLK)>; + }; + + spi0_pinctrl: spi0-pinmux { + pinmux = <K210_FPIOA(36, K210_PCF_GPIOHS20)>, /* cs */ + <K210_FPIOA(37, K210_PCF_GPIOHS21)>, /* rst */ + <K210_FPIOA(38, K210_PCF_GPIOHS22)>, /* dc */ + <K210_FPIOA(39, K210_PCF_SPI0_SCLK)>; /* wr */ + }; + + spi1_pinctrl: spi1-pinmux { + pinmux = <K210_FPIOA(26, K210_PCF_SPI1_D1)>, + <K210_FPIOA(27, K210_PCF_SPI1_SCLK)>, + <K210_FPIOA(28, K210_PCF_SPI1_D0)>, + <K210_FPIOA(29, K210_PCF_GPIOHS13)>; /* cs */ + }; + + i2c1_pinctrl: i2c1-pinmux { + pinmux = <K210_FPIOA(30, K210_PCF_I2C1_SCLK)>, + <K210_FPIOA(31, K210_PCF_I2C1_SDA)>; + }; +}; + +&uarths0 { + pinctrl-0 = <&uarths_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&gpio0 { + pinctrl-0 = <&gpiohs_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&gpio1 { + pinctrl-0 = <&gpio_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&i2s0 { + #sound-dai-cells = <1>; + pinctrl-0 = <&i2s0_pinctrl>; + pinctrl-names = "default"; +}; + +&i2c1 { + pinctrl-0 = <&i2c1_pinctrl>; + pinctrl-names = "default"; + clock-frequency = <400000>; + status = "okay"; +}; + +&spi0 { + pinctrl-0 = <&spi0_pinctrl>; + pinctrl-names = "default"; + num-cs = <1>; + cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>; + + panel@0 { + compatible = "sitronix,st7789v"; + reg = <0>; + reset-gpios = <&gpio0 21 GPIO_ACTIVE_LOW>; + dc-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>; + spi-max-frequency = <15000000>; + status = "disabled"; + }; +}; + +&spi1 { + pinctrl-0 = <&spi1_pinctrl>; + pinctrl-names = "default"; + num-cs = <1>; + cs-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>; + status = "okay"; + + slot@0 { + compatible = "mmc-spi-slot"; + reg = <0>; + voltage-ranges = <3300 3300>; + spi-max-frequency = <25000000>; + broken-cd; + }; +}; + +&spi3 { + spi-flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <50000000>; + m25p,fast-read; + broken-flash-reset; + }; +}; diff --git a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts new file mode 100644 index 000000000000..cf605ba0d67e --- /dev/null +++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com> + * Copyright (C) 2020 Western Digital Corporation or its affiliates. 
+ */ + +/dts-v1/; + +#include "k210.dtsi" + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> + +/ { + model = "SiPeed MAIXDUINO"; + compatible = "sipeed,maixduino", "canaan,kendryte-k210"; + + chosen { + bootargs = "earlycon console=ttySIF0"; + stdout-path = "serial0:115200n8"; + }; + + gpio-keys { + compatible = "gpio-keys"; + + boot { + label = "BOOT"; + linux,code = <BTN_0>; + gpios = <&gpio0 0 GPIO_ACTIVE_LOW>; + }; + }; + + vcc_3v3: regulator-3v3 { + compatible = "regulator-fixed"; + regulator-name = "3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; +}; + +&fpioa { + status = "okay"; + + uarths_pinctrl: uarths-pinmux { + pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>, /* Header "0" */ + <K210_FPIOA(5, K210_PCF_UARTHS_TX)>; /* Header "1" */ + }; + + gpio_pinctrl: gpio-pinmux { + pinmux = <K210_FPIOA(8, K210_PCF_GPIO0)>, + <K210_FPIOA(9, K210_PCF_GPIO1)>; + }; + + gpiohs_pinctrl: gpiohs-pinmux { + pinmux = <K210_FPIOA(16, K210_PCF_GPIOHS0)>, /* BOOT */ + <K210_FPIOA(21, K210_PCF_GPIOHS2)>, /* Header "2" */ + <K210_FPIOA(22, K210_PCF_GPIOHS3)>, /* Header "3" */ + <K210_FPIOA(23, K210_PCF_GPIOHS4)>, /* Header "4" */ + <K210_FPIOA(24, K210_PCF_GPIOHS5)>, /* Header "5" */ + <K210_FPIOA(32, K210_PCF_GPIOHS6)>, /* Header "6" */ + <K210_FPIOA(15, K210_PCF_GPIOHS7)>, /* Header "7" */ + <K210_FPIOA(14, K210_PCF_GPIOHS8)>, /* Header "8" */ + <K210_FPIOA(13, K210_PCF_GPIOHS9)>, /* Header "9" */ + <K210_FPIOA(12, K210_PCF_GPIOHS10)>, /* Header "10" */ + <K210_FPIOA(11, K210_PCF_GPIOHS11)>, /* Header "11" */ + <K210_FPIOA(10, K210_PCF_GPIOHS12)>, /* Header "12" */ + <K210_FPIOA(3, K210_PCF_GPIOHS13)>; /* Header "13" */ + }; + + i2s0_pinctrl: i2s0-pinmux { + pinmux = <K210_FPIOA(18, K210_PCF_I2S0_SCLK)>, + <K210_FPIOA(19, K210_PCF_I2S0_WS)>, + <K210_FPIOA(20, K210_PCF_I2S0_IN_D0)>; + }; + + spi1_pinctrl: spi1-pinmux { + pinmux = <K210_FPIOA(26, K210_PCF_SPI1_D1)>, + <K210_FPIOA(27, K210_PCF_SPI1_SCLK)>, + <K210_FPIOA(28, K210_PCF_SPI1_D0)>, + <K210_FPIOA(29, K210_PCF_GPIO2)>; /* cs */ + }; + + i2c1_pinctrl: i2c1-pinmux { + pinmux = <K210_FPIOA(30, K210_PCF_I2C1_SCLK)>, /* Header "scl" */ + <K210_FPIOA(31, K210_PCF_I2C1_SDA)>; /* Header "sda" */ + }; + + i2s1_pinctrl: i2s1-pinmux { + pinmux = <K210_FPIOA(33, K210_PCF_I2S1_WS)>, + <K210_FPIOA(34, K210_PCF_I2S1_IN_D0)>, + <K210_FPIOA(35, K210_PCF_I2S1_SCLK)>; + }; + + spi0_pinctrl: spi0-pinmux { + pinmux = <K210_FPIOA(36, K210_PCF_GPIOHS20)>, /* cs */ + <K210_FPIOA(37, K210_PCF_GPIOHS21)>, /* rst */ + <K210_FPIOA(38, K210_PCF_GPIOHS22)>, /* dc */ + <K210_FPIOA(39, K210_PCF_SPI0_SCLK)>; /* wr */ + }; + + dvp_pinctrl: dvp-pinmux { + pinmux = <K210_FPIOA(40, K210_PCF_SCCB_SDA)>, + <K210_FPIOA(41, K210_PCF_SCCB_SCLK)>, + <K210_FPIOA(42, K210_PCF_DVP_RST)>, + <K210_FPIOA(43, K210_PCF_DVP_VSYNC)>, + <K210_FPIOA(44, K210_PCF_DVP_PWDN)>, + <K210_FPIOA(45, K210_PCF_DVP_HSYNC)>, + <K210_FPIOA(46, K210_PCF_DVP_XCLK)>, + <K210_FPIOA(47, K210_PCF_DVP_PCLK)>; + }; +}; + +&uarths0 { + pinctrl-0 = <&uarths_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&gpio0 { + pinctrl-0 = <&gpiohs_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&gpio1 { + pinctrl-0 = <&gpio_pinctrl>; + pinctrl-names = "default"; + status = "okay"; +}; + +&i2s0 { + #sound-dai-cells = <1>; + pinctrl-0 = <&i2s0_pinctrl>; + pinctrl-names = "default"; +}; + +&i2c1 { + pinctrl-0 = <&i2c1_pinctrl>; + pinctrl-names = "default"; + clock-frequency = <400000>; + status = "okay"; +}; + +&spi0 { + pinctrl-0 = 
<&spi0_pinctrl>; + pinctrl-names = "default"; + num-cs = <1>; + cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>; + + panel@0 { + compatible = "sitronix,st7789v"; + reg = <0>; + reset-gpios = <&gpio0 21 GPIO_ACTIVE_LOW>; + dc-gpios = <&gpio0 22 0>; + spi-max-frequency = <15000000>; + power-supply = <&vcc_3v3>; + }; +}; + +&spi1 { + pinctrl-0 = <&spi1_pinctrl>; + pinctrl-names = "default"; + num-cs = <1>; + cs-gpios = <&gpio1_0 2 GPIO_ACTIVE_LOW>; + status = "okay"; + + slot@0 { + compatible = "mmc-spi-slot"; + reg = <0>; + voltage-ranges = <3300 3300>; + spi-max-frequency = <25000000>; + broken-cd; + }; +}; + +&spi3 { + spi-flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <50000000>; + m25p,fast-read; + broken-flash-reset; + }; +}; diff --git a/arch/riscv/boot/dts/kendryte/Makefile b/arch/riscv/boot/dts/kendryte/Makefile deleted file mode 100644 index 1a88e616f18e..000000000000 --- a/arch/riscv/boot/dts/kendryte/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -dtb-$(CONFIG_SOC_KENDRYTE_K210_DTB) += k210.dtb - -obj-$(CONFIG_SOC_KENDRYTE_K210_DTB_BUILTIN) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/riscv/boot/dts/kendryte/k210.dts b/arch/riscv/boot/dts/kendryte/k210.dts deleted file mode 100644 index 0d1f28fce6b2..000000000000 --- a/arch/riscv/boot/dts/kendryte/k210.dts +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2020 Western Digital Corporation or its affiliates. - */ - -/dts-v1/; - -#include "k210.dtsi" - -/ { - model = "Kendryte K210 generic"; - compatible = "kendryte,k210"; - - chosen { - bootargs = "earlycon console=ttySIF0"; - stdout-path = "serial0"; - }; -}; - -&uarths0 { - status = "okay"; -}; - diff --git a/arch/riscv/boot/dts/kendryte/k210.dtsi b/arch/riscv/boot/dts/kendryte/k210.dtsi deleted file mode 100644 index d2d0ff645632..000000000000 --- a/arch/riscv/boot/dts/kendryte/k210.dtsi +++ /dev/null @@ -1,125 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2019 Sean Anderson <seanga2@gmail.com> - * Copyright (C) 2020 Western Digital Corporation or its affiliates. - */ -#include <dt-bindings/clock/k210-clk.h> - -/ { - /* - * Although the K210 is a 64-bit CPU, the address bus is only 32-bits - * wide, and the upper half of all addresses is ignored. - */ - #address-cells = <1>; - #size-cells = <1>; - compatible = "kendryte,k210"; - - aliases { - serial0 = &uarths0; - }; - - /* - * The K210 has an sv39 MMU following the priviledge specification v1.9. - * Since this is a non-ratified draft specification, the kernel does not - * support it and the K210 support enabled only for the !MMU case. - * Be consistent with this by setting the CPUs MMU type to "none". 
- */ - cpus { - #address-cells = <1>; - #size-cells = <0>; - timebase-frequency = <7800000>; - cpu0: cpu@0 { - device_type = "cpu"; - reg = <0>; - compatible = "kendryte,k210", "sifive,rocket0", "riscv"; - riscv,isa = "rv64imafdc"; - mmu-type = "none"; - i-cache-size = <0x8000>; - i-cache-block-size = <64>; - d-cache-size = <0x8000>; - d-cache-block-size = <64>; - clocks = <&sysctl K210_CLK_CPU>; - clock-frequency = <390000000>; - cpu0_intc: interrupt-controller { - #interrupt-cells = <1>; - interrupt-controller; - compatible = "riscv,cpu-intc"; - }; - }; - cpu1: cpu@1 { - device_type = "cpu"; - reg = <1>; - compatible = "kendryte,k210", "sifive,rocket0", "riscv"; - riscv,isa = "rv64imafdc"; - mmu-type = "none"; - i-cache-size = <0x8000>; - i-cache-block-size = <64>; - d-cache-size = <0x8000>; - d-cache-block-size = <64>; - clocks = <&sysctl K210_CLK_CPU>; - clock-frequency = <390000000>; - cpu1_intc: interrupt-controller { - #interrupt-cells = <1>; - interrupt-controller; - compatible = "riscv,cpu-intc"; - }; - }; - }; - - sram: memory@80000000 { - device_type = "memory"; - reg = <0x80000000 0x400000>, - <0x80400000 0x200000>, - <0x80600000 0x200000>; - reg-names = "sram0", "sram1", "aisram"; - }; - - clocks { - in0: oscillator { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <26000000>; - }; - }; - - soc { - #address-cells = <1>; - #size-cells = <1>; - compatible = "kendryte,k210-soc", "simple-bus"; - ranges; - interrupt-parent = <&plic0>; - - sysctl: sysctl@50440000 { - compatible = "kendryte,k210-sysctl", "simple-mfd"; - reg = <0x50440000 0x1000>; - #clock-cells = <1>; - }; - - clint0: clint@2000000 { - #interrupt-cells = <1>; - compatible = "riscv,clint0"; - reg = <0x2000000 0xC000>; - interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7 - &cpu1_intc 3 &cpu1_intc 7>; - clocks = <&sysctl K210_CLK_ACLK>; - }; - - plic0: interrupt-controller@c000000 { - #interrupt-cells = <1>; - interrupt-controller; - compatible = "kendryte,k210-plic0", "riscv,plic0"; - reg = <0xC000000 0x4000000>; - interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 0xffffffff>, - <&cpu1_intc 11>, <&cpu1_intc 0xffffffff>; - riscv,ndev = <65>; - riscv,max-priority = <7>; - }; - - uarths0: serial@38000000 { - compatible = "kendryte,k210-uarths", "sifive,uart0"; - reg = <0x38000000 0x1000>; - interrupts = <33>; - clocks = <&sysctl K210_CLK_CPU>; - }; - }; -}; diff --git a/arch/riscv/boot/dts/sifive/Makefile b/arch/riscv/boot/dts/sifive/Makefile index 6d6189e6e4af..74c47fe9fc22 100644 --- a/arch/riscv/boot/dts/sifive/Makefile +++ b/arch/riscv/boot/dts/sifive/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 -dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb +dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb \ + hifive-unmatched-a00.dtb diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi new file mode 100644 index 000000000000..eeb4f8c3e0e7 --- /dev/null +++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright (c) 2020 SiFive, Inc */ + +/dts-v1/; + +#include <dt-bindings/clock/sifive-fu740-prci.h> + +/ { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sifive,fu740-c000", "sifive,fu740"; + + aliases { + serial0 = &uart0; + serial1 = &uart1; + ethernet0 = ð0; + }; + + chosen { + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + cpu0: cpu@0 { + compatible = "sifive,bullet0", "riscv"; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets 
= <128>; + i-cache-size = <16384>; + next-level-cache = <&ccache>; + reg = <0x0>; + riscv,isa = "rv64imac"; + status = "disabled"; + cpu0_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + cpu1: cpu@1 { + compatible = "sifive,bullet0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <40>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <128>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <40>; + mmu-type = "riscv,sv39"; + next-level-cache = <&ccache>; + reg = <0x1>; + riscv,isa = "rv64imafdc"; + tlb-split; + cpu1_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + cpu2: cpu@2 { + compatible = "sifive,bullet0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <40>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <128>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <40>; + mmu-type = "riscv,sv39"; + next-level-cache = <&ccache>; + reg = <0x2>; + riscv,isa = "rv64imafdc"; + tlb-split; + cpu2_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + cpu3: cpu@3 { + compatible = "sifive,bullet0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <40>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <128>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <40>; + mmu-type = "riscv,sv39"; + next-level-cache = <&ccache>; + reg = <0x3>; + riscv,isa = "rv64imafdc"; + tlb-split; + cpu3_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + cpu4: cpu@4 { + compatible = "sifive,bullet0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <40>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <128>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <40>; + mmu-type = "riscv,sv39"; + next-level-cache = <&ccache>; + reg = <0x4>; + riscv,isa = "rv64imafdc"; + tlb-split; + cpu4_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + }; + soc { + #address-cells = <2>; + #size-cells = <2>; + compatible = "simple-bus"; + ranges; + plic0: interrupt-controller@c000000 { + #interrupt-cells = <1>; + #address-cells = <0>; + compatible = "sifive,fu540-c000-plic", "sifive,plic-1.0.0"; + reg = <0x0 0xc000000 0x0 0x4000000>; + riscv,ndev = <69>; + interrupt-controller; + interrupts-extended = < + &cpu0_intc 0xffffffff + &cpu1_intc 0xffffffff &cpu1_intc 9 + &cpu2_intc 0xffffffff &cpu2_intc 9 + &cpu3_intc 0xffffffff &cpu3_intc 9 + &cpu4_intc 0xffffffff &cpu4_intc 9>; + }; + prci: clock-controller@10000000 { + compatible = "sifive,fu740-c000-prci"; + reg = <0x0 0x10000000 0x0 0x1000>; + clocks = <&hfclk>, <&rtcclk>; + #clock-cells = <1>; + }; + uart0: serial@10010000 { + compatible = "sifive,fu740-c000-uart", "sifive,uart0"; + reg = <0x0 0x10010000 0x0 0x1000>; + interrupt-parent = <&plic0>; + interrupts = <39>; + clocks = <&prci PRCI_CLK_PCLK>; + status = "disabled"; + }; + uart1: serial@10011000 { + compatible = "sifive,fu740-c000-uart", "sifive,uart0"; + reg = 
<0x0 0x10011000 0x0 0x1000>; + interrupt-parent = <&plic0>; + interrupts = <40>; + clocks = <&prci PRCI_CLK_PCLK>; + status = "disabled"; + }; + i2c0: i2c@10030000 { + compatible = "sifive,fu740-c000-i2c", "sifive,i2c0"; + reg = <0x0 0x10030000 0x0 0x1000>; + interrupt-parent = <&plic0>; + interrupts = <52>; + clocks = <&prci PRCI_CLK_PCLK>; + reg-shift = <2>; + reg-io-width = <1>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + i2c1: i2c@10031000 { + compatible = "sifive,fu740-c000-i2c", "sifive,i2c0"; + reg = <0x0 0x10031000 0x0 0x1000>; + interrupt-parent = <&plic0>; + interrupts = <53>; + clocks = <&prci PRCI_CLK_PCLK>; + reg-shift = <2>; + reg-io-width = <1>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + qspi0: spi@10040000 { + compatible = "sifive,fu740-c000-spi", "sifive,spi0"; + reg = <0x0 0x10040000 0x0 0x1000>, + <0x0 0x20000000 0x0 0x10000000>; + interrupt-parent = <&plic0>; + interrupts = <41>; + clocks = <&prci PRCI_CLK_PCLK>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + qspi1: spi@10041000 { + compatible = "sifive,fu740-c000-spi", "sifive,spi0"; + reg = <0x0 0x10041000 0x0 0x1000>, + <0x0 0x30000000 0x0 0x10000000>; + interrupt-parent = <&plic0>; + interrupts = <42>; + clocks = <&prci PRCI_CLK_PCLK>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + spi0: spi@10050000 { + compatible = "sifive,fu740-c000-spi", "sifive,spi0"; + reg = <0x0 0x10050000 0x0 0x1000>; + interrupt-parent = <&plic0>; + interrupts = <43>; + clocks = <&prci PRCI_CLK_PCLK>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + eth0: ethernet@10090000 { + compatible = "sifive,fu540-c000-gem"; + interrupt-parent = <&plic0>; + interrupts = <55>; + reg = <0x0 0x10090000 0x0 0x2000>, + <0x0 0x100a0000 0x0 0x1000>; + local-mac-address = [00 00 00 00 00 00]; + clock-names = "pclk", "hclk"; + clocks = <&prci PRCI_CLK_GEMGXLPLL>, + <&prci PRCI_CLK_GEMGXLPLL>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + pwm0: pwm@10020000 { + compatible = "sifive,fu740-c000-pwm", "sifive,pwm0"; + reg = <0x0 0x10020000 0x0 0x1000>; + interrupt-parent = <&plic0>; + interrupts = <44>, <45>, <46>, <47>; + clocks = <&prci PRCI_CLK_PCLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + pwm1: pwm@10021000 { + compatible = "sifive,fu740-c000-pwm", "sifive,pwm0"; + reg = <0x0 0x10021000 0x0 0x1000>; + interrupt-parent = <&plic0>; + interrupts = <48>, <49>, <50>, <51>; + clocks = <&prci PRCI_CLK_PCLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + ccache: cache-controller@2010000 { + compatible = "sifive,fu740-c000-ccache", "cache"; + cache-block-size = <64>; + cache-level = <2>; + cache-sets = <2048>; + cache-size = <2097152>; + cache-unified; + interrupt-parent = <&plic0>; + interrupts = <19 20 21 22>; + reg = <0x0 0x2010000 0x0 0x1000>; + }; + gpio: gpio@10060000 { + compatible = "sifive,fu740-c000-gpio", "sifive,gpio0"; + interrupt-parent = <&plic0>; + interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>, + <30>, <31>, <32>, <33>, <34>, <35>, <36>, + <37>, <38>; + reg = <0x0 0x10060000 0x0 0x1000>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + clocks = <&prci PRCI_CLK_PCLK>; + status = "disabled"; + }; + }; +}; diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts new file mode 100644 index 000000000000..b1c3c596578f --- /dev/null +++ 
b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright (c) 2020 SiFive, Inc */ + +#include "fu740-c000.dtsi" +#include <dt-bindings/interrupt-controller/irq.h> + +/* Clock frequency (in Hz) of the PCB crystal for rtcclk */ +#define RTCCLK_FREQ 1000000 + +/ { + #address-cells = <2>; + #size-cells = <2>; + model = "SiFive HiFive Unmatched A00"; + compatible = "sifive,hifive-unmatched-a00", "sifive,fu740-c000", + "sifive,fu740"; + + chosen { + stdout-path = "serial0"; + }; + + cpus { + timebase-frequency = <RTCCLK_FREQ>; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x0 0x80000000 0x2 0x00000000>; + }; + + soc { + }; + + hfclk: hfclk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <26000000>; + clock-output-names = "hfclk"; + }; + + rtcclk: rtcclk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <RTCCLK_FREQ>; + clock-output-names = "rtcclk"; + }; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&i2c0 { + status = "okay"; + + temperature-sensor@4c { + compatible = "ti,tmp451"; + reg = <0x4c>; + interrupt-parent = <&gpio>; + interrupts = <6 IRQ_TYPE_LEVEL_LOW>; + }; + + pmic@58 { + compatible = "dlg,da9063"; + reg = <0x58>; + interrupt-parent = <&gpio>; + interrupts = <1 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + + regulators { + vdd_bcore1: bcore1 { + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + regulator-min-microamp = <5000000>; + regulator-max-microamp = <5000000>; + regulator-always-on; + }; + + vdd_bcore2: bcore2 { + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + regulator-min-microamp = <5000000>; + regulator-max-microamp = <5000000>; + regulator-always-on; + }; + + vdd_bpro: bpro { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-min-microamp = <2500000>; + regulator-max-microamp = <2500000>; + regulator-always-on; + }; + + vdd_bperi: bperi { + regulator-min-microvolt = <1050000>; + regulator-max-microvolt = <1050000>; + regulator-min-microamp = <1500000>; + regulator-max-microamp = <1500000>; + regulator-always-on; + }; + + vdd_bmem: bmem { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-min-microamp = <3000000>; + regulator-max-microamp = <3000000>; + regulator-always-on; + }; + + vdd_bio: bio { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-min-microamp = <3000000>; + regulator-max-microamp = <3000000>; + regulator-always-on; + }; + + vdd_ldo1: ldo1 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-min-microamp = <100000>; + regulator-max-microamp = <100000>; + regulator-always-on; + }; + + vdd_ldo2: ldo2 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-min-microamp = <200000>; + regulator-max-microamp = <200000>; + regulator-always-on; + }; + + vdd_ldo3: ldo3 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-min-microamp = <200000>; + regulator-max-microamp = <200000>; + regulator-always-on; + }; + + vdd_ldo4: ldo4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-min-microamp = <200000>; + regulator-max-microamp = <200000>; + regulator-always-on; + }; + + vdd_ldo5: ldo5 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + 
regulator-min-microamp = <100000>; + regulator-max-microamp = <100000>; + regulator-always-on; + }; + + vdd_ldo6: ldo6 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-min-microamp = <200000>; + regulator-max-microamp = <200000>; + regulator-always-on; + }; + + vdd_ldo7: ldo7 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-min-microamp = <200000>; + regulator-max-microamp = <200000>; + regulator-always-on; + }; + + vdd_ldo8: ldo8 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-min-microamp = <200000>; + regulator-max-microamp = <200000>; + regulator-always-on; + }; + + vdd_ld09: ldo9 { + regulator-min-microvolt = <1050000>; + regulator-max-microvolt = <1050000>; + regulator-min-microamp = <200000>; + regulator-max-microamp = <200000>; + }; + + vdd_ldo10: ldo10 { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + regulator-min-microamp = <300000>; + regulator-max-microamp = <300000>; + }; + + vdd_ldo11: ldo11 { + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + regulator-min-microamp = <300000>; + regulator-max-microamp = <300000>; + regulator-always-on; + }; + }; + }; +}; + +&qspi0 { + status = "okay"; + flash@0 { + compatible = "issi,is25wp256", "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <50000000>; + m25p,fast-read; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; + }; +}; + +&spi0 { + status = "okay"; + mmc@0 { + compatible = "mmc-spi-slot"; + reg = <0>; + spi-max-frequency = <20000000>; + voltage-ranges = <3300 3300>; + disable-wp; + }; +}; + +ð0 { + status = "okay"; + phy-mode = "gmii"; + phy-handle = <&phy0>; + phy0: ethernet-phy@0 { + reg = <0>; + }; +}; + +&pwm0 { + status = "okay"; +}; + +&pwm1 { + status = "okay"; +}; + +&gpio { + status = "okay"; +}; diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 8c3d1e451703..6c0625aa96c7 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -17,6 +17,7 @@ CONFIG_BPF_SYSCALL=y CONFIG_SOC_SIFIVE=y CONFIG_SOC_VIRT=y CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig index cd1df62b13c7..b16a2a12c82a 100644 --- a/arch/riscv/configs/nommu_k210_defconfig +++ b/arch/riscv/configs/nommu_k210_defconfig @@ -1,17 +1,19 @@ # CONFIG_CPU_ISOLATION is not set -CONFIG_LOG_BUF_SHIFT=15 +CONFIG_LOG_BUF_SHIFT=13 CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12 CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_FORCE=y +# CONFIG_RD_GZIP is not set # CONFIG_RD_BZIP2 is not set # CONFIG_RD_LZMA is not set # CONFIG_RD_XZ is not set # CONFIG_RD_LZO is not set # CONFIG_RD_LZ4 is not set +# CONFIG_RD_ZSTD is not set CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSFS_SYSCALL is not set # CONFIG_FHANDLE is not set # CONFIG_BASE_FULL is not set +# CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set # CONFIG_TIMERFD is not set @@ -25,15 +27,17 @@ CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set CONFIG_SLOB=y -# CONFIG_SLAB_MERGE_DEFAULT is not set # CONFIG_MMU is not set -CONFIG_SOC_KENDRYTE=y +CONFIG_SOC_CANAAN=y +CONFIG_SOC_CANAAN_K210_DTB_SOURCE="k210_generic" CONFIG_MAXPHYSMEM_2GB=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_CMDLINE="earlycon console=ttySIF0" CONFIG_CMDLINE_FORCE=y -CONFIG_JUMP_LABEL=y +# CONFIG_SECCOMP is not set +# 
CONFIG_STACKPROTECTOR is not set +# CONFIG_GCC_PLUGINS is not set # CONFIG_BLOCK is not set CONFIG_BINFMT_FLAT=y # CONFIG_COREDUMP is not set @@ -41,23 +45,47 @@ CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y # CONFIG_FW_LOADER is not set # CONFIG_ALLOW_DEV_COREDUMP is not set -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT is not set # CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_UNIX98_PTYS is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_LDISC_AUTOLOAD is not set # CONFIG_HW_RANDOM is not set # CONFIG_DEVMEM is not set +CONFIG_I2C=y +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_SPI=y +# CONFIG_SPI_MEM is not set +CONFIG_SPI_DESIGNWARE=y +CONFIG_SPI_DW_MMIO=y +# CONFIG_GPIO_CDEV_V1 is not set +CONFIG_GPIO_DWAPB=y +CONFIG_GPIO_SIFIVE=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_SYSCON=y # CONFIG_HWMON is not set -# CONFIG_VGA_CONSOLE is not set -# CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_USER=y # CONFIG_VIRTIO_MENU is not set +# CONFIG_VHOST_MENU is not set +# CONFIG_SURFACE_PLATFORMS is not set +# CONFIG_FILE_LOCKING is not set # CONFIG_DNOTIFY is not set # CONFIG_INOTIFY_USER is not set # CONFIG_MISC_FILESYSTEMS is not set CONFIG_LSM="[]" CONFIG_PRINTK_TIME=y +# CONFIG_SYMBOLIC_ERRNAME is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +# CONFIG_FRAME_POINTER is not set # CONFIG_DEBUG_MISC is not set CONFIG_PANIC_ON_OOPS=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig new file mode 100644 index 000000000000..61f887f65419 --- /dev/null +++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig @@ -0,0 +1,92 @@ +# CONFIG_CPU_ISOLATION is not set +CONFIG_LOG_BUF_SHIFT=13 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12 +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +# CONFIG_SYSFS_SYSCALL is not set +# CONFIG_FHANDLE is not set +# CONFIG_BASE_FULL is not set +# CONFIG_FUTEX is not set +# CONFIG_EPOLL is not set +# CONFIG_SIGNALFD is not set +# CONFIG_TIMERFD is not set +# CONFIG_EVENTFD is not set +# CONFIG_AIO is not set +# CONFIG_IO_URING is not set +# CONFIG_ADVISE_SYSCALLS is not set +# CONFIG_MEMBARRIER is not set +# CONFIG_KALLSYMS is not set +CONFIG_EMBEDDED=y +# CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_SLOB=y +# CONFIG_MMU is not set +CONFIG_SOC_CANAAN=y +CONFIG_SOC_CANAAN_K210_DTB_SOURCE="k210_generic" +CONFIG_MAXPHYSMEM_2GB=y +CONFIG_SMP=y +CONFIG_NR_CPUS=2 +CONFIG_CMDLINE="earlycon console=ttySIF0 rootdelay=2 root=/dev/mmcblk0p1 ro" +CONFIG_CMDLINE_FORCE=y +# CONFIG_SECCOMP is not set +# CONFIG_STACKPROTECTOR is not set +# CONFIG_GCC_PLUGINS is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_MQ_IOSCHED_DEADLINE is not set +# CONFIG_MQ_IOSCHED_KYBER is not set +CONFIG_BINFMT_FLAT=y +# CONFIG_COREDUMP is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_FW_LOADER is not set +# CONFIG_ALLOW_DEV_COREDUMP is not set +# CONFIG_BLK_DEV is not set +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_LDISC_AUTOLOAD is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_DEVMEM is not set +CONFIG_I2C=y +CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_SPI=y +# CONFIG_SPI_MEM is not set 
+CONFIG_SPI_DESIGNWARE=y +CONFIG_SPI_DW_MMIO=y +# CONFIG_GPIO_CDEV_V1 is not set +CONFIG_GPIO_DWAPB=y +CONFIG_GPIO_SIFIVE=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_HWMON is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_MMC=y +# CONFIG_PWRSEQ_EMMC is not set +# CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_SPI=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_USER=y +# CONFIG_VIRTIO_MENU is not set +# CONFIG_VHOST_MENU is not set +# CONFIG_SURFACE_PLATFORMS is not set +CONFIG_EXT2_FS=y +# CONFIG_FILE_LOCKING is not set +# CONFIG_DNOTIFY is not set +# CONFIG_INOTIFY_USER is not set +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_LSM="[]" +CONFIG_PRINTK_TIME=y +# CONFIG_SYMBOLIC_ERRNAME is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +# CONFIG_FRAME_POINTER is not set +# CONFIG_DEBUG_MISC is not set +CONFIG_PANIC_ON_OOPS=y +# CONFIG_SCHED_DEBUG is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_FTRACE is not set +# CONFIG_RUNTIME_TESTING_MENU is not set diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig index 2c2cda6cc1c5..8dd02b842fef 100644 --- a/arch/riscv/configs/rv32_defconfig +++ b/arch/riscv/configs/rv32_defconfig @@ -18,6 +18,7 @@ CONFIG_SOC_SIFIVE=y CONFIG_SOC_VIRT=y CONFIG_ARCH_RV32I=y CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h index d6f1ec08d97b..d3804a2f9aad 100644 --- a/arch/riscv/include/asm/bug.h +++ b/arch/riscv/include/asm/bug.h @@ -85,6 +85,7 @@ do { \ struct pt_regs; struct task_struct; +void __show_regs(struct pt_regs *regs); void die(struct pt_regs *regs, const char *str); void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr); diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index cec462e198ce..caadfc1d7487 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -41,10 +41,16 @@ #define SATP_PPN _AC(0x003FFFFF, UL) #define SATP_MODE_32 _AC(0x80000000, UL) #define SATP_MODE SATP_MODE_32 +#define SATP_ASID_BITS 9 +#define SATP_ASID_SHIFT 22 +#define SATP_ASID_MASK _AC(0x1FF, UL) #else #define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL) #define SATP_MODE_39 _AC(0x8000000000000000, UL) #define SATP_MODE SATP_MODE_39 +#define SATP_ASID_BITS 16 +#define SATP_ASID_SHIFT 44 +#define SATP_ASID_MASK _AC(0xFFFF, UL) #endif /* Exception cause high bit - is an interrupt if set */ diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h index b04028c6218c..a2b3d9cdbc86 100644 --- a/arch/riscv/include/asm/kasan.h +++ b/arch/riscv/include/asm/kasan.h @@ -8,12 +8,28 @@ #ifdef CONFIG_KASAN +/* + * The following comment was copied from arm64: + * KASAN_SHADOW_START: beginning of the kernel virtual addresses. + * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses, + * where N = (1 << KASAN_SHADOW_SCALE_SHIFT). + * + * KASAN_SHADOW_OFFSET: + * This value is used to map an address to the corresponding shadow + * address by the following formula: + * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET + * + * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range + * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual + * addresses. 
So KASAN_SHADOW_OFFSET should satisfy the following equation: + * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - + * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT)) + */ #define KASAN_SHADOW_SCALE_SHIFT 3 -#define KASAN_SHADOW_SIZE (UL(1) << (38 - KASAN_SHADOW_SCALE_SHIFT)) -#define KASAN_SHADOW_START KERN_VIRT_START /* 2^64 - 2^38 */ +#define KASAN_SHADOW_SIZE (UL(1) << ((CONFIG_VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT)) +#define KASAN_SHADOW_START KERN_VIRT_START #define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE) - #define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \ (64 - KASAN_SHADOW_SCALE_SHIFT))) diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h index 56a98ea30731..4647d38018f6 100644 --- a/arch/riscv/include/asm/kprobes.h +++ b/arch/riscv/include/asm/kprobes.h @@ -11,4 +11,44 @@ #include <asm-generic/kprobes.h> +#ifdef CONFIG_KPROBES +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/percpu.h> + +#define __ARCH_WANT_KPROBES_INSN_SLOT +#define MAX_INSN_SIZE 2 + +#define flush_insn_slot(p) do { } while (0) +#define kretprobe_blacklist_size 0 + +#include <asm/probes.h> + +struct prev_kprobe { + struct kprobe *kp; + unsigned int status; +}; + +/* Single step context for kprobe */ +struct kprobe_step_ctx { + unsigned long ss_pending; + unsigned long match_addr; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned int kprobe_status; + unsigned long saved_status; + struct prev_kprobe prev_kprobe; + struct kprobe_step_ctx ss_ctx; +}; + +void arch_remove_kprobe(struct kprobe *p); +int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); +bool kprobe_breakpoint_handler(struct pt_regs *regs); +bool kprobe_single_step_handler(struct pt_regs *regs); +void kretprobe_trampoline(void); +void __kprobes *trampoline_probe_handler(struct pt_regs *regs); + +#endif /* CONFIG_KPROBES */ #endif /* _ASM_RISCV_KPROBES_H */ diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index dabcf2cfb3dc..0099dc116168 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -12,6 +12,8 @@ typedef struct { #ifndef CONFIG_MMU unsigned long end_brk; +#else + atomic_long_t id; #endif void *vdso; #ifdef CONFIG_SMP diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h index 250defa06f3a..b0659413a080 100644 --- a/arch/riscv/include/asm/mmu_context.h +++ b/arch/riscv/include/asm/mmu_context.h @@ -23,6 +23,16 @@ static inline void activate_mm(struct mm_struct *prev, switch_mm(prev, next, NULL); } +#define init_new_context init_new_context +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ +#ifdef CONFIG_MMU + atomic_long_set(&mm->context.id, 0); +#endif + return 0; +} + #include <asm-generic/mmu_context.h> #endif /* _ASM_RISCV_MMU_CONTEXT_H */ diff --git a/arch/riscv/include/asm/mmzone.h b/arch/riscv/include/asm/mmzone.h new file mode 100644 index 000000000000..fa17e01d9ab2 --- /dev/null +++ b/arch/riscv/include/asm/mmzone.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_MMZONE_H +#define __ASM_MMZONE_H + +#ifdef CONFIG_NUMA + +#include <asm/numa.h> + +extern struct pglist_data *node_data[]; +#define NODE_DATA(nid) (node_data[(nid)]) + +#endif /* CONFIG_NUMA */ +#endif /* __ASM_MMZONE_H */ diff --git a/arch/riscv/include/asm/numa.h b/arch/riscv/include/asm/numa.h new file mode 100644 index 000000000000..8c8cf4297cc3 --- /dev/null +++ b/arch/riscv/include/asm/numa.h @@ -0,0 +1,8 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_NUMA_H +#define __ASM_NUMA_H + +#include <asm/topology.h> +#include <asm-generic/numa.h> + +#endif /* __ASM_NUMA_H */ diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 64a675c5c30a..adc9d26f3d75 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -97,9 +97,6 @@ extern unsigned long pfn_base; #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) #endif /* CONFIG_MMU */ -extern unsigned long max_low_pfn; -extern unsigned long min_low_pfn; - #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset)) #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset) diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h index 1c473a1bd986..658e112c3ce7 100644 --- a/arch/riscv/include/asm/pci.h +++ b/arch/riscv/include/asm/pci.h @@ -32,6 +32,20 @@ static inline int pci_proc_domain(struct pci_bus *bus) /* always show the domain in /proc */ return 1; } + +#ifdef CONFIG_NUMA + +static inline int pcibus_to_node(struct pci_bus *bus) +{ + return dev_to_node(&bus->dev); +} +#ifndef cpumask_of_pcibus +#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ + cpu_all_mask : \ + cpumask_of_node(pcibus_to_node(bus))) +#endif +#endif /* CONFIG_NUMA */ + #endif /* CONFIG_PCI */ #endif /* _ASM_RISCV_PCI_H */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 251e1db088fa..ebf817c1bdf4 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -186,6 +186,11 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT); } +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} + /* Yields the page frame number (PFN) of a page table entry */ static inline unsigned long pte_pfn(pte_t pte) { @@ -289,6 +294,21 @@ static inline pte_t pte_mkhuge(pte_t pte) return pte; } +#ifdef CONFIG_NUMA_BALANCING +/* + * See the comment in include/asm-generic/pgtable.h + */ +static inline int pte_protnone(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return pte_protnone(pmd_pte(pmd)); +} +#endif + /* Modify page protection bits */ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { @@ -468,6 +488,7 @@ extern void *dtb_early_va; extern uintptr_t dtb_early_pa; void setup_bootmem(void); void paging_init(void); +void misc_mem_init(void); #define FIRST_USER_ADDRESS 0 diff --git a/arch/riscv/include/asm/probes.h b/arch/riscv/include/asm/probes.h new file mode 100644 index 000000000000..a787e6d537b9 --- /dev/null +++ b/arch/riscv/include/asm/probes.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_PROBES_H +#define _ASM_RISCV_PROBES_H + +typedef u32 probe_opcode_t; +typedef bool (probes_handler_t) (u32 opcode, unsigned long addr, struct pt_regs *); + +/* architecture specific copy of original instruction */ +struct arch_probe_insn { + probe_opcode_t *insn; + probes_handler_t *handler; + /* restore address after simulation */ + unsigned long restore; +}; + +#ifdef CONFIG_KPROBES +typedef u32 kprobe_opcode_t; +struct arch_specific_insn { + struct arch_probe_insn api; +}; +#endif + +#endif /* _ASM_RISCV_PROBES_H */ diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index bdddcd5c1b71..3a240037bde2 100644 --- a/arch/riscv/include/asm/processor.h +++ 
b/arch/riscv/include/asm/processor.h @@ -34,6 +34,7 @@ struct thread_struct { unsigned long sp; /* Kernel mode stack */ unsigned long s[12]; /* s[0]: frame pointer */ struct __riscv_d_ext_state fstate; + unsigned long bad_cause; }; #define INIT_THREAD { \ diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h index ee49f80c9533..cb4abb639e8d 100644 --- a/arch/riscv/include/asm/ptrace.h +++ b/arch/riscv/include/asm/ptrace.h @@ -8,6 +8,7 @@ #include <uapi/asm/ptrace.h> #include <asm/csr.h> +#include <linux/compiler.h> #ifndef __ASSEMBLY__ @@ -60,6 +61,7 @@ struct pt_regs { #define user_mode(regs) (((regs)->status & SR_PP) == 0) +#define MAX_REG_OFFSET offsetof(struct pt_regs, orig_a0) /* Helpers for working with the instruction pointer */ static inline unsigned long instruction_pointer(struct pt_regs *regs) @@ -85,6 +87,12 @@ static inline void user_stack_pointer_set(struct pt_regs *regs, regs->sp = val; } +/* Valid only for Kernel mode traps. */ +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->sp; +} + /* Helpers for working with the frame pointer */ static inline unsigned long frame_pointer(struct pt_regs *regs) { @@ -101,6 +109,33 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) return regs->a0; } +static inline void regs_set_return_value(struct pt_regs *regs, + unsigned long val) +{ + regs->a0 = val; +} + +extern int regs_query_register_offset(const char *name); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten + * @offset: offset of the register. + * + * regs_get_register returns the value of a register whose offset from @regs. + * The @offset is the offset of the register in struct pt_regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. 
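In practice this helper pairs with regs_query_register_offset(), declared just above, which maps an ABI register name to its offset in struct pt_regs. A minimal kprobe pre-handler using the pair might look like the following sketch (the handler and the choice of "a0" are illustrative, not part of this patch):

	#include <linux/kernel.h>
	#include <linux/kprobes.h>
	#include <linux/ptrace.h>

	static int sketch_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		int off = regs_query_register_offset("a0");	/* first argument register */

		if (off >= 0)	/* negative errno when the name is unknown */
			pr_info("a0 = 0x%lx\n", regs_get_register(regs, off));
		return 0;
	}

Both failure paths are safe: an unknown name yields a negative offset, and regs_get_register() returns 0 for any offset beyond MAX_REG_OFFSET.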
+ */ +static inline unsigned long regs_get_register(struct pt_regs *regs, + unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_PTRACE_H */ diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 653edb25d495..99895d9c3bdd 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -89,7 +89,7 @@ struct sbiret { long value; }; -int sbi_init(void); +void sbi_init(void); struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4, @@ -100,13 +100,13 @@ int sbi_console_getchar(void); void sbi_set_timer(uint64_t stime_value); void sbi_shutdown(void); void sbi_clear_ipi(void); -void sbi_send_ipi(const unsigned long *hart_mask); -void sbi_remote_fence_i(const unsigned long *hart_mask); -void sbi_remote_sfence_vma(const unsigned long *hart_mask, +int sbi_send_ipi(const unsigned long *hart_mask); +int sbi_remote_fence_i(const unsigned long *hart_mask); +int sbi_remote_sfence_vma(const unsigned long *hart_mask, unsigned long start, unsigned long size); -void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask, +int sbi_remote_sfence_vma_asid(const unsigned long *hart_mask, unsigned long start, unsigned long size, unsigned long asid); @@ -147,11 +147,7 @@ static inline unsigned long sbi_minor_version(void) int sbi_err_map_linux_errno(int err); #else /* CONFIG_RISCV_SBI */ -/* stubs for code that is only reachable under IS_ENABLED(CONFIG_RISCV_SBI): */ -void sbi_set_timer(uint64_t stime_value); -void sbi_clear_ipi(void); -void sbi_send_ipi(const unsigned long *hart_mask); -void sbi_remote_fence_i(const unsigned long *hart_mask); -void sbi_init(void); +static inline int sbi_remote_fence_i(const unsigned long *hart_mask) { return -1; } +static inline void sbi_init(void) {} #endif /* CONFIG_RISCV_SBI */ #endif /* _ASM_RISCV_SBI_H */ diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h index 8b80c80c7f1a..6887b3d9f371 100644 --- a/arch/riscv/include/asm/set_memory.h +++ b/arch/riscv/include/asm/set_memory.h @@ -22,7 +22,7 @@ static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } -static inline void protect_kernel_text_data(void) {}; +static inline void protect_kernel_text_data(void) {} static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; } #endif diff --git a/arch/riscv/include/asm/soc.h b/arch/riscv/include/asm/soc.h index 6c8363b1f327..f494066051a2 100644 --- a/arch/riscv/include/asm/soc.h +++ b/arch/riscv/include/asm/soc.h @@ -21,42 +21,4 @@ void soc_early_init(void); extern unsigned long __soc_early_init_table_start; extern unsigned long __soc_early_init_table_end; -/* - * Allows Linux to provide a device tree, which is necessary for SOCs that - * don't provide a useful one on their own. - */ -struct soc_builtin_dtb { - unsigned long vendor_id; - unsigned long arch_id; - unsigned long imp_id; - void *(*dtb_func)(void); -}; - -/* - * The argument name must specify a valid DTS file name without the dts - * extension. 
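For reference, a user of the macro removed below passed exactly that DTS basename, plus the vendor/arch/implementation IDs used for matching; an illustrative invocation (the ID values here are placeholders, not taken from this patch):

	/* Emits a lookup-table entry whose dtb_func returns
	 * &__dtb_k210_begin once the running CPU's vendor/arch/impl
	 * IDs match the given triple.
	 */
	SOC_BUILTIN_DTB_DECLARE(k210, 0x4b5, 0x0, 0x0);

The replacement scheme appears later in this patch: with CONFIG_BUILTIN_DTB set, head.S passes __dtb_start straight to setup_vm(), so no runtime lookup table is needed.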
- */ -#define SOC_BUILTIN_DTB_DECLARE(name, vendor, arch, impl) \ - extern void *__dtb_##name##_begin; \ - \ - static __init __used \ - void *__soc_builtin_dtb_f__##name(void) \ - { \ - return (void *)&__dtb_##name##_begin; \ - } \ - \ - static const struct soc_builtin_dtb __soc_builtin_dtb__##name \ - __used __section("__soc_builtin_dtb_table") = \ - { \ - .vendor_id = vendor, \ - .arch_id = arch, \ - .imp_id = impl, \ - .dtb_func = __soc_builtin_dtb_f__##name, \ - } - -extern unsigned long __soc_builtin_dtb_table_start; -extern unsigned long __soc_builtin_dtb_table_end; - -void *soc_lookup_builtin_dtb(void); - #endif diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h index 5962f8891f06..09093af46565 100644 --- a/arch/riscv/include/asm/stackprotector.h +++ b/arch/riscv/include/asm/stackprotector.h @@ -24,6 +24,7 @@ static __always_inline void boot_init_stack_canary(void) canary &= CANARY_MASK; current->stack_canary = canary; - __stack_chk_guard = current->stack_canary; + if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK)) + __stack_chk_guard = current->stack_canary; } #endif /* _ASM_RISCV_STACKPROTECTOR_H */ diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h index 470a65c4ccdc..3450c1912afd 100644 --- a/arch/riscv/include/asm/stacktrace.h +++ b/arch/riscv/include/asm/stacktrace.h @@ -13,5 +13,7 @@ struct stackframe { extern void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg); +extern void dump_backtrace(struct pt_regs *regs, struct task_struct *task, + const char *loglvl); #endif /* _ASM_RISCV_STACKTRACE_H */ diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index 97bf5a1575d2..0e549a3089b3 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -75,6 +75,7 @@ struct thread_info { #define TIF_SYSCALL_AUDIT 7 /* syscall auditing */ #define TIF_SECCOMP 8 /* syscall secure computing */ #define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */ +#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) @@ -84,10 +85,11 @@ struct thread_info { #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_WORK_MASK \ (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \ - _TIF_NOTIFY_SIGNAL) + _TIF_NOTIFY_SIGNAL | _TIF_UPROBE) #define _TIF_SYSCALL_WORK \ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \ diff --git a/arch/riscv/include/asm/uprobes.h b/arch/riscv/include/asm/uprobes.h new file mode 100644 index 000000000000..f2183e00fdd2 --- /dev/null +++ b/arch/riscv/include/asm/uprobes.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _ASM_RISCV_UPROBES_H +#define _ASM_RISCV_UPROBES_H + +#include <asm/probes.h> +#include <asm/patch.h> +#include <asm/bug.h> + +#define MAX_UINSN_BYTES 8 + +#ifdef CONFIG_RISCV_ISA_C +#define UPROBE_SWBP_INSN __BUG_INSN_16 +#define UPROBE_SWBP_INSN_SIZE 2 +#else +#define UPROBE_SWBP_INSN __BUG_INSN_32 +#define UPROBE_SWBP_INSN_SIZE 4 +#endif +#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES + +typedef u32 uprobe_opcode_t; + +struct arch_uprobe_task { + unsigned long saved_cause; +}; + +struct arch_uprobe { + union { + u8 insn[MAX_UINSN_BYTES]; + u8 
ixol[MAX_UINSN_BYTES]; + }; + struct arch_probe_insn api; + unsigned long insn_size; + bool simulate; +}; + +bool uprobe_breakpoint_handler(struct pt_regs *regs); +bool uprobe_single_step_handler(struct pt_regs *regs); + +#endif /* _ASM_RISCV_UPROBES_H */ diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index f6caf4d9ca15..3dc0abde988a 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -4,8 +4,9 @@ # ifdef CONFIG_FTRACE -CFLAGS_REMOVE_ftrace.o = -pg -CFLAGS_REMOVE_patch.o = -pg +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE) endif extra-y += head.o @@ -29,6 +30,7 @@ obj-y += riscv_ksyms.o obj-y += stacktrace.o obj-y += cacheinfo.o obj-y += patch.o +obj-y += probes/ obj-$(CONFIG_MMU) += vdso.o vdso/ obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index b79ffa3561fd..9ef33346853c 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -68,6 +68,9 @@ void asm_offsets(void) OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]); OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]); OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr); +#ifdef CONFIG_STACKPROTECTOR + OFFSET(TSK_STACK_CANARY, task_struct, stack_canary); +#endif DEFINE(PT_SIZE, sizeof(struct pt_regs)); OFFSET(PT_EPC, pt_regs, epc); diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 765b62434f30..7f1e5203de88 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -72,29 +72,56 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target, return 0; } +/* + * Put 5 instructions with 16 bytes at the front of function within + * patchable function entry nops' area. + * + * 0: REG_S ra, -SZREG(sp) + * 1: auipc ra, 0x? + * 2: jalr -?(ra) + * 3: REG_L ra, -SZREG(sp) + * + * So the opcodes is: + * 0: 0xfe113c23 (sd)/0xfe112e23 (sw) + * 1: 0x???????? -> auipc + * 2: 0x???????? 
-> jalr + * 3: 0xff813083 (ld)/0xffc12083 (lw) + */ +#if __riscv_xlen == 64 +#define INSN0 0xfe113c23 +#define INSN3 0xff813083 +#elif __riscv_xlen == 32 +#define INSN0 0xfe112e23 +#define INSN3 0xffc12083 +#endif + +#define FUNC_ENTRY_SIZE 16 +#define FUNC_ENTRY_JMP 4 + int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { - int ret = ftrace_check_current_call(rec->ip, NULL); + unsigned int call[4] = {INSN0, 0, 0, INSN3}; + unsigned long target = addr; + unsigned long caller = rec->ip + FUNC_ENTRY_JMP; - if (ret) - return ret; + call[1] = to_auipc_insn((unsigned int)(target - caller)); + call[2] = to_jalr_insn((unsigned int)(target - caller)); - return __ftrace_modify_call(rec->ip, addr, true); + if (patch_text_nosync((void *)rec->ip, call, FUNC_ENTRY_SIZE)) + return -EPERM; + + return 0; } int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { - unsigned int call[2]; - int ret; + unsigned int nops[4] = {NOP4, NOP4, NOP4, NOP4}; - make_call(rec->ip, addr, call); - ret = ftrace_check_current_call(rec->ip, call); - - if (ret) - return ret; + if (patch_text_nosync((void *)rec->ip, nops, FUNC_ENTRY_SIZE)) + return -EPERM; - return __ftrace_modify_call(rec->ip, addr, false); + return 0; } @@ -139,15 +166,16 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr) { unsigned int call[2]; + unsigned long caller = rec->ip + FUNC_ENTRY_JMP; int ret; - make_call(rec->ip, old_addr, call); - ret = ftrace_check_current_call(rec->ip, call); + make_call(caller, old_addr, call); + ret = ftrace_check_current_call(caller, call); if (ret) return ret; - return __ftrace_modify_call(rec->ip, addr, true); + return __ftrace_modify_call(caller, addr, true); } #endif @@ -176,53 +204,30 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, #ifdef CONFIG_DYNAMIC_FTRACE extern void ftrace_graph_call(void); +extern void ftrace_graph_regs_call(void); int ftrace_enable_ftrace_graph_caller(void) { - unsigned int call[2]; - static int init_graph = 1; int ret; - make_call(&ftrace_graph_call, &ftrace_stub, call); - - /* - * When enabling graph tracer for the first time, ftrace_graph_call - * should contains a call to ftrace_stub. Once it has been disabled, - * the 8-bytes at the position becomes NOPs. - */ - if (init_graph) { - ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call, - call); - init_graph = 0; - } else { - ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call, - NULL); - } - + ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call, + (unsigned long)&prepare_ftrace_return, true); if (ret) return ret; - return __ftrace_modify_call((unsigned long)&ftrace_graph_call, + return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call, (unsigned long)&prepare_ftrace_return, true); } int ftrace_disable_ftrace_graph_caller(void) { - unsigned int call[2]; int ret; - make_call(&ftrace_graph_call, &prepare_ftrace_return, call); - - /* - * This is to make sure that ftrace_enable_ftrace_graph_caller - * did the right thing. 
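A note on the to_auipc_insn()/to_jalr_insn() pair used above: the patched prologue is four 32-bit instructions totalling 16 bytes (FUNC_ENTRY_SIZE), and because jalr sign-extends its 12-bit immediate, the auipc half of the 32-bit pc-relative offset must be rounded up whenever bit 11 of the offset is set. A self-contained sketch of that split, with helper names that are illustrative rather than the patch's own:

	#include <linux/types.h>

	/* Split a 32-bit pc-relative offset so that
	 * auipc_imm(off) + jalr_imm(off) == off, despite jalr
	 * sign-extending its low 12 bits.
	 */
	static inline u32 auipc_imm(u32 offset)
	{
		return (offset + 0x800) & 0xfffff000;	/* round up if bit 11 is set */
	}

	static inline s32 jalr_imm(u32 offset)
	{
		return (s32)(offset << 20) >> 20;	/* sign-extend the low 12 bits */
	}

For example, an offset of 0x1800 splits into an auipc immediate of 0x2000 and a jalr immediate of -0x800, which sum back to 0x1800.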
- */ - ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call, - call); - + ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call, + (unsigned long)&prepare_ftrace_return, false); if (ret) return ret; - return __ftrace_modify_call((unsigned long)&ftrace_graph_call, + return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call, (unsigned long)&prepare_ftrace_return, false); } #endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 16e9941900c4..f5a9bad86e58 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -260,7 +260,11 @@ clear_bss_done: /* Initialize page tables and relocate to virtual addresses */ la sp, init_thread_union + THREAD_SIZE +#ifdef CONFIG_BUILTIN_DTB + la a0, __dtb_start +#else mv a0, s1 +#endif /* CONFIG_BUILTIN_DTB */ call setup_vm #ifdef CONFIG_MMU la a0, early_pg_dir diff --git a/arch/riscv/kernel/image-vars.h b/arch/riscv/kernel/image-vars.h index 8c212efb37a6..71a76a623257 100644 --- a/arch/riscv/kernel/image-vars.h +++ b/arch/riscv/kernel/image-vars.h @@ -3,7 +3,7 @@ * Copyright (C) 2020 Western Digital Corporation or its affiliates. * Linker script variables to be set after section resolution, as * ld.lld does not like variables assigned before SECTIONS is processed. - * Based on arch/arm64/kerne/image-vars.h + * Based on arch/arm64/kernel/image-vars.h */ #ifndef __RISCV_KERNEL_IMAGE_VARS_H #define __RISCV_KERNEL_IMAGE_VARS_H diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S index 35a6ed76cb8b..d171eca623b6 100644 --- a/arch/riscv/kernel/mcount-dyn.S +++ b/arch/riscv/kernel/mcount-dyn.S @@ -13,224 +13,186 @@ .text - .macro SAVE_ABI_STATE -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - addi sp, sp, -48 - sd s0, 32(sp) - sd ra, 40(sp) - addi s0, sp, 48 - sd t0, 24(sp) - sd t1, 16(sp) -#ifdef HAVE_FUNCTION_GRAPH_FP_TEST - sd t2, 8(sp) -#endif -#else - addi sp, sp, -16 - sd s0, 0(sp) - sd ra, 8(sp) - addi s0, sp, 16 -#endif - .endm +#define FENTRY_RA_OFFSET 12 +#define ABI_SIZE_ON_STACK 72 +#define ABI_A0 0 +#define ABI_A1 8 +#define ABI_A2 16 +#define ABI_A3 24 +#define ABI_A4 32 +#define ABI_A5 40 +#define ABI_A6 48 +#define ABI_A7 56 +#define ABI_RA 64 - .macro RESTORE_ABI_STATE -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - ld s0, 32(sp) - ld ra, 40(sp) - addi sp, sp, 48 -#else - ld ra, 8(sp) - ld s0, 0(sp) - addi sp, sp, 16 -#endif - .endm + .macro SAVE_ABI + addi sp, sp, -SZREG + addi sp, sp, -ABI_SIZE_ON_STACK - .macro RESTORE_GRAPH_ARGS - ld a0, 24(sp) - ld a1, 16(sp) -#ifdef HAVE_FUNCTION_GRAPH_FP_TEST - ld a2, 8(sp) -#endif + REG_S a0, ABI_A0(sp) + REG_S a1, ABI_A1(sp) + REG_S a2, ABI_A2(sp) + REG_S a3, ABI_A3(sp) + REG_S a4, ABI_A4(sp) + REG_S a5, ABI_A5(sp) + REG_S a6, ABI_A6(sp) + REG_S a7, ABI_A7(sp) + REG_S ra, ABI_RA(sp) .endm -ENTRY(ftrace_graph_caller) - addi sp, sp, -16 - sd s0, 0(sp) - sd ra, 8(sp) - addi s0, sp, 16 -ftrace_graph_call: - .global ftrace_graph_call - /* - * Calling ftrace_enable/disable_ftrace_graph_caller would overwrite the - * call below. Check ftrace_modify_all_code for details. 
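FENTRY_RA_OFFSET is 12 because of how the patched prologue shown earlier executes: the REG_S occupies bytes 0-3 of the function entry, the auipc bytes 4-7, and the jalr bytes 8-11, and the jalr leaves ra pointing at the instruction after itself. The 'addi a0, ra, -FENTRY_RA_OFFSET' in the new ftrace_caller below therefore recovers the traced function's entry address; as a C sketch (the helper name is illustrative):

	#define FENTRY_RA_OFFSET	12	/* REG_S (4) + auipc (4) + jalr (4) */

	/* ra observed in ftrace_caller points just past the patched jalr,
	 * so the traced function entry is a fixed 12 bytes back.
	 */
	static inline unsigned long fentry_ip(unsigned long ra)
	{
		return ra - FENTRY_RA_OFFSET;
	}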
- */ - call ftrace_stub - ld ra, 8(sp) - ld s0, 0(sp) - addi sp, sp, 16 - ret -ENDPROC(ftrace_graph_caller) - -ENTRY(ftrace_caller) - /* - * a0: the address in the caller when calling ftrace_caller - * a1: the caller's return address - * a2: the address of global variable function_trace_op - */ - ld a1, -8(s0) - addi a0, ra, -MCOUNT_INSN_SIZE - la t5, function_trace_op - ld a2, 0(t5) - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - /* - * the graph tracer (specifically, prepare_ftrace_return) needs these - * arguments but for now the function tracer occupies the regs, so we - * save them in temporary regs to recover later. - */ - addi t0, s0, -8 - mv t1, a0 -#ifdef HAVE_FUNCTION_GRAPH_FP_TEST - ld t2, -16(s0) -#endif -#endif - - SAVE_ABI_STATE -ftrace_call: - .global ftrace_call - /* - * For the dynamic ftrace to work, here we should reserve at least - * 8 bytes for a functional auipc-jalr pair. The following call - * serves this purpose. - * - * Calling ftrace_update_ftrace_func would overwrite the nops below. - * Check ftrace_modify_all_code for details. - */ - call ftrace_stub - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - RESTORE_GRAPH_ARGS - call ftrace_graph_caller -#endif + .macro RESTORE_ABI + REG_L a0, ABI_A0(sp) + REG_L a1, ABI_A1(sp) + REG_L a2, ABI_A2(sp) + REG_L a3, ABI_A3(sp) + REG_L a4, ABI_A4(sp) + REG_L a5, ABI_A5(sp) + REG_L a6, ABI_A6(sp) + REG_L a7, ABI_A7(sp) + REG_L ra, ABI_RA(sp) - RESTORE_ABI_STATE - ret -ENDPROC(ftrace_caller) + addi sp, sp, ABI_SIZE_ON_STACK + addi sp, sp, SZREG + .endm #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS .macro SAVE_ALL - addi sp, sp, -(PT_SIZE_ON_STACK+16) - sd s0, (PT_SIZE_ON_STACK)(sp) - sd ra, (PT_SIZE_ON_STACK+8)(sp) - addi s0, sp, (PT_SIZE_ON_STACK+16) + addi sp, sp, -SZREG + addi sp, sp, -PT_SIZE_ON_STACK - sd x1, PT_RA(sp) - sd x2, PT_SP(sp) - sd x3, PT_GP(sp) - sd x4, PT_TP(sp) - sd x5, PT_T0(sp) - sd x6, PT_T1(sp) - sd x7, PT_T2(sp) - sd x8, PT_S0(sp) - sd x9, PT_S1(sp) - sd x10, PT_A0(sp) - sd x11, PT_A1(sp) - sd x12, PT_A2(sp) - sd x13, PT_A3(sp) - sd x14, PT_A4(sp) - sd x15, PT_A5(sp) - sd x16, PT_A6(sp) - sd x17, PT_A7(sp) - sd x18, PT_S2(sp) - sd x19, PT_S3(sp) - sd x20, PT_S4(sp) - sd x21, PT_S5(sp) - sd x22, PT_S6(sp) - sd x23, PT_S7(sp) - sd x24, PT_S8(sp) - sd x25, PT_S9(sp) - sd x26, PT_S10(sp) - sd x27, PT_S11(sp) - sd x28, PT_T3(sp) - sd x29, PT_T4(sp) - sd x30, PT_T5(sp) - sd x31, PT_T6(sp) + REG_S x1, PT_EPC(sp) + addi sp, sp, PT_SIZE_ON_STACK + REG_L x1, (sp) + addi sp, sp, -PT_SIZE_ON_STACK + REG_S x1, PT_RA(sp) + REG_L x1, PT_EPC(sp) + + REG_S x2, PT_SP(sp) + REG_S x3, PT_GP(sp) + REG_S x4, PT_TP(sp) + REG_S x5, PT_T0(sp) + REG_S x6, PT_T1(sp) + REG_S x7, PT_T2(sp) + REG_S x8, PT_S0(sp) + REG_S x9, PT_S1(sp) + REG_S x10, PT_A0(sp) + REG_S x11, PT_A1(sp) + REG_S x12, PT_A2(sp) + REG_S x13, PT_A3(sp) + REG_S x14, PT_A4(sp) + REG_S x15, PT_A5(sp) + REG_S x16, PT_A6(sp) + REG_S x17, PT_A7(sp) + REG_S x18, PT_S2(sp) + REG_S x19, PT_S3(sp) + REG_S x20, PT_S4(sp) + REG_S x21, PT_S5(sp) + REG_S x22, PT_S6(sp) + REG_S x23, PT_S7(sp) + REG_S x24, PT_S8(sp) + REG_S x25, PT_S9(sp) + REG_S x26, PT_S10(sp) + REG_S x27, PT_S11(sp) + REG_S x28, PT_T3(sp) + REG_S x29, PT_T4(sp) + REG_S x30, PT_T5(sp) + REG_S x31, PT_T6(sp) .endm .macro RESTORE_ALL - ld x1, PT_RA(sp) - ld x2, PT_SP(sp) - ld x3, PT_GP(sp) - ld x4, PT_TP(sp) - ld x5, PT_T0(sp) - ld x6, PT_T1(sp) - ld x7, PT_T2(sp) - ld x8, PT_S0(sp) - ld x9, PT_S1(sp) - ld x10, PT_A0(sp) - ld x11, PT_A1(sp) - ld x12, PT_A2(sp) - ld x13, PT_A3(sp) - ld x14, PT_A4(sp) - ld x15, PT_A5(sp) - ld x16, 
PT_A6(sp) - ld x17, PT_A7(sp) - ld x18, PT_S2(sp) - ld x19, PT_S3(sp) - ld x20, PT_S4(sp) - ld x21, PT_S5(sp) - ld x22, PT_S6(sp) - ld x23, PT_S7(sp) - ld x24, PT_S8(sp) - ld x25, PT_S9(sp) - ld x26, PT_S10(sp) - ld x27, PT_S11(sp) - ld x28, PT_T3(sp) - ld x29, PT_T4(sp) - ld x30, PT_T5(sp) - ld x31, PT_T6(sp) + REG_L x1, PT_RA(sp) + addi sp, sp, PT_SIZE_ON_STACK + REG_S x1, (sp) + addi sp, sp, -PT_SIZE_ON_STACK + REG_L x1, PT_EPC(sp) + REG_L x2, PT_SP(sp) + REG_L x3, PT_GP(sp) + REG_L x4, PT_TP(sp) + REG_L x5, PT_T0(sp) + REG_L x6, PT_T1(sp) + REG_L x7, PT_T2(sp) + REG_L x8, PT_S0(sp) + REG_L x9, PT_S1(sp) + REG_L x10, PT_A0(sp) + REG_L x11, PT_A1(sp) + REG_L x12, PT_A2(sp) + REG_L x13, PT_A3(sp) + REG_L x14, PT_A4(sp) + REG_L x15, PT_A5(sp) + REG_L x16, PT_A6(sp) + REG_L x17, PT_A7(sp) + REG_L x18, PT_S2(sp) + REG_L x19, PT_S3(sp) + REG_L x20, PT_S4(sp) + REG_L x21, PT_S5(sp) + REG_L x22, PT_S6(sp) + REG_L x23, PT_S7(sp) + REG_L x24, PT_S8(sp) + REG_L x25, PT_S9(sp) + REG_L x26, PT_S10(sp) + REG_L x27, PT_S11(sp) + REG_L x28, PT_T3(sp) + REG_L x29, PT_T4(sp) + REG_L x30, PT_T5(sp) + REG_L x31, PT_T6(sp) - ld s0, (PT_SIZE_ON_STACK)(sp) - ld ra, (PT_SIZE_ON_STACK+8)(sp) - addi sp, sp, (PT_SIZE_ON_STACK+16) + addi sp, sp, PT_SIZE_ON_STACK + addi sp, sp, SZREG .endm +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ - .macro RESTORE_GRAPH_REG_ARGS - ld a0, PT_T0(sp) - ld a1, PT_T1(sp) -#ifdef HAVE_FUNCTION_GRAPH_FP_TEST - ld a2, PT_T2(sp) -#endif - .endm +ENTRY(ftrace_caller) + SAVE_ABI -/* - * Most of the contents are the same as ftrace_caller. - */ -ENTRY(ftrace_regs_caller) - /* - * a3: the address of all registers in the stack - */ - ld a1, -8(s0) - addi a0, ra, -MCOUNT_INSN_SIZE - la t5, function_trace_op - ld a2, 0(t5) - addi a3, sp, -(PT_SIZE_ON_STACK+16) + addi a0, ra, -FENTRY_RA_OFFSET + la a1, function_trace_op + REG_L a2, 0(a1) + REG_L a1, ABI_SIZE_ON_STACK(sp) + mv a3, sp + +ftrace_call: + .global ftrace_call + call ftrace_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER - addi t0, s0, -8 - mv t1, a0 + addi a0, sp, ABI_SIZE_ON_STACK + REG_L a1, ABI_RA(sp) + addi a1, a1, -FENTRY_RA_OFFSET #ifdef HAVE_FUNCTION_GRAPH_FP_TEST - ld t2, -16(s0) + mv a2, s0 #endif +ftrace_graph_call: + .global ftrace_graph_call + call ftrace_stub #endif + RESTORE_ABI + ret +ENDPROC(ftrace_caller) + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +ENTRY(ftrace_regs_caller) SAVE_ALL + addi a0, ra, -FENTRY_RA_OFFSET + la a1, function_trace_op + REG_L a2, 0(a1) + REG_L a1, PT_SIZE_ON_STACK(sp) + mv a3, sp + ftrace_regs_call: .global ftrace_regs_call call ftrace_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER - RESTORE_GRAPH_REG_ARGS - call ftrace_graph_caller + addi a0, sp, PT_RA + REG_L a1, PT_EPC(sp) + addi a1, a1, -FENTRY_RA_OFFSET +#ifdef HAVE_FUNCTION_GRAPH_FP_TEST + mv a2, s0 +#endif +ftrace_graph_regs_call: + .global ftrace_graph_regs_call + call ftrace_stub #endif RESTORE_ALL diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c index 3fe7a5296aa5..0b552873a577 100644 --- a/arch/riscv/kernel/patch.c +++ b/arch/riscv/kernel/patch.c @@ -20,7 +20,12 @@ struct patch_insn { }; #ifdef CONFIG_MMU -static void *patch_map(void *addr, int fixmap) +/* + * The fix_to_virt(, idx) needs a const value (not a dynamic variable of + * reg-a0) or BUILD_BUG_ON failed with "idx >= __end_of_fixed_addresses". + * So use '__always_inline' and 'const unsigned int fixmap' here. 
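(The comment above is worth unpacking: fix_to_virt() embeds BUILD_BUG_ON(idx >= __end_of_fixed_addresses), which only compiles away when idx folds to a compile-time constant. A stand-alone illustration of the same pattern, assumptions and all:)

	/* Illustrative only: reject call sites that pass a runtime value. */
	static __always_inline void needs_constant_index(const unsigned int idx)
	{
		BUILD_BUG_ON(!__builtin_constant_p(idx));
	}

With __always_inline and a const parameter, each call site is folded separately, so a literal fixmap index passes the check while a value held in a register would not.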
+ */ +static __always_inline void *patch_map(void *addr, const unsigned int fixmap) { uintptr_t uintaddr = (uintptr_t) addr; struct page *page; @@ -37,7 +42,6 @@ static void *patch_map(void *addr, int fixmap) return (void *)set_fixmap_offset(fixmap, page_to_phys(page) + (uintaddr & ~PAGE_MASK)); } -NOKPROBE_SYMBOL(patch_map); static void patch_unmap(int fixmap) { diff --git a/arch/riscv/kernel/probes/Makefile b/arch/riscv/kernel/probes/Makefile new file mode 100644 index 000000000000..7f0840dcc31b --- /dev/null +++ b/arch/riscv/kernel/probes/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o +obj-$(CONFIG_KPROBES) += kprobes_trampoline.o +obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o +obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o +CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE) diff --git a/arch/riscv/kernel/probes/decode-insn.c b/arch/riscv/kernel/probes/decode-insn.c new file mode 100644 index 000000000000..0ed043acc882 --- /dev/null +++ b/arch/riscv/kernel/probes/decode-insn.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/kernel.h> +#include <linux/kprobes.h> +#include <linux/module.h> +#include <linux/kallsyms.h> +#include <asm/sections.h> + +#include "decode-insn.h" +#include "simulate-insn.h" + +/* Return: + * INSN_REJECTED If instruction is one not allowed to kprobe, + * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. + */ +enum probe_insn __kprobes +riscv_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api) +{ + probe_opcode_t insn = *addr; + + /* + * Reject instructions list: + */ + RISCV_INSN_REJECTED(system, insn); + RISCV_INSN_REJECTED(fence, insn); + + /* + * Simulate instructions list: + * TODO: the REJECTED ones below need to be implemented + */ +#ifdef CONFIG_RISCV_ISA_C + RISCV_INSN_REJECTED(c_j, insn); + RISCV_INSN_REJECTED(c_jr, insn); + RISCV_INSN_REJECTED(c_jal, insn); + RISCV_INSN_REJECTED(c_jalr, insn); + RISCV_INSN_REJECTED(c_beqz, insn); + RISCV_INSN_REJECTED(c_bnez, insn); + RISCV_INSN_REJECTED(c_ebreak, insn); +#endif + + RISCV_INSN_REJECTED(auipc, insn); + RISCV_INSN_REJECTED(branch, insn); + + RISCV_INSN_SET_SIMULATE(jal, insn); + RISCV_INSN_SET_SIMULATE(jalr, insn); + + return INSN_GOOD; +} diff --git a/arch/riscv/kernel/probes/decode-insn.h b/arch/riscv/kernel/probes/decode-insn.h new file mode 100644 index 000000000000..42269a7d676d --- /dev/null +++ b/arch/riscv/kernel/probes/decode-insn.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _RISCV_KERNEL_KPROBES_DECODE_INSN_H +#define _RISCV_KERNEL_KPROBES_DECODE_INSN_H + +#include <asm/sections.h> +#include <asm/kprobes.h> + +enum probe_insn { + INSN_REJECTED, + INSN_GOOD_NO_SLOT, + INSN_GOOD, +}; + +enum probe_insn __kprobes +riscv_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi); + +#endif /* _RISCV_KERNEL_KPROBES_DECODE_INSN_H */ diff --git a/arch/riscv/kernel/probes/ftrace.c b/arch/riscv/kernel/probes/ftrace.c new file mode 100644 index 000000000000..e6372490aa0b --- /dev/null +++ b/arch/riscv/kernel/probes/ftrace.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/kprobes.h> + +/* Ftrace callback handler for kprobes -- called with preemption disabled */ +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *regs) +{ + struct kprobe *p; + struct kprobe_ctlblk *kcb; + + p = get_kprobe((kprobe_opcode_t *)ip); + if 
(unlikely(!p) || kprobe_disabled(p)) + return; + + kcb = get_kprobe_ctlblk(); + if (kprobe_running()) { + kprobes_inc_nmissed_count(p); + } else { + unsigned long orig_ip = instruction_pointer(&(regs->regs)); + + instruction_pointer_set(&(regs->regs), ip); + + __this_cpu_write(current_kprobe, p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + if (!p->pre_handler || !p->pre_handler(p, &(regs->regs))) { + /* + * Emulate singlestep (and also recover regs->pc) + * as if there is a nop + */ + instruction_pointer_set(&(regs->regs), + (unsigned long)p->addr + MCOUNT_INSN_SIZE); + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, &(regs->regs), 0); + } + instruction_pointer_set(&(regs->regs), orig_ip); + } + + /* + * If pre_handler returns !0, it changes regs->pc. We have to + * skip emulating post_handler. + */ + __this_cpu_write(current_kprobe, NULL); + } +} +NOKPROBE_SYMBOL(kprobe_ftrace_handler); + +int arch_prepare_kprobe_ftrace(struct kprobe *p) +{ + p->ainsn.api.insn = NULL; + return 0; +} diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c new file mode 100644 index 000000000000..a2ec18662fee --- /dev/null +++ b/arch/riscv/kernel/probes/kprobes.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/kprobes.h> +#include <linux/extable.h> +#include <linux/slab.h> +#include <linux/stop_machine.h> +#include <asm/ptrace.h> +#include <linux/uaccess.h> +#include <asm/sections.h> +#include <asm/cacheflush.h> +#include <asm/bug.h> +#include <asm/patch.h> + +#include "decode-insn.h" + +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +static void __kprobes +post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); + +static void __kprobes arch_prepare_ss_slot(struct kprobe *p) +{ + unsigned long offset = GET_INSN_LENGTH(p->opcode); + + p->ainsn.api.restore = (unsigned long)p->addr + offset; + + patch_text(p->ainsn.api.insn, p->opcode); + patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset), + __BUG_INSN_32); +} + +static void __kprobes arch_prepare_simulate(struct kprobe *p) +{ + p->ainsn.api.restore = 0; +} + +static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (p->ainsn.api.handler) + p->ainsn.api.handler((u32)p->opcode, + (unsigned long)p->addr, regs); + + post_kprobe_handler(kcb, regs); +} + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + unsigned long probe_addr = (unsigned long)p->addr; + + if (probe_addr & 0x1) { + pr_warn("Address not aligned.\n"); + + return -EINVAL; + } + + /* copy instruction */ + p->opcode = *p->addr; + + /* decode instruction */ + switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) { + case INSN_REJECTED: /* insn not supported */ + return -EINVAL; + + case INSN_GOOD_NO_SLOT: /* insn need simulation */ + p->ainsn.api.insn = NULL; + break; + + case INSN_GOOD: /* instruction uses slot */ + p->ainsn.api.insn = get_insn_slot(); + if (!p->ainsn.api.insn) + return -ENOMEM; + break; + } + + /* prepare the instruction */ + if (p->ainsn.api.insn) + arch_prepare_ss_slot(p); + else + arch_prepare_simulate(p); + + return 0; +} + +/* install breakpoint in text */ +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + if ((p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) + patch_text(p->addr, __BUG_INSN_32); + else + patch_text(p->addr, __BUG_INSN_16); +} + +/* remove breakpoint from text */ +void 
__kprobes arch_disarm_kprobe(struct kprobe *p) +{ + patch_text(p->addr, p->opcode); +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ +} + +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; +} + +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); + kcb->kprobe_status = kcb->prev_kprobe.status; +} + +static void __kprobes set_current_kprobe(struct kprobe *p) +{ + __this_cpu_write(current_kprobe, p); +} + +/* + * Interrupts need to be disabled before single-step mode is set, and not + * re-enabled until after single-step mode ends. + * Without disabling interrupts on the local CPU, an interrupt could occur + * between the exception return and the start of the out-of-line single-step, + * which would result in wrongly single-stepping into the interrupt handler. + */ +static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, + struct pt_regs *regs) +{ + kcb->saved_status = regs->status; + regs->status &= ~SR_SPIE; +} + +static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, + struct pt_regs *regs) +{ + regs->status = kcb->saved_status; +} + +static void __kprobes +set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p) +{ + unsigned long offset = GET_INSN_LENGTH(p->opcode); + + kcb->ss_ctx.ss_pending = true; + kcb->ss_ctx.match_addr = addr + offset; +} + +static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb) +{ + kcb->ss_ctx.ss_pending = false; + kcb->ss_ctx.match_addr = 0; +} + +static void __kprobes setup_singlestep(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb, int reenter) +{ + unsigned long slot; + + if (reenter) { + save_previous_kprobe(kcb); + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_REENTER; + } else { + kcb->kprobe_status = KPROBE_HIT_SS; + } + + if (p->ainsn.api.insn) { + /* prepare for single stepping */ + slot = (unsigned long)p->ainsn.api.insn; + + set_ss_context(kcb, slot, p); /* mark pending ss */ + + /* IRQs and single stepping do not mix well. */ + kprobes_save_local_irqflag(kcb, regs); + + instruction_pointer_set(regs, slot); + } else { + /* insn simulation */ + arch_simulate_insn(p, regs); + } +} + +static int __kprobes reenter_kprobe(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + switch (kcb->kprobe_status) { + case KPROBE_HIT_SSDONE: + case KPROBE_HIT_ACTIVE: + kprobes_inc_nmissed_count(p); + setup_singlestep(p, regs, kcb, 1); + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: + pr_warn("Unrecoverable kprobe detected.\n"); + dump_kprobe(p); + BUG(); + break; + default: + WARN_ON(1); + return 0; + } + + return 1; +} + +static void __kprobes +post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + + if (!cur) + return; + + /* restore the return address if the insn was non-branching */ + if (cur->ainsn.api.restore != 0) + regs->epc = cur->ainsn.api.restore; + + /* restore the previously saved kprobe variables and continue */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + return; + } + + /* call post handler */ + kcb->kprobe_status = KPROBE_HIT_SSDONE; + if (cur->post_handler) { + /* The post_handler can hit a breakpoint and single-step + * again, so we enable the D-flag for the recursive exception. + */
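The slot handling above leans on GET_INSN_LENGTH() to cope with compressed instructions. The underlying encoding rule is simple, sketched here (ignoring the reserved longer-than-32-bit encodings):

	/* RISC-V: if the two low opcode bits are 11 the instruction is 32-bit;
	 * anything else is a 16-bit RVC instruction. */
	static inline unsigned long insn_length(u32 insn)
	{
		return (insn & 0x3) == 0x3 ? 4 : 2;
	}

This is why arch_prepare_ss_slot() places the trapping __BUG_INSN_32 at insn + offset rather than at a fixed +4.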
+ cur->post_handler(cur, regs, 0); + } + + reset_current_kprobe(); +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + switch (kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe, point the ip back to the probe address, + * and allow the page fault handler to continue as + * for a normal page fault. + */ + regs->epc = (unsigned long) cur->addr; + if (!instruction_pointer(regs)) + BUG(); + + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); + + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting; + * we could also use the npre/npostfault counts to + * account for these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because an instruction in the pre/post + * handler caused the page fault; this can happen + * if the handler tries to access user space via + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ + if (fixup_exception(regs)) + return 1; + } + return 0; +} + +bool __kprobes +kprobe_breakpoint_handler(struct pt_regs *regs) +{ + struct kprobe *p, *cur_kprobe; + struct kprobe_ctlblk *kcb; + unsigned long addr = instruction_pointer(regs); + + kcb = get_kprobe_ctlblk(); + cur_kprobe = kprobe_running(); + + p = get_kprobe((kprobe_opcode_t *) addr); + + if (p) { + if (cur_kprobe) { + if (reenter_kprobe(p, regs, kcb)) + return true; + } else { + /* Probe hit */ + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + /* + * If we have no pre-handler or it returned 0, we + * continue with normal processing. If we have a + * pre-handler and it returned non-zero, it has + * modified the execution path and there is no need + * for single stepping. Just reset the current kprobe + * and exit. + * + * The pre_handler can itself hit a breakpoint and + * single step through before returning. + */ + if (!p->pre_handler || !p->pre_handler(p, regs)) + setup_singlestep(p, regs, kcb, 0); + else + reset_current_kprobe(); + } + return true; + } + + /* + * The breakpoint instruction was removed right + * after we hit it. Another CPU has removed + * either a probepoint or a debugger breakpoint + * at this address. In either case, no further + * handling of this interrupt is appropriate. + * Return to the original instruction and continue. + */ + return false; +} + +bool __kprobes +kprobe_single_step_handler(struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if ((kcb->ss_ctx.ss_pending) + && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) { + clear_ss_context(kcb); /* clear pending ss */ + + kprobes_restore_local_irqflag(kcb, regs); + + post_kprobe_handler(kcb, regs); + return true; + } + return false; +} + +/* + * Provide a blacklist of symbols identifying ranges which cannot be kprobed. + * This blacklist is exposed to userspace via debugfs (kprobes/blacklist). 
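Beyond this range-based blacklist, individual functions can opt out of probing one by one; a hedged example of the per-symbol mechanism (the function name is hypothetical):

	#include <linux/kprobes.h>

	static void fragile_helper(void)
	{
		/* code that must never recurse into the kprobe handlers */
	}
	NOKPROBE_SYMBOL(fragile_helper);

Annotating the definition with __kprobes achieves the same end by placing the function in the non-probeable .kprobes.text section.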
+ */ +int __init arch_populate_kprobe_blacklist(void) +{ + int ret; + + ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, + (unsigned long)__irqentry_text_end); + return ret; +} + +void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) +{ + return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); +} + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *)regs->ra; + ri->fp = NULL; + regs->ra = (unsigned long) &kretprobe_trampoline; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} + +int __init arch_init_kprobes(void) +{ + return 0; +} diff --git a/arch/riscv/kernel/probes/kprobes_trampoline.S b/arch/riscv/kernel/probes/kprobes_trampoline.S new file mode 100644 index 000000000000..6e85d021e2a2 --- /dev/null +++ b/arch/riscv/kernel/probes/kprobes_trampoline.S @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Author: Patrick Stählin <me@packi.ch> + */ +#include <linux/linkage.h> + +#include <asm/asm.h> +#include <asm/asm-offsets.h> + + .text + .altmacro + + .macro save_all_base_regs + REG_S x1, PT_RA(sp) + REG_S x3, PT_GP(sp) + REG_S x4, PT_TP(sp) + REG_S x5, PT_T0(sp) + REG_S x6, PT_T1(sp) + REG_S x7, PT_T2(sp) + REG_S x8, PT_S0(sp) + REG_S x9, PT_S1(sp) + REG_S x10, PT_A0(sp) + REG_S x11, PT_A1(sp) + REG_S x12, PT_A2(sp) + REG_S x13, PT_A3(sp) + REG_S x14, PT_A4(sp) + REG_S x15, PT_A5(sp) + REG_S x16, PT_A6(sp) + REG_S x17, PT_A7(sp) + REG_S x18, PT_S2(sp) + REG_S x19, PT_S3(sp) + REG_S x20, PT_S4(sp) + REG_S x21, PT_S5(sp) + REG_S x22, PT_S6(sp) + REG_S x23, PT_S7(sp) + REG_S x24, PT_S8(sp) + REG_S x25, PT_S9(sp) + REG_S x26, PT_S10(sp) + REG_S x27, PT_S11(sp) + REG_S x28, PT_T3(sp) + REG_S x29, PT_T4(sp) + REG_S x30, PT_T5(sp) + REG_S x31, PT_T6(sp) + .endm + + .macro restore_all_base_regs + REG_L x3, PT_GP(sp) + REG_L x4, PT_TP(sp) + REG_L x5, PT_T0(sp) + REG_L x6, PT_T1(sp) + REG_L x7, PT_T2(sp) + REG_L x8, PT_S0(sp) + REG_L x9, PT_S1(sp) + REG_L x10, PT_A0(sp) + REG_L x11, PT_A1(sp) + REG_L x12, PT_A2(sp) + REG_L x13, PT_A3(sp) + REG_L x14, PT_A4(sp) + REG_L x15, PT_A5(sp) + REG_L x16, PT_A6(sp) + REG_L x17, PT_A7(sp) + REG_L x18, PT_S2(sp) + REG_L x19, PT_S3(sp) + REG_L x20, PT_S4(sp) + REG_L x21, PT_S5(sp) + REG_L x22, PT_S6(sp) + REG_L x23, PT_S7(sp) + REG_L x24, PT_S8(sp) + REG_L x25, PT_S9(sp) + REG_L x26, PT_S10(sp) + REG_L x27, PT_S11(sp) + REG_L x28, PT_T3(sp) + REG_L x29, PT_T4(sp) + REG_L x30, PT_T5(sp) + REG_L x31, PT_T6(sp) + .endm + +ENTRY(kretprobe_trampoline) + addi sp, sp, -(PT_SIZE_ON_STACK) + save_all_base_regs + + move a0, sp /* pt_regs */ + + call trampoline_probe_handler + + /* use the result as the return-address */ + move ra, a0 + + restore_all_base_regs + addi sp, sp, PT_SIZE_ON_STACK + + ret +ENDPROC(kretprobe_trampoline) diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c new file mode 100644 index 000000000000..2519ce26377d --- /dev/null +++ b/arch/riscv/kernel/probes/simulate-insn.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/bitops.h> +#include <linux/kernel.h> +#include <linux/kprobes.h> + +#include "decode-insn.h" +#include "simulate-insn.h" + +static inline bool rv_insn_reg_get_val(struct pt_regs *regs, u32 index, + unsigned long *ptr) +{ + if (index == 0) + *ptr = 0; + else if (index <= 31) + *ptr = *((unsigned long *)regs + index); + else + return false; + + return true; +} + +static inline bool 
rv_insn_reg_set_val(struct pt_regs *regs, u32 index, + unsigned long val) +{ + if (index == 0) + return false; + else if (index <= 31) + *((unsigned long *)regs + index) = val; + else + return false; + + return true; +} + +bool __kprobes simulate_jal(u32 opcode, unsigned long addr, struct pt_regs *regs) +{ + /* + * 31 30 21 20 19 12 11 7 6 0 + * imm [20] | imm[10:1] | imm[11] | imm[19:12] | rd | opcode + * 1 10 1 8 5 JAL/J + */ + bool ret; + u32 imm; + u32 index = (opcode >> 7) & 0x1f; + + ret = rv_insn_reg_set_val(regs, index, addr + 4); + if (!ret) + return ret; + + imm = ((opcode >> 21) & 0x3ff) << 1; + imm |= ((opcode >> 20) & 0x1) << 11; + imm |= ((opcode >> 12) & 0xff) << 12; + imm |= ((opcode >> 31) & 0x1) << 20; + + instruction_pointer_set(regs, addr + sign_extend32((imm), 20)); + + return ret; +} + +bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs) +{ + /* + * 31 20 19 15 14 12 11 7 6 0 + * offset[11:0] | rs1 | 010 | rd | opcode + * 12 5 3 5 JALR/JR + */ + bool ret; + unsigned long base_addr; + u32 imm = (opcode >> 20) & 0xfff; + u32 rd_index = (opcode >> 7) & 0x1f; + u32 rs1_index = (opcode >> 15) & 0x1f; + + ret = rv_insn_reg_set_val(regs, rd_index, addr + 4); + if (!ret) + return ret; + + ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr); + if (!ret) + return ret; + + instruction_pointer_set(regs, (base_addr + sign_extend32((imm), 11))&~1); + + return ret; +} diff --git a/arch/riscv/kernel/probes/simulate-insn.h b/arch/riscv/kernel/probes/simulate-insn.h new file mode 100644 index 000000000000..cb6ff7dccb92 --- /dev/null +++ b/arch/riscv/kernel/probes/simulate-insn.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _RISCV_KERNEL_PROBES_SIMULATE_INSN_H +#define _RISCV_KERNEL_PROBES_SIMULATE_INSN_H + +#define __RISCV_INSN_FUNCS(name, mask, val) \ +static __always_inline bool riscv_insn_is_##name(probe_opcode_t code) \ +{ \ + BUILD_BUG_ON(~(mask) & (val)); \ + return (code & (mask)) == (val); \ +} \ +bool simulate_##name(u32 opcode, unsigned long addr, \ + struct pt_regs *regs) + +#define RISCV_INSN_REJECTED(name, code) \ + do { \ + if (riscv_insn_is_##name(code)) { \ + return INSN_REJECTED; \ + } \ + } while (0) + +__RISCV_INSN_FUNCS(system, 0x7f, 0x73); +__RISCV_INSN_FUNCS(fence, 0x7f, 0x0f); + +#define RISCV_INSN_SET_SIMULATE(name, code) \ + do { \ + if (riscv_insn_is_##name(code)) { \ + api->handler = simulate_##name; \ + return INSN_GOOD_NO_SLOT; \ + } \ + } while (0) + +__RISCV_INSN_FUNCS(c_j, 0xe003, 0xa001); +__RISCV_INSN_FUNCS(c_jr, 0xf007, 0x8002); +__RISCV_INSN_FUNCS(c_jal, 0xe003, 0x2001); +__RISCV_INSN_FUNCS(c_jalr, 0xf007, 0x9002); +__RISCV_INSN_FUNCS(c_beqz, 0xe003, 0xc001); +__RISCV_INSN_FUNCS(c_bnez, 0xe003, 0xe001); +__RISCV_INSN_FUNCS(c_ebreak, 0xffff, 0x9002); + +__RISCV_INSN_FUNCS(auipc, 0x7f, 0x17); +__RISCV_INSN_FUNCS(branch, 0x7f, 0x63); + +__RISCV_INSN_FUNCS(jal, 0x7f, 0x6f); +__RISCV_INSN_FUNCS(jalr, 0x707f, 0x67); + +#endif /* _RISCV_KERNEL_PROBES_SIMULATE_INSN_H */ diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c new file mode 100644 index 000000000000..7a057b5f0adc --- /dev/null +++ b/arch/riscv/kernel/probes/uprobes.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/highmem.h> +#include <linux/ptrace.h> +#include <linux/uprobes.h> + +#include "decode-insn.h" + +#define UPROBE_TRAP_NR UINT_MAX + +bool is_swbp_insn(uprobe_opcode_t *insn) +{ +#ifdef CONFIG_RISCV_ISA_C + return (*insn & 0xffff) == UPROBE_SWBP_INSN; +#else + return 
*insn == UPROBE_SWBP_INSN; +#endif +} + +unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) +{ + return instruction_pointer(regs); +} + +int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, + unsigned long addr) +{ + probe_opcode_t opcode; + + opcode = *(probe_opcode_t *)(&auprobe->insn[0]); + + auprobe->insn_size = GET_INSN_LENGTH(opcode); + + switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) { + case INSN_REJECTED: + return -EINVAL; + + case INSN_GOOD_NO_SLOT: + auprobe->simulate = true; + break; + + case INSN_GOOD: + auprobe->simulate = false; + break; + + default: + return -EINVAL; + } + + return 0; +} + +int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + utask->autask.saved_cause = current->thread.bad_cause; + current->thread.bad_cause = UPROBE_TRAP_NR; + + instruction_pointer_set(regs, utask->xol_vaddr); + + regs->status &= ~SR_SPIE; + + return 0; +} + +int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR); + + instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size); + + regs->status |= SR_SPIE; + + return 0; +} + +bool arch_uprobe_xol_was_trapped(struct task_struct *t) +{ + if (t->thread.bad_cause != UPROBE_TRAP_NR) + return true; + + return false; +} + +bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + probe_opcode_t insn; + unsigned long addr; + + if (!auprobe->simulate) + return false; + + insn = *(probe_opcode_t *)(&auprobe->insn[0]); + addr = instruction_pointer(regs); + + if (auprobe->api.handler) + auprobe->api.handler(insn, addr, regs); + + return true; +} + +void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* + * Task has received a fatal signal, so reset back to the probed + * address. + */ + instruction_pointer_set(regs, utask->vaddr); + + regs->status &= ~SR_SPIE; +} + +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, + struct pt_regs *regs) +{ + if (ctx == RP_CHECK_CHAIN_CALL) + return regs->sp <= ret->stack; + else + return regs->sp < ret->stack; +} + +unsigned long +arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, + struct pt_regs *regs) +{ + unsigned long ra; + + ra = regs->ra; + + regs->ra = trampoline_vaddr; + + return ra; +} + +int arch_uprobe_exception_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + return NOTIFY_DONE; +} + +bool uprobe_breakpoint_handler(struct pt_regs *regs) +{ + if (uprobe_pre_sstep_notifier(regs)) + return true; + + return false; +} + +bool uprobe_single_step_handler(struct pt_regs *regs) +{ + if (uprobe_post_sstep_notifier(regs)) + return true; + + return false; +} + +void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + /* Initialize the slot */ + void *kaddr = kmap_atomic(page); + void *dst = kaddr + (vaddr & ~PAGE_MASK); + + memcpy(dst, src, len); + + /* Append an ebreak after the opcode to simulate a single step */ + if (vaddr) { + dst += GET_INSN_LENGTH(*(probe_opcode_t *)src); + *(uprobe_opcode_t *)dst = __BUG_INSN_32; + } + + kunmap_atomic(kaddr); + + /* + * We probably need flush_icache_user_page() but it needs vma. + * This should work on most architectures by default. 
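The resulting slot layout deserves a picture; here is a stand-alone sketch of what arch_uprobe_copy_ixol() above produces (0x00100073 is the 32-bit ebreak encoding):

	/* XOL slot: the probed instruction first, then an ebreak so the task
	 * traps back into the kernel right after single-stepping it. */
	static void build_xol_slot(void *slot, const void *insn, unsigned long len)
	{
		memcpy(slot, insn, len);			/* len is 2 or 4 */
		*(u32 *)((char *)slot + len) = 0x00100073;	/* ebreak */
	}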
If + * architecture needs to do something different it can define + * its own version of the function. + */ + flush_dcache_page(page); +} diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index dd5f985b1f40..6f728e731bed 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -18,13 +18,14 @@ #include <asm/unistd.h> #include <asm/processor.h> #include <asm/csr.h> +#include <asm/stacktrace.h> #include <asm/string.h> #include <asm/switch_to.h> #include <asm/thread_info.h> register unsigned long gp_in_global __asm__("gp"); -#ifdef CONFIG_STACKPROTECTOR +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); @@ -39,11 +40,16 @@ void arch_cpu_idle(void) raw_local_irq_enable(); } -void show_regs(struct pt_regs *regs) +void __show_regs(struct pt_regs *regs) { show_regs_print_info(KERN_DEFAULT); - pr_cont("epc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n", + if (!user_mode(regs)) { + pr_cont("epc : %pS\n", (void *)regs->epc); + pr_cont(" ra : %pS\n", (void *)regs->ra); + } + + pr_cont("epc : " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n", regs->epc, regs->ra, regs->sp); pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n", regs->gp, regs->tp, regs->t0); @@ -69,6 +75,12 @@ void show_regs(struct pt_regs *regs) pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n", regs->status, regs->badaddr, regs->cause); } +void show_regs(struct pt_regs *regs) +{ + __show_regs(regs); + if (!user_mode(regs)) + dump_backtrace(regs, NULL, KERN_DEFAULT); +} void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) @@ -112,7 +124,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct pt_regs *childregs = task_pt_regs(p); /* p->thread holds context to be restored by __switch_to() */ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* Kernel thread */ memset(childregs, 0, sizeof(struct pt_regs)); childregs->gp = gp_in_global; diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 2d6395f5ad54..1a85305720e8 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -114,6 +114,105 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) return &riscv_user_native_view; } +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} +#define REG_OFFSET_END {.name = NULL, .offset = 0} + +static const struct pt_regs_offset regoffset_table[] = { + REG_OFFSET_NAME(epc), + REG_OFFSET_NAME(ra), + REG_OFFSET_NAME(sp), + REG_OFFSET_NAME(gp), + REG_OFFSET_NAME(tp), + REG_OFFSET_NAME(t0), + REG_OFFSET_NAME(t1), + REG_OFFSET_NAME(t2), + REG_OFFSET_NAME(s0), + REG_OFFSET_NAME(s1), + REG_OFFSET_NAME(a0), + REG_OFFSET_NAME(a1), + REG_OFFSET_NAME(a2), + REG_OFFSET_NAME(a3), + REG_OFFSET_NAME(a4), + REG_OFFSET_NAME(a5), + REG_OFFSET_NAME(a6), + REG_OFFSET_NAME(a7), + REG_OFFSET_NAME(s2), + REG_OFFSET_NAME(s3), + REG_OFFSET_NAME(s4), + REG_OFFSET_NAME(s5), + REG_OFFSET_NAME(s6), + REG_OFFSET_NAME(s7), + REG_OFFSET_NAME(s8), + REG_OFFSET_NAME(s9), + REG_OFFSET_NAME(s10), + REG_OFFSET_NAME(s11), + REG_OFFSET_NAME(t3), + REG_OFFSET_NAME(t4), + REG_OFFSET_NAME(t5), + REG_OFFSET_NAME(t6), + REG_OFFSET_NAME(status), + REG_OFFSET_NAME(badaddr), + REG_OFFSET_NAME(cause), + 
REG_OFFSET_NAME(orig_a0), + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return (addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} + void ptrace_disable(struct task_struct *child) { clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index 226ccce0f9e0..f4a7db3d309e 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -351,7 +351,7 @@ static int __sbi_rfence_v02(int fid, const unsigned long *hart_mask, * sbi_set_timer() - Program the timer for next timer event. * @stime_value: The value after which next timer event should fire. * - * Return: None + * Return: None. */ void sbi_set_timer(uint64_t stime_value) { @@ -362,11 +362,11 @@ void sbi_set_timer(uint64_t stime_value) * sbi_send_ipi() - Send an IPI to any hart. * @hart_mask: A cpu mask containing all the target harts. * - * Return: None + * Return: 0 on success, appropriate linux error code otherwise. */ -void sbi_send_ipi(const unsigned long *hart_mask) +int sbi_send_ipi(const unsigned long *hart_mask) { - __sbi_send_ipi(hart_mask); + return __sbi_send_ipi(hart_mask); } EXPORT_SYMBOL(sbi_send_ipi); @@ -374,12 +374,12 @@ EXPORT_SYMBOL(sbi_send_ipi); * sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts. * @hart_mask: A cpu mask containing all the target harts. * - * Return: None + * Return: 0 on success, appropriate linux error code otherwise. */ -void sbi_remote_fence_i(const unsigned long *hart_mask) +int sbi_remote_fence_i(const unsigned long *hart_mask) { - __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I, - hart_mask, 0, 0, 0, 0); + return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I, + hart_mask, 0, 0, 0, 0); } EXPORT_SYMBOL(sbi_remote_fence_i); @@ -390,14 +390,14 @@ EXPORT_SYMBOL(sbi_remote_fence_i); * @start: Start of the virtual address * @size: Total size of the virtual address range. * - * Return: None + * Return: 0 on success, appropriate linux error code otherwise. 
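With the signature changes in this file, callers can finally propagate SBI failures instead of ignoring them. A hypothetical caller might now read:

	static void sync_remote_icache(const unsigned long *hart_mask)
	{
		int ret = sbi_remote_fence_i(hart_mask);	/* was void before */

		if (ret)
			pr_warn("remote fence.i failed: %d\n", ret);
	}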
*/ -void sbi_remote_sfence_vma(const unsigned long *hart_mask, +int sbi_remote_sfence_vma(const unsigned long *hart_mask, unsigned long start, unsigned long size) { - __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA, - hart_mask, start, size, 0, 0); + return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA, + hart_mask, start, size, 0, 0); } EXPORT_SYMBOL(sbi_remote_sfence_vma); @@ -410,15 +410,15 @@ EXPORT_SYMBOL(sbi_remote_sfence_vma); * @size: Total size of the virtual address range. * @asid: The value of address space identifier (ASID). * - * Return: None + * Return: 0 on success, appropriate linux error code otherwise. */ -void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask, +int sbi_remote_sfence_vma_asid(const unsigned long *hart_mask, unsigned long start, unsigned long size, unsigned long asid) { - __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID, - hart_mask, start, size, asid, 0); + return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID, + hart_mask, start, size, asid, 0); } EXPORT_SYMBOL(sbi_remote_sfence_vma_asid); @@ -560,7 +560,7 @@ static struct riscv_ipi_ops sbi_ipi_ops = { .ipi_inject = sbi_send_cpumask_ipi }; -int __init sbi_init(void) +void __init sbi_init(void) { int ret; @@ -600,6 +600,4 @@ int __init sbi_init(void) } riscv_set_ipi_ops(&sbi_ipi_ops); - - return 0; } diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index c7c0655dd45b..e85bacff1b50 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -216,8 +216,15 @@ static void __init init_resources(void) static void __init parse_dtb(void) { /* Early scan of device tree from init memory */ - if (early_init_dt_scan(dtb_early_va)) + if (early_init_dt_scan(dtb_early_va)) { + const char *name = of_flat_dt_get_machine_name(); + + if (name) { + pr_info("Machine model: %s\n", name); + dump_stack_set_arch_desc("%s (DT)", name); + } return; + } pr_err("No DTB passed to the kernel\n"); #ifdef CONFIG_CMDLINE_FORCE @@ -252,9 +259,9 @@ void __init setup_arch(char **cmdline_p) else pr_err("No DTB found in kernel mappings\n"); #endif + misc_mem_init(); - if (IS_ENABLED(CONFIG_RISCV_SBI)) - sbi_init(); + sbi_init(); if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) protect_kernel_text_data(); @@ -275,13 +282,19 @@ void __init setup_arch(char **cmdline_p) static int __init topology_init(void) { - int i; + int i, ret; + + for_each_online_node(i) + register_one_node(i); for_each_possible_cpu(i) { struct cpu *cpu = &per_cpu(cpu_devices, i); cpu->hotpluggable = cpu_has_hotplug(i); - register_cpu(cpu, i); + ret = register_cpu(cpu, i); + if (unlikely(ret)) + pr_warn("Warning: %s: register_cpu %d failed (%d)\n", + __func__, i, ret); } return 0; diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 469aef8ed922..65942b3748b4 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -309,6 +309,9 @@ static void do_signal(struct pt_regs *regs) asmlinkage __visible void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { + if (thread_info_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + /* Handle pending signal delivery */ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 96167d55ed98..5e276c25646f 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -27,6 +27,7 @@ #include <asm/cpu_ops.h> #include <asm/irq.h> #include <asm/mmu_context.h> +#include <asm/numa.h> #include <asm/tlbflush.h> #include <asm/sections.h> 
#include <asm/sbi.h> @@ -45,13 +46,18 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { int cpuid; int ret; + unsigned int curr_cpuid; + + curr_cpuid = smp_processor_id(); + numa_store_cpu_info(curr_cpuid); + numa_add_cpu(curr_cpuid); /* This covers non-smp usecase mandated by "nosmp" option */ if (max_cpus == 0) return; for_each_possible_cpu(cpuid) { - if (cpuid == smp_processor_id()) + if (cpuid == curr_cpuid) continue; if (cpu_ops[cpuid]->cpu_prepare) { ret = cpu_ops[cpuid]->cpu_prepare(cpuid); @@ -59,6 +65,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) continue; } set_cpu_present(cpuid, true); + numa_store_cpu_info(cpuid); } } @@ -79,6 +86,7 @@ void __init setup_smp(void) if (hart == cpuid_to_hartid_map(0)) { BUG_ON(found_boot_cpu); found_boot_cpu = 1; + early_map_cpu_to_node(0, of_node_to_nid(dn)); continue; } if (cpuid >= NR_CPUS) { @@ -88,6 +96,7 @@ void __init setup_smp(void) } cpuid_to_hartid_map(cpuid) = hart; + early_map_cpu_to_node(cpuid, of_node_to_nid(dn)); cpuid++; } @@ -153,6 +162,7 @@ asmlinkage __visible void smp_callin(void) current->active_mm = mm; notify_cpu_starting(curr_cpuid); + numa_add_cpu(curr_cpuid); update_siblings_masks(curr_cpuid); set_cpu_online(curr_cpuid, 1); diff --git a/arch/riscv/kernel/soc.c b/arch/riscv/kernel/soc.c index c7b0a73e382e..a0516172a33c 100644 --- a/arch/riscv/kernel/soc.c +++ b/arch/riscv/kernel/soc.c @@ -26,30 +26,3 @@ void __init soc_early_init(void) } } } - -static bool soc_builtin_dtb_match(unsigned long vendor_id, - unsigned long arch_id, unsigned long imp_id, - const struct soc_builtin_dtb *entry) -{ - return entry->vendor_id == vendor_id && - entry->arch_id == arch_id && - entry->imp_id == imp_id; -} - -void * __init soc_lookup_builtin_dtb(void) -{ - unsigned long vendor_id, arch_id, imp_id; - const struct soc_builtin_dtb *s; - - __asm__ ("csrr %0, mvendorid" : "=r"(vendor_id)); - __asm__ ("csrr %0, marchid" : "=r"(arch_id)); - __asm__ ("csrr %0, mimpid" : "=r"(imp_id)); - - for (s = (void *)&__soc_builtin_dtb_table_start; - (void *)s < (void *)&__soc_builtin_dtb_table_end; s++) { - if (soc_builtin_dtb_match(vendor_id, arch_id, imp_id, s)) - return s->dtb_func(); - } - - return NULL; -} diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index df5d2da7c40b..3f893c9d9d85 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -53,9 +53,15 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, /* Unwind stack frame */ frame = (struct stackframe *)fp - 1; sp = fp; - fp = frame->fp; - pc = ftrace_graph_ret_addr(current, NULL, frame->ra, - (unsigned long *)(fp - 8)); + if (regs && (regs->epc == pc) && (frame->fp & 0x7)) { + fp = frame->ra; + pc = regs->ra; + } else { + fp = frame->fp; + pc = ftrace_graph_ret_addr(current, NULL, frame->ra, + (unsigned long *)(fp - 8)); + } + } } @@ -100,10 +106,16 @@ static bool print_trace_address(void *arg, unsigned long pc) return true; } +void dump_backtrace(struct pt_regs *regs, struct task_struct *task, + const char *loglvl) +{ + pr_cont("%sCall Trace:\n", loglvl); + walk_stackframe(task, regs, print_trace_address, (void *)loglvl); +} + void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { - pr_cont("Call Trace:\n"); - walk_stackframe(task, NULL, print_trace_address, (void *)loglvl); + dump_backtrace(NULL, task, loglvl); } static bool save_wchan(void *arg, unsigned long pc) diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index ad14f4466d92..3ed2c23601a0 
100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -12,10 +12,12 @@ #include <linux/signal.h> #include <linux/kdebug.h> #include <linux/uaccess.h> +#include <linux/kprobes.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/irq.h> +#include <asm/bug.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/csr.h> @@ -66,7 +68,7 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) tsk->comm, task_pid_nr(tsk), signo, code, addr); print_vma_addr(KERN_CONT " in ", instruction_pointer(regs)); pr_cont("\n"); - show_regs(regs); + __show_regs(regs); } force_sig_fault(signo, code, (void __user *)addr); @@ -75,6 +77,8 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) static void do_trap_error(struct pt_regs *regs, int signo, int code, unsigned long addr, const char *str) { + current->thread.bad_cause = regs->cause; + if (user_mode(regs)) { do_trap(regs, signo, code, addr); } else { @@ -145,6 +149,22 @@ static inline unsigned long get_break_insn_length(unsigned long pc) asmlinkage __visible void do_trap_break(struct pt_regs *regs) { +#ifdef CONFIG_KPROBES + if (kprobe_single_step_handler(regs)) + return; + + if (kprobe_breakpoint_handler(regs)) + return; +#endif +#ifdef CONFIG_UPROBES + if (uprobe_single_step_handler(regs)) + return; + + if (uprobe_breakpoint_handler(regs)) + return; +#endif + current->thread.bad_cause = regs->cause; + if (user_mode(regs)) force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc); #ifdef CONFIG_KGDB diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 0cfd6da784f8..71a315e73cbe 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -32,9 +32,10 @@ CPPFLAGS_vdso.lds += -P -C -U$(ARCH) # Disable -pg to prevent insert call site CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os -# Disable gcov profiling for VDSO code +# Disable profiling and instrumentation for VDSO code GCOV_PROFILE := n KCOV_INSTRUMENT := n +KASAN_SANITIZE := n # Force dependency $(obj)/vdso.o: $(obj)/vdso.so diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile index ac6171e9c19e..25d5c9664e57 100644 --- a/arch/riscv/lib/Makefile +++ b/arch/riscv/lib/Makefile @@ -5,3 +5,5 @@ lib-y += memset.o lib-y += memmove.o lib-$(CONFIG_MMU) += uaccess.o lib-$(CONFIG_64BIT) += tishift.o + +obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/riscv/lib/error-inject.c b/arch/riscv/lib/error-inject.c new file mode 100644 index 000000000000..d667ade2bc41 --- /dev/null +++ b/arch/riscv/lib/error-inject.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/error-injection.h> +#include <linux/kprobes.h> + +void override_function_with_return(struct pt_regs *regs) +{ + instruction_pointer_set(regs, regs->ra); +} +NOKPROBE_SYMBOL(override_function_with_return); diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index c0185e556ca5..7ebaef10ea1b 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -2,7 +2,8 @@ CFLAGS_init.o := -mcmodel=medany ifdef CONFIG_FTRACE -CFLAGS_REMOVE_init.o = -pg +CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_cacheflush.o = $(CC_FLAGS_FTRACE) endif KCOV_INSTRUMENT_init.o := n diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c index 613ec81a8979..68aa312fc352 100644 --- a/arch/riscv/mm/context.c +++ b/arch/riscv/mm/context.c @@ -2,13 +2,273 @@ /* * Copyright (C) 2012 Regents of the University of California * Copyright (C) 2017 
SiFive + * Copyright (C) 2021 Western Digital Corporation or its affiliates. */ +#include <linux/bitops.h> +#include <linux/cpumask.h> #include <linux/mm.h> +#include <linux/percpu.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/static_key.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> +#ifdef CONFIG_MMU + +static DEFINE_STATIC_KEY_FALSE(use_asid_allocator); + +static unsigned long asid_bits; +static unsigned long num_asids; +static unsigned long asid_mask; + +static atomic_long_t current_version; + +static DEFINE_RAW_SPINLOCK(context_lock); +static cpumask_t context_tlb_flush_pending; +static unsigned long *context_asid_map; + +static DEFINE_PER_CPU(atomic_long_t, active_context); +static DEFINE_PER_CPU(unsigned long, reserved_context); + +static bool check_update_reserved_context(unsigned long cntx, + unsigned long newcntx) +{ + int cpu; + bool hit = false; + + /* + * Iterate over the set of reserved CONTEXT looking for a match. + * If we find one, then we can update our mm to use new CONTEXT + * (i.e. the same CONTEXT in the current_version) but we can't + * exit the loop early, since we need to ensure that all copies + * of the old CONTEXT are updated to reflect the mm. Failure to do + * so could result in us missing the reserved CONTEXT in a future + * version. + */ + for_each_possible_cpu(cpu) { + if (per_cpu(reserved_context, cpu) == cntx) { + hit = true; + per_cpu(reserved_context, cpu) = newcntx; + } + } + + return hit; +} + +static void __flush_context(void) +{ + int i; + unsigned long cntx; + + /* Must be called with context_lock held */ + lockdep_assert_held(&context_lock); + + /* Update the list of reserved ASIDs and the ASID bitmap. */ + bitmap_clear(context_asid_map, 0, num_asids); + + /* Mark already active ASIDs as used */ + for_each_possible_cpu(i) { + cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0); + /* + * If this CPU has already been through a rollover, but + * hasn't run another task in the meantime, we must preserve + * its reserved CONTEXT, as this is the only trace we have of + * the process it is still running. + */ + if (cntx == 0) + cntx = per_cpu(reserved_context, i); + + __set_bit(cntx & asid_mask, context_asid_map); + per_cpu(reserved_context, i) = cntx; + } + + /* Mark ASID #0 as used because it is used at boot-time */ + __set_bit(0, context_asid_map); + + /* Queue a TLB invalidation for each CPU on next context-switch */ + cpumask_setall(&context_tlb_flush_pending); +} + +static unsigned long __new_context(struct mm_struct *mm) +{ + static u32 cur_idx = 1; + unsigned long cntx = atomic_long_read(&mm->context.id); + unsigned long asid, ver = atomic_long_read(&current_version); + + /* Must be called with context_lock held */ + lockdep_assert_held(&context_lock); + + if (cntx != 0) { + unsigned long newcntx = ver | (cntx & asid_mask); + + /* + * If our current CONTEXT was active during a rollover, we + * can continue to use it and this was just a false alarm. + */ + if (check_update_reserved_context(cntx, newcntx)) + return newcntx; + + /* + * We had a valid CONTEXT in a previous life, so try to + * re-use it if possible. + */ + if (!__test_and_set_bit(cntx & asid_mask, context_asid_map)) + return newcntx; + } + + /* + * Allocate a free ASID. If we can't find one then increment + * current_version and flush all ASIDs. 
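Spelled out, the context id packing this allocator uses is a generation counter in the high bits and the hardware ASID in the low asid_bits; a sketch in terms of the file's own asid_mask:

	static inline unsigned long cntx2asid(unsigned long cntx)
	{
		return cntx & asid_mask;	/* hardware ASID */
	}

	static inline unsigned long cntx2version(unsigned long cntx)
	{
		return cntx & ~asid_mask;	/* rollover generation */
	}

A rollover bumps the generation by num_asids, so a stale id can never compare equal to current_version.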
+ */ + asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx); + if (asid != num_asids) + goto set_asid; + + /* We're out of ASIDs, so increment current_version */ + ver = atomic_long_add_return_relaxed(num_asids, &current_version); + + /* Flush everything */ + __flush_context(); + + /* We have more ASIDs than CPUs, so this will always succeed */ + asid = find_next_zero_bit(context_asid_map, num_asids, 1); + +set_asid: + __set_bit(asid, context_asid_map); + cur_idx = asid; + return asid | ver; +} + +static void set_mm_asid(struct mm_struct *mm, unsigned int cpu) +{ + unsigned long flags; + bool need_flush_tlb = false; + unsigned long cntx, old_active_cntx; + + cntx = atomic_long_read(&mm->context.id); + + /* + * If our active_context is non-zero and the context matches the + * current_version, then we update the active_context entry with a + * relaxed cmpxchg. + * + * This is how we handle racing with a concurrent rollover: + * + * - We get a zero back from the cmpxchg and end up waiting on the + * lock. Taking the lock synchronises with the rollover and so + * we are forced to see the updated version. + * + * - We get a valid context back from the cmpxchg, in which case we + * continue using the old ASID, because __flush_context() has + * marked the ASID of active_context as used and we will allocate + * a new context at the next context switch. + */ + old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu)); + if (old_active_cntx && + ((cntx & ~asid_mask) == atomic_long_read(&current_version)) && + atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu), + old_active_cntx, cntx)) + goto switch_mm_fast; + + raw_spin_lock_irqsave(&context_lock, flags); + + /* Check that our ASID belongs to the current_version. */ + cntx = atomic_long_read(&mm->context.id); + if ((cntx & ~asid_mask) != atomic_long_read(&current_version)) { + cntx = __new_context(mm); + atomic_long_set(&mm->context.id, cntx); + } + + if (cpumask_test_and_clear_cpu(cpu, &context_tlb_flush_pending)) + need_flush_tlb = true; + + atomic_long_set(&per_cpu(active_context, cpu), cntx); + + raw_spin_unlock_irqrestore(&context_lock, flags); + +switch_mm_fast: + csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | + ((cntx & asid_mask) << SATP_ASID_SHIFT) | + SATP_MODE); + + if (need_flush_tlb) + local_flush_tlb_all(); +} + +static void set_mm_noasid(struct mm_struct *mm) +{ + /* Switch the page table and blindly nuke the entire local TLB */ + csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | SATP_MODE); + local_flush_tlb_all(); +} + +static inline void set_mm(struct mm_struct *mm, unsigned int cpu) +{ + if (static_branch_unlikely(&use_asid_allocator)) + set_mm_asid(mm, cpu); + else + set_mm_noasid(mm); +} + +static int asids_init(void) +{ + unsigned long old; + + /* Figure out the number of ASID bits in HW */ + old = csr_read(CSR_SATP); + asid_bits = old | (SATP_ASID_MASK << SATP_ASID_SHIFT); + csr_write(CSR_SATP, asid_bits); + asid_bits = (csr_read(CSR_SATP) >> SATP_ASID_SHIFT) & SATP_ASID_MASK; + asid_bits = fls_long(asid_bits); + csr_write(CSR_SATP, old); + + /* + * In the process of determining the number of ASID bits (above) + * we polluted the TLB of the current HART, so do a TLB flush + * to remove the unwanted TLB entries. + */
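For reference, the satp value written on the switch_mm_fast path above is just three OR-ed fields; a sketch (field macros from asm/csr.h, layout per the RV64 privileged spec):

	static unsigned long make_satp(unsigned long pgd_pfn, unsigned long asid)
	{
		/* MODE | ASID | root page table PPN */
		return pgd_pfn | (asid << SATP_ASID_SHIFT) | SATP_MODE;
	}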
+ local_flush_tlb_all(); + + /* Pre-compute ASID details */ + num_asids = 1 << asid_bits; + asid_mask = num_asids - 1; + + /* + * Use the ASID allocator only if the number of HW ASIDs is + * at least twice the number of CPUs + */ + if (num_asids > (2 * num_possible_cpus())) { + atomic_long_set(&current_version, num_asids); + + context_asid_map = kcalloc(BITS_TO_LONGS(num_asids), + sizeof(*context_asid_map), GFP_KERNEL); + if (!context_asid_map) + panic("Failed to allocate bitmap for %lu ASIDs\n", + num_asids); + + __set_bit(0, context_asid_map); + + static_branch_enable(&use_asid_allocator); + + pr_info("ASID allocator using %lu bits (%lu entries)\n", + asid_bits, num_asids); + } else { + pr_info("ASID allocator disabled\n"); + } + + return 0; +} +early_initcall(asids_init); +#else +static inline void set_mm(struct mm_struct *mm, unsigned int cpu) +{ + /* Nothing to do here when there is no MMU */ +} +#endif + /* * When necessary, performs a deferred icache flush for the given MM context, * on the local CPU. RISC-V has no direct mechanism for instruction cache @@ -58,10 +318,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, cpumask_clear_cpu(cpu, mm_cpumask(prev)); cpumask_set_cpu(cpu, mm_cpumask(next)); -#ifdef CONFIG_MMU - csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE); - local_flush_tlb_all(); -#endif + set_mm(next, cpu); flush_icache_deferred(next); } diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 3c8b9e433c67..8f17519208c7 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -13,14 +13,30 @@ #include <linux/perf_event.h> #include <linux/signal.h> #include <linux/uaccess.h> +#include <linux/kprobes.h> #include <asm/ptrace.h> #include <asm/tlbflush.h> #include "../kernel/head.h" +static void die_kernel_fault(const char *msg, unsigned long addr, + struct pt_regs *regs) +{ + bust_spinlocks(1); + + pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", msg, + addr); + + bust_spinlocks(0); + die(regs, "Oops"); + do_exit(SIGKILL); +} + static inline void no_context(struct pt_regs *regs, unsigned long addr) { + const char *msg; + /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) return; @@ -29,12 +45,8 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr) * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ - bust_spinlocks(1); - pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", - (addr < PAGE_SIZE) ? "NULL pointer dereference" : - "paging request", addr); - die(regs, "Oops"); - do_exit(SIGKILL); + msg = (addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request"; + die_kernel_fault(msg, addr, regs); } static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) @@ -202,6 +214,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs) tsk = current; mm = tsk->mm; + if (kprobe_page_fault(regs, cause)) + return; + /* * Fault-in kernel-space virtual memory on-demand. * The 'reference' page table is init_mm.pgd. @@ -225,6 +240,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs) * in an atomic region, then we must not take the fault. 
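The kprobe_page_fault() gate added to do_page_fault() above is generic code; paraphrased (not verbatim) it amounts to:

	static inline bool kprobe_page_fault_sketch(struct pt_regs *regs,
						    unsigned int trap)
	{
		/* Divert only kernel-mode faults taken while a kprobe is
		 * active on this CPU to the arch kprobe_fault_handler(). */
		if (user_mode(regs) || !kprobe_running())
			return false;
		return kprobe_fault_handler(regs, trap);
	}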
*/ if (unlikely(faulthandler_disabled() || !mm)) { + tsk->thread.bad_cause = cause; no_context(regs, addr); return; } @@ -232,6 +248,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs) if (user_mode(regs)) flags |= FAULT_FLAG_USER; + if (!user_mode(regs) && addr < TASK_SIZE && + unlikely(!(regs->status & SR_SUM))) + die_kernel_fault("access to user memory without uaccess routines", + addr, regs); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); if (cause == EXC_STORE_PAGE_FAULT) @@ -242,16 +263,19 @@ retry: mmap_read_lock(mm); vma = find_vma(mm, addr); if (unlikely(!vma)) { + tsk->thread.bad_cause = cause; bad_area(regs, mm, code, addr); return; } if (likely(vma->vm_start <= addr)) goto good_area; if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { + tsk->thread.bad_cause = cause; bad_area(regs, mm, code, addr); return; } if (unlikely(expand_stack(vma, addr))) { + tsk->thread.bad_cause = cause; bad_area(regs, mm, code, addr); return; } @@ -264,6 +288,7 @@ good_area: code = SEGV_ACCERR; if (unlikely(access_error(cause, vma))) { + tsk->thread.bad_cause = cause; bad_area(regs, mm, code, addr); return; } @@ -297,6 +322,7 @@ good_area: mmap_read_unlock(mm); if (unlikely(fault & VM_FAULT_ERROR)) { + tsk->thread.bad_cause = cause; mm_fault_error(regs, addr, fault); return; } diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index f9f9568d689e..067583ab1bd7 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -21,6 +21,7 @@ #include <asm/soc.h> #include <asm/io.h> #include <asm/ptdump.h> +#include <asm/numa.h> #include "../kernel/head.h" @@ -105,85 +106,19 @@ void __init mem_init(void) print_vm_layout(); } -#ifdef CONFIG_BLK_DEV_INITRD -static void __init setup_initrd(void) -{ - phys_addr_t start; - unsigned long size; - - /* Ignore the virtul address computed during device tree parsing */ - initrd_start = initrd_end = 0; - - if (!phys_initrd_size) - return; - /* - * Round the memory region to page boundaries as per free_initrd_mem() - * This allows us to detect whether the pages overlapping the initrd - * are in use, but more importantly, reserves the entire set of pages - * as we don't want these pages allocated for other purposes. 
- */ - start = round_down(phys_initrd_start, PAGE_SIZE); - size = phys_initrd_size + (phys_initrd_start - start); - size = round_up(size, PAGE_SIZE); - - if (!memblock_is_region_memory(start, size)) { - pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region", - (u64)start, size); - goto disable; - } - - if (memblock_is_region_reserved(start, size)) { - pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n", - (u64)start, size); - goto disable; - } - - memblock_reserve(start, size); - /* Now convert initrd to virtual addresses */ - initrd_start = (unsigned long)__va(phys_initrd_start); - initrd_end = initrd_start + phys_initrd_size; - initrd_below_start_ok = 1; - - pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", - (void *)(initrd_start), size); - return; -disable: - pr_cont(" - disabling initrd\n"); - initrd_start = 0; - initrd_end = 0; -} -#endif /* CONFIG_BLK_DEV_INITRD */ - void __init setup_bootmem(void) { - phys_addr_t mem_start = 0; - phys_addr_t start, dram_end, end = 0; phys_addr_t vmlinux_end = __pa_symbol(&_end); phys_addr_t vmlinux_start = __pa_symbol(&_start); + phys_addr_t dram_end = memblock_end_of_DRAM(); phys_addr_t max_mapped_addr = __pa(~(ulong)0); - u64 i; - - /* Find the memory region containing the kernel */ - for_each_mem_range(i, &start, &end) { - phys_addr_t size = end - start; - if (!mem_start) - mem_start = start; - if (start <= vmlinux_start && vmlinux_end <= end) - BUG_ON(size == 0); - } - /* - * The maximal physical memory size is -PAGE_OFFSET. - * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is removed - * as it is unusable by kernel. - */ + /* The maximal physical memory size is -PAGE_OFFSET. */ memblock_enforce_memory_limit(-PAGE_OFFSET); /* Reserve from the start of the kernel to the end of the kernel */ memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); - dram_end = memblock_end_of_DRAM(); - /* * memblock allocator is not aware of the fact that last 4K bytes of * the addressable memory can not be mapped because of IS_ERR_VALUE @@ -198,20 +133,19 @@ void __init setup_bootmem(void) dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); -#ifdef CONFIG_BLK_DEV_INITRD - setup_initrd(); -#endif /* CONFIG_BLK_DEV_INITRD */ - + reserve_initrd_mem(); /* - * Avoid using early_init_fdt_reserve_self() since __pa() does + * If DTB is built in, no need to reserve its memblock. 
+ * Otherwise, do reserve it but avoid using + * early_init_fdt_reserve_self() since __pa() does * not work for DTB pointers that are fixmap addresses */ - memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); + if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) + memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); early_init_fdt_scan_reserved_mem(); dma_contiguous_reserve(dma32_phys_limit); memblock_allow_resize(); - memblock_dump_all(); } #ifdef CONFIG_MMU @@ -226,8 +160,6 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; -#define MAX_EARLY_MAPPING_SIZE SZ_128M - pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) @@ -302,13 +234,7 @@ static void __init create_pte_mapping(pte_t *ptep, pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss; pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; - -#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE -#define NUM_EARLY_PMDS 1UL -#else -#define NUM_EARLY_PMDS (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE) -#endif -pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE); +pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); static pmd_t *__init get_pmd_virt_early(phys_addr_t pa) @@ -330,11 +256,9 @@ static pmd_t *get_pmd_virt_late(phys_addr_t pa) static phys_addr_t __init alloc_pmd_early(uintptr_t va) { - uintptr_t pmd_num; + BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT); - pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT; - BUG_ON(pmd_num >= NUM_EARLY_PMDS); - return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD]; + return (uintptr_t)early_pmd; } static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va) @@ -452,7 +376,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) uintptr_t va, pa, end_va; uintptr_t load_pa = (uintptr_t)(&_start); uintptr_t load_sz = (uintptr_t)(&_end) - load_pa; - uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE); + uintptr_t map_size; #ifndef __PAGETABLE_PMD_FOLDED pmd_t fix_bmap_spmd, fix_bmap_epmd; #endif @@ -464,12 +388,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) * Enforce boot alignment requirements of RV32 and * RV64 by only allowing PMD or PGD mappings. 
*/ - BUG_ON(map_size == PAGE_SIZE); + map_size = PMD_SIZE; /* Sanity check alignment and size */ BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0); BUG_ON((load_pa % map_size) != 0); - BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE); pt_ops.alloc_pte = alloc_pte_early; pt_ops.get_pte_virt = get_pte_virt_early; @@ -511,6 +434,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) /* Setup early PMD for DTB */ create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA, (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE); +#ifndef CONFIG_BUILTIN_DTB /* Create two consecutive PMD mappings for FDT early scan */ pa = dtb_pa & ~(PMD_SIZE - 1); create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA, @@ -518,7 +442,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE, pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL); dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1)); +#else /* CONFIG_BUILTIN_DTB */ + dtb_early_va = __va(dtb_pa); +#endif /* CONFIG_BUILTIN_DTB */ #else +#ifndef CONFIG_BUILTIN_DTB /* Create two consecutive PGD mappings for FDT early scan */ pa = dtb_pa & ~(PGDIR_SIZE - 1); create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA, @@ -526,6 +454,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE, pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL); dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1)); +#else /* CONFIG_BUILTIN_DTB */ + dtb_early_va = __va(dtb_pa); +#endif /* CONFIG_BUILTIN_DTB */ #endif dtb_early_pa = dtb_pa; @@ -616,15 +547,7 @@ static void __init setup_vm_final(void) #else asmlinkage void __init setup_vm(uintptr_t dtb_pa) { -#ifdef CONFIG_BUILTIN_DTB - dtb_early_va = soc_lookup_builtin_dtb(); - if (!dtb_early_va) { - /* Fallback to first available DTS */ - dtb_early_va = (void *) __dtb_start; - } -#else dtb_early_va = (void *)dtb_pa; -#endif dtb_early_pa = dtb_pa; } @@ -665,9 +588,15 @@ void mark_rodata_ro(void) void __init paging_init(void) { setup_vm_final(); - sparse_init(); setup_zero_page(); +} + +void __init misc_mem_init(void) +{ + arch_numa_init(); + sparse_init(); zone_sizes_init(); + memblock_dump_all(); } #ifdef CONFIG_SPARSEMEM_VMEMMAP diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index a8a2ffd9114a..3fc18f469efb 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -9,6 +9,19 @@ #include <linux/pgtable.h> #include <asm/tlbflush.h> #include <asm/fixmap.h> +#include <asm/pgalloc.h> + +static __init void *early_alloc(size_t size, int node) +{ + void *ptr = memblock_alloc_try_nid(size, size, + __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node); + + if (!ptr) + panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n", + __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS)); + + return ptr; +} extern pgd_t early_pg_dir[PTRS_PER_PGD]; asmlinkage void __init kasan_early_init(void) @@ -47,40 +60,133 @@ asmlinkage void __init kasan_early_init(void) local_flush_tlb_all(); } -static void __init populate(void *start, void *end) +static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end) +{ + phys_addr_t phys_addr; + pte_t *ptep, *base_pte; + + if (pmd_none(*pmd)) + base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE); + else + base_pte = (pte_t *)pmd_page_vaddr(*pmd); + + ptep = base_pte + pte_index(vaddr); + + do { + if (pte_none(*ptep)) { + phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), 
						    PAGE_KERNEL));
+		}
+	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
+
+	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
+}
+
+static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
+{
+	phys_addr_t phys_addr;
+	pmd_t *pmdp, *base_pmd;
+	unsigned long next;
+
+	base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
+	if (base_pmd == lm_alias(kasan_early_shadow_pmd))
+		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
+
+	pmdp = base_pmd + pmd_index(vaddr);
+
+	do {
+		next = pmd_addr_end(vaddr, end);
+
+		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
+			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
+			if (phys_addr) {
+				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+				continue;
+			}
+		}
+
+		kasan_populate_pte(pmdp, vaddr, next);
+	} while (pmdp++, vaddr = next, vaddr != end);
+
+	/*
+	 * Wait for the whole PGD to be populated before setting the PGD in
+	 * the page table, otherwise, if we did set the PGD before populating
+	 * it entirely, memblock could allocate a page at a physical address
+	 * where KASAN is not populated yet and then we'd get a page fault.
+	 */
+	set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
+}
+
+static void kasan_populate_pgd(unsigned long vaddr, unsigned long end)
+{
+	phys_addr_t phys_addr;
+	pgd_t *pgdp = pgd_offset_k(vaddr);
+	unsigned long next;
+
+	do {
+		next = pgd_addr_end(vaddr, end);
+
+		/*
+		 * pgdp can't be none since kasan_early_init initialized all KASAN
+		 * shadow region with kasan_early_shadow_pmd: if this is still the case,
+		 * that means we can try to allocate a hugepage as a replacement.
+		 */
+		if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
+		    IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
+			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
+			if (phys_addr) {
+				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+				continue;
+			}
+		}
+
+		kasan_populate_pmd(pgdp, vaddr, next);
+	} while (pgdp++, vaddr = next, vaddr != end);
+}
+
+static void __init kasan_populate(void *start, void *end)
 {
-	unsigned long i, offset;
 	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
 	unsigned long vend = PAGE_ALIGN((unsigned long)end);
-	unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
-	unsigned long n_ptes =
-		((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
-	unsigned long n_pmds =
-		((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;
-	pte_t *pte =
-		memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
-	pmd_t *pmd =
-		memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
-	pgd_t *pgd = pgd_offset_k(vaddr);
 
+	kasan_populate_pgd(vaddr, vend);
 
-	for (i = 0; i < n_pages; i++) {
-		phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
-		set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
-	}
+	local_flush_tlb_all();
+	memset(start, KASAN_SHADOW_INIT, end - start);
+}
 
-	for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
-		set_pmd(&pmd[i],
-			pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
-				__pgprot(_PAGE_TABLE)));
+void __init kasan_shallow_populate(void *start, void *end)
+{
+	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+	unsigned long vend = PAGE_ALIGN((unsigned long)end);
+	unsigned long pfn;
+	int index;
+	void *p;
+	pud_t *pud_dir, *pud_k;
+	pgd_t *pgd_dir, *pgd_k;
+	p4d_t *p4d_dir, *p4d_k;
 
-	for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
-		set_pgd(&pgd[i],
-			pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
-
__pgprot(_PAGE_TABLE))); + while (vaddr < vend) { + index = pgd_index(vaddr); + pfn = csr_read(CSR_SATP) & SATP_PPN; + pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index; + pgd_k = init_mm.pgd + index; + pgd_dir = pgd_offset_k(vaddr); + set_pgd(pgd_dir, *pgd_k); - local_flush_tlb_all(); - memset(start, 0, end - start); + p4d_dir = p4d_offset(pgd_dir, vaddr); + p4d_k = p4d_offset(pgd_k, vaddr); + + vaddr = (vaddr + PUD_SIZE) & PUD_MASK; + pud_dir = pud_offset(p4d_dir, vaddr); + pud_k = pud_offset(p4d_k, vaddr); + + if (pud_present(*pud_dir)) { + p = early_alloc(PAGE_SIZE, NUMA_NO_NODE); + pud_populate(&init_mm, pud_dir, p); + } + vaddr += PAGE_SIZE; + } } void __init kasan_init(void) @@ -90,7 +196,15 @@ void __init kasan_init(void) kasan_populate_early_shadow((void *)KASAN_SHADOW_START, (void *)kasan_mem_to_shadow((void *) - VMALLOC_END)); + VMEMMAP_END)); + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) + kasan_shallow_populate( + (void *)kasan_mem_to_shadow((void *)VMALLOC_START), + (void *)kasan_mem_to_shadow((void *)VMALLOC_END)); + else + kasan_populate_early_shadow( + (void *)kasan_mem_to_shadow((void *)VMALLOC_START), + (void *)kasan_mem_to_shadow((void *)VMALLOC_END)); for_each_mem_range(i, &_start, &_end) { void *start = (void *)__va(_start); @@ -99,7 +213,7 @@ void __init kasan_init(void) if (start >= end) break; - populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end)); + kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end)); }; for (i = 0; i < PTRS_PER_PTE; i++) @@ -108,6 +222,6 @@ void __init kasan_init(void) __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED))); - memset(kasan_early_shadow_page, 0, PAGE_SIZE); + memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); init_task.kasan_depth = 0; } diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 41d6498dcbaa..c1ff874e6c2e 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -58,6 +58,7 @@ config S390 # Note: keep this list sorted alphabetically # imply IMA_SECURE_AND_OR_TRUSTED_BOOT + select ARCH_32BIT_USTAT_F_TINODE select ARCH_BINFMT_ELF_STATE select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_WX @@ -123,11 +124,13 @@ config S390 select GENERIC_ALLOCATOR select GENERIC_CPU_AUTOPROBE select GENERIC_CPU_VULNERABILITIES + select GENERIC_ENTRY select GENERIC_FIND_FIRST_BIT select GENERIC_GETTIMEOFDAY select GENERIC_PTDUMP select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL + select GENERIC_VDSO_TIME_NS select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_JUMP_LABEL @@ -181,6 +184,7 @@ config S390 select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE select HAVE_RSEQ + select HAVE_SOFTIRQ_ON_OWN_STACK select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING select HAVE_VIRT_CPU_ACCOUNTING_IDLE diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug index 6bfaceebbbc0..ef96c25fa921 100644 --- a/arch/s390/Kconfig.debug +++ b/arch/s390/Kconfig.debug @@ -6,10 +6,12 @@ config TRACE_IRQFLAGS_SUPPORT config EARLY_PRINTK def_bool y -config DEBUG_USER_ASCE - bool "Debug User ASCE" +config DEBUG_ENTRY + bool "Debug low-level entry code" + depends on DEBUG_KERNEL help - Check on exit to user space that address space control - elements are setup correctly. + This option enables sanity checks in s390 low-level entry code. + Some of these sanity checks may slow down kernel entries and + exits or otherwise impact performance. If unsure, say N. 
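[Note: the DEBUG_ENTRY option above hooks sanity checks into the generic entry/exit path that s390 adopts in this series via GENERIC_ENTRY. As a minimal sketch of the shape of such a check, assuming the debug_user_asce() helper declared later in this series (illustrative only, not the literal s390 entry code):

	/* Sketch: verify address-space-control state on the way back to user. */
	static __always_inline void check_exit_to_user(void)
	{
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			debug_user_asce(1);	/* 1 = exit-to-user check */
	}

The actual wiring appears in the new asm/entry-common.h further down, where arch_check_user_regs() calls debug_user_asce(0) on entry and arch_exit_to_user_mode() calls debug_user_asce(1) on exit.]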
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 8b94347705e5..02056b024091 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -40,6 +40,7 @@ CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_LIVEPATCH=y +CONFIG_MARCH_ZEC12=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=512 CONFIG_NUMA=y @@ -70,7 +71,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_MODULE_SIG_SHA256=y -CONFIG_UNUSED_SYMBOLS=y CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_WBT=y @@ -176,13 +176,17 @@ CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m CONFIG_NF_CT_NETLINK_TIMEOUT=m CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y CONFIG_NFT_CT=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m CONFIG_NFT_NAT=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -274,6 +278,7 @@ CONFIG_IP_VS_NQ=m CONFIG_IP_VS_FTP=m CONFIG_IP_VS_PE_SIP=m CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m @@ -294,6 +299,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_FIB_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -629,7 +635,6 @@ CONFIG_NTFS_RW=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_INODE64=y CONFIG_HUGETLBFS=y CONFIG_CONFIGFS_FS=m CONFIG_ECRYPT_FS=m @@ -791,6 +796,8 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=y CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y CONFIG_SLUB_DEBUG_ON=y CONFIG_SLUB_STATS=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_VM=y CONFIG_DEBUG_VM_VMACACHE=y @@ -831,7 +838,6 @@ CONFIG_BPF_KPROBE_OVERRIDE=y CONFIG_HIST_TRIGGERS=y CONFIG_FTRACE_STARTUP_TEST=y # CONFIG_EVENT_TRACE_STARTUP_TEST is not set -CONFIG_DEBUG_USER_ASCE=y CONFIG_NOTIFIER_ERROR_INJECTION=m CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m CONFIG_FAULT_INJECTION=y @@ -855,3 +861,4 @@ CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BITOPS=m CONFIG_TEST_BPF=m +CONFIG_DEBUG_ENTRY=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 9db1232e09f4..bac721a501da 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -38,6 +38,7 @@ CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_LIVEPATCH=y +CONFIG_MARCH_ZEC12=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=512 CONFIG_NUMA=y @@ -65,7 +66,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_MODULE_SIG_SHA256=y -CONFIG_UNUSED_SYMBOLS=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_WBT=y CONFIG_BLK_CGROUP_IOLATENCY=y @@ -167,13 +167,17 @@ CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m CONFIG_NF_CT_NETLINK_TIMEOUT=m CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y CONFIG_NFT_CT=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m CONFIG_NFT_NAT=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -265,6 +269,7 @@ CONFIG_IP_VS_NQ=m CONFIG_IP_VS_FTP=m CONFIG_IP_VS_PE_SIP=m CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m @@ -285,6 +290,7 @@ 
CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_FIB_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -617,7 +623,6 @@ CONFIG_NTFS_RW=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_INODE64=y CONFIG_HUGETLBFS=y CONFIG_CONFIGFS_FS=m CONFIG_ECRYPT_FS=m @@ -779,7 +784,6 @@ CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_BPF_KPROBE_OVERRIDE=y CONFIG_HIST_TRIGGERS=y -CONFIG_DEBUG_USER_ASCE=y CONFIG_LKDTM=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 1ef211dae77a..acf982a2ae4c 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -3,11 +3,13 @@ CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y # CONFIG_CPU_ISOLATION is not set # CONFIG_UTS_NS is not set +# CONFIG_TIME_NS is not set # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_COMPAT_BRK is not set +CONFIG_MARCH_ZEC12=y CONFIG_TUNE_ZEC12=y # CONFIG_COMPAT is not set CONFIG_NR_CPUS=2 diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 73044634d342..54c7536f2482 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -21,6 +21,7 @@ #include <crypto/algapi.h> #include <crypto/ghash.h> #include <crypto/internal/aead.h> +#include <crypto/internal/cipher.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/err.h> @@ -1055,3 +1056,4 @@ MODULE_ALIAS_CRYPTO("aes-all"); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index f3caeb17c85b..a279b7d23a5e 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -22,6 +22,7 @@ #include <linux/init.h> #include <linux/mutex.h> #include <linux/spinlock.h> +#include <linux/delay.h> #include <crypto/internal/skcipher.h> #include <crypto/xts.h> #include <asm/cpacf.h> @@ -128,6 +129,9 @@ static inline int __paes_keyblob2pkey(struct key_blob *kb, /* try three times in case of failure */ for (i = 0; i < 3; i++) { + if (i > 0 && ret == -EAGAIN && in_task()) + if (msleep_interruptible(1000)) + return -EINTR; ret = pkey_keyblob2pkey(kb->key, kb->keylen, pk); if (ret == 0) break; @@ -138,10 +142,12 @@ static inline int __paes_keyblob2pkey(struct key_blob *kb, static inline int __paes_convert_key(struct s390_paes_ctx *ctx) { + int ret; struct pkey_protkey pkey; - if (__paes_keyblob2pkey(&ctx->kb, &pkey)) - return -EINVAL; + ret = __paes_keyblob2pkey(&ctx->kb, &pkey); + if (ret) + return ret; spin_lock_bh(&ctx->pk_lock); memcpy(&ctx->pk, &pkey, sizeof(pkey)); @@ -169,10 +175,12 @@ static void ecb_paes_exit(struct crypto_skcipher *tfm) static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx) { + int rc; unsigned long fc; - if (__paes_convert_key(ctx)) - return -EINVAL; + rc = __paes_convert_key(ctx); + if (rc) + return rc; /* Pick the correct function code based on the protected key type */ fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? 
CPACF_KM_PAES_128 : @@ -282,10 +290,12 @@ static void cbc_paes_exit(struct crypto_skcipher *tfm) static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx) { + int rc; unsigned long fc; - if (__paes_convert_key(ctx)) - return -EINVAL; + rc = __paes_convert_key(ctx); + if (rc) + return rc; /* Pick the correct function code based on the protected key type */ fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 : @@ -577,10 +587,12 @@ static void ctr_paes_exit(struct crypto_skcipher *tfm) static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx) { + int rc; unsigned long fc; - if (__paes_convert_key(ctx)) - return -EINVAL; + rc = __paes_convert_key(ctx); + if (rc) + return rc; /* Pick the correct function code based on the protected key type */ fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 : diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index b2f219ec379c..234d791ca59d 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c @@ -414,7 +414,7 @@ static int __init prng_sha512_instantiate(void) } /* append the seed by 16 bytes of unique nonce */ - get_tod_clock_ext(seed + seedlen); + store_tod_clock_ext((union tod_clock *)(seed + seedlen)); seedlen += 16; /* now initial seed of the prno drng */ diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c index 3235e4d82f2d..6c43d2ba2079 100644 --- a/arch/s390/hypfs/hypfs_diag0c.c +++ b/arch/s390/hypfs/hypfs_diag0c.c @@ -84,7 +84,7 @@ static int dbfs_diag0c_create(void **data, void **data_free_ptr, size_t *size) if (IS_ERR(diag0c_data)) return PTR_ERR(diag0c_data); memset(&diag0c_data->hdr, 0, sizeof(diag0c_data->hdr)); - get_tod_clock_ext(diag0c_data->hdr.tod_ext); + store_tod_clock_ext((union tod_clock *)diag0c_data->hdr.tod_ext); diag0c_data->hdr.len = count * sizeof(struct hypfs_diag0c_entry); diag0c_data->hdr.version = DBFS_D0C_HDR_VERSION; diag0c_data->hdr.count = count; diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index e1fcc03159ef..33f973ff9744 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -234,7 +234,7 @@ failed: struct dbfs_d2fc_hdr { u64 len; /* Length of d2fc buffer without header */ u16 version; /* Version of header */ - char tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */ + union tod_clock tod_ext; /* TOD clock for d2fc */ u64 count; /* Number of VM guests in d2fc buffer */ char reserved[30]; } __attribute__ ((packed)); @@ -252,7 +252,7 @@ static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size) d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr)); if (IS_ERR(d2fc)) return PTR_ERR(d2fc); - get_tod_clock_ext(d2fc->hdr.tod_ext); + store_tod_clock_ext(&d2fc->hdr.tod_ext); d2fc->hdr.len = count * sizeof(struct diag2fc_data); d2fc->hdr.version = DBFS_D2FC_HDR_VERSION; d2fc->hdr.count = count; diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h index 1c8a38f762a3..d3880ca764ee 100644 --- a/arch/s390/include/asm/alternative.h +++ b/arch/s390/include/asm/alternative.h @@ -145,6 +145,22 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end); asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \ altinstr2, facility2) ::: "memory") +/* Alternative inline assembly with input. */ +#define alternative_input(oldinstr, newinstr, feature, input...) 
\ + asm_inline volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ + : : input) + +/* Like alternative_input, but with a single output argument */ +#define alternative_io(oldinstr, altinstr, facility, output, input...) \ + asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) \ + : output : input) + +/* Use this macro if more than one output parameter is needed. */ +#define ASM_OUTPUT2(a...) a + +/* Use this macro if clobbers are needed without inputs. */ +#define ASM_NO_INPUT_CLOBBER(clobber...) : clobber + #endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_ALTERNATIVE_H */ diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h index aea32dda3d14..837d1699b109 100644 --- a/arch/s390/include/asm/ap.h +++ b/arch/s390/include/asm/ap.h @@ -368,7 +368,7 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid, #if IS_ENABLED(CONFIG_ZCRYPT) void ap_bus_cfg_chg(void); #else -static inline void ap_bus_cfg_chg(void){}; +static inline void ap_bus_cfg_chg(void){} #endif #endif /* _ASM_S390_AP_H_ */ diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 11c5952e1afa..5860ae790f2d 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -44,16 +44,6 @@ static inline int atomic_fetch_add(int i, atomic_t *v) static inline void atomic_add(int i, atomic_t *v) { -#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES - /* - * Order of conditions is important to circumvent gcc 10 bug: - * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html - */ - if ((i > -129) && (i < 128) && __builtin_constant_p(i)) { - __atomic_add_const(i, &v->counter); - return; - } -#endif __atomic_add(i, &v->counter); } @@ -115,16 +105,6 @@ static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) static inline void atomic64_add(s64 i, atomic64_t *v) { -#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES - /* - * Order of conditions is important to circumvent gcc 10 bug: - * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html - */ - if ((i > -129) && (i < 128) && __builtin_constant_p(i)) { - __atomic64_add_const(i, (long *)&v->counter); - return; - } -#endif __atomic64_add(i, (long *)&v->counter); } diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 431e208a5ea4..31121d32f81d 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -61,18 +61,6 @@ static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned lon unsigned long *addr = __bitops_word(nr, ptr); unsigned long mask; -#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES - if (__builtin_constant_p(nr)) { - unsigned char *caddr = __bitops_byte(nr, ptr); - - asm volatile( - "oi %0,%b1\n" - : "+Q" (*caddr) - : "i" (1 << (nr & 7)) - : "cc", "memory"); - return; - } -#endif mask = 1UL << (nr & (BITS_PER_LONG - 1)); __atomic64_or(mask, (long *)addr); } @@ -82,18 +70,6 @@ static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned l unsigned long *addr = __bitops_word(nr, ptr); unsigned long mask; -#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES - if (__builtin_constant_p(nr)) { - unsigned char *caddr = __bitops_byte(nr, ptr); - - asm volatile( - "ni %0,%b1\n" - : "+Q" (*caddr) - : "i" (~(1 << (nr & 7))) - : "cc", "memory"); - return; - } -#endif mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); __atomic64_and(mask, (long *)addr); } @@ -104,18 +80,6 @@ static __always_inline void arch_change_bit(unsigned long nr, unsigned long *addr = __bitops_word(nr, ptr); unsigned long mask; -#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES - if 
(__builtin_constant_p(nr)) { - unsigned char *caddr = __bitops_byte(nr, ptr); - - asm volatile( - "xi %0,%b1\n" - : "+Q" (*caddr) - : "i" (1 << (nr & 7)) - : "cc", "memory"); - return; - } -#endif mask = 1UL << (nr & (BITS_PER_LONG - 1)); __atomic64_xor(mask, (long *)addr); } diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index cb729d111e20..1d389847b588 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -35,4 +35,6 @@ u64 arch_cpu_idle_time(int cpu); #define arch_idle_time(cpu) arch_cpu_idle_time(cpu) +void account_idle_time_irq(void); + #endif /* _S390_CPUTIME_H */ diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 5775fc22f410..66d51ad090ab 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -233,8 +233,7 @@ extern char elf_platform[]; do { \ set_personality(PER_LINUX | \ (current->personality & (~PER_MASK))); \ - current->thread.sys_call_table = \ - (unsigned long) &sys_call_table; \ + current->thread.sys_call_table = sys_call_table; \ } while (0) #else /* CONFIG_COMPAT */ #define SET_PERSONALITY(ex) \ @@ -245,11 +244,11 @@ do { \ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \ set_thread_flag(TIF_31BIT); \ current->thread.sys_call_table = \ - (unsigned long) &sys_call_table_emu; \ + sys_call_table_emu; \ } else { \ clear_thread_flag(TIF_31BIT); \ current->thread.sys_call_table = \ - (unsigned long) &sys_call_table; \ + sys_call_table; \ } \ } while (0) #endif /* CONFIG_COMPAT */ diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h new file mode 100644 index 000000000000..75cebc80474e --- /dev/null +++ b/arch/s390/include/asm/entry-common.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_S390_ENTRY_COMMON_H +#define ARCH_S390_ENTRY_COMMON_H + +#include <linux/sched.h> +#include <linux/audit.h> +#include <linux/tracehook.h> +#include <linux/processor.h> +#include <linux/uaccess.h> +#include <asm/fpu/api.h> + +#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP) + +void do_per_trap(struct pt_regs *regs); +void do_syscall(struct pt_regs *regs); + +typedef void (*pgm_check_func)(struct pt_regs *regs); + +extern pgm_check_func pgm_check_table[128]; + +#ifdef CONFIG_DEBUG_ENTRY +static __always_inline void arch_check_user_regs(struct pt_regs *regs) +{ + debug_user_asce(0); +} + +#define arch_check_user_regs arch_check_user_regs +#endif /* CONFIG_DEBUG_ENTRY */ + +static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs, + unsigned long ti_work) +{ + if (ti_work & _TIF_PER_TRAP) { + clear_thread_flag(TIF_PER_TRAP); + do_per_trap(regs); + } + + if (ti_work & _TIF_GUARDED_STORAGE) + gs_load_bc_cb(regs); +} + +#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work + +static __always_inline void arch_exit_to_user_mode(void) +{ + if (test_cpu_flag(CIF_FPU)) + __load_fpu_regs(); + + if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) + debug_user_asce(1); +} + +#define arch_exit_to_user_mode arch_exit_to_user_mode + +static inline bool on_thread_stack(void) +{ + return !(((unsigned long)(current->stack) ^ current_stack_pointer()) & ~(THREAD_SIZE - 1)); +} + +#endif diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h index 68c476b20b57..91b5d714d28f 100644 --- a/arch/s390/include/asm/facility.h +++ b/arch/s390/include/asm/facility.h @@ -44,7 +44,7 @@ static inline int __test_facility(unsigned long nr, void *facilities) } /* - * The test_facility function uses the 
bit odering where the MSB is bit 0. + * The test_facility function uses the bit ordering where the MSB is bit 0. * That makes it easier to query facility bits with the bit number as * documented in the Principles of Operation. */ diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h index 34a7ae68485c..a959b815a58b 100644 --- a/arch/s390/include/asm/fpu/api.h +++ b/arch/s390/include/asm/fpu/api.h @@ -47,6 +47,8 @@ #include <linux/preempt.h> void save_fpu_regs(void); +void load_fpu_regs(void); +void __load_fpu_regs(void); static inline int test_fp_ctl(u32 fpc) { diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index dfbc3c6c0674..58668ffb5488 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h @@ -18,7 +18,6 @@ #define or_softirq_pending(x) (S390_lowcore.softirq_pending |= (x)) #define __ARCH_IRQ_STAT -#define __ARCH_HAS_DO_SOFTIRQ #define __ARCH_IRQ_EXIT_IRQS_DISABLED static inline void ack_bad_irq(unsigned int irq) diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h index 6d4226dcf42a..b04f6a794cdf 100644 --- a/arch/s390/include/asm/idle.h +++ b/arch/s390/include/asm/idle.h @@ -20,11 +20,13 @@ struct s390_idle_data { unsigned long long clock_idle_exit; unsigned long long timer_idle_enter; unsigned long long timer_idle_exit; + unsigned long mt_cycles_enter[8]; }; extern struct device_attribute dev_attr_idle_count; extern struct device_attribute dev_attr_idle_time_us; -void psw_idle(struct s390_idle_data *, unsigned long); +void psw_idle(struct s390_idle_data *data, unsigned long psw_mask); +void psw_idle_exit(void); #endif /* _S390_IDLE_H */ diff --git a/arch/s390/include/asm/irq_work.h b/arch/s390/include/asm/irq_work.h new file mode 100644 index 000000000000..603783766d0a --- /dev/null +++ b/arch/s390/include/asm/irq_work.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_IRQ_WORK_H +#define _ASM_S390_IRQ_WORK_H + +static inline bool arch_irq_work_has_interrupt(void) +{ + return true; +} + +void arch_irq_work_raise(void); + +#endif /* _ASM_S390_IRQ_WORK_H */ diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 74f9a036bab2..6bcfc5614bbc 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -28,7 +28,6 @@ #define KVM_S390_BSCA_CPU_SLOTS 64 #define KVM_S390_ESCA_CPU_SLOTS 248 #define KVM_MAX_VCPUS 255 -#define KVM_USER_MEM_SLOTS 32 /* * These seem to be used for allocating ->chip in the routing table, which we diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 69ce9191eaf1..22bceeeba4bc 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -81,8 +81,8 @@ struct lowcore { psw_t return_mcck_psw; /* 0x02a0 */ /* CPU accounting and timing values. */ - __u64 sync_enter_timer; /* 0x02b0 */ - __u64 async_enter_timer; /* 0x02b8 */ + __u64 sys_enter_timer; /* 0x02b0 */ + __u8 pad_0x02b8[0x02c0-0x02b8]; /* 0x02b8 */ __u64 mcck_enter_timer; /* 0x02c0 */ __u64 exit_timer; /* 0x02c8 */ __u64 user_timer; /* 0x02d0 */ @@ -107,16 +107,15 @@ struct lowcore { __u64 async_stack; /* 0x0350 */ __u64 nodat_stack; /* 0x0358 */ __u64 restart_stack; /* 0x0360 */ - + __u64 mcck_stack; /* 0x0368 */ /* Restart function and parameter. 
*/ - __u64 restart_fn; /* 0x0368 */ - __u64 restart_data; /* 0x0370 */ - __u64 restart_source; /* 0x0378 */ + __u64 restart_fn; /* 0x0370 */ + __u64 restart_data; /* 0x0378 */ + __u64 restart_source; /* 0x0380 */ /* Address space pointer. */ - __u64 kernel_asce; /* 0x0380 */ - __u64 user_asce; /* 0x0388 */ - __u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */ + __u64 kernel_asce; /* 0x0388 */ + __u64 user_asce; /* 0x0390 */ /* * The lpp and current_pid fields form a diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h index 5afee80cff58..20e51c9ff240 100644 --- a/arch/s390/include/asm/nmi.h +++ b/arch/s390/include/asm/nmi.h @@ -99,6 +99,7 @@ int nmi_alloc_per_cpu(struct lowcore *lc); void nmi_free_per_cpu(struct lowcore *lc); void s390_handle_mcck(void); +void __s390_handle_mcck(void); int s390_do_machine_check(struct pt_regs *regs); #endif /* __ASSEMBLY__ */ diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 212628932ddc..053fe8b8dec7 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -201,7 +201,7 @@ extern unsigned int s390_pci_no_rid; Prototypes ----------------------------------------------------------------------------- */ /* Base stuff */ -int zpci_create_device(struct zpci_dev *); +int zpci_create_device(u32 fid, u32 fh, enum zpci_state state); void zpci_remove_device(struct zpci_dev *zdev); int zpci_enable_device(struct zpci_dev *); int zpci_disable_device(struct zpci_dev *); @@ -212,7 +212,7 @@ void zpci_remove_reserved_devices(void); /* CLP */ int clp_setup_writeback_mio(void); int clp_scan_pci_devices(void); -int clp_add_pci_device(u32, u32, int); +int clp_query_pci_fn(struct zpci_dev *zdev); int clp_enable_fh(struct zpci_dev *, u8); int clp_disable_fh(struct zpci_dev *); int clp_get_state(u32 fid, enum zpci_state *state); diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index d1297d6bbdcf..6b187cd72251 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -135,7 +135,7 @@ static inline void pmd_populate(struct mm_struct *mm, #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte) #define pmd_pgtable(pmd) \ - (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE) + ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)) /* * page table entry allocation/free routines. 
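[Note: the pgalloc.h hunk above and the pgtable.h hunks that follow apply one consistent rule: table-origin "deref" helpers now return kernel virtual addresses (via __va()), so the pfn helpers must translate back with __pa() before shifting. A sketch of the resulting invariant, with an illustrative helper name:

	/* Illustrative only: pmd_deref() now yields a virtual address, so the
	 * page frame number is recovered by converting back to physical. */
	static inline unsigned long example_pmd_pfn(pmd_t pmd)
	{
		unsigned long origin_va = pmd_deref(pmd);

		return __pa(origin_va) >> PAGE_SHIFT;
	}
]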
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 794746a32806..29c7ecd5ad1d 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1219,8 +1219,8 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) -#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN) -#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) +#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN)) +#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN)) static inline unsigned long pmd_deref(pmd_t pmd) { @@ -1229,12 +1229,12 @@ static inline unsigned long pmd_deref(pmd_t pmd) origin_mask = _SEGMENT_ENTRY_ORIGIN; if (pmd_large(pmd)) origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE; - return pmd_val(pmd) & origin_mask; + return (unsigned long)__va(pmd_val(pmd) & origin_mask); } static inline unsigned long pmd_pfn(pmd_t pmd) { - return pmd_deref(pmd) >> PAGE_SHIFT; + return __pa(pmd_deref(pmd)) >> PAGE_SHIFT; } static inline unsigned long pud_deref(pud_t pud) @@ -1244,12 +1244,12 @@ static inline unsigned long pud_deref(pud_t pud) origin_mask = _REGION_ENTRY_ORIGIN; if (pud_large(pud)) origin_mask = _REGION3_ENTRY_ORIGIN_LARGE; - return pud_val(pud) & origin_mask; + return (unsigned long)__va(pud_val(pud) & origin_mask); } static inline unsigned long pud_pfn(pud_t pud) { - return pud_deref(pud) >> PAGE_SHIFT; + return __pa(pud_deref(pud)) >> PAGE_SHIFT; } /* @@ -1329,7 +1329,7 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end) } #define gup_fast_permitted gup_fast_permitted -#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) +#define pfn_pte(pfn, pgprot) mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot)) #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) #define pte_page(x) pfn_to_page(pte_pfn(x)) @@ -1636,7 +1636,7 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, } #define pmdp_collapse_flush pmdp_collapse_flush -#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) +#define pfn_pmd(pfn, pgprot) mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot)) #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) static inline int pmd_trans_huge(pmd_t pmd) diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h index 6ede29907fbf..b49e0492842c 100644 --- a/arch/s390/include/asm/preempt.h +++ b/arch/s390/include/asm/preempt.h @@ -131,9 +131,9 @@ static inline bool should_resched(int preempt_offset) #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ #ifdef CONFIG_PREEMPTION -extern asmlinkage void preempt_schedule(void); +extern void preempt_schedule(void); #define __preempt_schedule() preempt_schedule() -extern asmlinkage void preempt_schedule_notrace(void); +extern void preempt_schedule_notrace(void); #define __preempt_schedule_notrace() preempt_schedule_notrace() #endif /* CONFIG_PREEMPTION */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 2058a435add4..023a15dc25a3 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -38,6 +38,9 @@ #include <asm/runtime_instr.h> #include <asm/fpu/types.h> #include <asm/fpu/internal.h> +#include <asm/irqflags.h> + +typedef long (*sys_call_ptr_t)(struct pt_regs *regs); static inline void set_cpu_flag(int flag) { @@ -101,31 +104,32 @@ 
extern void __bpon(void); */ struct thread_struct { unsigned int acrs[NUM_ACRS]; - unsigned long ksp; /* kernel stack pointer */ - unsigned long user_timer; /* task cputime in user space */ - unsigned long guest_timer; /* task cputime in kvm guest */ - unsigned long system_timer; /* task cputime in kernel space */ - unsigned long hardirq_timer; /* task cputime in hardirq context */ - unsigned long softirq_timer; /* task cputime in softirq context */ - unsigned long sys_call_table; /* system call table address */ - unsigned long gmap_addr; /* address of last gmap fault. */ - unsigned int gmap_write_flag; /* gmap fault write indication */ - unsigned int gmap_int_code; /* int code of last gmap fault */ - unsigned int gmap_pfault; /* signal of a pending guest pfault */ + unsigned long ksp; /* kernel stack pointer */ + unsigned long user_timer; /* task cputime in user space */ + unsigned long guest_timer; /* task cputime in kvm guest */ + unsigned long system_timer; /* task cputime in kernel space */ + unsigned long hardirq_timer; /* task cputime in hardirq context */ + unsigned long softirq_timer; /* task cputime in softirq context */ + const sys_call_ptr_t *sys_call_table; /* system call table address */ + unsigned long gmap_addr; /* address of last gmap fault. */ + unsigned int gmap_write_flag; /* gmap fault write indication */ + unsigned int gmap_int_code; /* int code of last gmap fault */ + unsigned int gmap_pfault; /* signal of a pending guest pfault */ + /* Per-thread information related to debugging */ - struct per_regs per_user; /* User specified PER registers */ - struct per_event per_event; /* Cause of the last PER trap */ - unsigned long per_flags; /* Flags to control debug behavior */ - unsigned int system_call; /* system call number in signal */ - unsigned long last_break; /* last breaking-event-address. */ - /* pfault_wait is used to block the process on a pfault event */ + struct per_regs per_user; /* User specified PER registers */ + struct per_event per_event; /* Cause of the last PER trap */ + unsigned long per_flags; /* Flags to control debug behavior */ + unsigned int system_call; /* system call number in signal */ + unsigned long last_break; /* last breaking-event-address. */ + /* pfault_wait is used to block the process on a pfault event */ unsigned long pfault_wait; struct list_head list; /* cpu runtime instrumentation */ struct runtime_instr_cb *ri_cb; - struct gs_cb *gs_cb; /* Current guarded storage cb */ - struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */ - unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ + struct gs_cb *gs_cb; /* Current guarded storage cb */ + struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */ + unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ /* * Warning: 'fpu' is dynamically-sized. It *MUST* be at * the end. 
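[Note: with the new sys_call_ptr_t taking a struct pt_regs * and thread.sys_call_table retyped as const sys_call_ptr_t *, syscall dispatch reduces to an indexed indirect call; each stub unpacks its own arguments from pt_regs (see the syscall_wrapper.h changes below). A hedged sketch of that dispatch, not the literal entry code:

	/* Sketch: pt_regs-based dispatch through the per-thread table. */
	static void dispatch_syscall(struct pt_regs *regs, unsigned long nr)
	{
		const sys_call_ptr_t *table = current->thread.sys_call_table;

		/* the return value lands in gpr 2, cf. syscall_set_return_value() */
		regs->gprs[2] = table[nr](regs);
	}
]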
@@ -184,6 +188,7 @@ static inline void release_thread(struct task_struct *tsk) { } /* Free guarded storage control block */ void guarded_storage_release(struct task_struct *tsk); +void gs_load_bc_cb(struct pt_regs *regs); unsigned long get_wchan(struct task_struct *p); #define task_pt_regs(tsk) ((struct pt_regs *) \ @@ -324,6 +329,11 @@ extern void memcpy_absolute(void *, void *, size_t); extern int s390_isolate_bp(void); extern int s390_isolate_bp_guest(void); +static __always_inline bool regs_irqs_disabled(struct pt_regs *regs) +{ + return arch_irqs_disabled_flags(regs->psw.mask); +} + #endif /* __ASSEMBLY__ */ #endif /* __ASM_S390_PROCESSOR_H */ diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 73ca7f7cac33..f828be78937f 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -11,13 +11,13 @@ #include <uapi/asm/ptrace.h> #define PIF_SYSCALL 0 /* inside a system call */ -#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */ -#define PIF_SYSCALL_RESTART 2 /* restart the current system call */ +#define PIF_SYSCALL_RESTART 1 /* restart the current system call */ +#define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */ #define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */ #define _PIF_SYSCALL BIT(PIF_SYSCALL) -#define _PIF_PER_TRAP BIT(PIF_PER_TRAP) #define _PIF_SYSCALL_RESTART BIT(PIF_SYSCALL_RESTART) +#define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET) #define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT) #ifndef __ASSEMBLY__ @@ -68,6 +68,9 @@ enum { &(*(struct psw_bits *)(&(__psw))); \ })) +#define PGM_INT_CODE_MASK 0x7f +#define PGM_INT_CODE_PER 0x80 + /* * The pt_regs struct defines the way the registers are stored on * the stack during a system call. diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 19e84c95d1e7..d9215c7106f0 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -250,17 +250,13 @@ struct slsb { * struct qdio_outbuf_state - SBAL related asynchronous operation information * (for communication with upper layer programs) * (only required for use with completion queues) - * @flags: flags indicating state of buffer * @user: pointer to upper layer program's state information related to SBAL * (stored in user1 data of QAOB) */ struct qdio_outbuf_state { - u8 flags; void *user; }; -#define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01 - #define CHSC_AC1_INITIATE_INPUTQ 0x80 @@ -315,6 +311,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, #define QDIO_ERROR_GET_BUF_STATE 0x0002 #define QDIO_ERROR_SET_BUF_STATE 0x0004 #define QDIO_ERROR_SLSB_STATE 0x0100 +#define QDIO_ERROR_SLSB_PENDING 0x0200 #define QDIO_ERROR_FATAL 0x00ff #define QDIO_ERROR_TEMPORARY 0xff00 @@ -336,7 +333,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, * @no_output_qs: number of output queues * @input_handler: handler to be called for input queues * @output_handler: handler to be called for output queues - * @irq_poll: Data IRQ polling handler (NULL when not supported) + * @irq_poll: Data IRQ polling handler * @scan_threshold: # of in-use buffers that triggers scan on output queue * @int_parm: interruption parameter * @input_sbal_addr_array: per-queue array, each element points to 128 SBALs diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h index c00f7b031628..a7c3ccf681da 100644 --- a/arch/s390/include/asm/scsw.h +++ b/arch/s390/include/asm/scsw.h @@ -525,8 +525,7 @@ static inline int 
scsw_cmd_is_valid_pno(union scsw *scsw) return (scsw->cmd.fctl != 0) && (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || - ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && - (scsw->cmd.actl & SCSW_ACTL_SUSPENDED))); + (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)); } /** diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index d9d5de0f67ff..9107e3dab68c 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -14,8 +14,8 @@ #include <linux/err.h> #include <asm/ptrace.h> -extern const unsigned long sys_call_table[]; -extern const unsigned long sys_call_table_emu[]; +extern const sys_call_ptr_t sys_call_table[]; +extern const sys_call_ptr_t sys_call_table_emu[]; static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) @@ -56,6 +56,7 @@ static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val) { + set_pt_regs_flag(regs, PIF_SYSCALL_RET_SET); regs->gprs[2] = error ? error : val; } @@ -97,4 +98,10 @@ static inline int syscall_get_arch(struct task_struct *task) #endif return AUDIT_ARCH_S390X; } + +static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) +{ + return false; +} + #endif /* _ASM_SYSCALL_H */ diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h index 1320f4213d80..ad2c996e7e93 100644 --- a/arch/s390/include/asm/syscall_wrapper.h +++ b/arch/s390/include/asm/syscall_wrapper.h @@ -7,6 +7,33 @@ #ifndef _ASM_S390_SYSCALL_WRAPPER_H #define _ASM_S390_SYSCALL_WRAPPER_H +#define __SC_TYPE(t, a) t + +#define SYSCALL_PT_ARG6(regs, m, t1, t2, t3, t4, t5, t6)\ + SYSCALL_PT_ARG5(regs, m, t1, t2, t3, t4, t5), \ + m(t6, (regs->gprs[7])) + +#define SYSCALL_PT_ARG5(regs, m, t1, t2, t3, t4, t5) \ + SYSCALL_PT_ARG4(regs, m, t1, t2, t3, t4), \ + m(t5, (regs->gprs[6])) + +#define SYSCALL_PT_ARG4(regs, m, t1, t2, t3, t4) \ + SYSCALL_PT_ARG3(regs, m, t1, t2, t3), \ + m(t4, (regs->gprs[5])) + +#define SYSCALL_PT_ARG3(regs, m, t1, t2, t3) \ + SYSCALL_PT_ARG2(regs, m, t1, t2), \ + m(t3, (regs->gprs[4])) + +#define SYSCALL_PT_ARG2(regs, m, t1, t2) \ + SYSCALL_PT_ARG1(regs, m, t1), \ + m(t2, (regs->gprs[3])) + +#define SYSCALL_PT_ARG1(regs, m, t1) \ + m(t1, (regs->orig_gpr2)) + +#define SYSCALL_PT_ARGS(x, ...) SYSCALL_PT_ARG##x(__VA_ARGS__) + #ifdef CONFIG_COMPAT #define __SC_COMPAT_TYPE(t, a) \ __typeof(__builtin_choose_expr(sizeof(t) > 4, 0L, (t)0)) a @@ -29,14 +56,15 @@ (t)__ReS; \ }) -#define __S390_SYS_STUBx(x, name, ...) \ - asmlinkage long __s390_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));\ - ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \ - asmlinkage long __s390_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))\ - { \ - long ret = __s390x_sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__));\ - __MAP(x,__SC_TEST,__VA_ARGS__); \ - return ret; \ +#define __S390_SYS_STUBx(x, name, ...) 
\ + long __s390_sys##name(struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \ + long __s390_sys##name(struct pt_regs *regs) \ + { \ + long ret = __do_sys##name(SYSCALL_PT_ARGS(x, regs, \ + __SC_COMPAT_CAST, __MAP(x, __SC_TYPE, __VA_ARGS__))); \ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + return ret; \ } /* @@ -45,17 +73,17 @@ */ #define COMPAT_SYSCALL_DEFINE0(sname) \ SYSCALL_METADATA(_##sname, 0); \ - asmlinkage long __s390_compat_sys_##sname(void); \ + long __s390_compat_sys_##sname(void); \ ALLOW_ERROR_INJECTION(__s390_compat_sys_##sname, ERRNO); \ - asmlinkage long __s390_compat_sys_##sname(void) + long __s390_compat_sys_##sname(void) #define SYSCALL_DEFINE0(sname) \ SYSCALL_METADATA(_##sname, 0); \ - asmlinkage long __s390x_sys_##sname(void); \ + long __s390x_sys_##sname(void); \ ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \ - asmlinkage long __s390_sys_##sname(void) \ + long __s390_sys_##sname(void) \ __attribute__((alias(__stringify(__s390x_sys_##sname)))); \ - asmlinkage long __s390x_sys_##sname(void) + long __s390x_sys_##sname(void) #define COND_SYSCALL(name) \ cond_syscall(__s390x_sys_##name); \ @@ -65,23 +93,24 @@ SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); \ SYSCALL_ALIAS(__s390_sys_##name, sys_ni_posix_timers) -#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ - __diag_push(); \ - __diag_ignore(GCC, 8, "-Wattribute-alias", \ - "Type aliasing is used to sanitize syscall arguments");\ - asmlinkage long __s390_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ - asmlinkage long __s390_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ - __attribute__((alias(__stringify(__se_compat_sys##name)))); \ - ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \ - static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ - asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ - asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ - { \ - long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\ - __MAP(x,__SC_TEST,__VA_ARGS__); \ - return ret; \ - } \ - __diag_pop(); \ +#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + __diag_push(); \ + __diag_ignore(GCC, 8, "-Wattribute-alias", \ + "Type aliasing is used to sanitize syscall arguments"); \ + long __s390_compat_sys##name(struct pt_regs *regs); \ + long __s390_compat_sys##name(struct pt_regs *regs) \ + __attribute__((alias(__stringify(__se_compat_sys##name)))); \ + ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + long __se_compat_sys##name(struct pt_regs *regs); \ + long __se_compat_sys##name(struct pt_regs *regs) \ + { \ + long ret = __do_compat_sys##name(SYSCALL_PT_ARGS(x, regs, __SC_DELOUSE, \ + __MAP(x, __SC_TYPE, __VA_ARGS__))); \ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + return ret; \ + } \ + __diag_pop(); \ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) /* @@ -101,9 +130,9 @@ #define SYSCALL_DEFINE0(sname) \ SYSCALL_METADATA(_##sname, 0); \ - asmlinkage long __s390x_sys_##sname(void); \ + long __s390x_sys_##sname(void); \ ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \ - asmlinkage long __s390x_sys_##sname(void) + long __s390x_sys_##sname(void) #define COND_SYSCALL(name) \ cond_syscall(__s390x_sys_##name) @@ -113,23 +142,24 @@ #endif /* CONFIG_COMPAT */ -#define __SYSCALL_DEFINEx(x, name, ...) 
\ - __diag_push(); \ - __diag_ignore(GCC, 8, "-Wattribute-alias", \ - "Type aliasing is used to sanitize syscall arguments");\ - asmlinkage long __s390x_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ - __attribute__((alias(__stringify(__se_sys##name)))); \ - ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \ - long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ - static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ - __S390_SYS_STUBx(x, name, __VA_ARGS__) \ - asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ - { \ - long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \ - __MAP(x,__SC_TEST,__VA_ARGS__); \ - return ret; \ - } \ - __diag_pop(); \ +#define __SYSCALL_DEFINEx(x, name, ...) \ + __diag_push(); \ + __diag_ignore(GCC, 8, "-Wattribute-alias", \ + "Type aliasing is used to sanitize syscall arguments"); \ + long __s390x_sys##name(struct pt_regs *regs) \ + __attribute__((alias(__stringify(__se_sys##name)))); \ + ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + long __se_sys##name(struct pt_regs *regs); \ + __S390_SYS_STUBx(x, name, __VA_ARGS__) \ + long __se_sys##name(struct pt_regs *regs) \ + { \ + long ret = __do_sys##name(SYSCALL_PT_ARGS(x, regs, \ + __SC_CAST, __MAP(x, __SC_TYPE, __VA_ARGS__))); \ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + return ret; \ + } \ + __diag_pop(); \ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) #endif /* _ASM_X86_SYSCALL_WRAPPER_H */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 3c5b1f909b6d..e6674796aa6f 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -36,6 +36,7 @@ */ struct thread_info { unsigned long flags; /* low level flags */ + unsigned long syscall_work; /* SYSCALL_WORK_ flags */ }; /* @@ -46,6 +47,8 @@ struct thread_info { .flags = 0, \ } +struct task_struct; + void arch_release_task_struct(struct task_struct *tsk); int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); @@ -68,6 +71,7 @@ void arch_setup_new_exec(void); #define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_ISOLATE_BP 8 /* Run process with isolated BP */ #define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ +#define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */ #define TIF_31BIT 16 /* 32bit process */ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ @@ -91,6 +95,7 @@ void arch_setup_new_exec(void); #define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING) #define _TIF_ISOLATE_BP BIT(TIF_ISOLATE_BP) #define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST) +#define _TIF_PER_TRAP BIT(TIF_PER_TRAP) #define _TIF_31BIT BIT(TIF_31BIT) #define _TIF_SINGLE_STEP BIT(TIF_SINGLE_STEP) diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index c8e244ecdfde..c4e23e925665 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -19,6 +19,25 @@ extern u64 clock_comparator_max; +union tod_clock { + __uint128_t val; + struct { + __uint128_t ei : 8; /* epoch index */ + __uint128_t tod : 64; /* bits 0-63 of tod clock */ + __uint128_t : 40; + __uint128_t pf : 16; /* programmable field */ + }; + struct { + __uint128_t eitod : 72; /* epoch index + bits 0-63 tod clock */ + __uint128_t : 56; + }; + struct { + __uint128_t us : 60; /* micro-seconds */ + __uint128_t sus : 12; /* sub-microseconds */ + __uint128_t : 56; + }; +} __packed; + /* Inline functions for clock 
register access. */ static inline int set_tod_clock(__u64 time) { @@ -32,18 +51,23 @@ static inline int set_tod_clock(__u64 time) return cc; } -static inline int store_tod_clock(__u64 *time) +static inline int store_tod_clock_ext_cc(union tod_clock *clk) { int cc; asm volatile( - " stck %1\n" + " stcke %1\n" " ipm %0\n" " srl %0,28\n" - : "=d" (cc), "=Q" (*time) : : "cc"); + : "=d" (cc), "=Q" (*clk) : : "cc"); return cc; } +static inline void store_tod_clock_ext(union tod_clock *tod) +{ + asm volatile("stcke %0" : "=Q" (*tod) : : "cc"); +} + static inline void set_clock_comparator(__u64 time) { asm volatile("sckc %0" : : "Q" (time)); @@ -144,23 +168,15 @@ static inline void local_tick_enable(unsigned long long comp) } #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ -#define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */ typedef unsigned long long cycles_t; -static inline void get_tod_clock_ext(char *clk) -{ - typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype; - - asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc"); -} - static inline unsigned long long get_tod_clock(void) { - char clk[STORE_CLOCK_EXT_SIZE]; + union tod_clock clk; - get_tod_clock_ext(clk); - return *((unsigned long long *)&clk[1]); + store_tod_clock_ext(&clk); + return clk.tod; } static inline unsigned long long get_tod_clock_fast(void) @@ -183,7 +199,7 @@ static inline cycles_t get_cycles(void) int get_phys_clock(unsigned long *clock); void init_cpu_timer(void); -extern unsigned char tod_clock_base[16] __aligned(8); +extern union tod_clock tod_clock_base; /** * get_clock_monotonic - returns current time in clock rate units @@ -197,7 +213,7 @@ static inline unsigned long long get_tod_clock_monotonic(void) unsigned long long tod; preempt_disable_notrace(); - tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1]; + tod = get_tod_clock() - tod_clock_base.tod; preempt_enable_notrace(); return tod; } diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 954fa8ca6cbd..fe6407f0eb1b 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -66,7 +66,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, __tlb_adjust_range(tlb, address, PAGE_SIZE); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; - tlb->cleared_ptes = 1; + tlb->cleared_pmds = 1; /* * page_table_free_rcu takes care of the allocation bit masks * of the 2K table fragments in the 4K page table page, @@ -110,7 +110,6 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, __tlb_adjust_range(tlb, address, PAGE_SIZE); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; - tlb->cleared_p4ds = 1; tlb_remove_table(tlb, p4d); } @@ -128,7 +127,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, return; tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; - tlb->cleared_puds = 1; + tlb->cleared_p4ds = 1; tlb_remove_table(tlb, pud); } diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index c6707885e7c2..4756d2937e54 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -18,7 +18,7 @@ #include <asm/extable.h> #include <asm/facility.h> -void debug_user_asce(void); +void debug_user_asce(int exit); static inline int __range_ok(unsigned long addr, unsigned long size) { diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index f65590889054..b45e3dddd2c2 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -4,17 +4,18 @@ #include 
<vdso/datapage.h> -/* Default link addresses for the vDSOs */ -#define VDSO32_LBASE 0 +/* Default link address for the vDSO */ #define VDSO64_LBASE 0 +#define __VVAR_PAGES 2 + #define VDSO_VERSION_STRING LINUX_2.6.29 #ifndef __ASSEMBLY__ extern struct vdso_data *vdso_data; -void vdso_getcpu_init(void); +int vdso_getcpu_init(void); #endif /* __ASSEMBLY__ */ #endif /* __S390_VDSO_H__ */ diff --git a/arch/s390/include/asm/vdso/gettimeofday.h b/arch/s390/include/asm/vdso/gettimeofday.h index bf123065ad3b..ed89ef742530 100644 --- a/arch/s390/include/asm/vdso/gettimeofday.h +++ b/arch/s390/include/asm/vdso/gettimeofday.h @@ -24,13 +24,12 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void) static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *vd) { - const struct vdso_data *vdso = __arch_get_vdso_data(); u64 adj, now; now = get_tod_clock(); - adj = vdso->arch_data.tod_steering_end - now; + adj = vd->arch_data.tod_steering_end - now; if (unlikely((s64) adj > 0)) - now += (vdso->arch_data.tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15); + now += (vd->arch_data.tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15); return now; } @@ -68,4 +67,11 @@ long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts) return r2; } +#ifdef CONFIG_TIME_NS +static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void) +{ + return _timens_data; +} +#endif + #endif diff --git a/arch/s390/include/asm/vtime.h b/arch/s390/include/asm/vtime.h index fac6a67988eb..fe17e448c0c5 100644 --- a/arch/s390/include/asm/vtime.h +++ b/arch/s390/include/asm/vtime.h @@ -4,4 +4,18 @@ #define __ARCH_HAS_VTIME_TASK_SWITCH +static inline void update_timer_sys(void) +{ + S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer; + S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.sys_enter_timer; + S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer; +} + +static inline void update_timer_mcck(void) +{ + S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer; + S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.mcck_enter_timer; + S390_lowcore.last_update_timer = S390_lowcore.mcck_enter_timer; +} + #endif /* _S390_VTIME_H */ diff --git a/arch/s390/include/uapi/asm/perf_cpum_cf_diag.h b/arch/s390/include/uapi/asm/perf_cpum_cf_diag.h new file mode 100644 index 000000000000..3d8284b95f87 --- /dev/null +++ b/arch/s390/include/uapi/asm/perf_cpum_cf_diag.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright IBM Corp. 2021 + * Interface implementation for communication with the CPU Measurement + * counter facility device driver. + * + * Author(s): Thomas Richter <tmricht@linux.ibm.com> + * + * Define for ioctl() commands to communicate with the CPU Measurement + * counter facility device driver. 
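+ *
+ * A minimal user-space sketch of the intended call sequence. The device
+ * node name follows S390_HWCTR_DEVICE below, and the exact order of the
+ * calls is an assumption based on the ioctl numbers this header defines:
+ *
+ *	fd = open("/dev/hwctr", O_RDONLY);
+ *	start.version = S390_HWCTR_START_VERSION;
+ *	start.counter_sets = <bit mask of counter sets to activate>;
+ *	start.cpumask_len = <bytes in mask>;
+ *	start.cpumask = <pointer to mask of CPUs to read>;
+ *	ioctl(fd, S390_HWCTR_START, &start);
+ *	ioctl(fd, S390_HWCTR_READ, buf);	(repeat while counting)
+ *	ioctl(fd, S390_HWCTR_STOP);
+ *	close(fd);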
+ */ + +#ifndef _PERF_CPUM_CF_DIAG_H +#define _PERF_CPUM_CF_DIAG_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +#define S390_HWCTR_DEVICE "hwctr" +#define S390_HWCTR_START_VERSION 1 + +struct s390_ctrset_start { /* Set CPUs to operate on */ + __u64 version; /* Version of interface */ + __u64 data_bytes; /* # of bytes required */ + __u64 cpumask_len; /* Length of CPU mask in bytes */ + __u64 *cpumask; /* Pointer to CPU mask */ + __u64 counter_sets; /* Bit mask of counter sets to get */ +}; + +struct s390_ctrset_setdata { /* Counter set data */ + __u32 set; /* Counter set number */ + __u32 no_cnts; /* # of counters stored in cv[] */ + __u64 cv[0]; /* Counter values (variable length) */ +}; + +struct s390_ctrset_cpudata { /* Counter set data per CPU */ + __u32 cpu_nr; /* CPU number */ + __u32 no_sets; /* # of counters sets in data[] */ + struct s390_ctrset_setdata data[0]; +}; + +struct s390_ctrset_read { /* Structure to get all ctr sets */ + __u64 no_cpus; /* Total # of CPUs data taken from */ + struct s390_ctrset_cpudata data[0]; +}; + +#define S390_HWCTR_MAGIC 'C' /* Random magic # for ioctls */ +#define S390_HWCTR_START _IOWR(S390_HWCTR_MAGIC, 1, struct s390_ctrset_start) +#define S390_HWCTR_STOP _IO(S390_HWCTR_MAGIC, 2) +#define S390_HWCTR_READ _IOWR(S390_HWCTR_MAGIC, 3, struct s390_ctrset_read) +#endif diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h index 543dd70e12c8..ad64d673b5e6 100644 --- a/arch/s390/include/uapi/asm/ptrace.h +++ b/arch/s390/include/uapi/asm/ptrace.h @@ -179,8 +179,9 @@ #define ACR_SIZE 4 -#define PTRACE_OLDSETOPTIONS 21 - +#define PTRACE_OLDSETOPTIONS 21 +#define PTRACE_SYSEMU 31 +#define PTRACE_SYSEMU_SINGLESTEP 32 #ifndef __ASSEMBLY__ #include <linux/stddef.h> #include <linux/types.h> diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index dd73b7f07423..c97818a382f3 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -34,7 +34,7 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o -obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o +obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 79724d861dc9..15e637728a4b 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -26,26 +26,14 @@ int main(void) BLANK(); /* thread struct offsets */ OFFSET(__THREAD_ksp, thread_struct, ksp); - OFFSET(__THREAD_sysc_table, thread_struct, sys_call_table); - OFFSET(__THREAD_last_break, thread_struct, last_break); - OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc); - OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs); - OFFSET(__THREAD_per_cause, thread_struct, per_event.cause); - OFFSET(__THREAD_per_address, thread_struct, per_event.address); - OFFSET(__THREAD_per_paid, thread_struct, per_event.paid); - OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb); BLANK(); /* thread info offsets */ OFFSET(__TI_flags, task_struct, thread_info.flags); BLANK(); /* pt_regs offsets */ - OFFSET(__PT_ARGS, pt_regs, args); OFFSET(__PT_PSW, pt_regs, psw); OFFSET(__PT_GPRS, pt_regs, gprs); OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2); - OFFSET(__PT_INT_CODE, 
pt_regs, int_code); - OFFSET(__PT_INT_PARM, pt_regs, int_parm); - OFFSET(__PT_INT_PARM_LONG, pt_regs, int_parm_long); OFFSET(__PT_FLAGS, pt_regs, flags); OFFSET(__PT_CR1, pt_regs, cr1); DEFINE(__PT_SIZE, sizeof(struct pt_regs)); @@ -64,6 +52,7 @@ int main(void) OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit); OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter); OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit); + OFFSET(__MT_CYCLES_ENTER, s390_idle_data, mt_cycles_enter); BLANK(); /* hardware defined lowcore locations 0x000 - 0x1ff */ OFFSET(__LC_EXT_PARAMS, lowcore, ext_params); @@ -115,13 +104,9 @@ int main(void) OFFSET(__LC_CPU_FLAGS, lowcore, cpu_flags); OFFSET(__LC_RETURN_PSW, lowcore, return_psw); OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw); - OFFSET(__LC_SYNC_ENTER_TIMER, lowcore, sync_enter_timer); - OFFSET(__LC_ASYNC_ENTER_TIMER, lowcore, async_enter_timer); + OFFSET(__LC_SYS_ENTER_TIMER, lowcore, sys_enter_timer); OFFSET(__LC_MCCK_ENTER_TIMER, lowcore, mcck_enter_timer); OFFSET(__LC_EXIT_TIMER, lowcore, exit_timer); - OFFSET(__LC_USER_TIMER, lowcore, user_timer); - OFFSET(__LC_SYSTEM_TIMER, lowcore, system_timer); - OFFSET(__LC_STEAL_TIMER, lowcore, steal_timer); OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer); OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock); OFFSET(__LC_INT_CLOCK, lowcore, int_clock); @@ -133,6 +118,7 @@ int main(void) OFFSET(__LC_ASYNC_STACK, lowcore, async_stack); OFFSET(__LC_NODAT_STACK, lowcore, nodat_stack); OFFSET(__LC_RESTART_STACK, lowcore, restart_stack); + OFFSET(__LC_MCCK_STACK, lowcore, mcck_stack); OFFSET(__LC_RESTART_FN, lowcore, restart_fn); OFFSET(__LC_RESTART_DATA, lowcore, restart_data); OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source); diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 38d4bdbc34b9..1d0e17ec93eb 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -118,6 +118,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, ¤t->thread.fpu); clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ + clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART); return 0; } diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index b6619ae9a3e0..bb958d32bd81 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -829,11 +829,11 @@ static inline debug_entry_t *get_active_entry(debug_info_t *id) static inline void debug_finish_entry(debug_info_t *id, debug_entry_t *active, int level, int exception) { - unsigned char clk[STORE_CLOCK_EXT_SIZE]; unsigned long timestamp; + union tod_clock clk; - get_tod_clock_ext(clk); - timestamp = *(unsigned long *) &clk[0] >> 4; + store_tod_clock_ext(&clk); + timestamp = clk.us; timestamp -= TOD_UNIX_EPOCH >> 12; active->clock = timestamp; active->cpu = smp_processor_id(); diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index cc89763a4d3c..a361d2e70025 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -35,16 +35,16 @@ static void __init reset_tod_clock(void) { - u64 time; + union tod_clock clk; - if (store_tod_clock(&time) == 0) + if (store_tod_clock_ext_cc(&clk) == 0) return; /* TOD clock not running. Set the clock to Unix Epoch. 
*/ - if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0) + if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk)) disabled_wait(); - memset(tod_clock_base, 0, 16); - *(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH; + memset(&tod_clock_base, 0, sizeof(tod_clock_base)); + tod_clock_base.tod = TOD_UNIX_EPOCH; S390_lowcore.last_update_clock = TOD_UNIX_EPOCH; } @@ -230,7 +230,7 @@ static __init void detect_machine_facilities(void) } if (test_facility(133)) S390_lowcore.machine_flags |= MACHINE_FLAG_GS; - if (test_facility(139) && (tod_clock_base[1] & 0x80)) { + if (test_facility(139) && (tod_clock_base.tod >> 63)) { /* Enabled signed clock comparator comparisons */ S390_lowcore.machine_flags |= MACHINE_FLAG_SCC; clock_comparator_max = -1ULL >> 1; diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index f1ba197b10c0..c10b9f31eef7 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -51,38 +51,8 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER STACK_SIZE = 1 << STACK_SHIFT STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE -_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ - _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING | \ - _TIF_NOTIFY_SIGNAL) -_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ - _TIF_SYSCALL_TRACEPOINT) -_CIF_WORK = (_CIF_FPU) -_PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) - _LPP_OFFSET = __LC_LPP - .macro TRACE_IRQS_ON -#ifdef CONFIG_TRACE_IRQFLAGS - basr %r2,%r0 - brasl %r14,trace_hardirqs_on_caller -#endif - .endm - - .macro TRACE_IRQS_OFF -#ifdef CONFIG_TRACE_IRQFLAGS - basr %r2,%r0 - brasl %r14,trace_hardirqs_off_caller -#endif - .endm - - .macro LOCKDEP_SYS_EXIT -#ifdef CONFIG_LOCKDEP - tm __PT_PSW+1(%r11),0x01 # returning to user ? - jz .+10 - brasl %r14,lockdep_sys_exit -#endif - .endm - .macro CHECK_STACK savearea #ifdef CONFIG_CHECK_STACK tml %r15,STACK_SIZE - CONFIG_STACK_GUARD @@ -91,12 +61,6 @@ _LPP_OFFSET = __LC_LPP #endif .endm - .macro DEBUG_USER_ASCE -#ifdef CONFIG_DEBUG_USER_ASCE - brasl %r14,debug_user_asce -#endif - .endm - .macro CHECK_VMAP_STACK savearea,oklabel #ifdef CONFIG_VMAP_STACK lgr %r14,%r15 @@ -106,6 +70,8 @@ _LPP_OFFSET = __LC_LPP je \oklabel clg %r14,__LC_ASYNC_STACK je \oklabel + clg %r14,__LC_MCCK_STACK + je \oklabel clg %r14,__LC_NODAT_STACK je \oklabel clg %r14,__LC_RESTART_STACK @@ -117,113 +83,9 @@ _LPP_OFFSET = __LC_LPP #endif .endm - .macro SWITCH_ASYNC savearea,timer,clock - tmhh %r8,0x0001 # interrupting from user ? 
- jnz 4f -#if IS_ENABLED(CONFIG_KVM) - lgr %r14,%r9 - larl %r13,.Lsie_gmap - slgr %r14,%r13 - lghi %r13,.Lsie_done - .Lsie_gmap - clgr %r14,%r13 - jhe 0f - lghi %r11,\savearea # inside critical section, do cleanup - brasl %r14,.Lcleanup_sie -#endif -0: larl %r13,.Lpsw_idle_exit - cgr %r13,%r9 - jne 3f - - larl %r1,smp_cpu_mtid - llgf %r1,0(%r1) - ltgr %r1,%r1 - jz 2f # no SMT, skip mt_cycles calculation - .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15) - larl %r3,mt_cycles - ag %r3,__LC_PERCPU_OFFSET - la %r4,__SF_EMPTY+16(%r15) -1: lg %r0,0(%r3) - slg %r0,0(%r4) - alg %r0,64(%r4) - stg %r0,0(%r3) - la %r3,8(%r3) - la %r4,8(%r4) - brct %r1,1b - -2: mvc __CLOCK_IDLE_EXIT(8,%r2), \clock - mvc __TIMER_IDLE_EXIT(8,%r2), \timer - # account system time going idle - ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT - - lg %r13,__LC_STEAL_TIMER - alg %r13,__CLOCK_IDLE_ENTER(%r2) - slg %r13,__LC_LAST_UPDATE_CLOCK - stg %r13,__LC_STEAL_TIMER - - mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) - - lg %r13,__LC_SYSTEM_TIMER - alg %r13,__LC_LAST_UPDATE_TIMER - slg %r13,__TIMER_IDLE_ENTER(%r2) - stg %r13,__LC_SYSTEM_TIMER - mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) - - nihh %r8,0xfcfd # clear wait state and irq bits -3: lg %r14,__LC_ASYNC_STACK # are we already on the target stack? - slgr %r14,%r15 - srag %r14,%r14,STACK_SHIFT - jnz 5f - CHECK_STACK \savearea - aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j 6f -4: UPDATE_VTIME %r14,%r15,\timer - BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP -5: lg %r15,__LC_ASYNC_STACK # load async stack -6: la %r11,STACK_FRAME_OVERHEAD(%r15) - .endm - - .macro UPDATE_VTIME w1,w2,enter_timer - lg \w1,__LC_EXIT_TIMER - lg \w2,__LC_LAST_UPDATE_TIMER - slg \w1,\enter_timer - slg \w2,__LC_EXIT_TIMER - alg \w1,__LC_USER_TIMER - alg \w2,__LC_SYSTEM_TIMER - stg \w1,__LC_USER_TIMER - stg \w2,__LC_SYSTEM_TIMER - mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer - .endm - - .macro RESTORE_SM_CLEAR_PER - stg %r8,__LC_RETURN_PSW - ni __LC_RETURN_PSW,0xbf - ssm __LC_RETURN_PSW - .endm - - .macro ENABLE_INTS - stosm __SF_EMPTY(%r15),3 - .endm - - .macro ENABLE_INTS_TRACE - TRACE_IRQS_ON - ENABLE_INTS - .endm - - .macro DISABLE_INTS - stnsm __SF_EMPTY(%r15),0xfc - .endm - - .macro DISABLE_INTS_TRACE - DISABLE_INTS - TRACE_IRQS_OFF - .endm - .macro STCK savearea -#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES - .insn s,0xb27c0000,\savearea # store clock fast -#else - .insn s,0xb2050000,\savearea # store clock -#endif + ALTERNATIVE ".insn s,0xb2050000,\savearea", \ + ".insn s,0xb27c0000,\savearea", 25 .endm /* @@ -267,18 +129,17 @@ _LPP_OFFSET = __LC_LPP "jnz .+8; .long 0xb2e8d000", 82 .endm - GEN_BR_THUNK %r9 GEN_BR_THUNK %r14 - GEN_BR_THUNK %r14,%r11 + GEN_BR_THUNK %r14,%r13 .section .kprobes.text, "ax" .Ldummy: /* - * This nop exists only in order to avoid that __switch_to starts at + * This nop exists only in order to avoid that __bpon starts at * the beginning of the kprobes text section. In that case we would * have several symbols at the same address. E.g. objdump would take * an arbitrary symbol name when disassembling this code. - * With the added nop in between the __switch_to symbol is unique + * With the added nop in between the __bpon symbol is unique * again. */ nop 0 @@ -327,10 +188,6 @@ ENTRY(sie64a) stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0 mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags - TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ? 
- jno .Lsie_load_guest_gprs - brasl %r14,load_fpu_regs # load guest fp/vx regs -.Lsie_load_guest_gprs: lmg %r0,%r13,0(%r3) # load guest gprs 0-13 lg %r14,__LC_GMAP # get gmap pointer ltgr %r14,%r14 @@ -357,7 +214,7 @@ ENTRY(sie64a) # are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. # Other instructions between sie64a and .Lsie_done should not cause program # interrupts. So lets use 3 nops as a landing pad for all possible rewinds. -# See also .Lcleanup_sie +# See also .Lcleanup_sie_mcck/.Lcleanup_sie_int .Lrewind_pad6: nopr 7 .Lrewind_pad4: @@ -370,7 +227,6 @@ sie_exit: stmg %r0,%r13,0(%r14) # save guest gprs 0-13 xgr %r0,%r0 # clear guest registers to xgr %r1,%r1 # prevent speculative use - xgr %r2,%r2 xgr %r3,%r3 xgr %r4,%r4 xgr %r5,%r5 @@ -397,249 +253,68 @@ EXPORT_SYMBOL(sie_exit) */ ENTRY(system_call) - stpt __LC_SYNC_ENTER_TIMER + stpt __LC_SYS_ENTER_TIMER stmg %r8,%r15,__LC_SAVE_AREA_SYNC BPOFF - lg %r12,__LC_CURRENT - lghi %r14,_PIF_SYSCALL + lghi %r14,0 .Lsysc_per: lctlg %c1,%c1,__LC_KERNEL_ASCE - lghi %r13,__TASK_thread + lg %r12,__LC_CURRENT lg %r15,__LC_KERNEL_STACK - la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs - UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER - BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP - stmg %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC - mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW - mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC - stg %r14,__PT_FLAGS(%r11) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - ENABLE_INTS -.Lsysc_do_svc: + stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15) + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP # clear user controlled register to prevent speculative use xgr %r0,%r0 - # load address of system call table - lg %r10,__THREAD_sysc_table(%r13,%r12) - llgh %r8,__PT_INT_CODE+2(%r11) - slag %r8,%r8,3 # shift and test for svc 0 - jnz .Lsysc_nr_ok - # svc 0: system call number in %r1 - llgfr %r1,%r1 # clear high word in r1 - sth %r1,__PT_INT_CODE+2(%r11) - cghi %r1,NR_syscalls - jnl .Lsysc_nr_ok - slag %r8,%r1,3 -.Lsysc_nr_ok: - stg %r2,__PT_ORIG_GPR2(%r11) - stg %r7,STACK_FRAME_OVERHEAD(%r15) - lg %r9,0(%r8,%r10) # get system call add. - TSTMSK __TI_flags(%r12),_TIF_TRACE - jnz .Lsysc_tracesys - BASR_EX %r14,%r9 # call sys_xxxx - stg %r2,__PT_R2(%r11) # store return value - -.Lsysc_return: -#ifdef CONFIG_DEBUG_RSEQ - lgr %r2,%r11 - brasl %r14,rseq_syscall -#endif - LOCKDEP_SYS_EXIT -.Lsysc_tif: - DISABLE_INTS - TSTMSK __PT_FLAGS(%r11),_PIF_WORK - jnz .Lsysc_work - TSTMSK __TI_flags(%r12),_TIF_WORK - jnz .Lsysc_work # check for work - DEBUG_USER_ASCE + xgr %r1,%r1 + xgr %r4,%r4 + xgr %r5,%r5 + xgr %r6,%r6 + xgr %r7,%r7 + xgr %r8,%r8 + xgr %r9,%r9 + xgr %r10,%r10 + xgr %r11,%r11 + la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs + lgr %r3,%r14 + brasl %r14,__do_syscall lctlg %c1,%c1,__LC_USER_ASCE - BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP - TSTMSK __LC_CPU_FLAGS, _CIF_FPU - jz .Lsysc_skip_fpu - brasl %r14,load_fpu_regs -.Lsysc_skip_fpu: - mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) + mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP + lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) stpt __LC_EXIT_TIMER - lmg %r0,%r15,__PT_R0(%r11) b __LC_RETURN_LPSWE - -# -# One of the work bits is on. Find out which one. 
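
The assembly work-bit dispatcher that follows is removed wholesale: the
rewritten system_call above saves the registers, clears the user controlled
ones and branches straight to __do_syscall() in C, declared later in this
patch in entry.h. A rough sketch of the shape that entry point takes; the
body lives in the new arch/s390/kernel/syscall.c, which is outside this
hunk, so the details below are assumptions based on the declaration and on
the old assembly:

	void __do_syscall(struct pt_regs *regs, int per_trap)
	{
		enter_from_user_mode(regs);
		/* complete pt_regs from lowcore, as the old asm did */
		regs->psw = S390_lowcore.svc_old_psw;
		regs->orig_gpr2 = regs->gprs[2];
		update_timer_sys();		/* new vtime.h helper */
		if (per_trap)			/* %r14 == 1: .Lpgm_svcper */
			set_thread_flag(TIF_PER_TRAP);
		/* ... syscall table dispatch and restart handling ... */
		exit_to_user_mode();		/* generic exit work loop */
	}
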
-# -.Lsysc_work: - ENABLE_INTS - TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED - jo .Lsysc_reschedule - TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART - jo .Lsysc_syscall_restart -#ifdef CONFIG_UPROBES - TSTMSK __TI_flags(%r12),_TIF_UPROBE - jo .Lsysc_uprobe_notify -#endif - TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE - jo .Lsysc_guarded_storage - TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP - jo .Lsysc_singlestep -#ifdef CONFIG_LIVEPATCH - TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING - jo .Lsysc_patch_pending # handle live patching just before - # signals and possible syscall restart -#endif - TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART - jo .Lsysc_syscall_restart - TSTMSK __TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL) - jnz .Lsysc_sigpending - TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME - jo .Lsysc_notify_resume - j .Lsysc_return - -# -# _TIF_NEED_RESCHED is set, call schedule -# -.Lsysc_reschedule: - larl %r14,.Lsysc_return - jg schedule - -# -# _TIF_SIGPENDING is set, call do_signal -# -.Lsysc_sigpending: - lgr %r2,%r11 # pass pointer to pt_regs - brasl %r14,do_signal - TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL - jno .Lsysc_return -.Lsysc_do_syscall: - lghi %r13,__TASK_thread - lmg %r2,%r7,__PT_R2(%r11) # load svc arguments - lghi %r1,0 # svc 0 returns -ENOSYS - j .Lsysc_do_svc - -# -# _TIF_NOTIFY_RESUME is set, call do_notify_resume -# -.Lsysc_notify_resume: - lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,.Lsysc_return - jg do_notify_resume - -# -# _TIF_UPROBE is set, call uprobe_notify_resume -# -#ifdef CONFIG_UPROBES -.Lsysc_uprobe_notify: - lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,.Lsysc_return - jg uprobe_notify_resume -#endif - -# -# _TIF_GUARDED_STORAGE is set, call guarded_storage_load -# -.Lsysc_guarded_storage: - lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,.Lsysc_return - jg gs_load_bc_cb -# -# _TIF_PATCH_PENDING is set, call klp_update_patch_state -# -#ifdef CONFIG_LIVEPATCH -.Lsysc_patch_pending: - lg %r2,__LC_CURRENT # pass pointer to task struct - larl %r14,.Lsysc_return - jg klp_update_patch_state -#endif - -# -# _PIF_PER_TRAP is set, call do_per_trap -# -.Lsysc_singlestep: - ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP - lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,.Lsysc_return - jg do_per_trap - -# -# _PIF_SYSCALL_RESTART is set, repeat the current system call -# -.Lsysc_syscall_restart: - ni __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART - lmg %r1,%r7,__PT_R1(%r11) # load svc arguments - lg %r2,__PT_ORIG_GPR2(%r11) - j .Lsysc_do_svc - -# -# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before -# and after the system call -# -.Lsysc_tracesys: - lgr %r2,%r11 # pass pointer to pt_regs - la %r3,0 - llgh %r0,__PT_INT_CODE+2(%r11) - stg %r0,__PT_R2(%r11) - brasl %r14,do_syscall_trace_enter - lghi %r0,NR_syscalls - clgr %r0,%r2 - jnh .Lsysc_tracenogo - sllg %r8,%r2,3 - lg %r9,0(%r8,%r10) - lmg %r3,%r7,__PT_R3(%r11) - stg %r7,STACK_FRAME_OVERHEAD(%r15) - lg %r2,__PT_ORIG_GPR2(%r11) - BASR_EX %r14,%r9 # call sys_xxx - stg %r2,__PT_R2(%r11) # store return value -.Lsysc_tracenogo: - TSTMSK __TI_flags(%r12),_TIF_TRACE - jz .Lsysc_return - lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,.Lsysc_return - jg do_syscall_trace_exit ENDPROC(system_call) # # a new process exits the kernel with ret_from_fork # ENTRY(ret_from_fork) - la %r11,STACK_FRAME_OVERHEAD(%r15) - lg %r12,__LC_CURRENT - brasl %r14,schedule_tail - tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 
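
ret_from_fork shrinks the same way. Based on the __ret_from_fork()
declaration added to entry.h below and on the old assembly here (entry
point in gpr 9, its argument in gpr 10 for kernel threads), the C helper
looks roughly like this; again a sketch, not the actual body:

	void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs)
	{
		schedule_tail(prev);

		if (!user_mode(regs)) {
			/* kernel thread: run the function copy_thread()
			 * stashed in gprs 9 and 10 */
			void (*fn)(void *) = (void *)regs->gprs[9];

			fn((void *)regs->gprs[10]);
		}
		exit_to_user_mode();
	}
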
- jne .Lsysc_tracenogo - # it's a kernel thread - lmg %r9,%r10,__PT_R9(%r11) # load gprs - la %r2,0(%r10) - BASR_EX %r14,%r9 - j .Lsysc_tracenogo + lgr %r3,%r11 + brasl %r14,__ret_from_fork + lctlg %c1,%c1,__LC_USER_ASCE + mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP + lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) + stpt __LC_EXIT_TIMER + b __LC_RETURN_LPSWE ENDPROC(ret_from_fork) -ENTRY(kernel_thread_starter) - la %r2,0(%r10) - BASR_EX %r14,%r9 - j .Lsysc_tracenogo -ENDPROC(kernel_thread_starter) - /* * Program check handler routine */ ENTRY(pgm_check_handler) - stpt __LC_SYNC_ENTER_TIMER + stpt __LC_SYS_ENTER_TIMER BPOFF stmg %r8,%r15,__LC_SAVE_AREA_SYNC - lg %r10,__LC_LAST_BREAK - srag %r11,%r10,12 - jnz 0f - /* if __LC_LAST_BREAK is < 4096, it contains one of - * the lpswe addresses in lowcore. Set it to 1 (initial state) - * to prevent leaking that address to userspace. - */ - lghi %r10,1 -0: lg %r12,__LC_CURRENT - lghi %r11,0 + lg %r12,__LC_CURRENT + lghi %r10,0 lmg %r8,%r9,__LC_PGM_OLD_PSW tmhh %r8,0x0001 # coming from user space? jno .Lpgm_skip_asce lctlg %c1,%c1,__LC_KERNEL_ASCE - j 3f + j 3f # -> fault in user space .Lpgm_skip_asce: #if IS_ENABLED(CONFIG_KVM) # cleanup critical section for program checks in sie64a @@ -653,7 +328,7 @@ ENTRY(pgm_check_handler) ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce larl %r9,sie_exit # skip forward to sie_exit - lghi %r11,_PIF_GUEST_FAULT + lghi %r10,_PIF_GUEST_FAULT #endif 1: tmhh %r8,0x4000 # PER bit set in old PSW ? jnz 2f # -> enabled, can't be a double fault @@ -661,109 +336,84 @@ ENTRY(pgm_check_handler) jnz .Lpgm_svcper # -> single stepped svc 2: CHECK_STACK __LC_SAVE_AREA_SYNC aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - # CHECK_VMAP_STACK branches to stack_overflow or 5f - CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f -3: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER - BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP + # CHECK_VMAP_STACK branches to stack_overflow or 4f + CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f +3: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP lg %r15,__LC_KERNEL_STACK - lgr %r14,%r12 - aghi %r14,__TASK_thread # pointer to thread_struct - lghi %r13,__LC_PGM_TDB - tm __LC_PGM_ILC+2,0x02 # check for transaction abort - jz 4f - mvc __THREAD_trap_tdb(256,%r14),0(%r13) -4: stg %r10,__THREAD_last_break(%r14) -5: lgr %r13,%r11 - la %r11,STACK_FRAME_OVERHEAD(%r15) +4: la %r11,STACK_FRAME_OVERHEAD(%r15) + stg %r10,__PT_FLAGS(%r11) + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC + stmg %r8,%r9,__PT_PSW(%r11) + # clear user controlled registers to prevent speculative use xgr %r0,%r0 xgr %r1,%r1 - xgr %r2,%r2 xgr %r3,%r3 xgr %r4,%r4 xgr %r5,%r5 xgr %r6,%r6 xgr %r7,%r7 - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC - stmg %r8,%r9,__PT_PSW(%r11) - mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC - mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE - stg %r13,__PT_FLAGS(%r11) - stg %r10,__PT_ARGS(%r11) - tm __LC_PGM_ILC+3,0x80 # check for per exception - jz 6f - tmhh %r8,0x0001 # kernel per event ? 
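
The PER bookkeeping below (per_address, per_cause, per_paid, the
pgm_check_table dispatch and the return path) moves into C as
__do_pgm_check(), also declared in entry.h further down. A sketch of its
shape, with the lowcore field names assumed from the mvc instructions in
the old code:

	void __do_pgm_check(struct pt_regs *regs)
	{
		irqentry_state_t state = irqentry_enter(regs);

		if (user_mode(regs))
			update_timer_sys();
		regs->int_code = S390_lowcore.pgm_int_code;	/* __LC_PGM_ILC */
		regs->int_parm_long = S390_lowcore.trans_exc_code;
		/* a PER event in user space is deferred to the exit path */
		if ((regs->int_code & 0x80) && user_mode(regs))
			set_thread_flag(TIF_PER_TRAP);
		/* ... pgm_check_table[regs->int_code & 0x7f](regs) ... */
		irqentry_exit(regs, state);
	}
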
- jz .Lpgm_kprobe - oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP - mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS - mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE - mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID -6: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - RESTORE_SM_CLEAR_PER - larl %r1,pgm_check_table - llgh %r10,__PT_INT_CODE+2(%r11) - nill %r10,0x007f - sll %r10,3 - je .Lpgm_return - lg %r9,0(%r10,%r1) # load address of handler routine - lgr %r2,%r11 # pass pointer to pt_regs - BASR_EX %r14,%r9 # branch to interrupt-handler -.Lpgm_return: - LOCKDEP_SYS_EXIT - tm __PT_PSW+1(%r11),0x01 # returning to user ? - jno .Lpgm_restore - TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL - jo .Lsysc_do_syscall - j .Lsysc_tif -.Lpgm_restore: - DISABLE_INTS - TSTMSK __LC_CPU_FLAGS, _CIF_FPU - jz .Lpgm_skip_fpu - brasl %r14,load_fpu_regs -.Lpgm_skip_fpu: - mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) + lgr %r2,%r11 + brasl %r14,__do_pgm_check + tmhh %r8,0x0001 # returning to user space? + jno .Lpgm_exit_kernel + lctlg %c1,%c1,__LC_USER_ASCE + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP stpt __LC_EXIT_TIMER - lmg %r0,%r15,__PT_R0(%r11) +.Lpgm_exit_kernel: + mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) + lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) b __LC_RETURN_LPSWE # -# PER event in supervisor state, must be kprobes -# -.Lpgm_kprobe: - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - RESTORE_SM_CLEAR_PER - lgr %r2,%r11 # pass pointer to pt_regs - brasl %r14,do_per_trap - j .Lpgm_return - -# # single stepped system call # .Lpgm_svcper: mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW larl %r14,.Lsysc_per stg %r14,__LC_RETURN_PSW+8 - lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP + lghi %r14,1 lpswe __LC_RETURN_PSW # branch to .Lsysc_per ENDPROC(pgm_check_handler) /* - * IO interrupt handler routine + * Interrupt handler macro used for external and IO interrupts. */ -ENTRY(io_int_handler) +.macro INT_HANDLER name,lc_old_psw,handler +ENTRY(\name) STCK __LC_INT_CLOCK - stpt __LC_ASYNC_ENTER_TIMER + stpt __LC_SYS_ENTER_TIMER BPOFF stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r12,__LC_CURRENT - lmg %r8,%r9,__LC_IO_OLD_PSW - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK + lmg %r8,%r9,\lc_old_psw + tmhh %r8,0x0001 # interrupting from user ? + jnz 1f +#if IS_ENABLED(CONFIG_KVM) + lgr %r14,%r9 + larl %r13,.Lsie_gmap + slgr %r14,%r13 + lghi %r13,.Lsie_done - .Lsie_gmap + clgr %r14,%r13 + jhe 0f + brasl %r14,.Lcleanup_sie_int +#endif +0: CHECK_STACK __LC_SAVE_AREA_ASYNC + lgr %r11,%r15 + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + stg %r11,__SF_BACKCHAIN(%r15) + j 2f +1: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP + lctlg %c1,%c1,__LC_KERNEL_ASCE + lg %r15,__LC_KERNEL_STACK + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) +2: la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) # clear user controlled registers to prevent speculative use xgr %r0,%r0 xgr %r1,%r1 - xgr %r2,%r2 xgr %r3,%r3 xgr %r4,%r4 xgr %r5,%r5 @@ -772,323 +422,49 @@ ENTRY(io_int_handler) xgr %r10,%r10 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) - tm __PT_PSW+1(%r11),0x01 # coming from user space? - jno .Lio_skip_asce + tm %r8,0x0001 # coming from user space? + jno 1f lctlg %c1,%c1,__LC_KERNEL_ASCE -.Lio_skip_asce: - mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID - xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - TRACE_IRQS_OFF -.Lio_loop: - lgr %r2,%r11 # pass pointer to pt_regs - lghi %r3,IO_INTERRUPT - tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? 
- jz .Lio_call - lghi %r3,THIN_INTERRUPT -.Lio_call: - brasl %r14,do_IRQ - TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR - jz .Lio_return - tpi 0 - jz .Lio_return - mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID - j .Lio_loop -.Lio_return: - LOCKDEP_SYS_EXIT - TSTMSK __TI_flags(%r12),_TIF_WORK - jnz .Lio_work # there is work to do (signals etc.) - TSTMSK __LC_CPU_FLAGS,_CIF_WORK - jnz .Lio_work -.Lio_restore: - TRACE_IRQS_ON +1: lgr %r2,%r11 # pass pointer to pt_regs + brasl %r14,\handler mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) - tm __PT_PSW+1(%r11),0x01 # returning to user ? - jno .Lio_exit_kernel - DEBUG_USER_ASCE + tmhh %r8,0x0001 # returning to user ? + jno 2f lctlg %c1,%c1,__LC_USER_ASCE BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP stpt __LC_EXIT_TIMER -.Lio_exit_kernel: - lmg %r0,%r15,__PT_R0(%r11) +2: lmg %r0,%r15,__PT_R0(%r11) b __LC_RETURN_LPSWE -.Lio_done: +ENDPROC(\name) +.endm -# -# There is work todo, find out in which context we have been interrupted: -# 1) if we return to user space we can do all _TIF_WORK work -# 2) if we return to kernel code and kvm is enabled check if we need to -# modify the psw to leave SIE -# 3) if we return to kernel code and preemptive scheduling is enabled check -# the preemption counter and if it is zero call preempt_schedule_irq -# Before any work can be done, a switch to the kernel stack is required. -# -.Lio_work: - tm __PT_PSW+1(%r11),0x01 # returning to user ? - jo .Lio_work_user # yes -> do resched & signal -#ifdef CONFIG_PREEMPTION - # check for preemptive scheduling - icm %r0,15,__LC_PREEMPT_COUNT - jnz .Lio_restore # preemption is disabled - TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED - jno .Lio_restore - # switch to kernel stack - lg %r1,__PT_R15(%r11) - aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) - xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) - la %r11,STACK_FRAME_OVERHEAD(%r1) - lgr %r15,%r1 - brasl %r14,preempt_schedule_irq - j .Lio_return -#else - j .Lio_restore -#endif - -# -# Need to do work before returning to userspace, switch to kernel stack -# -.Lio_work_user: - lg %r1,__LC_KERNEL_STACK - mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) - xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) - la %r11,STACK_FRAME_OVERHEAD(%r1) - lgr %r15,%r1 - -# -# One of the work bits is on. Find out which one. -# - TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED - jo .Lio_reschedule -#ifdef CONFIG_LIVEPATCH - TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING - jo .Lio_patch_pending -#endif - TSTMSK __TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL) - jnz .Lio_sigpending - TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME - jo .Lio_notify_resume - TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE - jo .Lio_guarded_storage - TSTMSK __LC_CPU_FLAGS,_CIF_FPU - jo .Lio_vxrs - j .Lio_return - -# -# CIF_FPU is set, restore floating-point controls and floating-point registers. 
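
The lazy FPU reload that used to follow here in assembly (load_fpu_regs)
is reimplemented in C in arch/s390/kernel/fpu.c later in this patch. The
CIF_FPU protocol itself does not change; kernel code that needs the vector
unit still brackets its use with the existing s390 kernel FPU API, roughly
like this:

	#include <asm/fpu/api.h>

	static void do_vector_work(void)
	{
		struct kernel_fpu state;

		/* saves the user FPU/VX state if needed, sets CIF_FPU */
		kernel_fpu_begin(&state, KERNEL_VXR);
		/* ... use the vector registers ... */
		kernel_fpu_end(&state, KERNEL_VXR);
		/* the user registers are reloaded lazily on the way
		 * back to user space */
	}
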
-# -.Lio_vxrs: - larl %r14,.Lio_return - jg load_fpu_regs - -# -# _TIF_GUARDED_STORAGE is set, call guarded_storage_load -# -.Lio_guarded_storage: - ENABLE_INTS_TRACE - lgr %r2,%r11 # pass pointer to pt_regs - brasl %r14,gs_load_bc_cb - DISABLE_INTS_TRACE - j .Lio_return - -# -# _TIF_NEED_RESCHED is set, call schedule -# -.Lio_reschedule: - ENABLE_INTS_TRACE - brasl %r14,schedule # call scheduler - DISABLE_INTS_TRACE - j .Lio_return - -# -# _TIF_PATCH_PENDING is set, call klp_update_patch_state -# -#ifdef CONFIG_LIVEPATCH -.Lio_patch_pending: - lg %r2,__LC_CURRENT # pass pointer to task struct - larl %r14,.Lio_return - jg klp_update_patch_state -#endif - -# -# _TIF_SIGPENDING or is set, call do_signal -# -.Lio_sigpending: - ENABLE_INTS_TRACE - lgr %r2,%r11 # pass pointer to pt_regs - brasl %r14,do_signal - DISABLE_INTS_TRACE - j .Lio_return - -# -# _TIF_NOTIFY_RESUME or is set, call do_notify_resume -# -.Lio_notify_resume: - ENABLE_INTS_TRACE - lgr %r2,%r11 # pass pointer to pt_regs - brasl %r14,do_notify_resume - DISABLE_INTS_TRACE - j .Lio_return -ENDPROC(io_int_handler) - -/* - * External interrupt handler routine - */ -ENTRY(ext_int_handler) - STCK __LC_INT_CLOCK - stpt __LC_ASYNC_ENTER_TIMER - BPOFF - stmg %r8,%r15,__LC_SAVE_AREA_ASYNC - lg %r12,__LC_CURRENT - lmg %r8,%r9,__LC_EXT_OLD_PSW - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK - stmg %r0,%r7,__PT_R0(%r11) - # clear user controlled registers to prevent speculative use - xgr %r0,%r0 - xgr %r1,%r1 - xgr %r2,%r2 - xgr %r3,%r3 - xgr %r4,%r4 - xgr %r5,%r5 - xgr %r6,%r6 - xgr %r7,%r7 - xgr %r10,%r10 - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC - stmg %r8,%r9,__PT_PSW(%r11) - tm __PT_PSW+1(%r11),0x01 # coming from user space? - jno .Lext_skip_asce - lctlg %c1,%c1,__LC_KERNEL_ASCE -.Lext_skip_asce: - lghi %r1,__LC_EXT_PARAMS2 - mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR - mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS - mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) - xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - TRACE_IRQS_OFF - lgr %r2,%r11 # pass pointer to pt_regs - lghi %r3,EXT_INTERRUPT - brasl %r14,do_IRQ - j .Lio_return -ENDPROC(ext_int_handler) +INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq +INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq /* * Load idle PSW. */ ENTRY(psw_idle) stg %r3,__SF_EMPTY(%r15) - larl %r1,.Lpsw_idle_exit + larl %r1,psw_idle_exit stg %r1,__SF_EMPTY+8(%r15) larl %r1,smp_cpu_mtid llgf %r1,0(%r1) ltgr %r1,%r1 jz .Lpsw_idle_stcctm - .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) + .insn rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2) .Lpsw_idle_stcctm: oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT BPON STCK __CLOCK_IDLE_ENTER(%r2) stpt __TIMER_IDLE_ENTER(%r2) lpswe __SF_EMPTY(%r15) -.Lpsw_idle_exit: +.globl psw_idle_exit +psw_idle_exit: BR_EX %r14 ENDPROC(psw_idle) /* - * Store floating-point controls and floating-point or vector register - * depending whether the vector facility is available. A critical section - * cleanup assures that the registers are stored even if interrupted for - * some other work. The CIF_FPU flag is set to trigger a lazy restore - * of the register contents at return from io or a system call. 
- */ -ENTRY(save_fpu_regs) - stnsm __SF_EMPTY(%r15),0xfc - lg %r2,__LC_CURRENT - aghi %r2,__TASK_thread - TSTMSK __LC_CPU_FLAGS,_CIF_FPU - jo .Lsave_fpu_regs_exit - stfpc __THREAD_FPU_fpc(%r2) - lg %r3,__THREAD_FPU_regs(%r2) - TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX - jz .Lsave_fpu_regs_fp # no -> store FP regs - VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) - VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3) - j .Lsave_fpu_regs_done # -> set CIF_FPU flag -.Lsave_fpu_regs_fp: - std 0,0(%r3) - std 1,8(%r3) - std 2,16(%r3) - std 3,24(%r3) - std 4,32(%r3) - std 5,40(%r3) - std 6,48(%r3) - std 7,56(%r3) - std 8,64(%r3) - std 9,72(%r3) - std 10,80(%r3) - std 11,88(%r3) - std 12,96(%r3) - std 13,104(%r3) - std 14,112(%r3) - std 15,120(%r3) -.Lsave_fpu_regs_done: - oi __LC_CPU_FLAGS+7,_CIF_FPU -.Lsave_fpu_regs_exit: - ssm __SF_EMPTY(%r15) - BR_EX %r14 -.Lsave_fpu_regs_end: -ENDPROC(save_fpu_regs) -EXPORT_SYMBOL(save_fpu_regs) - -/* - * Load floating-point controls and floating-point or vector registers. - * A critical section cleanup assures that the register contents are - * loaded even if interrupted for some other work. - * - * There are special calling conventions to fit into sysc and io return work: - * %r15: <kernel stack> - * The function requires: - * %r4 - */ -load_fpu_regs: - stnsm __SF_EMPTY(%r15),0xfc - lg %r4,__LC_CURRENT - aghi %r4,__TASK_thread - TSTMSK __LC_CPU_FLAGS,_CIF_FPU - jno .Lload_fpu_regs_exit - lfpc __THREAD_FPU_fpc(%r4) - TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX - lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area - jz .Lload_fpu_regs_fp # -> no VX, load FP regs - VLM %v0,%v15,0,%r4 - VLM %v16,%v31,256,%r4 - j .Lload_fpu_regs_done -.Lload_fpu_regs_fp: - ld 0,0(%r4) - ld 1,8(%r4) - ld 2,16(%r4) - ld 3,24(%r4) - ld 4,32(%r4) - ld 5,40(%r4) - ld 6,48(%r4) - ld 7,56(%r4) - ld 8,64(%r4) - ld 9,72(%r4) - ld 10,80(%r4) - ld 11,88(%r4) - ld 12,96(%r4) - ld 13,104(%r4) - ld 14,112(%r4) - ld 15,120(%r4) -.Lload_fpu_regs_done: - ni __LC_CPU_FLAGS+7,255-_CIF_FPU -.Lload_fpu_regs_exit: - ssm __SF_EMPTY(%r15) - BR_EX %r14 -.Lload_fpu_regs_end: -ENDPROC(load_fpu_regs) - -/* * Machine check handler routines */ ENTRY(mcck_int_handler) @@ -1146,11 +522,8 @@ ENTRY(mcck_int_handler) mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID jo 3f - la %r14,__LC_SYNC_ENTER_TIMER - clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER - jl 0f - la %r14,__LC_ASYNC_ENTER_TIMER -0: clc 0(8,%r14),__LC_EXIT_TIMER + la %r14,__LC_SYS_ENTER_TIMER + clc 0(8,%r14),__LC_EXIT_TIMER jl 1f la %r14,__LC_EXIT_TIMER 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER @@ -1165,14 +538,32 @@ ENTRY(mcck_int_handler) TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID jno .Lmcck_panic 4: ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off - SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER,__LC_MCCK_CLOCK + tmhh %r8,0x0001 # interrupting from user ? 
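+# coming from user: take the BP mitigation entry path below; coming
+# from the kernel: check for an interrupted SIE context first, then
+# switch to the new dedicated machine check stack (__LC_MCCK_STACK)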
+ jnz .Lmcck_user +#if IS_ENABLED(CONFIG_KVM) + lgr %r14,%r9 + larl %r13,.Lsie_gmap + slgr %r14,%r13 + lghi %r13,.Lsie_done - .Lsie_gmap + clgr %r14,%r13 + jhe .Lmcck_stack + brasl %r14,.Lcleanup_sie_mcck +#endif + j .Lmcck_stack +.Lmcck_user: + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP +.Lmcck_stack: + lg %r15,__LC_MCCK_STACK .Lmcck_skip: + la %r11,STACK_FRAME_OVERHEAD(%r15) + stctg %c1,%c1,__PT_CR1(%r11) + lctlg %c1,%c1,__LC_KERNEL_ASCE + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lghi %r14,__LC_GPREGS_SAVE_AREA+64 stmg %r0,%r7,__PT_R0(%r11) # clear user controlled registers to prevent speculative use xgr %r0,%r0 xgr %r1,%r1 - xgr %r2,%r2 xgr %r3,%r3 xgr %r4,%r4 xgr %r5,%r5 @@ -1181,9 +572,6 @@ ENTRY(mcck_int_handler) xgr %r10,%r10 mvc __PT_R8(64,%r11),0(%r14) stmg %r8,%r9,__PT_PSW(%r11) - la %r14,4095 - mvc __PT_CR1(8,%r11),__LC_CREGS_SAVE_AREA-4095+8(%r14) - lctlg %c1,%c1,__LC_KERNEL_ASCE xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs @@ -1195,9 +583,7 @@ ENTRY(mcck_int_handler) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) lgr %r15,%r1 - TRACE_IRQS_OFF brasl %r14,s390_handle_mcck - TRACE_IRQS_ON .Lmcck_return: lctlg %c1,%c1,__PT_CR1(%r11) lmg %r0,%r10,__PT_R0(%r11) @@ -1211,7 +597,6 @@ ENTRY(mcck_int_handler) .Lmcck_panic: lg %r15,__LC_NODAT_STACK - la %r11,STACK_FRAME_OVERHEAD(%r15) j .Lmcck_skip ENDPROC(mcck_int_handler) @@ -1264,21 +649,20 @@ ENDPROC(stack_overflow) #endif #if IS_ENABLED(CONFIG_KVM) -.Lcleanup_sie: - cghi %r11,__LC_SAVE_AREA_ASYNC #Is this in normal interrupt? - je 1f +.Lcleanup_sie_mcck: larl %r13,.Lsie_entry slgr %r9,%r13 larl %r13,.Lsie_skip clgr %r9,%r13 - jh 1f + jh .Lcleanup_sie_int oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST -1: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) +.Lcleanup_sie_int: + BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE lctlg %c1,%c1,__LC_KERNEL_ASCE larl %r9,sie_exit # skip forward to sie_exit - BR_EX %r14,%r11 + BR_EX %r14,%r13 #endif .section .rodata, "a" diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index a16c33b32ab0..3d0c0ac5c20e 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -17,8 +17,9 @@ void io_int_handler(void); void mcck_int_handler(void); void restart_int_handler(void); -asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); -asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); +void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs); +void __do_pgm_check(struct pt_regs *regs); +void __do_syscall(struct pt_regs *regs, int per_trap); void do_protection_exception(struct pt_regs *regs); void do_dat_exception(struct pt_regs *regs); @@ -48,9 +49,7 @@ void translation_exception(struct pt_regs *regs); void vector_exception(struct pt_regs *regs); void monitor_event_exception(struct pt_regs *regs); -void do_per_trap(struct pt_regs *regs); void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str); -void syscall_trace(struct pt_regs *regs, int entryexit); void kernel_stack_overflow(struct pt_regs * regs); void do_signal(struct pt_regs *regs); void handle_signal32(struct ksignal *ksig, sigset_t *oldset, @@ -58,7 +57,8 @@ void handle_signal32(struct ksignal *ksig, sigset_t *oldset, void do_notify_resume(struct pt_regs *regs); void __init init_IRQ(void); -void do_IRQ(struct pt_regs *regs, int 
irq); +void do_io_irq(struct pt_regs *regs); +void do_ext_irq(struct pt_regs *regs); void do_restart(void); void __init startup_init(void); void die(struct pt_regs *regs, const char *str); @@ -82,8 +82,6 @@ long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user DECLARE_PER_CPU(u64, mt_cycles[8]); -void gs_load_bc_cb(struct pt_regs *regs); - unsigned long stack_alloc(void); void stack_free(unsigned long stack); diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c index 0da378e2eb25..d864c9a325e2 100644 --- a/arch/s390/kernel/fpu.c +++ b/arch/s390/kernel/fpu.c @@ -175,3 +175,91 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags) : "1", "cc"); } EXPORT_SYMBOL(__kernel_fpu_end); + +void __load_fpu_regs(void) +{ + struct fpu *state = ¤t->thread.fpu; + unsigned long *regs = current->thread.fpu.regs; + + asm volatile("lfpc %0" : : "Q" (state->fpc)); + if (likely(MACHINE_HAS_VX)) { + asm volatile("lgr 1,%0\n" + "VLM 0,15,0,1\n" + "VLM 16,31,256,1\n" + : + : "d" (regs) + : "1", "cc", "memory"); + } else { + asm volatile("ld 0,%0" : : "Q" (regs[0])); + asm volatile("ld 1,%0" : : "Q" (regs[1])); + asm volatile("ld 2,%0" : : "Q" (regs[2])); + asm volatile("ld 3,%0" : : "Q" (regs[3])); + asm volatile("ld 4,%0" : : "Q" (regs[4])); + asm volatile("ld 5,%0" : : "Q" (regs[5])); + asm volatile("ld 6,%0" : : "Q" (regs[6])); + asm volatile("ld 7,%0" : : "Q" (regs[7])); + asm volatile("ld 8,%0" : : "Q" (regs[8])); + asm volatile("ld 9,%0" : : "Q" (regs[9])); + asm volatile("ld 10,%0" : : "Q" (regs[10])); + asm volatile("ld 11,%0" : : "Q" (regs[11])); + asm volatile("ld 12,%0" : : "Q" (regs[12])); + asm volatile("ld 13,%0" : : "Q" (regs[13])); + asm volatile("ld 14,%0" : : "Q" (regs[14])); + asm volatile("ld 15,%0" : : "Q" (regs[15])); + } + clear_cpu_flag(CIF_FPU); +} +EXPORT_SYMBOL(__load_fpu_regs); + +void load_fpu_regs(void) +{ + raw_local_irq_disable(); + __load_fpu_regs(); + raw_local_irq_enable(); +} +EXPORT_SYMBOL(load_fpu_regs); + +void save_fpu_regs(void) +{ + unsigned long flags, *regs; + struct fpu *state; + + local_irq_save(flags); + + if (test_cpu_flag(CIF_FPU)) + goto out; + + state = ¤t->thread.fpu; + regs = current->thread.fpu.regs; + + asm volatile("stfpc %0" : "=Q" (state->fpc)); + if (likely(MACHINE_HAS_VX)) { + asm volatile("lgr 1,%0\n" + "VSTM 0,15,0,1\n" + "VSTM 16,31,256,1\n" + : + : "d" (regs) + : "1", "cc", "memory"); + } else { + asm volatile("std 0,%0" : "=Q" (regs[0])); + asm volatile("std 1,%0" : "=Q" (regs[1])); + asm volatile("std 2,%0" : "=Q" (regs[2])); + asm volatile("std 3,%0" : "=Q" (regs[3])); + asm volatile("std 4,%0" : "=Q" (regs[4])); + asm volatile("std 5,%0" : "=Q" (regs[5])); + asm volatile("std 6,%0" : "=Q" (regs[6])); + asm volatile("std 7,%0" : "=Q" (regs[7])); + asm volatile("std 8,%0" : "=Q" (regs[8])); + asm volatile("std 9,%0" : "=Q" (regs[9])); + asm volatile("std 10,%0" : "=Q" (regs[10])); + asm volatile("std 11,%0" : "=Q" (regs[11])); + asm volatile("std 12,%0" : "=Q" (regs[12])); + asm volatile("std 13,%0" : "=Q" (regs[13])); + asm volatile("std 14,%0" : "=Q" (regs[14])); + asm volatile("std 15,%0" : "=Q" (regs[15])); + } + set_cpu_flag(CIF_FPU); +out: + local_irq_restore(flags); +} +EXPORT_SYMBOL(save_fpu_regs); diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index a5d4d80d6ede..812073ea073e 100644 --- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c @@ -14,12 +14,36 @@ #include <linux/cpu.h> #include <linux/sched/cputime.h> #include <trace/events/power.h> +#include <asm/cpu_mf.h> #include 
<asm/nmi.h> #include <asm/smp.h> #include "entry.h" static DEFINE_PER_CPU(struct s390_idle_data, s390_idle); +void account_idle_time_irq(void) +{ + struct s390_idle_data *idle = this_cpu_ptr(&s390_idle); + u64 cycles_new[8]; + int i; + + clear_cpu_flag(CIF_ENABLED_WAIT); + if (smp_cpu_mtid) { + stcctm(MT_DIAG, smp_cpu_mtid, cycles_new); + for (i = 0; i < smp_cpu_mtid; i++) + this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]); + } + + idle->clock_idle_exit = S390_lowcore.int_clock; + idle->timer_idle_exit = S390_lowcore.sys_enter_timer; + + S390_lowcore.steal_timer += idle->clock_idle_enter - S390_lowcore.last_update_clock; + S390_lowcore.last_update_clock = idle->clock_idle_exit; + + S390_lowcore.system_timer += S390_lowcore.last_update_timer - idle->timer_idle_enter; + S390_lowcore.last_update_timer = idle->timer_idle_exit; +} + void arch_cpu_idle(void) { struct s390_idle_data *idle = this_cpu_ptr(&s390_idle); diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index f8a8b9428ae2..601c21791338 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -21,12 +21,14 @@ #include <linux/init.h> #include <linux/cpu.h> #include <linux/irq.h> +#include <linux/entry-common.h> #include <asm/irq_regs.h> #include <asm/cputime.h> #include <asm/lowcore.h> #include <asm/irq.h> #include <asm/hw_irq.h> #include <asm/stacktrace.h> +#include <asm/softirq_stack.h> #include "entry.h" DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); @@ -95,19 +97,97 @@ static const struct irq_class irqclass_sub_desc[] = { {.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"}, }; -void do_IRQ(struct pt_regs *regs, int irq) +static void do_IRQ(struct pt_regs *regs, int irq) { - struct pt_regs *old_regs; - - old_regs = set_irq_regs(regs); - irq_enter(); if (tod_after_eq(S390_lowcore.int_clock, S390_lowcore.clock_comparator)) /* Serve timer interrupts first. 
*/ clock_comparator_work(); generic_handle_irq(irq); +} + +static int on_async_stack(void) +{ + unsigned long frame = current_frame_address(); + + return !!!((S390_lowcore.async_stack - frame) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)); +} + +static void do_irq_async(struct pt_regs *regs, int irq) +{ + if (on_async_stack()) + do_IRQ(regs, irq); + else + CALL_ON_STACK(do_IRQ, S390_lowcore.async_stack, 2, regs, irq); +} + +static int irq_pending(struct pt_regs *regs) +{ + int cc; + + asm volatile("tpi 0\n" + "ipm %0" : "=d" (cc) : : "cc"); + return cc >> 28; +} + +void noinstr do_io_irq(struct pt_regs *regs) +{ + irqentry_state_t state = irqentry_enter(regs); + struct pt_regs *old_regs = set_irq_regs(regs); + int from_idle; + + irq_enter(); + + if (user_mode(regs)) + update_timer_sys(); + + from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit; + if (from_idle) + account_idle_time_irq(); + + do { + memcpy(®s->int_code, &S390_lowcore.subchannel_id, 12); + if (S390_lowcore.io_int_word & BIT(31)) + do_irq_async(regs, THIN_INTERRUPT); + else + do_irq_async(regs, IO_INTERRUPT); + } while (MACHINE_IS_LPAR && irq_pending(regs)); + + irq_exit(); + set_irq_regs(old_regs); + irqentry_exit(regs, state); + + if (from_idle) + regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT); +} + +void noinstr do_ext_irq(struct pt_regs *regs) +{ + irqentry_state_t state = irqentry_enter(regs); + struct pt_regs *old_regs = set_irq_regs(regs); + int from_idle; + + irq_enter(); + + if (user_mode(regs)) + update_timer_sys(); + + memcpy(®s->int_code, &S390_lowcore.ext_cpu_addr, 4); + regs->int_parm = S390_lowcore.ext_params; + regs->int_parm_long = *(unsigned long *)S390_lowcore.ext_params2; + + from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit; + if (from_idle) + account_idle_time_irq(); + + do_irq_async(regs, EXT_INTERRUPT); + irq_exit(); set_irq_regs(old_regs); + irqentry_exit(regs, state); + + if (from_idle) + regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT); } static void show_msi_interrupt(struct seq_file *p, int irq) diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 86c8d5370e7f..11f8c296f60d 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -131,12 +131,11 @@ static notrace void s390_handle_damage(void) NOKPROBE_SYMBOL(s390_handle_damage); /* - * Main machine check handler function. Will be called with interrupts enabled - * or disabled and machine checks enabled or disabled. + * Main machine check handler function. Will be called with interrupts disabled + * and machine checks enabled. */ -void s390_handle_mcck(void) +void __s390_handle_mcck(void) { - unsigned long flags; struct mcck_struct mcck; /* @@ -144,12 +143,10 @@ void s390_handle_mcck(void) * machine checks. Afterwards delete the old state and enable machine * checks again. 
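	 * Interrupts stay disabled for the whole sequence now: the only
	 * caller is the noinstr s390_handle_mcck() wrapper added below,
	 * which runs with interrupts off, so the old local_irq_save() and
	 * local_irq_restore() pair around this block is no longer needed.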
*/ - local_irq_save(flags); local_mcck_disable(); mcck = *this_cpu_ptr(&cpu_mcck); memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck)); local_mcck_enable(); - local_irq_restore(flags); if (mcck.channel_report) crw_handle_channel_report(); @@ -181,8 +178,13 @@ void s390_handle_mcck(void) do_exit(SIGSEGV); } } -EXPORT_SYMBOL_GPL(s390_handle_mcck); +void noinstr s390_handle_mcck(void) +{ + trace_hardirqs_off(); + __s390_handle_mcck(); + trace_hardirqs_on(); +} /* * returns 0 if all required registers are available * returns 1 otherwise @@ -344,6 +346,9 @@ int notrace s390_do_machine_check(struct pt_regs *regs) int mcck_pending = 0; nmi_enter(); + + if (user_mode(regs)) + update_timer_mcck(); inc_irq_stat(NMI_NMI); mci.val = S390_lowcore.mcck_interruption_code; mcck = this_cpu_ptr(&cpu_mcck); diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c index e949ab832ed7..db4877bbb9aa 100644 --- a/arch/s390/kernel/perf_cpum_cf_diag.c +++ b/arch/s390/kernel/perf_cpum_cf_diag.c @@ -2,7 +2,7 @@ /* * Performance event support for s390x - CPU-measurement Counter Sets * - * Copyright IBM Corp. 2019 + * Copyright IBM Corp. 2019, 2021 * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com> * Thomas Richer <tmricht@linux.ibm.com> */ @@ -17,6 +17,8 @@ #include <linux/export.h> #include <linux/slab.h> #include <linux/processor.h> +#include <linux/miscdevice.h> +#include <linux/mutex.h> #include <asm/ctl_reg.h> #include <asm/irq.h> @@ -24,15 +26,20 @@ #include <asm/timex.h> #include <asm/debug.h> -#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */ +#include <asm/perf_cpum_cf_diag.h> +#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */ +#define CF_DIAG_MIN_INTERVAL 60 /* Minimum counter set read */ + /* interval in seconds */ +static unsigned long cf_diag_interval = CF_DIAG_MIN_INTERVAL; static unsigned int cf_diag_cpu_speed; static debug_info_t *cf_diag_dbg; -struct cf_diag_csd { /* Counter set data per CPU */ +struct cf_diag_csd { /* Counter set data per CPU */ size_t used; /* Bytes used in data/start */ unsigned char start[PAGE_SIZE]; /* Counter set at event start */ unsigned char data[PAGE_SIZE]; /* Counter set at event delete */ + unsigned int sets; /* # Counter set saved in data */ }; static DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd); @@ -118,8 +125,8 @@ static void cf_diag_trailer(struct cf_trailer_entry *te) if (te->cpu_speed) te->speed = 1; te->clock_base = 1; /* Save clock base */ - memcpy(&te->tod_base, &tod_clock_base[1], 8); - store_tod_clock((__u64 *)&te->timestamp); + te->tod_base = tod_clock_base.tod; + te->timestamp = get_tod_clock_fast(); } /* @@ -178,18 +185,35 @@ static void cf_diag_disable(struct pmu *pmu) /* Number of perf events counting hardware events */ static atomic_t cf_diag_events = ATOMIC_INIT(0); +/* Used to avoid races in calling reserve/release_cpumf_hardware */ +static DEFINE_MUTEX(cf_diag_reserve_mutex); /* Release the PMU if event is the last perf event */ static void cf_diag_perf_event_destroy(struct perf_event *event) { debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d cf_diag_events %d\n", - __func__, event, event->cpu, + __func__, event, smp_processor_id(), atomic_read(&cf_diag_events)); if (atomic_dec_return(&cf_diag_events) == 0) __kernel_cpumcf_end(); } +static int get_authctrsets(void) +{ + struct cpu_cf_events *cpuhw; + unsigned long auth = 0; + enum cpumf_ctr_set i; + + cpuhw = &get_cpu_var(cpu_cf_events); + for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) { + if (cpuhw->info.auth_ctl & 
cpumf_ctr_ctl[i]) + auth |= cpumf_ctr_ctl[i]; + } + put_cpu_var(cpu_cf_events); + return auth; +} + /* Setup the event. Test for authorized counter sets and only include counter * sets which are authorized at the time of the setup. Including unauthorized * counter sets result in specification exception (and panic). @@ -197,15 +221,12 @@ static void cf_diag_perf_event_destroy(struct perf_event *event) static int __hw_perf_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; - struct cpu_cf_events *cpuhw; - enum cpumf_ctr_set i; int err = 0; debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__, event, event->cpu); event->hw.config = attr->config; - event->hw.config_base = 0; /* Add all authorized counter sets to config_base. The * the hardware init function is either called per-cpu or just once @@ -215,11 +236,7 @@ static int __hw_perf_event_init(struct perf_event *event) * Checking the authorization on any CPU is fine as the hardware * applies the same authorization settings to all CPUs. */ - cpuhw = &get_cpu_var(cpu_cf_events); - for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) - if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i]) - event->hw.config_base |= cpumf_ctr_ctl[i]; - put_cpu_var(cpu_cf_events); + event->hw.config_base = get_authctrsets(); /* No authorized counter sets, nothing to count/sample */ if (!event->hw.config_base) { @@ -237,6 +254,25 @@ out: return err; } +/* Return 0 if the CPU-measurement counter facility is currently free + * and an error otherwise. + */ +static int cf_diag_perf_event_inuse(void) +{ + int err = 0; + + if (!atomic_inc_not_zero(&cf_diag_events)) { + mutex_lock(&cf_diag_reserve_mutex); + if (atomic_read(&cf_diag_events) == 0 && + __kernel_cpumcf_begin()) + err = -EBUSY; + else + err = atomic_inc_return(&cf_diag_events); + mutex_unlock(&cf_diag_reserve_mutex); + } + return err; +} + static int cf_diag_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; @@ -264,13 +300,9 @@ static int cf_diag_event_init(struct perf_event *event) } /* Initialize for using the CPU-measurement counter facility */ - if (atomic_inc_return(&cf_diag_events) == 1) { - if (__kernel_cpumcf_begin()) { - atomic_dec(&cf_diag_events); - err = -EBUSY; - goto out; - } - } + err = cf_diag_perf_event_inuse(); + if (err < 0) + goto out; event->destroy = cf_diag_perf_event_destroy; err = __hw_perf_event_init(event); @@ -599,6 +631,8 @@ static void cf_diag_del(struct perf_event *event, int flags) cpuhw->flags &= ~PMU_F_IN_USE; } +/* Default counter set events and format attribute groups */ + CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG); static struct attribute *cf_diag_events_attr[] = { @@ -663,6 +697,452 @@ static void cf_diag_get_cpu_speed(void) } } +/* Code to create device and file I/O operations */ +static atomic_t ctrset_opencnt = ATOMIC_INIT(0); /* Excl. 
access */ + +static int cf_diag_open(struct inode *inode, struct file *file) +{ + int err = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (atomic_xchg(&ctrset_opencnt, 1)) + return -EBUSY; + + /* Avoid concurrent access with perf_event_open() system call */ + mutex_lock(&cf_diag_reserve_mutex); + if (atomic_read(&cf_diag_events) || __kernel_cpumcf_begin()) + err = -EBUSY; + mutex_unlock(&cf_diag_reserve_mutex); + if (err) { + atomic_set(&ctrset_opencnt, 0); + return err; + } + file->private_data = NULL; + debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__); + /* nonseekable_open() never fails */ + return nonseekable_open(inode, file); +} + +/* Variables for ioctl() interface support */ +static DEFINE_MUTEX(cf_diag_ctrset_mutex); +static struct cf_diag_ctrset { + unsigned long ctrset; /* Bit mask of counter set to read */ + cpumask_t mask; /* CPU mask to read from */ + time64_t lastread; /* Epoch counter set last read */ +} cf_diag_ctrset; + +static void cf_diag_ctrset_clear(void) +{ + cpumask_clear(&cf_diag_ctrset.mask); + cf_diag_ctrset.ctrset = 0; +} + +static void cf_diag_release_cpu(void *p) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + + debug_sprintf_event(cf_diag_dbg, 3, "%s cpu %d\n", __func__, + smp_processor_id()); + lcctl(0); /* Reset counter sets */ + cpuhw->state = 0; /* Save state in CPU hardware state */ +} + +/* Release function is also called when application gets terminated without + * doing a proper ioctl(..., S390_HWCTR_STOP, ...) command. + * Since only one application is allowed to open the device, simply stop all + * CPU counter sets. + */ +static int cf_diag_release(struct inode *inode, struct file *file) +{ + on_each_cpu(cf_diag_release_cpu, NULL, 1); + cf_diag_ctrset_clear(); + atomic_set(&ctrset_opencnt, 0); + __kernel_cpumcf_end(); + debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__); + return 0; +} + +struct cf_diag_call_on_cpu_parm { /* Parm struct for smp_call_on_cpu */ + unsigned int sets; /* Counter set bit mask */ + atomic_t cpus_ack; /* # CPUs successfully executed func */ +}; + +static int cf_diag_all_copy(unsigned long arg, cpumask_t *mask) +{ + struct s390_ctrset_read __user *ctrset_read; + unsigned int cpu, cpus, rc; + void __user *uptr; + + ctrset_read = (struct s390_ctrset_read __user *)arg; + uptr = ctrset_read->data; + for_each_cpu(cpu, mask) { + struct cf_diag_csd *csd = per_cpu_ptr(&cf_diag_csd, cpu); + struct s390_ctrset_cpudata __user *ctrset_cpudata; + + ctrset_cpudata = uptr; + debug_sprintf_event(cf_diag_dbg, 5, "%s cpu %d used %zd\n", + __func__, cpu, csd->used); + rc = put_user(cpu, &ctrset_cpudata->cpu_nr); + rc |= put_user(csd->sets, &ctrset_cpudata->no_sets); + rc |= copy_to_user(ctrset_cpudata->data, csd->data, csd->used); + if (rc) + return -EFAULT; + uptr += sizeof(struct s390_ctrset_cpudata) + csd->used; + cond_resched(); + } + cpus = cpumask_weight(mask); + if (put_user(cpus, &ctrset_read->no_cpus)) + return -EFAULT; + debug_sprintf_event(cf_diag_dbg, 5, "%s copied %ld\n", + __func__, uptr - (void __user *)ctrset_read->data); + return 0; +} + +static size_t cf_diag_cpuset_read(struct s390_ctrset_setdata *p, int ctrset, + int ctrset_size, size_t room) +{ + size_t need = 0; + int rc = -1; + + need = sizeof(*p) + sizeof(u64) * ctrset_size; + debug_sprintf_event(cf_diag_dbg, 5, + "%s room %zd need %zd set %#x set_size %d\n", + __func__, room, need, ctrset, ctrset_size); + if (need <= room) { + p->set = cpumf_ctr_ctl[ctrset]; + p->no_cnts = ctrset_size; + rc = ctr_stcctm(ctrset, ctrset_size, (u64 
*)p->cv); + if (rc == 3) /* Nothing stored */ + need = 0; + } + debug_sprintf_event(cf_diag_dbg, 5, "%s need %zd rc %d\n", __func__, + need, rc); + return need; +} + +/* Read all counter sets. Since the perf_event_open() system call with + * event cpum_cf_diag/.../ is blocked when this interface is active, reuse + * the perf_event_open() data buffer to store the counter sets. + */ +static void cf_diag_cpu_read(void *parm) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd); + struct cf_diag_call_on_cpu_parm *p = parm; + int set, set_size; + size_t space; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s new %#x flags %#x state %#llx\n", + __func__, p->sets, cpuhw->flags, + cpuhw->state); + /* No data saved yet */ + csd->used = 0; + csd->sets = 0; + memset(csd->data, 0, sizeof(csd->data)); + + /* Scan the counter sets */ + for (set = CPUMF_CTR_SET_BASIC; set < CPUMF_CTR_SET_MAX; ++set) { + struct s390_ctrset_setdata *sp = (void *)csd->data + csd->used; + + if (!(p->sets & cpumf_ctr_ctl[set])) + continue; /* Counter set not in list */ + set_size = cf_diag_ctrset_size(set, &cpuhw->info); + space = sizeof(csd->data) - csd->used; + space = cf_diag_cpuset_read(sp, set, set_size, space); + if (space) { + csd->used += space; + csd->sets += 1; + } + debug_sprintf_event(cf_diag_dbg, 5, "%s sp %px space %zd\n", + __func__, sp, space); + } + debug_sprintf_event(cf_diag_dbg, 5, "%s sets %d used %zd\n", __func__, + csd->sets, csd->used); +} + +static int cf_diag_all_read(unsigned long arg) +{ + struct cf_diag_call_on_cpu_parm p; + cpumask_var_t mask; + time64_t now; + int rc = 0; + + debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__); + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return -ENOMEM; + now = ktime_get_seconds(); + if (cf_diag_ctrset.lastread + cf_diag_interval > now) { + debug_sprintf_event(cf_diag_dbg, 5, "%s now %lld " + " lastread %lld\n", __func__, now, + cf_diag_ctrset.lastread); + rc = -EAGAIN; + goto out; + } else { + cf_diag_ctrset.lastread = now; + } + p.sets = cf_diag_ctrset.ctrset; + cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask); + on_each_cpu_mask(mask, cf_diag_cpu_read, &p, 1); + rc = cf_diag_all_copy(arg, mask); +out: + free_cpumask_var(mask); + debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d\n", __func__, rc); + return rc; +} + +/* Stop all counter sets via ioctl interface */ +static void cf_diag_ioctl_off(void *parm) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + struct cf_diag_call_on_cpu_parm *p = parm; + int rc; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s new %#x flags %#x state %#llx\n", + __func__, p->sets, cpuhw->flags, + cpuhw->state); + + ctr_set_multiple_disable(&cpuhw->state, p->sets); + ctr_set_multiple_stop(&cpuhw->state, p->sets); + rc = lcctl(cpuhw->state); /* Stop counter sets */ + if (!cpuhw->state) + cpuhw->flags &= ~PMU_F_IN_USE; + debug_sprintf_event(cf_diag_dbg, 5, + "%s rc %d flags %#x state %#llx\n", __func__, + rc, cpuhw->flags, cpuhw->state); +} + +/* Start counter sets on particular CPU */ +static void cf_diag_ioctl_on(void *parm) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + struct cf_diag_call_on_cpu_parm *p = parm; + int rc; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s new %#x flags %#x state %#llx\n", + __func__, p->sets, cpuhw->flags, + cpuhw->state); + + if (!(cpuhw->flags & PMU_F_IN_USE)) + cpuhw->state = 0; + cpuhw->flags |= PMU_F_IN_USE; + rc = lcctl(cpuhw->state); /* Reset unused counter sets */ + 
ctr_set_multiple_enable(&cpuhw->state, p->sets); + ctr_set_multiple_start(&cpuhw->state, p->sets); + rc |= lcctl(cpuhw->state); /* Start counter sets */ + if (!rc) + atomic_inc(&p->cpus_ack); + debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d state %#llx\n", + __func__, rc, cpuhw->state); +} + +static int cf_diag_all_stop(void) +{ + struct cf_diag_call_on_cpu_parm p = { + .sets = cf_diag_ctrset.ctrset, + }; + cpumask_var_t mask; + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return -ENOMEM; + cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask); + on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1); + free_cpumask_var(mask); + return 0; +} + +static int cf_diag_all_start(void) +{ + struct cf_diag_call_on_cpu_parm p = { + .sets = cf_diag_ctrset.ctrset, + .cpus_ack = ATOMIC_INIT(0), + }; + cpumask_var_t mask; + int rc = 0; + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return -ENOMEM; + cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask); + on_each_cpu_mask(mask, cf_diag_ioctl_on, &p, 1); + if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) { + on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1); + rc = -EIO; + } + free_cpumask_var(mask); + return rc; +} + +/* Return the maximum required space for all possible CPUs in case one + * CPU will be onlined during the START, READ, STOP cycles. + * To find out the size of the counter sets, any one CPU will do. They + * all have the same counter sets. + */ +static size_t cf_diag_needspace(unsigned int sets) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + size_t bytes = 0; + int i; + + for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) { + if (!(sets & cpumf_ctr_ctl[i])) + continue; + bytes += cf_diag_ctrset_size(i, &cpuhw->info) * sizeof(u64) + + sizeof(((struct s390_ctrset_setdata *)0)->set) + + sizeof(((struct s390_ctrset_setdata *)0)->no_cnts); + } + bytes = sizeof(((struct s390_ctrset_read *)0)->no_cpus) + nr_cpu_ids * + (bytes + sizeof(((struct s390_ctrset_cpudata *)0)->cpu_nr) + + sizeof(((struct s390_ctrset_cpudata *)0)->no_sets)); + debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__, + bytes); + return bytes; +} + +static long cf_diag_ioctl_read(unsigned long arg) +{ + struct s390_ctrset_read read; + int ret = 0; + + debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__); + if (copy_from_user(&read, (char __user *)arg, sizeof(read))) + return -EFAULT; + ret = cf_diag_all_read(arg); + debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret); + return ret; +} + +static long cf_diag_ioctl_stop(void) +{ + int ret; + + debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__); + ret = cf_diag_all_stop(); + cf_diag_ctrset_clear(); + debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret); + return ret; +} + +static long cf_diag_ioctl_start(unsigned long arg) +{ + struct s390_ctrset_start __user *ustart; + struct s390_ctrset_start start; + void __user *umask; + unsigned int len; + int ret = 0; + size_t need; + + if (cf_diag_ctrset.ctrset) + return -EBUSY; + ustart = (struct s390_ctrset_start __user *)arg; + if (copy_from_user(&start, ustart, sizeof(start))) + return -EFAULT; + if (start.version != S390_HWCTR_START_VERSION) + return -EINVAL; + if (start.counter_sets & ~(cpumf_ctr_ctl[CPUMF_CTR_SET_BASIC] | + cpumf_ctr_ctl[CPUMF_CTR_SET_USER] | + cpumf_ctr_ctl[CPUMF_CTR_SET_CRYPTO] | + cpumf_ctr_ctl[CPUMF_CTR_SET_EXT] | + cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG])) + return -EINVAL; /* Invalid counter set */ + if (!start.counter_sets) + return -EINVAL; /* No counter set at all? 
*/ + cpumask_clear(&cf_diag_ctrset.mask); + len = min_t(u64, start.cpumask_len, cpumask_size()); + umask = (void __user *)start.cpumask; + if (copy_from_user(&cf_diag_ctrset.mask, umask, len)) + return -EFAULT; + if (cpumask_empty(&cf_diag_ctrset.mask)) + return -EINVAL; + need = cf_diag_needspace(start.counter_sets); + if (put_user(need, &ustart->data_bytes)) + ret = -EFAULT; + if (ret) + goto out; + cf_diag_ctrset.ctrset = start.counter_sets; + ret = cf_diag_all_start(); +out: + if (ret) + cf_diag_ctrset_clear(); + debug_sprintf_event(cf_diag_dbg, 2, "%s sets %#lx need %ld ret %d\n", + __func__, cf_diag_ctrset.ctrset, need, ret); + return ret; +} + +static long cf_diag_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret; + + debug_sprintf_event(cf_diag_dbg, 2, "%s cmd %#x arg %lx\n", __func__, + cmd, arg); + get_online_cpus(); + mutex_lock(&cf_diag_ctrset_mutex); + switch (cmd) { + case S390_HWCTR_START: + ret = cf_diag_ioctl_start(arg); + break; + case S390_HWCTR_STOP: + ret = cf_diag_ioctl_stop(); + break; + case S390_HWCTR_READ: + ret = cf_diag_ioctl_read(arg); + break; + default: + ret = -ENOTTY; + break; + } + mutex_unlock(&cf_diag_ctrset_mutex); + put_online_cpus(); + debug_sprintf_event(cf_diag_dbg, 2, "%s ret %d\n", __func__, ret); + return ret; +} + +static const struct file_operations cf_diag_fops = { + .owner = THIS_MODULE, + .open = cf_diag_open, + .release = cf_diag_release, + .unlocked_ioctl = cf_diag_ioctl, + .compat_ioctl = cf_diag_ioctl, + .llseek = no_llseek +}; + +static struct miscdevice cf_diag_dev = { + .name = S390_HWCTR_DEVICE, + .minor = MISC_DYNAMIC_MINOR, + .fops = &cf_diag_fops, +}; + +static int cf_diag_online_cpu(unsigned int cpu) +{ + struct cf_diag_call_on_cpu_parm p; + + mutex_lock(&cf_diag_ctrset_mutex); + if (!cf_diag_ctrset.ctrset) + goto out; + p.sets = cf_diag_ctrset.ctrset; + cf_diag_ioctl_on(&p); +out: + mutex_unlock(&cf_diag_ctrset_mutex); + return 0; +} + +static int cf_diag_offline_cpu(unsigned int cpu) +{ + struct cf_diag_call_on_cpu_parm p; + + mutex_lock(&cf_diag_ctrset_mutex); + if (!cf_diag_ctrset.ctrset) + goto out; + p.sets = cf_diag_ctrset.ctrset; + cf_diag_ioctl_off(&p); +out: + mutex_unlock(&cf_diag_ctrset_mutex); + return 0; +} + /* Initialize the counter set PMU to generate complete counter set data as * event raw data. This relies on the CPU Measurement Counter Facility device * already being loaded and initialized. 
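The ioctl interface added above is a strict open, S390_HWCTR_START, S390_HWCTR_READ, S390_HWCTR_STOP, close cycle: cf_diag_open() admits a single CAP_SYS_ADMIN opener, the START handler reports the worst-case buffer size back through data_bytes (computed by cf_diag_needspace()), and repeated READs are rate limited by cf_diag_interval. A minimal userspace sketch of that cycle follows; it assumes the S390_HWCTR_* macros and the s390_ctrset_start/s390_ctrset_read layouts come from the new <asm/perf_cpum_cf_diag.h> uapi header, that the device node is /dev/hwctr, and that the counter-set mask value is a placeholder, so read it as an illustration rather than a reference client.

/* Hypothetical client for the counter-set device: start the selected
 * counter sets on CPU 0, read them back once, stop them again.
 * Needs CAP_SYS_ADMIN; only one open() at a time succeeds.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/perf_cpum_cf_diag.h>	/* assumed uapi header */

int main(void)
{
	struct s390_ctrset_start start;
	struct s390_ctrset_read *rbuf;
	unsigned long cpus = 1;			/* bit 0: CPU 0 */
	int fd, rc = 1;

	fd = open("/dev/hwctr", O_RDWR);	/* S390_HWCTR_DEVICE */
	if (fd < 0)
		return 1;
	memset(&start, 0, sizeof(start));
	start.version = S390_HWCTR_START_VERSION;
	start.counter_sets = 2;			/* placeholder set mask */
	start.cpumask_len = sizeof(cpus);
	start.cpumask = (uintptr_t)&cpus;
	if (ioctl(fd, S390_HWCTR_START, &start))	/* fills data_bytes */
		goto out;
	rbuf = malloc(start.data_bytes);
	/* a second READ within cf_diag_interval would fail with EAGAIN */
	if (rbuf && !ioctl(fd, S390_HWCTR_READ, rbuf)) {
		printf("counter sets from %u CPUs\n", (unsigned int)rbuf->no_cpus);
		rc = 0;
	}
	free(rbuf);
	ioctl(fd, S390_HWCTR_STOP);	/* cf_diag_release() would also stop */
out:
	close(fd);
	return rc;
}

Sizing the read buffer from the data_bytes value written back by START matches the kernel's contract: cf_diag_needspace() deliberately accounts for all possible CPUs, so a CPU coming online between START and READ cannot overflow the buffer.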
@@ -685,21 +1165,43 @@ static int __init cf_diag_init(void) return -ENOMEM; } + rc = misc_register(&cf_diag_dev); + if (rc) { + pr_err("Registration of /dev/" S390_HWCTR_DEVICE + " failed rc=%d\n", rc); + goto out; + } + /* Setup s390dbf facility */ cf_diag_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128); if (!cf_diag_dbg) { pr_err("Registration of s390dbf(cpum_cf_diag) failed\n"); - return -ENOMEM; + rc = -ENOMEM; + goto out_dbf; } debug_register_view(cf_diag_dbg, &debug_sprintf_view); rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1); if (rc) { - debug_unregister_view(cf_diag_dbg, &debug_sprintf_view); - debug_unregister(cf_diag_dbg); pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n", rc); + goto out_perf; } + rc = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_S390_CFD_ONLINE, + "perf/s390/cfd:online", + cf_diag_online_cpu, cf_diag_offline_cpu); + if (!rc) + goto out; + + pr_err("Registration of CPUHP_AP_PERF_S390_CFD_ONLINE failed rc=%i\n", + rc); + perf_pmu_unregister(&cf_diag); +out_perf: + debug_unregister_view(cf_diag_dbg, &debug_sprintf_view); + debug_unregister(cf_diag_dbg); +out_dbf: + misc_deregister(&cf_diag_dev); +out: return rc; } -arch_initcall(cf_diag_init); +device_initcall(cf_diag_init); diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 19cd7b961c45..db62def4ef28 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1682,7 +1682,7 @@ static void aux_sdb_init(unsigned long sdb) /* Save clock base */ te->clock_base = 1; - memcpy(&te->progusage2, &tod_clock_base[1], 8); + te->progusage2 = tod_clock_base.tod; } /* diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index bc3ca54edfb4..e20bed1ed34a 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -29,6 +29,7 @@ #include <linux/random.h> #include <linux/export.h> #include <linux/init_task.h> +#include <linux/entry-common.h> #include <asm/cpu_mf.h> #include <asm/io.h> #include <asm/processor.h> @@ -43,9 +44,22 @@ #include <asm/unwind.h> #include "entry.h" -asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); +void ret_from_fork(void) asm("ret_from_fork"); -extern void kernel_thread_starter(void); +void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs) +{ + void (*func)(void *arg); + + schedule_tail(prev); + + if (!user_mode(regs)) { + /* Kernel thread */ + func = (void *)regs->gprs[9]; + func((void *)regs->gprs[10]); + } + clear_pt_regs_flag(regs, PIF_SYSCALL); + syscall_exit_to_user_mode(regs); +} void flush_thread(void) { @@ -108,22 +122,24 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, p->thread.last_break = 1; frame->sf.back_chain = 0; + frame->sf.gprs[5] = (unsigned long)frame + sizeof(struct stack_frame); + frame->sf.gprs[6] = (unsigned long)p; /* new return point is ret_from_fork */ - frame->sf.gprs[8] = (unsigned long) ret_from_fork; + frame->sf.gprs[8] = (unsigned long)ret_from_fork; /* fake return stack for resume(), don't go back to schedule */ - frame->sf.gprs[9] = (unsigned long) frame; + frame->sf.gprs[9] = (unsigned long)frame; /* Store access registers to kernel stack of new process. 
*/ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(&frame->childregs, 0, sizeof(struct pt_regs)); frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; frame->childregs.psw.addr = - (unsigned long) kernel_thread_starter; + (unsigned long)__ret_from_fork; frame->childregs.gprs[9] = new_stackp; /* function */ frame->childregs.gprs[10] = arg; - frame->childregs.gprs[11] = (unsigned long) do_exit; + frame->childregs.gprs[11] = (unsigned long)do_exit; frame->childregs.orig_gpr2 = -1; return 0; @@ -153,7 +169,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, return 0; } -asmlinkage void execve_tail(void) +void execve_tail(void) { current->thread.fpu.fpc = 0; asm volatile("sfpc %0" : : "d" (0)); diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index a76dd27fb2e8..18b3416fd663 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -7,6 +7,7 @@ * Martin Schwidefsky (schwidefsky@de.ibm.com) */ +#include "asm/ptrace.h" #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> @@ -37,9 +38,6 @@ #include "compat_ptrace.h" #endif -#define CREATE_TRACE_POINTS -#include <trace/events/syscalls.h> - void update_cr_regs(struct task_struct *task) { struct pt_regs *regs = task_pt_regs(task); @@ -140,7 +138,7 @@ void ptrace_disable(struct task_struct *task) memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); clear_tsk_thread_flag(task, TIF_SINGLE_STEP); - clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP); + clear_tsk_thread_flag(task, TIF_PER_TRAP); task->thread.per_flags = 0; } @@ -322,25 +320,6 @@ static inline void __poke_user_per(struct task_struct *child, child->thread.per_user.end = data; } -static void fixup_int_code(struct task_struct *child, addr_t data) -{ - struct pt_regs *regs = task_pt_regs(child); - int ilc = regs->int_code >> 16; - u16 insn; - - if (ilc > 6) - return; - - if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16), - &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn)) - return; - - /* double check that tracee stopped on svc instruction */ - if ((insn >> 8) != 0xa) - return; - - regs->int_code = 0x20000 | (data & 0xffff); -} /* * Write a word to the user area of a process at location addr. This * operation does have an additional problem compared to peek_user. 
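The ptrace hunks above and below replace fixup_int_code(): rather than re-reading the svc instruction from the tracee, the kernel now mirrors a poked gpr2 value straight into regs->int_code while PIF_SYSCALL is set. For the tracer this keeps the historical contract that the syscall number of a tracee stopped at syscall entry lives in the gpr2 slot of struct user. A hedged sketch of that tracer side, using only the standard ptrace API and eliding the fork/PTRACE_SYSCALL/waitpid plumbing:

/* Illustration only: overwrite the pending syscall number of a tracee
 * that is already stopped in a syscall-enter stop. With this patch the
 * kernel updates regs->int_code itself, so a single poke is enough;
 * poking -1 is the usual way to cancel the call.
 */
#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static long set_syscall_nr(pid_t pid, long nr)
{
	return ptrace(PTRACE_POKEUSER, pid,
		      (void *)offsetof(struct user, regs.gprs[2]),
		      (void *)nr);
}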
@@ -374,10 +353,12 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) } if (test_pt_regs_flag(regs, PIF_SYSCALL) && - addr == offsetof(struct user, regs.gprs[2])) - fixup_int_code(child, data); - *(addr_t *)((addr_t) &regs->psw + addr) = data; + addr == offsetof(struct user, regs.gprs[2])) { + struct pt_regs *regs = task_pt_regs(child); + regs->int_code = 0x20000 | (data & 0xffff); + } + *(addr_t *)((addr_t) &regs->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { /* * access registers are stored in the thread structure @@ -742,10 +723,12 @@ static int __poke_user_compat(struct task_struct *child, regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | (__u64)(tmp & PSW32_ADDR_AMODE); } else { - if (test_pt_regs_flag(regs, PIF_SYSCALL) && - addr == offsetof(struct compat_user, regs.gprs[2])) - fixup_int_code(child, data); + addr == offsetof(struct compat_user, regs.gprs[2])) { + struct pt_regs *regs = task_pt_regs(child); + + regs->int_code = 0x20000 | (data & 0xffff); + } /* gpr 0-15 */ *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; } @@ -862,82 +845,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } #endif -asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) -{ - unsigned long mask = -1UL; - long ret = -1; - - if (is_compat_task()) - mask = 0xffffffff; - - /* - * The sysc_tracesys code in entry.S stored the system - * call number to gprs[2]. - */ - if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) { - /* - * Tracing decided this syscall should not happen. Skip - * the system call and the system call restart handling. - */ - goto skip; - } - -#ifdef CONFIG_SECCOMP - /* Do the secure computing check after ptrace. */ - if (unlikely(test_thread_flag(TIF_SECCOMP))) { - struct seccomp_data sd; - - if (is_compat_task()) { - sd.instruction_pointer = regs->psw.addr & 0x7fffffff; - sd.arch = AUDIT_ARCH_S390; - } else { - sd.instruction_pointer = regs->psw.addr; - sd.arch = AUDIT_ARCH_S390X; - } - - sd.nr = regs->int_code & 0xffff; - sd.args[0] = regs->orig_gpr2 & mask; - sd.args[1] = regs->gprs[3] & mask; - sd.args[2] = regs->gprs[4] & mask; - sd.args[3] = regs->gprs[5] & mask; - sd.args[4] = regs->gprs[6] & mask; - sd.args[5] = regs->gprs[7] & mask; - - if (__secure_computing(&sd) == -1) - goto skip; - } -#endif /* CONFIG_SECCOMP */ - - if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_enter(regs, regs->int_code & 0xffff); - - - audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask, - regs->gprs[3] &mask, regs->gprs[4] &mask, - regs->gprs[5] &mask); - - if ((signed long)regs->gprs[2] >= NR_syscalls) { - regs->gprs[2] = -ENOSYS; - ret = -ENOSYS; - } - return regs->gprs[2]; -skip: - clear_pt_regs_flag(regs, PIF_SYSCALL); - return ret; -} - -asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) -{ - audit_syscall_exit(regs); - - if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_exit(regs, regs->gprs[2]); - - if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, 0); -} - /* * user_regset definitions. 
*/ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 1fbed91c73bc..60da976eee6f 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -338,7 +338,7 @@ int __init arch_early_irq_init(void) return 0; } -static int __init async_stack_realloc(void) +static int __init stack_realloc(void) { unsigned long old, new; @@ -346,11 +346,18 @@ static int __init async_stack_realloc(void) new = stack_alloc(); if (!new) panic("Couldn't allocate async stack"); - S390_lowcore.async_stack = new + STACK_INIT_OFFSET; + WRITE_ONCE(S390_lowcore.async_stack, new + STACK_INIT_OFFSET); free_pages(old, THREAD_SIZE_ORDER); + + old = S390_lowcore.mcck_stack - STACK_INIT_OFFSET; + new = stack_alloc(); + if (!new) + panic("Couldn't allocate machine check stack"); + WRITE_ONCE(S390_lowcore.mcck_stack, new + STACK_INIT_OFFSET); + memblock_free(old, THREAD_SIZE); return 0; } -early_initcall(async_stack_realloc); +early_initcall(stack_realloc); void __init arch_call_rest_init(void) { @@ -372,6 +379,7 @@ void __init arch_call_rest_init(void) static void __init setup_lowcore_dat_off(void) { unsigned long int_psw_mask = PSW_KERNEL_BITS; + unsigned long mcck_stack; struct lowcore *lc; if (IS_ENABLED(CONFIG_KASAN)) @@ -411,8 +419,7 @@ static void __init setup_lowcore_dat_off(void) memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list, sizeof(lc->alt_stfle_fac_list)); nmi_alloc_boot_cpu(lc); - lc->sync_enter_timer = S390_lowcore.sync_enter_timer; - lc->async_enter_timer = S390_lowcore.async_enter_timer; + lc->sys_enter_timer = S390_lowcore.sys_enter_timer; lc->exit_timer = S390_lowcore.exit_timer; lc->user_timer = S390_lowcore.user_timer; lc->system_timer = S390_lowcore.system_timer; @@ -440,6 +447,12 @@ static void __init setup_lowcore_dat_off(void) lc->restart_data = 0; lc->restart_source = -1UL; + mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE); + if (!mcck_stack) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, THREAD_SIZE, THREAD_SIZE); + lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET; + /* Setup absolute zero lowcore */ mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack); mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn); diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index b27b6c1f058d..90163e6184f5 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -16,6 +16,7 @@ #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> +#include <linux/entry-common.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> @@ -170,6 +171,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) fpregs_load(&user_sregs.fpregs, ¤t->thread.fpu); clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ + clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART); return 0; } @@ -459,7 +461,8 @@ static void handle_signal(struct ksignal *ksig, sigset_t *oldset, * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. */ -void do_signal(struct pt_regs *regs) + +void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { struct ksignal ksig; sigset_t *oldset = sigmask_to_save(); @@ -472,7 +475,7 @@ void do_signal(struct pt_regs *regs) current->thread.system_call = test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0; - if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) { + if (has_signal && get_signal(&ksig)) { /* Whee! Actually deliver the signal. 
*/ if (current->thread.system_call) { regs->int_code = current->thread.system_call; @@ -498,6 +501,7 @@ void do_signal(struct pt_regs *regs) } /* No longer in a system call */ clear_pt_regs_flag(regs, PIF_SYSCALL); + clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART); rseq_signal_deliver(&ksig, regs); if (is_compat_task()) handle_signal32(&ksig, oldset, regs); @@ -508,6 +512,7 @@ void do_signal(struct pt_regs *regs) /* No handlers present - check for system call restart */ clear_pt_regs_flag(regs, PIF_SYSCALL); + clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART); if (current->thread.system_call) { regs->int_code = current->thread.system_call; switch (regs->gprs[2]) { @@ -520,9 +525,9 @@ void do_signal(struct pt_regs *regs) case -ERESTARTNOINTR: /* Restart system call with magic TIF bit. */ regs->gprs[2] = regs->orig_gpr2; - set_pt_regs_flag(regs, PIF_SYSCALL); + set_pt_regs_flag(regs, PIF_SYSCALL_RESTART); if (test_thread_flag(TIF_SINGLE_STEP)) - clear_pt_regs_flag(regs, PIF_PER_TRAP); + clear_thread_flag(TIF_PER_TRAP); break; } } diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 27c763014114..58c8afa3da65 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -30,6 +30,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irqflags.h> +#include <linux/irq_work.h> #include <linux/cpu.h> #include <linux/slab.h> #include <linux/sched/hotplug.h> @@ -62,6 +63,7 @@ enum { ec_call_function_single, ec_stop_cpu, ec_mcck_pending, + ec_irq_work, }; enum { @@ -189,7 +191,7 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) { - unsigned long async_stack, nodat_stack; + unsigned long async_stack, nodat_stack, mcck_stack; struct lowcore *lc; if (pcpu != &pcpu_devices[0]) { @@ -202,13 +204,15 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET; } async_stack = stack_alloc(); - if (!async_stack) - goto out; + mcck_stack = stack_alloc(); + if (!async_stack || !mcck_stack) + goto out_stack; lc = pcpu->lowcore; memcpy(lc, &S390_lowcore, 512); memset((char *) lc + 512, 0, sizeof(*lc) - 512); lc->async_stack = async_stack + STACK_INIT_OFFSET; lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET; + lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET; lc->cpu_nr = cpu; lc->spinlock_lockval = arch_spin_lockval(cpu); lc->spinlock_index = 0; @@ -216,12 +220,13 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); if (nmi_alloc_per_cpu(lc)) - goto out_async; + goto out_stack; lowcore_ptr[cpu] = lc; pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); return 0; -out_async: +out_stack: + stack_free(mcck_stack); stack_free(async_stack); out: if (pcpu != &pcpu_devices[0]) { @@ -233,16 +238,18 @@ out: static void pcpu_free_lowcore(struct pcpu *pcpu) { - unsigned long async_stack, nodat_stack, lowcore; + unsigned long async_stack, nodat_stack, mcck_stack, lowcore; nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET; async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET; + mcck_stack = pcpu->lowcore->mcck_stack - STACK_INIT_OFFSET; lowcore = (unsigned long) pcpu->lowcore; pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); lowcore_ptr[pcpu - pcpu_devices] = NULL; nmi_free_per_cpu(pcpu->lowcore); stack_free(async_stack); + stack_free(mcck_stack); if (pcpu == &pcpu_devices[0]) return; free_pages(nodat_stack, THREAD_SIZE_ORDER); @@ 
-429,10 +436,12 @@ void notrace smp_yield_cpu(int cpu) */ void notrace smp_emergency_stop(void) { - cpumask_t cpumask; + static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED; + static cpumask_t cpumask; u64 end; int cpu; + arch_spin_lock(&lock); cpumask_copy(&cpumask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &cpumask); @@ -453,6 +462,7 @@ void notrace smp_emergency_stop(void) break; cpu_relax(); } + arch_spin_unlock(&lock); } NOKPROBE_SYMBOL(smp_emergency_stop); @@ -499,7 +509,9 @@ static void smp_handle_ext_call(void) if (test_bit(ec_call_function_single, &bits)) generic_smp_call_function_single_interrupt(); if (test_bit(ec_mcck_pending, &bits)) - s390_handle_mcck(); + __s390_handle_mcck(); + if (test_bit(ec_irq_work, &bits)) + irq_work_run(); } static void do_ext_call_interrupt(struct ext_code ext_code, @@ -532,6 +544,13 @@ void smp_send_reschedule(int cpu) pcpu_ec_call(pcpu_devices + cpu, ec_schedule); } +#ifdef CONFIG_IRQ_WORK +void arch_irq_work_raise(void) +{ + pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work); +} +#endif + /* * parameter area for the set/clear control bit callbacks */ @@ -770,11 +789,13 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail, static int __smp_rescan_cpus(struct sclp_core_info *info, bool early) { struct sclp_core_entry *core; - cpumask_t avail; + static cpumask_t avail; bool configured; u16 core_id; int nr, i; + get_online_cpus(); + mutex_lock(&smp_cpu_state_mutex); nr = 0; cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); /* @@ -795,6 +816,8 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early) configured = i < info->configured; nr += smp_add_core(&info->core[i], &avail, configured, early); } + mutex_unlock(&smp_cpu_state_mutex); + put_online_cpus(); return nr; } @@ -842,9 +865,7 @@ void __init smp_detect_cpus(void) pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); /* Add CPUs present at boot */ - get_online_cpus(); __smp_rescan_cpus(info, true); - put_online_cpus(); memblock_free_early((unsigned long)info, sizeof(*info)); } @@ -1173,11 +1194,7 @@ int __ref smp_rescan_cpus(void) if (!info) return -ENOMEM; smp_get_core_info(info, 0); - get_online_cpus(); - mutex_lock(&smp_cpu_state_mutex); nr = __smp_rescan_cpus(info, false); - mutex_unlock(&smp_cpu_state_mutex); - put_online_cpus(); kfree(info); if (nr) topology_schedule_update(); diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/syscall.c index 202fa73ac167..bc8e650e377d 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/syscall.c @@ -29,6 +29,13 @@ #include <linux/unistd.h> #include <linux/ipc.h> #include <linux/uaccess.h> +#include <linux/string.h> +#include <linux/thread_info.h> +#include <linux/entry-common.h> + +#include <asm/ptrace.h> +#include <asm/vtime.h> + #include "entry.h" /* @@ -100,3 +107,62 @@ SYSCALL_DEFINE0(ni_syscall) { return -ENOSYS; } + +void do_syscall(struct pt_regs *regs) +{ + unsigned long nr; + + nr = regs->int_code & 0xffff; + if (!nr) { + nr = regs->gprs[1] & 0xffff; + regs->int_code &= ~0xffffUL; + regs->int_code |= nr; + } + + regs->gprs[2] = nr; + + nr = syscall_enter_from_user_mode_work(regs, nr); + + /* + * In the s390 ptrace ABI, both the syscall number and the return value + * use gpr2. However, userspace puts the syscall number either in the + * svc instruction itself, or uses gpr1. To make at least skipping syscalls + * work, the ptrace code sets PIF_SYSCALL_RET_SET, which is checked here + * and if set, the syscall will be skipped. 
+ */ + if (!test_pt_regs_flag(regs, PIF_SYSCALL_RET_SET)) { + regs->gprs[2] = -ENOSYS; + if (likely(nr < NR_syscalls)) + regs->gprs[2] = current->thread.sys_call_table[nr](regs); + } else { + clear_pt_regs_flag(regs, PIF_SYSCALL_RET_SET); + } + syscall_exit_to_user_mode_work(regs); +} + +void noinstr __do_syscall(struct pt_regs *regs, int per_trap) +{ + enter_from_user_mode(regs); + + memcpy(&regs->gprs[8], S390_lowcore.save_area_sync, 8 * sizeof(unsigned long)); + memcpy(&regs->int_code, &S390_lowcore.svc_ilc, sizeof(regs->int_code)); + regs->psw = S390_lowcore.svc_old_psw; + + update_timer_sys(); + + local_irq_enable(); + regs->orig_gpr2 = regs->gprs[2]; + + if (per_trap) + set_thread_flag(TIF_PER_TRAP); + + for (;;) { + regs->flags = 0; + set_pt_regs_flag(regs, PIF_SYSCALL); + do_syscall(regs); + if (!test_pt_regs_flag(regs, PIF_SYSCALL_RESTART)) + break; + local_irq_enable(); + } + exit_to_user_mode(); +} diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index d443423495e5..3abef2144dac 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -444,3 +444,4 @@ 439 common faccessat2 sys_faccessat2 sys_faccessat2 440 common process_madvise sys_process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr sys_mount_setattr diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index c59cb44fbb7d..06bcfa636638 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -55,11 +55,7 @@ #include <asm/cio.h> #include "entry.h" -unsigned char tod_clock_base[16] __aligned(8) = { - /* Force to data section. */ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -}; +union tod_clock tod_clock_base __section(".data"); EXPORT_SYMBOL_GPL(tod_clock_base); u64 clock_comparator_max = -1ULL; @@ -86,7 +82,7 @@ void __init time_early_init(void) struct ptff_qui qui; /* Initialize TOD steering parameters */ - tod_steering_end = *(unsigned long long *) &tod_clock_base[1]; + tod_steering_end = tod_clock_base.tod; vdso_data->arch_data.tod_steering_end = tod_steering_end; if (!test_facility(28)) @@ -113,18 +109,13 @@ unsigned long long notrace sched_clock(void) } NOKPROBE_SYMBOL(sched_clock); -static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt) +static void ext_to_timespec64(union tod_clock *clk, struct timespec64 *xt) { - unsigned long long high, low, rem, sec, nsec; + unsigned long rem, sec, nsec; - /* Split extendnd TOD clock to micro-seconds and sub-micro-seconds */ - high = (*(unsigned long long *) clk) >> 4; - low = (*(unsigned long long *)&clk[7]) << 4; - /* Calculate seconds and nano-seconds */ - sec = high; + sec = clk->us; rem = do_div(sec, 1000000); - nsec = (((low >> 32) + (rem << 32)) * 1000) >> 32; - + nsec = ((clk->sus + (rem << 12)) * 125) >> 9; xt->tv_sec = sec; xt->tv_nsec = nsec; } @@ -204,30 +195,26 @@ static void stp_reset(void); void read_persistent_clock64(struct timespec64 *ts) { - unsigned char clk[STORE_CLOCK_EXT_SIZE]; - __u64 delta; + union tod_clock clk; + u64 delta; delta = initial_leap_seconds + TOD_UNIX_EPOCH; - get_tod_clock_ext(clk); - *(__u64 *) &clk[1] -= delta; - if (*(__u64 *) &clk[1] > delta) - clk[0]--; - ext_to_timespec64(clk, ts); + store_tod_clock_ext(&clk); + clk.eitod -= delta; + ext_to_timespec64(&clk, ts); } void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time, struct timespec64 *boot_offset) { - 
unsigned char clk[STORE_CLOCK_EXT_SIZE]; struct timespec64 boot_time; - __u64 delta; + union tod_clock clk; + u64 delta; delta = initial_leap_seconds + TOD_UNIX_EPOCH; - memcpy(clk, tod_clock_base, STORE_CLOCK_EXT_SIZE); - *(__u64 *)&clk[1] -= delta; - if (*(__u64 *)&clk[1] > delta) - clk[0]--; - ext_to_timespec64(clk, &boot_time); + clk = tod_clock_base; + clk.eitod -= delta; + ext_to_timespec64(&clk, &boot_time); read_persistent_clock64(wall_time); *boot_offset = timespec64_sub(*wall_time, boot_time); @@ -381,10 +368,7 @@ static void clock_sync_global(unsigned long long delta) struct ptff_qto qto; /* Fixup the monotonic sched clock. */ - *(unsigned long long *) &tod_clock_base[1] += delta; - if (*(unsigned long long *) &tod_clock_base[1] < delta) - /* Epoch overflow */ - tod_clock_base[0]++; + tod_clock_base.eitod += delta; /* Adjust TOD steering parameters. */ now = get_tod_clock(); adj = tod_steering_end - now; diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index ca47141a5be9..e7ce447651b9 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -62,16 +62,16 @@ static struct mask_info drawer_info; struct cpu_topology_s390 cpu_topology[NR_CPUS]; EXPORT_SYMBOL_GPL(cpu_topology); -static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) +static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int cpu) { - cpumask_t mask; + static cpumask_t mask; cpumask_copy(&mask, cpumask_of(cpu)); switch (topology_mode) { case TOPOLOGY_MODE_HW: while (info) { if (cpumask_test_cpu(cpu, &info->mask)) { - mask = info->mask; + cpumask_copy(&mask, &info->mask); break; } info = info->next; @@ -89,23 +89,24 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) break; } cpumask_and(&mask, &mask, cpu_online_mask); - return mask; + cpumask_copy(dst, &mask); } -static cpumask_t cpu_thread_map(unsigned int cpu) +static void cpu_thread_map(cpumask_t *dst, unsigned int cpu) { - cpumask_t mask; + static cpumask_t mask; int i; cpumask_copy(&mask, cpumask_of(cpu)); if (topology_mode != TOPOLOGY_MODE_HW) - return mask; + goto out; cpu -= cpu % (smp_cpu_mtid + 1); for (i = 0; i <= smp_cpu_mtid; i++) if (cpu_present(cpu + i)) cpumask_set_cpu(cpu + i, &mask); cpumask_and(&mask, &mask, cpu_online_mask); - return mask; +out: + cpumask_copy(dst, &mask); } #define TOPOLOGY_CORE_BITS 64 @@ -250,10 +251,10 @@ void update_cpu_masks(void) for_each_possible_cpu(cpu) { topo = &cpu_topology[cpu]; - topo->thread_mask = cpu_thread_map(cpu); - topo->core_mask = cpu_group_map(&socket_info, cpu); - topo->book_mask = cpu_group_map(&book_info, cpu); - topo->drawer_mask = cpu_group_map(&drawer_info, cpu); + cpu_thread_map(&topo->thread_mask, cpu); + cpu_group_map(&topo->core_mask, &socket_info, cpu); + cpu_group_map(&topo->book_mask, &book_info, cpu); + cpu_group_map(&topo->drawer_mask, &drawer_info, cpu); topo->booted_cores = 0; if (topology_mode != TOPOLOGY_MODE_HW) { id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu; diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 8d1e8a1a97df..db7dd59b570c 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -13,6 +13,8 @@ * 'Traps.c' handles hardware traps and faults after we have saved some * state in 'asm.s'. 
*/ +#include "asm/irqflags.h" +#include "asm/ptrace.h" #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/extable.h> @@ -23,7 +25,9 @@ #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/cpu.h> +#include <linux/entry-common.h> #include <asm/fpu/api.h> +#include <asm/vtime.h> #include "entry.h" static inline void __user *get_trap_ip(struct pt_regs *regs) @@ -288,3 +292,64 @@ void __init trap_init(void) local_mcck_enable(); test_monitor_call(); } + +void noinstr __do_pgm_check(struct pt_regs *regs) +{ + unsigned long last_break = S390_lowcore.breaking_event_addr; + unsigned int trapnr, syscall_redirect = 0; + irqentry_state_t state; + + regs->int_code = *(u32 *)&S390_lowcore.pgm_ilc; + regs->int_parm_long = S390_lowcore.trans_exc_code; + + state = irqentry_enter(regs); + + if (user_mode(regs)) { + update_timer_sys(); + if (last_break < 4096) + last_break = 1; + current->thread.last_break = last_break; + regs->args[0] = last_break; + } + + if (S390_lowcore.pgm_code & 0x0200) { + /* transaction abort */ + memcpy(¤t->thread.trap_tdb, &S390_lowcore.pgm_tdb, 256); + } + + if (S390_lowcore.pgm_code & PGM_INT_CODE_PER) { + if (user_mode(regs)) { + struct per_event *ev = ¤t->thread.per_event; + + set_thread_flag(TIF_PER_TRAP); + ev->address = S390_lowcore.per_address; + ev->cause = *(u16 *)&S390_lowcore.per_code; + ev->paid = S390_lowcore.per_access_id; + } else { + /* PER event in kernel is kprobes */ + __arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER); + do_per_trap(regs); + goto out; + } + } + + if (!irqs_disabled_flags(regs->psw.mask)) + trace_hardirqs_on(); + __arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER); + + trapnr = regs->int_code & PGM_INT_CODE_MASK; + if (trapnr) + pgm_check_table[trapnr](regs); + syscall_redirect = user_mode(regs) && test_pt_regs_flag(regs, PIF_SYSCALL); +out: + local_irq_disable(); + irqentry_exit(regs, state); + + if (syscall_redirect) { + enter_from_user_mode(regs); + local_irq_enable(); + regs->orig_gpr2 = regs->gprs[2]; + do_syscall(regs); + exit_to_user_mode(); + } +} diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index 5007fac01bb5..bbf8622bbf5d 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -32,7 +32,7 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) return -EINVAL; if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) return -EINVAL; - clear_pt_regs_flag(regs, PIF_PER_TRAP); + clear_thread_flag(TIF_PER_TRAP); auprobe->saved_per = psw_bits(regs->psw).per; auprobe->saved_int_code = regs->int_code; regs->int_code = UPROBE_TRAP_NR; @@ -103,7 +103,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) /* fix per address */ current->thread.per_event.address = utask->vaddr; /* trigger per event */ - set_pt_regs_flag(regs, PIF_PER_TRAP); + set_thread_flag(TIF_PER_TRAP); } return 0; } @@ -259,7 +259,7 @@ static void sim_stor_event(struct pt_regs *regs, void *addr, int len) return; current->thread.per_event.address = regs->psw.addr; current->thread.per_event.cause = PER_EVENT_STORE >> 16; - set_pt_regs_flag(regs, PIF_PER_TRAP); + set_thread_flag(TIF_PER_TRAP); } /* diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 8bc269c55fd3..8c4e07d533c8 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -6,186 +6,226 @@ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) */ -#include <linux/init.h> +#include <linux/binfmts.h> +#include <linux/compat.h> +#include <linux/elf.h> 
#include <linux/errno.h> -#include <linux/sched.h> +#include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> -#include <linux/smp.h> -#include <linux/stddef.h> -#include <linux/unistd.h> #include <linux/slab.h> -#include <linux/user.h> -#include <linux/elf.h> -#include <linux/security.h> -#include <linux/memblock.h> -#include <linux/compat.h> -#include <linux/binfmts.h> +#include <linux/smp.h> +#include <linux/time_namespace.h> #include <vdso/datapage.h> -#include <asm/asm-offsets.h> -#include <asm/processor.h> -#include <asm/mmu.h> -#include <asm/mmu_context.h> -#include <asm/sections.h> #include <asm/vdso.h> -#include <asm/facility.h> -#include <asm/timex.h> -extern char vdso64_start, vdso64_end; -static void *vdso64_kbase = &vdso64_start; -static unsigned int vdso64_pages; -static struct page **vdso64_pagelist; +extern char vdso64_start[], vdso64_end[]; +static unsigned int vdso_pages; + +static struct vm_special_mapping vvar_mapping; + +static union { + struct vdso_data data[CS_BASES]; + u8 page[PAGE_SIZE]; +} vdso_data_store __page_aligned_data; + +struct vdso_data *vdso_data = vdso_data_store.data; + +enum vvar_pages { + VVAR_DATA_PAGE_OFFSET, + VVAR_TIMENS_PAGE_OFFSET, + VVAR_NR_PAGES, +}; -/* - * Should the kernel map a VDSO page into processes and pass its - * address down to glibc upon exec()? - */ unsigned int __read_mostly vdso_enabled = 1; -static vm_fault_t vdso_fault(const struct vm_special_mapping *sm, - struct vm_area_struct *vma, struct vm_fault *vmf) +static int __init vdso_setup(char *str) { - struct page **vdso_pagelist; - unsigned long vdso_pages; + bool enabled; - vdso_pagelist = vdso64_pagelist; - vdso_pages = vdso64_pages; + if (!kstrtobool(str, &enabled)) + vdso_enabled = enabled; + return 1; +} +__setup("vdso=", vdso_setup); - if (vmf->pgoff >= vdso_pages) - return VM_FAULT_SIGBUS; +#ifdef CONFIG_TIME_NS +struct vdso_data *arch_get_vdso_data(void *vvar_page) +{ + return (struct vdso_data *)(vvar_page); +} + +static struct page *find_timens_vvar_page(struct vm_area_struct *vma) +{ + if (likely(vma->vm_mm == current->mm)) + return current->nsproxy->time_ns->vvar_page; + /* + * VM_PFNMAP | VM_IO protect .fault() handler from being called + * through interfaces like /proc/$pid/mem or + * process_vm_{readv,writev}() as long as there's no .access() + * in special_mapping_vmops(). + * For more details check_vma_flags() and __access_remote_vm() + */ + WARN(1, "vvar_page accessed remotely"); + return NULL; +} + +/* + * The VVAR page layout depends on whether a task belongs to the root or + * non-root time namespace. Whenever a task changes its namespace, the VVAR + * page tables are cleared and then they will be re-faulted with a + * corresponding layout. + * See also the comment near timens_setup_vdso_data() for details. 
+ */ +int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) +{ + struct mm_struct *mm = task->mm; + struct vm_area_struct *vma; - vmf->page = vdso_pagelist[vmf->pgoff]; - get_page(vmf->page); + mmap_read_lock(mm); + for (vma = mm->mmap; vma; vma = vma->vm_next) { + unsigned long size = vma->vm_end - vma->vm_start; + + if (!vma_is_special_mapping(vma, &vvar_mapping)) + continue; + zap_page_range(vma, vma->vm_start, size); + break; + } + mmap_read_unlock(mm); return 0; } +#else +static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma) +{ + return NULL; +} +#endif + +static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, + struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct page *timens_page = find_timens_vvar_page(vma); + unsigned long addr, pfn; + vm_fault_t err; + + switch (vmf->pgoff) { + case VVAR_DATA_PAGE_OFFSET: + pfn = virt_to_pfn(vdso_data); + if (timens_page) { + /* + * Fault in VVAR page too, since it will be accessed + * to get clock data anyway. + */ + addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE; + err = vmf_insert_pfn(vma, addr, pfn); + if (unlikely(err & VM_FAULT_ERROR)) + return err; + pfn = page_to_pfn(timens_page); + } + break; +#ifdef CONFIG_TIME_NS + case VVAR_TIMENS_PAGE_OFFSET: + /* + * If a task belongs to a time namespace then a namespace + * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and + * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET + * offset. + * See also the comment near timens_setup_vdso_data(). + */ + if (!timens_page) + return VM_FAULT_SIGBUS; + pfn = virt_to_pfn(vdso_data); + break; +#endif /* CONFIG_TIME_NS */ + default: + return VM_FAULT_SIGBUS; + } + return vmf_insert_pfn(vma, vmf->address, pfn); +} static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *vma) { current->mm->context.vdso_base = vma->vm_start; - return 0; } -static const struct vm_special_mapping vdso_mapping = { +static struct vm_special_mapping vvar_mapping = { + .name = "[vvar]", + .fault = vvar_fault, +}; + +static struct vm_special_mapping vdso_mapping = { .name = "[vdso]", - .fault = vdso_fault, .mremap = vdso_mremap, }; -static int __init vdso_setup(char *str) -{ - bool enabled; - - if (!kstrtobool(str, &enabled)) - vdso_enabled = enabled; - return 1; -} -__setup("vdso=", vdso_setup); - -/* - * The vdso data page - */ -static union { - struct vdso_data data; - u8 page[PAGE_SIZE]; -} vdso_data_store __page_aligned_data; -struct vdso_data *vdso_data = (struct vdso_data *)&vdso_data_store.data; - -void vdso_getcpu_init(void) +int vdso_getcpu_init(void) { set_tod_programmable_field(smp_processor_id()); + return 0; } +early_initcall(vdso_getcpu_init); /* Must be called before SMP init */ -/* - * This is called from binfmt_elf, we create the special vma for the - * vDSO and insert it into the mm struct tree - */ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { + unsigned long vdso_text_len, vdso_mapping_len; + unsigned long vvar_start, vdso_text_start; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; - unsigned long vdso_pages; - unsigned long vdso_base; int rc; - if (!vdso_enabled) - return 0; - - if (is_compat_task()) + BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES); + if (!vdso_enabled || is_compat_task()) return 0; - - vdso_pages = vdso64_pages; - /* - * vDSO has a problem and was disabled, just don't "enable" it for - * the process - */ - if (vdso_pages == 0) - return 0; - - /* - * pick a base address for the vDSO 
in process space. We try to put - * it at vdso_base which is the "natural" base for it, but we might - * fail and end up putting it elsewhere. - */ if (mmap_write_lock_killable(mm)) return -EINTR; - vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0); - if (IS_ERR_VALUE(vdso_base)) { - rc = vdso_base; - goto out_up; - } - - /* - * our vma flags don't have VM_WRITE so by default, the process - * isn't allowed to write those pages. - * gdb can break that with ptrace interface, and thus trigger COW - * on those pages but it's then your responsibility to never do that - * on the "data" page of the vDSO or you'll stop getting kernel - * updates and your nice userland gettimeofday will be totally dead. - * It's fine to use that for setting breakpoints in the vDSO code - * pages though. - */ - vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, + vdso_text_len = vdso_pages << PAGE_SHIFT; + vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE; + vvar_start = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); + rc = vvar_start; + if (IS_ERR_VALUE(vvar_start)) + goto out; + vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE, + VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP| + VM_PFNMAP, + &vvar_mapping); + rc = PTR_ERR(vma); + if (IS_ERR(vma)) + goto out; + vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE; + /* VM_MAYWRITE for COW so gdb can set breakpoints */ + vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, &vdso_mapping); if (IS_ERR(vma)) { + do_munmap(mm, vvar_start, PAGE_SIZE, NULL); rc = PTR_ERR(vma); - goto out_up; + } else { + current->mm->context.vdso_base = vdso_text_start; + rc = 0; } - - current->mm->context.vdso_base = vdso_base; - rc = 0; - -out_up: +out: mmap_write_unlock(mm); return rc; } static int __init vdso_init(void) { + struct page **pages; int i; - vdso_getcpu_init(); - /* Calculate the size of the 64 bit vDSO */ - vdso64_pages = ((&vdso64_end - &vdso64_start - + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; - - /* Make sure pages are in the correct state */ - vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *), - GFP_KERNEL); - BUG_ON(vdso64_pagelist == NULL); - for (i = 0; i < vdso64_pages - 1; i++) { - struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE); - get_page(pg); - vdso64_pagelist[i] = pg; + vdso_pages = (vdso64_end - vdso64_start) >> PAGE_SHIFT; + pages = kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL); + if (!pages) { + vdso_enabled = 0; + return -ENOMEM; } - vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); - vdso64_pagelist[vdso64_pages] = NULL; - - get_page(virt_to_page(vdso_data)); - + for (i = 0; i < vdso_pages; i++) + pages[i] = virt_to_page(vdso64_start + i * PAGE_SIZE); + pages[vdso_pages] = NULL; + vdso_mapping.pages = pages; return 0; } -early_initcall(vdso_init); +arch_initcall(vdso_init); diff --git a/arch/s390/kernel/vdso64/getcpu.c b/arch/s390/kernel/vdso64/getcpu.c index 5b2bc7494d5b..5c5d4a848b76 100644 --- a/arch/s390/kernel/vdso64/getcpu.c +++ b/arch/s390/kernel/vdso64/getcpu.c @@ -8,12 +8,12 @@ int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) { - __u16 todval[8]; + union tod_clock clk; /* CPU number is stored in the programmable field of the TOD clock */ - get_tod_clock_ext((char *)todval); + store_tod_clock_ext(&clk); if (cpu) - *cpu = todval[7]; + *cpu = clk.pf; /* NUMA node is always zero */ if (node) *node = 0; diff --git 
a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S index 7bde3909290f..518f1ea405f4 100644 --- a/arch/s390/kernel/vdso64/vdso64.lds.S +++ b/arch/s390/kernel/vdso64/vdso64.lds.S @@ -13,6 +13,10 @@ ENTRY(_start) SECTIONS { + PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); +#ifdef CONFIG_TIME_NS + PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); +#endif . = VDSO64_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text @@ -94,9 +98,6 @@ SECTIONS .debug_ranges 0 : { *(.debug_ranges) } .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } - . = ALIGN(PAGE_SIZE); - PROVIDE(_vdso_data = .); - /DISCARD/ : { *(.note.GNU-stack) *(.branch_lt) diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 5aaa2ca6a928..73c7afcc0527 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -13,7 +13,7 @@ #include <linux/timex.h> #include <linux/types.h> #include <linux/time.h> - +#include <asm/alternative.h> #include <asm/vtimer.h> #include <asm/vtime.h> #include <asm/cpu_mf.h> @@ -128,15 +128,13 @@ static int do_account_vtime(struct task_struct *tsk) timer = S390_lowcore.last_update_timer; clock = S390_lowcore.last_update_clock; - asm volatile( - " stpt %0\n" /* Store current cpu timer value */ -#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES - " stckf %1" /* Store current tod clock value */ -#else - " stck %1" /* Store current tod clock value */ -#endif - : "=Q" (S390_lowcore.last_update_timer), - "=Q" (S390_lowcore.last_update_clock)); + /* Use STORE CLOCK by default, STORE CLOCK FAST if available. */ + alternative_io("stpt %0\n .insn s,0xb2050000,%1\n", + "stpt %0\n .insn s,0xb27c0000,%1\n", + 25, + ASM_OUTPUT2("=Q" (S390_lowcore.last_update_timer), + "=Q" (S390_lowcore.last_update_clock)), + ASM_NO_INPUT_CLOBBER("cc")); clock = S390_lowcore.last_update_clock - clock; timer -= S390_lowcore.last_update_timer; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index dbafd057ca6a..2f09e9d7dc95 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -45,6 +45,7 @@ #include <asm/timex.h> #include <asm/ap.h> #include <asm/uv.h> +#include <asm/fpu/api.h> #include "kvm-s390.h" #include "gaccess.h" @@ -164,12 +165,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { NULL } }; -struct kvm_s390_tod_clock_ext { - __u8 epoch_idx; - __u64 tod; - __u8 reserved[7]; -} __packed; - /* allow nested virtualization in KVM (if enabled by user space) */ static int nested; module_param(nested, int, S_IRUGO); @@ -1165,17 +1160,17 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) static void kvm_s390_get_tod_clock(struct kvm *kvm, struct kvm_s390_vm_tod_clock *gtod) { - struct kvm_s390_tod_clock_ext htod; + union tod_clock clk; preempt_disable(); - get_tod_clock_ext((char *)&htod); + store_tod_clock_ext(&clk); - gtod->tod = htod.tod + kvm->arch.epoch; + gtod->tod = clk.tod + kvm->arch.epoch; gtod->epoch_idx = 0; if (test_kvm_facility(kvm, 139)) { - gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx; - if (gtod->tod < htod.tod) + gtod->epoch_idx = clk.ei + kvm->arch.epdx; + if (gtod->tod < clk.tod) gtod->epoch_idx += 1; } @@ -3866,18 +3861,18 @@ void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) { struct kvm_vcpu *vcpu; - struct kvm_s390_tod_clock_ext htod; + union tod_clock clk; int i; mutex_lock(&kvm->lock); preempt_disable(); - get_tod_clock_ext((char *)&htod); + store_tod_clock_ext(&clk); - kvm->arch.epoch = gtod->tod - htod.tod; + kvm->arch.epoch = gtod->tod - clk.tod; kvm->arch.epdx = 
0; if (test_kvm_facility(kvm, 139)) { - kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; + kvm->arch.epdx = gtod->epoch_idx - clk.ei; if (kvm->arch.epoch > gtod->tod) kvm->arch.epdx -= 1; } @@ -4147,6 +4142,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) vcpu->run->s.regs.gprs, sizeof(sie_page->pv_grregs)); } + if (test_cpu_flag(CIF_FPU)) + load_fpu_regs(); exit_reason = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); if (kvm_s390_pv_cpu_is_protected(vcpu)) { diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index c5d0a58b2c29..bd803e091918 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -18,6 +18,7 @@ #include <asm/sclp.h> #include <asm/nmi.h> #include <asm/dis.h> +#include <asm/fpu/api.h> #include "kvm-s390.h" #include "gaccess.h" @@ -1028,6 +1029,8 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) */ vcpu->arch.sie_block->prog0c |= PROG_IN_SIE; barrier(); + if (test_cpu_flag(CIF_FPU)) + load_fpu_regs(); if (!kvm_s390_vcpu_sie_inhibited(vcpu)) rc = sie64a(scb_s, vcpu->run->s.regs.gprs); barrier(); diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index e8f642446fed..2fece1fd210a 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -16,8 +16,8 @@ #include <asm/mmu_context.h> #include <asm/facility.h> -#ifdef CONFIG_DEBUG_USER_ASCE -void debug_user_asce(void) +#ifdef CONFIG_DEBUG_ENTRY +void debug_user_asce(int exit) { unsigned long cr1, cr7; @@ -25,12 +25,14 @@ void debug_user_asce(void) __ctl_store(cr7, 7, 7); if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce) return; - panic("incorrect ASCE on kernel exit\n" + panic("incorrect ASCE on kernel %s\n" "cr1: %016lx cr7: %016lx\n" "kernel: %016llx user: %016llx\n", - cr1, cr7, S390_lowcore.kernel_asce, S390_lowcore.user_asce); + exit ? "exit" : "entry", cr1, cr7, + S390_lowcore.kernel_asce, S390_lowcore.user_asce); + } -#endif /*CONFIG_DEBUG_USER_ASCE */ +#endif /*CONFIG_DEBUG_ENTRY */ #ifndef CONFIG_HAVE_MARCH_Z10_FEATURES static DEFINE_STATIC_KEY_FALSE(have_mvcos); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index b8210103de14..e30c7c781172 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -385,7 +385,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) * The instruction that caused the program check has * been nullified. Don't signal single step via SIGTRAP. 
*/ - clear_pt_regs_flag(regs, PIF_PER_TRAP); + clear_thread_flag(TIF_PER_TRAP); if (kprobe_page_fault(regs, 14)) return 0; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 73a163065b95..0e76b2127dc6 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -297,6 +297,7 @@ int arch_add_memory(int nid, u64 start, u64 size, if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot)) return -EINVAL; + VM_BUG_ON(!mhp_range_allowed(start, size, true)); rc = vmem_add_mapping(start, size); if (rc) return rc; diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 4e87c819ddea..781965f7210e 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -58,7 +58,7 @@ unsigned long *crst_table_alloc(struct mm_struct *mm) if (!page) return NULL; arch_set_page_dat(page, 2); - return (unsigned long *) page_to_phys(page); + return (unsigned long *) page_to_virt(page); } void crst_table_free(struct mm_struct *mm, unsigned long *table) @@ -161,7 +161,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm) page = alloc_page(GFP_KERNEL); if (page) { - table = (u64 *)page_to_phys(page); + table = (u64 *)page_to_virt(page); memset64(table, _PAGE_INVALID, PTRS_PER_PTE); memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE); } @@ -194,7 +194,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) mask = atomic_read(&page->_refcount) >> 24; mask = (mask | (mask >> 4)) & 3; if (mask != 3) { - table = (unsigned long *) page_to_phys(page); + table = (unsigned long *) page_to_virt(page); bit = mask & 1; /* =1 -> second 2K */ if (bit) table += PTRS_PER_PTE; @@ -217,7 +217,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) } arch_set_page_dat(page, 0); /* Initialize page table */ - table = (unsigned long *) page_to_phys(page); + table = (unsigned long *) page_to_virt(page); if (mm_alloc_pgste(mm)) { /* Return 4K page table with PGSTEs */ atomic_xor_bits(&page->_refcount, 3 << 24); @@ -239,10 +239,10 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) struct page *page; unsigned int bit, mask; - page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + page = virt_to_page(table); if (!mm_alloc_pgste(mm)) { /* Free 2K page table fragment of a 4K page */ - bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); + bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); spin_lock_bh(&mm->context.lock); mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24)); mask >>= 24; @@ -269,14 +269,14 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table, unsigned int bit, mask; mm = tlb->mm; - page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + page = virt_to_page(table); if (mm_alloc_pgste(mm)) { gmap_unlink(mm, table, vmaddr); - table = (unsigned long *) (__pa(table) | 3); + table = (unsigned long *) ((unsigned long)table | 3); tlb_remove_table(tlb, table); return; } - bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)); + bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)); spin_lock_bh(&mm->context.lock); mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24)); mask >>= 24; @@ -285,7 +285,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table, else list_del(&page->lru); spin_unlock_bh(&mm->context.lock); - table = (unsigned long *) (__pa(table) | (1U << bit)); + table = (unsigned long *) ((unsigned long) table | (1U << bit)); tlb_remove_table(tlb, table); } @@ -293,7 +293,7 @@ void __tlb_remove_table(void *_table) { unsigned int mask = (unsigned long) _table & 3; void *table 
= (void *)((unsigned long) _table ^ mask); - struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + struct page *page = virt_to_page(table); switch (mask) { case 0: /* pmd, pud, or p4d */ diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 01f3a5f58e64..96897fab89dc 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -4,6 +4,7 @@ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> */ +#include <linux/memory_hotplug.h> #include <linux/memblock.h> #include <linux/pfn.h> #include <linux/mm.h> @@ -26,14 +27,14 @@ static void __ref *vmem_alloc_pages(unsigned int order) if (slab_is_available()) return (void *)__get_free_pages(GFP_KERNEL, order); - return (void *) memblock_phys_alloc(size, size); + return memblock_alloc(size, size); } static void vmem_free_pages(unsigned long addr, int order) { /* We don't expect boot memory to be removed ever. */ if (!slab_is_available() || - WARN_ON_ONCE(PageReserved(phys_to_page(addr)))) + WARN_ON_ONCE(PageReserved(virt_to_page(addr)))) return; free_pages(addr, order); } @@ -56,7 +57,7 @@ pte_t __ref *vmem_pte_alloc(void) if (slab_is_available()) pte = (pte_t *) page_table_alloc(&init_mm); else - pte = (pte_t *) memblock_phys_alloc(size, size); + pte = (pte_t *) memblock_alloc(size, size); if (!pte) return NULL; memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE); @@ -84,7 +85,7 @@ static void vmemmap_flush_unused_sub_pmd(void) { if (!unused_sub_pmd_start) return; - memset(__va(unused_sub_pmd_start), PAGE_UNUSED, + memset((void *)unused_sub_pmd_start, PAGE_UNUSED, ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start); unused_sub_pmd_start = 0; } @@ -97,7 +98,7 @@ static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end) * getting removed (just in case the memmap never gets initialized, * e.g., because the memory block never gets onlined). */ - memset(__va(start), 0, sizeof(struct page)); + memset((void *)start, 0, sizeof(struct page)); } static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end) @@ -118,7 +119,7 @@ static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end) static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end) { - void *page = __va(ALIGN_DOWN(start, PMD_SIZE)); + unsigned long page = ALIGN_DOWN(start, PMD_SIZE); vmemmap_flush_unused_sub_pmd(); @@ -127,7 +128,7 @@ static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end) /* Mark the unused parts of the new memmap page PAGE_UNUSED. */ if (!IS_ALIGNED(start, PMD_SIZE)) - memset(page, PAGE_UNUSED, start - __pa(page)); + memset((void *)page, PAGE_UNUSED, start - page); /* * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of * consecutive sections. Remember for the last added PMD the last @@ -140,11 +141,11 @@ static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end) /* Returns true if the PMD is completely unused and can be freed. 
*/ static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end) { - void *page = __va(ALIGN_DOWN(start, PMD_SIZE)); + unsigned long page = ALIGN_DOWN(start, PMD_SIZE); vmemmap_flush_unused_sub_pmd(); - memset(__va(start), PAGE_UNUSED, end - start); - return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE); + memset((void *)start, PAGE_UNUSED, end - start); + return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE); } /* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */ @@ -165,7 +166,7 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr, if (pte_none(*pte)) continue; if (!direct) - vmem_free_pages(pfn_to_phys(pte_pfn(*pte)), 0); + vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0); pte_clear(&init_mm, addr, pte); } else if (pte_none(*pte)) { if (!direct) { @@ -175,7 +176,7 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr, goto out; pte_val(*pte) = __pa(new_page) | prot; } else { - pte_val(*pte) = addr | prot; + pte_val(*pte) = __pa(addr) | prot; } } else { continue; @@ -200,7 +201,7 @@ static void try_free_pte_table(pmd_t *pmd, unsigned long start) if (!pte_none(*pte)) return; } - vmem_pte_free(__va(pmd_deref(*pmd))); + vmem_pte_free((unsigned long *) pmd_deref(*pmd)); pmd_clear(pmd); } @@ -241,7 +242,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr, IS_ALIGNED(next, PMD_SIZE) && MACHINE_HAS_EDAT1 && addr && direct && !debug_pagealloc_enabled()) { - pmd_val(*pmd) = addr | prot; + pmd_val(*pmd) = __pa(addr) | prot; pages++; continue; } else if (!direct && MACHINE_HAS_EDAT1) { @@ -337,7 +338,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end, IS_ALIGNED(next, PUD_SIZE) && MACHINE_HAS_EDAT2 && addr && direct && !debug_pagealloc_enabled()) { - pud_val(*pud) = addr | prot; + pud_val(*pud) = __pa(addr) | prot; pages++; continue; } @@ -532,11 +533,22 @@ void vmem_remove_mapping(unsigned long start, unsigned long size) mutex_unlock(&vmem_mutex); } +struct range arch_get_mappable_range(void) +{ + struct range mhp_range; + + mhp_range.start = 0; + mhp_range.end = VMEM_MAX_PHYS - 1; + return mhp_range; +} + int vmem_add_mapping(unsigned long start, unsigned long size) { + struct range range = arch_get_mappable_range(); int ret; - if (start + size > VMEM_MAX_PHYS || + if (start < range.start || + start + size > range.end + 1 || start + size < start) return -ERANGE; diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 41df8fcfddde..600881d894dd 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -695,43 +695,68 @@ void zpci_remove_device(struct zpci_dev *zdev) } } -int zpci_create_device(struct zpci_dev *zdev) +/** + * zpci_create_device() - Create a new zpci_dev and add it to the zbus + * @fid: Function ID of the device to be created + * @fh: Current Function Handle of the device to be created + * @state: Initial state after creation either Standby or Configured + * + * Creates a new zpci device and adds it to its, possibly newly created, zbus + * as well as zpci_list. 
+ * + * Returns: 0 on success, an error value otherwise + */ +int zpci_create_device(u32 fid, u32 fh, enum zpci_state state) { + struct zpci_dev *zdev; int rc; - kref_init(&zdev->kref); + zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state); + zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); + if (!zdev) + return -ENOMEM; - spin_lock(&zpci_list_lock); - list_add_tail(&zdev->entry, &zpci_list); - spin_unlock(&zpci_list_lock); + /* FID and Function Handle are the static/dynamic identifiers */ + zdev->fid = fid; + zdev->fh = fh; - rc = zpci_init_iommu(zdev); + /* Query function properties and update zdev */ + rc = clp_query_pci_fn(zdev); if (rc) - goto out; + goto error; + zdev->state = state; + kref_init(&zdev->kref); mutex_init(&zdev->lock); + + rc = zpci_init_iommu(zdev); + if (rc) + goto error; + if (zdev->state == ZPCI_FN_STATE_CONFIGURED) { rc = zpci_enable_device(zdev); if (rc) - goto out_destroy_iommu; + goto error_destroy_iommu; } rc = zpci_bus_device_register(zdev, &pci_root_ops); if (rc) - goto out_disable; + goto error_disable; + + spin_lock(&zpci_list_lock); + list_add_tail(&zdev->entry, &zpci_list); + spin_unlock(&zpci_list_lock); return 0; -out_disable: +error_disable: if (zdev->state == ZPCI_FN_STATE_ONLINE) zpci_disable_device(zdev); - -out_destroy_iommu: +error_destroy_iommu: zpci_destroy_iommu(zdev); -out: - spin_lock(&zpci_list_lock); - list_del(&zdev->entry); - spin_unlock(&zpci_list_lock); +error: + zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc); + kfree(zdev); return rc; } diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 153720d21ae7..d3331596ddbe 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -181,7 +181,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev, return 0; } -static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh) +int clp_query_pci_fn(struct zpci_dev *zdev) { struct clp_req_rsp_query_pci *rrb; int rc; @@ -194,7 +194,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh) rrb->request.hdr.len = sizeof(rrb->request); rrb->request.hdr.cmd = CLP_QUERY_PCI_FN; rrb->response.hdr.len = sizeof(rrb->response); - rrb->request.fh = fh; + rrb->request.fh = zdev->fh; rc = clp_req(rrb, CLP_LPS_PCI); if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) { @@ -212,40 +212,6 @@ out: return rc; } -int clp_add_pci_device(u32 fid, u32 fh, int configured) -{ - struct zpci_dev *zdev; - int rc = -ENOMEM; - - zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured); - zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); - if (!zdev) - goto error; - - zdev->fh = fh; - zdev->fid = fid; - - /* Query function properties and update zdev */ - rc = clp_query_pci_fn(zdev, fh); - if (rc) - goto error; - - if (configured) - zdev->state = ZPCI_FN_STATE_CONFIGURED; - else - zdev->state = ZPCI_FN_STATE_STANDBY; - - rc = zpci_create_device(zdev); - if (rc) - goto error; - return 0; - -error: - zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc); - kfree(zdev); - return rc; -} - static int clp_refresh_fh(u32 fid); /* * Enable/Disable a given PCI function and update its function handle if @@ -408,7 +374,7 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data) zdev = get_zdev_by_fid(entry->fid); if (!zdev) - clp_add_pci_device(entry->fid, entry->fh, entry->config_state); + zpci_create_device(entry->fid, entry->fh, entry->config_state); } int clp_scan_pci_devices(void) diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 9a6bae503fe6..b4162da4e8a2 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -80,7 
+80,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) enum zpci_state state; int ret; - if (zdev && zdev->zbus && zdev->zbus->bus) + if (zdev && zdev->zbus->bus) pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); zpci_err("avail CCDF:\n"); @@ -89,7 +89,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) switch (ccdf->pec) { case 0x0301: /* Reserved|Standby -> Configured */ if (!zdev) { - ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1); + zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED); break; } /* the configuration request may be stale */ @@ -116,7 +116,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) break; case 0x0302: /* Reserved -> Standby */ if (!zdev) { - clp_add_pci_device(ccdf->fid, ccdf->fh, 0); + zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY); break; } zdev->fh = ccdf->fh; diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 18f2d10c3176..474617b88648 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -170,7 +170,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, if (!(vma->vm_flags & VM_WRITE)) goto out_unlock_mmap; - ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl); + ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl); if (ret) goto out_unlock_mmap; @@ -311,7 +311,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, if (!(vma->vm_flags & VM_WRITE)) goto out_unlock_mmap; - ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl); + ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl); if (ret) goto out_unlock_mmap; diff --git a/arch/s390/tools/opcodes.txt b/arch/s390/tools/opcodes.txt index 46d8ed96cf06..0e207c46e8da 100644 --- a/arch/s390/tools/opcodes.txt +++ b/arch/s390/tools/opcodes.txt @@ -597,7 +597,7 @@ b9b3 cu42 RRE_RR b9bd trtre RRF_U0RR b9be srstu RRE_RR b9bf trte RRF_U0RR -b9c0 selhhhr RRF_RURR +b9c0 selfhr RRF_RURR b9c8 ahhhr RRF_R0RR2 b9c9 shhhr RRF_R0RR2 b9ca alhhhr RRF_R0RR2 diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 7ac847ca6356..e798e55915c2 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -54,6 +54,7 @@ config SUPERH select HAVE_PERF_EVENTS select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_UID16 + select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select IRQ_FORCED_THREADING diff --git a/arch/sh/boards/mach-landisk/gio.c b/arch/sh/boards/mach-landisk/gio.c index 1c0da99dfc60..ff2200fec29a 100644 --- a/arch/sh/boards/mach-landisk/gio.c +++ b/arch/sh/boards/mach-landisk/gio.c @@ -27,11 +27,10 @@ static int openCnt; static int gio_open(struct inode *inode, struct file *filp) { - int minor; + int minor = iminor(inode); int ret = -ENOENT; preempt_disable(); - minor = MINOR(inode->i_rdev); if (minor < DEVCOUNT) { if (openCnt > 0) { ret = -EALREADY; @@ -46,9 +45,8 @@ static int gio_open(struct inode *inode, struct file *filp) static int gio_close(struct inode *inode, struct file *filp) { - int minor; + int minor = iminor(inode); - minor = MINOR(inode->i_rdev); if (minor < DEVCOUNT) { openCnt--; } diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig index 02ba62298576..d77f54e906fd 100644 --- a/arch/sh/configs/edosk7760_defconfig +++ b/arch/sh/configs/edosk7760_defconfig @@ -102,7 +102,6 @@ CONFIG_NLS_UTF8=y CONFIG_PRINTK_TIME=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_MAGIC_SYSRQ=y -CONFIG_UNUSED_SYMBOLS=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_SHIRQ=y 
CONFIG_DETECT_HUNG_TASK=y diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig index d00376eb044f..6c719ab4332a 100644 --- a/arch/sh/configs/sdk7780_defconfig +++ b/arch/sh/configs/sdk7780_defconfig @@ -128,7 +128,6 @@ CONFIG_NLS_ISO8859_15=y CONFIG_NLS_UTF8=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_MAGIC_SYSRQ=y -CONFIG_UNUSED_SYMBOLS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h index 6d44c32ef047..839551ce398c 100644 --- a/arch/sh/include/asm/irq.h +++ b/arch/sh/include/asm/irq.h @@ -51,7 +51,6 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs); #ifdef CONFIG_IRQSTACKS extern void irq_ctx_init(int cpu); extern void irq_ctx_exit(int cpu); -# define __ARCH_HAS_DO_SOFTIRQ #else # define irq_ctx_init(cpu) do { } while (0) # define irq_ctx_exit(cpu) do { } while (0) diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index ab5f790b0cd2..ef0f0827cf57 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -20,6 +20,7 @@ #include <linux/uaccess.h> #include <asm/thread_info.h> #include <cpu/mmu_context.h> +#include <asm/softirq_stack.h> atomic_t irq_err_count; diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 80a5d1c66a51..1aa508eb0823 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -114,7 +114,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, childregs = task_pt_regs(p); p->thread.sp = (unsigned long) childregs; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); p->thread.pc = (unsigned long) ret_from_kernel_thread; childregs->regs[4] = arg; diff --git a/arch/sh/kernel/syscalls/Makefile b/arch/sh/kernel/syscalls/Makefile index 659faefdcb1d..285aaba832d9 100644 --- a/arch/sh/kernel/syscalls/Makefile +++ b/arch/sh/kernel/syscalls/Makefile @@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') -syscall := $(srctree)/$(src)/syscall.tbl +syscall := $(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh systbl := $(srctree)/$(src)/syscalltbl.sh @@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_abi_$(basetarget))' \ '$(systbl_offset_$(basetarget))' -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -$(kapi)/syscall_table.h: $(syscall) $(systbl) +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h kapisyshdr-y += syscall_table.h -targets += $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl index 9df40ac0ebc0..d08eebad6b7f 100644 --- a/arch/sh/kernel/syscalls/syscall.tbl +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -444,3 +444,4 @@ 439 common faccessat2 sys_faccessat2 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr diff --git 
a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 2c1cee9eed73..164a5254c91c 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -96,6 +96,7 @@ config SPARC64 select ARCH_HAS_PTE_SPECIAL select PCI_DOMAINS if PCI select ARCH_HAS_GIGANTIC_PAGE + select HAVE_SOFTIRQ_ON_OWN_STACK config ARCH_PROC_KCORE_TEXT def_bool y @@ -175,7 +176,7 @@ config SMP Management" code will be disabled if you say Y here. See also <file:Documentation/admin-guide/lockup-watchdogs.rst> and the SMP-HOWTO - available at <http://www.tldp.org/docs.html#howto>. + available at <https://www.tldp.org/docs.html#howto>. If you don't know what to do here, say N. diff --git a/arch/sparc/boot/piggyback.c b/arch/sparc/boot/piggyback.c index a7a38fb4ece0..6d74064add0a 100644 --- a/arch/sparc/boot/piggyback.c +++ b/arch/sparc/boot/piggyback.c @@ -154,6 +154,10 @@ static off_t get_hdrs_offset(int kernelfd, const char *filename) offset -= LOOKBACK; /* skip a.out header */ offset += AOUT_TEXT_OFFSET; + if (offset < 0) { + errno = -EINVAL; + die("Calculated a negative offset, probably elftoaout generated an invalid image. Did you use a recent elftoaout ?"); + } if (lseek(kernelfd, offset, SEEK_SET) < 0) die("lseek"); if (read(kernelfd, buffer, BUFSIZE) != BUFSIZE) diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index d91eb6a76dd1..12a4fb0bd52a 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -65,9 +65,8 @@ CONFIG_CDROM_PKTCDVD=m CONFIG_CDROM_PKTCDVD_WCACHE=y CONFIG_ATA_OVER_ETH=m CONFIG_SUNVDC=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_ALI15X3=y +CONFIG_ATA=y +CONFIG_PATA_ALI=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y @@ -94,7 +93,7 @@ CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_MII=m CONFIG_SUNLANCE=m -CONFIG_HAPPYMEAL=m +CONFIG_HAPPYMEAL=y CONFIG_SUNGEM=m CONFIG_SUNVNET=m CONFIG_LDMVSW=m @@ -235,3 +234,7 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRC16=m CONFIG_LIBCRC32C=m CONFIG_VCC=m +CONFIG_PATA_CMD64X=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_DEVTMPFS=y diff --git a/arch/sparc/include/asm/backoff.h b/arch/sparc/include/asm/backoff.h index 8625946d8d00..597a22953bc5 100644 --- a/arch/sparc/include/asm/backoff.h +++ b/arch/sparc/include/asm/backoff.h @@ -18,7 +18,7 @@ * * When we spin, we try to use an operation that will cause the * current cpu strand to block, and therefore make the core fully - * available to any other other runnable strands. There are two + * available to any other runnable strands. There are two * options, based upon cpu capabilities. 
* * On all cpus prior to SPARC-T4 we do three dummy reads of the diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h index c73b5a3ab7b9..a53d744d4212 100644 --- a/arch/sparc/include/asm/cmpxchg_32.h +++ b/arch/sparc/include/asm/cmpxchg_32.h @@ -25,7 +25,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int return x; } -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));}) /* Emulate cmpxchg() the same way we emulate atomics, * by hashing the object address and indexing into an array diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h index 7e078bc73ef5..8fb09eec8c3e 100644 --- a/arch/sparc/include/asm/elf_64.h +++ b/arch/sparc/include/asm/elf_64.h @@ -8,7 +8,6 @@ #include <asm/ptrace.h> #include <asm/processor.h> -#include <asm/extable_64.h> #include <asm/spitfire.h> #include <asm/adi.h> diff --git a/arch/sparc/include/asm/extable_64.h b/arch/sparc/include/asm/extable.h index 5a0171907b7e..554a9dc376fc 100644 --- a/arch/sparc/include/asm/extable_64.h +++ b/arch/sparc/include/asm/extable.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_EXTABLE64_H -#define __ASM_EXTABLE64_H +#ifndef __ASM_EXTABLE_H +#define __ASM_EXTABLE_H /* * The exception table consists of pairs of addresses: the first is the * address of an instruction that is allowed to fault, and the second is diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h index 4d748e93b974..154df2cf19f4 100644 --- a/arch/sparc/include/asm/irq_64.h +++ b/arch/sparc/include/asm/irq_64.h @@ -93,7 +93,6 @@ void arch_trigger_cpumask_backtrace(const struct cpumask *mask, extern void *hardirq_stack[NR_CPUS]; extern void *softirq_stack[NR_CPUS]; -#define __ARCH_HAS_DO_SOFTIRQ #define NO_IRQ 0xffffffff diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h index f94532f25db1..274217e7ed70 100644 --- a/arch/sparc/include/asm/mman.h +++ b/arch/sparc/include/asm/mman.h @@ -57,35 +57,39 @@ static inline int sparc_validate_prot(unsigned long prot, unsigned long addr) { if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI)) return 0; - if (prot & PROT_ADI) { - if (!adi_capable()) - return 0; + return 1; +} - if (addr) { - struct vm_area_struct *vma; +#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags) +/* arch_validate_flags() - Ensure combination of flags is valid for a + * VMA. + */ +static inline bool arch_validate_flags(unsigned long vm_flags) +{ + /* If ADI is being enabled on this VMA, check for ADI + * capability on the platform and ensure VMA is suitable + * for ADI + */ + if (vm_flags & VM_SPARC_ADI) { + if (!adi_capable()) + return false; - vma = find_vma(current->mm, addr); - if (vma) { - /* ADI can not be enabled on PFN - * mapped pages - */ - if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) - return 0; + /* ADI can not be enabled on PFN mapped pages */ + if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) + return false; - /* Mergeable pages can become unmergeable - * if ADI is enabled on them even if they - * have identical data on them. This can be - * because ADI enabled pages with identical - * data may still not have identical ADI - * tags on them. Disallow ADI on mergeable - * pages. 
- */ - if (vma->vm_flags & VM_MERGEABLE) - return 0; - } - } + /* Mergeable pages can become unmergeable + * if ADI is enabled on them even if they + * have identical data on them. This can be + * because ADI enabled pages with identical + * data may still not have identical ADI + * tags on them. Disallow ADI on mergeable + * pages. + */ + if (vm_flags & VM_MERGEABLE) + return false; } - return 1; + return true; } #endif /* CONFIG_SPARC64 */ diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h index 7708d015712b..6067925972d9 100644 --- a/arch/sparc/include/asm/pgtsrmmu.h +++ b/arch/sparc/include/asm/pgtsrmmu.h @@ -113,7 +113,7 @@ extern unsigned long last_valid_pfn; extern void *srmmu_nocache_pool; #define __nocache_pa(VADDR) (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool)) #define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR) -#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR)) +#define __nocache_fix(VADDR) ((__typeof__(VADDR))__va(__nocache_pa(VADDR))) /* Accessing the MMU control register. */ unsigned int srmmu_get_mmureg(void); diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h index 3c4bc2189092..b6242f7771e9 100644 --- a/arch/sparc/include/asm/processor_32.h +++ b/arch/sparc/include/asm/processor_32.h @@ -50,16 +50,12 @@ struct thread_struct { unsigned long fsr; unsigned long fpqdepth; struct fpq fpqueue[16]; - unsigned long flags; mm_segment_t current_ds; }; -#define SPARC_FLAG_KTHREAD 0x1 /* task is a kernel thread */ -#define SPARC_FLAG_UNALIGNED 0x2 /* is allowed to do unaligned accesses */ - #define INIT_THREAD { \ - .flags = SPARC_FLAG_KTHREAD, \ .current_ds = KERNEL_DS, \ + .kregs = (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \ } /* Do necessary setup to start up a newly executed thread. */ diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h index 827b73a97f8a..28f81081e37d 100644 --- a/arch/sparc/include/asm/signal.h +++ b/arch/sparc/include/asm/signal.h @@ -9,18 +9,6 @@ #include <uapi/asm/signal.h> #ifndef __ASSEMBLY__ -/* - * DJHR - * SA_STATIC_ALLOC is used for the sparc32 system to indicate that this - * interrupt handler's irq structure should be statically allocated - * by the request_irq routine. - * The alternative is that arch/sparc/kernel/irq.c has carnal knowledge - * of interrupt usage and that sucks. Also without a flag like this - * it may be possible for the free_irq routine to attempt to free - * statically allocated data.. which is NOT GOOD. 
- * - */ -#define SA_STATIC_ALLOC 0x8000 #define __ARCH_HAS_KA_RESTORER #define __ARCH_HAS_SA_RESTORER diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 7fc82a233f49..3a9a0b0c7465 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -11,8 +11,8 @@ #include <asm/processor.h> #include <asm/barrier.h> -#include <asm/qrwlock.h> #include <asm/qspinlock.h> +#include <asm/qrwlock.h> #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 42cd4cd3892e..8047a9caab2f 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -118,6 +118,7 @@ struct thread_info { .task = &tsk, \ .current_ds = ASI_P, \ .preempt_count = INIT_PREEMPT_COUNT, \ + .kregs = (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \ } /* how to get the thread information struct from C */ diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h index dd85bc2c2cad..390094200fc4 100644 --- a/arch/sparc/include/asm/uaccess.h +++ b/arch/sparc/include/asm/uaccess.h @@ -1,6 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ___ASM_SPARC_UACCESS_H #define ___ASM_SPARC_UACCESS_H + +#include <asm/extable.h> + #if defined(__sparc__) && defined(__arch64__) #include <asm/uaccess_64.h> #else diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 0a2d3ebc4bb8..4a12346bb69c 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -13,9 +13,6 @@ #include <asm/processor.h> -#define ARCH_HAS_SORT_EXTABLE -#define ARCH_HAS_SEARCH_EXTABLE - /* Sparc is not segmented, however we need to be able to fool access_ok() * when doing system calls from kernel mode legitimately. * @@ -40,36 +37,6 @@ #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) #define access_ok(addr, size) __access_ok((unsigned long)(addr), size) -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - * - * There is a special way how to put a range of potentially faulting - * insns (like twenty ldd/std's with now intervening other instructions) - * You specify address of first in insn and 0 in fixup and in the next - * exception_table_entry you specify last potentially faulting insn + 1 - * and in fixup the routine which should handle the fault. - * That fixup code will get - * (faulting_insn_address - first_insn_in_the_range_address)/4 - * in %g2 (ie. index of the faulting instruction in the range). - */ - -struct exception_table_entry -{ - unsigned long insn, fixup; -}; - -/* Returns 0 if exception not found and fixup otherwise. */ -unsigned long search_extables_range(unsigned long addr, unsigned long *g2); - /* Uh, these should become the main single-value transfer routines.. * They automatically use the right size if we just have the right * pointer type.. 
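The comment removed from uaccess_32.h above described sparc32's old range-style exception tables: an entry whose fixup word was 0 opened a run of potentially faulting instructions, and the fault handler received the index of the faulting insn within that run in %g2. This series drops that scheme in favour of the generic two-word insn/fixup pairs (the header renamed to asm/extable.h here, and the search_exception_tables() call in the unaligned_32.c hunk further down). A minimal user-space sketch of the pair-based lookup that replaces search_extables_range() — addresses and names are illustrative, not kernel code:

	#include <stdio.h>

	/* Generic-style entry: an address allowed to fault, and the
	 * address at which execution should resume after the fault. */
	struct exception_table_entry {
		unsigned long insn, fixup;
	};

	/* Sorted by insn; the kernel sorts the real table at build time. */
	static const struct exception_table_entry extable[] = {
		{ 0x1000, 0x2000 },
		{ 0x1008, 0x2010 },
		{ 0x1010, 0x2020 },
	};

	/* Binary search for the entry matching a faulting pc. */
	static const struct exception_table_entry *
	search_extable(unsigned long pc)
	{
		size_t lo = 0, hi = sizeof(extable) / sizeof(extable[0]);

		while (lo < hi) {
			size_t mid = lo + (hi - lo) / 2;

			if (extable[mid].insn == pc)
				return &extable[mid];
			if (extable[mid].insn < pc)
				lo = mid + 1;
			else
				hi = mid;
		}
		return NULL;
	}

	int main(void)
	{
		const struct exception_table_entry *e = search_extable(0x1008);

		if (e)	/* a trap handler would set regs->pc = e->fixup */
			printf("fixup for 0x1008 -> 0x%lx\n", e->fixup);
		return 0;
	}

On a match the handler simply resumes at entry->fixup (with npc = pc + 4 on sparc32, as kernel_mna_trap_fault() below now does); the %g2 side channel and the range bookkeeping disappear entirely.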
@@ -252,12 +219,7 @@ static inline unsigned long __clear_user(void __user *addr, unsigned long size) unsigned long ret; __asm__ __volatile__ ( - ".section __ex_table,#alloc\n\t" - ".align 4\n\t" - ".word 1f,3\n\t" - ".previous\n\t" "mov %2, %%o1\n" - "1:\n\t" "call __bzero\n\t" " mov %1, %%o0\n\t" "mov %%o0, %0\n" diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 698cf69f74e9..30eb4c6414d1 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -10,7 +10,6 @@ #include <linux/string.h> #include <asm/asi.h> #include <asm/spitfire.h> -#include <asm/extable_64.h> #include <asm/processor.h> diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index d58940280f8d..a269ad2fe6df 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -994,7 +994,7 @@ do_syscall: andcc %l5, _TIF_SYSCALL_TRACE, %g0 mov %i4, %o4 bne linux_syscall_trace - mov %i0, %l5 + mov %i0, %l6 2: call %l7 mov %i5, %o5 @@ -1003,16 +1003,15 @@ do_syscall: st %o0, [%sp + STACKFRAME_SZ + PT_I0] ret_sys_call: - ld [%curptr + TI_FLAGS], %l6 + ld [%curptr + TI_FLAGS], %l5 cmp %o0, -ERESTART_RESTARTBLOCK ld [%sp + STACKFRAME_SZ + PT_PSR], %g3 set PSR_C, %g2 bgeu 1f - andcc %l6, _TIF_SYSCALL_TRACE, %g0 + andcc %l5, _TIF_SYSCALL_TRACE, %g0 /* System call success, clear Carry condition code. */ andn %g3, %g2, %g3 - clr %l6 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] bne linux_syscall_trace2 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ @@ -1027,7 +1026,6 @@ ret_sys_call: sub %g0, %o0, %o0 or %g3, %g2, %g3 st %o0, [%sp + STACKFRAME_SZ + PT_I0] - mov 1, %l6 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] bne linux_syscall_trace2 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S index be30c8d4cc73..6044b82b9767 100644 --- a/arch/sparc/kernel/head_32.S +++ b/arch/sparc/kernel/head_32.S @@ -515,7 +515,7 @@ continue_boot: /* I want a kernel stack NOW! 
*/ set init_thread_union, %g1 - set (THREAD_SIZE - STACKFRAME_SZ), %g2 + set (THREAD_SIZE - STACKFRAME_SZ - TRACEREG_SZ), %g2 add %g1, %g2, %sp mov 0, %fp /* And for good luck */ diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index c5ff2472b3d9..72a5bdc833ea 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -706,7 +706,7 @@ tlb_fixup_done: wr %g0, ASI_P, %asi mov 1, %g1 sllx %g1, THREAD_SHIFT, %g1 - sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1 + sub %g1, (STACKFRAME_SZ + STACK_BIAS + TRACEREG_SZ), %g1 add %g6, %g1, %sp /* Set per-cpu pointer initially to zero, this makes diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 3ec9f1402aad..c8848bb681a1 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -42,6 +42,7 @@ #include <asm/head.h> #include <asm/hypervisor.h> #include <asm/cacheflush.h> +#include <asm/softirq_stack.h> #include "entry.h" #include "cpumap.h" diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c index bd48575172c3..3a66e62eb2a0 100644 --- a/arch/sparc/kernel/led.c +++ b/arch/sparc/kernel/led.c @@ -50,6 +50,7 @@ static void led_blink(struct timer_list *unused) add_timer(&led_blink_timer); } +#ifdef CONFIG_PROC_FS static int led_proc_show(struct seq_file *m, void *v) { if (get_auxio() & AUXIO_LED) @@ -111,6 +112,7 @@ static const struct proc_ops led_proc_ops = { .proc_release = single_release, .proc_write = led_proc_write, }; +#endif static struct proc_dir_entry *led; diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 5d45b6d766d6..9c2b720bfd20 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -552,9 +552,8 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm, pci_info(bus, "scan_bus[%pOF] bus no %d\n", node, bus->number); - child = NULL; prev_devfn = -1; - while ((child = of_get_next_child(node, child)) != NULL) { + for_each_child_of_node(node, child) { if (ofpci_verbose) pci_info(bus, " * %pOF\n", child); reg = of_get_property(child, "reg", ®len); diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index a02363735915..3b9794978e5b 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -183,7 +183,7 @@ void exit_thread(struct task_struct *tsk) #ifndef CONFIG_SMP if (last_task_used_math == tsk) { #else - if (test_ti_thread_flag(task_thread_info(tsk), TIF_USEDFPU)) { + if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { #endif /* Keep process from leaving FPU in a bogon state. */ put_psr(get_psr() | PSR_EF); @@ -216,16 +216,6 @@ void flush_thread(void) clear_thread_flag(TIF_USEDFPU); #endif } - - /* This task is no longer a kernel thread. */ - if (current->thread.flags & SPARC_FLAG_KTHREAD) { - current->thread.flags &= ~SPARC_FLAG_KTHREAD; - - /* We must fixup kregs as well. */ - /* XXX This was not fixed for ti for a while, worked. Unused? 
*/ - current->thread.kregs = (struct pt_regs *) - (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ)); - } } static inline struct sparc_stackf __user * @@ -309,11 +299,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, ti->ksp = (unsigned long) new_stack; p->thread.kregs = childregs; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { extern int nwindows; unsigned long psr; memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ); - p->thread.flags |= SPARC_FLAG_KTHREAD; p->thread.current_ds = KERNEL_DS; ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8); childregs->u_regs[UREG_G1] = sp; /* function */ @@ -325,7 +314,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, } memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ); childregs->u_regs[UREG_FP] = sp; - p->thread.flags &= ~SPARC_FLAG_KTHREAD; p->thread.current_ds = USER_DS; ti->kpc = (((unsigned long) ret_from_fork) - 0x8); ti->kpsr = current->thread.fork_kpsr | PSR_PIL; diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 6f8c7822fc06..7afd0a859a78 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -597,7 +597,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, sizeof(struct sparc_stackf)); t->fpsaved[0] = 0; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(child_trap_frame, 0, child_stack_sz); __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = (current_pt_regs()->tstate + 1) & TSTATE_CWP; diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S index dca8ed810046..8931fe266346 100644 --- a/arch/sparc/kernel/rtrap_32.S +++ b/arch/sparc/kernel/rtrap_32.S @@ -75,7 +75,7 @@ signal_p: ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr mov %g2, %o2 - mov %l5, %o1 + mov %l6, %o1 call do_notify_resume add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index eea43a1aef1b..c8e0dd99f370 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -266,7 +266,6 @@ static __init void leon_patch(void) } struct tt_entry *sparc_ttable; -static struct pt_regs fake_swapper_regs; /* Called from head_32.S - before we have setup anything * in the kernel. Be very careful with what you do here. 
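fake_swapper_regs can go away because init_task's kregs is now statically initialized to a real pt_regs slot at the top of the initial stack (see the INIT_THREAD and INIT_THREAD_INFO hunks earlier in this diff), with head_32.S and head_64.S reserving TRACEREG_SZ below the first stack frame to match. A hedged sketch of that layout arithmetic — the sizes and the pt_regs body are stand-ins, not sparc's real values:

	#include <stdint.h>
	#include <stdio.h>

	struct pt_regs { unsigned long u_regs[16], psr, pc, npc, y; };

	/* Illustrative stand-ins; the kernel gets these from asm-offsets. */
	#define THREAD_SIZE	8192
	#define TRACEREG_SZ	sizeof(struct pt_regs)
	#define STACKFRAME_SZ	96

	int main(void)
	{
		static unsigned char init_stack[THREAD_SIZE];

		/* kregs occupies the last TRACEREG_SZ bytes of the stack... */
		struct pt_regs *kregs =
			(struct pt_regs *)(init_stack + THREAD_SIZE) - 1;
		/* ...and boot code points the first %sp just below it, hence
		 * the extra TRACEREG_SZ subtracted in head_32.S/head_64.S. */
		uintptr_t sp = (uintptr_t)init_stack + THREAD_SIZE
				- STACKFRAME_SZ - TRACEREG_SZ;

		printf("kregs at %p, initial sp at %#jx\n",
		       (void *)kregs, (uintmax_t)sp);
		return 0;
	}

With kregs valid from the very first context, the SPARC_FLAG_KTHREAD fixup deleted from flush_thread() in process_32.c above is no longer needed either.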
@@ -363,8 +362,6 @@ void __init setup_arch(char **cmdline_p) (*(linux_dbvec->teach_debugger))(); } - init_task.thread.kregs = &fake_swapper_regs; - /* Run-time patch instructions to match the cpu model */ per_cpu_patch(); diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index d87244197d5c..48abee4eee29 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -165,8 +165,6 @@ extern int root_mountflags; char reboot_command[COMMAND_LINE_SIZE]; -static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; - static void __init per_cpu_patch(void) { struct cpuid_patch_entry *p; @@ -661,8 +659,6 @@ void __init setup_arch(char **cmdline_p) rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK; #endif - task_thread_info(&init_task)->kregs = &fake_swapper_regs; - #ifdef CONFIG_IP_PNP if (!ic_set_manually) { phandle chosen = prom_finddevice("/chosen"); diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 11cf2281b581..02f3ad55dfe3 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -400,8 +400,8 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs, else { regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2); - /* mov __NR_sigreturn, %g1 */ - err |= __put_user(0x821020d8, &sf->insns[0]); + /* mov __NR_rt_sigreturn, %g1 */ + err |= __put_user(0x82102065, &sf->insns[0]); /* t 0x10 */ err |= __put_user(0x91d02010, &sf->insns[1]); diff --git a/arch/sparc/kernel/syscalls/Makefile b/arch/sparc/kernel/syscalls/Makefile index c22a21c39f30..283f64407b07 100644 --- a/arch/sparc/kernel/syscalls/Makefile +++ b/arch/sparc/kernel/syscalls/Makefile @@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') -syscall := $(srctree)/$(src)/syscall.tbl +syscall := $(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh systbl := $(srctree)/$(src)/syscalltbl.sh @@ -22,24 +22,24 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_offset_$(basetarget))' syshdr_abis_unistd_32 := common,32 -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abis_unistd_64 := common,64 -$(uapi)/unistd_64.h: $(syscall) $(syshdr) +$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) systbl_abis_syscall_table_32 := common,32 -$(kapi)/syscall_table_32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_64 := common,64 -$(kapi)/syscall_table_64.h: $(syscall) $(systbl) +$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_c32 := common,32 systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h unistd_64.h @@ -47,9 +47,10 @@ kapisyshdr-y += syscall_table_32.h \ syscall_table_64.h \ syscall_table_c32.h -targets += $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: diff --git a/arch/sparc/kernel/syscalls/syscall.tbl 
b/arch/sparc/kernel/syscalls/syscall.tbl index 40d8c7cd8298..84403a99039c 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -487,3 +487,4 @@ 439 common faccessat2 sys_faccessat2 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index d92e5eaa4c1d..a850dccd78ea 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -275,14 +275,13 @@ bool is_no_fault_exception(struct pt_regs *regs) asi = (regs->tstate >> 24); /* saved %asi */ else asi = (insn >> 5); /* immediate asi */ - if ((asi & 0xf2) == ASI_PNF) { - if (insn & 0x1000000) { /* op3[5:4]=3 */ - handle_ldf_stq(insn, regs); - return true; - } else if (insn & 0x200000) { /* op3[2], stores */ + if ((asi & 0xf6) == ASI_PNF) { + if (insn & 0x200000) /* op3[2], stores */ return false; - } - handle_ld_nf(insn, regs); + if (insn & 0x1000000) /* op3[5:4]=3 (fp) */ + handle_ldf_stq(insn, regs); + else + handle_ld_nf(insn, regs); return true; } } diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index 83db94c0b431..ef5c5207c9ff 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -16,6 +16,7 @@ #include <linux/uaccess.h> #include <linux/smp.h> #include <linux/perf_event.h> +#include <linux/extable.h> #include <asm/setup.h> @@ -213,10 +214,10 @@ static inline int ok_for_kernel(unsigned int insn) static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) { - unsigned long g2 = regs->u_regs [UREG_G2]; - unsigned long fixup = search_extables_range(regs->pc, &g2); + const struct exception_table_entry *entry; - if (!fixup) { + entry = search_exception_tables(regs->pc); + if (!entry) { unsigned long address = compute_effective_address(regs, insn); if(address < PAGE_SIZE) { printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler"); @@ -232,9 +233,8 @@ static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) die_if_kernel("Oops", regs); /* Not reached */ } - regs->pc = fixup; + regs->pc = entry->fixup; regs->npc = regs->pc + 4; - regs->u_regs [UREG_G2] = g2; } asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) @@ -274,103 +274,9 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) } } -static inline int ok_for_user(struct pt_regs *regs, unsigned int insn, - enum direction dir) -{ - unsigned int reg; - int size = ((insn >> 19) & 3) == 3 ? 8 : 4; - - if ((regs->pc | regs->npc) & 3) - return 0; - - /* Must access_ok() in all the necessary places. 
*/ -#define WINREG_ADDR(regnum) \ - ((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum))) - - reg = (insn >> 25) & 0x1f; - if (reg >= 16) { - if (!access_ok(WINREG_ADDR(reg - 16), size)) - return -EFAULT; - } - reg = (insn >> 14) & 0x1f; - if (reg >= 16) { - if (!access_ok(WINREG_ADDR(reg - 16), size)) - return -EFAULT; - } - if (!(insn & 0x2000)) { - reg = (insn & 0x1f); - if (reg >= 16) { - if (!access_ok(WINREG_ADDR(reg - 16), size)) - return -EFAULT; - } - } -#undef WINREG_ADDR - return 0; -} - -static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) +asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) { send_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)safe_compute_effective_address(regs, insn), 0, current); } - -asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) -{ - enum direction dir; - - if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || - (((insn >> 30) & 3) != 3)) - goto kill_user; - dir = decode_direction(insn); - if(!ok_for_user(regs, insn, dir)) { - goto kill_user; - } else { - int err, size = decode_access_size(insn); - unsigned long addr; - - if(floating_point_load_or_store_p(insn)) { - printk("User FPU load/store unaligned unsupported.\n"); - goto kill_user; - } - - addr = compute_effective_address(regs, insn); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); - switch(dir) { - case load: - err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), - regs), - size, (unsigned long *) addr, - decode_signedness(insn)); - break; - - case store: - err = do_int_store(((insn>>25)&0x1f), size, - (unsigned long *) addr, regs); - break; - - case both: - /* - * This was supported in 2.4. However, we question - * the value of SWAP instruction across word boundaries. - */ - printk("Unaligned SWAP unsupported.\n"); - err = -EFAULT; - break; - - default: - unaligned_panic("Impossible user unaligned trap."); - goto out; - } - if (err) - goto kill_user; - else - advance(regs); - goto out; - } - -kill_user: - user_mna_trap_fault(regs, insn); -out: - ; -} diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c index 7db5aabe9708..e27afd233bf5 100644 --- a/arch/sparc/kernel/viohs.c +++ b/arch/sparc/kernel/viohs.c @@ -428,7 +428,7 @@ static int process_dreg_info(struct vio_driver_state *vio, struct vio_dring_register *pkt) { struct vio_dring_state *dr; - int i, len; + int i; viodbg(HS, "GOT DRING_REG INFO ident[%llx] " "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n", @@ -482,9 +482,7 @@ static int process_dreg_info(struct vio_driver_state *vio, pkt->num_descr, pkt->descr_size, pkt->options, pkt->num_cookies); - len = (sizeof(*pkt) + - (dr->ncookies * sizeof(struct ldc_trans_cookie))); - if (send_ctrl(vio, &pkt->tag, len) < 0) + if (send_ctrl(vio, &pkt->tag, struct_size(pkt, cookies, dr->ncookies)) < 0) goto send_nack; vio->dr_state |= VIO_DR_STATE_RXREG; diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S index 7488d130faf7..781e39b3c009 100644 --- a/arch/sparc/lib/checksum_32.S +++ b/arch/sparc/lib/checksum_32.S @@ -155,13 +155,6 @@ cpout: retl ! get outta here .text; \ .align 4 -#define EXT(start,end) \ - .section __ex_table,ALLOC; \ - .align 4; \ - .word start, 0, end, cc_fault; \ - .text; \ - .align 4 - /* This aligned version executes typically in 8.5 superscalar cycles, this * is the best I can do. I say 8.5 because the final add will pair with * the next ldd in the main unrolled loop. Thus the pipe is always full. @@ -169,20 +162,20 @@ cpout: retl ! 
get outta here * please check the fixup code below as well. */ #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ - ldd [src + off + 0x00], t0; \ - ldd [src + off + 0x08], t2; \ + EX(ldd [src + off + 0x00], t0); \ + EX(ldd [src + off + 0x08], t2); \ addxcc t0, sum, sum; \ - ldd [src + off + 0x10], t4; \ + EX(ldd [src + off + 0x10], t4); \ addxcc t1, sum, sum; \ - ldd [src + off + 0x18], t6; \ + EX(ldd [src + off + 0x18], t6); \ addxcc t2, sum, sum; \ - std t0, [dst + off + 0x00]; \ + EX(std t0, [dst + off + 0x00]); \ addxcc t3, sum, sum; \ - std t2, [dst + off + 0x08]; \ + EX(std t2, [dst + off + 0x08]); \ addxcc t4, sum, sum; \ - std t4, [dst + off + 0x10]; \ + EX(std t4, [dst + off + 0x10]); \ addxcc t5, sum, sum; \ - std t6, [dst + off + 0x18]; \ + EX(std t6, [dst + off + 0x18]); \ addxcc t6, sum, sum; \ addxcc t7, sum, sum; @@ -191,39 +184,39 @@ cpout: retl ! get outta here * Viking MXCC into streaming mode. Ho hum... */ #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ - ldd [src + off + 0x00], t0; \ - ldd [src + off + 0x08], t2; \ - ldd [src + off + 0x10], t4; \ - ldd [src + off + 0x18], t6; \ - st t0, [dst + off + 0x00]; \ + EX(ldd [src + off + 0x00], t0); \ + EX(ldd [src + off + 0x08], t2); \ + EX(ldd [src + off + 0x10], t4); \ + EX(ldd [src + off + 0x18], t6); \ + EX(st t0, [dst + off + 0x00]); \ addxcc t0, sum, sum; \ - st t1, [dst + off + 0x04]; \ + EX(st t1, [dst + off + 0x04]); \ addxcc t1, sum, sum; \ - st t2, [dst + off + 0x08]; \ + EX(st t2, [dst + off + 0x08]); \ addxcc t2, sum, sum; \ - st t3, [dst + off + 0x0c]; \ + EX(st t3, [dst + off + 0x0c]); \ addxcc t3, sum, sum; \ - st t4, [dst + off + 0x10]; \ + EX(st t4, [dst + off + 0x10]); \ addxcc t4, sum, sum; \ - st t5, [dst + off + 0x14]; \ + EX(st t5, [dst + off + 0x14]); \ addxcc t5, sum, sum; \ - st t6, [dst + off + 0x18]; \ + EX(st t6, [dst + off + 0x18]); \ addxcc t6, sum, sum; \ - st t7, [dst + off + 0x1c]; \ + EX(st t7, [dst + off + 0x1c]); \ addxcc t7, sum, sum; /* Yuck, 6 superscalar cycles... */ #define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \ - ldd [src - off - 0x08], t0; \ - ldd [src - off - 0x00], t2; \ + EX(ldd [src - off - 0x08], t0); \ + EX(ldd [src - off - 0x00], t2); \ addxcc t0, sum, sum; \ - st t0, [dst - off - 0x08]; \ + EX(st t0, [dst - off - 0x08]); \ addxcc t1, sum, sum; \ - st t1, [dst - off - 0x04]; \ + EX(st t1, [dst - off - 0x04]); \ addxcc t2, sum, sum; \ - st t2, [dst - off - 0x00]; \ + EX(st t2, [dst - off - 0x00]); \ addxcc t3, sum, sum; \ - st t3, [dst - off + 0x04]; + EX(st t3, [dst - off + 0x04]); /* Handle the end cruft code out of band for better cache patterns. */ cc_end_cruft: @@ -331,7 +324,6 @@ __csum_partial_copy_sparc_generic: CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) -10: EXT(5b, 10b) ! note for exception handling sub %g1, 128, %g1 ! detract from length addx %g0, %g7, %g7 ! add in last carry bit andcc %g1, 0xffffff80, %g0 ! more to csum? @@ -356,8 +348,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5) CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5) CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5) CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5) -12: EXT(cctbl, 12b) ! note for exception table handling - addx %g0, %g7, %g7 +12: addx %g0, %g7, %g7 andcc %o3, 0xf, %g0 ! 
check for low bits set ccte: bne cc_end_cruft ! something left, handle it out of band andcc %o3, 8, %g0 ! begin checks for that code @@ -367,7 +358,6 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) -11: EXT(ccdbl, 11b) ! note for exception table handling sub %g1, 128, %g1 ! detract from length addx %g0, %g7, %g7 ! add in last carry bit andcc %g1, 0xffffff80, %g0 ! more to csum? diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S index dc72f2b970b7..954572c78539 100644 --- a/arch/sparc/lib/copy_user.S +++ b/arch/sparc/lib/copy_user.S @@ -21,98 +21,134 @@ /* Work around cpp -rob */ #define ALLOC #alloc #define EXECINSTR #execinstr + +#define EX_ENTRY(l1, l2) \ + .section __ex_table,ALLOC; \ + .align 4; \ + .word l1, l2; \ + .text; + #define EX(x,y,a,b) \ 98: x,y; \ .section .fixup,ALLOC,EXECINSTR; \ .align 4; \ -99: ba fixupretl; \ - a, b, %g3; \ - .section __ex_table,ALLOC; \ - .align 4; \ - .word 98b, 99b; \ - .text; \ - .align 4 +99: retl; \ + a, b, %o0; \ + EX_ENTRY(98b, 99b) #define EX2(x,y,c,d,e,a,b) \ 98: x,y; \ .section .fixup,ALLOC,EXECINSTR; \ .align 4; \ 99: c, d, e; \ - ba fixupretl; \ - a, b, %g3; \ - .section __ex_table,ALLOC; \ - .align 4; \ - .word 98b, 99b; \ - .text; \ - .align 4 + retl; \ + a, b, %o0; \ + EX_ENTRY(98b, 99b) #define EXO2(x,y) \ 98: x, y; \ - .section __ex_table,ALLOC; \ - .align 4; \ - .word 98b, 97f; \ - .text; \ - .align 4 + EX_ENTRY(98b, 97f) -#define EXT(start,end,handler) \ - .section __ex_table,ALLOC; \ - .align 4; \ - .word start, 0, end, handler; \ - .text; \ - .align 4 +#define LD(insn, src, offset, reg, label) \ +98: insn [%src + (offset)], %reg; \ + .section .fixup,ALLOC,EXECINSTR; \ +99: ba label; \ + mov offset, %g5; \ + EX_ENTRY(98b, 99b) -/* Please do not change following macros unless you change logic used - * in .fixup at the end of this file as well - */ +#define ST(insn, dst, offset, reg, label) \ +98: insn %reg, [%dst + (offset)]; \ + .section .fixup,ALLOC,EXECINSTR; \ +99: ba label; \ + mov offset, %g5; \ + EX_ENTRY(98b, 99b) /* Both these macros have to start with exactly the same insn */ +/* left: g7 + (g1 % 128) - offset */ #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ - ldd [%src + (offset) + 0x00], %t0; \ - ldd [%src + (offset) + 0x08], %t2; \ - ldd [%src + (offset) + 0x10], %t4; \ - ldd [%src + (offset) + 0x18], %t6; \ - st %t0, [%dst + (offset) + 0x00]; \ - st %t1, [%dst + (offset) + 0x04]; \ - st %t2, [%dst + (offset) + 0x08]; \ - st %t3, [%dst + (offset) + 0x0c]; \ - st %t4, [%dst + (offset) + 0x10]; \ - st %t5, [%dst + (offset) + 0x14]; \ - st %t6, [%dst + (offset) + 0x18]; \ - st %t7, [%dst + (offset) + 0x1c]; + LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \ + LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \ + LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \ + LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \ + ST(st, dst, offset + 0x00, t0, bigchunk_fault) \ + ST(st, dst, offset + 0x04, t1, bigchunk_fault) \ + ST(st, dst, offset + 0x08, t2, bigchunk_fault) \ + ST(st, dst, offset + 0x0c, t3, bigchunk_fault) \ + ST(st, dst, offset + 0x10, t4, bigchunk_fault) \ + ST(st, dst, offset + 0x14, t5, bigchunk_fault) \ + ST(st, dst, offset + 0x18, t6, bigchunk_fault) \ + ST(st, dst, offset + 0x1c, t7, bigchunk_fault) +/* left: 
g7 + (g1 % 128) - offset */ #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ - ldd [%src + (offset) + 0x00], %t0; \ - ldd [%src + (offset) + 0x08], %t2; \ - ldd [%src + (offset) + 0x10], %t4; \ - ldd [%src + (offset) + 0x18], %t6; \ - std %t0, [%dst + (offset) + 0x00]; \ - std %t2, [%dst + (offset) + 0x08]; \ - std %t4, [%dst + (offset) + 0x10]; \ - std %t6, [%dst + (offset) + 0x18]; + LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \ + LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \ + LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \ + LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \ + ST(std, dst, offset + 0x00, t0, bigchunk_fault) \ + ST(std, dst, offset + 0x08, t2, bigchunk_fault) \ + ST(std, dst, offset + 0x10, t4, bigchunk_fault) \ + ST(std, dst, offset + 0x18, t6, bigchunk_fault) + .section .fixup,#alloc,#execinstr +bigchunk_fault: + sub %g7, %g5, %o0 + and %g1, 127, %g1 + retl + add %o0, %g1, %o0 + +/* left: offset + 16 + (g1 % 16) */ #define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldd [%src - (offset) - 0x10], %t0; \ - ldd [%src - (offset) - 0x08], %t2; \ - st %t0, [%dst - (offset) - 0x10]; \ - st %t1, [%dst - (offset) - 0x0c]; \ - st %t2, [%dst - (offset) - 0x08]; \ - st %t3, [%dst - (offset) - 0x04]; + LD(ldd, src, -(offset + 0x10), t0, lastchunk_fault) \ + LD(ldd, src, -(offset + 0x08), t2, lastchunk_fault) \ + ST(st, dst, -(offset + 0x10), t0, lastchunk_fault) \ + ST(st, dst, -(offset + 0x0c), t1, lastchunk_fault) \ + ST(st, dst, -(offset + 0x08), t2, lastchunk_fault) \ + ST(st, dst, -(offset + 0x04), t3, lastchunk_fault) + + .section .fixup,#alloc,#execinstr +lastchunk_fault: + and %g1, 15, %g1 + retl + sub %g1, %g5, %o0 +/* left: o3 + (o2 % 16) - offset */ #define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \ - lduh [%src + (offset) + 0x00], %t0; \ - lduh [%src + (offset) + 0x02], %t1; \ - lduh [%src + (offset) + 0x04], %t2; \ - lduh [%src + (offset) + 0x06], %t3; \ - sth %t0, [%dst + (offset) + 0x00]; \ - sth %t1, [%dst + (offset) + 0x02]; \ - sth %t2, [%dst + (offset) + 0x04]; \ - sth %t3, [%dst + (offset) + 0x06]; + LD(lduh, src, offset + 0x00, t0, halfchunk_fault) \ + LD(lduh, src, offset + 0x02, t1, halfchunk_fault) \ + LD(lduh, src, offset + 0x04, t2, halfchunk_fault) \ + LD(lduh, src, offset + 0x06, t3, halfchunk_fault) \ + ST(sth, dst, offset + 0x00, t0, halfchunk_fault) \ + ST(sth, dst, offset + 0x02, t1, halfchunk_fault) \ + ST(sth, dst, offset + 0x04, t2, halfchunk_fault) \ + ST(sth, dst, offset + 0x06, t3, halfchunk_fault) +/* left: o3 + (o2 % 16) + offset + 2 */ #define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \ - ldub [%src - (offset) - 0x02], %t0; \ - ldub [%src - (offset) - 0x01], %t1; \ - stb %t0, [%dst - (offset) - 0x02]; \ - stb %t1, [%dst - (offset) - 0x01]; + LD(ldub, src, -(offset + 0x02), t0, halfchunk_fault) \ + LD(ldub, src, -(offset + 0x01), t1, halfchunk_fault) \ + ST(stb, dst, -(offset + 0x02), t0, halfchunk_fault) \ + ST(stb, dst, -(offset + 0x01), t1, halfchunk_fault) + + .section .fixup,#alloc,#execinstr +halfchunk_fault: + and %o2, 15, %o2 + sub %o3, %g5, %o3 + retl + add %o2, %o3, %o0 + +/* left: offset + 2 + (o2 % 2) */ +#define MOVE_LAST_SHORTCHUNK(src, dst, offset, t0, t1) \ + LD(ldub, src, -(offset + 0x02), t0, last_shortchunk_fault) \ + LD(ldub, src, -(offset + 0x01), t1, last_shortchunk_fault) \ + ST(stb, dst, -(offset + 0x02), t0, last_shortchunk_fault) \ + ST(stb, dst, -(offset + 0x01), t1, last_shortchunk_fault) + + .section .fixup,#alloc,#execinstr +last_shortchunk_fault: + and 
%o2, 1, %o2 + retl + sub %o2, %g5, %o0 .text .align 4 @@ -182,8 +218,6 @@ __copy_user: /* %o0=dst %o1=src %o2=len */ MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) -80: - EXT(5b, 80b, 50f) subcc %g7, 128, %g7 add %o1, 128, %o1 bne 5b @@ -201,7 +235,6 @@ __copy_user: /* %o0=dst %o1=src %o2=len */ jmpl %o5 + %lo(copy_user_table_end), %g0 add %o0, %g7, %o0 -copy_user_table: MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5) MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5) MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5) @@ -210,7 +243,6 @@ copy_user_table: MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5) MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5) copy_user_table_end: - EXT(copy_user_table, copy_user_table_end, 51f) be copy_user_last7 andcc %g1, 4, %g0 @@ -250,8 +282,6 @@ ldd_std: MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) -81: - EXT(ldd_std, 81b, 52f) subcc %g7, 128, %g7 add %o1, 128, %o1 bne ldd_std @@ -290,8 +320,6 @@ cannot_optimize: 10: MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5) MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5) -82: - EXT(10b, 82b, 53f) subcc %o3, 0x10, %o3 add %o1, 0x10, %o1 bne 10b @@ -308,8 +336,6 @@ byte_chunk: MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3) MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3) MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3) -83: - EXT(byte_chunk, 83b, 54f) subcc %o3, 0x10, %o3 add %o1, 0x10, %o1 bne byte_chunk @@ -325,16 +351,14 @@ short_end: add %o1, %o3, %o1 jmpl %o5 + %lo(short_table_end), %g0 andcc %o2, 1, %g0 -84: - MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3) - MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3) - MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3) - MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3) - MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3) - MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3) - MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x0c, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x0a, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x08, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x06, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x04, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x02, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x00, g2, g3) short_table_end: - EXT(84b, short_table_end, 55f) be 1f nop EX(ldub [%o1], %g2, add %g0, 1) @@ -363,123 +387,8 @@ short_aligned_end: .section .fixup,#alloc,#execinstr .align 4 97: - mov %o2, %g3 -fixupretl: retl - mov %g3, %o0 - -/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */ -50: -/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK - * happens. This is derived from the amount ldd reads, st stores, etc. - * x = g2 % 12; - * g3 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? 0 : (x - 4) * 4); - * o0 += (g2 / 12) * 32; - */ - cmp %g2, 12 - add %o0, %g7, %o0 - bcs 1f - cmp %g2, 24 - bcs 2f - cmp %g2, 36 - bcs 3f - nop - sub %g2, 12, %g2 - sub %g7, 32, %g7 -3: sub %g2, 12, %g2 - sub %g7, 32, %g7 -2: sub %g2, 12, %g2 - sub %g7, 32, %g7 -1: cmp %g2, 4 - bcs,a 60f - clr %g2 - sub %g2, 4, %g2 - sll %g2, 2, %g2 -60: and %g1, 0x7f, %g3 - sub %o0, %g7, %o0 - add %g3, %g7, %g3 - ba fixupretl - sub %g3, %g2, %g3 -51: -/* i = 41 - g2; j = i % 6; - * g3 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? 
(j + 1) * 4 : 16; - * o0 -= (i / 6) * 16 + 16; - */ - neg %g2 - and %g1, 0xf, %g1 - add %g2, 41, %g2 - add %o0, %g1, %o0 -1: cmp %g2, 6 - bcs,a 2f - cmp %g2, 4 - add %g1, 16, %g1 - b 1b - sub %g2, 6, %g2 -2: bcc,a 2f - mov 16, %g2 - inc %g2 - sll %g2, 2, %g2 -2: add %g1, %g2, %g3 - ba fixupretl - sub %o0, %g3, %o0 -52: -/* g3 = g1 + g7 - (g2 / 8) * 32 + (g2 & 4) ? (g2 & 3) * 8 : 0; - o0 += (g2 / 8) * 32 */ - andn %g2, 7, %g4 - add %o0, %g7, %o0 - andcc %g2, 4, %g0 - and %g2, 3, %g2 - sll %g4, 2, %g4 - sll %g2, 3, %g2 - bne 60b - sub %g7, %g4, %g7 - ba 60b - clr %g2 -53: -/* g3 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 4) ? (g2 & 3) * 2 : 0; - o0 += (g2 & 8) */ - and %g2, 3, %g4 - andcc %g2, 4, %g0 - and %g2, 8, %g2 - sll %g4, 1, %g4 - be 1f - add %o0, %g2, %o0 - add %g2, %g4, %g2 -1: and %o2, 0xf, %g3 - add %g3, %o3, %g3 - ba fixupretl - sub %g3, %g2, %g3 -54: -/* g3 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 2) ? (g2 & 1) : 0; - o0 += (g2 / 4) * 2 */ - srl %g2, 2, %o4 - and %g2, 1, %o5 - srl %g2, 1, %g2 - add %o4, %o4, %o4 - and %o5, %g2, %o5 - and %o2, 0xf, %o2 - add %o0, %o4, %o0 - sub %o3, %o5, %o3 - sub %o2, %o4, %o2 - ba fixupretl - add %o2, %o3, %g3 -55: -/* i = 27 - g2; - g3 = (o2 & 1) + i / 4 * 2 + !(i & 3); - o0 -= i / 4 * 2 + 1 */ - neg %g2 - and %o2, 1, %o2 - add %g2, 27, %g2 - srl %g2, 2, %o5 - andcc %g2, 3, %g0 - mov 1, %g2 - add %o5, %o5, %o5 - be,a 1f - clr %g2 -1: add %g2, %o5, %g3 - sub %o0, %g3, %o0 - ba fixupretl - add %g3, %o2, %g3 + mov %o2, %o0 .globl __copy_user_end __copy_user_end: diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S index b89d42b29e34..eaff68213fdf 100644 --- a/arch/sparc/lib/memset.S +++ b/arch/sparc/lib/memset.S @@ -19,7 +19,7 @@ 98: x,y; \ .section .fixup,ALLOC,EXECINSTR; \ .align 4; \ -99: ba 30f; \ +99: retl; \ a, b, %o0; \ .section __ex_table,ALLOC; \ .align 4; \ @@ -27,35 +27,44 @@ .text; \ .align 4 -#define EXT(start,end,handler) \ +#define STORE(source, base, offset, n) \ +98: std source, [base + offset + n]; \ + .section .fixup,ALLOC,EXECINSTR; \ + .align 4; \ +99: ba 30f; \ + sub %o3, n - offset, %o3; \ .section __ex_table,ALLOC; \ .align 4; \ - .word start, 0, end, handler; \ + .word 98b, 99b; \ .text; \ - .align 4 + .align 4; + +#define STORE_LAST(source, base, offset, n) \ + EX(std source, [base - offset - n], \ + add %o1, offset + n); /* Please don't change these macros, unless you change the logic * in the .fixup section below as well. * Store 64 bytes at (BASE + OFFSET) using value SOURCE. 
*/ -#define ZERO_BIG_BLOCK(base, offset, source) \ - std source, [base + offset + 0x00]; \ - std source, [base + offset + 0x08]; \ - std source, [base + offset + 0x10]; \ - std source, [base + offset + 0x18]; \ - std source, [base + offset + 0x20]; \ - std source, [base + offset + 0x28]; \ - std source, [base + offset + 0x30]; \ - std source, [base + offset + 0x38]; +#define ZERO_BIG_BLOCK(base, offset, source) \ + STORE(source, base, offset, 0x00); \ + STORE(source, base, offset, 0x08); \ + STORE(source, base, offset, 0x10); \ + STORE(source, base, offset, 0x18); \ + STORE(source, base, offset, 0x20); \ + STORE(source, base, offset, 0x28); \ + STORE(source, base, offset, 0x30); \ + STORE(source, base, offset, 0x38); #define ZERO_LAST_BLOCKS(base, offset, source) \ - std source, [base - offset - 0x38]; \ - std source, [base - offset - 0x30]; \ - std source, [base - offset - 0x28]; \ - std source, [base - offset - 0x20]; \ - std source, [base - offset - 0x18]; \ - std source, [base - offset - 0x10]; \ - std source, [base - offset - 0x08]; \ - std source, [base - offset - 0x00]; + STORE_LAST(source, base, offset, 0x38); \ + STORE_LAST(source, base, offset, 0x30); \ + STORE_LAST(source, base, offset, 0x28); \ + STORE_LAST(source, base, offset, 0x20); \ + STORE_LAST(source, base, offset, 0x18); \ + STORE_LAST(source, base, offset, 0x10); \ + STORE_LAST(source, base, offset, 0x08); \ + STORE_LAST(source, base, offset, 0x00); .text .align 4 @@ -68,8 +77,6 @@ __bzero_begin: .globl memset EXPORT_SYMBOL(__bzero) EXPORT_SYMBOL(memset) - .globl __memset_start, __memset_end -__memset_start: memset: mov %o0, %g1 mov 1, %g4 @@ -122,8 +129,6 @@ __bzero: ZERO_BIG_BLOCK(%o0, 0x00, %g2) subcc %o3, 128, %o3 ZERO_BIG_BLOCK(%o0, 0x40, %g2) -11: - EXT(10b, 11b, 20f) bne 10b add %o0, 128, %o0 @@ -138,7 +143,6 @@ __bzero: jmp %o4 add %o0, %o2, %o0 -12: ZERO_LAST_BLOCKS(%o0, 0x48, %g2) ZERO_LAST_BLOCKS(%o0, 0x08, %g2) 13: @@ -181,37 +185,13 @@ __bzero: 5: retl clr %o0 -__memset_end: .section .fixup,#alloc,#execinstr .align 4 -20: - cmp %g2, 8 - bleu 1f - and %o1, 0x7f, %o1 - sub %g2, 9, %g2 - add %o3, 64, %o3 -1: - sll %g2, 3, %g2 - add %o3, %o1, %o0 - b 30f - sub %o0, %g2, %o0 -21: - mov 8, %o0 - and %o1, 7, %o1 - sub %o0, %g2, %o0 - sll %o0, 3, %o0 - b 30f - add %o0, %o1, %o0 30: -/* %o4 is faulting address, %o5 is %pc where fault occurred */ - save %sp, -104, %sp - mov %i5, %o0 - mov %i7, %o1 - call lookup_fault - mov %i4, %o2 - ret - restore + and %o1, 0x7f, %o1 + retl + add %o3, %o1, %o0 .globl __bzero_end __bzero_end: diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile index 68db1f859b02..871354aa3c00 100644 --- a/arch/sparc/mm/Makefile +++ b/arch/sparc/mm/Makefile @@ -8,7 +8,7 @@ ccflags-y := -Werror obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o obj-y += fault_$(BITS).o obj-y += init_$(BITS).o -obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o +obj-$(CONFIG_SPARC32) += srmmu.o iommu.o io-unit.o obj-$(CONFIG_SPARC32) += srmmu_access.o obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o obj-$(CONFIG_SPARC32) += leon_mm.o diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c deleted file mode 100644 index 241b40641873..000000000000 --- a/arch/sparc/mm/extable.c +++ /dev/null @@ -1,107 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/sparc/mm/extable.c - */ - -#include <linux/module.h> -#include <linux/extable.h> -#include <linux/uaccess.h> - -void sort_extable(struct exception_table_entry *start, - struct exception_table_entry *finish) -{ -} - -/* Caller 
knows they are in a range if ret->fixup == 0 */ -const struct exception_table_entry * -search_extable(const struct exception_table_entry *base, - const size_t num, - unsigned long value) -{ - int i; - - /* Single insn entries are encoded as: - * word 1: insn address - * word 2: fixup code address - * - * Range entries are encoded as: - * word 1: first insn address - * word 2: 0 - * word 3: last insn address + 4 bytes - * word 4: fixup code address - * - * Deleted entries are encoded as: - * word 1: unused - * word 2: -1 - * - * See asm/uaccess.h for more details. - */ - - /* 1. Try to find an exact match. */ - for (i = 0; i < num; i++) { - if (base[i].fixup == 0) { - /* A range entry, skip both parts. */ - i++; - continue; - } - - /* A deleted entry; see trim_init_extable */ - if (base[i].fixup == -1) - continue; - - if (base[i].insn == value) - return &base[i]; - } - - /* 2. Try to find a range match. */ - for (i = 0; i < (num - 1); i++) { - if (base[i].fixup) - continue; - - if (base[i].insn <= value && base[i + 1].insn > value) - return &base[i]; - - i++; - } - - return NULL; -} - -#ifdef CONFIG_MODULES -/* We could memmove them around; easier to mark the trimmed ones. */ -void trim_init_extable(struct module *m) -{ - unsigned int i; - bool range; - - for (i = 0; i < m->num_exentries; i += range ? 2 : 1) { - range = m->extable[i].fixup == 0; - - if (within_module_init(m->extable[i].insn, m)) { - m->extable[i].fixup = -1; - if (range) - m->extable[i+1].fixup = -1; - } - if (range) - i++; - } -} -#endif /* CONFIG_MODULES */ - -/* Special extable search, which handles ranges. Returns fixup */ -unsigned long search_extables_range(unsigned long addr, unsigned long *g2) -{ - const struct exception_table_entry *entry; - - entry = search_exception_tables(addr); - if (!entry) - return 0; - - /* Inside range? 
Fix g2 and return correct fixup */ - if (!entry->fixup) { - *g2 = (addr - entry->insn) / 4; - return (entry + 1)->fixup; - } - - return entry->fixup; -} diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 40ce087dfecf..de2031c2b2d7 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -23,6 +23,7 @@ #include <linux/interrupt.h> #include <linux/kdebug.h> #include <linux/uaccess.h> +#include <linux/extable.h> #include <asm/page.h> #include <asm/openprom.h> @@ -54,54 +55,6 @@ static void __noreturn unhandled_fault(unsigned long address, die_if_kernel("Oops", regs); } -asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc, - unsigned long address) -{ - struct pt_regs regs; - unsigned long g2; - unsigned int insn; - int i; - - i = search_extables_range(ret_pc, &g2); - switch (i) { - case 3: - /* load & store will be handled by fixup */ - return 3; - - case 1: - /* store will be handled by fixup, load will bump out */ - /* for _to_ macros */ - insn = *((unsigned int *) pc); - if ((insn >> 21) & 1) - return 1; - break; - - case 2: - /* load will be handled by fixup, store will bump out */ - /* for _from_ macros */ - insn = *((unsigned int *) pc); - if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15) - return 2; - break; - - default: - break; - } - - memset(&regs, 0, sizeof(regs)); - regs.pc = pc; - regs.npc = pc + 4; - __asm__ __volatile__( - "rd %%psr, %0\n\t" - "nop\n\t" - "nop\n\t" - "nop\n" : "=r" (regs.psr)); - unhandled_fault(address, current, &regs); - - /* Not reached */ - return 0; -} - static inline void show_signal_msg(struct pt_regs *regs, int sig, int code, unsigned long address, struct task_struct *tsk) @@ -162,8 +115,6 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, struct vm_area_struct *vma; struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; - unsigned int fixup; - unsigned long g2; int from_user = !(regs->psr & PSR_PS); int code; vm_fault_t fault; @@ -281,30 +232,19 @@ bad_area_nosemaphore: /* Is this in ex_table? 
*/ no_context: - g2 = regs->u_regs[UREG_G2]; if (!from_user) { - fixup = search_extables_range(regs->pc, &g2); - /* Values below 10 are reserved for other things */ - if (fixup > 10) { - extern const unsigned int __memset_start[]; - extern const unsigned int __memset_end[]; + const struct exception_table_entry *entry; + entry = search_exception_tables(regs->pc); #ifdef DEBUG_EXCEPTIONS - printk("Exception: PC<%08lx> faddr<%08lx>\n", - regs->pc, address); - printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n", - regs->pc, fixup, g2); + printk("Exception: PC<%08lx> faddr<%08lx>\n", + regs->pc, address); + printk("EX_TABLE: insn<%08lx> fixup<%08x>\n", + regs->pc, entry->fixup); #endif - if ((regs->pc >= (unsigned long)__memset_start && - regs->pc < (unsigned long)__memset_end)) { - regs->u_regs[UREG_I4] = address; - regs->u_regs[UREG_I5] = regs->pc; - } - regs->u_regs[UREG_G2] = g2; - regs->pc = fixup; - regs->npc = regs->pc + 4; - return; - } + regs->pc = entry->fixup; + regs->npc = regs->pc + 4; + return; } unhandled_fault(address, tsk, regs); diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index eb2946b1df8a..6139c5700ccc 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -197,6 +197,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) size = memblock_phys_mem_size() - memblock_reserved_size(); *pages_avail = (size >> PAGE_SHIFT) - high_pages; + /* Only allow low memory to be allocated via memblock allocation */ + memblock_set_current_limit(max_low_pfn << PAGE_SHIFT); + return max_pfn; } diff --git a/arch/sparc/mm/mm_32.h b/arch/sparc/mm/mm_32.h index ce750a99eea9..ee55f1080634 100644 --- a/arch/sparc/mm/mm_32.h +++ b/arch/sparc/mm/mm_32.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* fault_32.c - visible as they are called from assembler */ -asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc, - unsigned long address); asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, unsigned long address); diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index a03caa5f6628..a9aa6a92c7fe 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -351,7 +351,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm) pte_t *ptep; struct page *page; - if ((ptep = pte_alloc_one_kernel(mm)) == 0) + if (!(ptep = pte_alloc_one_kernel(mm))) return NULL; page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT); spin_lock(&mm->page_table_lock); @@ -689,7 +689,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, pgdp = pgd_offset_k(start); p4dp = p4d_offset(pgdp, start); pudp = pud_offset(p4dp, start); - if (pud_none(*(pud_t *)__nocache_fix(pudp))) { + if (pud_none(*__nocache_fix(pudp))) { pmdp = __srmmu_get_nocache( SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); if (pmdp == NULL) @@ -698,7 +698,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, pud_set(__nocache_fix(pudp), pmdp); } pmdp = pmd_offset(__nocache_fix(pudp), start); - if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { + if (srmmu_pmd_none(*__nocache_fix(pmdp))) { ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE); if (ptep == NULL) early_pgtable_allocfail("pte"); @@ -810,11 +810,11 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start, p4dp = p4d_offset(pgdp, start); pudp = pud_offset(p4dp, start); if (what == 2) { - *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed); + *__nocache_fix(pgdp) = __pgd(probed); start += PGDIR_SIZE; continue; } - if (pud_none(*(pud_t 
*)__nocache_fix(pudp))) { + if (pud_none(*__nocache_fix(pudp))) { pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); if (pmdp == NULL) @@ -822,13 +822,13 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start, memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); pud_set(__nocache_fix(pudp), pmdp); } - pmdp = pmd_offset(__nocache_fix(pgdp), start); + pmdp = pmd_offset(__nocache_fix(pudp), start); if (what == 1) { *(pmd_t *)__nocache_fix(pmdp) = __pmd(probed); start += PMD_SIZE; continue; } - if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { + if (srmmu_pmd_none(*__nocache_fix(pmdp))) { ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE); if (ptep == NULL) early_pgtable_allocfail("pte"); @@ -836,7 +836,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start, pmd_set(__nocache_fix(pmdp), ptep); } ptep = pte_offset_kernel(__nocache_fix(pmdp), start); - *(pte_t *)__nocache_fix(ptep) = __pte(probed); + *__nocache_fix(ptep) = __pte(probed); start += PAGE_SIZE; } } @@ -850,7 +850,7 @@ static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base unsigned long big_pte; big_pte = KERNEL_PTE(phys_base >> 4); - *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte); + *__nocache_fix(pgdp) = __pgd(big_pte); } /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */ @@ -940,7 +940,7 @@ void __init srmmu_paging_init(void) srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table); for (i = 0; i < num_contexts; i++) - srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir); + srmmu_ctxd_set(__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir); flush_cache_all(); srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys); diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig index 2e7b8e0e7194..03ba34b61115 100644 --- a/arch/um/drivers/Kconfig +++ b/arch/um/drivers/Kconfig @@ -323,7 +323,7 @@ config UML_NET_SLIRP frames. In general, slirp allows the UML the same IP connectivity to the outside world that the host user is permitted, and unlike other transports, SLiRP works without the need of root level - privleges, setuid binaries, or SLIP devices on the host. This + privileges, setuid binaries, or SLIP devices on the host. This also means not every type of connection is possible, but most situations can be accommodated with carefully crafted slirp commands that can be passed along as part of the network device's @@ -346,3 +346,14 @@ config VIRTIO_UML help This driver provides support for virtio based paravirtual device drivers over vhost-user sockets. + +config UML_RTC + bool "UML RTC driver" + depends on RTC_CLASS + # there's no use in this if PM_SLEEP isn't enabled ... + depends on PM_SLEEP + help + When PM_SLEEP is configured, it may be desirable to wake up using + rtcwake, especially in time-travel mode. This driver enables that + by providing a fake RTC clock that causes a wakeup at the right + time. 
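The host side of this driver (rtc_user.c, added later in this patch) models the alarm as a one-shot CLOCK_REALTIME timerfd outside of time-travel mode. The standalone sketch below illustrates only that host mechanism; it is not part of the patch, and everything in it beyond the standard timerfd API is illustrative:

/* Illustrative only, not from the patch: a one-shot CLOCK_REALTIME
 * timerfd acting as an RTC-style alarm, the mechanism rtc_user.c
 * builds on. */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <time.h>
#include <sys/timerfd.h>

int main(void)
{
	struct itimerspec it = { .it_value = { .tv_sec = 5 } };
	uint64_t expirations;
	int fd = timerfd_create(CLOCK_REALTIME, TFD_CLOEXEC);

	if (fd < 0 || timerfd_settime(fd, 0, &it, NULL) < 0)
		return 1;
	/* read() blocks until the "alarm" fires, then yields the count */
	if (read(fd, &expirations, sizeof(expirations)) != sizeof(expirations))
		return 1;
	printf("alarm fired\n");
	return 0;
}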
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile index 2a249f619467..dcc64a02f81f 100644 --- a/arch/um/drivers/Makefile +++ b/arch/um/drivers/Makefile @@ -17,6 +17,7 @@ hostaudio-objs := hostaudio_kern.o ubd-objs := ubd_kern.o ubd_user.o port-objs := port_kern.o port_user.o harddog-objs := harddog_kern.o harddog_user.o +rtc-objs := rtc_kern.o rtc_user.o LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a) @@ -62,6 +63,7 @@ obj-$(CONFIG_UML_WATCHDOG) += harddog.o obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o obj-$(CONFIG_UML_RANDOM) += random.o obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o +obj-$(CONFIG_UML_RTC) += rtc.o # pcap_user.o must be added explicitly. USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o pcap_user.o vde_user.o vector_user.o diff --git a/arch/um/drivers/rtc.h b/arch/um/drivers/rtc.h new file mode 100644 index 000000000000..95e41c7d35c4 --- /dev/null +++ b/arch/um/drivers/rtc.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Intel Corporation + * Author: Johannes Berg <johannes@sipsolutions.net> + */ +#ifndef __UM_RTC_H__ +#define __UM_RTC_H__ + +int uml_rtc_start(bool timetravel); +int uml_rtc_enable_alarm(unsigned long long delta_seconds); +void uml_rtc_disable_alarm(void); +void uml_rtc_stop(bool timetravel); +void uml_rtc_send_timetravel_alarm(void); + +#endif /* __UM_RTC_H__ */ diff --git a/arch/um/drivers/rtc_kern.c b/arch/um/drivers/rtc_kern.c new file mode 100644 index 000000000000..97ceb205cfe6 --- /dev/null +++ b/arch/um/drivers/rtc_kern.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Intel Corporation + * Author: Johannes Berg <johannes@sipsolutions.net> + */ +#include <linux/platform_device.h> +#include <linux/time-internal.h> +#include <linux/suspend.h> +#include <linux/err.h> +#include <linux/rtc.h> +#include <kern_util.h> +#include <irq_kern.h> +#include <os.h> +#include "rtc.h" + +static time64_t uml_rtc_alarm_time; +static bool uml_rtc_alarm_enabled; +static struct rtc_device *uml_rtc; +static int uml_rtc_irq_fd, uml_rtc_irq; + +#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT + +static void uml_rtc_time_travel_alarm(struct time_travel_event *ev) +{ + uml_rtc_send_timetravel_alarm(); +} + +static struct time_travel_event uml_rtc_alarm_event = { + .fn = uml_rtc_time_travel_alarm, +}; +#endif + +static int uml_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct timespec64 ts; + + /* Use this to get correct time in time-travel mode */ + read_persistent_clock64(&ts); + rtc_time64_to_tm(timespec64_to_ktime(ts) / NSEC_PER_SEC, tm); + + return 0; +} + +static int uml_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + rtc_time64_to_tm(uml_rtc_alarm_time, &alrm->time); + alrm->enabled = uml_rtc_alarm_enabled; + + return 0; +} + +static int uml_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) +{ + unsigned long long secs; + + if (!enable && !uml_rtc_alarm_enabled) + return 0; + + uml_rtc_alarm_enabled = enable; + + secs = uml_rtc_alarm_time - ktime_get_real_seconds(); + + if (time_travel_mode == TT_MODE_OFF) { + if (!enable) { + uml_rtc_disable_alarm(); + return 0; + } + + /* enable or update */ + return uml_rtc_enable_alarm(secs); + } else { + time_travel_del_event(&uml_rtc_alarm_event); + + if (enable) + time_travel_add_event_rel(&uml_rtc_alarm_event, + secs * NSEC_PER_SEC); + } + + return 0; +} + +static int uml_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + uml_rtc_alarm_irq_enable(dev, 0); + 
uml_rtc_alarm_time = rtc_tm_to_time64(&alrm->time); + uml_rtc_alarm_irq_enable(dev, alrm->enabled); + + return 0; +} + +static const struct rtc_class_ops uml_rtc_ops = { + .read_time = uml_rtc_read_time, + .read_alarm = uml_rtc_read_alarm, + .alarm_irq_enable = uml_rtc_alarm_irq_enable, + .set_alarm = uml_rtc_set_alarm, +}; + +static irqreturn_t uml_rtc_interrupt(int irq, void *data) +{ + unsigned long long c = 0; + + /* alarm triggered, it's now off */ + uml_rtc_alarm_enabled = false; + + os_read_file(uml_rtc_irq_fd, &c, sizeof(c)); + WARN_ON(c == 0); + + pm_system_wakeup(); + rtc_update_irq(uml_rtc, 1, RTC_IRQF | RTC_AF); + + return IRQ_HANDLED; +} + +static int uml_rtc_setup(void) +{ + int err; + + err = uml_rtc_start(time_travel_mode != TT_MODE_OFF); + if (WARN(err < 0, "err = %d\n", err)) + return err; + + uml_rtc_irq_fd = err; + + err = um_request_irq(UM_IRQ_ALLOC, uml_rtc_irq_fd, IRQ_READ, + uml_rtc_interrupt, 0, "rtc", NULL); + if (err < 0) { + uml_rtc_stop(time_travel_mode != TT_MODE_OFF); + return err; + } + + irq_set_irq_wake(err, 1); + + uml_rtc_irq = err; + return 0; +} + +static void uml_rtc_cleanup(void) +{ + um_free_irq(uml_rtc_irq, NULL); + uml_rtc_stop(time_travel_mode != TT_MODE_OFF); +} + +static int uml_rtc_probe(struct platform_device *pdev) +{ + int err; + + err = uml_rtc_setup(); + if (err) + return err; + + uml_rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(uml_rtc)) { + err = PTR_ERR(uml_rtc); + goto cleanup; + } + + uml_rtc->ops = &uml_rtc_ops; + + device_init_wakeup(&pdev->dev, 1); + + err = devm_rtc_register_device(uml_rtc); + if (err) + goto cleanup; + + return 0; +cleanup: + uml_rtc_cleanup(); + return err; +} + +static int uml_rtc_remove(struct platform_device *pdev) +{ + device_init_wakeup(&pdev->dev, 0); + uml_rtc_cleanup(); + return 0; +} + +static struct platform_driver uml_rtc_driver = { + .probe = uml_rtc_probe, + .remove = uml_rtc_remove, + .driver = { + .name = "uml-rtc", + }, +}; + +static int __init uml_rtc_init(void) +{ + struct platform_device *pdev; + int err; + + err = platform_driver_register(&uml_rtc_driver); + if (err) + return err; + + pdev = platform_device_alloc("uml-rtc", 0); + if (!pdev) { + err = -ENOMEM; + goto unregister; + } + + err = platform_device_add(pdev); + if (err) + goto unregister; + return 0; + +unregister: + platform_device_put(pdev); + platform_driver_unregister(&uml_rtc_driver); + return err; +} +device_initcall(uml_rtc_init); diff --git a/arch/um/drivers/rtc_user.c b/arch/um/drivers/rtc_user.c new file mode 100644 index 000000000000..4016bc1d577e --- /dev/null +++ b/arch/um/drivers/rtc_user.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Intel Corporation + * Author: Johannes Berg <johannes@sipsolutions.net> + */ +#include <os.h> +#include <errno.h> +#include <sched.h> +#include <unistd.h> +#include <kern_util.h> +#include <sys/select.h> +#include <stdio.h> +#include <sys/timerfd.h> +#include "rtc.h" + +static int uml_rtc_irq_fds[2]; + +void uml_rtc_send_timetravel_alarm(void) +{ + unsigned long long c = 1; + + CATCH_EINTR(write(uml_rtc_irq_fds[1], &c, sizeof(c))); +} + +int uml_rtc_start(bool timetravel) +{ + int err; + + if (timetravel) { + err = os_pipe(uml_rtc_irq_fds, 1, 1); + if (err) + goto fail; + } else { + uml_rtc_irq_fds[0] = timerfd_create(CLOCK_REALTIME, TFD_CLOEXEC); + if (uml_rtc_irq_fds[0] < 0) { + err = -errno; + goto fail; + } + + /* apparently timerfd won't send SIGIO, use workaround */ + sigio_broken(uml_rtc_irq_fds[0]); + err = 
add_sigio_fd(uml_rtc_irq_fds[0]); + if (err < 0) { + close(uml_rtc_irq_fds[0]); + goto fail; + } + } + + return uml_rtc_irq_fds[0]; +fail: + uml_rtc_stop(timetravel); + return err; +} + +int uml_rtc_enable_alarm(unsigned long long delta_seconds) +{ + struct itimerspec it = { + .it_value = { + .tv_sec = delta_seconds, + }, + }; + + if (timerfd_settime(uml_rtc_irq_fds[0], 0, &it, NULL)) + return -errno; + return 0; +} + +void uml_rtc_disable_alarm(void) +{ + uml_rtc_enable_alarm(0); +} + +void uml_rtc_stop(bool timetravel) +{ + if (timetravel) + os_close_file(uml_rtc_irq_fds[1]); + else + ignore_sigio_fd(uml_rtc_irq_fds[0]); + os_close_file(uml_rtc_irq_fds[0]); +} diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c index 5d957b7e7fd5..91ddf74ca888 100644 --- a/arch/um/drivers/virtio_uml.c +++ b/arch/um/drivers/virtio_uml.c @@ -55,16 +55,16 @@ struct virtio_uml_device { u64 protocol_features; u8 status; u8 registered:1; + u8 suspended:1; + + u8 config_changed_irq:1; + uint64_t vq_irq_vq_map; }; struct virtio_uml_vq_info { int kick_fd, call_fd; char name[32]; -#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT - struct virtqueue *vq; - vq_callback_t *callback; - struct time_travel_event defer; -#endif + bool suspended; }; extern unsigned long long physmem_size, highmem; @@ -97,6 +97,9 @@ static int full_read(int fd, void *buf, int len, bool abortable) { int rc; + if (!len) + return 0; + do { rc = os_read_file(fd, buf, len); if (rc > 0) { @@ -347,9 +350,9 @@ static void vhost_user_reply(struct virtio_uml_device *vu_dev, rc, size); } -static irqreturn_t vu_req_interrupt(int irq, void *data) +static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev, + struct time_travel_event *ev) { - struct virtio_uml_device *vu_dev = data; struct virtqueue *vq; int response = 1; struct { @@ -367,14 +370,14 @@ static irqreturn_t vu_req_interrupt(int irq, void *data) switch (msg.msg.header.request) { case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG: - virtio_config_changed(&vu_dev->vdev); + vu_dev->config_changed_irq = true; response = 0; break; case VHOST_USER_SLAVE_VRING_CALL: virtio_device_for_each_vq((&vu_dev->vdev), vq) { if (vq->index == msg.msg.payload.vring_state.index) { response = 0; - vring_interrupt(0 /* ignored */, vq); + vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index); break; } } @@ -388,12 +391,45 @@ static irqreturn_t vu_req_interrupt(int irq, void *data) msg.msg.header.request); } + if (ev && !vu_dev->suspended) + time_travel_add_irq_event(ev); + if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY) vhost_user_reply(vu_dev, &msg.msg, response); return IRQ_HANDLED; } +static irqreturn_t vu_req_interrupt(int irq, void *data) +{ + struct virtio_uml_device *vu_dev = data; + irqreturn_t ret = IRQ_HANDLED; + + if (!um_irq_timetravel_handler_used()) + ret = vu_req_read_message(vu_dev, NULL); + + if (vu_dev->vq_irq_vq_map) { + struct virtqueue *vq; + + virtio_device_for_each_vq((&vu_dev->vdev), vq) { + if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index)) + vring_interrupt(0 /* ignored */, vq); + } + vu_dev->vq_irq_vq_map = 0; + } else if (vu_dev->config_changed_irq) { + virtio_config_changed(&vu_dev->vdev); + vu_dev->config_changed_irq = false; + } + + return ret; +} + +static void vu_req_interrupt_comm_handler(int irq, int fd, void *data, + struct time_travel_event *ev) +{ + vu_req_read_message(data, ev); +} + static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev) { int rc, req_fds[2]; @@ -404,9 +440,10 @@ static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev) 
return rc; vu_dev->req_fd = req_fds[0]; - rc = um_request_irq(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ, - vu_req_interrupt, IRQF_SHARED, - vu_dev->pdev->name, vu_dev); + rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ, + vu_req_interrupt, IRQF_SHARED, + vu_dev->pdev->name, vu_dev, + vu_req_interrupt_comm_handler); if (rc < 0) goto err_close; @@ -722,6 +759,9 @@ static bool vu_notify(struct virtqueue *vq) const uint64_t n = 1; int rc; + if (info->suspended) + return true; + time_travel_propagate_time(); if (info->kick_fd < 0) { @@ -875,23 +915,6 @@ out: return rc; } -#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT -static void vu_defer_irq_handle(struct time_travel_event *d) -{ - struct virtio_uml_vq_info *info; - - info = container_of(d, struct virtio_uml_vq_info, defer); - info->callback(info->vq); -} - -static void vu_defer_irq_callback(struct virtqueue *vq) -{ - struct virtio_uml_vq_info *info = vq->priv; - - time_travel_add_irq_event(&info->defer); -} -#endif - static struct virtqueue *vu_setup_vq(struct virtio_device *vdev, unsigned index, vq_callback_t *callback, const char *name, bool ctx) @@ -911,19 +934,6 @@ static struct virtqueue *vu_setup_vq(struct virtio_device *vdev, snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name, pdev->id, name); -#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT - /* - * When we get an interrupt, we must bounce it through the simulation - * calendar (the simtime device), except for the simtime device itself - * since that's part of the simulation control. - */ - if (time_travel_mode == TT_MODE_EXTERNAL && callback) { - info->callback = callback; - callback = vu_defer_irq_callback; - time_travel_set_event_fn(&info->defer, vu_defer_irq_handle); - } -#endif - vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true, ctx, vu_notify, callback, info->name); if (!vq) { @@ -932,9 +942,6 @@ static struct virtqueue *vu_setup_vq(struct virtio_device *vdev, } vq->priv = info; num = virtqueue_get_vring_size(vq); -#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT - info->vq = vq; -#endif if (vu_dev->protocol_features & BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) { @@ -993,6 +1000,10 @@ static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs, int i, queue_idx = 0, rc; struct virtqueue *vq; + /* not supported for now */ + if (WARN_ON(nvqs > 64)) + return -EINVAL; + rc = vhost_user_set_mem_table(vu_dev); if (rc) return rc; @@ -1125,6 +1136,8 @@ static int virtio_uml_probe(struct platform_device *pdev) platform_set_drvdata(pdev, vu_dev); + device_set_wakeup_capable(&vu_dev->vdev.dev, true); + rc = register_virtio_device(&vu_dev->vdev); if (rc) put_device(&vu_dev->vdev.dev); @@ -1286,6 +1299,46 @@ static const struct of_device_id virtio_uml_match[] = { }; MODULE_DEVICE_TABLE(of, virtio_uml_match); +static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev); + struct virtqueue *vq; + + virtio_device_for_each_vq((&vu_dev->vdev), vq) { + struct virtio_uml_vq_info *info = vq->priv; + + info->suspended = true; + vhost_user_set_vring_enable(vu_dev, vq->index, false); + } + + if (!device_may_wakeup(&vu_dev->vdev.dev)) { + vu_dev->suspended = true; + return 0; + } + + return irq_set_irq_wake(vu_dev->irq, 1); +} + +static int virtio_uml_resume(struct platform_device *pdev) +{ + struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev); + struct virtqueue *vq; + + virtio_device_for_each_vq((&vu_dev->vdev), vq) { + struct virtio_uml_vq_info *info = vq->priv; + + 
info->suspended = false; + vhost_user_set_vring_enable(vu_dev, vq->index, true); + } + + vu_dev->suspended = false; + + if (!device_may_wakeup(&vu_dev->vdev.dev)) + return 0; + + return irq_set_irq_wake(vu_dev->irq, 0); +} + static struct platform_driver virtio_uml_driver = { .probe = virtio_uml_probe, .remove = virtio_uml_remove, @@ -1293,6 +1346,8 @@ static struct platform_driver virtio_uml_driver = { .name = "virtio-uml", .of_match_table = virtio_uml_match, }, + .suspend = virtio_uml_suspend, + .resume = virtio_uml_resume, }; static int __init virtio_uml_init(void) diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 1c63b260ecc4..d7492e5a1bbb 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -14,15 +14,16 @@ generic-y += irq_regs.h generic-y += irq_work.h generic-y += kdebug.h generic-y += mcs_spinlock.h -generic-y += mm-arch-hooks.h generic-y += mmiowb.h generic-y += module.lds.h generic-y += param.h generic-y += pci.h generic-y += percpu.h generic-y += preempt.h +generic-y += softirq_stack.h generic-y += switch_to.h generic-y += topology.h generic-y += trace_clock.h generic-y += word-at-a-time.h generic-y += kprobes.h +generic-y += mm_hooks.h diff --git a/arch/um/include/asm/io.h b/arch/um/include/asm/io.h index cef03e3aa0f9..6ce18d343997 100644 --- a/arch/um/include/asm/io.h +++ b/arch/um/include/asm/io.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_UM_IO_H #define _ASM_UM_IO_H +#include <linux/types.h> #define ioremap ioremap static inline void __iomem *ioremap(phys_addr_t offset, size_t size) diff --git a/arch/um/include/asm/irq.h b/arch/um/include/asm/irq.h index 547bff7b3a89..3f5d3e8228fc 100644 --- a/arch/um/include/asm/irq.h +++ b/arch/um/include/asm/irq.h @@ -33,4 +33,5 @@ #define NR_IRQS 64 +#include <asm-generic/irq.h> #endif diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h index f8a100770691..68e2eb9cfb47 100644 --- a/arch/um/include/asm/mmu_context.h +++ b/arch/um/include/asm/mmu_context.h @@ -10,33 +10,9 @@ #include <linux/mm_types.h> #include <linux/mmap_lock.h> +#include <asm/mm_hooks.h> #include <asm/mmu.h> -extern void uml_setup_stubs(struct mm_struct *mm); -/* - * Needed since we do not use the asm-generic/mm_hooks.h: - */ -static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) -{ - uml_setup_stubs(mm); - return 0; -} -extern void arch_exit_mmap(struct mm_struct *mm); -static inline void arch_unmap(struct mm_struct *mm, - unsigned long start, unsigned long end) -{ -} -static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, - bool write, bool execute, bool foreign) -{ - /* by default, allow everything */ - return true; -} - -/* - * end asm-generic/mm_hooks.h functions - */ - extern void force_flush_all(void); #define activate_mm activate_mm @@ -47,9 +23,6 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new) * when the new ->mm is used for the first time. 
*/ __switch_mm(&new->context.id); - mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING); - uml_setup_stubs(new); - mmap_write_unlock(new); } static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, diff --git a/arch/um/include/linux/time-internal.h b/arch/um/include/linux/time-internal.h index 68e45e950137..759956ab0108 100644 --- a/arch/um/include/linux/time-internal.h +++ b/arch/um/include/linux/time-internal.h @@ -7,6 +7,7 @@ #ifndef __TIMER_INTERNAL_H__ #define __TIMER_INTERNAL_H__ #include <linux/list.h> +#include <asm/bug.h> #define TIMER_MULTIPLIER 256 #define TIMER_MIN_DELTA 500 @@ -54,6 +55,9 @@ static inline void time_travel_wait_readable(int fd) } void time_travel_add_irq_event(struct time_travel_event *e); +void time_travel_add_event_rel(struct time_travel_event *e, + unsigned long long delay_ns); +bool time_travel_del_event(struct time_travel_event *e); #else struct time_travel_event { }; @@ -74,6 +78,19 @@ static inline void time_travel_propagate_time(void) static inline void time_travel_wait_readable(int fd) { } + +static inline void time_travel_add_irq_event(struct time_travel_event *e) +{ + WARN_ON(1); +} + +/* + * not inlines so the data structure need not exist, + * cause linker failures + */ +extern void time_travel_not_configured(void); +#define time_travel_add_event_rel(...) time_travel_not_configured() +#define time_travel_del_event(...) time_travel_not_configured() #endif /* CONFIG_UML_TIME_TRAVEL_SUPPORT */ /* diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h index 5f286ef2721b..9a0bd648d872 100644 --- a/arch/um/include/shared/as-layout.h +++ b/arch/um/include/shared/as-layout.h @@ -20,18 +20,10 @@ * 'UL' and other type specifiers unilaterally. We * use the following macros to deal with this. 
*/ - -#ifdef __ASSEMBLY__ -#define _UML_AC(X, Y) (Y) -#else -#define __UML_AC(X, Y) (X(Y)) -#define _UML_AC(X, Y) __UML_AC(X, Y) -#endif - -#define STUB_START _UML_AC(, 0x100000) -#define STUB_CODE _UML_AC((unsigned long), STUB_START) -#define STUB_DATA _UML_AC((unsigned long), STUB_CODE + UM_KERN_PAGE_SIZE) -#define STUB_END _UML_AC((unsigned long), STUB_DATA + UM_KERN_PAGE_SIZE) +#define STUB_START stub_start +#define STUB_CODE STUB_START +#define STUB_DATA (STUB_CODE + UM_KERN_PAGE_SIZE) +#define STUB_END (STUB_DATA + UM_KERN_PAGE_SIZE) #ifndef __ASSEMBLY__ @@ -54,6 +46,7 @@ extern unsigned long long highmem; extern unsigned long brk_start; extern unsigned long host_task_size; +extern unsigned long stub_start; extern int linux_main(int argc, char **argv); extern void uml_finishsetup(void); diff --git a/arch/um/include/shared/common-offsets.h b/arch/um/include/shared/common-offsets.h index 16a51a8c800f..edc90ab73734 100644 --- a/arch/um/include/shared/common-offsets.h +++ b/arch/um/include/shared/common-offsets.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* for use by sys-$SUBARCH/kernel-offsets.c */ +#include <stub-data.h> DEFINE(KERNEL_MADV_REMOVE, MADV_REMOVE); @@ -43,3 +44,8 @@ DEFINE(UML_CONFIG_64BIT, CONFIG_64BIT); #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT DEFINE(UML_CONFIG_UML_TIME_TRAVEL_SUPPORT, CONFIG_UML_TIME_TRAVEL_SUPPORT); #endif + +/* for stub */ +DEFINE(UML_STUB_FIELD_OFFSET, offsetof(struct stub_data, offset)); +DEFINE(UML_STUB_FIELD_CHILD_ERR, offsetof(struct stub_data, child_err)); +DEFINE(UML_STUB_FIELD_FD, offsetof(struct stub_data, fd)); diff --git a/arch/um/include/shared/irq_kern.h b/arch/um/include/shared/irq_kern.h index 7807de593bda..f2dc817abb7c 100644 --- a/arch/um/include/shared/irq_kern.h +++ b/arch/um/include/shared/irq_kern.h @@ -7,6 +7,7 @@ #define __IRQ_KERN_H__ #include <linux/interrupt.h> +#include <linux/time-internal.h> #include <asm/ptrace.h> #include "irq_user.h" @@ -15,5 +16,64 @@ int um_request_irq(int irq, int fd, enum um_irq_type type, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); + +#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT +/** + * um_request_irq_tt - request an IRQ with timetravel handler + * + * @irq: the IRQ number, or %UM_IRQ_ALLOC + * @fd: The file descriptor to request an IRQ for + * @type: read or write + * @handler: the (generic style) IRQ handler + * @irqflags: Linux IRQ flags + * @devname: name for this to show + * @dev_id: data pointer to pass to the IRQ handler + * @timetravel_handler: the timetravel interrupt handler, invoked with the IRQ + * number, fd, dev_id and time-travel event pointer. + * + * Returns: The interrupt number assigned or a negative error. + * + * Note that the timetravel handler is invoked only if the time_travel_mode is + * %TT_MODE_EXTERNAL, and then it is invoked even while the system is suspended! + * This function must call time_travel_add_irq_event() for the event passed with + * an appropriate delay, before sending an ACK on the socket it was invoked for. + * + * If this was called while the system is suspended, then adding the event will + * cause the system to resume. + * + * Since this function will almost certainly have to handle the FD's condition, + * a read will consume the message, and after that it is up to the code using + * it to pass such a message to the @handler in whichever way it can. 
+ * + * If time_travel_mode is not %TT_MODE_EXTERNAL the @timetravel_handler will + * not be invoked at all and the @handler must handle the FD becoming + * readable (or writable) instead. Use um_irq_timetravel_handler_used() to + * distinguish these cases. + * + * See virtio_uml.c for an example. + */ +int um_request_irq_tt(int irq, int fd, enum um_irq_type type, + irq_handler_t handler, unsigned long irqflags, + const char *devname, void *dev_id, + void (*timetravel_handler)(int, int, void *, + struct time_travel_event *)); +#else +static inline +int um_request_irq_tt(int irq, int fd, enum um_irq_type type, + irq_handler_t handler, unsigned long irqflags, + const char *devname, void *dev_id, + void (*timetravel_handler)(int, int, void *, + struct time_travel_event *)) +{ + return um_request_irq(irq, fd, type, handler, irqflags, + devname, dev_id); +} +#endif + +static inline bool um_irq_timetravel_handler_used(void) +{ + return time_travel_mode == TT_MODE_EXTERNAL; +} + void um_free_irq(int irq, void *dev_id); #endif diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h index 4337b4ced095..e82e203f5f41 100644 --- a/arch/um/include/shared/skas/mm_id.h +++ b/arch/um/include/shared/skas/mm_id.h @@ -12,6 +12,7 @@ struct mm_id { int pid; } u; unsigned long stack; + int kill; }; #endif diff --git a/arch/um/include/shared/skas/stub-data.h b/arch/um/include/shared/skas/stub-data.h index 6b01d97a9386..5e3ade3fb38b 100644 --- a/arch/um/include/shared/skas/stub-data.h +++ b/arch/um/include/shared/skas/stub-data.h @@ -11,7 +11,7 @@ struct stub_data { unsigned long offset; int fd; - long err; + long parent_err, child_err; }; #endif diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c index e8fd5d540b05..4d8498100341 100644 --- a/arch/um/kernel/exec.c +++ b/arch/um/kernel/exec.c @@ -26,9 +26,7 @@ void flush_thread(void) arch_flush_thread(&current->thread.arch); - ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data); - ret = ret || unmap(&current->mm->context.id, STUB_END, - host_task_size - STUB_END, 1, &data); + ret = unmap(&current->mm->context.id, 0, TASK_SIZE, 1, &data); if (ret) { printk(KERN_ERR "flush_thread - clearing address space failed, " "err = %d\n", ret); diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 3741d2380060..82af5191e73d 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -20,7 +20,7 @@ #include <os.h> #include <irq_user.h> #include <irq_kern.h> -#include <as-layout.h> +#include <linux/time-internal.h> extern void free_irqs(void); @@ -38,6 +38,12 @@ struct irq_reg { bool active; bool pending; bool wakeup; +#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT + bool pending_on_resume; + void (*timetravel_handler)(int, int, void *, + struct time_travel_event *); + struct time_travel_event event; +#endif }; struct irq_entry { @@ -51,6 +57,7 @@ struct irq_entry { static DEFINE_SPINLOCK(irq_lock); static LIST_HEAD(active_fds); static DECLARE_BITMAP(irqs_allocated, NR_IRQS); +static bool irqs_suspended; static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs) { @@ -74,9 +81,65 @@ static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs) } } -void sigio_handler_suspend(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) +#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT +static void irq_event_handler(struct time_travel_event *ev) { - /* nothing */ + struct irq_reg *reg = container_of(ev, struct irq_reg, event); + + /* do nothing if suspended - just to cause a wakeup */ + if (irqs_suspended) + return; + + 
generic_handle_irq(reg->irq); +} + +static bool irq_do_timetravel_handler(struct irq_entry *entry, + enum um_irq_type t) +{ + struct irq_reg *reg = &entry->reg[t]; + + if (!reg->timetravel_handler) + return false; + + /* prevent nesting - we'll get it again later when we SIGIO ourselves */ + if (reg->pending_on_resume) + return true; + + reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event); + + if (!reg->event.pending) + return false; + + if (irqs_suspended) + reg->pending_on_resume = true; + return true; +} +#else +static bool irq_do_timetravel_handler(struct irq_entry *entry, + enum um_irq_type t) +{ + return false; +} +#endif + +static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t, + struct uml_pt_regs *regs) +{ + struct irq_reg *reg = &entry->reg[t]; + + if (!reg->events) + return; + + if (os_epoll_triggered(idx, reg->events) <= 0) + return; + + if (irq_do_timetravel_handler(entry, t)) + return; + + if (irqs_suspended) + return; + + irq_io_loop(reg, regs); } void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) @@ -84,6 +147,9 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) struct irq_entry *irq_entry; int n, i; + if (irqs_suspended && !um_irq_timetravel_handler_used()) + return; + while (1) { /* This is now lockless - epoll keeps back-references to the irqs * which have triggered it so there is no need to walk the irq @@ -105,19 +171,13 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) irq_entry = os_epoll_get_data_pointer(i); - for (t = 0; t < NUM_IRQ_TYPES; t++) { - int events = irq_entry->reg[t].events; - - if (!events) - continue; - - if (os_epoll_triggered(i, events) > 0) - irq_io_loop(&irq_entry->reg[t], regs); - } + for (t = 0; t < NUM_IRQ_TYPES; t++) + sigio_reg_handler(i, irq_entry, t, regs); } } - free_irqs(); + if (!irqs_suspended) + free_irqs(); } static struct irq_entry *get_irq_entry_by_fd(int fd) @@ -169,7 +229,9 @@ static void update_or_free_irq_entry(struct irq_entry *entry) free_irq_entry(entry, false); } -static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id) +static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id, + void (*timetravel_handler)(int, int, void *, + struct time_travel_event *)) { struct irq_entry *irq_entry; int err, events = os_event_mask(type); @@ -206,6 +268,13 @@ static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id) irq_entry->reg[type].active = true; irq_entry->reg[type].events = events; +#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT + if (um_irq_timetravel_handler_used()) { + irq_entry->reg[type].timetravel_handler = timetravel_handler; + irq_entry->reg[type].event.fn = irq_event_handler; + } +#endif + + WARN_ON(!update_irq_entry(irq_entry)); spin_unlock_irqrestore(&irq_lock, flags); @@ -339,9 +408,12 @@ void um_free_irq(int irq, void *dev) } EXPORT_SYMBOL(um_free_irq); -int um_request_irq(int irq, int fd, enum um_irq_type type, - irq_handler_t handler, unsigned long irqflags, - const char *devname, void *dev_id) +static int +_um_request_irq(int irq, int fd, enum um_irq_type type, + irq_handler_t handler, unsigned long irqflags, + const char *devname, void *dev_id, + void (*timetravel_handler)(int, int, void *, + struct time_travel_event *)) { int err; @@ -360,7 +432,7 @@ int um_request_irq(int irq, int fd, enum um_irq_type type, return -ENOSPC; if (fd != -1) { - err = activate_fd(irq, fd, type, dev_id); + err = activate_fd(irq, fd, type, dev_id, 
timetravel_handler); if (err) goto error; } @@ -374,20 +446,41 @@ error: clear_bit(irq, irqs_allocated); return err; } + +int um_request_irq(int irq, int fd, enum um_irq_type type, + irq_handler_t handler, unsigned long irqflags, + const char *devname, void *dev_id) +{ + return _um_request_irq(irq, fd, type, handler, irqflags, + devname, dev_id, NULL); +} EXPORT_SYMBOL(um_request_irq); +#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT +int um_request_irq_tt(int irq, int fd, enum um_irq_type type, + irq_handler_t handler, unsigned long irqflags, + const char *devname, void *dev_id, + void (*timetravel_handler)(int, int, void *, + struct time_travel_event *)) +{ + return _um_request_irq(irq, fd, type, handler, irqflags, + devname, dev_id, timetravel_handler); +} +EXPORT_SYMBOL(um_request_irq_tt); +#endif + #ifdef CONFIG_PM_SLEEP void um_irqs_suspend(void) { struct irq_entry *entry; unsigned long flags; - sig_info[SIGIO] = sigio_handler_suspend; + irqs_suspended = true; spin_lock_irqsave(&irq_lock, flags); list_for_each_entry(entry, &active_fds, list) { enum um_irq_type t; - bool wake = false; + bool clear = true; for (t = 0; t < NUM_IRQ_TYPES; t++) { if (!entry->reg[t].events) @@ -400,13 +493,17 @@ void um_irqs_suspend(void) * any FDs that should be suspended. */ if (entry->reg[t].wakeup || - entry->reg[t].irq == SIGIO_WRITE_IRQ) { - wake = true; + entry->reg[t].irq == SIGIO_WRITE_IRQ +#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT + || entry->reg[t].timetravel_handler +#endif + ) { + clear = false; break; } } - if (!wake) { + if (clear) { entry->suspended = true; os_clear_fd_async(entry->fd); entry->sigio_workaround = @@ -421,7 +518,31 @@ void um_irqs_resume(void) struct irq_entry *entry; unsigned long flags; - spin_lock_irqsave(&irq_lock, flags); + + local_irq_save(flags); +#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT + /* + * We don't need to lock anything here since we're in resume + * and nothing else is running, but have disabled IRQs so we + * don't try anything else with the interrupt list from there. 
+ */ + list_for_each_entry(entry, &active_fds, list) { + enum um_irq_type t; + + for (t = 0; t < NUM_IRQ_TYPES; t++) { + struct irq_reg *reg = &entry->reg[t]; + + if (reg->pending_on_resume) { + irq_enter(); + generic_handle_irq(reg->irq); + irq_exit(); + reg->pending_on_resume = false; + } + } + } +#endif + + spin_lock(&irq_lock); list_for_each_entry(entry, &active_fds, list) { if (entry->suspended) { int err = os_set_fd_async(entry->fd); @@ -437,7 +558,7 @@ void um_irqs_resume(void) } spin_unlock_irqrestore(&irq_lock, flags); - sig_info[SIGIO] = sigio_handler; + irqs_suspended = false; send_sigio_to_self(); } diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 81d508daf67c..c5011064b5dd 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -157,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, struct task_struct * p, unsigned long tls) { void (*handler)(void); - int kthread = current->flags & PF_KTHREAD; + int kthread = current->flags & (PF_KTHREAD | PF_IO_WORKER); int ret = 0; p->thread = (struct thread_struct) INIT_THREAD; diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c index bfb70c456b30..592cdb138441 100644 --- a/arch/um/kernel/skas/clone.c +++ b/arch/um/kernel/skas/clone.c @@ -24,29 +24,25 @@ void __attribute__ ((__section__ (".__syscall_stub"))) stub_clone_handler(void) { - struct stub_data *data = (struct stub_data *) STUB_DATA; + int stack; + struct stub_data *data = (void *) ((unsigned long)&stack & ~(UM_KERN_PAGE_SIZE - 1)); long err; err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD, - STUB_DATA + UM_KERN_PAGE_SIZE / 2 - sizeof(void *)); - if (err != 0) - goto out; + (unsigned long)data + UM_KERN_PAGE_SIZE / 2 - sizeof(void *)); + if (err) { + data->parent_err = err; + goto done; + } err = stub_syscall4(__NR_ptrace, PTRACE_TRACEME, 0, 0, 0); - if (err) - goto out; + if (err) { + data->child_err = err; + goto done; + } - remap_stack(data->fd, data->offset); - goto done; + remap_stack_and_trap(); - out: - /* - * save current result. 
- * Parent: pid; - * child: retcode of mmap already saved and it jumps around this - * assignment - */ - data->err = err; done: trap_myself(); } diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c index d9961163da66..125df465e8ea 100644 --- a/arch/um/kernel/skas/mmu.c +++ b/arch/um/kernel/skas/mmu.c @@ -14,47 +14,6 @@ #include <os.h> #include <skas.h> -static int init_stub_pte(struct mm_struct *mm, unsigned long proc, - unsigned long kernel) -{ - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - pgd = pgd_offset(mm, proc); - - p4d = p4d_alloc(mm, pgd, proc); - if (!p4d) - goto out; - - pud = pud_alloc(mm, p4d, proc); - if (!pud) - goto out_pud; - - pmd = pmd_alloc(mm, pud, proc); - if (!pmd) - goto out_pmd; - - pte = pte_alloc_map(mm, pmd, proc); - if (!pte) - goto out_pte; - - *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT)); - *pte = pte_mkread(*pte); - return 0; - - out_pte: - pmd_free(mm, pmd); - out_pmd: - pud_free(mm, pud); - out_pud: - p4d_free(mm, p4d); - out: - return -ENOMEM; -} - int init_new_context(struct task_struct *task, struct mm_struct *mm) { struct mm_context *from_mm = NULL; @@ -98,52 +57,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm) return ret; } -void uml_setup_stubs(struct mm_struct *mm) -{ - int err, ret; - - ret = init_stub_pte(mm, STUB_CODE, - (unsigned long) __syscall_stub_start); - if (ret) - goto out; - - ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack); - if (ret) - goto out; - - mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start); - mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack); - - /* dup_mmap already holds mmap_lock */ - err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START, - VM_READ | VM_MAYREAD | VM_EXEC | - VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP, - mm->context.stub_pages); - if (err) { - printk(KERN_ERR "install_special_mapping returned %d\n", err); - goto out; - } - return; - -out: - force_sigsegv(SIGSEGV); -} - -void arch_exit_mmap(struct mm_struct *mm) -{ - pte_t *pte; - - pte = virt_to_pte(mm, STUB_CODE); - if (pte != NULL) - pte_clear(mm, STUB_CODE, pte); - - pte = virt_to_pte(mm, STUB_DATA); - if (pte == NULL) - return; - - pte_clear(mm, STUB_DATA, pte); -} - void destroy_context(struct mm_struct *mm) { struct mm_context *mmu = &mm->context; diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 315248b03941..e0cdb9694fb8 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -278,6 +278,7 @@ static void __time_travel_add_event(struct time_travel_event *e, { struct time_travel_event *tmp; bool inserted = false; + unsigned long flags; if (e->pending) return; @@ -285,6 +286,7 @@ static void __time_travel_add_event(struct time_travel_event *e, e->pending = true; e->time = time; + local_irq_save(flags); list_for_each_entry(tmp, &time_travel_events, list) { /* * Add the new entry before one with higher time, @@ -307,6 +309,7 @@ static void __time_travel_add_event(struct time_travel_event *e, tmp = time_travel_first_event(); time_travel_ext_update_request(tmp->time); time_travel_next_event = tmp->time; + local_irq_restore(flags); } static void time_travel_add_event(struct time_travel_event *e, @@ -318,6 +321,12 @@ static void time_travel_add_event(struct time_travel_event *e, __time_travel_add_event(e, time); } +void time_travel_add_event_rel(struct time_travel_event *e, + unsigned long long delay_ns) +{ + time_travel_add_event(e, time_travel_time + delay_ns); +} + void time_travel_periodic_timer(struct 
time_travel_event *e) { time_travel_add_event(&time_travel_timer_event, @@ -381,12 +390,16 @@ static void time_travel_deliver_event(struct time_travel_event *e) } } -static bool time_travel_del_event(struct time_travel_event *e) +bool time_travel_del_event(struct time_travel_event *e) { + unsigned long flags; + if (!e->pending) return false; + local_irq_save(flags); list_del(&e->list); e->pending = false; + local_irq_restore(flags); return true; } @@ -587,6 +600,8 @@ extern u64 time_travel_ext_req(u32 op, u64 time); /* these are empty macros so the struct/fn need not exist */ #define time_travel_add_event(e, time) do { } while (0) +/* externally not usable - redefine here so we can */ +#undef time_travel_del_event #define time_travel_del_event(e) do { } while (0) #endif diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index 61776790cd67..bc38f79ca3a3 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c @@ -162,9 +162,6 @@ static int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *last; int ret = 0; - if ((addr >= STUB_START) && (addr < STUB_END)) - return -EINVAL; - if (hvc->index != 0) { last = &hvc->ops[hvc->index - 1]; if ((last->type == MUNMAP) && @@ -226,9 +223,6 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr, pte = pte_offset_kernel(pmd, addr); do { - if ((addr >= STUB_START) && (addr < STUB_END)) - continue; - r = pte_read(*pte); w = pte_write(*pte); x = pte_exec(*pte); @@ -346,12 +340,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr, /* This is not an else because ret is modified above */ if (ret) { + struct mm_id *mm_idp = ¤t->mm->context.id; + printk(KERN_ERR "fix_range_common: failed, killing current " "process: %d\n", task_tgid_vnr(current)); - /* We are under mmap_lock, release it such that current can terminate */ - mmap_write_unlock(current->mm); - force_sig(SIGKILL); - do_signal(¤t->thread.regs); + mm_idp->kill = 1; } } @@ -472,6 +465,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) struct mm_id *mm_id; address &= PAGE_MASK; + pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto kill; diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 80e2660782a0..74e07e748a9b 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -249,6 +249,7 @@ void uml_finishsetup(void) } /* Set during early boot */ +unsigned long stub_start; unsigned long task_size; EXPORT_SYMBOL(task_size); @@ -283,6 +284,10 @@ int __init linux_main(int argc, char **argv) add_arg(DEFAULT_COMMAND_LINE_CONSOLE); host_task_size = os_get_top_address(); + /* reserve two pages for the stubs */ + host_task_size -= 2 * PAGE_SIZE; + stub_start = host_task_size; + /* * TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps * out diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c index c546d16f8dfe..3b4975ee67e2 100644 --- a/arch/um/os-Linux/skas/mem.c +++ b/arch/um/os-Linux/skas/mem.c @@ -40,6 +40,8 @@ static int __init init_syscall_regs(void) syscall_regs[REGS_IP_INDEX] = STUB_CODE + ((unsigned long) batch_syscall_stub - (unsigned long) __syscall_stub_start); + syscall_regs[REGS_SP_INDEX] = STUB_DATA; + return 0; } diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index 0621d521208e..fba674fac8b7 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -28,6 +28,54 @@ int is_skas_winch(int pid, int fd, void *data) return pid == getpgrp(); } +static const char *ptrace_reg_name(int idx) 
+{ +#define R(n) case HOST_##n: return #n + + switch (idx) { +#ifdef __x86_64__ + R(BX); + R(CX); + R(DI); + R(SI); + R(DX); + R(BP); + R(AX); + R(R8); + R(R9); + R(R10); + R(R11); + R(R12); + R(R13); + R(R14); + R(R15); + R(ORIG_AX); + R(CS); + R(SS); + R(EFLAGS); +#elif defined(__i386__) + R(IP); + R(SP); + R(EFLAGS); + R(AX); + R(BX); + R(CX); + R(DX); + R(SI); + R(DI); + R(BP); + R(CS); + R(SS); + R(DS); + R(FS); + R(ES); + R(GS); + R(ORIG_AX); +#endif + } + return ""; +} + static int ptrace_dump_regs(int pid) { unsigned long regs[MAX_REG_NR]; @@ -37,8 +85,11 @@ static int ptrace_dump_regs(int pid) return -errno; printk(UM_KERN_ERR "Stub registers -\n"); - for (i = 0; i < ARRAY_SIZE(regs); i++) - printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]); + for (i = 0; i < ARRAY_SIZE(regs); i++) { + const char *regname = ptrace_reg_name(i); + + printk(UM_KERN_ERR "\t%s\t(%2d): %lx\n", regname, i, regs[i]); + } return 0; } @@ -200,10 +251,6 @@ static int userspace_tramp(void *stack) signal(SIGTERM, SIG_DFL); signal(SIGWINCH, SIG_IGN); - /* - * This has a pte, but it can't be mapped in with the usual - * tlb_flush mechanism because this is part of that mechanism - */ fd = phys_mapping(to_phys(__syscall_stub_start), &offset); addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE, PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset); @@ -249,6 +296,7 @@ static int userspace_tramp(void *stack) } int userspace_pid[NR_CPUS]; +int kill_userspace_mm[NR_CPUS]; /** * start_userspace() - prepare a new userspace process @@ -342,6 +390,8 @@ void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs) interrupt_end(); while (1) { + if (kill_userspace_mm[0]) + fatal_sigsegv(); /* * This can legitimately fail if the process loads a @@ -491,8 +541,14 @@ int copy_context_skas0(unsigned long new_stack, int pid) * and child's mmap2 calls */ *data = ((struct stub_data) { - .offset = MMAP_OFFSET(new_offset), - .fd = new_fd + .offset = MMAP_OFFSET(new_offset), + .fd = new_fd, + .parent_err = -ESRCH, + .child_err = 0, + }); + + *child_data = ((struct stub_data) { + .child_err = -ESRCH, }); err = ptrace_setregs(pid, thread_regs); @@ -510,9 +566,6 @@ int copy_context_skas0(unsigned long new_stack, int pid) return err; } - /* set a well known return code for detection of child write failure */ - child_data->err = 12345678; - /* * Wait, until parent has finished its work: read child's pid from * parent's stack, and check, if bad result. @@ -527,7 +580,7 @@ int copy_context_skas0(unsigned long new_stack, int pid) wait_stub_done(pid); - pid = data->err; + pid = data->parent_err; if (pid < 0) { printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports " "error %d\n", -pid); @@ -539,10 +592,10 @@ int copy_context_skas0(unsigned long new_stack, int pid) * child's stack and check it. 
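
[Editor's note] ptrace_reg_name() above builds its whole switch with one macro: ## pastes the register token onto HOST_ to form the enum constant, while # stringifies the same token for the returned name. The technique in a self-contained program, with hypothetical HOST_* constants standing in for the real ptrace register indices:

/*
 * Paste-and-stringify: one macro emits both the case label and
 * the human-readable name, so the table cannot drift out of sync.
 */
#include <stdio.h>

enum host_reg { HOST_AX, HOST_BX, HOST_CX };

static const char *reg_name(enum host_reg r)
{
#define R(n) case HOST_##n: return #n
        switch (r) {
        R(AX);
        R(BX);
        R(CX);
        }
#undef R
        return "?";
}

int main(void)
{
        printf("%s %s\n", reg_name(HOST_AX), reg_name(HOST_CX));
        return 0;
}
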
*/ wait_stub_done(pid); - if (child_data->err != STUB_DATA) { - printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports " - "error %ld\n", child_data->err); - err = child_data->err; + if (child_data->child_err != STUB_DATA) { + printk(UM_KERN_ERR "copy_context_skas0 - stub-child %d reports " + "error %ld\n", pid, child_data->child_err); + err = child_data->child_err; goto out_kill; } @@ -663,4 +716,5 @@ void reboot_skas(void) void __switch_mm(struct mm_id *mm_idp) { userspace_pid[0] = mm_idp->u.pid; + kill_userspace_mm[0] = mm_idp->kill; } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 595193bc2d31..2792879d398e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -97,6 +97,8 @@ config X86 select ARCH_SUPPORTS_DEBUG_PAGEALLOC select ARCH_SUPPORTS_NUMA_BALANCING if X86_64 select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096 + select ARCH_SUPPORTS_LTO_CLANG if X86_64 + select ARCH_SUPPORTS_LTO_CLANG_THIN if X86_64 select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS @@ -149,6 +151,7 @@ config X86 select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN if X86_64 select HAVE_ARCH_KASAN_VMALLOC if X86_64 + select HAVE_ARCH_KFENCE select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT @@ -169,6 +172,7 @@ config X86 select HAVE_CONTEXT_TRACKING if X86_64 select HAVE_CONTEXT_TRACKING_OFFSTACK if HAVE_CONTEXT_TRACKING select HAVE_C_RECORDMCOUNT + select HAVE_OBJTOOL_MCOUNT if STACK_VALIDATION select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE @@ -188,6 +192,7 @@ config X86 select HAVE_HW_BREAKPOINT select HAVE_IDE select HAVE_IOREMAP_PROT + select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 select HAVE_IRQ_TIME_ACCOUNTING select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_GZIP @@ -220,6 +225,7 @@ config X86 select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION select HAVE_FUNCTION_ARG_ACCESS_API + select HAVE_SOFTIRQ_ON_OWN_STACK select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR select HAVE_STACK_VALIDATION if X86_64 select HAVE_STATIC_CALL @@ -445,7 +451,7 @@ config X86_X2APIC If you don't know what to do here, say N. config X86_MPPARSE - bool "Enable MPS table" if ACPI || SFI + bool "Enable MPS table" if ACPI default y depends on X86_LOCAL_APIC help @@ -604,7 +610,6 @@ config X86_INTEL_MID depends on PCI depends on X86_64 || (PCI_GOANY && X86_32) depends on X86_IO_APIC - select SFI select I2C select DW_APB_TIMER select APB_TIMER @@ -893,18 +898,6 @@ config HPET_EMULATE_RTC def_bool y depends on HPET_TIMER && (RTC_DRV_CMOS=m || RTC_DRV_CMOS=y) -config APB_TIMER - def_bool y if X86_INTEL_MID - prompt "Intel MID APB Timer Support" if X86_INTEL_MID - select DW_APB_TIMER - depends on X86_INTEL_MID && SFI - help - APB timer is the replacement for 8254, HPET on X86 MID platforms. - The APBT provides a stable time base on SMP - systems, unlike the TSC, but it is more expensive to access, - as it is off-chip. APB timers are always running regardless of CPU - C states, they are used as per CPU clockevent device when possible. - # Mark as expert because too many people got it wrong. # The code disables itself when not needed.
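
[Editor's note] Stepping back to the copy_context_skas0() change above: the stub data now carries separate parent_err and child_err slots, pre-seeded with the sentinel -ESRCH, so a slot that still holds the sentinel later means that side never ran at all. A hedged model of the same handshake idea using plain fork() and an anonymous shared mapping instead of UML's stub pages:

/*
 * Parent and child report through distinct slots in a shared page,
 * each initialized to a sentinel that survives only if that side
 * never got to run.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

struct shared_data {
        long parent_err;
        long child_err;
};

int main(void)
{
        struct shared_data *data = mmap(NULL, sizeof(*data),
                                        PROT_READ | PROT_WRITE,
                                        MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (data == MAP_FAILED)
                return 1;

        /* sentinels: still present later == that side never ran */
        data->parent_err = -ESRCH;
        data->child_err = -ESRCH;

        pid_t pid = fork();
        if (pid == 0) {
                data->child_err = 0;    /* child reports success */
                _exit(0);
        }
        data->parent_err = (pid < 0) ? -errno : 0;
        waitpid(pid, NULL, 0);

        printf("parent_err=%ld child_err=%ld\n",
               data->parent_err, data->child_err);
        return 0;
}
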
config DMI @@ -2466,8 +2459,6 @@ source "kernel/power/Kconfig" source "drivers/acpi/Kconfig" -source "drivers/sfi/Kconfig" - config X86_APM_BOOT def_bool y depends on APM @@ -2654,7 +2645,7 @@ config PCI_DIRECT config PCI_MMCONFIG bool "Support mmconfig PCI config space access" if X86_64 default y - depends on PCI && (ACPI || SFI || JAILHOUSE_GUEST) + depends on PCI && (ACPI || JAILHOUSE_GUEST) depends on X86_64 || (PCI_GOANY || PCI_GOMMCONFIG) config PCI_OLPC diff --git a/arch/x86/Makefile b/arch/x86/Makefile index b797f1561943..2d6d5a28c3bf 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -169,6 +169,11 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) endif +ifdef CONFIG_LTO_CLANG +KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \ + -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8) +endif + # Workaround for a gcc prelease that unfortunately was shipped in a suse release KBUILD_CFLAGS += -Wno-sign-compare # diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 78210793d357..9c9c4a888b1d 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -50,7 +50,6 @@ CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y -# CONFIG_UNUSED_SYMBOLS is not set CONFIG_BINFMT_MISC=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 9936528e1939..b60bd2d86034 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -48,7 +48,6 @@ CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y -# CONFIG_UNUSED_SYMBOLS is not set CONFIG_BINFMT_MISC=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index a31de0c6ccde..b28e36b7c96b 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -4,8 +4,6 @@ OBJECT_FILES_NON_STANDARD := y -obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o - obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index d1436c37008b..4e3972570916 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -43,10 +43,6 @@ #ifdef __x86_64__ # constants in mergeable sections, linker can reorder and merge -.section .rodata.cst16.gf128mul_x_ble_mask, "aM", @progbits, 16 -.align 16 -.Lgf128mul_x_ble_mask: - .octa 0x00000000000000010000000000000087 .section .rodata.cst16.POLY, "aM", @progbits, 16 .align 16 POLY: .octa 0xC2000000000000000000000000000001 @@ -146,7 +142,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff #define CTR %xmm11 #define INC %xmm12 -#define GF128MUL_MASK %xmm10 +#define GF128MUL_MASK %xmm7 #ifdef __x86_64__ #define AREG %rax @@ -2577,13 +2573,140 @@ SYM_FUNC_START(aesni_cbc_dec) ret SYM_FUNC_END(aesni_cbc_dec) -#ifdef __x86_64__ +/* + * void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, + * size_t len, u8 *iv) + */ +SYM_FUNC_START(aesni_cts_cbc_enc) + FRAME_BEGIN +#ifndef __x86_64__ + pushl IVP + pushl LEN + pushl KEYP + pushl KLEN + movl (FRAME_OFFSET+20)(%esp), KEYP # ctx + movl (FRAME_OFFSET+24)(%esp), OUTP # dst + movl (FRAME_OFFSET+28)(%esp), INP # src + movl (FRAME_OFFSET+32)(%esp), LEN # len + movl (FRAME_OFFSET+36)(%esp), IVP # iv + lea .Lcts_permute_table, T1 +#else + lea 
.Lcts_permute_table(%rip), T1 +#endif + mov 480(KEYP), KLEN + movups (IVP), STATE + sub $16, LEN + mov T1, IVP + add $32, IVP + add LEN, T1 + sub LEN, IVP + movups (T1), %xmm4 + movups (IVP), %xmm5 + + movups (INP), IN1 + add LEN, INP + movups (INP), IN2 + + pxor IN1, STATE + call _aesni_enc1 + + pshufb %xmm5, IN2 + pxor STATE, IN2 + pshufb %xmm4, STATE + add OUTP, LEN + movups STATE, (LEN) + + movaps IN2, STATE + call _aesni_enc1 + movups STATE, (OUTP) + +#ifndef __x86_64__ + popl KLEN + popl KEYP + popl LEN + popl IVP +#endif + FRAME_END + ret +SYM_FUNC_END(aesni_cts_cbc_enc) + +/* + * void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, + * size_t len, u8 *iv) + */ +SYM_FUNC_START(aesni_cts_cbc_dec) + FRAME_BEGIN +#ifndef __x86_64__ + pushl IVP + pushl LEN + pushl KEYP + pushl KLEN + movl (FRAME_OFFSET+20)(%esp), KEYP # ctx + movl (FRAME_OFFSET+24)(%esp), OUTP # dst + movl (FRAME_OFFSET+28)(%esp), INP # src + movl (FRAME_OFFSET+32)(%esp), LEN # len + movl (FRAME_OFFSET+36)(%esp), IVP # iv + lea .Lcts_permute_table, T1 +#else + lea .Lcts_permute_table(%rip), T1 +#endif + mov 480(KEYP), KLEN + add $240, KEYP + movups (IVP), IV + sub $16, LEN + mov T1, IVP + add $32, IVP + add LEN, T1 + sub LEN, IVP + movups (T1), %xmm4 + + movups (INP), STATE + add LEN, INP + movups (INP), IN1 + + call _aesni_dec1 + movaps STATE, IN2 + pshufb %xmm4, STATE + pxor IN1, STATE + + add OUTP, LEN + movups STATE, (LEN) + + movups (IVP), %xmm0 + pshufb %xmm0, IN1 + pblendvb IN2, IN1 + movaps IN1, STATE + call _aesni_dec1 + + pxor IV, STATE + movups STATE, (OUTP) + +#ifndef __x86_64__ + popl KLEN + popl KEYP + popl LEN + popl IVP +#endif + FRAME_END + ret +SYM_FUNC_END(aesni_cts_cbc_dec) + .pushsection .rodata .align 16 +.Lcts_permute_table: + .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 + .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 + .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 + .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f + .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 + .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 +#ifdef __x86_64__ .Lbswap_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 +#endif .popsection +#ifdef __x86_64__ /* * _aesni_inc_init: internal ABI * setup registers used by _aesni_inc @@ -2696,6 +2819,14 @@ SYM_FUNC_START(aesni_ctr_enc) ret SYM_FUNC_END(aesni_ctr_enc) +#endif + +.section .rodata.cst16.gf128mul_x_ble_mask, "aM", @progbits, 16 +.align 16 +.Lgf128mul_x_ble_mask: + .octa 0x00000000000000010000000000000087 +.previous + /* * _aesni_gf128mul_x_ble: internal ABI * Multiply in GF(2^128) for XTS IVs @@ -2708,120 +2839,325 @@ SYM_FUNC_END(aesni_ctr_enc) * CTR: == temporary value */ #define _aesni_gf128mul_x_ble() \ - pshufd $0x13, IV, CTR; \ + pshufd $0x13, IV, KEY; \ paddq IV, IV; \ - psrad $31, CTR; \ - pand GF128MUL_MASK, CTR; \ - pxor CTR, IV; + psrad $31, KEY; \ + pand GF128MUL_MASK, KEY; \ + pxor KEY, IV; /* - * void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *dst, - * const u8 *src, bool enc, le128 *iv) + * void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, + * const u8 *src, unsigned int len, le128 *iv) */ -SYM_FUNC_START(aesni_xts_crypt8) +SYM_FUNC_START(aesni_xts_encrypt) FRAME_BEGIN - testb %cl, %cl - movl $0, %ecx - movl $240, %r10d - leaq _aesni_enc4, %r11 - leaq _aesni_dec4, %rax - cmovel %r10d, %ecx - cmoveq %rax, %r11 - +#ifndef __x86_64__ + pushl IVP + pushl LEN + pushl KEYP + pushl KLEN + movl (FRAME_OFFSET+20)(%esp), KEYP # ctx + movl (FRAME_OFFSET+24)(%esp), 
OUTP # dst + movl (FRAME_OFFSET+28)(%esp), INP # src + movl (FRAME_OFFSET+32)(%esp), LEN # len + movl (FRAME_OFFSET+36)(%esp), IVP # iv movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK +#else + movdqa .Lgf128mul_x_ble_mask(%rip), GF128MUL_MASK +#endif movups (IVP), IV mov 480(KEYP), KLEN - addq %rcx, KEYP + +.Lxts_enc_loop4: + sub $64, LEN + jl .Lxts_enc_1x movdqa IV, STATE1 - movdqu 0x00(INP), INC - pxor INC, STATE1 + movdqu 0x00(INP), IN + pxor IN, STATE1 movdqu IV, 0x00(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE2 - movdqu 0x10(INP), INC - pxor INC, STATE2 + movdqu 0x10(INP), IN + pxor IN, STATE2 movdqu IV, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 - movdqu 0x20(INP), INC - pxor INC, STATE3 + movdqu 0x20(INP), IN + pxor IN, STATE3 movdqu IV, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 - movdqu 0x30(INP), INC - pxor INC, STATE4 + movdqu 0x30(INP), IN + pxor IN, STATE4 movdqu IV, 0x30(OUTP) - CALL_NOSPEC r11 + call _aesni_enc4 - movdqu 0x00(OUTP), INC - pxor INC, STATE1 + movdqu 0x00(OUTP), IN + pxor IN, STATE1 movdqu STATE1, 0x00(OUTP) + movdqu 0x10(OUTP), IN + pxor IN, STATE2 + movdqu STATE2, 0x10(OUTP) + + movdqu 0x20(OUTP), IN + pxor IN, STATE3 + movdqu STATE3, 0x20(OUTP) + + movdqu 0x30(OUTP), IN + pxor IN, STATE4 + movdqu STATE4, 0x30(OUTP) + _aesni_gf128mul_x_ble() - movdqa IV, STATE1 - movdqu 0x40(INP), INC - pxor INC, STATE1 - movdqu IV, 0x40(OUTP) - movdqu 0x10(OUTP), INC - pxor INC, STATE2 - movdqu STATE2, 0x10(OUTP) + add $64, INP + add $64, OUTP + test LEN, LEN + jnz .Lxts_enc_loop4 + +.Lxts_enc_ret_iv: + movups IV, (IVP) + +.Lxts_enc_ret: +#ifndef __x86_64__ + popl KLEN + popl KEYP + popl LEN + popl IVP +#endif + FRAME_END + ret + +.Lxts_enc_1x: + add $64, LEN + jz .Lxts_enc_ret_iv + sub $16, LEN + jl .Lxts_enc_cts4 +.Lxts_enc_loop1: + movdqu (INP), STATE + pxor IV, STATE + call _aesni_enc1 + pxor IV, STATE _aesni_gf128mul_x_ble() - movdqa IV, STATE2 - movdqu 0x50(INP), INC - pxor INC, STATE2 - movdqu IV, 0x50(OUTP) - movdqu 0x20(OUTP), INC - pxor INC, STATE3 - movdqu STATE3, 0x20(OUTP) + test LEN, LEN + jz .Lxts_enc_out + + add $16, INP + sub $16, LEN + jl .Lxts_enc_cts1 + + movdqu STATE, (OUTP) + add $16, OUTP + jmp .Lxts_enc_loop1 + +.Lxts_enc_out: + movdqu STATE, (OUTP) + jmp .Lxts_enc_ret_iv + +.Lxts_enc_cts4: + movdqa STATE4, STATE + sub $16, OUTP + +.Lxts_enc_cts1: +#ifndef __x86_64__ + lea .Lcts_permute_table, T1 +#else + lea .Lcts_permute_table(%rip), T1 +#endif + add LEN, INP /* rewind input pointer */ + add $16, LEN /* # bytes in final block */ + movups (INP), IN1 + + mov T1, IVP + add $32, IVP + add LEN, T1 + sub LEN, IVP + add OUTP, LEN + + movups (T1), %xmm4 + movaps STATE, IN2 + pshufb %xmm4, STATE + movups STATE, (LEN) + + movups (IVP), %xmm0 + pshufb %xmm0, IN1 + pblendvb IN2, IN1 + movaps IN1, STATE + + pxor IV, STATE + call _aesni_enc1 + pxor IV, STATE + + movups STATE, (OUTP) + jmp .Lxts_enc_ret +SYM_FUNC_END(aesni_xts_encrypt) + +/* + * void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, + * const u8 *src, unsigned int len, le128 *iv) + */ +SYM_FUNC_START(aesni_xts_decrypt) + FRAME_BEGIN +#ifndef __x86_64__ + pushl IVP + pushl LEN + pushl KEYP + pushl KLEN + movl (FRAME_OFFSET+20)(%esp), KEYP # ctx + movl (FRAME_OFFSET+24)(%esp), OUTP # dst + movl (FRAME_OFFSET+28)(%esp), INP # src + movl (FRAME_OFFSET+32)(%esp), LEN # len + movl (FRAME_OFFSET+36)(%esp), IVP # iv + movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK +#else + movdqa .Lgf128mul_x_ble_mask(%rip), GF128MUL_MASK +#endif + movups (IVP), IV + + mov 480(KEYP), KLEN + add 
$240, KEYP + + test $15, LEN + jz .Lxts_dec_loop4 + sub $16, LEN + +.Lxts_dec_loop4: + sub $64, LEN + jl .Lxts_dec_1x + + movdqa IV, STATE1 + movdqu 0x00(INP), IN + pxor IN, STATE1 + movdqu IV, 0x00(OUTP) _aesni_gf128mul_x_ble() - movdqa IV, STATE3 - movdqu 0x60(INP), INC - pxor INC, STATE3 - movdqu IV, 0x60(OUTP) + movdqa IV, STATE2 + movdqu 0x10(INP), IN + pxor IN, STATE2 + movdqu IV, 0x10(OUTP) - movdqu 0x30(OUTP), INC - pxor INC, STATE4 - movdqu STATE4, 0x30(OUTP) + _aesni_gf128mul_x_ble() + movdqa IV, STATE3 + movdqu 0x20(INP), IN + pxor IN, STATE3 + movdqu IV, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 - movdqu 0x70(INP), INC - pxor INC, STATE4 - movdqu IV, 0x70(OUTP) + movdqu 0x30(INP), IN + pxor IN, STATE4 + movdqu IV, 0x30(OUTP) - _aesni_gf128mul_x_ble() - movups IV, (IVP) + call _aesni_dec4 - CALL_NOSPEC r11 + movdqu 0x00(OUTP), IN + pxor IN, STATE1 + movdqu STATE1, 0x00(OUTP) - movdqu 0x40(OUTP), INC - pxor INC, STATE1 - movdqu STATE1, 0x40(OUTP) + movdqu 0x10(OUTP), IN + pxor IN, STATE2 + movdqu STATE2, 0x10(OUTP) - movdqu 0x50(OUTP), INC - pxor INC, STATE2 - movdqu STATE2, 0x50(OUTP) + movdqu 0x20(OUTP), IN + pxor IN, STATE3 + movdqu STATE3, 0x20(OUTP) - movdqu 0x60(OUTP), INC - pxor INC, STATE3 - movdqu STATE3, 0x60(OUTP) + movdqu 0x30(OUTP), IN + pxor IN, STATE4 + movdqu STATE4, 0x30(OUTP) - movdqu 0x70(OUTP), INC - pxor INC, STATE4 - movdqu STATE4, 0x70(OUTP) + _aesni_gf128mul_x_ble() + + add $64, INP + add $64, OUTP + test LEN, LEN + jnz .Lxts_dec_loop4 + +.Lxts_dec_ret_iv: + movups IV, (IVP) +.Lxts_dec_ret: +#ifndef __x86_64__ + popl KLEN + popl KEYP + popl LEN + popl IVP +#endif FRAME_END ret -SYM_FUNC_END(aesni_xts_crypt8) +.Lxts_dec_1x: + add $64, LEN + jz .Lxts_dec_ret_iv + +.Lxts_dec_loop1: + movdqu (INP), STATE + + add $16, INP + sub $16, LEN + jl .Lxts_dec_cts1 + + pxor IV, STATE + call _aesni_dec1 + pxor IV, STATE + _aesni_gf128mul_x_ble() + + test LEN, LEN + jz .Lxts_dec_out + + movdqu STATE, (OUTP) + add $16, OUTP + jmp .Lxts_dec_loop1 + +.Lxts_dec_out: + movdqu STATE, (OUTP) + jmp .Lxts_dec_ret_iv + +.Lxts_dec_cts1: + movdqa IV, STATE4 + _aesni_gf128mul_x_ble() + + pxor IV, STATE + call _aesni_dec1 + pxor IV, STATE + +#ifndef __x86_64__ + lea .Lcts_permute_table, T1 +#else + lea .Lcts_permute_table(%rip), T1 #endif + add LEN, INP /* rewind input pointer */ + add $16, LEN /* # bytes in final block */ + movups (INP), IN1 + + mov T1, IVP + add $32, IVP + add LEN, T1 + sub LEN, IVP + add OUTP, LEN + + movups (T1), %xmm4 + movaps STATE, IN2 + pshufb %xmm4, STATE + movups STATE, (LEN) + + movups (IVP), %xmm0 + pshufb %xmm0, IN1 + pblendvb IN2, IN1 + movaps IN1, STATE + + pxor STATE4, STATE + call _aesni_dec1 + pxor STATE4, STATE + + movups STATE, (OUTP) + jmp .Lxts_dec_ret +SYM_FUNC_END(aesni_xts_decrypt) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index ad8a7188a2bf..2144e54a6c89 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -31,11 +31,10 @@ #include <crypto/internal/aead.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> +#include <linux/jump_label.h> #include <linux/workqueue.h> #include <linux/spinlock.h> -#ifdef CONFIG_X86_64 -#include <asm/crypto/glue_helper.h> -#endif +#include <linux/static_call.h> #define AESNI_ALIGN 16 @@ -93,62 +92,25 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int 
len, u8 *iv); +asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len, u8 *iv); +asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len, u8 *iv); #define AVX_GEN2_OPTSIZE 640 #define AVX_GEN4_OPTSIZE 4096 +asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len, u8 *iv); + +asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len, u8 *iv); + #ifdef CONFIG_X86_64 -static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out, - const u8 *in, unsigned int len, u8 *iv); asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); - -asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out, - const u8 *in, bool enc, le128 *iv); - -/* asmlinkage void aesni_gcm_enc() - * void *ctx, AES Key schedule. Starts on a 16 byte boundary. - * struct gcm_context_data. May be uninitialized. - * u8 *out, Ciphertext output. Encrypt in-place is allowed. - * const u8 *in, Plaintext input - * unsigned long plaintext_len, Length of data in bytes for encryption. - * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001. - * 16-byte aligned pointer. - * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. - * const u8 *aad, Additional Authentication Data (AAD) - * unsigned long aad_len, Length of AAD in bytes. - * u8 *auth_tag, Authenticated Tag output. - * unsigned long auth_tag_len), Authenticated Tag Length in bytes. - * Valid values are 16 (most likely), 12 or 8. - */ -asmlinkage void aesni_gcm_enc(void *ctx, - struct gcm_context_data *gdata, u8 *out, - const u8 *in, unsigned long plaintext_len, u8 *iv, - u8 *hash_subkey, const u8 *aad, unsigned long aad_len, - u8 *auth_tag, unsigned long auth_tag_len); - -/* asmlinkage void aesni_gcm_dec() - * void *ctx, AES Key schedule. Starts on a 16 byte boundary. - * struct gcm_context_data. May be uninitialized. - * u8 *out, Plaintext output. Decrypt in-place is allowed. - * const u8 *in, Ciphertext input - * unsigned long ciphertext_len, Length of data in bytes for decryption. - * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001. - * 16-byte aligned pointer. - * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. - * const u8 *aad, Additional Authentication Data (AAD) - * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going - * to be 8 or 12 bytes - * u8 *auth_tag, Authenticated Tag output. - * unsigned long auth_tag_len) Authenticated Tag Length in bytes. - * Valid values are 16 (most likely), 12 or 8. 
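
[Editor's note] The aesni_ctr_enc_tfm function pointer deleted here is replaced by a static call (see DEFINE_STATIC_CALL in the next hunk), and the aesni_gcm_tfm_s ops table gives way to the gcm_use_avx/gcm_use_avx2 static branches: the best implementation is chosen once at init rather than re-checked per request. A userspace approximation of that select-once dispatch; the kernel primitives go further and patch the call site itself, which an ordinary function pointer cannot express:

/*
 * Pick an implementation once at init, then call it directly on
 * every request with no per-call feature test.
 */
#include <stdbool.h>
#include <stdio.h>

static void ctr_generic(const char *buf) { printf("generic: %s\n", buf); }
static void ctr_avx(const char *buf)     { printf("avx:     %s\n", buf); }

/* default target; updated exactly once, like static_call_update() */
static void (*ctr_enc_tfm)(const char *) = ctr_generic;

static bool cpu_has_avx(void)
{
        return true;    /* stand-in for boot_cpu_has(X86_FEATURE_AVX) */
}

static void dispatch_init(void)
{
        if (cpu_has_avx())
                ctr_enc_tfm = ctr_avx;  /* decided once at init */
}

int main(void)
{
        dispatch_init();
        ctr_enc_tfm("block 0");  /* no feature checks per call */
        ctr_enc_tfm("block 1");
        return 0;
}
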
- */ -asmlinkage void aesni_gcm_dec(void *ctx, - struct gcm_context_data *gdata, u8 *out, - const u8 *in, unsigned long ciphertext_len, u8 *iv, - u8 *hash_subkey, const u8 *aad, unsigned long aad_len, - u8 *auth_tag, unsigned long auth_tag_len); +DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc); /* Scatter / Gather routines, with args similar to above */ asmlinkage void aesni_gcm_init(void *ctx, @@ -167,24 +129,6 @@ asmlinkage void aesni_gcm_finalize(void *ctx, struct gcm_context_data *gdata, u8 *auth_tag, unsigned long auth_tag_len); -static const struct aesni_gcm_tfm_s { - void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv, - u8 *hash_subkey, const u8 *aad, unsigned long aad_len); - void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out, - const u8 *in, unsigned long plaintext_len); - void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out, - const u8 *in, unsigned long ciphertext_len); - void (*finalize)(void *ctx, struct gcm_context_data *gdata, - u8 *auth_tag, unsigned long auth_tag_len); -} *aesni_gcm_tfm; - -static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = { - .init = &aesni_gcm_init, - .enc_update = &aesni_gcm_enc_update, - .dec_update = &aesni_gcm_dec_update, - .finalize = &aesni_gcm_finalize, -}; - asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv, void *keys, u8 *out, unsigned int num_bytes); asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv, @@ -214,25 +158,6 @@ asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx, struct gcm_context_data *gdata, u8 *auth_tag, unsigned long auth_tag_len); -asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, - struct gcm_context_data *gdata, u8 *out, - const u8 *in, unsigned long plaintext_len, u8 *iv, - const u8 *aad, unsigned long aad_len, - u8 *auth_tag, unsigned long auth_tag_len); - -asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, - struct gcm_context_data *gdata, u8 *out, - const u8 *in, unsigned long ciphertext_len, u8 *iv, - const u8 *aad, unsigned long aad_len, - u8 *auth_tag, unsigned long auth_tag_len); - -static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = { - .init = &aesni_gcm_init_avx_gen2, - .enc_update = &aesni_gcm_enc_update_avx_gen2, - .dec_update = &aesni_gcm_dec_update_avx_gen2, - .finalize = &aesni_gcm_finalize_avx_gen2, -}; - /* * asmlinkage void aesni_gcm_init_avx_gen4() * gcm_data *my_ctx_data, context data @@ -256,24 +181,8 @@ asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx, struct gcm_context_data *gdata, u8 *auth_tag, unsigned long auth_tag_len); -asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, - struct gcm_context_data *gdata, u8 *out, - const u8 *in, unsigned long plaintext_len, u8 *iv, - const u8 *aad, unsigned long aad_len, - u8 *auth_tag, unsigned long auth_tag_len); - -asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, - struct gcm_context_data *gdata, u8 *out, - const u8 *in, unsigned long ciphertext_len, u8 *iv, - const u8 *aad, unsigned long aad_len, - u8 *auth_tag, unsigned long auth_tag_len); - -static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = { - .init = &aesni_gcm_init_avx_gen4, - .enc_update = &aesni_gcm_enc_update_avx_gen4, - .dec_update = &aesni_gcm_dec_update_avx_gen4, - .finalize = &aesni_gcm_finalize_avx_gen4, -}; +static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2); static inline struct aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm) @@ -374,16 +283,16 @@ static int ecb_encrypt(struct skcipher_request 
*req) unsigned int nbytes; int err; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = skcipher_walk_done(&walk, nbytes); } - kernel_fpu_end(); return err; } @@ -396,16 +305,16 @@ static int ecb_decrypt(struct skcipher_request *req) unsigned int nbytes; int err; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = skcipher_walk_done(&walk, nbytes); } - kernel_fpu_end(); return err; } @@ -418,16 +327,16 @@ static int cbc_encrypt(struct skcipher_request *req) unsigned int nbytes; int err; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = skcipher_walk_done(&walk, nbytes); } - kernel_fpu_end(); return err; } @@ -440,36 +349,133 @@ static int cbc_decrypt(struct skcipher_request *req) unsigned int nbytes; int err; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = skcipher_walk_done(&walk, nbytes); } - kernel_fpu_end(); return err; } -#ifdef CONFIG_X86_64 -static void ctr_crypt_final(struct crypto_aes_ctx *ctx, - struct skcipher_walk *walk) +static int cts_cbc_encrypt(struct skcipher_request *req) { - u8 *ctrblk = walk->iv; - u8 keystream[AES_BLOCK_SIZE]; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - unsigned int nbytes = walk->nbytes; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); + int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2; + struct scatterlist *src = req->src, *dst = req->dst; + struct scatterlist sg_src[2], sg_dst[2]; + struct skcipher_request subreq; + struct skcipher_walk walk; + int err; + + skcipher_request_set_tfm(&subreq, tfm); + skcipher_request_set_callback(&subreq, skcipher_request_flags(req), + NULL, NULL); + + if (req->cryptlen <= AES_BLOCK_SIZE) { + if (req->cryptlen < AES_BLOCK_SIZE) + return -EINVAL; + cbc_blocks = 1; + } + + if (cbc_blocks > 0) { + skcipher_request_set_crypt(&subreq, req->src, req->dst, + cbc_blocks * AES_BLOCK_SIZE, + req->iv); + + err = cbc_encrypt(&subreq); + if (err) + return err; + + if (req->cryptlen == AES_BLOCK_SIZE) + return 0; + + dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen); + if (req->dst != req->src) + dst = scatterwalk_ffwd(sg_dst, req->dst, + subreq.cryptlen); + } + + /* handle ciphertext stealing */ + skcipher_request_set_crypt(&subreq, src, dst, + req->cryptlen - cbc_blocks * AES_BLOCK_SIZE, + req->iv); + + err = skcipher_walk_virt(&walk, &subreq, false); + if (err) + return err; + + kernel_fpu_begin(); + aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, 
walk.src.virt.addr, + walk.nbytes, walk.iv); + kernel_fpu_end(); + + return skcipher_walk_done(&walk, 0); +} + +static int cts_cbc_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); + int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2; + struct scatterlist *src = req->src, *dst = req->dst; + struct scatterlist sg_src[2], sg_dst[2]; + struct skcipher_request subreq; + struct skcipher_walk walk; + int err; + + skcipher_request_set_tfm(&subreq, tfm); + skcipher_request_set_callback(&subreq, skcipher_request_flags(req), + NULL, NULL); + + if (req->cryptlen <= AES_BLOCK_SIZE) { + if (req->cryptlen < AES_BLOCK_SIZE) + return -EINVAL; + cbc_blocks = 1; + } + + if (cbc_blocks > 0) { + skcipher_request_set_crypt(&subreq, req->src, req->dst, + cbc_blocks * AES_BLOCK_SIZE, + req->iv); - aesni_enc(ctx, keystream, ctrblk); - crypto_xor_cpy(dst, keystream, src, nbytes); + err = cbc_decrypt(&subreq); + if (err) + return err; - crypto_inc(ctrblk, AES_BLOCK_SIZE); + if (req->cryptlen == AES_BLOCK_SIZE) + return 0; + + dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen); + if (req->dst != req->src) + dst = scatterwalk_ffwd(sg_dst, req->dst, + subreq.cryptlen); + } + + /* handle ciphertext stealing */ + skcipher_request_set_crypt(&subreq, src, dst, + req->cryptlen - cbc_blocks * AES_BLOCK_SIZE, + req->iv); + + err = skcipher_walk_virt(&walk, &subreq, false); + if (err) + return err; + + kernel_fpu_begin(); + aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes, walk.iv); + kernel_fpu_end(); + + return skcipher_walk_done(&walk, 0); } +#ifdef CONFIG_X86_64 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv) { @@ -491,120 +497,36 @@ static int ctr_crypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); + u8 keystream[AES_BLOCK_SIZE]; struct skcipher_walk walk; unsigned int nbytes; int err; - err = skcipher_walk_virt(&walk, req, true); + err = skcipher_walk_virt(&walk, req, false); - kernel_fpu_begin(); - while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { - aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); - nbytes &= AES_BLOCK_SIZE - 1; + while ((nbytes = walk.nbytes) > 0) { + kernel_fpu_begin(); + if (nbytes & AES_BLOCK_MASK) + static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr, + walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, + walk.iv); + nbytes &= ~AES_BLOCK_MASK; + + if (walk.nbytes == walk.total && nbytes > 0) { + aesni_enc(ctx, keystream, walk.iv); + crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - nbytes, + walk.src.virt.addr + walk.nbytes - nbytes, + keystream, nbytes); + crypto_inc(walk.iv, AES_BLOCK_SIZE); + nbytes = 0; + } + kernel_fpu_end(); err = skcipher_walk_done(&walk, nbytes); } - if (walk.nbytes) { - ctr_crypt_final(ctx, &walk); - err = skcipher_walk_done(&walk, 0); - } - kernel_fpu_end(); - return err; } -static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - /* first half of xts-key is for crypt */ - err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx, - key, keylen); - if (err) - return err; - - 
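
[Editor's note] The reworked ctr_crypt() above folds the old ctr_crypt_final() into the walk loop: full blocks go through the asm helper, and on the final step the counter block is encrypted once into a keystream buffer that is XORed over the remaining partial block, so CTR needs no padding. A sketch of that tail handling, with a dummy keystream generator standing in for aesni_enc(); this is deliberately not a real cipher:

/*
 * Encrypt a partial final block by XORing it with as many keystream
 * bytes as remain; the same operation decrypts.  A real CTR
 * implementation would also increment the counter afterwards
 * (crypto_inc() in the kernel).
 */
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* stand-in for encrypting the counter block with AES */
static void keystream_block(const unsigned char ctr[BLOCK_SIZE],
                            unsigned char out[BLOCK_SIZE])
{
        for (int i = 0; i < BLOCK_SIZE; i++)
                out[i] = ctr[i] ^ 0xA5; /* NOT a real cipher */
}

static void ctr_tail(unsigned char *dst, const unsigned char *src,
                     size_t nbytes, const unsigned char ctr[BLOCK_SIZE])
{
        unsigned char ks[BLOCK_SIZE];

        keystream_block(ctr, ks);
        for (size_t i = 0; i < nbytes; i++)     /* crypto_xor_cpy() */
                dst[i] = src[i] ^ ks[i];
}

int main(void)
{
        unsigned char ctr[BLOCK_SIZE] = { 0 };
        unsigned char in[5] = "tail", out[5], back[5];

        ctr_tail(out, in, sizeof(in), ctr);     /* encrypt 5-byte tail */
        ctr_tail(back, out, sizeof(out), ctr);  /* same op decrypts */
        printf("round-trip ok: %d\n", memcmp(in, back, sizeof(in)) == 0);
        return 0;
}
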
/* second half of xts-key is for tweak */ - return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx, - key + keylen, keylen); -} - - -static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc); -} - -static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec); -} - -static void aesni_xts_enc8(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - aesni_xts_crypt8(ctx, dst, src, true, iv); -} - -static void aesni_xts_dec8(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - aesni_xts_crypt8(ctx, dst, src, false, iv); -} - -static const struct common_glue_ctx aesni_enc_xts = { - .num_funcs = 2, - .fpu_blocks_limit = 1, - - .funcs = { { - .num_blocks = 8, - .fn_u = { .xts = aesni_xts_enc8 } - }, { - .num_blocks = 1, - .fn_u = { .xts = aesni_xts_enc } - } } -}; - -static const struct common_glue_ctx aesni_dec_xts = { - .num_funcs = 2, - .fpu_blocks_limit = 1, - - .funcs = { { - .num_blocks = 8, - .fn_u = { .xts = aesni_xts_dec8 } - }, { - .num_blocks = 1, - .fn_u = { .xts = aesni_xts_dec } - } } -}; - -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc, - aes_ctx(ctx->raw_tweak_ctx), - aes_ctx(ctx->raw_crypt_ctx), - false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc, - aes_ctx(ctx->raw_tweak_ctx), - aes_ctx(ctx->raw_crypt_ctx), - true); -} - static int rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) { @@ -681,42 +603,35 @@ static int generic_gcmaes_set_authsize(struct crypto_aead *tfm, static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, unsigned int assoclen, u8 *hash_subkey, - u8 *iv, void *aes_ctx) + u8 *iv, void *aes_ctx, u8 *auth_tag, + unsigned long auth_tag_len) { - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - unsigned long auth_tag_len = crypto_aead_authsize(tfm); - const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm; - struct gcm_context_data data AESNI_ALIGN_ATTR; - struct scatter_walk dst_sg_walk = {}; + u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8); + struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN); unsigned long left = req->cryptlen; - unsigned long len, srclen, dstlen; struct scatter_walk assoc_sg_walk; - struct scatter_walk src_sg_walk; - struct scatterlist src_start[2]; - struct scatterlist dst_start[2]; - struct scatterlist *src_sg; - struct scatterlist *dst_sg; - u8 *src, *dst, *assoc; + struct skcipher_walk walk; + bool do_avx, do_avx2; u8 *assocmem = NULL; - u8 authTag[16]; + u8 *assoc; + int err; if (!enc) left -= auth_tag_len; - if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4) - gcm_tfm = &aesni_gcm_tfm_avx_gen2; - if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2) - gcm_tfm = &aesni_gcm_tfm_sse; + do_avx = (left >= AVX_GEN2_OPTSIZE); + do_avx2 = (left >= AVX_GEN4_OPTSIZE); /* Linearize assoc, if not already linear */ - if (req->src->length >= assoclen && req->src->length && - (!PageHighMem(sg_page(req->src)) || - req->src->offset + req->src->length <= PAGE_SIZE)) { + if 
(req->src->length >= assoclen && req->src->length) { scatterwalk_start(&assoc_sg_walk, req->src); assoc = scatterwalk_map(&assoc_sg_walk); } else { + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + /* assoc can be any length, so must be on heap */ - assocmem = kmalloc(assoclen, GFP_ATOMIC); + assocmem = kmalloc(assoclen, flags); if (unlikely(!assocmem)) return -ENOMEM; assoc = assocmem; @@ -724,62 +639,15 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0); } - if (left) { - src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen); - scatterwalk_start(&src_sg_walk, src_sg); - if (req->src != req->dst) { - dst_sg = scatterwalk_ffwd(dst_start, req->dst, - req->assoclen); - scatterwalk_start(&dst_sg_walk, dst_sg); - } - } - kernel_fpu_begin(); - gcm_tfm->init(aes_ctx, &data, iv, - hash_subkey, assoc, assoclen); - if (req->src != req->dst) { - while (left) { - src = scatterwalk_map(&src_sg_walk); - dst = scatterwalk_map(&dst_sg_walk); - srclen = scatterwalk_clamp(&src_sg_walk, left); - dstlen = scatterwalk_clamp(&dst_sg_walk, left); - len = min(srclen, dstlen); - if (len) { - if (enc) - gcm_tfm->enc_update(aes_ctx, &data, - dst, src, len); - else - gcm_tfm->dec_update(aes_ctx, &data, - dst, src, len); - } - left -= len; - - scatterwalk_unmap(src); - scatterwalk_unmap(dst); - scatterwalk_advance(&src_sg_walk, len); - scatterwalk_advance(&dst_sg_walk, len); - scatterwalk_done(&src_sg_walk, 0, left); - scatterwalk_done(&dst_sg_walk, 1, left); - } - } else { - while (left) { - dst = src = scatterwalk_map(&src_sg_walk); - len = scatterwalk_clamp(&src_sg_walk, left); - if (len) { - if (enc) - gcm_tfm->enc_update(aes_ctx, &data, - src, src, len); - else - gcm_tfm->dec_update(aes_ctx, &data, - src, src, len); - } - left -= len; - scatterwalk_unmap(src); - scatterwalk_advance(&src_sg_walk, len); - scatterwalk_done(&src_sg_walk, 1, left); - } - } - gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len); + if (static_branch_likely(&gcm_use_avx2) && do_avx2) + aesni_gcm_init_avx_gen4(aes_ctx, data, iv, hash_subkey, assoc, + assoclen); + else if (static_branch_likely(&gcm_use_avx) && do_avx) + aesni_gcm_init_avx_gen2(aes_ctx, data, iv, hash_subkey, assoc, + assoclen); + else + aesni_gcm_init(aes_ctx, data, iv, hash_subkey, assoc, assoclen); kernel_fpu_end(); if (!assocmem) @@ -787,24 +655,58 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, else kfree(assocmem); - if (!enc) { - u8 authTagMsg[16]; + err = enc ? 
skcipher_walk_aead_encrypt(&walk, req, false) + : skcipher_walk_aead_decrypt(&walk, req, false); - /* Copy out original authTag */ - scatterwalk_map_and_copy(authTagMsg, req->src, - req->assoclen + req->cryptlen - - auth_tag_len, - auth_tag_len, 0); + while (walk.nbytes > 0) { + kernel_fpu_begin(); + if (static_branch_likely(&gcm_use_avx2) && do_avx2) { + if (enc) + aesni_gcm_enc_update_avx_gen4(aes_ctx, data, + walk.dst.virt.addr, + walk.src.virt.addr, + walk.nbytes); + else + aesni_gcm_dec_update_avx_gen4(aes_ctx, data, + walk.dst.virt.addr, + walk.src.virt.addr, + walk.nbytes); + } else if (static_branch_likely(&gcm_use_avx) && do_avx) { + if (enc) + aesni_gcm_enc_update_avx_gen2(aes_ctx, data, + walk.dst.virt.addr, + walk.src.virt.addr, + walk.nbytes); + else + aesni_gcm_dec_update_avx_gen2(aes_ctx, data, + walk.dst.virt.addr, + walk.src.virt.addr, + walk.nbytes); + } else if (enc) { + aesni_gcm_enc_update(aes_ctx, data, walk.dst.virt.addr, + walk.src.virt.addr, walk.nbytes); + } else { + aesni_gcm_dec_update(aes_ctx, data, walk.dst.virt.addr, + walk.src.virt.addr, walk.nbytes); + } + kernel_fpu_end(); - /* Compare generated tag with passed in tag. */ - return crypto_memneq(authTagMsg, authTag, auth_tag_len) ? - -EBADMSG : 0; + err = skcipher_walk_done(&walk, 0); } - /* Copy in the authTag */ - scatterwalk_map_and_copy(authTag, req->dst, - req->assoclen + req->cryptlen, - auth_tag_len, 1); + if (err) + return err; + + kernel_fpu_begin(); + if (static_branch_likely(&gcm_use_avx2) && do_avx2) + aesni_gcm_finalize_avx_gen4(aes_ctx, data, auth_tag, + auth_tag_len); + else if (static_branch_likely(&gcm_use_avx) && do_avx) + aesni_gcm_finalize_avx_gen2(aes_ctx, data, auth_tag, + auth_tag_len); + else + aesni_gcm_finalize(aes_ctx, data, auth_tag, auth_tag_len); + kernel_fpu_end(); return 0; } @@ -812,15 +714,47 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen, u8 *hash_subkey, u8 *iv, void *aes_ctx) { - return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, - aes_ctx); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + unsigned long auth_tag_len = crypto_aead_authsize(tfm); + u8 auth_tag[16]; + int err; + + err = gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, aes_ctx, + auth_tag, auth_tag_len); + if (err) + return err; + + scatterwalk_map_and_copy(auth_tag, req->dst, + req->assoclen + req->cryptlen, + auth_tag_len, 1); + return 0; } static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen, u8 *hash_subkey, u8 *iv, void *aes_ctx) { - return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, - aes_ctx); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + unsigned long auth_tag_len = crypto_aead_authsize(tfm); + u8 auth_tag_msg[16]; + u8 auth_tag[16]; + int err; + + err = gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, aes_ctx, + auth_tag, auth_tag_len); + if (err) + return err; + + /* Copy out original auth_tag */ + scatterwalk_map_and_copy(auth_tag_msg, req->src, + req->assoclen + req->cryptlen - auth_tag_len, + auth_tag_len, 0); + + /* Compare generated tag with passed in tag. 
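
[Editor's note] In the gcmaes_decrypt() lines that follow, the computed tag is compared with crypto_memneq(), which has no data-dependent early exit, and is wiped with memzero_explicit() on mismatch. A simplified stand-in for both, not the kernel helpers themselves; note that a plain memset on its way out of a function may be optimized away, which is exactly the problem memzero_explicit() exists to solve:

/*
 * Constant-time tag check: accumulate the XOR of every byte so the
 * comparison takes the same time wherever the tags differ, then wipe
 * the good tag before reporting failure.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* constant-time inequality: no early exit on the first difference */
static int tag_memneq(const unsigned char *a, const unsigned char *b,
                      size_t len)
{
        volatile unsigned char diff = 0;

        for (size_t i = 0; i < len; i++)
                diff |= a[i] ^ b[i];
        return diff != 0;
}

static int verify_tag(unsigned char computed[16], const unsigned char sent[16])
{
        if (tag_memneq(computed, sent, 16)) {
                /* don't leave the valid tag lying around in memory */
                memset(computed, 0, 16);        /* kernel: memzero_explicit() */
                return -EBADMSG;
        }
        return 0;
}

int main(void)
{
        unsigned char good[16] = "0123456789abcde", bad[16] = "x123456789abcde";
        unsigned char tag[16];

        memcpy(tag, good, 16);
        printf("match:    %d\n", verify_tag(tag, good));        /* 0 */
        memcpy(tag, good, 16);
        printf("mismatch: %d\n", verify_tag(tag, bad));         /* -EBADMSG */
        return 0;
}
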
*/ + if (crypto_memneq(auth_tag_msg, auth_tag, auth_tag_len)) { + memzero_explicit(auth_tag, sizeof(auth_tag)); + return -EBADMSG; + } + return 0; } static int helper_rfc4106_encrypt(struct aead_request *req) @@ -828,7 +762,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); - u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); + u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8); + u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN); unsigned int i; __be32 counter = cpu_to_be32(1); @@ -855,7 +790,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); - u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); + u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8); + u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN); unsigned int i; if (unlikely(req->assoclen != 16 && req->assoclen != 20)) @@ -877,6 +813,128 @@ static int helper_rfc4106_decrypt(struct aead_request *req) } #endif +static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = xts_verify_key(tfm, key, keylen); + if (err) + return err; + + keylen /= 2; + + /* first half of xts-key is for crypt */ + err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx, + key, keylen); + if (err) + return err; + + /* second half of xts-key is for tweak */ + return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx, + key + keylen, keylen); +} + +static int xts_crypt(struct skcipher_request *req, bool encrypt) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); + int tail = req->cryptlen % AES_BLOCK_SIZE; + struct skcipher_request subreq; + struct skcipher_walk walk; + int err; + + if (req->cryptlen < AES_BLOCK_SIZE) + return -EINVAL; + + err = skcipher_walk_virt(&walk, req, false); + + if (unlikely(tail > 0 && walk.nbytes < walk.total)) { + int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2; + + skcipher_walk_abort(&walk); + + skcipher_request_set_tfm(&subreq, tfm); + skcipher_request_set_callback(&subreq, + skcipher_request_flags(req), + NULL, NULL); + skcipher_request_set_crypt(&subreq, req->src, req->dst, + blocks * AES_BLOCK_SIZE, req->iv); + req = &subreq; + err = skcipher_walk_virt(&walk, req, false); + } else { + tail = 0; + } + + kernel_fpu_begin(); + + /* calculate first value of T */ + aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv); + + while (walk.nbytes > 0) { + int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes &= ~(AES_BLOCK_SIZE - 1); + + if (encrypt) + aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx), + walk.dst.virt.addr, walk.src.virt.addr, + nbytes, walk.iv); + else + aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx), + walk.dst.virt.addr, walk.src.virt.addr, + nbytes, walk.iv); + kernel_fpu_end(); + + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + + if (walk.nbytes > 0) + kernel_fpu_begin(); + } + + if (unlikely(tail > 0 && !err)) { + struct scatterlist sg_src[2], sg_dst[2]; + struct scatterlist *src, *dst; + + dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen); + if (req->dst != req->src) + dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen); + + 
skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail, + req->iv); + + err = skcipher_walk_virt(&walk, &subreq, false); + if (err) + return err; + + kernel_fpu_begin(); + if (encrypt) + aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx), + walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes, walk.iv); + else + aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx), + walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes, walk.iv); + kernel_fpu_end(); + + err = skcipher_walk_done(&walk, 0); + } + return err; +} + +static int xts_encrypt(struct skcipher_request *req) +{ + return xts_crypt(req, true); +} + +static int xts_decrypt(struct skcipher_request *req) +{ + return xts_crypt(req, false); +} + static struct crypto_alg aesni_cipher_alg = { .cra_name = "aes", .cra_driver_name = "aes-aesni", @@ -928,6 +986,23 @@ static struct skcipher_alg aesni_skciphers[] = { .setkey = aesni_skcipher_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, + }, { + .base = { + .cra_name = "__cts(cbc(aes))", + .cra_driver_name = "__cts-cbc-aes-aesni", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = CRYPTO_AES_CTX_SIZE, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .walksize = 2 * AES_BLOCK_SIZE, + .setkey = aesni_skcipher_setkey, + .encrypt = cts_cbc_encrypt, + .decrypt = cts_cbc_decrypt, #ifdef CONFIG_X86_64 }, { .base = { @@ -946,6 +1021,7 @@ static struct skcipher_alg aesni_skciphers[] = { .setkey = aesni_skcipher_setkey, .encrypt = ctr_crypt, .decrypt = ctr_crypt, +#endif }, { .base = { .cra_name = "__xts(aes)", @@ -959,10 +1035,10 @@ static struct skcipher_alg aesni_skciphers[] = { .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, + .walksize = 2 * AES_BLOCK_SIZE, .setkey = xts_aesni_setkey, .encrypt = xts_encrypt, .decrypt = xts_decrypt, -#endif } }; @@ -985,7 +1061,8 @@ static int generic_gcmaes_encrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); - u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); + u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8); + u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN); __be32 counter = cpu_to_be32(1); memcpy(iv, req->iv, 12); @@ -1001,7 +1078,8 @@ static int generic_gcmaes_decrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); - u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); + u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8); + u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN); memcpy(iv, req->iv, 12); *((__be32 *)(iv+12)) = counter; @@ -1066,19 +1144,18 @@ static int __init aesni_init(void) #ifdef CONFIG_X86_64 if (boot_cpu_has(X86_FEATURE_AVX2)) { pr_info("AVX2 version of gcm_enc/dec engaged.\n"); - aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4; + static_branch_enable(&gcm_use_avx); + static_branch_enable(&gcm_use_avx2); } else if (boot_cpu_has(X86_FEATURE_AVX)) { pr_info("AVX version of gcm_enc/dec engaged.\n"); - aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2; + static_branch_enable(&gcm_use_avx); } else { pr_info("SSE version of gcm_enc/dec engaged.\n"); - aesni_gcm_tfm = &aesni_gcm_tfm_sse; } - aesni_ctr_enc_tfm = aesni_ctr_enc; if (boot_cpu_has(X86_FEATURE_AVX)) { /* optimize performance of ctr mode encryption transform */ 
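
[Editor's note] For reference, the _aesni_gf128mul_x_ble() macro used by the XTS hunks earlier in this patch advances the tweak by multiplying it by x in GF(2^128), little-endian convention, folding the carry out of bit 127 back in as the reduction polynomial 0x87. The same computation in portable C; the kernel also ships a generic version of this helper (gf128mul_x_ble() in include/crypto/gf128mul.h):

/*
 * Multiply a 128-bit XTS tweak by x in GF(2^128): shift left by one
 * bit across both words, and XOR 0x87 into the low word when a bit
 * carries out of position 127.
 */
#include <inttypes.h>
#include <stdio.h>

struct le128 {
        uint64_t lo;    /* bits  0..63  */
        uint64_t hi;    /* bits 64..127 */
};

static void gf128mul_x_ble(struct le128 *iv)
{
        uint64_t carry = iv->hi >> 63;          /* bit 127 */

        iv->hi = (iv->hi << 1) | (iv->lo >> 63);
        iv->lo = (iv->lo << 1) ^ (carry ? 0x87 : 0);
}

int main(void)
{
        /* tweak with the top bit set, so the reduction path is taken */
        struct le128 iv = { .lo = 1, .hi = 0x8000000000000000ULL };

        gf128mul_x_ble(&iv);
        /* expect lo = (1 << 1) ^ 0x87 = 0x85, hi = 0 */
        printf("lo=%#" PRIx64 " hi=%#" PRIx64 "\n", iv.lo, iv.hi);
        return 0;
}
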
- aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm; + static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm); pr_info("AES CTR mode by8 optimization enabled\n"); } #endif diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c index c025a01cf708..a40365ab301e 100644 --- a/arch/x86/crypto/blake2s-glue.c +++ b/arch/x86/crypto/blake2s-glue.c @@ -58,138 +58,40 @@ void blake2s_compress_arch(struct blake2s_state *state, } EXPORT_SYMBOL(blake2s_compress_arch); -static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) +static int crypto_blake2s_update_x86(struct shash_desc *desc, + const u8 *in, unsigned int inlen) { - struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); - - if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) - return -EINVAL; - - memcpy(tctx->key, key, keylen); - tctx->keylen = keylen; - - return 0; + return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch); } -static int crypto_blake2s_init(struct shash_desc *desc) +static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out) { - struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct blake2s_state *state = shash_desc_ctx(desc); - const int outlen = crypto_shash_digestsize(desc->tfm); - - if (tctx->keylen) - blake2s_init_key(state, outlen, tctx->key, tctx->keylen); - else - blake2s_init(state, outlen); - - return 0; + return crypto_blake2s_final(desc, out, blake2s_compress_arch); } -static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in, - unsigned int inlen) -{ - struct blake2s_state *state = shash_desc_ctx(desc); - const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; - - if (unlikely(!inlen)) - return 0; - if (inlen > fill) { - memcpy(state->buf + state->buflen, in, fill); - blake2s_compress_arch(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); - state->buflen = 0; - in += fill; - inlen -= fill; +#define BLAKE2S_ALG(name, driver_name, digest_size) \ + { \ + .base.cra_name = name, \ + .base.cra_driver_name = driver_name, \ + .base.cra_priority = 200, \ + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ + .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ + .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ + .base.cra_module = THIS_MODULE, \ + .digestsize = digest_size, \ + .setkey = crypto_blake2s_setkey, \ + .init = crypto_blake2s_init, \ + .update = crypto_blake2s_update_x86, \ + .final = crypto_blake2s_final_x86, \ + .descsize = sizeof(struct blake2s_state), \ } - if (inlen > BLAKE2S_BLOCK_SIZE) { - const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); - /* Hash one less (full) block than strictly possible */ - blake2s_compress_arch(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); - in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); - inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); - } - memcpy(state->buf + state->buflen, in, inlen); - state->buflen += inlen; - - return 0; -} - -static int crypto_blake2s_final(struct shash_desc *desc, u8 *out) -{ - struct blake2s_state *state = shash_desc_ctx(desc); - - blake2s_set_lastblock(state); - memset(state->buf + state->buflen, 0, - BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ - blake2s_compress_arch(state, state->buf, 1, state->buflen); - cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); - memcpy(out, state->h, state->outlen); - memzero_explicit(state, sizeof(*state)); - - return 0; -} - -static struct shash_alg blake2s_algs[] = {{ - .base.cra_name = "blake2s-128", - .base.cra_driver_name = "blake2s-128-x86", - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_ctxsize = sizeof(struct 
blake2s_tfm_ctx), - .base.cra_priority = 200, - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - - .digestsize = BLAKE2S_128_HASH_SIZE, - .setkey = crypto_blake2s_setkey, - .init = crypto_blake2s_init, - .update = crypto_blake2s_update, - .final = crypto_blake2s_final, - .descsize = sizeof(struct blake2s_state), -}, { - .base.cra_name = "blake2s-160", - .base.cra_driver_name = "blake2s-160-x86", - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), - .base.cra_priority = 200, - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - - .digestsize = BLAKE2S_160_HASH_SIZE, - .setkey = crypto_blake2s_setkey, - .init = crypto_blake2s_init, - .update = crypto_blake2s_update, - .final = crypto_blake2s_final, - .descsize = sizeof(struct blake2s_state), -}, { - .base.cra_name = "blake2s-224", - .base.cra_driver_name = "blake2s-224-x86", - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), - .base.cra_priority = 200, - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - - .digestsize = BLAKE2S_224_HASH_SIZE, - .setkey = crypto_blake2s_setkey, - .init = crypto_blake2s_init, - .update = crypto_blake2s_update, - .final = crypto_blake2s_final, - .descsize = sizeof(struct blake2s_state), -}, { - .base.cra_name = "blake2s-256", - .base.cra_driver_name = "blake2s-256-x86", - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), - .base.cra_priority = 200, - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - .digestsize = BLAKE2S_256_HASH_SIZE, - .setkey = crypto_blake2s_setkey, - .init = crypto_blake2s_init, - .update = crypto_blake2s_update, - .final = crypto_blake2s_final, - .descsize = sizeof(struct blake2s_state), -}}; +static struct shash_alg blake2s_algs[] = { + BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE), + BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE), + BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE), + BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE), +}; static int __init blake2s_mod_init(void) { diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c index cedfdba69ce3..a880e0b1c255 100644 --- a/arch/x86/crypto/blowfish_glue.c +++ b/arch/x86/crypto/blowfish_glue.c @@ -6,8 +6,6 @@ * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> - * CTR part based on code (crypto/ctr.c) by: - * (C) Copyright IBM Corp. 
2007 - Joy Latten <latten@us.ibm.com> */ #include <crypto/algapi.h> @@ -247,97 +245,6 @@ static int cbc_decrypt(struct skcipher_request *req) return err; } -static void ctr_crypt_final(struct bf_ctx *ctx, struct skcipher_walk *walk) -{ - u8 *ctrblk = walk->iv; - u8 keystream[BF_BLOCK_SIZE]; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - unsigned int nbytes = walk->nbytes; - - blowfish_enc_blk(ctx, keystream, ctrblk); - crypto_xor_cpy(dst, keystream, src, nbytes); - - crypto_inc(ctrblk, BF_BLOCK_SIZE); -} - -static unsigned int __ctr_crypt(struct bf_ctx *ctx, struct skcipher_walk *walk) -{ - unsigned int bsize = BF_BLOCK_SIZE; - unsigned int nbytes = walk->nbytes; - u64 *src = (u64 *)walk->src.virt.addr; - u64 *dst = (u64 *)walk->dst.virt.addr; - u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); - __be64 ctrblocks[4]; - - /* Process four block batch */ - if (nbytes >= bsize * 4) { - do { - if (dst != src) { - dst[0] = src[0]; - dst[1] = src[1]; - dst[2] = src[2]; - dst[3] = src[3]; - } - - /* create ctrblks for parallel encrypt */ - ctrblocks[0] = cpu_to_be64(ctrblk++); - ctrblocks[1] = cpu_to_be64(ctrblk++); - ctrblocks[2] = cpu_to_be64(ctrblk++); - ctrblocks[3] = cpu_to_be64(ctrblk++); - - blowfish_enc_blk_xor_4way(ctx, (u8 *)dst, - (u8 *)ctrblocks); - - src += 4; - dst += 4; - } while ((nbytes -= bsize * 4) >= bsize * 4); - - if (nbytes < bsize) - goto done; - } - - /* Handle leftovers */ - do { - if (dst != src) - *dst = *src; - - ctrblocks[0] = cpu_to_be64(ctrblk++); - - blowfish_enc_blk_xor(ctx, (u8 *)dst, (u8 *)ctrblocks); - - src += 1; - dst += 1; - } while ((nbytes -= bsize) >= bsize); - -done: - *(__be64 *)walk->iv = cpu_to_be64(ctrblk); - return nbytes; -} - -static int ctr_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct bf_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) { - nbytes = __ctr_crypt(ctx, &walk); - err = skcipher_walk_done(&walk, nbytes); - } - - if (nbytes) { - ctr_crypt_final(ctx, &walk); - err = skcipher_walk_done(&walk, 0); - } - - return err; -} - static struct crypto_alg bf_cipher_alg = { .cra_name = "blowfish", .cra_driver_name = "blowfish-asm", @@ -384,20 +291,6 @@ static struct skcipher_alg bf_skcipher_algs[] = { .setkey = blowfish_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "ctr(blowfish)", - .base.cra_driver_name = "ctr-blowfish-asm", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct bf_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = BF_MIN_KEY_SIZE, - .max_keysize = BF_MAX_KEY_SIZE, - .ivsize = BF_BLOCK_SIZE, - .chunksize = BF_BLOCK_SIZE, - .setkey = blowfish_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, }, }; diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index ecc0a9a905c4..e2a0e0f4bf9d 100644 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S @@ -17,7 +17,6 @@ #include <linux/linkage.h> #include <asm/frame.h> -#include <asm/nospec-branch.h> #define CAMELLIA_TABLE_BYTE_LEN 272 @@ -589,14 +588,6 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) .long 0x80808080 .long 0x80808080 -/* For CTR-mode IV byteswap */ -.Lbswap128_mask: - .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
1, 0 - -/* For XTS mode IV generation */ -.Lxts_gf128mul_and_shl1_mask: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 - /* * pre-SubByte transform * @@ -998,292 +989,3 @@ SYM_FUNC_START(camellia_cbc_dec_16way) FRAME_END ret; SYM_FUNC_END(camellia_cbc_dec_16way) - -#define inc_le128(x, minus_one, tmp) \ - vpcmpeqq minus_one, x, tmp; \ - vpsubq minus_one, x, x; \ - vpslldq $8, tmp, tmp; \ - vpsubq tmp, x, x; - -SYM_FUNC_START(camellia_ctr_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (little endian, 128bit) - */ - FRAME_BEGIN - - subq $(16 * 16), %rsp; - movq %rsp, %rax; - - vmovdqa .Lbswap128_mask, %xmm14; - - /* load IV and byteswap */ - vmovdqu (%rcx), %xmm0; - vpshufb %xmm14, %xmm0, %xmm15; - vmovdqu %xmm15, 15 * 16(%rax); - - vpcmpeqd %xmm15, %xmm15, %xmm15; - vpsrldq $8, %xmm15, %xmm15; /* low: -1, high: 0 */ - - /* construct IVs */ - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm13; - vmovdqu %xmm13, 14 * 16(%rax); - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm13; - vmovdqu %xmm13, 13 * 16(%rax); - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm12; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm11; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm10; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm9; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm8; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm7; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm6; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm5; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm4; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm3; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm2; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm1; - inc_le128(%xmm0, %xmm15, %xmm13); - vmovdqa %xmm0, %xmm13; - vpshufb %xmm14, %xmm0, %xmm0; - inc_le128(%xmm13, %xmm15, %xmm14); - vmovdqu %xmm13, (%rcx); - - /* inpack16_pre: */ - vmovq (key_table)(CTX), %xmm15; - vpshufb .Lpack_bswap, %xmm15, %xmm15; - vpxor %xmm0, %xmm15, %xmm0; - vpxor %xmm1, %xmm15, %xmm1; - vpxor %xmm2, %xmm15, %xmm2; - vpxor %xmm3, %xmm15, %xmm3; - vpxor %xmm4, %xmm15, %xmm4; - vpxor %xmm5, %xmm15, %xmm5; - vpxor %xmm6, %xmm15, %xmm6; - vpxor %xmm7, %xmm15, %xmm7; - vpxor %xmm8, %xmm15, %xmm8; - vpxor %xmm9, %xmm15, %xmm9; - vpxor %xmm10, %xmm15, %xmm10; - vpxor %xmm11, %xmm15, %xmm11; - vpxor %xmm12, %xmm15, %xmm12; - vpxor 13 * 16(%rax), %xmm15, %xmm13; - vpxor 14 * 16(%rax), %xmm15, %xmm14; - vpxor 15 * 16(%rax), %xmm15, %xmm15; - - call __camellia_enc_blk16; - - addq $(16 * 16), %rsp; - - vpxor 0 * 16(%rdx), %xmm7, %xmm7; - vpxor 1 * 16(%rdx), %xmm6, %xmm6; - vpxor 2 * 16(%rdx), %xmm5, %xmm5; - vpxor 3 * 16(%rdx), %xmm4, %xmm4; - vpxor 4 * 16(%rdx), %xmm3, %xmm3; - vpxor 5 * 16(%rdx), %xmm2, %xmm2; - vpxor 6 * 16(%rdx), %xmm1, %xmm1; - vpxor 7 * 16(%rdx), %xmm0, %xmm0; - vpxor 8 * 16(%rdx), %xmm15, %xmm15; - vpxor 9 * 16(%rdx), %xmm14, %xmm14; - vpxor 10 * 16(%rdx), %xmm13, %xmm13; - vpxor 11 * 16(%rdx), %xmm12, %xmm12; - vpxor 12 * 16(%rdx), %xmm11, %xmm11; - vpxor 13 * 16(%rdx), %xmm10, %xmm10; - vpxor 14 * 16(%rdx), %xmm9, %xmm9; - vpxor 15 * 16(%rdx), %xmm8, %xmm8; - write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, - %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, - %xmm8, %rsi); - - FRAME_END - ret; 
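
For reference, the inc_le128 macro used throughout the 16-way CTR routine above is a branch-free 128-bit little-endian counter increment in an XMM register: vpcmpeqq detects an all-ones low half, vpsubq adds one to the low half (the "minus_one" operand holds -1 only in its low lane), and the byte-shifted compare result propagates the carry into the high half. A plain-C model of the same operation (a sketch, not code from this patch):

	/* Advance a 128-bit little-endian counter; ctr[0] is the low half. */
	static inline void inc_le128(u64 ctr[2])
	{
		if (++ctr[0] == 0)	/* carry out of the low 64 bits */
			ctr[1]++;
	}
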
-SYM_FUNC_END(camellia_ctr_16way) - -#define gf128mul_x_ble(iv, mask, tmp) \ - vpsrad $31, iv, tmp; \ - vpaddq iv, iv, iv; \ - vpshufd $0x13, tmp, tmp; \ - vpand mask, tmp, tmp; \ - vpxor tmp, iv, iv; - -.align 8 -SYM_FUNC_START_LOCAL(camellia_xts_crypt_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - * %r8: index for input whitening key - * %r9: pointer to __camellia_enc_blk16 or __camellia_dec_blk16 - */ - FRAME_BEGIN - - subq $(16 * 16), %rsp; - movq %rsp, %rax; - - vmovdqa .Lxts_gf128mul_and_shl1_mask, %xmm14; - - /* load IV */ - vmovdqu (%rcx), %xmm0; - vpxor 0 * 16(%rdx), %xmm0, %xmm15; - vmovdqu %xmm15, 15 * 16(%rax); - vmovdqu %xmm0, 0 * 16(%rsi); - - /* construct IVs */ - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 1 * 16(%rdx), %xmm0, %xmm15; - vmovdqu %xmm15, 14 * 16(%rax); - vmovdqu %xmm0, 1 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 2 * 16(%rdx), %xmm0, %xmm13; - vmovdqu %xmm0, 2 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 3 * 16(%rdx), %xmm0, %xmm12; - vmovdqu %xmm0, 3 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 4 * 16(%rdx), %xmm0, %xmm11; - vmovdqu %xmm0, 4 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 5 * 16(%rdx), %xmm0, %xmm10; - vmovdqu %xmm0, 5 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 6 * 16(%rdx), %xmm0, %xmm9; - vmovdqu %xmm0, 6 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 7 * 16(%rdx), %xmm0, %xmm8; - vmovdqu %xmm0, 7 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 8 * 16(%rdx), %xmm0, %xmm7; - vmovdqu %xmm0, 8 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 9 * 16(%rdx), %xmm0, %xmm6; - vmovdqu %xmm0, 9 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 10 * 16(%rdx), %xmm0, %xmm5; - vmovdqu %xmm0, 10 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 11 * 16(%rdx), %xmm0, %xmm4; - vmovdqu %xmm0, 11 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 12 * 16(%rdx), %xmm0, %xmm3; - vmovdqu %xmm0, 12 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 13 * 16(%rdx), %xmm0, %xmm2; - vmovdqu %xmm0, 13 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 14 * 16(%rdx), %xmm0, %xmm1; - vmovdqu %xmm0, 14 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 15 * 16(%rdx), %xmm0, %xmm15; - vmovdqu %xmm15, 0 * 16(%rax); - vmovdqu %xmm0, 15 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vmovdqu %xmm0, (%rcx); - - /* inpack16_pre: */ - vmovq (key_table)(CTX, %r8, 8), %xmm15; - vpshufb .Lpack_bswap, %xmm15, %xmm15; - vpxor 0 * 16(%rax), %xmm15, %xmm0; - vpxor %xmm1, %xmm15, %xmm1; - vpxor %xmm2, %xmm15, %xmm2; - vpxor %xmm3, %xmm15, %xmm3; - vpxor %xmm4, %xmm15, %xmm4; - vpxor %xmm5, %xmm15, %xmm5; - vpxor %xmm6, %xmm15, %xmm6; - vpxor %xmm7, %xmm15, %xmm7; - vpxor %xmm8, %xmm15, %xmm8; - vpxor %xmm9, %xmm15, %xmm9; - vpxor %xmm10, %xmm15, %xmm10; - vpxor %xmm11, %xmm15, %xmm11; - vpxor %xmm12, %xmm15, %xmm12; - vpxor %xmm13, %xmm15, %xmm13; - vpxor 14 * 16(%rax), %xmm15, %xmm14; - vpxor 15 * 16(%rax), %xmm15, %xmm15; - - CALL_NOSPEC r9; - - addq $(16 * 16), %rsp; - - vpxor 0 * 16(%rsi), %xmm7, %xmm7; - vpxor 1 * 16(%rsi), %xmm6, %xmm6; - vpxor 2 * 16(%rsi), %xmm5, %xmm5; - vpxor 3 * 16(%rsi), %xmm4, %xmm4; - vpxor 4 * 16(%rsi), %xmm3, %xmm3; - vpxor 5 * 16(%rsi), %xmm2, %xmm2; - vpxor 6 * 16(%rsi), %xmm1, %xmm1; - vpxor 7 * 16(%rsi), %xmm0, %xmm0; - vpxor 8 * 16(%rsi), %xmm15, 
%xmm15; - vpxor 9 * 16(%rsi), %xmm14, %xmm14; - vpxor 10 * 16(%rsi), %xmm13, %xmm13; - vpxor 11 * 16(%rsi), %xmm12, %xmm12; - vpxor 12 * 16(%rsi), %xmm11, %xmm11; - vpxor 13 * 16(%rsi), %xmm10, %xmm10; - vpxor 14 * 16(%rsi), %xmm9, %xmm9; - vpxor 15 * 16(%rsi), %xmm8, %xmm8; - write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, - %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, - %xmm8, %rsi); - - FRAME_END - ret; -SYM_FUNC_END(camellia_xts_crypt_16way) - -SYM_FUNC_START(camellia_xts_enc_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - xorl %r8d, %r8d; /* input whitening key, 0 for enc */ - - leaq __camellia_enc_blk16, %r9; - - jmp camellia_xts_crypt_16way; -SYM_FUNC_END(camellia_xts_enc_16way) - -SYM_FUNC_START(camellia_xts_dec_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - - cmpl $16, key_length(CTX); - movl $32, %r8d; - movl $24, %eax; - cmovel %eax, %r8d; /* input whitening key, last for dec */ - - leaq __camellia_dec_blk16, %r9; - - jmp camellia_xts_crypt_16way; -SYM_FUNC_END(camellia_xts_dec_16way) diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index 0907243c501c..782e9712a1ec 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -7,7 +7,6 @@ #include <linux/linkage.h> #include <asm/frame.h> -#include <asm/nospec-branch.h> #define CAMELLIA_TABLE_BYTE_LEN 272 @@ -625,16 +624,6 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) .section .rodata.cst16, "aM", @progbits, 16 .align 16 -/* For CTR-mode IV byteswap */ -.Lbswap128_mask: - .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 - -/* For XTS mode */ -.Lxts_gf128mul_and_shl1_mask_0: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 -.Lxts_gf128mul_and_shl1_mask_1: - .byte 0x0e, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 - /* * pre-SubByte transform * @@ -1061,343 +1050,3 @@ SYM_FUNC_START(camellia_cbc_dec_32way) FRAME_END ret; SYM_FUNC_END(camellia_cbc_dec_32way) - -#define inc_le128(x, minus_one, tmp) \ - vpcmpeqq minus_one, x, tmp; \ - vpsubq minus_one, x, x; \ - vpslldq $8, tmp, tmp; \ - vpsubq tmp, x, x; - -#define add2_le128(x, minus_one, minus_two, tmp1, tmp2) \ - vpcmpeqq minus_one, x, tmp1; \ - vpcmpeqq minus_two, x, tmp2; \ - vpsubq minus_two, x, x; \ - vpor tmp2, tmp1, tmp1; \ - vpslldq $8, tmp1, tmp1; \ - vpsubq tmp1, x, x; - -SYM_FUNC_START(camellia_ctr_32way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (32 blocks) - * %rdx: src (32 blocks) - * %rcx: iv (little endian, 128bit) - */ - FRAME_BEGIN - - vzeroupper; - - movq %rsp, %r10; - cmpq %rsi, %rdx; - je .Lctr_use_stack; - - /* dst can be used as temporary storage, src is not overwritten. 
*/ - movq %rsi, %rax; - jmp .Lctr_continue; - -.Lctr_use_stack: - subq $(16 * 32), %rsp; - movq %rsp, %rax; - -.Lctr_continue: - vpcmpeqd %ymm15, %ymm15, %ymm15; - vpsrldq $8, %ymm15, %ymm15; /* ab: -1:0 ; cd: -1:0 */ - vpaddq %ymm15, %ymm15, %ymm12; /* ab: -2:0 ; cd: -2:0 */ - - /* load IV and byteswap */ - vmovdqu (%rcx), %xmm0; - vmovdqa %xmm0, %xmm1; - inc_le128(%xmm0, %xmm15, %xmm14); - vbroadcasti128 .Lbswap128_mask, %ymm14; - vinserti128 $1, %xmm0, %ymm1, %ymm0; - vpshufb %ymm14, %ymm0, %ymm13; - vmovdqu %ymm13, 15 * 32(%rax); - - /* construct IVs */ - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); /* ab:le2 ; cd:le3 */ - vpshufb %ymm14, %ymm0, %ymm13; - vmovdqu %ymm13, 14 * 32(%rax); - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm13; - vmovdqu %ymm13, 13 * 32(%rax); - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm13; - vmovdqu %ymm13, 12 * 32(%rax); - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm13; - vmovdqu %ymm13, 11 * 32(%rax); - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm10; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm9; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm8; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm7; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm6; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm5; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm4; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm3; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm2; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm1; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vextracti128 $1, %ymm0, %xmm13; - vpshufb %ymm14, %ymm0, %ymm0; - inc_le128(%xmm13, %xmm15, %xmm14); - vmovdqu %xmm13, (%rcx); - - /* inpack32_pre: */ - vpbroadcastq (key_table)(CTX), %ymm15; - vpshufb .Lpack_bswap, %ymm15, %ymm15; - vpxor %ymm0, %ymm15, %ymm0; - vpxor %ymm1, %ymm15, %ymm1; - vpxor %ymm2, %ymm15, %ymm2; - vpxor %ymm3, %ymm15, %ymm3; - vpxor %ymm4, %ymm15, %ymm4; - vpxor %ymm5, %ymm15, %ymm5; - vpxor %ymm6, %ymm15, %ymm6; - vpxor %ymm7, %ymm15, %ymm7; - vpxor %ymm8, %ymm15, %ymm8; - vpxor %ymm9, %ymm15, %ymm9; - vpxor %ymm10, %ymm15, %ymm10; - vpxor 11 * 32(%rax), %ymm15, %ymm11; - vpxor 12 * 32(%rax), %ymm15, %ymm12; - vpxor 13 * 32(%rax), %ymm15, %ymm13; - vpxor 14 * 32(%rax), %ymm15, %ymm14; - vpxor 15 * 32(%rax), %ymm15, %ymm15; - - call __camellia_enc_blk32; - - movq %r10, %rsp; - - vpxor 0 * 32(%rdx), %ymm7, %ymm7; - vpxor 1 * 32(%rdx), %ymm6, %ymm6; - vpxor 2 * 32(%rdx), %ymm5, %ymm5; - vpxor 3 * 32(%rdx), %ymm4, %ymm4; - vpxor 4 * 32(%rdx), %ymm3, %ymm3; - vpxor 5 * 32(%rdx), %ymm2, %ymm2; - vpxor 6 * 32(%rdx), %ymm1, %ymm1; - vpxor 7 * 32(%rdx), %ymm0, %ymm0; - vpxor 8 * 32(%rdx), %ymm15, %ymm15; - vpxor 9 * 32(%rdx), %ymm14, %ymm14; - vpxor 10 * 32(%rdx), %ymm13, %ymm13; - vpxor 11 * 32(%rdx), %ymm12, %ymm12; - vpxor 12 * 32(%rdx), %ymm11, %ymm11; - vpxor 13 * 32(%rdx), %ymm10, %ymm10; - vpxor 14 * 32(%rdx), %ymm9, %ymm9; - vpxor 15 * 32(%rdx), %ymm8, %ymm8; - write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, - %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, - %ymm8, %rsi); - - vzeroupper; - - FRAME_END - 
ret; -SYM_FUNC_END(camellia_ctr_32way) - -#define gf128mul_x_ble(iv, mask, tmp) \ - vpsrad $31, iv, tmp; \ - vpaddq iv, iv, iv; \ - vpshufd $0x13, tmp, tmp; \ - vpand mask, tmp, tmp; \ - vpxor tmp, iv, iv; - -#define gf128mul_x2_ble(iv, mask1, mask2, tmp0, tmp1) \ - vpsrad $31, iv, tmp0; \ - vpaddq iv, iv, tmp1; \ - vpsllq $2, iv, iv; \ - vpshufd $0x13, tmp0, tmp0; \ - vpsrad $31, tmp1, tmp1; \ - vpand mask2, tmp0, tmp0; \ - vpshufd $0x13, tmp1, tmp1; \ - vpxor tmp0, iv, iv; \ - vpand mask1, tmp1, tmp1; \ - vpxor tmp1, iv, iv; - -.align 8 -SYM_FUNC_START_LOCAL(camellia_xts_crypt_32way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (32 blocks) - * %rdx: src (32 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - * %r8: index for input whitening key - * %r9: pointer to __camellia_enc_blk32 or __camellia_dec_blk32 - */ - FRAME_BEGIN - - vzeroupper; - - subq $(16 * 32), %rsp; - movq %rsp, %rax; - - vbroadcasti128 .Lxts_gf128mul_and_shl1_mask_0, %ymm12; - - /* load IV and construct second IV */ - vmovdqu (%rcx), %xmm0; - vmovdqa %xmm0, %xmm15; - gf128mul_x_ble(%xmm0, %xmm12, %xmm13); - vbroadcasti128 .Lxts_gf128mul_and_shl1_mask_1, %ymm13; - vinserti128 $1, %xmm0, %ymm15, %ymm0; - vpxor 0 * 32(%rdx), %ymm0, %ymm15; - vmovdqu %ymm15, 15 * 32(%rax); - vmovdqu %ymm0, 0 * 32(%rsi); - - /* construct IVs */ - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 1 * 32(%rdx), %ymm0, %ymm15; - vmovdqu %ymm15, 14 * 32(%rax); - vmovdqu %ymm0, 1 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 2 * 32(%rdx), %ymm0, %ymm15; - vmovdqu %ymm15, 13 * 32(%rax); - vmovdqu %ymm0, 2 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 3 * 32(%rdx), %ymm0, %ymm15; - vmovdqu %ymm15, 12 * 32(%rax); - vmovdqu %ymm0, 3 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 4 * 32(%rdx), %ymm0, %ymm11; - vmovdqu %ymm0, 4 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 5 * 32(%rdx), %ymm0, %ymm10; - vmovdqu %ymm0, 5 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 6 * 32(%rdx), %ymm0, %ymm9; - vmovdqu %ymm0, 6 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 7 * 32(%rdx), %ymm0, %ymm8; - vmovdqu %ymm0, 7 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 8 * 32(%rdx), %ymm0, %ymm7; - vmovdqu %ymm0, 8 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 9 * 32(%rdx), %ymm0, %ymm6; - vmovdqu %ymm0, 9 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 10 * 32(%rdx), %ymm0, %ymm5; - vmovdqu %ymm0, 10 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 11 * 32(%rdx), %ymm0, %ymm4; - vmovdqu %ymm0, 11 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 12 * 32(%rdx), %ymm0, %ymm3; - vmovdqu %ymm0, 12 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 13 * 32(%rdx), %ymm0, %ymm2; - vmovdqu %ymm0, 13 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 14 * 32(%rdx), %ymm0, %ymm1; - vmovdqu %ymm0, 14 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 15 * 32(%rdx), %ymm0, %ymm15; - vmovdqu %ymm15, 0 * 32(%rax); - vmovdqu %ymm0, 15 * 32(%rsi); - - vextracti128 $1, %ymm0, %xmm0; - gf128mul_x_ble(%xmm0, %xmm12, %xmm15); - vmovdqu %xmm0, (%rcx); - - /* inpack32_pre: */ - vpbroadcastq (key_table)(CTX, %r8, 8), %ymm15; - vpshufb 
.Lpack_bswap, %ymm15, %ymm15; - vpxor 0 * 32(%rax), %ymm15, %ymm0; - vpxor %ymm1, %ymm15, %ymm1; - vpxor %ymm2, %ymm15, %ymm2; - vpxor %ymm3, %ymm15, %ymm3; - vpxor %ymm4, %ymm15, %ymm4; - vpxor %ymm5, %ymm15, %ymm5; - vpxor %ymm6, %ymm15, %ymm6; - vpxor %ymm7, %ymm15, %ymm7; - vpxor %ymm8, %ymm15, %ymm8; - vpxor %ymm9, %ymm15, %ymm9; - vpxor %ymm10, %ymm15, %ymm10; - vpxor %ymm11, %ymm15, %ymm11; - vpxor 12 * 32(%rax), %ymm15, %ymm12; - vpxor 13 * 32(%rax), %ymm15, %ymm13; - vpxor 14 * 32(%rax), %ymm15, %ymm14; - vpxor 15 * 32(%rax), %ymm15, %ymm15; - - CALL_NOSPEC r9; - - addq $(16 * 32), %rsp; - - vpxor 0 * 32(%rsi), %ymm7, %ymm7; - vpxor 1 * 32(%rsi), %ymm6, %ymm6; - vpxor 2 * 32(%rsi), %ymm5, %ymm5; - vpxor 3 * 32(%rsi), %ymm4, %ymm4; - vpxor 4 * 32(%rsi), %ymm3, %ymm3; - vpxor 5 * 32(%rsi), %ymm2, %ymm2; - vpxor 6 * 32(%rsi), %ymm1, %ymm1; - vpxor 7 * 32(%rsi), %ymm0, %ymm0; - vpxor 8 * 32(%rsi), %ymm15, %ymm15; - vpxor 9 * 32(%rsi), %ymm14, %ymm14; - vpxor 10 * 32(%rsi), %ymm13, %ymm13; - vpxor 11 * 32(%rsi), %ymm12, %ymm12; - vpxor 12 * 32(%rsi), %ymm11, %ymm11; - vpxor 13 * 32(%rsi), %ymm10, %ymm10; - vpxor 14 * 32(%rsi), %ymm9, %ymm9; - vpxor 15 * 32(%rsi), %ymm8, %ymm8; - write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, - %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, - %ymm8, %rsi); - - vzeroupper; - - FRAME_END - ret; -SYM_FUNC_END(camellia_xts_crypt_32way) - -SYM_FUNC_START(camellia_xts_enc_32way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (32 blocks) - * %rdx: src (32 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - - xorl %r8d, %r8d; /* input whitening key, 0 for enc */ - - leaq __camellia_enc_blk32, %r9; - - jmp camellia_xts_crypt_32way; -SYM_FUNC_END(camellia_xts_enc_32way) - -SYM_FUNC_START(camellia_xts_dec_32way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (32 blocks) - * %rdx: src (32 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - - cmpl $16, key_length(CTX); - movl $32, %r8d; - movl $24, %eax; - cmovel %eax, %r8d; /* input whitening key, last for dec */ - - leaq __camellia_dec_blk32, %r9; - - jmp camellia_xts_crypt_32way; -SYM_FUNC_END(camellia_xts_dec_32way) diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/crypto/camellia.h index f6d91861cb14..1dcea79e8f8e 100644 --- a/arch/x86/include/asm/crypto/camellia.h +++ b/arch/x86/crypto/camellia.h @@ -19,18 +19,10 @@ struct camellia_ctx { u32 key_length; }; -struct camellia_xts_ctx { - struct camellia_ctx tweak_ctx; - struct camellia_ctx crypt_ctx; -}; - extern int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key, unsigned int key_len); -extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen); - /* regular block cipher functions */ asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src, bool xor); @@ -46,13 +38,6 @@ asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - -asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); static inline void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src) { @@ -78,14 +63,5 @@ static inline void camellia_enc_blk_xor_2way(const void *ctx, u8 *dst, 
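
The gf128mul_x_ble macros in the XTS routines removed above compute the next tweak by multiplying the current one by α in GF(2¹²⁸): the 128-bit value is shifted left one bit and, when a bit falls off the top, reduced with the polynomial x¹²⁸ + x⁷ + x² + x + 1 (the 0x87 constant in the mask tables). An equivalent C model (a sketch; t[0] holds the low 64 bits):

	static inline void xts_tweak_mul_alpha(u64 t[2])
	{
		u64 carry = t[1] >> 63;		/* bit shifted out of bit 127 */

		t[1] = (t[1] << 1) | (t[0] >> 63);
		t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);
	}

gf128mul_x2_ble in the 32-way code performs two such doublings per step, so each 256-bit ymm register carries two consecutive tweaks.
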
/* glue helpers */ extern void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src); -extern void camellia_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -extern void camellia_crypt_ctr_2way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - -extern void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -extern void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); #endif /* ASM_X86_CAMELLIA_H */ diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index ccda647422d6..e7e4d64e9577 100644 --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -5,16 +5,16 @@ * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> */ -#include <asm/crypto/camellia.h> -#include <asm/crypto/glue_helper.h> #include <crypto/algapi.h> #include <crypto/internal/simd.h> -#include <crypto/xts.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/module.h> #include <linux/types.h> +#include "camellia.h" +#include "ecb_cbc_helpers.h" + #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 #define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32 @@ -23,121 +23,6 @@ asmlinkage void camellia_ecb_enc_32way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void camellia_ecb_dec_32way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void camellia_ctr_32way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - -asmlinkage void camellia_xts_enc_32way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -asmlinkage void camellia_xts_dec_32way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - -static const struct common_glue_ctx camellia_enc = { - .num_funcs = 4, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .ecb = camellia_ecb_enc_32way } - }, { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ecb = camellia_ecb_enc_16way } - }, { - .num_blocks = 2, - .fn_u = { .ecb = camellia_enc_blk_2way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = camellia_enc_blk } - } } -}; - -static const struct common_glue_ctx camellia_ctr = { - .num_funcs = 4, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .ctr = camellia_ctr_32way } - }, { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ctr = camellia_ctr_16way } - }, { - .num_blocks = 2, - .fn_u = { .ctr = camellia_crypt_ctr_2way } - }, { - .num_blocks = 1, - .fn_u = { .ctr = camellia_crypt_ctr } - } } -}; - -static const struct common_glue_ctx camellia_enc_xts = { - .num_funcs = 3, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .xts = camellia_xts_enc_32way } - }, { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .xts = camellia_xts_enc_16way } - }, { - .num_blocks = 1, - .fn_u = { .xts = camellia_xts_enc } - } } -}; - -static const struct common_glue_ctx camellia_dec = { - .num_funcs = 4, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .ecb = camellia_ecb_dec_32way } - }, { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ecb = camellia_ecb_dec_16way } - }, { - .num_blocks = 2, - .fn_u = { .ecb = camellia_dec_blk_2way } - }, { - 
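
Throughout these glue conversions, the common_glue_ctx dispatch tables and glue_*_req_128bit() calls give way to the ECB_WALK_START()/ECB_BLOCK()/CBC_DEC_BLOCK() macros from the new local ecb_cbc_helpers.h header. Roughly, and simplifying the FPU-section handling, an ECB helper body expands to something like the sketch below (illustrative only, not the literal macro bodies):

	int err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes > 0) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int nbytes = walk.nbytes;

		kernel_fpu_begin();
		while (nbytes >= 32 * CAMELLIA_BLOCK_SIZE) {	/* ECB_BLOCK(32, fn) */
			camellia_ecb_dec_32way(ctx, dst, src);
			src += 32 * CAMELLIA_BLOCK_SIZE;
			dst += 32 * CAMELLIA_BLOCK_SIZE;
			nbytes -= 32 * CAMELLIA_BLOCK_SIZE;
		}
		/* ...then the 16-way, 2-way and single-block tails, widest first */
		kernel_fpu_end();
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
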
.num_blocks = 1, - .fn_u = { .ecb = camellia_dec_blk } - } } -}; - -static const struct common_glue_ctx camellia_dec_cbc = { - .num_funcs = 4, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .cbc = camellia_cbc_dec_32way } - }, { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .cbc = camellia_cbc_dec_16way } - }, { - .num_blocks = 2, - .fn_u = { .cbc = camellia_decrypt_cbc_2way } - }, { - .num_blocks = 1, - .fn_u = { .cbc = camellia_dec_blk } - } } -}; - -static const struct common_glue_ctx camellia_dec_xts = { - .num_funcs = 3, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .xts = camellia_xts_dec_32way } - }, { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .xts = camellia_xts_dec_16way } - }, { - .num_blocks = 1, - .fn_u = { .xts = camellia_xts_dec } - } } -}; static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -147,45 +32,39 @@ static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&camellia_enc, req); + ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS); + ECB_BLOCK(CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, camellia_ecb_enc_32way); + ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_enc_16way); + ECB_BLOCK(2, camellia_enc_blk_2way); + ECB_BLOCK(1, camellia_enc_blk); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&camellia_dec, req); + ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS); + ECB_BLOCK(CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, camellia_ecb_dec_32way); + ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_dec_16way); + ECB_BLOCK(2, camellia_dec_blk_2way); + ECB_BLOCK(1, camellia_dec_blk); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); + CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(camellia_enc_blk); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req); -} - -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&camellia_ctr, req); -} - -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk, - &ctx->tweak_ctx, &ctx->crypt_ctx, false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk, - &ctx->tweak_ctx, &ctx->crypt_ctx, true); + CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, camellia_cbc_dec_32way); + CBC_DEC_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_cbc_dec_16way); + CBC_DEC_BLOCK(2, camellia_decrypt_cbc_2way); + CBC_DEC_BLOCK(1, camellia_dec_blk); + CBC_WALK_END(); } static struct skcipher_alg camellia_algs[] = { @@ -216,35 +95,6 @@ static struct skcipher_alg camellia_algs[] = { .setkey = camellia_setkey, .encrypt = cbc_encrypt, .decrypt = 
cbc_decrypt, - }, { - .base.cra_name = "__ctr(camellia)", - .base.cra_driver_name = "__ctr-camellia-aesni-avx2", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct camellia_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = CAMELLIA_MIN_KEY_SIZE, - .max_keysize = CAMELLIA_MAX_KEY_SIZE, - .ivsize = CAMELLIA_BLOCK_SIZE, - .chunksize = CAMELLIA_BLOCK_SIZE, - .setkey = camellia_setkey, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, - }, { - .base.cra_name = "__xts(camellia)", - .base.cra_driver_name = "__xts-camellia-aesni-avx2", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct camellia_xts_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE, - .max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE, - .ivsize = CAMELLIA_BLOCK_SIZE, - .setkey = xts_camellia_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, }, }; diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 4e5de6ef206e..c7ccf63e741e 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -5,16 +5,16 @@ * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> */ -#include <asm/crypto/camellia.h> -#include <asm/crypto/glue_helper.h> #include <crypto/algapi.h> #include <crypto/internal/simd.h> -#include <crypto/xts.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/module.h> #include <linux/types.h> +#include "camellia.h" +#include "ecb_cbc_helpers.h" + #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 /* 16-way parallel cipher functions (avx/aes-ni) */ @@ -27,120 +27,6 @@ EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way); asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way); -asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -EXPORT_SYMBOL_GPL(camellia_ctr_16way); - -asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -EXPORT_SYMBOL_GPL(camellia_xts_enc_16way); - -asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -EXPORT_SYMBOL_GPL(camellia_xts_dec_16way); - -void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_enc_blk); -} -EXPORT_SYMBOL_GPL(camellia_xts_enc); - -void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_dec_blk); -} -EXPORT_SYMBOL_GPL(camellia_xts_dec); - -static const struct common_glue_ctx camellia_enc = { - .num_funcs = 3, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ecb = camellia_ecb_enc_16way } - }, { - .num_blocks = 2, - .fn_u = { .ecb = camellia_enc_blk_2way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = camellia_enc_blk } - } } -}; - -static const struct common_glue_ctx camellia_ctr = { - .num_funcs = 3, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ctr = camellia_ctr_16way } - }, { - .num_blocks = 2, - .fn_u = { .ctr = camellia_crypt_ctr_2way } - }, { - .num_blocks = 1, - .fn_u = { .ctr = camellia_crypt_ctr } - } } -}; - -static const struct common_glue_ctx camellia_enc_xts = 
{ - .num_funcs = 2, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .xts = camellia_xts_enc_16way } - }, { - .num_blocks = 1, - .fn_u = { .xts = camellia_xts_enc } - } } -}; - -static const struct common_glue_ctx camellia_dec = { - .num_funcs = 3, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ecb = camellia_ecb_dec_16way } - }, { - .num_blocks = 2, - .fn_u = { .ecb = camellia_dec_blk_2way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = camellia_dec_blk } - } } -}; - -static const struct common_glue_ctx camellia_dec_cbc = { - .num_funcs = 3, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .cbc = camellia_cbc_dec_16way } - }, { - .num_blocks = 2, - .fn_u = { .cbc = camellia_decrypt_cbc_2way } - }, { - .num_blocks = 1, - .fn_u = { .cbc = camellia_dec_blk } - } } -}; - -static const struct common_glue_ctx camellia_dec_xts = { - .num_funcs = 2, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .xts = camellia_xts_dec_16way } - }, { - .num_blocks = 1, - .fn_u = { .xts = camellia_xts_dec } - } } -}; - static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { @@ -149,65 +35,36 @@ static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&camellia_enc, req); + ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS); + ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_enc_16way); + ECB_BLOCK(2, camellia_enc_blk_2way); + ECB_BLOCK(1, camellia_enc_blk); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&camellia_dec, req); + ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS); + ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_dec_16way); + ECB_BLOCK(2, camellia_dec_blk_2way); + ECB_BLOCK(1, camellia_dec_blk); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); + CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(camellia_enc_blk); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req); -} - -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&camellia_ctr, req); -} - -int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - /* first half of xts-key is for crypt */ - err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2); - if (err) - return err; - - /* second half of xts-key is for tweak */ - return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); -} -EXPORT_SYMBOL_GPL(xts_camellia_setkey); - -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk, - &ctx->tweak_ctx, &ctx->crypt_ctx, false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - 
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk, - &ctx->tweak_ctx, &ctx->crypt_ctx, true); + CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_cbc_dec_16way); + CBC_DEC_BLOCK(2, camellia_decrypt_cbc_2way); + CBC_DEC_BLOCK(1, camellia_dec_blk); + CBC_WALK_END(); } static struct skcipher_alg camellia_algs[] = { @@ -238,36 +95,7 @@ static struct skcipher_alg camellia_algs[] = { .setkey = camellia_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(camellia)", - .base.cra_driver_name = "__ctr-camellia-aesni", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct camellia_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = CAMELLIA_MIN_KEY_SIZE, - .max_keysize = CAMELLIA_MAX_KEY_SIZE, - .ivsize = CAMELLIA_BLOCK_SIZE, - .chunksize = CAMELLIA_BLOCK_SIZE, - .setkey = camellia_setkey, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, - }, { - .base.cra_name = "__xts(camellia)", - .base.cra_driver_name = "__xts-camellia-aesni", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct camellia_xts_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE, - .max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE, - .ivsize = CAMELLIA_BLOCK_SIZE, - .setkey = xts_camellia_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, - }, + } }; static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)]; diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c index 242c056e5fa8..66c435ba9d3d 100644 --- a/arch/x86/crypto/camellia_glue.c +++ b/arch/x86/crypto/camellia_glue.c @@ -14,8 +14,9 @@ #include <linux/module.h> #include <linux/types.h> #include <crypto/algapi.h> -#include <asm/crypto/camellia.h> -#include <asm/crypto/glue_helper.h> + +#include "camellia.h" +#include "ecb_cbc_helpers.h" /* regular block cipher functions */ asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src, @@ -1262,129 +1263,47 @@ static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, return camellia_setkey(&tfm->base, key, key_len); } -void camellia_decrypt_cbc_2way(const void *ctx, u8 *d, const u8 *s) +void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src) { - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - u128 iv = *src; - - camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src); + u8 buf[CAMELLIA_BLOCK_SIZE]; + const u8 *iv = src; - u128_xor(&dst[1], &dst[1], &iv); + if (dst == src) + iv = memcpy(buf, iv, sizeof(buf)); + camellia_dec_blk_2way(ctx, dst, src); + crypto_xor(dst + CAMELLIA_BLOCK_SIZE, iv, CAMELLIA_BLOCK_SIZE); } EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way); -void camellia_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblk; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - if (dst != src) - *dst = *src; - - le128_to_be128(&ctrblk, iv); - le128_inc(iv); - - camellia_enc_blk_xor(ctx, (u8 *)dst, (u8 *)&ctrblk); -} -EXPORT_SYMBOL_GPL(camellia_crypt_ctr); - -void camellia_crypt_ctr_2way(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblks[2]; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - if (dst 
!= src) { - dst[0] = src[0]; - dst[1] = src[1]; - } - - le128_to_be128(&ctrblks[0], iv); - le128_inc(iv); - le128_to_be128(&ctrblks[1], iv); - le128_inc(iv); - - camellia_enc_blk_xor_2way(ctx, (u8 *)dst, (u8 *)ctrblks); -} -EXPORT_SYMBOL_GPL(camellia_crypt_ctr_2way); - -static const struct common_glue_ctx camellia_enc = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 2, - .fn_u = { .ecb = camellia_enc_blk_2way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = camellia_enc_blk } - } } -}; - -static const struct common_glue_ctx camellia_ctr = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 2, - .fn_u = { .ctr = camellia_crypt_ctr_2way } - }, { - .num_blocks = 1, - .fn_u = { .ctr = camellia_crypt_ctr } - } } -}; - -static const struct common_glue_ctx camellia_dec = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 2, - .fn_u = { .ecb = camellia_dec_blk_2way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = camellia_dec_blk } - } } -}; - -static const struct common_glue_ctx camellia_dec_cbc = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 2, - .fn_u = { .cbc = camellia_decrypt_cbc_2way } - }, { - .num_blocks = 1, - .fn_u = { .cbc = camellia_dec_blk } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&camellia_enc, req); + ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1); + ECB_BLOCK(2, camellia_enc_blk_2way); + ECB_BLOCK(1, camellia_enc_blk); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&camellia_dec, req); + ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1); + ECB_BLOCK(2, camellia_dec_blk_2way); + ECB_BLOCK(1, camellia_dec_blk); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); + CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(camellia_enc_blk); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req); -} - -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&camellia_ctr, req); + CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1); + CBC_DEC_BLOCK(2, camellia_decrypt_cbc_2way); + CBC_DEC_BLOCK(1, camellia_dec_blk); + CBC_WALK_END(); } static struct crypto_alg camellia_cipher_alg = { @@ -1433,20 +1352,6 @@ static struct skcipher_alg camellia_skcipher_algs[] = { .setkey = camellia_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "ctr(camellia)", - .base.cra_driver_name = "ctr-camellia-asm", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct camellia_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = CAMELLIA_MIN_KEY_SIZE, - .max_keysize = CAMELLIA_MAX_KEY_SIZE, - .ivsize = CAMELLIA_BLOCK_SIZE, - .chunksize = CAMELLIA_BLOCK_SIZE, - .setkey = camellia_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, } }; diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index 384ccb00f9e1..3976a87f92ad 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -6,7 +6,6 @@ * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> */ -#include <asm/crypto/glue_helper.h> #include <crypto/algapi.h> #include <crypto/cast5.h> #include <crypto/internal/simd.h> @@ -15,6 +14,8 @@ #include <linux/module.h> #include <linux/types.h> +#include 
"ecb_cbc_helpers.h" + #define CAST5_PARALLEL_BLOCKS 16 asmlinkage void cast5_ecb_enc_16way(struct cast5_ctx *ctx, u8 *dst, @@ -23,8 +24,6 @@ asmlinkage void cast5_ecb_dec_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src); asmlinkage void cast5_cbc_dec_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src); -asmlinkage void cast5_ctr_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src, - __be64 *iv); static int cast5_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -32,272 +31,35 @@ static int cast5_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, return cast5_setkey(&tfm->base, key, keylen); } -static inline bool cast5_fpu_begin(bool fpu_enabled, struct skcipher_walk *walk, - unsigned int nbytes) -{ - return glue_fpu_begin(CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS, - walk, fpu_enabled, nbytes); -} - -static inline void cast5_fpu_end(bool fpu_enabled) -{ - return glue_fpu_end(fpu_enabled); -} - -static int ecb_crypt(struct skcipher_request *req, bool enc) -{ - bool fpu_enabled = false; - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - const unsigned int bsize = CAST5_BLOCK_SIZE; - unsigned int nbytes; - void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src); - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes)) { - u8 *wsrc = walk.src.virt.addr; - u8 *wdst = walk.dst.virt.addr; - - fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); - - /* Process multi-block batch */ - if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { - fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way; - do { - fn(ctx, wdst, wsrc); - - wsrc += bsize * CAST5_PARALLEL_BLOCKS; - wdst += bsize * CAST5_PARALLEL_BLOCKS; - nbytes -= bsize * CAST5_PARALLEL_BLOCKS; - } while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS); - - if (nbytes < bsize) - goto done; - } - - fn = (enc) ? 
__cast5_encrypt : __cast5_decrypt; - - /* Handle leftovers */ - do { - fn(ctx, wdst, wsrc); - - wsrc += bsize; - wdst += bsize; - nbytes -= bsize; - } while (nbytes >= bsize); - -done: - err = skcipher_walk_done(&walk, nbytes); - } - - cast5_fpu_end(fpu_enabled); - return err; -} - static int ecb_encrypt(struct skcipher_request *req) { - return ecb_crypt(req, true); + ECB_WALK_START(req, CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS); + ECB_BLOCK(CAST5_PARALLEL_BLOCKS, cast5_ecb_enc_16way); + ECB_BLOCK(1, __cast5_encrypt); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return ecb_crypt(req, false); + ECB_WALK_START(req, CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS); + ECB_BLOCK(CAST5_PARALLEL_BLOCKS, cast5_ecb_dec_16way); + ECB_BLOCK(1, __cast5_decrypt); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - const unsigned int bsize = CAST5_BLOCK_SIZE; - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes)) { - u64 *src = (u64 *)walk.src.virt.addr; - u64 *dst = (u64 *)walk.dst.virt.addr; - u64 *iv = (u64 *)walk.iv; - - do { - *dst = *src ^ *iv; - __cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst); - iv = dst; - src++; - dst++; - nbytes -= bsize; - } while (nbytes >= bsize); - - *(u64 *)walk.iv = *iv; - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static unsigned int __cbc_decrypt(struct cast5_ctx *ctx, - struct skcipher_walk *walk) -{ - const unsigned int bsize = CAST5_BLOCK_SIZE; - unsigned int nbytes = walk->nbytes; - u64 *src = (u64 *)walk->src.virt.addr; - u64 *dst = (u64 *)walk->dst.virt.addr; - u64 last_iv; - - /* Start of the last block. 
*/ - src += nbytes / bsize - 1; - dst += nbytes / bsize - 1; - - last_iv = *src; - - /* Process multi-block batch */ - if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { - do { - nbytes -= bsize * (CAST5_PARALLEL_BLOCKS - 1); - src -= CAST5_PARALLEL_BLOCKS - 1; - dst -= CAST5_PARALLEL_BLOCKS - 1; - - cast5_cbc_dec_16way(ctx, (u8 *)dst, (u8 *)src); - - nbytes -= bsize; - if (nbytes < bsize) - goto done; - - *dst ^= *(src - 1); - src -= 1; - dst -= 1; - } while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS); - } - - /* Handle leftovers */ - for (;;) { - __cast5_decrypt(ctx, (u8 *)dst, (u8 *)src); - - nbytes -= bsize; - if (nbytes < bsize) - break; - - *dst ^= *(src - 1); - src -= 1; - dst -= 1; - } - -done: - *dst ^= *(u64 *)walk->iv; - *(u64 *)walk->iv = last_iv; - - return nbytes; + CBC_WALK_START(req, CAST5_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(__cast5_encrypt); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); - bool fpu_enabled = false; - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes)) { - fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); - nbytes = __cbc_decrypt(ctx, &walk); - err = skcipher_walk_done(&walk, nbytes); - } - - cast5_fpu_end(fpu_enabled); - return err; -} - -static void ctr_crypt_final(struct skcipher_walk *walk, struct cast5_ctx *ctx) -{ - u8 *ctrblk = walk->iv; - u8 keystream[CAST5_BLOCK_SIZE]; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - unsigned int nbytes = walk->nbytes; - - __cast5_encrypt(ctx, keystream, ctrblk); - crypto_xor_cpy(dst, keystream, src, nbytes); - - crypto_inc(ctrblk, CAST5_BLOCK_SIZE); -} - -static unsigned int __ctr_crypt(struct skcipher_walk *walk, - struct cast5_ctx *ctx) -{ - const unsigned int bsize = CAST5_BLOCK_SIZE; - unsigned int nbytes = walk->nbytes; - u64 *src = (u64 *)walk->src.virt.addr; - u64 *dst = (u64 *)walk->dst.virt.addr; - - /* Process multi-block batch */ - if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { - do { - cast5_ctr_16way(ctx, (u8 *)dst, (u8 *)src, - (__be64 *)walk->iv); - - src += CAST5_PARALLEL_BLOCKS; - dst += CAST5_PARALLEL_BLOCKS; - nbytes -= bsize * CAST5_PARALLEL_BLOCKS; - } while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS); - - if (nbytes < bsize) - goto done; - } - - /* Handle leftovers */ - do { - u64 ctrblk; - - if (dst != src) - *dst = *src; - - ctrblk = *(u64 *)walk->iv; - be64_add_cpu((__be64 *)walk->iv, 1); - - __cast5_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); - *dst ^= ctrblk; - - src += 1; - dst += 1; - nbytes -= bsize; - } while (nbytes >= bsize); - -done: - return nbytes; -} - -static int ctr_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); - bool fpu_enabled = false; - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { - fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); - nbytes = __ctr_crypt(&walk, ctx); - err = skcipher_walk_done(&walk, nbytes); - } - - cast5_fpu_end(fpu_enabled); - - if (walk.nbytes) { - ctr_crypt_final(&walk, ctx); - err = skcipher_walk_done(&walk, 0); - } - - return err; + CBC_WALK_START(req, CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(CAST5_PARALLEL_BLOCKS, cast5_cbc_dec_16way); + 
CBC_DEC_BLOCK(1, __cast5_decrypt); + CBC_WALK_END(); } static struct skcipher_alg cast5_algs[] = { @@ -328,21 +90,6 @@ static struct skcipher_alg cast5_algs[] = { .setkey = cast5_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(cast5)", - .base.cra_driver_name = "__ctr-cast5-avx", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct cast5_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = CAST5_MIN_KEY_SIZE, - .max_keysize = CAST5_MAX_KEY_SIZE, - .ivsize = CAST5_BLOCK_SIZE, - .chunksize = CAST5_BLOCK_SIZE, - .setkey = cast5_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, } }; diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S index 932a3ce32a88..fbddcecc3e3f 100644 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S @@ -212,8 +212,6 @@ .section .rodata.cst16, "aM", @progbits, 16 .align 16 -.Lxts_gf128mul_and_shl1_mask: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 .Lbswap_mask: .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 .Lbswap128_mask: @@ -412,85 +410,3 @@ SYM_FUNC_START(cast6_cbc_dec_8way) FRAME_END ret; SYM_FUNC_END(cast6_cbc_dec_8way) - -SYM_FUNC_START(cast6_ctr_8way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (little endian, 128bit) - */ - FRAME_BEGIN - pushq %r12; - pushq %r15 - - movq %rdi, CTX; - movq %rsi, %r11; - movq %rdx, %r12; - - load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RX, RKR, RKM); - - call __cast6_enc_blk8; - - store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - popq %r15; - popq %r12; - FRAME_END - ret; -SYM_FUNC_END(cast6_ctr_8way) - -SYM_FUNC_START(cast6_xts_enc_8way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - pushq %r15; - - movq %rdi, CTX - movq %rsi, %r11; - - /* regs <= src, dst <= IVs, regs <= regs xor IVs */ - load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, - RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask); - - call __cast6_enc_blk8; - - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - popq %r15; - FRAME_END - ret; -SYM_FUNC_END(cast6_xts_enc_8way) - -SYM_FUNC_START(cast6_xts_dec_8way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - pushq %r15; - - movq %rdi, CTX - movq %rsi, %r11; - - /* regs <= src, dst <= IVs, regs <= regs xor IVs */ - load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, - RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask); - - call __cast6_dec_blk8; - - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - popq %r15; - FRAME_END - ret; -SYM_FUNC_END(cast6_xts_dec_8way) diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 48e0f37796fa..7e2aea372349 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -15,8 +15,8 @@ #include <crypto/algapi.h> #include <crypto/cast6.h> #include <crypto/internal/simd.h> -#include <crypto/xts.h> -#include <asm/crypto/glue_helper.h> + +#include "ecb_cbc_helpers.h" #define CAST6_PARALLEL_BLOCKS 8 @@ -24,13 +24,6 @@ asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void cast6_ecb_dec_8way(const 
void *ctx, u8 *dst, const u8 *src); asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void cast6_ctr_8way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - -asmlinkage void cast6_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -asmlinkage void cast6_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); static int cast6_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -38,172 +31,35 @@ static int cast6_setkey_skcipher(struct crypto_skcipher *tfm, return cast6_setkey(&tfm->base, key, keylen); } -static void cast6_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_encrypt); -} - -static void cast6_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_decrypt); -} - -static void cast6_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblk; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - le128_to_be128(&ctrblk, iv); - le128_inc(iv); - - __cast6_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); - u128_xor(dst, src, (u128 *)&ctrblk); -} - -static const struct common_glue_ctx cast6_enc = { - .num_funcs = 2, - .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .ecb = cast6_ecb_enc_8way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __cast6_encrypt } - } } -}; - -static const struct common_glue_ctx cast6_ctr = { - .num_funcs = 2, - .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .ctr = cast6_ctr_8way } - }, { - .num_blocks = 1, - .fn_u = { .ctr = cast6_crypt_ctr } - } } -}; - -static const struct common_glue_ctx cast6_enc_xts = { - .num_funcs = 2, - .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .xts = cast6_xts_enc_8way } - }, { - .num_blocks = 1, - .fn_u = { .xts = cast6_xts_enc } - } } -}; - -static const struct common_glue_ctx cast6_dec = { - .num_funcs = 2, - .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .ecb = cast6_ecb_dec_8way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __cast6_decrypt } - } } -}; - -static const struct common_glue_ctx cast6_dec_cbc = { - .num_funcs = 2, - .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .cbc = cast6_cbc_dec_8way } - }, { - .num_blocks = 1, - .fn_u = { .cbc = __cast6_decrypt } - } } -}; - -static const struct common_glue_ctx cast6_dec_xts = { - .num_funcs = 2, - .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .xts = cast6_xts_dec_8way } - }, { - .num_blocks = 1, - .fn_u = { .xts = cast6_xts_dec } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&cast6_enc, req); + ECB_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS); + ECB_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_ecb_enc_8way); + ECB_BLOCK(1, __cast6_encrypt); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&cast6_dec, req); + ECB_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS); + ECB_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_ecb_dec_8way); + ECB_BLOCK(1, __cast6_decrypt); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return 
glue_cbc_encrypt_req_128bit(__cast6_encrypt, req); + CBC_WALK_START(req, CAST6_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(__cast6_encrypt); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&cast6_dec_cbc, req); -} - -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&cast6_ctr, req); -} - -struct cast6_xts_ctx { - struct cast6_ctx tweak_ctx; - struct cast6_ctx crypt_ctx; -}; - -static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - /* first half of xts-key is for crypt */ - err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2); - if (err) - return err; - - /* second half of xts-key is for tweak */ - return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); -} - -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&cast6_enc_xts, req, __cast6_encrypt, - &ctx->tweak_ctx, &ctx->crypt_ctx, false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&cast6_dec_xts, req, __cast6_encrypt, - &ctx->tweak_ctx, &ctx->crypt_ctx, true); + CBC_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_cbc_dec_8way); + CBC_DEC_BLOCK(1, __cast6_decrypt); + CBC_WALK_END(); } static struct skcipher_alg cast6_algs[] = { @@ -234,35 +90,6 @@ static struct skcipher_alg cast6_algs[] = { .setkey = cast6_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(cast6)", - .base.cra_driver_name = "__ctr-cast6-avx", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct cast6_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = CAST6_MIN_KEY_SIZE, - .max_keysize = CAST6_MAX_KEY_SIZE, - .ivsize = CAST6_BLOCK_SIZE, - .chunksize = CAST6_BLOCK_SIZE, - .setkey = cast6_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, - }, { - .base.cra_name = "__xts(cast6)", - .base.cra_driver_name = "__xts-cast6-avx", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = CAST6_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct cast6_xts_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * CAST6_MIN_KEY_SIZE, - .max_keysize = 2 * CAST6_MAX_KEY_SIZE, - .ivsize = CAST6_BLOCK_SIZE, - .setkey = xts_cast6_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, }, }; diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c index 89830e531350..e7cb68a3db3b 100644 --- a/arch/x86/crypto/des3_ede_glue.c +++ b/arch/x86/crypto/des3_ede_glue.c @@ -6,8 +6,6 @@ * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> - * CTR part based on code (crypto/ctr.c) by: - * (C) Copyright IBM Corp. 
2007 - Joy Latten <latten@us.ibm.com> */ #include <crypto/algapi.h> @@ -253,94 +251,6 @@ static int cbc_decrypt(struct skcipher_request *req) return err; } -static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx, - struct skcipher_walk *walk) -{ - u8 *ctrblk = walk->iv; - u8 keystream[DES3_EDE_BLOCK_SIZE]; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - unsigned int nbytes = walk->nbytes; - - des3_ede_enc_blk(ctx, keystream, ctrblk); - crypto_xor_cpy(dst, keystream, src, nbytes); - - crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE); -} - -static unsigned int __ctr_crypt(struct des3_ede_x86_ctx *ctx, - struct skcipher_walk *walk) -{ - unsigned int bsize = DES3_EDE_BLOCK_SIZE; - unsigned int nbytes = walk->nbytes; - __be64 *src = (__be64 *)walk->src.virt.addr; - __be64 *dst = (__be64 *)walk->dst.virt.addr; - u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); - __be64 ctrblocks[3]; - - /* Process four block batch */ - if (nbytes >= bsize * 3) { - do { - /* create ctrblks for parallel encrypt */ - ctrblocks[0] = cpu_to_be64(ctrblk++); - ctrblocks[1] = cpu_to_be64(ctrblk++); - ctrblocks[2] = cpu_to_be64(ctrblk++); - - des3_ede_enc_blk_3way(ctx, (u8 *)ctrblocks, - (u8 *)ctrblocks); - - dst[0] = src[0] ^ ctrblocks[0]; - dst[1] = src[1] ^ ctrblocks[1]; - dst[2] = src[2] ^ ctrblocks[2]; - - src += 3; - dst += 3; - } while ((nbytes -= bsize * 3) >= bsize * 3); - - if (nbytes < bsize) - goto done; - } - - /* Handle leftovers */ - do { - ctrblocks[0] = cpu_to_be64(ctrblk++); - - des3_ede_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks); - - dst[0] = src[0] ^ ctrblocks[0]; - - src += 1; - dst += 1; - } while ((nbytes -= bsize) >= bsize); - -done: - *(__be64 *)walk->iv = cpu_to_be64(ctrblk); - return nbytes; -} - -static int ctr_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) { - nbytes = __ctr_crypt(ctx, &walk); - err = skcipher_walk_done(&walk, nbytes); - } - - if (nbytes) { - ctr_crypt_final(ctx, &walk); - err = skcipher_walk_done(&walk, 0); - } - - return err; -} - static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { @@ -428,20 +338,6 @@ static struct skcipher_alg des3_ede_skciphers[] = { .setkey = des3_ede_x86_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "ctr(des3_ede)", - .base.cra_driver_name = "ctr-des3_ede-asm", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .chunksize = DES3_EDE_BLOCK_SIZE, - .setkey = des3_ede_x86_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, } }; diff --git a/arch/x86/crypto/ecb_cbc_helpers.h b/arch/x86/crypto/ecb_cbc_helpers.h new file mode 100644 index 000000000000..eaa15c7b29d6 --- /dev/null +++ b/arch/x86/crypto/ecb_cbc_helpers.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _CRYPTO_ECB_CBC_HELPER_H +#define _CRYPTO_ECB_CBC_HELPER_H + +#include <crypto/internal/skcipher.h> +#include <asm/fpu/api.h> + +/* + * Mode helpers to instantiate parameterized skcipher ECB/CBC modes without + * having to rely on indirect calls and retpolines. 
+ */ + +#define ECB_WALK_START(req, bsize, fpu_blocks) do { \ + void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); \ + const int __bsize = (bsize); \ + struct skcipher_walk walk; \ + int err = skcipher_walk_virt(&walk, (req), false); \ + while (walk.nbytes > 0) { \ + unsigned int nbytes = walk.nbytes; \ + bool do_fpu = (fpu_blocks) != -1 && \ + nbytes >= (fpu_blocks) * __bsize; \ + const u8 *src = walk.src.virt.addr; \ + u8 *dst = walk.dst.virt.addr; \ + u8 __maybe_unused buf[(bsize)]; \ + if (do_fpu) kernel_fpu_begin() + +#define CBC_WALK_START(req, bsize, fpu_blocks) \ + ECB_WALK_START(req, bsize, fpu_blocks) + +#define ECB_WALK_ADVANCE(blocks) do { \ + dst += (blocks) * __bsize; \ + src += (blocks) * __bsize; \ + nbytes -= (blocks) * __bsize; \ +} while (0) + +#define ECB_BLOCK(blocks, func) do { \ + while (nbytes >= (blocks) * __bsize) { \ + (func)(ctx, dst, src); \ + ECB_WALK_ADVANCE(blocks); \ + } \ +} while (0) + +#define CBC_ENC_BLOCK(func) do { \ + const u8 *__iv = walk.iv; \ + while (nbytes >= __bsize) { \ + crypto_xor_cpy(dst, src, __iv, __bsize); \ + (func)(ctx, dst, dst); \ + __iv = dst; \ + ECB_WALK_ADVANCE(1); \ + } \ + memcpy(walk.iv, __iv, __bsize); \ +} while (0) + +#define CBC_DEC_BLOCK(blocks, func) do { \ + while (nbytes >= (blocks) * __bsize) { \ + const u8 *__iv = src + ((blocks) - 1) * __bsize; \ + if (dst == src) \ + __iv = memcpy(buf, __iv, __bsize); \ + (func)(ctx, dst, src); \ + crypto_xor(dst, walk.iv, __bsize); \ + memcpy(walk.iv, __iv, __bsize); \ + ECB_WALK_ADVANCE(blocks); \ + } \ +} while (0) + +#define ECB_WALK_END() \ + if (do_fpu) kernel_fpu_end(); \ + err = skcipher_walk_done(&walk, nbytes); \ + } \ + return err; \ +} while (0) + +#define CBC_WALK_END() ECB_WALK_END() + +#endif diff --git a/arch/x86/crypto/glue_helper-asm-avx.S b/arch/x86/crypto/glue_helper-asm-avx.S index d08fc575ef7f..3da385271227 100644 --- a/arch/x86/crypto/glue_helper-asm-avx.S +++ b/arch/x86/crypto/glue_helper-asm-avx.S @@ -34,107 +34,3 @@ vpxor (5*16)(src), x6, x6; \ vpxor (6*16)(src), x7, x7; \ store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7); - -#define inc_le128(x, minus_one, tmp) \ - vpcmpeqq minus_one, x, tmp; \ - vpsubq minus_one, x, x; \ - vpslldq $8, tmp, tmp; \ - vpsubq tmp, x, x; - -#define load_ctr_8way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2) \ - vpcmpeqd t0, t0, t0; \ - vpsrldq $8, t0, t0; /* low: -1, high: 0 */ \ - vmovdqa bswap, t1; \ - \ - /* load IV and byteswap */ \ - vmovdqu (iv), x7; \ - vpshufb t1, x7, x0; \ - \ - /* construct IVs */ \ - inc_le128(x7, t0, t2); \ - vpshufb t1, x7, x1; \ - inc_le128(x7, t0, t2); \ - vpshufb t1, x7, x2; \ - inc_le128(x7, t0, t2); \ - vpshufb t1, x7, x3; \ - inc_le128(x7, t0, t2); \ - vpshufb t1, x7, x4; \ - inc_le128(x7, t0, t2); \ - vpshufb t1, x7, x5; \ - inc_le128(x7, t0, t2); \ - vpshufb t1, x7, x6; \ - inc_le128(x7, t0, t2); \ - vmovdqa x7, t2; \ - vpshufb t1, x7, x7; \ - inc_le128(t2, t0, t1); \ - vmovdqu t2, (iv); - -#define store_ctr_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \ - vpxor (0*16)(src), x0, x0; \ - vpxor (1*16)(src), x1, x1; \ - vpxor (2*16)(src), x2, x2; \ - vpxor (3*16)(src), x3, x3; \ - vpxor (4*16)(src), x4, x4; \ - vpxor (5*16)(src), x5, x5; \ - vpxor (6*16)(src), x6, x6; \ - vpxor (7*16)(src), x7, x7; \ - store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7); - -#define gf128mul_x_ble(iv, mask, tmp) \ - vpsrad $31, iv, tmp; \ - vpaddq iv, iv, iv; \ - vpshufd $0x13, tmp, tmp; \ - vpand mask, tmp, tmp; \ - vpxor tmp, iv, iv; - -#define load_xts_8way(iv, src, dst, x0, x1, x2, x3, x4, 
x5, x6, x7, tiv, t0, \ - t1, xts_gf128mul_and_shl1_mask) \ - vmovdqa xts_gf128mul_and_shl1_mask, t0; \ - \ - /* load IV */ \ - vmovdqu (iv), tiv; \ - vpxor (0*16)(src), tiv, x0; \ - vmovdqu tiv, (0*16)(dst); \ - \ - /* construct and store IVs, also xor with source */ \ - gf128mul_x_ble(tiv, t0, t1); \ - vpxor (1*16)(src), tiv, x1; \ - vmovdqu tiv, (1*16)(dst); \ - \ - gf128mul_x_ble(tiv, t0, t1); \ - vpxor (2*16)(src), tiv, x2; \ - vmovdqu tiv, (2*16)(dst); \ - \ - gf128mul_x_ble(tiv, t0, t1); \ - vpxor (3*16)(src), tiv, x3; \ - vmovdqu tiv, (3*16)(dst); \ - \ - gf128mul_x_ble(tiv, t0, t1); \ - vpxor (4*16)(src), tiv, x4; \ - vmovdqu tiv, (4*16)(dst); \ - \ - gf128mul_x_ble(tiv, t0, t1); \ - vpxor (5*16)(src), tiv, x5; \ - vmovdqu tiv, (5*16)(dst); \ - \ - gf128mul_x_ble(tiv, t0, t1); \ - vpxor (6*16)(src), tiv, x6; \ - vmovdqu tiv, (6*16)(dst); \ - \ - gf128mul_x_ble(tiv, t0, t1); \ - vpxor (7*16)(src), tiv, x7; \ - vmovdqu tiv, (7*16)(dst); \ - \ - gf128mul_x_ble(tiv, t0, t1); \ - vmovdqu tiv, (iv); - -#define store_xts_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \ - vpxor (0*16)(dst), x0, x0; \ - vpxor (1*16)(dst), x1, x1; \ - vpxor (2*16)(dst), x2, x2; \ - vpxor (3*16)(dst), x3, x3; \ - vpxor (4*16)(dst), x4, x4; \ - vpxor (5*16)(dst), x5, x5; \ - vpxor (6*16)(dst), x6, x6; \ - vpxor (7*16)(dst), x7, x7; \ - store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7); diff --git a/arch/x86/crypto/glue_helper-asm-avx2.S b/arch/x86/crypto/glue_helper-asm-avx2.S index d84508c85c13..c77e9049431f 100644 --- a/arch/x86/crypto/glue_helper-asm-avx2.S +++ b/arch/x86/crypto/glue_helper-asm-avx2.S @@ -37,139 +37,3 @@ vpxor (5*32+16)(src), x6, x6; \ vpxor (6*32+16)(src), x7, x7; \ store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7); - -#define inc_le128(x, minus_one, tmp) \ - vpcmpeqq minus_one, x, tmp; \ - vpsubq minus_one, x, x; \ - vpslldq $8, tmp, tmp; \ - vpsubq tmp, x, x; - -#define add2_le128(x, minus_one, minus_two, tmp1, tmp2) \ - vpcmpeqq minus_one, x, tmp1; \ - vpcmpeqq minus_two, x, tmp2; \ - vpsubq minus_two, x, x; \ - vpor tmp2, tmp1, tmp1; \ - vpslldq $8, tmp1, tmp1; \ - vpsubq tmp1, x, x; - -#define load_ctr_16way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t0x, t1, \ - t1x, t2, t2x, t3, t3x, t4, t5) \ - vpcmpeqd t0, t0, t0; \ - vpsrldq $8, t0, t0; /* ab: -1:0 ; cd: -1:0 */ \ - vpaddq t0, t0, t4; /* ab: -2:0 ; cd: -2:0 */\ - \ - /* load IV and byteswap */ \ - vmovdqu (iv), t2x; \ - vmovdqa t2x, t3x; \ - inc_le128(t2x, t0x, t1x); \ - vbroadcasti128 bswap, t1; \ - vinserti128 $1, t2x, t3, t2; /* ab: le0 ; cd: le1 */ \ - vpshufb t1, t2, x0; \ - \ - /* construct IVs */ \ - add2_le128(t2, t0, t4, t3, t5); /* ab: le2 ; cd: le3 */ \ - vpshufb t1, t2, x1; \ - add2_le128(t2, t0, t4, t3, t5); \ - vpshufb t1, t2, x2; \ - add2_le128(t2, t0, t4, t3, t5); \ - vpshufb t1, t2, x3; \ - add2_le128(t2, t0, t4, t3, t5); \ - vpshufb t1, t2, x4; \ - add2_le128(t2, t0, t4, t3, t5); \ - vpshufb t1, t2, x5; \ - add2_le128(t2, t0, t4, t3, t5); \ - vpshufb t1, t2, x6; \ - add2_le128(t2, t0, t4, t3, t5); \ - vpshufb t1, t2, x7; \ - vextracti128 $1, t2, t2x; \ - inc_le128(t2x, t0x, t3x); \ - vmovdqu t2x, (iv); - -#define store_ctr_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \ - vpxor (0*32)(src), x0, x0; \ - vpxor (1*32)(src), x1, x1; \ - vpxor (2*32)(src), x2, x2; \ - vpxor (3*32)(src), x3, x3; \ - vpxor (4*32)(src), x4, x4; \ - vpxor (5*32)(src), x5, x5; \ - vpxor (6*32)(src), x6, x6; \ - vpxor (7*32)(src), x7, x7; \ - store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7); - -#define gf128mul_x_ble(iv, mask, tmp) \ - 
vpsrad $31, iv, tmp; \ - vpaddq iv, iv, iv; \ - vpshufd $0x13, tmp, tmp; \ - vpand mask, tmp, tmp; \ - vpxor tmp, iv, iv; - -#define gf128mul_x2_ble(iv, mask1, mask2, tmp0, tmp1) \ - vpsrad $31, iv, tmp0; \ - vpaddq iv, iv, tmp1; \ - vpsllq $2, iv, iv; \ - vpshufd $0x13, tmp0, tmp0; \ - vpsrad $31, tmp1, tmp1; \ - vpand mask2, tmp0, tmp0; \ - vpshufd $0x13, tmp1, tmp1; \ - vpxor tmp0, iv, iv; \ - vpand mask1, tmp1, tmp1; \ - vpxor tmp1, iv, iv; - -#define load_xts_16way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, \ - tivx, t0, t0x, t1, t1x, t2, t2x, t3, \ - xts_gf128mul_and_shl1_mask_0, \ - xts_gf128mul_and_shl1_mask_1) \ - vbroadcasti128 xts_gf128mul_and_shl1_mask_0, t1; \ - \ - /* load IV and construct second IV */ \ - vmovdqu (iv), tivx; \ - vmovdqa tivx, t0x; \ - gf128mul_x_ble(tivx, t1x, t2x); \ - vbroadcasti128 xts_gf128mul_and_shl1_mask_1, t2; \ - vinserti128 $1, tivx, t0, tiv; \ - vpxor (0*32)(src), tiv, x0; \ - vmovdqu tiv, (0*32)(dst); \ - \ - /* construct and store IVs, also xor with source */ \ - gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ - vpxor (1*32)(src), tiv, x1; \ - vmovdqu tiv, (1*32)(dst); \ - \ - gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ - vpxor (2*32)(src), tiv, x2; \ - vmovdqu tiv, (2*32)(dst); \ - \ - gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ - vpxor (3*32)(src), tiv, x3; \ - vmovdqu tiv, (3*32)(dst); \ - \ - gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ - vpxor (4*32)(src), tiv, x4; \ - vmovdqu tiv, (4*32)(dst); \ - \ - gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ - vpxor (5*32)(src), tiv, x5; \ - vmovdqu tiv, (5*32)(dst); \ - \ - gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ - vpxor (6*32)(src), tiv, x6; \ - vmovdqu tiv, (6*32)(dst); \ - \ - gf128mul_x2_ble(tiv, t1, t2, t0, t3); \ - vpxor (7*32)(src), tiv, x7; \ - vmovdqu tiv, (7*32)(dst); \ - \ - vextracti128 $1, tiv, tivx; \ - gf128mul_x_ble(tivx, t1x, t2x); \ - vmovdqu tivx, (iv); - -#define store_xts_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \ - vpxor (0*32)(dst), x0, x0; \ - vpxor (1*32)(dst), x1, x1; \ - vpxor (2*32)(dst), x2, x2; \ - vpxor (3*32)(dst), x3, x3; \ - vpxor (4*32)(dst), x4, x4; \ - vpxor (5*32)(dst), x5, x5; \ - vpxor (6*32)(dst), x6, x6; \ - vpxor (7*32)(dst), x7, x7; \ - store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7); diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c deleted file mode 100644 index d3d91a0abf88..000000000000 --- a/arch/x86/crypto/glue_helper.c +++ /dev/null @@ -1,381 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Shared glue code for 128bit block ciphers - * - * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> - * - * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: - * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> - * CTR part based on code (crypto/ctr.c) by: - * (C) Copyright IBM Corp. 
2007 - Joy Latten <latten@us.ibm.com> - */ - -#include <linux/module.h> -#include <crypto/b128ops.h> -#include <crypto/gf128mul.h> -#include <crypto/internal/skcipher.h> -#include <crypto/scatterwalk.h> -#include <crypto/xts.h> -#include <asm/crypto/glue_helper.h> - -int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, - struct skcipher_request *req) -{ - void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - const unsigned int bsize = 128 / 8; - struct skcipher_walk walk; - bool fpu_enabled = false; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes)) { - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; - unsigned int func_bytes; - unsigned int i; - - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - &walk, fpu_enabled, nbytes); - for (i = 0; i < gctx->num_funcs; i++) { - func_bytes = bsize * gctx->funcs[i].num_blocks; - - if (nbytes < func_bytes) - continue; - - /* Process multi-block batch */ - do { - gctx->funcs[i].fn_u.ecb(ctx, dst, src); - src += func_bytes; - dst += func_bytes; - nbytes -= func_bytes; - } while (nbytes >= func_bytes); - - if (nbytes < bsize) - break; - } - err = skcipher_walk_done(&walk, nbytes); - } - - glue_fpu_end(fpu_enabled); - return err; -} -EXPORT_SYMBOL_GPL(glue_ecb_req_128bit); - -int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn, - struct skcipher_request *req) -{ - void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - const unsigned int bsize = 128 / 8; - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes)) { - const u128 *src = (u128 *)walk.src.virt.addr; - u128 *dst = (u128 *)walk.dst.virt.addr; - u128 *iv = (u128 *)walk.iv; - - do { - u128_xor(dst, src, iv); - fn(ctx, (u8 *)dst, (u8 *)dst); - iv = dst; - src++; - dst++; - nbytes -= bsize; - } while (nbytes >= bsize); - - *(u128 *)walk.iv = *iv; - err = skcipher_walk_done(&walk, nbytes); - } - return err; -} -EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit); - -int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, - struct skcipher_request *req) -{ - void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - const unsigned int bsize = 128 / 8; - struct skcipher_walk walk; - bool fpu_enabled = false; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes)) { - const u128 *src = walk.src.virt.addr; - u128 *dst = walk.dst.virt.addr; - unsigned int func_bytes, num_blocks; - unsigned int i; - u128 last_iv; - - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - &walk, fpu_enabled, nbytes); - /* Start of the last block. 
*/ - src += nbytes / bsize - 1; - dst += nbytes / bsize - 1; - - last_iv = *src; - - for (i = 0; i < gctx->num_funcs; i++) { - num_blocks = gctx->funcs[i].num_blocks; - func_bytes = bsize * num_blocks; - - if (nbytes < func_bytes) - continue; - - /* Process multi-block batch */ - do { - src -= num_blocks - 1; - dst -= num_blocks - 1; - - gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst, - (const u8 *)src); - - nbytes -= func_bytes; - if (nbytes < bsize) - goto done; - - u128_xor(dst, dst, --src); - dst--; - } while (nbytes >= func_bytes); - } -done: - u128_xor(dst, dst, (u128 *)walk.iv); - *(u128 *)walk.iv = last_iv; - err = skcipher_walk_done(&walk, nbytes); - } - - glue_fpu_end(fpu_enabled); - return err; -} -EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit); - -int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, - struct skcipher_request *req) -{ - void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - const unsigned int bsize = 128 / 8; - struct skcipher_walk walk; - bool fpu_enabled = false; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) >= bsize) { - const u128 *src = walk.src.virt.addr; - u128 *dst = walk.dst.virt.addr; - unsigned int func_bytes, num_blocks; - unsigned int i; - le128 ctrblk; - - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - &walk, fpu_enabled, nbytes); - - be128_to_le128(&ctrblk, (be128 *)walk.iv); - - for (i = 0; i < gctx->num_funcs; i++) { - num_blocks = gctx->funcs[i].num_blocks; - func_bytes = bsize * num_blocks; - - if (nbytes < func_bytes) - continue; - - /* Process multi-block batch */ - do { - gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst, - (const u8 *)src, - &ctrblk); - src += num_blocks; - dst += num_blocks; - nbytes -= func_bytes; - } while (nbytes >= func_bytes); - - if (nbytes < bsize) - break; - } - - le128_to_be128((be128 *)walk.iv, &ctrblk); - err = skcipher_walk_done(&walk, nbytes); - } - - glue_fpu_end(fpu_enabled); - - if (nbytes) { - le128 ctrblk; - u128 tmp; - - be128_to_le128(&ctrblk, (be128 *)walk.iv); - memcpy(&tmp, walk.src.virt.addr, nbytes); - gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp, - (const u8 *)&tmp, - &ctrblk); - memcpy(walk.dst.virt.addr, &tmp, nbytes); - le128_to_be128((be128 *)walk.iv, &ctrblk); - - err = skcipher_walk_done(&walk, 0); - } - - return err; -} -EXPORT_SYMBOL_GPL(glue_ctr_req_128bit); - -static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx, - void *ctx, - struct skcipher_walk *walk) -{ - const unsigned int bsize = 128 / 8; - unsigned int nbytes = walk->nbytes; - u128 *src = walk->src.virt.addr; - u128 *dst = walk->dst.virt.addr; - unsigned int num_blocks, func_bytes; - unsigned int i; - - /* Process multi-block batch */ - for (i = 0; i < gctx->num_funcs; i++) { - num_blocks = gctx->funcs[i].num_blocks; - func_bytes = bsize * num_blocks; - - if (nbytes >= func_bytes) { - do { - gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst, - (const u8 *)src, - walk->iv); - - src += num_blocks; - dst += num_blocks; - nbytes -= func_bytes; - } while (nbytes >= func_bytes); - - if (nbytes < bsize) - goto done; - } - } - -done: - return nbytes; -} - -int glue_xts_req_128bit(const struct common_glue_ctx *gctx, - struct skcipher_request *req, - common_glue_func_t tweak_fn, void *tweak_ctx, - void *crypt_ctx, bool decrypt) -{ - const bool cts = (req->cryptlen % XTS_BLOCK_SIZE); - const unsigned int bsize = 128 / 8; - struct skcipher_request subreq; - struct skcipher_walk walk; - bool fpu_enabled = false; - unsigned int nbytes, 
tail; - int err; - - if (req->cryptlen < XTS_BLOCK_SIZE) - return -EINVAL; - - if (unlikely(cts)) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - - tail = req->cryptlen % XTS_BLOCK_SIZE + XTS_BLOCK_SIZE; - - skcipher_request_set_tfm(&subreq, tfm); - skcipher_request_set_callback(&subreq, - crypto_skcipher_get_flags(tfm), - NULL, NULL); - skcipher_request_set_crypt(&subreq, req->src, req->dst, - req->cryptlen - tail, req->iv); - req = &subreq; - } - - err = skcipher_walk_virt(&walk, req, false); - nbytes = walk.nbytes; - if (err) - return err; - - /* set minimum length to bsize, for tweak_fn */ - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - &walk, fpu_enabled, - nbytes < bsize ? bsize : nbytes); - - /* calculate first value of T */ - tweak_fn(tweak_ctx, walk.iv, walk.iv); - - while (nbytes) { - nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk); - - err = skcipher_walk_done(&walk, nbytes); - nbytes = walk.nbytes; - } - - if (unlikely(cts)) { - u8 *next_tweak, *final_tweak = req->iv; - struct scatterlist *src, *dst; - struct scatterlist s[2], d[2]; - le128 b[2]; - - dst = src = scatterwalk_ffwd(s, req->src, req->cryptlen); - if (req->dst != req->src) - dst = scatterwalk_ffwd(d, req->dst, req->cryptlen); - - if (decrypt) { - next_tweak = memcpy(b, req->iv, XTS_BLOCK_SIZE); - gf128mul_x_ble(b, b); - } else { - next_tweak = req->iv; - } - - skcipher_request_set_crypt(&subreq, src, dst, XTS_BLOCK_SIZE, - next_tweak); - - err = skcipher_walk_virt(&walk, req, false) ?: - skcipher_walk_done(&walk, - __glue_xts_req_128bit(gctx, crypt_ctx, &walk)); - if (err) - goto out; - - scatterwalk_map_and_copy(b, dst, 0, XTS_BLOCK_SIZE, 0); - memcpy(b + 1, b, tail - XTS_BLOCK_SIZE); - scatterwalk_map_and_copy(b, src, XTS_BLOCK_SIZE, - tail - XTS_BLOCK_SIZE, 0); - scatterwalk_map_and_copy(b, dst, 0, tail, 1); - - skcipher_request_set_crypt(&subreq, dst, dst, XTS_BLOCK_SIZE, - final_tweak); - - err = skcipher_walk_virt(&walk, req, false) ?: - skcipher_walk_done(&walk, - __glue_xts_req_128bit(gctx, crypt_ctx, &walk)); - } - -out: - glue_fpu_end(fpu_enabled); - - return err; -} -EXPORT_SYMBOL_GPL(glue_xts_req_128bit); - -void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, const u8 *src, - le128 *iv, common_glue_func_t fn) -{ - le128 ivblk = *iv; - - /* generate next IV */ - gf128mul_x_ble(iv, &ivblk); - - /* CC <- T xor C */ - u128_xor((u128 *)dst, (const u128 *)src, (u128 *)&ivblk); - - /* PP <- D(Key2,CC) */ - fn(ctx, dst, dst); - - /* P <- T xor PP */ - u128_xor((u128 *)dst, (u128 *)dst, (u128 *)&ivblk); -} -EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one); - -MODULE_LICENSE("GPL"); diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S index ba9e4c1e7f5c..b7ee24df7fba 100644 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S @@ -18,10 +18,6 @@ .align 16 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 -.section .rodata.cst16.xts_gf128mul_and_shl1_mask, "aM", @progbits, 16 -.align 16 -.Lxts_gf128mul_and_shl1_mask: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 .text @@ -715,67 +711,3 @@ SYM_FUNC_START(serpent_cbc_dec_8way_avx) FRAME_END ret; SYM_FUNC_END(serpent_cbc_dec_8way_avx) - -SYM_FUNC_START(serpent_ctr_8way_avx) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (little endian, 128bit) - */ - FRAME_BEGIN - - load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RK0, RK1, RK2); - - call 
__serpent_enc_blk8_avx; - - store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - FRAME_END - ret; -SYM_FUNC_END(serpent_ctr_8way_avx) - -SYM_FUNC_START(serpent_xts_enc_8way_avx) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - - /* regs <= src, dst <= IVs, regs <= regs xor IVs */ - load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, - RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask); - - call __serpent_enc_blk8_avx; - - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - FRAME_END - ret; -SYM_FUNC_END(serpent_xts_enc_8way_avx) - -SYM_FUNC_START(serpent_xts_dec_8way_avx) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - - /* regs <= src, dst <= IVs, regs <= regs xor IVs */ - load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, - RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask); - - call __serpent_dec_blk8_avx; - - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); - - FRAME_END - ret; -SYM_FUNC_END(serpent_xts_dec_8way_avx) diff --git a/arch/x86/crypto/serpent-avx.h b/arch/x86/crypto/serpent-avx.h new file mode 100644 index 000000000000..23f3361a0e72 --- /dev/null +++ b/arch/x86/crypto/serpent-avx.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ASM_X86_SERPENT_AVX_H +#define ASM_X86_SERPENT_AVX_H + +#include <crypto/b128ops.h> +#include <crypto/serpent.h> +#include <linux/types.h> + +struct crypto_skcipher; + +#define SERPENT_PARALLEL_BLOCKS 8 + +asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, + const u8 *src); +asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, + const u8 *src); + +asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, + const u8 *src); + +#endif diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S index c9648aeae705..9161b6e441f3 100644 --- a/arch/x86/crypto/serpent-avx2-asm_64.S +++ b/arch/x86/crypto/serpent-avx2-asm_64.S @@ -20,16 +20,6 @@ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 -.section .rodata.cst16.xts_gf128mul_and_shl1_mask_0, "aM", @progbits, 16 -.align 16 -.Lxts_gf128mul_and_shl1_mask_0: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 - -.section .rodata.cst16.xts_gf128mul_and_shl1_mask_1, "aM", @progbits, 16 -.align 16 -.Lxts_gf128mul_and_shl1_mask_1: - .byte 0x0e, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 - .text #define CTX %rdi @@ -734,80 +724,3 @@ SYM_FUNC_START(serpent_cbc_dec_16way) FRAME_END ret; SYM_FUNC_END(serpent_cbc_dec_16way) - -SYM_FUNC_START(serpent_ctr_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (little endian, 128bit) - */ - FRAME_BEGIN - - vzeroupper; - - load_ctr_16way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT, - tp); - - call __serpent_enc_blk16; - - store_ctr_16way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - vzeroupper; - - FRAME_END - ret; -SYM_FUNC_END(serpent_ctr_16way) - -SYM_FUNC_START(serpent_xts_enc_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - - vzeroupper; - - load_xts_16way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, 
RNOT, - .Lxts_gf128mul_and_shl1_mask_0, - .Lxts_gf128mul_and_shl1_mask_1); - - call __serpent_enc_blk16; - - store_xts_16way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - vzeroupper; - - FRAME_END - ret; -SYM_FUNC_END(serpent_xts_enc_16way) - -SYM_FUNC_START(serpent_xts_dec_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - - vzeroupper; - - load_xts_16way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT, - .Lxts_gf128mul_and_shl1_mask_0, - .Lxts_gf128mul_and_shl1_mask_1); - - call __serpent_dec_blk16; - - store_xts_16way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); - - vzeroupper; - - FRAME_END - ret; -SYM_FUNC_END(serpent_xts_dec_16way) diff --git a/arch/x86/include/asm/crypto/serpent-sse2.h b/arch/x86/crypto/serpent-sse2.h index 860ca248914b..860ca248914b 100644 --- a/arch/x86/include/asm/crypto/serpent-sse2.h +++ b/arch/x86/crypto/serpent-sse2.h diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c index f973ace44ad3..ccf0b5fa4933 100644 --- a/arch/x86/crypto/serpent_avx2_glue.c +++ b/arch/x86/crypto/serpent_avx2_glue.c @@ -12,9 +12,9 @@ #include <crypto/algapi.h> #include <crypto/internal/simd.h> #include <crypto/serpent.h> -#include <crypto/xts.h> -#include <asm/crypto/glue_helper.h> -#include <asm/crypto/serpent-avx.h> + +#include "serpent-avx.h" +#include "ecb_cbc_helpers.h" #define SERPENT_AVX2_PARALLEL_BLOCKS 16 @@ -23,158 +23,44 @@ asmlinkage void serpent_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void serpent_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void serpent_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_ctr_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -asmlinkage void serpent_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -asmlinkage void serpent_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen); } -static const struct common_glue_ctx serpent_enc = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .ecb = serpent_ecb_enc_16way } - }, { - .num_blocks = 8, - .fn_u = { .ecb = serpent_ecb_enc_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __serpent_encrypt } - } } -}; - -static const struct common_glue_ctx serpent_ctr = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .ctr = serpent_ctr_16way } - }, { - .num_blocks = 8, - .fn_u = { .ctr = serpent_ctr_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .ctr = __serpent_crypt_ctr } - } } -}; - -static const struct common_glue_ctx serpent_enc_xts = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .xts = serpent_xts_enc_16way } - }, { - .num_blocks = 8, - .fn_u = { .xts = serpent_xts_enc_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .xts = serpent_xts_enc } - } } -}; - -static const struct common_glue_ctx serpent_dec = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .ecb = serpent_ecb_dec_16way } - }, { - .num_blocks = 8, - .fn_u = { .ecb = serpent_ecb_dec_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __serpent_decrypt } - } } -}; - -static 
const struct common_glue_ctx serpent_dec_cbc = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .cbc = serpent_cbc_dec_16way } - }, { - .num_blocks = 8, - .fn_u = { .cbc = serpent_cbc_dec_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .cbc = __serpent_decrypt } - } } -}; - -static const struct common_glue_ctx serpent_dec_xts = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .xts = serpent_xts_dec_16way } - }, { - .num_blocks = 8, - .fn_u = { .xts = serpent_xts_dec_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .xts = serpent_xts_dec } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&serpent_enc, req); + ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + ECB_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_ecb_enc_16way); + ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_enc_8way_avx); + ECB_BLOCK(1, __serpent_encrypt); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&serpent_dec, req); + ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + ECB_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_ecb_dec_16way); + ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_dec_8way_avx); + ECB_BLOCK(1, __serpent_decrypt); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); + CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(__serpent_encrypt); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req); -} - -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&serpent_ctr, req); -} - -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&serpent_enc_xts, req, - __serpent_encrypt, &ctx->tweak_ctx, - &ctx->crypt_ctx, false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&serpent_dec_xts, req, - __serpent_encrypt, &ctx->tweak_ctx, - &ctx->crypt_ctx, true); + CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_cbc_dec_16way); + CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_cbc_dec_8way_avx); + CBC_DEC_BLOCK(1, __serpent_decrypt); + CBC_WALK_END(); } static struct skcipher_alg serpent_algs[] = { @@ -205,35 +91,6 @@ static struct skcipher_alg serpent_algs[] = { .setkey = serpent_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(serpent)", - .base.cra_driver_name = "__ctr-serpent-avx2", - .base.cra_priority = 600, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct serpent_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = SERPENT_MIN_KEY_SIZE, - .max_keysize = SERPENT_MAX_KEY_SIZE, - .ivsize = SERPENT_BLOCK_SIZE, - .chunksize = SERPENT_BLOCK_SIZE, - .setkey = serpent_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, - }, { - .base.cra_name = "__xts(serpent)", - .base.cra_driver_name = "__xts-serpent-avx2", - .base.cra_priority = 600, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 
SERPENT_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct serpent_xts_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SERPENT_MIN_KEY_SIZE, - .max_keysize = 2 * SERPENT_MAX_KEY_SIZE, - .ivsize = SERPENT_BLOCK_SIZE, - .setkey = xts_serpent_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, }, }; diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index 7806d1cbe854..6c248e1ea4ef 100644 --- a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c @@ -15,9 +15,9 @@ #include <crypto/algapi.h> #include <crypto/internal/simd.h> #include <crypto/serpent.h> -#include <crypto/xts.h> -#include <asm/crypto/glue_helper.h> -#include <asm/crypto/serpent-avx.h> + +#include "serpent-avx.h" +#include "ecb_cbc_helpers.h" /* 8-way parallel cipher functions */ asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, @@ -32,191 +32,41 @@ asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx); -asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx); - -asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst, - const u8 *src, le128 *iv); -EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx); - -asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst, - const u8 *src, le128 *iv); -EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx); - -void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblk; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - le128_to_be128(&ctrblk, iv); - le128_inc(iv); - - __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); - u128_xor(dst, src, (u128 *)&ctrblk); -} -EXPORT_SYMBOL_GPL(__serpent_crypt_ctr); - -void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_encrypt); -} -EXPORT_SYMBOL_GPL(serpent_xts_enc); - -void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_decrypt); -} -EXPORT_SYMBOL_GPL(serpent_xts_dec); - static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen); } -int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - /* first half of xts-key is for crypt */ - err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2); - if (err) - return err; - - /* second half of xts-key is for tweak */ - return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); -} -EXPORT_SYMBOL_GPL(xts_serpent_setkey); - -static const struct common_glue_ctx serpent_enc = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = serpent_ecb_enc_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __serpent_encrypt } - } } -}; - -static const struct common_glue_ctx serpent_ctr = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ctr = serpent_ctr_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .ctr = __serpent_crypt_ctr } - } } -}; - -static const struct common_glue_ctx serpent_enc_xts = { - .num_funcs = 2, 
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .xts = serpent_xts_enc_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .xts = serpent_xts_enc } - } } -}; - -static const struct common_glue_ctx serpent_dec = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = serpent_ecb_dec_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __serpent_decrypt } - } } -}; - -static const struct common_glue_ctx serpent_dec_cbc = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .cbc = serpent_cbc_dec_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .cbc = __serpent_decrypt } - } } -}; - -static const struct common_glue_ctx serpent_dec_xts = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .xts = serpent_xts_dec_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .xts = serpent_xts_dec } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&serpent_enc, req); + ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_enc_8way_avx); + ECB_BLOCK(1, __serpent_encrypt); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&serpent_dec, req); + ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_dec_8way_avx); + ECB_BLOCK(1, __serpent_decrypt); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); + CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(__serpent_encrypt); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req); -} - -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&serpent_ctr, req); -} - -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&serpent_enc_xts, req, - __serpent_encrypt, &ctx->tweak_ctx, - &ctx->crypt_ctx, false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&serpent_dec_xts, req, - __serpent_encrypt, &ctx->tweak_ctx, - &ctx->crypt_ctx, true); + CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_cbc_dec_8way_avx); + CBC_DEC_BLOCK(1, __serpent_decrypt); + CBC_WALK_END(); } static struct skcipher_alg serpent_algs[] = { @@ -247,35 +97,6 @@ static struct skcipher_alg serpent_algs[] = { .setkey = serpent_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(serpent)", - .base.cra_driver_name = "__ctr-serpent-avx", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct serpent_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = SERPENT_MIN_KEY_SIZE, - .max_keysize = SERPENT_MAX_KEY_SIZE, - .ivsize = SERPENT_BLOCK_SIZE, - .chunksize = 
SERPENT_BLOCK_SIZE, - .setkey = serpent_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, - }, { - .base.cra_name = "__xts(serpent)", - .base.cra_driver_name = "__xts-serpent-avx", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = SERPENT_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct serpent_xts_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SERPENT_MIN_KEY_SIZE, - .max_keysize = 2 * SERPENT_MAX_KEY_SIZE, - .ivsize = SERPENT_BLOCK_SIZE, - .setkey = xts_serpent_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, }, }; diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c index 4fed8d26b91a..d78f37e9b2cf 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c @@ -10,8 +10,6 @@ * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> - * CTR part based on code (crypto/ctr.c) by: - * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> */ #include <linux/module.h> @@ -22,8 +20,9 @@ #include <crypto/b128ops.h> #include <crypto/internal/simd.h> #include <crypto/serpent.h> -#include <asm/crypto/serpent-sse2.h> -#include <asm/crypto/glue_helper.h> + +#include "serpent-sse2.h" +#include "ecb_cbc_helpers.h" static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -31,130 +30,46 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen); } -static void serpent_decrypt_cbc_xway(const void *ctx, u8 *d, const u8 *s) -{ - u128 ivs[SERPENT_PARALLEL_BLOCKS - 1]; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - unsigned int j; - - for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) - ivs[j] = src[j]; - - serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src); - - for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) - u128_xor(dst + (j + 1), dst + (j + 1), ivs + j); -} - -static void serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblk; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - le128_to_be128(&ctrblk, iv); - le128_inc(iv); - - __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); - u128_xor(dst, src, (u128 *)&ctrblk); -} - -static void serpent_crypt_ctr_xway(const void *ctx, u8 *d, const u8 *s, - le128 *iv) +static void serpent_decrypt_cbc_xway(const void *ctx, u8 *dst, const u8 *src) { - be128 ctrblks[SERPENT_PARALLEL_BLOCKS]; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - unsigned int i; - - for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) { - if (dst != src) - dst[i] = src[i]; - - le128_to_be128(&ctrblks[i], iv); - le128_inc(iv); - } + u8 buf[SERPENT_PARALLEL_BLOCKS - 1][SERPENT_BLOCK_SIZE]; + const u8 *s = src; - serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks); + if (dst == src) + s = memcpy(buf, src, sizeof(buf)); + serpent_dec_blk_xway(ctx, dst, src); + crypto_xor(dst + SERPENT_BLOCK_SIZE, s, sizeof(buf)); } -static const struct common_glue_ctx serpent_enc = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = serpent_enc_blk_xway } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __serpent_encrypt } - } } -}; - -static const struct common_glue_ctx serpent_ctr = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ctr = 
serpent_crypt_ctr_xway } - }, { - .num_blocks = 1, - .fn_u = { .ctr = serpent_crypt_ctr } - } } -}; - -static const struct common_glue_ctx serpent_dec = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = serpent_dec_blk_xway } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __serpent_decrypt } - } } -}; - -static const struct common_glue_ctx serpent_dec_cbc = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .cbc = serpent_decrypt_cbc_xway } - }, { - .num_blocks = 1, - .fn_u = { .cbc = __serpent_decrypt } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&serpent_enc, req); + ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_enc_blk_xway); + ECB_BLOCK(1, __serpent_encrypt); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&serpent_dec, req); + ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_dec_blk_xway); + ECB_BLOCK(1, __serpent_decrypt); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(__serpent_encrypt, - req); + CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(__serpent_encrypt); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req); -} - -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&serpent_ctr, req); + CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_decrypt_cbc_xway); + CBC_DEC_BLOCK(1, __serpent_decrypt); + CBC_WALK_END(); } static struct skcipher_alg serpent_algs[] = { @@ -185,21 +100,6 @@ static struct skcipher_alg serpent_algs[] = { .setkey = serpent_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(serpent)", - .base.cra_driver_name = "__ctr-serpent-sse2", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct serpent_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = SERPENT_MIN_KEY_SIZE, - .max_keysize = SERPENT_MAX_KEY_SIZE, - .ivsize = SERPENT_BLOCK_SIZE, - .chunksize = SERPENT_BLOCK_SIZE, - .setkey = serpent_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, }, }; diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S index a5151393bb2f..37e63b3c664e 100644 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S @@ -19,11 +19,6 @@ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 -.section .rodata.cst16.xts_gf128mul_and_shl1_mask, "aM", @progbits, 16 -.align 16 -.Lxts_gf128mul_and_shl1_mask: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 - .text /* structure of crypto context */ @@ -379,78 +374,3 @@ SYM_FUNC_START(twofish_cbc_dec_8way) FRAME_END ret; SYM_FUNC_END(twofish_cbc_dec_8way) - -SYM_FUNC_START(twofish_ctr_8way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (little endian, 128bit) - */ - FRAME_BEGIN - - pushq %r12; - - movq %rsi, %r11; - movq %rdx, %r12; - - load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, 
RB2, RC2, - RD2, RX0, RX1, RY0); - - call __twofish_enc_blk8; - - store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); - - popq %r12; - - FRAME_END - ret; -SYM_FUNC_END(twofish_ctr_8way) - -SYM_FUNC_START(twofish_xts_enc_8way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - - movq %rsi, %r11; - - /* regs <= src, dst <= IVs, regs <= regs xor IVs */ - load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, - RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask); - - call __twofish_enc_blk8; - - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); - - FRAME_END - ret; -SYM_FUNC_END(twofish_xts_enc_8way) - -SYM_FUNC_START(twofish_xts_dec_8way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - - movq %rsi, %r11; - - /* regs <= src, dst <= IVs, regs <= regs xor IVs */ - load_xts_8way(%rcx, %rdx, %rsi, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2, - RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask); - - call __twofish_dec_blk8; - - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - FRAME_END - ret; -SYM_FUNC_END(twofish_xts_dec_8way) diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/crypto/twofish.h index 2c377a8042e1..12df400e6d53 100644 --- a/arch/x86/include/asm/crypto/twofish.h +++ b/arch/x86/crypto/twofish.h @@ -17,9 +17,5 @@ asmlinkage void twofish_dec_blk_3way(const void *ctx, u8 *dst, const u8 *src); /* helpers from twofish_x86_64-3way module */ extern void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src); -extern void twofish_enc_blk_ctr(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -extern void twofish_enc_blk_ctr_3way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); #endif /* ASM_X86_TWOFISH_H */ diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index 2dbc8ce3730e..3eb3440b477a 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -15,9 +15,9 @@ #include <crypto/algapi.h> #include <crypto/internal/simd.h> #include <crypto/twofish.h> -#include <crypto/xts.h> -#include <asm/crypto/glue_helper.h> -#include <asm/crypto/twofish.h> + +#include "twofish.h" +#include "ecb_cbc_helpers.h" #define TWOFISH_PARALLEL_BLOCKS 8 @@ -26,13 +26,6 @@ asmlinkage void twofish_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void twofish_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void twofish_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void twofish_ctr_8way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - -asmlinkage void twofish_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -asmlinkage void twofish_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -45,171 +38,38 @@ static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) __twofish_enc_blk_3way(ctx, dst, src, false); } -static void twofish_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_enc_blk); -} - -static void twofish_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_dec_blk); -} - -struct twofish_xts_ctx { - struct twofish_ctx tweak_ctx; - 
struct twofish_ctx crypt_ctx; -}; - -static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - /* first half of xts-key is for crypt */ - err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2); - if (err) - return err; - - /* second half of xts-key is for tweak */ - return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); -} - -static const struct common_glue_ctx twofish_enc = { - .num_funcs = 3, - .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .ecb = twofish_ecb_enc_8way } - }, { - .num_blocks = 3, - .fn_u = { .ecb = twofish_enc_blk_3way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = twofish_enc_blk } - } } -}; - -static const struct common_glue_ctx twofish_ctr = { - .num_funcs = 3, - .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .ctr = twofish_ctr_8way } - }, { - .num_blocks = 3, - .fn_u = { .ctr = twofish_enc_blk_ctr_3way } - }, { - .num_blocks = 1, - .fn_u = { .ctr = twofish_enc_blk_ctr } - } } -}; - -static const struct common_glue_ctx twofish_enc_xts = { - .num_funcs = 2, - .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .xts = twofish_xts_enc_8way } - }, { - .num_blocks = 1, - .fn_u = { .xts = twofish_xts_enc } - } } -}; - -static const struct common_glue_ctx twofish_dec = { - .num_funcs = 3, - .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .ecb = twofish_ecb_dec_8way } - }, { - .num_blocks = 3, - .fn_u = { .ecb = twofish_dec_blk_3way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = twofish_dec_blk } - } } -}; - -static const struct common_glue_ctx twofish_dec_cbc = { - .num_funcs = 3, - .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .cbc = twofish_cbc_dec_8way } - }, { - .num_blocks = 3, - .fn_u = { .cbc = twofish_dec_blk_cbc_3way } - }, { - .num_blocks = 1, - .fn_u = { .cbc = twofish_dec_blk } - } } -}; - -static const struct common_glue_ctx twofish_dec_xts = { - .num_funcs = 2, - .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .xts = twofish_xts_dec_8way } - }, { - .num_blocks = 1, - .fn_u = { .xts = twofish_xts_dec } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&twofish_enc, req); + ECB_WALK_START(req, TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS); + ECB_BLOCK(TWOFISH_PARALLEL_BLOCKS, twofish_ecb_enc_8way); + ECB_BLOCK(3, twofish_enc_blk_3way); + ECB_BLOCK(1, twofish_enc_blk); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&twofish_dec, req); + ECB_WALK_START(req, TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS); + ECB_BLOCK(TWOFISH_PARALLEL_BLOCKS, twofish_ecb_dec_8way); + ECB_BLOCK(3, twofish_dec_blk_3way); + ECB_BLOCK(1, twofish_dec_blk); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req); + CBC_WALK_START(req, TF_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(twofish_enc_blk); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req); -} - 
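The conversions in these glue files all follow one pattern: the table-driven common_glue_ctx dispatch is replaced by the ECB_WALK_*/CBC_WALK_* macros from the new ecb_cbc_helpers.h. As a rough sketch of what an ECB_WALK_START/ECB_BLOCK/ECB_WALK_END sequence expands to (assuming the in-tree helpers; the kernel_fpu_begin()/kernel_fpu_end() gating on the fpu_blocks threshold is omitted here):

#include <crypto/internal/skcipher.h>

/* Simplified shape of the expansion, specialized for an 8-way SIMD
 * function plus a single-block fallback. */
static int ecb_crypt_sketch(struct skcipher_request *req, unsigned int bsize,
			    void (*fn8)(const void *ctx, u8 *dst, const u8 *src),
			    void (*fn1)(const void *ctx, u8 *dst, const u8 *src))
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct skcipher_walk walk;
	int err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		/* Each ECB_BLOCK(n, fn) clause becomes one such loop,
		 * consuming n blocks per call while enough remain. */
		while (nbytes >= 8 * bsize) {
			fn8(ctx, dst, src);
			src += 8 * bsize;
			dst += 8 * bsize;
			nbytes -= 8 * bsize;
		}
		while (nbytes >= bsize) {
			fn1(ctx, dst, src);
			src += bsize;
			dst += bsize;
			nbytes -= bsize;
		}
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}

Listing the clauses from widest to narrowest preserves the dispatch order of the old glue tables, whose first funcs entry was required to have the largest num_blocks.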
-static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&twofish_ctr, req); -} - -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&twofish_enc_xts, req, twofish_enc_blk, - &ctx->tweak_ctx, &ctx->crypt_ctx, false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&twofish_dec_xts, req, twofish_enc_blk, - &ctx->tweak_ctx, &ctx->crypt_ctx, true); + CBC_WALK_START(req, TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(TWOFISH_PARALLEL_BLOCKS, twofish_cbc_dec_8way); + CBC_DEC_BLOCK(3, twofish_dec_blk_cbc_3way); + CBC_DEC_BLOCK(1, twofish_dec_blk); + CBC_WALK_END(); } static struct skcipher_alg twofish_algs[] = { @@ -240,35 +100,6 @@ static struct skcipher_alg twofish_algs[] = { .setkey = twofish_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(twofish)", - .base.cra_driver_name = "__ctr-twofish-avx", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct twofish_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = TF_MIN_KEY_SIZE, - .max_keysize = TF_MAX_KEY_SIZE, - .ivsize = TF_BLOCK_SIZE, - .chunksize = TF_BLOCK_SIZE, - .setkey = twofish_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, - }, { - .base.cra_name = "__xts(twofish)", - .base.cra_driver_name = "__xts-twofish-avx", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = TF_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct twofish_xts_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * TF_MIN_KEY_SIZE, - .max_keysize = 2 * TF_MAX_KEY_SIZE, - .ivsize = TF_BLOCK_SIZE, - .setkey = xts_twofish_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, }, }; diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c index 768af6075479..03725696397c 100644 --- a/arch/x86/crypto/twofish_glue_3way.c +++ b/arch/x86/crypto/twofish_glue_3way.c @@ -5,17 +5,16 @@ * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> */ -#include <asm/crypto/glue_helper.h> -#include <asm/crypto/twofish.h> #include <crypto/algapi.h> -#include <crypto/b128ops.h> -#include <crypto/internal/skcipher.h> #include <crypto/twofish.h> #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> +#include "twofish.h" +#include "ecb_cbc_helpers.h" + EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way); EXPORT_SYMBOL_GPL(twofish_dec_blk_3way); @@ -30,143 +29,48 @@ static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) __twofish_enc_blk_3way(ctx, dst, src, false); } -static inline void twofish_enc_blk_xor_3way(const void *ctx, u8 *dst, - const u8 *src) -{ - __twofish_enc_blk_3way(ctx, dst, src, true); -} - -void twofish_dec_blk_cbc_3way(const void *ctx, u8 *d, const u8 *s) +void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src) { - u128 ivs[2]; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - ivs[0] = src[0]; - ivs[1] = src[1]; + u8 buf[2][TF_BLOCK_SIZE]; + const u8 *s = src; - twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); + if (dst == src) + s = memcpy(buf, src, sizeof(buf)); + twofish_dec_blk_3way(ctx, dst, 
src); + crypto_xor(dst + TF_BLOCK_SIZE, s, sizeof(buf)); - u128_xor(&dst[1], &dst[1], &ivs[0]); - u128_xor(&dst[2], &dst[2], &ivs[1]); } EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way); -void twofish_enc_blk_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblk; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - if (dst != src) - *dst = *src; - - le128_to_be128(&ctrblk, iv); - le128_inc(iv); - - twofish_enc_blk(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); - u128_xor(dst, dst, (u128 *)&ctrblk); -} -EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr); - -void twofish_enc_blk_ctr_3way(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblks[3]; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - if (dst != src) { - dst[0] = src[0]; - dst[1] = src[1]; - dst[2] = src[2]; - } - - le128_to_be128(&ctrblks[0], iv); - le128_inc(iv); - le128_to_be128(&ctrblks[1], iv); - le128_inc(iv); - le128_to_be128(&ctrblks[2], iv); - le128_inc(iv); - - twofish_enc_blk_xor_3way(ctx, (u8 *)dst, (u8 *)ctrblks); -} -EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr_3way); - -static const struct common_glue_ctx twofish_enc = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 3, - .fn_u = { .ecb = twofish_enc_blk_3way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = twofish_enc_blk } - } } -}; - -static const struct common_glue_ctx twofish_ctr = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 3, - .fn_u = { .ctr = twofish_enc_blk_ctr_3way } - }, { - .num_blocks = 1, - .fn_u = { .ctr = twofish_enc_blk_ctr } - } } -}; - -static const struct common_glue_ctx twofish_dec = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 3, - .fn_u = { .ecb = twofish_dec_blk_3way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = twofish_dec_blk } - } } -}; - -static const struct common_glue_ctx twofish_dec_cbc = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 3, - .fn_u = { .cbc = twofish_dec_blk_cbc_3way } - }, { - .num_blocks = 1, - .fn_u = { .cbc = twofish_dec_blk } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&twofish_enc, req); + ECB_WALK_START(req, TF_BLOCK_SIZE, -1); + ECB_BLOCK(3, twofish_enc_blk_3way); + ECB_BLOCK(1, twofish_enc_blk); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&twofish_dec, req); + ECB_WALK_START(req, TF_BLOCK_SIZE, -1); + ECB_BLOCK(3, twofish_dec_blk_3way); + ECB_BLOCK(1, twofish_dec_blk); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req); + CBC_WALK_START(req, TF_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(twofish_enc_blk); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req); -} - -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&twofish_ctr, req); + CBC_WALK_START(req, TF_BLOCK_SIZE, -1); + CBC_DEC_BLOCK(3, twofish_dec_blk_cbc_3way); + CBC_DEC_BLOCK(1, twofish_dec_blk); + CBC_WALK_END(); } static struct skcipher_alg tf_skciphers[] = { @@ -195,20 +99,6 @@ static struct skcipher_alg tf_skciphers[] = { .setkey = twofish_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "ctr(twofish)", - .base.cra_driver_name = "ctr-twofish-3way", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct twofish_ctx), - 
.base.cra_module = THIS_MODULE, - .min_keysize = TF_MIN_KEY_SIZE, - .max_keysize = TF_MAX_KEY_SIZE, - .ivsize = TF_BLOCK_SIZE, - .chunksize = TF_BLOCK_SIZE, - .setkey = twofish_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, }, }; diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 0904f5676e4d..a2433ae8a65e 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -249,30 +249,23 @@ static __always_inline bool get_and_clear_inhcall(void) { return false; } static __always_inline void restore_inhcall(bool inhcall) { } #endif -static void __xen_pv_evtchn_do_upcall(void) +static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs) { - irq_enter_rcu(); + struct pt_regs *old_regs = set_irq_regs(regs); + inc_irq_stat(irq_hv_callback_count); xen_hvm_evtchn_do_upcall(); - irq_exit_rcu(); + set_irq_regs(old_regs); } __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs) { - struct pt_regs *old_regs; + irqentry_state_t state = irqentry_enter(regs); bool inhcall; - irqentry_state_t state; - state = irqentry_enter(regs); - old_regs = set_irq_regs(regs); - - instrumentation_begin(); - run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs); - instrumentation_begin(); - - set_irq_regs(old_regs); + run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs); inhcall = get_and_clear_inhcall(); if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) { diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index ce0464d630a2..400908dff42e 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -754,47 +754,6 @@ SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs) SYM_CODE_END(.Lbad_gs) .previous -/* - * rdi: New stack pointer points to the top word of the stack - * rsi: Function pointer - * rdx: Function argument (can be NULL if none) - */ -SYM_FUNC_START(asm_call_on_stack) -SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL) -SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL) - /* - * Save the frame pointer unconditionally. This allows the ORC - * unwinder to handle the stack switch. - */ - pushq %rbp - mov %rsp, %rbp - - /* - * The unwinder relies on the word at the top of the new stack - * page linking back to the previous RSP. - */ - mov %rsp, (%rdi) - mov %rdi, %rsp - /* Move the argument to the right place */ - mov %rdx, %rdi - -1: - .pushsection .discard.instr_begin - .long 1b - . - .popsection - - CALL_NOSPEC rsi - -2: - .pushsection .discard.instr_end - .long 2b - . - .popsection - - /* Restore the previous stack pointer from RBP. */ - leaveq - ret -SYM_FUNC_END(asm_call_on_stack) - #ifdef CONFIG_XEN_PV /* * A note on the "critical region" in our callback handler. 
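The asm_call_on_stack thunk removed above is superseded by inline-asm helpers in <asm/irq_stack.h>, and those helpers now also own the irq_enter_rcu()/irq_exit_rcu() bracketing, which is why __xen_pv_evtchn_do_upcall no longer does its own irq accounting. A simplified sketch of the contract (call_on_irqstack() here stands in for the real inline-asm stack switch, which still writes the top-of-stack back-link for the ORC unwinder):

#define run_sysvec_on_irqstack_cond(func, regs)			\
do {								\
	irq_enter_rcu();					\
	if (user_mode(regs)) {					\
		/* From user mode: the task stack is nearly empty. */	\
		func(regs);					\
	} else {						\
		/* From kernel mode: switch to the hardirq stack. */	\
		call_on_irqstack(func, regs);			\
	}							\
	irq_exit_rcu();						\
} while (0)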
diff --git a/arch/x86/entry/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile index 6fb9b57ed5ba..d8c4f6c9eadc 100644 --- a/arch/x86/entry/syscalls/Makefile +++ b/arch/x86/entry/syscalls/Makefile @@ -6,8 +6,8 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \ $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') -syscall32 := $(srctree)/$(src)/syscall_32.tbl -syscall64 := $(srctree)/$(src)/syscall_64.tbl +syscall32 := $(src)/syscall_32.tbl +syscall64 := $(src)/syscall_64.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh systbl := $(srctree)/$(src)/syscalltbl.sh @@ -21,37 +21,37 @@ quiet_cmd_systbl = SYSTBL $@ cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@ quiet_cmd_hypercalls = HYPERCALLS $@ - cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<,$^) + cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<, $(real-prereqs)) syshdr_abi_unistd_32 := i386 -$(uapi)/unistd_32.h: $(syscall32) $(syshdr) +$(uapi)/unistd_32.h: $(syscall32) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abi_unistd_32_ia32 := i386 syshdr_pfx_unistd_32_ia32 := ia32_ -$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr) +$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abi_unistd_x32 := common,x32 syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT -$(uapi)/unistd_x32.h: $(syscall64) $(syshdr) +$(uapi)/unistd_x32.h: $(syscall64) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abi_unistd_64 := common,64 -$(uapi)/unistd_64.h: $(syscall64) $(syshdr) +$(uapi)/unistd_64.h: $(syscall64) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abi_unistd_64_x32 := x32 syshdr_pfx_unistd_64_x32 := x32_ -$(out)/unistd_64_x32.h: $(syscall64) $(syshdr) +$(out)/unistd_64_x32.h: $(syscall64) $(syshdr) FORCE $(call if_changed,syshdr) -$(out)/syscalls_32.h: $(syscall32) $(systbl) +$(out)/syscalls_32.h: $(syscall32) $(systbl) FORCE $(call if_changed,systbl) -$(out)/syscalls_64.h: $(syscall64) $(systbl) +$(out)/syscalls_64.h: $(syscall64) $(systbl) FORCE $(call if_changed,systbl) -$(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh +$(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh FORCE $(call if_changed,hypercalls) $(out)/xen-hypercalls.h: $(srctree)/include/xen/interface/xen*.h @@ -62,9 +62,10 @@ syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h syshdr-$(CONFIG_X86_64) += syscalls_64.h syshdr-$(CONFIG_XEN) += xen-hypercalls.h -targets += $(uapisyshdr-y) $(syshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +syshdr-y := $(addprefix $(out)/, $(syshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(syshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(out)/,$(syshdr-y)) +all: $(uapisyshdr-y) $(syshdr-y) @: diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 874aeacde2dd..a1c9f496fca6 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -446,3 +446,4 @@ 439 i386 faccessat2 sys_faccessat2 440 i386 process_madvise sys_process_madvise 441 i386 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 i386 mount_setattr sys_mount_setattr diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 78672124d28b..7bf01cbe582f 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -363,6 +363,7 @@ 439 common faccessat2 sys_faccessat2 440 common process_madvise sys_process_madvise 441 common 
epoll_pwait2 sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr # # Due to a historical design error, certain syscalls are numbered differently diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 02e3e42f380b..05c4abc2fdfd 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -91,7 +91,7 @@ ifneq ($(RETPOLINE_VDSO_CFLAGS),) endif endif -$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) +$(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) # # vDSO code runs in userspace and -pg doesn't help with profiling anyway. @@ -150,6 +150,7 @@ KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32)) +KBUILD_CFLAGS_32 := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic KBUILD_CFLAGS_32 += -fno-stack-protector KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile index 89b1f74d3225..48e2c51464e8 100644 --- a/arch/x86/hyperv/Makefile +++ b/arch/x86/hyperv/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-y := hv_init.o mmu.o nested.o -obj-$(CONFIG_X86_64) += hv_apic.o +obj-y := hv_init.o mmu.o nested.o irqdomain.o +obj-$(CONFIG_X86_64) += hv_apic.o hv_proc.o ifdef CONFIG_X86_64 obj-$(CONFIG_PARAVIRT_SPINLOCKS) += hv_spinlock.o diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 6375967a8244..b81047dec1da 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -10,6 +10,7 @@ #include <linux/acpi.h> #include <linux/efi.h> #include <linux/types.h> +#include <linux/bitfield.h> #include <asm/apic.h> #include <asm/desc.h> #include <asm/hypervisor.h> @@ -26,8 +27,11 @@ #include <linux/cpuhotplug.h> #include <linux/syscore_ops.h> #include <clocksource/hyperv_timer.h> +#include <linux/highmem.h> int hyperv_init_cpuhp; +u64 hv_current_partition_id = ~0ull; +EXPORT_SYMBOL_GPL(hv_current_partition_id); void *hv_hypercall_pg; EXPORT_SYMBOL_GPL(hv_hypercall_pg); @@ -44,6 +48,9 @@ EXPORT_SYMBOL_GPL(hv_vp_assist_page); void __percpu **hyperv_pcpu_input_arg; EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg); +void __percpu **hyperv_pcpu_output_arg; +EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg); + u32 hv_max_vp_index; EXPORT_SYMBOL_GPL(hv_max_vp_index); @@ -76,12 +83,19 @@ static int hv_cpu_init(unsigned int cpu) void **input_arg; struct page *pg; - input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); /* hv_cpu_init() can be called with IRQs disabled from hv_resume() */ - pg = alloc_page(irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL); + pg = alloc_pages(irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL, hv_root_partition ? 
1 : 0); if (unlikely(!pg)) return -ENOMEM; + + input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); *input_arg = page_address(pg); + if (hv_root_partition) { + void **output_arg; + + output_arg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg); + *output_arg = page_address(pg + 1); + } hv_get_vp_index(msr_vp_index); @@ -208,14 +222,23 @@ static int hv_cpu_die(unsigned int cpu) unsigned int new_cpu; unsigned long flags; void **input_arg; - void *input_pg = NULL; + void *pg; local_irq_save(flags); input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); - input_pg = *input_arg; + pg = *input_arg; *input_arg = NULL; + + if (hv_root_partition) { + void **output_arg; + + output_arg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg); + *output_arg = NULL; + } + local_irq_restore(flags); - free_page((unsigned long)input_pg); + + free_pages((unsigned long)pg, hv_root_partition ? 1 : 0); if (hv_vp_assist_page && hv_vp_assist_page[cpu]) wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, 0); @@ -264,6 +287,9 @@ static int hv_suspend(void) union hv_x64_msr_hypercall_contents hypercall_msr; int ret; + if (hv_root_partition) + return -EPERM; + /* * Reset the hypercall page as it is going to be invalidated * across hibernation. Setting hv_hypercall_pg to NULL ensures @@ -334,6 +360,24 @@ static void __init hv_stimer_setup_percpu_clockev(void) old_setup_percpu_clockev(); } +static void __init hv_get_partition_id(void) +{ + struct hv_get_partition_id *output_page; + u64 status; + unsigned long flags; + + local_irq_save(flags); + output_page = *this_cpu_ptr(hyperv_pcpu_output_arg); + status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output_page); + if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) { + /* No point in proceeding if this failed */ + pr_err("Failed to get partition ID: %lld\n", status); + BUG(); + } + hv_current_partition_id = output_page->partition_id; + local_irq_restore(flags); +} + /* * This function is to be invoked early in the boot sequence after the * hypervisor has been detected. @@ -368,6 +412,12 @@ void __init hyperv_init(void) BUG_ON(hyperv_pcpu_input_arg == NULL); + /* Allocate the per-CPU state for output arg for root */ + if (hv_root_partition) { + hyperv_pcpu_output_arg = alloc_percpu(void *); + BUG_ON(hyperv_pcpu_output_arg == NULL); + } + /* Allocate percpu VP index */ hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index), GFP_KERNEL); @@ -408,8 +458,35 @@ void __init hyperv_init(void) rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); hypercall_msr.enable = 1; + + if (hv_root_partition) { + struct page *pg; + void *src, *dst; + + /* + * For the root partition, the hypervisor will set up its + * hypercall page. The hypervisor guarantees it will not show + * up in the root's address space. The root can't change the + * location of the hypercall page. + * + * Order is important here. We must enable the hypercall page + * so it is populated with code, then copy the code to an + * executable page. 
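+ * (The copy below reaches the hypervisor-owned page via memremap() + * and writes through a kmap() alias, because the vmalloc mapping of + * hv_hypercall_pg is read-only executable.)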
+ */ + wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + + pg = vmalloc_to_page(hv_hypercall_pg); + dst = kmap(pg); + src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT, PAGE_SIZE, + MEMREMAP_WB); + BUG_ON(!(src && dst)); + memcpy(dst, src, HV_HYP_PAGE_SIZE); + memunmap(src); + kunmap(pg); + } else { + hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg); + wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + } /* * hyperv_init() is called before LAPIC is initialized: see @@ -428,6 +505,21 @@ void __init hyperv_init(void) register_syscore_ops(&hv_syscore_ops); hyperv_init_cpuhp = cpuhp; + + if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID) + hv_get_partition_id(); + + BUG_ON(hv_root_partition && hv_current_partition_id == ~0ull); + +#ifdef CONFIG_PCI_MSI + /* + * If we're running as root, we want to create our own PCI MSI domain. + * We can't set this in hv_pci_init because that would be too late. + */ + if (hv_root_partition) + x86_init.irqs.create_pci_msi_domain = hv_create_pci_msi_domain; +#endif + return; remove_cpuhp_state: @@ -552,6 +644,20 @@ EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized); bool hv_is_hibernation_supported(void) { - return acpi_sleep_state_supported(ACPI_STATE_S4); + return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4); } EXPORT_SYMBOL_GPL(hv_is_hibernation_supported); + +enum hv_isolation_type hv_get_isolation_type(void) +{ + if (!(ms_hyperv.features_b & HV_ISOLATION)) + return HV_ISOLATION_TYPE_NONE; + return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b); +} +EXPORT_SYMBOL_GPL(hv_get_isolation_type); + +bool hv_is_isolation_supported(void) +{ + return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE; +} +EXPORT_SYMBOL_GPL(hv_is_isolation_supported); diff --git a/arch/x86/hyperv/hv_proc.c b/arch/x86/hyperv/hv_proc.c new file mode 100644 index 000000000000..60461e598239 --- /dev/null +++ b/arch/x86/hyperv/hv_proc.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/types.h> +#include <linux/version.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/clockchips.h> +#include <linux/acpi.h> +#include <linux/hyperv.h> +#include <linux/slab.h> +#include <linux/cpuhotplug.h> +#include <linux/minmax.h> +#include <asm/hypervisor.h> +#include <asm/mshyperv.h> +#include <asm/apic.h> + +#include <asm/trace/hyperv.h> + +/* + * See struct hv_deposit_memory. The first u64 is partition ID, the rest + * are GPAs. + */ +#define HV_DEPOSIT_MAX (HV_HYP_PAGE_SIZE / sizeof(u64) - 1) + +/* Deposits exact number of pages. Must be called with interrupts enabled. 
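+ * Pages are allocated as the largest power-of-two chunks the + * allocator can provide, split into individual pages, and their PFNs + * are passed to the hypervisor in a single rep hypercall.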
*/ +int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages) +{ + struct page **pages, *page; + int *counts; + int num_allocations; + int i, j, page_count; + int order; + u64 status; + int ret; + u64 base_pfn; + struct hv_deposit_memory *input_page; + unsigned long flags; + + if (num_pages > HV_DEPOSIT_MAX) + return -E2BIG; + if (!num_pages) + return 0; + + /* One buffer for page pointers and counts */ + page = alloc_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + pages = page_address(page); + + counts = kcalloc(HV_DEPOSIT_MAX, sizeof(int), GFP_KERNEL); + if (!counts) { + free_page((unsigned long)pages); + return -ENOMEM; + } + + /* Allocate all the pages before disabling interrupts */ + i = 0; + + while (num_pages) { + /* Find highest order we can actually allocate */ + order = 31 - __builtin_clz(num_pages); + + while (1) { + pages[i] = alloc_pages_node(node, GFP_KERNEL, order); + if (pages[i]) + break; + if (!order) { + ret = -ENOMEM; + num_allocations = i; + goto err_free_allocations; + } + --order; + } + + split_page(pages[i], order); + counts[i] = 1 << order; + num_pages -= counts[i]; + i++; + } + num_allocations = i; + + local_irq_save(flags); + + input_page = *this_cpu_ptr(hyperv_pcpu_input_arg); + + input_page->partition_id = partition_id; + + /* Populate gpa_page_list - these will fit on the input page */ + for (i = 0, page_count = 0; i < num_allocations; ++i) { + base_pfn = page_to_pfn(pages[i]); + for (j = 0; j < counts[i]; ++j, ++page_count) + input_page->gpa_page_list[page_count] = base_pfn + j; + } + status = hv_do_rep_hypercall(HVCALL_DEPOSIT_MEMORY, + page_count, 0, input_page, NULL); + local_irq_restore(flags); + + if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) { + pr_err("Failed to deposit pages: %lld\n", status); + ret = status; + goto err_free_allocations; + } + + ret = 0; + goto free_buf; + +err_free_allocations: + for (i = 0; i < num_allocations; ++i) { + base_pfn = page_to_pfn(pages[i]); + for (j = 0; j < counts[i]; ++j) + __free_page(pfn_to_page(base_pfn + j)); + } + +free_buf: + free_page((unsigned long)pages); + kfree(counts); + return ret; +} + +int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id) +{ + struct hv_add_logical_processor_in *input; + struct hv_add_logical_processor_out *output; + u64 status; + unsigned long flags; + int ret = 0; + int pxm = node_to_pxm(node); + + /* + * When adding a logical processor, the hypervisor may return + * HV_STATUS_INSUFFICIENT_MEMORY. When that happens, we deposit more + * pages and retry. 
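+ * A single page per retry is enough for the hypervisor to make + * forward progress.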
+ */ + do { + local_irq_save(flags); + + input = *this_cpu_ptr(hyperv_pcpu_input_arg); + /* We don't do anything with the output right now */ + output = *this_cpu_ptr(hyperv_pcpu_output_arg); + + input->lp_index = lp_index; + input->apic_id = apic_id; + input->flags = 0; + input->proximity_domain_info.domain_id = pxm; + input->proximity_domain_info.flags.reserved = 0; + input->proximity_domain_info.flags.proximity_info_valid = 1; + input->proximity_domain_info.flags.proximity_preferred = 1; + status = hv_do_hypercall(HVCALL_ADD_LOGICAL_PROCESSOR, + input, output); + local_irq_restore(flags); + + status &= HV_HYPERCALL_RESULT_MASK; + + if (status != HV_STATUS_INSUFFICIENT_MEMORY) { + if (status != HV_STATUS_SUCCESS) { + pr_err("%s: cpu %u apic ID %u, %lld\n", __func__, + lp_index, apic_id, status); + ret = status; + } + break; + } + ret = hv_call_deposit_pages(node, hv_current_partition_id, 1); + } while (!ret); + + return ret; +} + +int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags) +{ + struct hv_create_vp *input; + u64 status; + unsigned long irq_flags; + int ret = 0; + int pxm = node_to_pxm(node); + + /* Root VPs don't seem to need pages deposited */ + if (partition_id != hv_current_partition_id) { + /* The value 90 is empirically determined. It may change. */ + ret = hv_call_deposit_pages(node, partition_id, 90); + if (ret) + return ret; + } + + do { + local_irq_save(irq_flags); + + input = *this_cpu_ptr(hyperv_pcpu_input_arg); + + input->partition_id = partition_id; + input->vp_index = vp_index; + input->flags = flags; + input->subnode_type = HvSubnodeAny; + if (node != NUMA_NO_NODE) { + input->proximity_domain_info.domain_id = pxm; + input->proximity_domain_info.flags.reserved = 0; + input->proximity_domain_info.flags.proximity_info_valid = 1; + input->proximity_domain_info.flags.proximity_preferred = 1; + } else { + input->proximity_domain_info.as_uint64 = 0; + } + status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL); + local_irq_restore(irq_flags); + + status &= HV_HYPERCALL_RESULT_MASK; + + if (status != HV_STATUS_INSUFFICIENT_MEMORY) { + if (status != HV_STATUS_SUCCESS) { + pr_err("%s: vcpu %u, lp %u, %lld\n", __func__, + vp_index, flags, status); + ret = status; + } + break; + } + ret = hv_call_deposit_pages(node, partition_id, 1); + + } while (!ret); + + return ret; +} + diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c new file mode 100644 index 000000000000..4421a8d92e23 --- /dev/null +++ b/arch/x86/hyperv/irqdomain.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Irqdomain for Linux to run as the root partition on Microsoft Hypervisor. 
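+ * The hypervisor owns the physical interrupt controllers, so the root + * partition maps and unmaps device interrupts through the + * HVCALL_MAP_DEVICE_INTERRUPT and HVCALL_UNMAP_DEVICE_INTERRUPT + * hypercalls below.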
+ * + * Authors: + * Sunil Muthuswamy <sunilmut@microsoft.com> + * Wei Liu <wei.liu@kernel.org> + */ + +#include <linux/pci.h> +#include <linux/irq.h> +#include <asm/mshyperv.h> + +static int hv_map_interrupt(union hv_device_id device_id, bool level, + int cpu, int vector, struct hv_interrupt_entry *entry) +{ + struct hv_input_map_device_interrupt *input; + struct hv_output_map_device_interrupt *output; + struct hv_device_interrupt_descriptor *intr_desc; + unsigned long flags; + u64 status; + int nr_bank, var_size; + + local_irq_save(flags); + + input = *this_cpu_ptr(hyperv_pcpu_input_arg); + output = *this_cpu_ptr(hyperv_pcpu_output_arg); + + intr_desc = &input->interrupt_descriptor; + memset(input, 0, sizeof(*input)); + input->partition_id = hv_current_partition_id; + input->device_id = device_id.as_uint64; + intr_desc->interrupt_type = HV_X64_INTERRUPT_TYPE_FIXED; + intr_desc->vector_count = 1; + intr_desc->target.vector = vector; + + if (level) + intr_desc->trigger_mode = HV_INTERRUPT_TRIGGER_MODE_LEVEL; + else + intr_desc->trigger_mode = HV_INTERRUPT_TRIGGER_MODE_EDGE; + + intr_desc->target.vp_set.valid_bank_mask = 0; + intr_desc->target.vp_set.format = HV_GENERIC_SET_SPARSE_4K; + nr_bank = cpumask_to_vpset(&(intr_desc->target.vp_set), cpumask_of(cpu)); + if (nr_bank < 0) { + local_irq_restore(flags); + pr_err("%s: unable to generate VP set\n", __func__); + return EINVAL; + } + intr_desc->target.flags = HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; + + /* + * var-sized hypercall, var-size starts after vp_mask (thus + * vp_set.format does not count, but vp_set.valid_bank_mask + * does). + */ + var_size = nr_bank + 1; + + status = hv_do_rep_hypercall(HVCALL_MAP_DEVICE_INTERRUPT, 0, var_size, + input, output); + *entry = output->interrupt_entry; + + local_irq_restore(flags); + + if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) + pr_err("%s: hypercall failed, status %lld\n", __func__, status); + + return status & HV_HYPERCALL_RESULT_MASK; +} + +static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry) +{ + unsigned long flags; + struct hv_input_unmap_device_interrupt *input; + struct hv_interrupt_entry *intr_entry; + u64 status; + + local_irq_save(flags); + input = *this_cpu_ptr(hyperv_pcpu_input_arg); + + memset(input, 0, sizeof(*input)); + intr_entry = &input->interrupt_entry; + input->partition_id = hv_current_partition_id; + input->device_id = id; + *intr_entry = *old_entry; + + status = hv_do_hypercall(HVCALL_UNMAP_DEVICE_INTERRUPT, input, NULL); + local_irq_restore(flags); + + return status & HV_HYPERCALL_RESULT_MASK; +} + +#ifdef CONFIG_PCI_MSI +struct rid_data { + struct pci_dev *bridge; + u32 rid; +}; + +static int get_rid_cb(struct pci_dev *pdev, u16 alias, void *data) +{ + struct rid_data *rd = data; + u8 bus = PCI_BUS_NUM(rd->rid); + + if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus) { + rd->bridge = pdev; + rd->rid = alias; + } + + return 0; +} + +static union hv_device_id hv_build_pci_dev_id(struct pci_dev *dev) +{ + union hv_device_id dev_id; + struct rid_data data = { + .bridge = NULL, + .rid = PCI_DEVID(dev->bus->number, dev->devfn) + }; + + pci_for_each_dma_alias(dev, get_rid_cb, &data); + + dev_id.as_uint64 = 0; + dev_id.device_type = HV_DEVICE_TYPE_PCI; + dev_id.pci.segment = pci_domain_nr(dev->bus); + + dev_id.pci.bdf.bus = PCI_BUS_NUM(data.rid); + dev_id.pci.bdf.device = PCI_SLOT(data.rid); + dev_id.pci.bdf.function = PCI_FUNC(data.rid); + dev_id.pci.source_shadow = HV_SOURCE_SHADOW_NONE; + + if (data.bridge) { + int pos; + + /* + 
* Microsoft Hypervisor requires a bus range when the bridge is + * running in PCI-X mode. + * + * To distinguish conventional vs PCI-X bridge, we can check + * the bridge's PCI-X Secondary Status Register, Secondary Bus + * Mode and Frequency bits. See PCI Express to PCI/PCI-X Bridge + * Specification Revision 1.0 5.2.2.1.3. + * + * Value zero means it is in conventional mode, otherwise it is + * in PCI-X mode. + */ + + pos = pci_find_capability(data.bridge, PCI_CAP_ID_PCIX); + if (pos) { + u16 status; + + pci_read_config_word(data.bridge, pos + + PCI_X_BRIDGE_SSTATUS, &status); + + if (status & PCI_X_SSTATUS_FREQ) { + /* Non-zero, PCI-X mode */ + u8 sec_bus, sub_bus; + + dev_id.pci.source_shadow = HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE; + + pci_read_config_byte(data.bridge, PCI_SECONDARY_BUS, &sec_bus); + dev_id.pci.shadow_bus_range.secondary_bus = sec_bus; + pci_read_config_byte(data.bridge, PCI_SUBORDINATE_BUS, &sub_bus); + dev_id.pci.shadow_bus_range.subordinate_bus = sub_bus; + } + } + } + + return dev_id; +} + +static int hv_map_msi_interrupt(struct pci_dev *dev, int cpu, int vector, + struct hv_interrupt_entry *entry) +{ + union hv_device_id device_id = hv_build_pci_dev_id(dev); + + return hv_map_interrupt(device_id, false, cpu, vector, entry); +} + +static inline void entry_to_msi_msg(struct hv_interrupt_entry *entry, struct msi_msg *msg) +{ + /* High address is always 0 */ + msg->address_hi = 0; + msg->address_lo = entry->msi_entry.address.as_uint32; + msg->data = entry->msi_entry.data.as_uint32; +} + +static int hv_unmap_msi_interrupt(struct pci_dev *dev, struct hv_interrupt_entry *old_entry); +static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct msi_desc *msidesc; + struct pci_dev *dev; + struct hv_interrupt_entry out_entry, *stored_entry; + struct irq_cfg *cfg = irqd_cfg(data); + cpumask_t *affinity; + int cpu; + u64 status; + + msidesc = irq_data_get_msi_desc(data); + dev = msi_desc_to_pci_dev(msidesc); + + if (!cfg) { + pr_debug("%s: cfg is NULL\n", __func__); + return; + } + + affinity = irq_data_get_effective_affinity_mask(data); + cpu = cpumask_first_and(affinity, cpu_online_mask); + + if (data->chip_data) { + /* + * This interrupt is already mapped. Let's unmap first. + * + * We don't use retarget interrupt hypercalls here because + * Microsoft Hypervisor doesn't allow root to change the vector + * or specify VPs outside of the set that is initially used + * during mapping. 
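+ * The hv_interrupt_entry stored in chip_data at map time identifies + * the previous mapping to the unmap hypercall.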
+ */ + stored_entry = data->chip_data; + data->chip_data = NULL; + + status = hv_unmap_msi_interrupt(dev, stored_entry); + + kfree(stored_entry); + + if (status != HV_STATUS_SUCCESS) { + pr_debug("%s: failed to unmap, status %lld\n", __func__, status); + return; + } + } + + stored_entry = kzalloc(sizeof(*stored_entry), GFP_ATOMIC); + if (!stored_entry) { + pr_debug("%s: failed to allocate chip data\n", __func__); + return; + } + + status = hv_map_msi_interrupt(dev, cpu, cfg->vector, &out_entry); + if (status != HV_STATUS_SUCCESS) { + kfree(stored_entry); + return; + } + + *stored_entry = out_entry; + data->chip_data = stored_entry; + entry_to_msi_msg(&out_entry, msg); + + return; +} + +static int hv_unmap_msi_interrupt(struct pci_dev *dev, struct hv_interrupt_entry *old_entry) +{ + return hv_unmap_interrupt(hv_build_pci_dev_id(dev).as_uint64, old_entry); +} + +static void hv_teardown_msi_irq_common(struct pci_dev *dev, struct msi_desc *msidesc, int irq) +{ + u64 status; + struct hv_interrupt_entry old_entry; + struct irq_desc *desc; + struct irq_data *data; + struct msi_msg msg; + + desc = irq_to_desc(irq); + if (!desc) { + pr_debug("%s: no irq desc\n", __func__); + return; + } + + data = &desc->irq_data; + if (!data) { + pr_debug("%s: no irq data\n", __func__); + return; + } + + if (!data->chip_data) { + pr_debug("%s: no chip data!\n", __func__); + return; + } + + old_entry = *(struct hv_interrupt_entry *)data->chip_data; + entry_to_msi_msg(&old_entry, &msg); + + kfree(data->chip_data); + data->chip_data = NULL; + + status = hv_unmap_msi_interrupt(dev, &old_entry); + + if (status != HV_STATUS_SUCCESS) { + pr_err("%s: hypercall failed, status %lld\n", __func__, status); + return; + } +} + +static void hv_msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) +{ + int i; + struct msi_desc *entry; + struct pci_dev *pdev; + + if (WARN_ON_ONCE(!dev_is_pci(dev))) + return; + + pdev = to_pci_dev(dev); + + for_each_pci_msi_entry(entry, pdev) { + if (entry->irq) { + for (i = 0; i < entry->nvec_used; i++) { + hv_teardown_msi_irq_common(pdev, entry, entry->irq + i); + irq_domain_free_irqs(entry->irq + i, 1); + } + } + } +} + +/* + * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, + * which implement the MSI or MSI-X Capability Structure. 
+ */ +static struct irq_chip hv_pci_msi_controller = { + .name = "HV-PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = irq_chip_ack_parent, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_compose_msi_msg = hv_irq_compose_msi_msg, + .irq_set_affinity = msi_domain_set_affinity, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +static struct msi_domain_ops pci_msi_domain_ops = { + .domain_free_irqs = hv_msi_domain_free_irqs, + .msi_prepare = pci_msi_prepare, +}; + +static struct msi_domain_info hv_pci_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX, + .ops = &pci_msi_domain_ops, + .chip = &hv_pci_msi_controller, + .handler = handle_edge_irq, + .handler_name = "edge", +}; + +struct irq_domain * __init hv_create_pci_msi_domain(void) +{ + struct irq_domain *d = NULL; + struct fwnode_handle *fn; + + fn = irq_domain_alloc_named_fwnode("HV-PCI-MSI"); + if (fn) + d = pci_msi_create_irq_domain(fn, &hv_pci_msi_domain_info, x86_vector_domain); + + /* No point in going further if we can't get an irq domain */ + BUG_ON(!d); + + return d; +} + +#endif /* CONFIG_PCI_MSI */ + +int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry) +{ + union hv_device_id device_id; + + device_id.as_uint64 = 0; + device_id.device_type = HV_DEVICE_TYPE_IOAPIC; + device_id.ioapic.ioapic_id = (u8)ioapic_id; + + return hv_unmap_interrupt(device_id.as_uint64, entry); +} +EXPORT_SYMBOL_GPL(hv_unmap_ioapic_interrupt); + +int hv_map_ioapic_interrupt(int ioapic_id, bool level, int cpu, int vector, + struct hv_interrupt_entry *entry) +{ + union hv_device_id device_id; + + device_id.as_uint64 = 0; + device_id.device_type = HV_DEVICE_TYPE_IOAPIC; + device_id.ioapic.ioapic_id = (u8)ioapic_id; + + return hv_map_interrupt(device_id, level, cpu, vector, entry); +} +EXPORT_SYMBOL_GPL(hv_map_ioapic_interrupt); diff --git a/arch/x86/include/asm/acrn.h b/arch/x86/include/asm/acrn.h new file mode 100644 index 000000000000..e003a01b7c67 --- /dev/null +++ b/arch/x86/include/asm/acrn.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_ACRN_H +#define _ASM_X86_ACRN_H + +/* + * This CPUID returns feature bitmaps in EAX. + * Guest VM uses this to detect the appropriate feature bit. + */ +#define ACRN_CPUID_FEATURES 0x40000001 +/* Bit 0 indicates whether guest VM is privileged */ +#define ACRN_FEATURE_PRIVILEGED_VM BIT(0) + +void acrn_setup_intr_handler(void (*handler)(void)); +void acrn_remove_intr_handler(void); + +static inline u32 acrn_cpuid_base(void) +{ + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return hypervisor_cpuid_base("ACRNACRNACRN", 0); + + return 0; +} + +/* + * Hypercalls for ACRN + * + * - VMCALL instruction is used to implement ACRN hypercalls. + * - ACRN hypercall ABI: + * - Hypercall number is passed in R8 register. + * - Up to 2 arguments are passed in RDI, RSI. + * - Return value will be placed in RAX. + * + * Because GCC doesn't support the R8 register as a direct register constraint, use + * a supported constraint as input with an explicit MOV to R8 at the beginning of the asm. 
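+ * R8 and "memory" are in the clobber list: R8 because the explicit MOV + * modifies it behind the compiler's back, and "memory" so the compiler + * does not cache values across the VMCALL.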
+ */ +static inline long acrn_hypercall0(unsigned long hcall_id) +{ + long result; + + asm volatile("movl %1, %%r8d\n\t" + "vmcall\n\t" + : "=a" (result) + : "g" (hcall_id) + : "r8", "memory"); + + return result; +} + +static inline long acrn_hypercall1(unsigned long hcall_id, + unsigned long param1) +{ + long result; + + asm volatile("movl %1, %%r8d\n\t" + "vmcall\n\t" + : "=a" (result) + : "g" (hcall_id), "D" (param1) + : "r8", "memory"); + + return result; +} + +static inline long acrn_hypercall2(unsigned long hcall_id, + unsigned long param1, + unsigned long param2) +{ + long result; + + asm volatile("movl %1, %%r8d\n\t" + "vmcall\n\t" + : "=a" (result) + : "g" (hcall_id), "D" (param1), "S" (param2) + : "r8", "memory"); + + return result; +} + +#endif /* _ASM_X86_ACRN_H */ diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h deleted file mode 100644 index 87ce8e963215..000000000000 --- a/arch/x86/include/asm/apb_timer.h +++ /dev/null @@ -1,40 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * apb_timer.h: Driver for Langwell APB timer based on Synopsis DesignWare - * - * (C) Copyright 2009 Intel Corporation - * Author: Jacob Pan (jacob.jun.pan@intel.com) - * - * Note: - */ - -#ifndef ASM_X86_APBT_H -#define ASM_X86_APBT_H -#include <linux/sfi.h> - -#ifdef CONFIG_APB_TIMER - -/* default memory mapped register base */ -#define LNW_SCU_ADDR 0xFF100000 -#define LNW_EXT_TIMER_OFFSET 0x1B800 -#define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET) -#define LNW_EXT_TIMER_PGOFFSET 0x800 - -/* APBT clock speed range from PCLK to fabric base, 25-100MHz */ -#define APBT_MAX_FREQ 50000000 -#define APBT_MIN_FREQ 1000000 -#define APBT_MMAP_SIZE 1024 - -extern void apbt_time_init(void); -extern void apbt_setup_secondary_clock(void); - -extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); -extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr); -extern int sfi_mtimer_num; - -#else /* CONFIG_APB_TIMER */ - -static inline void apbt_time_init(void) { } - -#endif -#endif /* ASM_X86_APBT_H */ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 1feb6c089ba2..cc96e26d69f7 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -292,6 +292,7 @@ #define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ +#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ @@ -335,6 +336,7 @@ #define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ #define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ #define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ +#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */ #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h deleted file mode 100644 index 777c0f63418c..000000000000 --- a/arch/x86/include/asm/crypto/glue_helper.h +++ /dev/null @@ -1,118 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Shared glue code for 128bit block ciphers - */ - -#ifndef _CRYPTO_GLUE_HELPER_H -#define _CRYPTO_GLUE_HELPER_H - -#include 
<crypto/internal/skcipher.h> -#include <linux/kernel.h> -#include <asm/fpu/api.h> -#include <crypto/b128ops.h> - -typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src); -typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src); -typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - -struct common_glue_func_entry { - unsigned int num_blocks; /* number of blocks that @fn will process */ - union { - common_glue_func_t ecb; - common_glue_cbc_func_t cbc; - common_glue_ctr_func_t ctr; - common_glue_xts_func_t xts; - } fn_u; -}; - -struct common_glue_ctx { - unsigned int num_funcs; - int fpu_blocks_limit; /* -1 means fpu not needed at all */ - - /* - * First funcs entry must have largest num_blocks and last funcs entry - * must have num_blocks == 1! - */ - struct common_glue_func_entry funcs[]; -}; - -static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit, - struct skcipher_walk *walk, - bool fpu_enabled, unsigned int nbytes) -{ - if (likely(fpu_blocks_limit < 0)) - return false; - - if (fpu_enabled) - return true; - - /* - * Vector-registers are only used when chunk to be processed is large - * enough, so do not enable FPU until it is necessary. - */ - if (nbytes < bsize * (unsigned int)fpu_blocks_limit) - return false; - - /* prevent sleeping if FPU is in use */ - skcipher_walk_atomise(walk); - - kernel_fpu_begin(); - return true; -} - -static inline void glue_fpu_end(bool fpu_enabled) -{ - if (fpu_enabled) - kernel_fpu_end(); -} - -static inline void le128_to_be128(be128 *dst, const le128 *src) -{ - dst->a = cpu_to_be64(le64_to_cpu(src->a)); - dst->b = cpu_to_be64(le64_to_cpu(src->b)); -} - -static inline void be128_to_le128(le128 *dst, const be128 *src) -{ - dst->a = cpu_to_le64(be64_to_cpu(src->a)); - dst->b = cpu_to_le64(be64_to_cpu(src->b)); -} - -static inline void le128_inc(le128 *i) -{ - u64 a = le64_to_cpu(i->a); - u64 b = le64_to_cpu(i->b); - - b++; - if (!b) - a++; - - i->a = cpu_to_le64(a); - i->b = cpu_to_le64(b); -} - -extern int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, - struct skcipher_request *req); - -extern int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn, - struct skcipher_request *req); - -extern int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, - struct skcipher_request *req); - -extern int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, - struct skcipher_request *req); - -extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx, - struct skcipher_request *req, - common_glue_func_t tweak_fn, void *tweak_ctx, - void *crypt_ctx, bool decrypt); - -extern void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, - const u8 *src, le128 *iv, - common_glue_func_t fn); - -#endif /* _CRYPTO_GLUE_HELPER_H */ diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h deleted file mode 100644 index 251c2c89d7cf..000000000000 --- a/arch/x86/include/asm/crypto/serpent-avx.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASM_X86_SERPENT_AVX_H -#define ASM_X86_SERPENT_AVX_H - -#include <crypto/b128ops.h> -#include <crypto/serpent.h> -#include <linux/types.h> - -struct crypto_skcipher; - -#define SERPENT_PARALLEL_BLOCKS 8 - -struct serpent_xts_ctx { - struct serpent_ctx tweak_ctx; - struct serpent_ctx crypt_ctx; -}; - -asmlinkage void 
serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, - const u8 *src); -asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, - const u8 *src); - -asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, - const u8 *src); -asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - -asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst, - const u8 *src, le128 *iv); - -extern void __serpent_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - -extern void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -extern void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv); - -extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen); - -#endif diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 9f1a0a987e5e..d0dcefb5cc59 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -108,9 +108,6 @@ enum fixed_addresses { #ifdef CONFIG_PARAVIRT_XXL FIX_PARAVIRT_BOOTMAP, #endif -#ifdef CONFIG_X86_INTEL_MID - FIX_LNW_VRTC, -#endif #ifdef CONFIG_ACPI_APEI_GHES /* Used for GHES mapping from assorted contexts */ diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 6bf42aed387e..e6cd3fee562b 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -21,7 +21,9 @@ #define HYPERV_CPUID_FEATURES 0x40000003 #define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004 #define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005 +#define HYPERV_CPUID_CPU_MANAGEMENT_FEATURES 0x40000007 #define HYPERV_CPUID_NESTED_FEATURES 0x4000000A +#define HYPERV_CPUID_ISOLATION_CONFIG 0x4000000C #define HYPERV_CPUID_VIRT_STACK_INTERFACE 0x40000081 #define HYPERV_VS_INTERFACE_EAX_SIGNATURE 0x31235356 /* "VS#1" */ @@ -111,6 +113,15 @@ #define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14) /* + * CPU management features identification. + * These are HYPERV_CPUID_CPU_MANAGEMENT_FEATURES.EAX bits. + */ +#define HV_X64_START_LOGICAL_PROCESSOR BIT(0) +#define HV_X64_CREATE_ROOT_VIRTUAL_PROCESSOR BIT(1) +#define HV_X64_PERFORMANCE_COUNTER_SYNC BIT(2) +#define HV_X64_RESERVED_IDENTITY_BIT BIT(31) + +/* * Virtual processor will never share a physical core with another virtual * processor, except for virtual processors that are reported as sibling SMT * threads. @@ -122,6 +133,20 @@ #define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18) #define HV_X64_NESTED_MSR_BITMAP BIT(19) +/* HYPERV_CPUID_ISOLATION_CONFIG.EAX bits. */ +#define HV_PARAVISOR_PRESENT BIT(0) + +/* HYPERV_CPUID_ISOLATION_CONFIG.EBX bits. */ +#define HV_ISOLATION_TYPE GENMASK(3, 0) +#define HV_SHARED_GPA_BOUNDARY_ACTIVE BIT(5) +#define HV_SHARED_GPA_BOUNDARY_BITS GENMASK(11, 6) + +enum hv_isolation_type { + HV_ISOLATION_TYPE_NONE = 0, + HV_ISOLATION_TYPE_VBS = 1, + HV_ISOLATION_TYPE_SNP = 2 +}; + /* Hyper-V specific model specific registers (MSRs) */ /* MSR used to identify the guest OS. 
*/ @@ -523,6 +548,19 @@ struct hv_partition_assist_pg { u32 tlb_lock_count; }; +enum hv_interrupt_type { + HV_X64_INTERRUPT_TYPE_FIXED = 0x0000, + HV_X64_INTERRUPT_TYPE_LOWESTPRIORITY = 0x0001, + HV_X64_INTERRUPT_TYPE_SMI = 0x0002, + HV_X64_INTERRUPT_TYPE_REMOTEREAD = 0x0003, + HV_X64_INTERRUPT_TYPE_NMI = 0x0004, + HV_X64_INTERRUPT_TYPE_INIT = 0x0005, + HV_X64_INTERRUPT_TYPE_SIPI = 0x0006, + HV_X64_INTERRUPT_TYPE_EXTINT = 0x0007, + HV_X64_INTERRUPT_TYPE_LOCALINT0 = 0x0008, + HV_X64_INTERRUPT_TYPE_LOCALINT1 = 0x0009, + HV_X64_INTERRUPT_TYPE_MAXIMUM = 0x000A, +}; #include <asm-generic/hyperv-tlfs.h> diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index 41e2e2e1b439..5eb3bdf36a41 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -187,23 +187,22 @@ __visible noinstr void func(struct pt_regs *regs, unsigned long error_code) * has to be done in the function body if necessary. */ #define DEFINE_IDTENTRY_IRQ(func) \ -static __always_inline void __##func(struct pt_regs *regs, u8 vector); \ +static void __##func(struct pt_regs *regs, u32 vector); \ \ __visible noinstr void func(struct pt_regs *regs, \ unsigned long error_code) \ { \ irqentry_state_t state = irqentry_enter(regs); \ + u32 vector = (u32)(u8)error_code; \ \ instrumentation_begin(); \ - irq_enter_rcu(); \ kvm_set_cpu_l1tf_flush_l1d(); \ - __##func (regs, (u8)error_code); \ - irq_exit_rcu(); \ + run_irq_on_irqstack_cond(__##func, regs, vector); \ instrumentation_end(); \ irqentry_exit(regs, state); \ } \ \ -static __always_inline void __##func(struct pt_regs *regs, u8 vector) +static noinline void __##func(struct pt_regs *regs, u32 vector) /** * DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points @@ -237,10 +236,8 @@ __visible noinstr void func(struct pt_regs *regs) \ irqentry_state_t state = irqentry_enter(regs); \ \ instrumentation_begin(); \ - irq_enter_rcu(); \ kvm_set_cpu_l1tf_flush_l1d(); \ run_sysvec_on_irqstack_cond(__##func, regs); \ - irq_exit_rcu(); \ instrumentation_end(); \ irqentry_exit(regs, state); \ } \ diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index a8c3d284fa46..95a448fbb44c 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -7,9 +7,12 @@ * Copyright (C) IBM Corporation, 2009 */ +#include <asm/byteorder.h> /* insn_attr_t is defined in inat.h */ #include <asm/inat.h> +#if defined(__BYTE_ORDER) ? 
__BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) + struct insn_field { union { insn_value_t value; @@ -20,6 +23,48 @@ struct insn_field { unsigned char nbytes; }; +static inline void insn_field_set(struct insn_field *p, insn_value_t v, + unsigned char n) +{ + p->value = v; + p->nbytes = n; +} + +static inline void insn_set_byte(struct insn_field *p, unsigned char n, + insn_byte_t v) +{ + p->bytes[n] = v; +} + +#else + +struct insn_field { + insn_value_t value; + union { + insn_value_t little; + insn_byte_t bytes[4]; + }; + /* !0 if we've run insn_get_xxx() for this field */ + unsigned char got; + unsigned char nbytes; +}; + +static inline void insn_field_set(struct insn_field *p, insn_value_t v, + unsigned char n) +{ + p->value = v; + p->little = __cpu_to_le32(v); + p->nbytes = n; +} + +static inline void insn_set_byte(struct insn_field *p, unsigned char n, + insn_byte_t v) +{ + p->bytes[n] = v; + p->value = __le32_to_cpu(p->little); +} +#endif + struct insn { struct insn_field prefixes; /* * Prefixes diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h index cf0e25f45422..c201083b34f6 100644 --- a/arch/x86/include/asm/intel-mid.h +++ b/arch/x86/include/asm/intel-mid.h @@ -1,15 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * intel-mid.h: Intel MID specific setup code + * Intel MID specific setup code * - * (C) Copyright 2009 Intel Corporation + * (C) Copyright 2009, 2021 Intel Corporation */ #ifndef _ASM_X86_INTEL_MID_H #define _ASM_X86_INTEL_MID_H -#include <linux/sfi.h> #include <linux/pci.h> -#include <linux/platform_device.h> extern int intel_mid_pci_init(void); extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state); @@ -22,93 +20,18 @@ extern void intel_mid_pwr_power_off(void); extern int intel_mid_pwr_get_lss_id(struct pci_dev *pdev); -extern int get_gpio_by_name(const char *name); -extern int __init sfi_parse_mrtc(struct sfi_table_header *table); -extern int __init sfi_parse_mtmr(struct sfi_table_header *table); -extern int sfi_mrtc_num; -extern struct sfi_rtc_table_entry sfi_mrtc_array[]; - -/* - * Here defines the array of devices platform data that IAFW would export - * through SFI "DEVS" table, we use name and type to match the device and - * its platform data. - */ -struct devs_id { - char name[SFI_NAME_LEN + 1]; - u8 type; - u8 delay; - u8 msic; - void *(*get_platform_data)(void *info); -}; - -#define sfi_device(i) \ - static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \ - __section(".x86_intel_mid_dev.init") = &i - -/** -* struct mid_sd_board_info - template for SD device creation -* @name: identifies the driver -* @bus_num: board-specific identifier for a given SD controller -* @max_clk: the maximum frequency device supports -* @platform_data: the particular data stored there is driver-specific -*/ -struct mid_sd_board_info { - char name[SFI_NAME_LEN]; - int bus_num; - unsigned short addr; - u32 max_clk; - void *platform_data; -}; - -/* - * Medfield is the follow-up of Moorestown, it combines two chip solution into - * one. Other than that it also added always-on and constant tsc and lapic - * timers. Medfield is the platform name, and the chip name is called Penwell - * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be - * identified via MSRs. 
- */ -enum intel_mid_cpu_type { - /* 1 was Moorestown */ - INTEL_MID_CPU_CHIP_PENWELL = 2, - INTEL_MID_CPU_CHIP_CLOVERVIEW, - INTEL_MID_CPU_CHIP_TANGIER, -}; - -extern enum intel_mid_cpu_type __intel_mid_cpu_chip; - #ifdef CONFIG_X86_INTEL_MID -static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void) -{ - return __intel_mid_cpu_chip; -} - -static inline bool intel_mid_has_msic(void) -{ - return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL); -} - extern void intel_scu_devices_create(void); extern void intel_scu_devices_destroy(void); #else /* !CONFIG_X86_INTEL_MID */ -#define intel_mid_identify_cpu() 0 -#define intel_mid_has_msic() 0 - static inline void intel_scu_devices_create(void) { } static inline void intel_scu_devices_destroy(void) { } #endif /* !CONFIG_X86_INTEL_MID */ -enum intel_mid_timer_options { - INTEL_MID_TIMER_DEFAULT, - INTEL_MID_TIMER_APBT_ONLY, - INTEL_MID_TIMER_LAPIC_APBT, -}; - -extern enum intel_mid_timer_options intel_mid_timer_options; - /* Bus Select SoC Fuse value */ #define BSEL_SOC_FUSE_MASK 0x7 /* FSB 133MHz */ @@ -118,16 +41,4 @@ extern enum intel_mid_timer_options intel_mid_timer_options; /* FSB 83MHz */ #define BSEL_SOC_FUSE_111 0x7 -#define SFI_MTMR_MAX_NUM 8 -#define SFI_MRTC_MAX 8 - -/* VRTC timer */ -#define MRST_VRTC_MAP_SZ 1024 -/* #define MRST_VRTC_PGOFFSET 0xc00 */ - -extern void intel_mid_rtc_init(void); - -/* The offset for the mapping of global gpio pin to irq */ -#define INTEL_MID_IRQ_OFFSET 0x100 - #endif /* _ASM_X86_INTEL_MID_H */ diff --git a/arch/x86/include/asm/intel_mid_vrtc.h b/arch/x86/include/asm/intel_mid_vrtc.h deleted file mode 100644 index 0b44b1abe4d9..000000000000 --- a/arch/x86/include/asm/intel_mid_vrtc.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _INTEL_MID_VRTC_H -#define _INTEL_MID_VRTC_H - -extern unsigned char vrtc_cmos_read(unsigned char reg); -extern void vrtc_cmos_write(unsigned char val, unsigned char reg); -extern void vrtc_get_time(struct timespec64 *now); -extern int vrtc_set_mmss(const struct timespec64 *now); - -#endif diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h index 11d457af68c5..8537f597d20a 100644 --- a/arch/x86/include/asm/intel_scu_ipc.h +++ b/arch/x86/include/asm/intel_scu_ipc.h @@ -65,6 +65,4 @@ static inline int intel_scu_ipc_dev_command(struct intel_scu_ipc_dev *scu, int c inlen, out, outlen); } -#include <asm/intel_scu_ipc_legacy.h> - #endif diff --git a/arch/x86/include/asm/intel_scu_ipc_legacy.h b/arch/x86/include/asm/intel_scu_ipc_legacy.h deleted file mode 100644 index 4cf13fecb673..000000000000 --- a/arch/x86/include/asm/intel_scu_ipc_legacy.h +++ /dev/null @@ -1,91 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_INTEL_SCU_IPC_LEGACY_H_ -#define _ASM_X86_INTEL_SCU_IPC_LEGACY_H_ - -#include <linux/notifier.h> - -#define IPCMSG_INDIRECT_READ 0x02 -#define IPCMSG_INDIRECT_WRITE 0x05 - -#define IPCMSG_COLD_OFF 0x80 /* Only for Tangier */ - -#define IPCMSG_WARM_RESET 0xF0 -#define IPCMSG_COLD_RESET 0xF1 -#define IPCMSG_SOFT_RESET 0xF2 -#define IPCMSG_COLD_BOOT 0xF3 - -#define IPCMSG_VRTC 0xFA /* Set vRTC device */ -/* Command id associated with message IPCMSG_VRTC */ -#define IPC_CMD_VRTC_SETTIME 1 /* Set time */ -#define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */ - -/* Don't call these in new code - they will be removed eventually */ - -/* Read single register */ -static inline int intel_scu_ipc_ioread8(u16 addr, u8 *data) -{ - return intel_scu_ipc_dev_ioread8(NULL, addr, data); -} - -/* 
Read a vector */ -static inline int intel_scu_ipc_readv(u16 *addr, u8 *data, int len) -{ - return intel_scu_ipc_dev_readv(NULL, addr, data, len); -} - -/* Write single register */ -static inline int intel_scu_ipc_iowrite8(u16 addr, u8 data) -{ - return intel_scu_ipc_dev_iowrite8(NULL, addr, data); -} - -/* Write a vector */ -static inline int intel_scu_ipc_writev(u16 *addr, u8 *data, int len) -{ - return intel_scu_ipc_dev_writev(NULL, addr, data, len); -} - -/* Update single register based on the mask */ -static inline int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask) -{ - return intel_scu_ipc_dev_update(NULL, addr, data, mask); -} - -/* Issue commands to the SCU with or without data */ -static inline int intel_scu_ipc_simple_command(int cmd, int sub) -{ - return intel_scu_ipc_dev_simple_command(NULL, cmd, sub); -} - -static inline int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, - u32 *out, int outlen) -{ - /* New API takes both inlen and outlen as bytes so convert here */ - size_t inbytes = inlen * sizeof(u32); - size_t outbytes = outlen * sizeof(u32); - - return intel_scu_ipc_dev_command_with_size(NULL, cmd, sub, in, inbytes, - inlen, out, outbytes); -} - -extern struct blocking_notifier_head intel_scu_notifier; - -static inline void intel_scu_notifier_add(struct notifier_block *nb) -{ - blocking_notifier_chain_register(&intel_scu_notifier, nb); -} - -static inline void intel_scu_notifier_remove(struct notifier_block *nb) -{ - blocking_notifier_chain_unregister(&intel_scu_notifier, nb); -} - -static inline int intel_scu_notifier_post(unsigned long v, void *p) -{ - return blocking_notifier_call_chain(&intel_scu_notifier, v, p); -} - -#define SCU_AVAILABLE 1 -#define SCU_DOWN 2 - -#endif diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 76d389691b5b..768aa234cbb4 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -25,8 +25,6 @@ static inline int irq_canonicalize(int irq) extern int irq_init_percpu_irqstack(unsigned int cpu); -#define __ARCH_HAS_DO_SOFTIRQ - struct irq_desc; extern void fixup_irqs(void); diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h index 775816965c6a..9b2a0ff76c73 100644 --- a/arch/x86/include/asm/irq_stack.h +++ b/arch/x86/include/asm/irq_stack.h @@ -7,100 +7,217 @@ #include <asm/processor.h> #ifdef CONFIG_X86_64 -static __always_inline bool irqstack_active(void) -{ - return __this_cpu_read(irq_count) != -1; -} - -void asm_call_on_stack(void *sp, void (*func)(void), void *arg); -void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs), - struct pt_regs *regs); -void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc), - struct irq_desc *desc); - -static __always_inline void __run_on_irqstack(void (*func)(void)) -{ - void *tos = __this_cpu_read(hardirq_stack_ptr); - __this_cpu_add(irq_count, 1); - asm_call_on_stack(tos - 8, func, NULL); - __this_cpu_sub(irq_count, 1); +/* + * Macro to inline switching to an interrupt stack and invoking function + * calls from there. The following rules apply: + * + * - Ordering: + * + * 1. Write the stack pointer into the top most place of the irq + * stack. This ensures that the various unwinders can link back to the + * original stack. + * + * 2. Switch the stack pointer to the top of the irq stack. + * + * 3. Invoke whatever needs to be done (@asm_call argument) + * + * 4. 
Pop the original stack pointer from the top of the irq stack + * which brings it back to the original stack where it left off. + * + * - Function invocation: + * + * To allow flexible usage of the macro, the actual function code including + * the store of the arguments in the call ABI registers is handed in via + * the @asm_call argument. + * + * - Local variables: + * + * @tos: + * The @tos variable holds a pointer to the top of the irq stack and + * _must_ be allocated in a non-callee saved register as this is a + * restriction coming from objtool. + * + * Note, that (tos) is both in input and output constraints to ensure + * that the compiler does not assume that R11 is left untouched in + * case this macro is used in some place where the per cpu interrupt + * stack pointer is used again afterwards + * + * - Function arguments: + * The function argument(s), if any, have to be defined in register + * variables at the place where this is invoked. Storing the + * argument(s) in the proper register(s) is part of the @asm_call + * + * - Constraints: + * + * The constraints have to be done very carefully because the compiler + * does not know about the assembly call. + * + * output: + * As documented already above the @tos variable is required to be in + * the output constraints to make the compiler aware that R11 cannot be + * reused after the asm() statement. + * + * For builds with CONFIG_UNWIND_FRAME_POINTER ASM_CALL_CONSTRAINT is + * required as well as this prevents certain creative GCC variants from + * misplacing the ASM code. + * + * input: + * - func: + * Immediate, which tells the compiler that the function is referenced. + * + * - tos: + * Register. The actual register is defined by the variable declaration. + * + * - function arguments: + * The constraints are handed in via the 'argconstr' argument list. They + * describe the register arguments which are used in @asm_call. + * + * clobbers: + * Function calls can clobber anything except the callee-saved + * registers. Tell the compiler. + */ +#define call_on_irqstack(func, asm_call, argconstr...) \ +{ \ + register void *tos asm("r11"); \ + \ + tos = ((void *)__this_cpu_read(hardirq_stack_ptr)); \ + \ + asm_inline volatile( \ + "movq %%rsp, (%[tos]) \n" \ + "movq %[tos], %%rsp \n" \ + \ + asm_call \ + \ + "popq %%rsp \n" \ + \ + : "+r" (tos), ASM_CALL_CONSTRAINT \ + : [__func] "i" (func), [tos] "r" (tos) argconstr \ + : "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", \ + "memory" \ + ); \ } -static __always_inline void -__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs), - struct pt_regs *regs) -{ - void *tos = __this_cpu_read(hardirq_stack_ptr); +/* Macros to assert type correctness for run_*_on_irqstack macros */ +#define assert_function_type(func, proto) \ + static_assert(__builtin_types_compatible_p(typeof(&func), proto)) - __this_cpu_add(irq_count, 1); - asm_call_sysvec_on_stack(tos - 8, func, regs); - __this_cpu_sub(irq_count, 1); -} - -static __always_inline void -__run_irq_on_irqstack(void (*func)(struct irq_desc *desc), - struct irq_desc *desc) -{ - void *tos = __this_cpu_read(hardirq_stack_ptr); +#define assert_arg_type(arg, proto) \ + static_assert(__builtin_types_compatible_p(typeof(arg), proto)) - __this_cpu_add(irq_count, 1); - asm_call_irq_on_stack(tos - 8, func, desc); - __this_cpu_sub(irq_count, 1); +/* + * Macro to invoke system vector and device interrupt C handlers. + */ +#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...) 
\ +{ \ + /* \ + * User mode entry and interrupt on the irq stack do not \ + * switch stacks. If from user mode the task stack is empty. \ + */ \ + if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) { \ + irq_enter_rcu(); \ + func(c_args); \ + irq_exit_rcu(); \ + } else { \ + /* \ + * Mark the irq stack inuse _before_ and unmark _after_ \ + * switching stacks. Interrupts are disabled in both \ + * places. Invoke the stack switch macro with the call \ + * sequence which matches the above direct invocation. \ + */ \ + __this_cpu_write(hardirq_stack_inuse, true); \ + call_on_irqstack(func, asm_call, constr); \ + __this_cpu_write(hardirq_stack_inuse, false); \ + } \ } -#else /* CONFIG_X86_64 */ -static inline bool irqstack_active(void) { return false; } -static inline void __run_on_irqstack(void (*func)(void)) { } -static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs), - struct pt_regs *regs) { } -static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc), - struct irq_desc *desc) { } -#endif /* !CONFIG_X86_64 */ +/* + * Function call sequence for __call_on_irqstack() for system vectors. + * + * Note that irq_enter_rcu() and irq_exit_rcu() do not use the input + * mechanism because these functions are global and cannot be optimized out + * when compiling a particular source file which uses one of these macros. + * + * The argument (regs) does not need to be pushed or stashed in a callee + * saved register to be safe vs. the irq_enter_rcu() call because the + * clobbers already prevent the compiler from storing it in a callee + * clobbered register. As the compiler has to preserve @regs for the final + * call to idtentry_exit() anyway, it's likely that it does not cause extra + * effort for this asm magic. + */ +#define ASM_CALL_SYSVEC \ + "call irq_enter_rcu \n" \ + "movq %[arg1], %%rdi \n" \ + "call %P[__func] \n" \ + "call irq_exit_rcu \n" + +#define SYSVEC_CONSTRAINTS , [arg1] "r" (regs) -static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs) -{ - if (IS_ENABLED(CONFIG_X86_32)) - return false; - if (!regs) - return !irqstack_active(); - return !user_mode(regs) && !irqstack_active(); +#define run_sysvec_on_irqstack_cond(func, regs) \ +{ \ + assert_function_type(func, void (*)(struct pt_regs *)); \ + assert_arg_type(regs, struct pt_regs *); \ + \ + call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC, \ + SYSVEC_CONSTRAINTS, regs); \ } +/* + * As in ASM_CALL_SYSVEC above the clobbers force the compiler to store + * @regs and @vector in callee saved registers. 
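The asm sequence above is the entire mechanism: park RSP at the top of the irq stack, point RSP there, call, pop RSP back. As a rough userspace analogue of running a function on a separate stack, here is a sketch using the ucontext API (deprecated in POSIX but still shipped by glibc); the stack size and names are arbitrary:

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t main_ctx, irq_ctx;

static void handler(void)
{
	int probe;

	/* This local lives on the malloc()ed "irq" stack. */
	printf("handler stack near %p\n", (void *)&probe);
}	/* returning resumes uc_link, i.e. main_ctx */

int main(void)
{
	char *stack = malloc(64 * 1024);
	int probe;

	if (!stack)
		return 1;

	getcontext(&irq_ctx);
	irq_ctx.uc_stack.ss_sp = stack;
	irq_ctx.uc_stack.ss_size = 64 * 1024;
	irq_ctx.uc_link = &main_ctx;		/* return here afterwards */
	makecontext(&irq_ctx, handler, 0);

	printf("main stack near %p\n", (void *)&probe);
	swapcontext(&main_ctx, &irq_ctx);	/* switch stacks, run handler */
	printf("back on the original stack\n");

	free(stack);
	return 0;
}

The kernel gets away with three instructions because the only state worth preserving is RSP itself, stored at the top of the irq stack where the unwinders can find it.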
+ */ +#define ASM_CALL_IRQ \ + "call irq_enter_rcu \n" \ + "movq %[arg1], %%rdi \n" \ + "movl %[arg2], %%esi \n" \ + "call %P[__func] \n" \ + "call irq_exit_rcu \n" -static __always_inline void run_on_irqstack_cond(void (*func)(void), - struct pt_regs *regs) -{ - lockdep_assert_irqs_disabled(); +#define IRQ_CONSTRAINTS , [arg1] "r" (regs), [arg2] "r" (vector) - if (irq_needs_irq_stack(regs)) - __run_on_irqstack(func); - else - func(); +#define run_irq_on_irqstack_cond(func, regs, vector) \ +{ \ + assert_function_type(func, void (*)(struct pt_regs *, u32)); \ + assert_arg_type(regs, struct pt_regs *); \ + assert_arg_type(vector, u32); \ + \ + call_on_irqstack_cond(func, regs, ASM_CALL_IRQ, \ + IRQ_CONSTRAINTS, regs, vector); \ } -static __always_inline void -run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs), - struct pt_regs *regs) -{ - lockdep_assert_irqs_disabled(); +#define ASM_CALL_SOFTIRQ \ + "call %P[__func] \n" - if (irq_needs_irq_stack(regs)) - __run_sysvec_on_irqstack(func, regs); - else - func(regs); +/* + * Macro to invoke __do_softirq on the irq stack. This is only called from + * task context when bottom halves are about to be re-enabled and soft + * interrupts are pending to be processed. The interrupt stack cannot be in + * use here. + */ +#define do_softirq_own_stack() \ +{ \ + __this_cpu_write(hardirq_stack_inuse, true); \ + call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ); \ + __this_cpu_write(hardirq_stack_inuse, false); \ } -static __always_inline void -run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc, - struct pt_regs *regs) -{ - lockdep_assert_irqs_disabled(); +#else /* CONFIG_X86_64 */ +/* System vector handlers always run on the stack they interrupted. */ +#define run_sysvec_on_irqstack_cond(func, regs) \ +{ \ + irq_enter_rcu(); \ + func(regs); \ + irq_exit_rcu(); \ +} - if (irq_needs_irq_stack(regs)) - __run_irq_on_irqstack(func, desc); - else - func(desc); +/* Switches to the irq stack within func() */ +#define run_irq_on_irqstack_cond(func, regs, vector) \ +{ \ + irq_enter_rcu(); \ + func(regs, vector); \ + irq_exit_rcu(); \ } +#endif /* !CONFIG_X86_64 */ + #endif diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h new file mode 100644 index 000000000000..97bbb4a9083a --- /dev/null +++ b/arch/x86/include/asm/kfence.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * x86 KFENCE support. + * + * Copyright (C) 2020, Google LLC. + */ + +#ifndef _ASM_X86_KFENCE_H +#define _ASM_X86_KFENCE_H + +#include <linux/bug.h> +#include <linux/kfence.h> + +#include <asm/pgalloc.h> +#include <asm/pgtable.h> +#include <asm/set_memory.h> +#include <asm/tlbflush.h> + +/* Force 4K pages for __kfence_pool. */ +static inline bool arch_kfence_init_pool(void) +{ + unsigned long addr; + + for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr); + addr += PAGE_SIZE) { + unsigned int level; + + if (!lookup_address(addr, &level)) + return false; + + if (level != PG_LEVEL_4K) + set_memory_4k(addr, 1); + } + + return true; +} + +/* Protect the given page and flush TLB. */ +static inline bool kfence_protect_page(unsigned long addr, bool protect) +{ + unsigned int level; + pte_t *pte = lookup_address(addr, &level); + + if (WARN_ON(!pte || level != PG_LEVEL_4K)) + return false; + + /* + * We need to avoid IPIs, as we may get KFENCE allocations or faults + * with interrupts disabled. Therefore, the below is best-effort, and + * does not flush TLBs on all CPUs.
We can tolerate some inaccuracy; + * lazy fault handling takes care of faults after the page is PRESENT. + */ + + if (protect) + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); + else + set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); + + /* Flush this CPU's TLB. */ + flush_tlb_one_kernel(addr); + return true; +} + +#endif /* _ASM_X86_KFENCE_H */ diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h new file mode 100644 index 000000000000..323641097f63 --- /dev/null +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#if !defined(KVM_X86_OP) || !defined(KVM_X86_OP_NULL) +BUILD_BUG_ON(1) +#endif + +/* + * KVM_X86_OP() and KVM_X86_OP_NULL() are used to help generate + * "static_call()"s. They are also intended for use when defining + * the vmx/svm kvm_x86_ops. KVM_X86_OP() can be used for those + * functions that follow the [svm|vmx]_func_name convention. + * KVM_X86_OP_NULL() can leave a NULL definition for the + * case where there is no definition or a function name that + * doesn't match the typical naming convention is supplied. + */ +KVM_X86_OP_NULL(hardware_enable) +KVM_X86_OP_NULL(hardware_disable) +KVM_X86_OP_NULL(hardware_unsetup) +KVM_X86_OP_NULL(cpu_has_accelerated_tpr) +KVM_X86_OP(has_emulated_msr) +KVM_X86_OP(vcpu_after_set_cpuid) +KVM_X86_OP(vm_init) +KVM_X86_OP_NULL(vm_destroy) +KVM_X86_OP(vcpu_create) +KVM_X86_OP(vcpu_free) +KVM_X86_OP(vcpu_reset) +KVM_X86_OP(prepare_guest_switch) +KVM_X86_OP(vcpu_load) +KVM_X86_OP(vcpu_put) +KVM_X86_OP(update_exception_bitmap) +KVM_X86_OP(get_msr) +KVM_X86_OP(set_msr) +KVM_X86_OP(get_segment_base) +KVM_X86_OP(get_segment) +KVM_X86_OP(get_cpl) +KVM_X86_OP(set_segment) +KVM_X86_OP_NULL(get_cs_db_l_bits) +KVM_X86_OP(set_cr0) +KVM_X86_OP(is_valid_cr4) +KVM_X86_OP(set_cr4) +KVM_X86_OP(set_efer) +KVM_X86_OP(get_idt) +KVM_X86_OP(set_idt) +KVM_X86_OP(get_gdt) +KVM_X86_OP(set_gdt) +KVM_X86_OP(sync_dirty_debug_regs) +KVM_X86_OP(set_dr7) +KVM_X86_OP(cache_reg) +KVM_X86_OP(get_rflags) +KVM_X86_OP(set_rflags) +KVM_X86_OP(tlb_flush_all) +KVM_X86_OP(tlb_flush_current) +KVM_X86_OP_NULL(tlb_remote_flush) +KVM_X86_OP_NULL(tlb_remote_flush_with_range) +KVM_X86_OP(tlb_flush_gva) +KVM_X86_OP(tlb_flush_guest) +KVM_X86_OP(run) +KVM_X86_OP_NULL(handle_exit) +KVM_X86_OP_NULL(skip_emulated_instruction) +KVM_X86_OP_NULL(update_emulated_instruction) +KVM_X86_OP(set_interrupt_shadow) +KVM_X86_OP(get_interrupt_shadow) +KVM_X86_OP(patch_hypercall) +KVM_X86_OP(set_irq) +KVM_X86_OP(set_nmi) +KVM_X86_OP(queue_exception) +KVM_X86_OP(cancel_injection) +KVM_X86_OP(interrupt_allowed) +KVM_X86_OP(nmi_allowed) +KVM_X86_OP(get_nmi_mask) +KVM_X86_OP(set_nmi_mask) +KVM_X86_OP(enable_nmi_window) +KVM_X86_OP(enable_irq_window) +KVM_X86_OP(update_cr8_intercept) +KVM_X86_OP(check_apicv_inhibit_reasons) +KVM_X86_OP_NULL(pre_update_apicv_exec_ctrl) +KVM_X86_OP(refresh_apicv_exec_ctrl) +KVM_X86_OP(hwapic_irr_update) +KVM_X86_OP(hwapic_isr_update) +KVM_X86_OP_NULL(guest_apic_has_interrupt) +KVM_X86_OP(load_eoi_exitmap) +KVM_X86_OP(set_virtual_apic_mode) +KVM_X86_OP_NULL(set_apic_access_page_addr) +KVM_X86_OP(deliver_posted_interrupt) +KVM_X86_OP_NULL(sync_pir_to_irr) +KVM_X86_OP(set_tss_addr) +KVM_X86_OP(set_identity_map_addr) +KVM_X86_OP(get_mt_mask) +KVM_X86_OP(load_mmu_pgd) +KVM_X86_OP_NULL(has_wbinvd_exit) +KVM_X86_OP(write_l1_tsc_offset) +KVM_X86_OP(get_exit_info) +KVM_X86_OP(check_intercept) +KVM_X86_OP(handle_exit_irqoff) +KVM_X86_OP_NULL(request_immediate_exit) +KVM_X86_OP(sched_in) 
+KVM_X86_OP_NULL(update_cpu_dirty_logging) +KVM_X86_OP_NULL(pre_block) +KVM_X86_OP_NULL(post_block) +KVM_X86_OP_NULL(vcpu_blocking) +KVM_X86_OP_NULL(vcpu_unblocking) +KVM_X86_OP_NULL(update_pi_irte) +KVM_X86_OP_NULL(apicv_post_state_restore) +KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt) +KVM_X86_OP_NULL(set_hv_timer) +KVM_X86_OP_NULL(cancel_hv_timer) +KVM_X86_OP(setup_mce) +KVM_X86_OP(smi_allowed) +KVM_X86_OP(pre_enter_smm) +KVM_X86_OP(pre_leave_smm) +KVM_X86_OP(enable_smi_window) +KVM_X86_OP_NULL(mem_enc_op) +KVM_X86_OP_NULL(mem_enc_reg_region) +KVM_X86_OP_NULL(mem_enc_unreg_region) +KVM_X86_OP(get_msr_feature) +KVM_X86_OP(can_emulate_instruction) +KVM_X86_OP(apic_init_signal_blocked) +KVM_X86_OP_NULL(enable_direct_tlbflush) +KVM_X86_OP_NULL(migrate_timers) +KVM_X86_OP(msr_filter_changed) +KVM_X86_OP_NULL(complete_emulated_msr) + +#undef KVM_X86_OP +#undef KVM_X86_OP_NULL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 3d6616f6f6ef..877a4025d8da 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -40,10 +40,8 @@ #define KVM_MAX_VCPUS 288 #define KVM_SOFT_MAX_VCPUS 240 #define KVM_MAX_VCPU_ID 1023 -#define KVM_USER_MEM_SLOTS 509 /* memory slots that are not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 3 -#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) #define KVM_HALT_POLL_NS_DEFAULT 200000 @@ -52,6 +50,9 @@ #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ KVM_DIRTY_LOG_INITIALLY_SET) +#define KVM_BUS_LOCK_DETECTION_VALID_MODE (KVM_BUS_LOCK_DETECTION_OFF | \ + KVM_BUS_LOCK_DETECTION_EXIT) + /* x86-specific vcpu->requests bit members */ #define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0) #define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1) @@ -88,6 +89,8 @@ KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_APF_READY KVM_ARCH_REQ(28) #define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29) +#define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \ + KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define CR0_RESERVED_BITS \ (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \ @@ -200,9 +203,17 @@ enum x86_intercept_stage; #define DR6_BS (1 << 14) #define DR6_BT (1 << 15) #define DR6_RTM (1 << 16) -#define DR6_FIXED_1 0xfffe0ff0 -#define DR6_INIT 0xffff0ff0 +/* + * DR6_ACTIVE_LOW combines fixed-1 and active-low bits. + * We can regard all the bits in DR6_FIXED_1 as active_low bits; + * they will never be 0 for now, but when they are defined + * in the future it will require no code change. + * + * DR6_ACTIVE_LOW is also used as the init/reset value for DR6. 
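How the three DR6 constants relate can be checked directly; the derived DR6_FIXED_1 reproduces the literal 0xfffe0ff0 this hunk removes, and the XOR round trip below is a hypothetical sketch of how an active-low encoding folds a payload in and out:

#include <stdio.h>
#include <stdint.h>

#define DR6_ACTIVE_LOW	0xffff0ff0u
#define DR6_VOLATILE	0x0001e00fu
#define DR6_FIXED_1	(DR6_ACTIVE_LOW & ~DR6_VOLATILE)

int main(void)
{
	/* Prints 0xfffe0ff0, the constant previously spelled out. */
	printf("DR6_FIXED_1 = %#x\n", DR6_FIXED_1);

	/* XOR against the active-low mask maps a "which bits fired"
	 * payload to an architectural DR6 value and back; the fixed-1
	 * bits stay set in the architectural form either way. */
	uint32_t payload = 0x1;				/* breakpoint 0 */
	uint32_t dr6 = payload ^ DR6_ACTIVE_LOW;	/* 0xffff0ff1 */
	printf("dr6 = %#x, payload = %#x\n", dr6, dr6 ^ DR6_ACTIVE_LOW);
	return 0;
}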
+ */ +#define DR6_ACTIVE_LOW 0xffff0ff0 #define DR6_VOLATILE 0x0001e00f +#define DR6_FIXED_1 (DR6_ACTIVE_LOW & ~DR6_VOLATILE) #define DR7_BP_EN_MASK 0x000000ff #define DR7_GE (1 << 9) @@ -337,6 +348,8 @@ struct kvm_mmu_root_info { #define KVM_MMU_NUM_PREV_ROOTS 3 +#define KVM_HAVE_MMU_RWLOCK + struct kvm_mmu_page; /* @@ -358,8 +371,6 @@ struct kvm_mmu { int (*sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp); void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa); - void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - u64 *spte, const void *pte); hpa_t root_hpa; gpa_t root_pgd; union kvm_mmu_role mmu_role; @@ -510,6 +521,7 @@ struct kvm_vcpu_hv_synic { /* Hyper-V per vcpu emulation context */ struct kvm_vcpu_hv { + struct kvm_vcpu *vcpu; u32 vp_index; u64 hv_vapic; s64 runtime_offset; @@ -520,6 +532,21 @@ struct kvm_vcpu_hv { cpumask_t tlb_flush; }; +/* Xen HVM per vcpu emulation context */ +struct kvm_vcpu_xen { + u64 hypercall_rip; + u32 current_runstate; + bool vcpu_info_set; + bool vcpu_time_info_set; + bool runstate_set; + struct gfn_to_hva_cache vcpu_info_cache; + struct gfn_to_hva_cache vcpu_time_info_cache; + struct gfn_to_hva_cache runstate_cache; + u64 last_steal; + u64 runstate_entry_time; + u64 runstate_times[4]; +}; + struct kvm_vcpu_arch { /* * rip and regs accesses must go through @@ -640,7 +667,7 @@ struct kvm_vcpu_arch { int cpuid_nent; struct kvm_cpuid_entry2 *cpuid_entries; - unsigned long cr3_lm_rsvd_bits; + u64 reserved_gpa_bits; int maxphyaddr; int max_tdp_level; @@ -717,7 +744,9 @@ struct kvm_vcpu_arch { /* used for guest single stepping over the given code position */ unsigned long singlestep_rip; - struct kvm_vcpu_hv hyperv; + bool hyperv_enabled; + struct kvm_vcpu_hv *hyperv; + struct kvm_vcpu_xen xen; cpumask_var_t wbinvd_dirty_mask; @@ -888,6 +917,14 @@ struct msr_bitmap_range { unsigned long *bitmap; }; +/* Xen emulation context */ +struct kvm_xen { + bool long_mode; + bool shinfo_set; + u8 upcall_vector; + struct gfn_to_hva_cache shinfo_cache; +}; + enum kvm_irqchip_mode { KVM_IRQCHIP_NONE, KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */ @@ -908,9 +945,6 @@ struct kvm_arch { unsigned int indirect_shadow_pages; u8 mmu_valid_gen; struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; - /* - * Hash table of struct kvm_mmu_page. - */ struct list_head active_mmu_pages; struct list_head zapped_obsolete_pages; struct list_head lpage_disallowed_mmu_pages; @@ -967,6 +1001,7 @@ struct kvm_arch { struct hlist_head mask_notifier_list; struct kvm_hv hyperv; + struct kvm_xen xen; #ifdef CONFIG_KVM_MMU_AUDIT int audit_point; @@ -977,6 +1012,7 @@ struct kvm_arch { u32 bsp_vcpu_id; u64 disabled_quirks; + int cpu_dirty_logging_count; enum kvm_irqchip_mode irqchip_mode; u8 nr_reserved_ioapic_pins; @@ -998,9 +1034,12 @@ struct kvm_arch { struct msr_bitmap_range ranges[16]; } msr_filter; + bool bus_lock_detection_enabled; + struct kvm_pmu_event_filter *pmu_event_filter; struct task_struct *nx_lpage_recovery_thread; +#ifdef CONFIG_X86_64 /* * Whether the TDP MMU is enabled for this VM. This contains a * snapshot of the TDP MMU module parameter from when the VM was @@ -1026,12 +1065,25 @@ struct kvm_arch { * tdp_mmu_page set and a root_count of 0. 
*/ struct list_head tdp_mmu_pages; + + /* + * Protects accesses to the following fields when the MMU lock + * is held in read mode: + * - tdp_mmu_pages (above) + * - the link field of struct kvm_mmu_pages used by the TDP MMU + * - lpage_disallowed_mmu_pages + * - the lpage_disallowed_link field of struct kvm_mmu_pages used + * by the TDP MMU + * It is acceptable, but not necessary, to acquire this lock when + * the thread holds the MMU lock in write mode. + */ + spinlock_t tdp_mmu_pages_lock; +#endif /* CONFIG_X86_64 */ }; struct kvm_vm_stat { ulong mmu_shadow_zapped; ulong mmu_pte_write; - ulong mmu_pte_updated; ulong mmu_pde_zapped; ulong mmu_flooded; ulong mmu_recycled; @@ -1225,30 +1277,11 @@ struct kvm_x86_ops { void (*sched_in)(struct kvm_vcpu *kvm, int cpu); /* - * Arch-specific dirty logging hooks. These hooks are only supposed to - * be valid if the specific arch has hardware-accelerated dirty logging - * mechanism. Currently only for PML on VMX. - * - * - slot_enable_log_dirty: - * called when enabling log dirty mode for the slot. - * - slot_disable_log_dirty: - * called when disabling log dirty mode for the slot. - * also called when slot is created with log dirty disabled. - * - flush_log_dirty: - * called before reporting dirty_bitmap to userspace. - * - enable_log_dirty_pt_masked: - * called when reenabling log dirty for the GFNs in the mask after - * corresponding bits are cleared in slot->dirty_bitmap. + * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A zero + * value indicates CPU dirty logging is unsupported or disabled. */ - void (*slot_enable_log_dirty)(struct kvm *kvm, - struct kvm_memory_slot *slot); - void (*slot_disable_log_dirty)(struct kvm *kvm, - struct kvm_memory_slot *slot); - void (*flush_log_dirty)(struct kvm *kvm); - void (*enable_log_dirty_pt_masked)(struct kvm *kvm, - struct kvm_memory_slot *slot, - gfn_t offset, unsigned long mask); - int (*cpu_dirty_log_size)(void); + int cpu_dirty_log_size; + void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu); /* pmu operations of sub-arch */ const struct kvm_pmu_ops *pmu_ops; @@ -1340,6 +1373,19 @@ extern u64 __read_mostly host_efer; extern bool __read_mostly allow_smaller_maxphyaddr; extern struct kvm_x86_ops kvm_x86_ops; +#define KVM_X86_OP(func) \ + DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func)); +#define KVM_X86_OP_NULL KVM_X86_OP +#include <asm/kvm-x86-ops.h> + +static inline void kvm_ops_static_call_update(void) +{ +#define KVM_X86_OP(func) \ + static_call_update(kvm_x86_##func, kvm_x86_ops.func); +#define KVM_X86_OP_NULL KVM_X86_OP +#include <asm/kvm-x86-ops.h> +} + #define __KVM_HAVE_ARCH_VM_ALLOC static inline struct kvm *kvm_arch_alloc_vm(void) { @@ -1351,7 +1397,7 @@ void kvm_arch_free_vm(struct kvm *kvm); static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) { if (kvm_x86_ops.tlb_remote_flush && - !kvm_x86_ops.tlb_remote_flush(kvm)) + !static_call(kvm_x86_tlb_remote_flush)(kvm)) return 0; else return -ENOTSUPP; @@ -1378,11 +1424,6 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot); void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, struct kvm_memory_slot *memslot); -void kvm_mmu_slot_set_dirty(struct kvm *kvm, - struct kvm_memory_slot *memslot); -void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, - struct kvm_memory_slot *slot, - gfn_t gfn_offset, unsigned long mask); void kvm_mmu_zap_all(struct kvm *kvm); void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); unsigned long 
kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm); @@ -1421,6 +1462,8 @@ extern u8 kvm_tsc_scaling_ratio_frac_bits; extern u64 kvm_max_tsc_scaling_ratio; /* 1ull << kvm_tsc_scaling_ratio_frac_bits */ extern u64 kvm_default_tsc_scaling_ratio; +/* bus lock detection supported? */ +extern bool kvm_has_bus_lock_exit; extern u64 kvm_mce_cap_supported; @@ -1501,7 +1544,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val); -int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val); +void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); @@ -1552,7 +1595,6 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu); void kvm_update_dr7(struct kvm_vcpu *vcpu); int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn); -int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); int kvm_mmu_load(struct kvm_vcpu *vcpu); void kvm_mmu_unload(struct kvm_vcpu *vcpu); @@ -1742,14 +1784,12 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq) static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) { - if (kvm_x86_ops.vcpu_blocking) - kvm_x86_ops.vcpu_blocking(vcpu); + static_call_cond(kvm_x86_vcpu_blocking)(vcpu); } static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) { - if (kvm_x86_ops.vcpu_unblocking) - kvm_x86_ops.vcpu_unblocking(vcpu); + static_call_cond(kvm_x86_vcpu_unblocking)(vcpu); } static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 30f76b966857..ccf60a809a17 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -78,6 +78,13 @@ extern int hyperv_init_cpuhp; extern void *hv_hypercall_pg; extern void __percpu **hyperv_pcpu_input_arg; +extern void __percpu **hyperv_pcpu_output_arg; + +extern u64 hv_current_partition_id; + +int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages); +int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id); +int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags); static inline u64 hv_do_hypercall(u64 control, void *input, void *output) { @@ -239,6 +246,8 @@ int hyperv_fill_flush_guest_mapping_list( struct hv_guest_mapping_flush_list *flush, u64 start_gfn, u64 end_gfn); +extern bool hv_root_partition; + #ifdef CONFIG_X86_64 void hv_apic_init(void); void __init hv_init_spinlocks(void); @@ -250,10 +259,16 @@ static inline void hv_apic_init(void) {} static inline void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, struct msi_desc *msi_desc) { - msi_entry->address = msi_desc->msg.address_lo; - msi_entry->data = msi_desc->msg.data; + msi_entry->address.as_uint32 = msi_desc->msg.address_lo; + msi_entry->data.as_uint32 = msi_desc->msg.data; } +struct irq_domain *hv_create_pci_msi_domain(void); + +int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector, + struct hv_interrupt_entry *entry); +int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry); + #else /* CONFIG_HYPERV */ static inline void hyperv_init(void) {} static inline void hyperv_setup_mmu_ops(void) {} diff --git 
a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h index fdbffec4cfde..5a2baf28a1dc 100644 --- a/arch/x86/include/asm/orc_types.h +++ b/arch/x86/include/asm/orc_types.h @@ -40,6 +40,8 @@ #define ORC_REG_MAX 15 #ifndef __ASSEMBLY__ +#include <asm/byteorder.h> + /* * This struct is more or less a vastly simplified version of the DWARF Call * Frame Information standard. It contains only the necessary parts of DWARF @@ -51,10 +53,18 @@ struct orc_entry { s16 sp_offset; s16 bp_offset; +#if defined(__LITTLE_ENDIAN_BITFIELD) unsigned sp_reg:4; unsigned bp_reg:4; unsigned type:2; unsigned end:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned bp_reg:4; + unsigned sp_reg:4; + unsigned unused:5; + unsigned end:1; + unsigned type:2; +#endif } __packed; #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/platform_sst_audio.h b/arch/x86/include/asm/platform_sst_audio.h index 16b9f220bdeb..40f92270515b 100644 --- a/arch/x86/include/asm/platform_sst_audio.h +++ b/arch/x86/include/asm/platform_sst_audio.h @@ -10,8 +10,6 @@ #ifndef _PLATFORM_SST_AUDIO_H_ #define _PLATFORM_SST_AUDIO_H_ -#include <linux/sfi.h> - #define MAX_NUM_STREAMS_MRFLD 25 #define MAX_NUM_STREAMS MAX_NUM_STREAMS_MRFLD diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c20a52b5534b..dc6d149bf851 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -426,8 +426,6 @@ struct irq_stack { char stack[IRQ_STACK_SIZE]; } __aligned(IRQ_STACK_SIZE); -DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr); - #ifdef CONFIG_X86_32 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); #else @@ -454,7 +452,8 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu) return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu); } -DECLARE_PER_CPU(unsigned int, irq_count); +DECLARE_PER_CPU(void *, hardirq_stack_ptr); +DECLARE_PER_CPU(bool, hardirq_stack_inuse); extern asmlinkage void ignore_sysret(void); /* Save actual FS/GS selectors and bases to current->thread */ @@ -473,9 +472,9 @@ struct stack_canary { }; DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); #endif -/* Per CPU softirq stack pointer */ +DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr); DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr); -#endif /* X86_64 */ +#endif /* !X86_64 */ extern unsigned int fpu_kernel_xstate_size; extern unsigned int fpu_user_xstate_size; diff --git a/arch/x86/include/asm/softirq_stack.h b/arch/x86/include/asm/softirq_stack.h new file mode 100644 index 000000000000..889d53d6a0e1 --- /dev/null +++ b/arch/x86/include/asm/softirq_stack.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_SOFTIRQ_STACK_H +#define _ASM_X86_SOFTIRQ_STACK_H + +#ifdef CONFIG_X86_64 +# include <asm/irq_stack.h> +#else +# include <asm-generic/softirq_stack.h> +#endif + +#endif diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h index 664d4610d700..8e574c0afef8 100644 --- a/arch/x86/include/asm/unwind_hints.h +++ b/arch/x86/include/asm/unwind_hints.h @@ -48,17 +48,8 @@ UNWIND_HINT_REGS base=\base offset=\offset partial=1 .endm -.macro UNWIND_HINT_FUNC sp_offset=8 - UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=\sp_offset type=UNWIND_HINT_TYPE_CALL -.endm - -/* - * RET_OFFSET: Used on instructions that terminate a function; mostly RETURN - * and sibling calls. On these, sp_offset denotes the expected offset from - * initial_func_cfi. 
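The two orc_entry bitfield layouts above exist because C bitfield allocation order follows the target's byte order, so identical field values pack into different bits. A small demonstration; the values are arbitrary and the output shown is from a typical little-endian x86-64 GCC build:

#include <stdio.h>
#include <string.h>

/* Same widths as orc_entry's flag bits, likewise packed. */
struct orc_flags {
	unsigned int sp_reg:4;
	unsigned int bp_reg:4;
	unsigned int type:2;
	unsigned int end:1;
} __attribute__((packed));

int main(void)
{
	struct orc_flags f;
	unsigned char raw[sizeof(f)];

	memset(&f, 0, sizeof(f));
	f.sp_reg = 0xa;
	f.bp_reg = 0x5;
	f.type = 0x2;
	f.end = 0x1;

	memcpy(raw, &f, sizeof(f));
	for (unsigned int i = 0; i < sizeof(f); i++)
		printf("%02x ", raw[i]);	/* "5a 06" on x86-64 */
	printf("\n");
	return 0;
}

On a big-endian target the same assignments land in different byte positions, which is why the header now spells out both orders explicitly.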
- */ -.macro UNWIND_HINT_RET_OFFSET sp_offset=8 - UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_RET_OFFSET sp_offset=\sp_offset +.macro UNWIND_HINT_FUNC + UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC .endm #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 9aad0e0876fb..8757078d4442 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -30,16 +30,29 @@ static inline int cpu_has_vmx(void) } -/** Disable VMX on the current CPU +/** + * cpu_vmxoff() - Disable VMX on the current CPU * - * vmxoff causes a undefined-opcode exception if vmxon was not run - * on the CPU previously. Only call this function if you know VMX - * is enabled. + * Disable VMX and clear CR4.VMXE (even if VMXOFF faults) + * + * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to + * atomically track post-VMXON state, e.g. this may be called in NMI context. + * Eat all faults, as all other VMXOFF faults are mode related, i.e. + * faults are guaranteed to be due to the !post-VMXON check unless the CPU is + * magically in RM, VM86, compat mode, or at CPL>0. */ -static inline void cpu_vmxoff(void) +static inline int cpu_vmxoff(void) { - asm volatile ("vmxoff"); + asm_volatile_goto("1: vmxoff\n\t" + _ASM_EXTABLE(1b, %l[fault]) + ::: "cc", "memory" : fault); + + cr4_clear_bits(X86_CR4_VMXE); + return 0; + +fault: cr4_clear_bits(X86_CR4_VMXE); + return -EIO; } static inline int cpu_vmx_enabled(void) diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 38ca445a8429..358707f60d99 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -73,6 +73,7 @@ #define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA) #define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING) #define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE VMCS_CONTROL_BIT(USR_WAIT_PAUSE) +#define SECONDARY_EXEC_BUS_LOCK_DETECTION VMCS_CONTROL_BIT(BUS_LOCK_DETECTION) #define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING) #define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING) diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h index 9915990fd8cf..d9a74681a77d 100644 --- a/arch/x86/include/asm/vmxfeatures.h +++ b/arch/x86/include/asm/vmxfeatures.h @@ -83,5 +83,6 @@ #define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* Scale hardware TSC when read in guest */ #define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* Enable TPAUSE, UMONITOR, UMWAIT in guest */ #define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* "" VM-Exit on ENCLV (leaf dependent) */ +#define VMX_FEATURE_BUS_LOCK_DETECTION ( 2*32+ 30) /* "" VM-Exit when bus lock caused */ #endif /* _ASM_X86_VMXFEATURES_H */ diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h index 9139b3e86316..baca0b00ef76 100644 --- a/arch/x86/include/asm/xen/interface.h +++ b/arch/x86/include/asm/xen/interface.h @@ -182,6 +182,9 @@ struct arch_shared_info { unsigned long p2m_cr3; /* cr3 value of the p2m address space */ unsigned long p2m_vaddr; /* virtual address of the p2m list */ unsigned long p2m_generation; /* generation count of p2m mapping */ +#ifdef CONFIG_X86_32 + uint32_t wc_sec_hi; +#endif }; #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 1a162e559753..7068e4bb057d 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -87,6 +87,18 @@ clear_foreign_p2m_mapping(struct 
gnttab_unmap_grant_ref *unmap_ops, #endif /* + * The maximum amount of extra memory compared to the base size. The + * main scaling factor is the size of struct page. At extreme ratios + * of base:extra, all the base memory can be filled with page + * structures for the extra memory, leaving no space for anything + * else. + * + * 10x seems like a reasonable balance between scaling flexibility and + * leaving a practically usable system. + */ +#define XEN_EXTRA_MEM_RATIO (10) + +/* * Helper functions to write or read unsigned long values to/from * memory, when the access may fault. */ diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 8e76d3701db3..5a3022c8af82 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -112,6 +112,7 @@ struct kvm_ioapic_state { #define KVM_NR_IRQCHIPS 3 #define KVM_RUN_X86_SMM (1 << 0) +#define KVM_RUN_X86_BUS_LOCK (1 << 1) /* for KVM_GET_REGS and KVM_SET_REGS */ struct kvm_regs { diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index ada955c5ebb6..b8e650a985e3 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h @@ -89,6 +89,7 @@ #define EXIT_REASON_XRSTORS 64 #define EXIT_REASON_UMWAIT 67 #define EXIT_REASON_TPAUSE 68 +#define EXIT_REASON_BUS_LOCK 74 #define VMX_EXIT_REASONS \ { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ @@ -150,7 +151,8 @@ { EXIT_REASON_XSAVES, "XSAVES" }, \ { EXIT_REASON_XRSTORS, "XRSTORS" }, \ { EXIT_REASON_UMWAIT, "UMWAIT" }, \ - { EXIT_REASON_TPAUSE, "TPAUSE" } + { EXIT_REASON_TPAUSE, "TPAUSE" }, \ + { EXIT_REASON_BUS_LOCK, "BUS_LOCK" } #define VMX_EXIT_REASON_FLAGS \ { VMX_EXIT_REASONS_FAILED_VMENTRY, "FAILED_VMENTRY" } diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 5eeb808eb024..2ddf08351f0b 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -116,7 +116,6 @@ obj-$(CONFIG_VM86) += vm86_32.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_HPET_TIMER) += hpet.o -obj-$(CONFIG_APB_TIMER) += apb_timer.o obj-$(CONFIG_AMD_NB) += amd_nb.o obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile index f1bb57b0e41e..cf340d85946a 100644 --- a/arch/x86/kernel/acpi/Makefile +++ b/arch/x86/kernel/acpi/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -OBJECT_FILES_NON_STANDARD_wakeup_$(BITS).o := y obj-$(CONFIG_ACPI) += boot.o obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 5d3a0b8fd379..56b6865afb2a 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -1,12 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0-only */ .text #include <linux/linkage.h> +#include <linux/objtool.h> #include <asm/segment.h> #include <asm/pgtable_types.h> #include <asm/page_types.h> #include <asm/msr.h> #include <asm/asm-offsets.h> #include <asm/frame.h> +#include <asm/nospec-branch.h> # Copyright 2003 Pavel Machek <pavel@suse.cz @@ -39,6 +41,7 @@ SYM_FUNC_START(wakeup_long64) movq saved_rbp, %rbp movq saved_rip, %rax + ANNOTATE_RETPOLINE_SAFE jmp *%rax SYM_FUNC_END(wakeup_long64) @@ -126,6 +129,7 @@ SYM_FUNC_START(do_suspend_lowlevel) FRAME_END jmp restore_processor_state SYM_FUNC_END(do_suspend_lowlevel) +STACK_FRAME_NON_STANDARD do_suspend_lowlevel .data saved_rbp: .quad 0 diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c deleted file mode 100644 index 
263eeaddb0aa..000000000000 --- a/arch/x86/kernel/apb_timer.c +++ /dev/null @@ -1,347 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * apb_timer.c: Driver for Langwell APB timers - * - * (C) Copyright 2009 Intel Corporation - * Author: Jacob Pan (jacob.jun.pan@intel.com) - * - * Note: - * Langwell is the south complex of Intel Moorestown MID platform. There are - * eight external timers in total that can be used by the operating system. - * The timer information, such as frequency and addresses, is provided to the - * OS via SFI tables. - * Timer interrupts are routed via FW/HW emulated IOAPIC independently via - * individual redirection table entries (RTE). - * Unlike HPET, there is no master counter, therefore one of the timers are - * used as clocksource. The overall allocation looks like: - * - timer 0 - NR_CPUs for per cpu timer - * - one timer for clocksource - * - one timer for watchdog driver. - * It is also worth notice that APB timer does not support true one-shot mode, - * free-running mode will be used here to emulate one-shot mode. - * APB timer can also be used as broadcast timer along with per cpu local APIC - * timer, but by default APB timer has higher rating than local APIC timers. - */ - -#include <linux/delay.h> -#include <linux/dw_apb_timer.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/pm.h> -#include <linux/sfi.h> -#include <linux/interrupt.h> -#include <linux/cpu.h> -#include <linux/irq.h> - -#include <asm/fixmap.h> -#include <asm/apb_timer.h> -#include <asm/intel-mid.h> -#include <asm/time.h> - -#define APBT_CLOCKEVENT_RATING 110 -#define APBT_CLOCKSOURCE_RATING 250 - -#define APBT_CLOCKEVENT0_NUM (0) -#define APBT_CLOCKSOURCE_NUM (2) - -static phys_addr_t apbt_address; -static int apb_timer_block_enabled; -static void __iomem *apbt_virt_address; - -/* - * Common DW APB timer info - */ -static unsigned long apbt_freq; - -struct apbt_dev { - struct dw_apb_clock_event_device *timer; - unsigned int num; - int cpu; - unsigned int irq; - char name[10]; -}; - -static struct dw_apb_clocksource *clocksource_apbt; - -static inline void __iomem *adev_virt_addr(struct apbt_dev *adev) -{ - return apbt_virt_address + adev->num * APBTMRS_REG_SIZE; -} - -static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev); - -#ifdef CONFIG_SMP -static unsigned int apbt_num_timers_used; -#endif - -static inline void apbt_set_mapping(void) -{ - struct sfi_timer_table_entry *mtmr; - int phy_cs_timer_id = 0; - - if (apbt_virt_address) { - pr_debug("APBT base already mapped\n"); - return; - } - mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); - if (mtmr == NULL) { - printk(KERN_ERR "Failed to get MTMR %d from SFI\n", - APBT_CLOCKEVENT0_NUM); - return; - } - apbt_address = (phys_addr_t)mtmr->phys_addr; - if (!apbt_address) { - printk(KERN_WARNING "No timer base from SFI, use default\n"); - apbt_address = APBT_DEFAULT_BASE; - } - apbt_virt_address = ioremap(apbt_address, APBT_MMAP_SIZE); - if (!apbt_virt_address) { - pr_debug("Failed mapping APBT phy address at %lu\n",\ - (unsigned long)apbt_address); - goto panic_noapbt; - } - apbt_freq = mtmr->freq_hz; - sfi_free_mtmr(mtmr); - - /* Now figure out the physical timer id for clocksource device */ - mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM); - if (mtmr == NULL) - goto panic_noapbt; - - /* Now figure out the physical timer id */ - pr_debug("Use timer %d for clocksource\n", - (int)(mtmr->phys_addr & 0xff) / APBTMRS_REG_SIZE); - phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff) / - APBTMRS_REG_SIZE; 
- - clocksource_apbt = dw_apb_clocksource_init(APBT_CLOCKSOURCE_RATING, - "apbt0", apbt_virt_address + phy_cs_timer_id * - APBTMRS_REG_SIZE, apbt_freq); - return; - -panic_noapbt: - panic("Failed to setup APB system timer\n"); - -} - -static inline void apbt_clear_mapping(void) -{ - iounmap(apbt_virt_address); - apbt_virt_address = NULL; -} - -static int __init apbt_clockevent_register(void) -{ - struct sfi_timer_table_entry *mtmr; - struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev); - - mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); - if (mtmr == NULL) { - printk(KERN_ERR "Failed to get MTMR %d from SFI\n", - APBT_CLOCKEVENT0_NUM); - return -ENODEV; - } - - adev->num = smp_processor_id(); - adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0", - intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ? - APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING, - adev_virt_addr(adev), 0, apbt_freq); - /* Firmware does EOI handling for us. */ - adev->timer->eoi = NULL; - - if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) { - global_clock_event = &adev->timer->ced; - printk(KERN_DEBUG "%s clockevent registered as global\n", - global_clock_event->name); - } - - dw_apb_clockevent_register(adev->timer); - - sfi_free_mtmr(mtmr); - return 0; -} - -#ifdef CONFIG_SMP - -static void apbt_setup_irq(struct apbt_dev *adev) -{ - irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT); - irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); -} - -/* Should be called with per cpu */ -void apbt_setup_secondary_clock(void) -{ - struct apbt_dev *adev; - int cpu; - - /* Don't register boot CPU clockevent */ - cpu = smp_processor_id(); - if (!cpu) - return; - - adev = this_cpu_ptr(&cpu_apbt_dev); - if (!adev->timer) { - adev->timer = dw_apb_clockevent_init(cpu, adev->name, - APBT_CLOCKEVENT_RATING, adev_virt_addr(adev), - adev->irq, apbt_freq); - adev->timer->eoi = NULL; - } else { - dw_apb_clockevent_resume(adev->timer); - } - - printk(KERN_INFO "Registering CPU %d clockevent device %s, cpu %08x\n", - cpu, adev->name, adev->cpu); - - apbt_setup_irq(adev); - dw_apb_clockevent_register(adev->timer); - - return; -} - -/* - * this notify handler process CPU hotplug events. in case of S0i3, nonboot - * cpus are disabled/enabled frequently, for performance reasons, we keep the - * per cpu timer irq registered so that we do need to do free_irq/request_irq. - * - * TODO: it might be more reliable to directly disable percpu clockevent device - * without the notifier chain. currently, cpu 0 may get interrupts from other - * cpu timers during the offline process due to the ordering of notification. - * the extra interrupt is harmless. 
- */ -static int apbt_cpu_dead(unsigned int cpu) -{ - struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu); - - dw_apb_clockevent_pause(adev->timer); - if (system_state == SYSTEM_RUNNING) { - pr_debug("skipping APBT CPU %u offline\n", cpu); - } else { - pr_debug("APBT clockevent for cpu %u offline\n", cpu); - dw_apb_clockevent_stop(adev->timer); - } - return 0; -} - -static __init int apbt_late_init(void) -{ - if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT || - !apb_timer_block_enabled) - return 0; - return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "x86/apb:dead", NULL, - apbt_cpu_dead); -} -fs_initcall(apbt_late_init); -#else - -void apbt_setup_secondary_clock(void) {} - -#endif /* CONFIG_SMP */ - -static int apbt_clocksource_register(void) -{ - u64 start, now; - u64 t1; - - /* Start the counter, use timer 2 as source, timer 0/1 for event */ - dw_apb_clocksource_start(clocksource_apbt); - - /* Verify whether apbt counter works */ - t1 = dw_apb_clocksource_read(clocksource_apbt); - start = rdtsc(); - - /* - * We don't know the TSC frequency yet, but waiting for - * 200000 TSC cycles is safe: - * 4 GHz == 50us - * 1 GHz == 200us - */ - do { - rep_nop(); - now = rdtsc(); - } while ((now - start) < 200000UL); - - /* APBT is the only always on clocksource, it has to work! */ - if (t1 == dw_apb_clocksource_read(clocksource_apbt)) - panic("APBT counter not counting. APBT disabled\n"); - - dw_apb_clocksource_register(clocksource_apbt); - - return 0; -} - -/* - * Early setup the APBT timer, only use timer 0 for booting then switch to - * per CPU timer if possible. - * returns 1 if per cpu apbt is setup - * returns 0 if no per cpu apbt is chosen - * panic if set up failed, this is the only platform timer on Moorestown. - */ -void __init apbt_time_init(void) -{ -#ifdef CONFIG_SMP - int i; - struct sfi_timer_table_entry *p_mtmr; - struct apbt_dev *adev; -#endif - - if (apb_timer_block_enabled) - return; - apbt_set_mapping(); - if (!apbt_virt_address) - goto out_noapbt; - /* - * Read the frequency and check for a sane value, for ESL model - * we extend the possible clock range to allow time scaling. 
- */ - - if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) { - pr_debug("APBT has invalid freq 0x%lx\n", apbt_freq); - goto out_noapbt; - } - if (apbt_clocksource_register()) { - pr_debug("APBT has failed to register clocksource\n"); - goto out_noapbt; - } - if (!apbt_clockevent_register()) - apb_timer_block_enabled = 1; - else { - pr_debug("APBT has failed to register clockevent\n"); - goto out_noapbt; - } -#ifdef CONFIG_SMP - /* kernel cmdline disable apb timer, so we will use lapic timers */ - if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) { - printk(KERN_INFO "apbt: disabled per cpu timer\n"); - return; - } - pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus()); - if (num_possible_cpus() <= sfi_mtimer_num) - apbt_num_timers_used = num_possible_cpus(); - else - apbt_num_timers_used = 1; - pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used); - - /* here we set up per CPU timer data structure */ - for (i = 0; i < apbt_num_timers_used; i++) { - adev = &per_cpu(cpu_apbt_dev, i); - adev->num = i; - adev->cpu = i; - p_mtmr = sfi_get_mtmr(i); - if (p_mtmr) - adev->irq = p_mtmr->irq; - else - printk(KERN_ERR "Failed to get timer for cpu %d\n", i); - snprintf(adev->name, sizeof(adev->name) - 1, "apbt%d", i); - } -#endif - - return; - -out_noapbt: - apbt_clear_mapping(); - apb_timer_block_enabled = 0; - panic("failed to enable APB timer\n"); -} diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 7f4c081f59f0..bda4f2a36868 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1747,6 +1747,7 @@ void apic_ap_setup(void) #ifdef CONFIG_X86_X2APIC int x2apic_mode; +EXPORT_SYMBOL_GPL(x2apic_mode); enum { X2APIC_OFF, @@ -2137,18 +2138,11 @@ void __init register_lapic_address(unsigned long address) * Local APIC interrupts */ -/** - * spurious_interrupt - Catch all for interrupts raised on unused vectors - * @regs: Pointer to pt_regs on stack - * @vector: The vector number - * - * This is invoked from ASM entry code to catch all interrupts which - * trigger on an entry which is routed to the common_spurious idtentry - * point. - * - * Also called from sysvec_spurious_apic_interrupt(). +/* + * Common handling code for spurious_interrupt and spurious_vector entry + * points below. No point in allowing the compiler to inline it twice. */ -DEFINE_IDTENTRY_IRQ(spurious_interrupt) +static noinline void handle_spurious_interrupt(u8 vector) { u32 v; @@ -2183,9 +2177,23 @@ out: trace_spurious_apic_exit(vector); } +/** + * spurious_interrupt - Catch all for interrupts raised on unused vectors + * @regs: Pointer to pt_regs on stack + * @vector: The vector number + * + * This is invoked from ASM entry code to catch all interrupts which + * trigger on an entry which is routed to the common_spurious idtentry + * point. 
+ */ +DEFINE_IDTENTRY_IRQ(spurious_interrupt) +{ + handle_spurious_interrupt(vector); +} + DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt) { - __spurious_interrupt(regs, SPURIOUS_APIC_VECTOR); + handle_spurious_interrupt(SPURIOUS_APIC_VECTOR); } /* diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index e4ab4804b20d..c3b60c37c728 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -198,7 +198,7 @@ static int __init parse_noapic(char *str) } early_param("noapic", parse_noapic); -/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ +/* Will be called in mpparse/ACPI codes for saving IRQ info */ void mp_save_irq(struct mpc_intsrc *m) { int i; @@ -2863,7 +2863,7 @@ int mp_register_ioapic(int id, u32 address, u32 gsi_base, /* * If mp_register_ioapic() is called during early boot stage when - * walking ACPI/SFI/DT tables, it's too early to create irqdomain, + * walking ACPI/DT tables, it's too early to create irqdomain, * we are still using bootmem allocator. So delay it to setup_IO_APIC(). */ if (hotplug) { diff --git a/arch/x86/kernel/cpu/acrn.c b/arch/x86/kernel/cpu/acrn.c index 0b2c03943ac6..23f5f27b5a02 100644 --- a/arch/x86/kernel/cpu/acrn.c +++ b/arch/x86/kernel/cpu/acrn.c @@ -10,6 +10,8 @@ */ #include <linux/interrupt.h> + +#include <asm/acrn.h> #include <asm/apic.h> #include <asm/cpufeatures.h> #include <asm/desc.h> @@ -19,7 +21,7 @@ static u32 __init acrn_detect(void) { - return hypervisor_cpuid_base("ACRNACRNACRN", 0); + return acrn_cpuid_base(); } static void __init acrn_init_platform(void) @@ -55,6 +57,18 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_acrn_hv_callback) set_irq_regs(old_regs); } +void acrn_setup_intr_handler(void (*handler)(void)) +{ + acrn_intr_handler = handler; +} +EXPORT_SYMBOL_GPL(acrn_setup_intr_handler); + +void acrn_remove_intr_handler(void) +{ + acrn_intr_handler = NULL; +} +EXPORT_SYMBOL_GPL(acrn_remove_intr_handler); + const __initconst struct hypervisor_x86 x86_hyper_acrn = { .name = "ACRN", .detect = acrn_detect, diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 9215b91bc044..ab640abe26b6 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1742,8 +1742,8 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = &init_task; EXPORT_PER_CPU_SYMBOL(current_task); -DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr); -DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; +DEFINE_PER_CPU(void *, hardirq_stack_ptr); +DEFINE_PER_CPU(bool, hardirq_stack_inuse); DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; EXPORT_PER_CPU_SYMBOL(__preempt_count); diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 43b54bef5448..e88bc296afca 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -31,6 +31,11 @@ #include <asm/reboot.h> #include <asm/nmi.h> #include <clocksource/hyperv_timer.h> +#include <asm/numa.h> + +/* Is Linux running as the root partition? 
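That question is answered from the hypervisor CPUID leaves: the mshyperv.c change below sets hv_root_partition when the CPU-management privilege bit is set in EBX of HYPERV_CPUID_FEATURES (leaf 0x40000003). A hedged userspace probe of the same bit, only meaningful when actually running under Hyper-V; the bit position mirrors the kernel's HV_CPU_MANAGEMENT definition:

	#include <cpuid.h>
	#include <stdio.h>

	#define HYPERV_CPUID_FEATURES	0x40000003
	#define HV_CPU_MANAGEMENT	(1u << 12)	/* per the kernel's definition */

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* Hypervisor leaves only carry valid data under Hyper-V. */
		__cpuid(HYPERV_CPUID_FEATURES, eax, ebx, ecx, edx);
		printf("root partition: %s\n",
		       (ebx & HV_CPU_MANAGEMENT) ? "yes" : "no");
		return 0;
	}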
*/ +bool hv_root_partition; +EXPORT_SYMBOL_GPL(hv_root_partition); struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); @@ -226,6 +231,32 @@ static void __init hv_smp_prepare_boot_cpu(void) hv_init_spinlocks(); #endif } + +static void __init hv_smp_prepare_cpus(unsigned int max_cpus) +{ +#ifdef CONFIG_X86_64 + int i; + int ret; +#endif + + native_smp_prepare_cpus(max_cpus); + +#ifdef CONFIG_X86_64 + for_each_present_cpu(i) { + if (i == 0) + continue; + ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i)); + BUG_ON(ret); + } + + for_each_present_cpu(i) { + if (i == 0) + continue; + ret = hv_call_create_vp(numa_cpu_node(i), hv_current_partition_id, i, i); + BUG_ON(ret); + } +#endif +} #endif static void __init ms_hyperv_init_platform(void) @@ -243,6 +274,7 @@ static void __init ms_hyperv_init_platform(void) * Extract the features and hints */ ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES); + ms_hyperv.features_b = cpuid_ebx(HYPERV_CPUID_FEATURES); ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES); ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); @@ -256,6 +288,22 @@ static void __init ms_hyperv_init_platform(void) ms_hyperv.max_vp_index, ms_hyperv.max_lp_index); /* + * Check CPU management privilege. + * + * To mirror what Windows does we should extract CPU management + * features and use the ReservedIdentityBit to detect if Linux is the + * root partition. But that requires negotiating CPU management + * interface (a process to be finalized). + * + * For now, use the privilege flag as the indicator for running as + * root. + */ + if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_CPU_MANAGEMENT) { + hv_root_partition = true; + pr_info("Hyper-V: running as root partition\n"); + } + + /* * Extract host information. */ if (cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS) >= @@ -277,6 +325,14 @@ static void __init ms_hyperv_init_platform(void) x86_platform.calibrate_cpu = hv_get_tsc_khz; } + if (ms_hyperv.features_b & HV_ISOLATION) { + ms_hyperv.isolation_config_a = cpuid_eax(HYPERV_CPUID_ISOLATION_CONFIG); + ms_hyperv.isolation_config_b = cpuid_ebx(HYPERV_CPUID_ISOLATION_CONFIG); + + pr_info("Hyper-V: Isolation Config: Group A 0x%x, Group B 0x%x\n", + ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b); + } + if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED) { ms_hyperv.nested_features = cpuid_eax(HYPERV_CPUID_NESTED_FEATURES); @@ -366,6 +422,8 @@ static void __init ms_hyperv_init_platform(void) # ifdef CONFIG_SMP smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu; + if (hv_root_partition) + smp_ops.smp_prepare_cpus = hv_smp_prepare_cpus; # endif /* diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 1dd851397bd9..5601b95944fa 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -128,12 +128,21 @@ static __always_inline bool in_exception_stack(unsigned long *stack, struct stac static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info) { - unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr); - unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long)); + unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr); + unsigned long *begin; /* - * This is a software stack, so 'end' can be a valid stack pointer. - * It just means the stack is empty. + * @end points directly to the top most stack entry to avoid a -8 + * adjustment in the stack switch hotpath. Adjust it back before + * calculating @begin. 
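Concretely, hardirq_stack_ptr now stores the address of the top-most eight-byte slot (va + IRQ_STACK_SIZE - 8) instead of one past the end, so the entry path can use it without a -8 adjustment; the unwinder undoes that with the end++ just below. A small userspace sketch of the arithmetic, assuming a 16 KiB stack for illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define IRQ_STACK_SIZE	16384	/* assumed size, for illustration */

	int main(void)
	{
		static unsigned char backing[IRQ_STACK_SIZE];
		void *va = backing;

		/* New convention: the per-CPU pointer holds the top-most slot. */
		uint64_t *tos = (uint64_t *)((unsigned char *)va + IRQ_STACK_SIZE - 8);

		/* Unwinder view: step past that slot to get the exclusive end, */
		uint64_t *end = tos + 1;
		/* then derive the inclusive begin from the stack size. */
		uint64_t *begin = end - IRQ_STACK_SIZE / sizeof(uint64_t);

		printf("%zu eight-byte slots, begin == backing store: %d\n",
		       (size_t)(end - begin), (void *)begin == va);
		return 0;
	}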
+ */ + end++; + begin = end - (IRQ_STACK_SIZE / sizeof(long)); + + /* + * Due to the switching logic RSP can never be == @end because the + * final operation is 'popq %rsp' which means after that RSP points + * to the original stack and not to @end. */ if (stack < begin || stack >= end) return false; @@ -143,8 +152,9 @@ static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info info->end = end; /* - * The next stack pointer is the first thing pushed by the entry code - * after switching to the irq stack. + * The next stack pointer is stored at the top of the irq stack + * before switching to the irq stack. Actual stack entries are all + * below that. */ info->next_sp = (unsigned long *)*(end - 1); diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index 0d54099c2a3a..7c273846c687 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -184,6 +184,7 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) * It is also used to copy the retq for trampolines. */ SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK) + UNWIND_HINT_FUNC retq SYM_FUNC_END(ftrace_epilogue) @@ -276,7 +277,7 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL) restore_mcount_regs 8 /* Restore flags */ popfq - UNWIND_HINT_RET_OFFSET + UNWIND_HINT_FUNC jmp ftrace_epilogue SYM_FUNC_END(ftrace_regs_caller) @@ -333,8 +334,7 @@ SYM_FUNC_START(ftrace_graph_caller) retq SYM_FUNC_END(ftrace_graph_caller) -SYM_CODE_START(return_to_handler) - UNWIND_HINT_EMPTY +SYM_FUNC_START(return_to_handler) subq $24, %rsp /* Save the return values */ @@ -349,5 +349,5 @@ SYM_CODE_START(return_to_handler) movq (%rsp), %rax addq $24, %rsp JMP_NOSPEC rdi -SYM_CODE_END(return_to_handler) +SYM_FUNC_END(return_to_handler) #endif diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index d4ad344e80bf..58aa712973ac 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -228,7 +228,7 @@ static __always_inline void handle_irq(struct irq_desc *desc, struct pt_regs *regs) { if (IS_ENABLED(CONFIG_X86_64)) - run_irq_on_irqstack_cond(desc->handle_irq, desc, regs); + generic_handle_irq_desc(desc); else __handle_irq(desc, regs); } diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 0b79efc87be5..044902d5a3c4 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -22,6 +22,7 @@ #include <asm/apic.h> #include <asm/nospec-branch.h> +#include <asm/softirq_stack.h> #ifdef CONFIG_DEBUG_STACKOVERFLOW diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 440eed558558..1c0fb96b9e39 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -20,6 +20,7 @@ #include <linux/sched/task_stack.h> #include <asm/cpu_entry_area.h> +#include <asm/softirq_stack.h> #include <asm/irq_stack.h> #include <asm/io_apic.h> #include <asm/apic.h> @@ -48,7 +49,8 @@ static int map_irq_stack(unsigned int cpu) if (!va) return -ENOMEM; - per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE; + /* Store actual TOS to avoid adjustment in the hotpath */ + per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8; return 0; } #else @@ -60,7 +62,8 @@ static int map_irq_stack(unsigned int cpu) { void *va = per_cpu_ptr(&irq_stack_backing_store, cpu); - per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE; + /* Store actual TOS to avoid adjustment in the hotpath */ + per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8; return 0; } #endif @@ -71,8 +74,3 @@ int irq_init_percpu_irqstack(unsigned int cpu) return 0; return map_irq_stack(cpu); } - -void 
do_softirq_own_stack(void) -{ - run_on_irqstack_cond(__do_softirq, NULL); -} diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 145a7ac0c19a..9c214d7085a4 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -161,7 +161,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, #endif /* Kernel thread ? */ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); kthread_frame_init(frame, sp, arg); return 0; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index ad582f9ac5a6..d08307df69ad 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -539,7 +539,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) int cpu = smp_processor_id(); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && - this_cpu_read(irq_count) != -1); + this_cpu_read(hardirq_stack_inuse)); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) switch_fpu_prepare(prev_fpu, cpu); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 9991c5920aac..b29657b76e3f 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -547,31 +547,21 @@ static void emergency_vmx_disable_all(void) local_irq_disable(); /* - * We need to disable VMX on all CPUs before rebooting, otherwise - * we risk hanging up the machine, because the CPU ignores INIT - * signals when VMX is enabled. + * Disable VMX on all CPUs before rebooting, otherwise we risk hanging + * the machine, because the CPU blocks INIT when it's in VMX root. * - * We can't take any locks and we may be on an inconsistent - * state, so we use NMIs as IPIs to tell the other CPUs to disable - * VMX and halt. + * We can't take any locks and we may be in an inconsistent state, so + * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt. * - * For safety, we will avoid running the nmi_shootdown_cpus() - * stuff unnecessarily, but we don't have a way to check - * if other CPUs have VMX enabled. So we will call it only if the - * CPU we are running on has VMX enabled. - * - * We will miss cases where VMX is not enabled on all CPUs. This - * shouldn't do much harm because KVM always enable VMX on all - * CPUs anyway. But we can miss it on the small window where KVM - * is still enabling VMX. + * Do the NMI shootdown even if VMX is off on _this_ CPU, as that + * doesn't prevent a different CPU from being in VMX root operation. */ - if (cpu_has_vmx() && cpu_vmx_enabled()) { - /* Disable VMX on this CPU. */ - cpu_vmxoff(); + if (cpu_has_vmx()) { + /* Safely force _this_ CPU out of VMX root operation. */ + __cpu_emergency_vmxoff(); - /* Halt and disable VMX on the other CPUs */ + /* Halt and exit VMX root operation on the other CPUs. */ nmi_shootdown_cpus(vmxoff_nmi); - } } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 740f3bdb3f61..d883176ef2ce 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -16,7 +16,6 @@ #include <linux/memblock.h> #include <linux/pci.h> #include <linux/root_dev.h> -#include <linux/sfi.h> #include <linux/hugetlb.h> #include <linux/tboot.h> #include <linux/usb/xhci-dbgp.h> @@ -1185,7 +1184,6 @@ void __init setup_arch(char **cmdline_p) * Read APIC and some other early information from ACPI tables. 
*/ acpi_boot_init(); - sfi_init(); x86_dtb_init(); /* diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 73f800100066..2a1d47f47eee 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -471,7 +471,7 @@ bool unwind_next_frame(struct unwind_state *state) break; case ORC_REG_SP_INDIRECT: - sp = state->sp + orc->sp_offset; + sp = state->sp; indirect = true; break; @@ -521,6 +521,9 @@ bool unwind_next_frame(struct unwind_state *state) if (indirect) { if (!deref_stack_reg(state, sp, &sp)) goto err; + + if (orc->sp_reg == ORC_REG_SP_INDIRECT) + sp += orc->sp_offset; } /* Find IP, SP and possibly regs: */ diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 7ac592664c52..a788d5120d4d 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -103,6 +103,15 @@ config KVM_AMD_SEV Provides support for launching Encrypted VMs (SEV) and Encrypted VMs with Encrypted State (SEV-ES) on AMD processors. +config KVM_XEN + bool "Support for Xen hypercall interface" + depends on KVM + help + Provides KVM support for hosting Xen HVM guests and for + passing Xen hypercalls to userspace. + + If in doubt, say "N". + config KVM_MMU_AUDIT bool "Audit KVM MMU" depends on KVM && TRACEPOINTS diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 4bd14ab01323..1b4766fe1de2 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -17,7 +17,9 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \ - mmu/spte.o mmu/tdp_iter.o mmu/tdp_mmu.o + mmu/spte.o +kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o +kvm-$(CONFIG_KVM_XEN) += xen.o kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \ vmx/evmcs.o vmx/nested.o vmx/posted_intr.o diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 38172ca627d3..6bd2f8b830e4 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -173,16 +173,22 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_update_pv_runtime(vcpu); vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); - kvm_mmu_reset_context(vcpu); + vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); kvm_pmu_refresh(vcpu); vcpu->arch.cr4_guest_rsvd_bits = __cr4_reserved_bits(guest_cpuid_has, vcpu); - vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); + kvm_hv_set_cpuid(vcpu); /* Invoke the vendor callback only after the above state is updated. */ - kvm_x86_ops.vcpu_after_set_cpuid(vcpu); + static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu); + + /* + * Except for the MMU, which needs to be reset after any vendor-specific + * adjustments to the reserved GPA bits. + */ + kvm_mmu_reset_context(vcpu); } static int is_efer_nx(void) @@ -223,6 +229,16 @@ not_found: return 36; } +/* + * This "raw" version returns the reserved GPA bits without any adjustments for + * encryption technologies that usurp bits. The raw mask should be used if and + * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs. 
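The raw mask is simply every physical-address bit at or above the vCPU's MAXPHYADDR. A self-contained sketch of the mask construction and the legality check it feeds, mirroring rsvd_bits() and kvm_vcpu_is_legal_gpa() below (the MAXPHYADDR value is illustrative):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mask with bits s..e set, mirroring KVM's rsvd_bits() helper. */
	static uint64_t rsvd_bits(unsigned int s, unsigned int e)
	{
		return (~0ULL >> (63 - e)) & ~((1ULL << s) - 1);
	}

	static bool gpa_is_legal(uint64_t gpa, unsigned int maxphyaddr)
	{
		return !(gpa & rsvd_bits(maxphyaddr, 63));
	}

	int main(void)
	{
		/* With MAXPHYADDR = 36, anything at or above 1 << 36 is reserved. */
		printf("%d\n", gpa_is_legal(0xfffff000ULL, 36));	/* 1 */
		printf("%d\n", gpa_is_legal(1ULL << 36, 36));		/* 0 */
		return 0;
	}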
+ */ +u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu) +{ + return rsvd_bits(cpuid_maxphyaddr(vcpu), 63); +} + /* when an old userspace process fills a new kernel module */ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid *cpuid, @@ -392,7 +408,7 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_mask(CPUID_7_0_EBX, F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) | - F(BMI2) | F(ERMS) | 0 /*INVPCID*/ | F(RTM) | 0 /*MPX*/ | F(RDSEED) | + F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /*MPX*/ | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) | F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/ @@ -434,7 +450,7 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD); kvm_cpu_cap_mask(CPUID_7_1_EAX, - F(AVX512_BF16) + F(AVX_VNNI) | F(AVX512_BF16) ); kvm_cpu_cap_mask(CPUID_D_1_EAX, diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index dc921d76e42e..2a0c5064497f 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -30,15 +30,32 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool exact_only); int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu); +u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu); static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) { return vcpu->arch.maxphyaddr; } +static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) +{ + return !(gpa & vcpu->arch.reserved_gpa_bits); +} + static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) { - return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu))); + return !kvm_vcpu_is_legal_gpa(vcpu, gpa); +} + +static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu, + gpa_t gpa, gpa_t alignment) +{ + return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa); +} + +static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa) +{ + return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE); } struct cpuid_reg { @@ -324,11 +341,6 @@ static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature) kvm_cpu_cap_set(x86_feature); } -static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa) -{ - return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu)); -} - static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu, unsigned int kvm_feature) { diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 66a08322988f..f7970ba6219f 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2506,12 +2506,12 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, val = GET_SMSTATE(u32, smstate, 0x7fcc); - if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1)) + if (ctxt->ops->set_dr(ctxt, 6, val)) return X86EMUL_UNHANDLEABLE; val = GET_SMSTATE(u32, smstate, 0x7fc8); - if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1)) + if (ctxt->ops->set_dr(ctxt, 7, val)) return X86EMUL_UNHANDLEABLE; selector = GET_SMSTATE(u32, smstate, 0x7fc4); @@ -2564,14 +2564,14 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78); ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED; - val = GET_SMSTATE(u32, smstate, 0x7f68); + val = GET_SMSTATE(u64, smstate, 0x7f68); - if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1)) + if (ctxt->ops->set_dr(ctxt, 6, val)) return X86EMUL_UNHANDLEABLE; - val = GET_SMSTATE(u32, smstate, 0x7f60); + val = 
GET_SMSTATE(u64, smstate, 0x7f60); - if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1)) + if (ctxt->ops->set_dr(ctxt, 7, val)) return X86EMUL_UNHANDLEABLE; cr0 = GET_SMSTATE(u64, smstate, 0x7f58); @@ -4329,7 +4329,7 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt) ctxt->ops->get_dr(ctxt, 6, &dr6); dr6 &= ~DR_TRAP_BITS; - dr6 |= DR6_BD | DR6_RTM; + dr6 |= DR6_BD | DR6_ACTIVE_LOW; ctxt->ops->set_dr(ctxt, 6, dr6); return emulate_db(ctxt); } diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 922c69dcca4d..58fa8c029867 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -23,6 +23,7 @@ #include "ioapic.h" #include "cpuid.h" #include "hyperv.h" +#include "xen.h" #include <linux/cpu.h> #include <linux/kvm_host.h> @@ -36,6 +37,9 @@ #include "trace.h" #include "irq.h" +/* "Hv#1" signature */ +#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648 + #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64) static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, @@ -128,7 +132,7 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint, synic_update_vector(synic, vector); /* Load SynIC vectors into EOI exit bitmap */ - kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic)); + kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic)); return 0; } @@ -141,10 +145,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx) return NULL; vcpu = kvm_get_vcpu(kvm, vpidx); - if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) + if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx) return vcpu; kvm_for_each_vcpu(i, vcpu, kvm) - if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) + if (kvm_hv_get_vpindex(vcpu) == vpidx) return vcpu; return NULL; } @@ -155,17 +159,17 @@ static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx) struct kvm_vcpu_hv_synic *synic; vcpu = get_vcpu_by_vpidx(kvm, vpidx); - if (!vcpu) + if (!vcpu || !to_hv_vcpu(vcpu)) return NULL; - synic = vcpu_to_synic(vcpu); + synic = to_hv_synic(vcpu); return (synic->active) ? 
synic : NULL; } static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint) { struct kvm *kvm = vcpu->kvm; - struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); - struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); + struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct kvm_vcpu_hv_stimer *stimer; int gsi, idx; @@ -189,8 +193,8 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint) static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr) { - struct kvm_vcpu *vcpu = synic_to_vcpu(synic); - struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; + struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC; hv_vcpu->exit.u.synic.msr = msr; @@ -204,7 +208,7 @@ static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr) static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 data, bool host) { - struct kvm_vcpu *vcpu = synic_to_vcpu(synic); + struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); int ret; if (!synic->active && !host) @@ -282,8 +286,7 @@ static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu) static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu) { - struct kvm *kvm = vcpu->kvm; - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL) hv->hv_syndbg.control.status = @@ -293,8 +296,8 @@ static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu) static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr) { - struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu); - struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; + struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG; hv_vcpu->exit.u.syndbg.msr = msr; @@ -310,13 +313,13 @@ static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr) static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) { - struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu); + struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); if (!kvm_hv_is_syndbg_enabled(vcpu) && !host) return 1; trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id, - vcpu_to_hv_vcpu(vcpu)->vp_index, msr, data); + to_hv_vcpu(vcpu)->vp_index, msr, data); switch (msr) { case HV_X64_MSR_SYNDBG_CONTROL: syndbg->control.control = data; @@ -349,7 +352,7 @@ static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) { - struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu); + struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); if (!kvm_hv_is_syndbg_enabled(vcpu) && !host) return 1; @@ -377,9 +380,7 @@ static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) break; } - trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, - vcpu_to_hv_vcpu(vcpu)->vp_index, msr, - *pdata); + trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata); return 0; } @@ -421,7 +422,7 @@ static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata, static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint) { - struct kvm_vcpu *vcpu = synic_to_vcpu(synic); + struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); struct kvm_lapic_irq irq; int ret, vector; @@ -457,7 +458,7 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint) void kvm_hv_synic_send_eoi(struct kvm_vcpu 
*vcpu, int vector) { - struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); + struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); int i; trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector); @@ -514,7 +515,7 @@ static void synic_init(struct kvm_vcpu_hv_synic *synic) static u64 get_time_ref_counter(struct kvm *kvm) { - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); struct kvm_vcpu *vcpu; u64 tsc; @@ -534,10 +535,10 @@ static u64 get_time_ref_counter(struct kvm *kvm) static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, bool vcpu_kick) { - struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); set_bit(stimer->index, - vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap); + to_hv_vcpu(vcpu)->stimer_pending_bitmap); kvm_make_request(KVM_REQ_HV_STIMER, vcpu); if (vcpu_kick) kvm_vcpu_kick(vcpu); @@ -545,14 +546,14 @@ static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer) { - struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); - trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id, + trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index); hrtimer_cancel(&stimer->timer); clear_bit(stimer->index, - vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap); + to_hv_vcpu(vcpu)->stimer_pending_bitmap); stimer->msg_pending = false; stimer->exp_time = 0; } @@ -562,7 +563,7 @@ static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer) struct kvm_vcpu_hv_stimer *stimer; stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer); - trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id, + trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index); stimer_mark_pending(stimer, true); @@ -579,7 +580,7 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer) u64 time_now; ktime_t ktime_now; - time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm); + time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm); ktime_now = ktime_get(); if (stimer->config.periodic) { @@ -596,7 +597,7 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer) stimer->exp_time = time_now + stimer->count; trace_kvm_hv_stimer_start_periodic( - stimer_to_vcpu(stimer)->vcpu_id, + hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index, time_now, stimer->exp_time); @@ -618,7 +619,7 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer) return 0; } - trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id, + trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index, time_now, stimer->count); @@ -633,13 +634,13 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, { union hv_stimer_config new_config = {.as_uint64 = config}, old_config = {.as_uint64 = stimer->config.as_uint64}; - struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); - struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); + struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); if (!synic->active && !host) return 1; - trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id, + trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index, config, host); stimer_cleanup(stimer); @@ -657,13 +658,13 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, bool host) { 
- struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); - struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); + struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); if (!synic->active && !host) return 1; - trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id, + trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index, count, host); stimer_cleanup(stimer); @@ -694,7 +695,7 @@ static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount) static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint, struct hv_message *src_msg, bool no_retry) { - struct kvm_vcpu *vcpu = synic_to_vcpu(synic); + struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); int msg_off = offsetof(struct hv_message_page, sint_message[sint]); gfn_t msg_page_gfn; struct hv_message_header hv_hdr; @@ -750,7 +751,7 @@ static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint, static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer) { - struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); struct hv_message *msg = &stimer->msg; struct hv_timer_message_payload *payload = (struct hv_timer_message_payload *)&msg->u.payload; @@ -763,14 +764,14 @@ static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer) payload->expiration_time = stimer->exp_time; payload->delivery_time = get_time_ref_counter(vcpu->kvm); - return synic_deliver_msg(vcpu_to_synic(vcpu), + return synic_deliver_msg(to_hv_synic(vcpu), stimer->config.sintx, msg, no_retry); } static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer) { - struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); struct kvm_lapic_irq irq = { .delivery_mode = APIC_DM_FIXED, .vector = stimer->config.apic_vector @@ -790,7 +791,7 @@ static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer) r = stimer_send_msg(stimer); else r = stimer_notify_direct(stimer); - trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id, + trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index, direct, r); if (!r) { stimer->msg_pending = false; @@ -801,11 +802,14 @@ static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer) void kvm_hv_process_stimers(struct kvm_vcpu *vcpu) { - struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct kvm_vcpu_hv_stimer *stimer; u64 time_now, exp_time; int i; + if (!hv_vcpu) + return; + for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { stimer = &hv_vcpu->stimer[i]; @@ -831,16 +835,27 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu) void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) { - struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); int i; + if (!hv_vcpu) + return; + for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) stimer_cleanup(&hv_vcpu->stimer[i]); + + kfree(hv_vcpu); + vcpu->arch.hyperv = NULL; } bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu) { - if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + + if (!hv_vcpu) + return false; + + if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) return false; return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; } @@ -880,28 +895,41 @@ static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index) 
stimer_prepare_msg(stimer); } -void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) +static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) { - struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); + struct kvm_vcpu_hv *hv_vcpu; int i; + hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT); + if (!hv_vcpu) + return -ENOMEM; + + vcpu->arch.hyperv = hv_vcpu; + hv_vcpu->vcpu = vcpu; + synic_init(&hv_vcpu->synic); bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) stimer_init(&hv_vcpu->stimer[i], i); -} - -void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu) -{ - struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu); + + return 0; } int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages) { - struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); + struct kvm_vcpu_hv_synic *synic; + int r; + + if (!to_hv_vcpu(vcpu)) { + r = kvm_hv_vcpu_init(vcpu); + if (r) + return r; + } + + synic = to_hv_synic(vcpu); /* * Hyper-V SynIC auto EOI SINT's are @@ -939,10 +967,9 @@ static bool kvm_hv_msr_partition_wide(u32 msr) return r; } -static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu, - u32 index, u64 *pdata) +static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata) { - struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); size_t size = ARRAY_SIZE(hv->hv_crash_param); if (WARN_ON_ONCE(index >= size)) @@ -952,41 +979,26 @@ static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu, return 0; } -static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata) +static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata) { - struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); *pdata = hv->hv_crash_ctl; return 0; } -static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host) +static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data) { - struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; - - if (host) - hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; - - if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) { + struct kvm_hv *hv = to_kvm_hv(kvm); - vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n", - hv->hv_crash_param[0], - hv->hv_crash_param[1], - hv->hv_crash_param[2], - hv->hv_crash_param[3], - hv->hv_crash_param[4]); - - /* Send notification about crash to user space */ - kvm_make_request(KVM_REQ_HV_CRASH, vcpu); - } + hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; return 0; } -static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu, - u32 index, u64 data) +static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data) { - struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); size_t size = ARRAY_SIZE(hv->hv_crash_param); if (WARN_ON_ONCE(index >= size)) @@ -1068,7 +1080,7 @@ static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock, void kvm_hv_setup_tsc_page(struct kvm *kvm, struct pvclock_vcpu_time_info *hv_clock) { - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); u32 tsc_seq; u64 gfn; @@ -1078,7 +1090,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm, if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) return; - mutex_lock(&kvm->arch.hyperv.hv_lock); + mutex_lock(&hv->hv_lock); if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) goto out_unlock; @@ -1122,14 +1134,14 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm, kvm_write_guest(kvm, 
gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)); out_unlock: - mutex_unlock(&kvm->arch.hyperv.hv_lock); + mutex_unlock(&hv->hv_lock); } static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) { struct kvm *kvm = vcpu->kvm; - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); switch (msr) { case HV_X64_MSR_GUEST_OS_ID: @@ -1139,9 +1151,9 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; break; case HV_X64_MSR_HYPERCALL: { - u64 gfn; - unsigned long addr; - u8 instructions[4]; + u8 instructions[9]; + int i = 0; + u64 addr; /* if guest os id is not set hypercall should remain disabled */ if (!hv->hv_guest_os_id) @@ -1150,16 +1162,33 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, hv->hv_hypercall = data; break; } - gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; - addr = gfn_to_hva(kvm, gfn); - if (kvm_is_error_hva(addr)) - return 1; - kvm_x86_ops.patch_hypercall(vcpu, instructions); - ((unsigned char *)instructions)[3] = 0xc3; /* ret */ - if (__copy_to_user((void __user *)addr, instructions, 4)) + + /* + * If Xen and Hyper-V hypercalls are both enabled, disambiguate + * the same way Xen itself does, by setting the bit 31 of EAX + * which is RsvdZ in the 32-bit Hyper-V hypercall ABI and just + * going to be clobbered on 64-bit. + */ + if (kvm_xen_hypercall_enabled(kvm)) { + /* orl $0x80000000, %eax */ + instructions[i++] = 0x0d; + instructions[i++] = 0x00; + instructions[i++] = 0x00; + instructions[i++] = 0x00; + instructions[i++] = 0x80; + } + + /* vmcall/vmmcall */ + static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i); + i += 3; + + /* ret */ + ((unsigned char *)instructions)[i++] = 0xc3; + + addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK; + if (kvm_vcpu_write_guest(vcpu, addr, instructions, i)) return 1; hv->hv_hypercall = data; - mark_page_dirty(kvm, gfn); break; } case HV_X64_MSR_REFERENCE_TSC: @@ -1168,11 +1197,25 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); break; case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: - return kvm_hv_msr_set_crash_data(vcpu, + return kvm_hv_msr_set_crash_data(kvm, msr - HV_X64_MSR_CRASH_P0, data); case HV_X64_MSR_CRASH_CTL: - return kvm_hv_msr_set_crash_ctl(vcpu, data, host); + if (host) + return kvm_hv_msr_set_crash_ctl(kvm, data); + + if (data & HV_CRASH_CTL_CRASH_NOTIFY) { + vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n", + hv->hv_crash_param[0], + hv->hv_crash_param[1], + hv->hv_crash_param[2], + hv->hv_crash_param[3], + hv->hv_crash_param[4]); + + /* Send notification about crash to user space */ + kvm_make_request(KVM_REQ_HV_CRASH, vcpu); + } + break; case HV_X64_MSR_RESET: if (data == 1) { vcpu_debug(vcpu, "hyper-v reset requested\n"); @@ -1216,11 +1259,11 @@ static u64 current_task_runtime_100ns(void) static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) { - struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); switch (msr) { case HV_X64_MSR_VP_INDEX: { - struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); int vcpu_idx = kvm_vcpu_get_idx(vcpu); u32 new_vp_index = (u32)data; @@ -1291,14 +1334,14 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) case HV_X64_MSR_SIMP: case HV_X64_MSR_EOM: case HV_X64_MSR_SINT0 ... 
HV_X64_MSR_SINT15: - return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host); + return synic_set_msr(to_hv_synic(vcpu), msr, data, host); case HV_X64_MSR_STIMER0_CONFIG: case HV_X64_MSR_STIMER1_CONFIG: case HV_X64_MSR_STIMER2_CONFIG: case HV_X64_MSR_STIMER3_CONFIG: { int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; - return stimer_set_config(vcpu_to_stimer(vcpu, timer_index), + return stimer_set_config(to_hv_stimer(vcpu, timer_index), data, host); } case HV_X64_MSR_STIMER0_COUNT: @@ -1307,7 +1350,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) case HV_X64_MSR_STIMER3_COUNT: { int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; - return stimer_set_count(vcpu_to_stimer(vcpu, timer_index), + return stimer_set_count(to_hv_stimer(vcpu, timer_index), data, host); } case HV_X64_MSR_TSC_FREQUENCY: @@ -1330,7 +1373,7 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, { u64 data = 0; struct kvm *kvm = vcpu->kvm; - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); switch (msr) { case HV_X64_MSR_GUEST_OS_ID: @@ -1346,11 +1389,11 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, data = hv->hv_tsc_page; break; case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: - return kvm_hv_msr_get_crash_data(vcpu, + return kvm_hv_msr_get_crash_data(kvm, msr - HV_X64_MSR_CRASH_P0, pdata); case HV_X64_MSR_CRASH_CTL: - return kvm_hv_msr_get_crash_ctl(vcpu, pdata); + return kvm_hv_msr_get_crash_ctl(kvm, pdata); case HV_X64_MSR_RESET: data = 0; break; @@ -1379,7 +1422,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) { u64 data = 0; - struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); switch (msr) { case HV_X64_MSR_VP_INDEX: @@ -1403,14 +1446,14 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, case HV_X64_MSR_SIMP: case HV_X64_MSR_EOM: case HV_X64_MSR_SINT0 ... 
HV_X64_MSR_SINT15: - return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host); + return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host); case HV_X64_MSR_STIMER0_CONFIG: case HV_X64_MSR_STIMER1_CONFIG: case HV_X64_MSR_STIMER2_CONFIG: case HV_X64_MSR_STIMER3_CONFIG: { int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; - return stimer_get_config(vcpu_to_stimer(vcpu, timer_index), + return stimer_get_config(to_hv_stimer(vcpu, timer_index), pdata); } case HV_X64_MSR_STIMER0_COUNT: @@ -1419,7 +1462,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, case HV_X64_MSR_STIMER3_COUNT: { int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; - return stimer_get_count(vcpu_to_stimer(vcpu, timer_index), + return stimer_get_count(to_hv_stimer(vcpu, timer_index), pdata); } case HV_X64_MSR_TSC_FREQUENCY: @@ -1438,12 +1481,22 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) { + struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); + + if (!host && !vcpu->arch.hyperv_enabled) + return 1; + + if (!to_hv_vcpu(vcpu)) { + if (kvm_hv_vcpu_init(vcpu)) + return 1; + } + if (kvm_hv_msr_partition_wide(msr)) { int r; - mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock); + mutex_lock(&hv->hv_lock); r = kvm_hv_set_msr_pw(vcpu, msr, data, host); - mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock); + mutex_unlock(&hv->hv_lock); return r; } else return kvm_hv_set_msr(vcpu, msr, data, host); @@ -1451,12 +1504,22 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) { + struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); + + if (!host && !vcpu->arch.hyperv_enabled) + return 1; + + if (!to_hv_vcpu(vcpu)) { + if (kvm_hv_vcpu_init(vcpu)) + return 1; + } + if (kvm_hv_msr_partition_wide(msr)) { int r; - mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock); + mutex_lock(&hv->hv_lock); r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host); - mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock); + mutex_unlock(&hv->hv_lock); return r; } else return kvm_hv_get_msr(vcpu, msr, pdata, host); @@ -1466,7 +1529,7 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask( struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask, u64 *vp_bitmap, unsigned long *vcpu_bitmap) { - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); struct kvm_vcpu *vcpu; int i, bank, sbank = 0; @@ -1483,18 +1546,16 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask( bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); kvm_for_each_vcpu(i, vcpu, kvm) { - if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index, - (unsigned long *)vp_bitmap)) + if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap)) __set_bit(i, vcpu_bitmap); } return vcpu_bitmap; } -static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, - u16 rep_cnt, bool ex) +static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool ex) { - struct kvm *kvm = current_vcpu->kvm; - struct kvm_vcpu_hv *hv_vcpu = ¤t_vcpu->arch.hyperv; + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct hv_tlb_flush_ex flush_ex; struct hv_tlb_flush flush; u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; @@ -1592,10 +1653,10 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector, } } -static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa, +static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa, 
bool ex, bool fast) { - struct kvm *kvm = current_vcpu->kvm; + struct kvm *kvm = vcpu->kvm; struct hv_send_ipi_ex send_ipi_ex; struct hv_send_ipi send_ipi; u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; @@ -1666,9 +1727,20 @@ ret_success: return HV_STATUS_SUCCESS; } -bool kvm_hv_hypercall_enabled(struct kvm *kvm) +void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu) { - return READ_ONCE(kvm->arch.hyperv.hv_guest_os_id) != 0; + struct kvm_cpuid_entry2 *entry; + + entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0); + if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) + vcpu->arch.hyperv_enabled = true; + else + vcpu->arch.hyperv_enabled = false; +} + +bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id; } static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) @@ -1698,6 +1770,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) { + struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); struct eventfd_ctx *eventfd; if (unlikely(!fast)) { @@ -1726,7 +1799,7 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */ rcu_read_lock(); - eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param); + eventfd = idr_find(&hv->conn_to_evt, param); rcu_read_unlock(); if (!eventfd) return HV_STATUS_INVALID_PORT_ID; @@ -1745,7 +1818,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) * hypercall generates UD from non zero cpl and real mode * per HYPER-V spec */ - if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } @@ -1793,7 +1866,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) fallthrough; /* maybe userspace knows this conn_id */ case HVCALL_POST_MESSAGE: /* don't bother userspace if it has no way to handle it */ - if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { + if (unlikely(rep || !to_hv_synic(vcpu)->active)) { ret = HV_STATUS_INVALID_HYPERCALL_INPUT; break; } @@ -1855,7 +1928,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) } fallthrough; case HVCALL_RESET_DEBUG_SESSION: { - struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu); + struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); if (!kvm_hv_is_syndbg_enabled(vcpu)) { ret = HV_STATUS_INVALID_HYPERCALL_CODE; @@ -1885,23 +1958,26 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) void kvm_hv_init_vm(struct kvm *kvm) { - mutex_init(&kvm->arch.hyperv.hv_lock); - idr_init(&kvm->arch.hyperv.conn_to_evt); + struct kvm_hv *hv = to_kvm_hv(kvm); + + mutex_init(&hv->hv_lock); + idr_init(&hv->conn_to_evt); } void kvm_hv_destroy_vm(struct kvm *kvm) { + struct kvm_hv *hv = to_kvm_hv(kvm); struct eventfd_ctx *eventfd; int i; - idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i) + idr_for_each_entry(&hv->conn_to_evt, eventfd, i) eventfd_ctx_put(eventfd); - idr_destroy(&kvm->arch.hyperv.conn_to_evt); + idr_destroy(&hv->conn_to_evt); } static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd) { - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); struct eventfd_ctx *eventfd; int ret; @@ -1925,7 +2001,7 @@ static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd) static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id) { - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv 
= to_kvm_hv(kvm); struct eventfd_ctx *eventfd; mutex_lock(&hv->hv_lock); @@ -1997,8 +2073,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, break; case HYPERV_CPUID_INTERFACE: - memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12); - ent->eax = signature[0]; + ent->eax = HYPERV_CPUID_SIGNATURE_EAX; break; case HYPERV_CPUID_VERSION: diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index 6d7def2b0aad..e951af1fcb2c 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -50,38 +50,46 @@ /* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */ #define HV_X64_SYNDBG_OPTION_USE_HCALLS BIT(2) -static inline struct kvm_vcpu_hv *vcpu_to_hv_vcpu(struct kvm_vcpu *vcpu) +static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm) { - return &vcpu->arch.hyperv; + return &kvm->arch.hyperv; } -static inline struct kvm_vcpu *hv_vcpu_to_vcpu(struct kvm_vcpu_hv *hv_vcpu) +static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu) { - struct kvm_vcpu_arch *arch; - - arch = container_of(hv_vcpu, struct kvm_vcpu_arch, hyperv); - return container_of(arch, struct kvm_vcpu, arch); + return vcpu->arch.hyperv; } -static inline struct kvm_vcpu_hv_synic *vcpu_to_synic(struct kvm_vcpu *vcpu) +static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu) { - return &vcpu->arch.hyperv.synic; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + + return &hv_vcpu->synic; } -static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic) +static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic) { - return hv_vcpu_to_vcpu(container_of(synic, struct kvm_vcpu_hv, synic)); + struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic); + + return hv_vcpu->vcpu; } -static inline struct kvm_hv_syndbg *vcpu_to_hv_syndbg(struct kvm_vcpu *vcpu) +static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu) { return &vcpu->kvm->arch.hyperv.hv_syndbg; } +static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + + return hv_vcpu ? 
hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu); +} + int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host); int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host); -bool kvm_hv_hypercall_enabled(struct kvm *kvm); +bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu); int kvm_hv_hypercall(struct kvm_vcpu *vcpu); void kvm_hv_irq_routing_update(struct kvm *kvm); @@ -89,32 +97,35 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint); void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector); int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages); -void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu); -void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu); bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu); bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu, struct hv_vp_assist_page *assist_page); -static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu, - int timer_index) +static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu, + int timer_index) { - return &vcpu_to_hv_vcpu(vcpu)->stimer[timer_index]; + return &to_hv_vcpu(vcpu)->stimer[timer_index]; } -static inline struct kvm_vcpu *stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer) +static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer) { struct kvm_vcpu_hv *hv_vcpu; hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv, stimer[0]); - return hv_vcpu_to_vcpu(hv_vcpu); + return hv_vcpu->vcpu; } static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu) { - return !bitmap_empty(vcpu->arch.hyperv.stimer_pending_bitmap, + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + + if (!hv_vcpu) + return false; + + return !bitmap_empty(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); } @@ -125,6 +136,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm, void kvm_hv_init_vm(struct kvm *kvm); void kvm_hv_destroy_vm(struct kvm *kvm); +void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu); int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args); int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries); diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index 814698e5b152..172b05343cfd 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -14,6 +14,7 @@ #include "irq.h" #include "i8254.h" #include "x86.h" +#include "xen.h" /* * check if there are pending timer events @@ -56,6 +57,9 @@ int kvm_cpu_has_extint(struct kvm_vcpu *v) if (!lapic_in_kernel(v)) return v->arch.interrupt.injected; + if (kvm_xen_has_interrupt(v)) + return 1; + if (!kvm_apic_accept_pic_intr(v)) return 0; @@ -110,6 +114,9 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v) if (!lapic_in_kernel(v)) return v->arch.interrupt.nr; + if (kvm_xen_has_interrupt(v)) + return v->kvm->arch.xen.upcall_vector; + if (irqchip_split(v->kvm)) { int vector = v->arch.pending_external_vector; @@ -143,8 +150,7 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu) { __kvm_migrate_apic_timer(vcpu); __kvm_migrate_pit_timer(vcpu); - if (kvm_x86_ops.migrate_timers) - kvm_x86_ops.migrate_timers(vcpu); + static_call_cond(kvm_x86_migrate_timers)(vcpu); } bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args) diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index a889563ad02d..2e11da2f5621 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -68,7 +68,7 @@ static 
inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg) return 0; if (!kvm_register_is_available(vcpu, reg)) - kvm_x86_ops.cache_reg(vcpu, reg); + static_call(kvm_x86_cache_reg)(vcpu, reg); return vcpu->arch.regs[reg]; } @@ -108,7 +108,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index) might_sleep(); /* on svm */ if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR)) - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR); + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR); return vcpu->arch.walk_mmu->pdptrs[index]; } @@ -118,7 +118,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS; if ((tmask & vcpu->arch.cr0_guest_owned_bits) && !kvm_register_is_available(vcpu, VCPU_EXREG_CR0)) - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0); + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0); return vcpu->arch.cr0 & mask; } @@ -132,14 +132,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS; if ((tmask & vcpu->arch.cr4_guest_owned_bits) && !kvm_register_is_available(vcpu, VCPU_EXREG_CR4)) - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4); + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4); return vcpu->arch.cr4 & mask; } static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu) { if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3); + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3); return vcpu->arch.cr3; } diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 43c93ffa76ed..0d359115429a 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -205,7 +205,7 @@ struct x86_emulate_ops { ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr); int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val); int (*cpl)(struct x86_emulate_ctxt *ctxt); - int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest); + void (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest); int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt); void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 43cceadd073e..45d40bfacb7c 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -91,8 +91,8 @@ static inline int __apic_test_and_clear_vector(int vec, void *bitmap) return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); } -struct static_key_deferred apic_hw_disabled __read_mostly; -struct static_key_deferred apic_sw_disabled __read_mostly; +__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ); +__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ); static inline int apic_enabled(struct kvm_lapic *apic) { @@ -290,9 +290,9 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) if (enabled != apic->sw_enabled) { apic->sw_enabled = enabled; if (enabled) - static_key_slow_dec_deferred(&apic_sw_disabled); + static_branch_slow_dec_deferred(&apic_sw_disabled); else - static_key_slow_inc(&apic_sw_disabled.key); + static_branch_inc(&apic_sw_disabled.key); atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); } @@ -484,7 +484,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) if (unlikely(vcpu->arch.apicv_active)) { /* need to update RVI */ kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR); - 
kvm_x86_ops.hwapic_irr_update(vcpu, + static_call(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic)); } else { apic->irr_pending = false; @@ -515,7 +515,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic) * just set SVI. */ if (unlikely(vcpu->arch.apicv_active)) - kvm_x86_ops.hwapic_isr_update(vcpu, vec); + static_call(kvm_x86_hwapic_isr_update)(vcpu, vec); else { ++apic->isr_count; BUG_ON(apic->isr_count > MAX_APIC_VECTOR); @@ -563,8 +563,8 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) * and must be left alone. */ if (unlikely(vcpu->arch.apicv_active)) - kvm_x86_ops.hwapic_isr_update(vcpu, - apic_find_highest_isr(apic)); + static_call(kvm_x86_hwapic_isr_update)(vcpu, + apic_find_highest_isr(apic)); else { --apic->isr_count; BUG_ON(apic->isr_count < 0); @@ -701,7 +701,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr) { int highest_irr; if (apic->vcpu->arch.apicv_active) - highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu); + highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu); else highest_irr = apic_find_highest_irr(apic); if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr) @@ -1090,7 +1090,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, apic->regs + APIC_TMR); } - if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) { + if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) { kvm_lapic_set_irr(vector, apic); kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); @@ -1245,7 +1245,8 @@ static int apic_set_eoi(struct kvm_lapic *apic) apic_clear_isr(vector, apic); apic_update_ppr(apic); - if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap)) + if (to_hv_vcpu(apic->vcpu) && + test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap)) kvm_hv_synic_send_eoi(apic->vcpu, vector); kvm_ioapic_send_eoi(apic, vector); @@ -1814,7 +1815,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic) { WARN_ON(preemptible()); WARN_ON(!apic->lapic_timer.hv_timer_in_use); - kvm_x86_ops.cancel_hv_timer(apic->vcpu); + static_call(kvm_x86_cancel_hv_timer)(apic->vcpu); apic->lapic_timer.hv_timer_in_use = false; } @@ -1831,7 +1832,7 @@ static bool start_hv_timer(struct kvm_lapic *apic) if (!ktimer->tscdeadline) return false; - if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired)) + if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired)) return false; ktimer->hv_timer_in_use = true; @@ -2175,10 +2176,10 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu) hrtimer_cancel(&apic->lapic_timer.timer); if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)) - static_key_slow_dec_deferred(&apic_hw_disabled); + static_branch_slow_dec_deferred(&apic_hw_disabled); if (!apic->sw_enabled) - static_key_slow_dec_deferred(&apic_sw_disabled); + static_branch_slow_dec_deferred(&apic_sw_disabled); if (apic->regs) free_page((unsigned long)apic->regs); @@ -2250,9 +2251,9 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) { if (value & MSR_IA32_APICBASE_ENABLE) { kvm_apic_set_xapic_id(apic, vcpu->vcpu_id); - static_key_slow_dec_deferred(&apic_hw_disabled); + static_branch_slow_dec_deferred(&apic_hw_disabled); } else { - static_key_slow_inc(&apic_hw_disabled.key); + static_branch_inc(&apic_hw_disabled.key); atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); } } @@ -2261,7 +2262,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id); if 
((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) - kvm_x86_ops.set_virtual_apic_mode(vcpu); + static_call(kvm_x86_set_virtual_apic_mode)(vcpu); apic->base_address = apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_BASE; @@ -2338,9 +2339,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) vcpu->arch.pv_eoi.msr_val = 0; apic_update_ppr(apic); if (vcpu->arch.apicv_active) { - kvm_x86_ops.apicv_post_state_restore(vcpu); - kvm_x86_ops.hwapic_irr_update(vcpu, -1); - kvm_x86_ops.hwapic_isr_update(vcpu, -1); + static_call(kvm_x86_apicv_post_state_restore)(vcpu); + static_call(kvm_x86_hwapic_irr_update)(vcpu, -1); + static_call(kvm_x86_hwapic_isr_update)(vcpu, -1); } vcpu->arch.apic_arb_prio = 0; @@ -2449,7 +2450,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns) * thinking that APIC state has changed. */ vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; - static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */ + static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */ kvm_iodevice_init(&apic->dev, &apic_mmio_ops); return 0; @@ -2512,7 +2513,7 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) */ apic_clear_irr(vector, apic); - if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) { + if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) { /* * For auto-EOI interrupts, there might be another pending * interrupt above PPR, so check whether to raise another @@ -2601,10 +2602,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) kvm_apic_update_apicv(vcpu); apic->highest_isr_cache = -1; if (vcpu->arch.apicv_active) { - kvm_x86_ops.apicv_post_state_restore(vcpu); - kvm_x86_ops.hwapic_irr_update(vcpu, + static_call(kvm_x86_apicv_post_state_restore)(vcpu); + static_call(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic)); - kvm_x86_ops.hwapic_isr_update(vcpu, + static_call(kvm_x86_hwapic_isr_update)(vcpu, apic_find_highest_isr(apic)); } kvm_make_request(KVM_REQ_EVENT, vcpu); @@ -2904,13 +2905,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) } } -void kvm_lapic_init(void) -{ - /* do not patch jump label more than once per second */ - jump_label_rate_limit(&apic_hw_disabled, HZ); - jump_label_rate_limit(&apic_sw_disabled, HZ); -} - void kvm_lapic_exit(void) { static_key_deferred_flush(&apic_hw_disabled); diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 4fb86e3a9dd3..997c45a5963a 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -6,6 +6,8 @@ #include <linux/kvm_host.h> +#include "hyperv.h" + #define KVM_APIC_INIT 0 #define KVM_APIC_SIPI 1 #define KVM_APIC_LVT_NUM 6 @@ -125,13 +127,7 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data); int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data); int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data); -static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; -} - int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len); -void kvm_lapic_init(void); void kvm_lapic_exit(void); #define VEC_POS(v) ((v) & (32 - 1)) @@ -172,29 +168,29 @@ static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 va __kvm_lapic_set_reg(apic->regs, reg_off, val); } -extern struct static_key kvm_no_apic_vcpu; +DECLARE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu) { - if 
(static_key_false(&kvm_no_apic_vcpu)) + if (static_branch_unlikely(&kvm_has_noapic_vcpu)) return vcpu->arch.apic; return true; } -extern struct static_key_deferred apic_hw_disabled; +extern struct static_key_false_deferred apic_hw_disabled; static inline int kvm_apic_hw_enabled(struct kvm_lapic *apic) { - if (static_key_false(&apic_hw_disabled.key)) + if (static_branch_unlikely(&apic_hw_disabled.key)) return apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE; return MSR_IA32_APICBASE_ENABLE; } -extern struct static_key_deferred apic_sw_disabled; +extern struct static_key_false_deferred apic_sw_disabled; static inline bool kvm_apic_sw_enabled(struct kvm_lapic *apic) { - if (static_key_false(&apic_sw_disabled.key)) + if (static_branch_unlikely(&apic_sw_disabled.key)) return apic->sw_enabled; return true; } diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 261be1d2032b..c68bfc3e2402 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -102,7 +102,7 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu) if (!VALID_PAGE(root_hpa)) return; - kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu), + static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa | kvm_get_active_pcid(vcpu), vcpu->arch.mmu->shadow_root_level); } @@ -152,7 +152,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, * * TODO: introduce APIs to split these two cases. */ -static inline int is_writable_pte(unsigned long pte) +static inline bool is_writable_pte(unsigned long pte) { return pte & PT_WRITABLE_MASK; } @@ -174,8 +174,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned pte_access, unsigned pte_pkey, unsigned pfec) { - int cpl = kvm_x86_ops.get_cpl(vcpu); - unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); + int cpl = static_call(kvm_x86_get_cpl)(vcpu); + unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); /* * If CPL < 3, SMAP prevention is disabled if EFLAGS.AC = 1. 
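[Editor's note] Two conversions recur throughout the hunks above: indirect kvm_x86_ops.foo(...) calls become static_call(kvm_x86_foo)(...), and bare struct static_key users move to the static_branch API. Below is a minimal, hedged sketch of the static_call pattern; the names my_do_thing and default_do_thing are invented for illustration, and the real KVM keys are generated from asm/kvm-x86-ops.h rather than declared by hand like this.

#include <linux/static_call.h>

static int default_do_thing(int val)
{
	return val;
}

/* One key and trampoline per op; call sites are patched, not dereferenced. */
DEFINE_STATIC_CALL(my_do_thing, default_do_thing);

static int caller(int val)
{
	/*
	 * Compiles to a direct call, avoiding the retpoline cost of an
	 * indirect branch through a function pointer.
	 */
	return static_call(my_do_thing)(val);
}

static void set_vendor_hook(int (*vendor_fn)(int))
{
	/* Retargets every static_call(my_do_thing) site, e.g. at init time. */
	static_call_update(my_do_thing, vendor_fn);
}

The static_call_cond() form seen in the irq.c hunk (kvm_x86_migrate_timers) is the same mechanism for hooks that may be NULL: the patched site degrades to a NOP instead of a call.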
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 6d16481aa29d..d75524bc8423 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -190,7 +190,7 @@ static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm, int ret = -ENOTSUPP; if (range && kvm_x86_ops.tlb_remote_flush_with_range) - ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range); + ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range); if (ret) kvm_flush_remote_tlbs(kvm); @@ -844,17 +844,17 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, int i, count = 0; if (!rmap_head->val) { - rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); + rmap_printk("%p %llx 0->1\n", spte, *spte); rmap_head->val = (unsigned long)spte; } else if (!(rmap_head->val & 1)) { - rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); + rmap_printk("%p %llx 1->many\n", spte, *spte); desc = mmu_alloc_pte_list_desc(vcpu); desc->sptes[0] = (u64 *)rmap_head->val; desc->sptes[1] = spte; rmap_head->val = (unsigned long)desc | 1; ++count; } else { - rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); + rmap_printk("%p %llx many->many\n", spte, *spte); desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); while (desc->sptes[PTE_LIST_EXT-1]) { count += PTE_LIST_EXT; @@ -906,14 +906,14 @@ static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) pr_err("%s: %p 0->BUG\n", __func__, spte); BUG(); } else if (!(rmap_head->val & 1)) { - rmap_printk("%s: %p 1->0\n", __func__, spte); + rmap_printk("%p 1->0\n", spte); if ((u64 *)rmap_head->val != spte) { pr_err("%s: %p 1->BUG\n", __func__, spte); BUG(); } rmap_head->val = 0; } else { - rmap_printk("%s: %p many->many\n", __func__, spte); + rmap_printk("%p many->many\n", spte); desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); prev_desc = NULL; while (desc) { @@ -1115,7 +1115,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect) !(pt_protect && spte_can_locklessly_be_made_writable(spte))) return false; - rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); + rmap_printk("spte %p %llx\n", sptep, *sptep); if (pt_protect) spte &= ~SPTE_MMU_WRITEABLE; @@ -1142,7 +1142,7 @@ static bool spte_clear_dirty(u64 *sptep) { u64 spte = *sptep; - rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep); + rmap_printk("spte %p %llx\n", sptep, *sptep); MMU_WARN_ON(!spte_ad_enabled(spte)); spte &= ~shadow_dirty_mask; @@ -1165,7 +1165,8 @@ static bool spte_wrprot_for_clear_dirty(u64 *sptep) * - W bit on ad-disabled SPTEs. * Returns true iff any D or W bits were cleared. */ -static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) +static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot) { u64 *sptep; struct rmap_iterator iter; @@ -1180,35 +1181,6 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) return flush; } -static bool spte_set_dirty(u64 *sptep) -{ - u64 spte = *sptep; - - rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep); - - /* - * Similar to the !kvm_x86_ops.slot_disable_log_dirty case, - * do not bother adding back write access to pages marked - * SPTE_AD_WRPROT_ONLY_MASK. 
- */ - spte |= shadow_dirty_mask; - - return mmu_spte_update(sptep, spte); -} - -static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) -{ - u64 *sptep; - struct rmap_iterator iter; - bool flush = false; - - for_each_rmap_spte(rmap_head, &iter, sptep) - if (spte_ad_enabled(*sptep)) - flush |= spte_set_dirty(sptep); - - return flush; -} - /** * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages * @kvm: kvm instance @@ -1225,7 +1197,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, { struct kvm_rmap_head *rmap_head; - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, slot->base_gfn + gfn_offset, mask, true); while (mask) { @@ -1248,25 +1220,24 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, * * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap. */ -void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, - struct kvm_memory_slot *slot, - gfn_t gfn_offset, unsigned long mask) +static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, + gfn_t gfn_offset, unsigned long mask) { struct kvm_rmap_head *rmap_head; - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, slot->base_gfn + gfn_offset, mask, false); while (mask) { rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), PG_LEVEL_4K, slot); - __rmap_clear_dirty(kvm, rmap_head); + __rmap_clear_dirty(kvm, rmap_head, slot); /* clear the first set bit */ mask &= mask - 1; } } -EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked); /** * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected @@ -1282,19 +1253,15 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) { - if (kvm_x86_ops.enable_log_dirty_pt_masked) - kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset, - mask); + if (kvm_x86_ops.cpu_dirty_log_size) + kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask); else kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); } int kvm_cpu_dirty_log_size(void) { - if (kvm_x86_ops.cpu_dirty_log_size) - return kvm_x86_ops.cpu_dirty_log_size(); - - return 0; + return kvm_x86_ops.cpu_dirty_log_size; } bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, @@ -1309,7 +1276,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, write_protected |= __rmap_write_protect(kvm, rmap_head, true); } - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) write_protected |= kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn); @@ -1324,14 +1291,15 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn); } -static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head) +static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot) { u64 *sptep; struct rmap_iterator iter; bool flush = false; while ((sptep = rmap_get_first(rmap_head, &iter))) { - rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep); + rmap_printk("spte %p %llx.\n", sptep, *sptep); pte_list_remove(rmap_head, sptep); flush = true; @@ -1344,7 +1312,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, unsigned long data) { - return kvm_zap_rmapp(kvm, rmap_head); + return kvm_zap_rmapp(kvm, rmap_head, slot); } static int 
kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, @@ -1363,7 +1331,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, restart: for_each_rmap_spte(rmap_head, &iter, sptep) { - rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n", + rmap_printk("spte %p %llx gfn %llx (%d)\n", sptep, *sptep, gfn, level); need_flush = 1; @@ -1456,16 +1424,17 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) slot_rmap_walk_okay(_iter_); \ slot_rmap_walk_next(_iter_)) -static int kvm_handle_hva_range(struct kvm *kvm, - unsigned long start, - unsigned long end, - unsigned long data, - int (*handler)(struct kvm *kvm, - struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, - gfn_t gfn, - int level, - unsigned long data)) +static __always_inline int +kvm_handle_hva_range(struct kvm *kvm, + unsigned long start, + unsigned long end, + unsigned long data, + int (*handler)(struct kvm *kvm, + struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, + gfn_t gfn, + int level, + unsigned long data)) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; @@ -1521,7 +1490,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end); return r; @@ -1533,7 +1502,7 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte); return r; @@ -1588,7 +1557,7 @@ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) int young = false; young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) young |= kvm_tdp_mmu_age_hva_range(kvm, start, end); return young; @@ -1599,7 +1568,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) int young = false; young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) young |= kvm_tdp_mmu_test_age_hva(kvm, hva); return young; @@ -1723,13 +1692,6 @@ static int nonpaging_sync_page(struct kvm_vcpu *vcpu, return 0; } -static void nonpaging_update_pte(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, u64 *spte, - const void *pte) -{ - WARN_ON(1); -} - #define KVM_PAGE_ARRAY_NR 16 struct kvm_mmu_pages { @@ -2016,9 +1978,9 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, flush |= kvm_sync_page(vcpu, sp, &invalid_list); mmu_pages_clear_parents(&parents); } - if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) { + if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); - cond_resched_lock(&vcpu->kvm->mmu_lock); + cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); flush = false; } } @@ -2417,7 +2379,7 @@ static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm, return 0; restart: - list_for_each_entry_safe(sp, tmp, &kvm->arch.active_mmu_pages, link) { + list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) { /* * Don't zap active root pages, the page itself can't be freed * and zapping it will just force vCPUs to realloc and reload. 
@@ -2470,7 +2432,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu) */ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) { - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages - @@ -2481,7 +2443,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) @@ -2492,7 +2454,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) pgprintk("%s: looking for gfn %llx\n", __func__, gfn); r = 0; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { pgprintk("%s: gfn %llx role %x\n", __func__, gfn, sp->role.word); @@ -2500,11 +2462,25 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); } kvm_mmu_commit_zap_page(kvm, &invalid_list); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); + + return r; +} + +static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) +{ + gpa_t gpa; + int r; + + if (vcpu->arch.mmu->direct_map) + return 0; + + gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); + + r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); return r; } -EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page); static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { @@ -2758,11 +2734,18 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) if (sp->role.level > PG_LEVEL_4K) return; + /* + * If addresses are being invalidated, skip prefetching to avoid + * accidentally prefetching those addresses. 
+ */ + if (unlikely(vcpu->kvm->mmu_notifier_count)) + return; + __direct_pte_prefetch(vcpu, sp, sptep); } -static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, - kvm_pfn_t pfn, struct kvm_memory_slot *slot) +static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, + struct kvm_memory_slot *slot) { unsigned long hva; pte_t *pte; @@ -2781,19 +2764,36 @@ static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, */ hva = __gfn_to_hva_memslot(slot, gfn); - pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level); + pte = lookup_address_in_mm(kvm->mm, hva, &level); if (unlikely(!pte)) return PG_LEVEL_4K; return level; } +int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot, + gfn_t gfn, kvm_pfn_t pfn, int max_level) +{ + struct kvm_lpage_info *linfo; + + max_level = min(max_level, max_huge_page_level); + for ( ; max_level > PG_LEVEL_4K; max_level--) { + linfo = lpage_info_slot(gfn, slot, max_level); + if (!linfo->disallow_lpage) + break; + } + + if (max_level == PG_LEVEL_4K) + return PG_LEVEL_4K; + + return host_pfn_mapping_level(kvm, gfn, pfn, slot); +} + int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, int max_level, kvm_pfn_t *pfnp, bool huge_page_disallowed, int *req_level) { struct kvm_memory_slot *slot; - struct kvm_lpage_info *linfo; kvm_pfn_t pfn = *pfnp; kvm_pfn_t mask; int level; @@ -2810,17 +2810,7 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, if (!slot) return PG_LEVEL_4K; - max_level = min(max_level, max_huge_page_level); - for ( ; max_level > PG_LEVEL_4K; max_level--) { - linfo = lpage_info_slot(gfn, slot, max_level); - if (!linfo->disallow_lpage) - break; - } - - if (max_level == PG_LEVEL_4K) - return PG_LEVEL_4K; - - level = host_pfn_mapping_level(vcpu, gfn, pfn, slot); + level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level); if (level == PG_LEVEL_4K) return level; @@ -3161,7 +3151,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK); if (kvm_mmu_put_root(kvm, sp)) { - if (sp->tdp_mmu_page) + if (is_tdp_mmu_page(sp)) kvm_tdp_mmu_free_root(kvm, sp); else if (sp->role.invalid) kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); @@ -3192,7 +3182,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, return; } - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) @@ -3215,7 +3205,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, } kvm_mmu_commit_zap_page(kvm, &invalid_list); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); @@ -3236,16 +3226,16 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva, { struct kvm_mmu_page *sp; - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); if (make_mmu_pages_available(vcpu)) { - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); return INVALID_PAGE; } sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL); ++sp->root_count; - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); return __pa(sp->spt); } @@ -3255,7 +3245,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) hpa_t root; unsigned i; - if (vcpu->kvm->arch.tdp_mmu_enabled) { + if (is_tdp_mmu_enabled(vcpu->kvm)) { root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu); if (!VALID_PAGE(root)) @@ -3416,17 +3406,17 @@ void 
kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) !smp_load_acquire(&sp->unsync_children)) return; - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); mmu_sync_children(vcpu, sp); kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); return; } - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); for (i = 0; i < 4; ++i) { @@ -3440,9 +3430,8 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) } kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); } -EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots); static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr, u32 access, struct x86_exception *exception) @@ -3658,8 +3647,8 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, } static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, - gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write, - bool *writable) + gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva, + bool write, bool *writable) { struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); bool async; @@ -3672,7 +3661,8 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, } async = false; - *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable); + *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, + write, writable, hva); if (!async) return false; /* *pfn has correct page already */ @@ -3686,7 +3676,8 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, return true; } - *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable); + *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, + write, writable, hva); return false; } @@ -3699,6 +3690,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, gfn_t gfn = gpa >> PAGE_SHIFT; unsigned long mmu_seq; kvm_pfn_t pfn; + hva_t hva; int r; if (page_fault_handle_page_track(vcpu, error_code, gfn)) @@ -3717,15 +3709,21 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); - if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) + if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, &hva, + write, &map_writable)) return RET_PF_RETRY; if (handle_abnormal_pfn(vcpu, is_tdp ? 
0 : gpa, gfn, pfn, ACC_ALL, &r)) return r; r = RET_PF_RETRY; - spin_lock(&vcpu->kvm->mmu_lock); - if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) + + if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) + read_lock(&vcpu->kvm->mmu_lock); + else + write_lock(&vcpu->kvm->mmu_lock); + + if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva)) goto out_unlock; r = make_mmu_pages_available(vcpu); if (r) @@ -3739,7 +3737,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, prefault, is_tdp); out_unlock: - spin_unlock(&vcpu->kvm->mmu_lock); + if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) + read_unlock(&vcpu->kvm->mmu_lock); + else + write_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); return r; } @@ -3813,7 +3814,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu, context->gva_to_gpa = nonpaging_gva_to_gpa; context->sync_page = nonpaging_sync_page; context->invlpg = NULL; - context->update_pte = nonpaging_update_pte; context->root_level = 0; context->shadow_root_level = PT32E_ROOT_LEVEL; context->direct_map = true; @@ -3984,20 +3984,27 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, static void __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, struct rsvd_bits_validate *rsvd_check, - int maxphyaddr, int level, bool nx, bool gbpages, + u64 pa_bits_rsvd, int level, bool nx, bool gbpages, bool pse, bool amd) { - u64 exb_bit_rsvd = 0; u64 gbpages_bit_rsvd = 0; u64 nonleaf_bit8_rsvd = 0; + u64 high_bits_rsvd; rsvd_check->bad_mt_xwr = 0; - if (!nx) - exb_bit_rsvd = rsvd_bits(63, 63); if (!gbpages) gbpages_bit_rsvd = rsvd_bits(7, 7); + if (level == PT32E_ROOT_LEVEL) + high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62); + else + high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51); + + /* Note, NX doesn't exist in PDPTEs, this is handled below. */ + if (!nx) + high_bits_rsvd |= rsvd_bits(63, 63); + /* * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for * leaf entries) on AMD CPUs only. 
@@ -4026,45 +4033,39 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); break; case PT32E_ROOT_LEVEL: - rsvd_check->rsvd_bits_mask[0][2] = - rsvd_bits(maxphyaddr, 63) | - rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */ - rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 62); /* PDE */ - rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 62); /* PTE */ - rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 62) | - rsvd_bits(13, 20); /* large page */ + rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) | + high_bits_rsvd | + rsvd_bits(5, 8) | + rsvd_bits(1, 2); /* PDPTE */ + rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; /* PDE */ + rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* PTE */ + rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | + rsvd_bits(13, 20); /* large page */ rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; break; case PT64_ROOT_5LEVEL: - rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd | - nonleaf_bit8_rsvd | rsvd_bits(7, 7) | - rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | + nonleaf_bit8_rsvd | + rsvd_bits(7, 7); rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; fallthrough; case PT64_ROOT_4LEVEL: - rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | - nonleaf_bit8_rsvd | rsvd_bits(7, 7) | - rsvd_bits(maxphyaddr, 51); - rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | - gbpages_bit_rsvd | - rsvd_bits(maxphyaddr, 51); - rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 51); - rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | + nonleaf_bit8_rsvd | + rsvd_bits(7, 7); + rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | + gbpages_bit_rsvd; + rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; + rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; - rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd | - gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) | - rsvd_bits(13, 29); - rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 51) | - rsvd_bits(13, 20); /* large page */ + rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | + gbpages_bit_rsvd | + rsvd_bits(13, 29); + rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | + rsvd_bits(13, 20); /* large page */ rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; break; @@ -4075,8 +4076,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, - cpuid_maxphyaddr(vcpu), context->root_level, - context->nx, + vcpu->arch.reserved_gpa_bits, + context->root_level, context->nx, guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), is_pse(vcpu), guest_cpuid_is_amd_or_hygon(vcpu)); @@ -4084,27 +4085,22 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, - int maxphyaddr, bool execonly) + u64 pa_bits_rsvd, bool execonly) { + u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51); u64 bad_mt_xwr; - rsvd_check->rsvd_bits_mask[0][4] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); - rsvd_check->rsvd_bits_mask[0][3] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); - rsvd_check->rsvd_bits_mask[0][2] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); - rsvd_check->rsvd_bits_mask[0][1] = - 
rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); - rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7); + rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7); + rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6); + rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6); + rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* large page */ rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; - rsvd_check->rsvd_bits_mask[1][2] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); - rsvd_check->rsvd_bits_mask[1][1] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20); + rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29); + rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20); rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */ @@ -4123,7 +4119,12 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly) { __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, - cpuid_maxphyaddr(vcpu), execonly); + vcpu->arch.reserved_gpa_bits, execonly); +} + +static inline u64 reserved_hpa_bits(void) +{ + return rsvd_bits(shadow_phys_bits, 63); } /* @@ -4145,7 +4146,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) */ shadow_zero_check = &context->shadow_zero_check; __reset_rsvds_bits_mask(vcpu, shadow_zero_check, - shadow_phys_bits, + reserved_hpa_bits(), context->shadow_root_level, uses_nx, guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), is_pse(vcpu), true); @@ -4182,14 +4183,13 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, if (boot_cpu_is_amd()) __reset_rsvds_bits_mask(vcpu, shadow_zero_check, - shadow_phys_bits, + reserved_hpa_bits(), context->shadow_root_level, false, boot_cpu_has(X86_FEATURE_GBPAGES), true, true); else __reset_rsvds_bits_mask_ept(shadow_zero_check, - shadow_phys_bits, - false); + reserved_hpa_bits(), false); if (!shadow_me_mask) return; @@ -4209,7 +4209,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly) { __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, - shadow_phys_bits, execonly); + reserved_hpa_bits(), execonly); } #define BYTE_MASK(access) \ @@ -4395,7 +4395,6 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu, context->gva_to_gpa = paging64_gva_to_gpa; context->sync_page = paging64_sync_page; context->invlpg = paging64_invlpg; - context->update_pte = paging64_update_pte; context->shadow_root_level = level; context->direct_map = false; } @@ -4424,7 +4423,6 @@ static void paging32_init_context(struct kvm_vcpu *vcpu, context->gva_to_gpa = paging32_gva_to_gpa; context->sync_page = paging32_sync_page; context->invlpg = paging32_invlpg; - context->update_pte = paging32_update_pte; context->shadow_root_level = PT32E_ROOT_LEVEL; context->direct_map = false; } @@ -4506,7 +4504,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) context->page_fault = kvm_tdp_page_fault; context->sync_page = nonpaging_sync_page; context->invlpg = NULL; - context->update_pte = nonpaging_update_pte; context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu); context->direct_map = true; context->get_guest_pgd = get_cr3; @@ -4678,7 +4675,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, context->gva_to_gpa = 
ept_gva_to_gpa; context->sync_page = ept_sync_page; context->invlpg = ept_invlpg; - context->update_pte = ept_update_pte; context->root_level = level; context->direct_map = false; context->mmu_role.as_u64 = new_role.as_u64; @@ -4811,7 +4807,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu) if (r) goto out; kvm_mmu_load_pgd(vcpu); - kvm_x86_ops.tlb_flush_current(vcpu); + static_call(kvm_x86_tlb_flush_current)(vcpu); out: return r; } @@ -4826,19 +4822,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_mmu_unload); -static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, u64 *spte, - const void *new) -{ - if (sp->role.level != PG_LEVEL_4K) { - ++vcpu->kvm->stat.mmu_pde_zapped; - return; - } - - ++vcpu->kvm->stat.mmu_pte_updated; - vcpu->arch.mmu->update_pte(vcpu, sp, spte, new); -} - static bool need_remote_flush(u64 old, u64 new) { if (!is_shadow_present_pte(old)) @@ -4954,22 +4937,6 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) return spte; } -/* - * Ignore various flags when determining if a SPTE can be immediately - * overwritten for the current MMU. - * - level: explicitly checked in mmu_pte_write_new_pte(), and will never - * match the current MMU role, as MMU's level tracks the root level. - * - access: updated based on the new guest PTE - * - quadrant: handled by get_written_sptes() - * - invalid: always false (loop only walks valid shadow pages) - */ -static const union kvm_mmu_page_role role_ign = { - .level = 0xf, - .access = 0x7, - .quadrant = 0x3, - .invalid = 0x1, -}; - static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, int bytes, struct kvm_page_track_notifier_node *node) @@ -4999,7 +4966,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, */ mmu_topup_memory_caches(vcpu, true); - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); @@ -5020,14 +4987,10 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, local_flush = true; while (npte--) { - u32 base_role = vcpu->arch.mmu->mmu_role.base.word; - entry = *spte; mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL); - if (gentry && - !((sp->role.word ^ base_role) & ~role_ign.word) && - rmap_can_add(vcpu)) - mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); + if (gentry && sp->role.level != PG_LEVEL_4K) + ++vcpu->kvm->stat.mmu_pde_zapped; if (need_remote_flush(entry, *spte)) remote_flush = true; ++spte; @@ -5035,24 +4998,8 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, } kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); - spin_unlock(&vcpu->kvm->mmu_lock); -} - -int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) -{ - gpa_t gpa; - int r; - - if (vcpu->arch.mmu->direct_map) - return 0; - - gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); - - r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); - - return r; + write_unlock(&vcpu->kvm->mmu_lock); } -EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, void *insn, int insn_len) @@ -5125,7 +5072,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, if (is_noncanonical_address(gva, vcpu)) return; - kvm_x86_ops.tlb_flush_gva(vcpu, gva); + static_call(kvm_x86_tlb_flush_gva)(vcpu, gva); } if (!mmu->invlpg) @@ -5152,7 +5099,6 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, 
mmu->invlpg(vcpu, gva, root_hpa); } } -EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { @@ -5182,7 +5128,7 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) } if (tlb_flush) - kvm_x86_ops.tlb_flush_gva(vcpu, gva); + static_call(kvm_x86_tlb_flush_gva)(vcpu, gva); ++vcpu->stat.invlpg; @@ -5192,7 +5138,6 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) * for them. */ } -EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva); void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level, int tdp_huge_page_level) @@ -5217,7 +5162,8 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level, EXPORT_SYMBOL_GPL(kvm_configure_mmu); /* The return value indicates if tlb flush on all vcpus is needed. */ -typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head); +typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot); /* The caller should hold mmu-lock before calling this function. */ static __always_inline bool @@ -5231,16 +5177,16 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn, end_gfn, &iterator) { if (iterator.rmap) - flush |= fn(kvm, iterator.rmap); + flush |= fn(kvm, iterator.rmap, memslot); - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { if (flush && lock_flush_tlb) { kvm_flush_remote_tlbs_with_address(kvm, start_gfn, iterator.gfn - start_gfn + 1); flush = false; } - cond_resched_lock(&kvm->mmu_lock); + cond_resched_rwlock_write(&kvm->mmu_lock); } } @@ -5265,22 +5211,6 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, } static __always_inline bool -slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, - slot_level_handler fn, bool lock_flush_tlb) -{ - return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K, - KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); -} - -static __always_inline bool -slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, - slot_level_handler fn, bool lock_flush_tlb) -{ - return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K + 1, - KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); -} - -static __always_inline bool slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { @@ -5390,7 +5320,7 @@ restart: * be in active use by the guest. 
*/ if (batch >= BATCH_ZAP_PAGES && - cond_resched_lock(&kvm->mmu_lock)) { + cond_resched_rwlock_write(&kvm->mmu_lock)) { batch = 0; goto restart; } @@ -5423,7 +5353,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) { lockdep_assert_held(&kvm->slots_lock); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); trace_kvm_mmu_zap_all_fast(kvm); /* @@ -5447,10 +5377,10 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) kvm_zap_obsolete_pages(kvm); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) kvm_tdp_mmu_zap_all(kvm); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) @@ -5492,7 +5422,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) int i; bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { slots = __kvm_memslots(kvm, i); kvm_for_each_memslot(memslot, slots) { @@ -5510,17 +5440,18 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) } } - if (kvm->arch.tdp_mmu_enabled) { + if (is_tdp_mmu_enabled(kvm)) { flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end); if (flush) kvm_flush_remote_tlbs(kvm); } - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } static bool slot_rmap_write_protect(struct kvm *kvm, - struct kvm_rmap_head *rmap_head) + struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot) { return __rmap_write_protect(kvm, rmap_head, false); } @@ -5531,12 +5462,12 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, { bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect, start_level, KVM_MAX_HUGEPAGE_LEVEL, false); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); /* * We can flush all the TLBs out of the mmu lock without TLB @@ -5554,7 +5485,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, } static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, - struct kvm_rmap_head *rmap_head) + struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot) { u64 *sptep; struct rmap_iterator iter; @@ -5575,8 +5507,8 @@ restart: * mapping if the indirect sp has level = 1. */ if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && - (kvm_is_zone_device_pfn(pfn) || - PageCompound(pfn_to_page(pfn)))) { + sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn, + pfn, PG_LEVEL_NUM)) { pte_list_remove(rmap_head, sptep); if (kvm_available_flush_tlb_with_range()) @@ -5596,13 +5528,14 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *memslot) { /* FIXME: const-ify all uses of struct kvm_memory_slot. 
*/ - spin_lock(&kvm->mmu_lock); - slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot, - kvm_mmu_zap_collapsible_spte, true); + struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot; - if (kvm->arch.tdp_mmu_enabled) - kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot); - spin_unlock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); + slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true); + + if (is_tdp_mmu_enabled(kvm)) + kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot); + write_unlock(&kvm->mmu_lock); } void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, @@ -5625,11 +5558,11 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, { bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); /* * It's also safe to flush TLBs out of mmu lock here as currently this @@ -5640,40 +5573,6 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, if (flush) kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); } -EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty); - -void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, - struct kvm_memory_slot *memslot) -{ - bool flush; - - spin_lock(&kvm->mmu_lock); - flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect, - false); - if (kvm->arch.tdp_mmu_enabled) - flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M); - spin_unlock(&kvm->mmu_lock); - - if (flush) - kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); -} -EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access); - -void kvm_mmu_slot_set_dirty(struct kvm *kvm, - struct kvm_memory_slot *memslot) -{ - bool flush; - - spin_lock(&kvm->mmu_lock); - flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false); - if (kvm->arch.tdp_mmu_enabled) - flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot); - spin_unlock(&kvm->mmu_lock); - - if (flush) - kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); -} -EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty); void kvm_mmu_zap_all(struct kvm *kvm) { @@ -5681,23 +5580,23 @@ void kvm_mmu_zap_all(struct kvm *kvm) LIST_HEAD(invalid_list); int ign; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); restart: list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { if (WARN_ON(sp->role.invalid)) continue; if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) goto restart; - if (cond_resched_lock(&kvm->mmu_lock)) + if (cond_resched_rwlock_write(&kvm->mmu_lock)) goto restart; } kvm_mmu_commit_zap_page(kvm, &invalid_list); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) kvm_tdp_mmu_zap_all(kvm); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) @@ -5757,7 +5656,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) continue; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); if (kvm_has_zapped_obsolete_pages(kvm)) { kvm_mmu_commit_zap_page(kvm, @@ -5768,7 +5667,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan); unlock: - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); /* @@ -5988,7 +5887,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) ulong to_zap; rcu_idx = 
srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); ratio = READ_ONCE(nx_huge_pages_recovery_ratio); to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0; @@ -6005,22 +5904,22 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) struct kvm_mmu_page, lpage_disallowed_link); WARN_ON_ONCE(!sp->lpage_disallowed); - if (sp->tdp_mmu_page) + if (is_tdp_mmu_page(sp)) { kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level)); - else { + } else { kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); WARN_ON_ONCE(sp->lpage_disallowed); } - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { kvm_mmu_commit_zap_page(kvm, &invalid_list); - cond_resched_lock(&kvm->mmu_lock); + cond_resched_rwlock_write(&kvm->mmu_lock); } } kvm_mmu_commit_zap_page(kvm, &invalid_list); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, rcu_idx); } diff --git a/arch/x86/kvm/mmu/mmu_audit.c b/arch/x86/kvm/mmu/mmu_audit.c index c8d51a37e2ce..ced15fd58fde 100644 --- a/arch/x86/kvm/mmu/mmu_audit.c +++ b/arch/x86/kvm/mmu/mmu_audit.c @@ -234,7 +234,7 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu) } static bool mmu_audit; -static struct static_key mmu_audit_key; +static DEFINE_STATIC_KEY_FALSE(mmu_audit_key); static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { @@ -250,7 +250,7 @@ static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { - if (static_key_false((&mmu_audit_key))) + if (static_branch_unlikely((&mmu_audit_key))) __kvm_mmu_audit(vcpu, point); } @@ -259,7 +259,7 @@ static void mmu_audit_enable(void) if (mmu_audit) return; - static_key_slow_inc(&mmu_audit_key); + static_branch_inc(&mmu_audit_key); mmu_audit = true; } @@ -268,7 +268,7 @@ static void mmu_audit_disable(void) if (!mmu_audit) return; - static_key_slow_dec(&mmu_audit_key); + static_branch_dec(&mmu_audit_key); mmu_audit = false; } diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index bfc6389edc28..ec4fc28b325a 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -12,7 +12,7 @@ extern bool dbg; #define pgprintk(x...) do { if (dbg) printk(x); } while (0) -#define rmap_printk(x...) do { if (dbg) printk(x); } while (0) +#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0) #define MMU_WARN_ON(x) WARN_ON(x) #else #define pgprintk(x...) do { } while (0) @@ -56,7 +56,12 @@ struct kvm_mmu_page { /* Number of writes since the last time traversal visited this page. */ atomic_t write_flooding_count; +#ifdef CONFIG_X86_64 bool tdp_mmu_page; + + /* Used for freeing the page asynchronously if it is a TDP MMU page. */ + struct rcu_head rcu_head; +#endif }; extern struct kmem_cache *mmu_page_header_cache; @@ -76,12 +81,15 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep) static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu) { /* - * When using the EPT page-modification log, the GPAs in the log - * would come from L2 rather than L1. Therefore, we need to rely - * on write protection to record dirty pages. This also bypasses - * PML, since writes now result in a vmexit. + * When using the EPT page-modification log, the GPAs in the CPU dirty + * log would come from L2 rather than L1. 
Therefore, we need to rely + * on write protection to record dirty pages, which bypasses PML, since + * writes now result in a vmexit. Note, the check on CPU dirty logging + * being enabled is mandatory as the bits used to denote WP-only SPTEs + * are reserved for NPT w/ PAE (32-bit KVM). */ - return vcpu->arch.mmu == &vcpu->arch.guest_mmu; + return vcpu->arch.mmu == &vcpu->arch.guest_mmu && + kvm_x86_ops.cpu_dirty_log_size; } bool is_nx_huge_page_enabled(void); @@ -133,6 +141,8 @@ enum { #define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1) #define SET_SPTE_SPURIOUS BIT(2) +int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot, + gfn_t gfn, kvm_pfn_t pfn, int max_level); int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, int max_level, kvm_pfn_t *pfnp, bool huge_page_disallowed, int *req_level); diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 8443a675715b..34bb0ec69bd8 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -184,9 +184,9 @@ kvm_page_track_register_notifier(struct kvm *kvm, head = &kvm->arch.track_notifier_head; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); hlist_add_head_rcu(&n->node, &head->track_notifier_list); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier); @@ -202,9 +202,9 @@ kvm_page_track_unregister_notifier(struct kvm *kvm, head = &kvm->arch.track_notifier_head; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); hlist_del_rcu(&n->node); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); synchronize_srcu(&head->track_srcu); } EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 50e268eb8e1a..55d7b473ac44 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -601,6 +601,13 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, if (sp->role.level > PG_LEVEL_4K) return; + /* + * If addresses are being invalidated, skip prefetching to avoid + * accidentally prefetching those addresses. 
+ */ + if (unlikely(vcpu->kvm->mmu_notifier_count)) + return; + if (sp->role.direct) return __direct_pte_prefetch(vcpu, sp, sptep); @@ -790,6 +797,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, struct guest_walker walker; int r; kvm_pfn_t pfn; + hva_t hva; unsigned long mmu_seq; bool map_writable, is_self_change_mapping; int max_level; @@ -840,8 +848,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); - if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault, - &map_writable)) + if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, &hva, + write_fault, &map_writable)) return RET_PF_RETRY; if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r)) @@ -868,8 +876,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, } r = RET_PF_RETRY; - spin_lock(&vcpu->kvm->mmu_lock); - if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) + write_lock(&vcpu->kvm->mmu_lock); + if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva)) goto out_unlock; kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); @@ -881,7 +889,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); out_unlock: - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); return r; } @@ -919,7 +927,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) return; } - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) { level = iterator.level; sptep = iterator.sptep; @@ -954,7 +962,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) if (!is_shadow_present_pte(*sptep) || !sp->unsync_children) break; } - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); } /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */ diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index c51ad544f25b..ef55f0bc4ccf 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -120,7 +120,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, if (level > PG_LEVEL_4K) spte |= PT_PAGE_SIZE_MASK; if (tdp_enabled) - spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn, + spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn)); if (host_writable) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 2b3a30bd38b0..6de3950fd704 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -131,6 +131,25 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask; #define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT PT64_SECOND_AVAIL_BITS_SHIFT /* + * If a thread running without exclusive control of the MMU lock must perform a + * multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as a + * non-present intermediate value. Other threads which encounter this value + * should not modify the SPTE. + * + * This constant works because it is considered non-present on both AMD and + * Intel CPUs and does not create a L1TF vulnerability because the pfn section + * is zeroed out. + * + * Only used by the TDP MMU. 
+ */
+#define REMOVED_SPTE (1ull << 59)
+
+static inline bool is_removed_spte(u64 spte)
+{
+	return spte == REMOVED_SPTE;
+}
+
+/*
  * In some cases, we need to preserve the GFN of a non-present or reserved
  * SPTE when we usurp the upper five bits of the physical address space to
  * defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll
@@ -185,23 +204,19 @@ static inline bool is_access_track_spte(u64 spte)
 	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
 }
 
-static inline int is_shadow_present_pte(u64 pte)
+static inline bool is_shadow_present_pte(u64 pte)
 {
-	return (pte != 0) && !is_mmio_spte(pte);
+	return (pte != 0) && !is_mmio_spte(pte) && !is_removed_spte(pte);
 }
 
-static inline int is_large_pte(u64 pte)
+static inline bool is_large_pte(u64 pte)
 {
 	return pte & PT_PAGE_SIZE_MASK;
 }
 
-static inline int is_last_spte(u64 pte, int level)
+static inline bool is_last_spte(u64 pte, int level)
 {
-	if (level == PG_LEVEL_4K)
-		return 1;
-	if (is_large_pte(pte))
-		return 1;
-	return 0;
+	return (level == PG_LEVEL_4K) || is_large_pte(pte);
 }
 
 static inline bool is_executable_pte(u64 spte)
diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
index 87b7e16911db..e5f148106e20 100644
--- a/arch/x86/kvm/mmu/tdp_iter.c
+++ b/arch/x86/kvm/mmu/tdp_iter.c
@@ -12,7 +12,7 @@ static void tdp_iter_refresh_sptep(struct tdp_iter *iter)
 {
 	iter->sptep = iter->pt_path[iter->level - 1] +
 		SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level);
-	iter->old_spte = READ_ONCE(*iter->sptep);
+	iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));
 }
 
 static gfn_t round_gfn_for_level(gfn_t gfn, int level)
@@ -22,21 +22,22 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
 /*
  * Sets a TDP iterator to walk a pre-order traversal of the paging structure
- * rooted at root_pt, starting with the walk to translate goal_gfn.
+ * rooted at root_pt, starting with the walk to translate next_last_level_gfn.
  */
 void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
-		    int min_level, gfn_t goal_gfn)
+		    int min_level, gfn_t next_last_level_gfn)
 {
 	WARN_ON(root_level < 1);
 	WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
 
-	iter->goal_gfn = goal_gfn;
+	iter->next_last_level_gfn = next_last_level_gfn;
+	iter->yielded_gfn = iter->next_last_level_gfn;
 	iter->root_level = root_level;
 	iter->min_level = min_level;
 	iter->level = root_level;
-	iter->pt_path[iter->level - 1] = root_pt;
+	iter->pt_path[iter->level - 1] = (tdp_ptep_t)root_pt;
 
-	iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level);
+	iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
 	tdp_iter_refresh_sptep(iter);
 
 	iter->valid = true;
@@ -47,7 +48,7 @@ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
  * address of the child page table referenced by the SPTE. Returns null if
  * there is no such entry.
*/ -u64 *spte_to_child_pt(u64 spte, int level) +tdp_ptep_t spte_to_child_pt(u64 spte, int level) { /* * There's no child entry if this entry isn't present or is a @@ -56,7 +57,7 @@ u64 *spte_to_child_pt(u64 spte, int level) if (!is_shadow_present_pte(spte) || is_last_spte(spte, level)) return NULL; - return __va(spte_to_pfn(spte) << PAGE_SHIFT); + return (tdp_ptep_t)__va(spte_to_pfn(spte) << PAGE_SHIFT); } /* @@ -65,7 +66,7 @@ u64 *spte_to_child_pt(u64 spte, int level) */ static bool try_step_down(struct tdp_iter *iter) { - u64 *child_pt; + tdp_ptep_t child_pt; if (iter->level == iter->min_level) return false; @@ -74,7 +75,7 @@ static bool try_step_down(struct tdp_iter *iter) * Reread the SPTE before stepping down to avoid traversing into page * tables that are no longer linked from this entry. */ - iter->old_spte = READ_ONCE(*iter->sptep); + iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep)); child_pt = spte_to_child_pt(iter->old_spte, iter->level); if (!child_pt) @@ -82,7 +83,7 @@ static bool try_step_down(struct tdp_iter *iter) iter->level--; iter->pt_path[iter->level - 1] = child_pt; - iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level); + iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level); tdp_iter_refresh_sptep(iter); return true; @@ -106,9 +107,9 @@ static bool try_step_side(struct tdp_iter *iter) return false; iter->gfn += KVM_PAGES_PER_HPAGE(iter->level); - iter->goal_gfn = iter->gfn; + iter->next_last_level_gfn = iter->gfn; iter->sptep++; - iter->old_spte = READ_ONCE(*iter->sptep); + iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep)); return true; } @@ -158,24 +159,7 @@ void tdp_iter_next(struct tdp_iter *iter) iter->valid = false; } -/* - * Restart the walk over the paging structure from the root, starting from the - * highest gfn the iterator had previously reached. Assumes that the entire - * paging structure, except the root page, may have been completely torn down - * and rebuilt. - */ -void tdp_iter_refresh_walk(struct tdp_iter *iter) -{ - gfn_t goal_gfn = iter->goal_gfn; - - if (iter->gfn > goal_gfn) - goal_gfn = iter->gfn; - - tdp_iter_start(iter, iter->pt_path[iter->root_level - 1], - iter->root_level, iter->min_level, goal_gfn); -} - -u64 *tdp_iter_root_pt(struct tdp_iter *iter) +tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter) { return iter->pt_path[iter->root_level - 1]; } diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h index 47170d0dc98e..4cc177d75c4a 100644 --- a/arch/x86/kvm/mmu/tdp_iter.h +++ b/arch/x86/kvm/mmu/tdp_iter.h @@ -7,6 +7,8 @@ #include "mmu.h" +typedef u64 __rcu *tdp_ptep_t; + /* * A TDP iterator performs a pre-order walk over a TDP paging structure. */ @@ -15,11 +17,17 @@ struct tdp_iter { * The iterator will traverse the paging structure towards the mapping * for this GFN. */ - gfn_t goal_gfn; + gfn_t next_last_level_gfn; + /* + * The next_last_level_gfn at the time when the thread last + * yielded. Only yielding when the next_last_level_gfn != + * yielded_gfn helps ensure forward progress. 
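+	 *
+	 * e.g. tdp_mmu_iter_cond_resched() refuses to yield while
+	 * next_last_level_gfn == yielded_gfn, so the walk must advance
+	 * by at least one last-level entry between two yields.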
+ */ + gfn_t yielded_gfn; /* Pointers to the page tables traversed to reach the current SPTE */ - u64 *pt_path[PT64_ROOT_MAX_LEVEL]; + tdp_ptep_t pt_path[PT64_ROOT_MAX_LEVEL]; /* A pointer to the current SPTE */ - u64 *sptep; + tdp_ptep_t sptep; /* The lowest GFN mapped by the current SPTE */ gfn_t gfn; /* The level of the root page given to the iterator */ @@ -49,12 +57,11 @@ struct tdp_iter { #define for_each_tdp_pte(iter, root, root_level, start, end) \ for_each_tdp_pte_min_level(iter, root, root_level, PG_LEVEL_4K, start, end) -u64 *spte_to_child_pt(u64 pte, int level); +tdp_ptep_t spte_to_child_pt(u64 pte, int level); void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, - int min_level, gfn_t goal_gfn); + int min_level, gfn_t next_last_level_gfn); void tdp_iter_next(struct tdp_iter *iter); -void tdp_iter_refresh_walk(struct tdp_iter *iter); -u64 *tdp_iter_root_pt(struct tdp_iter *iter); +tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter); #endif /* __KVM_X86_MMU_TDP_ITER_H */ diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index b56d604809b8..c926c6b899a1 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -7,32 +7,23 @@ #include "tdp_mmu.h" #include "spte.h" +#include <asm/cmpxchg.h> #include <trace/events/kvm.h> -#ifdef CONFIG_X86_64 static bool __read_mostly tdp_mmu_enabled = false; module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644); -#endif - -static bool is_tdp_mmu_enabled(void) -{ -#ifdef CONFIG_X86_64 - return tdp_enabled && READ_ONCE(tdp_mmu_enabled); -#else - return false; -#endif /* CONFIG_X86_64 */ -} /* Initializes the TDP MMU for the VM, if enabled. */ void kvm_mmu_init_tdp_mmu(struct kvm *kvm) { - if (!is_tdp_mmu_enabled()) + if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled)) return; /* This should not be changed for the lifetime of the VM. */ kvm->arch.tdp_mmu_enabled = true; INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); + spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages); } @@ -42,6 +33,12 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) return; WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); + + /* + * Ensure that all the outstanding RCU callbacks to free shadow pages + * can run before the VM is torn down. 
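+	 *
+	 * (Shadow pages are freed via call_rcu(&sp->rcu_head,
+	 * tdp_mmu_free_sp_rcu_callback); rcu_barrier() waits for all
+	 * callbacks queued so far to finish.)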
+ */ + rcu_barrier(); } static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root) @@ -53,7 +50,7 @@ static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root) static inline bool tdp_mmu_next_root_valid(struct kvm *kvm, struct kvm_mmu_page *root) { - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link)) return false; @@ -88,22 +85,6 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, #define for_each_tdp_mmu_root(_kvm, _root) \ list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) -bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa) -{ - struct kvm_mmu_page *sp; - - if (!kvm->arch.tdp_mmu_enabled) - return false; - if (WARN_ON(!VALID_PAGE(hpa))) - return false; - - sp = to_shadow_page(hpa); - if (WARN_ON(!sp)) - return false; - - return sp->tdp_mmu_page && sp->root_count; -} - static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end, bool can_yield); @@ -111,7 +92,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root) { gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); WARN_ON(root->root_count); WARN_ON(!root->tdp_mmu_page); @@ -164,13 +145,13 @@ static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu) role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); /* Check for an existing root before allocating a new one. */ for_each_tdp_mmu_root(kvm, root) { if (root->role.word == role.word) { kvm_mmu_get_root(kvm, root); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); return root; } } @@ -180,7 +161,7 @@ static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu) list_add(&root->link, &kvm->arch.tdp_mmu_roots); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); return root; } @@ -196,8 +177,31 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu) return __pa(root->spt); } +static void tdp_mmu_free_sp(struct kvm_mmu_page *sp) +{ + free_page((unsigned long)sp->spt); + kmem_cache_free(mmu_page_header_cache, sp); +} + +/* + * This is called through call_rcu in order to free TDP page table memory + * safely with respect to other kernel threads that may be operating on + * the memory. + * By only accessing TDP MMU page table memory in an RCU read critical + * section, and freeing it after a grace period, lockless access to that + * memory won't use it after it is freed. + */ +static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head) +{ + struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page, + rcu_head); + + tdp_mmu_free_sp(sp); +} + static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, - u64 old_spte, u64 new_spte, int level); + u64 old_spte, u64 new_spte, int level, + bool shared); static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp) { @@ -235,6 +239,128 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn, } /** + * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU + * + * @kvm: kvm instance + * @sp: the new page + * @shared: This operation may not be running under the exclusive use of + * the MMU lock and the operation must synchronize with other + * threads that might be adding or removing pages. 
+ * @account_nx: This page replaces a NX large page and should be marked for + * eventual reclaim. + */ +static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp, + bool shared, bool account_nx) +{ + if (shared) + spin_lock(&kvm->arch.tdp_mmu_pages_lock); + else + lockdep_assert_held_write(&kvm->mmu_lock); + + list_add(&sp->link, &kvm->arch.tdp_mmu_pages); + if (account_nx) + account_huge_nx_page(kvm, sp); + + if (shared) + spin_unlock(&kvm->arch.tdp_mmu_pages_lock); +} + +/** + * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU + * + * @kvm: kvm instance + * @sp: the page to be removed + * @shared: This operation may not be running under the exclusive use of + * the MMU lock and the operation must synchronize with other + * threads that might be adding or removing pages. + */ +static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp, + bool shared) +{ + if (shared) + spin_lock(&kvm->arch.tdp_mmu_pages_lock); + else + lockdep_assert_held_write(&kvm->mmu_lock); + + list_del(&sp->link); + if (sp->lpage_disallowed) + unaccount_huge_nx_page(kvm, sp); + + if (shared) + spin_unlock(&kvm->arch.tdp_mmu_pages_lock); +} + +/** + * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure + * + * @kvm: kvm instance + * @pt: the page removed from the paging structure + * @shared: This operation may not be running under the exclusive use + * of the MMU lock and the operation must synchronize with other + * threads that might be modifying SPTEs. + * + * Given a page table that has been removed from the TDP paging structure, + * iterates through the page table to clear SPTEs and free child page tables. + */ +static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt, + bool shared) +{ + struct kvm_mmu_page *sp = sptep_to_sp(pt); + int level = sp->role.level; + gfn_t base_gfn = sp->gfn; + u64 old_child_spte; + u64 *sptep; + gfn_t gfn; + int i; + + trace_kvm_mmu_prepare_zap_page(sp); + + tdp_mmu_unlink_page(kvm, sp, shared); + + for (i = 0; i < PT64_ENT_PER_PAGE; i++) { + sptep = pt + i; + gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)); + + if (shared) { + /* + * Set the SPTE to a nonpresent value that other + * threads will not overwrite. If the SPTE was + * already marked as removed then another thread + * handling a page fault could overwrite it, so + * set the SPTE until it is set from some other + * value to the removed SPTE value. + */ + for (;;) { + old_child_spte = xchg(sptep, REMOVED_SPTE); + if (!is_removed_spte(old_child_spte)) + break; + cpu_relax(); + } + } else { + old_child_spte = READ_ONCE(*sptep); + + /* + * Marking the SPTE as a removed SPTE is not + * strictly necessary here as the MMU lock will + * stop other threads from concurrently modifying + * this SPTE. Using the removed SPTE value keeps + * the two branches consistent and simplifies + * the function. 
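+			 * Either way, handle_changed_spte() below
+			 * observes the same old_child_spte ->
+			 * REMOVED_SPTE transition.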
+ */ + WRITE_ONCE(*sptep, REMOVED_SPTE); + } + handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn, + old_child_spte, REMOVED_SPTE, level - 1, + shared); + } + + kvm_flush_remote_tlbs_with_address(kvm, gfn, + KVM_PAGES_PER_HPAGE(level)); + + call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback); +} + +/** * handle_changed_spte - handle bookkeeping associated with an SPTE change * @kvm: kvm instance * @as_id: the address space of the paging structure the SPTE was a part of @@ -242,22 +368,22 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn, * @old_spte: The value of the SPTE before the change * @new_spte: The value of the SPTE after the change * @level: the level of the PT the SPTE is part of in the paging structure + * @shared: This operation may not be running under the exclusive use of + * the MMU lock and the operation must synchronize with other + * threads that might be modifying SPTEs. * * Handle bookkeeping that might result from the modification of a SPTE. * This function must be called for all TDP SPTE modifications. */ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, - u64 old_spte, u64 new_spte, int level) + u64 old_spte, u64 new_spte, int level, + bool shared) { bool was_present = is_shadow_present_pte(old_spte); bool is_present = is_shadow_present_pte(new_spte); bool was_leaf = was_present && is_last_spte(old_spte, level); bool is_leaf = is_present && is_last_spte(new_spte, level); bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte); - u64 *pt; - struct kvm_mmu_page *sp; - u64 old_child_spte; - int i; WARN_ON(level > PT64_ROOT_MAX_LEVEL); WARN_ON(level < PG_LEVEL_4K); @@ -298,15 +424,19 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, */ if (!was_present && !is_present) { /* - * If this change does not involve a MMIO SPTE, it is - * unexpected. Log the change, though it should not impact the - * guest since both the former and current SPTEs are nonpresent. + * If this change does not involve a MMIO SPTE or removed SPTE, + * it is unexpected. Log the change, though it should not + * impact the guest since both the former and current SPTEs + * are nonpresent. */ - if (WARN_ON(!is_mmio_spte(old_spte) && !is_mmio_spte(new_spte))) + if (WARN_ON(!is_mmio_spte(old_spte) && + !is_mmio_spte(new_spte) && + !is_removed_spte(new_spte))) pr_err("Unexpected SPTE change! Nonpresent SPTEs\n" "should not be replaced with another,\n" "different nonpresent SPTE, unless one or both\n" - "are MMIO SPTEs.\n" + "are MMIO SPTEs, or the new SPTE is\n" + "a temporary removed SPTE.\n" "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d", as_id, gfn, old_spte, new_spte, level); return; @@ -321,54 +451,127 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, * Recursively handle child PTs if the change removed a subtree from * the paging structure. 
*/ - if (was_present && !was_leaf && (pfn_changed || !is_present)) { - pt = spte_to_child_pt(old_spte, level); - sp = sptep_to_sp(pt); + if (was_present && !was_leaf && (pfn_changed || !is_present)) + handle_removed_tdp_mmu_page(kvm, + spte_to_child_pt(old_spte, level), shared); +} + +static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, + u64 old_spte, u64 new_spte, int level, + bool shared) +{ + __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, + shared); + handle_changed_spte_acc_track(old_spte, new_spte, level); + handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, + new_spte, level); +} - trace_kvm_mmu_prepare_zap_page(sp); +/* + * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the + * associated bookkeeping + * + * @kvm: kvm instance + * @iter: a tdp_iter instance currently on the SPTE that should be set + * @new_spte: The value the SPTE should be set to + * Returns: true if the SPTE was set, false if it was not. If false is returned, + * this function will have no side-effects. + */ +static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm, + struct tdp_iter *iter, + u64 new_spte) +{ + u64 *root_pt = tdp_iter_root_pt(iter); + struct kvm_mmu_page *root = sptep_to_sp(root_pt); + int as_id = kvm_mmu_page_as_id(root); - list_del(&sp->link); + lockdep_assert_held_read(&kvm->mmu_lock); - if (sp->lpage_disallowed) - unaccount_huge_nx_page(kvm, sp); + /* + * Do not change removed SPTEs. Only the thread that froze the SPTE + * may modify it. + */ + if (iter->old_spte == REMOVED_SPTE) + return false; - for (i = 0; i < PT64_ENT_PER_PAGE; i++) { - old_child_spte = READ_ONCE(*(pt + i)); - WRITE_ONCE(*(pt + i), 0); - handle_changed_spte(kvm, as_id, - gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)), - old_child_spte, 0, level - 1); - } + if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte, + new_spte) != iter->old_spte) + return false; - kvm_flush_remote_tlbs_with_address(kvm, gfn, - KVM_PAGES_PER_HPAGE(level)); + handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, + iter->level, true); - free_page((unsigned long)pt); - kmem_cache_free(mmu_page_header_cache, sp); - } + return true; } -static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, - u64 old_spte, u64 new_spte, int level) +static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm, + struct tdp_iter *iter) { - __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level); - handle_changed_spte_acc_track(old_spte, new_spte, level); - handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, - new_spte, level); + /* + * Freeze the SPTE by setting it to a special, + * non-present value. This will stop other threads from + * immediately installing a present entry in its place + * before the TLBs are flushed. + */ + if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE)) + return false; + + kvm_flush_remote_tlbs_with_address(kvm, iter->gfn, + KVM_PAGES_PER_HPAGE(iter->level)); + + /* + * No other thread can overwrite the removed SPTE as they + * must either wait on the MMU lock or use + * tdp_mmu_set_spte_atomic which will not overrite the + * special removed SPTE value. No bookkeeping is needed + * here since the SPTE is going from non-present + * to non-present. 
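+	 *
+	 * (A false return from this function means the cmpxchg lost a
+	 * race with another updater; the caller bails out and the page
+	 * fault is simply retried, see kvm_tdp_mmu_map().)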
+ */ + WRITE_ONCE(*iter->sptep, 0); + + return true; } + +/* + * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping + * @kvm: kvm instance + * @iter: a tdp_iter instance currently on the SPTE that should be set + * @new_spte: The value the SPTE should be set to + * @record_acc_track: Notify the MM subsystem of changes to the accessed state + * of the page. Should be set unless handling an MMU + * notifier for access tracking. Leaving record_acc_track + * unset in that case prevents page accesses from being + * double counted. + * @record_dirty_log: Record the page as dirty in the dirty bitmap if + * appropriate for the change being made. Should be set + * unless performing certain dirty logging operations. + * Leaving record_dirty_log unset in that case prevents page + * writes from being double counted. + */ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte, bool record_acc_track, bool record_dirty_log) { - u64 *root_pt = tdp_iter_root_pt(iter); + tdp_ptep_t root_pt = tdp_iter_root_pt(iter); struct kvm_mmu_page *root = sptep_to_sp(root_pt); int as_id = kvm_mmu_page_as_id(root); - WRITE_ONCE(*iter->sptep, new_spte); + lockdep_assert_held_write(&kvm->mmu_lock); + + /* + * No thread should be using this function to set SPTEs to the + * temporary removed SPTE value. + * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic + * should be used. If operating under the MMU lock in write mode, the + * use of the removed SPTE should not be necessary. + */ + WARN_ON(iter->old_spte == REMOVED_SPTE); + + WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte); __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, - iter->level); + iter->level, false); if (record_acc_track) handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level); @@ -413,27 +616,46 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, _mmu->shadow_root_level, _start, _end) /* - * Flush the TLB if the process should drop kvm->mmu_lock. - * Return whether the caller still needs to flush the tlb. + * Yield if the MMU lock is contended or this thread needs to return control + * to the scheduler. + * + * If this function should yield and flush is set, it will perform a remote + * TLB flush before yielding. + * + * If this function yields, it will also reset the tdp_iter's walk over the + * paging structure and the calling function should skip to the next + * iteration to allow the iterator to continue its traversal from the + * paging structure root. + * + * Return true if this function yielded and the iterator's traversal was reset. + * Return false if a yield was not needed. */ -static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter) +static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, + struct tdp_iter *iter, bool flush) { - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { - kvm_flush_remote_tlbs(kvm); - cond_resched_lock(&kvm->mmu_lock); - tdp_iter_refresh_walk(iter); + /* Ensure forward progress has been made before yielding. 
*/ + if (iter->next_last_level_gfn == iter->yielded_gfn) return false; - } else { + + if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { + rcu_read_unlock(); + + if (flush) + kvm_flush_remote_tlbs(kvm); + + cond_resched_rwlock_write(&kvm->mmu_lock); + rcu_read_lock(); + + WARN_ON(iter->gfn > iter->next_last_level_gfn); + + tdp_iter_start(iter, iter->pt_path[iter->root_level - 1], + iter->root_level, iter->min_level, + iter->next_last_level_gfn); + return true; } -} -static void tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter) -{ - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { - cond_resched_lock(&kvm->mmu_lock); - tdp_iter_refresh_walk(iter); - } + return false; } /* @@ -453,7 +675,15 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, struct tdp_iter iter; bool flush_needed = false; + rcu_read_lock(); + tdp_root_for_each_pte(iter, root, start, end) { + if (can_yield && + tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) { + flush_needed = false; + continue; + } + if (!is_shadow_present_pte(iter.old_spte)) continue; @@ -468,12 +698,10 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, continue; tdp_mmu_set_spte(kvm, &iter, 0); - - if (can_yield) - flush_needed = tdp_mmu_iter_flush_cond_resched(kvm, &iter); - else - flush_needed = true; + flush_needed = true; } + + rcu_read_unlock(); return flush_needed; } @@ -517,21 +745,18 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write, int ret = 0; int make_spte_ret = 0; - if (unlikely(is_noslot_pfn(pfn))) { + if (unlikely(is_noslot_pfn(pfn))) new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); - trace_mark_mmio_spte(iter->sptep, iter->gfn, new_spte); - } else { + else make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn, pfn, iter->old_spte, prefault, true, map_writable, !shadow_accessed_mask, &new_spte); - trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep); - } if (new_spte == iter->old_spte) ret = RET_PF_SPURIOUS; - else - tdp_mmu_set_spte(vcpu->kvm, iter, new_spte); + else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) + return RET_PF_RETRY; /* * If the page fault was caused by a write but the page is write @@ -545,10 +770,16 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write, } /* If a MMIO SPTE is installed, the MMIO will need to be emulated. 
*/ - if (unlikely(is_mmio_spte(new_spte))) + if (unlikely(is_mmio_spte(new_spte))) { + trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn, + new_spte); ret = RET_PF_EMULATE; + } else + trace_kvm_mmu_set_spte(iter->level, iter->gfn, + rcu_dereference(iter->sptep)); - trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep); + trace_kvm_mmu_set_spte(iter->level, iter->gfn, + rcu_dereference(iter->sptep)); if (!prefault) vcpu->stat.pf_fixed++; @@ -586,6 +817,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, huge_page_disallowed, &req_level); trace_kvm_mmu_spte_requested(gpa, level, pfn); + + rcu_read_lock(); + tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { if (nx_huge_page_workaround_enabled) disallowed_hugepage_adjust(iter.old_spte, gfn, @@ -601,49 +835,61 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, */ if (is_shadow_present_pte(iter.old_spte) && is_large_pte(iter.old_spte)) { - tdp_mmu_set_spte(vcpu->kvm, &iter, 0); - - kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn, - KVM_PAGES_PER_HPAGE(iter.level)); + if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter)) + break; /* * The iter must explicitly re-read the spte here * because the new value informs the !present * path below. */ - iter.old_spte = READ_ONCE(*iter.sptep); + iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); } if (!is_shadow_present_pte(iter.old_spte)) { sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level); - list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages); child_pt = sp->spt; - clear_page(child_pt); + new_spte = make_nonleaf_spte(child_pt, !shadow_accessed_mask); - trace_kvm_mmu_get_page(sp, true); - if (huge_page_disallowed && req_level >= iter.level) - account_huge_nx_page(vcpu->kvm, sp); + if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, + new_spte)) { + tdp_mmu_link_page(vcpu->kvm, sp, true, + huge_page_disallowed && + req_level >= iter.level); - tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte); + trace_kvm_mmu_get_page(sp, true); + } else { + tdp_mmu_free_sp(sp); + break; + } } } - if (WARN_ON(iter.level != level)) + if (iter.level != level) { + rcu_read_unlock(); return RET_PF_RETRY; + } ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter, pfn, prefault); + rcu_read_unlock(); return ret; } -static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start, - unsigned long end, unsigned long data, - int (*handler)(struct kvm *kvm, struct kvm_memory_slot *slot, - struct kvm_mmu_page *root, gfn_t start, - gfn_t end, unsigned long data)) +static __always_inline int +kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, + unsigned long start, + unsigned long end, + unsigned long data, + int (*handler)(struct kvm *kvm, + struct kvm_memory_slot *slot, + struct kvm_mmu_page *root, + gfn_t start, + gfn_t end, + unsigned long data)) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; @@ -705,6 +951,8 @@ static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot, int young = 0; u64 new_spte = 0; + rcu_read_lock(); + tdp_root_for_each_leaf_pte(iter, root, start, end) { /* * If we have a non-accessed entry we don't need to change the @@ -736,6 +984,8 @@ static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot, trace_kvm_age_page(iter.gfn, iter.level, slot, young); } + rcu_read_unlock(); + return young; } @@ -781,6 +1031,8 @@ static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot, u64 new_spte; int need_flush = 0; + rcu_read_lock(); + WARN_ON(pte_huge(*ptep)); new_pfn = pte_pfn(*ptep); @@ 
-809,6 +1061,8 @@ static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot, if (need_flush) kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); + rcu_read_unlock(); + return 0; } @@ -832,21 +1086,27 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, u64 new_spte; bool spte_set = false; + rcu_read_lock(); + BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); for_each_tdp_pte_min_level(iter, root->spt, root->role.level, min_level, start, end) { + if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) + continue; + if (!is_shadow_present_pte(iter.old_spte) || - !is_last_spte(iter.old_spte, iter.level)) + !is_last_spte(iter.old_spte, iter.level) || + !(iter.old_spte & PT_WRITABLE_MASK)) continue; new_spte = iter.old_spte & ~PT_WRITABLE_MASK; tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); spte_set = true; - - tdp_mmu_iter_cond_resched(kvm, &iter); } + + rcu_read_unlock(); return spte_set; } @@ -888,7 +1148,12 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, u64 new_spte; bool spte_set = false; + rcu_read_lock(); + tdp_root_for_each_leaf_pte(iter, root, start, end) { + if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) + continue; + if (spte_ad_need_write_protect(iter.old_spte)) { if (is_writable_pte(iter.old_spte)) new_spte = iter.old_spte & ~PT_WRITABLE_MASK; @@ -903,9 +1168,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); spte_set = true; - - tdp_mmu_iter_cond_resched(kvm, &iter); } + + rcu_read_unlock(); return spte_set; } @@ -947,6 +1212,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, struct tdp_iter iter; u64 new_spte; + rcu_read_lock(); + tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), gfn + BITS_PER_LONG) { if (!mask) @@ -956,6 +1223,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, !(mask & (1UL << (iter.gfn - gfn)))) continue; + mask &= ~(1UL << (iter.gfn - gfn)); + if (wrprot || spte_ad_need_write_protect(iter.old_spte)) { if (is_writable_pte(iter.old_spte)) new_spte = iter.old_spte & ~PT_WRITABLE_MASK; @@ -969,9 +1238,9 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, } tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); - - mask &= ~(1UL << (iter.gfn - gfn)); } + + rcu_read_unlock(); } /* @@ -989,7 +1258,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root; int root_as_id; - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); for_each_tdp_mmu_root(kvm, root) { root_as_id = kvm_mmu_page_as_id(root); if (root_as_id != slot->as_id) @@ -1000,81 +1269,43 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, } /* - * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is - * only used for PML, and so will involve setting the dirty bit on each SPTE. - * Returns true if an SPTE has been changed and the TLBs need to be flushed. 
- */ -static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, - gfn_t start, gfn_t end) -{ - struct tdp_iter iter; - u64 new_spte; - bool spte_set = false; - - tdp_root_for_each_pte(iter, root, start, end) { - if (!is_shadow_present_pte(iter.old_spte)) - continue; - - new_spte = iter.old_spte | shadow_dirty_mask; - - tdp_mmu_set_spte(kvm, &iter, new_spte); - spte_set = true; - - tdp_mmu_iter_cond_resched(kvm, &iter); - } - - return spte_set; -} - -/* - * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is - * only used for PML, and so will involve setting the dirty bit on each SPTE. - * Returns true if an SPTE has been changed and the TLBs need to be flushed. - */ -bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot) -{ - struct kvm_mmu_page *root; - int root_as_id; - bool spte_set = false; - - for_each_tdp_mmu_root_yield_safe(kvm, root) { - root_as_id = kvm_mmu_page_as_id(root); - if (root_as_id != slot->as_id) - continue; - - spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn, - slot->base_gfn + slot->npages); - } - return spte_set; -} - -/* * Clear leaf entries which could be replaced by large mappings, for * GFNs within the slot. */ static void zap_collapsible_spte_range(struct kvm *kvm, struct kvm_mmu_page *root, - gfn_t start, gfn_t end) + struct kvm_memory_slot *slot) { + gfn_t start = slot->base_gfn; + gfn_t end = start + slot->npages; struct tdp_iter iter; kvm_pfn_t pfn; bool spte_set = false; + rcu_read_lock(); + tdp_root_for_each_pte(iter, root, start, end) { + if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) { + spte_set = false; + continue; + } + if (!is_shadow_present_pte(iter.old_spte) || !is_last_spte(iter.old_spte, iter.level)) continue; pfn = spte_to_pfn(iter.old_spte); if (kvm_is_reserved_pfn(pfn) || - !PageTransCompoundMap(pfn_to_page(pfn))) + iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn, + pfn, PG_LEVEL_NUM)) continue; tdp_mmu_set_spte(kvm, &iter, 0); - spte_set = tdp_mmu_iter_flush_cond_resched(kvm, &iter); + spte_set = true; } + rcu_read_unlock(); if (spte_set) kvm_flush_remote_tlbs(kvm); } @@ -1084,7 +1315,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm, * be replaced by large mappings, for GFNs within the slot. 
*/ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, - const struct kvm_memory_slot *slot) + struct kvm_memory_slot *slot) { struct kvm_mmu_page *root; int root_as_id; @@ -1094,8 +1325,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, if (root_as_id != slot->as_id) continue; - zap_collapsible_spte_range(kvm, root, slot->base_gfn, - slot->base_gfn + slot->npages); + zap_collapsible_spte_range(kvm, root, slot); } } @@ -1111,6 +1341,8 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, u64 new_spte; bool spte_set = false; + rcu_read_lock(); + tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) { if (!is_writable_pte(iter.old_spte)) break; @@ -1122,6 +1354,8 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, spte_set = true; } + rcu_read_unlock(); + return spte_set; } @@ -1137,7 +1371,7 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, int root_as_id; bool spte_set = false; - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); for_each_tdp_mmu_root(kvm, root) { root_as_id = kvm_mmu_page_as_id(root); if (root_as_id != slot->as_id) @@ -1162,10 +1396,14 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, *root_level = vcpu->arch.mmu->shadow_root_level; + rcu_read_lock(); + tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { leaf = iter.level; sptes[leaf] = iter.old_spte; } + rcu_read_unlock(); + return leaf; } diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index cbbdbadd1526..3b761c111bff 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -5,10 +5,6 @@ #include <linux/kvm_host.h> -void kvm_mmu_init_tdp_mmu(struct kvm *kvm); -void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm); - -bool is_tdp_mmu_root(struct kvm *kvm, hpa_t root); hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu); void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root); @@ -37,9 +33,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long mask, bool wrprot); -bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot); void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, - const struct kvm_memory_slot *slot); + struct kvm_memory_slot *slot); bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn); @@ -47,4 +42,32 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level); +#ifdef CONFIG_X86_64 +void kvm_mmu_init_tdp_mmu(struct kvm *kvm); +void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm); +static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; } +static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; } +#else +static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {} +static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {} +static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; } +static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; } +#endif + +static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa) +{ + struct kvm_mmu_page *sp; + + if (!is_tdp_mmu_enabled(kvm)) + return false; + if (WARN_ON(!VALID_PAGE(hpa))) + return false; + + sp = to_shadow_page(hpa); + if (WARN_ON(!sp)) + return false; + + return is_tdp_mmu_page(sp) && sp->root_count; +} + #endif /* __KVM_X86_MMU_TDP_MMU_H */ diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c 
index f472fdb6ae7e..a8502e02f479 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -75,7 +75,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) /* variable MTRRs */ WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR)); - mask = (~0ULL) << cpuid_maxphyaddr(vcpu); + mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu); if ((msr & 1) == 0) { /* MTRR base */ if (!valid_mtrr_type(data & 0xff)) @@ -351,14 +351,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) if (var_mtrr_range_is_valid(cur)) list_del(&mtrr_state->var_ranges[index].node); - /* Extend the mask with all 1 bits to the left, since those - * bits must implicitly be 0. The bits are then cleared - * when reading them. + /* + * Set all illegal GPA bits in the mask, since those bits must + * implicitly be 0. The bits are then cleared when reading them. */ if (!is_mtrr_mask) cur->base = data; else - cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu)); + cur->mask = data | kvm_vcpu_reserved_gpa_bits_raw(vcpu); /* add it to the list if it's enabled. */ if (var_mtrr_range_is_valid(cur)) { @@ -426,7 +426,7 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) else *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask; - *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1; + *pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu); } return 0; diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 67741d2a0308..827886c12c16 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -373,7 +373,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) return 1; if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) && - (kvm_x86_ops.get_cpl(vcpu) != 0) && + (static_call(kvm_x86_get_cpl)(vcpu) != 0) && (kvm_read_cr0(vcpu) & X86_CR0_PE)) return 1; @@ -383,8 +383,11 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu) { - if (lapic_in_kernel(vcpu)) + if (lapic_in_kernel(vcpu)) { + if (kvm_x86_ops.pmu_ops->deliver_pmi) + kvm_x86_ops.pmu_ops->deliver_pmi(vcpu); kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC); + } } bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) @@ -473,6 +476,9 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu) pmc_stop_counter(pmc); } + if (kvm_x86_ops.pmu_ops->cleanup) + kvm_x86_ops.pmu_ops->cleanup(vcpu); + bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX); } diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 067fef51760c..7b30bc967af3 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -39,6 +39,8 @@ struct kvm_pmu_ops { void (*refresh)(struct kvm_vcpu *vcpu); void (*init)(struct kvm_vcpu *vcpu); void (*reset)(struct kvm_vcpu *vcpu); + void (*deliver_pmi)(struct kvm_vcpu *vcpu); + void (*cleanup)(struct kvm_vcpu *vcpu); }; static inline u64 pmc_bitmask(struct kvm_pmc *pmc) diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 0ef84d57b72e..78bdcfac4e40 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -298,6 +298,23 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu) return 0; } +static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source, + u32 icrl, u32 icrh) +{ + struct kvm_vcpu *vcpu; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) { + bool m = kvm_apic_match_dest(vcpu, source, + icrl & APIC_SHORT_MASK, + GET_APIC_DEST_FIELD(icrh), + icrl & APIC_DEST_MASK); + + if (m && !avic_vcpu_is_running(vcpu)) + kvm_vcpu_wake_up(vcpu); + } +} + int avic_incomplete_ipi_interception(struct vcpu_svm *svm) { u32 icrh = 
svm->vmcb->control.exit_info_1 >> 32; @@ -324,28 +341,14 @@ int avic_incomplete_ipi_interception(struct vcpu_svm *svm) kvm_lapic_reg_write(apic, APIC_ICR2, icrh); kvm_lapic_reg_write(apic, APIC_ICR, icrl); break; - case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: { - int i; - struct kvm_vcpu *vcpu; - struct kvm *kvm = svm->vcpu.kvm; - struct kvm_lapic *apic = svm->vcpu.arch.apic; - + case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: /* * At this point, we expect that the AVIC HW has already * set the appropriate IRR bits on the valid target * vcpus. So, we just need to kick the appropriate vcpu. */ - kvm_for_each_vcpu(i, vcpu, kvm) { - bool m = kvm_apic_match_dest(vcpu, apic, - icrl & APIC_SHORT_MASK, - GET_APIC_DEST_FIELD(icrh), - icrl & APIC_DEST_MASK); - - if (m && !avic_vcpu_is_running(vcpu)) - kvm_vcpu_wake_up(vcpu); - } + avic_kick_target_vcpus(svm->vcpu.kvm, apic, icrl, icrh); break; - } case AVIC_IPI_FAILURE_INVALID_TARGET: WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n", index, svm->vcpu.vcpu_id, icrh, icrl); diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index db30670dd8c4..35891d9a1099 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -51,6 +51,23 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu, nested_svm_vmexit(svm); } +static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault) +{ + struct vcpu_svm *svm = to_svm(vcpu); + WARN_ON(!is_guest_mode(vcpu)); + + if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) && + !svm->nested.nested_run_pending) { + svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR; + svm->vmcb->control.exit_code_hi = 0; + svm->vmcb->control.exit_info_1 = fault->error_code; + svm->vmcb->control.exit_info_2 = fault->address; + nested_svm_vmexit(svm); + } else { + kvm_inject_page_fault(vcpu, fault); + } +} + static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index) { struct vcpu_svm *svm = to_svm(vcpu); @@ -58,7 +75,7 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index) u64 pdpte; int ret; - ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte, + ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte, offset_in_page(cr3) + index * 8, 8); if (ret) return 0; @@ -248,7 +265,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12) if (vmcb12_lma) { if (!(vmcb12->save.cr4 & X86_CR4_PAE) || !(vmcb12->save.cr0 & X86_CR0_PE) || - (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits)) + kvm_vcpu_is_illegal_gpa(vcpu, vmcb12->save.cr3)) return false; } if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4)) @@ -345,7 +362,7 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm) static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_npt) { - if (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)) + if (kvm_vcpu_is_illegal_gpa(vcpu, cr3)) return -EINVAL; if (!nested_npt && is_pae_paging(vcpu) && @@ -392,7 +409,7 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12) svm->vmcb->save.rsp = vmcb12->save.rsp; svm->vmcb->save.rip = vmcb12->save.rip; svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1; - svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_FIXED_1 | DR6_RTM; + svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW; svm->vmcb->save.cpl = vmcb12->save.cpl; } @@ -436,16 +453,33 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa, { int ret; + trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa, 
+ vmcb12->save.rip, + vmcb12->control.int_ctl, + vmcb12->control.event_inj, + vmcb12->control.nested_ctl); + + trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff, + vmcb12->control.intercepts[INTERCEPT_CR] >> 16, + vmcb12->control.intercepts[INTERCEPT_EXCEPTION], + vmcb12->control.intercepts[INTERCEPT_WORD3], + vmcb12->control.intercepts[INTERCEPT_WORD4], + vmcb12->control.intercepts[INTERCEPT_WORD5]); + + svm->nested.vmcb12_gpa = vmcb12_gpa; load_nested_vmcb_control(svm, &vmcb12->control); - nested_prepare_vmcb_save(svm, vmcb12); nested_prepare_vmcb_control(svm); + nested_prepare_vmcb_save(svm, vmcb12); ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3, nested_npt_enabled(svm)); if (ret) return ret; + if (!npt_enabled) + svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested; + svm_set_gif(svm, true); return 0; @@ -489,18 +523,6 @@ int nested_svm_vmrun(struct vcpu_svm *svm) goto out; } - trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa, - vmcb12->save.rip, - vmcb12->control.int_ctl, - vmcb12->control.event_inj, - vmcb12->control.nested_ctl); - - trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff, - vmcb12->control.intercepts[INTERCEPT_CR] >> 16, - vmcb12->control.intercepts[INTERCEPT_EXCEPTION], - vmcb12->control.intercepts[INTERCEPT_WORD3], - vmcb12->control.intercepts[INTERCEPT_WORD4], - vmcb12->control.intercepts[INTERCEPT_WORD5]); /* Clear internal status */ kvm_clear_exception_queue(&svm->vcpu); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 48017fef1cd9..874ea309279f 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -22,6 +22,7 @@ #include "x86.h" #include "svm.h" +#include "svm_ops.h" #include "cpuid.h" #include "trace.h" @@ -1041,6 +1042,74 @@ e_unpin_memory: return ret; } +static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + void __user *report = (void __user *)(uintptr_t)argp->data; + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_attestation_report *data; + struct kvm_sev_attestation_report params; + void __user *p; + void *blob = NULL; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); + if (!data) + return -ENOMEM; + + /* User wants to query the blob length */ + if (!params.len) + goto cmd; + + p = (void __user *)(uintptr_t)params.uaddr; + if (p) { + if (params.len > SEV_FW_BLOB_MAX_SIZE) { + ret = -EINVAL; + goto e_free; + } + + ret = -ENOMEM; + blob = kmalloc(params.len, GFP_KERNEL); + if (!blob) + goto e_free; + + data->address = __psp_pa(blob); + data->len = params.len; + memcpy(data->mnonce, params.mnonce, sizeof(params.mnonce)); + } +cmd: + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &argp->error); + /* + * If we query the session length, FW responded with expected data. 
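+	 *
+	 * (Typical usage is two calls: first with params.len == 0 to
+	 * learn the required blob size, then again with a buffer of
+	 * that size at params.uaddr.)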
+ */ + if (!params.len) + goto done; + + if (ret) + goto e_free_blob; + + if (blob) { + if (copy_to_user(p, blob, params.len)) + ret = -EFAULT; + } + +done: + params.len = data->len; + if (copy_to_user(report, ¶ms, sizeof(params))) + ret = -EFAULT; +e_free_blob: + kfree(blob); +e_free: + kfree(data); + return ret; +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1091,6 +1160,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_LAUNCH_SECRET: r = sev_launch_secret(kvm, &sev_cmd); break; + case KVM_SEV_GET_ATTESTATION_REPORT: + r = sev_get_attestation_report(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out; @@ -1994,29 +2066,17 @@ void sev_es_create_vcpu(struct vcpu_svm *svm) sev_enc_bit)); } -void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu) +void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu) { struct svm_cpu_data *sd = per_cpu(svm_data, cpu); struct vmcb_save_area *hostsa; - unsigned int i; /* * As an SEV-ES guest, hardware will restore the host state on VMEXIT, * of which one step is to perform a VMLOAD. Since hardware does not * perform a VMSAVE on VMRUN, the host savearea must be updated. */ - asm volatile(__ex("vmsave %0") : : "a" (__sme_page_pa(sd->save_area)) : "memory"); - - /* - * Certain MSRs are restored on VMEXIT, only save ones that aren't - * restored. - */ - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) { - if (host_save_user_msrs[i].sev_es_restored) - continue; - - rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]); - } + vmsave(__sme_page_pa(sd->save_area)); /* XCR0 is restored on VMEXIT, save the current host value */ hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400); @@ -2029,22 +2089,6 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu) hostsa->xss = host_xss; } -void sev_es_vcpu_put(struct vcpu_svm *svm) -{ - unsigned int i; - - /* - * Certain MSRs are restored on VMEXIT and were saved with vmsave in - * sev_es_vcpu_load() above. Only restore ones that weren't. 
- */ - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) { - if (host_save_user_msrs[i].sev_es_restored) - continue; - - wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]); - } -} - void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) { struct vcpu_svm *svm = to_svm(vcpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 3442d44ca53b..baee91c1e936 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -41,6 +41,7 @@ #include "trace.h" #include "svm.h" +#include "svm_ops.h" #define __ex(x) __kvm_handle_fault_on_reboot(x) @@ -200,9 +201,9 @@ module_param(sev_es, int, 0444); bool __read_mostly dump_invalid_vmcb; module_param(dump_invalid_vmcb, bool, 0644); -static u8 rsm_ins_bytes[] = "\x0f\xaa"; +static bool svm_gp_erratum_intercept = true; -static void svm_complete_interrupts(struct vcpu_svm *svm); +static u8 rsm_ins_bytes[] = "\x0f\xaa"; static unsigned long iopm_base; @@ -246,21 +247,6 @@ u32 svm_msrpm_offset(u32 msr) #define MAX_INST_SIZE 15 -static inline void clgi(void) -{ - asm volatile (__ex("clgi")); -} - -static inline void stgi(void) -{ - asm volatile (__ex("stgi")); -} - -static inline void invlpga(unsigned long addr, u32 asid) -{ - asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr)); -} - static int get_max_npt_level(void) { #ifdef CONFIG_X86_64 @@ -288,6 +274,9 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) if (!(efer & EFER_SVME)) { svm_leave_nested(svm); svm_set_gif(svm, true); + /* #GP intercept is still needed for vmware backdoor */ + if (!enable_vmware_backdoor) + clr_exception_intercept(svm, GP_VECTOR); /* * Free the nested guest state, unless we are in SMM. @@ -304,6 +293,9 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) vcpu->arch.efer = old_efer; return ret; } + + if (svm_gp_erratum_intercept) + set_exception_intercept(svm, GP_VECTOR); } } @@ -925,15 +917,15 @@ static __init void svm_set_cpu_caps(void) if (npt_enabled) kvm_cpu_cap_set(X86_FEATURE_NPT); + + /* Nested VM can receive #VMEXIT instead of triggering #GP */ + kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK); } /* CPUID 0x80000008 */ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) || boot_cpu_has(X86_FEATURE_AMD_SSBD)) kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); - - /* Enable INVPCID feature */ - kvm_cpu_cap_check_and_set(X86_FEATURE_INVPCID); } static __init int svm_hardware_setup(void) @@ -1032,6 +1024,9 @@ static __init int svm_hardware_setup(void) } } + if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK)) + svm_gp_erratum_intercept = false; + if (vgif) { if (!boot_cpu_has(X86_FEATURE_VGIF)) vgif = false; @@ -1105,12 +1100,12 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) static void svm_check_invpcid(struct vcpu_svm *svm) { /* - * Intercept INVPCID instruction only if shadow page table is - * enabled. Interception is not required with nested page table - * enabled. + * Intercept INVPCID if shadow paging is enabled to sync/free shadow + * roots, or if INVPCID is disabled in the guest to inject #UD. 
*/ if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) { - if (!npt_enabled) + if (!npt_enabled || + !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID)) svm_set_intercept(svm, INTERCEPT_INVPCID); else svm_clr_intercept(svm, INTERCEPT_INVPCID); @@ -1205,9 +1200,10 @@ static void init_vmcb(struct vcpu_svm *svm) init_sys_seg(&save->ldtr, SEG_TYPE_LDT); init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); + svm_set_cr4(&svm->vcpu, 0); svm_set_efer(&svm->vcpu, 0); save->dr6 = 0xffff0ff0; - kvm_set_rflags(&svm->vcpu, 2); + kvm_set_rflags(&svm->vcpu, X86_EFLAGS_FIXED); save->rip = 0x0000fff0; svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; @@ -1366,6 +1362,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu) svm->vmsa = page_address(vmsa_page); svm->asid_generation = 0; + svm->guest_state_loaded = false; init_vmcb(svm); svm_init_osvw(vcpu); @@ -1413,30 +1410,31 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); } -static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - struct svm_cpu_data *sd = per_cpu(svm_data, cpu); - int i; + struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); + unsigned int i; - if (unlikely(cpu != vcpu->cpu)) { - svm->asid_generation = 0; - vmcb_mark_all_dirty(svm->vmcb); - } + if (svm->guest_state_loaded) + return; + /* + * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save + * area (non-sev-es). Save ones that aren't so we can restore them + * individually later. + */ + for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) + rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); + + /* + * Save additional host state that will be restored on VMEXIT (sev-es) + * or subsequent vmload of host save area. + */ if (sev_es_guest(svm->vcpu.kvm)) { - sev_es_vcpu_load(svm, cpu); + sev_es_prepare_guest_switch(svm, vcpu->cpu); } else { -#ifdef CONFIG_X86_64 - rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base); -#endif - savesegment(fs, svm->host.fs); - savesegment(gs, svm->host.gs); - svm->host.ldt = kvm_read_ldt(); - - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) - rdmsrl(host_save_user_msrs[i].index, - svm->host_user_msrs[i]); + vmsave(__sme_page_pa(sd->save_area)); } if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { @@ -1446,10 +1444,42 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio); } } + /* This assumes that the kernel never uses MSR_TSC_AUX */ if (static_cpu_has(X86_FEATURE_RDTSCP)) wrmsrl(MSR_TSC_AUX, svm->tsc_aux); + svm->guest_state_loaded = true; +} + +static void svm_prepare_host_switch(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + unsigned int i; + + if (!svm->guest_state_loaded) + return; + + /* + * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save + * area (non-sev-es). Restore the ones that weren't. 
+ */ + for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) + wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); + + svm->guest_state_loaded = false; +} + +static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + struct svm_cpu_data *sd = per_cpu(svm_data, cpu); + + if (unlikely(cpu != vcpu->cpu)) { + svm->asid_generation = 0; + vmcb_mark_all_dirty(svm->vmcb); + } + if (sd->current_vmcb != svm->vmcb) { sd->current_vmcb = svm->vmcb; indirect_branch_prediction_barrier(); @@ -1459,30 +1489,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) static void svm_vcpu_put(struct kvm_vcpu *vcpu) { - struct vcpu_svm *svm = to_svm(vcpu); - int i; - avic_vcpu_put(vcpu); + svm_prepare_host_switch(vcpu); ++vcpu->stat.host_state_reload; - if (sev_es_guest(svm->vcpu.kvm)) { - sev_es_vcpu_put(svm); - } else { - kvm_load_ldt(svm->host.ldt); -#ifdef CONFIG_X86_64 - loadsegment(fs, svm->host.fs); - wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase); - load_gs_index(svm->host.gs); -#else -#ifdef CONFIG_X86_32_LAZY_GS - loadsegment(gs, svm->host.gs); -#endif -#endif - - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) - wrmsrl(host_save_user_msrs[i].index, - svm->host_user_msrs[i]); - } } static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) @@ -1815,7 +1825,7 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, vmcb_mark_dirty(svm->vmcb, VMCB_SEG); } -static void update_exception_bitmap(struct kvm_vcpu *vcpu) +static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -1865,7 +1875,7 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) get_debugreg(vcpu->arch.db[2], 2); get_debugreg(vcpu->arch.db[3], 3); /* - * We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here, + * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here, * because db_interception might need it. We can do it before vmentry. */ vcpu->arch.dr6 = svm->vmcb->save.dr6; @@ -1916,7 +1926,7 @@ static int db_interception(struct vcpu_svm *svm) if (!(svm->vcpu.guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && !svm->nmi_singlestep) { - u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1; + u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload); return 1; } @@ -1962,24 +1972,6 @@ static int ac_interception(struct vcpu_svm *svm) return 1; } -static int gp_interception(struct vcpu_svm *svm) -{ - struct kvm_vcpu *vcpu = &svm->vcpu; - u32 error_code = svm->vmcb->control.exit_info_1; - - WARN_ON_ONCE(!enable_vmware_backdoor); - - /* - * VMware backdoor emulation on #GP interception only handles IN{S}, - * OUT{S}, and RDPMC, none of which generate a non-zero error code. 
- */ - if (error_code) { - kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); - return 1; - } - return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP); -} - static bool is_erratum_383(void) { int err, i; @@ -2178,6 +2170,107 @@ static int vmrun_interception(struct vcpu_svm *svm) return nested_svm_vmrun(svm); } +enum { + NONE_SVM_INSTR, + SVM_INSTR_VMRUN, + SVM_INSTR_VMLOAD, + SVM_INSTR_VMSAVE, +}; + +/* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */ +static int svm_instr_opcode(struct kvm_vcpu *vcpu) +{ + struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; + + if (ctxt->b != 0x1 || ctxt->opcode_len != 2) + return NONE_SVM_INSTR; + + switch (ctxt->modrm) { + case 0xd8: /* VMRUN */ + return SVM_INSTR_VMRUN; + case 0xda: /* VMLOAD */ + return SVM_INSTR_VMLOAD; + case 0xdb: /* VMSAVE */ + return SVM_INSTR_VMSAVE; + default: + break; + } + + return NONE_SVM_INSTR; +} + +static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) +{ + const int guest_mode_exit_codes[] = { + [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN, + [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD, + [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE, + }; + int (*const svm_instr_handlers[])(struct vcpu_svm *svm) = { + [SVM_INSTR_VMRUN] = vmrun_interception, + [SVM_INSTR_VMLOAD] = vmload_interception, + [SVM_INSTR_VMSAVE] = vmsave_interception, + }; + struct vcpu_svm *svm = to_svm(vcpu); + int ret; + + if (is_guest_mode(vcpu)) { + svm->vmcb->control.exit_code = guest_mode_exit_codes[opcode]; + svm->vmcb->control.exit_info_1 = 0; + svm->vmcb->control.exit_info_2 = 0; + + /* Returns '1' or -errno on failure, '0' on success. */ + ret = nested_svm_vmexit(svm); + if (ret) + return ret; + return 1; + } + return svm_instr_handlers[opcode](svm); +} + +/* + * #GP handling code. Note that #GP can be triggered under the following two + * cases: + * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on + * some AMD CPUs when EAX of these instructions are in the reserved memory + * regions (e.g. SMM memory on host). + * 2) VMware backdoor + */ +static int gp_interception(struct vcpu_svm *svm) +{ + struct kvm_vcpu *vcpu = &svm->vcpu; + u32 error_code = svm->vmcb->control.exit_info_1; + int opcode; + + /* Both #GP cases have zero error_code */ + if (error_code) + goto reinject; + + /* Decode the instruction for usage later */ + if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) + goto reinject; + + opcode = svm_instr_opcode(vcpu); + + if (opcode == NONE_SVM_INSTR) { + if (!enable_vmware_backdoor) + goto reinject; + + /* + * VMware backdoor emulation on #GP interception only handles + * IN{S}, OUT{S}, and RDPMC. 
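For reference, svm_instr_opcode() above keys off the raw encoding of the SVM instructions, which per the AMD APM share the two-byte opcode 0F 01, with the ModRM byte selecting the operation:

    /*
     * Worked example of the decode above:
     *
     *   VMRUN  = 0F 01 D8
     *   VMLOAD = 0F 01 DA
     *   VMSAVE = 0F 01 DB
     *
     * After x86_decode_emulated_instruction(), a trapped VMLOAD yields
     * ctxt->opcode_len == 2, ctxt->b == 0x1 (the second opcode byte)
     * and ctxt->modrm == 0xda, so the helper returns SVM_INSTR_VMLOAD.
     */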
+ */ + if (!is_guest_mode(vcpu)) + return kvm_emulate_instruction(vcpu, + EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE); + } else + return emulate_svm_instr(vcpu, opcode); + +reinject: + kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); + return 1; +} + void svm_set_gif(struct vcpu_svm *svm, bool value) { if (value) { @@ -2265,11 +2358,8 @@ static int xsetbv_interception(struct vcpu_svm *svm) u64 new_bv = kvm_read_edx_eax(&svm->vcpu); u32 index = kvm_rcx_read(&svm->vcpu); - if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { - return kvm_skip_emulated_instruction(&svm->vcpu); - } - - return 1; + int err = kvm_set_xcr(&svm->vcpu, index, new_bv); + return kvm_complete_insn_gp(&svm->vcpu, err); } static int rdpru_interception(struct vcpu_svm *svm) @@ -2530,6 +2620,7 @@ static int dr_interception(struct vcpu_svm *svm) { int reg, dr; unsigned long val; + int err = 0; if (svm->vcpu.guest_debug == 0) { /* @@ -2547,20 +2638,16 @@ static int dr_interception(struct vcpu_svm *svm) reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; - - if (dr >= 16) { /* mov to DRn */ - if (!kvm_require_dr(&svm->vcpu, dr - 16)) - return 1; + if (dr >= 16) { /* mov to DRn */ + dr -= 16; val = kvm_register_read(&svm->vcpu, reg); - kvm_set_dr(&svm->vcpu, dr - 16, val); + err = kvm_set_dr(&svm->vcpu, dr, val); } else { - if (!kvm_require_dr(&svm->vcpu, dr)) - return 1; kvm_get_dr(&svm->vcpu, dr, &val); kvm_register_write(&svm->vcpu, reg, val); } - return kvm_skip_emulated_instruction(&svm->vcpu); + return kvm_complete_insn_gp(&svm->vcpu, err); } static int cr8_write_interception(struct vcpu_svm *svm) @@ -3354,7 +3441,7 @@ static void svm_set_irq(struct kvm_vcpu *vcpu) SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR; } -static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) +static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3479,7 +3566,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) return !svm_interrupt_blocked(vcpu); } -static void enable_irq_window(struct kvm_vcpu *vcpu) +static void svm_enable_irq_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3503,7 +3590,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu) } } -static void enable_nmi_window(struct kvm_vcpu *vcpu) +static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3560,10 +3647,6 @@ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) invlpga(gva, svm->vmcb->control.asid); } -static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) -{ -} - static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3708,16 +3791,11 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, if (sev_es_guest(svm->vcpu.kvm)) { __svm_sev_es_vcpu_run(svm->vmcb_pa); } else { + struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); + __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs); -#ifdef CONFIG_X86_64 - native_wrmsrl(MSR_GS_BASE, svm->host.gs_base); -#else - loadsegment(fs, svm->host.fs); -#ifndef CONFIG_X86_32_LAZY_GS - loadsegment(gs, svm->host.gs); -#endif -#endif + vmload(__sme_page_pa(sd->save_area)); } /* @@ -3783,7 +3861,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) svm_set_dr6(svm, vcpu->arch.dr6); else - svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM); + 
svm_set_dr6(svm, DR6_ACTIVE_LOW); clgi(); kvm_load_guest_xsave_state(vcpu); @@ -3978,7 +4056,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) if (sev_guest(vcpu->kvm)) { best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0); if (best) - vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f)); + vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f)); } if (!kvm_vcpu_apicv_active(vcpu)) @@ -4285,7 +4363,7 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) return ret; } -static void enable_smi_window(struct kvm_vcpu *vcpu) +static void svm_enable_smi_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4439,7 +4517,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .vcpu_blocking = svm_vcpu_blocking, .vcpu_unblocking = svm_vcpu_unblocking, - .update_exception_bitmap = update_exception_bitmap, + .update_exception_bitmap = svm_update_exception_bitmap, .get_msr_feature = svm_get_msr_feature, .get_msr = svm_get_msr, .set_msr = svm_set_msr, @@ -4482,9 +4560,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .nmi_allowed = svm_nmi_allowed, .get_nmi_mask = svm_get_nmi_mask, .set_nmi_mask = svm_set_nmi_mask, - .enable_nmi_window = enable_nmi_window, - .enable_irq_window = enable_irq_window, - .update_cr8_intercept = update_cr8_intercept, + .enable_nmi_window = svm_enable_nmi_window, + .enable_irq_window = svm_enable_irq_window, + .update_cr8_intercept = svm_update_cr8_intercept, .set_virtual_apic_mode = svm_set_virtual_apic_mode, .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl, .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons, @@ -4527,7 +4605,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .smi_allowed = svm_smi_allowed, .pre_enter_smm = svm_pre_enter_smm, .pre_leave_smm = svm_pre_leave_smm, - .enable_smi_window = enable_smi_window, + .enable_smi_window = svm_enable_smi_window, .mem_enc_op = svm_mem_enc_op, .mem_enc_reg_region = svm_register_enc_region, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 6e7d070f8b86..39e071fdab0c 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -23,22 +23,8 @@ #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT) -static const struct svm_host_save_msrs { - u32 index; /* Index of the MSR */ - bool sev_es_restored; /* True if MSR is restored on SEV-ES VMEXIT */ -} host_save_user_msrs[] = { -#ifdef CONFIG_X86_64 - { .index = MSR_STAR, .sev_es_restored = true }, - { .index = MSR_LSTAR, .sev_es_restored = true }, - { .index = MSR_CSTAR, .sev_es_restored = true }, - { .index = MSR_SYSCALL_MASK, .sev_es_restored = true }, - { .index = MSR_KERNEL_GS_BASE, .sev_es_restored = true }, - { .index = MSR_FS_BASE, .sev_es_restored = true }, -#endif - { .index = MSR_IA32_SYSENTER_CS, .sev_es_restored = true }, - { .index = MSR_IA32_SYSENTER_ESP, .sev_es_restored = true }, - { .index = MSR_IA32_SYSENTER_EIP, .sev_es_restored = true }, - { .index = MSR_TSC_AUX, .sev_es_restored = false }, +static const u32 host_save_user_msrs[] = { + MSR_TSC_AUX, }; #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs) @@ -130,12 +116,6 @@ struct vcpu_svm { u64 next_rip; u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; - struct { - u16 fs; - u16 gs; - u16 ldt; - u64 gs_base; - } host; u64 spec_ctrl; /* @@ -192,6 +172,8 @@ struct vcpu_svm { u64 ghcb_sa_len; bool ghcb_sa_sync; bool ghcb_sa_free; + + bool guest_state_loaded; }; struct svm_cpu_data { @@ -587,9 +569,8 @@ int sev_handle_vmgexit(struct vcpu_svm *svm); int sev_es_string_io(struct 
vcpu_svm *svm, int size, unsigned int port, int in);
 void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_create_vcpu(struct vcpu_svm *svm);
-void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
-void sev_es_vcpu_put(struct vcpu_svm *svm);
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
+void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);

 /* vmenter.S */

diff --git a/arch/x86/kvm/svm/svm_ops.h b/arch/x86/kvm/svm/svm_ops.h
new file mode 100644
index 000000000000..8170f2a5a16f
--- /dev/null
+++ b/arch/x86/kvm/svm/svm_ops.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_SVM_OPS_H
+#define __KVM_X86_SVM_OPS_H
+
+#include <linux/compiler_types.h>
+
+#include <asm/kvm_host.h>
+
+#define svm_asm(insn, clobber...)				\
+do {								\
+	asm_volatile_goto("1: " __stringify(insn) "\n\t"	\
+			  _ASM_EXTABLE(1b, %l[fault])		\
+			  ::: clobber : fault);			\
+	return;							\
+fault:								\
+	kvm_spurious_fault();					\
+} while (0)
+
+#define svm_asm1(insn, op1, clobber...)				\
+do {								\
+	asm_volatile_goto("1: "  __stringify(insn) " %0\n\t"	\
+			  _ASM_EXTABLE(1b, %l[fault])		\
+			  :: op1 : clobber : fault);		\
+	return;							\
+fault:								\
+	kvm_spurious_fault();					\
+} while (0)
+
+#define svm_asm2(insn, op1, op2, clobber...)			\
+do {								\
+	asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t" \
+			  _ASM_EXTABLE(1b, %l[fault])		\
+			  :: op1, op2 : clobber : fault);	\
+	return;							\
+fault:								\
+	kvm_spurious_fault();					\
+} while (0)
+
+static inline void clgi(void)
+{
+	svm_asm(clgi);
+}
+
+static inline void stgi(void)
+{
+	svm_asm(stgi);
+}
+
+static inline void invlpga(unsigned long addr, u32 asid)
+{
+	svm_asm2(invlpga, "c"(asid), "a"(addr));
+}
+
+/*
+ * Despite being a physical address, the portion of rAX that is consumed by
+ * VMSAVE, VMLOAD, etc... is still controlled by the effective address size,
+ * hence 'unsigned long' instead of 'hpa_t'.
+ */
+static inline void vmsave(unsigned long pa)
+{
+	svm_asm1(vmsave, "a" (pa), "memory");
+}
+
+static inline void vmload(unsigned long pa)
+{
+	svm_asm1(vmload, "a" (pa), "memory");
+}
+
+#endif /* __KVM_X86_SVM_OPS_H */
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 2de30c20bc26..a61c015870e3 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -93,6 +93,42 @@ TRACE_EVENT(kvm_hv_hypercall,
 );

 /*
+ * Tracepoint for Xen hypercall.
+ */
+TRACE_EVENT(kvm_xen_hypercall,
+	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
+		 unsigned long a2, unsigned long a3, unsigned long a4,
+		 unsigned long a5),
+	TP_ARGS(nr, a0, a1, a2, a3, a4, a5),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, nr)
+		__field(unsigned long, a0)
+		__field(unsigned long, a1)
+		__field(unsigned long, a2)
+		__field(unsigned long, a3)
+		__field(unsigned long, a4)
+		__field(unsigned long, a5)
+	),
+
+	TP_fast_assign(
+		__entry->nr = nr;
+		__entry->a0 = a0;
+		__entry->a1 = a1;
+		__entry->a2 = a2;
+		__entry->a3 = a3;
+		__entry->a4 = a4;
+		__entry->a5 = a5;
+	),
+
+	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 0x%lx",
+		  __entry->nr, __entry->a0, __entry->a1, __entry->a2,
+		  __entry->a3, __entry->a4, __entry->a5)
+);
+
+
+
+/*
  * Tracepoint for PIO.
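The svm_asm*() macros in svm_ops.h above wrap each SVM instruction in an exception-table entry so that a faulting instruction lands in kvm_spurious_fault() instead of an oops. Expanding svm_asm1() for vmsave() by hand gives:

    static inline void vmsave(unsigned long pa)
    {
            /* "1:" labels the instruction; _ASM_EXTABLE redirects a fault
             * on it to the asm-goto label 'fault'. */
            asm_volatile_goto("1: vmsave %0\n\t"
                              _ASM_EXTABLE(1b, %l[fault])
                              :: "a" (pa) : "memory" : fault);
            return;
    fault:
            kvm_spurious_fault();
    }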
*/ @@ -256,7 +292,7 @@ TRACE_EVENT(name, \ __entry->guest_rip = kvm_rip_read(vcpu); \ __entry->isa = isa; \ __entry->vcpu_id = vcpu->vcpu_id; \ - kvm_x86_ops.get_exit_info(vcpu, &__entry->info1, \ + static_call(kvm_x86_get_exit_info)(vcpu, &__entry->info1, \ &__entry->info2, \ &__entry->intr_info, \ &__entry->error_code); \ @@ -738,7 +774,7 @@ TRACE_EVENT(kvm_emulate_insn, ), TP_fast_assign( - __entry->csbase = kvm_x86_ops.get_segment_base(vcpu, VCPU_SREG_CS); + __entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS); __entry->len = vcpu->arch.emulate_ctxt->fetch.ptr - vcpu->arch.emulate_ctxt->fetch.data; __entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len; diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 3a1861403d73..d1d77985e889 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -19,6 +19,9 @@ extern int __read_mostly pt_mode; #define PT_MODE_HOST_GUEST 1 #define PMU_CAP_FW_WRITES (1ULL << 13) +#define PMU_CAP_LBR_FMT 0x3f + +#define DEBUGCTLMSR_LBR_MASK (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) struct nested_vmx_msrs { /* @@ -262,6 +265,12 @@ static inline bool cpu_has_vmx_tsc_scaling(void) SECONDARY_EXEC_TSC_SCALING; } +static inline bool cpu_has_vmx_bus_lock_detection(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_BUS_LOCK_DETECTION; +} + static inline bool cpu_has_vmx_apicv(void) { return cpu_has_vmx_apic_register_virt() && @@ -371,11 +380,28 @@ static inline bool vmx_pt_mode_is_host_guest(void) static inline u64 vmx_get_perf_capabilities(void) { + u64 perf_cap = 0; + + if (boot_cpu_has(X86_FEATURE_PDCM)) + rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap); + + perf_cap &= PMU_CAP_LBR_FMT; + /* * Since counters are virtualized, KVM would support full * width counting unconditionally, even if the host lacks it. */ - return PMU_CAP_FW_WRITES; + return PMU_CAP_FW_WRITES | perf_cap; +} + +static inline u64 vmx_supported_debugctl(void) +{ + u64 debugctl = 0; + + if (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT) + debugctl |= DEBUGCTLMSR_LBR_MASK; + + return debugctl; } #endif /* __KVM_X86_VMX_CAPS_H */ diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index f2b9bfb58206..bcca0b80e0d0 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -12,6 +12,7 @@ #include "nested.h" #include "pmu.h" #include "trace.h" +#include "vmx.h" #include "x86.h" static bool __read_mostly enable_shadow_vmcs = 1; @@ -411,8 +412,8 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit if (nr == DB_VECTOR) { if (!has_payload) { payload = vcpu->arch.dr6; - payload &= ~(DR6_FIXED_1 | DR6_BT); - payload ^= DR6_RTM; + payload &= ~DR6_BT; + payload ^= DR6_ACTIVE_LOW; } *exit_qual = payload; } else @@ -744,8 +745,7 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, (CC(!nested_cpu_has_vid(vmcs12)) || CC(!nested_exit_intr_ack_set(vcpu)) || CC((vmcs12->posted_intr_nv & 0xff00)) || - CC((vmcs12->posted_intr_desc_addr & 0x3f)) || - CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))) + CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64)))) return -EINVAL; /* tpr shadow is needed by all apicv features. 
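With the capabilities.h change above, the PERF_CAPABILITIES value advertised to guests is the host value filtered down to the LBR-format field, plus the unconditional full-width-writes bit. A worked example, with a hypothetical host value:

    u64 host_cap  = 0x2005;                      /* hypothetical host MSR value */
    u64 lbr_fmt   = host_cap & PMU_CAP_LBR_FMT;  /* bits 5:0 -> 0x05            */
    u64 guest_cap = PMU_CAP_FW_WRITES | lbr_fmt; /* bit 13 | 0x05 == 0x2005     */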
*/ @@ -758,13 +758,11 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, u32 count, u64 addr) { - int maxphyaddr; - if (count == 0) return 0; - maxphyaddr = cpuid_maxphyaddr(vcpu); - if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr || - (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) + + if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) || + !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1))) return -EINVAL; return 0; @@ -1062,14 +1060,6 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu, } } -static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val) -{ - unsigned long invalid_mask; - - invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu); - return (val & invalid_mask) == 0; -} - /* * Returns true if the MMU needs to be sync'd on nested VM-Enter/VM-Exit. * tl;dr: the MMU needs a sync if L0 is using shadow paging and L1 didn't @@ -1121,7 +1111,7 @@ static bool nested_vmx_transition_mmu_sync(struct kvm_vcpu *vcpu) static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, enum vm_entry_failure_code *entry_failure_code) { - if (CC(!nested_cr3_valid(vcpu, cr3))) { + if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) { *entry_failure_code = ENTRY_FAIL_DEFAULT; return -EINVAL; } @@ -2177,15 +2167,13 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); /* - * The PML address never changes, so it is constant in vmcs02. - * Conceptually we want to copy the PML index from vmcs01 here, - * and then back to vmcs01 on nested vmexit. But since we flush - * the log and reset GUEST_PML_INDEX on each vmexit, the PML - * index is also effectively constant in vmcs02. + * PML is emulated for L2, but never enabled in hardware as the MMU + * handles A/D emulation. Disabling PML for L2 also avoids having to + * deal with filtering out L2 GPAs from the buffer. */ if (enable_pml) { - vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); - vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); + vmcs_write64(PML_ADDRESS, 0); + vmcs_write16(GUEST_PML_INDEX, -1); } if (cpu_has_vmx_encls_vmexit()) @@ -2220,7 +2208,7 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) { - u32 exec_control, vmcs12_exec_ctrl; + u32 exec_control; u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) @@ -2294,11 +2282,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_ENABLE_VMFUNC); if (nested_cpu_has(vmcs12, - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { - vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & - ~SECONDARY_EXEC_ENABLE_PML; - exec_control |= vmcs12_exec_ctrl; - } + CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) + exec_control |= vmcs12->secondary_vm_exec_control; + + /* PML is emulated and never enabled in hardware for L2. */ + exec_control &= ~SECONDARY_EXEC_ENABLE_PML; /* VMCS shadowing for L2 is emulated for now */ exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; @@ -2532,7 +2520,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * bitwise-or of what L1 wants to trap for L2, and what we want to * trap. Note that CR0.TS also needs updating - we do this later. 
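The checks above lean on kvm_vcpu_is_legal_aligned_gpa(), which is not part of this hunk; given the reserved_gpa_bits rename visible elsewhere in the patch, it presumably composes an alignment test with the reserved-bits test, along these lines:

    /* sketch, assuming the cpuid.h helpers this series introduces */
    static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
    {
            return !(gpa & vcpu->arch.reserved_gpa_bits);
    }

    static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
                                                     gpa_t gpa, gpa_t alignment)
    {
            return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
    }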
*/ - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); @@ -2635,7 +2623,6 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) { struct vcpu_vmx *vmx = to_vmx(vcpu); - int maxphyaddr = cpuid_maxphyaddr(vcpu); /* Check for memory type validity */ switch (new_eptp & VMX_EPTP_MT_MASK) { @@ -2666,7 +2653,7 @@ static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) } /* Reserved bits should not be set */ - if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f))) + if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) return false; /* AD, if set, should be supported */ @@ -2850,7 +2837,7 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || - CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3))) + CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3))) return -EINVAL; if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || @@ -3057,35 +3044,8 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) vmx->loaded_vmcs->host_state.cr4 = cr4; } - asm( - "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */ - "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t" - "je 1f \n\t" - __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t" - "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t" - "1: \n\t" - "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */ - - /* Check if vmlaunch or vmresume is needed */ - "cmpb $0, %c[launched](%[loaded_vmcs])\n\t" - - /* - * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set - * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail - * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the - * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail. 
- */ - "call vmx_vmenter\n\t" - - CC_SET(be) - : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail) - : [HOST_RSP]"r"((unsigned long)HOST_RSP), - [loaded_vmcs]"r"(vmx->loaded_vmcs), - [launched]"i"(offsetof(struct loaded_vmcs, launched)), - [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)), - [wordsize]"i"(sizeof(ulong)) - : "memory" - ); + vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, + vmx->loaded_vmcs->launched); if (vmx->msr_autoload.host.nr) vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); @@ -3330,7 +3290,11 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12 = get_vmcs12(vcpu); enum vm_entry_failure_code entry_failure_code; bool evaluate_pending_interrupts; - u32 exit_reason, failed_index; + union vmx_exit_reason exit_reason = { + .basic = EXIT_REASON_INVALID_STATE, + .failed_vmentry = 1, + }; + u32 failed_index; if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) kvm_vcpu_flush_tlb_current(vcpu); @@ -3382,7 +3346,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, if (nested_vmx_check_guest_state(vcpu, vmcs12, &entry_failure_code)) { - exit_reason = EXIT_REASON_INVALID_STATE; + exit_reason.basic = EXIT_REASON_INVALID_STATE; vmcs12->exit_qualification = entry_failure_code; goto vmentry_fail_vmexit; } @@ -3393,7 +3357,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, vcpu->arch.tsc_offset += vmcs12->tsc_offset; if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) { - exit_reason = EXIT_REASON_INVALID_STATE; + exit_reason.basic = EXIT_REASON_INVALID_STATE; vmcs12->exit_qualification = entry_failure_code; goto vmentry_fail_vmexit_guest_mode; } @@ -3403,7 +3367,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, vmcs12->vm_entry_msr_load_addr, vmcs12->vm_entry_msr_load_count); if (failed_index) { - exit_reason = EXIT_REASON_MSR_LOAD_FAIL; + exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL; vmcs12->exit_qualification = failed_index; goto vmentry_fail_vmexit_guest_mode; } @@ -3471,7 +3435,7 @@ vmentry_fail_vmexit: return NVMX_VMENTRY_VMEXIT; load_vmcs12_host_state(vcpu, vmcs12); - vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; + vmcs12->vm_exit_reason = exit_reason.full; if (enable_shadow_vmcs || vmx->nested.hv_evmcs) vmx->nested.need_vmcs12_to_shadow_sync = true; return NVMX_VMENTRY_VMEXIT; @@ -4234,9 +4198,6 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ignored)) nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); - if (!enable_ept) - vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; - nested_vmx_transition_tlb_flush(vcpu, vmcs12, false); vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); @@ -4529,6 +4490,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, vmx_set_virtual_apic_mode(vcpu); } + if (vmx->nested.update_vmcs01_cpu_dirty_logging) { + vmx->nested.update_vmcs01_cpu_dirty_logging = false; + vmx_update_cpu_dirty_logging(vcpu); + } + /* Unpin physical memory we referred to in vmcs02 */ if (vmx->nested.apic_access_page) { kvm_release_page_clean(vmx->nested.apic_access_page); @@ -5559,7 +5525,12 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu) return kvm_skip_emulated_instruction(vcpu); fail: - nested_vmx_vmexit(vcpu, vmx->exit_reason, + /* + * This is effectively a reflected VM-Exit, as opposed to a synthesized + * nested VM-Exit. 
Pass the original exit reason, i.e. don't hardcode + * EXIT_REASON_VMFUNC as the exit reason. + */ + nested_vmx_vmexit(vcpu, vmx->exit_reason.full, vmx_get_intr_info(vcpu), vmx_get_exit_qual(vcpu)); return 1; @@ -5627,7 +5598,8 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. */ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12, u32 exit_reason) + struct vmcs12 *vmcs12, + union vmx_exit_reason exit_reason) { u32 msr_index = kvm_rcx_read(vcpu); gpa_t bitmap; @@ -5641,7 +5613,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, * First we need to figure out which of the four to use: */ bitmap = vmcs12->msr_bitmap; - if (exit_reason == EXIT_REASON_MSR_WRITE) + if (exit_reason.basic == EXIT_REASON_MSR_WRITE) bitmap += 2048; if (msr_index >= 0xc0000000) { msr_index -= 0xc0000000; @@ -5778,11 +5750,12 @@ static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12) * Return true if L0 wants to handle an exit from L2 regardless of whether or not * L1 wants the exit. Only call this when in is_guest_mode (L2). */ -static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason) +static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, + union vmx_exit_reason exit_reason) { u32 intr_info; - switch ((u16)exit_reason) { + switch ((u16)exit_reason.basic) { case EXIT_REASON_EXCEPTION_NMI: intr_info = vmx_get_intr_info(vcpu); if (is_nmi(intr_info)) @@ -5820,7 +5793,10 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason) case EXIT_REASON_PREEMPTION_TIMER: return true; case EXIT_REASON_PML_FULL: - /* We emulate PML support to L1. */ + /* + * PML is emulated for an L1 VMM and should never be enabled in + * vmcs02, always "handle" PML_FULL by exiting to userspace. + */ return true; case EXIT_REASON_VMFUNC: /* VM functions are emulated through L2->L0 vmexits. */ @@ -5838,12 +5814,13 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason) * Return 1 if L1 wants to intercept an exit from L2. Only call this when in * is_guest_mode (L2). */ -static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason) +static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, + union vmx_exit_reason exit_reason) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); u32 intr_info; - switch ((u16)exit_reason) { + switch ((u16)exit_reason.basic) { case EXIT_REASON_EXCEPTION_NMI: intr_info = vmx_get_intr_info(vcpu); if (is_nmi(intr_info)) @@ -5962,7 +5939,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason) bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 exit_reason = vmx->exit_reason; + union vmx_exit_reason exit_reason = vmx->exit_reason; unsigned long exit_qual; u32 exit_intr_info; @@ -5981,7 +5958,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) goto reflect_vmexit; } - trace_kvm_nested_vmexit(exit_reason, vcpu, KVM_ISA_VMX); + trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX); /* If L0 (KVM) wants the exit, it trumps L1's desires. 
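The four sub-bitmaps selected in nested_vmx_exit_handled_msr() follow the architectural MSR-bitmap layout: reads in the low 2KB, writes in the high 2KB, and the 0xc0000000-based MSRs in the second 1KB of each half. A worked example for a WRMSR to MSR_LSTAR (0xc0000082):

    gpa_t bitmap = vmcs12->msr_bitmap;
    u32 msr = 0xc0000082;

    bitmap += 2048;     /* exit_reason.basic == EXIT_REASON_MSR_WRITE  */
    msr -= 0xc0000000;  /* msr is now 0x82                             */
    bitmap += 1024;     /* high-MSR write bitmap starts at offset 3072 */

    /* L1 intercepts the write iff bit 2 of the byte at offset
     * 3072 + 0x82 / 8 == 3088 of the bitmap page is set. */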
*/ if (nested_vmx_l0_wants_exit(vcpu, exit_reason)) @@ -6007,7 +5984,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) exit_qual = vmx_get_exit_qual(vcpu); reflect_vmexit: - nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, exit_qual); + nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual); return true; } diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index cdf5f34518f4..9efc1a6b8693 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -152,12 +152,17 @@ static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu, return &counters[array_index_nospec(idx, num_counters)]; } -static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu) +static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu) { if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) - return false; + return 0; - return vcpu->arch.perf_capabilities & PMU_CAP_FW_WRITES; + return vcpu->arch.perf_capabilities; +} + +static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu) +{ + return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0; } static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr) @@ -168,6 +173,41 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr) return get_gp_pmc(pmu, msr, MSR_IA32_PMC0); } +bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu) +{ + /* + * As a first step, a guest could only enable LBR feature if its + * cpu model is the same as the host because the LBR registers + * would be pass-through to the guest and they're model specific. + */ + return boot_cpu_data.x86_model == guest_cpuid_model(vcpu); +} + +bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu) +{ + struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu); + + return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT); +} + +static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index) +{ + struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu); + bool ret = false; + + if (!intel_pmu_lbr_is_enabled(vcpu)) + return ret; + + ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) || + (index >= records->from && index < records->from + records->nr) || + (index >= records->to && index < records->to + records->nr); + + if (!ret && records->info) + ret = (index >= records->info && index < records->info + records->nr); + + return ret; +} + static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); @@ -183,7 +223,8 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) default: ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) || - get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr); + get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) || + intel_pmu_is_valid_lbr_msr(vcpu, msr); break; } @@ -202,6 +243,111 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr) return pmc; } +static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu) +{ + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + + if (lbr_desc->event) { + perf_event_release_kernel(lbr_desc->event); + lbr_desc->event = NULL; + vcpu_to_pmu(vcpu)->event_count--; + } +} + +int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu) +{ + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + struct perf_event *event; + + /* + * The perf_event_attr is constructed in the minimum efficient way: + * - set 'pinned = true' to make it task pinned so that 
if another + * cpu pinned event reclaims LBR, the event->oncpu will be set to -1; + * - set '.exclude_host = true' to record guest branches behavior; + * + * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicates host perf + * schedule the event without a real HW counter but a fake one; + * check is_guest_lbr_event() and __intel_get_event_constraints(); + * + * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and + * 'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK | + * PERF_SAMPLE_BRANCH_USER' to configure it as a LBR callstack + * event, which helps KVM to save/restore guest LBR records + * during host context switches and reduces quite a lot overhead, + * check branch_user_callstack() and intel_pmu_lbr_sched_task(); + */ + struct perf_event_attr attr = { + .type = PERF_TYPE_RAW, + .size = sizeof(attr), + .config = INTEL_FIXED_VLBR_EVENT, + .sample_type = PERF_SAMPLE_BRANCH_STACK, + .pinned = true, + .exclude_host = true, + .branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK | + PERF_SAMPLE_BRANCH_USER, + }; + + if (unlikely(lbr_desc->event)) { + __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); + return 0; + } + + event = perf_event_create_kernel_counter(&attr, -1, + current, NULL, NULL); + if (IS_ERR(event)) { + pr_debug_ratelimited("%s: failed %ld\n", + __func__, PTR_ERR(event)); + return PTR_ERR(event); + } + lbr_desc->event = event; + pmu->event_count++; + __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); + return 0; +} + +/* + * It's safe to access LBR msrs from guest when they have not + * been passthrough since the host would help restore or reset + * the LBR msrs records when the guest LBR event is scheduled in. + */ +static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu, + struct msr_data *msr_info, bool read) +{ + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + u32 index = msr_info->index; + + if (!intel_pmu_is_valid_lbr_msr(vcpu, index)) + return false; + + if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0) + goto dummy; + + /* + * Disable irq to ensure the LBR feature doesn't get reclaimed by the + * host at the time the value is read from the msr, and this avoids the + * host LBR value to be leaked to the guest. If LBR has been reclaimed, + * return 0 on guest reads. 
+ */ + local_irq_disable(); + if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) { + if (read) + rdmsrl(index, msr_info->data); + else + wrmsrl(index, msr_info->data); + __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use); + local_irq_enable(); + return true; + } + clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use); + local_irq_enable(); + +dummy: + if (read) + msr_info->data = 0; + return true; +} + static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); @@ -236,7 +382,8 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { msr_info->data = pmc->eventsel; return 0; - } + } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) + return 0; } return 1; @@ -307,7 +454,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) reprogram_gp_counter(pmc, data); return 0; } - } + } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) + return 0; } return 1; @@ -316,6 +464,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) static void intel_pmu_refresh(struct kvm_vcpu *vcpu) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + struct x86_pmu_capability x86_pmu; struct kvm_cpuid_entry2 *entry; union cpuid10_eax eax; @@ -327,7 +477,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) pmu->counter_bitmask[KVM_PMC_FIXED] = 0; pmu->version = 0; pmu->reserved_bits = 0xffffffff00200000ull; - vcpu->arch.perf_capabilities = 0; entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); if (!entry) @@ -340,8 +489,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) return; perf_get_x86_pmu_capability(&x86_pmu); - if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) - vcpu->arch.perf_capabilities = vmx_get_perf_capabilities(); pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, x86_pmu.num_counters_gp); @@ -385,12 +532,21 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters); nested_vmx_pmu_entry_exit_ctls_update(vcpu); + + if (intel_pmu_lbr_is_compatible(vcpu)) + x86_perf_get_lbr(&lbr_desc->records); + else + lbr_desc->records.nr = 0; + + if (lbr_desc->records.nr) + bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1); } static void intel_pmu_init(struct kvm_vcpu *vcpu) { int i; struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) { pmu->gp_counters[i].type = KVM_PMC_GP; @@ -405,6 +561,11 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu) pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED; pmu->fixed_counters[i].current_config = 0; } + + vcpu->arch.perf_capabilities = vmx_get_perf_capabilities(); + lbr_desc->records.nr = 0; + lbr_desc->event = NULL; + lbr_desc->msr_passthrough = false; } static void intel_pmu_reset(struct kvm_vcpu *vcpu) @@ -429,6 +590,119 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu) pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = pmu->global_ovf_ctrl = 0; + + intel_pmu_release_guest_lbr_event(vcpu); +} + +/* + * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4. + * + * If Freeze_LBR_On_PMI = 1, the LBR is frozen on PMI and + * the KVM emulates to clear the LBR bit (bit 0) in IA32_DEBUGCTL. + * + * Guest needs to re-enable LBR to resume branches recording. 
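intel_pmu_legacy_freezing_lbrs_on_pmi(), defined just below, emulates the architectural Freeze_LBR_On_PMI behavior for PMU versions 2 and 3. From the guest's perspective the interaction is roughly this (an illustrative sequence, not code from this patch):

    /* guest PMI handler, with Freeze_LBR_On_PMI set in DEBUGCTL */
    u64 debugctl;

    rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);  /* LBR bit now reads as 0 */
    /* ... walk the frozen LBR stack ... */
    wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl | DEBUGCTLMSR_LBR);  /* resume */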
+ */ +static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu) +{ + u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL); + + if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) { + data &= ~DEBUGCTLMSR_LBR; + vmcs_write64(GUEST_IA32_DEBUGCTL, data); + } +} + +static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu) +{ + u8 version = vcpu_to_pmu(vcpu)->version; + + if (!intel_pmu_lbr_is_enabled(vcpu)) + return; + + if (version > 1 && version < 4) + intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu); +} + +static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set) +{ + struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu); + int i; + + for (i = 0; i < lbr->nr; i++) { + vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set); + vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set); + if (lbr->info) + vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set); + } + + vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set); + vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set); +} + +static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu) +{ + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + + if (!lbr_desc->msr_passthrough) + return; + + vmx_update_intercept_for_lbr_msrs(vcpu, true); + lbr_desc->msr_passthrough = false; +} + +static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu) +{ + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + + if (lbr_desc->msr_passthrough) + return; + + vmx_update_intercept_for_lbr_msrs(vcpu, false); + lbr_desc->msr_passthrough = true; +} + +/* + * Higher priority host perf events (e.g. cpu pinned) could reclaim the + * pmu resources (e.g. LBR) that were assigned to the guest. This is + * usually done via ipi calls (more details in perf_install_in_context). + * + * Before entering the non-root mode (with irq disabled here), double + * confirm that the pmu features enabled to the guest are not reclaimed + * by higher priority host events. Otherwise, disallow vcpu's access to + * the reclaimed features. 
+ */ +void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu) +{ + struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + + if (!lbr_desc->event) { + vmx_disable_lbr_msrs_passthrough(vcpu); + if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR) + goto warn; + if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use)) + goto warn; + return; + } + + if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) { + vmx_disable_lbr_msrs_passthrough(vcpu); + __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); + goto warn; + } else + vmx_enable_lbr_msrs_passthrough(vcpu); + + return; + +warn: + pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n", + vcpu->vcpu_id); +} + +static void intel_pmu_cleanup(struct kvm_vcpu *vcpu) +{ + if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)) + intel_pmu_release_guest_lbr_event(vcpu); } struct kvm_pmu_ops intel_pmu_ops = { @@ -445,4 +719,6 @@ struct kvm_pmu_ops intel_pmu_ops = { .refresh = intel_pmu_refresh, .init = intel_pmu_init, .reset = intel_pmu_reset, + .deliver_pmi = intel_pmu_deliver_pmi, + .cleanup = intel_pmu_cleanup, }; diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index f02962dcc72c..4831bc44ce66 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -54,7 +54,7 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) dest = cpu_physical_id(cpu); - if (x2apic_enabled()) + if (x2apic_mode) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; @@ -104,7 +104,7 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) dest = cpu_physical_id(vcpu->cpu); - if (x2apic_enabled()) + if (x2apic_mode) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; @@ -174,7 +174,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu) */ dest = cpu_physical_id(vcpu->pre_pcpu); - if (x2apic_enabled()) + if (x2apic_mode) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index e85aa5faa22d..3a6461694fc2 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -44,7 +44,7 @@ * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump * to vmx_vmexit. */ -SYM_FUNC_START(vmx_vmenter) +SYM_FUNC_START_LOCAL(vmx_vmenter) /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */ je 2f diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index eb69fef57485..50810d471462 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -50,6 +50,7 @@ #include "capabilities.h" #include "cpuid.h" #include "evmcs.h" +#include "hyperv.h" #include "irq.h" #include "kvm_cache_regs.h" #include "lapic.h" @@ -552,7 +553,7 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu) { struct hv_enlightened_vmcs *evmcs; struct hv_partition_assist_pg **p_hv_pa_pg = - &vcpu->kvm->arch.hyperv.hv_pa_pg; + &to_kvm_hv(vcpu->kvm)->hv_pa_pg; /* * Synthetic VM-Exit is not enabled in current code and so All * evmcs in singe VM shares same assist page. @@ -658,6 +659,14 @@ static bool is_valid_passthrough_msr(u32 msr) case MSR_IA32_RTIT_CR3_MATCH: case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: /* PT MSRs. These are handled in pt_update_intercept_for_msr() */ + case MSR_LBR_SELECT: + case MSR_LBR_TOS: + case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31: + case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31: + case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31: + case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8: + case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8: + /* LBR MSRs. 
These are handled in vmx_update_intercept_for_lbr_msrs() */ return true; } @@ -806,7 +815,7 @@ static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) return *p; } -void update_exception_bitmap(struct kvm_vcpu *vcpu) +void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu) { u32 eb; @@ -1102,7 +1111,7 @@ static inline bool pt_can_write_msr(struct vcpu_vmx *vmx) static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base) { /* The base must be 128-byte aligned and a legal physical address. */ - return !kvm_vcpu_is_illegal_gpa(vcpu, base) && !(base & 0x7f); + return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128); } static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range) @@ -1577,7 +1586,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) * i.e. we end up advancing IP with some random value. */ if (!static_cpu_has(X86_FEATURE_HYPERVISOR) || - to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) { + to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) { orig_rip = kvm_rip_read(vcpu); rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN); #ifdef CONFIG_X86_64 @@ -1924,6 +1933,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) return 1; goto find_uret_msr; + case MSR_IA32_DEBUGCTLMSR: + msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL); + break; default: find_uret_msr: msr = vmx_find_uret_msr(vmx, msr_info->index); @@ -1947,6 +1959,16 @@ static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu, return (unsigned long)data; } +static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu) +{ + u64 debugctl = vmx_supported_debugctl(); + + if (!intel_pmu_lbr_is_enabled(vcpu)) + debugctl &= ~DEBUGCTLMSR_LBR_MASK; + + return debugctl; +} + /* * Writes msr value into the appropriate "register". * Returns 0 on success, non-0 otherwise. 
@@ -1997,14 +2019,29 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } vmcs_writel(GUEST_SYSENTER_ESP, data); break; - case MSR_IA32_DEBUGCTLMSR: + case MSR_IA32_DEBUGCTLMSR: { + u64 invalid = data & ~vcpu_supported_debugctl(vcpu); + if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) { + if (report_ignored_msrs) + vcpu_unimpl(vcpu, "%s: BTF|LBR in IA32_DEBUGCTLMSR 0x%llx, nop\n", + __func__, data); + data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); + invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); + } + + if (invalid) + return 1; + if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) get_vmcs12(vcpu)->guest_ia32_debugctl = data; - ret = kvm_set_msr_common(vcpu, msr_info); - break; - + vmcs_write64(GUEST_IA32_DEBUGCTL, data); + if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event && + (data & DEBUGCTLMSR_LBR)) + intel_pmu_create_guest_lbr_event(vcpu); + return 0; + } case MSR_IA32_BNDCFGS: if (!kvm_mpx_supported() || (!msr_info->host_initiated && @@ -2196,6 +2233,18 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if ((data >> 32) != 0) return 1; goto find_uret_msr; + case MSR_IA32_PERF_CAPABILITIES: + if (data && !vcpu_to_pmu(vcpu)->version) + return 1; + if (data & PMU_CAP_LBR_FMT) { + if ((data & PMU_CAP_LBR_FMT) != + (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)) + return 1; + if (!intel_pmu_lbr_is_compatible(vcpu)) + return 1; + } + ret = kvm_set_msr_common(vcpu, msr_info); + break; default: find_uret_msr: @@ -2265,7 +2314,6 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer) u64 msr; cr4_set_bits(X86_CR4_VMXE); - intel_pt_handle_vmx(1); asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t" _ASM_EXTABLE(1b, %l[fault]) @@ -2276,7 +2324,6 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer) fault: WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n", rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr); - intel_pt_handle_vmx(0); cr4_clear_bits(X86_CR4_VMXE); return -EFAULT; @@ -2299,9 +2346,13 @@ static int hardware_enable(void) !hv_get_vp_assist_page(cpu)) return -EFAULT; + intel_pt_handle_vmx(1); + r = kvm_cpu_vmxon(phys_addr); - if (r) + if (r) { + intel_pt_handle_vmx(0); return r; + } if (enable_ept) ept_sync_global(); @@ -2319,22 +2370,14 @@ static void vmclear_local_loaded_vmcss(void) __loaded_vmcs_clear(v); } - -/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() - * tricks. 
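The new MSR_IA32_DEBUGCTLMSR write path above accepts only bits reported by vcpu_supported_debugctl(), but keeps the old kvm_set_msr_common() behavior of warning about and dropping BTF/LBR rather than injecting #GP. A walkthrough with hypothetical input:

    /* guest writes 0x3 (LBR|BTF) while the vPMU exposes no LBRs */
    u64 data    = DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF;      /* 0x3 */
    u64 invalid = data & ~vcpu_supported_debugctl(vcpu);  /* 0x3 */

    /* BTF|LBR are warned about and cleared, not faulted on */
    data    &= ~(DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR);      /* 0x0 */
    invalid &= ~(DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR);      /* 0x0 */

    /* any other invalid bit would return 1 (#GP); here the write
     * simply lands in the VMCS as GUEST_IA32_DEBUGCTL = 0 */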
- */ -static void kvm_cpu_vmxoff(void) -{ - asm volatile (__ex("vmxoff")); - - intel_pt_handle_vmx(0); - cr4_clear_bits(X86_CR4_VMXE); -} - static void hardware_disable(void) { vmclear_local_loaded_vmcss(); - kvm_cpu_vmxoff(); + + if (cpu_vmxoff()) + kvm_spurious_fault(); + + intel_pt_handle_vmx(0); } /* @@ -2428,7 +2471,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX | - SECONDARY_EXEC_ENABLE_VMFUNC; + SECONDARY_EXEC_ENABLE_VMFUNC | + SECONDARY_EXEC_BUS_LOCK_DETECTION; if (cpu_has_sgx()) opt2 |= SECONDARY_EXEC_ENCLS_EXITING; if (adjust_vmx_controls(min2, opt2, @@ -2739,7 +2783,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); @@ -2819,7 +2863,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); @@ -3774,7 +3818,7 @@ static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, vmx_set_msr_bitmap_write(msr_bitmap, msr); } -static __always_inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, +void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool value) { if (value) @@ -4233,7 +4277,12 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) */ exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; - if (!enable_pml) + /* + * PML is enabled/disabled when dirty logging of memsmlots changes, but + * it needs to be set here when dirty logging is already active, e.g. + * if this vCPU was created after dirty logging was enabled. + */ + if (!vcpu->kvm->arch.cpu_dirty_logging_count) exec_control &= ~SECONDARY_EXEC_ENABLE_PML; if (cpu_has_vmx_xsaves()) { @@ -4251,24 +4300,17 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) } vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP); - - /* - * Expose INVPCID if and only if PCID is also exposed to the guest. - * INVPCID takes a #UD when it's disabled in the VMCS, but a #GP or #PF - * if CR4.PCIDE=0. Enumerating CPUID.INVPCID=1 would lead to incorrect - * behavior from the guest perspective (it would expect #GP or #PF). 
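hardware_disable() now leans on the generic cpu_vmxoff() helper instead of the local VMXOFF wrapper deleted above. That helper lives outside this hunk; judging by the kvm_spurious_fault() call at the caller, it reports a faulting VMXOFF through its return value, roughly:

    /* sketch of cpu_vmxoff() (asm/virtext.h), as this caller assumes it */
    static inline int cpu_vmxoff(void)
    {
            asm_volatile_goto("1: vmxoff\n\t"
                              _ASM_EXTABLE(1b, %l[fault])
                              ::: "cc", "memory" : fault);
            cr4_clear_bits(X86_CR4_VMXE);
            return 0;

    fault:
            cr4_clear_bits(X86_CR4_VMXE);
            return -EIO;
    }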
- */ - if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID)) - guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID); vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID); - vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND); vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED); vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG, ENABLE_USR_WAIT_PAUSE, false); + if (!vcpu->kvm->arch.bus_lock_detection_enabled) + exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION; + vmx->secondary_exec_control = exec_control; } @@ -4467,23 +4509,23 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmx_set_cr4(vcpu, 0); vmx_set_efer(vcpu, 0); - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); vpid_sync_context(vmx->vpid); if (init_event) vmx_clear_hlt(vcpu); } -static void enable_irq_window(struct kvm_vcpu *vcpu) +static void vmx_enable_irq_window(struct kvm_vcpu *vcpu) { exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); } -static void enable_nmi_window(struct kvm_vcpu *vcpu) +static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu) { if (!enable_vnmi || vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { - enable_irq_window(vcpu); + vmx_enable_irq_window(vcpu); return; } @@ -4824,7 +4866,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); return 1; } - kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; + kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); fallthrough; case BP_VECTOR: @@ -5049,6 +5091,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; int dr, dr7, reg; + int err = 1; exit_qualification = vmx_get_exit_qual(vcpu); dr = exit_qualification & DEBUG_REG_ACCESS_NUM; @@ -5057,9 +5100,9 @@ static int handle_dr(struct kvm_vcpu *vcpu) if (!kvm_require_dr(vcpu, dr)) return 1; - /* Do not handle if the CPL > 0, will trigger GP on re-entry */ - if (!kvm_require_cpl(vcpu, 0)) - return 1; + if (kvm_x86_ops.get_cpl(vcpu) > 0) + goto out; + dr7 = vmcs_readl(GUEST_DR7); if (dr7 & DR7_GD) { /* @@ -5068,7 +5111,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) * guest debugging itself. 
*/ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { - vcpu->run->debug.arch.dr6 = DR6_BD | DR6_RTM | DR6_FIXED_1; + vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW; vcpu->run->debug.arch.dr7 = dr7; vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); vcpu->run->debug.arch.exception = DB_VECTOR; @@ -5096,14 +5139,15 @@ static int handle_dr(struct kvm_vcpu *vcpu) if (exit_qualification & TYPE_MOV_FROM_DR) { unsigned long val; - if (kvm_get_dr(vcpu, dr, &val)) - return 1; + kvm_get_dr(vcpu, dr, &val); kvm_register_write(vcpu, reg, val); - } else - if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) - return 1; + err = 0; + } else { + err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)); + } - return kvm_skip_emulated_instruction(vcpu); +out: + return kvm_complete_insn_gp(vcpu, err); } static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) @@ -5177,9 +5221,8 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu) u64 new_bv = kvm_read_edx_eax(vcpu); u32 index = kvm_rcx_read(vcpu); - if (kvm_set_xcr(vcpu, index, new_bv) == 0) - return kvm_skip_emulated_instruction(vcpu); - return 1; + int err = kvm_set_xcr(vcpu, index, new_bv); + return kvm_complete_insn_gp(vcpu, err); } static int handle_apic_access(struct kvm_vcpu *vcpu) @@ -5600,6 +5643,13 @@ static int handle_encls(struct kvm_vcpu *vcpu) return 1; } +static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu) +{ + vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; + vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; + return 0; +} + /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs @@ -5656,6 +5706,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_VMFUNC] = handle_vmx_instruction, [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, [EXIT_REASON_ENCLS] = handle_encls, + [EXIT_REASON_BUS_LOCK] = handle_bus_lock_vmexit, }; static const int kvm_vmx_max_exit_handlers = @@ -5667,7 +5718,7 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2, struct vcpu_vmx *vmx = to_vmx(vcpu); *info1 = vmx_get_exit_qual(vcpu); - if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { + if (!(vmx->exit_reason.failed_vmentry)) { *info2 = vmx->idt_vectoring_info; *intr_info = vmx_get_intr_info(vcpu); if (is_exception_with_error_code(*intr_info)) @@ -5720,24 +5771,6 @@ static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); } -/* - * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap. - * Called before reporting dirty_bitmap to userspace. - */ -static void kvm_flush_pml_buffers(struct kvm *kvm) -{ - int i; - struct kvm_vcpu *vcpu; - /* - * We only need to kick vcpu out of guest mode here, as PML buffer - * is flushed at beginning of all VMEXITs, and it's obvious that only - * vcpus running in guest are possible to have unflushed GPAs in PML - * buffer. - */ - kvm_for_each_vcpu(i, vcpu, kvm) - kvm_vcpu_kick(vcpu); -} - static void vmx_dump_sel(char *name, uint32_t sel) { pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", @@ -5908,20 +5941,22 @@ void dump_vmcs(void) * The guest has exited. See if we can fix it or if we need userspace * assistance. 
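handle_dr() and handle_xsetbv() above now report their result through kvm_complete_insn_gp(), which centralizes the inject-#GP-or-skip decision. The helper is defined in x86.c, not in this hunk, but behaves essentially as:

    int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
    {
            if (err)
                    kvm_inject_gp(vcpu, 0);  /* non-zero err: inject #GP */
            else
                    return kvm_skip_emulated_instruction(vcpu);
            return 1;
    }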
*/ -static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) +static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) { struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 exit_reason = vmx->exit_reason; + union vmx_exit_reason exit_reason = vmx->exit_reason; u32 vectoring_info = vmx->idt_vectoring_info; + u16 exit_handler_index; /* * Flush logged GPAs PML buffer, this will make dirty_bitmap more * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before * querying dirty_bitmap, we only need to kick all vcpus out of guest * mode as if vcpus is in root mode, the PML buffer must has been - * flushed already. + * flushed already. Note, PML is never enabled in hardware while + * running L2. */ - if (enable_pml) + if (enable_pml && !is_guest_mode(vcpu)) vmx_flush_pml_buffer(vcpu); /* @@ -5938,6 +5973,13 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) if (is_guest_mode(vcpu)) { /* + * PML is never enabled when running L2, bail immediately if a + * PML full exit occurs as something is horribly wrong. + */ + if (exit_reason.basic == EXIT_REASON_PML_FULL) + goto unexpected_vmexit; + + /* * The host physical addresses of some pages of guest memory * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC * Page). The CPU may write to these pages via their host @@ -5954,11 +5996,11 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) return 1; } - if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { + if (exit_reason.failed_vmentry) { dump_vmcs(); vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason - = exit_reason; + = exit_reason.full; vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; return 0; } @@ -5980,18 +6022,18 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) * will cause infinite loop. 
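All of the exit_reason.basic, .failed_vmentry and .full accesses in this function rely on the vmx_exit_reason union added to vmx.h elsewhere in the patch. Its shape is approximately this (field placement per the SDM's exit-reason format; exact names assumed):

    union vmx_exit_reason {
            struct {
                    u32 basic               : 16;  /* handler index */
                    u32 reserved16_25       : 10;
                    u32 bus_lock_detected   : 1;   /* bit 26        */
                    u32 enclave_mode        : 1;
                    u32 smi_pending_mtf     : 1;
                    u32 smi_from_vmx_root   : 1;
                    u32 reserved30          : 1;
                    u32 failed_vmentry      : 1;   /* bit 31, replaces
                                                    * VMX_EXIT_REASONS_FAILED_VMENTRY */
            };
            u32 full;
    };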
*/ if ((vectoring_info & VECTORING_INFO_VALID_MASK) && - (exit_reason != EXIT_REASON_EXCEPTION_NMI && - exit_reason != EXIT_REASON_EPT_VIOLATION && - exit_reason != EXIT_REASON_PML_FULL && - exit_reason != EXIT_REASON_APIC_ACCESS && - exit_reason != EXIT_REASON_TASK_SWITCH)) { + (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI && + exit_reason.basic != EXIT_REASON_EPT_VIOLATION && + exit_reason.basic != EXIT_REASON_PML_FULL && + exit_reason.basic != EXIT_REASON_APIC_ACCESS && + exit_reason.basic != EXIT_REASON_TASK_SWITCH)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; vcpu->run->internal.ndata = 3; vcpu->run->internal.data[0] = vectoring_info; - vcpu->run->internal.data[1] = exit_reason; + vcpu->run->internal.data[1] = exit_reason.full; vcpu->run->internal.data[2] = vcpu->arch.exit_qualification; - if (exit_reason == EXIT_REASON_EPT_MISCONFIG) { + if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) { vcpu->run->internal.ndata++; vcpu->run->internal.data[3] = vmcs_read64(GUEST_PHYSICAL_ADDRESS); @@ -6023,42 +6065,62 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) if (exit_fastpath != EXIT_FASTPATH_NONE) return 1; - if (exit_reason >= kvm_vmx_max_exit_handlers) + if (exit_reason.basic >= kvm_vmx_max_exit_handlers) goto unexpected_vmexit; #ifdef CONFIG_RETPOLINE - if (exit_reason == EXIT_REASON_MSR_WRITE) + if (exit_reason.basic == EXIT_REASON_MSR_WRITE) return kvm_emulate_wrmsr(vcpu); - else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER) + else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER) return handle_preemption_timer(vcpu); - else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW) + else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW) return handle_interrupt_window(vcpu); - else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) + else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT) return handle_external_interrupt(vcpu); - else if (exit_reason == EXIT_REASON_HLT) + else if (exit_reason.basic == EXIT_REASON_HLT) return kvm_emulate_halt(vcpu); - else if (exit_reason == EXIT_REASON_EPT_MISCONFIG) + else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) return handle_ept_misconfig(vcpu); #endif - exit_reason = array_index_nospec(exit_reason, - kvm_vmx_max_exit_handlers); - if (!kvm_vmx_exit_handlers[exit_reason]) + exit_handler_index = array_index_nospec((u16)exit_reason.basic, + kvm_vmx_max_exit_handlers); + if (!kvm_vmx_exit_handlers[exit_handler_index]) goto unexpected_vmexit; - return kvm_vmx_exit_handlers[exit_reason](vcpu); + return kvm_vmx_exit_handlers[exit_handler_index](vcpu); unexpected_vmexit: - vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason); + vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", + exit_reason.full); dump_vmcs(); vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; vcpu->run->internal.ndata = 2; - vcpu->run->internal.data[0] = exit_reason; + vcpu->run->internal.data[0] = exit_reason.full; vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; return 0; } +static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) +{ + int ret = __vmx_handle_exit(vcpu, exit_fastpath); + + /* + * Even when the current exit reason is handled by KVM internally, we + * still need to exit to user space when a bus lock is detected, to + * inform userspace that a bus lock occurred in the guest.
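 *
 * A VMM might consume both notification channels along these lines
 * (a hypothetical run-loop fragment; throttle_bus_locks() is an
 * invented policy hook, not a KVM API):
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	if (run->flags & KVM_RUN_X86_BUS_LOCK)
 *		throttle_bus_locks(vcpu);	(apply VMM policy, e.g. sleep)
 *	if (run->exit_reason == KVM_EXIT_X86_BUS_LOCK)
 *		continue;			(nothing else to service)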
+ */ + if (to_vmx(vcpu)->exit_reason.bus_lock_detected) { + if (ret > 0) + vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; + + vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; + return 0; + } + return ret; +} + /* * Software based L1D cache flush which is used when microcode providing * the cache control MSR is not loaded. @@ -6129,7 +6191,7 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu) : "eax", "ebx", "ecx", "edx"); } -static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) +static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); int tpr_threshold; @@ -6373,9 +6435,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) + if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT) handle_external_interrupt_irqoff(vcpu); - else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI) + else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI) handle_exception_nmi_irqoff(vmx); } @@ -6567,7 +6629,7 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) { - switch (to_vmx(vcpu)->exit_reason) { + switch (to_vmx(vcpu)->exit_reason.basic) { case EXIT_REASON_MSR_WRITE: return handle_fastpath_set_msr_irqoff(vcpu); case EXIT_REASON_PREEMPTION_TIMER: @@ -6577,8 +6639,6 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) } } -bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); - static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) { @@ -6638,11 +6698,9 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) { - fastpath_t exit_fastpath; struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long cr3, cr4; -reenter_guest: /* Record the guest's net vcpu time for enforced NMI injections. */ if (unlikely(!enable_vnmi && vmx->loaded_vmcs->soft_vnmi_blocked)) @@ -6696,6 +6754,8 @@ reenter_guest: pt_guest_enter(vmx); atomic_switch_perf_msrs(vmx); + if (intel_pmu_lbr_is_enabled(vcpu)) + vmx_passthrough_lbr_msrs(vcpu); if (enable_preemption_timer) vmx_update_hv_timer(vcpu); @@ -6734,12 +6794,12 @@ reenter_guest: x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); /* All fields are clean at this point */ - if (static_branch_unlikely(&enable_evmcs)) + if (static_branch_unlikely(&enable_evmcs)) { current_evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; - if (static_branch_unlikely(&enable_evmcs)) - current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index; + current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu); + } /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. 
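 * (Related: DEBUGCTL also carries the guest's LBR-enable bit, which is
 * why the generic "accept and ignore DEBUGCTL" code disappears from
 * x86.c later in this diff. With the vPMU LBR plumbing above, the
 * expected flow, sketched here since the vendor side is not in these
 * hunks, is that a guest WRMSR to MSR_IA32_DEBUGCTLMSR reaches
 * vmx_set_msr(), which writes GUEST_IA32_DEBUGCTL and, when the LBR
 * bit is set, calls intel_pmu_create_guest_lbr_event(vcpu) to claim
 * the hardware LBR stack for this vCPU.)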
Restore it if needed */ if (vmx->host_debugctlmsr) @@ -6768,21 +6828,23 @@ reenter_guest: vmx->idt_vectoring_info = 0; if (unlikely(vmx->fail)) { - vmx->exit_reason = 0xdead; + vmx->exit_reason.full = 0xdead; return EXIT_FASTPATH_NONE; } - vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); - if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)) + vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON); + if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY)) kvm_machine_check(); - trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX); + if (likely(!vmx->exit_reason.failed_vmentry)) + vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); + + trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX); - if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) + if (unlikely(vmx->exit_reason.failed_vmentry)) return EXIT_FASTPATH_NONE; vmx->loaded_vmcs->launched = 1; - vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); vmx_recover_nmi_blocking(vmx); vmx_complete_interrupts(vmx); @@ -6790,22 +6852,7 @@ reenter_guest: if (is_guest_mode(vcpu)) return EXIT_FASTPATH_NONE; - exit_fastpath = vmx_exit_handlers_fastpath(vcpu); - if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) { - if (!kvm_vcpu_exit_request(vcpu)) { - /* - * FIXME: this goto should be a loop in vcpu_enter_guest, - * but it would incur the cost of a retpoline for now. - * Revisit once static calls are available. - */ - if (vcpu->arch.apicv_active) - vmx_sync_pir_to_irr(vcpu); - goto reenter_guest; - } - exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; - } - - return exit_fastpath; + return vmx_exit_handlers_fastpath(vcpu); } static void vmx_free_vcpu(struct kvm_vcpu *vcpu) @@ -7256,7 +7303,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) set_cr4_guest_host_mask(vmx); /* Refresh #PF interception to account for MAXPHYADDR changes. */ - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); } static __init void vmx_set_cpu_caps(void) @@ -7270,8 +7317,8 @@ static __init void vmx_set_cpu_caps(void) /* CPUID 0x7 */ if (kvm_mpx_supported()) kvm_cpu_cap_check_and_set(X86_FEATURE_MPX); - if (cpu_has_vmx_invpcid()) - kvm_cpu_cap_check_and_set(X86_FEATURE_INVPCID); + if (!cpu_has_vmx_invpcid()) + kvm_cpu_cap_clear(X86_FEATURE_INVPCID); if (vmx_pt_mode_is_host_guest()) kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT); @@ -7449,30 +7496,24 @@ static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) shrink_ple_window(vcpu); } -static void vmx_slot_enable_log_dirty(struct kvm *kvm, - struct kvm_memory_slot *slot) -{ - if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) - kvm_mmu_slot_leaf_clear_dirty(kvm, slot); - kvm_mmu_slot_largepage_remove_write_access(kvm, slot); -} - -static void vmx_slot_disable_log_dirty(struct kvm *kvm, - struct kvm_memory_slot *slot) +void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu) { - kvm_mmu_slot_set_dirty(kvm, slot); -} + struct vcpu_vmx *vmx = to_vmx(vcpu); -static void vmx_flush_log_dirty(struct kvm *kvm) -{ - kvm_flush_pml_buffers(kvm); -} + if (is_guest_mode(vcpu)) { + vmx->nested.update_vmcs01_cpu_dirty_logging = true; + return; + } -static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, - struct kvm_memory_slot *memslot, - gfn_t offset, unsigned long mask) -{ - kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); + /* + * Note, cpu_dirty_logging_count can be changed concurrent with this + * code, but in that case another update request will be made and so + * the guest will never run with a stale PML value. 
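 *
 * The producer side lives in common x86 code and is not part of these
 * hunks; presumably it is shaped like the sketch below, with the MMU
 * adjusting the count and kicking every vCPU via a request:
 *
 *	static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm,
 *						     bool enable)
 *	{
 *		struct kvm_arch *ka = &kvm->arch;
 *
 *		if (!kvm_x86_ops.cpu_dirty_log_size)
 *			return;
 *
 *		if ((enable && ++ka->cpu_dirty_logging_count == 1) ||
 *		    (!enable && --ka->cpu_dirty_logging_count == 0))
 *			kvm_make_all_cpus_request(kvm,
 *					KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
 *	}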
+ */ + if (vcpu->kvm->arch.cpu_dirty_logging_count) + secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML); + else + secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML); } static int vmx_pre_block(struct kvm_vcpu *vcpu) @@ -7546,7 +7587,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) return 0; } -static void enable_smi_window(struct kvm_vcpu *vcpu) +static void vmx_enable_smi_window(struct kvm_vcpu *vcpu) { /* RSM will cause a vmexit anyway. */ } @@ -7582,11 +7623,6 @@ static bool vmx_check_apicv_inhibit_reasons(ulong bit) return supported & BIT(bit); } -static int vmx_cpu_dirty_log_size(void) -{ - return enable_pml ? PML_ENTITY_NUM : 0; -} - static struct kvm_x86_ops vmx_x86_ops __initdata = { .hardware_unsetup = hardware_unsetup, @@ -7606,7 +7642,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .vcpu_load = vmx_vcpu_load, .vcpu_put = vmx_vcpu_put, - .update_exception_bitmap = update_exception_bitmap, + .update_exception_bitmap = vmx_update_exception_bitmap, .get_msr_feature = vmx_get_msr_feature, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, @@ -7649,9 +7685,9 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .nmi_allowed = vmx_nmi_allowed, .get_nmi_mask = vmx_get_nmi_mask, .set_nmi_mask = vmx_set_nmi_mask, - .enable_nmi_window = enable_nmi_window, - .enable_irq_window = enable_irq_window, - .update_cr8_intercept = update_cr8_intercept, + .enable_nmi_window = vmx_enable_nmi_window, + .enable_irq_window = vmx_enable_irq_window, + .update_cr8_intercept = vmx_update_cr8_intercept, .set_virtual_apic_mode = vmx_set_virtual_apic_mode, .set_apic_access_page_addr = vmx_set_apic_access_page_addr, .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, @@ -7686,10 +7722,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .sched_in = vmx_sched_in, - .slot_enable_log_dirty = vmx_slot_enable_log_dirty, - .slot_disable_log_dirty = vmx_slot_disable_log_dirty, - .flush_log_dirty = vmx_flush_log_dirty, - .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked, + .cpu_dirty_log_size = PML_ENTITY_NUM, + .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging, .pre_block = vmx_pre_block, .post_block = vmx_post_block, @@ -7709,7 +7743,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .smi_allowed = vmx_smi_allowed, .pre_enter_smm = vmx_pre_enter_smm, .pre_leave_smm = vmx_pre_leave_smm, - .enable_smi_window = enable_smi_window, + .enable_smi_window = vmx_enable_smi_window, .can_emulate_instruction = vmx_can_emulate_instruction, .apic_init_signal_blocked = vmx_apic_init_signal_blocked, @@ -7717,7 +7751,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .msr_filter_changed = vmx_msr_filter_changed, .complete_emulated_msr = kvm_complete_insn_gp, - .cpu_dirty_log_size = vmx_cpu_dirty_log_size, .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector, }; @@ -7810,6 +7843,8 @@ static __init int hardware_setup(void) kvm_tsc_scaling_ratio_frac_bits = 48; } + kvm_has_bus_lock_exit = cpu_has_vmx_bus_lock_detection(); + set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ if (enable_ept) @@ -7832,13 +7867,8 @@ static __init int hardware_setup(void) if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml()) enable_pml = 0; - if (!enable_pml) { - vmx_x86_ops.slot_enable_log_dirty = NULL; - vmx_x86_ops.slot_disable_log_dirty = NULL; - vmx_x86_ops.flush_log_dirty = NULL; - vmx_x86_ops.enable_log_dirty_pt_masked = NULL; - vmx_x86_ops.cpu_dirty_log_size = NULL; - } + if (!enable_pml) + vmx_x86_ops.cpu_dirty_log_size = 0; 
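/*
 * With the per-slot dirty-log hooks gone, cpu_dirty_log_size is plain
 * data: zero disables PML-based dirty logging, and common code can test
 * it without an indirect call. A sketch of the assumed consumer-side
 * helper in common x86 code:
 *
 *	int kvm_cpu_dirty_log_size(void)
 *	{
 *		return kvm_x86_ops.cpu_dirty_log_size;
 *	}
 */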
if (!cpu_has_vmx_preemption_timer()) enable_preemption_timer = false; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 9d3a557949ac..89da5e1251f1 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -70,6 +70,54 @@ struct pt_desc { struct pt_ctx guest; }; +union vmx_exit_reason { + struct { + u32 basic : 16; + u32 reserved16 : 1; + u32 reserved17 : 1; + u32 reserved18 : 1; + u32 reserved19 : 1; + u32 reserved20 : 1; + u32 reserved21 : 1; + u32 reserved22 : 1; + u32 reserved23 : 1; + u32 reserved24 : 1; + u32 reserved25 : 1; + u32 bus_lock_detected : 1; + u32 enclave_mode : 1; + u32 smi_pending_mtf : 1; + u32 smi_from_vmx_root : 1; + u32 reserved30 : 1; + u32 failed_vmentry : 1; + }; + u32 full; +}; + +#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc) +#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records) + +bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu); +bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu); + +int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu); +void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu); + +struct lbr_desc { + /* Basic info about guest LBR records. */ + struct x86_pmu_lbr records; + + /* + * Emulate LBR feature via passthrough LBR registers when the + * per-vcpu guest LBR event is scheduled on the current pcpu. + * + * The records may be inaccurate if the host reclaims the LBR. + */ + struct perf_event *event; + + /* True if LBRs are marked as not intercepted in the MSR bitmap */ + bool msr_passthrough; +}; + /* * The nested_vmx structure is part of vcpu_vmx, and holds information we need * for correct emulation of VMX (i.e., nested VMX) on this vcpu. @@ -117,6 +165,7 @@ struct nested_vmx { bool change_vmcs01_virtual_apic_mode; bool reload_vmcs01_apic_access_page; + bool update_vmcs01_cpu_dirty_logging; /* * Enlightened VMCS has been enabled. 
It does not mean that L1 has to @@ -244,7 +293,7 @@ struct vcpu_vmx { int vpid; bool emulation_required; - u32 exit_reason; + union vmx_exit_reason exit_reason; /* Posted interrupt descriptor */ struct pi_desc pi_desc; @@ -279,6 +328,7 @@ struct vcpu_vmx { u64 ept_pointer; struct pt_desc pt_desc; + struct lbr_desc lbr_desc; /* Save desired MSR intercept (read: pass-through) state */ #define MAX_POSSIBLE_PASSTHROUGH_MSRS 13 @@ -329,7 +379,7 @@ void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa, int root_level); -void update_exception_bitmap(struct kvm_vcpu *vcpu); +void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu); void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); bool vmx_nmi_blocked(struct kvm_vcpu *vcpu); bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu); @@ -339,8 +389,12 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu); struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr); void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu); void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp); +bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr); void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu); +void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, + u32 msr, int type, bool value); +void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu); static inline u8 vmx_get_rvi(void) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b967c1c774a1..2a20ce60152e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -29,6 +29,7 @@ #include "pmu.h" #include "hyperv.h" #include "lapic.h" +#include "xen.h" #include <linux/clocksource.h> #include <linux/interrupt.h> @@ -114,11 +115,21 @@ static int sync_regs(struct kvm_vcpu *vcpu); struct kvm_x86_ops kvm_x86_ops __read_mostly; EXPORT_SYMBOL_GPL(kvm_x86_ops); +#define KVM_X86_OP(func) \ + DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \ + *(((struct kvm_x86_ops *)0)->func)); +#define KVM_X86_OP_NULL KVM_X86_OP +#include <asm/kvm-x86-ops.h> +EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits); +EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg); +EXPORT_STATIC_CALL_GPL(kvm_x86_tlb_flush_current); + static bool __read_mostly ignore_msrs = 0; module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); -static bool __read_mostly report_ignored_msrs = true; +bool __read_mostly report_ignored_msrs = true; module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR); +EXPORT_SYMBOL_GPL(report_ignored_msrs); unsigned int min_timer_period_us = 200; module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); @@ -136,6 +147,8 @@ u64 __read_mostly kvm_max_tsc_scaling_ratio; EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio); u64 __read_mostly kvm_default_tsc_scaling_ratio; EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio); +bool __read_mostly kvm_has_bus_lock_exit; +EXPORT_SYMBOL_GPL(kvm_has_bus_lock_exit); /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ static u32 __read_mostly tsc_tolerance_ppm = 250; @@ -234,7 +247,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns), VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped), VM_STAT("mmu_pte_write", mmu_pte_write), - VM_STAT("mmu_pte_updated", mmu_pte_updated), VM_STAT("mmu_pde_zapped", mmu_pde_zapped), VM_STAT("mmu_flooded", mmu_flooded), VM_STAT("mmu_recycled", mmu_recycled), @@ -395,7 +407,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, 
struct msr_data *msr_info) { enum lapic_mode old_mode = kvm_get_apic_mode(vcpu); enum lapic_mode new_mode = kvm_apic_mode(msr_info->data); - u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff | + u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff | (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE); if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID) @@ -484,19 +496,24 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu) */ vcpu->arch.dr6 &= ~DR_TRAP_BITS; /* - * DR6.RTM is set by all #DB exceptions that don't clear it. + * In order to reflect the #DB exception payload into the guest's + * DR6 value, three classes of bits must be considered: active-low + * bits, the FIXED_1 bits, and active-high bits (e.g. DR6_BD, + * DR6_BS and DR6_BT). + * DR6_ACTIVE_LOW contains the FIXED_1 and active-low bits. + * In the resulting guest DR6: + * FIXED_1 bits are always set. + * Active-low bits are cleared when set to 1 in the payload. + * Active-high bits are set when set to 1 in the payload. + * + * Note, the payload follows the same convention as the pending debug + * exceptions field and the exit qualification under VMX, in which + * active-low bits are represented as active-high; they must therefore + * be flipped before being merged into DR6. */ - vcpu->arch.dr6 |= DR6_RTM; + vcpu->arch.dr6 |= DR6_ACTIVE_LOW; vcpu->arch.dr6 |= payload; - /* - * Bit 16 should be set in the payload whenever the #DB - * exception should clear DR6.RTM. This makes the payload - * compatible with the pending debug exceptions under VMX. - * Though not currently documented in the SDM, this also - * makes the payload compatible with the exit qualification - * for #DB exceptions under VMX. - */ - vcpu->arch.dr6 ^= payload & DR6_RTM; + vcpu->arch.dr6 ^= payload & DR6_ACTIVE_LOW; /* * The #DB payload is defined as compatible with the 'pending @@ -692,7 +709,7 @@ EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); */ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) { - if (kvm_x86_ops.get_cpl(vcpu) <= required_cpl) + if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl) return true; kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return false; } @@ -742,8 +759,7 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) { - return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) | - rsvd_bits(1, 2); + return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2); } /* @@ -852,7 +868,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!is_pae(vcpu)) return 1; - kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); + static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); if (cs_l) return 1; } @@ -865,7 +881,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) return 1; - kvm_x86_ops.set_cr0(vcpu, cr0); + static_call(kvm_x86_set_cr0)(vcpu, cr0); kvm_post_set_cr0(vcpu, old_cr0, cr0); @@ -970,12 +986,10 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { - if (kvm_x86_ops.get_cpl(vcpu) != 0 || - __kvm_set_xcr(vcpu, index, xcr)) { - kvm_inject_gp(vcpu, 0); - return 1; - } - return 0; + if (static_call(kvm_x86_get_cpl)(vcpu) == 0) + return __kvm_set_xcr(vcpu, index, xcr); + + return 1; } EXPORT_SYMBOL_GPL(kvm_set_xcr); @@ -987,7 +1001,7 @@ bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) return false; - return
kvm_x86_ops.is_valid_cr4(vcpu, cr4); + return static_call(kvm_x86_is_valid_cr4)(vcpu, cr4); } EXPORT_SYMBOL_GPL(kvm_is_valid_cr4); @@ -1031,7 +1045,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) return 1; } - kvm_x86_ops.set_cr4(vcpu, cr4); + static_call(kvm_x86_set_cr4)(vcpu, cr4); kvm_post_set_cr4(vcpu, old_cr4, cr4); @@ -1059,8 +1073,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) return 0; } - if (is_long_mode(vcpu) && - (cr3 & vcpu->arch.cr3_lm_rsvd_bits)) + if (is_long_mode(vcpu) && kvm_vcpu_is_illegal_gpa(vcpu, cr3)) return 1; else if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) @@ -1114,7 +1127,7 @@ void kvm_update_dr7(struct kvm_vcpu *vcpu) dr7 = vcpu->arch.guest_debug_dr7; else dr7 = vcpu->arch.dr7; - kvm_x86_ops.set_dr7(vcpu, dr7); + static_call(kvm_x86_set_dr7)(vcpu, dr7); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; if (dr7 & DR7_BP_EN_MASK) vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; @@ -1130,7 +1143,7 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) return fixed; } -static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) +int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { size_t size = ARRAY_SIZE(vcpu->arch.db); @@ -1143,13 +1156,13 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) case 4: case 6: if (!kvm_dr6_valid(val)) - return -1; /* #GP */ + return 1; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); break; case 5: default: /* 7 */ if (!kvm_dr7_valid(val)) - return -1; /* #GP */ + return 1; /* #GP */ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; kvm_update_dr7(vcpu); break; @@ -1157,18 +1170,9 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) return 0; } - -int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) -{ - if (__kvm_set_dr(vcpu, dr, val)) { - kvm_inject_gp(vcpu, 0); - return 1; - } - return 0; -} EXPORT_SYMBOL_GPL(kvm_set_dr); -int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) +void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { size_t size = ARRAY_SIZE(vcpu->arch.db); @@ -1185,7 +1189,6 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) *val = vcpu->arch.dr7; break; } - return 0; } EXPORT_SYMBOL_GPL(kvm_get_dr); @@ -1426,7 +1429,7 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr) rdmsrl_safe(msr->index, &msr->data); break; default: - return kvm_x86_ops.get_msr_feature(msr); + return static_call(kvm_x86_get_msr_feature)(msr); } return 0; } @@ -1502,7 +1505,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; - r = kvm_x86_ops.set_efer(vcpu, efer); + r = static_call(kvm_x86_set_efer)(vcpu, efer); if (r) { WARN_ON(r > 0); return r; @@ -1599,7 +1602,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, msr.index = index; msr.host_initiated = host_initiated; - return kvm_x86_ops.set_msr(vcpu, &msr); + return static_call(kvm_x86_set_msr)(vcpu, &msr); } static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, @@ -1632,7 +1635,7 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, msr.index = index; msr.host_initiated = host_initiated; - ret = kvm_x86_ops.get_msr(vcpu, &msr); + ret = static_call(kvm_x86_get_msr)(vcpu, &msr); if (!ret) *data = msr.data; return ret; @@ -1673,12 +1676,12 @@ static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); } - 
return kvm_x86_ops.complete_emulated_msr(vcpu, err); + return static_call(kvm_x86_complete_emulated_msr)(vcpu, err); } static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu) { - return kvm_x86_ops.complete_emulated_msr(vcpu, vcpu->run->msr.error); + return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); } static u64 kvm_msr_reason(int r) @@ -1750,7 +1753,7 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) trace_kvm_msr_read_ex(ecx); } - return kvm_x86_ops.complete_emulated_msr(vcpu, r); + return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); } EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr); @@ -1776,17 +1779,16 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) else trace_kvm_msr_write_ex(ecx, data); - return kvm_x86_ops.complete_emulated_msr(vcpu, r); + return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); } EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); -bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) +static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) { xfer_to_guest_mode_prepare(); return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending(); } -EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request); /* * The fast path for frequent and performance sensitive wrmsr emulation, @@ -1936,15 +1938,14 @@ static s64 get_kvmclock_base_ns(void) } #endif -static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) +void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs) { int version; int r; struct pvclock_wall_clock wc; + u32 wc_sec_hi; u64 wall_nsec; - kvm->arch.wall_clock = wall_clock; - if (!wall_clock) return; @@ -1973,6 +1974,12 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); + if (sec_hi_ofs) { + wc_sec_hi = wall_nsec >> 32; + kvm_write_guest(kvm, wall_clock + sec_hi_ofs, + &wc_sec_hi, sizeof(wc_sec_hi)); + } + version++; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); } @@ -2209,7 +2216,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { vcpu->arch.l1_tsc_offset = offset; - vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset); + vcpu->arch.tsc_offset = static_call(kvm_x86_write_l1_tsc_offset)(vcpu, offset); } static inline bool kvm_check_tsc_unstable(void) @@ -2592,13 +2599,15 @@ u64 get_kvmclock_ns(struct kvm *kvm) return ret; } -static void kvm_setup_pvclock_page(struct kvm_vcpu *v) +static void kvm_setup_pvclock_page(struct kvm_vcpu *v, + struct gfn_to_hva_cache *cache, + unsigned int offset) { struct kvm_vcpu_arch *vcpu = &v->arch; struct pvclock_vcpu_time_info guest_hv_clock; - if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, - &guest_hv_clock, sizeof(guest_hv_clock)))) + if (unlikely(kvm_read_guest_offset_cached(v->kvm, cache, + &guest_hv_clock, offset, sizeof(guest_hv_clock)))) return; /* This VCPU is paused, but it's legal for a guest to read another @@ -2621,9 +2630,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v) ++guest_hv_clock.version; /* first time write, random junk */ vcpu->hv_clock.version = guest_hv_clock.version + 1; - kvm_write_guest_cached(v->kvm, &vcpu->pv_time, - &vcpu->hv_clock, - sizeof(vcpu->hv_clock.version)); + kvm_write_guest_offset_cached(v->kvm, cache, + &vcpu->hv_clock, offset, + sizeof(vcpu->hv_clock.version)); smp_wmb(); @@ -2637,16 +2646,16 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v) trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); - 
kvm_write_guest_cached(v->kvm, &vcpu->pv_time, - &vcpu->hv_clock, - sizeof(vcpu->hv_clock)); + kvm_write_guest_offset_cached(v->kvm, cache, + &vcpu->hv_clock, offset, + sizeof(vcpu->hv_clock)); smp_wmb(); vcpu->hv_clock.version++; - kvm_write_guest_cached(v->kvm, &vcpu->pv_time, - &vcpu->hv_clock, - sizeof(vcpu->hv_clock.version)); + kvm_write_guest_offset_cached(v->kvm, cache, + &vcpu->hv_clock, offset, + sizeof(vcpu->hv_clock.version)); } static int kvm_guest_time_update(struct kvm_vcpu *v) @@ -2733,7 +2742,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) vcpu->hv_clock.flags = pvclock_flags; if (vcpu->pv_time_enabled) - kvm_setup_pvclock_page(v); + kvm_setup_pvclock_page(v, &vcpu->pv_time, 0); + if (vcpu->xen.vcpu_info_set) + kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache, + offsetof(struct compat_vcpu_info, time)); + if (vcpu->xen.vcpu_time_info_set) + kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0); if (v == kvm_get_vcpu(v->kvm, 0)) kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); return 0; @@ -2858,32 +2872,6 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 0; } -static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) -{ - struct kvm *kvm = vcpu->kvm; - int lm = is_long_mode(vcpu); - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; - u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 - : kvm->arch.xen_hvm_config.blob_size_32; - u32 page_num = data & ~PAGE_MASK; - u64 page_addr = data & PAGE_MASK; - u8 *page; - - if (page_num >= blob_size) - return 1; - - page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE); - if (IS_ERR(page)) - return PTR_ERR(page); - - if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) { - kfree(page); - return 1; - } - return 0; -} - static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) { u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT; @@ -2955,13 +2943,13 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu) static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) { ++vcpu->stat.tlb_flush; - kvm_x86_ops.tlb_flush_all(vcpu); + static_call(kvm_x86_tlb_flush_all)(vcpu); } static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) { ++vcpu->stat.tlb_flush; - kvm_x86_ops.tlb_flush_guest(vcpu); + static_call(kvm_x86_tlb_flush_guest)(vcpu); } static void record_steal_time(struct kvm_vcpu *vcpu) @@ -2969,6 +2957,11 @@ static void record_steal_time(struct kvm_vcpu *vcpu) struct kvm_host_map map; struct kvm_steal_time *st; + if (kvm_xen_msr_enabled(vcpu->kvm)) { + kvm_xen_runstate_set_running(vcpu); + return; + } + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; @@ -3017,6 +3010,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) u32 msr = msr_info->index; u64 data = msr_info->data; + if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) + return kvm_xen_write_hypercall_page(vcpu, data); + switch (msr) { case MSR_AMD64_NB_CFG: case MSR_IA32_UCODE_WRITE: @@ -3073,18 +3069,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; } break; - case MSR_IA32_DEBUGCTLMSR: - if (!data) { - /* We support the non-activated case already */ - break; - } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) { - /* Values other than LBR and BTF are vendor-specific, - thus reserved and should throw a #GP */ - return 1; - } else if (report_ignored_msrs) - vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", - __func__, 
data); - break; case 0x200 ... 0x2ff: return kvm_mtrr_set_msr(vcpu, msr, data); case MSR_IA32_APICBASE: @@ -3153,13 +3137,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) return 1; - kvm_write_wall_clock(vcpu->kvm, data); + vcpu->kvm->arch.wall_clock = data; + kvm_write_wall_clock(vcpu->kvm, data, 0); break; case MSR_KVM_WALL_CLOCK: if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) return 1; - kvm_write_wall_clock(vcpu->kvm, data); + vcpu->kvm->arch.wall_clock = data; + kvm_write_wall_clock(vcpu->kvm, data, 0); break; case MSR_KVM_SYSTEM_TIME_NEW: if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) @@ -3304,8 +3290,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vcpu->arch.msr_misc_features_enables = data; break; default: - if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) - return xen_hvm_config(vcpu, data); if (kvm_pmu_is_valid_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); return KVM_MSR_RET_INVALID; @@ -3357,7 +3341,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr_info->index) { case MSR_IA32_PLATFORM_ID: case MSR_IA32_EBL_CR_POWERON: - case MSR_IA32_DEBUGCTLMSR: case MSR_IA32_LASTBRANCHFROMIP: case MSR_IA32_LASTBRANCHTOIP: case MSR_IA32_LASTINTFROMIP: @@ -3739,7 +3722,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_PIT2: case KVM_CAP_PIT_STATE2: case KVM_CAP_SET_IDENTITY_MAP_ADDR: - case KVM_CAP_XEN_HVM: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: @@ -3779,6 +3761,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: r = 1; break; +#ifdef CONFIG_KVM_XEN + case KVM_CAP_XEN_HVM: + r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | + KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | + KVM_XEN_HVM_CONFIG_SHARED_INFO; + if (sched_info_on()) + r |= KVM_XEN_HVM_CONFIG_RUNSTATE; + break; +#endif case KVM_CAP_SYNC_REGS: r = KVM_SYNC_X86_VALID_FIELDS; break; @@ -3800,10 +3791,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) * fringe case that is not enabled except via specific settings * of the module parameters. 
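 *
 * (All of the capability bits added in this diff are discovered the
 * same way from userspace; a sketch:
 *
 *	int xen = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XEN_HVM);
 *	bool has_shinfo = xen & KVM_XEN_HVM_CONFIG_SHARED_INFO;
 *
 *	int bus = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
 *			KVM_CAP_X86_BUS_LOCK_EXIT);
 *	bool has_bus_lock_exit = bus & KVM_BUS_LOCK_DETECTION_EXIT; )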
*/ - r = kvm_x86_ops.has_emulated_msr(kvm, MSR_IA32_SMBASE); + r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE); break; case KVM_CAP_VAPIC: - r = !kvm_x86_ops.cpu_has_accelerated_tpr(); + r = !static_call(kvm_x86_cpu_has_accelerated_tpr)(); break; case KVM_CAP_NR_VCPUS: r = KVM_SOFT_MAX_VCPUS; @@ -3845,6 +3836,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_STEAL_TIME: r = sched_info_on(); break; + case KVM_CAP_X86_BUS_LOCK_EXIT: + if (kvm_has_bus_lock_exit) + r = KVM_BUS_LOCK_DETECTION_OFF | + KVM_BUS_LOCK_DETECTION_EXIT; + else + r = 0; + break; default: break; } @@ -3962,14 +3960,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Address WBINVD may be executed by guest */ if (need_emulate_wbinvd(vcpu)) { - if (kvm_x86_ops.has_wbinvd_exit()) + if (static_call(kvm_x86_has_wbinvd_exit)()) cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); else if (vcpu->cpu != -1 && vcpu->cpu != cpu) smp_call_function_single(vcpu->cpu, wbinvd_ipi, NULL, 1); } - kvm_x86_ops.vcpu_load(vcpu, cpu); + static_call(kvm_x86_vcpu_load)(vcpu, cpu); /* Save host pkru register if supported */ vcpu->arch.host_pkru = read_pkru(); @@ -4015,6 +4013,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) { struct kvm_host_map map; struct kvm_steal_time *st; + int idx; if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; @@ -4022,9 +4021,15 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) if (vcpu->arch.st.preempted) return; + /* + * Take the srcu lock as memslots will be accessed to check the gfn + * cache generation against the memslots generation. + */ + idx = srcu_read_lock(&vcpu->kvm->srcu); + if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, &vcpu->arch.st.cache, true)) - return; + goto out; st = map.hva + offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); @@ -4032,33 +4037,22 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); + +out: + srcu_read_unlock(&vcpu->kvm->srcu, idx); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { - int idx; - if (vcpu->preempted && !vcpu->arch.guest_state_protected) - vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); + vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); - /* - * Disable page faults because we're in atomic context here. - * kvm_write_guest_offset_cached() would call might_fault() - * that relies on pagefault_disable() to tell if there's a - * bug. NOTE: the write to guest memory may not go through if - * during postcopy live migration or if there's heavy guest - * paging. - */ - pagefault_disable(); - /* - * kvm_memslots() will be called by - * kvm_write_guest_offset_cached() so take the srcu lock. 
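 *
 * (The locking described above is being deleted here only because it
 * moves: the same SRCU read-side section now sits inside
 * kvm_steal_time_set_preempted() itself, per the hunk above, in the
 * usual shape:
 *
 *	idx = srcu_read_lock(&vcpu->kvm->srcu);
 *	kvm_map_gfn(vcpu, gfn, &map, cache, true);	(may consult memslots)
 *	srcu_read_unlock(&vcpu->kvm->srcu, idx); )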
- */ - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_steal_time_set_preempted(vcpu); - srcu_read_unlock(&vcpu->kvm->srcu, idx); - pagefault_enable(); - kvm_x86_ops.vcpu_put(vcpu); + if (kvm_xen_msr_enabled(vcpu->kvm)) + kvm_xen_runstate_set_preempted(vcpu); + else + kvm_steal_time_set_preempted(vcpu); + + static_call(kvm_x86_vcpu_put)(vcpu); vcpu->arch.last_host_tsc = rdtsc(); /* * If userspace has set any breakpoints or watchpoints, dr6 is restored @@ -4072,7 +4066,7 @@ static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { if (vcpu->arch.apicv_active) - kvm_x86_ops.sync_pir_to_irr(vcpu); + static_call(kvm_x86_sync_pir_to_irr)(vcpu); return kvm_apic_get_state(vcpu, s); } @@ -4182,7 +4176,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, for (bank = 0; bank < bank_num; bank++) vcpu->arch.mce_banks[bank*4] = ~(u64)0; - kvm_x86_ops.setup_mce(vcpu); + static_call(kvm_x86_setup_mce)(vcpu); out: return r; } @@ -4289,11 +4283,11 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; - events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); + events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending != 0; - events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu); + events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); events->nmi.pad = 0; events->sipi_vector = 0; /* never valid when reporting to user space */ @@ -4360,13 +4354,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.interrupt.nr = events->interrupt.nr; vcpu->arch.interrupt.soft = events->interrupt.soft; if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) - kvm_x86_ops.set_interrupt_shadow(vcpu, - events->interrupt.shadow); + static_call(kvm_x86_set_interrupt_shadow)(vcpu, + events->interrupt.shadow); vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) vcpu->arch.nmi_pending = events->nmi.pending; - kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked); + static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && lapic_in_kernel(vcpu)) @@ -4422,9 +4416,9 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, if (dbgregs->flags) return -EINVAL; - if (dbgregs->dr6 & ~0xffffffffull) + if (!kvm_dr6_valid(dbgregs->dr6)) return -EINVAL; - if (dbgregs->dr7 & ~0xffffffffull) + if (!kvm_dr7_valid(dbgregs->dr7)) return -EINVAL; memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); @@ -4661,7 +4655,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, if (!kvm_x86_ops.enable_direct_tlbflush) return -ENOTTY; - return kvm_x86_ops.enable_direct_tlbflush(vcpu); + return static_call(kvm_x86_enable_direct_tlbflush)(vcpu); case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: vcpu->arch.pv_cpuid.enforce = cap->args[0]; @@ -5032,6 +5026,28 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_GET_SUPPORTED_HV_CPUID: r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); break; +#ifdef CONFIG_KVM_XEN + case KVM_XEN_VCPU_GET_ATTR: { + struct kvm_xen_vcpu_attr xva; + + r = -EFAULT; + if (copy_from_user(&xva, argp, sizeof(xva))) + goto out; + r = kvm_xen_vcpu_get_attr(vcpu, &xva); + if (!r && copy_to_user(argp, &xva, sizeof(xva))) + r = -EFAULT; + break; + } + case KVM_XEN_VCPU_SET_ATTR: { 
+ struct kvm_xen_vcpu_attr xva; + + r = -EFAULT; + if (copy_from_user(&xva, argp, sizeof(xva))) + goto out; + r = kvm_xen_vcpu_set_attr(vcpu, &xva); + break; + } +#endif default: r = -EINVAL; } @@ -5053,14 +5069,14 @@ static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) if (addr > (unsigned int)(-3 * PAGE_SIZE)) return -EINVAL; - ret = kvm_x86_ops.set_tss_addr(kvm, addr); + ret = static_call(kvm_x86_set_tss_addr)(kvm, addr); return ret; } static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) { - return kvm_x86_ops.set_identity_map_addr(kvm, ident_addr); + return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr); } static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, @@ -5214,11 +5230,18 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm, void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) { + /* - * Flush potentially hardware-cached dirty pages to dirty_bitmap. + * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called + * before reporting dirty_bitmap to userspace. KVM flushes the buffers + * on all VM-Exits, thus we only need to kick running vCPUs to force a + * VM-Exit. */ - if (kvm_x86_ops.flush_log_dirty) - kvm_x86_ops.flush_log_dirty(kvm); + struct kvm_vcpu *vcpu; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) + kvm_vcpu_kick(vcpu); } int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, @@ -5308,6 +5331,20 @@ split_irqchip_unlock: kvm->arch.user_space_msr_mask = cap->args[0]; r = 0; break; + case KVM_CAP_X86_BUS_LOCK_EXIT: + r = -EINVAL; + if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) + break; + + if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && + (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) + break; + + if (kvm_has_bus_lock_exit && + cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) + kvm->arch.bus_lock_detection_enabled = true; + r = 0; + break; default: r = -EINVAL; break; @@ -5632,18 +5669,36 @@ set_pit2_out: kvm->arch.bsp_vcpu_id = arg; mutex_unlock(&kvm->lock); break; +#ifdef CONFIG_KVM_XEN case KVM_XEN_HVM_CONFIG: { struct kvm_xen_hvm_config xhc; r = -EFAULT; if (copy_from_user(&xhc, argp, sizeof(xhc))) goto out; - r = -EINVAL; - if (xhc.flags) + r = kvm_xen_hvm_config(kvm, &xhc); + break; + } + case KVM_XEN_HVM_GET_ATTR: { + struct kvm_xen_hvm_attr xha; + + r = -EFAULT; + if (copy_from_user(&xha, argp, sizeof(xha))) goto out; - memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc)); - r = 0; + r = kvm_xen_hvm_get_attr(kvm, &xha); + if (!r && copy_to_user(argp, &xha, sizeof(xha))) + r = -EFAULT; + break; + } + case KVM_XEN_HVM_SET_ATTR: { + struct kvm_xen_hvm_attr xha; + + r = -EFAULT; + if (copy_from_user(&xha, argp, sizeof(xha))) + goto out; + r = kvm_xen_hvm_set_attr(kvm, &xha); break; } +#endif case KVM_SET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; @@ -5686,7 +5741,7 @@ set_pit2_out: case KVM_MEMORY_ENCRYPT_OP: { r = -ENOTTY; if (kvm_x86_ops.mem_enc_op) - r = kvm_x86_ops.mem_enc_op(kvm, argp); + r = static_call(kvm_x86_mem_enc_op)(kvm, argp); break; } case KVM_MEMORY_ENCRYPT_REG_REGION: { @@ -5698,7 +5753,7 @@ set_pit2_out: r = -ENOTTY; if (kvm_x86_ops.mem_enc_reg_region) - r = kvm_x86_ops.mem_enc_reg_region(kvm, ®ion); + r = static_call(kvm_x86_mem_enc_reg_region)(kvm, ®ion); break; } case KVM_MEMORY_ENCRYPT_UNREG_REGION: { @@ -5710,7 +5765,7 @@ set_pit2_out: r = -ENOTTY; if (kvm_x86_ops.mem_enc_unreg_region) - r = kvm_x86_ops.mem_enc_unreg_region(kvm, ®ion); + r = static_call(kvm_x86_mem_enc_unreg_region)(kvm, ®ion); break; } case 
KVM_HYPERV_EVENTFD: { @@ -5812,7 +5867,7 @@ static void kvm_init_msr_list(void) } for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) { - if (!kvm_x86_ops.has_emulated_msr(NULL, emulated_msrs_all[i])) + if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i])) continue; emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i]; @@ -5875,13 +5930,13 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) static void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { - kvm_x86_ops.set_segment(vcpu, var, seg); + static_call(kvm_x86_set_segment)(vcpu, var, seg); } void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { - kvm_x86_ops.get_segment(vcpu, var, seg); + static_call(kvm_x86_get_segment)(vcpu, var, seg); } gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, @@ -5901,14 +5956,14 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_FETCH_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } @@ -5916,7 +5971,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } @@ -5965,7 +6020,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; unsigned offset; int ret; @@ -5990,7 +6045,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? 
PFERR_USER_MASK : 0; /* * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED @@ -6011,7 +6066,7 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt, struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = 0; - if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) + if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) access |= PFERR_USER_MASK; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); @@ -6064,7 +6119,7 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = PFERR_WRITE_MASK; - if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) + if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) access |= PFERR_USER_MASK; return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, @@ -6089,7 +6144,7 @@ int handle_ud(struct kvm_vcpu *vcpu) char sig[5]; /* ud2; .ascii "kvm" */ struct x86_exception e; - if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, NULL, 0))) + if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, NULL, 0))) return 1; if (force_emulation_prefix && @@ -6123,7 +6178,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, gpa_t *gpa, struct x86_exception *exception, bool write) { - u32 access = ((kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) + u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) | (write ? PFERR_WRITE_MASK : 0); /* @@ -6531,7 +6586,7 @@ static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) { - return kvm_x86_ops.get_segment_base(vcpu, seg); + return static_call(kvm_x86_get_segment_base)(vcpu, seg); } static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) @@ -6544,7 +6599,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) if (!need_emulate_wbinvd(vcpu)) return X86EMUL_CONTINUE; - if (kvm_x86_ops.has_wbinvd_exit()) { + if (static_call(kvm_x86_has_wbinvd_exit)()) { int cpu = get_cpu(); cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); @@ -6571,17 +6626,17 @@ static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); } -static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, - unsigned long *dest) +static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, + unsigned long *dest) { - return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); + kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); } static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) { - return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); + return kvm_set_dr(emul_to_vcpu(ctxt), dr, value); } static u64 mk_cr_64(u64 curr_cr, u32 new_val) @@ -6649,27 +6704,27 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) { - return kvm_x86_ops.get_cpl(emul_to_vcpu(ctxt)); + return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt)); } static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { - kvm_x86_ops.get_gdt(emul_to_vcpu(ctxt), dt); + static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt); } static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { - kvm_x86_ops.get_idt(emul_to_vcpu(ctxt), dt); + static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt); } static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { - kvm_x86_ops.set_gdt(emul_to_vcpu(ctxt), dt); + 
static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt); } static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { - kvm_x86_ops.set_idt(emul_to_vcpu(ctxt), dt); + static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt); } static unsigned long emulator_get_cached_segment_base( @@ -6811,7 +6866,7 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt, struct x86_instruction_info *info, enum x86_intercept_stage stage) { - return kvm_x86_ops.check_intercept(emul_to_vcpu(ctxt), info, stage, + return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage, &ctxt->exception); } @@ -6849,7 +6904,7 @@ static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulon static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) { - kvm_x86_ops.set_nmi_mask(emul_to_vcpu(ctxt), masked); + static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked); } static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) @@ -6865,7 +6920,7 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, const char *smstate) { - return kvm_x86_ops.pre_leave_smm(emul_to_vcpu(ctxt), smstate); + return static_call(kvm_x86_pre_leave_smm)(emul_to_vcpu(ctxt), smstate); } static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt) @@ -6927,7 +6982,7 @@ static const struct x86_emulate_ops emulate_ops = { static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) { - u32 int_shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); + u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); /* * an sti; sti; sequence only disable interrupts for the first * instruction. So, if the last instruction, be it emulated or @@ -6938,7 +6993,7 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) if (int_shadow & mask) mask = 0; if (unlikely(int_shadow || mask)) { - kvm_x86_ops.set_interrupt_shadow(vcpu, mask); + static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask); if (!mask) kvm_make_request(KVM_REQ_EVENT, vcpu); } @@ -6980,7 +7035,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; int cs_db, cs_l; - kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); + static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); ctxt->gpa_available = false; ctxt->eflags = kvm_get_rflags(vcpu); @@ -7041,7 +7096,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) kvm_queue_exception(vcpu, UD_VECTOR); - if (!is_guest_mode(vcpu) && kvm_x86_ops.get_cpl(vcpu) == 0) { + if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; @@ -7101,9 +7156,9 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, if (vcpu->arch.mmu->direct_map) { unsigned int indirect_shadow_pages; - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); if (indirect_shadow_pages) kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); @@ -7210,7 +7265,7 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) struct kvm_run *kvm_run = vcpu->run; if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { - kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; + 
kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW; kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; @@ -7222,10 +7277,10 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) { - unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); + unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); int r; - r = kvm_x86_ops.skip_emulated_instruction(vcpu); + r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); if (unlikely(!r)) return 0; @@ -7254,7 +7309,7 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) vcpu->arch.eff_db); if (dr6 != 0) { - kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; + kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; kvm_run->debug.arch.pc = eip; kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; @@ -7311,6 +7366,42 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) return false; } +/* + * Decode the instruction to be emulated. Returns EMULATION_OK on success. + */ +int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, + void *insn, int insn_len) +{ + int r = EMULATION_OK; + struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; + + init_emulate_ctxt(vcpu); + + /* + * We will reenter on the same instruction since we do not set + * complete_userspace_io. This does not handle watchpoints yet, + * those would be handled in the emulate_ops. + */ + if (!(emulation_type & EMULTYPE_SKIP) && + kvm_vcpu_check_breakpoint(vcpu, &r)) + return r; + + ctxt->interruptibility = 0; + ctxt->have_exception = false; + ctxt->exception.vector = -1; + ctxt->perm_ok = false; + + ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; + + r = x86_decode_insn(ctxt, insn, insn_len); + + trace_kvm_emulate_insn_start(vcpu); + ++vcpu->stat.insn_emulation; + + return r; +} +EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction); + int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int emulation_type, void *insn, int insn_len) { @@ -7319,7 +7410,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, bool writeback = true; bool write_fault_to_spt; - if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, insn, insn_len))) + if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, insn, insn_len))) return 1; vcpu->arch.l1tf_flush_l1d = true; @@ -7330,32 +7421,12 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, */ write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; vcpu->arch.write_fault_to_shadow_pgtable = false; - kvm_clear_exception_queue(vcpu); if (!(emulation_type & EMULTYPE_NO_DECODE)) { - init_emulate_ctxt(vcpu); - - /* - * We will reenter on the same instruction since - * we do not set complete_userspace_io. This does not - * handle watchpoints yet, those would be handled in - * the emulate_ops.
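 *
 * (This block is not lost; it moves, essentially verbatim, into
 * x86_decode_emulated_instruction() above. The point of the split is
 * to let callers decode first and emulate conditionally; a hypothetical
 * vendor-side user, where the modrm_reg check is invented purely for
 * illustration:
 *
 *	if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
 *		return 1;
 *	if (vcpu->arch.emulate_ctxt->modrm_reg == expected_reg)
 *		return x86_emulate_instruction(vcpu, 0, EMULTYPE_NO_DECODE,
 *					       NULL, 0); )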
- */ - if (!(emulation_type & EMULTYPE_SKIP) && - kvm_vcpu_check_breakpoint(vcpu, &r)) - return r; - - ctxt->interruptibility = 0; - ctxt->have_exception = false; - ctxt->exception.vector = -1; - ctxt->perm_ok = false; + kvm_clear_exception_queue(vcpu); - ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; - - r = x86_decode_insn(ctxt, insn, insn_len); - - trace_kvm_emulate_insn_start(vcpu); - ++vcpu->stat.insn_emulation; + r = x86_decode_emulated_instruction(vcpu, emulation_type, + insn, insn_len); if (r != EMULATION_OK) { if ((emulation_type & EMULTYPE_TRAP_UD) || (emulation_type & EMULTYPE_TRAP_UD_FORCED)) { @@ -7462,7 +7533,7 @@ restart: r = 1; if (writeback) { - unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); + unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; if (!ctxt->have_exception || @@ -7471,7 +7542,7 @@ restart: if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) r = kvm_vcpu_do_singlestep(vcpu); if (kvm_x86_ops.update_emulated_instruction) - kvm_x86_ops.update_emulated_instruction(vcpu); + static_call(kvm_x86_update_emulated_instruction)(vcpu); __kvm_set_rflags(vcpu, ctxt->eflags); } @@ -7800,7 +7871,7 @@ static int kvm_is_user_mode(void) int user_mode = 3; if (__this_cpu_read(current_vcpu)) - user_mode = kvm_x86_ops.get_cpl(__this_cpu_read(current_vcpu)); + user_mode = static_call(kvm_x86_get_cpl)(__this_cpu_read(current_vcpu)); return user_mode != 0; } @@ -7945,7 +8016,6 @@ int kvm_arch_init(void *opaque) supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0; } - kvm_lapic_init(); if (pi_inject_timer == -1) pi_inject_timer = housekeeping_enabled(HK_FLAG_TIMER); #ifdef CONFIG_X86_64 @@ -7987,6 +8057,10 @@ void kvm_arch_exit(void) kvm_mmu_module_exit(); free_percpu(user_return_msrs); kmem_cache_destroy(x86_fpu_cache); +#ifdef CONFIG_KVM_XEN + static_key_deferred_flush(&kvm_xen_enabled); + WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key)); +#endif } static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason) @@ -8038,7 +8112,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK) return -KVM_EOPNOTSUPP; - if (kvm_get_walltime_and_clockread(&ts, &cycle) == false) + if (!kvm_get_walltime_and_clockread(&ts, &cycle)) return -KVM_EOPNOTSUPP; clock_pairing.sec = ts.tv_sec; @@ -8114,7 +8188,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) unsigned long nr, a0, a1, a2, a3, ret; int op_64_bit; - if (kvm_hv_hypercall_enabled(vcpu->kvm)) + if (kvm_xen_hypercall_enabled(vcpu->kvm)) + return kvm_xen_hypercall(vcpu); + + if (kvm_hv_hypercall_enabled(vcpu)) return kvm_hv_hypercall(vcpu); nr = kvm_rax_read(vcpu); @@ -8134,7 +8211,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) a3 &= 0xFFFFFFFF; } - if (kvm_x86_ops.get_cpl(vcpu) != 0) { + if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { ret = -KVM_EPERM; goto out; } @@ -8191,7 +8268,7 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) char instruction[3]; unsigned long rip = kvm_rip_read(vcpu); - kvm_x86_ops.patch_hypercall(vcpu, instruction); + static_call(kvm_x86_patch_hypercall)(vcpu, instruction); return emulator_write_emulated(ctxt, rip, instruction, 3, &ctxt->exception); @@ -8215,12 +8292,14 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) kvm_run->if_flag = !vcpu->arch.guest_state_protected && (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; - kvm_run->flags = is_smm(vcpu) ? 
KVM_RUN_X86_SMM : 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); kvm_run->ready_for_interrupt_injection = pic_in_kernel(vcpu->kvm) || kvm_vcpu_ready_for_interrupt_injection(vcpu); + + if (is_smm(vcpu)) + kvm_run->flags |= KVM_RUN_X86_SMM; } static void update_cr8_intercept(struct kvm_vcpu *vcpu) @@ -8246,7 +8325,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) tpr = kvm_lapic_get_cr8(vcpu); - kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr); + static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); } static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit) @@ -8257,7 +8336,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit /* try to reinject previous events if any */ if (vcpu->arch.exception.injected) { - kvm_x86_ops.queue_exception(vcpu); + static_call(kvm_x86_queue_exception)(vcpu); can_inject = false; } /* @@ -8276,10 +8355,10 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit */ else if (!vcpu->arch.exception.pending) { if (vcpu->arch.nmi_injected) { - kvm_x86_ops.set_nmi(vcpu); + static_call(kvm_x86_set_nmi)(vcpu); can_inject = false; } else if (vcpu->arch.interrupt.injected) { - kvm_x86_ops.set_irq(vcpu); + static_call(kvm_x86_set_irq)(vcpu); can_inject = false; } } @@ -8320,7 +8399,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit } } - kvm_x86_ops.queue_exception(vcpu); + static_call(kvm_x86_queue_exception)(vcpu); can_inject = false; } @@ -8336,7 +8415,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit * The kvm_x86_ops hooks communicate this by returning -EBUSY. */ if (vcpu->arch.smi_pending) { - r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY; + r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; if (r < 0) goto busy; if (r) { @@ -8345,35 +8424,35 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit enter_smm(vcpu); can_inject = false; } else - kvm_x86_ops.enable_smi_window(vcpu); + static_call(kvm_x86_enable_smi_window)(vcpu); } if (vcpu->arch.nmi_pending) { - r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY; + r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; if (r < 0) goto busy; if (r) { --vcpu->arch.nmi_pending; vcpu->arch.nmi_injected = true; - kvm_x86_ops.set_nmi(vcpu); + static_call(kvm_x86_set_nmi)(vcpu); can_inject = false; - WARN_ON(kvm_x86_ops.nmi_allowed(vcpu, true) < 0); + WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); } if (vcpu->arch.nmi_pending) - kvm_x86_ops.enable_nmi_window(vcpu); + static_call(kvm_x86_enable_nmi_window)(vcpu); } if (kvm_cpu_has_injectable_intr(vcpu)) { - r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY; + r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; if (r < 0) goto busy; if (r) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); - kvm_x86_ops.set_irq(vcpu); - WARN_ON(kvm_x86_ops.interrupt_allowed(vcpu, true) < 0); + static_call(kvm_x86_set_irq)(vcpu); + WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); } if (kvm_cpu_has_injectable_intr(vcpu)) - kvm_x86_ops.enable_irq_window(vcpu); + static_call(kvm_x86_enable_irq_window)(vcpu); } if (is_guest_mode(vcpu) && @@ -8398,7 +8477,7 @@ static void process_nmi(struct kvm_vcpu *vcpu) * If an NMI is already in progress, limit further NMIs to just one. 
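The clamping rule this comment describes can be shown in isolation. A user-space toy model — names and types invented, the real bookkeeping is process_nmi() here:

#include <stdatomic.h>

struct toy_vcpu {
	atomic_uint nmi_queued;		/* producers park NMIs here asynchronously */
	unsigned int nmi_pending;	/* consumed on the vCPU thread */
	_Bool nmi_in_progress;		/* masked or already injected */
};

static void toy_process_nmi(struct toy_vcpu *v)
{
	unsigned int limit = v->nmi_in_progress ? 1 : 2;

	/*
	 * Drain the async queue, then clamp: the CPU can latch at most
	 * one NMI while another one is still executing.
	 */
	v->nmi_pending += atomic_exchange(&v->nmi_queued, 0);
	if (v->nmi_pending > limit)
		v->nmi_pending = limit;
}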
* Otherwise, allow two (and we'll inject the first one immediately). */ - if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) + if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) limit = 1; vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); @@ -8488,11 +8567,11 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) put_smstate(u32, buf, 0x7f7c, seg.limit); put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg)); - kvm_x86_ops.get_gdt(vcpu, &dt); + static_call(kvm_x86_get_gdt)(vcpu, &dt); put_smstate(u32, buf, 0x7f74, dt.address); put_smstate(u32, buf, 0x7f70, dt.size); - kvm_x86_ops.get_idt(vcpu, &dt); + static_call(kvm_x86_get_idt)(vcpu, &dt); put_smstate(u32, buf, 0x7f58, dt.address); put_smstate(u32, buf, 0x7f54, dt.size); @@ -8542,7 +8621,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) put_smstate(u32, buf, 0x7e94, seg.limit); put_smstate(u64, buf, 0x7e98, seg.base); - kvm_x86_ops.get_idt(vcpu, &dt); + static_call(kvm_x86_get_idt)(vcpu, &dt); put_smstate(u32, buf, 0x7e84, dt.size); put_smstate(u64, buf, 0x7e88, dt.address); @@ -8552,7 +8631,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) put_smstate(u32, buf, 0x7e74, seg.limit); put_smstate(u64, buf, 0x7e78, seg.base); - kvm_x86_ops.get_gdt(vcpu, &dt); + static_call(kvm_x86_get_gdt)(vcpu, &dt); put_smstate(u32, buf, 0x7e64, dt.size); put_smstate(u64, buf, 0x7e68, dt.address); @@ -8582,30 +8661,30 @@ static void enter_smm(struct kvm_vcpu *vcpu) * vCPU state (e.g. leave guest mode) after we've saved the state into * the SMM state-save area. */ - kvm_x86_ops.pre_enter_smm(vcpu, buf); + static_call(kvm_x86_pre_enter_smm)(vcpu, buf); vcpu->arch.hflags |= HF_SMM_MASK; kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); - if (kvm_x86_ops.get_nmi_mask(vcpu)) + if (static_call(kvm_x86_get_nmi_mask)(vcpu)) vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; else - kvm_x86_ops.set_nmi_mask(vcpu, true); + static_call(kvm_x86_set_nmi_mask)(vcpu, true); kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); kvm_rip_write(vcpu, 0x8000); cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); - kvm_x86_ops.set_cr0(vcpu, cr0); + static_call(kvm_x86_set_cr0)(vcpu, cr0); vcpu->arch.cr0 = cr0; - kvm_x86_ops.set_cr4(vcpu, 0); + static_call(kvm_x86_set_cr4)(vcpu, 0); /* Undocumented: IDT limit is set to zero on entry to SMM. 
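The enter_smm_save_state_{32,64}() helpers above serialize segment and descriptor-table state into the SMRAM save area at architecturally fixed offsets. A toy version of that fixed-offset store pattern, reusing the 64-bit IDT offsets from the hunk (base constant and names are illustrative):

#include <stdint.h>

#define TOY_SMBASE_OFF	0x7e00	/* save area occupies 0x7e00..0x7fff */

/* Store one field at its architectural offset inside the save area. */
#define toy_put_smstate(type, buf, off, val) \
	(*(type *)((uint8_t *)(buf) + (off) - TOY_SMBASE_OFF) = (val))

static void toy_save_idt64(uint8_t *buf, uint32_t size, uint64_t address)
{
	toy_put_smstate(uint32_t, buf, 0x7e84, size);
	toy_put_smstate(uint64_t, buf, 0x7e88, address);
}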
*/ dt.address = dt.size = 0; - kvm_x86_ops.set_idt(vcpu, &dt); + static_call(kvm_x86_set_idt)(vcpu, &dt); - __kvm_set_dr(vcpu, 7, DR7_FIXED_1); + kvm_set_dr(vcpu, 7, DR7_FIXED_1); cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; cs.base = vcpu->arch.smbase; @@ -8634,7 +8713,7 @@ static void enter_smm(struct kvm_vcpu *vcpu) #ifdef CONFIG_X86_64 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) - kvm_x86_ops.set_efer(vcpu, 0); + static_call(kvm_x86_set_efer)(vcpu, 0); #endif kvm_update_cpuid_runtime(vcpu); @@ -8672,7 +8751,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm); kvm_apic_update_apicv(vcpu); - kvm_x86_ops.refresh_apicv_exec_ctrl(vcpu); + static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu); } EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv); @@ -8689,7 +8768,7 @@ void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) unsigned long old, new, expected; if (!kvm_x86_ops.check_apicv_inhibit_reasons || - !kvm_x86_ops.check_apicv_inhibit_reasons(bit)) + !static_call(kvm_x86_check_apicv_inhibit_reasons)(bit)) return; old = READ_ONCE(kvm->arch.apicv_inhibit_reasons); @@ -8709,7 +8788,7 @@ void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) trace_kvm_apicv_update_request(activate, bit); if (kvm_x86_ops.pre_update_apicv_exec_ctrl) - kvm_x86_ops.pre_update_apicv_exec_ctrl(kvm, activate); + static_call(kvm_x86_pre_update_apicv_exec_ctrl)(kvm, activate); /* * Sending request to update APICV for all other vcpus, @@ -8735,7 +8814,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); else { if (vcpu->arch.apicv_active) - kvm_x86_ops.sync_pir_to_irr(vcpu); + static_call(kvm_x86_sync_pir_to_irr)(vcpu); if (ioapic_in_kernel(vcpu->kvm)) kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); } @@ -8753,9 +8832,12 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) if (!kvm_apic_hw_enabled(vcpu->arch.apic)) return; - bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, - vcpu_to_synic(vcpu)->vec_bitmap, 256); - kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap); + if (to_hv_vcpu(vcpu)) + bitmap_or((ulong *)eoi_exit_bitmap, + vcpu->arch.ioapic_handled_vectors, + to_hv_synic(vcpu)->vec_bitmap, 256); + + static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); } void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, @@ -8780,7 +8862,7 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) if (!kvm_x86_ops.set_apic_access_page_addr) return; - kvm_x86_ops.set_apic_access_page_addr(vcpu); + static_call(kvm_x86_set_apic_access_page_addr)(vcpu); } void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) @@ -8905,8 +8987,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) goto out; } if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + vcpu->run->exit_reason = KVM_EXIT_HYPERV; - vcpu->run->hyperv = vcpu->arch.hyperv.exit; + vcpu->run->hyperv = hv_vcpu->exit; r = 0; goto out; } @@ -8923,10 +9007,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) kvm_check_async_pf_completion(vcpu); if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) - kvm_x86_ops.msr_filter_changed(vcpu); + static_call(kvm_x86_msr_filter_changed)(vcpu); + + if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) + static_call(kvm_x86_update_cpu_dirty_logging)(vcpu); } - if (kvm_check_request(KVM_REQ_EVENT, vcpu) || 
req_int_win) { + if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || + kvm_xen_has_interrupt(vcpu)) { ++vcpu->stat.req_event; kvm_apic_accept_events(vcpu); if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { @@ -8936,7 +9024,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) inject_pending_event(vcpu, &req_immediate_exit); if (req_int_win) - kvm_x86_ops.enable_irq_window(vcpu); + static_call(kvm_x86_enable_irq_window)(vcpu); if (kvm_lapic_enabled(vcpu)) { update_cr8_intercept(vcpu); @@ -8951,7 +9039,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) preempt_disable(); - kvm_x86_ops.prepare_guest_switch(vcpu); + static_call(kvm_x86_prepare_guest_switch)(vcpu); /* * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt @@ -8982,7 +9070,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) * notified with kvm_vcpu_kick. */ if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) - kvm_x86_ops.sync_pir_to_irr(vcpu); + static_call(kvm_x86_sync_pir_to_irr)(vcpu); if (kvm_vcpu_exit_request(vcpu)) { vcpu->mode = OUTSIDE_GUEST_MODE; @@ -8996,7 +9084,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (req_immediate_exit) { kvm_make_request(KVM_REQ_EVENT, vcpu); - kvm_x86_ops.request_immediate_exit(vcpu); + static_call(kvm_x86_request_immediate_exit)(vcpu); } fpregs_assert_state_consistent(); @@ -9013,7 +9101,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; } - exit_fastpath = kvm_x86_ops.run(vcpu); + for (;;) { + exit_fastpath = static_call(kvm_x86_run)(vcpu); + if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) + break; + + if (unlikely(kvm_vcpu_exit_request(vcpu))) { + exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; + break; + } + + if (vcpu->arch.apicv_active) + static_call(kvm_x86_sync_pir_to_irr)(vcpu); + } /* * Do this here before restoring debug registers on the host. 
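The new for (;;) loop around static_call(kvm_x86_run) above lets a fastpath exit (EXIT_FASTPATH_REENTER_GUEST) resume the guest without unwinding through the full exit path. Its shape, reduced to a sketch with invented stubs:

enum toy_fastpath { TOY_EXIT_HANDLED, TOY_REENTER_GUEST };

struct toy_vcpu;
enum toy_fastpath toy_vendor_run(struct toy_vcpu *v);	/* VMRUN/VMLAUNCH wrapper */
int toy_exit_requested(struct toy_vcpu *v);		/* need_resched(), signals, ... */
void toy_sync_posted_interrupts(struct toy_vcpu *v);

enum toy_fastpath toy_run_loop(struct toy_vcpu *v)
{
	enum toy_fastpath fp;

	for (;;) {
		fp = toy_vendor_run(v);
		if (fp != TOY_REENTER_GUEST)
			break;				/* needs full exit handling */
		if (toy_exit_requested(v))
			return TOY_EXIT_HANDLED;	/* host wants the CPU back */
		/* pick up interrupts posted while we stayed in guest mode */
		toy_sync_posted_interrupts(v);
	}
	return fp;
}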
And @@ -9023,7 +9123,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) */ if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); - kvm_x86_ops.sync_dirty_debug_regs(vcpu); + static_call(kvm_x86_sync_dirty_debug_regs)(vcpu); kvm_update_dr0123(vcpu); kvm_update_dr7(vcpu); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; @@ -9045,7 +9145,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); - kvm_x86_ops.handle_exit_irqoff(vcpu); + static_call(kvm_x86_handle_exit_irqoff)(vcpu); /* * Consume any pending interrupts, including the possible source of @@ -9087,13 +9187,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (vcpu->arch.apic_attention) kvm_lapic_sync_from_vapic(vcpu); - r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath); + r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath); return r; cancel_injection: if (req_immediate_exit) kvm_make_request(KVM_REQ_EVENT, vcpu); - kvm_x86_ops.cancel_injection(vcpu); + static_call(kvm_x86_cancel_injection)(vcpu); if (unlikely(vcpu->arch.apic_attention)) kvm_lapic_sync_from_vapic(vcpu); out: @@ -9103,13 +9203,13 @@ out: static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) { if (!kvm_arch_vcpu_runnable(vcpu) && - (!kvm_x86_ops.pre_block || kvm_x86_ops.pre_block(vcpu) == 0)) { + (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_x86_ops.post_block) - kvm_x86_ops.post_block(vcpu); + static_call(kvm_x86_post_block)(vcpu); if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) return 1; @@ -9330,6 +9430,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) vcpu_load(vcpu); kvm_sigset_activate(vcpu); + kvm_run->flags = 0; kvm_load_guest_fpu(vcpu); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { @@ -9504,10 +9605,10 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); - kvm_x86_ops.get_idt(vcpu, &dt); + static_call(kvm_x86_get_idt)(vcpu, &dt); sregs->idt.limit = dt.size; sregs->idt.base = dt.address; - kvm_x86_ops.get_gdt(vcpu, &dt); + static_call(kvm_x86_get_gdt)(vcpu, &dt); sregs->gdt.limit = dt.size; sregs->gdt.base = dt.address; @@ -9625,7 +9726,7 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) */ if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) return false; - if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits) + if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) return false; } else { /* @@ -9660,10 +9761,10 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) dt.size = sregs->idt.limit; dt.address = sregs->idt.base; - kvm_x86_ops.set_idt(vcpu, &dt); + static_call(kvm_x86_set_idt)(vcpu, &dt); dt.size = sregs->gdt.limit; dt.address = sregs->gdt.base; - kvm_x86_ops.set_gdt(vcpu, &dt); + static_call(kvm_x86_set_gdt)(vcpu, &dt); vcpu->arch.cr2 = sregs->cr2; mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; @@ -9673,14 +9774,14 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) kvm_set_cr8(vcpu, sregs->cr8); mmu_reset_needed |= vcpu->arch.efer != sregs->efer; - kvm_x86_ops.set_efer(vcpu, sregs->efer); + static_call(kvm_x86_set_efer)(vcpu, sregs->efer); mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; - kvm_x86_ops.set_cr0(vcpu, sregs->cr0); + 
static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; - kvm_x86_ops.set_cr4(vcpu, sregs->cr4); + static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); idx = srcu_read_lock(&vcpu->kvm->srcu); if (is_pae_paging(vcpu)) { @@ -9788,7 +9889,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, */ kvm_set_rflags(vcpu, rflags); - kvm_x86_ops.update_exception_bitmap(vcpu); + static_call(kvm_x86_update_exception_bitmap)(vcpu); r = 0; @@ -9966,7 +10067,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (kvm_apicv_activated(vcpu->kvm)) vcpu->arch.apicv_active = true; } else - static_key_slow_inc(&kvm_no_apic_vcpu); + static_branch_inc(&kvm_has_noapic_vcpu); r = -ENOMEM; @@ -10004,7 +10105,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) fx_init(vcpu); vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); - vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); + vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; @@ -10014,9 +10115,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.pending_external_vector = -1; vcpu->arch.preempted_in_kernel = false; - kvm_hv_vcpu_init(vcpu); - - r = kvm_x86_ops.vcpu_create(vcpu); + r = static_call(kvm_x86_vcpu_create)(vcpu); if (r) goto free_guest_fpu; @@ -10052,8 +10151,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; - kvm_hv_vcpu_postcreate(vcpu); - if (mutex_lock_killable(&vcpu->mutex)) return; vcpu_load(vcpu); @@ -10079,7 +10176,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvmclock_reset(vcpu); - kvm_x86_ops.vcpu_free(vcpu); + static_call(kvm_x86_vcpu_free)(vcpu); kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); @@ -10096,7 +10193,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) free_page((unsigned long)vcpu->arch.pio_data); kvfree(vcpu->arch.cpuid_entries); if (!lapic_in_kernel(vcpu)) - static_key_slow_dec(&kvm_no_apic_vcpu); + static_branch_dec(&kvm_has_noapic_vcpu); } void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) @@ -10115,7 +10212,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); kvm_update_dr0123(vcpu); - vcpu->arch.dr6 = DR6_INIT; + vcpu->arch.dr6 = DR6_ACTIVE_LOW; vcpu->arch.dr7 = DR7_FIXED_1; kvm_update_dr7(vcpu); @@ -10168,7 +10265,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vcpu->arch.ia32_xss = 0; - kvm_x86_ops.vcpu_reset(vcpu, init_event); + static_call(kvm_x86_vcpu_reset)(vcpu, init_event); } void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) @@ -10194,7 +10291,7 @@ int kvm_arch_hardware_enable(void) bool stable, backwards_tsc = false; kvm_user_return_msr_cpu_online(); - ret = kvm_x86_ops.hardware_enable(); + ret = static_call(kvm_x86_hardware_enable)(); if (ret != 0) return ret; @@ -10276,7 +10373,7 @@ int kvm_arch_hardware_enable(void) void kvm_arch_hardware_disable(void) { - kvm_x86_ops.hardware_disable(); + static_call(kvm_x86_hardware_disable)(); drop_user_return_notifiers(); } @@ -10295,6 +10392,7 @@ int kvm_arch_hardware_setup(void *opaque) return r; memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); + kvm_ops_static_call_update(); if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) supported_xss = 0; @@ -10323,7 +10421,7 @@ int kvm_arch_hardware_setup(void *opaque) void kvm_arch_hardware_unsetup(void) { - kvm_x86_ops.hardware_unsetup(); 
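Alongside the static_call conversion, the plain struct static_key kvm_no_apic_vcpu becomes a DEFINE_STATIC_KEY_FALSE static branch (renamed kvm_has_noapic_vcpu below). The modern idiom, sketched with an invented key:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(sample_noapic_vcpu);

static void sample_vcpu_create(bool in_kernel_lapic)
{
	if (!in_kernel_lapic)
		static_branch_inc(&sample_noapic_vcpu);	/* was static_key_slow_inc() */
}

static bool sample_hot_path(void)
{
	/* Compiles to a NOP until the key count rises above zero. */
	return static_branch_unlikely(&sample_noapic_vcpu);
}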
+ static_call(kvm_x86_hardware_unsetup)(); } int kvm_arch_check_processor_compat(void *opaque) @@ -10351,8 +10449,8 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; } -struct static_key kvm_no_apic_vcpu __read_mostly; -EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu); +__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); +EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) { @@ -10363,12 +10461,12 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) pmu->need_cleanup = true; kvm_make_request(KVM_REQ_PMU, vcpu); } - kvm_x86_ops.sched_in(vcpu, cpu); + static_call(kvm_x86_sched_in)(vcpu, cpu); } void kvm_arch_free_vm(struct kvm *kvm) { - kfree(kvm->arch.hyperv.hv_pa_pg); + kfree(to_kvm_hv(kvm)->hv_pa_pg); vfree(kvm); } @@ -10407,7 +10505,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm_page_track_init(kvm); kvm_mmu_init_vm(kvm); - return kvm_x86_ops.vm_init(kvm); + return static_call(kvm_x86_vm_init)(kvm); } int kvm_arch_post_init_vm(struct kvm *kvm) @@ -10552,8 +10650,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); mutex_unlock(&kvm->slots_lock); } - if (kvm_x86_ops.vm_destroy) - kvm_x86_ops.vm_destroy(kvm); + static_call_cond(kvm_x86_vm_destroy)(kvm); for (i = 0; i < kvm->arch.msr_filter.count; i++) kfree(kvm->arch.msr_filter.ranges[i].bitmap); kvm_pic_destroy(kvm); @@ -10563,6 +10660,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); kvm_mmu_uninit_vm(kvm); kvm_page_track_cleanup(kvm); + kvm_xen_destroy_vm(kvm); kvm_hv_destroy_vm(kvm); } @@ -10681,75 +10779,96 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, return 0; } + +static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable) +{ + struct kvm_arch *ka = &kvm->arch; + + if (!kvm_x86_ops.cpu_dirty_log_size) + return; + + if ((enable && ++ka->cpu_dirty_logging_count == 1) || + (!enable && --ka->cpu_dirty_logging_count == 0)) + kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING); + + WARN_ON_ONCE(ka->cpu_dirty_logging_count < 0); +} + static void kvm_mmu_slot_apply_flags(struct kvm *kvm, struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) { + bool log_dirty_pages = new->flags & KVM_MEM_LOG_DIRTY_PAGES; + /* - * Nothing to do for RO slots or CREATE/MOVE/DELETE of a slot. - * See comments below. + * Update CPU dirty logging if dirty logging is being toggled. This + * applies to all operations. */ - if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY)) - return; + if ((old->flags ^ new->flags) & KVM_MEM_LOG_DIRTY_PAGES) + kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages); /* - * Dirty logging tracks sptes in 4k granularity, meaning that large - * sptes have to be split. If live migration is successful, the guest - * in the source machine will be destroyed and large sptes will be - * created in the destination. However, if the guest continues to run - * in the source machine (for example if live migration fails), small - * sptes will remain around and cause bad performance. + * Nothing more to do for RO slots (which can't be dirtied and can't be + * made writable) or CREATE/MOVE/DELETE of a slot. * - * Scan sptes if dirty logging has been stopped, dropping those - * which can be collapsed into a single large-page spte. Later - * page faults will create the large-page sptes. 
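kvm_mmu_update_cpu_dirty_logging() above keys off the 0-to-1 and 1-to-0 edges of a per-VM counter, so KVM_REQ_UPDATE_CPU_DIRTY_LOGGING is broadcast only when hardware dirty logging actually needs switching on or off. The counting idiom by itself, with toy types:

struct toy_vm {
	int cpu_dirty_logging_count;
};

void toy_broadcast_update(struct toy_vm *vm);	/* stand-in for the all-vCPUs request */

static void toy_update_cpu_dirty_logging(struct toy_vm *vm, int enable)
{
	/* Only the 0 -> 1 and 1 -> 0 transitions matter. */
	if ((enable && ++vm->cpu_dirty_logging_count == 1) ||
	    (!enable && --vm->cpu_dirty_logging_count == 0))
		toy_broadcast_update(vm);
}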
- * - * There is no need to do this in any of the following cases: + * For a memslot with dirty logging disabled: * CREATE: No dirty mappings will already exist. * MOVE/DELETE: The old mappings will already have been cleaned up by * kvm_arch_flush_shadow_memslot() + * + * For a memslot with dirty logging enabled: + * CREATE: No shadow pages exist, thus nothing to write-protect + * and no dirty bits to clear. + * MOVE/DELETE: The old mappings will already have been cleaned up by + * kvm_arch_flush_shadow_memslot(). */ - if ((old->flags & KVM_MEM_LOG_DIRTY_PAGES) && - !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) - kvm_mmu_zap_collapsible_sptes(kvm, new); + if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY)) + return; /* - * Enable or disable dirty logging for the slot. - * - * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old - * slot have been zapped so no dirty logging updates are needed for - * the old slot. - * For KVM_MR_CREATE and KVM_MR_MOVE, once the new slot is visible - * any mappings that might be created in it will consume the - * properties of the new slot and do not need to be updated here. - * - * When PML is enabled, the kvm_x86_ops dirty logging hooks are - * called to enable/disable dirty logging. - * - * When disabling dirty logging with PML enabled, the D-bit is set - * for sptes in the slot in order to prevent unnecessary GPA - * logging in the PML buffer (and potential PML buffer full VMEXIT). - * This guarantees leaving PML enabled for the guest's lifetime - * won't have any additional overhead from PML when the guest is - * running with dirty logging disabled. - * - * When enabling dirty logging, large sptes are write-protected - * so they can be split on first write. New large sptes cannot - * be created for this slot until the end of the logging. - * See the comments in fast_page_fault(). - * For small sptes, nothing is done if the dirty log is in the - * initial-all-set state. Otherwise, depending on whether pml - * is enabled the D-bit or the W-bit will be cleared. + * READONLY and non-flags changes were filtered out above, and the only + * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty + * logging isn't being toggled on or off. */ - if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { - if (kvm_x86_ops.slot_enable_log_dirty) { - kvm_x86_ops.slot_enable_log_dirty(kvm, new); - } else { - int level = - kvm_dirty_log_manual_protect_and_init_set(kvm) ? - PG_LEVEL_2M : PG_LEVEL_4K; + if (WARN_ON_ONCE(!((old->flags ^ new->flags) & KVM_MEM_LOG_DIRTY_PAGES))) + return; + + if (!log_dirty_pages) { + /* + * Dirty logging tracks sptes in 4k granularity, meaning that + * large sptes have to be split. If live migration succeeds, + * the guest in the source machine will be destroyed and large + * sptes will be created in the destination. However, if the + * guest continues to run in the source machine (for example if + * live migration fails), small sptes will remain around and + * cause bad performance. + * + * Scan sptes if dirty logging has been stopped, dropping those + * which can be collapsed into a single large-page spte. Later + * page faults will create the large-page sptes. + */ + kvm_mmu_zap_collapsible_sptes(kvm, new); + } else { + /* By default, write-protect everything to log writes. */ + int level = PG_LEVEL_4K; + if (kvm_x86_ops.cpu_dirty_log_size) { + /* + * Clear all dirty bits, unless pages are treated as + * dirty from the get-go. 
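Condensed, the enable side of the rewritten function is a two-way choice of write-protection granularity; roughly (predicates shortened, levels follow the kernel's PG_LEVEL numbering):

#include <stdbool.h>

/* Page level at which the memslot gets write-protected when logging starts. */
static int toy_wrprot_level(bool cpu_dirty_log, bool manual_protect_init_set)
{
	if (cpu_dirty_log)
		return 2;	/* PG_LEVEL_2M: PML logs 4k writes via dirty
				 * bits, only huge pages must fault to split */
	if (manual_protect_init_set)
		return 2;	/* PG_LEVEL_2M: small pages are protected
				 * lazily, on the first clear-dirty-log pass */
	return 1;		/* PG_LEVEL_4K: trap every write in software */
}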
+ */ + if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) + kvm_mmu_slot_leaf_clear_dirty(kvm, new); + + /* + * Write-protect large pages on write so that dirty + * logging happens at 4k granularity. No need to + * write-protect small SPTEs since write accesses are + * logged by the CPU via dirty bits. + */ + level = PG_LEVEL_2M; + } else if (kvm_dirty_log_manual_protect_and_init_set(kvm)) { /* * If we're with initial-all-set, we don't need * to write protect any small page because @@ -10758,11 +10877,9 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, * so that the page split can happen lazily on * the first write to the huge page. */ - kvm_mmu_slot_remove_write_access(kvm, new, level); + level = PG_LEVEL_2M; } - } else { - if (kvm_x86_ops.slot_disable_log_dirty) - kvm_x86_ops.slot_disable_log_dirty(kvm, new); + kvm_mmu_slot_remove_write_access(kvm, new, level); } } @@ -10801,7 +10918,7 @@ static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) { return (is_guest_mode(vcpu) && kvm_x86_ops.guest_apic_has_interrupt && - kvm_x86_ops.guest_apic_has_interrupt(vcpu)); + static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); } static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) @@ -10820,12 +10937,12 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) if (kvm_test_request(KVM_REQ_NMI, vcpu) || (vcpu->arch.nmi_pending && - kvm_x86_ops.nmi_allowed(vcpu, false))) + static_call(kvm_x86_nmi_allowed)(vcpu, false))) return true; if (kvm_test_request(KVM_REQ_SMI, vcpu) || (vcpu->arch.smi_pending && - kvm_x86_ops.smi_allowed(vcpu, false))) + static_call(kvm_x86_smi_allowed)(vcpu, false))) return true; if (kvm_arch_interrupt_allowed(vcpu) && @@ -10859,7 +10976,7 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) kvm_test_request(KVM_REQ_EVENT, vcpu)) return true; - if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu)) + if (vcpu->arch.apicv_active && static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) return true; return false; @@ -10877,7 +10994,7 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) { - return kvm_x86_ops.interrupt_allowed(vcpu, false); + return static_call(kvm_x86_interrupt_allowed)(vcpu, false); } unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) @@ -10903,7 +11020,7 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags; - rflags = kvm_x86_ops.get_rflags(vcpu); + rflags = static_call(kvm_x86_get_rflags)(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) rflags &= ~X86_EFLAGS_TF; return rflags; @@ -10915,7 +11032,7 @@ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) rflags |= X86_EFLAGS_TF; - kvm_x86_ops.set_rflags(vcpu, rflags); + static_call(kvm_x86_set_rflags)(vcpu, rflags); } void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) @@ -11045,7 +11162,7 @@ static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu) return false; if (!kvm_pv_async_pf_enabled(vcpu) || - (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0)) + (vcpu->arch.apf.send_user_only && static_call(kvm_x86_get_cpl)(vcpu) == 0)) return false; return true; @@ -11190,7 +11307,7 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, irqfd->producer = prod; kvm_arch_start_assignment(irqfd->kvm); - ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, + ret = 
static_call(kvm_x86_update_pi_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 1); if (ret) @@ -11215,7 +11332,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, * when the irq is masked/disabled or the consumer side (KVM * int this case doesn't want to receive the interrupts. */ - ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); + ret = static_call(kvm_x86_update_pi_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0); if (ret) printk(KERN_INFO "irq bypass consumer (token %p) unregistration" " fails: %d\n", irqfd->consumer.token, ret); @@ -11226,7 +11343,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set) { - return kvm_x86_ops.update_pi_irte(kvm, host_irq, guest_irq, set); + return static_call(kvm_x86_update_pi_irte)(kvm, host_irq, guest_irq, set); } bool kvm_vector_hashing_enabled(void) diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 0f727b50bd3d..39eb04887141 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -98,7 +98,7 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu) if (!is_long_mode(vcpu)) return false; - kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); + static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); return cs_l; } @@ -129,7 +129,7 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) { ++vcpu->stat.tlb_flush; - kvm_x86_ops.tlb_flush_current(vcpu); + static_call(kvm_x86_tlb_flush_current)(vcpu); } static inline int is_pae(struct kvm_vcpu *vcpu) @@ -244,9 +244,10 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk) static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu) { - return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu); + return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu); } +void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs); void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); @@ -273,6 +274,8 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int page_num); bool kvm_vector_hashing_enabled(void); void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code); +int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, + void *insn, int insn_len); int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int emulation_type, void *insn, int insn_len); fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu); @@ -296,6 +299,8 @@ extern int pi_inject_timer; extern struct static_key kvm_no_apic_vcpu; +extern bool report_ignored_msrs; + static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) { return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, @@ -391,7 +396,6 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu); void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu); int kvm_spec_ctrl_test_value(u64 value); bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); -bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu); int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, struct x86_exception *e); int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva); diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c new file mode 100644 index 000000000000..ae17250e1efe --- /dev/null +++ 
b/arch/x86/kvm/xen.c @@ -0,0 +1,721 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. + * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * KVM Xen emulation + */ + +#include "x86.h" +#include "xen.h" +#include "hyperv.h" + +#include <linux/kvm_host.h> +#include <linux/sched/stat.h> + +#include <trace/events/kvm.h> +#include <xen/interface/xen.h> +#include <xen/interface/vcpu.h> + +#include "trace.h" + +DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ); + +static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) +{ + gpa_t gpa = gfn_to_gpa(gfn); + int wc_ofs, sec_hi_ofs; + int ret; + int idx = srcu_read_lock(&kvm->srcu); + + ret = kvm_gfn_to_hva_cache_init(kvm, &kvm->arch.xen.shinfo_cache, + gpa, PAGE_SIZE); + if (ret) + goto out; + + kvm->arch.xen.shinfo_set = true; + + /* Paranoia checks on the 32-bit struct layout */ + BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900); + BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924); + BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0); + + /* 32-bit location by default */ + wc_ofs = offsetof(struct compat_shared_info, wc); + sec_hi_ofs = offsetof(struct compat_shared_info, arch.wc_sec_hi); + +#ifdef CONFIG_X86_64 + /* Paranoia checks on the 64-bit struct layout */ + BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00); + BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c); + + if (kvm->arch.xen.long_mode) { + wc_ofs = offsetof(struct shared_info, wc); + sec_hi_ofs = offsetof(struct shared_info, wc_sec_hi); + } +#endif + + kvm_write_wall_clock(kvm, gpa + wc_ofs, sec_hi_ofs - wc_ofs); + kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE); + +out: + srcu_read_unlock(&kvm->srcu, idx); + return ret; +} + +static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state) +{ + struct kvm_vcpu_xen *vx = &v->arch.xen; + u64 now = get_kvmclock_ns(v->kvm); + u64 delta_ns = now - vx->runstate_entry_time; + u64 run_delay = current->sched_info.run_delay; + + if (unlikely(!vx->runstate_entry_time)) + vx->current_runstate = RUNSTATE_offline; + + /* + * Time waiting for the scheduler isn't "stolen" if the + * vCPU wasn't running anyway. + */ + if (vx->current_runstate == RUNSTATE_running) { + u64 steal_ns = run_delay - vx->last_steal; + + delta_ns -= steal_ns; + + vx->runstate_times[RUNSTATE_runnable] += steal_ns; + } + vx->last_steal = run_delay; + + vx->runstate_times[vx->current_runstate] += delta_ns; + vx->current_runstate = state; + vx->runstate_entry_time = now; +} + +void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) +{ + struct kvm_vcpu_xen *vx = &v->arch.xen; + uint64_t state_entry_time; + unsigned int offset; + + kvm_xen_update_runstate(v, state); + + if (!vx->runstate_set) + return; + + BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c); + + offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time); +#ifdef CONFIG_X86_64 + /* + * The only difference is alignment of uint64_t in 32-bit. + * So the first field 'state' is accessed directly using + * offsetof() (where its offset happens to be zero), while the + * remaining fields which are all uint64_t, start at 'offset' + * which we tweak here by adding 4. 
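These BUILD_BUG_ON() checks pin the guest-visible ABI at compile time. The same technique in plain C11, with a cut-down stand-in struct whose offsets match what the code above asserts for the packed 32-bit runstate layout:

#include <stddef.h>
#include <stdint.h>
#include <assert.h>

struct toy_runstate_info {
	int32_t  state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));

/* Packed 32-bit layout: the u64 fields sit 4 bytes after 'state'. */
static_assert(offsetof(struct toy_runstate_info, state_entry_time) == 4,
	      "guest ABI moved");
static_assert(sizeof(struct toy_runstate_info) == 0x2c,
	      "guest ABI resized");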
+ */ + BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) != + offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4); + BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) != + offsetof(struct compat_vcpu_runstate_info, time) + 4); + + if (v->kvm->arch.xen.long_mode) + offset = offsetof(struct vcpu_runstate_info, state_entry_time); +#endif + /* + * First write the updated state_entry_time at the appropriate + * location determined by 'offset'. + */ + state_entry_time = vx->runstate_entry_time; + state_entry_time |= XEN_RUNSTATE_UPDATE; + + BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state_entry_time) != + sizeof(state_entry_time)); + BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state_entry_time) != + sizeof(state_entry_time)); + + if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache, + &state_entry_time, offset, + sizeof(state_entry_time))) + return; + smp_wmb(); + + /* + * Next, write the new runstate. This is in the *same* place + * for 32-bit and 64-bit guests, asserted here for paranoia. + */ + BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != + offsetof(struct compat_vcpu_runstate_info, state)); + BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state) != + sizeof(vx->current_runstate)); + BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state) != + sizeof(vx->current_runstate)); + + if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache, + &vx->current_runstate, + offsetof(struct vcpu_runstate_info, state), + sizeof(vx->current_runstate))) + return; + + /* + * Write the actual runstate times immediately after the + * runstate_entry_time. + */ + BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) != + offsetof(struct vcpu_runstate_info, time) - sizeof(u64)); + BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) != + offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64)); + BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) != + sizeof(((struct compat_vcpu_runstate_info *)0)->time)); + BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) != + sizeof(vx->runstate_times)); + + if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache, + &vx->runstate_times[0], + offset + sizeof(u64), + sizeof(vx->runstate_times))) + return; + + smp_wmb(); + + /* + * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's + * runstate_entry_time field. + */ + + state_entry_time &= ~XEN_RUNSTATE_UPDATE; + if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache, + &state_entry_time, offset, + sizeof(state_entry_time))) + return; +} + +int __kvm_xen_has_interrupt(struct kvm_vcpu *v) +{ + u8 rc = 0; + + /* + * If the global upcall vector (HVMIRQ_callback_vector) is set and + * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending. 
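The write sequence above is a writer-side seqlock in miniature: set XEN_RUNSTATE_UPDATE inside state_entry_time, barrier, store the payload, barrier, clear the bit, so a guest reader can detect and retry a torn read. The protocol's shape with C11 fences standing in for smp_wmb() (toy payload, not the real guest ABI):

#include <stdatomic.h>
#include <stdint.h>

#define TOY_UPDATE_BIT	(UINT64_C(1) << 63)	/* mirrors XEN_RUNSTATE_UPDATE */

struct toy_runstate_page {
	_Atomic uint64_t entry_time;	/* top bit doubles as "update in progress" */
	uint64_t times[4];
};

static void toy_publish(struct toy_runstate_page *p, uint64_t now,
			const uint64_t t[4])
{
	atomic_store_explicit(&p->entry_time, now | TOY_UPDATE_BIT,
			      memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	for (int i = 0; i < 4; i++)
		p->times[i] = t[i];
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&p->entry_time, now, memory_order_relaxed);
}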
+ */ + struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache; + struct kvm_memslots *slots = kvm_memslots(v->kvm); + unsigned int offset = offsetof(struct vcpu_info, evtchn_upcall_pending); + + /* No need for compat handling here */ + BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) != + offsetof(struct compat_vcpu_info, evtchn_upcall_pending)); + BUILD_BUG_ON(sizeof(rc) != + sizeof(((struct vcpu_info *)0)->evtchn_upcall_pending)); + BUILD_BUG_ON(sizeof(rc) != + sizeof(((struct compat_vcpu_info *)0)->evtchn_upcall_pending)); + + /* + * For efficiency, this mirrors the checks for using the valid + * cache in kvm_read_guest_offset_cached(), but just uses + * __get_user() instead. And falls back to the slow path. + */ + if (likely(slots->generation == ghc->generation && + !kvm_is_error_hva(ghc->hva) && ghc->memslot)) { + /* Fast path */ + __get_user(rc, (u8 __user *)ghc->hva + offset); + } else { + /* Slow path */ + kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset, + sizeof(rc)); + } + + return rc; +} + +int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) +{ + int r = -ENOENT; + + mutex_lock(&kvm->lock); + + switch (data->type) { + case KVM_XEN_ATTR_TYPE_LONG_MODE: + if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) { + r = -EINVAL; + } else { + kvm->arch.xen.long_mode = !!data->u.long_mode; + r = 0; + } + break; + + case KVM_XEN_ATTR_TYPE_SHARED_INFO: + if (data->u.shared_info.gfn == GPA_INVALID) { + kvm->arch.xen.shinfo_set = false; + r = 0; + break; + } + r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn); + break; + + + case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR: + if (data->u.vector && data->u.vector < 0x10) + r = -EINVAL; + else { + kvm->arch.xen.upcall_vector = data->u.vector; + r = 0; + } + break; + + default: + break; + } + + mutex_unlock(&kvm->lock); + return r; +} + +int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) +{ + int r = -ENOENT; + + mutex_lock(&kvm->lock); + + switch (data->type) { + case KVM_XEN_ATTR_TYPE_LONG_MODE: + data->u.long_mode = kvm->arch.xen.long_mode; + r = 0; + break; + + case KVM_XEN_ATTR_TYPE_SHARED_INFO: + if (kvm->arch.xen.shinfo_set) + data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa); + else + data->u.shared_info.gfn = GPA_INVALID; + r = 0; + break; + + case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR: + data->u.vector = kvm->arch.xen.upcall_vector; + r = 0; + break; + + default: + break; + } + + mutex_unlock(&kvm->lock); + return r; +} + +int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) +{ + int idx, r = -ENOENT; + + mutex_lock(&vcpu->kvm->lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); + + switch (data->type) { + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO: + /* No compat necessary here. 
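__kvm_xen_has_interrupt() above open-codes the gfn_to_hva_cache fast path: if the memslot generation still matches what the cache was built against, one byte is read straight through the cached host address; anything else falls back to the revalidating slow path. The pattern in the abstract (invented types):

#include <stdint.h>

struct toy_gpc {
	uint64_t generation;	/* memslot generation the mapping was built for */
	uint8_t *hva;		/* cached host virtual address, possibly stale */
};

uint8_t toy_slow_read(struct toy_gpc *c, unsigned int off);	/* revalidates */

static uint8_t toy_read_byte(struct toy_gpc *c, uint64_t cur_gen,
			     unsigned int off)
{
	if (c->generation == cur_gen && c->hva)
		return c->hva[off];		/* fast path, no locking */
	return toy_slow_read(c, off);		/* slow path, re-translate */
}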
*/ + BUILD_BUG_ON(sizeof(struct vcpu_info) != + sizeof(struct compat_vcpu_info)); + BUILD_BUG_ON(offsetof(struct vcpu_info, time) != + offsetof(struct compat_vcpu_info, time)); + + if (data->u.gpa == GPA_INVALID) { + vcpu->arch.xen.vcpu_info_set = false; + r = 0; + break; + } + + r = kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.xen.vcpu_info_cache, + data->u.gpa, + sizeof(struct vcpu_info)); + if (!r) { + vcpu->arch.xen.vcpu_info_set = true; + kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); + } + break; + + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO: + if (data->u.gpa == GPA_INVALID) { + vcpu->arch.xen.vcpu_time_info_set = false; + r = 0; + break; + } + + r = kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.xen.vcpu_time_info_cache, + data->u.gpa, + sizeof(struct pvclock_vcpu_time_info)); + if (!r) { + vcpu->arch.xen.vcpu_time_info_set = true; + kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); + } + break; + + case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR: + if (!sched_info_on()) { + r = -EOPNOTSUPP; + break; + } + if (data->u.gpa == GPA_INVALID) { + vcpu->arch.xen.runstate_set = false; + r = 0; + break; + } + + r = kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.xen.runstate_cache, + data->u.gpa, + sizeof(struct vcpu_runstate_info)); + if (!r) { + vcpu->arch.xen.runstate_set = true; + } + break; + + case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT: + if (!sched_info_on()) { + r = -EOPNOTSUPP; + break; + } + if (data->u.runstate.state > RUNSTATE_offline) { + r = -EINVAL; + break; + } + + kvm_xen_update_runstate(vcpu, data->u.runstate.state); + r = 0; + break; + + case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA: + if (!sched_info_on()) { + r = -EOPNOTSUPP; + break; + } + if (data->u.runstate.state > RUNSTATE_offline) { + r = -EINVAL; + break; + } + if (data->u.runstate.state_entry_time != + (data->u.runstate.time_running + + data->u.runstate.time_runnable + + data->u.runstate.time_blocked + + data->u.runstate.time_offline)) { + r = -EINVAL; + break; + } + if (get_kvmclock_ns(vcpu->kvm) < + data->u.runstate.state_entry_time) { + r = -EINVAL; + break; + } + + vcpu->arch.xen.current_runstate = data->u.runstate.state; + vcpu->arch.xen.runstate_entry_time = + data->u.runstate.state_entry_time; + vcpu->arch.xen.runstate_times[RUNSTATE_running] = + data->u.runstate.time_running; + vcpu->arch.xen.runstate_times[RUNSTATE_runnable] = + data->u.runstate.time_runnable; + vcpu->arch.xen.runstate_times[RUNSTATE_blocked] = + data->u.runstate.time_blocked; + vcpu->arch.xen.runstate_times[RUNSTATE_offline] = + data->u.runstate.time_offline; + vcpu->arch.xen.last_steal = current->sched_info.run_delay; + r = 0; + break; + + case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST: + if (!sched_info_on()) { + r = -EOPNOTSUPP; + break; + } + if (data->u.runstate.state > RUNSTATE_offline && + data->u.runstate.state != (u64)-1) { + r = -EINVAL; + break; + } + /* The adjustment must add up */ + if (data->u.runstate.state_entry_time != + (data->u.runstate.time_running + + data->u.runstate.time_runnable + + data->u.runstate.time_blocked + + data->u.runstate.time_offline)) { + r = -EINVAL; + break; + } + + if (get_kvmclock_ns(vcpu->kvm) < + (vcpu->arch.xen.runstate_entry_time + + data->u.runstate.state_entry_time)) { + r = -EINVAL; + break; + } + + vcpu->arch.xen.runstate_entry_time += + data->u.runstate.state_entry_time; + vcpu->arch.xen.runstate_times[RUNSTATE_running] += + data->u.runstate.time_running; + vcpu->arch.xen.runstate_times[RUNSTATE_runnable] += + data->u.runstate.time_runnable; + 
vcpu->arch.xen.runstate_times[RUNSTATE_blocked] += + data->u.runstate.time_blocked; + vcpu->arch.xen.runstate_times[RUNSTATE_offline] += + data->u.runstate.time_offline; + + if (data->u.runstate.state <= RUNSTATE_offline) + kvm_xen_update_runstate(vcpu, data->u.runstate.state); + r = 0; + break; + + default: + break; + } + + srcu_read_unlock(&vcpu->kvm->srcu, idx); + mutex_unlock(&vcpu->kvm->lock); + return r; +} + +int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) +{ + int r = -ENOENT; + + mutex_lock(&vcpu->kvm->lock); + + switch (data->type) { + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO: + if (vcpu->arch.xen.vcpu_info_set) + data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa; + else + data->u.gpa = GPA_INVALID; + r = 0; + break; + + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO: + if (vcpu->arch.xen.vcpu_time_info_set) + data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa; + else + data->u.gpa = GPA_INVALID; + r = 0; + break; + + case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR: + if (!sched_info_on()) { + r = -EOPNOTSUPP; + break; + } + if (vcpu->arch.xen.runstate_set) { + data->u.gpa = vcpu->arch.xen.runstate_cache.gpa; + r = 0; + } + break; + + case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT: + if (!sched_info_on()) { + r = -EOPNOTSUPP; + break; + } + data->u.runstate.state = vcpu->arch.xen.current_runstate; + r = 0; + break; + + case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA: + if (!sched_info_on()) { + r = -EOPNOTSUPP; + break; + } + data->u.runstate.state = vcpu->arch.xen.current_runstate; + data->u.runstate.state_entry_time = + vcpu->arch.xen.runstate_entry_time; + data->u.runstate.time_running = + vcpu->arch.xen.runstate_times[RUNSTATE_running]; + data->u.runstate.time_runnable = + vcpu->arch.xen.runstate_times[RUNSTATE_runnable]; + data->u.runstate.time_blocked = + vcpu->arch.xen.runstate_times[RUNSTATE_blocked]; + data->u.runstate.time_offline = + vcpu->arch.xen.runstate_times[RUNSTATE_offline]; + r = 0; + break; + + case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST: + r = -EINVAL; + break; + + default: + break; + } + + mutex_unlock(&vcpu->kvm->lock); + return r; +} + +int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) +{ + struct kvm *kvm = vcpu->kvm; + u32 page_num = data & ~PAGE_MASK; + u64 page_addr = data & PAGE_MASK; + bool lm = is_long_mode(vcpu); + + /* Latch long_mode for shared_info pages etc. */ + vcpu->kvm->arch.xen.long_mode = lm; + + /* + * If Xen hypercall intercept is enabled, fill the hypercall + * page with VMCALL/VMMCALL instructions since that's what + * we catch. Else the VMM has provided the hypercall pages + * with instructions of its own choosing, so use those. + */ + if (kvm_xen_hypercall_enabled(kvm)) { + u8 instructions[32]; + int i; + + if (page_num) + return 1; + + /* mov imm32, %eax */ + instructions[0] = 0xb8; + + /* vmcall / vmmcall */ + kvm_x86_ops.patch_hypercall(vcpu, instructions + 5); + + /* ret */ + instructions[8] = 0xc3; + + /* int3 to pad */ + memset(instructions + 9, 0xcc, sizeof(instructions) - 9); + + for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) { + *(u32 *)&instructions[1] = i; + if (kvm_vcpu_write_guest(vcpu, + page_addr + (i * sizeof(instructions)), + instructions, sizeof(instructions))) + return 1; + } + } else { + /* + * Note, truncation is a non-issue as 'lm' is guaranteed to be + * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes. + */ + hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64 + : kvm->arch.xen_hvm_config.blob_addr_32; + u8 blob_size = lm ? 
kvm->arch.xen_hvm_config.blob_size_64 + : kvm->arch.xen_hvm_config.blob_size_32; + u8 *page; + + if (page_num >= blob_size) + return 1; + + blob_addr += page_num * PAGE_SIZE; + + page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE); + if (IS_ERR(page)) + return PTR_ERR(page); + + if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) { + kfree(page); + return 1; + } + } + return 0; +} + +int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc) +{ + if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) + return -EINVAL; + + /* + * With hypercall interception the kernel generates its own + * hypercall page so it must not be provided. + */ + if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) && + (xhc->blob_addr_32 || xhc->blob_addr_64 || + xhc->blob_size_32 || xhc->blob_size_64)) + return -EINVAL; + + mutex_lock(&kvm->lock); + + if (xhc->msr && !kvm->arch.xen_hvm_config.msr) + static_branch_inc(&kvm_xen_enabled.key); + else if (!xhc->msr && kvm->arch.xen_hvm_config.msr) + static_branch_slow_dec_deferred(&kvm_xen_enabled); + + memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc)); + + mutex_unlock(&kvm->lock); + return 0; +} + +void kvm_xen_destroy_vm(struct kvm *kvm) +{ + if (kvm->arch.xen_hvm_config.msr) + static_branch_slow_dec_deferred(&kvm_xen_enabled); +} + +static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) +{ + kvm_rax_write(vcpu, result); + return kvm_skip_emulated_instruction(vcpu); +} + +static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip))) + return 1; + + return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result); +} + +int kvm_xen_hypercall(struct kvm_vcpu *vcpu) +{ + bool longmode; + u64 input, params[6]; + + input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX); + + /* Hyper-V hypercalls get bit 31 set in EAX */ + if ((input & 0x80000000) && + kvm_hv_hypercall_enabled(vcpu)) + return kvm_hv_hypercall(vcpu); + + longmode = is_64_bit_mode(vcpu); + if (!longmode) { + params[0] = (u32)kvm_rbx_read(vcpu); + params[1] = (u32)kvm_rcx_read(vcpu); + params[2] = (u32)kvm_rdx_read(vcpu); + params[3] = (u32)kvm_rsi_read(vcpu); + params[4] = (u32)kvm_rdi_read(vcpu); + params[5] = (u32)kvm_rbp_read(vcpu); + } +#ifdef CONFIG_X86_64 + else { + params[0] = (u64)kvm_rdi_read(vcpu); + params[1] = (u64)kvm_rsi_read(vcpu); + params[2] = (u64)kvm_rdx_read(vcpu); + params[3] = (u64)kvm_r10_read(vcpu); + params[4] = (u64)kvm_r8_read(vcpu); + params[5] = (u64)kvm_r9_read(vcpu); + } +#endif + trace_kvm_xen_hypercall(input, params[0], params[1], params[2], + params[3], params[4], params[5]); + + vcpu->run->exit_reason = KVM_EXIT_XEN; + vcpu->run->xen.type = KVM_EXIT_XEN_HCALL; + vcpu->run->xen.u.hcall.longmode = longmode; + vcpu->run->xen.u.hcall.cpl = kvm_x86_ops.get_cpl(vcpu); + vcpu->run->xen.u.hcall.input = input; + vcpu->run->xen.u.hcall.params[0] = params[0]; + vcpu->run->xen.u.hcall.params[1] = params[1]; + vcpu->run->xen.u.hcall.params[2] = params[2]; + vcpu->run->xen.u.hcall.params[3] = params[3]; + vcpu->run->xen.u.hcall.params[4] = params[4]; + vcpu->run->xen.u.hcall.params[5] = params[5]; + vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu); + vcpu->arch.complete_userspace_io = + kvm_xen_hypercall_complete_userspace; + + return 0; +} diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h new file mode 100644 index 000000000000..463a7844a8ca --- /dev/null +++ b/arch/x86/kvm/xen.h @@ -0,0 +1,138 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. + * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * KVM Xen emulation + */ + +#ifndef __ARCH_X86_KVM_XEN_H__ +#define __ARCH_X86_KVM_XEN_H__ + +#ifdef CONFIG_KVM_XEN +#include <linux/jump_label_ratelimit.h> + +extern struct static_key_false_deferred kvm_xen_enabled; + +int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu); +int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); +int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); +int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); +int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); +int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data); +int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc); +void kvm_xen_destroy_vm(struct kvm *kvm); + +static inline bool kvm_xen_msr_enabled(struct kvm *kvm) +{ + return static_branch_unlikely(&kvm_xen_enabled.key) && + kvm->arch.xen_hvm_config.msr; +} + +static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm) +{ + return static_branch_unlikely(&kvm_xen_enabled.key) && + (kvm->arch.xen_hvm_config.flags & + KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL); +} + +static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu) +{ + if (static_branch_unlikely(&kvm_xen_enabled.key) && + vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector) + return __kvm_xen_has_interrupt(vcpu); + + return 0; +} +#else +static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) +{ + return 1; +} + +static inline void kvm_xen_destroy_vm(struct kvm *kvm) +{ +} + +static inline bool kvm_xen_msr_enabled(struct kvm *kvm) +{ + return false; +} + +static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm) +{ + return false; +} + +static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu) +{ + return 0; +} +#endif + +int kvm_xen_hypercall(struct kvm_vcpu *vcpu); + +#include <asm/pvclock-abi.h> +#include <asm/xen/interface.h> +#include <xen/interface/vcpu.h> + +void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state); + +static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu) +{ + kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running); +} + +static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu) +{ + /* + * If the vCPU wasn't preempted but took a normal exit for + * some reason (hypercalls, I/O, etc.), that is accounted as + * still RUNSTATE_running, as the VMM is still operating on + * behalf of the vCPU. Only if the VMM does actually block + * does it need to enter RUNSTATE_blocked. 
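kvm_xen_runstate_set_preempted() below encodes the Xen convention that an exit taken on the vCPU's behalf still counts as "running"; only losing the physical CPU demotes it to "runnable". Schematically (toy mapping, not the real helper):

#include <stdbool.h>

enum toy_runstate { TOY_RUNNING, TOY_RUNNABLE, TOY_BLOCKED, TOY_OFFLINE };

/* Runstate to report to the guest when the vCPU is scheduled out. */
static enum toy_runstate toy_runstate_on_put(bool was_preempted)
{
	/*
	 * Hypercalls and I/O exits are work done for the vCPU, so it stays
	 * "running"; preemption is what makes it merely "runnable". A VMM
	 * that actually blocks would report "blocked" instead.
	 */
	return was_preempted ? TOY_RUNNABLE : TOY_RUNNING;
}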
+ */ + if (vcpu->preempted) + kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable); +} + +/* 32-bit compatibility definitions, also used natively in 32-bit build */ +struct compat_arch_vcpu_info { + unsigned int cr2; + unsigned int pad[5]; +}; + +struct compat_vcpu_info { + uint8_t evtchn_upcall_pending; + uint8_t evtchn_upcall_mask; + uint16_t pad; + uint32_t evtchn_pending_sel; + struct compat_arch_vcpu_info arch; + struct pvclock_vcpu_time_info time; +}; /* 64 bytes (x86) */ + +struct compat_arch_shared_info { + unsigned int max_pfn; + unsigned int pfn_to_mfn_frame_list_list; + unsigned int nmi_reason; + unsigned int p2m_cr3; + unsigned int p2m_vaddr; + unsigned int p2m_generation; + uint32_t wc_sec_hi; +}; + +struct compat_shared_info { + struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS]; + uint32_t evtchn_pending[32]; + uint32_t evtchn_mask[32]; + struct pvclock_wall_clock wc; + struct compat_arch_shared_info arch; +}; + +struct compat_vcpu_runstate_info { + int state; + uint64_t state_entry_time; + uint64_t time[4]; +} __attribute__((packed)); + +#endif /* __ARCH_X86_KVM_XEN_H__ */ diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 404279563891..435630a6ec97 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -5,6 +5,7 @@ * Copyright (C) IBM Corporation, 2002, 2004, 2009 */ +#include <linux/kernel.h> #ifdef __KERNEL__ #include <linux/string.h> #else @@ -15,15 +16,28 @@ #include <asm/emulate_prefix.h> +#define leXX_to_cpu(t, r) \ +({ \ + __typeof__(t) v; \ + switch (sizeof(t)) { \ + case 4: v = le32_to_cpu(r); break; \ + case 2: v = le16_to_cpu(r); break; \ + case 1: v = r; break; \ + default: \ + BUILD_BUG(); break; \ + } \ + v; \ +}) + /* Verify next sizeof(t) bytes can be on the same instruction */ #define validate_next(t, insn, n) \ ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr) #define __get_next(t, insn) \ - ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) + ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); }) #define __peek_nbyte_next(t, insn, n) \ - ({ t r = *(t*)((insn)->next_byte + n); r; }) + ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); }) #define get_next(t, insn) \ ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); }) @@ -147,9 +161,9 @@ found: b = insn->prefixes.bytes[3]; for (i = 0; i < nb; i++) if (prefixes->bytes[i] == lb) - prefixes->bytes[i] = b; + insn_set_byte(prefixes, i, b); } - insn->prefixes.bytes[3] = lb; + insn_set_byte(&insn->prefixes, 3, lb); } /* Decode REX prefix */ @@ -157,8 +171,7 @@ found: b = peek_next(insn_byte_t, insn); attr = inat_get_opcode_attribute(b); if (inat_is_rex_prefix(attr)) { - insn->rex_prefix.value = b; - insn->rex_prefix.nbytes = 1; + insn_field_set(&insn->rex_prefix, b, 1); insn->next_byte++; if (X86_REX_W(b)) /* REX.W overrides opnd_size */ @@ -181,13 +194,13 @@ found: if (X86_MODRM_MOD(b2) != 3) goto vex_end; } - insn->vex_prefix.bytes[0] = b; - insn->vex_prefix.bytes[1] = b2; + insn_set_byte(&insn->vex_prefix, 0, b); + insn_set_byte(&insn->vex_prefix, 1, b2); if (inat_is_evex_prefix(attr)) { b2 = peek_nbyte_next(insn_byte_t, insn, 2); - insn->vex_prefix.bytes[2] = b2; + insn_set_byte(&insn->vex_prefix, 2, b2); b2 = peek_nbyte_next(insn_byte_t, insn, 3); - insn->vex_prefix.bytes[3] = b2; + insn_set_byte(&insn->vex_prefix, 3, b2); insn->vex_prefix.nbytes = 4; insn->next_byte += 4; if (insn->x86_64 && X86_VEX_W(b2)) @@ -195,7 +208,7 @@ found: insn->opnd_bytes = 8; } else if (inat_is_vex3_prefix(attr)) { b2 = 
peek_nbyte_next(insn_byte_t, insn, 2); - insn->vex_prefix.bytes[2] = b2; + insn_set_byte(&insn->vex_prefix, 2, b2); insn->vex_prefix.nbytes = 3; insn->next_byte += 3; if (insn->x86_64 && X86_VEX_W(b2)) @@ -207,7 +220,7 @@ found: * Makes it easier to decode vex.W, vex.vvvv, * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0. */ - insn->vex_prefix.bytes[2] = b2 & 0x7f; + insn_set_byte(&insn->vex_prefix, 2, b2 & 0x7f); insn->vex_prefix.nbytes = 2; insn->next_byte += 2; } @@ -243,7 +256,7 @@ void insn_get_opcode(struct insn *insn) /* Get first opcode */ op = get_next(insn_byte_t, insn); - opcode->bytes[0] = op; + insn_set_byte(opcode, 0, op); opcode->nbytes = 1; /* Check if there is VEX prefix or not */ @@ -295,8 +308,7 @@ void insn_get_modrm(struct insn *insn) if (inat_has_modrm(insn->attr)) { mod = get_next(insn_byte_t, insn); - modrm->value = mod; - modrm->nbytes = 1; + insn_field_set(modrm, mod, 1); if (inat_is_group(insn->attr)) { pfx_id = insn_last_prefix_id(insn); insn->attr = inat_get_group_attribute(mod, pfx_id, @@ -334,7 +346,7 @@ int insn_rip_relative(struct insn *insn) * For rip-relative instructions, the mod field (top 2 bits) * is zero and the r/m field (bottom 3 bits) is 0x5. */ - return (modrm->nbytes && (modrm->value & 0xc7) == 0x5); + return (modrm->nbytes && (modrm->bytes[0] & 0xc7) == 0x5); } /** @@ -353,11 +365,11 @@ void insn_get_sib(struct insn *insn) if (!insn->modrm.got) insn_get_modrm(insn); if (insn->modrm.nbytes) { - modrm = (insn_byte_t)insn->modrm.value; + modrm = insn->modrm.bytes[0]; if (insn->addr_bytes != 2 && X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) { - insn->sib.value = get_next(insn_byte_t, insn); - insn->sib.nbytes = 1; + insn_field_set(&insn->sib, + get_next(insn_byte_t, insn), 1); } } insn->sib.got = 1; @@ -407,19 +419,18 @@ void insn_get_displacement(struct insn *insn) if (mod == 3) goto out; if (mod == 1) { - insn->displacement.value = get_next(signed char, insn); - insn->displacement.nbytes = 1; + insn_field_set(&insn->displacement, + get_next(signed char, insn), 1); } else if (insn->addr_bytes == 2) { if ((mod == 0 && rm == 6) || mod == 2) { - insn->displacement.value = - get_next(short, insn); - insn->displacement.nbytes = 2; + insn_field_set(&insn->displacement, + get_next(short, insn), 2); } } else { if ((mod == 0 && rm == 5) || mod == 2 || (mod == 0 && base == 5)) { - insn->displacement.value = get_next(int, insn); - insn->displacement.nbytes = 4; + insn_field_set(&insn->displacement, + get_next(int, insn), 4); } } } @@ -435,18 +446,14 @@ static int __get_moffset(struct insn *insn) { switch (insn->addr_bytes) { case 2: - insn->moffset1.value = get_next(short, insn); - insn->moffset1.nbytes = 2; + insn_field_set(&insn->moffset1, get_next(short, insn), 2); break; case 4: - insn->moffset1.value = get_next(int, insn); - insn->moffset1.nbytes = 4; + insn_field_set(&insn->moffset1, get_next(int, insn), 4); break; case 8: - insn->moffset1.value = get_next(int, insn); - insn->moffset1.nbytes = 4; - insn->moffset2.value = get_next(int, insn); - insn->moffset2.nbytes = 4; + insn_field_set(&insn->moffset1, get_next(int, insn), 4); + insn_field_set(&insn->moffset2, get_next(int, insn), 4); break; default: /* opnd_bytes must be modified manually */ goto err_out; @@ -464,13 +471,11 @@ static int __get_immv32(struct insn *insn) { switch (insn->opnd_bytes) { case 2: - insn->immediate.value = get_next(short, insn); - insn->immediate.nbytes = 2; + insn_field_set(&insn->immediate, get_next(short, insn), 2); break; case 4: case 8: - 
insn->immediate.value = get_next(int, insn); - insn->immediate.nbytes = 4; + insn_field_set(&insn->immediate, get_next(int, insn), 4); break; default: /* opnd_bytes must be modified manually */ goto err_out; @@ -487,18 +492,15 @@ static int __get_immv(struct insn *insn) { switch (insn->opnd_bytes) { case 2: - insn->immediate1.value = get_next(short, insn); - insn->immediate1.nbytes = 2; + insn_field_set(&insn->immediate1, get_next(short, insn), 2); break; case 4: - insn->immediate1.value = get_next(int, insn); + insn_field_set(&insn->immediate1, get_next(int, insn), 4); insn->immediate1.nbytes = 4; break; case 8: - insn->immediate1.value = get_next(int, insn); - insn->immediate1.nbytes = 4; - insn->immediate2.value = get_next(int, insn); - insn->immediate2.nbytes = 4; + insn_field_set(&insn->immediate1, get_next(int, insn), 4); + insn_field_set(&insn->immediate2, get_next(int, insn), 4); break; default: /* opnd_bytes must be modified manually */ goto err_out; @@ -515,12 +517,10 @@ static int __get_immptr(struct insn *insn) { switch (insn->opnd_bytes) { case 2: - insn->immediate1.value = get_next(short, insn); - insn->immediate1.nbytes = 2; + insn_field_set(&insn->immediate1, get_next(short, insn), 2); break; case 4: - insn->immediate1.value = get_next(int, insn); - insn->immediate1.nbytes = 4; + insn_field_set(&insn->immediate1, get_next(int, insn), 4); break; case 8: /* ptr16:64 is not exist (no segment) */ @@ -528,8 +528,7 @@ static int __get_immptr(struct insn *insn) default: /* opnd_bytes must be modified manually */ goto err_out; } - insn->immediate2.value = get_next(unsigned short, insn); - insn->immediate2.nbytes = 2; + insn_field_set(&insn->immediate2, get_next(unsigned short, insn), 2); insn->immediate1.got = insn->immediate2.got = 1; return 1; @@ -565,22 +564,17 @@ void insn_get_immediate(struct insn *insn) switch (inat_immediate_size(insn->attr)) { case INAT_IMM_BYTE: - insn->immediate.value = get_next(signed char, insn); - insn->immediate.nbytes = 1; + insn_field_set(&insn->immediate, get_next(signed char, insn), 1); break; case INAT_IMM_WORD: - insn->immediate.value = get_next(short, insn); - insn->immediate.nbytes = 2; + insn_field_set(&insn->immediate, get_next(short, insn), 2); break; case INAT_IMM_DWORD: - insn->immediate.value = get_next(int, insn); - insn->immediate.nbytes = 4; + insn_field_set(&insn->immediate, get_next(int, insn), 4); break; case INAT_IMM_QWORD: - insn->immediate1.value = get_next(int, insn); - insn->immediate1.nbytes = 4; - insn->immediate2.value = get_next(int, insn); - insn->immediate2.nbytes = 4; + insn_field_set(&insn->immediate1, get_next(int, insn), 4); + insn_field_set(&insn->immediate2, get_next(int, insn), 4); break; case INAT_IMM_PTR: if (!__get_immptr(insn)) @@ -599,8 +593,7 @@ void insn_get_immediate(struct insn *insn) goto err_out; } if (inat_has_second_immediate(insn->attr)) { - insn->immediate2.value = get_next(signed char, insn); - insn->immediate2.nbytes = 1; + insn_field_set(&insn->immediate2, get_next(signed char, insn), 1); } done: insn->immediate.got = 1; diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index b4c43a9b1483..f6fb1d218dcc 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -28,7 +28,7 @@ SYM_FUNC_START_NOALIGN(__x86_retpoline_\reg) jmp .Lspec_trap_\@ .Ldo_rop_\@: mov %\reg, (%_ASM_SP) - UNWIND_HINT_RET_OFFSET + UNWIND_HINT_FUNC ret SYM_FUNC_END(__x86_retpoline_\reg) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 525197381baa..a73347e2cdfc 100644 --- 
a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -9,6 +9,7 @@ #include <linux/kdebug.h> /* oops_begin/end, ... */ #include <linux/extable.h> /* search_exception_tables */ #include <linux/memblock.h> /* max_low_pfn */ +#include <linux/kfence.h> /* kfence_handle_page_fault */ #include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */ #include <linux/mmiotrace.h> /* kmmio_handler, ... */ #include <linux/perf_event.h> /* perf_sw_event */ @@ -680,6 +681,11 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code, if (IS_ENABLED(CONFIG_EFI)) efi_crash_gracefully_on_page_fault(address); + /* Only not-present faults should be handled by KFENCE. */ + if (!(error_code & X86_PF_PROT) && + kfence_handle_page_fault(address, error_code & X86_PF_WRITE, regs)) + return; + oops: /* * Oops. The kernel tried to access some bad page. We'll have to diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index 8f665c352bf0..ca311aaa67b8 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -1164,12 +1164,14 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + kfree(v); ++*pos; return memtype_get_idx(*pos); } static void memtype_seq_stop(struct seq_file *seq, void *v) { + kfree(v); } static int memtype_seq_show(struct seq_file *seq, void *v) @@ -1181,8 +1183,6 @@ static int memtype_seq_show(struct seq_file *seq, void *v) entry_print->end, cattr_name(entry_print->type)); - kfree(entry_print); - return 0; } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 79e7a0ec1da5..6926d0ca6c71 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1349,6 +1349,7 @@ st: if (is_imm8(insn->off)) insn->imm == (BPF_XOR | BPF_FETCH)) { u8 *branch_target; bool is64 = BPF_SIZE(insn->code) == BPF_DW; + u32 real_src_reg = src_reg; /* * Can't be implemented with a single x86 insn. @@ -1357,6 +1358,9 @@ st: if (is_imm8(insn->off)) /* Will need RAX as a CMPXCHG operand so save R0 */ emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); + if (src_reg == BPF_REG_0) + real_src_reg = BPF_REG_AX; + branch_target = prog; /* Load old value */ emit_ldx(&prog, BPF_SIZE(insn->code), @@ -1366,9 +1370,9 @@ st: if (is_imm8(insn->off)) * put the result in the AUX_REG. 
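
This CMPXCHG-loop lowering is exactly where the real_src_reg fixup above matters: fetching AND/OR/XOR atomics have no single x86 instruction, and because CMPXCHG implicitly uses RAX (the register backing BPF R0), the JIT first parks R0 in the auxiliary BPF_REG_AX. If the BPF source register is R0 itself, every later access must go through that saved copy. A one-instruction sketch of a program hitting the case, using BPF_ATOMIC_OP from include/linux/filter.h (surrounding program setup omitted):

    /* lock *(u64 *)(fp - 8) &= r0, fetching the old value back into r0;
     * src_reg == BPF_REG_0, so the JIT must operate on real_src_reg
     * (== BPF_REG_AX), not on the clobbered src_reg */
    BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_0, -8),
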
*/ emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); - maybe_emit_mod(&prog, AUX_REG, src_reg, is64); + maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], - add_2reg(0xC0, AUX_REG, src_reg)); + add_2reg(0xC0, AUX_REG, real_src_reg)); /* Attempt to swap in new value */ err = emit_atomic(&prog, BPF_CMPXCHG, dst_reg, AUX_REG, insn->off, @@ -1381,7 +1385,7 @@ st: if (is_imm8(insn->off)) */ EMIT2(X86_JNE, -(prog - branch_target) - 2); /* Return the pre-modification value */ - emit_mov_reg(&prog, is64, src_reg, BPF_REG_0); + emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); /* Restore R0 after clobbering RAX */ emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); break; diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 95e2e6bd8d8c..8edd62206604 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -28,10 +28,12 @@ #include <linux/io.h> #include <linux/smp.h> +#include <asm/cpu_device_id.h> #include <asm/segment.h> #include <asm/pci_x86.h> #include <asm/hw_irq.h> #include <asm/io_apic.h> +#include <asm/intel-family.h> #include <asm/intel-mid.h> #include <asm/acpi.h> @@ -140,6 +142,7 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn, * type1_access_ok - check whether to use type 1 * @bus: bus number * @devfn: device & function in question + * @reg: configuration register offset * * If the bus is on a Lincroft chip and it exists, or is not on a Lincroft at * all, the we can go ahead with any reads & writes. If it's on a Lincroft, @@ -212,10 +215,17 @@ static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, where, size, value); } +static const struct x86_cpu_id intel_mid_cpu_ids[] = { + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL), + {} +}; + static int intel_mid_pci_irq_enable(struct pci_dev *dev) { + const struct x86_cpu_id *id; struct irq_alloc_info info; bool polarity_low; + u16 model = 0; int ret; u8 gsi; @@ -228,8 +238,12 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) return ret; } - switch (intel_mid_identify_cpu()) { - case INTEL_MID_CPU_CHIP_TANGIER: + id = x86_match_cpu(intel_mid_cpu_ids); + if (id) + model = id->model; + + switch (model) { + case INTEL_FAM6_ATOM_SILVERMONT_MID: polarity_low = false; /* Special treatment for IRQ0 */ diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 234998f196d4..de6bf0e7e8f8 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c @@ -11,9 +11,9 @@ * themselves. 
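
The intel_mid_pci conversion above is the recurring shape of this series: open-coded intel_mid_identify_cpu() checks give way to generic x86_cpu_id matching, the same idiom the deleted Bluetooth device library further down already used. Reduced to a sketch, where tangier_cfg and struct mid_cfg are hypothetical placeholders for per-model data:

    #include <asm/cpu_device_id.h>
    #include <asm/intel-family.h>

    static const struct x86_cpu_id mid_cpu_ids[] = {
            X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &tangier_cfg),
            {}
    };

    static int __init mid_example_init(void)
    {
            const struct x86_cpu_id *id = x86_match_cpu(mid_cpu_ids);

            if (!id)
                    return -ENODEV;         /* not a matched SoC */
            /* the per-model payload travels in id->driver_data */
            return mid_apply_cfg((const struct mid_cfg *)id->driver_data);
    }
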
*/ +#include <linux/acpi.h> #include <linux/pci.h> #include <linux/init.h> -#include <linux/sfi_acpi.h> #include <linux/bitmap.h> #include <linux/dmi.h> #include <linux/slab.h> @@ -665,7 +665,7 @@ void __init pci_mmcfg_early_init(void) if (pci_mmcfg_check_hostbridge()) known_bridge = 1; else - acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg); + acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg); __pci_mmcfg_init(1); set_apei_filter(); @@ -683,7 +683,7 @@ void __init pci_mmcfg_late_init(void) /* MMCONFIG hasn't been enabled yet, try again */ if (pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) { - acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg); + acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg); __pci_mmcfg_init(0); } } diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile index b2f90a1a89f1..3ed03a2552d0 100644 --- a/arch/x86/platform/Makefile +++ b/arch/x86/platform/Makefile @@ -10,6 +10,5 @@ obj-y += intel-mid/ obj-y += intel-quark/ obj-y += olpc/ obj-y += scx200/ -obj-y += sfi/ obj-y += ts5500/ obj-y += uv/ diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile index cc2549f0ccb1..ddfc08783fb8 100644 --- a/arch/x86/platform/intel-mid/Makefile +++ b/arch/x86/platform/intel-mid/Makefile @@ -1,7 +1,2 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o pwr.o - -# SFI specific code -ifdef CONFIG_X86_INTEL_MID -obj-$(CONFIG_SFI) += sfi.o device_libs/ -endif +obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o pwr.o diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile deleted file mode 100644 index 480fed21cc7d..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/Makefile +++ /dev/null @@ -1,33 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# Family-Level Interface Shim (FLIS) -obj-$(subst m,y,$(CONFIG_PINCTRL_MERRIFIELD)) += platform_mrfld_pinctrl.o -# SDHCI Devices -obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += platform_mrfld_sd.o -# WiFi + BT -obj-$(subst m,y,$(CONFIG_BRCMFMAC_SDIO)) += platform_bcm43xx.o -obj-$(subst m,y,$(CONFIG_BT_HCIUART_BCM)) += platform_bt.o -# IPC Devices -obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic.o -obj-$(subst m,y,$(CONFIG_SND_MFLD_MACHINE)) += platform_msic_audio.o -obj-$(subst m,y,$(CONFIG_GPIO_MSIC)) += platform_msic_gpio.o -obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_ocd.o -obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_battery.o -obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o -obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o -# SPI Devices -obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o -# I2C Devices -obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o -obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o -obj-$(subst m,y,$(CONFIG_MPU3050_I2C)) += platform_mpu3050.o -obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o -obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o -# I2C GPIO Expanders -obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_max7315.o -obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o -obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o -# MISC Devices -obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o -obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o -obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o -obj-$(subst 
m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c deleted file mode 100644 index 564c47c53f3a..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c +++ /dev/null @@ -1,101 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_bcm43xx.c: bcm43xx platform data initialization file - * - * (C) Copyright 2016 Intel Corporation - * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> - */ - -#include <linux/gpio/machine.h> -#include <linux/platform_device.h> -#include <linux/regulator/machine.h> -#include <linux/regulator/fixed.h> -#include <linux/sfi.h> - -#include <asm/intel-mid.h> - -#define WLAN_SFI_GPIO_IRQ_NAME "WLAN-interrupt" -#define WLAN_SFI_GPIO_ENABLE_NAME "WLAN-enable" - -#define WLAN_DEV_NAME "0000:00:01.3" - -static struct regulator_consumer_supply bcm43xx_vmmc_supply = { - .dev_name = WLAN_DEV_NAME, - .supply = "vmmc", -}; - -static struct regulator_init_data bcm43xx_vmmc_data = { - .constraints = { - .valid_ops_mask = REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = 1, - .consumer_supplies = &bcm43xx_vmmc_supply, -}; - -static struct fixed_voltage_config bcm43xx_vmmc = { - .supply_name = "bcm43xx-vmmc-regulator", - /* - * Announce 2.0V here to be compatible with SDIO specification. The - * real voltage and signaling are still 1.8V. - */ - .microvolts = 2000000, /* 1.8V */ - .startup_delay = 250 * 1000, /* 250ms */ - .enabled_at_boot = 0, /* disabled at boot */ - .init_data = &bcm43xx_vmmc_data, -}; - -static struct platform_device bcm43xx_vmmc_regulator = { - .name = "reg-fixed-voltage", - .id = PLATFORM_DEVID_AUTO, - .dev = { - .platform_data = &bcm43xx_vmmc, - }, -}; - -static struct gpiod_lookup_table bcm43xx_vmmc_gpio_table = { - .dev_id = "reg-fixed-voltage.0", - .table = { - GPIO_LOOKUP("0000:00:0c.0", -1, NULL, GPIO_ACTIVE_LOW), - {} - }, -}; - -static int __init bcm43xx_regulator_register(void) -{ - struct gpiod_lookup_table *table = &bcm43xx_vmmc_gpio_table; - struct gpiod_lookup *lookup = table->table; - int ret; - - lookup[0].chip_hwnum = get_gpio_by_name(WLAN_SFI_GPIO_ENABLE_NAME); - gpiod_add_lookup_table(table); - - ret = platform_device_register(&bcm43xx_vmmc_regulator); - if (ret) { - pr_err("%s: vmmc regulator register failed\n", __func__); - return ret; - } - - return 0; -} - -static void __init *bcm43xx_platform_data(void *info) -{ - int ret; - - ret = bcm43xx_regulator_register(); - if (ret) - return NULL; - - pr_info("Using generic wifi platform data\n"); - - /* For now it's empty */ - return NULL; -} - -static const struct devs_id bcm43xx_clk_vmmc_dev_id __initconst = { - .name = "bcm43xx_clk_vmmc", - .type = SFI_DEV_TYPE_SD, - .get_platform_data = &bcm43xx_platform_data, -}; - -sfi_device(bcm43xx_clk_vmmc_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bma023.c b/arch/x86/platform/intel-mid/device_libs/platform_bma023.c deleted file mode 100644 index 32912a17f68e..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_bma023.c +++ /dev/null @@ -1,16 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_bma023.c: bma023 platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - */ - -#include <asm/intel-mid.h> - -static const struct devs_id bma023_dev_id __initconst = { - .name = "bma023", - .type = SFI_DEV_TYPE_I2C, - .delay = 1, -}; - -sfi_device(bma023_dev_id); diff --git 
a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c deleted file mode 100644 index 2930b6e9473e..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c +++ /dev/null @@ -1,101 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Bluetooth platform data initialization file - * - * (C) Copyright 2017 Intel Corporation - * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> - */ - -#include <linux/gpio/machine.h> -#include <linux/pci.h> -#include <linux/platform_device.h> - -#include <asm/cpu_device_id.h> -#include <asm/intel-family.h> -#include <asm/intel-mid.h> - -struct bt_sfi_data { - struct device *dev; - const char *name; - int (*setup)(struct bt_sfi_data *ddata); -}; - -static struct gpiod_lookup_table tng_bt_sfi_gpio_table = { - .dev_id = "hci_bcm", - .table = { - GPIO_LOOKUP("0000:00:0c.0", -1, "device-wakeup", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("0000:00:0c.0", -1, "shutdown", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("0000:00:0c.0", -1, "host-wakeup", GPIO_ACTIVE_HIGH), - { }, - }, -}; - -#define TNG_BT_SFI_GPIO_DEVICE_WAKEUP "bt_wakeup" -#define TNG_BT_SFI_GPIO_SHUTDOWN "BT-reset" -#define TNG_BT_SFI_GPIO_HOST_WAKEUP "bt_uart_enable" - -static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata) -{ - struct gpiod_lookup_table *table = &tng_bt_sfi_gpio_table; - struct gpiod_lookup *lookup = table->table; - struct pci_dev *pdev; - - /* Connected to /dev/ttyS0 */ - pdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(4, 1)); - if (!pdev) - return -ENODEV; - - ddata->dev = &pdev->dev; - ddata->name = table->dev_id; - - lookup[0].chip_hwnum = get_gpio_by_name(TNG_BT_SFI_GPIO_DEVICE_WAKEUP); - lookup[1].chip_hwnum = get_gpio_by_name(TNG_BT_SFI_GPIO_SHUTDOWN); - lookup[2].chip_hwnum = get_gpio_by_name(TNG_BT_SFI_GPIO_HOST_WAKEUP); - - gpiod_add_lookup_table(table); - return 0; -} - -static struct bt_sfi_data tng_bt_sfi_data __initdata = { - .setup = tng_bt_sfi_setup, -}; - -static const struct x86_cpu_id bt_sfi_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &tng_bt_sfi_data), - {} -}; - -static int __init bt_sfi_init(void) -{ - struct platform_device_info info; - struct platform_device *pdev; - const struct x86_cpu_id *id; - struct bt_sfi_data *ddata; - int ret; - - id = x86_match_cpu(bt_sfi_cpu_ids); - if (!id) - return -ENODEV; - - ddata = (struct bt_sfi_data *)id->driver_data; - if (!ddata) - return -ENODEV; - - ret = ddata->setup(ddata); - if (ret) - return ret; - - memset(&info, 0, sizeof(info)); - info.fwnode = ddata->dev->fwnode; - info.parent = ddata->dev; - info.name = ddata->name; - info.id = PLATFORM_DEVID_NONE; - - pdev = platform_device_register_full(&info); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - - dev_info(ddata->dev, "Registered Bluetooth device: %s\n", ddata->name); - return 0; -} -device_initcall(bt_sfi_init); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c deleted file mode 100644 index a2508582a0b1..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c +++ /dev/null @@ -1,39 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_emc1403.c: emc1403 platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/init.h> -#include <linux/gpio.h> -#include <linux/i2c.h> -#include <asm/intel-mid.h> - -static void __init 
*emc1403_platform_data(void *info) -{ - static short intr2nd_pdata; - struct i2c_board_info *i2c_info = info; - int intr = get_gpio_by_name("thermal_int"); - int intr2nd = get_gpio_by_name("thermal_alert"); - - if (intr < 0) - return NULL; - if (intr2nd < 0) - return NULL; - - i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; - intr2nd_pdata = intr2nd + INTEL_MID_IRQ_OFFSET; - - return &intr2nd_pdata; -} - -static const struct devs_id emc1403_dev_id __initconst = { - .name = "emc1403", - .type = SFI_DEV_TYPE_I2C, - .delay = 1, - .get_platform_data = &emc1403_platform_data, -}; - -sfi_device(emc1403_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c deleted file mode 100644 index d9435d2196a4..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c +++ /dev/null @@ -1,81 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_gpio_keys.c: gpio_keys platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/input.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/gpio.h> -#include <linux/gpio_keys.h> -#include <linux/platform_device.h> -#include <asm/intel-mid.h> - -#define DEVICE_NAME "gpio-keys" - -/* - * we will search these buttons in SFI GPIO table (by name) - * and register them dynamically. Please add all possible - * buttons here, we will shrink them if no GPIO found. - */ -static struct gpio_keys_button gpio_button[] = { - {KEY_POWER, -1, 1, "power_btn", EV_KEY, 0, 3000}, - {KEY_PROG1, -1, 1, "prog_btn1", EV_KEY, 0, 20}, - {KEY_PROG2, -1, 1, "prog_btn2", EV_KEY, 0, 20}, - {SW_LID, -1, 1, "lid_switch", EV_SW, 0, 20}, - {KEY_VOLUMEUP, -1, 1, "vol_up", EV_KEY, 0, 20}, - {KEY_VOLUMEDOWN, -1, 1, "vol_down", EV_KEY, 0, 20}, - {KEY_MUTE, -1, 1, "mute_enable", EV_KEY, 0, 20}, - {KEY_VOLUMEUP, -1, 1, "volume_up", EV_KEY, 0, 20}, - {KEY_VOLUMEDOWN, -1, 1, "volume_down", EV_KEY, 0, 20}, - {KEY_CAMERA, -1, 1, "camera_full", EV_KEY, 0, 20}, - {KEY_CAMERA_FOCUS, -1, 1, "camera_half", EV_KEY, 0, 20}, - {SW_KEYPAD_SLIDE, -1, 1, "MagSw1", EV_SW, 0, 20}, - {SW_KEYPAD_SLIDE, -1, 1, "MagSw2", EV_SW, 0, 20}, -}; - -static struct gpio_keys_platform_data gpio_keys = { - .buttons = gpio_button, - .rep = 1, - .nbuttons = -1, /* will fill it after search */ -}; - -static struct platform_device pb_device = { - .name = DEVICE_NAME, - .id = -1, - .dev = { - .platform_data = &gpio_keys, - }, -}; - -/* - * Shrink the non-existent buttons, register the gpio button - * device if there is some - */ -static int __init pb_keys_init(void) -{ - struct gpio_keys_button *gb = gpio_button; - int i, good = 0; - - for (i = 0; i < ARRAY_SIZE(gpio_button); i++) { - gb[i].gpio = get_gpio_by_name(gb[i].desc); - pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc, - gb[i].gpio); - if (gb[i].gpio < 0) - continue; - - if (i != good) - gb[good] = gb[i]; - good++; - } - - if (good) { - gpio_keys.nbuttons = good; - return platform_device_register(&pb_device); - } - return 0; -} -late_initcall(pb_keys_init); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c deleted file mode 100644 index a4485cd638c6..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c +++ /dev/null @@ -1,37 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_lis331.c: lis331 
platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/i2c.h> -#include <linux/gpio.h> -#include <asm/intel-mid.h> - -static void __init *lis331dl_platform_data(void *info) -{ - static short intr2nd_pdata; - struct i2c_board_info *i2c_info = info; - int intr = get_gpio_by_name("accel_int"); - int intr2nd = get_gpio_by_name("accel_2"); - - if (intr < 0) - return NULL; - if (intr2nd < 0) - return NULL; - - i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; - intr2nd_pdata = intr2nd + INTEL_MID_IRQ_OFFSET; - - return &intr2nd_pdata; -} - -static const struct devs_id lis331dl_dev_id __initconst = { - .name = "i2c_accel", - .type = SFI_DEV_TYPE_I2C, - .get_platform_data = &lis331dl_platform_data, -}; - -sfi_device(lis331dl_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c deleted file mode 100644 index e9287c3184da..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c +++ /dev/null @@ -1,77 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_max7315.c: max7315 platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/init.h> -#include <linux/gpio.h> -#include <linux/i2c.h> -#include <linux/platform_data/pca953x.h> -#include <asm/intel-mid.h> - -#define MAX7315_NUM 2 - -static void __init *max7315_platform_data(void *info) -{ - static struct pca953x_platform_data max7315_pdata[MAX7315_NUM]; - static int nr; - struct pca953x_platform_data *max7315 = &max7315_pdata[nr]; - struct i2c_board_info *i2c_info = info; - int gpio_base, intr; - char base_pin_name[SFI_NAME_LEN + 1]; - char intr_pin_name[SFI_NAME_LEN + 1]; - - if (nr == MAX7315_NUM) { - pr_err("too many max7315s, we only support %d\n", - MAX7315_NUM); - return NULL; - } - /* we have several max7315 on the board, we only need load several - * instances of the same pca953x driver to cover them - */ - strcpy(i2c_info->type, "max7315"); - if (nr++) { - snprintf(base_pin_name, sizeof(base_pin_name), - "max7315_%d_base", nr); - snprintf(intr_pin_name, sizeof(intr_pin_name), - "max7315_%d_int", nr); - } else { - strcpy(base_pin_name, "max7315_base"); - strcpy(intr_pin_name, "max7315_int"); - } - - gpio_base = get_gpio_by_name(base_pin_name); - intr = get_gpio_by_name(intr_pin_name); - - if (gpio_base < 0) - return NULL; - max7315->gpio_base = gpio_base; - if (intr != -1) { - i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; - max7315->irq_base = gpio_base + INTEL_MID_IRQ_OFFSET; - } else { - i2c_info->irq = -1; - max7315->irq_base = -1; - } - return max7315; -} - -static const struct devs_id max7315_dev_id __initconst = { - .name = "i2c_max7315", - .type = SFI_DEV_TYPE_I2C, - .delay = 1, - .get_platform_data = &max7315_platform_data, -}; - -static const struct devs_id max7315_2_dev_id __initconst = { - .name = "i2c_max7315_2", - .type = SFI_DEV_TYPE_I2C, - .delay = 1, - .get_platform_data = &max7315_platform_data, -}; - -sfi_device(max7315_dev_id); -sfi_device(max7315_2_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c deleted file mode 100644 index 28a182713934..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c +++ /dev/null @@ -1,32 +0,0 @@ -// 
SPDX-License-Identifier: GPL-2.0-only -/* - * platform_mpu3050.c: mpu3050 platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/gpio.h> -#include <linux/i2c.h> -#include <asm/intel-mid.h> - -static void *mpu3050_platform_data(void *info) -{ - struct i2c_board_info *i2c_info = info; - int intr = get_gpio_by_name("mpu3050_int"); - - if (intr < 0) - return NULL; - - i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; - return NULL; -} - -static const struct devs_id mpu3050_dev_id __initconst = { - .name = "mpu3050", - .type = SFI_DEV_TYPE_I2C, - .delay = 1, - .get_platform_data = &mpu3050_platform_data, -}; - -sfi_device(mpu3050_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c deleted file mode 100644 index 605e1f94ad89..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c +++ /dev/null @@ -1,39 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Merrifield FLIS platform device initialization file - * - * Copyright (C) 2016, Intel Corporation - * - * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> - */ - -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/platform_device.h> - -#include <asm/intel-mid.h> - -#define FLIS_BASE_ADDR 0xff0c0000 -#define FLIS_LENGTH 0x8000 - -static struct resource mrfld_pinctrl_mmio_resource = { - .start = FLIS_BASE_ADDR, - .end = FLIS_BASE_ADDR + FLIS_LENGTH - 1, - .flags = IORESOURCE_MEM, -}; - -static struct platform_device mrfld_pinctrl_device = { - .name = "pinctrl-merrifield", - .id = PLATFORM_DEVID_NONE, - .resource = &mrfld_pinctrl_mmio_resource, - .num_resources = 1, -}; - -static int __init mrfld_pinctrl_init(void) -{ - if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) - return platform_device_register(&mrfld_pinctrl_device); - - return -ENODEV; -} -arch_initcall(mrfld_pinctrl_init); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c deleted file mode 100644 index ec2afb41b34a..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c +++ /dev/null @@ -1,78 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Merrifield power button support - * - * (C) Copyright 2017 Intel Corporation - * - * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> - */ - -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/platform_device.h> -#include <linux/sfi.h> - -#include <asm/intel-mid.h> -#include <asm/intel_scu_ipc.h> - -static struct resource mrfld_power_btn_resources[] = { - { - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device mrfld_power_btn_dev = { - .name = "msic_power_btn", - .id = PLATFORM_DEVID_NONE, - .num_resources = ARRAY_SIZE(mrfld_power_btn_resources), - .resource = mrfld_power_btn_resources, -}; - -static int mrfld_power_btn_scu_status_change(struct notifier_block *nb, - unsigned long code, void *data) -{ - if (code == SCU_DOWN) { - platform_device_unregister(&mrfld_power_btn_dev); - return 0; - } - - return platform_device_register(&mrfld_power_btn_dev); -} - -static struct notifier_block mrfld_power_btn_scu_notifier = { - .notifier_call = mrfld_power_btn_scu_status_change, -}; - -static int __init register_mrfld_power_btn(void) -{ - if (intel_mid_identify_cpu() != 
INTEL_MID_CPU_CHIP_TANGIER) - return -ENODEV; - - /* - * We need to be sure that the SCU IPC is ready before - * PMIC power button device can be registered: - */ - intel_scu_notifier_add(&mrfld_power_btn_scu_notifier); - - return 0; -} -arch_initcall(register_mrfld_power_btn); - -static void __init *mrfld_power_btn_platform_data(void *info) -{ - struct resource *res = mrfld_power_btn_resources; - struct sfi_device_table_entry *pentry = info; - - res->start = res->end = pentry->irq; - return NULL; -} - -static const struct devs_id mrfld_power_btn_dev_id __initconst = { - .name = "bcove_power_btn", - .type = SFI_DEV_TYPE_IPC, - .delay = 1, - .msic = 1, - .get_platform_data = &mrfld_power_btn_platform_data, -}; - -sfi_device(mrfld_power_btn_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c deleted file mode 100644 index 40e9808a9634..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Merrifield legacy RTC initialization file - * - * (C) Copyright 2017 Intel Corporation - * - * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> - */ - -#include <linux/init.h> - -#include <asm/hw_irq.h> -#include <asm/intel-mid.h> -#include <asm/io_apic.h> -#include <asm/time.h> -#include <asm/x86_init.h> - -static int __init mrfld_legacy_rtc_alloc_irq(void) -{ - struct irq_alloc_info info; - int ret; - - if (!x86_platform.legacy.rtc) - return -ENODEV; - - ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 1, 0); - ret = mp_map_gsi_to_irq(RTC_IRQ, IOAPIC_MAP_ALLOC, &info); - if (ret < 0) { - pr_info("Failed to allocate RTC interrupt. Disabling RTC\n"); - x86_platform.legacy.rtc = 0; - return ret; - } - - return 0; -} - -static int __init mrfld_legacy_rtc_init(void) -{ - if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) - return -ENODEV; - - return mrfld_legacy_rtc_alloc_irq(); -} -arch_initcall(mrfld_legacy_rtc_init); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_sd.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_sd.c deleted file mode 100644 index fe3b7ff975f3..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_sd.c +++ /dev/null @@ -1,43 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * SDHCI platform data initilisation file - * - * (C) Copyright 2016 Intel Corporation - * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> - */ - -#include <linux/init.h> -#include <linux/pci.h> - -#include <linux/mmc/sdhci-pci-data.h> - -#include <asm/intel-mid.h> - -#define INTEL_MRFLD_SD 2 -#define INTEL_MRFLD_SD_CD_GPIO 77 - -static struct sdhci_pci_data mrfld_sdhci_pci_data = { - .rst_n_gpio = -EINVAL, - .cd_gpio = INTEL_MRFLD_SD_CD_GPIO, -}; - -static struct sdhci_pci_data * -mrfld_sdhci_pci_get_data(struct pci_dev *pdev, int slotno) -{ - unsigned int func = PCI_FUNC(pdev->devfn); - - if (func == INTEL_MRFLD_SD) - return &mrfld_sdhci_pci_data; - - return NULL; -} - -static int __init mrfld_sd_init(void) -{ - if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) - return -ENODEV; - - sdhci_pci_get_data = mrfld_sdhci_pci_get_data; - return 0; -} -arch_initcall(mrfld_sd_init); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c deleted file mode 100644 index b828f4fd40be..000000000000 --- 
a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c +++ /dev/null @@ -1,50 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * spidev platform data initialization file - * - * (C) Copyright 2014, 2016 Intel Corporation - * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com> - * Dan O'Donovan <dan@emutex.com> - */ - -#include <linux/err.h> -#include <linux/init.h> -#include <linux/sfi.h> -#include <linux/spi/pxa2xx_spi.h> -#include <linux/spi/spi.h> - -#include <asm/intel-mid.h> - -#define MRFLD_SPI_DEFAULT_DMA_BURST 8 -#define MRFLD_SPI_DEFAULT_TIMEOUT 500 - -/* GPIO pin for spidev chipselect */ -#define MRFLD_SPIDEV_GPIO_CS 111 - -static struct pxa2xx_spi_chip spidev_spi_chip = { - .dma_burst_size = MRFLD_SPI_DEFAULT_DMA_BURST, - .timeout = MRFLD_SPI_DEFAULT_TIMEOUT, - .gpio_cs = MRFLD_SPIDEV_GPIO_CS, -}; - -static void __init *spidev_platform_data(void *info) -{ - struct spi_board_info *spi_info = info; - - if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) - return ERR_PTR(-ENODEV); - - spi_info->mode = SPI_MODE_0; - spi_info->controller_data = &spidev_spi_chip; - - return NULL; -} - -static const struct devs_id spidev_dev_id __initconst = { - .name = "spidev", - .type = SFI_DEV_TYPE_SPI, - .delay = 0, - .get_platform_data = &spidev_platform_data, -}; - -sfi_device(spidev_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c deleted file mode 100644 index 227218a8f98e..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c +++ /dev/null @@ -1,82 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Merrifield watchdog platform device library file - * - * (C) Copyright 2014 Intel Corporation - * Author: David Cohen <david.a.cohen@linux.intel.com> - */ - -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/platform_device.h> -#include <linux/platform_data/intel-mid_wdt.h> - -#include <asm/intel-mid.h> -#include <asm/intel_scu_ipc.h> -#include <asm/io_apic.h> -#include <asm/hw_irq.h> - -#define TANGIER_EXT_TIMER0_MSI 12 - -static struct platform_device wdt_dev = { - .name = "intel_mid_wdt", - .id = -1, -}; - -static int tangier_probe(struct platform_device *pdev) -{ - struct irq_alloc_info info; - struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data; - int gsi = TANGIER_EXT_TIMER0_MSI; - int irq; - - if (!pdata) - return -EINVAL; - - /* IOAPIC builds identity mapping between GSI and IRQ on MID */ - ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0); - irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info); - if (irq < 0) { - dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n", gsi); - return irq; - } - - pdata->irq = irq; - return 0; -} - -static struct intel_mid_wdt_pdata tangier_pdata = { - .probe = tangier_probe, -}; - -static int wdt_scu_status_change(struct notifier_block *nb, - unsigned long code, void *data) -{ - if (code == SCU_DOWN) { - platform_device_unregister(&wdt_dev); - return 0; - } - - return platform_device_register(&wdt_dev); -} - -static struct notifier_block wdt_scu_notifier = { - .notifier_call = wdt_scu_status_change, -}; - -static int __init register_mid_wdt(void) -{ - if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) - return -ENODEV; - - wdt_dev.dev.platform_data = &tangier_pdata; - - /* - * We need to be sure that the SCU IPC is ready before watchdog device - * can be registered: - */ - intel_scu_notifier_add(&wdt_scu_notifier); - - return 0; -} 
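
The watchdog library removed here shares one lifecycle idiom with the MSIC library removed below: the platform device is never registered directly, but from an SCU availability notifier, so it exists only while the SCU IPC channel is usable. The pattern, stripped of device specifics (mydev_device is a placeholder), as implemented by the deleted code against <asm/intel_scu_ipc.h>:

    static int mydev_scu_status_change(struct notifier_block *nb,
                                       unsigned long code, void *data)
    {
            if (code == SCU_DOWN) {
                    platform_device_unregister(&mydev_device);
                    return 0;
            }
            /* SCU_AVAILABLE: the IPC channel is usable now */
            return platform_device_register(&mydev_device);
    }

    static struct notifier_block mydev_scu_notifier = {
            .notifier_call = mydev_scu_status_change,
    };

    static int __init mydev_init(void)
    {
            /* defer device creation until the SCU reports in */
            intel_scu_notifier_add(&mydev_scu_notifier);
            return 0;
    }
    arch_initcall(mydev_init);
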
-arch_initcall(register_mid_wdt); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.c b/arch/x86/platform/intel-mid/device_libs/platform_msic.c deleted file mode 100644 index b17783d0d4e7..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic.c +++ /dev/null @@ -1,83 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_msic.c: MSIC platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/kernel.h> -#include <linux/interrupt.h> -#include <linux/scatterlist.h> -#include <linux/init.h> -#include <linux/sfi.h> -#include <linux/mfd/intel_msic.h> -#include <asm/intel_scu_ipc.h> -#include <asm/intel-mid.h> -#include "platform_msic.h" - -struct intel_msic_platform_data msic_pdata; - -static struct resource msic_resources[] = { - { - .start = INTEL_MSIC_IRQ_PHYS_BASE, - .end = INTEL_MSIC_IRQ_PHYS_BASE + 64 - 1, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device msic_device = { - .name = "intel_msic", - .id = -1, - .dev = { - .platform_data = &msic_pdata, - }, - .num_resources = ARRAY_SIZE(msic_resources), - .resource = msic_resources, -}; - -static int msic_scu_status_change(struct notifier_block *nb, - unsigned long code, void *data) -{ - if (code == SCU_DOWN) { - platform_device_unregister(&msic_device); - return 0; - } - - return platform_device_register(&msic_device); -} - -static int __init msic_init(void) -{ - static struct notifier_block msic_scu_notifier = { - .notifier_call = msic_scu_status_change, - }; - - /* - * We need to be sure that the SCU IPC is ready before MSIC device - * can be registered. - */ - if (intel_mid_has_msic()) - intel_scu_notifier_add(&msic_scu_notifier); - - return 0; -} -arch_initcall(msic_init); - -/* - * msic_generic_platform_data - sets generic platform data for the block - * @info: pointer to the SFI device table entry for this block - * @block: MSIC block - * - * Function sets IRQ number from the SFI table entry for given device to - * the MSIC platform data. 
- */ -void *msic_generic_platform_data(void *info, enum intel_msic_block block) -{ - struct sfi_device_table_entry *entry = info; - - BUG_ON(block < 0 || block >= INTEL_MSIC_BLOCK_LAST); - msic_pdata.irq[block] = entry->irq; - - return NULL; -} diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.h b/arch/x86/platform/intel-mid/device_libs/platform_msic.h deleted file mode 100644 index 91deb2e65b0e..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * platform_msic.h: MSIC platform data header file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ -#ifndef _PLATFORM_MSIC_H_ -#define _PLATFORM_MSIC_H_ - -extern struct intel_msic_platform_data msic_pdata; - -void *msic_generic_platform_data(void *info, enum intel_msic_block block); - -#endif diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c deleted file mode 100644 index e765da78ad8c..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_msic_audio.c: MSIC audio platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/kernel.h> -#include <linux/interrupt.h> -#include <linux/scatterlist.h> -#include <linux/init.h> -#include <linux/sfi.h> -#include <linux/platform_device.h> -#include <linux/mfd/intel_msic.h> -#include <asm/intel-mid.h> - -#include "platform_msic.h" - -static void *msic_audio_platform_data(void *info) -{ - struct platform_device *pdev; - - pdev = platform_device_register_simple("sst-platform", -1, NULL, 0); - - if (IS_ERR(pdev)) { - pr_err("failed to create audio platform device\n"); - return NULL; - } - - return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_AUDIO); -} - -static const struct devs_id msic_audio_dev_id __initconst = { - .name = "msic_audio", - .type = SFI_DEV_TYPE_IPC, - .delay = 1, - .msic = 1, - .get_platform_data = &msic_audio_platform_data, -}; - -sfi_device(msic_audio_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c deleted file mode 100644 index f461f84903f8..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_msic_battery.c: MSIC battery platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/kernel.h> -#include <linux/interrupt.h> -#include <linux/scatterlist.h> -#include <linux/init.h> -#include <linux/sfi.h> -#include <linux/mfd/intel_msic.h> -#include <asm/intel-mid.h> - -#include "platform_msic.h" - -static void __init *msic_battery_platform_data(void *info) -{ - return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_BATTERY); -} - -static const struct devs_id msic_battery_dev_id __initconst = { - .name = "msic_battery", - .type = SFI_DEV_TYPE_IPC, - .delay = 1, - .msic = 1, - .get_platform_data = &msic_battery_platform_data, -}; - -sfi_device(msic_battery_dev_id); diff --git 
a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c deleted file mode 100644 index 71a7d6db3878..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c +++ /dev/null @@ -1,43 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_msic_gpio.c: MSIC GPIO platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/kernel.h> -#include <linux/interrupt.h> -#include <linux/scatterlist.h> -#include <linux/sfi.h> -#include <linux/init.h> -#include <linux/gpio.h> -#include <linux/mfd/intel_msic.h> -#include <asm/intel-mid.h> - -#include "platform_msic.h" - -static void __init *msic_gpio_platform_data(void *info) -{ - static struct intel_msic_gpio_pdata msic_gpio_pdata; - - int gpio = get_gpio_by_name("msic_gpio_base"); - - if (gpio < 0) - return NULL; - - msic_gpio_pdata.gpio_base = gpio; - msic_pdata.gpio = &msic_gpio_pdata; - - return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_GPIO); -} - -static const struct devs_id msic_gpio_dev_id __initconst = { - .name = "msic_gpio", - .type = SFI_DEV_TYPE_IPC, - .delay = 1, - .msic = 1, - .get_platform_data = &msic_gpio_platform_data, -}; - -sfi_device(msic_gpio_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c deleted file mode 100644 index 558c0d974430..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_msic_ocd.c: MSIC OCD platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/kernel.h> -#include <linux/interrupt.h> -#include <linux/scatterlist.h> -#include <linux/sfi.h> -#include <linux/init.h> -#include <linux/gpio.h> -#include <linux/mfd/intel_msic.h> -#include <asm/intel-mid.h> - -#include "platform_msic.h" - -static void __init *msic_ocd_platform_data(void *info) -{ - static struct intel_msic_ocd_pdata msic_ocd_pdata; - int gpio; - - gpio = get_gpio_by_name("ocd_gpio"); - - if (gpio < 0) - return NULL; - - msic_ocd_pdata.gpio = gpio; - msic_pdata.ocd = &msic_ocd_pdata; - - return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_OCD); -} - -static const struct devs_id msic_ocd_dev_id __initconst = { - .name = "msic_ocd", - .type = SFI_DEV_TYPE_IPC, - .delay = 1, - .msic = 1, - .get_platform_data = &msic_ocd_platform_data, -}; - -sfi_device(msic_ocd_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c deleted file mode 100644 index 3d3de2d59726..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_msic_power_btn.c: MSIC power btn platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ -#include <linux/kernel.h> -#include <linux/interrupt.h> -#include <linux/scatterlist.h> -#include <linux/sfi.h> -#include <linux/init.h> -#include <linux/mfd/intel_msic.h> -#include <asm/intel-mid.h> - -#include "platform_msic.h" - -static void __init 
*msic_power_btn_platform_data(void *info) -{ - return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_POWER_BTN); -} - -static const struct devs_id msic_power_btn_dev_id __initconst = { - .name = "msic_power_btn", - .type = SFI_DEV_TYPE_IPC, - .delay = 1, - .msic = 1, - .get_platform_data = &msic_power_btn_platform_data, -}; - -sfi_device(msic_power_btn_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c deleted file mode 100644 index 4858da1d78c6..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_msic_thermal.c: msic_thermal platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/input.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/gpio.h> -#include <linux/platform_device.h> -#include <linux/mfd/intel_msic.h> -#include <asm/intel-mid.h> - -#include "platform_msic.h" - -static void __init *msic_thermal_platform_data(void *info) -{ - return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_THERMAL); -} - -static const struct devs_id msic_thermal_dev_id __initconst = { - .name = "msic_thermal", - .type = SFI_DEV_TYPE_IPC, - .delay = 1, - .msic = 1, - .get_platform_data = &msic_thermal_platform_data, -}; - -sfi_device(msic_thermal_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c deleted file mode 100644 index 5609d8da3978..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c +++ /dev/null @@ -1,95 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * PCAL9555a platform data initialization file - * - * Copyright (C) 2016, Intel Corporation - * - * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com> - * Dan O'Donovan <dan@emutex.com> - */ - -#include <linux/gpio.h> -#include <linux/init.h> -#include <linux/i2c.h> -#include <linux/platform_data/pca953x.h> -#include <linux/sfi.h> - -#include <asm/intel-mid.h> - -#define PCAL9555A_NUM 4 - -static struct pca953x_platform_data pcal9555a_pdata[PCAL9555A_NUM]; -static int nr; - -static void __init *pcal9555a_platform_data(void *info) -{ - struct i2c_board_info *i2c_info = info; - char *type = i2c_info->type; - struct pca953x_platform_data *pcal9555a; - char base_pin_name[SFI_NAME_LEN + 1]; - char intr_pin_name[SFI_NAME_LEN + 1]; - int gpio_base, intr; - - snprintf(base_pin_name, sizeof(base_pin_name), "%s_base", type); - snprintf(intr_pin_name, sizeof(intr_pin_name), "%s_int", type); - - gpio_base = get_gpio_by_name(base_pin_name); - intr = get_gpio_by_name(intr_pin_name); - - /* Check if the SFI record valid */ - if (gpio_base == -1) - return NULL; - - if (nr >= PCAL9555A_NUM) { - pr_err("%s: Too many instances, only %d supported\n", __func__, - PCAL9555A_NUM); - return NULL; - } - - pcal9555a = &pcal9555a_pdata[nr++]; - pcal9555a->gpio_base = gpio_base; - - if (intr >= 0) { - i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; - pcal9555a->irq_base = gpio_base + INTEL_MID_IRQ_OFFSET; - } else { - i2c_info->irq = -1; - pcal9555a->irq_base = -1; - } - - strcpy(type, "pcal9555a"); - return pcal9555a; -} - -static const struct devs_id pcal9555a_1_dev_id __initconst = { - .name = "pcal9555a-1", - .type = SFI_DEV_TYPE_I2C, - .delay = 1, - .get_platform_data = 
&pcal9555a_platform_data, -}; - -static const struct devs_id pcal9555a_2_dev_id __initconst = { - .name = "pcal9555a-2", - .type = SFI_DEV_TYPE_I2C, - .delay = 1, - .get_platform_data = &pcal9555a_platform_data, -}; - -static const struct devs_id pcal9555a_3_dev_id __initconst = { - .name = "pcal9555a-3", - .type = SFI_DEV_TYPE_I2C, - .delay = 1, - .get_platform_data = &pcal9555a_platform_data, -}; - -static const struct devs_id pcal9555a_4_dev_id __initconst = { - .name = "pcal9555a-4", - .type = SFI_DEV_TYPE_I2C, - .delay = 1, - .get_platform_data = &pcal9555a_platform_data, -}; - -sfi_device(pcal9555a_1_dev_id); -sfi_device(pcal9555a_2_dev_id); -sfi_device(pcal9555a_3_dev_id); -sfi_device(pcal9555a_4_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c deleted file mode 100644 index 139738bbdd36..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_tc35876x.c: tc35876x platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/gpio/machine.h> -#include <asm/intel-mid.h> - -static struct gpiod_lookup_table tc35876x_gpio_table = { - .dev_id = "i2c_disp_brig", - .table = { - GPIO_LOOKUP("0000:00:0c.0", -1, "bridge-reset", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("0000:00:0c.0", -1, "bl-en", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("0000:00:0c.0", -1, "vadd", GPIO_ACTIVE_HIGH), - { }, - }, -}; - -/*tc35876x DSI_LVDS bridge chip and panel platform data*/ -static void *tc35876x_platform_data(void *data) -{ - struct gpiod_lookup_table *table = &tc35876x_gpio_table; - struct gpiod_lookup *lookup = table->table; - - lookup[0].chip_hwnum = get_gpio_by_name("LCMB_RXEN"); - lookup[1].chip_hwnum = get_gpio_by_name("6S6P_BL_EN"); - lookup[2].chip_hwnum = get_gpio_by_name("EN_VREG_LCD_V3P3"); - gpiod_add_lookup_table(table); - - return NULL; -} - -static const struct devs_id tc35876x_dev_id __initconst = { - .name = "i2c_disp_brig", - .type = SFI_DEV_TYPE_I2C, - .get_platform_data = &tc35876x_platform_data, -}; - -sfi_device(tc35876x_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c deleted file mode 100644 index e689d8f61059..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c +++ /dev/null @@ -1,53 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * platform_tca6416.c: tca6416 platform data initialization file - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/platform_data/pca953x.h> -#include <linux/i2c.h> -#include <linux/gpio.h> -#include <asm/intel-mid.h> - -#define TCA6416_NAME "tca6416" -#define TCA6416_BASE "tca6416_base" -#define TCA6416_INTR "tca6416_int" - -static void *tca6416_platform_data(void *info) -{ - static struct pca953x_platform_data tca6416; - struct i2c_board_info *i2c_info = info; - int gpio_base, intr; - char base_pin_name[SFI_NAME_LEN + 1]; - char intr_pin_name[SFI_NAME_LEN + 1]; - - strcpy(i2c_info->type, TCA6416_NAME); - strcpy(base_pin_name, TCA6416_BASE); - strcpy(intr_pin_name, TCA6416_INTR); - - gpio_base = get_gpio_by_name(base_pin_name); - intr = get_gpio_by_name(intr_pin_name); - - if (gpio_base < 0) - return NULL; - tca6416.gpio_base = 
gpio_base; - if (intr >= 0) { - i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; - tca6416.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET; - } else { - i2c_info->irq = -1; - tca6416.irq_base = -1; - } - return &tca6416; -} - -static const struct devs_id tca6416_dev_id __initconst = { - .name = "tca6416", - .type = SFI_DEV_TYPE_I2C, - .delay = 1, - .get_platform_data = &tca6416_platform_data, -}; - -sfi_device(tca6416_dev_id); diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 780728161f7d..f4592dc7a1c1 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * intel-mid.c: Intel MID platform setup code + * Intel MID platform setup code * - * (C) Copyright 2008, 2012 Intel Corporation + * (C) Copyright 2008, 2012, 2021 Intel Corporation * Author: Jacob Pan (jacob.jun.pan@intel.com) * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> */ @@ -14,7 +14,6 @@ #include <linux/interrupt.h> #include <linux/regulator/machine.h> #include <linux/scatterlist.h> -#include <linux/sfi.h> #include <linux/irq.h> #include <linux/export.h> #include <linux/notifier.h> @@ -25,38 +24,13 @@ #include <asm/apic.h> #include <asm/io_apic.h> #include <asm/intel-mid.h> -#include <asm/intel_mid_vrtc.h> #include <asm/io.h> #include <asm/i8259.h> #include <asm/intel_scu_ipc.h> -#include <asm/apb_timer.h> #include <asm/reboot.h> -/* - * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock, - * cmdline option x86_intel_mid_timer can be used to override the configuration - * to prefer one or the other. - * at runtime, there are basically three timer configurations: - * 1. per cpu apbt clock only - * 2. per cpu always-on lapic clocks only, this is Penwell/Medfield only - * 3. per cpu lapic clock (C3STOP) and one apbt clock, with broadcast. - * - * by default (without cmdline option), platform code first detects cpu type - * to see if we are on lincroft or penwell, then set up both lapic or apbt - * clocks accordingly. - * i.e. by default, medfield uses configuration #2, moorestown uses #1. - * config #3 is supported but not recommended on medfield. 
- * - * rating and feature summary: - * lapic (with C3STOP) --------- 100 - * apbt (always-on) ------------ 110 - * lapic (always-on,ARAT) ------ 150 - */ - -enum intel_mid_timer_options intel_mid_timer_options; - -enum intel_mid_cpu_type __intel_mid_cpu_chip; -EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip); +#define IPCMSG_COLD_OFF 0x80 /* Only for Tangier */ +#define IPCMSG_COLD_RESET 0xF1 static void intel_mid_power_off(void) { @@ -64,69 +38,32 @@ static void intel_mid_power_off(void) intel_mid_pwr_power_off(); /* Only for Tangier, the rest will ignore this command */ - intel_scu_ipc_simple_command(IPCMSG_COLD_OFF, 1); + intel_scu_ipc_dev_simple_command(NULL, IPCMSG_COLD_OFF, 1); }; static void intel_mid_reboot(void) { - intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0); -} - -static void __init intel_mid_setup_bp_timer(void) -{ - apbt_time_init(); - setup_boot_APIC_clock(); + intel_scu_ipc_dev_simple_command(NULL, IPCMSG_COLD_RESET, 0); } static void __init intel_mid_time_init(void) { - sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr); - - switch (intel_mid_timer_options) { - case INTEL_MID_TIMER_APBT_ONLY: - break; - case INTEL_MID_TIMER_LAPIC_APBT: - /* Use apbt and local apic */ - x86_init.timers.setup_percpu_clockev = intel_mid_setup_bp_timer; - x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock; - return; - default: - if (!boot_cpu_has(X86_FEATURE_ARAT)) - break; - /* Lapic only, no apbt */ - x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock; - x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock; - return; - } - - x86_init.timers.setup_percpu_clockev = apbt_time_init; + /* Lapic only, no apbt */ + x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock; + x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock; } static void intel_mid_arch_setup(void) { - if (boot_cpu_data.x86 != 6) { - pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n", - boot_cpu_data.x86, boot_cpu_data.x86_model); - __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL; - goto out; - } - switch (boot_cpu_data.x86_model) { - case 0x35: - __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_CLOVERVIEW; - break; case 0x3C: case 0x4A: - __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER; x86_platform.legacy.rtc = 1; break; - case 0x27: default: - __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL; break; } -out: /* * Intel MID platforms are using explicitly defined regulators. * @@ -159,14 +96,11 @@ void __init x86_intel_mid_early_setup(void) x86_init.timers.timer_init = intel_mid_time_init; x86_init.timers.setup_percpu_clockev = x86_init_noop; - x86_init.timers.wallclock_init = intel_mid_rtc_init; x86_init.irqs.pre_vector_init = x86_init_noop; x86_init.oem.arch_setup = intel_mid_arch_setup; - x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock; - x86_platform.get_nmi_reason = intel_mid_get_nmi_reason; x86_init.pci.arch_init = intel_mid_pci_init; @@ -188,25 +122,3 @@ void __init x86_intel_mid_early_setup(void) x86_init.mpparse.get_smp_config = x86_init_uint_noop; set_bit(MP_BUS_ISA, mp_bus_not_pci); } - -/* - * if user does not want to use per CPU apb timer, just give it a lower rating - * than local apic timer and skip the late per cpu timer init. 
- */ -static inline int __init setup_x86_intel_mid_timer(char *arg) -{ - if (!arg) - return -EINVAL; - - if (strcmp("apbt_only", arg) == 0) - intel_mid_timer_options = INTEL_MID_TIMER_APBT_ONLY; - else if (strcmp("lapic_and_apbt", arg) == 0) - intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT; - else { - pr_warn("X86 INTEL_MID timer option %s not recognised use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n", - arg); - return -EINVAL; - } - return 0; -} -__setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer); diff --git a/arch/x86/platform/intel-mid/intel_mid_vrtc.c b/arch/x86/platform/intel-mid/intel_mid_vrtc.c deleted file mode 100644 index 2226da4f437a..000000000000 --- a/arch/x86/platform/intel-mid/intel_mid_vrtc.c +++ /dev/null @@ -1,173 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * intel_mid_vrtc.c: Driver for virtual RTC device on Intel MID platform - * - * (C) Copyright 2009 Intel Corporation - * - * Note: - * VRTC is emulated by system controller firmware, the real HW - * RTC is located in the PMIC device. SCU FW shadows PMIC RTC - * in a memory mapped IO space that is visible to the host IA - * processor. - * - * This driver is based on RTC CMOS driver. - */ - -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/init.h> -#include <linux/sfi.h> -#include <linux/platform_device.h> -#include <linux/mc146818rtc.h> - -#include <asm/intel-mid.h> -#include <asm/intel_mid_vrtc.h> -#include <asm/time.h> -#include <asm/fixmap.h> - -static unsigned char __iomem *vrtc_virt_base; - -unsigned char vrtc_cmos_read(unsigned char reg) -{ - unsigned char retval; - - /* vRTC's registers range from 0x0 to 0xD */ - if (reg > 0xd || !vrtc_virt_base) - return 0xff; - - lock_cmos_prefix(reg); - retval = __raw_readb(vrtc_virt_base + (reg << 2)); - lock_cmos_suffix(reg); - return retval; -} -EXPORT_SYMBOL_GPL(vrtc_cmos_read); - -void vrtc_cmos_write(unsigned char val, unsigned char reg) -{ - if (reg > 0xd || !vrtc_virt_base) - return; - - lock_cmos_prefix(reg); - __raw_writeb(val, vrtc_virt_base + (reg << 2)); - lock_cmos_suffix(reg); -} -EXPORT_SYMBOL_GPL(vrtc_cmos_write); - -void vrtc_get_time(struct timespec64 *now) -{ - u8 sec, min, hour, mday, mon; - unsigned long flags; - u32 year; - - spin_lock_irqsave(&rtc_lock, flags); - - while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP)) - cpu_relax(); - - sec = vrtc_cmos_read(RTC_SECONDS); - min = vrtc_cmos_read(RTC_MINUTES); - hour = vrtc_cmos_read(RTC_HOURS); - mday = vrtc_cmos_read(RTC_DAY_OF_MONTH); - mon = vrtc_cmos_read(RTC_MONTH); - year = vrtc_cmos_read(RTC_YEAR); - - spin_unlock_irqrestore(&rtc_lock, flags); - - /* vRTC YEAR reg contains the offset to 1972 */ - year += 1972; - - pr_info("vRTC: sec: %d min: %d hour: %d day: %d " - "mon: %d year: %d\n", sec, min, hour, mday, mon, year); - - now->tv_sec = mktime64(year, mon, mday, hour, min, sec); - now->tv_nsec = 0; -} - -int vrtc_set_mmss(const struct timespec64 *now) -{ - unsigned long flags; - struct rtc_time tm; - int year; - int retval = 0; - - rtc_time64_to_tm(now->tv_sec, &tm); - if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) { - /* - * tm.year is the number of years since 1900, and the - * vrtc need the years since 1972. 
- */ - year = tm.tm_year - 72; - spin_lock_irqsave(&rtc_lock, flags); - vrtc_cmos_write(year, RTC_YEAR); - vrtc_cmos_write(tm.tm_mon, RTC_MONTH); - vrtc_cmos_write(tm.tm_mday, RTC_DAY_OF_MONTH); - vrtc_cmos_write(tm.tm_hour, RTC_HOURS); - vrtc_cmos_write(tm.tm_min, RTC_MINUTES); - vrtc_cmos_write(tm.tm_sec, RTC_SECONDS); - spin_unlock_irqrestore(&rtc_lock, flags); - } else { - pr_err("%s: Invalid vRTC value: write of %llx to vRTC failed\n", - __func__, (s64)now->tv_sec); - retval = -EINVAL; - } - return retval; -} - -void __init intel_mid_rtc_init(void) -{ - unsigned long vrtc_paddr; - - sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc); - - vrtc_paddr = sfi_mrtc_array[0].phys_addr; - if (!sfi_mrtc_num || !vrtc_paddr) - return; - - vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC, - vrtc_paddr); - x86_platform.get_wallclock = vrtc_get_time; - x86_platform.set_wallclock = vrtc_set_mmss; -} - -/* - * The Moorestown platform has a memory mapped virtual RTC device that emulates - * the programming interface of the RTC. - */ - -static struct resource vrtc_resources[] = { - [0] = { - .flags = IORESOURCE_MEM, - }, - [1] = { - .flags = IORESOURCE_IRQ, - } -}; - -static struct platform_device vrtc_device = { - .name = "rtc_mrst", - .id = -1, - .resource = vrtc_resources, - .num_resources = ARRAY_SIZE(vrtc_resources), -}; - -/* Register the RTC device if appropriate */ -static int __init intel_mid_device_create(void) -{ - /* No Moorestown, no device */ - if (!intel_mid_identify_cpu()) - return -ENODEV; - /* No timer, no device */ - if (!sfi_mrtc_num) - return -ENODEV; - - /* iomem resource */ - vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr; - vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr + - MRST_VRTC_MAP_SZ; - /* irq resource */ - vrtc_resources[1].start = sfi_mrtc_array[0].irq; - vrtc_resources[1].end = sfi_mrtc_array[0].irq; - - return platform_device_register(&vrtc_device); -} -device_initcall(intel_mid_device_create); diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c deleted file mode 100644 index 30bd5714a3d4..000000000000 --- a/arch/x86/platform/intel-mid/sfi.c +++ /dev/null @@ -1,543 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * intel_mid_sfi.c: Intel MID SFI initialization code - * - * (C) Copyright 2013 Intel Corporation - * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> - */ - -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/interrupt.h> -#include <linux/scatterlist.h> -#include <linux/sfi.h> -#include <linux/spi/spi.h> -#include <linux/i2c.h> -#include <linux/skbuff.h> -#include <linux/gpio.h> -#include <linux/gpio_keys.h> -#include <linux/input.h> -#include <linux/platform_device.h> -#include <linux/irq.h> -#include <linux/export.h> -#include <linux/notifier.h> -#include <linux/mmc/core.h> -#include <linux/mmc/card.h> -#include <linux/blkdev.h> - -#include <asm/setup.h> -#include <asm/mpspec_def.h> -#include <asm/hw_irq.h> -#include <asm/apic.h> -#include <asm/io_apic.h> -#include <asm/intel-mid.h> -#include <asm/intel_mid_vrtc.h> -#include <asm/io.h> -#include <asm/i8259.h> -#include <asm/intel_scu_ipc.h> -#include <asm/apb_timer.h> -#include <asm/reboot.h> - -#define SFI_SIG_OEM0 "OEM0" -#define MAX_IPCDEVS 24 -#define MAX_SCU_SPI 24 -#define MAX_SCU_I2C 24 - -static struct platform_device *ipc_devs[MAX_IPCDEVS]; -static struct spi_board_info *spi_devs[MAX_SCU_SPI]; -static struct i2c_board_info *i2c_devs[MAX_SCU_I2C]; -static struct 
sfi_gpio_table_entry *gpio_table; -static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM]; -static int ipc_next_dev; -static int spi_next_dev; -static int i2c_next_dev; -static int i2c_bus[MAX_SCU_I2C]; -static int gpio_num_entry; -static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM]; -int sfi_mrtc_num; -int sfi_mtimer_num; - -struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX]; -EXPORT_SYMBOL_GPL(sfi_mrtc_array); - -struct blocking_notifier_head intel_scu_notifier = - BLOCKING_NOTIFIER_INIT(intel_scu_notifier); -EXPORT_SYMBOL_GPL(intel_scu_notifier); - -#define intel_mid_sfi_get_pdata(dev, priv) \ - ((dev)->get_platform_data ? (dev)->get_platform_data(priv) : NULL) - -/* parse all the mtimer info to a static mtimer array */ -int __init sfi_parse_mtmr(struct sfi_table_header *table) -{ - struct sfi_table_simple *sb; - struct sfi_timer_table_entry *pentry; - struct mpc_intsrc mp_irq; - int totallen; - - sb = (struct sfi_table_simple *)table; - if (!sfi_mtimer_num) { - sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb, - struct sfi_timer_table_entry); - pentry = (struct sfi_timer_table_entry *) sb->pentry; - totallen = sfi_mtimer_num * sizeof(*pentry); - memcpy(sfi_mtimer_array, pentry, totallen); - } - - pr_debug("SFI MTIMER info (num = %d):\n", sfi_mtimer_num); - pentry = sfi_mtimer_array; - for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) { - pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz, irq = %d\n", - totallen, (u32)pentry->phys_addr, - pentry->freq_hz, pentry->irq); - mp_irq.type = MP_INTSRC; - mp_irq.irqtype = mp_INT; - mp_irq.irqflag = MP_IRQTRIG_EDGE | MP_IRQPOL_ACTIVE_HIGH; - mp_irq.srcbus = MP_BUS_ISA; - mp_irq.srcbusirq = pentry->irq; /* IRQ */ - mp_irq.dstapic = MP_APIC_ALL; - mp_irq.dstirq = pentry->irq; - mp_save_irq(&mp_irq); - mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC, NULL); - } - - return 0; -} - -struct sfi_timer_table_entry *sfi_get_mtmr(int hint) -{ - int i; - if (hint < sfi_mtimer_num) { - if (!sfi_mtimer_usage[hint]) { - pr_debug("hint taken for timer %d irq %d\n", - hint, sfi_mtimer_array[hint].irq); - sfi_mtimer_usage[hint] = 1; - return &sfi_mtimer_array[hint]; - } - } - /* take the first timer available */ - for (i = 0; i < sfi_mtimer_num;) { - if (!sfi_mtimer_usage[i]) { - sfi_mtimer_usage[i] = 1; - return &sfi_mtimer_array[i]; - } - i++; - } - return NULL; -} - -void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr) -{ - int i; - for (i = 0; i < sfi_mtimer_num;) { - if (mtmr->irq == sfi_mtimer_array[i].irq) { - sfi_mtimer_usage[i] = 0; - return; - } - i++; - } -} - -/* parse all the mrtc info to a global mrtc array */ -int __init sfi_parse_mrtc(struct sfi_table_header *table) -{ - struct sfi_table_simple *sb; - struct sfi_rtc_table_entry *pentry; - struct mpc_intsrc mp_irq; - - int totallen; - - sb = (struct sfi_table_simple *)table; - if (!sfi_mrtc_num) { - sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb, - struct sfi_rtc_table_entry); - pentry = (struct sfi_rtc_table_entry *)sb->pentry; - totallen = sfi_mrtc_num * sizeof(*pentry); - memcpy(sfi_mrtc_array, pentry, totallen); - } - - pr_debug("SFI RTC info (num = %d):\n", sfi_mrtc_num); - pentry = sfi_mrtc_array; - for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) { - pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n", - totallen, (u32)pentry->phys_addr, pentry->irq); - mp_irq.type = MP_INTSRC; - mp_irq.irqtype = mp_INT; - mp_irq.irqflag = MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW; - mp_irq.srcbus = MP_BUS_ISA; - mp_irq.srcbusirq = pentry->irq; /* IRQ */ - mp_irq.dstapic = 
MP_APIC_ALL; - mp_irq.dstirq = pentry->irq; - mp_save_irq(&mp_irq); - mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC, NULL); - } - return 0; -} - - -/* - * Parsing GPIO table first, since the DEVS table will need this table - * to map the pin name to the actual pin. - */ -static int __init sfi_parse_gpio(struct sfi_table_header *table) -{ - struct sfi_table_simple *sb; - struct sfi_gpio_table_entry *pentry; - int num, i; - - if (gpio_table) - return 0; - sb = (struct sfi_table_simple *)table; - num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry); - pentry = (struct sfi_gpio_table_entry *)sb->pentry; - - gpio_table = kmemdup(pentry, num * sizeof(*pentry), GFP_KERNEL); - if (!gpio_table) - return -1; - gpio_num_entry = num; - - pr_debug("GPIO pin info:\n"); - for (i = 0; i < num; i++, pentry++) - pr_debug("info[%2d]: controller = %16.16s, pin_name = %16.16s," - " pin = %d\n", i, - pentry->controller_name, - pentry->pin_name, - pentry->pin_no); - return 0; -} - -int get_gpio_by_name(const char *name) -{ - struct sfi_gpio_table_entry *pentry = gpio_table; - int i; - - if (!pentry) - return -1; - for (i = 0; i < gpio_num_entry; i++, pentry++) { - if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN)) - return pentry->pin_no; - } - return -EINVAL; -} - -static void __init intel_scu_ipc_device_register(struct platform_device *pdev) -{ - if (ipc_next_dev == MAX_IPCDEVS) - pr_err("too many SCU IPC devices"); - else - ipc_devs[ipc_next_dev++] = pdev; -} - -static void __init intel_scu_spi_device_register(struct spi_board_info *sdev) -{ - struct spi_board_info *new_dev; - - if (spi_next_dev == MAX_SCU_SPI) { - pr_err("too many SCU SPI devices"); - return; - } - - new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL); - if (!new_dev) { - pr_err("failed to alloc mem for delayed spi dev %s\n", - sdev->modalias); - return; - } - *new_dev = *sdev; - - spi_devs[spi_next_dev++] = new_dev; -} - -static void __init intel_scu_i2c_device_register(int bus, - struct i2c_board_info *idev) -{ - struct i2c_board_info *new_dev; - - if (i2c_next_dev == MAX_SCU_I2C) { - pr_err("too many SCU I2C devices"); - return; - } - - new_dev = kzalloc(sizeof(*idev), GFP_KERNEL); - if (!new_dev) { - pr_err("failed to alloc mem for delayed i2c dev %s\n", - idev->type); - return; - } - *new_dev = *idev; - - i2c_bus[i2c_next_dev] = bus; - i2c_devs[i2c_next_dev++] = new_dev; -} - -/* Called by IPC driver */ -void intel_scu_devices_create(void) -{ - int i; - - for (i = 0; i < ipc_next_dev; i++) - platform_device_add(ipc_devs[i]); - - for (i = 0; i < spi_next_dev; i++) - spi_register_board_info(spi_devs[i], 1); - - for (i = 0; i < i2c_next_dev; i++) { - struct i2c_adapter *adapter; - struct i2c_client *client; - - adapter = i2c_get_adapter(i2c_bus[i]); - if (adapter) { - client = i2c_new_client_device(adapter, i2c_devs[i]); - if (IS_ERR(client)) - pr_err("can't create i2c device %s\n", - i2c_devs[i]->type); - } else - i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1); - } - intel_scu_notifier_post(SCU_AVAILABLE, NULL); -} -EXPORT_SYMBOL_GPL(intel_scu_devices_create); - -/* Called by IPC driver */ -void intel_scu_devices_destroy(void) -{ - int i; - - intel_scu_notifier_post(SCU_DOWN, NULL); - - for (i = 0; i < ipc_next_dev; i++) - platform_device_del(ipc_devs[i]); -} -EXPORT_SYMBOL_GPL(intel_scu_devices_destroy); - -static void __init install_irq_resource(struct platform_device *pdev, int irq) -{ - /* Single threaded */ - static struct resource res __initdata = { - .name = "IRQ", - .flags = IORESOURCE_IRQ, - }; - res.start = irq; - 
platform_device_add_resources(pdev, &res, 1); -} - -static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry, - struct devs_id *dev) -{ - struct platform_device *pdev; - void *pdata = NULL; - - pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n", - pentry->name, pentry->irq); - - /* - * We need to call platform init of IPC devices to fill misc_pdata - * structure. It will be used in msic_init for initialization. - */ - pdata = intel_mid_sfi_get_pdata(dev, pentry); - if (IS_ERR(pdata)) - return; - - /* - * On Medfield the platform device creation is handled by the MSIC - * MFD driver so we don't need to do it here. - */ - if (dev->msic && intel_mid_has_msic()) - return; - - pdev = platform_device_alloc(pentry->name, 0); - if (pdev == NULL) { - pr_err("out of memory for SFI platform device '%s'.\n", - pentry->name); - return; - } - install_irq_resource(pdev, pentry->irq); - - pdev->dev.platform_data = pdata; - if (dev->delay) - intel_scu_ipc_device_register(pdev); - else - platform_device_add(pdev); -} - -static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry, - struct devs_id *dev) -{ - struct spi_board_info spi_info; - void *pdata = NULL; - - memset(&spi_info, 0, sizeof(spi_info)); - strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN); - spi_info.irq = ((pentry->irq == (u8)0xff) ? 0 : pentry->irq); - spi_info.bus_num = pentry->host_num; - spi_info.chip_select = pentry->addr; - spi_info.max_speed_hz = pentry->max_freq; - pr_debug("SPI bus=%d, name=%16.16s, irq=0x%2x, max_freq=%d, cs=%d\n", - spi_info.bus_num, - spi_info.modalias, - spi_info.irq, - spi_info.max_speed_hz, - spi_info.chip_select); - - pdata = intel_mid_sfi_get_pdata(dev, &spi_info); - if (IS_ERR(pdata)) - return; - - spi_info.platform_data = pdata; - if (dev->delay) - intel_scu_spi_device_register(&spi_info); - else - spi_register_board_info(&spi_info, 1); -} - -static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry, - struct devs_id *dev) -{ - struct i2c_board_info i2c_info; - void *pdata = NULL; - - memset(&i2c_info, 0, sizeof(i2c_info)); - strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN); - i2c_info.irq = ((pentry->irq == (u8)0xff) ? 
0 : pentry->irq); - i2c_info.addr = pentry->addr; - pr_debug("I2C bus = %d, name = %16.16s, irq = 0x%2x, addr = 0x%x\n", - pentry->host_num, - i2c_info.type, - i2c_info.irq, - i2c_info.addr); - pdata = intel_mid_sfi_get_pdata(dev, &i2c_info); - i2c_info.platform_data = pdata; - if (IS_ERR(pdata)) - return; - - if (dev->delay) - intel_scu_i2c_device_register(pentry->host_num, &i2c_info); - else - i2c_register_board_info(pentry->host_num, &i2c_info, 1); -} - -static void __init sfi_handle_sd_dev(struct sfi_device_table_entry *pentry, - struct devs_id *dev) -{ - struct mid_sd_board_info sd_info; - void *pdata; - - memset(&sd_info, 0, sizeof(sd_info)); - strncpy(sd_info.name, pentry->name, SFI_NAME_LEN); - sd_info.bus_num = pentry->host_num; - sd_info.max_clk = pentry->max_freq; - sd_info.addr = pentry->addr; - pr_debug("SD bus = %d, name = %16.16s, max_clk = %d, addr = 0x%x\n", - sd_info.bus_num, - sd_info.name, - sd_info.max_clk, - sd_info.addr); - pdata = intel_mid_sfi_get_pdata(dev, &sd_info); - if (IS_ERR(pdata)) - return; - - /* Nothing we can do with this for now */ - sd_info.platform_data = pdata; - - pr_debug("Successfully registered %16.16s", sd_info.name); -} - -extern struct devs_id *const __x86_intel_mid_dev_start[], - *const __x86_intel_mid_dev_end[]; - -static struct devs_id __init *get_device_id(u8 type, char *name) -{ - struct devs_id *const *dev_table; - - for (dev_table = __x86_intel_mid_dev_start; - dev_table < __x86_intel_mid_dev_end; dev_table++) { - struct devs_id *dev = *dev_table; - if (dev->type == type && - !strncmp(dev->name, name, SFI_NAME_LEN)) { - return dev; - } - } - - return NULL; -} - -static int __init sfi_parse_devs(struct sfi_table_header *table) -{ - struct sfi_table_simple *sb; - struct sfi_device_table_entry *pentry; - struct devs_id *dev = NULL; - int num, i, ret; - int polarity; - struct irq_alloc_info info; - - sb = (struct sfi_table_simple *)table; - num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry); - pentry = (struct sfi_device_table_entry *)sb->pentry; - - for (i = 0; i < num; i++, pentry++) { - int irq = pentry->irq; - - if (irq != (u8)0xff) { /* native RTE case */ - /* these SPI2 devices are not exposed to system as PCI - * devices, but they have separate RTE entry in IOAPIC - * so we have to enable them one by one here - */ - if (intel_mid_identify_cpu() == - INTEL_MID_CPU_CHIP_TANGIER) { - if (!strncmp(pentry->name, "r69001-ts-i2c", 13)) - /* active low */ - polarity = 1; - else if (!strncmp(pentry->name, - "synaptics_3202", 14)) - /* active low */ - polarity = 1; - else if (irq == 41) - /* fast_int_1 */ - polarity = 1; - else - /* active high */ - polarity = 0; - } else { - /* PNW and CLV go with active low */ - polarity = 1; - } - - ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 1, polarity); - ret = mp_map_gsi_to_irq(irq, IOAPIC_MAP_ALLOC, &info); - WARN_ON(ret < 0); - } - - dev = get_device_id(pentry->type, pentry->name); - - if (!dev) - continue; - - switch (pentry->type) { - case SFI_DEV_TYPE_IPC: - sfi_handle_ipc_dev(pentry, dev); - break; - case SFI_DEV_TYPE_SPI: - sfi_handle_spi_dev(pentry, dev); - break; - case SFI_DEV_TYPE_I2C: - sfi_handle_i2c_dev(pentry, dev); - break; - case SFI_DEV_TYPE_SD: - sfi_handle_sd_dev(pentry, dev); - break; - case SFI_DEV_TYPE_UART: - case SFI_DEV_TYPE_HSI: - default: - break; - } - } - return 0; -} - -static int __init intel_mid_platform_init(void) -{ - sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio); - sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs); - return 0; -} 
-arch_initcall(intel_mid_platform_init); diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S index 43b4d864817e..d2ccadc247e6 100644 --- a/arch/x86/platform/pvh/head.S +++ b/arch/x86/platform/pvh/head.S @@ -16,6 +16,7 @@ #include <asm/boot.h> #include <asm/processor-flags.h> #include <asm/msr.h> +#include <asm/nospec-branch.h> #include <xen/interface/elfnote.h> __HEAD @@ -105,6 +106,7 @@ SYM_CODE_START_LOCAL(pvh_start_xen) /* startup_64 expects boot_params in %rsi. */ mov $_pa(pvh_bootparams), %rsi mov $_pa(startup_64), %rax + ANNOTATE_RETPOLINE_SAFE jmp *%rax #else /* CONFIG_X86_64 */ diff --git a/arch/x86/platform/sfi/sfi.c b/arch/x86/platform/sfi/sfi.c deleted file mode 100644 index 6259563760f9..000000000000 --- a/arch/x86/platform/sfi/sfi.c +++ /dev/null @@ -1,100 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * sfi.c - x86 architecture SFI support. - * - * Copyright (c) 2009, Intel Corporation. - */ - -#define KMSG_COMPONENT "SFI" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - -#include <linux/acpi.h> -#include <linux/init.h> -#include <linux/sfi.h> -#include <linux/io.h> - -#include <asm/irqdomain.h> -#include <asm/io_apic.h> -#include <asm/mpspec.h> -#include <asm/setup.h> -#include <asm/apic.h> - -#ifdef CONFIG_X86_LOCAL_APIC -static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; - -/* All CPUs enumerated by SFI must be present and enabled */ -static void __init mp_sfi_register_lapic(u8 id) -{ - if (MAX_LOCAL_APIC - id <= 0) { - pr_warn("Processor #%d invalid (max %d)\n", id, MAX_LOCAL_APIC); - return; - } - - pr_info("registering lapic[%d]\n", id); - - generic_processor_info(id, GET_APIC_VERSION(apic_read(APIC_LVR))); -} - -static int __init sfi_parse_cpus(struct sfi_table_header *table) -{ - struct sfi_table_simple *sb; - struct sfi_cpu_table_entry *pentry; - int i; - int cpu_num; - - sb = (struct sfi_table_simple *)table; - cpu_num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry); - pentry = (struct sfi_cpu_table_entry *)sb->pentry; - - for (i = 0; i < cpu_num; i++) { - mp_sfi_register_lapic(pentry->apic_id); - pentry++; - } - - smp_found_config = 1; - return 0; -} -#endif /* CONFIG_X86_LOCAL_APIC */ - -#ifdef CONFIG_X86_IO_APIC - -static int __init sfi_parse_ioapic(struct sfi_table_header *table) -{ - struct sfi_table_simple *sb; - struct sfi_apic_table_entry *pentry; - int i, num; - struct ioapic_domain_cfg cfg = { - .type = IOAPIC_DOMAIN_STRICT, - .ops = &mp_ioapic_irqdomain_ops, - }; - - sb = (struct sfi_table_simple *)table; - num = SFI_GET_NUM_ENTRIES(sb, struct sfi_apic_table_entry); - pentry = (struct sfi_apic_table_entry *)sb->pentry; - - for (i = 0; i < num; i++) { - mp_register_ioapic(i, pentry->phys_addr, gsi_top, &cfg); - pentry++; - } - - WARN(pic_mode, KERN_WARNING - "SFI: pic_mod shouldn't be 1 when IOAPIC table is present\n"); - pic_mode = 0; - return 0; -} -#endif /* CONFIG_X86_IO_APIC */ - -/* - * sfi_platform_init(): register lapics & io-apics - */ -int __init sfi_platform_init(void) -{ -#ifdef CONFIG_X86_LOCAL_APIC - register_lapic_address(sfi_lapic_addr); - sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, sfi_parse_cpus); -#endif -#ifdef CONFIG_X86_IO_APIC - sfi_table_parse(SFI_SIG_APIC, NULL, NULL, sfi_parse_ioapic); -#endif - return 0; -} diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile index 6907b523e856..379777572bc9 100644 --- a/arch/x86/power/Makefile +++ b/arch/x86/power/Makefile @@ -1,9 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 -OBJECT_FILES_NON_STANDARD_hibernate_asm_$(BITS).o := y # 
__restore_processor_state() restores %gs after S3 resume and so should not # itself be stack-protected CFLAGS_cpu.o := -fno-stack-protector +# Clang may incorrectly inline functions with stack protector enabled into +# __restore_processor_state(): https://bugs.llvm.org/show_bug.cgi?id=47479 +CFLAGS_REMOVE_cpu.o := $(CC_FLAGS_LTO) + obj-$(CONFIG_PM_SLEEP) += cpu.o obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o hibernate.o diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S index 7918b8415f13..d9bed596d849 100644 --- a/arch/x86/power/hibernate_asm_64.S +++ b/arch/x86/power/hibernate_asm_64.S @@ -21,6 +21,53 @@ #include <asm/asm-offsets.h> #include <asm/processor-flags.h> #include <asm/frame.h> +#include <asm/nospec-branch.h> + + /* code below belongs to the image kernel */ + .align PAGE_SIZE +SYM_FUNC_START(restore_registers) + /* go back to the original page tables */ + movq %r9, %cr3 + + /* Flush TLB, including "global" things (vmalloc) */ + movq mmu_cr4_features(%rip), %rax + movq %rax, %rdx + andq $~(X86_CR4_PGE), %rdx + movq %rdx, %cr4; # turn off PGE + movq %cr3, %rcx; # flush TLB + movq %rcx, %cr3 + movq %rax, %cr4; # turn PGE back on + + /* We don't restore %rax, it must be 0 anyway */ + movq $saved_context, %rax + movq pt_regs_sp(%rax), %rsp + movq pt_regs_bp(%rax), %rbp + movq pt_regs_si(%rax), %rsi + movq pt_regs_di(%rax), %rdi + movq pt_regs_bx(%rax), %rbx + movq pt_regs_cx(%rax), %rcx + movq pt_regs_dx(%rax), %rdx + movq pt_regs_r8(%rax), %r8 + movq pt_regs_r9(%rax), %r9 + movq pt_regs_r10(%rax), %r10 + movq pt_regs_r11(%rax), %r11 + movq pt_regs_r12(%rax), %r12 + movq pt_regs_r13(%rax), %r13 + movq pt_regs_r14(%rax), %r14 + movq pt_regs_r15(%rax), %r15 + pushq pt_regs_flags(%rax) + popfq + + /* Saved in save_processor_state. 
*/ + lgdt saved_context_gdt_desc(%rax) + + xorl %eax, %eax + + /* tell the hibernation core that we've just restored the memory */ + movq %rax, in_suspend(%rip) + + ret +SYM_FUNC_END(restore_registers) SYM_FUNC_START(swsusp_arch_suspend) movq $saved_context, %rax @@ -52,7 +99,7 @@ SYM_FUNC_START(swsusp_arch_suspend) ret SYM_FUNC_END(swsusp_arch_suspend) -SYM_CODE_START(restore_image) +SYM_FUNC_START(restore_image) /* prepare to jump to the image kernel */ movq restore_jump_address(%rip), %r8 movq restore_cr3(%rip), %r9 @@ -66,11 +113,12 @@ SYM_CODE_START(restore_image) /* jump to relocated restore code */ movq relocated_restore_code(%rip), %rcx + ANNOTATE_RETPOLINE_SAFE jmpq *%rcx -SYM_CODE_END(restore_image) +SYM_FUNC_END(restore_image) /* code below has been relocated to a safe page */ -SYM_CODE_START(core_restore_code) +SYM_FUNC_START(core_restore_code) /* switch to temporary page tables */ movq %rax, %cr3 /* flush TLB */ @@ -97,51 +145,6 @@ SYM_CODE_START(core_restore_code) .Ldone: /* jump to the restore_registers address from the image header */ + ANNOTATE_RETPOLINE_SAFE jmpq *%r8 -SYM_CODE_END(core_restore_code) - - /* code below belongs to the image kernel */ - .align PAGE_SIZE -SYM_FUNC_START(restore_registers) - /* go back to the original page tables */ - movq %r9, %cr3 - - /* Flush TLB, including "global" things (vmalloc) */ - movq mmu_cr4_features(%rip), %rax - movq %rax, %rdx - andq $~(X86_CR4_PGE), %rdx - movq %rdx, %cr4; # turn off PGE - movq %cr3, %rcx; # flush TLB - movq %rcx, %cr3 - movq %rax, %cr4; # turn PGE back on - - /* We don't restore %rax, it must be 0 anyway */ - movq $saved_context, %rax - movq pt_regs_sp(%rax), %rsp - movq pt_regs_bp(%rax), %rbp - movq pt_regs_si(%rax), %rsi - movq pt_regs_di(%rax), %rdi - movq pt_regs_bx(%rax), %rbx - movq pt_regs_cx(%rax), %rcx - movq pt_regs_dx(%rax), %rdx - movq pt_regs_r8(%rax), %r8 - movq pt_regs_r9(%rax), %r9 - movq pt_regs_r10(%rax), %r10 - movq pt_regs_r11(%rax), %r11 - movq pt_regs_r12(%rax), %r12 - movq pt_regs_r13(%rax), %r13 - movq pt_regs_r14(%rax), %r14 - movq pt_regs_r15(%rax), %r15 - pushq pt_regs_flags(%rax) - popfq - - /* Saved in save_processor_state. */ - lgdt saved_context_gdt_desc(%rax) - - xorl %eax, %eax - - /* tell the hibernation core that we've just restored the memory */ - movq %rax, in_suspend(%rip) - - ret -SYM_FUNC_END(restore_registers) +SYM_FUNC_END(core_restore_code) diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index 55b1ab378974..bddfc9a46645 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -29,14 +29,14 @@ posttest: $(obj)/insn_decoder_test vmlinux $(obj)/insn_sanity hostprogs += insn_decoder_test insn_sanity # -I needed for generated C source and C source which in the kernel tree. -HOSTCFLAGS_insn_decoder_test.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/ +HOSTCFLAGS_insn_decoder_test.o := -Wall -I$(srctree)/tools/arch/x86/lib/ -I$(srctree)/tools/arch/x86/include/ -I$(objtree)/arch/x86/lib/ -HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/ +HOSTCFLAGS_insn_sanity.o := -Wall -I$(srctree)/tools/arch/x86/lib/ -I$(srctree)/tools/arch/x86/include/ -I$(objtree)/arch/x86/lib/ # Dependencies are also needed. 
-$(obj)/insn_decoder_test.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c +$(obj)/insn_decoder_test.o: $(srctree)/tools/arch/x86/lib/insn.c $(srctree)/tools/arch/x86/lib/inat.c $(srctree)/tools/arch/x86/include/asm/inat_types.h $(srctree)/tools/arch/x86/include/asm/inat.h $(srctree)/tools/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c -$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c +$(obj)/insn_sanity.o: $(srctree)/tools/arch/x86/lib/insn.c $(srctree)/tools/arch/x86/lib/inat.c $(srctree)/tools/arch/x86/include/asm/inat_types.h $(srctree)/tools/arch/x86/include/asm/inat.h $(srctree)/tools/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c HOST_EXTRACFLAGS += -I$(srctree)/tools/include hostprogs += relocs diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c index 185ceba9d289..c6a0000ae635 100644 --- a/arch/x86/tools/insn_sanity.c +++ b/arch/x86/tools/insn_sanity.c @@ -14,10 +14,6 @@ #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> - -#define unlikely(cond) (cond) -#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) - #include <asm/insn.h> #include <inat.c> #include <insn.c> diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 1c3a1962cade..04c5a44b9682 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -61,8 +61,8 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = { "(__iommu_table|__apicdrivers|__smp_locks)(|_end)|" "__(start|end)_pci_.*|" "__(start|end)_builtin_fw|" - "__(start|stop)___ksymtab(|_gpl|_unused|_unused_gpl|_gpl_future)|" - "__(start|stop)___kcrctab(|_gpl|_unused|_unused_gpl|_gpl_future)|" + "__(start|stop)___ksymtab(|_gpl)|" + "__(start|stop)___kcrctab(|_gpl)|" "__(start|stop)___param|" "__(start|stop)___modver|" "__(start|stop)___bug_table|" diff --git a/arch/x86/um/os-Linux/task_size.c b/arch/x86/um/os-Linux/task_size.c index e62174638f00..1dc9adc20b1c 100644 --- a/arch/x86/um/os-Linux/task_size.c +++ b/arch/x86/um/os-Linux/task_size.c @@ -145,7 +145,7 @@ out: unsigned long os_get_top_address(void) { /* The old value of CONFIG_TOP_ADDR */ - return 0x7fc0000000; + return 0x7fc0002000; } #endif diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h index 51fd256c75f0..c3891c1ada26 100644 --- a/arch/x86/um/shared/sysdep/stub_32.h +++ b/arch/x86/um/shared/sysdep/stub_32.h @@ -7,8 +7,8 @@ #define __SYSDEP_STUB_H #include <asm/ptrace.h> +#include <generated/asm-offsets.h> -#define STUB_SYSCALL_RET EAX #define STUB_MMAP_NR __NR_mmap2 #define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT) @@ -77,17 +77,28 @@ static inline void trap_myself(void) __asm("int3"); } -static inline void remap_stack(int fd, unsigned long offset) +static void inline remap_stack_and_trap(void) { - __asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;" - "movl %7, %%ebx ; movl %%eax, (%%ebx)" - : : "g" (STUB_MMAP_NR), "b" (STUB_DATA), - "c" (UM_KERN_PAGE_SIZE), - "d" (PROT_READ | PROT_WRITE), - "S" (MAP_FIXED | MAP_SHARED), "D" (fd), - "a" (offset), - "i" (&((struct stub_data *) STUB_DATA)->err) - : "memory"); + __asm__ volatile ( + "movl %%esp,%%ebx ;" + "andl %0,%%ebx ;" + "movl %1,%%eax ;" 
+ "movl %%ebx,%%edi ; addl %2,%%edi ; movl (%%edi),%%edi ;" + "movl %%ebx,%%ebp ; addl %3,%%ebp ; movl (%%ebp),%%ebp ;" + "int $0x80 ;" + "addl %4,%%ebx ; movl %%eax, (%%ebx) ;" + "int $3" + : : + "g" (~(UM_KERN_PAGE_SIZE - 1)), + "g" (STUB_MMAP_NR), + "g" (UML_STUB_FIELD_FD), + "g" (UML_STUB_FIELD_OFFSET), + "g" (UML_STUB_FIELD_CHILD_ERR), + "c" (UM_KERN_PAGE_SIZE), + "d" (PROT_READ | PROT_WRITE), + "S" (MAP_FIXED | MAP_SHARED) + : + "memory"); } #endif diff --git a/arch/x86/um/shared/sysdep/stub_64.h b/arch/x86/um/shared/sysdep/stub_64.h index 994df93c5ed3..6e2626b77a2e 100644 --- a/arch/x86/um/shared/sysdep/stub_64.h +++ b/arch/x86/um/shared/sysdep/stub_64.h @@ -7,8 +7,8 @@ #define __SYSDEP_STUB_H #include <sysdep/ptrace_user.h> +#include <generated/asm-offsets.h> -#define STUB_SYSCALL_RET PT_INDEX(RAX) #define STUB_MMAP_NR __NR_mmap #define MMAP_OFFSET(o) (o) @@ -82,18 +82,30 @@ static inline void trap_myself(void) __asm("int3"); } -static inline void remap_stack(long fd, unsigned long offset) +static inline void remap_stack_and_trap(void) { - __asm__ volatile ("movq %4,%%r10 ; movq %5,%%r8 ; " - "movq %6, %%r9; " __syscall "; movq %7, %%rbx ; " - "movq %%rax, (%%rbx)": - : "a" (STUB_MMAP_NR), "D" (STUB_DATA), - "S" (UM_KERN_PAGE_SIZE), - "d" (PROT_READ | PROT_WRITE), - "g" (MAP_FIXED | MAP_SHARED), "g" (fd), - "g" (offset), - "i" (&((struct stub_data *) STUB_DATA)->err) - : __syscall_clobber, "r10", "r8", "r9" ); + __asm__ volatile ( + "movq %0,%%rax ;" + "movq %%rsp,%%rdi ;" + "andq %1,%%rdi ;" + "movq %2,%%r10 ;" + "movq %%rdi,%%r8 ; addq %3,%%r8 ; movq (%%r8),%%r8 ;" + "movq %%rdi,%%r9 ; addq %4,%%r9 ; movq (%%r9),%%r9 ;" + __syscall ";" + "movq %%rsp,%%rdi ; andq %1,%%rdi ;" + "addq %5,%%rdi ; movq %%rax, (%%rdi) ;" + "int3" + : : + "g" (STUB_MMAP_NR), + "g" (~(UM_KERN_PAGE_SIZE - 1)), + "g" (MAP_FIXED | MAP_SHARED), + "g" (UML_STUB_FIELD_FD), + "g" (UML_STUB_FIELD_OFFSET), + "g" (UML_STUB_FIELD_CHILD_ERR), + "S" (UM_KERN_PAGE_SIZE), + "d" (PROT_READ | PROT_WRITE) + : + __syscall_clobber, "r10", "r8", "r9"); } #endif diff --git a/arch/x86/um/stub_32.S b/arch/x86/um/stub_32.S index a193e88536a9..8291899e6aaf 100644 --- a/arch/x86/um/stub_32.S +++ b/arch/x86/um/stub_32.S @@ -5,21 +5,22 @@ .globl batch_syscall_stub batch_syscall_stub: - /* load pointer to first operation */ - mov $(STUB_DATA+8), %esp - + /* %esp comes in as "top of page" */ + mov %esp, %ecx + /* %esp has pointer to first operation */ + add $8, %esp again: /* load length of additional data */ mov 0x0(%esp), %eax /* if(length == 0) : end of list */ /* write possible 0 to header */ - mov %eax, STUB_DATA+4 + mov %eax, 0x4(%ecx) cmpl $0, %eax jz done /* save current pointer */ - mov %esp, STUB_DATA+4 + mov %esp, 0x4(%ecx) /* skip additional data */ add %eax, %esp @@ -38,6 +39,10 @@ again: /* execute syscall */ int $0x80 + /* restore top of page pointer in %ecx */ + mov %esp, %ecx + andl $(~UM_KERN_PAGE_SIZE) + 1, %ecx + /* check return value */ pop %ebx cmp %ebx, %eax @@ -45,7 +50,7 @@ again: done: /* save return value */ - mov %eax, STUB_DATA + mov %eax, (%ecx) /* stop */ int3 diff --git a/arch/x86/um/stub_64.S b/arch/x86/um/stub_64.S index 8a95c5b2eaf9..f3404640197a 100644 --- a/arch/x86/um/stub_64.S +++ b/arch/x86/um/stub_64.S @@ -4,9 +4,8 @@ .section .__syscall_stub, "ax" .globl batch_syscall_stub batch_syscall_stub: - mov $(STUB_DATA), %rbx - /* load pointer to first operation */ - mov %rbx, %rsp + /* %rsp has the pointer to first operation */ + mov %rsp, %rbx add $0x10, %rsp again: /* load length of additional data 
*/ diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c index 27361cbb7ca9..21836eaf1725 100644 --- a/arch/x86/um/stub_segv.c +++ b/arch/x86/um/stub_segv.c @@ -11,10 +11,11 @@ void __attribute__ ((__section__ (".__syscall_stub"))) stub_segv_handler(int sig, siginfo_t *info, void *p) { + int stack; ucontext_t *uc = p; + struct faultinfo *f = (void *)(((unsigned long)&stack) & ~(UM_KERN_PAGE_SIZE - 1)); - GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA), - &uc->uc_mcontext); + GET_FAULTINFO_FROM_MC(*f, &uc->uc_mcontext); trap_myself(); } diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index fc5c5ba4aacb..40b5779fce21 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -OBJECT_FILES_NON_STANDARD_xen-asm.o := y ifdef CONFIG_FUNCTION_TRACER # Do not profile debug and lowlevel utilities diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 3301875dd196..a3cc33091f46 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -416,6 +416,9 @@ void __init xen_vmalloc_p2m_tree(void) xen_p2m_last_pfn = xen_max_p2m_pfn; p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; + if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC)) + p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO; + vm.flags = VM_ALLOC; vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), PMD_SIZE * PMDS_PER_MID_PAGE); @@ -652,10 +655,9 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) pte_t *ptep; unsigned int level; - if (unlikely(pfn >= xen_p2m_size)) { - BUG_ON(mfn != INVALID_P2M_ENTRY); - return true; - } + /* Only invalid entries allowed above the highest p2m covered frame. */ + if (unlikely(pfn >= xen_p2m_size)) + return mfn == INVALID_P2M_ENTRY; /* * The interface requires atomic updates on p2m elements. @@ -710,9 +712,12 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, for (i = 0; i < count; i++) { unsigned long mfn, pfn; + struct gnttab_unmap_grant_ref unmap[2]; + int rc; /* Do not add to override if the map failed. */ - if (map_ops[i].status) + if (map_ops[i].status != GNTST_okay || + (kmap_ops && kmap_ops[i].status != GNTST_okay)) continue; if (map_ops[i].flags & GNTMAP_contains_pte) { @@ -726,10 +731,46 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned"); - if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { - ret = -ENOMEM; - goto out; + if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) + continue; + + /* + * Signal an error for this slot. This in turn requires + * immediate unmapping. + */ + map_ops[i].status = GNTST_general_error; + unmap[0].host_addr = map_ops[i].host_addr, + unmap[0].handle = map_ops[i].handle; + map_ops[i].handle = ~0; + if (map_ops[i].flags & GNTMAP_device_map) + unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr; + else + unmap[0].dev_bus_addr = 0; + + if (kmap_ops) { + kmap_ops[i].status = GNTST_general_error; + unmap[1].host_addr = kmap_ops[i].host_addr, + unmap[1].handle = kmap_ops[i].handle; + kmap_ops[i].handle = ~0; + if (kmap_ops[i].flags & GNTMAP_device_map) + unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr; + else + unmap[1].dev_bus_addr = 0; } + + /* + * Pre-populate both status fields, to be recognizable in + * the log message below. 
+ */ + unmap[0].status = 1; + unmap[1].status = 1; + + rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, + unmap, 1 + !!kmap_ops); + if (rc || unmap[0].status != GNTST_okay || + unmap[1].status != GNTST_okay) + pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n", + rc, unmap[0].status, unmap[1].status); } out: @@ -750,17 +791,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); unsigned long pfn = page_to_pfn(pages[i]); - if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { + if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT)) + set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + else ret = -EINVAL; - goto out; - } - - set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } if (kunmap_ops) ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, - kunmap_ops, count); -out: + kunmap_ops, count) ?: ret; + return ret; } EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 7eab14d56369..1a3b75652fa4 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -59,18 +59,6 @@ static struct { } xen_remap_buf __initdata __aligned(PAGE_SIZE); static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY; -/* - * The maximum amount of extra memory compared to the base size. The - * main scaling factor is the size of struct page. At extreme ratios - * of base:extra, all the base memory can be filled with page - * structures for the extra memory, leaving no space for anything - * else. - * - * 10x seems like a reasonable balance between scaling flexibility and - * leaving a practically usable system. - */ -#define EXTRA_MEM_RATIO (10) - static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB); static void __init xen_parse_512gb(void) @@ -790,20 +778,13 @@ char * __init xen_memory_setup(void) extra_pages += max_pages - max_pfn; /* - * Clamp the amount of extra memory to a EXTRA_MEM_RATIO - * factor the base size. On non-highmem systems, the base - * size is the full initial memory allocation; on highmem it - * is limited to the max size of lowmem, so that it doesn't - * get completely filled. + * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO + * factor the base size. * * Make sure we have no memory above max_pages, as this area * isn't handled by the p2m management. - * - * In principle there could be a problem in lowmem systems if - * the initial memory is also very large with respect to - * lowmem, but we won't try to deal with that here. 
*/ - extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), + extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), extra_pages, max_pages - max_pfn); i = 0; addr = xen_e820_table.entries[0].addr; diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index 02f31341e435..1e626444712b 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S @@ -14,6 +14,7 @@ #include <asm/thread_info.h> #include <asm/asm.h> #include <asm/frame.h> +#include <asm/unwind_hints.h> #include <xen/interface/xen.h> @@ -118,6 +119,7 @@ SYM_FUNC_END(xen_read_cr2_direct); .macro xen_pv_trap name SYM_CODE_START(xen_\name) + UNWIND_HINT_EMPTY pop %rcx pop %r11 jmp \name @@ -157,6 +159,7 @@ xen_pv_trap asm_exc_xen_hypervisor_callback SYM_CODE_START(xen_early_idt_handler_array) i = 0 .rept NUM_EXCEPTION_VECTORS + UNWIND_HINT_EMPTY pop %rcx pop %r11 jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE @@ -183,6 +186,7 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 * rsp->rax } */ SYM_CODE_START(xen_iret) + UNWIND_HINT_EMPTY pushq $0 jmp hypercall_iret SYM_CODE_END(xen_iret) @@ -203,7 +207,8 @@ SYM_CODE_END(xen_iret) */ /* Normal 64-bit system call target */ -SYM_FUNC_START(xen_syscall_target) +SYM_CODE_START(xen_syscall_target) + UNWIND_HINT_EMPTY popq %rcx popq %r11 @@ -216,12 +221,13 @@ SYM_FUNC_START(xen_syscall_target) movq $__USER_CS, 1*8(%rsp) jmp entry_SYSCALL_64_after_hwframe -SYM_FUNC_END(xen_syscall_target) +SYM_CODE_END(xen_syscall_target) #ifdef CONFIG_IA32_EMULATION /* 32-bit compat syscall target */ -SYM_FUNC_START(xen_syscall32_target) +SYM_CODE_START(xen_syscall32_target) + UNWIND_HINT_EMPTY popq %rcx popq %r11 @@ -234,10 +240,11 @@ SYM_FUNC_START(xen_syscall32_target) movq $__USER32_CS, 1*8(%rsp) jmp entry_SYSCALL_compat_after_hwframe -SYM_FUNC_END(xen_syscall32_target) +SYM_CODE_END(xen_syscall32_target) /* 32-bit compat sysenter target */ -SYM_FUNC_START(xen_sysenter_target) +SYM_CODE_START(xen_sysenter_target) + UNWIND_HINT_EMPTY /* * NB: Xen is polite and clears TF from EFLAGS for us. This means * that we don't need to guard against single step exceptions here. 
@@ -254,17 +261,18 @@ SYM_FUNC_START(xen_sysenter_target) movq $__USER32_CS, 1*8(%rsp) jmp entry_SYSENTER_compat_after_hwframe -SYM_FUNC_END(xen_sysenter_target) +SYM_CODE_END(xen_sysenter_target) #else /* !CONFIG_IA32_EMULATION */ -SYM_FUNC_START_ALIAS(xen_syscall32_target) -SYM_FUNC_START(xen_sysenter_target) +SYM_CODE_START(xen_syscall32_target) +SYM_CODE_START(xen_sysenter_target) + UNWIND_HINT_EMPTY lea 16(%rsp), %rsp /* strip %rcx, %r11 */ mov $-ENOSYS, %rax pushq $0 jmp hypercall_iret -SYM_FUNC_END(xen_sysenter_target) -SYM_FUNC_END_ALIAS(xen_syscall32_target) +SYM_CODE_END(xen_sysenter_target) +SYM_CODE_END(xen_syscall32_target) #endif /* CONFIG_IA32_EMULATION */ diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index 2d7c8f34f56c..cb6538ae2fe0 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -68,8 +68,9 @@ SYM_CODE_END(asm_cpu_bringup_and_idle) .balign PAGE_SIZE SYM_CODE_START(hypercall_page) .rept (PAGE_SIZE / 32) - UNWIND_HINT_EMPTY - .skip 32 + UNWIND_HINT_FUNC + .skip 31, 0x90 + ret .endr #define HYPERCALL(n) \ diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h index 584b0de6f2ca..41c449ece2d8 100644 --- a/arch/xtensa/include/asm/spinlock.h +++ b/arch/xtensa/include/asm/spinlock.h @@ -12,8 +12,8 @@ #define _XTENSA_SPINLOCK_H #include <asm/barrier.h> -#include <asm/qrwlock.h> #include <asm/qspinlock.h> +#include <asm/qrwlock.h> #define smp_mb__after_spinlock() smp_mb() diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 397a7de56377..9534ef515d74 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -217,7 +217,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, p->thread.sp = (unsigned long)childregs; - if (!(p->flags & PF_KTHREAD)) { + if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { struct pt_regs *regs = current_pt_regs(); unsigned long usp = usp_thread_fn ? 
usp_thread_fn : regs->areg[1]; diff --git a/arch/xtensa/kernel/syscalls/Makefile b/arch/xtensa/kernel/syscalls/Makefile index 659faefdcb1d..285aaba832d9 100644 --- a/arch/xtensa/kernel/syscalls/Makefile +++ b/arch/xtensa/kernel/syscalls/Makefile @@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') -syscall := $(srctree)/$(src)/syscall.tbl +syscall := $(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh systbl := $(srctree)/$(src)/syscalltbl.sh @@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_abi_$(basetarget))' \ '$(systbl_offset_$(basetarget))' -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -$(kapi)/syscall_table.h: $(syscall) $(systbl) +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h kapisyshdr-y += syscall_table.h -targets += $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl index 46116a28eeed..365a9b849224 100644 --- a/arch/xtensa/kernel/syscalls/syscall.tbl +++ b/arch/xtensa/kernel/syscalls/syscall.tbl @@ -412,3 +412,4 @@ 439 common faccessat2 sys_faccessat2 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr
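
A recurring trick in the UML stub rework above (stub_32.S, stub_64.S, stub_segv.c and the remap_stack_and_trap() helpers) is deriving the stub's data-page base from a stack address by masking off the low page bits, rather than hard-coding the STUB_DATA constant: because the stub stack lives on the same page as the stub data, any on-stack address rounded down to a page boundary yields the data page. Below is a minimal user-space C sketch of that masking, assuming a 4 KiB page; PAGE_SIZE stands in for UM_KERN_PAGE_SIZE and all names are illustrative, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* stands in for UM_KERN_PAGE_SIZE */

int main(void)
{
	int anchor;	/* any stack variable; in the stubs, the stack shares a page with the stub data */
	uintptr_t page_base = (uintptr_t)&anchor & ~(uintptr_t)(PAGE_SIZE - 1);

	/*
	 * Everything on this page can now be addressed relative to
	 * page_base, which is how the reworked stub_segv_handler()
	 * locates its struct faultinfo without a fixed address.
	 */
	printf("stack variable at %p, enclosing page at %#lx\n",
	       (void *)&anchor, (unsigned long)page_base);
	return 0;
}

In the actual patch the same computation appears as "((unsigned long)&stack) & ~(UM_KERN_PAGE_SIZE - 1)" in stub_segv.c and as the andl/andq applied to the stack pointer in the remap_stack_and_trap() inline assembly and batch_syscall_stub.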