Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 134
-rw-r--r--  arch/arm64/Kconfig.platforms | 8
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi | 40
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi | 40
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-s4.dtsi | 8
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts | 1
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-sm1.dtsi | 20
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi | 8
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi | 8
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8qm.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts | 12
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7622.dtsi | 32
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts | 74
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7986a.dtsi | 39
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts | 70
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8996.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8250-mtp.dts | 12
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8250.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts | 40
-rw-r--r--  arch/arm64/include/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h | 6
-rw-r--r--  arch/arm64/include/asm/archrandom.h | 2
-rw-r--r--  arch/arm64/include/asm/asm-bug.h | 4
-rw-r--r--  arch/arm64/include/asm/compiler.h | 16
-rw-r--r--  arch/arm64/include/asm/cpu.h | 4
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 24
-rw-r--r--  arch/arm64/include/asm/cputype.h | 4
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h | 4
-rw-r--r--  arch/arm64/include/asm/el2_setup.h | 66
-rw-r--r--  arch/arm64/include/asm/esr.h | 21
-rw-r--r--  arch/arm64/include/asm/exception.h | 29
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 135
-rw-r--r--  arch/arm64/include/asm/fpsimdmacros.h | 87
-rw-r--r--  arch/arm64/include/asm/ftrace.h | 7
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 2
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 8
-rw-r--r--  arch/arm64/include/asm/insn.h | 9
-rw-r--r--  arch/arm64/include/asm/io.h | 4
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 34
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 16
-rw-r--r--  arch/arm64/include/asm/kvm_ras.h | 2
-rw-r--r--  arch/arm64/include/asm/mte.h | 1
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 4
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 7
-rw-r--r--  arch/arm64/include/asm/processor.h | 36
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 32
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 181
-rw-r--r--  arch/arm64/include/asm/system_misc.h | 4
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 2
-rw-r--r--  arch/arm64/include/asm/traps.h | 12
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 15
-rw-r--r--  arch/arm64/include/asm/xen/page-coherent.h | 2
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 8
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h | 2
-rw-r--r--  arch/arm64/include/uapi/asm/ptrace.h | 69
-rw-r--r--  arch/arm64/include/uapi/asm/sigcontext.h | 55
-rw-r--r--  arch/arm64/kernel/Makefile | 4
-rw-r--r--  arch/arm64/kernel/alternative.c | 6
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 4
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 182
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 13
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 12
-rw-r--r--  arch/arm64/kernel/elfcore.c | 49
-rw-r--r--  arch/arm64/kernel/entry-common.c | 25
-rw-r--r--  arch/arm64/kernel/entry-fpsimd.S | 36
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 17
-rw-r--r--  arch/arm64/kernel/entry.S | 2
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 655
-rw-r--r--  arch/arm64/kernel/ftrace.c | 17
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 6
-rw-r--r--  arch/arm64/kernel/kgdb.c | 6
-rw-r--r--  arch/arm64/kernel/machine_kexec.c | 9
-rw-r--r--  arch/arm64/kernel/machine_kexec_file.c | 12
-rw-r--r--  arch/arm64/kernel/module-plts.c | 2
-rw-r--r--  arch/arm64/kernel/mte.c | 48
-rw-r--r--  arch/arm64/kernel/paravirt.c | 29
-rw-r--r--  arch/arm64/kernel/patching.c | 4
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c | 4
-rw-r--r--  arch/arm64/kernel/probes/uprobes.c | 4
-rw-r--r--  arch/arm64/kernel/process.c | 44
-rw-r--r--  arch/arm64/kernel/proton-pack.c | 1
-rw-r--r--  arch/arm64/kernel/ptrace.c | 366
-rw-r--r--  arch/arm64/kernel/relocate_kernel.S | 22
-rw-r--r--  arch/arm64/kernel/setup.c | 17
-rw-r--r--  arch/arm64/kernel/signal.c | 189
-rw-r--r--  arch/arm64/kernel/signal32.c | 1
-rw-r--r--  arch/arm64/kernel/smp.c | 3
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 124
-rw-r--r--  arch/arm64/kernel/suspend.c | 2
-rw-r--r--  arch/arm64/kernel/sys_compat.c | 2
-rw-r--r--  arch/arm64/kernel/syscall.c | 29
-rw-r--r--  arch/arm64/kernel/traps.c | 67
-rw-r--r--  arch/arm64/kernel/vdso/Makefile | 6
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile | 3
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 21
-rw-r--r--  arch/arm64/kvm/arm.c | 4
-rw-r--r--  arch/arm64/kvm/fpsimd.c | 43
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 16
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h | 2
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 28
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/host.S | 18
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c | 30
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/sys_regs.c | 2
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v3-sr.c | 4
-rw-r--r--  arch/arm64/kvm/hyp/vhe/switch.c | 11
-rw-r--r--  arch/arm64/kvm/inject_fault.c | 32
-rw-r--r--  arch/arm64/kvm/mmu.c | 30
-rw-r--r--  arch/arm64/kvm/pmu-emul.c | 23
-rw-r--r--  arch/arm64/kvm/psci.c | 34
-rw-r--r--  arch/arm64/kvm/reset.c | 65
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 16
-rw-r--r--  arch/arm64/kvm/vgic/vgic-debug.c | 10
-rw-r--r--  arch/arm64/kvm/vgic/vgic-its.c | 2
-rw-r--r--  arch/arm64/lib/insn.c | 67
-rw-r--r--  arch/arm64/lib/mte.S | 4
-rw-r--r--  arch/arm64/mm/copypage.c | 4
-rw-r--r--  arch/arm64/mm/fault.c | 73
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 46
-rw-r--r--  arch/arm64/mm/init.c | 81
-rw-r--r--  arch/arm64/mm/ioremap.c | 8
-rw-r--r--  arch/arm64/mm/trans_pgd.c | 2
-rw-r--r--  arch/arm64/net/bpf_jit.h | 17
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 255
-rw-r--r--  arch/arm64/tools/Makefile | 8
-rw-r--r--  arch/arm64/tools/cpucaps | 2
-rwxr-xr-x  arch/arm64/tools/gen-sysreg.awk | 268
-rw-r--r--  arch/arm64/tools/sysreg | 369
136 files changed, 4116 insertions, 1005 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 57c4c995965f..d550f5acfaf3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -175,8 +175,6 @@ config ARM64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
- select HAVE_DYNAMIC_FTRACE_WITH_REGS \
- if $(cc-option,-fpatchable-function-entry=2)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
if DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -228,6 +226,17 @@ config ARM64
help
ARM 64-bit (AArch64) Linux support.
+config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+ def_bool CC_IS_CLANG
+ # https://github.com/ClangBuiltLinux/linux/issues/1507
+ depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS
+
+config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+ def_bool CC_IS_GCC
+ depends on $(cc-option,-fpatchable-function-entry=2)
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS
+
config 64BIT
def_bool y
@@ -253,31 +262,31 @@ config ARM64_CONT_PMD_SHIFT
default 4
config ARCH_MMAP_RND_BITS_MIN
- default 14 if ARM64_64K_PAGES
- default 16 if ARM64_16K_PAGES
- default 18
+ default 14 if ARM64_64K_PAGES
+ default 16 if ARM64_16K_PAGES
+ default 18
# max bits determined by the following formula:
# VA_BITS - PAGE_SHIFT - 3
config ARCH_MMAP_RND_BITS_MAX
- default 19 if ARM64_VA_BITS=36
- default 24 if ARM64_VA_BITS=39
- default 27 if ARM64_VA_BITS=42
- default 30 if ARM64_VA_BITS=47
- default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES
- default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES
- default 33 if ARM64_VA_BITS=48
- default 14 if ARM64_64K_PAGES
- default 16 if ARM64_16K_PAGES
- default 18
+ default 19 if ARM64_VA_BITS=36
+ default 24 if ARM64_VA_BITS=39
+ default 27 if ARM64_VA_BITS=42
+ default 30 if ARM64_VA_BITS=47
+ default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES
+ default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES
+ default 33 if ARM64_VA_BITS=48
+ default 14 if ARM64_64K_PAGES
+ default 16 if ARM64_16K_PAGES
+ default 18
config ARCH_MMAP_RND_COMPAT_BITS_MIN
- default 7 if ARM64_64K_PAGES
- default 9 if ARM64_16K_PAGES
- default 11
+ default 7 if ARM64_64K_PAGES
+ default 9 if ARM64_16K_PAGES
+ default 11
config ARCH_MMAP_RND_COMPAT_BITS_MAX
- default 16
+ default 16
config NO_IOPORT_MAP
def_bool y if !PCI
@@ -304,7 +313,7 @@ config GENERIC_HWEIGHT
def_bool y
config GENERIC_CSUM
- def_bool y
+ def_bool y
config GENERIC_CALIBRATE_DELAY
def_bool y
@@ -678,7 +687,7 @@ config ARM64_ERRATUM_2051678
default y
help
This option adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678.
- Affected Coretex-A510 might not respect the ordering rules for
+ Affected Cortex-A510 might not respect the ordering rules for
hardware update of the page table's dirty bit. The workaround
is to not enable the feature on affected CPUs.
@@ -1037,8 +1046,7 @@ config SOCIONEXT_SYNQUACER_PREITS
If unsure, say Y.
-endmenu
-
+endmenu # "ARM errata workarounds via the alternatives framework"
choice
prompt "Page size"
@@ -1566,9 +1574,9 @@ config SETEND_EMULATION
be unexpected results in the applications.
If unsure, say Y
-endif
+endif # ARMV8_DEPRECATED
-endif
+endif # COMPAT
menu "ARMv8.1 architectural features"
@@ -1593,15 +1601,15 @@ config ARM64_PAN
bool "Enable support for Privileged Access Never (PAN)"
default y
help
- Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
- prevents the kernel or hypervisor from accessing user-space (EL0)
- memory directly.
+ Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
+ prevents the kernel or hypervisor from accessing user-space (EL0)
+ memory directly.
- Choosing this option will cause any unprotected (not using
- copy_to_user et al) memory access to fail with a permission fault.
+ Choosing this option will cause any unprotected (not using
+ copy_to_user et al) memory access to fail with a permission fault.
- The feature is detected at runtime, and will remain as a 'nop'
- instruction if the cpu does not implement the feature.
+ The feature is detected at runtime, and will remain as a 'nop'
+ instruction if the cpu does not implement the feature.
config AS_HAS_LDAPR
def_bool $(as-instr,.arch_extension rcpc)
@@ -1629,15 +1637,15 @@ config ARM64_USE_LSE_ATOMICS
built with binutils >= 2.25 in order for the new instructions
to be used.
-endmenu
+endmenu # "ARMv8.1 architectural features"
menu "ARMv8.2 architectural features"
config AS_HAS_ARMV8_2
- def_bool $(cc-option,-Wa$(comma)-march=armv8.2-a)
+ def_bool $(cc-option,-Wa$(comma)-march=armv8.2-a)
config AS_HAS_SHA3
- def_bool $(as-instr,.arch armv8.2-a+sha3)
+ def_bool $(as-instr,.arch armv8.2-a+sha3)
config ARM64_PMEM
bool "Enable support for persistent memory"
@@ -1681,7 +1689,7 @@ config ARM64_CNP
at runtime, and does not affect PEs that do not implement
this feature.
-endmenu
+endmenu # "ARMv8.2 architectural features"
menu "ARMv8.3 architectural features"
@@ -1744,7 +1752,7 @@ config AS_HAS_PAC
config AS_HAS_CFI_NEGATE_RA_STATE
def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
-endmenu
+endmenu # "ARMv8.3 architectural features"
menu "ARMv8.4 architectural features"
@@ -1785,7 +1793,7 @@ config ARM64_TLB_RANGE
The feature introduces new assembly instructions, and they are
supported by binutils >= 2.30.
-endmenu
+endmenu # "ARMv8.4 architectural features"
menu "ARMv8.5 architectural features"
@@ -1871,6 +1879,7 @@ config ARM64_MTE
depends on AS_HAS_LSE_ATOMICS
# Required for tag checking in the uaccess routines
depends on ARM64_PAN
+ select ARCH_HAS_SUBPAGE_FAULTS
select ARCH_USES_HIGH_VMA_FLAGS
help
Memory Tagging (part of the ARMv8.5 Extensions) provides
@@ -1892,7 +1901,7 @@ config ARM64_MTE
Documentation/arm64/memory-tagging-extension.rst.
-endmenu
+endmenu # "ARMv8.5 architectural features"
menu "ARMv8.7 architectural features"
@@ -1901,12 +1910,12 @@ config ARM64_EPAN
default y
depends on ARM64_PAN
help
- Enhanced Privileged Access Never (EPAN) allows Privileged
- Access Never to be used with Execute-only mappings.
+ Enhanced Privileged Access Never (EPAN) allows Privileged
+ Access Never to be used with Execute-only mappings.
- The feature is detected at runtime, and will remain disabled
- if the cpu does not implement the feature.
-endmenu
+ The feature is detected at runtime, and will remain disabled
+ if the cpu does not implement the feature.
+endmenu # "ARMv8.7 architectural features"
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
@@ -1939,6 +1948,17 @@ config ARM64_SVE
booting the kernel. If unsure and you are not observing these
symptoms, you should assume that it is safe to say Y.
+config ARM64_SME
+ bool "ARM Scalable Matrix Extension support"
+ default y
+ depends on ARM64_SVE
+ help
+ The Scalable Matrix Extension (SME) is an extension to the AArch64
+ execution state which utilises a substantial subset of the SVE
+ instruction set, together with the addition of new architectural
+ register state capable of holding two dimensional matrix tiles to
+ enable various matrix operations.
+
config ARM64_MODULE_PLTS
bool "Use PLTs to allow module memory to spill over into vmalloc area"
depends on MODULES
@@ -1982,7 +2002,7 @@ config ARM64_DEBUG_PRIORITY_MASKING
the validity of ICC_PMR_EL1 when calling concerned functions.
If unsure, say N
-endif
+endif # ARM64_PSEUDO_NMI
config RELOCATABLE
bool "Build a relocatable kernel image" if EXPERT
@@ -2041,7 +2061,19 @@ config STACKPROTECTOR_PER_TASK
def_bool y
depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_SYSREG
-endmenu
+# The GPIO numbers here must be sorted in descending order. In case of
+# a multiplatform kernel, we just want the highest value required by the
+# selected platforms.
+config ARCH_NR_GPIO
+ int
+ default 2048 if ARCH_APPLE
+ default 0
+ help
+ Maximum number of GPIOs in the system.
+
+ If unsure, leave the default value.
+
+endmenu # "Kernel Features"
menu "Boot options"
@@ -2105,7 +2137,7 @@ config EFI
help
This option provides support for runtime services provided
by UEFI firmware (such as non-volatile variables, realtime
- clock, and platform reset). A UEFI stub is also provided to
+ clock, and platform reset). A UEFI stub is also provided to
allow the kernel to be booted as an EFI application. This
is only useful on systems that have UEFI firmware.
@@ -2120,7 +2152,7 @@ config DMI
However, even with this option, the resultant kernel should
continue to boot on existing non-UEFI platforms.
-endmenu
+endmenu # "Boot options"
config SYSVIPC_COMPAT
def_bool y
@@ -2141,7 +2173,7 @@ config ARCH_HIBERNATION_HEADER
config ARCH_SUSPEND_POSSIBLE
def_bool y
-endmenu
+endmenu # "Power management options"
menu "CPU Power Management"
@@ -2149,7 +2181,7 @@ source "drivers/cpuidle/Kconfig"
source "drivers/cpufreq/Kconfig"
-endmenu
+endmenu # "CPU Power Management"
source "drivers/acpi/Kconfig"
@@ -2157,4 +2189,4 @@ source "arch/arm64/kvm/Kconfig"
if CRYPTO
source "arch/arm64/crypto/Kconfig"
-endif
+endif # CRYPTO
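
The comment on ARCH_NR_GPIO above encodes its ordering rule only in prose; a hedged Kconfig sketch of how a later platform entry would slot in under that rule (ARCH_FOO and its 4096 value are hypothetical):

    config ARCH_NR_GPIO
            int
            default 4096 if ARCH_FOO        # hypothetical platform, larger values first
            default 2048 if ARCH_APPLE
            default 0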
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 30b123cde02c..4e6d635a1731 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -11,12 +11,11 @@ config ARCH_ACTIONS
config ARCH_SUNXI
bool "Allwinner sunxi 64-bit SoC Family"
select ARCH_HAS_RESET_CONTROLLER
- select GENERIC_IRQ_CHIP
- select IRQ_DOMAIN_HIERARCHY
- select IRQ_FASTEOI_HIERARCHY_HANDLERS
select PINCTRL
select RESET_CONTROLLER
select SUN4I_TIMER
+ select SUN6I_R_INTC
+ select SUNXI_NMI_INTC
help
This enables support for Allwinner sunxi based SoCs like the A64.
@@ -253,6 +252,7 @@ config ARCH_INTEL_SOCFPGA
config ARCH_SYNQUACER
bool "Socionext SynQuacer SoC Family"
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
config ARCH_TEGRA
bool "NVIDIA Tegra SoC Family"
@@ -325,4 +325,4 @@ config ARCH_ZYNQMP
help
This enables support for Xilinx ZynqMP Family
-endmenu
+endmenu # "Platform selection"
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
index d61f43052a34..8e9ad1e51d66 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
@@ -11,26 +11,6 @@
compatible = "operating-points-v2";
opp-shared;
- opp-100000000 {
- opp-hz = /bits/ 64 <100000000>;
- opp-microvolt = <731000>;
- };
-
- opp-250000000 {
- opp-hz = /bits/ 64 <250000000>;
- opp-microvolt = <731000>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <731000>;
- };
-
- opp-667000000 {
- opp-hz = /bits/ 64 <667000000>;
- opp-microvolt = <731000>;
- };
-
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <761000>;
@@ -71,26 +51,6 @@
compatible = "operating-points-v2";
opp-shared;
- opp-100000000 {
- opp-hz = /bits/ 64 <100000000>;
- opp-microvolt = <731000>;
- };
-
- opp-250000000 {
- opp-hz = /bits/ 64 <250000000>;
- opp-microvolt = <731000>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <731000>;
- };
-
- opp-667000000 {
- opp-hz = /bits/ 64 <667000000>;
- opp-microvolt = <731000>;
- };
-
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <731000>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
index 1e5d0ee5d541..44c23c984034 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
@@ -11,26 +11,6 @@
compatible = "operating-points-v2";
opp-shared;
- opp-100000000 {
- opp-hz = /bits/ 64 <100000000>;
- opp-microvolt = <731000>;
- };
-
- opp-250000000 {
- opp-hz = /bits/ 64 <250000000>;
- opp-microvolt = <731000>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <731000>;
- };
-
- opp-667000000 {
- opp-hz = /bits/ 64 <667000000>;
- opp-microvolt = <731000>;
- };
-
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <731000>;
@@ -76,26 +56,6 @@
compatible = "operating-points-v2";
opp-shared;
- opp-100000000 {
- opp-hz = /bits/ 64 <100000000>;
- opp-microvolt = <751000>;
- };
-
- opp-250000000 {
- opp-hz = /bits/ 64 <250000000>;
- opp-microvolt = <751000>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <751000>;
- };
-
- opp-667000000 {
- opp-hz = /bits/ 64 <667000000>;
- opp-microvolt = <751000>;
- };
-
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <771000>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
index 2e45a8ecd9a0..ff213618a598 100644
--- a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
@@ -14,28 +14,28 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a35","arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x0>;
enable-method = "psci";
};
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a35","arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x1>;
enable-method = "psci";
};
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a35","arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x2>;
enable-method = "psci";
};
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a35","arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x3>;
enable-method = "psci";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
index 5751c48620ed..cadba194b149 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
@@ -437,6 +437,7 @@
"",
"eMMC_RST#", /* BOOT_12 */
"eMMC_DS", /* BOOT_13 */
+ "", "",
/* GPIOC */
"SD_D0_B", /* GPIOC_0 */
"SD_D1_B", /* GPIOC_1 */
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
index 3c07a89bfd27..80737731af3f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
@@ -95,26 +95,6 @@
compatible = "operating-points-v2";
opp-shared;
- opp-100000000 {
- opp-hz = /bits/ 64 <100000000>;
- opp-microvolt = <730000>;
- };
-
- opp-250000000 {
- opp-hz = /bits/ 64 <250000000>;
- opp-microvolt = <730000>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <730000>;
- };
-
- opp-667000000 {
- opp-hz = /bits/ 64 <666666666>;
- opp-microvolt = <750000>;
- };
-
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <770000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
index 1dc9d187601c..a0bd540f27d3 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
@@ -89,12 +89,12 @@
pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <125>;
- touchscreen-size-x = /bits/ 16 <4008>;
+ touchscreen-size-x = <4008>;
ti,y-min = /bits/ 16 <282>;
- touchscreen-size-y = /bits/ 16 <3864>;
+ touchscreen-size-y = <3864>;
ti,x-plate-ohms = /bits/ 16 <180>;
- touchscreen-max-pressure = /bits/ 16 <255>;
- touchscreen-average-samples = /bits/ 16 <10>;
+ touchscreen-max-pressure = <255>;
+ touchscreen-average-samples = <10>;
ti,debounce-tol = /bits/ 16 <3>;
ti,debounce-rep = /bits/ 16 <1>;
ti,settle-delay-usec = /bits/ 16 <150>;
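
In DT source, /bits/ 16 narrows a property's cells from the default 32 bits. The generic touchscreen-* properties are specified as 32-bit values, while the ti,* vendor properties really are 16-bit, which is why only the former drop the modifier. A minimal sketch of the distinction:

    ti,x-plate-ohms = /bits/ 16 <180>;  /* vendor binding: 16-bit cells */
    touchscreen-size-x = <4008>;        /* generic binding: default 32-bit cells */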
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
index 337f2600a276..c557dbf4dcd6 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
@@ -147,12 +147,14 @@
&usbotg1 {
dr_mode = "otg";
+ over-current-active-low;
vbus-supply = <&reg_usb_otg1_vbus>;
status = "okay";
};
&usbotg2 {
dr_mode = "host";
+ disable-over-current;
status = "okay";
};
@@ -216,7 +218,7 @@
fsl,pins = <
MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6
- MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6
MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
index 8b0ef3fa543f..41d0de6a7027 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
@@ -212,12 +212,14 @@
&usbotg1 {
dr_mode = "otg";
+ over-current-active-low;
vbus-supply = <&reg_usb_otg1_vbus>;
status = "okay";
};
&usbotg2 {
dr_mode = "host";
+ disable-over-current;
vbus-supply = <&reg_usb_otg2_vbus>;
status = "okay";
};
@@ -310,7 +312,7 @@
fsl,pins = <
MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6
- MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6
MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
index 2561b8b3fb11..244ef8d6cc68 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
@@ -240,12 +240,14 @@
&usbotg1 {
dr_mode = "otg";
+ over-current-active-low;
vbus-supply = <&reg_usb_otg1_vbus>;
status = "okay";
};
&usbotg2 {
dr_mode = "host";
+ disable-over-current;
vbus-supply = <&reg_usb_otg2_vbus>;
status = "okay";
};
@@ -360,7 +362,7 @@
fsl,pins = <
MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6
- MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6
MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
index 7dfee715a2c4..d8ce217c6016 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
@@ -59,6 +59,10 @@
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
rohm,reset-snvs-powered;
+ #clock-cells = <0>;
+ clocks = <&osc_32k 0>;
+ clock-output-names = "clk-32k-out";
+
regulators {
buck1_reg: BUCK1 {
regulator-name = "buck1";
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
index b16c7caf34c1..87b5e23c766f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
@@ -70,12 +70,12 @@
pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <125>;
- touchscreen-size-x = /bits/ 16 <4008>;
+ touchscreen-size-x = <4008>;
ti,y-min = /bits/ 16 <282>;
- touchscreen-size-y = /bits/ 16 <3864>;
+ touchscreen-size-y = <3864>;
ti,x-plate-ohms = /bits/ 16 <180>;
- touchscreen-max-pressure = /bits/ 16 <255>;
- touchscreen-average-samples = /bits/ 16 <10>;
+ touchscreen-max-pressure = <255>;
+ touchscreen-average-samples = <10>;
ti,debounce-tol = /bits/ 16 <3>;
ti,debounce-rep = /bits/ 16 <1>;
ti,settle-delay-usec = /bits/ 16 <150>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 176abe8fe659..e41e1d56f980 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -293,7 +293,7 @@
ranges;
sai2: sai@30020000 {
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30020000 0x10000>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI2_IPG>,
@@ -307,7 +307,7 @@
};
sai3: sai@30030000 {
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30030000 0x10000>;
interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI3_IPG>,
@@ -321,7 +321,7 @@
};
sai5: sai@30050000 {
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30050000 0x10000>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI5_IPG>,
@@ -337,7 +337,7 @@
};
sai6: sai@30060000 {
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30060000 0x10000>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI6_IPG>,
@@ -394,7 +394,7 @@
};
sai7: sai@300b0000 {
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x300b0000 0x10000>;
interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI7_IPG>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
index 38ffcd145b33..899e8e7dbc24 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
@@ -253,7 +253,7 @@
#address-cells = <1>;
#size-cells = <1>;
spi-max-frequency = <84000000>;
- spi-tx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm.dtsi b/arch/arm64/boot/dts/freescale/imx8qm.dtsi
index be8c76a0554c..4f767012f1f5 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm.dtsi
@@ -196,7 +196,7 @@
};
clk: clock-controller {
- compatible = "fsl,imx8qxp-clk", "fsl,scu-clk";
+ compatible = "fsl,imx8qm-clk", "fsl,scu-clk";
#clock-cells = <2>;
};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
index d665f742a7d5..caf9c8529fca 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
@@ -303,7 +303,7 @@
/* switch nodes are enabled by U-Boot if modules are present */
switch0@10 {
compatible = "marvell,mv88e6190";
- reg = <0x10 0>;
+ reg = <0x10>;
dsa,member = <0 0>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_PERIDOT(0)>;
@@ -428,7 +428,7 @@
switch0@2 {
compatible = "marvell,mv88e6085";
- reg = <0x2 0>;
+ reg = <0x2>;
dsa,member = <0 0>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_TOPAZ>;
@@ -495,7 +495,7 @@
switch1@11 {
compatible = "marvell,mv88e6190";
- reg = <0x11 0>;
+ reg = <0x11>;
dsa,member = <0 1>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_PERIDOT(1)>;
@@ -620,7 +620,7 @@
switch1@2 {
compatible = "marvell,mv88e6085";
- reg = <0x2 0>;
+ reg = <0x2>;
dsa,member = <0 1>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_TOPAZ>;
@@ -687,7 +687,7 @@
switch2@12 {
compatible = "marvell,mv88e6190";
- reg = <0x12 0>;
+ reg = <0x12>;
dsa,member = <0 2>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_PERIDOT(2)>;
@@ -803,7 +803,7 @@
switch2@2 {
compatible = "marvell,mv88e6085";
- reg = <0x2 0>;
+ reg = <0x2>;
dsa,member = <0 2>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_TOPAZ>;
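
A node's reg must supply exactly as many cells as the parent bus's #address-cells and #size-cells imply; the moxtet bus uses one address cell and no size cells, so the second cell dropped above was surplus. A generic sketch of the rule (names illustrative):

    bus {
            #address-cells = <1>;
            #size-cells = <0>;

            switch@10 {
                    reg = <0x10>;   /* exactly one cell, per #address-cells */
            };
    };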
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 9768523fe4c3..dbcee8b4d8d8 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -364,7 +364,7 @@
};
cci_control2: slave-if@5000 {
- compatible = "arm,cci-400-ctrl-if";
+ compatible = "arm,cci-400-ctrl-if", "syscon";
interface-type = "ace";
reg = <0x5000 0x1000>;
};
@@ -920,6 +920,11 @@
};
};
+ hifsys: syscon@1af00000 {
+ compatible = "mediatek,mt7622-hifsys", "syscon";
+ reg = <0 0x1af00000 0 0x70>;
+ };
+
ethsys: syscon@1b000000 {
compatible = "mediatek,mt7622-ethsys",
"syscon";
@@ -939,6 +944,26 @@
dma-requests = <3>;
};
+ pcie_mirror: pcie-mirror@10000400 {
+ compatible = "mediatek,mt7622-pcie-mirror",
+ "syscon";
+ reg = <0 0x10000400 0 0x10>;
+ };
+
+ wed0: wed@1020a000 {
+ compatible = "mediatek,mt7622-wed",
+ "syscon";
+ reg = <0 0x1020a000 0 0x1000>;
+ interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ wed1: wed@1020b000 {
+ compatible = "mediatek,mt7622-wed",
+ "syscon";
+ reg = <0 0x1020b000 0 0x1000>;
+ interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
+ };
+
eth: ethernet@1b100000 {
compatible = "mediatek,mt7622-eth",
"mediatek,mt2701-eth",
@@ -965,6 +990,11 @@
power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
mediatek,ethsys = <&ethsys>;
mediatek,sgmiisys = <&sgmiisys>;
+ cci-control-port = <&cci_control2>;
+ mediatek,wed = <&wed0>, <&wed1>;
+ mediatek,pcie-mirror = <&pcie_mirror>;
+ mediatek,hifsys = <&hifsys>;
+ dma-coherent;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts b/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts
index 21e420829572..882277a52b69 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts
@@ -25,6 +25,80 @@
};
};
+&eth {
+ status = "okay";
+
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+
+ mdio: mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
+
+&mdio {
+ switch: switch@0 {
+ compatible = "mediatek,mt7531";
+ reg = <31>;
+ reset-gpios = <&pio 5 0>;
+ };
+};
+
+&switch {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan4";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
index 694acf8f5b70..d2636a0ed152 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
@@ -222,6 +222,45 @@
#reset-cells = <1>;
};
+ eth: ethernet@15100000 {
+ compatible = "mediatek,mt7986-eth";
+ reg = <0 0x15100000 0 0x80000>;
+ interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ethsys CLK_ETH_FE_EN>,
+ <&ethsys CLK_ETH_GP2_EN>,
+ <&ethsys CLK_ETH_GP1_EN>,
+ <&ethsys CLK_ETH_WOCPU1_EN>,
+ <&ethsys CLK_ETH_WOCPU0_EN>,
+ <&sgmiisys0 CLK_SGMII0_TX250M_EN>,
+ <&sgmiisys0 CLK_SGMII0_RX250M_EN>,
+ <&sgmiisys0 CLK_SGMII0_CDR_REF>,
+ <&sgmiisys0 CLK_SGMII0_CDR_FB>,
+ <&sgmiisys1 CLK_SGMII1_TX250M_EN>,
+ <&sgmiisys1 CLK_SGMII1_RX250M_EN>,
+ <&sgmiisys1 CLK_SGMII1_CDR_REF>,
+ <&sgmiisys1 CLK_SGMII1_CDR_FB>,
+ <&topckgen CLK_TOP_NETSYS_SEL>,
+ <&topckgen CLK_TOP_NETSYS_500M_SEL>;
+ clock-names = "fe", "gp2", "gp1", "wocpu1", "wocpu0",
+ "sgmii_tx250m", "sgmii_rx250m",
+ "sgmii_cdr_ref", "sgmii_cdr_fb",
+ "sgmii2_tx250m", "sgmii2_rx250m",
+ "sgmii2_cdr_ref", "sgmii2_cdr_fb",
+ "netsys0", "netsys1";
+ assigned-clocks = <&topckgen CLK_TOP_NETSYS_2X_SEL>,
+ <&topckgen CLK_TOP_SGM_325M_SEL>;
+ assigned-clock-parents = <&apmixedsys CLK_APMIXED_NET2PLL>,
+ <&apmixedsys CLK_APMIXED_SGMPLL>;
+ mediatek,ethsys = <&ethsys>;
+ mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
+ #reset-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts b/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts
index d73467ea3641..0f49d5764ff3 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts
@@ -28,3 +28,73 @@
&uart0 {
status = "okay";
};
+
+&eth {
+ status = "okay";
+
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+
+ mdio: mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch@0 {
+ compatible = "mediatek,mt7531";
+ reg = <31>;
+ reset-gpios = <&pio 5 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan4";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 9d993f25046a..205af7b479a8 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -1255,14 +1255,14 @@
pins = "gpio47", "gpio48";
function = "blsp_i2c3";
drive-strength = <16>;
- bias-disable = <0>;
+ bias-disable;
};
blsp1_i2c3_sleep: blsp1-i2c2-sleep {
pins = "gpio47", "gpio48";
function = "gpio";
drive-strength = <2>;
- bias-disable = <0>;
+ bias-disable;
};
blsp2_uart3_4pins_default: blsp2-uart2-4pins {
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
index 4841d42c8c62..3df4920295ad 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
@@ -28,7 +28,7 @@
};
&alc5682 {
- realtek,dmic-clk-driving-high = "true";
+ realtek,dmic-clk-driving-high;
};
&ap_tp_i2c {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
index 1084d5ce9ac7..07b729f9fec5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
@@ -630,7 +630,7 @@
pins = "gpio6", "gpio25", "gpio26";
function = "gpio";
drive-strength = <8>;
- bias-disable = <0>;
+ bias-disable;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
index fb99cc2827c7..7ab3627cc347 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
@@ -622,6 +622,10 @@
status = "okay";
};
+&rxmacro {
+ status = "okay";
+};
+
&slpi {
status = "okay";
firmware-name = "qcom/sm8250/slpi.mbn";
@@ -773,6 +777,8 @@
};
&swr1 {
+ status = "okay";
+
wcd_rx: wcd9380-rx@0,4 {
compatible = "sdw20217010d00";
reg = <0 4>;
@@ -781,6 +787,8 @@
};
&swr2 {
+ status = "okay";
+
wcd_tx: wcd9380-tx@0,3 {
compatible = "sdw20217010d00";
reg = <0 3>;
@@ -819,6 +827,10 @@
};
};
+&txmacro {
+ status = "okay";
+};
+
&uart12 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index 26afaa4f98fe..dc2562070336 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -2256,6 +2256,7 @@
pinctrl-0 = <&rx_swr_active>;
compatible = "qcom,sm8250-lpass-rx-macro";
reg = <0 0x3200000 0 0x1000>;
+ status = "disabled";
clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
<&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
@@ -2274,6 +2275,7 @@
swr1: soundwire-controller@3210000 {
reg = <0 0x3210000 0 0x2000>;
compatible = "qcom,soundwire-v1.5.1";
+ status = "disabled";
interrupts = <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rxmacro>;
clock-names = "iface";
@@ -2301,6 +2303,7 @@
pinctrl-0 = <&tx_swr_active>;
compatible = "qcom,sm8250-lpass-tx-macro";
reg = <0 0x3220000 0 0x1000>;
+ status = "disabled";
clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
<&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
@@ -2324,6 +2327,7 @@
compatible = "qcom,soundwire-v1.5.1";
interrupts-extended = <&intc GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "core";
+ status = "disabled";
clocks = <&txmacro>;
clock-names = "iface";
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
index a4aba63c7a1f..40cf2236c0b6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
@@ -16,6 +16,7 @@
aliases {
ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
mmc0 = &sdmmc0;
mmc1 = &sdhci;
};
@@ -127,7 +128,6 @@
assigned-clocks = <&cru SCLK_GMAC0_RX_TX>, <&cru SCLK_GMAC0>;
assigned-clock-parents = <&cru SCLK_GMAC0_RGMII_SPEED>, <&cru CLK_MAC0_2TOP>;
clock_in_out = "input";
- phy-handle = <&rgmii_phy0>;
phy-mode = "rgmii";
pinctrl-names = "default";
pinctrl-0 = <&gmac0_miim
@@ -139,8 +139,38 @@
snps,reset-active-low;
/* Reset time is 20ms, 100ms for rtl8211f */
snps,reset-delays-us = <0 20000 100000>;
+ tx_delay = <0x4f>;
+ rx_delay = <0x0f>;
+ status = "okay";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+};
+
+&gmac1 {
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru CLK_MAC1_2TOP>;
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1m1_miim
+ &gmac1m1_tx_bus2
+ &gmac1m1_rx_bus2
+ &gmac1m1_rgmii_clk
+ &gmac1m1_rgmii_bus>;
+
+ snps,reset-gpio = <&gpio3 RK_PB0 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ /* Reset time is 20ms, 100ms for rtl8211f */
+ snps,reset-delays-us = <0 20000 100000>;
+
tx_delay = <0x3c>;
rx_delay = <0x2f>;
+
status = "okay";
};
@@ -364,8 +394,8 @@
status = "disabled";
};
-&mdio0 {
- rgmii_phy0: ethernet-phy@0 {
+&mdio1 {
+ rgmii_phy1: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0x0>;
};
@@ -404,9 +434,9 @@
pmuio2-supply = <&vcc3v3_pmu>;
vccio1-supply = <&vccio_acodec>;
vccio3-supply = <&vccio_sd>;
- vccio4-supply = <&vcc_1v8>;
+ vccio4-supply = <&vcc_3v3>;
vccio5-supply = <&vcc_3v3>;
- vccio6-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_1v8>;
vccio7-supply = <&vcc_3v3>;
status = "okay";
};
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 345fe98605ba..5c8ee5a541d2 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -7,3 +7,4 @@ generic-y += parport.h
generic-y += user.h
generated-y += cpucaps.h
+generated-y += sysreg-defs.h
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 8bd5afc7b692..48d4473e8eee 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -26,12 +26,6 @@
* sets the GP register's most significant bits to 0 with an explicit cast.
*/
-static inline void gic_write_eoir(u32 irq)
-{
- write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
- isb();
-}
-
static __always_inline void gic_write_dir(u32 irq)
{
write_sysreg_s(irq, SYS_ICC_DIR_EL1);
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index d1bb5e71df25..3a6b6d38c5b8 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -142,7 +142,7 @@ static inline bool __init __early_cpu_has_rndr(void)
{
/* Open code as we run prior to the first call to cpufeature. */
unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
- return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
+ return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}
static inline bool __init __must_check
diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
index 03f52f84a4f3..c762038ba400 100644
--- a/arch/arm64/include/asm/asm-bug.h
+++ b/arch/arm64/include/asm/asm-bug.h
@@ -14,7 +14,7 @@
14472: .string file; \
.popsection; \
\
- .long 14472b - 14470b; \
+ .long 14472b - .; \
.short line;
#else
#define _BUGVERBOSE_LOCATION(file, line)
@@ -25,7 +25,7 @@
#define __BUG_ENTRY(flags) \
.pushsection __bug_table,"aw"; \
.align 2; \
- 14470: .long 14471f - 14470b; \
+ 14470: .long 14471f - .; \
_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
.short flags; \
.popsection; \
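
Storing `14472b - .` (and `14471f - .`) makes each bug-table field a signed offset relative to the field's own address instead of to the entry start, the form used by relative-pointer bug tables. A hedged C sketch of how such a field is decoded back into a pointer:

    /* sketch: recover an absolute address from a self-relative 32-bit field */
    static inline unsigned long decode_relptr(const int *field)
    {
            return (unsigned long)field + *field;
    }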
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index dc3ea4080e2e..6fb2e6bcc392 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -23,20 +23,4 @@
#define __builtin_return_address(val) \
(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
-#ifdef CONFIG_CFI_CLANG
-/*
- * With CONFIG_CFI_CLANG, the compiler replaces function address
- * references with the address of the function's CFI jump table
- * entry. The function_nocfi macro always returns the address of the
- * actual function instead.
- */
-#define function_nocfi(x) ({ \
- void *addr; \
- asm("adrp %0, " __stringify(x) "\n\t" \
- "add %0, %0, :lo12:" __stringify(x) \
- : "=r" (addr)); \
- addr; \
-})
-#endif
-
#endif /* __ASM_COMPILER_H */
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index a58e366f0b07..115cdec1ae87 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -58,11 +58,15 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
u64 reg_id_aa64zfr0;
+ u64 reg_id_aa64smfr0;
struct cpuinfo_32bit aarch32;
/* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */
u64 reg_zcr;
+
+ /* pseudo-SMCR for recording maximum SMCR_EL1 LEN value: */
+ u64 reg_smcr;
};
DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c62e7e5e2f0c..14a8f3d93add 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -622,6 +622,13 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
return val > 0;
}
+static inline bool id_aa64pfr1_sme(u64 pfr1)
+{
+ u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT);
+
+ return val > 0;
+}
+
static inline bool id_aa64pfr1_mte(u64 pfr1)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
@@ -759,6 +766,23 @@ static __always_inline bool system_supports_sve(void)
cpus_have_const_cap(ARM64_SVE);
}
+static __always_inline bool system_supports_sme(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_SME) &&
+ cpus_have_const_cap(ARM64_SME);
+}
+
+static __always_inline bool system_supports_fa64(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_SME) &&
+ cpus_have_const_cap(ARM64_SME_FA64);
+}
+
+static __always_inline bool system_supports_tpidr2(void)
+{
+ return system_supports_sme();
+}
+
static __always_inline bool system_supports_cnp(void)
{
return IS_ENABLED(CONFIG_ARM64_CNP) &&
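
Like the existing system_supports_sve(), the new helpers fold to a compile-time false when CONFIG_ARM64_SME is off and otherwise test a cached CPU capability, so call sites can guard SME handling cheaply. A hedged call-site sketch (function name illustrative):

    static void demo_flush_sme_state(void)
    {
            if (!system_supports_sme())
                    return;         /* compiled out, or CPUs lack SME */
            /* safe to touch SVCR/ZA state past this point */
    }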
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 232b439cbaf3..92331c07c2d1 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -36,7 +36,7 @@
#define MIDR_VARIANT(midr) \
(((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
#define MIDR_IMPLEMENTOR_SHIFT 24
-#define MIDR_IMPLEMENTOR_MASK (0xff << MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_IMPLEMENTOR_MASK (0xffU << MIDR_IMPLEMENTOR_SHIFT)
#define MIDR_IMPLEMENTOR(midr) \
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
@@ -75,6 +75,7 @@
#define ARM_CPU_PART_CORTEX_A77 0xD0D
#define ARM_CPU_PART_NEOVERSE_V1 0xD40
#define ARM_CPU_PART_CORTEX_A78 0xD41
+#define ARM_CPU_PART_CORTEX_A78AE 0xD42
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46
#define ARM_CPU_PART_CORTEX_A710 0xD47
@@ -130,6 +131,7 @@
#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
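
Without the U suffix, 0xff << 24 is evaluated in signed int and shifts into the sign bit (formally undefined behaviour); if the mask is then widened against a 64-bit value, sign extension smears it across the top 32 bits. A hedged illustration:

    unsigned long long midr = 0x410fd0d0ULL;
    unsigned long long bad  = midr & (0xff  << 24); /* mask widens to 0xffffffffff000000 */
    unsigned long long good = midr & (0xffU << 24); /* mask stays 0x00000000ff000000 */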
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 00c291067e57..7b7e05c02691 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -64,7 +64,7 @@ struct task_struct;
struct step_hook {
struct list_head node;
- int (*fn)(struct pt_regs *regs, unsigned int esr);
+ int (*fn)(struct pt_regs *regs, unsigned long esr);
};
void register_user_step_hook(struct step_hook *hook);
@@ -75,7 +75,7 @@ void unregister_kernel_step_hook(struct step_hook *hook);
struct break_hook {
struct list_head node;
- int (*fn)(struct pt_regs *regs, unsigned int esr);
+ int (*fn)(struct pt_regs *regs, unsigned long esr);
u16 imm;
u16 mask; /* These bits are ignored when comparing with imm */
};
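
The hook callbacks now receive the full 64-bit ESR as unsigned long; a hedged sketch of a kernel break hook against the updated prototype (the immediate is illustrative):

    static int demo_brk_handler(struct pt_regs *regs, unsigned long esr)
    {
            return DBG_HOOK_HANDLED;
    }

    static struct break_hook demo_brk_hook = {
            .fn  = demo_brk_handler,
            .imm = 0x123,   /* illustrative BRK #imm to match */
    };
    /* registered via register_kernel_break_hook(&demo_brk_hook) */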
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 7f3c87f7a0ce..34ceff08cac4 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -107,7 +107,7 @@
isb // Make sure SRE is now set
mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
tbz x0, #0, .Lskip_gicv3_\@ // and check that it sticks
- msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
+ msr_s SYS_ICH_HCR_EL2, xzr // Reset ICH_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm
@@ -143,6 +143,50 @@
.Lskip_sve_\@:
.endm
+/* SME register access and priority mapping */
+.macro __init_el2_nvhe_sme
+ mrs x1, id_aa64pfr1_el1
+ ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
+ cbz x1, .Lskip_sme_\@
+
+ bic x0, x0, #CPTR_EL2_TSM // Also disable SME traps
+ msr cptr_el2, x0 // Disable copro. traps to EL2
+ isb
+
+ mrs x1, sctlr_el2
+ orr x1, x1, #SCTLR_ELx_ENTP2 // Disable TPIDR2 traps
+ msr sctlr_el2, x1
+ isb
+
+ mov x1, #0 // SMCR controls
+
+ mrs_s x2, SYS_ID_AA64SMFR0_EL1
+ ubfx x2, x2, #ID_AA64SMFR0_FA64_SHIFT, #1 // Full FP in SM?
+ cbz x2, .Lskip_sme_fa64_\@
+
+ orr x1, x1, SMCR_ELx_FA64_MASK
+.Lskip_sme_fa64_\@:
+
+ orr x1, x1, #SMCR_ELx_LEN_MASK // Enable full SME vector
+ msr_s SYS_SMCR_EL2, x1 // length for EL1.
+
+ mrs_s x1, SYS_SMIDR_EL1 // Priority mapping supported?
+ ubfx x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
+ cbz x1, .Lskip_sme_\@
+
+ msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal
+
+ mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present?
+ ubfx x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
+ cbz x1, .Lskip_sme_\@
+
+ mrs_s x1, SYS_HCRX_EL2
+ orr x1, x1, #HCRX_EL2_SMPME_MASK // Enable priority mapping
+ msr_s SYS_HCRX_EL2, x1
+
+.Lskip_sme_\@:
+.endm
+
/* Disable any fine grained traps */
.macro __init_el2_fgt
mrs x1, id_aa64mmfr0_el1
@@ -153,15 +197,26 @@
mrs x1, id_aa64dfr0_el1
ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
cmp x1, #3
- b.lt .Lset_fgt_\@
+ b.lt .Lset_debug_fgt_\@
/* Disable PMSNEVFR_EL1 read and write traps */
orr x0, x0, #(1 << 62)
-.Lset_fgt_\@:
+.Lset_debug_fgt_\@:
msr_s SYS_HDFGRTR_EL2, x0
msr_s SYS_HDFGWTR_EL2, x0
- msr_s SYS_HFGRTR_EL2, xzr
- msr_s SYS_HFGWTR_EL2, xzr
+
+ mov x0, xzr
+ mrs x1, id_aa64pfr1_el1
+ ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
+ cbz x1, .Lset_fgt_\@
+
+ /* Disable nVHE traps of TPIDR2 and SMPRI */
+ orr x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK
+ orr x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK
+
+.Lset_fgt_\@:
+ msr_s SYS_HFGRTR_EL2, x0
+ msr_s SYS_HFGWTR_EL2, x0
msr_s SYS_HFGITR_EL2, xzr
mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU
@@ -196,6 +251,7 @@
__init_el2_nvhe_idregs
__init_el2_nvhe_cptr
__init_el2_nvhe_sve
+ __init_el2_nvhe_sme
__init_el2_fgt
__init_el2_nvhe_prepare_eret
.endm
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index d52a0b269ee8..8f236de7359c 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -37,7 +37,8 @@
#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
/* Unallocated EC: 0x1B */
#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
-/* Unallocated EC: 0x1D - 0x1E */
+#define ESR_ELx_EC_SME (0x1D)
+/* Unallocated EC: 0x1E */
#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
@@ -75,6 +76,7 @@
#define ESR_ELx_IL_SHIFT (25)
#define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT)
#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
+#define ESR_ELx_ISS(esr) ((esr) & ESR_ELx_ISS_MASK)
/* ISS field definitions shared by different classes */
#define ESR_ELx_WNR_SHIFT (6)
@@ -136,7 +138,7 @@
#define ESR_ELx_WFx_ISS_TI (UL(1) << 0)
#define ESR_ELx_WFx_ISS_WFI (UL(0) << 0)
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
-#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
+#define ESR_ELx_xVC_IMM_MASK ((UL(1) << 16) - 1)
#define DISR_EL1_IDS (UL(1) << 24)
/*
@@ -327,17 +329,26 @@
#define ESR_ELx_CP15_32_ISS_SYS_CNTFRQ (ESR_ELx_CP15_32_ISS_SYS_VAL(0, 0, 14, 0) |\
ESR_ELx_CP15_32_ISS_DIR_READ)
+/*
+ * ISS values for SME traps
+ */
+
+#define ESR_ELx_SME_ISS_SME_DISABLED 0
+#define ESR_ELx_SME_ISS_ILL 1
+#define ESR_ELx_SME_ISS_SM_DISABLED 2
+#define ESR_ELx_SME_ISS_ZA_DISABLED 3
+
#ifndef __ASSEMBLY__
#include <asm/types.h>
-static inline bool esr_is_data_abort(u32 esr)
+static inline bool esr_is_data_abort(unsigned long esr)
{
- const u32 ec = ESR_ELx_EC(esr);
+ const unsigned long ec = ESR_ELx_EC(esr);
return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
}
-const char *esr_get_class_string(u32 esr);
+const char *esr_get_class_string(unsigned long esr);
#endif /* __ASSEMBLY */
#endif /* __ASM_ESR_H */
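
A hedged C sketch of decoding an SME trap with the new EC and ISS definitions (helper name illustrative):

    static bool sme_trap_reason(unsigned long esr, unsigned long *iss)
    {
            if (ESR_ELx_EC(esr) != ESR_ELx_EC_SME)
                    return false;
            /* 0 = SME disabled, 1 = illegal use, 2 = SM disabled, 3 = ZA disabled */
            *iss = ESR_ELx_ISS(esr);
            return true;
    }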
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 339477dca551..d94aecff9690 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -19,9 +19,9 @@
#define __exception_irq_entry __kprobes
#endif
-static inline u32 disr_to_esr(u64 disr)
+static inline unsigned long disr_to_esr(u64 disr)
{
- unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;
+ unsigned long esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;
if ((disr & DISR_EL1_IDS) == 0)
esr |= (disr & DISR_EL1_ESR_MASK);
@@ -57,23 +57,24 @@ asmlinkage void call_on_irq_stack(struct pt_regs *regs,
void (*func)(struct pt_regs *));
asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
-void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
+void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
void do_bti(struct pt_regs *regs);
-void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
+void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
struct pt_regs *regs);
-void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
-void do_sve_acc(unsigned int esr, struct pt_regs *regs);
-void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs);
-void do_sysinstr(unsigned int esr, struct pt_regs *regs);
-void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
-void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
-void do_cp15instr(unsigned int esr, struct pt_regs *regs);
+void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
+void do_sve_acc(unsigned long esr, struct pt_regs *regs);
+void do_sme_acc(unsigned long esr, struct pt_regs *regs);
+void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs);
+void do_sysinstr(unsigned long esr, struct pt_regs *regs);
+void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs);
+void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr);
+void do_cp15instr(unsigned long esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
-void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
-void do_serror(struct pt_regs *regs, unsigned int esr);
+void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr);
+void do_serror(struct pt_regs *regs, unsigned long esr);
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
-void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
+void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index cb24385e3632..9bb1873f5295 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -32,6 +32,18 @@
#define VFP_STATE_SIZE ((32 * 8) + 4)
#endif
+/*
+ * When we defined the maximum SVE vector length we defined the ABI so
+ * that the maximum vector length included all the bits reserved for
+ * future expansion in ZCR rather than just those currently defined by
+ * the architecture. While SME follows a similar pattern the fact that
+ * it includes a square matrix means that any allocations that attempt
+ * to cover the maximum potential vector length (such as happen with
+ * the regset used for ptrace) end up being extremely large. Define
+ * the much lower actual limit for use in such situations.
+ */
+#define SME_VQ_MAX 16
+
struct task_struct;
extern void fpsimd_save_state(struct user_fpsimd_state *state);
@@ -46,11 +58,23 @@ extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
- void *sve_state, unsigned int sve_vl);
+ void *sve_state, unsigned int sve_vl,
+ void *za_state, unsigned int sme_vl,
+ u64 *svcr);
extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);
+static inline bool thread_sm_enabled(struct thread_struct *thread)
+{
+ return system_supports_sme() && (thread->svcr & SVCR_SM_MASK);
+}
+
+static inline bool thread_za_enabled(struct thread_struct *thread)
+{
+ return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK);
+}
+
/* Maximum VL that SVE/SME VL-agnostic software can transparently support */
#define VL_ARCH_MAX 0x100
@@ -62,7 +86,14 @@ static inline size_t sve_ffr_offset(int vl)
static inline void *sve_pffr(struct thread_struct *thread)
{
- return (char *)thread->sve_state + sve_ffr_offset(thread_get_sve_vl(thread));
+ unsigned int vl;
+
+ if (system_supports_sme() && thread_sm_enabled(thread))
+ vl = thread_get_sme_vl(thread);
+ else
+ vl = thread_get_sve_vl(thread);
+
+ return (char *)thread->sve_state + sve_ffr_offset(vl);
}
extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
@@ -71,11 +102,17 @@ extern void sve_load_state(void const *state, u32 const *pfpsr,
extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
extern void sve_set_vq(unsigned long vq_minus_1);
+extern void sme_set_vq(unsigned long vq_minus_1);
+extern void za_save_state(void *state);
+extern void za_load_state(void const *state);
struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern u64 read_zcr_features(void);
+extern u64 read_smcr_features(void);
/*
* Helpers to translate bit indices in sve_vq_map to VQ values (and
@@ -119,6 +156,7 @@ struct vl_info {
extern void sve_alloc(struct task_struct *task);
extern void fpsimd_release_task(struct task_struct *task);
extern void fpsimd_sync_to_sve(struct task_struct *task);
+extern void fpsimd_force_sync_to_sve(struct task_struct *task);
extern void sve_sync_to_fpsimd(struct task_struct *task);
extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);
@@ -171,6 +209,12 @@ static inline void write_vl(enum vec_type type, u64 val)
write_sysreg_s(tmp | val, SYS_ZCR_EL1);
break;
#endif
+#ifdef CONFIG_ARM64_SME
+ case ARM64_VEC_SME:
+ tmp = read_sysreg_s(SYS_SMCR_EL1) & ~SMCR_ELx_LEN_MASK;
+ write_sysreg_s(tmp | val, SYS_SMCR_EL1);
+ break;
+#endif
default:
WARN_ON_ONCE(1);
break;
@@ -208,6 +252,8 @@ static inline bool sve_vq_available(unsigned int vq)
return vq_available(ARM64_VEC_SVE, vq);
}
+size_t sve_state_size(struct task_struct const *task);
+
#else /* ! CONFIG_ARM64_SVE */
static inline void sve_alloc(struct task_struct *task) { }
@@ -247,8 +293,93 @@ static inline void vec_update_vq_map(enum vec_type t) { }
static inline int vec_verify_vq_map(enum vec_type t) { return 0; }
static inline void sve_setup(void) { }
+static inline size_t sve_state_size(struct task_struct const *task)
+{
+ return 0;
+}
+
#endif /* ! CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_SME
+
+static inline void sme_user_disable(void)
+{
+ sysreg_clear_set(cpacr_el1, CPACR_EL1_SMEN_EL0EN, 0);
+}
+
+static inline void sme_user_enable(void)
+{
+ sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_SMEN_EL0EN);
+}
+
+static inline void sme_smstart_sm(void)
+{
+ asm volatile(__msr_s(SYS_SVCR_SMSTART_SM_EL0, "xzr"));
+}
+
+static inline void sme_smstop_sm(void)
+{
+ asm volatile(__msr_s(SYS_SVCR_SMSTOP_SM_EL0, "xzr"));
+}
+
+static inline void sme_smstop(void)
+{
+ asm volatile(__msr_s(SYS_SVCR_SMSTOP_SMZA_EL0, "xzr"));
+}
+
+extern void __init sme_setup(void);
+
+static inline int sme_max_vl(void)
+{
+ return vec_max_vl(ARM64_VEC_SME);
+}
+
+static inline int sme_max_virtualisable_vl(void)
+{
+ return vec_max_virtualisable_vl(ARM64_VEC_SME);
+}
+
+extern void sme_alloc(struct task_struct *task);
+extern unsigned int sme_get_vl(void);
+extern int sme_set_current_vl(unsigned long arg);
+extern int sme_get_current_vl(void);
+
+/*
+ * Return how many bytes of memory are required to store the full SME
+ * specific state (currently just ZA) for task, given task's currently
+ * configured vector length.
+ */
+static inline size_t za_state_size(struct task_struct const *task)
+{
+ unsigned int vl = task_get_sme_vl(task);
+
+ return ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));
+}
+
+#else
+
+static inline void sme_user_disable(void) { BUILD_BUG(); }
+static inline void sme_user_enable(void) { BUILD_BUG(); }
+
+static inline void sme_smstart_sm(void) { }
+static inline void sme_smstop_sm(void) { }
+static inline void sme_smstop(void) { }
+
+static inline void sme_alloc(struct task_struct *task) { }
+static inline void sme_setup(void) { }
+static inline unsigned int sme_get_vl(void) { return 0; }
+static inline int sme_max_vl(void) { return 0; }
+static inline int sme_max_virtualisable_vl(void) { return 0; }
+static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
+static inline int sme_get_current_vl(void) { return -EINVAL; }
+
+static inline size_t za_state_size(struct task_struct const *task)
+{
+ return 0;
+}
+
+#endif /* ! CONFIG_ARM64_SME */
+
/* For use by EFI runtime services calls only */
extern void __efi_fpsimd_begin(void);
extern void __efi_fpsimd_end(void);
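To put concrete numbers on the SME_VQ_MAX comment above: ZA is a square byte matrix of VL x VL, so sizing a buffer for the full architectural VQ range would be enormous. A quick user-space sketch of the arithmetic, not part of the patch; the 512 ceiling is the sigcontext ABI's SVE_VQ_MAX and is an assumption of this sketch:

#include <stdio.h>

#define VQ_BYTES 16UL			/* one quadword = 128 bits */

static unsigned long za_bytes(unsigned long vq)
{
	unsigned long vl = vq * VQ_BYTES;	/* vector length in bytes */

	return vl * vl;				/* ZA: VL x VL byte matrix */
}

int main(void)
{
	printf("vq=512: %lu MiB per thread\n", za_bytes(512) >> 20); /* 64 */
	printf("vq=16:  %lu KiB per thread\n", za_bytes(16) >> 10);  /* 64 */
	return 0;
}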
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 2509d7dde55a..5e0910cf4832 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -93,6 +93,12 @@
.endif
.endm
+.macro _sme_check_wv v
+ .if (\v) < 12 || (\v) > 15
+ .error "Bad vector select register \v."
+ .endif
+.endm
+
/* SVE instruction encodings for non-SVE-capable assemblers */
/* (pre binutils 2.28, all kernel capable clang versions support SVE) */
@@ -174,6 +180,54 @@
| (\np)
.endm
+/* SME instruction encodings for non-SME-capable assemblers */
+/* (pre binutils 2.38/LLVM 13) */
+
+/* RDSVL X\nx, #\imm */
+.macro _sme_rdsvl nx, imm
+ _check_general_reg \nx
+ _check_num (\imm), -0x20, 0x1f
+ .inst 0x04bf5800 \
+ | (\nx) \
+ | (((\imm) & 0x3f) << 5)
+.endm
+
+/*
+ * STR (vector from ZA array):
+ * STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
+ */
+.macro _sme_str_zav nw, nxbase, offset=0
+ _sme_check_wv \nw
+ _check_general_reg \nxbase
+ _check_num (\offset), -0x100, 0xff
+ .inst 0xe1200000 \
+ | (((\nw) & 3) << 13) \
+ | ((\nxbase) << 5) \
+ | ((\offset) & 7)
+.endm
+
+/*
+ * LDR (vector to ZA array):
+ * LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
+ */
+.macro _sme_ldr_zav nw, nxbase, offset=0
+ _sme_check_wv \nw
+ _check_general_reg \nxbase
+ _check_num (\offset), -0x100, 0xff
+ .inst 0xe1000000 \
+ | (((\nw) & 3) << 13) \
+ | ((\nxbase) << 5) \
+ | ((\offset) & 7)
+.endm
+
+/*
+ * Zero the entire ZA array
+ * ZERO ZA
+ */
+.macro zero_za
+ .inst 0xc00800ff
+.endm
+
.macro __for from:req, to:req
.if (\from) == (\to)
_for__body %\from
@@ -208,6 +262,17 @@
921:
.endm
+/* Update SMCR_EL1.LEN with the new VQ */
+.macro sme_load_vq xvqminus1, xtmp, xtmp2
+ mrs_s \xtmp, SYS_SMCR_EL1
+ bic \xtmp2, \xtmp, SMCR_ELx_LEN_MASK
+ orr \xtmp2, \xtmp2, \xvqminus1
+ cmp \xtmp2, \xtmp
+ b.eq 921f
+ msr_s SYS_SMCR_EL1, \xtmp2 //self-synchronising
+921:
+.endm
+
/* Preserve the first 128-bits of Znz and zero the rest. */
.macro _sve_flush_z nz
_sve_check_zreg \nz
@@ -254,3 +319,25 @@
ldr w\nxtmp, [\xpfpsr, #4]
msr fpcr, x\nxtmp
.endm
+
+.macro sme_save_za nxbase, xvl, nw
+ mov w\nw, #0
+
+423:
+ _sme_str_zav \nw, \nxbase
+ add x\nxbase, x\nxbase, \xvl
+ add x\nw, x\nw, #1
+ cmp \xvl, x\nw
+ bne 423b
+.endm
+
+.macro sme_load_za nxbase, xvl, nw
+ mov w\nw, #0
+
+423:
+ _sme_ldr_zav \nw, \nxbase
+ add x\nxbase, x\nxbase, \xvl
+ add x\nw, x\nw, #1
+ cmp \xvl, x\nw
+ bne 423b
+.endm
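For readers not fluent in the hand-rolled encodings, this is roughly what the sme_save_za loop above does, rendered as C over an ordinary buffer. Illustrative only: the real _sme_str_zav stores straight out of the ZA array and selects slices via W12-W15, so there is no C-visible source operand.

#include <string.h>

/* one ZA horizontal slice of VL bytes per iteration, VL slices total */
static void sme_save_za_sketch(unsigned char *dst, const unsigned char *za,
			       unsigned long vl)
{
	unsigned long slice;

	for (slice = 0; slice != vl; slice++)	/* cmp \xvl, x\nw; bne */
		memcpy(dst + slice * vl, za + slice * vl, vl);
}

int main(void)
{
	unsigned char za[16 * 16] = { 1 }, out[16 * 16];	/* toy vl = 16 */

	sme_save_za_sketch(out, za, 16);
	return out[0] == za[0] ? 0 : 1;
}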
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 1494cfa8639b..dbc45a4157fa 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -80,8 +80,15 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
struct dyn_ftrace;
+struct ftrace_ops;
+struct ftrace_regs;
+
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop
+
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+#define ftrace_graph_func ftrace_graph_func
#endif
#define ftrace_return_address(n) return_address(n)
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 1242f71937f8..d656822b13f1 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -44,6 +44,8 @@ extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
#define __HAVE_ARCH_HUGE_PTE_CLEAR
extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz);
+#define __HAVE_ARCH_HUGE_PTEP_GET
+extern pte_t huge_ptep_get(pte_t *ptep);
extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned long sz);
#define set_huge_swap_pte_at set_huge_swap_pte_at
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 8db5ec0089db..9f0ce004fdbc 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -109,6 +109,14 @@
#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP)
#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES)
#define KERNEL_HWCAP_MTE3 __khwcap2_feature(MTE3)
+#define KERNEL_HWCAP_SME __khwcap2_feature(SME)
+#define KERNEL_HWCAP_SME_I16I64 __khwcap2_feature(SME_I16I64)
+#define KERNEL_HWCAP_SME_F64F64 __khwcap2_feature(SME_F64F64)
+#define KERNEL_HWCAP_SME_I8I32 __khwcap2_feature(SME_I8I32)
+#define KERNEL_HWCAP_SME_F16F32 __khwcap2_feature(SME_F16F32)
+#define KERNEL_HWCAP_SME_B16F32 __khwcap2_feature(SME_B16F32)
+#define KERNEL_HWCAP_SME_F32F32 __khwcap2_feature(SME_F32F32)
+#define KERNEL_HWCAP_SME_FA64 __khwcap2_feature(SME_FA64)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 1e5760d567ae..6aa2dc836db1 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -201,6 +201,8 @@ enum aarch64_insn_size_type {
enum aarch64_insn_ldst_type {
AARCH64_INSN_LDST_LOAD_REG_OFFSET,
AARCH64_INSN_LDST_STORE_REG_OFFSET,
+ AARCH64_INSN_LDST_LOAD_IMM_OFFSET,
+ AARCH64_INSN_LDST_STORE_IMM_OFFSET,
AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX,
AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
@@ -335,6 +337,7 @@ __AARCH64_INSN_FUNCS(load_pre, 0x3FE00C00, 0x38400C00)
__AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400)
__AARCH64_INSN_FUNCS(load_post, 0x3FE00C00, 0x38400400)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
+__AARCH64_INSN_FUNCS(str_imm, 0x3FC00000, 0x39000000)
__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldclr, 0x3F20FC00, 0x38201000)
__AARCH64_INSN_FUNCS(ldeor, 0x3F20FC00, 0x38202000)
@@ -342,6 +345,7 @@ __AARCH64_INSN_FUNCS(ldset, 0x3F20FC00, 0x38203000)
__AARCH64_INSN_FUNCS(swp, 0x3F20FC00, 0x38208000)
__AARCH64_INSN_FUNCS(cas, 0x3FA07C00, 0x08A07C00)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
+__AARCH64_INSN_FUNCS(ldr_imm, 0x3FC00000, 0x39400000)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
__AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000)
@@ -501,6 +505,11 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
enum aarch64_insn_register offset,
enum aarch64_insn_size_type size,
enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ unsigned int imm,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type);
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
enum aarch64_insn_register reg2,
enum aarch64_insn_register base,
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 7fd836bea7eb..3995652daf81 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -192,4 +192,8 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
+
#endif /* __ASM_IO_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 1767ded83888..13ae232ec4a1 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -279,6 +279,7 @@
#define CPTR_EL2_TCPAC (1U << 31)
#define CPTR_EL2_TAM (1 << 30)
#define CPTR_EL2_TTA (1 << 20)
+#define CPTR_EL2_TSM (1 << 12)
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
#define CPTR_EL2_TZ (1 << 8)
#define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index d62405ce3e6d..08233172e7a9 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -40,13 +40,26 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
+#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.hcr_el2 & HCR_RW);
}
+#else
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
+ &kvm->arch.flags));
+
+ return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
+}
+#endif
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
@@ -72,15 +85,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 |= HCR_TVM;
}
- if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+ if (vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 &= ~HCR_RW;
-
- /*
- * TID3: trap feature register accesses that we virtualise.
- * For now this is conditional, since no AArch32 feature regs
- * are currently virtualised.
- */
- if (!vcpu_el1_is_32bit(vcpu))
+ else
+ /*
+ * TID3: trap feature register accesses that we virtualise.
+ * For now this is conditional, since no AArch32 feature regs
+ * are currently virtualised.
+ */
vcpu->arch.hcr_el2 |= HCR_TID3;
if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
@@ -224,14 +236,14 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
return mode != PSR_MODE_EL0t;
}
-static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
+static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.esr_el2;
}
static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
- u32 esr = kvm_vcpu_get_esr(vcpu);
+ u64 esr = kvm_vcpu_get_esr(vcpu);
if (esr & ESR_ELx_CV)
return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -362,7 +374,7 @@ static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
- u32 esr = kvm_vcpu_get_esr(vcpu);
+ u64 esr = kvm_vcpu_get_esr(vcpu);
return ESR_ELx_SYS64_ISS_RT(esr);
}
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e3b25dc6c367..d5888dedf02a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -127,6 +127,16 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_MTE_ENABLED 1
/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE 2
+ /*
+ * The following two bits are used to indicate the guest's EL1
+ * register width configuration. The KVM_ARCH_FLAG_EL1_32BIT bit
+ * is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
+ * Otherwise, the guest's EL1 register width has not yet been
+ * determined.
+ */
+#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED 3
+#define KVM_ARCH_FLAG_EL1_32BIT 4
+
unsigned long flags;
/*
@@ -143,7 +153,7 @@ struct kvm_arch {
};
struct kvm_vcpu_fault_info {
- u32 esr_el2; /* Hyp Syndrom Register */
+ u64 esr_el2; /* Hyp Syndrome Register */
u64 far_el2; /* Hyp Fault Address Register */
u64 hpfar_el2; /* Hyp IPA Fault Address Register */
u64 disr_el1; /* Deferred [SError] Status Register */
@@ -285,8 +295,11 @@ struct vcpu_reset_state {
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
+
+ /* Guest floating point state */
void *sve_state;
unsigned int sve_max_vl;
+ u64 svcr;
/* Stage 2 paging state used by the hardware on next switch */
struct kvm_s2_mmu *hw_mmu;
@@ -441,6 +454,7 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */
#define KVM_ARM64_FP_FOREIGN_FPSTATE (1 << 14)
#define KVM_ARM64_ON_UNSUPPORTED_CPU (1 << 15) /* Physical CPU not in supported_cpus */
+#define KVM_ARM64_HOST_SME_ENABLED (1 << 16) /* SME enabled for EL0 */
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
KVM_GUESTDBG_USE_SW_BP | \
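The REG_WIDTH_CONFIGURED/EL1_32BIT pair added above is a "valid bit plus value bit" idiom: the value bit means nothing until the configured bit is set. A stand-alone sketch of that contract, with the assert standing in for the WARN_ON_ONCE() in vcpu_el1_is_32bit():

#include <assert.h>
#include <stdbool.h>

#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED	3
#define KVM_ARCH_FLAG_EL1_32BIT			4

static bool el1_is_32bit(unsigned long flags)
{
	/* the value bit is meaningless until the width has been chosen */
	assert(flags & (1UL << KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED));
	return flags & (1UL << KVM_ARCH_FLAG_EL1_32BIT);
}

int main(void)
{
	unsigned long flags = 1UL << KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED;

	return el1_is_32bit(flags);	/* 0: a 64-bit EL1 guest */
}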
diff --git a/arch/arm64/include/asm/kvm_ras.h b/arch/arm64/include/asm/kvm_ras.h
index 8ac6ee77437c..87e10d9a635b 100644
--- a/arch/arm64/include/asm/kvm_ras.h
+++ b/arch/arm64/include/asm/kvm_ras.h
@@ -14,7 +14,7 @@
* Was this synchronous external abort a RAS notification?
* Returns '0' for errors handled by some RAS subsystem, or -ENOENT.
*/
-static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
+static inline int kvm_handle_guest_sea(phys_addr_t addr, u64 esr)
{
/* apei_claim_sea(NULL) expects to mask interrupts itself */
lockdep_assert_irqs_enabled();
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index adcb937342f1..aa523591a44e 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -47,6 +47,7 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
+size_t mte_probe_user_range(const char __user *uaddr, size_t size);
#else /* CONFIG_ARM64_MTE */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 66671ff05183..dd3d12bce07b 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -49,7 +49,7 @@
#define PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PTRS_PER_PMD PTRS_PER_PTE
+#define PTRS_PER_PMD (1 << (PAGE_SHIFT - 3))
#endif
/*
@@ -59,7 +59,7 @@
#define PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
-#define PTRS_PER_PUD PTRS_PER_PTE
+#define PTRS_PER_PUD (1 << (PAGE_SHIFT - 3))
#endif
/*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 94e147e5456c..45c358538f13 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -535,7 +535,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PMD_TYPE_TABLE)
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
-#define pmd_leaf(pmd) pmd_sect(pmd)
+#define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd) (!pmd_table(pmd))
#define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
@@ -625,7 +625,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!pud_table(pud))
#define pud_present(pud) pte_present(pud_pte(pud))
-#define pud_leaf(pud) pud_sect(pud)
+#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
#define pud_valid(pud) pte_valid(pud_pte(pud))
static inline void set_pud(pud_t *pudp, pud_t pud)
@@ -1001,7 +1001,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
*/
static inline bool arch_faults_on_old_pte(void)
{
- WARN_ON(preemptible());
+ /* The register read below requires a stable CPU to make any sense */
+ cant_migrate();
return !cpu_has_hw_af();
}
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 73e38d9a540c..bf8aafee1eac 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -118,6 +118,7 @@ struct debug_info {
enum vec_type {
ARM64_VEC_SVE = 0,
+ ARM64_VEC_SME,
ARM64_VEC_MAX,
};
@@ -153,6 +154,7 @@ struct thread_struct {
unsigned int fpsimd_cpu;
void *sve_state; /* SVE registers, if any */
+ void *za_state; /* ZA register, if any */
unsigned int vl[ARM64_VEC_MAX]; /* vector length */
unsigned int vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
unsigned long fault_address; /* fault info */
@@ -168,6 +170,8 @@ struct thread_struct {
u64 mte_ctrl;
#endif
u64 sctlr_user;
+ u64 svcr;
+ u64 tpidr2_el0;
};
static inline unsigned int thread_get_vl(struct thread_struct *thread,
@@ -181,6 +185,19 @@ static inline unsigned int thread_get_sve_vl(struct thread_struct *thread)
return thread_get_vl(thread, ARM64_VEC_SVE);
}
+static inline unsigned int thread_get_sme_vl(struct thread_struct *thread)
+{
+ return thread_get_vl(thread, ARM64_VEC_SME);
+}
+
+static inline unsigned int thread_get_cur_vl(struct thread_struct *thread)
+{
+ if (system_supports_sme() && (thread->svcr & SVCR_SM_MASK))
+ return thread_get_sme_vl(thread);
+ else
+ return thread_get_sve_vl(thread);
+}
+
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type);
void task_set_vl(struct task_struct *task, enum vec_type type,
unsigned long vl);
@@ -194,6 +211,11 @@ static inline unsigned int task_get_sve_vl(const struct task_struct *task)
return task_get_vl(task, ARM64_VEC_SVE);
}
+static inline unsigned int task_get_sme_vl(const struct task_struct *task)
+{
+ return task_get_vl(task, ARM64_VEC_SME);
+}
+
static inline void task_set_sve_vl(struct task_struct *task, unsigned long vl)
{
task_set_vl(task, ARM64_VEC_SVE, vl);
@@ -354,9 +376,11 @@ extern void __init minsigstksz_setup(void);
*/
#include <asm/fpsimd.h>
-/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
+/* Userspace interface for PR_S[MV]E_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
#define SVE_GET_VL() sve_get_current_vl()
+#define SME_SET_VL(arg) sme_set_current_vl(arg)
+#define SME_GET_VL() sme_get_current_vl()
/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg)
@@ -381,12 +405,10 @@ long get_tagged_addr_ctrl(struct task_struct *task);
* of header definitions for the use of task_stack_page.
*/
-#define current_top_of_stack() \
-({ \
- struct stack_info _info; \
- BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info)); \
- _info.high; \
-})
+/*
+ * The top of the current task's task stack
+ */
+#define current_top_of_stack() ((unsigned long)current->stack + THREAD_SIZE)
#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL))
#endif /* __ASSEMBLY__ */
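thread_get_cur_vl() above encodes the core SME rule: PSTATE.SM selects which of the two configured vector lengths is live. Restated in user-space terms, with SVCR_SM_MASK assumed to be bit 0 as in the architected SVCR layout:

#include <stdio.h>

#define SVCR_SM_MASK	0x1UL	/* PSTATE.SM: streaming mode enabled */

static unsigned int cur_vl(unsigned long svcr,
			   unsigned int sve_vl, unsigned int sme_vl)
{
	return (svcr & SVCR_SM_MASK) ? sme_vl : sve_vl;
}

int main(void)
{
	printf("%u\n", cur_vl(0, 32, 64));	/* 32: non-streaming SVE VL */
	printf("%u\n", cur_vl(1, 32, 64));	/* 64: streaming (SME) VL */
	return 0;
}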
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index e77cdef9ca29..aec9315bf156 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -31,38 +31,6 @@ struct stack_info {
enum stack_type type;
};
-/*
- * A snapshot of a frame record or fp/lr register values, along with some
- * accounting information necessary for robust unwinding.
- *
- * @fp: The fp value in the frame record (or the real fp)
- * @pc: The lr value in the frame record (or the real lr)
- *
- * @stacks_done: Stacks which have been entirely unwound, for which it is no
- * longer valid to unwind to.
- *
- * @prev_fp: The fp that pointed to this frame record, or a synthetic value
- * of 0. This is used to ensure that within a stack, each
- * subsequent frame record is at an increasing address.
- * @prev_type: The type of stack this frame record was on, or a synthetic
- * value of STACK_TYPE_UNKNOWN. This is used to detect a
- * transition from one stack to another.
- *
- * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
- * associated with the most recently encountered replacement lr
- * value.
- */
-struct stackframe {
- unsigned long fp;
- unsigned long pc;
- DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
- unsigned long prev_fp;
- enum stack_type prev_type;
-#ifdef CONFIG_KRETPROBES
- struct llist_node *kr_cur;
-#endif
-};
-
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
const char *loglvl);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index fbf5f8bb9055..55f998c3dc28 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -115,9 +115,21 @@
#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
/*
+ * Automatically generated definitions for system registers; the
+ * manual encodings below are in the process of being converted to
+ * come from here. The header relies on the definition of sys_reg()
+ * earlier in this file.
+ */
+#include <asm/sysreg-defs.h>
+
+/*
* System registers, organised loosely by encoding but grouped together
* where the architected name contains an index. e.g. ID_MMFR<n>_EL1.
*/
+#define SYS_SVCR_SMSTOP_SM_EL0 sys_reg(0, 3, 4, 2, 3)
+#define SYS_SVCR_SMSTART_SM_EL0 sys_reg(0, 3, 4, 3, 3)
+#define SYS_SVCR_SMSTOP_SMZA_EL0 sys_reg(0, 3, 4, 6, 3)
+
#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2)
#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0)
#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2)
@@ -181,6 +193,7 @@
#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0)
#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1)
#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4)
+#define SYS_ID_AA64SMFR0_EL1 sys_reg(3, 0, 0, 4, 5)
#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0)
#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1)
@@ -188,7 +201,6 @@
#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4)
#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5)
-#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2)
@@ -196,17 +208,12 @@
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
-#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0)
#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
-#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2)
#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
-#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1)
-#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0)
-#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
#define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0)
@@ -242,7 +249,6 @@
#define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0)
#define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1)
-#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
#define SYS_PAR_EL1_F BIT(0)
@@ -441,7 +447,6 @@
#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
-#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1)
#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4)
#define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7)
@@ -449,11 +454,12 @@
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
-#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
-#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0)
+#define SMIDR_EL1_IMPLEMENTER_SHIFT 24
+#define SMIDR_EL1_SMPS_SHIFT 15
+#define SMIDR_EL1_AFFINITY_SHIFT 0
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
@@ -477,6 +483,7 @@
#define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2)
#define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3)
+#define SYS_TPIDR2_EL0 sys_reg(3, 3, 13, 0, 5)
#define SYS_SCXTNUM_EL0 sys_reg(3, 3, 13, 0, 7)
@@ -544,9 +551,8 @@
#define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4)
#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5)
#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6)
-#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
-#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
+#define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2)
#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
@@ -557,7 +563,6 @@
#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
#define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0)
-#define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0)
#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1)
#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
@@ -603,8 +608,6 @@
/* VHE encodings for architectural EL0/1 system registers */
#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0)
-#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2)
-#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0)
#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0)
#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1)
#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2)
@@ -614,11 +617,9 @@
#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1)
#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0)
#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0)
-#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0)
#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0)
#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0)
#define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0)
-#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1)
#define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0)
#define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0)
#define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1)
@@ -628,31 +629,30 @@
#define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2)
/* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_ENTP2 (BIT(60))
#define SCTLR_ELx_DSSBS (BIT(44))
#define SCTLR_ELx_ATA (BIT(43))
-#define SCTLR_ELx_TCF_SHIFT 40
-#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT)
-#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT)
-#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
-#define SCTLR_ELx_TCF_ASYMM (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
-#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
-
#define SCTLR_ELx_ENIA_SHIFT 31
-#define SCTLR_ELx_ITFSB (BIT(37))
-#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT))
-#define SCTLR_ELx_ENIB (BIT(30))
-#define SCTLR_ELx_ENDA (BIT(27))
-#define SCTLR_ELx_EE (BIT(25))
-#define SCTLR_ELx_IESB (BIT(21))
-#define SCTLR_ELx_WXN (BIT(19))
-#define SCTLR_ELx_ENDB (BIT(13))
-#define SCTLR_ELx_I (BIT(12))
-#define SCTLR_ELx_SA (BIT(3))
-#define SCTLR_ELx_C (BIT(2))
-#define SCTLR_ELx_A (BIT(1))
-#define SCTLR_ELx_M (BIT(0))
+#define SCTLR_ELx_ITFSB (BIT(37))
+#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT))
+#define SCTLR_ELx_ENIB (BIT(30))
+#define SCTLR_ELx_LSMAOE (BIT(29))
+#define SCTLR_ELx_nTLSMD (BIT(28))
+#define SCTLR_ELx_ENDA (BIT(27))
+#define SCTLR_ELx_EE (BIT(25))
+#define SCTLR_ELx_EIS (BIT(22))
+#define SCTLR_ELx_IESB (BIT(21))
+#define SCTLR_ELx_TSCXT (BIT(20))
+#define SCTLR_ELx_WXN (BIT(19))
+#define SCTLR_ELx_ENDB (BIT(13))
+#define SCTLR_ELx_I (BIT(12))
+#define SCTLR_ELx_EOS (BIT(11))
+#define SCTLR_ELx_SA (BIT(3))
+#define SCTLR_ELx_C (BIT(2))
+#define SCTLR_ELx_A (BIT(1))
+#define SCTLR_ELx_M (BIT(0))
/* SCTLR_EL2 specific flags. */
#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \
@@ -674,34 +674,6 @@
(SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
/* SCTLR_EL1 specific flags. */
-#define SCTLR_EL1_EPAN (BIT(57))
-#define SCTLR_EL1_ATA0 (BIT(42))
-
-#define SCTLR_EL1_TCF0_SHIFT 38
-#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT)
-#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT)
-#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT)
-#define SCTLR_EL1_TCF0_ASYMM (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
-#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
-
-#define SCTLR_EL1_BT1 (BIT(36))
-#define SCTLR_EL1_BT0 (BIT(35))
-#define SCTLR_EL1_UCI (BIT(26))
-#define SCTLR_EL1_E0E (BIT(24))
-#define SCTLR_EL1_SPAN (BIT(23))
-#define SCTLR_EL1_NTWE (BIT(18))
-#define SCTLR_EL1_NTWI (BIT(16))
-#define SCTLR_EL1_UCT (BIT(15))
-#define SCTLR_EL1_DZE (BIT(14))
-#define SCTLR_EL1_UMA (BIT(9))
-#define SCTLR_EL1_SED (BIT(8))
-#define SCTLR_EL1_ITD (BIT(7))
-#define SCTLR_EL1_CP15BEN (BIT(5))
-#define SCTLR_EL1_SA0 (BIT(4))
-
-#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \
- (BIT(29)))
-
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
#else
@@ -709,13 +681,17 @@
#endif
#define INIT_SCTLR_EL1_MMU_OFF \
- (ENDIAN_SET_EL1 | SCTLR_EL1_RES1)
+ (ENDIAN_SET_EL1 | SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | \
+ SCTLR_EL1_EIS | SCTLR_EL1_TSCXT | SCTLR_EL1_EOS)
#define INIT_SCTLR_EL1_MMU_ON \
- (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \
- SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
- SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
- ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_EPAN | SCTLR_EL1_RES1)
+ (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | \
+ SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I | \
+ SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_nTWE | \
+ SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
+ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_EPAN | \
+ SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | SCTLR_EL1_EIS | \
+ SCTLR_EL1_TSCXT | SCTLR_EL1_EOS)
/* MAIR_ELx memory attributes (used by Linux) */
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
@@ -728,25 +704,6 @@
/* Position the attr at the correct index */
#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
-/* id_aa64isar0 */
-#define ID_AA64ISAR0_RNDR_SHIFT 60
-#define ID_AA64ISAR0_TLB_SHIFT 56
-#define ID_AA64ISAR0_TS_SHIFT 52
-#define ID_AA64ISAR0_FHM_SHIFT 48
-#define ID_AA64ISAR0_DP_SHIFT 44
-#define ID_AA64ISAR0_SM4_SHIFT 40
-#define ID_AA64ISAR0_SM3_SHIFT 36
-#define ID_AA64ISAR0_SHA3_SHIFT 32
-#define ID_AA64ISAR0_RDM_SHIFT 28
-#define ID_AA64ISAR0_ATOMICS_SHIFT 20
-#define ID_AA64ISAR0_CRC32_SHIFT 16
-#define ID_AA64ISAR0_SHA2_SHIFT 12
-#define ID_AA64ISAR0_SHA1_SHIFT 8
-#define ID_AA64ISAR0_AES_SHIFT 4
-
-#define ID_AA64ISAR0_TLB_RANGE_NI 0x0
-#define ID_AA64ISAR0_TLB_RANGE 0x2
-
/* id_aa64isar1 */
#define ID_AA64ISAR1_I8MM_SHIFT 52
#define ID_AA64ISAR1_DGH_SHIFT 48
@@ -836,6 +793,7 @@
#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2
/* id_aa64pfr1 */
+#define ID_AA64PFR1_SME_SHIFT 24
#define ID_AA64PFR1_MPAMFRAC_SHIFT 16
#define ID_AA64PFR1_RASFRAC_SHIFT 12
#define ID_AA64PFR1_MTE_SHIFT 8
@@ -846,6 +804,7 @@
#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
#define ID_AA64PFR1_BT_BTI 0x1
+#define ID_AA64PFR1_SME 1
#define ID_AA64PFR1_MTE_NI 0x0
#define ID_AA64PFR1_MTE_EL0 0x1
@@ -874,6 +833,23 @@
#define ID_AA64ZFR0_AES_PMULL 0x2
#define ID_AA64ZFR0_SVEVER_SVE2 0x1
+/* id_aa64smfr0 */
+#define ID_AA64SMFR0_FA64_SHIFT 63
+#define ID_AA64SMFR0_I16I64_SHIFT 52
+#define ID_AA64SMFR0_F64F64_SHIFT 48
+#define ID_AA64SMFR0_I8I32_SHIFT 36
+#define ID_AA64SMFR0_F16F32_SHIFT 35
+#define ID_AA64SMFR0_B16F32_SHIFT 34
+#define ID_AA64SMFR0_F32F32_SHIFT 32
+
+#define ID_AA64SMFR0_FA64 0x1
+#define ID_AA64SMFR0_I16I64 0x4
+#define ID_AA64SMFR0_F64F64 0x1
+#define ID_AA64SMFR0_I8I32 0x4
+#define ID_AA64SMFR0_F16F32 0x1
+#define ID_AA64SMFR0_B16F32 0x1
+#define ID_AA64SMFR0_F32F32 0x1
+
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_ECV_SHIFT 60
#define ID_AA64MMFR0_FGT_SHIFT 56
@@ -926,6 +902,7 @@
/* id_aa64mmfr1 */
#define ID_AA64MMFR1_ECBHB_SHIFT 60
#define ID_AA64MMFR1_AFP_SHIFT 44
+#define ID_AA64MMFR1_HCX_SHIFT 40
#define ID_AA64MMFR1_ETS_SHIFT 36
#define ID_AA64MMFR1_TWED_SHIFT 32
@@ -1110,18 +1087,12 @@
#define DCZID_DZP_SHIFT 4
#define DCZID_BS_SHIFT 0
-/*
- * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which
- * are reserved by the SVE architecture for future expansion of the LEN
- * field, with compatible semantics.
- */
-#define ZCR_ELx_LEN_SHIFT 0
-#define ZCR_ELx_LEN_SIZE 9
-#define ZCR_ELx_LEN_MASK 0x1ff
-
#define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */
#define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */
+#define CPACR_EL1_SMEN_EL1EN (BIT(24)) /* enable EL1 access */
+#define CPACR_EL1_SMEN_EL0EN (BIT(25)) /* enable EL0 access, if EL1EN set */
+
#define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */
#define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */
@@ -1170,6 +1141,8 @@
#define TRFCR_ELx_ExTRE BIT(1)
#define TRFCR_ELx_E0TRE BIT(0)
+/* HCRX_EL2 definitions */
+#define HCRX_EL2_SMPME_MASK (1 << 5)
/* GIC Hypervisor interface registers */
/* ICH_MISR_EL2 bit definitions */
@@ -1233,6 +1206,12 @@
#define ICH_VTR_TDS_SHIFT 19
#define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT)
+/* HFG[WR]TR_EL2 bit definitions */
+#define HFGxTR_EL2_nTPIDR2_EL0_SHIFT 55
+#define HFGxTR_EL2_nTPIDR2_EL0_MASK BIT_MASK(HFGxTR_EL2_nTPIDR2_EL0_SHIFT)
+#define HFGxTR_EL2_nSMPRI_EL1_SHIFT 54
+#define HFGxTR_EL2_nSMPRI_EL1_MASK BIT_MASK(HFGxTR_EL2_nSMPRI_EL1_SHIFT)
+
#define ARM64_FEATURE_FIELD_BITS 4
/* Create a mask for the feature bits of the specified feature. */
@@ -1345,4 +1324,10 @@
#endif
+#define SYS_FIELD_PREP(reg, field, val) \
+ FIELD_PREP(reg##_##field##_MASK, val)
+
+#define SYS_FIELD_PREP_ENUM(reg, field, val) \
+ FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)
+
#endif /* __ASM_SYSREG_H */
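SYS_FIELD_PREP()/SYS_FIELD_PREP_ENUM() are thin token-pasting wrappers around FIELD_PREP(); here is a compilable sketch with a made-up REG_FIELD mask. The kernel's FIELD_PREP lives in <linux/bitfield.h>; the minimal reimplementation below is only for illustration:

#include <stdio.h>

#define FIELD_PREP(mask, val) \
	(((unsigned long long)(val) << __builtin_ctzll(mask)) & (mask))

/* hypothetical 4-bit field at bits [11:8] with one named value */
#define REG_FIELD_MASK		0xf00ULL
#define REG_FIELD_SOMEVAL	0x5ULL

#define SYS_FIELD_PREP(reg, field, val) \
	FIELD_PREP(reg##_##field##_MASK, val)
#define SYS_FIELD_PREP_ENUM(reg, field, val) \
	FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)

int main(void)
{
	printf("%#llx\n", SYS_FIELD_PREP(REG, FIELD, 0x5));	      /* 0x500 */
	printf("%#llx\n", SYS_FIELD_PREP_ENUM(REG, FIELD, SOMEVAL)); /* 0x500 */
	return 0;
}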
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 305a7157c6a6..0eb7709422e2 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -23,9 +23,9 @@ void die(const char *msg, struct pt_regs *regs, int err);
struct siginfo;
void arm64_notify_die(const char *str, struct pt_regs *regs,
int signo, int sicode, unsigned long far,
- int err);
+ unsigned long err);
-void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
+void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned long,
struct pt_regs *),
int sig, int code, const char *name);
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index e1317b7c4525..848739c15de8 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -82,6 +82,8 @@ int arch_dup_task_struct(struct task_struct *dst,
#define TIF_SVE_VL_INHERIT 24 /* Inherit SVE vl_onexec across exec */
#define TIF_SSBD 25 /* Wants SSB mitigation */
#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */
+#define TIF_SME 27 /* SME in use */
+#define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 54f32a0675df..6e5826470bea 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -24,7 +24,7 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
-void force_signal_inject(int signal, int code, unsigned long address, unsigned int err);
+void force_signal_inject(int signal, int code, unsigned long address, unsigned long err);
void arm64_notify_segfault(unsigned long addr);
void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *str);
void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str);
@@ -57,7 +57,7 @@ static inline int in_entry_text(unsigned long ptr)
* errors share the same encoding as an all-zeros encoding from a CPU that
* doesn't support RAS.
*/
-static inline bool arm64_is_ras_serror(u32 esr)
+static inline bool arm64_is_ras_serror(unsigned long esr)
{
WARN_ON(preemptible());
@@ -77,9 +77,9 @@ static inline bool arm64_is_ras_serror(u32 esr)
* We treat them as Uncontainable.
* Non-RAS SError's are reported as Uncontained/Uncategorized.
*/
-static inline u32 arm64_ras_serror_get_severity(u32 esr)
+static inline unsigned long arm64_ras_serror_get_severity(unsigned long esr)
{
- u32 aet = esr & ESR_ELx_AET;
+ unsigned long aet = esr & ESR_ELx_AET;
if (!arm64_is_ras_serror(esr)) {
/* Not a RAS error, we can't interpret the ESR. */
@@ -98,6 +98,6 @@ static inline u32 arm64_ras_serror_get_severity(u32 esr)
return aet;
}
-bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr);
-void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr);
+bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr);
+void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr);
#endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index e8dce0cc5eaa..63f9c828f1a7 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -460,4 +460,19 @@ static inline int __copy_from_user_flushcache(void *dst, const void __user *src,
}
#endif
+#ifdef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
+
+/*
+ * Return 0 on success, the number of bytes not probed otherwise.
+ */
+static inline size_t probe_subpage_writeable(const char __user *uaddr,
+ size_t size)
+{
+ if (!system_supports_mte())
+ return 0;
+ return mte_probe_user_range(uaddr, size);
+}
+
+#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
+
#endif /* __ASM_UACCESS_H */
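The contract of probe_subpage_writeable() is the important part of the hunk above: 0 means the whole range probed writable, a non-zero return is the number of bytes that were not probed. A caller-shaped sketch with a stub standing in for the MTE-backed helper:

#include <stdio.h>
#include <string.h>

/* stub: the real helper walks MTE tags via mte_probe_user_range() */
static size_t probe_subpage_writeable(const char *uaddr, size_t size)
{
	(void)uaddr;
	(void)size;
	return 0;		/* pretend every byte probed clean */
}

int main(void)
{
	char buf[64];
	size_t not_probed = probe_subpage_writeable(buf, sizeof(buf));

	if (not_probed)		/* expect a short copy of this many bytes */
		printf("%zu bytes unprobed\n", not_probed);
	else
		memset(buf, 0, sizeof(buf));	/* safe to write it all */
	return 0;
}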
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
deleted file mode 100644
index 27e984977402..000000000000
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <xen/arm/page-coherent.h>
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 99cb5d383048..b0256cec63b5 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -79,5 +79,13 @@
#define HWCAP2_AFP (1 << 20)
#define HWCAP2_RPRES (1 << 21)
#define HWCAP2_MTE3 (1 << 22)
+#define HWCAP2_SME (1 << 23)
+#define HWCAP2_SME_I16I64 (1 << 24)
+#define HWCAP2_SME_F64F64 (1 << 25)
+#define HWCAP2_SME_I8I32 (1 << 26)
+#define HWCAP2_SME_F16F32 (1 << 27)
+#define HWCAP2_SME_B16F32 (1 << 28)
+#define HWCAP2_SME_F32F32 (1 << 29)
+#define HWCAP2_SME_FA64 (1 << 30)
#endif /* _UAPI__ASM_HWCAP_H */
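User space consumes these bits through the auxiliary vector; a minimal detection probe using glibc's getauxval(), with the bit values copied from the definitions above:

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP2_SME	(1 << 23)
#define HWCAP2_SME_FA64	(1 << 30)

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("SME:      %s\n", (hwcap2 & HWCAP2_SME) ? "yes" : "no");
	printf("SME FA64: %s\n", (hwcap2 & HWCAP2_SME_FA64) ? "yes" : "no");
	return 0;
}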
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index c1b6ddc02d2f..ab585359242d 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -139,8 +139,10 @@ struct kvm_guest_debug_arch {
__u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS];
};
+#define KVM_DEBUG_ARCH_HSR_HIGH_VALID (1 << 0)
struct kvm_debug_exit_arch {
__u32 hsr;
+ __u32 hsr_high; /* ESR_EL2[61:32] */
__u64 far; /* used for watchpoints */
};
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 758ae984ff97..7fa2f7036aa7 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -109,7 +109,7 @@ struct user_hwdebug_state {
} dbg_regs[16];
};
-/* SVE/FP/SIMD state (NT_ARM_SVE) */
+/* SVE/FP/SIMD state (NT_ARM_SVE & NT_ARM_SSVE) */
struct user_sve_header {
__u32 size; /* total meaningful regset content in bytes */
@@ -220,6 +220,7 @@ struct user_sve_header {
(SVE_PT_SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - \
SVE_PT_SVE_PREGS_OFFSET(vq))
+/* For streaming mode SVE (SSVE) FFR must be read and written as zero */
#define SVE_PT_SVE_FFR_OFFSET(vq) \
(SVE_PT_REGS_OFFSET + __SVE_FFR_OFFSET(vq))
@@ -240,10 +241,12 @@ struct user_sve_header {
- SVE_PT_SVE_OFFSET + (__SVE_VQ_BYTES - 1)) \
/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
-#define SVE_PT_SIZE(vq, flags) \
- (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \
- SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \
- : SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags))
+#define SVE_PT_SIZE(vq, flags) \
+ (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \
+ SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \
+ : ((((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD ? \
+ SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags) \
+ : SVE_PT_REGS_OFFSET)))
/* pointer authentication masks (NT_ARM_PAC_MASK) */
@@ -265,6 +268,62 @@ struct user_pac_generic_keys {
__uint128_t apgakey;
};
+/* ZA state (NT_ARM_ZA) */
+
+struct user_za_header {
+ __u32 size; /* total meaningful regset content in bytes */
+ __u32 max_size; /* maximum possible size for this thread */
+ __u16 vl; /* current vector length */
+ __u16 max_vl; /* maximum possible vector length */
+ __u16 flags;
+ __u16 __reserved;
+};
+
+/*
+ * Common ZA_PT_* flags:
+ * These must be kept in sync with prctl interface in <linux/prctl.h>
+ */
+#define ZA_PT_VL_INHERIT ((1 << 17) /* PR_SME_VL_INHERIT */ >> 16)
+#define ZA_PT_VL_ONEXEC ((1 << 18) /* PR_SME_SET_VL_ONEXEC */ >> 16)
+
+
+/*
+ * The remainder of the ZA state follows struct user_za_header. The
+ * total size of the ZA state (including header) depends on the
+ * metadata in the header: ZA_PT_SIZE(vq) gives the total size
+ * of the state in bytes, including the header.
+ *
+ * Refer to <asm/sigcontext.h> for details of how to pass the correct
+ * "vq" argument to these macros.
+ */
+
+/* Offset from the start of struct user_za_header to the register data */
+#define ZA_PT_ZA_OFFSET \
+ ((sizeof(struct user_za_header) + (__SVE_VQ_BYTES - 1)) \
+ / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
+
+/*
+ * The payload starts at offset ZA_PT_ZA_OFFSET, and is of size
+ * ZA_PT_ZA_SIZE(vq).
+ *
+ * The ZA array is stored as a sequence of horizontal vectors ZAV of SVL/8
+ * bytes each, starting from vector 0.
+ *
+ * Additional data might be appended in the future.
+ *
+ * The ZA matrix is represented in memory in an endianness-invariant layout
+ * which differs from the layout used for the FPSIMD V-registers on big-endian
+ * systems: see sigcontext.h for more explanation.
+ */
+
+#define ZA_PT_ZAV_OFFSET(vq, n) \
+ (ZA_PT_ZA_OFFSET + ((vq * __SVE_VQ_BYTES) * n))
+
+#define ZA_PT_ZA_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
+
+#define ZA_PT_SIZE(vq) \
+ (ZA_PT_ZA_OFFSET + ZA_PT_ZA_SIZE(vq))
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI__ASM_PTRACE_H */
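A worked instance of the NT_ARM_ZA sizing macros above, with the header size written out as 16 bytes (i.e. sizeof(struct user_za_header) on LP64, an assumption of this sketch):

#include <stdio.h>

#define __SVE_VQ_BYTES	16
#define ZA_HDR_SIZE	16	/* sizeof(struct user_za_header) */

#define ZA_PT_ZA_OFFSET \
	((ZA_HDR_SIZE + (__SVE_VQ_BYTES - 1)) \
	 / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
#define ZA_PT_ZA_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
#define ZA_PT_SIZE(vq)	(ZA_PT_ZA_OFFSET + ZA_PT_ZA_SIZE(vq))

int main(void)
{
	/* vq = 4 is a 512-bit streaming VL: ZA is a 64x64 byte matrix */
	printf("offset %d, total %d\n", ZA_PT_ZA_OFFSET, ZA_PT_SIZE(4));
	return 0;	/* prints: offset 16, total 4112 */
}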
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index 0c796c795dbe..4aaf31e3bf16 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -134,6 +134,17 @@ struct extra_context {
struct sve_context {
struct _aarch64_ctx head;
__u16 vl;
+ __u16 flags;
+ __u16 __reserved[2];
+};
+
+#define SVE_SIG_FLAG_SM 0x1 /* Context describes streaming mode */
+
+#define ZA_MAGIC 0x54366345
+
+struct za_context {
+ struct _aarch64_ctx head;
+ __u16 vl;
__u16 __reserved[3];
};
@@ -186,9 +197,16 @@ struct sve_context {
* sve_context.vl must equal the thread's current vector length when
* doing a sigreturn.
*
+ * On systems with support for SME the SVE register state may reflect either
+ * streaming or non-streaming mode. In streaming mode the streaming mode
+ * vector length will be used and the flag SVE_SIG_FLAG_SM will be set in
+ * the flags field. It is permitted to enter or leave streaming mode in
+ * a signal return; applications should take care to ensure that any difference
+ * in vector length between the two modes is handled, including any resizing
+ * and movement of context blocks.
*
- * Note: for all these macros, the "vq" argument denotes the SVE
- * vector length in quadwords (i.e., units of 128 bits).
+ * Note: for all these macros, the "vq" argument denotes the vector length
+ * in quadwords (i.e., units of 128 bits).
*
* The correct way to obtain vq is to use sve_vq_from_vl(vl). The
* result is valid if and only if sve_vl_valid(vl) is true. This is
@@ -249,4 +267,37 @@ struct sve_context {
#define SVE_SIG_CONTEXT_SIZE(vq) \
(SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
+/*
+ * If the ZA register is enabled for the thread at signal delivery then
+ * za_context.head.size >= ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl))
+ * and the register data may be accessed using the ZA_SIG_*() macros.
+ *
+ * If za_context.head.size < ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl))
+ * then the ZA register was not enabled for the thread and no register
+ * data was included: in this case the ZA_SIG_*() macros should not be
+ * used except for this check.
+ *
+ * The same convention applies when returning from a signal: a caller
+ * will need to remove or resize the za_context block if it wants to
+ * enable the ZA register when it was previously non-live or vice-versa.
+ * This may require the caller to allocate fresh memory and/or move other
+ * context blocks in the signal frame.
+ *
+ * Changing the vector length during signal return is not permitted:
+ * za_context.vl must equal the thread's current SME vector length when
+ * doing a sigreturn.
+ */
+
+#define ZA_SIG_REGS_OFFSET \
+ ((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \
+ / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
+
+#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
+
+#define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \
+ (SVE_SIG_ZREG_SIZE(vq) * n))
+
+#define ZA_SIG_CONTEXT_SIZE(vq) \
+ (ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))
+
#endif /* _UAPI__ASM_SIGCONTEXT_H */
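The size-based liveness test the comment block above describes, as it might look in a signal handler. Struct layouts are trimmed to the fields needed, and sve_vq_from_vl() is just the divide-by-16 from the sve_context ABI:

#include <stdbool.h>
#include <stdint.h>

struct _aarch64_ctx { uint32_t magic; uint32_t size; };
struct za_context {
	struct _aarch64_ctx head;
	uint16_t vl;
	uint16_t __reserved[3];
};

#define __SVE_VQ_BYTES	16
#define ZA_SIG_REGS_OFFSET \
	((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \
	 / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
#define ZA_SIG_REGS_SIZE(vq)	((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
#define ZA_SIG_CONTEXT_SIZE(vq)	(ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))

static unsigned int sve_vq_from_vl(unsigned int vl)
{
	return vl / __SVE_VQ_BYTES;
}

static bool za_is_live(const struct za_context *za)
{
	/* anything smaller than the full size means ZA was disabled */
	return za->head.size >= ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za->vl));
}

int main(void)
{
	struct za_context za = { .head = { .size = 0 }, .vl = 64 };

	return za_is_live(&za);	/* 0: ZA disabled in this frame */
}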
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 986837d7ec82..fa7981d0d917 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -75,6 +75,10 @@ obj-$(CONFIG_ARM64_MTE) += mte.o
obj-y += vdso-wrap.o
obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
+# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
+$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
+$(obj)/vdso32-wrap.o: $(obj)/vdso32/vdso.so
+
obj-y += probes/
head-y := head.o
extra-y += $(head-y) vmlinux.lds
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 3fb79b76e9d9..7bbf5104b7b7 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -42,7 +42,7 @@ bool alternative_is_applied(u16 cpufeature)
/*
* Check if the target PC is within an alternative block.
*/
-static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
+static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
return !(pc >= replptr && pc <= (replptr + alt->alt_len));
@@ -50,7 +50,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
-static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
+static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
u32 insn;
@@ -95,7 +95,7 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp
return insn;
}
-static void patch_alternative(struct alt_instr *alt,
+static noinstr void patch_alternative(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
__le32 *replptr;
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 4c9b5b4b7a0b..c05cc3b6162e 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -208,6 +208,10 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1286807
{
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+ },
+ {
+ /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
+ ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
},
#endif
{},
@@ -215,7 +217,7 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
-const struct midr_range cavium_erratum_23154_cpus[] = {
+static const struct midr_range cavium_erratum_23154_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_THUNDERX),
MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d72c4b4d389c..4ccddf382e5b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -191,20 +191,20 @@ static bool __system_matches_cap(unsigned int n);
* sync with the documentation of the CPU feature register ABI.
*/
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TLB_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TLB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_DP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_AES_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -261,6 +261,8 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SME_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
@@ -293,6 +295,24 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_FA64_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_I16I64_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_F64F64_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_I8I32_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_F16F32_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_B16F32_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_F32F32_SHIFT, 1, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
@@ -557,7 +577,13 @@ static const struct arm64_ftr_bits ftr_id_dfr1[] = {
static const struct arm64_ftr_bits ftr_zcr[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
- ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */
+ ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_WIDTH, 0), /* LEN */
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_smcr[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
+ SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_WIDTH, 0), /* LEN */
ARM64_FTR_END,
};
@@ -645,6 +671,7 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
&id_aa64pfr1_override),
ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
+ ARM64_FTR_REG(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0),
/* Op1 = 0, CRn = 0, CRm = 5 */
ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
@@ -654,7 +681,6 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
&id_aa64isar1_override),
- ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2,
&id_aa64isar2_override),
@@ -666,6 +692,7 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 1, CRm = 2 */
ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
+ ARM64_FTR_REG(SYS_SMCR_EL1, ftr_smcr),
/* Op1 = 1, CRn = 0, CRm = 0 */
ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
@@ -810,7 +837,7 @@ static void __init sort_ftr_regs(void)
* to sys_id for subsequent binary search in get_arm64_ftr_reg()
* to work correctly.
*/
- BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
+ BUG_ON(arm64_ftr_regs[i].sys_id <= arm64_ftr_regs[i - 1].sys_id);
}
}
@@ -960,6 +987,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
+ init_cpu_ftr_reg(SYS_ID_AA64SMFR0_EL1, info->reg_id_aa64smfr0);
if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
init_32bit_cpu_features(&info->aarch32);
@@ -969,6 +997,12 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
vec_init_vq_map(ARM64_VEC_SVE);
}
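+	/* Record this CPU's SMCR and initialise the set of supported SME VLs */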
+ if (id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
+ init_cpu_ftr_reg(SYS_SMCR_EL1, info->reg_smcr);
+ if (IS_ENABLED(CONFIG_ARM64_SME))
+ vec_init_vq_map(ARM64_VEC_SME);
+ }
+
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
@@ -1195,6 +1229,9 @@ void update_cpu_features(int cpu,
taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
+ taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
+ info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0);
+
if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
info->reg_zcr, boot->reg_zcr);
@@ -1205,6 +1242,16 @@ void update_cpu_features(int cpu,
vec_update_vq_map(ARM64_VEC_SVE);
}
+ if (id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
+ taint |= check_update_ftr_reg(SYS_SMCR_EL1, cpu,
+ info->reg_smcr, boot->reg_smcr);
+
+ /* Probe vector lengths, unless we already gave up on SME */
+ if (id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1)) &&
+ !system_capabilities_finalized())
+ vec_update_vq_map(ARM64_VEC_SME);
+ }
+
/*
* The kernel uses the LDGM/STGM instructions and the number of tags
* they read/write depends on the GMID_EL1.BS field. Check that the
@@ -1288,6 +1335,7 @@ u64 __read_sysreg_by_encoding(u32 sys_id)
read_sysreg_case(SYS_ID_AA64PFR0_EL1);
read_sysreg_case(SYS_ID_AA64PFR1_EL1);
read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
+ read_sysreg_case(SYS_ID_AA64SMFR0_EL1);
read_sysreg_case(SYS_ID_AA64DFR0_EL1);
read_sysreg_case(SYS_ID_AA64DFR1_EL1);
read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
@@ -2013,7 +2061,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR0_EL1,
- .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
+ .field_pos = ID_AA64ISAR0_EL1_ATOMIC_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 2,
@@ -2195,10 +2243,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR0_EL1,
- .field_pos = ID_AA64ISAR0_TLB_SHIFT,
+ .field_pos = ID_AA64ISAR0_EL1_TLB_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
- .min_field_value = ID_AA64ISAR0_TLB_RANGE,
+ .min_field_value = ID_AA64ISAR0_EL1_TLB_RANGE,
},
#ifdef CONFIG_ARM64_HW_AFDBM
{
@@ -2227,7 +2275,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR0_EL1,
- .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
+ .field_pos = ID_AA64ISAR0_EL1_CRC32_SHIFT,
.field_width = 4,
.min_field_value = 1,
},
@@ -2382,7 +2430,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR0_EL1,
- .field_pos = ID_AA64ISAR0_RNDR_SHIFT,
+ .field_pos = ID_AA64ISAR0_EL1_RNDR_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
@@ -2442,6 +2490,33 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
.min_field_value = 1,
},
+#ifdef CONFIG_ARM64_SME
+ {
+ .desc = "Scalable Matrix Extension",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_SME,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR1_SME_SHIFT,
+ .field_width = 4,
+ .min_field_value = ID_AA64PFR1_SME,
+ .matches = has_cpuid_feature,
+ .cpu_enable = sme_kernel_enable,
+ },
+ /* FA64 should be sorted after the base SME capability */
+ {
+ .desc = "FA64",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_SME_FA64,
+ .sys_reg = SYS_ID_AA64SMFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64SMFR0_FA64_SHIFT,
+ .field_width = 1,
+ .min_field_value = ID_AA64SMFR0_FA64,
+ .matches = has_cpuid_feature,
+ .cpu_enable = fa64_kernel_enable,
+ },
+#endif /* CONFIG_ARM64_SME */
{},
};
@@ -2514,22 +2589,22 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
#endif
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
@@ -2575,6 +2650,16 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
+#ifdef CONFIG_ARM64_SME
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_FA64, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_I16I64, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_F64F64, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_I8I32_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_I8I32, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_F16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_F16F32, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_B16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_B16F32, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_F32F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_F32F32, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
+#endif /* CONFIG_ARM64_SME */
{},
};
@@ -2872,6 +2957,23 @@ static void verify_sve_features(void)
/* Add checks on other ZCR bits here if necessary */
}
+static void verify_sme_features(void)
+{
+ u64 safe_smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1);
+ u64 smcr = read_smcr_features();
+
+ unsigned int safe_len = safe_smcr & SMCR_ELx_LEN_MASK;
+ unsigned int len = smcr & SMCR_ELx_LEN_MASK;
+
+ if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SME)) {
+ pr_crit("CPU%d: SME: vector length support mismatch\n",
+ smp_processor_id());
+ cpu_die_early();
+ }
+
+ /* Add checks on other SMCR bits here if necessary */
+}
+
static void verify_hyp_capabilities(void)
{
u64 safe_mmfr1, mmfr0, mmfr1;
@@ -2924,6 +3026,9 @@ static void verify_local_cpu_capabilities(void)
if (system_supports_sve())
verify_sve_features();
+ if (system_supports_sme())
+ verify_sme_features();
+
if (is_hyp_mode_available())
verify_hyp_capabilities();
}
@@ -3041,6 +3146,7 @@ void __init setup_cpu_features(void)
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
sve_setup();
+ sme_setup();
minsigstksz_setup();
/* Advertise that we have computed the system capabilities */
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 330b92ea863a..8a8136a096ac 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -98,6 +98,14 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_AFP] = "afp",
[KERNEL_HWCAP_RPRES] = "rpres",
[KERNEL_HWCAP_MTE3] = "mte3",
+ [KERNEL_HWCAP_SME] = "sme",
+ [KERNEL_HWCAP_SME_I16I64] = "smei16i64",
+ [KERNEL_HWCAP_SME_F64F64] = "smef64f64",
+ [KERNEL_HWCAP_SME_I8I32] = "smei8i32",
+ [KERNEL_HWCAP_SME_F16F32] = "smef16f32",
+ [KERNEL_HWCAP_SME_B16F32] = "smeb16f32",
+ [KERNEL_HWCAP_SME_F32F32] = "smef32f32",
+ [KERNEL_HWCAP_SME_FA64] = "smefa64",
};
#ifdef CONFIG_COMPAT
@@ -401,6 +409,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
+ info->reg_id_aa64smfr0 = read_cpuid(ID_AA64SMFR0_EL1);
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
info->reg_gmid = read_cpuid(GMID_EL1);
@@ -412,6 +421,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
id_aa64pfr0_sve(info->reg_id_aa64pfr0))
info->reg_zcr = read_zcr_features();
+ if (IS_ENABLED(CONFIG_ARM64_SME) &&
+ id_aa64pfr1_sme(info->reg_id_aa64pfr1))
+ info->reg_smcr = read_smcr_features();
+
cpuinfo_detect_icache_policy(info);
}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 4f3661eeb7ec..bf9fe71589bc 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -202,7 +202,7 @@ void unregister_kernel_step_hook(struct step_hook *hook)
* So we call all the registered handlers, until the right handler is
* found which returns zero.
*/
-static int call_step_hook(struct pt_regs *regs, unsigned int esr)
+static int call_step_hook(struct pt_regs *regs, unsigned long esr)
{
struct step_hook *hook;
struct list_head *list;
@@ -238,7 +238,7 @@ static void send_user_sigtrap(int si_code)
"User debug trap");
}
-static int single_step_handler(unsigned long unused, unsigned int esr,
+static int single_step_handler(unsigned long unused, unsigned long esr,
struct pt_regs *regs)
{
bool handler_found = false;
@@ -299,11 +299,11 @@ void unregister_kernel_break_hook(struct break_hook *hook)
unregister_debug_hook(&hook->node);
}
-static int call_break_hook(struct pt_regs *regs, unsigned int esr)
+static int call_break_hook(struct pt_regs *regs, unsigned long esr)
{
struct break_hook *hook;
struct list_head *list;
- int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
+ int (*fn)(struct pt_regs *regs, unsigned long esr) = NULL;
list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
@@ -312,7 +312,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
* entirely not preemptible, and we can use rcu list safely here.
*/
list_for_each_entry_rcu(hook, list, node) {
- unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+ unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
if ((comment & ~hook->mask) == hook->imm)
fn = hook->fn;
@@ -322,7 +322,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
}
NOKPROBE_SYMBOL(call_break_hook);
-static int brk_handler(unsigned long unused, unsigned int esr,
+static int brk_handler(unsigned long unused, unsigned long esr,
struct pt_regs *regs)
{
if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c
index 3ed39c61a510..98d67444a5b6 100644
--- a/arch/arm64/kernel/elfcore.c
+++ b/arch/arm64/kernel/elfcore.c
@@ -8,16 +8,9 @@
#include <asm/cpufeature.h>
#include <asm/mte.h>
-#ifndef VMA_ITERATOR
-#define VMA_ITERATOR(name, mm, addr) \
- struct mm_struct *name = mm
-#define for_each_vma(vmi, vma) \
- for (vma = vmi->mmap; vma; vma = vma->vm_next)
-#endif
-
-#define for_each_mte_vma(vmi, vma) \
+#define for_each_mte_vma(tsk, vma) \
if (system_supports_mte()) \
- for_each_vma(vmi, vma) \
+ for (vma = tsk->mm->mmap; vma; vma = vma->vm_next) \
if (vma->vm_flags & VM_MTE)
static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
@@ -32,10 +25,11 @@ static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
static int mte_dump_tag_range(struct coredump_params *cprm,
unsigned long start, unsigned long end)
{
+ int ret = 1;
unsigned long addr;
+ void *tags = NULL;
for (addr = start; addr < end; addr += PAGE_SIZE) {
- char tags[MTE_PAGE_TAG_STORAGE];
struct page *page = get_dump_page(addr);
/*
@@ -59,22 +53,36 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
continue;
}
+ if (!tags) {
+ tags = mte_allocate_tag_storage();
+ if (!tags) {
+ put_page(page);
+ ret = 0;
+ break;
+ }
+ }
+
mte_save_page_tags(page_address(page), tags);
put_page(page);
- if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE))
- return 0;
+ if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
+ mte_free_tag_storage(tags);
+ ret = 0;
+ break;
+ }
}
- return 1;
+ if (tags)
+ mte_free_tag_storage(tags);
+
+ return ret;
}
Elf_Half elf_core_extra_phdrs(void)
{
struct vm_area_struct *vma;
int vma_count = 0;
- VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(vmi, vma)
+ for_each_mte_vma(current, vma)
vma_count++;
return vma_count;
@@ -83,12 +91,11 @@ Elf_Half elf_core_extra_phdrs(void)
int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
{
struct vm_area_struct *vma;
- VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(vmi, vma) {
+ for_each_mte_vma(current, vma) {
struct elf_phdr phdr;
- phdr.p_type = PT_ARM_MEMTAG_MTE;
+ phdr.p_type = PT_AARCH64_MEMTAG_MTE;
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
@@ -109,9 +116,8 @@ size_t elf_core_extra_data_size(void)
{
struct vm_area_struct *vma;
size_t data_size = 0;
- VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(vmi, vma)
+ for_each_mte_vma(current, vma)
data_size += mte_vma_tag_dump_size(vma);
return data_size;
@@ -120,9 +126,8 @@ size_t elf_core_extra_data_size(void)
int elf_core_write_extra_data(struct coredump_params *cprm)
{
struct vm_area_struct *vma;
- VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(vmi, vma) {
+ for_each_mte_vma(current, vma) {
if (vma->vm_flags & VM_DONTDUMP)
continue;
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 878c65aa7206..56cefd33eb8e 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -75,7 +75,7 @@ static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
if (interrupts_enabled(regs)) {
if (regs->exit_rcu) {
trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ lockdep_hardirqs_on_prepare();
rcu_irq_exit();
lockdep_hardirqs_on(CALLER_ADDR0);
return;
@@ -121,7 +121,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
static __always_inline void __exit_to_user_mode(void)
{
trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ lockdep_hardirqs_on_prepare();
user_enter_irqoff();
lockdep_hardirqs_on(CALLER_ADDR0);
}
@@ -179,7 +179,7 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs)
ftrace_nmi_exit();
if (restore) {
trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ lockdep_hardirqs_on_prepare();
}
rcu_nmi_exit();
@@ -215,7 +215,7 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
if (restore) {
trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ lockdep_hardirqs_on_prepare();
}
rcu_nmi_exit();
@@ -282,13 +282,13 @@ extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);
static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
- unsigned int esr)
+ unsigned long esr)
{
arm64_enter_nmi(regs);
console_verbose();
- pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n",
+ pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
vector, smp_processor_id(), esr,
esr_get_class_string(esr));
@@ -537,6 +537,14 @@ static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
exit_to_user_mode(regs);
}
+static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode(regs);
+ local_daif_restore(DAIF_PROCCTX);
+ do_sme_acc(esr, regs);
+ exit_to_user_mode(regs);
+}
+
static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
@@ -645,6 +653,9 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_SVE:
el0_sve_acc(regs, esr);
break;
+ case ESR_ELx_EC_SME:
+ el0_sme_acc(regs, esr);
+ break;
case ESR_ELx_EC_FP_EXC64:
el0_fpsimd_exc(regs, esr);
break;
@@ -818,7 +829,7 @@ UNHANDLED(el0t, 32, error)
#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
- unsigned int esr = read_sysreg(esr_el1);
+ unsigned long esr = read_sysreg(esr_el1);
unsigned long far = read_sysreg(far_el1);
arm64_enter_nmi(regs);
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index dc242e269f9a..229436f33df5 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -86,3 +86,39 @@ SYM_FUNC_START(sve_flush_live)
SYM_FUNC_END(sve_flush_live)
#endif /* CONFIG_ARM64_SVE */
+
+#ifdef CONFIG_ARM64_SME
+
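+/*
+ * Return the current streaming vector length, in bytes, in x0
+ */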
+SYM_FUNC_START(sme_get_vl)
+ _sme_rdsvl 0, 1
+ ret
+SYM_FUNC_END(sme_get_vl)
+
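+/*
+ * Set the SME vector length
+ *
+ * x0 - vector quantum minus one (see sme_load_vq)
+ */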
+SYM_FUNC_START(sme_set_vq)
+ sme_load_vq x0, x1, x2
+ ret
+SYM_FUNC_END(sme_set_vq)
+
+/*
+ * Save the SME state
+ *
+ * x0 - pointer to buffer for state
+ */
+SYM_FUNC_START(za_save_state)
+ _sme_rdsvl 1, 1 // x1 = VL/8
+ sme_save_za 0, x1, 12
+ ret
+SYM_FUNC_END(za_save_state)
+
+/*
+ * Load the SME state
+ *
+ * x0 - pointer to buffer for state
+ */
+SYM_FUNC_START(za_load_state)
+ _sme_rdsvl 1, 1 // x1 = VL/8
+ sme_load_za 0, x1, 12
+ ret
+SYM_FUNC_END(za_load_state)
+
+#endif /* CONFIG_ARM64_SME */
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index e535480a4069..d42a205ef625 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -97,12 +97,6 @@ SYM_CODE_START(ftrace_common)
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
bl ftrace_stub
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
- nop // If enabled, this will be replaced
- // "b ftrace_graph_caller"
-#endif
-
/*
* At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
* x19-x29 per the AAPCS, and we created frame records upon entry, so we need
@@ -127,17 +121,6 @@ ftrace_common_return:
ret x9
SYM_CODE_END(ftrace_common)
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-SYM_CODE_START(ftrace_graph_caller)
- ldr x0, [sp, #S_PC]
- sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
- add x1, sp, #S_LR // parent_ip (callsite's LR)
- ldr x2, [sp, #PT_REGS_SIZE] // parent fp (callsite's FP)
- bl prepare_ftrace_return
- b ftrace_common_return
-SYM_CODE_END(ftrace_graph_caller)
-#endif
-
#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
/*
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index ede028dee81b..5b82b9292400 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -596,7 +596,7 @@ SYM_CODE_START_LOCAL(ret_to_user)
ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
- bl stackleak_erase
+ bl stackleak_erase_on_task_stack
#endif
kernel_exit 0
SYM_CODE_END(ret_to_user)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 47af76e53221..819979398127 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -121,7 +121,10 @@
struct fpsimd_last_state_struct {
struct user_fpsimd_state *st;
void *sve_state;
+ void *za_state;
+ u64 *svcr;
unsigned int sve_vl;
+ unsigned int sme_vl;
};
static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
@@ -136,6 +139,12 @@ __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = {
.max_virtualisable_vl = SVE_VL_MIN,
},
#endif
+#ifdef CONFIG_ARM64_SME
+ [ARM64_VEC_SME] = {
+ .type = ARM64_VEC_SME,
+ .name = "SME",
+ },
+#endif
};
static unsigned int vec_vl_inherit_flag(enum vec_type type)
@@ -143,6 +152,8 @@ static unsigned int vec_vl_inherit_flag(enum vec_type type)
switch (type) {
case ARM64_VEC_SVE:
return TIF_SVE_VL_INHERIT;
+ case ARM64_VEC_SME:
+ return TIF_SME_VL_INHERIT;
default:
WARN_ON_ONCE(1);
return 0;
@@ -186,6 +197,26 @@ extern void __percpu *efi_sve_state;
#endif /* ! CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_SME
+
+static int get_sme_default_vl(void)
+{
+ return get_default_vl(ARM64_VEC_SME);
+}
+
+static void set_sme_default_vl(int val)
+{
+ set_default_vl(ARM64_VEC_SME, val);
+}
+
+static void sme_free(struct task_struct *);
+
+#else
+
+static inline void sme_free(struct task_struct *t) { }
+
+#endif
+
DEFINE_PER_CPU(bool, fpsimd_context_busy);
EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);
@@ -206,10 +237,19 @@ static void __get_cpu_fpsimd_context(void)
*
* The double-underscore version must only be called if you know the task
* can't be preempted.
+ *
+ * On RT kernels local_bh_disable() is not sufficient because it only
+ * serializes soft interrupt related sections via a local lock, but stays
+ * preemptible. Disabling preemption is the right choice here as bottom
+ * half processing is always in thread context on RT kernels so it
+ * implicitly prevents bottom half processing as well.
*/
static void get_cpu_fpsimd_context(void)
{
- local_bh_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_disable();
+ else
+ preempt_disable();
__get_cpu_fpsimd_context();
}
@@ -230,7 +270,10 @@ static void __put_cpu_fpsimd_context(void)
static void put_cpu_fpsimd_context(void)
{
__put_cpu_fpsimd_context();
- local_bh_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_enable();
+ else
+ preempt_enable();
}
static bool have_cpu_fpsimd_context(void)
@@ -238,23 +281,6 @@ static bool have_cpu_fpsimd_context(void)
return !preemptible() && __this_cpu_read(fpsimd_context_busy);
}
-/*
- * Call __sve_free() directly only if you know task can't be scheduled
- * or preempted.
- */
-static void __sve_free(struct task_struct *task)
-{
- kfree(task->thread.sve_state);
- task->thread.sve_state = NULL;
-}
-
-static void sve_free(struct task_struct *task)
-{
- WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
-
- __sve_free(task);
-}
-
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type)
{
return task->thread.vl[type];
@@ -279,16 +305,27 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
}
/*
+ * TIF_SME controls whether a task can use SME without trapping while
+ * in userspace; when TIF_SME is set we must have storage
+ * allocated in sve_state and za_state to store the contents of both ZA
+ * and the SVE registers for both streaming and non-streaming modes.
+ *
+ * If both SVCR.ZA and SVCR.SM are disabled then at any point we
+ * may disable TIF_SME and re-enable traps.
+ */
+
+
+/*
* TIF_SVE controls whether a task can use SVE without trapping while
- * in userspace, and also the way a task's FPSIMD/SVE state is stored
- * in thread_struct.
+ * in userspace, and also (together with TIF_SME) the way a task's
+ * FPSIMD/SVE state is stored in thread_struct.
*
* The kernel uses this flag to track whether a user task is actively
* using SVE, and therefore whether full SVE register state needs to
* be tracked. If not, the cheaper FPSIMD context handling code can
* be used instead of the more costly SVE equivalents.
*
- * * TIF_SVE set:
+ * * TIF_SVE or SVCR.SM set:
*
* The task can execute SVE instructions while in userspace without
* trapping to the kernel.
@@ -296,7 +333,8 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
* When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
 * corresponding Zn), P0-P15 and FFR are encoded in
* task->thread.sve_state, formatted appropriately for vector
- * length task->thread.sve_vl.
+ * length task->thread.sve_vl or, if SVCR.SM is set,
+ * task->thread.sme_vl.
*
* task->thread.sve_state must point to a valid buffer at least
* sve_state_size(task) bytes in size.
@@ -334,16 +372,44 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
*/
static void task_fpsimd_load(void)
{
+ bool restore_sve_regs = false;
+ bool restore_ffr;
+
WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
+ /* Check if we should restore SVE first */
if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE)) {
sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1);
+ restore_sve_regs = true;
+ restore_ffr = true;
+ }
+
+ /* Restore SME, override SVE register configuration if needed */
+ if (system_supports_sme()) {
+ unsigned long sme_vl = task_get_sme_vl(current);
+
+ /* Ensure VL is set up for restoring data */
+ if (test_thread_flag(TIF_SME))
+ sme_set_vq(sve_vq_from_vl(sme_vl) - 1);
+
+ write_sysreg_s(current->thread.svcr, SYS_SVCR);
+
+ if (thread_za_enabled(&current->thread))
+ za_load_state(current->thread.za_state);
+
+ if (thread_sm_enabled(&current->thread)) {
+ restore_sve_regs = true;
+ restore_ffr = system_supports_fa64();
+ }
+ }
+
+ if (restore_sve_regs)
sve_load_state(sve_pffr(&current->thread),
- &current->thread.uw.fpsimd_state.fpsr, true);
- } else {
+ &current->thread.uw.fpsimd_state.fpsr,
+ restore_ffr);
+ else
fpsimd_load_state(&current->thread.uw.fpsimd_state);
- }
}
/*
@@ -361,6 +427,9 @@ static void fpsimd_save(void)
struct fpsimd_last_state_struct const *last =
this_cpu_ptr(&fpsimd_last_state);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
+ bool save_sve_regs = false;
+ bool save_ffr;
+ unsigned int vl;
WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
@@ -368,9 +437,32 @@ static void fpsimd_save(void)
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
return;
- if (IS_ENABLED(CONFIG_ARM64_SVE) &&
- test_thread_flag(TIF_SVE)) {
- if (WARN_ON(sve_get_vl() != last->sve_vl)) {
+ if (test_thread_flag(TIF_SVE)) {
+ save_sve_regs = true;
+ save_ffr = true;
+ vl = last->sve_vl;
+ }
+
+ if (system_supports_sme()) {
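+		/* Snapshot SVCR so we know whether ZA or streaming mode state is live */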
+ u64 *svcr = last->svcr;
+
+		*svcr = read_sysreg_s(SYS_SVCR);
+
+ if (*svcr & SVCR_ZA_MASK)
+ za_save_state(last->za_state);
+
+ /* If we are in streaming mode override regular SVE. */
+ if (*svcr & SVCR_SM_MASK) {
+ save_sve_regs = true;
+ save_ffr = system_supports_fa64();
+ vl = last->sme_vl;
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_ARM64_SVE) && save_sve_regs) {
+ /* Get the configured VL from RDVL, will account for SM */
+ if (WARN_ON(sve_get_vl() != vl)) {
/*
* Can't save the user regs, so current would
* re-enter user with corrupt state.
@@ -381,8 +473,8 @@ static void fpsimd_save(void)
}
sve_save_state((char *)last->sve_state +
- sve_ffr_offset(last->sve_vl),
- &last->st->fpsr, true);
+ sve_ffr_offset(vl),
+ &last->st->fpsr, save_ffr);
} else {
fpsimd_save_state(last->st);
}
@@ -409,6 +501,8 @@ static unsigned int find_supported_vector_length(enum vec_type type,
if (vl > max_vl)
vl = max_vl;
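+	/* Clamp to the minimum VL the system actually supports for this type */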
+ if (vl < info->min_vl)
+ vl = info->min_vl;
bit = find_next_bit(info->vq_map, SVE_VQ_MAX,
__vq_to_bit(sve_vq_from_vl(vl)));
@@ -467,6 +561,30 @@ static int __init sve_sysctl_init(void)
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
+#if defined(CONFIG_ARM64_SME) && defined(CONFIG_SYSCTL)
+static struct ctl_table sme_default_vl_table[] = {
+ {
+ .procname = "sme_default_vector_length",
+ .mode = 0644,
+ .proc_handler = vec_proc_do_default_vl,
+ .extra1 = &vl_info[ARM64_VEC_SME],
+ },
+ { }
+};
+
+static int __init sme_sysctl_init(void)
+{
+ if (system_supports_sme())
+ if (!register_sysctl("abi", sme_default_vl_table))
+ return -EINVAL;
+
+ return 0;
+}
+
+#else /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */
+static int __init sme_sysctl_init(void) { return 0; }
+#endif /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */
+
#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
@@ -520,7 +638,7 @@ static void fpsimd_to_sve(struct task_struct *task)
if (!system_supports_sve())
return;
- vq = sve_vq_from_vl(task_get_sve_vl(task));
+ vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
__fpsimd_to_sve(sst, fst, vq);
}
@@ -537,7 +655,7 @@ static void fpsimd_to_sve(struct task_struct *task)
*/
static void sve_to_fpsimd(struct task_struct *task)
{
- unsigned int vq;
+ unsigned int vq, vl;
void const *sst = task->thread.sve_state;
struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
@@ -546,7 +664,8 @@ static void sve_to_fpsimd(struct task_struct *task)
if (!system_supports_sve())
return;
- vq = sve_vq_from_vl(task_get_sve_vl(task));
+ vl = thread_get_cur_vl(&task->thread);
+ vq = sve_vq_from_vl(vl);
for (i = 0; i < SVE_NUM_ZREGS; ++i) {
p = (__uint128_t const *)ZREG(sst, vq, i);
fst->vregs[i] = arm64_le128_to_cpu(*p);
@@ -554,14 +673,37 @@ static void sve_to_fpsimd(struct task_struct *task)
}
#ifdef CONFIG_ARM64_SVE
+/*
+ * Call __sve_free() directly only if you know task can't be scheduled
+ * or preempted.
+ */
+static void __sve_free(struct task_struct *task)
+{
+ kfree(task->thread.sve_state);
+ task->thread.sve_state = NULL;
+}
+
+static void sve_free(struct task_struct *task)
+{
+ WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
+
+ __sve_free(task);
+}
/*
* Return how many bytes of memory are required to store the full SVE
* state for task, given task's currently configured vector length.
*/
-static size_t sve_state_size(struct task_struct const *task)
+size_t sve_state_size(struct task_struct const *task)
{
- return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task_get_sve_vl(task)));
+ unsigned int vl = 0;
+
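+	/* The buffer must cover the larger of the SVE and streaming (SME) VLs */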
+ if (system_supports_sve())
+ vl = task_get_sve_vl(task);
+ if (system_supports_sme())
+ vl = max(vl, task_get_sme_vl(task));
+
+ return SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl));
}
/*
@@ -588,6 +730,19 @@ void sve_alloc(struct task_struct *task)
/*
+ * Force the FPSIMD state shared with SVE to be updated in the SVE state
+ * even if the SVE state is the current active state.
+ *
+ * This should only be called by ptrace. task must be non-runnable.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ */
+void fpsimd_force_sync_to_sve(struct task_struct *task)
+{
+ fpsimd_to_sve(task);
+}
+
+/*
* Ensure that task->thread.sve_state is up to date with respect to
* the user task, irrespective of when SVE is in use or not.
*
@@ -597,7 +752,8 @@ void sve_alloc(struct task_struct *task)
*/
void fpsimd_sync_to_sve(struct task_struct *task)
{
- if (!test_tsk_thread_flag(task, TIF_SVE))
+ if (!test_tsk_thread_flag(task, TIF_SVE) &&
+ !thread_sm_enabled(&task->thread))
fpsimd_to_sve(task);
}
@@ -611,7 +767,8 @@ void fpsimd_sync_to_sve(struct task_struct *task)
*/
void sve_sync_to_fpsimd(struct task_struct *task)
{
- if (test_tsk_thread_flag(task, TIF_SVE))
+ if (test_tsk_thread_flag(task, TIF_SVE) ||
+ thread_sm_enabled(&task->thread))
sve_to_fpsimd(task);
}
@@ -636,7 +793,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
if (!test_tsk_thread_flag(task, TIF_SVE))
return;
- vq = sve_vq_from_vl(task_get_sve_vl(task));
+ vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
__fpsimd_to_sve(sst, fst, vq);
@@ -680,8 +837,7 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
/*
* To ensure the FPSIMD bits of the SVE vector registers are preserved,
* write any live register state back to task_struct, and convert to a
- * regular FPSIMD thread. Since the vector length can only be changed
- * with a syscall we can't be in streaming mode while reconfiguring.
+ * regular FPSIMD thread.
*/
if (task == current) {
get_cpu_fpsimd_context();
@@ -690,17 +846,26 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
}
fpsimd_flush_task_state(task);
- if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
+ if (test_and_clear_tsk_thread_flag(task, TIF_SVE) ||
+ thread_sm_enabled(&task->thread))
sve_to_fpsimd(task);
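+	/* Changing the SME VL discards any streaming mode and ZA state */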
+ if (system_supports_sme() && type == ARM64_VEC_SME) {
+ task->thread.svcr &= ~(SVCR_SM_MASK |
+ SVCR_ZA_MASK);
+ clear_thread_flag(TIF_SME);
+ }
+
if (task == current)
put_cpu_fpsimd_context();
/*
- * Force reallocation of task SVE state to the correct size
- * on next use:
+ * Force reallocation of task SVE and SME state to the correct
+ * size on next use:
*/
sve_free(task);
+ if (system_supports_sme() && type == ARM64_VEC_SME)
+ sme_free(task);
task_set_vl(task, type, vl);
@@ -761,6 +926,36 @@ int sve_get_current_vl(void)
return vec_prctl_status(ARM64_VEC_SVE, 0);
}
+#ifdef CONFIG_ARM64_SME
+/* PR_SME_SET_VL */
+int sme_set_current_vl(unsigned long arg)
+{
+ unsigned long vl, flags;
+ int ret;
+
+ vl = arg & PR_SME_VL_LEN_MASK;
+ flags = arg & ~vl;
+
+ if (!system_supports_sme() || is_compat_task())
+ return -EINVAL;
+
+ ret = vec_set_vector_length(current, ARM64_VEC_SME, vl, flags);
+ if (ret)
+ return ret;
+
+ return vec_prctl_status(ARM64_VEC_SME, flags);
+}
+
+/* PR_SME_GET_VL */
+int sme_get_current_vl(void)
+{
+ if (!system_supports_sme() || is_compat_task())
+ return -EINVAL;
+
+ return vec_prctl_status(ARM64_VEC_SME, 0);
+}
+#endif /* CONFIG_ARM64_SME */
+
static void vec_probe_vqs(struct vl_info *info,
DECLARE_BITMAP(map, SVE_VQ_MAX))
{
@@ -770,7 +965,23 @@ static void vec_probe_vqs(struct vl_info *info,
for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
write_vl(info->type, vq - 1); /* self-syncing */
- vl = sve_get_vl();
+
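+		/* Read back the vector length the hardware actually provided */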
+ switch (info->type) {
+ case ARM64_VEC_SVE:
+ vl = sve_get_vl();
+ break;
+ case ARM64_VEC_SME:
+ vl = sme_get_vl();
+ break;
+ default:
+ vl = 0;
+ break;
+ }
+
+ /* Minimum VL identified? */
+ if (sve_vq_from_vl(vl) > vq)
+ break;
+
vq = sve_vq_from_vl(vl); /* skip intervening lengths */
set_bit(__vq_to_bit(vq), map);
}
@@ -856,21 +1067,25 @@ int vec_verify_vq_map(enum vec_type type)
static void __init sve_efi_setup(void)
{
- struct vl_info *info = &vl_info[ARM64_VEC_SVE];
+ int max_vl = 0;
+ int i;
if (!IS_ENABLED(CONFIG_EFI))
return;
+ for (i = 0; i < ARRAY_SIZE(vl_info); i++)
+ max_vl = max(vl_info[i].max_vl, max_vl);
+
/*
* alloc_percpu() warns and prints a backtrace if this goes wrong.
* This is evidence of a crippled system and we are returning void,
* so no attempt is made to handle this situation here.
*/
- if (!sve_vl_valid(info->max_vl))
+ if (!sve_vl_valid(max_vl))
goto fail;
efi_sve_state = __alloc_percpu(
- SVE_SIG_REGS_SIZE(sve_vq_from_vl(info->max_vl)), SVE_VQ_BYTES);
+ SVE_SIG_REGS_SIZE(sve_vq_from_vl(max_vl)), SVE_VQ_BYTES);
if (!efi_sve_state)
goto fail;
@@ -989,10 +1204,172 @@ void __init sve_setup(void)
void fpsimd_release_task(struct task_struct *dead_task)
{
__sve_free(dead_task);
+ sme_free(dead_task);
}
#endif /* CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_SME
+
+/*
+ * Ensure that task->thread.za_state is allocated and sufficiently large.
+ *
+ * This function should be used only in preparation for replacing
+ * task->thread.za_state with new data. The memory is always zeroed
+ * here to prevent stale data from showing through: this is done in
+ * the interest of testability and predictability, since the
+ * architecture guarantees that ZA is zeroed when it is enabled.
+ */
+void sme_alloc(struct task_struct *task)
+{
+ if (task->thread.za_state) {
+ memset(task->thread.za_state, 0, za_state_size(task));
+ return;
+ }
+
+ /* This could potentially be up to 64K. */
+ task->thread.za_state =
+ kzalloc(za_state_size(task), GFP_KERNEL);
+}
+
+static void sme_free(struct task_struct *task)
+{
+ kfree(task->thread.za_state);
+ task->thread.za_state = NULL;
+}
+
+void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+{
+ /* Set priority for all PEs to architecturally defined minimum */
+ write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK,
+ SYS_SMPRI_EL1);
+
+ /* Allow SME in kernel */
+ write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1);
+ isb();
+
+ /* Allow EL0 to access TPIDR2 */
+ write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1);
+ isb();
+}
+
+/*
+ * This must be called after sme_kernel_enable(), we rely on the
+ * feature table being sorted to ensure this.
+ */
+void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+{
+ /* Allow use of FA64 */
+ write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK,
+ SYS_SMCR_EL1);
+}
+
+/*
+ * Read the pseudo-SMCR used by cpufeatures to identify the supported
+ * vector length.
+ *
+ * Use only if SME is present.
+ * This function clobbers the SME vector length.
+ */
+u64 read_smcr_features(void)
+{
+ u64 smcr;
+ unsigned int vq_max;
+
+ sme_kernel_enable(NULL);
+ sme_smstart_sm();
+
+ /*
+ * Set the maximum possible VL.
+ */
+ write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_LEN_MASK,
+ SYS_SMCR_EL1);
+
+ smcr = read_sysreg_s(SYS_SMCR_EL1);
+ smcr &= ~(u64)SMCR_ELx_LEN_MASK; /* Only the LEN field */
+ vq_max = sve_vq_from_vl(sve_get_vl());
+ smcr |= vq_max - 1; /* set LEN field to maximum effective value */
+
+ sme_smstop_sm();
+
+ return smcr;
+}
+
+void __init sme_setup(void)
+{
+ struct vl_info *info = &vl_info[ARM64_VEC_SME];
+ u64 smcr;
+ int min_bit;
+
+ if (!system_supports_sme())
+ return;
+
+ /*
+	 * SME doesn't require that any particular vector length be
+	 * supported, but it does require at least one. We should have
+ * disabled the feature entirely while bringing up CPUs but
+ * let's double check here.
+ */
+ WARN_ON(bitmap_empty(info->vq_map, SVE_VQ_MAX));
+
+ min_bit = find_last_bit(info->vq_map, SVE_VQ_MAX);
+ info->min_vl = sve_vl_from_vq(__bit_to_vq(min_bit));
+
+ smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1);
+ info->max_vl = sve_vl_from_vq((smcr & SMCR_ELx_LEN_MASK) + 1);
+
+ /*
+ * Sanity-check that the max VL we determined through CPU features
+ * corresponds properly to sme_vq_map. If not, do our best:
+ */
+ if (WARN_ON(info->max_vl != find_supported_vector_length(ARM64_VEC_SME,
+ info->max_vl)))
+ info->max_vl = find_supported_vector_length(ARM64_VEC_SME,
+ info->max_vl);
+
+ WARN_ON(info->min_vl > info->max_vl);
+
+ /*
+ * For the default VL, pick the maximum supported value <= 32
+ * (256 bits) if there is one since this is guaranteed not to
+ * grow the signal frame when in streaming mode, otherwise the
+ * minimum available VL will be used.
+ */
+ set_sme_default_vl(find_supported_vector_length(ARM64_VEC_SME, 32));
+
+ pr_info("SME: minimum available vector length %u bytes per vector\n",
+ info->min_vl);
+ pr_info("SME: maximum available vector length %u bytes per vector\n",
+ info->max_vl);
+ pr_info("SME: default vector length %u bytes per vector\n",
+ get_sme_default_vl());
+}
+
+#endif /* CONFIG_ARM64_SME */
+
+static void sve_init_regs(void)
+{
+ /*
+ * Convert the FPSIMD state to SVE, zeroing all the state that
+ * is not shared with FPSIMD. If (as is likely) the current
+ * state is live in the registers then do this there and
+ * update our metadata for the current task including
+ * disabling the trap, otherwise update our in-memory copy.
+	 * We are guaranteed not to be in streaming mode: we can only
+	 * take an SVE trap when not in streaming mode and we can't be
+	 * in streaming mode when taking an SME trap.
+ */
+ if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ unsigned long vq_minus_one =
+ sve_vq_from_vl(task_get_sve_vl(current)) - 1;
+ sve_set_vq(vq_minus_one);
+ sve_flush_live(true, vq_minus_one);
+ fpsimd_bind_task_to_cpu();
+ } else {
+ fpsimd_to_sve(current);
+ }
+}
+
/*
* Trapped SVE access
*
@@ -1004,7 +1381,7 @@ void fpsimd_release_task(struct task_struct *dead_task)
* would have disabled the SVE access trap for userspace during
* ret_to_user, making an SVE access trap impossible in that case.
*/
-void do_sve_acc(unsigned int esr, struct pt_regs *regs)
+void do_sve_acc(unsigned long esr, struct pt_regs *regs)
{
/* Even if we chose not to use SVE, the hardware could still trap: */
if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
@@ -1024,29 +1401,84 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
WARN_ON(1); /* SVE access shouldn't have trapped */
/*
- * Convert the FPSIMD state to SVE, zeroing all the state that
- * is not shared with FPSIMD. If (as is likely) the current
- * state is live in the registers then do this there and
- * update our metadata for the current task including
- * disabling the trap, otherwise update our in-memory copy.
+	 * Even though the task may have used streaming mode we can only
+	 * generate SVE access traps in normal SVE mode and
+ * transitioning out of streaming mode may discard any
+ * streaming mode state. Always clear the high bits to avoid
+ * any potential errors tracking what is properly initialised.
+ */
+ sve_init_regs();
+
+ put_cpu_fpsimd_context();
+}
+
+/*
+ * Trapped SME access
+ *
+ * Storage is allocated for the full SVE and SME state, the current
+ * FPSIMD register contents are migrated to SVE if SVE is not already
+ * active, and the access trap is disabled.
+ *
+ * TIF_SME should be clear on entry: otherwise, fpsimd_restore_current_state()
+ * would have disabled the SME access trap for userspace during
+ * ret_to_user, making an SME access trap impossible in that case.
+ */
+void do_sme_acc(unsigned long esr, struct pt_regs *regs)
+{
+ /* Even if we chose not to use SME, the hardware could still trap: */
+ if (unlikely(!system_supports_sme()) || WARN_ON(is_compat_task())) {
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ return;
+ }
+
+ /*
+	 * If this is not a trap due to SME being disabled then something
+	 * is being used in the wrong mode; report as SIGILL.
*/
+ if (ESR_ELx_ISS(esr) != ESR_ELx_SME_ISS_SME_DISABLED) {
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ return;
+ }
+
+ sve_alloc(current);
+ sme_alloc(current);
+ if (!current->thread.sve_state || !current->thread.za_state) {
+ force_sig(SIGKILL);
+ return;
+ }
+
+ get_cpu_fpsimd_context();
+
+ /* With TIF_SME userspace shouldn't generate any traps */
+ if (test_and_set_thread_flag(TIF_SME))
+ WARN_ON(1);
+
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
unsigned long vq_minus_one =
- sve_vq_from_vl(task_get_sve_vl(current)) - 1;
- sve_set_vq(vq_minus_one);
- sve_flush_live(true, vq_minus_one);
+ sve_vq_from_vl(task_get_sme_vl(current)) - 1;
+ sme_set_vq(vq_minus_one);
+
fpsimd_bind_task_to_cpu();
- } else {
- fpsimd_to_sve(current);
}
+ /*
+	 * If SVE was not already active initialise the SVE registers;
+	 * any non-shared state between the streaming and regular SVE
+	 * registers is architecturally guaranteed to be zeroed when
+	 * we enter streaming mode. We do not need to initialise ZA
+ * since ZA must be disabled at this point and enabling ZA is
+ * architecturally defined to zero ZA.
+ */
+ if (system_supports_sve() && !test_thread_flag(TIF_SVE))
+ sve_init_regs();
+
put_cpu_fpsimd_context();
}
/*
* Trapped FP/ASIMD access.
*/
-void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
+void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs)
{
/* TODO: implement lazy context saving/restoring */
WARN_ON(1);
@@ -1055,7 +1487,7 @@ void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
/*
* Raise a SIGFPE for the current process.
*/
-void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
+void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs)
{
unsigned int si_code = FPE_FLTUNK;
@@ -1141,6 +1573,9 @@ static void fpsimd_flush_thread_vl(enum vec_type type)
void fpsimd_flush_thread(void)
{
+ void *sve_state = NULL;
+ void *za_state = NULL;
+
if (!system_supports_fpsimd())
return;
@@ -1152,11 +1587,28 @@ void fpsimd_flush_thread(void)
if (system_supports_sve()) {
clear_thread_flag(TIF_SVE);
- sve_free(current);
+
+ /* Defer kfree() while in atomic context */
+ sve_state = current->thread.sve_state;
+ current->thread.sve_state = NULL;
+
fpsimd_flush_thread_vl(ARM64_VEC_SVE);
}
+ if (system_supports_sme()) {
+ clear_thread_flag(TIF_SME);
+
+ /* Defer kfree() while in atomic context */
+ za_state = current->thread.za_state;
+ current->thread.za_state = NULL;
+
+ fpsimd_flush_thread_vl(ARM64_VEC_SME);
+ current->thread.svcr = 0;
+ }
+
put_cpu_fpsimd_context();
+ kfree(sve_state);
+ kfree(za_state);
}
/*
@@ -1198,22 +1650,34 @@ static void fpsimd_bind_task_to_cpu(void)
WARN_ON(!system_supports_fpsimd());
last->st = &current->thread.uw.fpsimd_state;
last->sve_state = current->thread.sve_state;
+ last->za_state = current->thread.za_state;
last->sve_vl = task_get_sve_vl(current);
+ last->sme_vl = task_get_sme_vl(current);
+ last->svcr = &current->thread.svcr;
current->thread.fpsimd_cpu = smp_processor_id();
+ /*
+	 * Toggle SVE and SME trapping for userspace if needed; these
+	 * are serialised by ret_to_user().
+ */
+ if (system_supports_sme()) {
+ if (test_thread_flag(TIF_SME))
+ sme_user_enable();
+ else
+ sme_user_disable();
+ }
+
if (system_supports_sve()) {
- /* Toggle SVE trapping for userspace if needed */
if (test_thread_flag(TIF_SVE))
sve_user_enable();
else
sve_user_disable();
-
- /* Serialised by exception return to user */
}
}
void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
- unsigned int sve_vl)
+ unsigned int sve_vl, void *za_state,
+ unsigned int sme_vl, u64 *svcr)
{
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
@@ -1222,8 +1686,11 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st;
+ last->svcr = svcr;
last->sve_state = sve_state;
+ last->za_state = za_state;
last->sve_vl = sve_vl;
+ last->sme_vl = sme_vl;
}
/*
@@ -1320,6 +1787,15 @@ static void fpsimd_flush_cpu_state(void)
{
WARN_ON(!system_supports_fpsimd());
__this_cpu_write(fpsimd_last_state.st, NULL);
+
+ /*
+ * Leaving streaming mode enabled will cause issues for any kernel
+ * NEON and leaving streaming mode or ZA enabled may increase power
+ * consumption.
+ */
+ if (system_supports_sme())
+ sme_smstop();
+
set_thread_flag(TIF_FOREIGN_FPSTATE);
}
@@ -1397,6 +1873,7 @@ EXPORT_SYMBOL(kernel_neon_end);
static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used);
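+/* Set if the CPU was in streaming mode when the EFI call began */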
+static DEFINE_PER_CPU(bool, efi_sm_state);
/*
* EFI runtime services support functions
@@ -1431,12 +1908,28 @@ void __efi_fpsimd_begin(void)
*/
if (system_supports_sve() && likely(efi_sve_state)) {
char *sve_state = this_cpu_ptr(efi_sve_state);
+ bool ffr = true;
+ u64 svcr;
__this_cpu_write(efi_sve_state_used, true);
+ if (system_supports_sme()) {
+ svcr = read_sysreg_s(SYS_SVCR);
+
+			__this_cpu_write(efi_sm_state,
+					 svcr & SVCR_SM_MASK);
+
+			/* Unless we have FA64 FFR does not exist in streaming mode */
+			if (!system_supports_fa64())
+				ffr = !(svcr & SVCR_SM_MASK);
+ }
+
sve_save_state(sve_state + sve_ffr_offset(sve_max_vl()),
&this_cpu_ptr(&efi_fpsimd_state)->fpsr,
- true);
+ ffr);
+
+ if (system_supports_sme())
+ sysreg_clear_set_s(SYS_SVCR,
+ SVCR_SM_MASK, 0);
+
} else {
fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
}
@@ -1459,11 +1952,26 @@ void __efi_fpsimd_end(void)
if (system_supports_sve() &&
likely(__this_cpu_read(efi_sve_state_used))) {
char const *sve_state = this_cpu_ptr(efi_sve_state);
+ bool ffr = true;
+
+ /*
+ * Restore streaming mode; EFI calls are
+ * normal function calls so should not return in
+ * streaming mode.
+ */
+ if (system_supports_sme()) {
+ if (__this_cpu_read(efi_sm_state)) {
+ sysreg_clear_set_s(SYS_SVCR,
+ 0,
+ SVCR_SM_MASK);
+				/* Unless we have FA64 FFR does not exist in streaming mode */
+				if (!system_supports_fa64())
+					ffr = false;
+ }
+ }
- sve_set_vq(sve_vq_from_vl(sve_get_vl()) - 1);
sve_load_state(sve_state + sve_ffr_offset(sve_max_vl()),
&this_cpu_ptr(&efi_fpsimd_state)->fpsr,
- true);
+ ffr);
__this_cpu_write(efi_sve_state_used, false);
} else {
@@ -1538,6 +2046,13 @@ static int __init fpsimd_init(void)
if (!cpu_have_named_feature(ASIMD))
pr_notice("Advanced SIMD is not implemented\n");
- return sve_sysctl_init();
+
+ if (cpu_have_named_feature(SME) && !cpu_have_named_feature(SVE))
+ pr_notice("SME is implemented but not SVE\n");
+
+ sve_sysctl_init();
+ sme_sysctl_init();
+
+ return 0;
}
core_initcall(fpsimd_init);
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 4506c4a90ac1..f447c4a36f69 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -268,6 +268,22 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
}
#ifdef CONFIG_DYNAMIC_FTRACE
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
+{
+ /*
+ * When DYNAMIC_FTRACE_WITH_REGS is selected, `fregs` can never be NULL
+ * and arch_ftrace_get_regs(fregs) will always give a non-NULL pt_regs
+ * in which we can safely modify the LR.
+ */
+ struct pt_regs *regs = arch_ftrace_get_regs(fregs);
+ unsigned long *parent = (unsigned long *)&procedure_link_pointer(regs);
+
+ prepare_ftrace_return(ip, parent, frame_pointer(regs));
+}
+#else
/*
* Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
* depending on @enable.
@@ -297,5 +313,6 @@ int ftrace_disable_ftrace_graph_caller(void)
{
return ftrace_modify_graph_caller(false);
}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 712e97c03e54..b29a311bb055 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -617,7 +617,7 @@ NOKPROBE_SYMBOL(toggle_bp_registers);
/*
* Debug exception handlers.
*/
-static int breakpoint_handler(unsigned long unused, unsigned int esr,
+static int breakpoint_handler(unsigned long unused, unsigned long esr,
struct pt_regs *regs)
{
int i, step = 0, *kernel_step;
@@ -701,7 +701,7 @@ NOKPROBE_SYMBOL(breakpoint_handler);
* addresses. There is no straight-forward way, short of disassembling the
* offending instruction, to map that address back to the watchpoint. This
* function computes the distance of the memory access from the watchpoint as a
- * heuristic for the likelyhood that a given access triggered the watchpoint.
+ * heuristic for the likelihood that a given access triggered the watchpoint.
*
* See Section D2.10.5 "Determining the memory location that caused a Watchpoint
* exception" of ARMv8 Architecture Reference Manual for details.
@@ -751,7 +751,7 @@ static int watchpoint_report(struct perf_event *wp, unsigned long addr,
return step;
}
-static int watchpoint_handler(unsigned long addr, unsigned int esr,
+static int watchpoint_handler(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
int i, step = 0, *kernel_step, access, closest_match = 0;
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 2aede780fb80..cda9c1e9864f 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -232,14 +232,14 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
return err;
}
-static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
+static int kgdb_brk_fn(struct pt_regs *regs, unsigned long esr)
{
kgdb_handle_exception(1, SIGTRAP, 0, regs);
return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_brk_fn)
-static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
+static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned long esr)
{
compiled_break = 1;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
@@ -248,7 +248,7 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
}
NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
-static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
+static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned long esr)
{
if (!kgdb_single_step)
return DBG_HOOK_ERROR;
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index e16b248699d5..19c2d487cb08 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -329,8 +329,13 @@ bool crash_is_nosave(unsigned long pfn)
/* in reserved memory? */
addr = __pfn_to_phys(pfn);
- if ((addr < crashk_res.start) || (crashk_res.end < addr))
- return false;
+ if ((addr < crashk_res.start) || (crashk_res.end < addr)) {
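+		/* Not in the main region; check the optional low region too */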
+ if (!crashk_low_res.end)
+ return false;
+
+ if ((addr < crashk_low_res.start) || (crashk_low_res.end < addr))
+ return false;
+ }
if (!kexec_crash_image)
return true;
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 59c648d51848..889951291cc0 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -65,10 +65,18 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
/* Exclude crashkernel region */
ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+ if (ret)
+ goto out;
+
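+	/* Also exclude the optional low crashkernel reservation */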
+ if (crashk_low_res.end) {
+ ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
+ if (ret)
+ goto out;
+ }
- if (!ret)
- ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+ ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+out:
kfree(cmem);
return ret;
}
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index e53493d8b208..a3d0494f25a9 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -220,7 +220,7 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
* increasing the section's alignment so that the
* resulting address of this instruction is guaranteed
* to equal the offset in that particular bit (as well
- * as all less signficant bits). This ensures that the
+ * as all less significant bits). This ensures that the
* address modulo 4 KB != 0xfff8 or 0xfffc (which would
* have all ones in bits [11:3])
*/
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 78b3e0f8e997..57b30bcf9f21 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -15,6 +15,7 @@
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/uio.h>
#include <asm/barrier.h>
@@ -76,6 +77,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
mte_sync_page_tags(page, old_pte, check_swap,
pte_is_tagged);
}
+
+ /* ensure the tags are visible before the PTE is set */
+ smp_wmb();
}
int memcmp_pages(struct page *page1, struct page *page2)
@@ -106,7 +110,8 @@ int memcmp_pages(struct page *page1, struct page *page2)
static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
/* Enable MTE Sync Mode for EL1. */
- sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
+ SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf));
isb();
pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
@@ -122,12 +127,12 @@ void mte_enable_kernel_sync(void)
WARN_ONCE(system_uses_mte_async_or_asymm_mode(),
"MTE async mode enabled system wide!");
- __mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
+ __mte_enable_kernel("synchronous", SCTLR_EL1_TCF_SYNC);
}
void mte_enable_kernel_async(void)
{
- __mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);
+ __mte_enable_kernel("asynchronous", SCTLR_EL1_TCF_ASYNC);
/*
* MTE async mode is set system wide by the first PE that
@@ -144,7 +149,7 @@ void mte_enable_kernel_async(void)
void mte_enable_kernel_asymm(void)
{
if (cpus_have_cap(ARM64_MTE_ASYMM)) {
- __mte_enable_kernel("asymmetric", SCTLR_ELx_TCF_ASYMM);
+ __mte_enable_kernel("asymmetric", SCTLR_EL1_TCF_ASYMM);
/*
* MTE asymm mode behaves as async mode for store
@@ -216,11 +221,11 @@ static void mte_update_sctlr_user(struct task_struct *task)
* default order.
*/
if (resolved_mte_tcf & MTE_CTRL_TCF_ASYMM)
- sctlr |= SCTLR_EL1_TCF0_ASYMM;
+ sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYMM);
else if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
- sctlr |= SCTLR_EL1_TCF0_ASYNC;
+ sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYNC);
else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
- sctlr |= SCTLR_EL1_TCF0_SYNC;
+ sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, SYNC);
task->thread.sctlr_user = sctlr;
}
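SYS_FIELD_PREP() and SYS_FIELD_PREP_ENUM() shift a value into the named field of the register image instead of relying on pre-shifted constants. The arithmetic is essentially the FIELD_PREP() pattern; a rough standalone model (the real macros also do compile-time type and range checking):

    #include <stdint.h>

    /* shift 'val' up to the mask's lowest set bit, clamp to the mask */
    static inline uint64_t field_prep(uint64_t mask, uint64_t val)
    {
        return (val << __builtin_ctzll(mask)) & mask;
    }

    /* SCTLR_EL1.TCF lives at bits [41:40], so synchronous mode (0b01) is: */
    /*   field_prep(0x3ULL << 40, 1) == 1ULL << 40                         */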
@@ -543,3 +548,32 @@ static int register_mte_tcf_preferred_sysctl(void)
return 0;
}
subsys_initcall(register_mte_tcf_preferred_sysctl);
+
+/*
+ * Return 0 on success, the number of bytes not probed otherwise.
+ */
+size_t mte_probe_user_range(const char __user *uaddr, size_t size)
+{
+ const char __user *end = uaddr + size;
+ int err = 0;
+ char val;
+
+ __raw_get_user(val, uaddr, err);
+ if (err)
+ return size;
+
+ uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE);
+ while (uaddr < end) {
+ /*
+ * A read is sufficient for mte, the caller should have probed
+ * for the pte write permission if required.
+ */
+ __raw_get_user(val, uaddr, err);
+ if (err)
+ return end - uaddr;
+ uaddr += MTE_GRANULE_SIZE;
+ }
+ (void)val;
+
+ return 0;
+}
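mte_probe_user_range() probes the first byte, aligns up, then steps one MTE granule (16 bytes) at a time so each access lands on a fresh tag granule. A standalone sketch of that walk, with probe_byte() standing in for __raw_get_user():

    #include <stddef.h>
    #include <stdint.h>

    #define GRANULE 16UL    /* mirrors MTE_GRANULE_SIZE */

    static int probe_byte(const char *p)
    {
        (void)p;            /* stub: 0 = readable */
        return 0;
    }

    static size_t probe_range(const char *uaddr, size_t size)
    {
        const char *end = uaddr + size;

        if (probe_byte(uaddr))
            return size;
        /* align up so every later probe touches a new granule */
        uaddr = (const char *)(((uintptr_t)uaddr + GRANULE - 1) & ~(GRANULE - 1));
        while (uaddr < end) {
            if (probe_byte(uaddr))
                return (size_t)(end - uaddr);
            uaddr += GRANULE;
        }
        return 0;
    }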
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 75fed4460407..57c7c211f8c7 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -35,7 +35,7 @@ static u64 native_steal_clock(int cpu)
DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
struct pv_time_stolen_time_region {
- struct pvclock_vcpu_stolen_time *kaddr;
+ struct pvclock_vcpu_stolen_time __rcu *kaddr;
};
static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);
@@ -52,7 +52,9 @@ early_param("no-steal-acc", parse_no_stealacc);
/* return stolen time in ns by asking the hypervisor */
static u64 para_steal_clock(int cpu)
{
+ struct pvclock_vcpu_stolen_time *kaddr = NULL;
struct pv_time_stolen_time_region *reg;
+ u64 ret = 0;
reg = per_cpu_ptr(&stolen_time_region, cpu);
@@ -61,28 +63,37 @@ static u64 para_steal_clock(int cpu)
* online notification callback runs. Until the callback
* has run we just return zero.
*/
- if (!reg->kaddr)
+ rcu_read_lock();
+ kaddr = rcu_dereference(reg->kaddr);
+ if (!kaddr) {
+ rcu_read_unlock();
return 0;
+ }
- return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
+ ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time));
+ rcu_read_unlock();
+ return ret;
}
static int stolen_time_cpu_down_prepare(unsigned int cpu)
{
+ struct pvclock_vcpu_stolen_time *kaddr = NULL;
struct pv_time_stolen_time_region *reg;
reg = this_cpu_ptr(&stolen_time_region);
if (!reg->kaddr)
return 0;
- memunmap(reg->kaddr);
- memset(reg, 0, sizeof(*reg));
+ kaddr = rcu_replace_pointer(reg->kaddr, NULL, true);
+ synchronize_rcu();
+ memunmap(kaddr);
return 0;
}
static int stolen_time_cpu_online(unsigned int cpu)
{
+ struct pvclock_vcpu_stolen_time *kaddr = NULL;
struct pv_time_stolen_time_region *reg;
struct arm_smccc_res res;
@@ -93,17 +104,19 @@ static int stolen_time_cpu_online(unsigned int cpu)
if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
return -EINVAL;
- reg->kaddr = memremap(res.a0,
+ kaddr = memremap(res.a0,
sizeof(struct pvclock_vcpu_stolen_time),
MEMREMAP_WB);
+ rcu_assign_pointer(reg->kaddr, kaddr);
+
if (!reg->kaddr) {
pr_warn("Failed to map stolen time data structure\n");
return -ENOMEM;
}
- if (le32_to_cpu(reg->kaddr->revision) != 0 ||
- le32_to_cpu(reg->kaddr->attributes) != 0) {
+ if (le32_to_cpu(kaddr->revision) != 0 ||
+ le32_to_cpu(kaddr->attributes) != 0) {
pr_warn_once("Unexpected revision or attributes in stolen time data\n");
return -ENXIO;
}
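The paravirt change is the standard RCU publish/retire discipline: readers dereference under the read lock, the teardown path swaps the pointer out, waits a grace period, and only then unmaps. Reduced to its shape (a kernel-style sketch; slot, use() and unmap_obj() are placeholders):

    /* reader */
    rcu_read_lock();
    p = rcu_dereference(slot);
    if (p)
        use(p);                 /* p stays valid until the unlock */
    rcu_read_unlock();

    /* retirer */
    p = rcu_replace_pointer(slot, NULL, true);
    synchronize_rcu();          /* wait out all pre-existing readers */
    unmap_obj(p);               /* no reader can still hold p */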
diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
index 771f543464e0..33e0fabc0b79 100644
--- a/arch/arm64/kernel/patching.c
+++ b/arch/arm64/kernel/patching.c
@@ -117,8 +117,8 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
int i, ret = 0;
struct aarch64_insn_patch *pp = arg;
- /* The first CPU becomes master */
- if (atomic_inc_return(&pp->cpu_count) == 1) {
+ /* The last CPU becomes master */
+ if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
pp->new_insns[i]);
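Making the last CPU the master matters because the callback only knows every CPU has stopped running unpatched text once all of them have entered it; counting to num_online_cpus() guarantees no CPU is still executing the instructions while they are rewritten. The rendezvous, modelled here with C11 atomics (the kernel uses atomic_inc_return() plus cpu_relax()):

    #include <stdatomic.h>

    static atomic_int cpu_count;

    static void patch_cb(int nr_cpus, void (*patch_all)(void))
    {
        if (atomic_fetch_add(&cpu_count, 1) + 1 == nr_cpus) {
            patch_all();                      /* last arriver patches */
            atomic_fetch_add(&cpu_count, 1);  /* extra tick releases waiters */
        } else {
            while (atomic_load(&cpu_count) <= nr_cpus)
                ;                             /* park until patching is done */
        }
    }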
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d9dfa82c1f18..d1d182320245 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -335,7 +335,7 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
}
static int __kprobes
-kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
+kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long addr = instruction_pointer(regs);
@@ -359,7 +359,7 @@ static struct break_hook kprobes_break_ss_hook = {
};
static int __kprobes
-kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
+kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
{
kprobe_handler(regs);
return DBG_HOOK_HANDLED;
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
index 9be668f3f034..d49aef2657cd 100644
--- a/arch/arm64/kernel/probes/uprobes.c
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -166,7 +166,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
}
static int uprobe_breakpoint_handler(struct pt_regs *regs,
- unsigned int esr)
+ unsigned long esr)
{
if (uprobe_pre_sstep_notifier(regs))
return DBG_HOOK_HANDLED;
@@ -175,7 +175,7 @@ static int uprobe_breakpoint_handler(struct pt_regs *regs,
}
static int uprobe_single_step_handler(struct pt_regs *regs,
- unsigned int esr)
+ unsigned long esr)
{
struct uprobe_task *utask = current->utask;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7fa97df55e3a..9734c9fb1a32 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -250,6 +250,8 @@ void show_regs(struct pt_regs *regs)
static void tls_thread_flush(void)
{
write_sysreg(0, tpidr_el0);
+ if (system_supports_tpidr2())
+ write_sysreg_s(0, SYS_TPIDR2_EL0);
if (is_compat_task()) {
current->thread.uw.tp_value = 0;
@@ -298,16 +300,42 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
/*
* Detach src's sve_state (if any) from dst so that it does not
- * get erroneously used or freed prematurely. dst's sve_state
+ * get erroneously used or freed prematurely. dst's copies
* will be allocated on demand later on if dst uses SVE.
* For consistency, also clear TIF_SVE here: this could be done
* later in copy_process(), but to avoid tripping up future
- * maintainers it is best not to leave TIF_SVE and sve_state in
+ * maintainers it is best not to leave TIF flags and buffers in
* an inconsistent state, even temporarily.
*/
dst->thread.sve_state = NULL;
clear_tsk_thread_flag(dst, TIF_SVE);
+ /*
+ * In the unlikely event that we create a new thread with ZA
+ * enabled we should retain the ZA state so duplicate it here.
+ * This may be shortly freed if we exec() or if CLONE_SETTLS
+ * but it's simpler to do it here. To avoid confusing the rest
+ * of the code ensure that we have a sve_state allocated
+ * whenever za_state is allocated.
+ */
+ if (thread_za_enabled(&src->thread)) {
+ dst->thread.sve_state = kzalloc(sve_state_size(src),
+ GFP_KERNEL);
+ if (!dst->thread.sve_state)
+ return -ENOMEM;
+ dst->thread.za_state = kmemdup(src->thread.za_state,
+ za_state_size(src),
+ GFP_KERNEL);
+ if (!dst->thread.za_state) {
+ kfree(dst->thread.sve_state);
+ dst->thread.sve_state = NULL;
+ return -ENOMEM;
+ }
+ } else {
+ dst->thread.za_state = NULL;
+ clear_tsk_thread_flag(dst, TIF_SME);
+ }
+
/* clear any pending asynchronous tag fault raised by the parent */
clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
@@ -343,6 +371,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
* out-of-sync with the saved value.
*/
*task_user_tls(p) = read_sysreg(tpidr_el0);
+ if (system_supports_tpidr2())
+ p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
if (stack_start) {
if (is_compat_thread(task_thread_info(p)))
@@ -353,10 +383,12 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
/*
* If a TLS pointer was passed to clone, use it for the new
- * thread.
+ * thread. We also reset TPIDR2 if it's in use.
*/
- if (clone_flags & CLONE_SETTLS)
+ if (clone_flags & CLONE_SETTLS) {
p->thread.uw.tp_value = tls;
+ p->thread.tpidr2_el0 = 0;
+ }
} else {
/*
* A kthread has no context to ERET to, so ensure any buggy
@@ -387,6 +419,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
void tls_preserve_current_state(void)
{
*task_user_tls(current) = read_sysreg(tpidr_el0);
+ if (system_supports_tpidr2() && !is_compat_task())
+ current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
}
static void tls_thread_switch(struct task_struct *next)
@@ -399,6 +433,8 @@ static void tls_thread_switch(struct task_struct *next)
write_sysreg(0, tpidrro_el0);
write_sysreg(*task_user_tls(next), tpidr_el0);
+ if (system_supports_tpidr2())
+ write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
}
/*
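The fork path above duplicates ZA state into the child and allocates fresh SVE backing alongside it, unwinding the first allocation if the second fails. The rollback pattern in isolation (a sketch; names are placeholders, not the kernel's):

    #include <stdlib.h>
    #include <string.h>

    struct thr { void *sve_buf, *za_buf; size_t sve_sz, za_sz; };

    static int dup_vector_state(struct thr *dst, const struct thr *src)
    {
        dst->sve_sz = src->sve_sz;
        dst->za_sz = src->za_sz;
        dst->sve_buf = calloc(1, dst->sve_sz);   /* fresh, zeroed backing */
        if (!dst->sve_buf)
            return -1;
        dst->za_buf = malloc(dst->za_sz);
        if (!dst->za_buf) {
            free(dst->sve_buf);                  /* roll back partial alloc */
            dst->sve_buf = NULL;
            return -1;
        }
        memcpy(dst->za_buf, src->za_buf, dst->za_sz);  /* ZA is inherited */
        return 0;
    }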
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index 5777929d35bf..40be3a7c2c53 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -853,6 +853,7 @@ u8 spectre_bhb_loop_affected(int scope)
if (scope == SCOPE_LOCAL_CPU) {
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 230a47b9189e..21da83187a60 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -713,21 +713,51 @@ static int system_call_set(struct task_struct *target,
#ifdef CONFIG_ARM64_SVE
static void sve_init_header_from_task(struct user_sve_header *header,
- struct task_struct *target)
+ struct task_struct *target,
+ enum vec_type type)
{
unsigned int vq;
+ bool active;
+ bool fpsimd_only;
+ enum vec_type task_type;
memset(header, 0, sizeof(*header));
- header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
- SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
- if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
- header->flags |= SVE_PT_VL_INHERIT;
+ /* Check if the requested registers are active for the task */
+ if (thread_sm_enabled(&target->thread))
+ task_type = ARM64_VEC_SME;
+ else
+ task_type = ARM64_VEC_SVE;
+ active = (task_type == type);
+
+ switch (type) {
+ case ARM64_VEC_SVE:
+ if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
+ header->flags |= SVE_PT_VL_INHERIT;
+ fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
+ break;
+ case ARM64_VEC_SME:
+ if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
+ header->flags |= SVE_PT_VL_INHERIT;
+ fpsimd_only = false;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
- header->vl = task_get_sve_vl(target);
+ if (active) {
+ if (fpsimd_only) {
+ header->flags |= SVE_PT_REGS_FPSIMD;
+ } else {
+ header->flags |= SVE_PT_REGS_SVE;
+ }
+ }
+
+ header->vl = task_get_vl(target, type);
vq = sve_vq_from_vl(header->vl);
- header->max_vl = sve_max_vl();
+ header->max_vl = vec_max_vl(type);
header->size = SVE_PT_SIZE(vq, header->flags);
header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
SVE_PT_REGS_SVE);
@@ -738,19 +768,17 @@ static unsigned int sve_size_from_header(struct user_sve_header const *header)
return ALIGN(header->size, SVE_VQ_BYTES);
}
-static int sve_get(struct task_struct *target,
- const struct user_regset *regset,
- struct membuf to)
+static int sve_get_common(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to,
+ enum vec_type type)
{
struct user_sve_header header;
unsigned int vq;
unsigned long start, end;
- if (!system_supports_sve())
- return -EINVAL;
-
/* Header */
- sve_init_header_from_task(&header, target);
+ sve_init_header_from_task(&header, target, type);
vq = sve_vq_from_vl(header.vl);
membuf_write(&to, &header, sizeof(header));
@@ -758,49 +786,61 @@ static int sve_get(struct task_struct *target,
if (target == current)
fpsimd_preserve_current_state();
- /* Registers: FPSIMD-only case */
-
BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
- if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
+ BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
+
+ switch ((header.flags & SVE_PT_REGS_MASK)) {
+ case SVE_PT_REGS_FPSIMD:
return __fpr_get(target, regset, to);
- /* Otherwise: full SVE case */
+ case SVE_PT_REGS_SVE:
+ start = SVE_PT_SVE_OFFSET;
+ end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
+ membuf_write(&to, target->thread.sve_state, end - start);
- BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
- start = SVE_PT_SVE_OFFSET;
- end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
- membuf_write(&to, target->thread.sve_state, end - start);
+ start = end;
+ end = SVE_PT_SVE_FPSR_OFFSET(vq);
+ membuf_zero(&to, end - start);
- start = end;
- end = SVE_PT_SVE_FPSR_OFFSET(vq);
- membuf_zero(&to, end - start);
+ /*
+ * Copy fpsr, and fpcr which must follow contiguously in
+ * struct fpsimd_state:
+ */
+ start = end;
+ end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
+ membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
+ end - start);
- /*
- * Copy fpsr, and fpcr which must follow contiguously in
- * struct fpsimd_state:
- */
- start = end;
- end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
- membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, end - start);
+ start = end;
+ end = sve_size_from_header(&header);
+ return membuf_zero(&to, end - start);
- start = end;
- end = sve_size_from_header(&header);
- return membuf_zero(&to, end - start);
+ default:
+ return 0;
+ }
}
-static int sve_set(struct task_struct *target,
+static int sve_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
+ struct membuf to)
+{
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ return sve_get_common(target, regset, to, ARM64_VEC_SVE);
+}
+
+static int sve_set_common(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf,
+ enum vec_type type)
{
int ret;
struct user_sve_header header;
unsigned int vq;
unsigned long start, end;
- if (!system_supports_sve())
- return -EINVAL;
-
/* Header */
if (count < sizeof(header))
return -EINVAL;
@@ -813,13 +853,37 @@ static int sve_set(struct task_struct *target,
* Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
* vec_set_vector_length(), which will also validate them for us:
*/
- ret = vec_set_vector_length(target, ARM64_VEC_SVE, header.vl,
+ ret = vec_set_vector_length(target, type, header.vl,
((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
if (ret)
goto out;
/* Actual VL set may be less than the user asked for: */
- vq = sve_vq_from_vl(task_get_sve_vl(target));
+ vq = sve_vq_from_vl(task_get_vl(target, type));
+
+ /* Enter/exit streaming mode */
+ if (system_supports_sme()) {
+ u64 old_svcr = target->thread.svcr;
+
+ switch (type) {
+ case ARM64_VEC_SVE:
+ target->thread.svcr &= ~SVCR_SM_MASK;
+ break;
+ case ARM64_VEC_SME:
+ target->thread.svcr |= SVCR_SM_MASK;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ /*
+ * If we switched then invalidate any existing SVE
+ * state and ensure there's storage.
+ */
+ if (target->thread.svcr != old_svcr)
+ sve_alloc(target);
+ }
/* Registers: FPSIMD-only case */
@@ -828,10 +892,15 @@ static int sve_set(struct task_struct *target,
ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
SVE_PT_FPSIMD_OFFSET);
clear_tsk_thread_flag(target, TIF_SVE);
+ if (type == ARM64_VEC_SME)
+ fpsimd_force_sync_to_sve(target);
goto out;
}
- /* Otherwise: full SVE case */
+ /*
+ * Otherwise: no registers or full SVE case. For backwards
+ * compatibility reasons we treat empty flags as SVE registers.
+ */
/*
* If setting a different VL from the requested VL and there is
@@ -852,8 +921,9 @@ static int sve_set(struct task_struct *target,
/*
* Ensure target->thread.sve_state is up to date with target's
- * FPSIMD regs, so that a short copyin leaves trailing registers
- * unmodified.
+ * FPSIMD regs, so that a short copyin leaves trailing
+ * registers unmodified. Always enable SVE even if going into
+ * streaming mode.
*/
fpsimd_sync_to_sve(target);
set_tsk_thread_flag(target, TIF_SVE);
@@ -889,8 +959,181 @@ out:
return ret;
}
+static int sve_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ return sve_set_common(target, regset, pos, count, kbuf, ubuf,
+ ARM64_VEC_SVE);
+}
+
#endif /* CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_SME
+
+static int ssve_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ if (!system_supports_sme())
+ return -EINVAL;
+
+ return sve_get_common(target, regset, to, ARM64_VEC_SME);
+}
+
+static int ssve_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ if (!system_supports_sme())
+ return -EINVAL;
+
+ return sve_set_common(target, regset, pos, count, kbuf, ubuf,
+ ARM64_VEC_SME);
+}
+
+static int za_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct user_za_header header;
+ unsigned int vq;
+ unsigned long start, end;
+
+ if (!system_supports_sme())
+ return -EINVAL;
+
+ /* Header */
+ memset(&header, 0, sizeof(header));
+
+ if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
+ header.flags |= ZA_PT_VL_INHERIT;
+
+ header.vl = task_get_sme_vl(target);
+ vq = sve_vq_from_vl(header.vl);
+ header.max_vl = sme_max_vl();
+ header.max_size = ZA_PT_SIZE(vq);
+
+ /* If ZA is not active there is only the header */
+ if (thread_za_enabled(&target->thread))
+ header.size = ZA_PT_SIZE(vq);
+ else
+ header.size = ZA_PT_ZA_OFFSET;
+
+ membuf_write(&to, &header, sizeof(header));
+
+ BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
+ end = ZA_PT_ZA_OFFSET;
+
+ if (target == current)
+ fpsimd_preserve_current_state();
+
+ /* Any register data to include? */
+ if (thread_za_enabled(&target->thread)) {
+ start = end;
+ end = ZA_PT_SIZE(vq);
+ membuf_write(&to, target->thread.za_state, end - start);
+ }
+
+ /* Zero any trailing padding */
+ start = end;
+ end = ALIGN(header.size, SVE_VQ_BYTES);
+ return membuf_zero(&to, end - start);
+}
+
+static int za_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct user_za_header header;
+ unsigned int vq;
+ unsigned long start, end;
+
+ if (!system_supports_sme())
+ return -EINVAL;
+
+ /* Header */
+ if (count < sizeof(header))
+ return -EINVAL;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
+ 0, sizeof(header));
+ if (ret)
+ goto out;
+
+ /*
+ * All current ZA_PT_* flags are consumed by
+ * vec_set_vector_length(), which will also validate them for
+ * us:
+ */
+ ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
+ ((unsigned long)header.flags) << 16);
+ if (ret)
+ goto out;
+
+ /* Actual VL set may be less than the user asked for: */
+ vq = sve_vq_from_vl(task_get_sme_vl(target));
+
+ /* Ensure there is some SVE storage for streaming mode */
+ if (!target->thread.sve_state) {
+ sve_alloc(target);
+ if (!target->thread.sve_state) {
+			clear_tsk_thread_flag(target, TIF_SME);
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ /* Allocate/reinit ZA storage */
+ sme_alloc(target);
+ if (!target->thread.za_state) {
+ ret = -ENOMEM;
+ clear_tsk_thread_flag(target, TIF_SME);
+ goto out;
+ }
+
+ /* If there is no data then disable ZA */
+ if (!count) {
+ target->thread.svcr &= ~SVCR_ZA_MASK;
+ goto out;
+ }
+
+ /*
+ * If setting a different VL from the requested VL and there is
+ * register data, the data layout will be wrong: don't even
+ * try to set the registers in this case.
+ */
+ if (vq != sve_vq_from_vl(header.vl)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
+ start = ZA_PT_ZA_OFFSET;
+ end = ZA_PT_SIZE(vq);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ target->thread.za_state,
+ start, end);
+ if (ret)
+ goto out;
+
+ /* Mark ZA as active and let userspace use it */
+ set_tsk_thread_flag(target, TIF_SME);
+ target->thread.svcr |= SVCR_ZA_MASK;
+
+out:
+ fpsimd_flush_task_state(target);
+ return ret;
+}
+
+#endif /* CONFIG_ARM64_SME */
+
#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
const struct user_regset *regset,
@@ -1108,6 +1351,10 @@ enum aarch64_regset {
#ifdef CONFIG_ARM64_SVE
REGSET_SVE,
#endif
+#ifdef CONFIG_ARM64_SME
+ REGSET_SSVE,
+ REGSET_ZA,
+#endif
#ifdef CONFIG_ARM64_PTR_AUTH
REGSET_PAC_MASK,
REGSET_PAC_ENABLED_KEYS,
@@ -1188,6 +1435,33 @@ static const struct user_regset aarch64_regsets[] = {
.set = sve_set,
},
#endif
+#ifdef CONFIG_ARM64_SME
+ [REGSET_SSVE] = { /* Streaming mode SVE */
+ .core_note_type = NT_ARM_SSVE,
+ .n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
+ SVE_VQ_BYTES),
+ .size = SVE_VQ_BYTES,
+ .align = SVE_VQ_BYTES,
+ .regset_get = ssve_get,
+ .set = ssve_set,
+ },
+ [REGSET_ZA] = { /* SME ZA */
+ .core_note_type = NT_ARM_ZA,
+ /*
+ * ZA is a single register but it's variably sized and
+ * the ptrace core requires that the size of any data
+ * be an exact multiple of the configured register
+ * size so report as though we had SVE_VQ_BYTES
+ * registers. These values aren't exposed to
+ * userspace.
+ */
+ .n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
+ .size = SVE_VQ_BYTES,
+ .align = SVE_VQ_BYTES,
+ .regset_get = za_get,
+ .set = za_set,
+ },
+#endif
#ifdef CONFIG_ARM64_PTR_AUTH
[REGSET_PAC_MASK] = {
.core_note_type = NT_ARM_PAC_MASK,
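The ZA regset sizing comment is easier to see with numbers. A worked example, assuming SVE_VQ_BYTES == 16 and SME_VQ_MAX == 16 (the values the SVE/SME uapi headers suggest):

    ZA payload at vq      = (vq * 16)^2 bytes
    worst case (vq = 16)  = (16 * 16)^2 = 65536 bytes
    ZA_PT_SIZE(16)        = header + 65536
    .n                    = DIV_ROUND_UP(ZA_PT_SIZE(16), SVE_VQ_BYTES)

so the single variably-sized ZA register is exported as a few thousand 16-byte pseudo-registers purely to satisfy the ptrace core's exact-multiple rule.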
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index f0a3df9e18a3..413f899e4ac6 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -37,6 +37,15 @@
* safe memory that has been set up to be preserved during the copy operation.
*/
SYM_CODE_START(arm64_relocate_new_kernel)
+ /*
+ * The kimage structure isn't allocated specially and may be clobbered
+ * during relocation. We must load any values we need from it prior to
+ * any relocation occurring.
+ */
+ ldr x28, [x0, #KIMAGE_START]
+ ldr x27, [x0, #KIMAGE_ARCH_EL2_VECTORS]
+ ldr x26, [x0, #KIMAGE_ARCH_DTB_MEM]
+
/* Setup the list loop variables. */
ldr x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
ldr x17, [x0, #KIMAGE_ARCH_TTBR1] /* x17 = linear map copy */
@@ -72,21 +81,20 @@ SYM_CODE_START(arm64_relocate_new_kernel)
ic iallu
dsb nsh
isb
- ldr x4, [x0, #KIMAGE_START] /* relocation start */
- ldr x1, [x0, #KIMAGE_ARCH_EL2_VECTORS] /* relocation start */
- ldr x0, [x0, #KIMAGE_ARCH_DTB_MEM] /* dtb address */
turn_off_mmu x12, x13
/* Start new image. */
- cbz x1, .Lel1
- mov x1, x4 /* relocation start */
- mov x2, x0 /* dtb address */
+ cbz x27, .Lel1
+ mov x1, x28 /* kernel entry point */
+ mov x2, x26 /* dtb address */
mov x3, xzr
mov x4, xzr
mov x0, #HVC_SOFT_RESTART
hvc #0 /* Jumps from el2 */
.Lel1:
+ mov x0, x26 /* dtb address */
+ mov x1, xzr
mov x2, xzr
mov x3, xzr
- br x4 /* Jumps from el1 */
+ br x28 /* Jumps from el1 */
SYM_CODE_END(arm64_relocate_new_kernel)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 3505789cf4bd..fea3223704b6 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -225,6 +225,8 @@ static void __init request_standard_resources(void)
kernel_code.end = __pa_symbol(__init_begin - 1);
kernel_data.start = __pa_symbol(_sdata);
kernel_data.end = __pa_symbol(_end - 1);
+ insert_resource(&iomem_resource, &kernel_code);
+ insert_resource(&iomem_resource, &kernel_data);
num_standard_resources = memblock.memory.cnt;
res_size = num_standard_resources * sizeof(*standard_resources);
@@ -246,20 +248,7 @@ static void __init request_standard_resources(void)
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
}
- request_resource(&iomem_resource, res);
-
- if (kernel_code.start >= res->start &&
- kernel_code.end <= res->end)
- request_resource(res, &kernel_code);
- if (kernel_data.start >= res->start &&
- kernel_data.end <= res->end)
- request_resource(res, &kernel_data);
-#ifdef CONFIG_KEXEC_CORE
- /* Userspace will find "Crash kernel" region in /proc/iomem. */
- if (crashk_res.end && crashk_res.start >= res->start &&
- crashk_res.end <= res->end)
- request_resource(res, &crashk_res);
-#endif
+ insert_resource(&iomem_resource, res);
}
}
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 4a4122ef6f39..edb2d9206a78 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -56,6 +56,7 @@ struct rt_sigframe_user_layout {
unsigned long fpsimd_offset;
unsigned long esr_offset;
unsigned long sve_offset;
+ unsigned long za_offset;
unsigned long extra_offset;
unsigned long end_offset;
};
@@ -218,6 +219,7 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
struct user_ctxs {
struct fpsimd_context __user *fpsimd;
struct sve_context __user *sve;
+ struct za_context __user *za;
};
#ifdef CONFIG_ARM64_SVE
@@ -226,11 +228,17 @@ static int preserve_sve_context(struct sve_context __user *ctx)
{
int err = 0;
u16 reserved[ARRAY_SIZE(ctx->__reserved)];
+ u16 flags = 0;
unsigned int vl = task_get_sve_vl(current);
unsigned int vq = 0;
- if (test_thread_flag(TIF_SVE))
+ if (thread_sm_enabled(&current->thread)) {
+ vl = task_get_sme_vl(current);
vq = sve_vq_from_vl(vl);
+ flags |= SVE_SIG_FLAG_SM;
+ } else if (test_thread_flag(TIF_SVE)) {
+ vq = sve_vq_from_vl(vl);
+ }
memset(reserved, 0, sizeof(reserved));
@@ -238,6 +246,7 @@ static int preserve_sve_context(struct sve_context __user *ctx)
__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
&ctx->head.size, err);
__put_user_error(vl, &ctx->vl, err);
+ __put_user_error(flags, &ctx->flags, err);
BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
@@ -258,18 +267,28 @@ static int preserve_sve_context(struct sve_context __user *ctx)
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
int err;
- unsigned int vq;
+ unsigned int vl, vq;
struct user_fpsimd_state fpsimd;
struct sve_context sve;
if (__copy_from_user(&sve, user->sve, sizeof(sve)))
return -EFAULT;
- if (sve.vl != task_get_sve_vl(current))
+ if (sve.flags & SVE_SIG_FLAG_SM) {
+ if (!system_supports_sme())
+ return -EINVAL;
+
+ vl = task_get_sme_vl(current);
+ } else {
+ vl = task_get_sve_vl(current);
+ }
+
+ if (sve.vl != vl)
return -EINVAL;
if (sve.head.size <= sizeof(*user->sve)) {
clear_thread_flag(TIF_SVE);
+ current->thread.svcr &= ~SVCR_SM_MASK;
goto fpsimd_only;
}
@@ -301,7 +320,10 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
if (err)
return -EFAULT;
- set_thread_flag(TIF_SVE);
+ if (sve.flags & SVE_SIG_FLAG_SM)
+ current->thread.svcr |= SVCR_SM_MASK;
+ else
+ set_thread_flag(TIF_SVE);
fpsimd_only:
/* copy the FP and status/control registers */
@@ -326,6 +348,101 @@ extern int restore_sve_fpsimd_context(struct user_ctxs *user);
#endif /* ! CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_SME
+
+static int preserve_za_context(struct za_context __user *ctx)
+{
+ int err = 0;
+ u16 reserved[ARRAY_SIZE(ctx->__reserved)];
+ unsigned int vl = task_get_sme_vl(current);
+ unsigned int vq;
+
+ if (thread_za_enabled(&current->thread))
+ vq = sve_vq_from_vl(vl);
+ else
+ vq = 0;
+
+ memset(reserved, 0, sizeof(reserved));
+
+ __put_user_error(ZA_MAGIC, &ctx->head.magic, err);
+ __put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
+ &ctx->head.size, err);
+ __put_user_error(vl, &ctx->vl, err);
+ BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
+ err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
+
+ if (vq) {
+ /*
+ * This assumes that the ZA state has already been saved to
+ * the task struct by calling the function
+ * fpsimd_signal_preserve_current_state().
+ */
+ err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
+ current->thread.za_state,
+ ZA_SIG_REGS_SIZE(vq));
+ }
+
+ return err ? -EFAULT : 0;
+}
+
+static int restore_za_context(struct user_ctxs *user)
+{
+ int err;
+ unsigned int vq;
+ struct za_context za;
+
+ if (__copy_from_user(&za, user->za, sizeof(za)))
+ return -EFAULT;
+
+ if (za.vl != task_get_sme_vl(current))
+ return -EINVAL;
+
+ if (za.head.size <= sizeof(*user->za)) {
+ current->thread.svcr &= ~SVCR_ZA_MASK;
+ return 0;
+ }
+
+ vq = sve_vq_from_vl(za.vl);
+
+ if (za.head.size < ZA_SIG_CONTEXT_SIZE(vq))
+ return -EINVAL;
+
+ /*
+ * Careful: we are about __copy_from_user() directly into
+ * thread.za_state with preemption enabled, so protection is
+ * needed to prevent a racing context switch from writing stale
+ * registers back over the new data.
+ */
+
+ fpsimd_flush_task_state(current);
+ /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
+
+ sme_alloc(current);
+ if (!current->thread.za_state) {
+ current->thread.svcr &= ~SVCR_ZA_MASK;
+ clear_thread_flag(TIF_SME);
+ return -ENOMEM;
+ }
+
+ err = __copy_from_user(current->thread.za_state,
+ (char __user const *)user->za +
+ ZA_SIG_REGS_OFFSET,
+ ZA_SIG_REGS_SIZE(vq));
+ if (err)
+ return -EFAULT;
+
+ set_thread_flag(TIF_SME);
+ current->thread.svcr |= SVCR_ZA_MASK;
+
+ return 0;
+}
+#else /* ! CONFIG_ARM64_SME */
+
+/* Turn any non-optimised out attempts to use these into a link error: */
+extern int preserve_za_context(void __user *ctx);
+extern int restore_za_context(struct user_ctxs *user);
+
+#endif /* ! CONFIG_ARM64_SME */
static int parse_user_sigframe(struct user_ctxs *user,
struct rt_sigframe __user *sf)
@@ -340,6 +457,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
user->fpsimd = NULL;
user->sve = NULL;
+ user->za = NULL;
if (!IS_ALIGNED((unsigned long)base, 16))
goto invalid;
@@ -393,7 +511,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
break;
case SVE_MAGIC:
- if (!system_supports_sve())
+ if (!system_supports_sve() && !system_supports_sme())
goto invalid;
if (user->sve)
@@ -405,6 +523,19 @@ static int parse_user_sigframe(struct user_ctxs *user,
user->sve = (struct sve_context __user *)head;
break;
+ case ZA_MAGIC:
+ if (!system_supports_sme())
+ goto invalid;
+
+ if (user->za)
+ goto invalid;
+
+ if (size < sizeof(*user->za))
+ goto invalid;
+
+ user->za = (struct za_context __user *)head;
+ break;
+
case EXTRA_MAGIC:
if (have_extra_context)
goto invalid;
@@ -528,6 +659,9 @@ static int restore_sigframe(struct pt_regs *regs,
}
}
+ if (err == 0 && system_supports_sme() && user.za)
+ err = restore_za_context(&user);
+
return err;
}
@@ -594,11 +728,12 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
if (system_supports_sve()) {
unsigned int vq = 0;
- if (add_all || test_thread_flag(TIF_SVE)) {
- int vl = sve_max_vl();
+ if (add_all || test_thread_flag(TIF_SVE) ||
+ thread_sm_enabled(&current->thread)) {
+ int vl = max(sve_max_vl(), sme_max_vl());
if (!add_all)
- vl = task_get_sve_vl(current);
+ vl = thread_get_cur_vl(&current->thread);
vq = sve_vq_from_vl(vl);
}
@@ -609,6 +744,24 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
return err;
}
+ if (system_supports_sme()) {
+ unsigned int vl;
+ unsigned int vq = 0;
+
+ if (add_all)
+ vl = sme_max_vl();
+ else
+ vl = task_get_sme_vl(current);
+
+ if (thread_za_enabled(&current->thread))
+ vq = sve_vq_from_vl(vl);
+
+ err = sigframe_alloc(user, &user->za_offset,
+ ZA_SIG_CONTEXT_SIZE(vq));
+ if (err)
+ return err;
+ }
+
return sigframe_alloc_end(user);
}
@@ -649,13 +802,21 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
}
- /* Scalable Vector Extension state, if present */
- if (system_supports_sve() && err == 0 && user->sve_offset) {
+ /* Scalable Vector Extension state (including streaming), if present */
+ if ((system_supports_sve() || system_supports_sme()) &&
+ err == 0 && user->sve_offset) {
struct sve_context __user *sve_ctx =
apply_user_offset(user, user->sve_offset);
err |= preserve_sve_context(sve_ctx);
}
+ /* ZA state if present */
+ if (system_supports_sme() && err == 0 && user->za_offset) {
+ struct za_context __user *za_ctx =
+ apply_user_offset(user, user->za_offset);
+ err |= preserve_za_context(za_ctx);
+ }
+
if (err == 0 && user->extra_offset) {
char __user *sfp = (char __user *)user->sigframe;
char __user *userp =
@@ -759,6 +920,13 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
/* TCO (Tag Check Override) always cleared for signal handlers */
regs->pstate &= ~PSR_TCO_BIT;
+ /* Signal handlers are invoked with ZA and streaming mode disabled */
+ if (system_supports_sme()) {
+ current->thread.svcr &= ~(SVCR_ZA_MASK |
+ SVCR_SM_MASK);
+ sme_smstop();
+ }
+
if (ka->sa.sa_flags & SA_RESTORER)
sigtramp = ka->sa.sa_restorer;
else
@@ -1011,6 +1179,7 @@ static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
+static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
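For reference, the new ZA record can be located from userspace the same way as the other signal-frame records, by walking the (magic, size) headers in the reserved area. A hedged sketch, assuming an arm64 Linux with the SME uapi headers installed (a full parser would also follow EXTRA_MAGIC, elided here):

    #include <signal.h>
    #include <ucontext.h>
    #include <asm/sigcontext.h>

    static void handler(int sig, siginfo_t *info, void *ucv)
    {
        ucontext_t *uc = ucv;
        struct _aarch64_ctx *head =
            (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

        (void)sig; (void)info;
        /* records terminate with magic == 0 */
        for (; head->magic; head = (void *)((char *)head + head->size)) {
            if (head->magic == ZA_MAGIC) {
                struct za_context *za = (struct za_context *)head;
                /* za->vl is the streaming VL; ZA payload is present
                 * only when head->size > sizeof(*za) */
                (void)za;
                break;
            }
        }
    }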
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index d984282b979f..4700f8522d27 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -487,6 +487,7 @@ static_assert(offsetof(compat_siginfo_t, si_upper) == 0x18);
static_assert(offsetof(compat_siginfo_t, si_pkey) == 0x14);
static_assert(offsetof(compat_siginfo_t, si_perf_data) == 0x10);
static_assert(offsetof(compat_siginfo_t, si_perf_type) == 0x14);
+static_assert(offsetof(compat_siginfo_t, si_perf_flags) == 0x18);
static_assert(offsetof(compat_siginfo_t, si_band) == 0x0c);
static_assert(offsetof(compat_siginfo_t, si_fd) == 0x10);
static_assert(offsetof(compat_siginfo_t, si_call_addr) == 0x0c);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 27df5c1e6baa..62ed361a4376 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -234,6 +234,7 @@ asmlinkage notrace void secondary_start_kernel(void)
* Log the CPU info before it is marked online and might get read.
*/
cpuinfo_store_cpu();
+ store_cpu_topology(cpu);
/*
* Enable GIC and timers.
@@ -242,7 +243,6 @@ asmlinkage notrace void secondary_start_kernel(void)
ipi_setup(cpu);
- store_cpu_topology(cpu);
numa_add_cpu(cpu);
/*
@@ -512,6 +512,7 @@ struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
return &cpu_madt_gicc[cpu];
}
+EXPORT_SYMBOL_GPL(acpi_cpu_get_madt_gicc);
/*
* acpi_map_gic_cpu_interface - parse processor MADT entry
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index e4103e085681..0467cb79f080 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -19,43 +19,60 @@
#include <asm/stacktrace.h>
/*
- * AArch64 PCS assigns the frame pointer to x29.
+ * A snapshot of a frame record or fp/lr register values, along with some
+ * accounting information necessary for robust unwinding.
*
- * A simple function prologue looks like this:
- * sub sp, sp, #0x10
- * stp x29, x30, [sp]
- * mov x29, sp
+ * @fp: The fp value in the frame record (or the real fp)
+ * @pc: The lr value in the frame record (or the real lr)
*
- * A simple function epilogue looks like this:
- * mov sp, x29
- * ldp x29, x30, [sp]
- * add sp, sp, #0x10
+ * @stacks_done: Stacks which have been entirely unwound, for which it is no
+ * longer valid to unwind to.
+ *
+ * @prev_fp: The fp that pointed to this frame record, or a synthetic value
+ * of 0. This is used to ensure that within a stack, each
+ * subsequent frame record is at an increasing address.
+ * @prev_type: The type of stack this frame record was on, or a synthetic
+ * value of STACK_TYPE_UNKNOWN. This is used to detect a
+ * transition from one stack to another.
+ *
+ * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
+ * associated with the most recently encountered replacement lr
+ * value.
*/
+struct unwind_state {
+ unsigned long fp;
+ unsigned long pc;
+ DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
+ unsigned long prev_fp;
+ enum stack_type prev_type;
+#ifdef CONFIG_KRETPROBES
+ struct llist_node *kr_cur;
+#endif
+};
-
-static notrace void start_backtrace(struct stackframe *frame, unsigned long fp,
- unsigned long pc)
+static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
+ unsigned long pc)
{
- frame->fp = fp;
- frame->pc = pc;
+ state->fp = fp;
+ state->pc = pc;
#ifdef CONFIG_KRETPROBES
- frame->kr_cur = NULL;
+ state->kr_cur = NULL;
#endif
/*
* Prime the first unwind.
*
- * In unwind_frame() we'll check that the FP points to a valid stack,
+ * In unwind_next() we'll check that the FP points to a valid stack,
* which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
* treated as a transition to whichever stack that happens to be. The
* prev_fp value won't be used, but we set it to 0 such that it is
* definitely not an accessible stack address.
*/
- bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
- frame->prev_fp = 0;
- frame->prev_type = STACK_TYPE_UNKNOWN;
+ bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
+ state->prev_fp = 0;
+ state->prev_type = STACK_TYPE_UNKNOWN;
}
-NOKPROBE_SYMBOL(start_backtrace);
+NOKPROBE_SYMBOL(unwind_init);
/*
* Unwind from one frame record (A) to the next frame record (B).
@@ -64,15 +81,12 @@ NOKPROBE_SYMBOL(start_backtrace);
* records (e.g. a cycle), determined based on the location and fp value of A
* and the location (but not the fp value) of B.
*/
-static int notrace unwind_frame(struct task_struct *tsk,
- struct stackframe *frame)
+static int notrace unwind_next(struct task_struct *tsk,
+ struct unwind_state *state)
{
- unsigned long fp = frame->fp;
+ unsigned long fp = state->fp;
struct stack_info info;
- if (!tsk)
- tsk = current;
-
/* Final frame; nothing to unwind */
if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
return -ENOENT;
@@ -83,7 +97,7 @@ static int notrace unwind_frame(struct task_struct *tsk,
if (!on_accessible_stack(tsk, fp, 16, &info))
return -EINVAL;
- if (test_bit(info.type, frame->stacks_done))
+ if (test_bit(info.type, state->stacks_done))
return -EINVAL;
/*
@@ -99,27 +113,27 @@ static int notrace unwind_frame(struct task_struct *tsk,
* stack to another, it's never valid to unwind back to that first
* stack.
*/
- if (info.type == frame->prev_type) {
- if (fp <= frame->prev_fp)
+ if (info.type == state->prev_type) {
+ if (fp <= state->prev_fp)
return -EINVAL;
} else {
- set_bit(frame->prev_type, frame->stacks_done);
+ set_bit(state->prev_type, state->stacks_done);
}
/*
* Record this frame record's values and location. The prev_fp and
- * prev_type are only meaningful to the next unwind_frame() invocation.
+ * prev_type are only meaningful to the next unwind_next() invocation.
*/
- frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
- frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
- frame->prev_fp = fp;
- frame->prev_type = info.type;
+ state->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
+ state->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+ state->prev_fp = fp;
+ state->prev_type = info.type;
- frame->pc = ptrauth_strip_insn_pac(frame->pc);
+ state->pc = ptrauth_strip_insn_pac(state->pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
- (frame->pc == (unsigned long)return_to_handler)) {
+ (state->pc == (unsigned long)return_to_handler)) {
unsigned long orig_pc;
/*
* This is a case where function graph tracer has
@@ -127,37 +141,37 @@ static int notrace unwind_frame(struct task_struct *tsk,
* to hook a function return.
* So replace it to an original value.
*/
- orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
- (void *)frame->fp);
- if (WARN_ON_ONCE(frame->pc == orig_pc))
+ orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
+ (void *)state->fp);
+ if (WARN_ON_ONCE(state->pc == orig_pc))
return -EINVAL;
- frame->pc = orig_pc;
+ state->pc = orig_pc;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
- if (is_kretprobe_trampoline(frame->pc))
- frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
+ if (is_kretprobe_trampoline(state->pc))
+ state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif
return 0;
}
-NOKPROBE_SYMBOL(unwind_frame);
+NOKPROBE_SYMBOL(unwind_next);
-static void notrace walk_stackframe(struct task_struct *tsk,
- struct stackframe *frame,
- bool (*fn)(void *, unsigned long), void *data)
+static void notrace unwind(struct task_struct *tsk,
+ struct unwind_state *state,
+ stack_trace_consume_fn consume_entry, void *cookie)
{
while (1) {
int ret;
- if (!fn(data, frame->pc))
+ if (!consume_entry(cookie, state->pc))
break;
- ret = unwind_frame(tsk, frame);
+ ret = unwind_next(tsk, state);
if (ret < 0)
break;
}
}
-NOKPROBE_SYMBOL(walk_stackframe);
+NOKPROBE_SYMBOL(unwind);
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
@@ -196,17 +210,17 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
{
- struct stackframe frame;
+ struct unwind_state state;
if (regs)
- start_backtrace(&frame, regs->regs[29], regs->pc);
+ unwind_init(&state, regs->regs[29], regs->pc);
else if (task == current)
- start_backtrace(&frame,
+ unwind_init(&state,
(unsigned long)__builtin_frame_address(1),
(unsigned long)__builtin_return_address(0));
else
- start_backtrace(&frame, thread_saved_fp(task),
+ unwind_init(&state, thread_saved_fp(task),
thread_saved_pc(task));
- walk_stackframe(task, &frame, consume_entry, cookie);
+ unwind(task, &state, consume_entry, cookie);
}
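The rename from stackframe to unwind_state comes with the robustness rules the new kernel-doc spells out: within one stack every next frame record must sit at a strictly higher address, and a stack once left may not be re-entered. A toy model of the monotonic-fp rule, which makes corrupted frame chains terminate instead of looping:

    #include <stdint.h>

    struct frame_record { uint64_t fp, lr; };

    static int unwind_toy(uint64_t fp, uint64_t stack_lo, uint64_t stack_hi)
    {
        uint64_t prev_fp = 0;
        int depth = 0;

        while (fp >= stack_lo && fp + 16 <= stack_hi && fp > prev_fp) {
            const struct frame_record *rec =
                (const struct frame_record *)(uintptr_t)fp;

            prev_fp = fp;
            fp = rec->fp;       /* follow the chain to the next record */
            depth++;
        }
        return depth;
    }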
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 19ee7c33769d..2b0887e58a7c 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -140,7 +140,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
/*
* Restore pstate flags. OS lock and mdscr have been already
* restored, so from this point onwards, debugging is fully
- * renabled if it was enabled when core started shutdown.
+ * reenabled if it was enabled when core started shutdown.
*/
local_daif_restore(flags);
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 12c6864e51e1..df14336c3a29 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -113,6 +113,6 @@ long compat_arm_syscall(struct pt_regs *regs, int scno)
addr = instruction_pointer(regs) - (compat_thumb_mode(regs) ? 2 : 4);
arm64_notify_die("Oops - bad compat syscall(2)", regs,
- SIGILL, ILL_ILLTRP, addr, scno);
+ SIGILL, ILL_ILLTRP, addr, 0);
return 0;
}
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index c938603b3ba0..733451fe7e41 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -158,11 +158,36 @@ trace_exit:
syscall_trace_exit(regs);
}
-static inline void sve_user_discard(void)
+/*
+ * As per the ABI exit SME streaming mode and clear the SVE state not
+ * shared with FPSIMD on syscall entry.
+ */
+static inline void fp_user_discard(void)
{
+ /*
+ * If SME is active then exit streaming mode. If ZA is active
+ * then flush the SVE registers but leave userspace access to
+ * both SVE and SME enabled, otherwise disable SME for the
+ * task and fall through to disabling SVE too. This means
+ * that after a syscall we never have any streaming mode
+ * register state to track, if this changes the KVM code will
+ * need updating.
+ */
+ if (system_supports_sme() && test_thread_flag(TIF_SME)) {
+ u64 svcr = read_sysreg_s(SYS_SVCR);
+
+ if (svcr & SVCR_SM_MASK)
+ sme_smstop_sm();
+ }
+
if (!system_supports_sve())
return;
+ /*
+ * If SME is not active then disable SVE, the registers will
+ * be cleared when userspace next attempts to access them and
+ * we do not need to track the SVE register state until then.
+ */
clear_thread_flag(TIF_SVE);
/*
@@ -177,7 +202,7 @@ static inline void sve_user_discard(void)
void do_el0_svc(struct pt_regs *regs)
{
- sve_user_discard();
+ fp_user_discard();
el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 0529fd57567e..9ac7a81b79be 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -242,7 +242,7 @@ static void arm64_show_signal(int signo, const char *str)
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
struct task_struct *tsk = current;
- unsigned int esr = tsk->thread.fault_code;
+ unsigned long esr = tsk->thread.fault_code;
struct pt_regs *regs = task_pt_regs(tsk);
/* Leave if the signal won't be shown */
@@ -253,7 +253,7 @@ static void arm64_show_signal(int signo, const char *str)
pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
if (esr)
- pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);
+ pr_cont("%s, ESR 0x%016lx, ", esr_get_class_string(esr), esr);
pr_cont("%s", str);
print_vma_addr(KERN_CONT " in ", regs->pc);
@@ -287,7 +287,7 @@ void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
void arm64_notify_die(const char *str, struct pt_regs *regs,
int signo, int sicode, unsigned long far,
- int err)
+ unsigned long err)
{
if (user_mode(regs)) {
WARN_ON(regs != current_pt_regs());
@@ -439,7 +439,7 @@ exit:
return fn ? fn(regs, instr) : 1;
}
-void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
+void force_signal_inject(int signal, int code, unsigned long address, unsigned long err)
{
const char *desc;
struct pt_regs *regs = current_pt_regs();
@@ -506,7 +506,7 @@ void do_bti(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_bti);
-void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr)
+void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr)
{
/*
* Unexpected FPAC exception or pointer authentication failure in
@@ -532,7 +532,7 @@ NOKPROBE_SYMBOL(do_ptrauth_fault);
uaccess_ttbr0_disable(); \
}
-static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
+static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs)
{
unsigned long tagged_address, address;
int rt = ESR_ELx_SYS64_ISS_RT(esr);
@@ -572,7 +572,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
-static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
+static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
{
int rt = ESR_ELx_SYS64_ISS_RT(esr);
unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
@@ -591,7 +591,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
-static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
int rt = ESR_ELx_SYS64_ISS_RT(esr);
@@ -599,7 +599,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
-static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
int rt = ESR_ELx_SYS64_ISS_RT(esr);
@@ -607,7 +607,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
-static void mrs_handler(unsigned int esr, struct pt_regs *regs)
+static void mrs_handler(unsigned long esr, struct pt_regs *regs)
{
u32 sysreg, rt;
@@ -618,15 +618,15 @@ static void mrs_handler(unsigned int esr, struct pt_regs *regs)
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
-static void wfi_handler(unsigned int esr, struct pt_regs *regs)
+static void wfi_handler(unsigned long esr, struct pt_regs *regs)
{
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
struct sys64_hook {
- unsigned int esr_mask;
- unsigned int esr_val;
- void (*handler)(unsigned int esr, struct pt_regs *regs);
+ unsigned long esr_mask;
+ unsigned long esr_val;
+ void (*handler)(unsigned long esr, struct pt_regs *regs);
};
static const struct sys64_hook sys64_hooks[] = {
@@ -675,7 +675,7 @@ static const struct sys64_hook sys64_hooks[] = {
};
#ifdef CONFIG_COMPAT
-static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
+static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs)
{
int cond;
@@ -695,7 +695,7 @@ static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
return aarch32_opcode_cond_checks[cond](regs->pstate);
}
-static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
@@ -712,7 +712,7 @@ static const struct sys64_hook cp15_32_hooks[] = {
{},
};
-static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
@@ -737,7 +737,7 @@ static const struct sys64_hook cp15_64_hooks[] = {
{},
};
-void do_cp15instr(unsigned int esr, struct pt_regs *regs)
+void do_cp15instr(unsigned long esr, struct pt_regs *regs)
{
const struct sys64_hook *hook, *hook_base;
@@ -778,7 +778,7 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs)
NOKPROBE_SYMBOL(do_cp15instr);
#endif
-void do_sysinstr(unsigned int esr, struct pt_regs *regs)
+void do_sysinstr(unsigned long esr, struct pt_regs *regs)
{
const struct sys64_hook *hook;
@@ -821,6 +821,7 @@ static const char *esr_class_str[] = {
[ESR_ELx_EC_SVE] = "SVE",
[ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
[ESR_ELx_EC_FPAC] = "FPAC",
+ [ESR_ELx_EC_SME] = "SME",
[ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
[ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
[ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
@@ -842,7 +843,7 @@ static const char *esr_class_str[] = {
[ESR_ELx_EC_BRK64] = "BRK (AArch64)",
};
-const char *esr_get_class_string(u32 esr)
+const char *esr_get_class_string(unsigned long esr)
{
return esr_class_str[ESR_ELx_EC(esr)];
}
@@ -851,7 +852,7 @@ const char *esr_get_class_string(u32 esr)
* bad_el0_sync handles unexpected, but potentially recoverable synchronous
* exceptions taken from EL0.
*/
-void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr)
{
unsigned long pc = instruction_pointer(regs);
@@ -867,7 +868,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
__aligned(16);
-void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far)
+void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
@@ -876,7 +877,7 @@ void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far)
console_verbose();
pr_emerg("Insufficient stack space to handle exception!");
- pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
+ pr_emerg("ESR: 0x%016lx -- %s\n", esr, esr_get_class_string(esr));
pr_emerg("FAR: 0x%016lx\n", far);
pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
@@ -897,11 +898,11 @@ void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far)
}
#endif
-void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
+void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr)
{
console_verbose();
- pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
+ pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n",
smp_processor_id(), esr, esr_get_class_string(esr));
if (regs)
__show_regs(regs);
@@ -912,9 +913,9 @@ void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
unreachable();
}
-bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
+bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr)
{
- u32 aet = arm64_ras_serror_get_severity(esr);
+ unsigned long aet = arm64_ras_serror_get_severity(esr);
switch (aet) {
case ESR_ELx_AET_CE: /* corrected error */
@@ -944,7 +945,7 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
}
}
-void do_serror(struct pt_regs *regs, unsigned int esr)
+void do_serror(struct pt_regs *regs, unsigned long esr)
{
/* non-RAS errors are not containable */
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
@@ -965,7 +966,7 @@ int is_valid_bugaddr(unsigned long addr)
return 1;
}
-static int bug_handler(struct pt_regs *regs, unsigned int esr)
+static int bug_handler(struct pt_regs *regs, unsigned long esr)
{
switch (report_bug(regs->pc, regs)) {
case BUG_TRAP_TYPE_BUG:
@@ -990,7 +991,7 @@ static struct break_hook bug_break_hook = {
.imm = BUG_BRK_IMM,
};
-static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
+static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr)
{
pr_err("%s generated an invalid instruction at %pS!\n",
"Kernel text patching",
@@ -1012,7 +1013,7 @@ static struct break_hook fault_break_hook = {
#define KASAN_ESR_SIZE_MASK 0x0f
#define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK))
-static int kasan_handler(struct pt_regs *regs, unsigned int esr)
+static int kasan_handler(struct pt_regs *regs, unsigned long esr)
{
bool recover = esr & KASAN_ESR_RECOVER;
bool write = esr & KASAN_ESR_WRITE;
@@ -1055,11 +1056,11 @@ static struct break_hook kasan_break_hook = {
* Initial handler for AArch64 BRK exceptions
* This handler only used until debug_traps_init().
*/
-int __init early_brk64(unsigned long addr, unsigned int esr,
+int __init early_brk64(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
- unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+ unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
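The unsigned int to unsigned long conversion running through this file reflects that ESR_ELx is architecturally a 64-bit register, with newer extensions (for instance the ISS2 field) defining bits above 31; truncating to 32 bits risks losing syndrome information. The class decode itself still lives in bits [31:26], as in this sketch of what the kernel's ESR_ELx_EC() computes:

    #include <stdint.h>

    #define ESR_EC_SHIFT   26
    #define ESR_EC_MASK    (0x3fUL << ESR_EC_SHIFT)

    static inline unsigned long esr_ec(unsigned long esr)
    {
        return (esr & ESR_EC_MASK) >> ESR_EC_SHIFT;
    }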
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 172452f79e46..f6e25d7c346a 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -32,7 +32,8 @@ ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
# -Wmissing-prototypes and -Wmissing-declarations are removed from
# the CFLAGS of vgettimeofday.c to make it possible to build the
# kernel with CONFIG_WERROR enabled.
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) \
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) \
+ $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) \
$(CC_FLAGS_LTO) -Wmissing-prototypes -Wmissing-declarations
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
@@ -52,9 +53,6 @@ GCOV_PROFILE := n
targets += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
-# Force dependency (incbin is bad)
-$(obj)/vdso.o : $(obj)/vdso.so
-
# Link rule for the .so file, .lds has to be first
$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold_and_vdso_check)
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index ed181bedbffc..05ba1aae1b6f 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -131,9 +131,6 @@ obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
targets += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
-# Force dependency (vdso.s includes vdso.so through incbin)
-$(obj)/vdso.o: $(obj)/vdso.so
-
include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index edaf0faf766f..2d4a8f995175 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -93,7 +93,6 @@ jiffies = jiffies_64;
#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT \
- . = ALIGN(SZ_4K); \
__hibernate_exit_text_start = .; \
*(.hibernate_exit.text) \
__hibernate_exit_text_end = .;
@@ -103,7 +102,6 @@ jiffies = jiffies_64;
#ifdef CONFIG_KEXEC_CORE
#define KEXEC_TEXT \
- . = ALIGN(SZ_4K); \
__relocate_new_kernel_start = .; \
*(.kexec_relocate.text) \
__relocate_new_kernel_end = .;
@@ -170,9 +168,6 @@ SECTIONS
KPROBES_TEXT
HYPERVISOR_TEXT
IDMAP_TEXT
- HIBERNATE_TEXT
- KEXEC_TEXT
- TRAMP_TEXT
*(.gnu.warning)
. = ALIGN(16);
*(.got) /* Global offset table */
@@ -194,6 +189,14 @@ SECTIONS
HYPERVISOR_DATA_SECTIONS
+ /* code sections that are never executed via the kernel mapping */
+ .rodata.text : {
+ TRAMP_TEXT
+ HIBERNATE_TEXT
+ KEXEC_TEXT
+ . = ALIGN(PAGE_SIZE);
+ }
+
idmap_pg_dir = .;
. += IDMAP_DIR_SIZE;
idmap_pg_end = .;
@@ -337,8 +340,8 @@ ASSERT(__hyp_idmap_text_end - __hyp_idmap_text_start <= PAGE_SIZE,
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
-ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
- <= SZ_4K, "Hibernate exit text too big or misaligned")
+ASSERT(__hibernate_exit_text_end - __hibernate_exit_text_start <= SZ_4K,
+ "Hibernate exit text is bigger than 4 KiB")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
@@ -362,7 +365,7 @@ ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
#ifdef CONFIG_KEXEC_CORE
/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */
-ASSERT(__relocate_new_kernel_end - (__relocate_new_kernel_start & ~(SZ_4K - 1))
- <= SZ_4K, "kexec relocation code is too big or misaligned")
+ASSERT(__relocate_new_kernel_end - __relocate_new_kernel_start <= SZ_4K,
+ "kexec relocation code is bigger than 4 KiB")
ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken")
#endif
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 523bc934fe2f..cedc3ba2c098 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -783,6 +783,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
ret = 1;
run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->flags = 0;
while (ret > 0) {
/*
* Check conditions before entering the guest
@@ -1436,7 +1437,8 @@ static int kvm_init_vector_slots(void)
base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
- if (kvm_system_needs_idmapped_vectors() && !has_vhe()) {
+ if (kvm_system_needs_idmapped_vectors() &&
+ !is_protected_kvm_enabled()) {
err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
__BP_HARDEN_HYP_VECS_SZ, &base);
if (err)
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 397fdac75cb1..3d251a4d2cf7 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -82,6 +82,26 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
+
+ /*
+ * We don't currently support SME guests, but if we leave things
+ * in streaming mode then when the guest starts running FPSIMD or
+ * SVE code it may generate SME traps. As a special case, if we
+ * are in streaming mode we therefore force the host state to be
+ * saved now and exit streaming mode, so that we don't have to
+ * handle any SME traps for valid guest operations. Do the same
+ * for ZA for now, for simplicity.
+ */
+ if (system_supports_sme()) {
+ if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
+ vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED;
+
+ if (read_sysreg_s(SYS_SVCR) &
+ (SVCR_SM_MASK | SVCR_ZA_MASK)) {
+ vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
+ fpsimd_save_and_flush_cpu_state();
+ }
+ }
}
/*
@@ -109,9 +129,14 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(!irqs_disabled());
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+ /*
+ * Currently we do not support SME guests so SVCR is
+ * always 0 and we just need a variable to point to.
+ */
fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
vcpu->arch.sve_state,
- vcpu->arch.sve_max_vl);
+ vcpu->arch.sve_max_vl,
+ NULL, 0, &vcpu->arch.svcr);
clear_thread_flag(TIF_FOREIGN_FPSTATE);
update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
@@ -130,6 +155,22 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
local_irq_save(flags);
+ /*
+ * If we have VHE then the Hyp code will reset CPACR_EL1 to
+ * CPACR_EL1_DEFAULT and we need to reenable SME.
+ */
+ if (has_vhe() && system_supports_sme()) {
+ /* Also restore EL0 state seen on entry */
+ if (vcpu->arch.flags & KVM_ARM64_HOST_SME_ENABLED)
+ sysreg_clear_set(CPACR_EL1, 0,
+ CPACR_EL1_SMEN_EL0EN |
+ CPACR_EL1_SMEN_EL1EN);
+ else
+ sysreg_clear_set(CPACR_EL1,
+ CPACR_EL1_SMEN_EL0EN,
+ CPACR_EL1_SMEN_EL1EN);
+ }
+
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
if (vcpu_has_sve(vcpu)) {
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
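For reference, the SVCR test above checks the only two bits the architecture defines in that register; a sketch of the layout assumed (mask names as used in the patch, bit positions per FEAT_SME):

	/* sketch: SVCR layout assumed by the streaming-mode check above */
	#define SVCR_SM_MASK	BIT(0)	/* streaming SVE mode is enabled */
	#define SVCR_ZA_MASK	BIT(1)	/* ZA storage is enabled */

If either bit is set, the host FP state is saved and flushed so the guest never runs with streaming mode or ZA live.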
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 97fe14aab1a3..0b829292dc54 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -26,7 +26,7 @@
typedef int (*exit_handle_fn)(struct kvm_vcpu *);
-static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
+static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
kvm_inject_vabt(vcpu);
@@ -117,10 +117,12 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- u32 esr = kvm_vcpu_get_esr(vcpu);
+ u64 esr = kvm_vcpu_get_esr(vcpu);
run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.hsr = esr;
+ run->debug.arch.hsr = lower_32_bits(esr);
+ run->debug.arch.hsr_high = upper_32_bits(esr);
+ run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID;
if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW)
run->debug.arch.far = vcpu->arch.fault.far_el2;
@@ -130,9 +132,9 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
- u32 esr = kvm_vcpu_get_esr(vcpu);
+ u64 esr = kvm_vcpu_get_esr(vcpu);
- kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
+ kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
esr, esr_get_class_string(esr));
kvm_inject_undefined(vcpu);
@@ -187,7 +189,7 @@ static exit_handle_fn arm_exit_handlers[] = {
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
- u32 esr = kvm_vcpu_get_esr(vcpu);
+ u64 esr = kvm_vcpu_get_esr(vcpu);
u8 esr_ec = ESR_ELx_EC(esr);
return arm_exit_handlers[esr_ec];
@@ -334,6 +336,6 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
*/
kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);
- panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
+ panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
spsr, elr_virt, esr, far, hpfar, par, vcpu);
}
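Since run->debug.arch.hsr stays 32 bits wide for ABI compatibility, userspace has to stitch the two halves back together; the run->flags = 0 reset added in kvm_arch_vcpu_ioctl_run() above makes the flag trustworthy on every exit. A minimal VMM-side sketch, using only the fields and flag introduced by this patch:

	/* sketch: rebuild the full 64-bit ESR from a KVM_EXIT_DEBUG exit */
	u64 esr = run->debug.arch.hsr;

	if (run->flags & KVM_DEBUG_ARCH_HSR_HIGH_VALID)
		esr |= (u64)run->debug.arch.hsr_high << 32;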
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 5d31f6c64c8c..37d9f211c200 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -266,7 +266,7 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
return true;
}
-static inline bool esr_is_ptrauth_trap(u32 esr)
+static inline bool esr_is_ptrauth_trap(u64 esr)
{
switch (esr_sys64_to_sysreg(esr)) {
case SYS_APIAKEYLO_EL1:
diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
index 5ad626527d41..fd55014b3497 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
@@ -159,20 +159,20 @@
* No restrictions on instructions implemented in AArch64.
*/
#define PVM_ID_AA64ISAR0_ALLOW (\
- ARM64_FEATURE_MASK(ID_AA64ISAR0_AES) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA1) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA2) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_CRC32) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_RDM) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA3) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_SM3) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_SM4) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_DP) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_FHM) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_TS) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_TLB) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_RNDR) \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_AES) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA1) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA2) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_CRC32) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RDM) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA3) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM3) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM4) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_DP) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_FHM) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TS) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TLB) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RNDR) \
)
#define PVM_ID_AA64ISAR1_ALLOW (\
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 3d613e721a75..727c979b2b69 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -198,15 +198,15 @@ SYM_CODE_START(__kvm_hyp_host_vector)
invalid_host_el2_vect // FIQ EL2h
invalid_host_el2_vect // Error EL2h
- host_el1_sync_vect // Synchronous 64-bit EL1
- invalid_host_el1_vect // IRQ 64-bit EL1
- invalid_host_el1_vect // FIQ 64-bit EL1
- invalid_host_el1_vect // Error 64-bit EL1
-
- invalid_host_el1_vect // Synchronous 32-bit EL1
- invalid_host_el1_vect // IRQ 32-bit EL1
- invalid_host_el1_vect // FIQ 32-bit EL1
- invalid_host_el1_vect // Error 32-bit EL1
+ host_el1_sync_vect // Synchronous 64-bit EL1/EL0
+ invalid_host_el1_vect // IRQ 64-bit EL1/EL0
+ invalid_host_el1_vect // FIQ 64-bit EL1/EL0
+ invalid_host_el1_vect // Error 64-bit EL1/EL0
+
+ host_el1_sync_vect // Synchronous 32-bit EL1/EL0
+ invalid_host_el1_vect // IRQ 32-bit EL1/EL0
+ invalid_host_el1_vect // FIQ 32-bit EL1/EL0
+ invalid_host_el1_vect // Error 32-bit EL1/EL0
SYM_CODE_END(__kvm_hyp_host_vector)
/*
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 6410d21d8695..caace61ea459 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -47,10 +47,24 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
__activate_traps_fpsimd32(vcpu);
}
+ if (cpus_have_final_cap(ARM64_SME))
+ val |= CPTR_EL2_TSM;
write_sysreg(val, cptr_el2);
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
+ if (cpus_have_final_cap(ARM64_SME)) {
+ val = read_sysreg_s(SYS_HFGRTR_EL2);
+ val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
+ HFGxTR_EL2_nSMPRI_EL1_MASK);
+ write_sysreg_s(val, SYS_HFGRTR_EL2);
+
+ val = read_sysreg_s(SYS_HFGWTR_EL2);
+ val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
+ HFGxTR_EL2_nSMPRI_EL1_MASK);
+ write_sysreg_s(val, SYS_HFGWTR_EL2);
+ }
+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
@@ -94,9 +108,25 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
+ if (cpus_have_final_cap(ARM64_SME)) {
+ u64 val;
+
+ val = read_sysreg_s(SYS_HFGRTR_EL2);
+ val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
+ HFGxTR_EL2_nSMPRI_EL1_MASK;
+ write_sysreg_s(val, SYS_HFGRTR_EL2);
+
+ val = read_sysreg_s(SYS_HFGWTR_EL2);
+ val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
+ HFGxTR_EL2_nSMPRI_EL1_MASK;
+ write_sysreg_s(val, SYS_HFGWTR_EL2);
+ }
+
cptr = CPTR_EL2_DEFAULT;
if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
cptr |= CPTR_EL2_TZ;
+ if (cpus_have_final_cap(ARM64_SME))
+ cptr &= ~CPTR_EL2_TSM;
write_sysreg(cptr, cptr_el2);
write_sysreg(__kvm_hyp_host_vector, vbar_el2);
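The HFGxTR_EL2 bits toggled here are active-low ("n") fine-grained trap enables, which is why guest entry clears them and guest exit sets them again. A paraphrase of the polarity being relied on:

	/*
	 * sketch: fine-grained trap polarity assumed above
	 *   HFGxTR_EL2.nTPIDR2_EL0 == 0  ->  TPIDR2_EL0 accesses trap to EL2
	 *   HFGxTR_EL2.nSMPRI_EL1  == 0  ->  SMPRI_EL1 accesses trap to EL2
	 * so __activate_traps() clears the bits while the guest runs and
	 * __deactivate_traps() restores them for the host
	 */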
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 33f5181af330..619f94fc95fa 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -33,7 +33,7 @@ u64 id_aa64mmfr2_el1_sys_val;
*/
static void inject_undef64(struct kvm_vcpu *vcpu)
{
- u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
+ u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 4fb419f7b8b6..6cb638b184b1 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -473,7 +473,7 @@ static int __vgic_v3_bpr_min(void)
static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
- u32 esr = kvm_vcpu_get_esr(vcpu);
+ u64 esr = kvm_vcpu_get_esr(vcpu);
u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
return crm != 8;
@@ -1016,7 +1016,7 @@ static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
int rt;
- u32 esr;
+ u64 esr;
u32 vmcr;
void (*fn)(struct kvm_vcpu *, u32, int);
bool is_read;
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 262dfe03134d..969f20daf97a 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -41,7 +41,8 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
- val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
+ val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
+ CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);
/*
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
@@ -62,6 +63,10 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
__activate_traps_fpsimd32(vcpu);
}
+ if (cpus_have_final_cap(ARM64_SME))
+ write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2,
+ sctlr_el2);
+
write_sysreg(val, cpacr_el1);
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
@@ -83,6 +88,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
*/
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
+ if (cpus_have_final_cap(ARM64_SME))
+ write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_ENTP2,
+ sctlr_el2);
+
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
if (!arm64_kernel_unmapped_at_el0())
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index b47df73e98d7..55a5dbe957e0 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -18,7 +18,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
{
unsigned long cpsr = *vcpu_cpsr(vcpu);
bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
- u32 esr = 0;
+ u64 esr = 0;
vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
@@ -50,7 +50,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
static void inject_undef64(struct kvm_vcpu *vcpu)
{
- u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
+ u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
@@ -145,6 +145,34 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
inject_abt64(vcpu, true, addr);
}
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
+{
+ unsigned long addr, esr;
+
+ addr = kvm_vcpu_get_fault_ipa(vcpu);
+ addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+ if (kvm_vcpu_trap_is_iabt(vcpu))
+ kvm_inject_pabt(vcpu, addr);
+ else
+ kvm_inject_dabt(vcpu, addr);
+
+ /*
+ * If AArch64 or LPAE, set FSC to 0 to indicate an Address
+ * Size Fault at level 0, as if exceeding PARange.
+ *
+ * Non-LPAE guests will only get the external abort, as there
+ * is no way to describe the ASF.
+ */
+ if (vcpu_el1_is_32bit(vcpu) &&
+ !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
+ return;
+
+ esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+ esr &= ~GENMASK_ULL(5, 0);
+ vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+}
+
/**
* kvm_inject_undefined - inject an undefined instruction into the guest
* @vcpu: The vCPU in which to inject the exception
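Clearing ESR_EL1[5:0] works because of how the fault status code is encoded; a paraphrase of the relevant encodings from the Arm ARM, for orientation:

	/*
	 * DFSC/IFSC (ESR_ELx[5:0]) encodings assumed by kvm_inject_size_fault():
	 *   0b000000  Address Size Fault, level 0
	 *   0b0000LL  Address Size Fault, level LL
	 * zeroing the field therefore reports an ASF at level 0, i.e. an
	 * output address beyond PARange
	 */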
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0d19259454d8..5400fc020164 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1079,7 +1079,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
gfn_t gfn;
kvm_pfn_t pfn;
bool logging_active = memslot_is_logging(memslot);
- bool logging_perm_fault = false;
+ bool use_read_lock = false;
unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
unsigned long vma_pagesize, fault_granule;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
@@ -1114,7 +1114,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (logging_active) {
force_pte = true;
vma_shift = PAGE_SHIFT;
- logging_perm_fault = (fault_status == FSC_PERM && write_fault);
+ use_read_lock = (fault_status == FSC_PERM && write_fault &&
+ fault_granule == PAGE_SIZE);
} else {
vma_shift = get_vma_page_shift(vma, hva);
}
@@ -1218,7 +1219,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* To reduce MMU contentions and enhance concurrency during dirty
* logging, only acquire read lock for permission relaxation.
*/
- if (logging_perm_fault)
+ if (use_read_lock)
read_lock(&kvm->mmu_lock);
else
write_lock(&kvm->mmu_lock);
@@ -1268,6 +1269,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
} else {
+ WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n");
+
ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
__pfn_to_phys(pfn), prot,
memcache);
@@ -1280,7 +1283,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
out_unlock:
- if (logging_perm_fault)
+ if (use_read_lock)
read_unlock(&kvm->mmu_lock);
else
write_unlock(&kvm->mmu_lock);
@@ -1334,6 +1337,25 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+ if (fault_status == FSC_FAULT) {
+ /* Beyond sanitised PARange (which is the IPA limit) */
+ if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
+ kvm_inject_size_fault(vcpu);
+ return 1;
+ }
+
+ /* Falls between the IPA range and the PARange? */
+ if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
+ fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+ if (is_iabt)
+ kvm_inject_pabt(vcpu, fault_ipa);
+ else
+ kvm_inject_dabt(vcpu, fault_ipa);
+ return 1;
+ }
+ }
+
/* Synchronous External Abort? */
if (kvm_vcpu_abt_issea(vcpu)) {
/*
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 78fdc443adc7..3dc990ac4f44 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -177,6 +177,9 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return 0;
+
counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
if (kvm_pmu_pmc_is_chained(pmc) &&
@@ -198,6 +201,9 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
u64 reg;
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return;
+
reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
@@ -322,6 +328,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc;
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return;
+
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
return;
@@ -357,7 +366,7 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc;
- if (!val)
+ if (!kvm_vcpu_has_pmu(vcpu) || !val)
return;
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -527,6 +536,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
int i;
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return;
+
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
return;
@@ -576,6 +588,9 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
int i;
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return;
+
if (val & ARMV8_PMU_PMCR_E) {
kvm_pmu_enable_counter_mask(vcpu,
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
@@ -739,6 +754,9 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
{
u64 reg, mask;
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return;
+
mask = ARMV8_PMU_EVTYPE_MASK;
mask &= ~ARMV8_PMU_EVTYPE_EVENT;
mask |= kvm_pmu_event_mask(vcpu->kvm);
@@ -827,6 +845,9 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
u64 val, mask = 0;
int base, i, nr_events;
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return 0;
+
if (!pmceid1) {
val = read_sysreg(pmceid0_el0);
base = 0;
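Every emulation entry point above now bails out early for vCPUs without a PMU. For orientation, kvm_vcpu_has_pmu() reduces to a vCPU feature-bit test, approximately (a sketch, not the verbatim definition):

	/* sketch: the guard used throughout pmu-emul.c, approximately */
	#define kvm_vcpu_has_pmu(vcpu) \
		(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))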
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 372da09a2fab..708d80e8e60d 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -181,7 +181,8 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
vcpu->run->system_event.type = type;
- vcpu->run->system_event.flags = flags;
+ vcpu->run->system_event.ndata = 1;
+ vcpu->run->system_event.data[0] = flags;
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
@@ -215,15 +216,11 @@ static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
{
- switch(fn) {
- case PSCI_0_2_FN64_CPU_SUSPEND:
- case PSCI_0_2_FN64_CPU_ON:
- case PSCI_0_2_FN64_AFFINITY_INFO:
- /* Disallow these functions for 32bit guests */
- if (vcpu_mode_is_32bit(vcpu))
- return PSCI_RET_NOT_SUPPORTED;
- break;
- }
+ /*
+ * Prevent 32-bit guests from calling 64-bit PSCI functions.
+ */
+ if ((fn & PSCI_0_2_64BIT) && vcpu_mode_is_32bit(vcpu))
+ return PSCI_RET_NOT_SUPPORTED;
return 0;
}
@@ -235,10 +232,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
unsigned long val;
int ret = 1;
- val = kvm_psci_check_allowed_function(vcpu, psci_fn);
- if (val)
- goto out;
-
switch (psci_fn) {
case PSCI_0_2_FN_PSCI_VERSION:
/*
@@ -306,7 +299,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
break;
}
-out:
smccc_set_retval(vcpu, val, 0, 0, 0);
return ret;
}
@@ -318,9 +310,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
unsigned long val;
int ret = 1;
- if (minor > 1)
- return -EINVAL;
-
switch(psci_fn) {
case PSCI_0_2_FN_PSCI_VERSION:
val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
@@ -426,6 +415,15 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
*/
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
+ u32 psci_fn = smccc_get_function(vcpu);
+ unsigned long val;
+
+ val = kvm_psci_check_allowed_function(vcpu, psci_fn);
+ if (val) {
+ smccc_set_retval(vcpu, val, 0, 0, 0);
+ return 1;
+ }
+
switch (kvm_psci_version(vcpu)) {
case KVM_ARM_PSCI_1_1:
return kvm_psci_1_x_call(vcpu, 1);
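The single mask test that now guards kvm_psci_call() relies on SMCCC encoding the calling convention in bit 30 of the function ID; PSCI_0_2_64BIT is that bit (0x40000000 in include/uapi/linux/psci.h). A sketch of the property:

	/* sketch: SMCCC function-ID convention assumed by the new check */
	static bool fn_is_smc64(u32 fn)
	{
		return fn & PSCI_0_2_64BIT;	/* bit 30 set: SMC64/HVC64 call */
	}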
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index ecc40c8cd6f6..6c70c6f61c70 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -181,27 +181,51 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
return 0;
}
-static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+/**
+ * kvm_set_vm_width() - set the register width for the guest
+ * @vcpu: Pointer to the vcpu being configured
+ *
+ * Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
+ * in the VM flags based on the vcpu's requested register width, the HW
+ * capabilities and other options (such as MTE).
+ * When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
+ * consistent with the value of the FLAG_EL1_32BIT bit in the flags.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *tmp;
+ struct kvm *kvm = vcpu->kvm;
bool is32bit;
- unsigned long i;
is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+
+ lockdep_assert_held(&kvm->lock);
+
+ if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
+ /*
+ * The guest's register width is already configured.
+ * Make sure that the vcpu is consistent with it.
+ */
+ if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
+ return 0;
+
+ return -EINVAL;
+ }
+
if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
- return false;
+ return -EINVAL;
/* MTE is incompatible with AArch32 */
- if (kvm_has_mte(vcpu->kvm) && is32bit)
- return false;
+ if (kvm_has_mte(kvm) && is32bit)
+ return -EINVAL;
- /* Check that the vcpus are either all 32bit or all 64bit */
- kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
- if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
- return false;
- }
+ if (is32bit)
+ set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
- return true;
+ set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);
+
+ return 0;
}
/**
@@ -230,10 +254,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
u32 pstate;
mutex_lock(&vcpu->kvm->lock);
- reset_state = vcpu->arch.reset_state;
- WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+ ret = kvm_set_vm_width(vcpu);
+ if (!ret) {
+ reset_state = vcpu->arch.reset_state;
+ WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+ }
mutex_unlock(&vcpu->kvm->lock);
+ if (ret)
+ return ret;
+
/* Reset PMU outside of the non-preemptible section */
kvm_pmu_vcpu_reset(vcpu);
@@ -260,14 +290,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
}
- if (!vcpu_allowed_register_width(vcpu)) {
- ret = -EINVAL;
- goto out;
- }
-
switch (vcpu->arch.target) {
default:
- if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
+ if (vcpu_el1_is_32bit(vcpu)) {
pstate = VCPU_RESET_PSTATE_SVC;
} else {
pstate = VCPU_RESET_PSTATE_EL1;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 7b45c040cc27..18b403b58b53 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1123,8 +1123,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
- if (irqchip_in_kernel(vcpu->kvm) &&
- vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+ if (kvm_vgic_global_state.type == VGIC_V3) {
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
}
@@ -1132,6 +1131,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
case SYS_ID_AA64PFR1_EL1:
if (!kvm_has_mte(vcpu->kvm))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
+
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME);
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
@@ -1553,7 +1554,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_UNALLOCATED(4,2),
ID_UNALLOCATED(4,3),
ID_SANITISED(ID_AA64ZFR0_EL1),
- ID_UNALLOCATED(4,5),
+ ID_HIDDEN(ID_AA64SMFR0_EL1),
ID_UNALLOCATED(4,6),
ID_UNALLOCATED(4,7),
@@ -1596,6 +1597,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
+ { SYS_DESC(SYS_SMPRI_EL1), undef_access },
+ { SYS_DESC(SYS_SMCR_EL1), undef_access },
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
@@ -1678,8 +1681,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
+ { SYS_DESC(SYS_SMIDR_EL1), undef_access },
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
+ { SYS_DESC(SYS_SVCR), undef_access },
{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
.reset = reset_pmcr, .reg = PMCR_EL0 },
@@ -1719,6 +1724,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
+ { SYS_DESC(SYS_TPIDR2_EL0), undef_access },
{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
@@ -2304,7 +2310,7 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
size_t nr_global)
{
struct sys_reg_params params;
- u32 esr = kvm_vcpu_get_esr(vcpu);
+ u64 esr = kvm_vcpu_get_esr(vcpu);
int Rt = kvm_vcpu_sys_get_rt(vcpu);
int Rt2 = (esr >> 10) & 0x1f;
@@ -2354,7 +2360,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
size_t nr_global)
{
struct sys_reg_params params;
- u32 esr = kvm_vcpu_get_esr(vcpu);
+ u64 esr = kvm_vcpu_get_esr(vcpu);
int Rt = kvm_vcpu_sys_get_rt(vcpu);
params.CRm = (esr >> 1) & 0xf;
diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
index f38c40a76251..78cde687383c 100644
--- a/arch/arm64/kvm/vgic/vgic-debug.c
+++ b/arch/arm64/kvm/vgic/vgic-debug.c
@@ -82,7 +82,7 @@ static bool end_of_vgic(struct vgic_state_iter *iter)
static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
{
- struct kvm *kvm = (struct kvm *)s->private;
+ struct kvm *kvm = s->private;
struct vgic_state_iter *iter;
mutex_lock(&kvm->lock);
@@ -110,7 +110,7 @@ out:
static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct kvm *kvm = (struct kvm *)s->private;
+ struct kvm *kvm = s->private;
struct vgic_state_iter *iter = kvm->arch.vgic.iter;
++*pos;
@@ -122,7 +122,7 @@ static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
static void vgic_debug_stop(struct seq_file *s, void *v)
{
- struct kvm *kvm = (struct kvm *)s->private;
+ struct kvm *kvm = s->private;
struct vgic_state_iter *iter;
/*
@@ -229,8 +229,8 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
static int vgic_debug_show(struct seq_file *s, void *v)
{
- struct kvm *kvm = (struct kvm *)s->private;
- struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
+ struct kvm *kvm = s->private;
+ struct vgic_state_iter *iter = v;
struct vgic_irq *irq;
struct kvm_vcpu *vcpu = NULL;
unsigned long flags;
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 089fc2ffcb43..2e13402be3bd 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -2143,7 +2143,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
void *ptr, void *opaque)
{
- struct its_device *dev = (struct its_device *)opaque;
+ struct its_device *dev = opaque;
struct its_collection *collection;
struct kvm *kvm = its->dev->kvm;
struct kvm_vcpu *vcpu = NULL;
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index 5e90887deec4..695d7368fadc 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -299,29 +299,24 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
return insn;
}
+static const u32 aarch64_insn_ldst_size[] = {
+ [AARCH64_INSN_SIZE_8] = 0,
+ [AARCH64_INSN_SIZE_16] = 1,
+ [AARCH64_INSN_SIZE_32] = 2,
+ [AARCH64_INSN_SIZE_64] = 3,
+};
+
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
u32 insn)
{
u32 size;
- switch (type) {
- case AARCH64_INSN_SIZE_8:
- size = 0;
- break;
- case AARCH64_INSN_SIZE_16:
- size = 1;
- break;
- case AARCH64_INSN_SIZE_32:
- size = 2;
- break;
- case AARCH64_INSN_SIZE_64:
- size = 3;
- break;
- default:
+ if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) {
pr_err("%s: unknown size encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
+ size = aarch64_insn_ldst_size[type];
insn &= ~GENMASK(31, 30);
insn |= size << 30;
@@ -504,6 +499,50 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
offset);
}
+u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ unsigned int imm,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type)
+{
+ u32 insn;
+ u32 shift;
+
+ if (size < AARCH64_INSN_SIZE_8 || size > AARCH64_INSN_SIZE_64) {
+ pr_err("%s: unknown size encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ shift = aarch64_insn_ldst_size[size];
+ if (imm & ~(BIT(12 + shift) - BIT(shift))) {
+ pr_err("%s: invalid imm: %d\n", __func__, imm);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ imm >>= shift;
+
+ switch (type) {
+ case AARCH64_INSN_LDST_LOAD_IMM_OFFSET:
+ insn = aarch64_insn_get_ldr_imm_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_IMM_OFFSET:
+ insn = aarch64_insn_get_str_imm_value();
+ break;
+ default:
+ pr_err("%s: unknown load/store encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
+}
+
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
enum aarch64_insn_register reg2,
enum aarch64_insn_register base,
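As a usage sketch, encoding "ldr x0, [x1, #16]" with the new helper would look like the following; the register and size enumerators are the existing aarch64_insn ones:

	u32 insn = aarch64_insn_gen_load_store_imm(AARCH64_INSN_REG_0,
						   AARCH64_INSN_REG_1,
						   16, AARCH64_INSN_SIZE_64,
						   AARCH64_INSN_LDST_LOAD_IMM_OFFSET);

The immediate is range-checked against the scaled 12-bit field and shifted by the access size before being packed into imm12, so a misaligned or out-of-range offset yields AARCH64_BREAK_FAULT rather than a bogus encoding.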
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 8590af3c98c0..eeb9e45bcce8 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -93,7 +93,7 @@ SYM_FUNC_START(mte_copy_tags_from_user)
mov x3, x1
cbz x2, 2f
1:
- user_ldst 2f, ldtrb, w4, x1, 0
+USER(2f, ldtrb w4, [x1])
lsl x4, x4, #MTE_TAG_SHIFT
stg x4, [x0], #MTE_GRANULE_SIZE
add x1, x1, #1
@@ -120,7 +120,7 @@ SYM_FUNC_START(mte_copy_tags_to_user)
1:
ldg x4, [x1]
ubfx x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE
- user_ldst 2f, sttrb, w4, x0, 0
+USER(2f, sttrb w4, [x0])
add x0, x0, #1
add x1, x1, #MTE_GRANULE_SIZE
subs x2, x2, #1
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index b5447e53cd73..0dea80bf6de4 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -16,8 +16,8 @@
void copy_highpage(struct page *to, struct page *from)
{
- struct page *kto = page_address(to);
- struct page *kfrom = page_address(from);
+ void *kto = page_address(to);
+ void *kfrom = page_address(from);
copy_page(kto, kfrom);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 77341b160aca..c5e11768e5c1 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -43,7 +43,7 @@
#include <asm/traps.h>
struct fault_info {
- int (*fn)(unsigned long far, unsigned int esr,
+ int (*fn)(unsigned long far, unsigned long esr,
struct pt_regs *regs);
int sig;
int code;
@@ -53,17 +53,17 @@ struct fault_info {
static const struct fault_info fault_info[];
static struct fault_info debug_fault_info[];
-static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
+static inline const struct fault_info *esr_to_fault_info(unsigned long esr)
{
return fault_info + (esr & ESR_ELx_FSC);
}
-static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
+static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr)
{
return debug_fault_info + DBG_ESR_EVT(esr);
}
-static void data_abort_decode(unsigned int esr)
+static void data_abort_decode(unsigned long esr)
{
pr_alert("Data abort info:\n");
@@ -85,11 +85,11 @@ static void data_abort_decode(unsigned int esr)
(esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
}
-static void mem_abort_decode(unsigned int esr)
+static void mem_abort_decode(unsigned long esr)
{
pr_alert("Mem abort info:\n");
- pr_alert(" ESR = 0x%08x\n", esr);
+ pr_alert(" ESR = 0x%016lx\n", esr);
pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n",
ESR_ELx_EC(esr), esr_get_class_string(esr),
(esr & ESR_ELx_IL) ? 32 : 16);
@@ -99,7 +99,7 @@ static void mem_abort_decode(unsigned int esr)
pr_alert(" EA = %lu, S1PTW = %lu\n",
(esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
(esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
- pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC),
+ pr_alert(" FSC = 0x%02lx: %s\n", (esr & ESR_ELx_FSC),
esr_to_fault_info(esr)->name);
if (esr_is_data_abort(esr))
@@ -229,20 +229,20 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
return 1;
}
-static bool is_el1_instruction_abort(unsigned int esr)
+static bool is_el1_instruction_abort(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
-static bool is_el1_data_abort(unsigned int esr)
+static bool is_el1_data_abort(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
}
-static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
+static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
- unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
+ unsigned long fsc_type = esr & ESR_ELx_FSC_TYPE;
if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
return false;
@@ -258,7 +258,7 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
}
static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
- unsigned int esr,
+ unsigned long esr,
struct pt_regs *regs)
{
unsigned long flags;
@@ -290,7 +290,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
}
static void die_kernel_fault(const char *msg, unsigned long addr,
- unsigned int esr, struct pt_regs *regs)
+ unsigned long esr, struct pt_regs *regs)
{
bust_spinlocks(1);
@@ -308,7 +308,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
}
#ifdef CONFIG_KASAN_HW_TAGS
-static void report_tag_fault(unsigned long addr, unsigned int esr,
+static void report_tag_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
/*
@@ -320,11 +320,11 @@ static void report_tag_fault(unsigned long addr, unsigned int esr,
}
#else
/* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */
-static inline void report_tag_fault(unsigned long addr, unsigned int esr,
+static inline void report_tag_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs) { }
#endif
-static void do_tag_recovery(unsigned long addr, unsigned int esr,
+static void do_tag_recovery(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
@@ -335,13 +335,14 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr,
* It will be done lazily on the other CPUs when they hit a
* tag fault.
*/
- sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_NONE);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
+ SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, NONE));
isb();
}
-static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
+static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
{
- unsigned int fsc = esr & ESR_ELx_FSC;
+ unsigned long fsc = esr & ESR_ELx_FSC;
if (!is_el1_data_abort(esr))
return false;
@@ -352,7 +353,7 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
return false;
}
-static void __do_kernel_fault(unsigned long addr, unsigned int esr,
+static void __do_kernel_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
const char *msg;
@@ -393,7 +394,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
die_kernel_fault(msg, addr, esr, regs);
}
-static void set_thread_esr(unsigned long address, unsigned int esr)
+static void set_thread_esr(unsigned long address, unsigned long esr)
{
current->thread.fault_address = address;
@@ -441,7 +442,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr)
* exception level). Fail safe by not providing an ESR
* context record at all.
*/
- WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
+ WARN(1, "ESR 0x%lx is not DABT or IABT from EL0\n", esr);
esr = 0;
break;
}
@@ -450,7 +451,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr)
current->thread.fault_code = esr;
}
-static void do_bad_area(unsigned long far, unsigned int esr,
+static void do_bad_area(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
unsigned long addr = untagged_addr(far);
@@ -501,7 +502,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
return handle_mm_fault(vma, addr, mm_flags, regs);
}
-static bool is_el0_instruction_abort(unsigned int esr)
+static bool is_el0_instruction_abort(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}
@@ -510,12 +511,12 @@ static bool is_el0_instruction_abort(unsigned int esr)
* Note: not valid for EL1 DC IVAC, but we never use that such that it
* should fault. EL0 cannot issue DC IVAC (undef).
*/
-static bool is_write_abort(unsigned int esr)
+static bool is_write_abort(unsigned long esr)
{
return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}
-static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
+static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
const struct fault_info *inf;
@@ -671,7 +672,7 @@ no_context:
}
static int __kprobes do_translation_fault(unsigned long far,
- unsigned int esr,
+ unsigned long esr,
struct pt_regs *regs)
{
unsigned long addr = untagged_addr(far);
@@ -683,19 +684,19 @@ static int __kprobes do_translation_fault(unsigned long far,
return 0;
}
-static int do_alignment_fault(unsigned long far, unsigned int esr,
+static int do_alignment_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
do_bad_area(far, esr, regs);
return 0;
}
-static int do_bad(unsigned long far, unsigned int esr, struct pt_regs *regs)
+static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
return 1; /* "fault" */
}
-static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)
+static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
const struct fault_info *inf;
unsigned long siaddr;
@@ -725,7 +726,7 @@ static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)
return 0;
}
-static int do_tag_check_fault(unsigned long far, unsigned int esr,
+static int do_tag_check_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
/*
@@ -805,7 +806,7 @@ static const struct fault_info fault_info[] = {
{ do_bad, SIGKILL, SI_KERNEL, "unknown 63" },
};
-void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
+void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_fault_info(esr);
unsigned long addr = untagged_addr(far);
@@ -825,14 +826,14 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_mem_abort);
-void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
{
arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
addr, esr);
}
NOKPROBE_SYMBOL(do_sp_pc_abort);
-int __init early_brk64(unsigned long addr, unsigned int esr,
+int __init early_brk64(unsigned long addr, unsigned long esr,
struct pt_regs *regs);
/*
@@ -852,7 +853,7 @@ static struct fault_info __refdata debug_fault_info[] = {
};
void __init hook_debug_fault_code(int nr,
- int (*fn)(unsigned long, unsigned int, struct pt_regs *),
+ int (*fn)(unsigned long, unsigned long, struct pt_regs *),
int sig, int code, const char *name)
{
BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));
@@ -885,7 +886,7 @@ static void debug_exception_exit(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(debug_exception_exit);
-void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
+void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_debug_fault_info(esr);
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index cbace1c9e137..64bb078e2e7b 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -158,6 +158,28 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
return contig_ptes;
}
+pte_t huge_ptep_get(pte_t *ptep)
+{
+ int ncontig, i;
+ size_t pgsize;
+ pte_t orig_pte = ptep_get(ptep);
+
+ if (!pte_present(orig_pte) || !pte_cont(orig_pte))
+ return orig_pte;
+
+ ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
+ for (i = 0; i < ncontig; i++, ptep++) {
+ pte_t pte = ptep_get(ptep);
+
+ if (pte_dirty(pte))
+ orig_pte = pte_mkdirty(orig_pte);
+
+ if (pte_young(pte))
+ orig_pte = pte_mkyoung(orig_pte);
+ }
+ return orig_pte;
+}
+
/*
* Changing some bits of contiguous entries requires us to follow a
* Break-Before-Make approach, breaking the whole contiguous set
@@ -166,15 +188,14 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
*
* This helper performs the break step.
*/
-static pte_t get_clear_flush(struct mm_struct *mm,
+static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
unsigned long pgsize,
unsigned long ncontig)
{
- pte_t orig_pte = huge_ptep_get(ptep);
- bool valid = pte_valid(orig_pte);
- unsigned long i, saddr = addr;
+ pte_t orig_pte = ptep_get(ptep);
+ unsigned long i;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
@@ -190,11 +211,6 @@ static pte_t get_clear_flush(struct mm_struct *mm,
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
}
-
- if (valid) {
- struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
- flush_tlb_range(&vma, saddr, addr);
- }
return orig_pte;
}
@@ -385,14 +401,14 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
{
int ncontig;
size_t pgsize;
- pte_t orig_pte = huge_ptep_get(ptep);
+ pte_t orig_pte = ptep_get(ptep);
if (!pte_cont(orig_pte))
return ptep_get_and_clear(mm, addr, ptep);
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
- return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
+ return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
/*
@@ -408,11 +424,11 @@ static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
int i;
- if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+ if (pte_write(pte) != pte_write(ptep_get(ptep)))
return 1;
for (i = 0; i < ncontig; i++) {
- pte_t orig_pte = huge_ptep_get(ptep + i);
+ pte_t orig_pte = ptep_get(ptep + i);
if (pte_dirty(pte) != pte_dirty(orig_pte))
return 1;
@@ -443,7 +459,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
if (!__cont_access_flags_changed(ptep, pte, ncontig))
return 0;
- orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
+ orig_pte = get_clear_contig(vma->vm_mm, addr, ptep, pgsize, ncontig);
/* Make sure we don't lose the dirty or young state */
if (pte_dirty(orig_pte))
@@ -476,7 +492,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
dpfn = pgsize >> PAGE_SHIFT;
- pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
+ pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
pte = pte_wrprotect(pte);
hugeprot = pte_pgprot(pte);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 8ac25f19084e..339ee84e5a61 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(memstart_addr);
* In this scheme a comparatively quicker boot is observed.
*
* If ZONE_DMA configs are defined, crash kernel memory reservation
- * is delayed until DMA zone memory range size initilazation performed in
+ * is delayed until DMA zone memory range size initialization performed in
* zone_sizes_init(). The defer is necessary to steer clear of DMA zone
* memory range to avoid overlap allocation. So crash kernel memory boundaries
* are not known when mapping all bank memory ranges, which otherwise means
@@ -81,7 +81,7 @@ EXPORT_SYMBOL(memstart_addr);
* so page-granularity mappings are created for the entire memory range.
* Hence a slightly slower boot is observed.
*
- * Note: Page-granularity mapppings are necessary for crash kernel memory
+ * Note: Page-granularity mappings are necessary for crash kernel memory
* range for shrinking its size via /sys/kernel/kexec_crash_size interface.
*/
#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
@@ -90,6 +90,32 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
#endif
+/* Current arm64 boot protocol requires 2MB alignment */
+#define CRASH_ALIGN SZ_2M
+
+#define CRASH_ADDR_LOW_MAX arm64_dma_phys_limit
+#define CRASH_ADDR_HIGH_MAX (PHYS_MASK + 1)
+
+static int __init reserve_crashkernel_low(unsigned long long low_size)
+{
+ unsigned long long low_base;
+
+ low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
+ if (!low_base) {
+ pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
+ return -ENOMEM;
+ }
+
+ pr_info("crashkernel low memory reserved: 0x%08llx - 0x%08llx (%lld MB)\n",
+ low_base, low_base + low_size, low_size >> 20);
+
+ crashk_low_res.start = low_base;
+ crashk_low_res.end = low_base + low_size - 1;
+ insert_resource(&iomem_resource, &crashk_low_res);
+
+ return 0;
+}
+
/*
* reserve_crashkernel() - reserves memory for crash kernel
*
@@ -100,17 +126,35 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
static void __init reserve_crashkernel(void)
{
unsigned long long crash_base, crash_size;
- unsigned long long crash_max = arm64_dma_phys_limit;
+ unsigned long long crash_low_size = 0;
+ unsigned long long crash_max = CRASH_ADDR_LOW_MAX;
+ char *cmdline = boot_command_line;
int ret;
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ /* crashkernel=X[@offset] */
+ ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
&crash_size, &crash_base);
- /* no crashkernel= or invalid value specified */
- if (ret || !crash_size)
+ if (ret == -ENOENT) {
+ ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
+ if (ret || !crash_size)
+ return;
+
+ /*
+ * crashkernel=Y,low may be omitted, but if it is present its
+ * value must be valid.
+ */
+ ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
+ if (ret && (ret != -ENOENT))
+ return;
+
+ crash_max = CRASH_ADDR_HIGH_MAX;
+ } else if (ret || !crash_size) {
+ /* The specified value is invalid */
return;
+ }
crash_size = PAGE_ALIGN(crash_size);
@@ -118,8 +162,7 @@ static void __init reserve_crashkernel(void)
if (crash_base)
crash_max = crash_base + crash_size;
- /* Current arm64 boot protocol requires 2MB alignment */
- crash_base = memblock_phys_alloc_range(crash_size, SZ_2M,
+ crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
crash_base, crash_max);
if (!crash_base) {
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -127,6 +170,12 @@ static void __init reserve_crashkernel(void)
return;
}
+ if ((crash_base >= CRASH_ADDR_LOW_MAX) &&
+ crash_low_size && reserve_crashkernel_low(crash_low_size)) {
+ memblock_phys_free(crash_base, crash_size);
+ return;
+ }
+
pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
crash_base, crash_base + crash_size, crash_size >> 20);
@@ -135,8 +184,12 @@ static void __init reserve_crashkernel(void)
* map. Inform kmemleak so that it won't try to access it.
*/
kmemleak_ignore_phys(crash_base);
+ if (crashk_low_res.end)
+ kmemleak_ignore_phys(crashk_low_res.start);
+
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
+ insert_resource(&iomem_resource, &crashk_res);
}
/*
@@ -157,7 +210,7 @@ static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}
-static void __init zone_sizes_init(unsigned long min, unsigned long max)
+static void __init zone_sizes_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
unsigned int __maybe_unused acpi_zone_dma_bits;
@@ -176,7 +229,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
if (!arm64_dma_phys_limit)
arm64_dma_phys_limit = dma32_phys_limit;
#endif
- max_zone_pfns[ZONE_NORMAL] = max;
+ max_zone_pfns[ZONE_NORMAL] = max_pfn;
free_area_init(max_zone_pfns);
}
@@ -374,7 +427,7 @@ void __init bootmem_init(void)
* done after the fixed reservations
*/
sparse_init();
- zone_sizes_init(min, max);
+ zone_sizes_init();
/*
* Reserve the CMA area after arm64_dma_phys_limit was initialised.
@@ -398,11 +451,7 @@ void __init bootmem_init(void)
*/
void __init mem_init(void)
{
- if (swiotlb_force == SWIOTLB_FORCE ||
- max_pfn > PFN_DOWN(arm64_dma_phys_limit))
- swiotlb_init(1);
- else if (!xen_swiotlb_detect())
- swiotlb_force = SWIOTLB_NO_FORCE;
+ swiotlb_init(max_pfn > PFN_DOWN(arm64_dma_phys_limit), SWIOTLB_VERBOSE);
/* this will put all unused low memory onto the freelists */
memblock_free_all();
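With the reservation split in place, arm64 accepts the same high/low crashkernel syntax as x86. An illustrative command line (sizes are examples only):

	crashkernel=2G,high crashkernel=256M,low

reserves 2 GiB anywhere below CRASH_ADDR_HIGH_MAX plus a 256 MiB DMA-capable block below arm64_dma_phys_limit, while a plain crashkernel=512M keeps the old low-memory-only behaviour.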
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index b7c81dacabf0..b21f91cd830d 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -99,3 +99,11 @@ void __init early_ioremap_init(void)
{
early_ioremap_setup();
}
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags)
+{
+ unsigned long pfn = PHYS_PFN(offset);
+
+ return pfn_is_map_memory(pfn);
+}
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index d7da8ca40d2e..4ea2eefbc053 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -238,7 +238,7 @@ int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
int this_level, index, level_lsb, level_msb;
dst_addr &= PAGE_MASK;
- prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_ROX));
for (this_level = 3; this_level >= 0; this_level--) {
levels[this_level] = trans_alloc(info);
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index dd59b5ad8fe4..194c95ccc1cf 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -66,6 +66,20 @@
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
+/* Load/store register (immediate offset) */
+#define A64_LS_IMM(Rt, Rn, imm, size, type) \
+ aarch64_insn_gen_load_store_imm(Rt, Rn, imm, \
+ AARCH64_INSN_SIZE_##size, \
+ AARCH64_INSN_LDST_##type##_IMM_OFFSET)
+#define A64_STRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, STORE)
+#define A64_LDRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, LOAD)
+#define A64_STRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, STORE)
+#define A64_LDRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, LOAD)
+#define A64_STR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, STORE)
+#define A64_LDR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, LOAD)
+#define A64_STR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, STORE)
+#define A64_LDR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, LOAD)
+
/* Load/store register pair */
#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
@@ -249,6 +263,9 @@
/* HINTs */
#define A64_HINT(x) aarch64_insn_gen_hint(x)
+#define A64_PACIASP A64_HINT(AARCH64_INSN_HINT_PACIASP)
+#define A64_AUTIASP A64_HINT(AARCH64_INSN_HINT_AUTIASP)
+
/* BTI */
#define A64_BTI_C A64_HINT(AARCH64_INSN_HINT_BTIC)
#define A64_BTI_J A64_HINT(AARCH64_INSN_HINT_BTIJ)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index fcc675aa1670..8ab4035dea27 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -26,6 +26,7 @@
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
+#define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
#define check_imm(bits, imm) do { \
if ((((imm) > 0) && ((imm) >> (bits))) || \
@@ -63,6 +64,7 @@ static const int bpf2a64[] = {
[TCALL_CNT] = A64_R(26),
/* temporary register for blinding constants */
[BPF_REG_AX] = A64_R(9),
+ [FP_BOTTOM] = A64_R(27),
};
struct jit_ctx {
@@ -73,6 +75,7 @@ struct jit_ctx {
int exentry_idx;
__le32 *image;
u32 stack_size;
+ int fpb_offset;
};
static inline void emit(const u32 insn, struct jit_ctx *ctx)
@@ -191,11 +194,53 @@ static bool is_addsub_imm(u32 imm)
return !(imm & ~0xfff) || !(imm & ~0xfff000);
}
+/*
+ * There are 3 types of AArch64 LDR/STR (immediate) instruction:
+ * Post-index, Pre-index, Unsigned offset.
+ *
+ * For BPF ldr/str, the "unsigned offset" type is sufficient.
+ *
+ * "Unsigned offset" type LDR(immediate) format:
+ *
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |x x|1 1 1 0 0 1 0 1| imm12 | Rn | Rt |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * scale
+ *
+ * "Unsigned offset" type STR(immediate) format:
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |x x|1 1 1 0 0 1 0 0| imm12 | Rn | Rt |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * scale
+ *
+ * The offset is calculated from imm12 and scale in the following way:
+ *
+ * offset = (u64)imm12 << scale
+ */
+static bool is_lsi_offset(int offset, int scale)
+{
+ if (offset < 0)
+ return false;
+
+ if (offset > (0xFFF << scale))
+ return false;
+
+ if (offset & ((1 << scale) - 1))
+ return false;
+
+ return true;
+}
+
/* Tail call offset to jump into */
-#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
-#define PROLOGUE_OFFSET 8
+#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) || \
+ IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)
+#define PROLOGUE_OFFSET 9
#else
-#define PROLOGUE_OFFSET 7
+#define PROLOGUE_OFFSET 8
#endif
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
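is_lsi_offset() decides whether an access can use the single-instruction "unsigned offset" form: the offset must be non-negative, aligned to the access size, and within imm12's reach after scaling. A standalone sketch that mirrors the check above and prints the reachable range per access size:

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the JIT's range/alignment check for the "unsigned offset" form. */
static bool is_lsi_offset(int offset, int scale)
{
	if (offset < 0)
		return false;
	if (offset > (0xFFF << scale))
		return false;
	if (offset & ((1 << scale) - 1))
		return false;
	return true;
}

int main(void)
{
	/* scale = log2(access size): 0 = byte, 1 = half, 2 = word, 3 = dword */
	for (int scale = 0; scale <= 3; scale++)
		printf("scale %d: offsets 0..%d in steps of %d (max ok: %d)\n",
		       scale, 0xFFF << scale, 1 << scale,
		       is_lsi_offset(0xFFF << scale, scale));
	return 0;
}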
@@ -207,6 +252,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
const u8 r9 = bpf2a64[BPF_REG_9];
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 tcc = bpf2a64[TCALL_CNT];
+ const u8 fpb = bpf2a64[FP_BOTTOM];
const int idx0 = ctx->idx;
int cur_offset;
@@ -233,8 +279,11 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
*
*/
+ /* Sign lr */
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
+ emit(A64_PACIASP, ctx);
/* BTI landing pad */
- if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
+ else if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
emit(A64_BTI_C, ctx);
/* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
@@ -245,6 +294,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit(A64_PUSH(r6, r7, A64_SP), ctx);
emit(A64_PUSH(r8, r9, A64_SP), ctx);
emit(A64_PUSH(fp, tcc, A64_SP), ctx);
+ emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
/* Set up BPF prog stack base register */
emit(A64_MOV(1, fp, A64_SP), ctx);
@@ -265,6 +315,8 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit(A64_BTI_J, ctx);
}
+ emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx);
+
/* Stack must be multiples of 16B */
ctx->stack_size = round_up(prog->aux->stack_depth, 16);
@@ -512,10 +564,13 @@ static void build_epilogue(struct jit_ctx *ctx)
const u8 r8 = bpf2a64[BPF_REG_8];
const u8 r9 = bpf2a64[BPF_REG_9];
const u8 fp = bpf2a64[BPF_REG_FP];
+ const u8 fpb = bpf2a64[FP_BOTTOM];
/* We're done with BPF stack */
emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+ /* Restore x27 and x28 */
+ emit(A64_POP(fpb, A64_R(28), A64_SP), ctx);
/* Restore fp (x25) and x26 */
emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
@@ -529,6 +584,10 @@ static void build_epilogue(struct jit_ctx *ctx)
/* Set return value */
emit(A64_MOV(1, A64_R(0), r0), ctx);
+ /* Authenticate lr */
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
+ emit(A64_AUTIASP, ctx);
+
emit(A64_RET(A64_LR), ctx);
}
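With CONFIG_ARM64_PTR_AUTH_KERNEL, PACIASP signs LR using SP as the modifier on entry, and AUTIASP authenticates it just before the RET above, so a corrupted return address faults rather than being followed; PACIASP is also a valid BTI landing pad, which is why it replaces rather than accompanies BTI C in the prologue. A toy user-space model of the sign/authenticate pairing follows; the real PAC computation (QARMA) and key handling are nothing like this hash:

#include <stdint.h>
#include <stdio.h>

/* Toy MAC standing in for the hardware PAC computation. */
static uint64_t toy_pac(uint64_t ptr, uint64_t modifier, uint64_t key)
{
	uint64_t h = ptr ^ (modifier * 0x9e3779b97f4a7c15ULL) ^ key;
	h ^= h >> 33;
	return h << 48;			/* the PAC lives in unused top bits */
}

int main(void)
{
	uint64_t key = 0xfeedfaceULL, sp = 0x7fff0000ULL, lr = 0x400123ULL;

	/* "PACIASP": sign LR with SP as the modifier. */
	uint64_t signed_lr = lr | toy_pac(lr, sp, key);

	/* "AUTIASP": strip the PAC, recompute, compare. */
	uint64_t plain = signed_lr & ((1ULL << 48) - 1);
	int ok = (signed_lr & ~((1ULL << 48) - 1)) == toy_pac(plain, sp, key);

	printf("return address %s\n", ok ? "authenticates" : "is corrupt");
	return 0;
}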
@@ -609,6 +668,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
const u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
+ const u8 fp = bpf2a64[BPF_REG_FP];
+ const u8 fpb = bpf2a64[FP_BOTTOM];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
@@ -617,6 +678,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
u8 jmp_cond;
s32 jmp_offset;
u32 a64_insn;
+ u8 src_adj;
+ u8 dst_adj;
+ int off_adj;
int ret;
switch (code) {
@@ -971,19 +1035,45 @@ emit_cond_jmp:
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
- emit_a64_mov_i(1, tmp, off, ctx);
+ if (ctx->fpb_offset > 0 && src == fp) {
+ src_adj = fpb;
+ off_adj = off + ctx->fpb_offset;
+ } else {
+ src_adj = src;
+ off_adj = off;
+ }
switch (BPF_SIZE(code)) {
case BPF_W:
- emit(A64_LDR32(dst, src, tmp), ctx);
+ if (is_lsi_offset(off_adj, 2)) {
+ emit(A64_LDR32I(dst, src_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_LDR32(dst, src, tmp), ctx);
+ }
break;
case BPF_H:
- emit(A64_LDRH(dst, src, tmp), ctx);
+ if (is_lsi_offset(off_adj, 1)) {
+ emit(A64_LDRHI(dst, src_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_LDRH(dst, src, tmp), ctx);
+ }
break;
case BPF_B:
- emit(A64_LDRB(dst, src, tmp), ctx);
+ if (is_lsi_offset(off_adj, 0)) {
+ emit(A64_LDRBI(dst, src_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_LDRB(dst, src, tmp), ctx);
+ }
break;
case BPF_DW:
- emit(A64_LDR64(dst, src, tmp), ctx);
+ if (is_lsi_offset(off_adj, 3)) {
+ emit(A64_LDR64I(dst, src_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_LDR64(dst, src, tmp), ctx);
+ }
break;
}
@@ -1010,21 +1100,47 @@ emit_cond_jmp:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_DW:
+ if (ctx->fpb_offset > 0 && dst == fp) {
+ dst_adj = fpb;
+ off_adj = off + ctx->fpb_offset;
+ } else {
+ dst_adj = dst;
+ off_adj = off;
+ }
/* Load imm to a register then store it */
- emit_a64_mov_i(1, tmp2, off, ctx);
emit_a64_mov_i(1, tmp, imm, ctx);
switch (BPF_SIZE(code)) {
case BPF_W:
- emit(A64_STR32(tmp, dst, tmp2), ctx);
+ if (is_lsi_offset(off_adj, 2)) {
+ emit(A64_STR32I(tmp, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp2, off, ctx);
+ emit(A64_STR32(tmp, dst, tmp2), ctx);
+ }
break;
case BPF_H:
- emit(A64_STRH(tmp, dst, tmp2), ctx);
+ if (is_lsi_offset(off_adj, 1)) {
+ emit(A64_STRHI(tmp, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp2, off, ctx);
+ emit(A64_STRH(tmp, dst, tmp2), ctx);
+ }
break;
case BPF_B:
- emit(A64_STRB(tmp, dst, tmp2), ctx);
+ if (is_lsi_offset(off_adj, 0)) {
+ emit(A64_STRBI(tmp, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp2, off, ctx);
+ emit(A64_STRB(tmp, dst, tmp2), ctx);
+ }
break;
case BPF_DW:
- emit(A64_STR64(tmp, dst, tmp2), ctx);
+ if (is_lsi_offset(off_adj, 3)) {
+ emit(A64_STR64I(tmp, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp2, off, ctx);
+ emit(A64_STR64(tmp, dst, tmp2), ctx);
+ }
break;
}
break;
@@ -1034,19 +1150,45 @@ emit_cond_jmp:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_DW:
- emit_a64_mov_i(1, tmp, off, ctx);
+ if (ctx->fpb_offset > 0 && dst == fp) {
+ dst_adj = fpb;
+ off_adj = off + ctx->fpb_offset;
+ } else {
+ dst_adj = dst;
+ off_adj = off;
+ }
switch (BPF_SIZE(code)) {
case BPF_W:
- emit(A64_STR32(src, dst, tmp), ctx);
+ if (is_lsi_offset(off_adj, 2)) {
+ emit(A64_STR32I(src, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_STR32(src, dst, tmp), ctx);
+ }
break;
case BPF_H:
- emit(A64_STRH(src, dst, tmp), ctx);
+ if (is_lsi_offset(off_adj, 1)) {
+ emit(A64_STRHI(src, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_STRH(src, dst, tmp), ctx);
+ }
break;
case BPF_B:
- emit(A64_STRB(src, dst, tmp), ctx);
+ if (is_lsi_offset(off_adj, 0)) {
+ emit(A64_STRBI(src, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_STRB(src, dst, tmp), ctx);
+ }
break;
case BPF_DW:
- emit(A64_STR64(src, dst, tmp), ctx);
+ if (is_lsi_offset(off_adj, 3)) {
+ emit(A64_STR64I(src, dst_adj, off_adj), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_STR64(src, dst, tmp), ctx);
+ }
break;
}
break;
@@ -1069,6 +1211,79 @@ emit_cond_jmp:
return 0;
}
+/*
+ * Return 0 if FP may change at runtime, otherwise find the minimum negative
+ * offset to FP, convert it to a positive number, and align it down to 8 bytes.
+ */
+static int find_fpb_offset(struct bpf_prog *prog)
+{
+ int i;
+ int offset = 0;
+
+ for (i = 0; i < prog->len; i++) {
+ const struct bpf_insn *insn = &prog->insnsi[i];
+ const u8 class = BPF_CLASS(insn->code);
+ const u8 mode = BPF_MODE(insn->code);
+ const u8 src = insn->src_reg;
+ const u8 dst = insn->dst_reg;
+ const s32 imm = insn->imm;
+ const s16 off = insn->off;
+
+ switch (class) {
+ case BPF_STX:
+ case BPF_ST:
+ /* fp holds atomic operation result */
+ if (class == BPF_STX && mode == BPF_ATOMIC &&
+ ((imm == BPF_XCHG ||
+ imm == (BPF_FETCH | BPF_ADD) ||
+ imm == (BPF_FETCH | BPF_AND) ||
+ imm == (BPF_FETCH | BPF_XOR) ||
+ imm == (BPF_FETCH | BPF_OR)) &&
+ src == BPF_REG_FP))
+ return 0;
+
+ if (mode == BPF_MEM && dst == BPF_REG_FP &&
+ off < offset)
+ offset = insn->off;
+ break;
+
+ case BPF_JMP32:
+ case BPF_JMP:
+ break;
+
+ case BPF_LDX:
+ case BPF_LD:
+ /* fp holds load result */
+ if (dst == BPF_REG_FP)
+ return 0;
+
+ if (class == BPF_LDX && mode == BPF_MEM &&
+ src == BPF_REG_FP && off < offset)
+ offset = off;
+ break;
+
+ case BPF_ALU:
+ case BPF_ALU64:
+ default:
+ /* fp holds ALU result */
+ if (dst == BPF_REG_FP)
+ return 0;
+ }
+ }
+
+ if (offset < 0) {
+ /*
+ * 'offset' can safely be converted to a positive 'int', since
+ * insn->off is 's16'
+ */
+ offset = -offset;
+ /* align down to 8 bytes */
+ offset = ALIGN_DOWN(offset, 8);
+ }
+
+ return offset;
+}
+
static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
const struct bpf_prog *prog = ctx->prog;
@@ -1190,6 +1405,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_off;
}
+ ctx.fpb_offset = find_fpb_offset(prog);
+
/*
* 1. Initial fake pass to compute ctx->idx and ctx->offset.
*
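The FP_BOTTOM machinery exists because BPF addresses its stack with negative offsets from FP, while the "unsigned offset" form only encodes non-negative immediates. By keeping fpb = fp - fpb_offset in x27, the JIT can rewrite (fp, off < 0) as (fpb, off + fpb_offset >= 0) and stay within the single-instruction form. A small sketch of the rewrite with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Suppose the most negative FP-relative offset in the prog is -24. */
	int fpb_offset = 24;		/* find_fpb_offset(): -(-24), 8-aligned */

	int off = -16;			/* a BPF access: *(u64 *)(fp - 16) */
	int off_adj = off + fpb_offset;	/* rewritten against fpb = fp - 24 */

	printf("fp%+d becomes fpb%+d (non-negative, LDR/STR-imm eligible)\n",
	       off, off_adj);
	return 0;
}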
diff --git a/arch/arm64/tools/Makefile b/arch/arm64/tools/Makefile
index cf1307188150..07a93ab21a62 100644
--- a/arch/arm64/tools/Makefile
+++ b/arch/arm64/tools/Makefile
@@ -3,7 +3,7 @@
gen := arch/$(ARCH)/include/generated
kapi := $(gen)/asm
-kapi-hdrs-y := $(kapi)/cpucaps.h
+kapi-hdrs-y := $(kapi)/cpucaps.h $(kapi)/sysreg-defs.h
targets += $(addprefix ../../../, $(kapi-hdrs-y))
@@ -14,5 +14,11 @@ kapi: $(kapi-hdrs-y)
quiet_cmd_gen_cpucaps = GEN $@
cmd_gen_cpucaps = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@
+quiet_cmd_gen_sysreg = GEN $@
+ cmd_gen_sysreg = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@
+
$(kapi)/cpucaps.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE
$(call if_changed,gen_cpucaps)
+
+$(kapi)/sysreg-defs.h: $(src)/gen-sysreg.awk $(src)/sysreg FORCE
+ $(call if_changed,gen_sysreg)
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 3ed418f70e3b..e52b289a27c2 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -43,6 +43,8 @@ KVM_PROTECTED_MODE
MISMATCHED_CACHE_TYPE
MTE
MTE_ASYMM
+SME
+SME_FA64
SPECTRE_V2
SPECTRE_V3A
SPECTRE_V4
diff --git a/arch/arm64/tools/gen-sysreg.awk b/arch/arm64/tools/gen-sysreg.awk
new file mode 100755
index 000000000000..89bfb74e28de
--- /dev/null
+++ b/arch/arm64/tools/gen-sysreg.awk
@@ -0,0 +1,268 @@
+#!/bin/awk -f
+# SPDX-License-Identifier: GPL-2.0
+# gen-sysreg.awk: arm64 sysreg header generator
+#
+# Usage: awk -f gen-sysreg.awk sysregs.txt
+
+# Log an error and terminate
+function fatal(msg) {
+ print "Error at " NR ": " msg > "/dev/stderr"
+ exit 1
+}
+
+# Sanity check that the start or end of a block makes sense at this point in
+# the file. If not, produce an error and terminate.
+#
+# @this - the $Block or $EndBlock
+# @prev - the only valid block to already be in (value of @block)
+# @new - the new value of @block
+function change_block(this, prev, new) {
+ if (block != prev)
+ fatal("unexpected " this " (inside " block ")")
+
+ block = new
+}
+
+# Sanity check that the number of records for a field makes sense. If not, produce
+# an error and terminate.
+function expect_fields(nf) {
+ if (NF != nf)
+ fatal(NF " fields found where " nf " expected")
+}
+
+# Print a CPP macro definition, padded with spaces so that the macro bodies
+# line up in a column
+function define(name, val) {
+ printf "%-48s%s\n", "#define " name, val
+}
+
+# Print standard BITMASK/SHIFT/WIDTH CPP definitions for a field
+function define_field(reg, field, msb, lsb) {
+ define(reg "_" field, "GENMASK(" msb ", " lsb ")")
+ define(reg "_" field "_MASK", "GENMASK(" msb ", " lsb ")")
+ define(reg "_" field "_SHIFT", lsb)
+ define(reg "_" field "_WIDTH", msb - lsb + 1)
+}
+
+# Parse a "<msb>[:<lsb>]" string into the global variables @msb and @lsb
+function parse_bitdef(reg, field, bitdef, _bits)
+{
+ if (bitdef ~ /^[0-9]+$/) {
+ msb = bitdef
+ lsb = bitdef
+ } else if (split(bitdef, _bits, ":") == 2) {
+ msb = _bits[1]
+ lsb = _bits[2]
+ } else {
+ fatal("invalid bit-range definition '" bitdef "'")
+ }
+
+ if (msb != next_bit)
+ fatal(reg "." field " starts at " msb " not " next_bit)
+ if (63 < msb || msb < 0)
+ fatal(reg "." field " invalid high bit in '" bitdef "'")
+ if (63 < lsb || lsb < 0)
+ fatal(reg "." field " invalid low bit in '" bitdef "'")
+ if (msb < lsb)
+ fatal(reg "." field " invalid bit-range '" bitdef "'")
+
+ next_bit = lsb - 1
+}
+
+BEGIN {
+ print "#ifndef __ASM_SYSREG_DEFS_H"
+ print "#define __ASM_SYSREG_DEFS_H"
+ print ""
+ print "/* Generated file - do not edit */"
+ print ""
+
+ block = "None"
+}
+
+END {
+ print "#endif /* __ASM_SYSREG_DEFS_H */"
+}
+
+# skip blank lines and comment lines
+/^$/ { next }
+/^#/ { next }
+
+/^SysregFields/ {
+ change_block("SysregFields", "None", "SysregFields")
+ expect_fields(2)
+
+ reg = $2
+
+ res0 = "UL(0)"
+ res1 = "UL(0)"
+
+ next_bit = 63
+
+ next
+}
+
+/^EndSysregFields/ {
+ if (next_bit > 0)
+ fatal("Unspecified bits in " reg)
+
+ change_block("EndSysregFields", "SysregFields", "None")
+
+ define(reg "_RES0", "(" res0 ")")
+ define(reg "_RES1", "(" res1 ")")
+ print ""
+
+ reg = null
+ res0 = null
+ res1 = null
+
+ next
+}
+
+/^Sysreg/ {
+ change_block("Sysreg", "None", "Sysreg")
+ expect_fields(7)
+
+ reg = $2
+ op0 = $3
+ op1 = $4
+ crn = $5
+ crm = $6
+ op2 = $7
+
+ res0 = "UL(0)"
+ res1 = "UL(0)"
+
+ define("REG_" reg, "S" op0 "_" op1 "_C" crn "_C" crm "_" op2)
+ define("SYS_" reg, "sys_reg(" op0 ", " op1 ", " crn ", " crm ", " op2 ")")
+
+ define("SYS_" reg "_Op0", op0)
+ define("SYS_" reg "_Op1", op1)
+ define("SYS_" reg "_CRn", crn)
+ define("SYS_" reg "_CRm", crm)
+ define("SYS_" reg "_Op2", op2)
+
+ print ""
+
+ next_bit = 63
+
+ next
+}
+
+/^EndSysreg/ {
+ if (next_bit > 0)
+ fatal("Unspecified bits in " reg)
+
+ change_block("EndSysreg", "Sysreg", "None")
+
+ if (res0 != null)
+ define(reg "_RES0", "(" res0 ")")
+ if (res1 != null)
+ define(reg "_RES1", "(" res1 ")")
+ if (res0 != null || res1 != null)
+ print ""
+
+ reg = null
+ op0 = null
+ op1 = null
+ crn = null
+ crm = null
+ op2 = null
+ res0 = null
+ res1 = null
+
+ next
+}
+
+# Currently this is effectively a comment; in future we may want to emit
+# defines for the fields.
+/^Fields/ && (block == "Sysreg") {
+ expect_fields(2)
+
+ if (next_bit != 63)
+ fatal("Some fields already defined for " reg)
+
+ print "/* For " reg " fields see " $2 " */"
+ print ""
+
+ next_bit = 0
+ res0 = null
+ res1 = null
+
+ next
+}
+
+/^Res0/ && (block == "Sysreg" || block == "SysregFields") {
+ expect_fields(2)
+ parse_bitdef(reg, "RES0", $2)
+ field = "RES0_" msb "_" lsb
+
+ res0 = res0 " | GENMASK_ULL(" msb ", " lsb ")"
+
+ next
+}
+
+/^Res1/ && (block == "Sysreg" || block == "SysregFields") {
+ expect_fields(2)
+ parse_bitdef(reg, "RES1", $2)
+ field = "RES1_" msb "_" lsb
+
+ res1 = res1 " | GENMASK_ULL(" msb ", " lsb ")"
+
+ next
+}
+
+/^Field/ && (block == "Sysreg" || block == "SysregFields") {
+ expect_fields(3)
+ field = $3
+ parse_bitdef(reg, field, $2)
+
+ define_field(reg, field, msb, lsb)
+ print ""
+
+ next
+}
+
+/^Raz/ && (block == "Sysreg" || block == "SysregFields") {
+ expect_fields(2)
+ parse_bitdef(reg, field, $2)
+
+ next
+}
+
+/^Enum/ {
+ change_block("Enum", "Sysreg", "Enum")
+ expect_fields(3)
+ field = $3
+ parse_bitdef(reg, field, $2)
+
+ define_field(reg, field, msb, lsb)
+
+ next
+}
+
+/^EndEnum/ {
+ change_block("EndEnum", "Enum", "Sysreg")
+ field = null
+ msb = null
+ lsb = null
+ print ""
+ next
+}
+
+/0b[01]+/ && block == "Enum" {
+ expect_fields(2)
+ val = $1
+ name = $2
+
+ define(reg "_" field "_" name, "UL(" val ")")
+ next
+}
+
+# Any lines not handled by previous rules are unexpected
+{
+ fatal("unhandled statement")
+}
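Each Field/Enum line thus expands into a small family of defines. A standalone sketch of what the generated sysreg-defs.h content looks like for the "Enum 41:40 TCF" entry in the sysreg file added below; GENMASK() is re-derived locally (the generated header uses the kernel's from <linux/bits.h>), and the emitted UL(0b10) is transcribed as 2ULL since 0b literals are a compiler extension:

#include <stdio.h>
#include <stdint.h>

/* Local stand-in for the kernel's GENMASK(). */
#define GENMASK(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* What gen-sysreg.awk emits for "Enum 41:40 TCF" in SCTLR_EL1: */
#define SCTLR_EL1_TCF		GENMASK(41, 40)
#define SCTLR_EL1_TCF_MASK	GENMASK(41, 40)
#define SCTLR_EL1_TCF_SHIFT	40
#define SCTLR_EL1_TCF_WIDTH	2
#define SCTLR_EL1_TCF_ASYNC	2ULL	/* generated as UL(0b10) */

int main(void)
{
	uint64_t sctlr = SCTLR_EL1_TCF_ASYNC << SCTLR_EL1_TCF_SHIFT;

	printf("TCF = %llu (field width %d)\n",
	       (unsigned long long)((sctlr & SCTLR_EL1_TCF_MASK)
				    >> SCTLR_EL1_TCF_SHIFT),
	       SCTLR_EL1_TCF_WIDTH);
	return 0;
}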
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
new file mode 100644
index 000000000000..ff5e552f7420
--- /dev/null
+++ b/arch/arm64/tools/sysreg
@@ -0,0 +1,369 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# System register metadata
+
+# Each System register is described by a Sysreg block:
+
+# Sysreg <name> <op0> <op1> <crn> <crm> <op2>
+# <field>
+# ...
+# EndSysreg
+
+# Within a Sysreg block, each field can be described as one of:
+
+# Res0 <msb>[:<lsb>]
+
+# Res1 <msb>[:<lsb>]
+
+# Field <msb>[:<lsb>] <name>
+
+# Enum <msb>[:<lsb>] <name>
+# <enumval> <enumname>
+# ...
+# EndEnum
+
+# Alternatively if multiple registers share the same layout then
+# a SysregFields block can be used to describe the shared layout
+
+# SysregFields <fieldsname>
+# <field>
+# ...
+# EndSysregFields
+
+# and referenced from within the Sysreg:
+
+# Sysreg <name> <op0> <op1> <crn> <crm> <op2>
+# Fields <fieldsname>
+# EndSysreg
+
+# For ID registers we adopt a few conventions for translating the
+# language in the ARM into defines:
+#
+# NI - Not implemented
+# IMP - Implemented
+#
+# In general it is recommended that new enumeration items be named for the
+# feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration
+# item ACCDATA) though it may be more tasteful to do something else.
+
+Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0
+Enum 63:60 RNDR
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 59:56 TLB
+ 0b0000 NI
+ 0b0001 OS
+ 0b0010 RANGE
+EndEnum
+Enum 55:52 TS
+ 0b0000 NI
+ 0b0001 FLAGM
+ 0b0010 FLAGM2
+EndEnum
+Enum 51:48 FHM
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 47:44 DP
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 43:40 SM4
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 39:36 SM3
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 35:32 SHA3
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 31:28 RDM
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 27:24 TME
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 23:20 ATOMIC
+ 0b0000 NI
+ 0b0010 IMP
+EndEnum
+Enum 19:16 CRC32
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 15:12 SHA2
+ 0b0000 NI
+ 0b0001 SHA256
+ 0b0010 SHA512
+EndEnum
+Enum 11:8 SHA1
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 7:4 AES
+ 0b0000 NI
+ 0b0001 AES
+ 0b0010 PMULL
+EndEnum
+Res0 3:0
+EndSysreg
+
+Sysreg SCTLR_EL1 3 0 1 0 0
+Field 63 TIDCP
+Field 62 SPINTMASK
+Field 61 NMI
+Field 60 EnTP2
+Res0 59:58
+Field 57 EPAN
+Field 56 EnALS
+Field 55 EnAS0
+Field 54 EnASR
+Field 53 TME
+Field 52 TME0
+Field 51 TMT
+Field 50 TMT0
+Field 49:46 TWEDEL
+Field 45 TWEDEn
+Field 44 DSSBS
+Field 43 ATA
+Field 42 ATA0
+Enum 41:40 TCF
+ 0b00 NONE
+ 0b01 SYNC
+ 0b10 ASYNC
+ 0b11 ASYMM
+EndEnum
+Enum 39:38 TCF0
+ 0b00 NONE
+ 0b01 SYNC
+ 0b10 ASYNC
+ 0b11 ASYMM
+EndEnum
+Field 37 ITFSB
+Field 36 BT1
+Field 35 BT0
+Res0 34
+Field 33 MSCEn
+Field 32 CMOW
+Field 31 EnIA
+Field 30 EnIB
+Field 29 LSMAOE
+Field 28 nTLSMD
+Field 27 EnDA
+Field 26 UCI
+Field 25 EE
+Field 24 E0E
+Field 23 SPAN
+Field 22 EIS
+Field 21 IESB
+Field 20 TSCXT
+Field 19 WXN
+Field 18 nTWE
+Res0 17
+Field 16 nTWI
+Field 15 UCT
+Field 14 DZE
+Field 13 EnDB
+Field 12 I
+Field 11 EOS
+Field 10 EnRCTX
+Field 9 UMA
+Field 8 SED
+Field 7 ITD
+Field 6 nAA
+Field 5 CP15BEN
+Field 4 SA0
+Field 3 SA
+Field 2 C
+Field 1 A
+Field 0 M
+EndSysreg
+
+SysregFields CPACR_ELx
+Res0 63:29
+Field 28 TTA
+Res0 27:26
+Field 25:24 SMEN
+Res0 23:22
+Field 21:20 FPEN
+Res0 19:18
+Field 17:16 ZEN
+Res0 15:0
+EndSysregFields
+
+Sysreg CPACR_EL1 3 0 1 0 2
+Fields CPACR_ELx
+EndSysreg
+
+Sysreg SMPRI_EL1 3 0 1 2 4
+Res0 63:4
+Field 3:0 PRIORITY
+EndSysreg
+
+SysregFields ZCR_ELx
+Res0 63:9
+Raz 8:4
+Field 3:0 LEN
+EndSysregFields
+
+Sysreg ZCR_EL1 3 0 1 2 0
+Fields ZCR_ELx
+EndSysreg
+
+SysregFields SMCR_ELx
+Res0 63:32
+Field 31 FA64
+Res0 30:9
+Raz 8:4
+Field 3:0 LEN
+EndSysregFields
+
+Sysreg SMCR_EL1 3 0 1 2 6
+Fields SMCR_ELx
+EndSysreg
+
+Sysreg FAR_EL1 3 0 6 0 0
+Field 63:0 ADDR
+EndSysreg
+
+SysregFields CONTEXTIDR_ELx
+Res0 63:32
+Field 31:0 PROCID
+EndSysregFields
+
+Sysreg CONTEXTIDR_EL1 3 0 13 0 1
+Fields CONTEXTIDR_ELx
+EndSysreg
+
+Sysreg CLIDR_EL1 3 1 0 0 1
+Res0 63:47
+Field 46:33 Ttypen
+Field 32:30 ICB
+Field 29:27 LoUU
+Field 26:24 LoC
+Field 23:21 LoUIS
+Field 20:18 Ctype7
+Field 17:15 Ctype6
+Field 14:12 Ctype5
+Field 11:9 Ctype4
+Field 8:6 Ctype3
+Field 5:3 Ctype2
+Field 2:0 Ctype1
+EndSysreg
+
+Sysreg SMIDR_EL1 3 1 0 0 6
+Res0 63:32
+Field 31:24 IMPLEMENTER
+Field 23:16 REVISION
+Field 15 SMPS
+Res0 14:12
+Field 11:0 AFFINITY
+EndSysreg
+
+Sysreg CSSELR_EL1 3 2 0 0 0
+Res0 63:5
+Field 4 TnD
+Field 3:1 Level
+Field 0 InD
+EndSysreg
+
+Sysreg SVCR 3 3 4 2 2
+Res0 63:2
+Field 1 ZA
+Field 0 SM
+EndSysreg
+
+Sysreg ZCR_EL2 3 4 1 2 0
+Fields ZCR_ELx
+EndSysreg
+
+Sysreg SMPRIMAP_EL2 3 4 1 2 5
+Field 63:60 P15
+Field 59:56 P14
+Field 55:52 P13
+Field 51:48 P12
+Field 47:44 P11
+Field 43:40 P10
+Field 39:36 P9
+Field 35:32 P8
+Field 31:28 P7
+Field 27:24 P6
+Field 23:20 P5
+Field 19:16 P4
+Field 15:12 P3
+Field 11:8 P2
+Field 7:4 P1
+Field 3:0 P0
+EndSysreg
+
+Sysreg SMCR_EL2 3 4 1 2 6
+Fields SMCR_ELx
+EndSysreg
+
+Sysreg DACR32_EL2 3 4 3 0 0
+Res0 63:32
+Field 31:30 D15
+Field 29:28 D14
+Field 27:26 D13
+Field 25:24 D12
+Field 23:22 D11
+Field 21:20 D10
+Field 19:18 D9
+Field 17:16 D8
+Field 15:14 D7
+Field 13:12 D6
+Field 11:10 D5
+Field 9:8 D4
+Field 7:6 D3
+Field 5:4 D2
+Field 3:2 D1
+Field 1:0 D0
+EndSysreg
+
+Sysreg FAR_EL2 3 4 6 0 0
+Field 63:0 ADDR
+EndSysreg
+
+Sysreg CONTEXTIDR_EL2 3 4 13 0 1
+Fields CONTEXTIDR_ELx
+EndSysreg
+
+Sysreg CPACR_EL12 3 5 1 0 2
+Fields CPACR_ELx
+EndSysreg
+
+Sysreg ZCR_EL12 3 5 1 2 0
+Fields ZCR_ELx
+EndSysreg
+
+Sysreg SMCR_EL12 3 5 1 2 6
+Fields SMCR_ELx
+EndSysreg
+
+Sysreg FAR_EL12 3 5 6 0 0
+Field 63:0 ADDR
+EndSysreg
+
+Sysreg CONTEXTIDR_EL12 3 5 13 0 1
+Fields CONTEXTIDR_ELx
+EndSysreg
+
+SysregFields TTBRx_EL1
+Field 63:48 ASID
+Field 47:1 BADDR
+Field 0 CnP
+EndSysregFields
+
+Sysreg TTBR0_EL1 3 0 2 0 0
+Fields TTBRx_EL1
+EndSysreg
+
+Sysreg TTBR1_EL1 3 0 2 0 1
+Fields TTBRx_EL1
+EndSysreg
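As a usage sketch, the generated defines make ID-register decoding table-driven. Assuming the names the generator would produce from the "Enum 15:12 SHA2" entry in the ID_AA64ISAR0_EL1 block above (and treating the field as an unsigned feature level, as cpufeature code does for this register), decoding looks like this; the register value is a made-up sample:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* As generated from "Enum 15:12 SHA2" above (UL(0b...) transcribed). */
#define ID_AA64ISAR0_EL1_SHA2_MASK	GENMASK(15, 12)
#define ID_AA64ISAR0_EL1_SHA2_SHIFT	12
#define ID_AA64ISAR0_EL1_SHA2_SHA256	1ULL
#define ID_AA64ISAR0_EL1_SHA2_SHA512	2ULL

int main(void)
{
	uint64_t isar0 = 0x2120;	/* sample: SHA2 = SHA512, SHA1 = IMP */
	uint64_t sha2 = (isar0 & ID_AA64ISAR0_EL1_SHA2_MASK)
			>> ID_AA64ISAR0_EL1_SHA2_SHIFT;

	if (sha2 >= ID_AA64ISAR0_EL1_SHA2_SHA512)
		puts("SHA-512 instructions implemented");
	else if (sha2 >= ID_AA64ISAR0_EL1_SHA2_SHA256)
		puts("SHA-256 instructions implemented");
	else
		puts("no SHA2 instructions");
	return 0;
}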