Diffstat (limited to 'arch/arm64/include/asm')
 arch/arm64/include/asm/archrandom.h                |   1
 arch/arm64/include/asm/device.h                    |   3
 arch/arm64/include/asm/extable.h                   |  12
 arch/arm64/include/asm/fixmap.h                    |   7
 arch/arm64/include/asm/kvm_asm.h                   |  75
 arch/arm64/include/asm/kvm_coproc.h                |  20
 arch/arm64/include/asm/kvm_emulate.h               |  75
 arch/arm64/include/asm/kvm_host.h                  | 116
 arch/arm64/include/asm/kvm_hyp.h                   |  15
 arch/arm64/include/asm/kvm_mmu.h                   |  18
 arch/arm64/include/asm/kvm_ptrauth.h               |  34
 arch/arm64/include/asm/kvm_types.h                 |   8
 arch/arm64/include/asm/mmu.h                       |   7
 arch/arm64/include/asm/pgalloc.h                   |  39
 arch/arm64/include/asm/smp.h                       |   7
 arch/arm64/include/asm/uaccess.h                   |   2
 arch/arm64/include/asm/unistd32.h                  |   8
 arch/arm64/include/asm/vdso/compat_gettimeofday.h  |   3
 arch/arm64/include/asm/vdso/gettimeofday.h         |   3
 arch/arm64/include/asm/virt.h                      |  13
20 files changed, 254 insertions(+), 212 deletions(-)
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index fc1594a0710e..44209f6146aa 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -6,7 +6,6 @@
 #include <linux/bug.h>
 #include <linux/kernel.h>
-#include <linux/random.h>
 #include <asm/cpufeature.h>
 
 static inline bool __arm64_rndr(unsigned long *v)
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 12b778d55342..996498751318 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -6,9 +6,6 @@
 #define __ASM_DEVICE_H
 
 struct dev_archdata {
-#ifdef CONFIG_IOMMU_API
-	void *iommu;			/* private IOMMU data */
-#endif
 };
 
 struct pdev_archdata {
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index 56a4f68b262e..840a35ed92ec 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -22,5 +22,17 @@ struct exception_table_entry
 
 #define ARCH_HAS_RELATIVE_EXTABLE
 
+#ifdef CONFIG_BPF_JIT
+int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
+			      struct pt_regs *regs);
+#else /* !CONFIG_BPF_JIT */
+static inline
+int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
+			      struct pt_regs *regs)
+{
+	return 0;
+}
+#endif /* !CONFIG_BPF_JIT */
+
 extern int fixup_exception(struct pt_regs *regs);
 #endif
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index f987b8a8f325..4335800201c9 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -28,10 +28,9 @@
  * compile time, but to set the physical address only
  * in the boot process.
  *
- * These 'compile-time allocated' memory buffers are
- * page-sized. Use set_fixmap(idx,phys) to associate
- * physical memory with fixmap indices.
- *
+ * Each enum increment in these 'compile-time allocated'
+ * memory buffers is page-sized. Use set_fixmap(idx,phys)
+ * to associate physical memory with a fixmap index.
  */
 enum fixed_addresses {
 	FIX_HOLE,
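The fixmap comment above describes compile-time virtual slots that get bound to physical pages at runtime. A minimal usage sketch, assuming a hypothetical FIX_EXAMPLE slot in enum fixed_addresses; set_fixmap(), fix_to_virt() and clear_fixmap() are the existing fixmap helpers the comment refers to:

	/* Sketch only: FIX_EXAMPLE is a made-up fixmap slot. */
	#include <asm/fixmap.h>

	static void __iomem *map_one_page(phys_addr_t phys)
	{
		/* Bind the physical page to the compile-time virtual slot. */
		set_fixmap(FIX_EXAMPLE, phys);

		/* The virtual address is a constant known at compile time. */
		return (void __iomem *)fix_to_virt(FIX_EXAMPLE);
	}

	static void unmap_one_page(void)
	{
		clear_fixmap(FIX_EXAMPLE);	/* tear the mapping down again */
	}
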
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 352aaebf4198..fb1a922b31ba 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -42,33 +42,81 @@
 #include <linux/mm.h>
 
-/* Translate a kernel address of @sym into its equivalent linear mapping */
-#define kvm_ksym_ref(sym)						\
+/*
+ * Translate name of a symbol defined in nVHE hyp to the name seen
+ * by kernel proper. All nVHE symbols are prefixed by the build system
+ * to avoid clashes with the VHE variants.
+ */
+#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym
+
+#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
+#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
+
+/*
+ * Define a pair of symbols sharing the same name but one defined in
+ * VHE and the other in nVHE hyp implementations.
+ */
+#define DECLARE_KVM_HYP_SYM(sym)	\
+	DECLARE_KVM_VHE_SYM(sym);	\
+	DECLARE_KVM_NVHE_SYM(sym)
+
+#define CHOOSE_VHE_SYM(sym)	sym
+#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
+
+#ifndef __KVM_NVHE_HYPERVISOR__
+/*
+ * BIG FAT WARNINGS:
+ *
+ * - Don't be tempted to change the following is_kernel_in_hyp_mode()
+ *   to has_vhe(). has_vhe() is implemented as a *final* capability,
+ *   while this is used early at boot time, when the capabilities are
+ *   not final yet....
+ *
+ * - Don't let the nVHE hypervisor have access to this, as it will
+ *   pick the *wrong* symbol (yes, it runs at EL2...).
+ */
+#define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
+							 : CHOOSE_NVHE_SYM(sym))
+#else
+/* The nVHE hypervisor shouldn't even try to access anything */
+extern void *__nvhe_undefined_symbol;
+#define CHOOSE_HYP_SYM(sym)	__nvhe_undefined_symbol
+#endif
+
+/* Translate a kernel address @ptr into its equivalent linear mapping */
+#define kvm_ksym_ref(ptr)						\
 	({								\
-		void *val = &sym;					\
+		void *val = (ptr);					\
 		if (!is_kernel_in_hyp_mode())				\
-			val = lm_alias(&sym);				\
+			val = lm_alias((ptr));				\
 		val;							\
 	 })
+#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
 
 struct kvm;
 struct kvm_vcpu;
+struct kvm_s2_mmu;
 
-extern char __kvm_hyp_init[];
-extern char __kvm_hyp_init_end[];
+DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
+DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
+#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
+#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)
 
-extern char __kvm_hyp_vector[];
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+extern atomic_t arm64_el2_vector_last_slot;
+DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
+#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
+#endif
 
 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+				     int level);
+extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
 
 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
 
-extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);
-
-extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern void __kvm_enable_ssbs(void);
 
@@ -143,7 +191,6 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 .macro get_vcpu_ptr vcpu, ctxt
 	get_host_ctxt \ctxt, \vcpu
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
-	kern_hyp_va	\vcpu
 .endm
 
 #endif
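To see how the new symbol plumbing resolves, a hedged expansion sketch (the macros are the ones added above; the expansions in the comment are illustrative):

	DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
	/*
	 * expands to:
	 *   extern char __kvm_hyp_vector[];             - VHE copy
	 *   extern char __kvm_nvhe___kvm_hyp_vector[];  - nVHE copy, prefixed
	 *                                                 by the build system
	 *
	 * In kernel proper, CHOOSE_HYP_SYM(__kvm_hyp_vector) then picks one
	 * of the two at runtime via is_kernel_in_hyp_mode(); in nVHE hyp
	 * objects it degrades to __nvhe_undefined_symbol, which is declared
	 * but never defined, so any accidental use fails at link time.
	 */
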
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h
index 0185ee8b8b5e..d6bb40122fdb 100644
--- a/arch/arm64/include/asm/kvm_coproc.h
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -19,20 +19,12 @@ struct kvm_sys_reg_table {
 	size_t num;
 };
 
-struct kvm_sys_reg_target_table {
-	struct kvm_sys_reg_table table64;
-	struct kvm_sys_reg_table table32;
-};
-
-void kvm_register_target_sys_reg_table(unsigned int target,
-				       struct kvm_sys_reg_target_table *table);
-
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
 
 #define kvm_coproc_table_init kvm_sys_reg_table_init
 void kvm_sys_reg_table_init(void);
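All of these exit handlers lose their struct kvm_run parameter: the run structure stays reachable through the vcpu itself. A hedged sketch of the resulting call-site pattern (demo_handler is hypothetical; vcpu->run and KVM_EXIT_MMIO are existing KVM definitions):

	static int demo_handler(struct kvm_vcpu *vcpu)
	{
		struct kvm_run *run = vcpu->run; /* formerly an explicit parameter */

		run->exit_reason = KVM_EXIT_MMIO; /* illustrative only */
		return 0;
	}
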
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 4d0f8ea600ba..49a55be2b9a2 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -124,33 +124,12 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
 
 static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
-}
-
-static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
-{
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
-}
-
-static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		return read_sysreg_el1(SYS_ELR);
-	else
-		return *__vcpu_elr_el1(vcpu);
-}
-
-static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		write_sysreg_el1(v, SYS_ELR);
-	else
-		*__vcpu_elr_el1(vcpu) = v;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
 }
 
 static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
 }
 
 static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
@@ -179,14 +158,14 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
 						  u8 reg_num)
 {
-	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
 }
 
 static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 				unsigned long val)
 {
 	if (reg_num != 31)
-		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
+		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
 }
 
 static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
@@ -197,7 +176,7 @@ static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
 	if (vcpu->arch.sysregs_loaded_on_cpu)
 		return read_sysreg_el1(SYS_SPSR);
 	else
-		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+		return __vcpu_sys_reg(vcpu, SPSR_EL1);
 }
 
 static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
@@ -210,7 +189,7 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
 	if (vcpu->arch.sysregs_loaded_on_cpu)
 		write_sysreg_el1(v, SYS_SPSR);
 	else
-		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
+		__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
 }
 
 /*
@@ -259,14 +238,14 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -291,64 +270,64 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
@@ -358,15 +337,15 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
-static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
@@ -387,7 +366,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	return ESR_ELx_SYS64_ISS_RT(esr);
 }
@@ -516,14 +495,14 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_i
  * Skip an instruction which has been emulated at hyp while most guest sysregs
  * are live.
 
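The kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() rename tracks the ARMv8 name of the trapped-exception register (ESR_EL2); the accessors above are thin field extractions over it. A hedged sketch of the same decoding done directly with the existing ESR_ELx_* definitions from <asm/esr.h> (decode_esr is hypothetical):

	#include <asm/esr.h>
	#include <linux/printk.h>

	static void decode_esr(u32 esr)
	{
		u8 ec = ESR_ELx_EC(esr);		/* exception class */
		bool is_write = esr & ESR_ELx_WNR;	/* write-not-read */
		unsigned int bytes =
			1 << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);

		pr_info("EC=%u write=%d size=%u\n", ec, is_write, bytes);
	}
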
 */
-static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
-	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
+	vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);
 
 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 
-	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
+	write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
 	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
 }
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e21d4a01372f..65568b23868a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -66,19 +66,34 @@ struct kvm_vmid {
 	u32    vmid;
 };
 
-struct kvm_arch {
+struct kvm_s2_mmu {
 	struct kvm_vmid vmid;
 
-	/* stage2 entry level table */
-	pgd_t *pgd;
-	phys_addr_t pgd_phys;
-
-	/* VTCR_EL2 value for this VM */
-	u64    vtcr;
+	/*
+	 * stage2 entry level table
+	 *
+	 * Two kvm_s2_mmu structures in the same VM can point to the same
+	 * pgd here. This happens when running a guest using a
+	 * translation regime that isn't affected by its own stage-2
+	 * translation, such as a non-VHE hypervisor running at vEL2, or
+	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
+	 * canonical stage-2 page tables.
+	 */
+	pgd_t		*pgd;
+	phys_addr_t	pgd_phys;
 
 	/* The last vcpu id that ran on each physical CPU */
 	int __percpu *last_vcpu_ran;
 
+	struct kvm *kvm;
+};
+
+struct kvm_arch {
+	struct kvm_s2_mmu mmu;
+
+	/* VTCR_EL2 value for this VM */
+	u64    vtcr;
+
 	/* The maximum number of vCPUs depends on the used GIC model */
 	int max_vcpus;
 
@@ -97,17 +112,6 @@ struct kvm_arch {
 	bool return_nisv_io_abort_to_user;
 };
 
-#define KVM_NR_MEM_OBJS     40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
-	int nobjs;
-	void *objects[KVM_NR_MEM_OBJS];
-};
-
 struct kvm_vcpu_fault_info {
 	u32 esr_el2;		/* Hyp Syndrom Register */
 	u64 far_el2;		/* Hyp Fault Address Register */
@@ -170,6 +174,16 @@ enum vcpu_sysreg {
 	APGAKEYLO_EL1,
 	APGAKEYHI_EL1,
 
+	ELR_EL1,
+	SP_EL1,
+	SPSR_EL1,
+
+	CNTVOFF_EL2,
+	CNTV_CVAL_EL0,
+	CNTV_CTL_EL0,
+	CNTP_CVAL_EL0,
+	CNTP_CTL_EL0,
+
 	/* 32bit specific registers. Keep them at the end of the range */
 	DACR32_EL2,	/* Domain Access Control Register */
 	IFSR32_EL2,	/* Instruction Fault Status Register */
@@ -221,7 +235,15 @@ enum vcpu_sysreg {
 #define NR_COPRO_REGS	(NR_SYS_REGS * 2)
 
 struct kvm_cpu_context {
-	struct kvm_regs	gp_regs;
+	struct user_pt_regs regs;	/* sp = sp_el0 */
+
+	u64	spsr_abt;
+	u64	spsr_und;
+	u64	spsr_irq;
+	u64	spsr_fiq;
+
+	struct user_fpsimd_state fp_regs;
+
 	union {
 		u64 sys_regs[NR_SYS_REGS];
 		u32 copro[NR_COPRO_REGS];
@@ -254,6 +276,9 @@ struct kvm_vcpu_arch {
 	void *sve_state;
 	unsigned int sve_max_vl;
 
+	/* Stage 2 paging state used by the hardware on next switch */
+	struct kvm_s2_mmu *hw_mmu;
+
 	/* HYP configuration */
 	u64 hcr_el2;
 	u32 mdcr_el2;
@@ -338,7 +363,7 @@ struct kvm_vcpu_arch {
 	struct vcpu_reset_state	reset_state;
 
 	/* True when deferrable sysregs are loaded on the physical CPU,
-	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
+	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
 	bool sysregs_loaded_on_cpu;
 
 	/* Guest PV state */
@@ -389,15 +414,20 @@ struct kvm_vcpu_arch {
 #define vcpu_has_ptrauth(vcpu)		false
 #endif
 
-#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)
+#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)
 
 /*
- * Only use __vcpu_sys_reg if you know you want the memory backed version of a
- * register, and not the one most recently accessed by a running VCPU. For
- * example, for userspace access or for system registers that are never context
- * switched, but only emulated.
+ * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
+ * memory backed version of a register, and not the one most recently
+ * accessed by a running VCPU. For example, for userspace access or
+ * for system registers that are never context switched, but only
+ * emulated.
  */
-#define __vcpu_sys_reg(v,r)	((v)->arch.ctxt.sys_regs[(r)])
+#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])
+
+#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
+
+#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))
 
 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
@@ -453,6 +483,18 @@ void kvm_arm_resume_guest(struct kvm *kvm);
 
 u64 __kvm_call_hyp(void *hypfn, ...);
 
+#define kvm_call_hyp_nvhe(f, ...)					\
+	do {								\
+		DECLARE_KVM_NVHE_SYM(f);				\
+		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
+	} while(0)
+
+#define kvm_call_hyp_nvhe_ret(f, ...)					\
+	({								\
+		DECLARE_KVM_NVHE_SYM(f);				\
+		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
+	})
+
 /*
  * The couple of isb() below are there to guarantee the same behaviour
  * on VHE as on !VHE, where the eret to EL1 acts as a context
@@ -464,7 +506,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 			f(__VA_ARGS__);					\
 			isb();						\
 		} else {						\
-			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
 		}							\
 	} while(0)
 
@@ -476,8 +518,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 			ret = f(__VA_ARGS__);				\
 			isb();						\
 		} else {						\
-			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
-					     ##__VA_ARGS__);		\
+			ret = kvm_call_hyp_nvhe_ret(f, ##__VA_ARGS__);	\
 		}							\
 									\
 		ret;							\
@@ -486,18 +527,15 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		int exception_index);
-void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		       int exception_index);
+int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
+void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
 
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
 
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		 phys_addr_t fault_ipa);
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
+int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
 
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
@@ -532,7 +570,7 @@ DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
 	/* The host's MPIDR is immutable, so let's set it up at boot time */
-	cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
+	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
 }
 
 static inline bool kvm_arch_requires_vhe(void)
@@ -633,8 +671,8 @@ static inline int kvm_arm_have_ssbd(void)
 	}
 }
 
-void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
-void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
+void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
+void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
 
 int kvm_set_ipa_limit(void);
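Pulling the pieces together, a hedged sketch of how the new accessors and hyp-call macros are meant to be used (demo is hypothetical; __vcpu_sys_reg(), kvm_call_hyp() and __kvm_tlb_flush_local_vmid() are declared above and in kvm_asm.h):

	static void demo(struct kvm_vcpu *vcpu)
	{
		/* Memory-backed sysreg access, e.g. for userspace get/set. */
		u64 spsr = __vcpu_sys_reg(vcpu, SPSR_EL1);
		__vcpu_sys_reg(vcpu, SPSR_EL1) = spsr;

		/*
		 * On VHE this is a direct function call followed by an isb();
		 * on nVHE it resolves the __kvm_nvhe_-prefixed symbol and
		 * traps to EL2 through __kvm_call_hyp().
		 */
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu->arch.hw_mmu);
	}
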
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index ce3080834bfa..46689e7db46c 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -12,8 +12,6 @@
 #include <asm/alternative.h>
 #include <asm/sysreg.h>
 
-#define __hyp_text __section(.hyp.text) notrace __noscs
-
 #define read_sysreg_elx(r,nvh,vh)					\
 	({								\
 		u64 reg;						\
@@ -63,17 +61,20 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
+#ifdef __KVM_NVHE_HYPERVISOR__
 void __timer_enable_traps(struct kvm_vcpu *vcpu);
 void __timer_disable_traps(struct kvm_vcpu *vcpu);
+#endif
 
+#ifdef __KVM_NVHE_HYPERVISOR__
 void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
 void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
+#else
 void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
 void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
-void __sysreg32_save_state(struct kvm_vcpu *vcpu);
-void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
+#endif
 
 void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
 void __debug_switch_to_host(struct kvm_vcpu *vcpu);
@@ -81,11 +82,17 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu);
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
 
+#ifndef __KVM_NVHE_HYPERVISOR__
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
 void deactivate_traps_vhe_put(void);
+#endif
 
 u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
+
+void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt);
+#ifdef __KVM_NVHE_HYPERVISOR__
 void __noreturn __hyp_do_panic(unsigned long, ...);
+#endif
 
 #endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index b12bfc1f051a..189839c3706a 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -134,12 +134,12 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 void free_hyp_pgds(void);
 
 void stage2_unmap_vm(struct kvm *kvm);
-int kvm_alloc_stage2_pgd(struct kvm *kvm);
-void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
+void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable);
 
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 
@@ -577,13 +577,13 @@ static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
 	return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
 }
 
-static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
+static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 {
-	struct kvm_vmid *vmid = &kvm->arch.vmid;
+	struct kvm_vmid *vmid = &mmu->vmid;
 	u64 vmid_field, baddr;
 	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
 
-	baddr = kvm->arch.pgd_phys;
+	baddr = mmu->pgd_phys;
 	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
 	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }
 
@@ -592,10 +592,10 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_guest_stage2(struct kvm *kvm)
+static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
 {
-	write_sysreg(kvm->arch.vtcr, vtcr_el2);
-	write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
+	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
+	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
 
 	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
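As a concrete reading of kvm_get_vttbr() above, VTTBR_EL2 is the stage-2 table base ORed with the VMID in the top bits and, optionally, the CnP bit. A hedged arithmetic sketch (values illustrative; VTTBR_VMID_SHIFT is 48 and VTTBR_CNP_BIT is bit 0 in <asm/kvm_arm.h>):

	u64 example_vttbr(u64 pgd_phys, u16 vmid, bool cnp)
	{
		u64 vttbr = pgd_phys;		/* suitably aligned baddr */

		vttbr |= (u64)vmid << 48;	/* VTTBR_VMID_SHIFT */
		if (cnp)
			vttbr |= 1UL;		/* VTTBR_CNP_BIT */
		return vttbr;
	}
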
diff --git a/arch/arm64/include/asm/kvm_ptrauth.h b/arch/arm64/include/asm/kvm_ptrauth.h
index 6301813dcace..0ddf98c3ba9f 100644
--- a/arch/arm64/include/asm/kvm_ptrauth.h
+++ b/arch/arm64/include/asm/kvm_ptrauth.h
@@ -61,44 +61,36 @@
 
 /*
  * Both ptrauth_switch_to_guest and ptrauth_switch_to_host macros will
- * check for the presence of one of the cpufeature flags
- * ARM64_HAS_ADDRESS_AUTH_ARCH or ARM64_HAS_ADDRESS_AUTH_IMP_DEF and
+ * check for the presence of ARM64_HAS_ADDRESS_AUTH, which is defined as
+ * (ARM64_HAS_ADDRESS_AUTH_ARCH || ARM64_HAS_ADDRESS_AUTH_IMP_DEF) and
  * then proceed ahead with the save/restore of Pointer Authentication
- * key registers.
+ * key registers if enabled for the guest.
  */
 .macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
-alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH
-	b	1000f
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+	b	.L__skip_switch\@
 alternative_else_nop_endif
-alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF
-	b	1001f
-alternative_else_nop_endif
-1000:
-	ldr	\reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)]
+	mrs	\reg1, hcr_el2
 	and	\reg1, \reg1, #(HCR_API | HCR_APK)
-	cbz	\reg1, 1001f
+	cbz	\reg1, .L__skip_switch\@
 	add	\reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
 	ptrauth_restore_state	\reg1, \reg2, \reg3
-1001:
+.L__skip_switch\@:
 .endm
 
 .macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
-alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH
-	b	2000f
-alternative_else_nop_endif
-alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF
-	b	2001f
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+	b	.L__skip_switch\@
 alternative_else_nop_endif
-2000:
-	ldr	\reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)]
+	mrs	\reg1, hcr_el2
 	and	\reg1, \reg1, #(HCR_API | HCR_APK)
-	cbz	\reg1, 2001f
+	cbz	\reg1, .L__skip_switch\@
 	add	\reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
 	ptrauth_save_state	\reg1, \reg2, \reg3
 	add	\reg1, \h_ctxt, #CPU_APIAKEYLO_EL1
 	ptrauth_restore_state	\reg1, \reg2, \reg3
 	isb
-2001:
+.L__skip_switch\@:
 .endm
 
 #else /* !CONFIG_ARM64_PTR_AUTH */
diff --git a/arch/arm64/include/asm/kvm_types.h b/arch/arm64/include/asm/kvm_types.h
new file mode 100644
index 000000000000..9a126b9e2d7c
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_types.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_KVM_TYPES_H
+#define _ASM_ARM64_KVM_TYPES_H
+
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+
+#endif /* _ASM_ARM64_KVM_TYPES_H */
+
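This new header exists so the generic KVM MMU memory cache can size its object array; it replaces the arm64-private kvm_mmu_memory_cache deleted from kvm_host.h above, keeping the same capacity of 40 preallocated objects. Roughly, the common definition it feeds looks like the following (paraphrased from include/linux/kvm_types.h of this era; check the tree for the exact fields):

	struct kvm_mmu_memory_cache {
		int nobjs;
		gfp_t gfp_zero;
		struct kmem_cache *kmem_cache;
		void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
	};
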
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 8444df000181..a7a5ecaa2e83 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -45,13 +45,6 @@ struct bp_hardening_data {
 	bp_hardening_cb_t	fn;
 };
 
-#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
-     defined(CONFIG_HARDEN_EL2_VECTORS))
-
-extern char __bp_harden_hyp_vecs[];
-extern atomic_t arm64_el2_vector_last_slot;
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
-
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 58e93583ddb6..3c6a7f5988b1 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -13,37 +13,13 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */
+#define __HAVE_ARCH_PGD_FREE
+#include <asm-generic/pgalloc.h>
 
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2
 
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	gfp_t gfp = GFP_PGTABLE_USER;
-	struct page *page;
-
-	if (mm == &init_mm)
-		gfp = GFP_PGTABLE_KERNEL;
-
-	page = alloc_page(gfp);
-	if (!page)
-		return NULL;
-	if (!pgtable_pmd_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
-	return page_address(page);
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
-{
-	BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
-	pgtable_pmd_page_dtor(virt_to_page(pmdp));
-	free_page((unsigned long)pmdp);
-}
-
 static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 {
 	set_pud(pudp, __pud(__phys_to_pud_val(pmdp) | prot));
@@ -62,17 +38,6 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 
 #if CONFIG_PGTABLE_LEVELS > 3
 
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	return (pud_t *)__get_free_page(GFP_PGTABLE_USER);
-}
-
-static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
-{
-	BUG_ON((unsigned long)pudp & (PAGE_SIZE-1));
-	free_page((unsigned long)pudp);
-}
-
 static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
 {
 	set_p4d(p4dp, __p4d(__phys_to_p4d_val(pudp) | prot));
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index a0c8a0b65259..0eadbf933e35 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -46,7 +46,12 @@ DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
  * Logical CPU mapping.
  */
 extern u64 __cpu_logical_map[NR_CPUS];
-#define cpu_logical_map(cpu)    __cpu_logical_map[cpu]
+extern u64 cpu_logical_map(int cpu);
+
+static inline void set_cpu_logical_map(int cpu, u64 hwid)
+{
+	__cpu_logical_map[cpu] = hwid;
+}
 
 struct seq_file;
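cpu_logical_map() thus changes from a macro into a real function, with writes funnelled through set_cpu_logical_map(). The matching out-of-line definition is presumably the trivial array read (it lives elsewhere in the series, likely arch/arm64/kernel/setup.c):

	u64 cpu_logical_map(int cpu)
	{
		return __cpu_logical_map[cpu];
	}

Routing all accesses through named functions gives the kernel a single place to instrument or protect the MPIDR table, instead of open-coded array indexing scattered across callers.
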
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 8d7c466f809b..991dd5f031e4 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -50,7 +50,7 @@ static inline void set_fs(mm_segment_t fs)
 	       CONFIG_ARM64_UAO));
 }
 
-#define segment_eq(a, b)	((a) == (b))
+#define uaccess_kernel()	(get_fs() == KERNEL_DS)
 
 /*
  * Test whether a block of memory is a valid user space address.
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index c760b9e159f5..734860ac7cf9 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -308,8 +308,8 @@ __SYSCALL(__NR_writev, compat_sys_writev)
 __SYSCALL(__NR_getsid, sys_getsid)
 #define __NR_fdatasync 148
 __SYSCALL(__NR_fdatasync, sys_fdatasync)
-#define __NR__sysctl 149
-__SYSCALL(__NR__sysctl, compat_sys_sysctl)
+/* 149 was sys_sysctl */
+__SYSCALL(149, sys_ni_syscall)
 #define __NR_mlock 150
 __SYSCALL(__NR_mlock, sys_mlock)
 #define __NR_munlock 151
@@ -599,9 +599,9 @@ __SYSCALL(__NR_recvfrom, compat_sys_recvfrom)
 #define __NR_shutdown 293
 __SYSCALL(__NR_shutdown, sys_shutdown)
 #define __NR_setsockopt 294
-__SYSCALL(__NR_setsockopt, compat_sys_setsockopt)
+__SYSCALL(__NR_setsockopt, sys_setsockopt)
 #define __NR_getsockopt 295
-__SYSCALL(__NR_getsockopt, compat_sys_getsockopt)
+__SYSCALL(__NR_getsockopt, sys_getsockopt)
 #define __NR_sendmsg 296
 __SYSCALL(__NR_sendmsg, compat_sys_sendmsg)
 #define __NR_recvmsg 297
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index 75cbae60455b..7508b0ac1d21 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -103,7 +103,8 @@ int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
 	return ret;
 }
 
-static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+						 const struct vdso_data *vd)
 {
 	u64 res;
 
diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h
index 9c29ad3049f8..631ab1281633 100644
--- a/arch/arm64/include/asm/vdso/gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/gettimeofday.h
@@ -64,7 +64,8 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 	return ret;
 }
 
-static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+						 const struct vdso_data *vd)
 {
 	u64 res;
 
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 5051b388c654..09977acc007d 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -85,10 +85,17 @@ static inline bool is_kernel_in_hyp_mode(void)
 
 static __always_inline bool has_vhe(void)
 {
-	if (cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN))
+	/*
+	 * The following macros are defined for code specific to VHE/nVHE.
+	 * If has_vhe() is inlined into those compilation units, it can
+	 * be determined statically. Otherwise fall back to caps.
+	 */
+	if (__is_defined(__KVM_VHE_HYPERVISOR__))
 		return true;
-
-	return false;
+	else if (__is_defined(__KVM_NVHE_HYPERVISOR__))
+		return false;
+	else
+		return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
 }
 
 #endif /* __ASSEMBLY__ */
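The rewritten has_vhe() leans on __is_defined() from <linux/kconfig.h>, which expands to 1 at preprocessing time when the named macro is defined as 1 (as the hyp Makefiles presumably do via -D__KVM_VHE_HYPERVISOR__ / -D__KVM_NVHE_HYPERVISOR__) and to 0 otherwise, so the branches above fold to constants inside the hyp objects. A hedged illustration (mode is hypothetical):

	#include <linux/kconfig.h>

	static int mode(void)
	{
		if (__is_defined(__KVM_VHE_HYPERVISOR__))
			return 1;	/* statically VHE hyp code */
		if (__is_defined(__KVM_NVHE_HYPERVISOR__))
			return 2;	/* statically nVHE hyp code */
		return 0;		/* kernel proper: decided at runtime via caps */
	}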