Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h      | 33
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h  |  6
-rw-r--r--  arch/arm64/include/asm/kvm_host.h     |  9
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h      | 20
-rw-r--r--  arch/arm64/kvm/aarch32.c              | 28
-rw-r--r--  arch/arm64/kvm/arm.c                  | 25
-rw-r--r--  arch/arm64/kvm/handle_exit.c          | 32
-rw-r--r--  arch/arm64/kvm/hyp/debug-sr.c         |  4
-rw-r--r--  arch/arm64/kvm/hyp/switch.c           | 65
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c        |  8
-rw-r--r--  arch/arm64/kvm/pmu.c                  |  8
-rw-r--r--  arch/arm64/kvm/sys_regs.c             | 25
-rw-r--r--  arch/arm64/kvm/sys_regs_generic_v8.c  | 10
13 files changed, 171 insertions(+), 102 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 0c9b5fc4ba0a..352aaebf4198 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -81,12 +81,39 @@ extern u32 __kvm_get_mdcr_el2(void);
extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
-/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+/*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s) \
+ ({ \
+ typeof(s) *addr; \
+ asm("adrp %0, %1\n" \
+ "add %0, %0, :lo12:%1\n" \
+ : "=r" (addr) : "S" (&s)); \
+ addr; \
+ })
+
+/*
+ * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
+ * provided that sym is really a *symbol* and not a pointer obtained from
+ * a data structure. As with SHIFT_PERCPU_PTR(), the creative casting keeps
+ * sparse quiet.
+ */
#define __hyp_this_cpu_ptr(sym) \
({ \
- void *__ptr = hyp_symbol_addr(sym); \
+ void *__ptr; \
+ __verify_pcpu_ptr(&sym); \
+ __ptr = hyp_symbol_addr(sym); \
__ptr += read_sysreg(tpidr_el2); \
- (typeof(&sym))__ptr; \
+ (typeof(sym) __kernel __force *)__ptr; \
})
#define __hyp_this_cpu_read(sym) \
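
As a usage sketch (the per-CPU symbol and function are hypothetical, not
part of this patch), hyp code is expected to combine the PC-relative
address with the EL2 per-CPU offset like so:

DEFINE_PER_CPU(unsigned long, hyp_demo_counter);	/* hypothetical */

static void __hyp_text hyp_bump_demo_counter(void)
{
	/*
	 * hyp_symbol_addr() resolves the symbol PC-relatively (an
	 * absolute kernel VA would be wrong at EL2), and tpidr_el2
	 * supplies this CPU's per-CPU offset while running at HYP.
	 */
	unsigned long *p = __hyp_this_cpu_ptr(hyp_demo_counter);

	*p += 1;
}
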
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 6ea53e6e8b26..4d0f8ea600ba 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
-{
- if (vcpu_has_ptrauth(vcpu))
- vcpu_ptrauth_disable(vcpu);
-}
-
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.vsesr_el2;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index abbdf9703e20..c3e6fcc664b1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;
- /* Pointer to host CPU context */
- struct kvm_cpu_context *host_cpu_context;
-
struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
@@ -404,8 +401,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
* CP14 and CP15 live in the same array, as they are backed by the
* same system registers.
*/
-#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
-#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
+#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+
+#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
+#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
struct kvm_vm_stat {
ulong remote_tlb_flush;
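
The effect of the CPx_BIAS XOR can be checked outside the kernel; this
standalone sketch assumes a union layout mirroring kvm_cpu_context's
sys_regs[]/copro[] overlay:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: a u32 copro[] view over the u64 sys_regs[] storage. */
union demo_regs {
	uint64_t sys_regs[1];
	uint32_t copro[2];
};

int main(void)
{
	union demo_regs r = { .sys_regs = { 0x1111111122222222ULL } };
	/* 1 on a big-endian host, 0 on a little-endian one */
	int bias = (*(const uint8_t *)&(uint16_t){ 1 } == 0);

	/* With the bias XORed in, index 0 names the low 32 bits of
	 * sys_regs[0] regardless of host endianness. */
	printf("%08x\n", r.copro[0 ^ bias]);	/* always 22222222 */
	return 0;
}
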
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 094260aaafdd..b12bfc1f051a 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -108,26 +108,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s) \
- ({ \
- typeof(s) *addr; \
- asm("adrp %0, %1\n" \
- "add %0, %0, :lo12:%1\n" \
- : "=r" (addr) : "S" (&s)); \
- addr; \
- })
-
-/*
* We currently support using a VM-specified IPA size. For backward
* compatibility, the default IPA size is fixed to 40bits.
*/
diff --git a/arch/arm64/kvm/aarch32.c b/arch/arm64/kvm/aarch32.c
index 0a356aa91aa1..40a62a99fbf8 100644
--- a/arch/arm64/kvm/aarch32.c
+++ b/arch/arm64/kvm/aarch32.c
@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
[7] = { 4, 4 }, /* FIQ, unused */
};
+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ if (vcpu->arch.sysregs_loaded_on_cpu) {
+ kvm_arch_vcpu_put(vcpu);
+ return true;
+ }
+
+ preempt_enable();
+ return false;
+}
+
+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
+{
+ if (loaded) {
+ kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ preempt_enable();
+ }
+}
+
/*
* When an exception is taken, most CPSR fields are left unchanged in the
* handler. However, some are explicitly overridden (e.g. M[4:0]).
@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
+ bool loaded = pre_fault_synchronize(vcpu);
+
prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
+ post_fault_synchronize(vcpu, loaded);
}
/*
@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
u32 vect_offset;
u32 *far, *fsr;
bool is_lpae;
+ bool loaded;
+
+ loaded = pre_fault_synchronize(vcpu);
if (is_pabt) {
vect_offset = 12;
@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
*fsr = DFSR_FSC_EXTABT_nLPAE;
}
+
+ post_fault_synchronize(vcpu, loaded);
}
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
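
The intended pairing of the two helpers can be sketched as follows
(illustrative only; inject_abt32() above already follows this shape):

/*
 * loaded = pre_fault_synchronize(vcpu);
 *	- if the vcpu's sysregs are loaded on the CPU, put them back to
 *	  memory and keep preemption disabled;
 *	- otherwise re-enable preemption and report 'not loaded'.
 *
 * ... build the 32-bit fault state in vcpu->arch.ctxt ...
 *
 * post_fault_synchronize(vcpu, loaded);
 *	- reload the (now updated) sysregs and re-enable preemption, but
 *	  only if pre_fault_synchronize() actually did the put.
 */
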
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 7a57381c05e8..90cb90561446 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -144,11 +144,6 @@ out_fail_alloc:
return ret;
}
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
@@ -340,10 +335,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
int *last_ran;
- kvm_host_data_t *cpu_data;
last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
- cpu_data = this_cpu_ptr(&kvm_host_data);
/*
* We might get preempted before the vCPU actually runs, but
@@ -355,7 +348,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
@@ -370,7 +362,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
else
vcpu_set_wfx_traps(vcpu);
- vcpu_ptrauth_setup_lazy(vcpu);
+ if (vcpu_has_ptrauth(vcpu))
+ vcpu_ptrauth_disable(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -990,11 +983,17 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
* Ensure a rebooted VM will fault in RAM pages and detect if the
* guest MMU is turned off and flush the caches as needed.
*
- * S2FWB enforces all memory accesses to RAM being cacheable, we
- * ensure that the cache is always coherent.
+ * S2FWB enforces all memory accesses to RAM being cacheable,
+ * ensuring that the data side is always coherent. We still
+ * need to invalidate the I-cache though, as FWB does *not*
+ * imply CTR_EL0.DIC.
*/
- if (vcpu->arch.has_run_once && !cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
- stage2_unmap_vm(vcpu->kvm);
+ if (vcpu->arch.has_run_once) {
+ if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+ stage2_unmap_vm(vcpu->kvm);
+ else
+ __flush_icache_all();
+ }
vcpu_reset_hcr(vcpu);
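
The reset-time decision this implements can be illustrated as:

/*
 * For a vcpu that has already run (illustrative summary):
 *
 *   no S2FWB -> unmap stage-2, so pages fault back in and get the
 *               usual clean+invalidate on the way;
 *   S2FWB    -> the data side is coherent by construction, but FWB
 *               does not imply CTR_EL0.DIC, so the I-cache still
 *               needs a full invalidate.
 */
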
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index eb194696ef62..5a02d4c90559 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -162,40 +162,14 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
-#define __ptrauth_save_key(regs, key) \
-({ \
- regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
- regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
-})
-
-/*
- * Handle the guest trying to use a ptrauth instruction, or trying to access a
- * ptrauth register.
- */
-void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpu_context *ctxt;
-
- if (vcpu_has_ptrauth(vcpu)) {
- vcpu_ptrauth_enable(vcpu);
- ctxt = vcpu->arch.host_cpu_context;
- __ptrauth_save_key(ctxt->sys_regs, APIA);
- __ptrauth_save_key(ctxt->sys_regs, APIB);
- __ptrauth_save_key(ctxt->sys_regs, APDA);
- __ptrauth_save_key(ctxt->sys_regs, APDB);
- __ptrauth_save_key(ctxt->sys_regs, APGA);
- } else {
- kvm_inject_undefined(vcpu);
- }
-}
-
/*
* Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
- * a NOP).
+ * a NOP). If we get here, it means we didn't fix up ptrauth on exit, and
+ * all we can do is give the guest an UNDEF.
*/
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- kvm_arm_vcpu_ptrauth_trap(vcpu);
+ kvm_inject_undefined(vcpu);
return 1;
}
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 0fc9872a1467..e95af204fec7 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 676b6585e5ae..db1c4487d95d 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -490,6 +490,64 @@ static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
return true;
}
+static bool __hyp_text esr_is_ptrauth_trap(u32 esr)
+{
+ u32 ec = ESR_ELx_EC(esr);
+
+ if (ec == ESR_ELx_EC_PAC)
+ return true;
+
+ if (ec != ESR_ELx_EC_SYS64)
+ return false;
+
+ switch (esr_sys64_to_sysreg(esr)) {
+ case SYS_APIAKEYLO_EL1:
+ case SYS_APIAKEYHI_EL1:
+ case SYS_APIBKEYLO_EL1:
+ case SYS_APIBKEYHI_EL1:
+ case SYS_APDAKEYLO_EL1:
+ case SYS_APDAKEYHI_EL1:
+ case SYS_APDBKEYLO_EL1:
+ case SYS_APDBKEYHI_EL1:
+ case SYS_APGAKEYLO_EL1:
+ case SYS_APGAKEYHI_EL1:
+ return true;
+ }
+
+ return false;
+}
+
+#define __ptrauth_save_key(regs, key) \
+({ \
+ regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
+ regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
+})
+
+static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *ctxt;
+ u64 val;
+
+ if (!vcpu_has_ptrauth(vcpu) ||
+ !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+ return false;
+
+ ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ __ptrauth_save_key(ctxt->sys_regs, APIA);
+ __ptrauth_save_key(ctxt->sys_regs, APIB);
+ __ptrauth_save_key(ctxt->sys_regs, APDA);
+ __ptrauth_save_key(ctxt->sys_regs, APDB);
+ __ptrauth_save_key(ctxt->sys_regs, APGA);
+
+ vcpu_ptrauth_enable(vcpu);
+
+ val = read_sysreg(hcr_el2);
+ val |= (HCR_API | HCR_APK);
+ write_sysreg(val, hcr_el2);
+
+ return true;
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -524,6 +582,9 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (__hyp_handle_fpsimd(vcpu))
return true;
+ if (__hyp_handle_ptrauth(vcpu))
+ return true;
+
if (!__populate_fault_info(vcpu))
return true;
@@ -642,7 +703,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;
- host_ctxt = vcpu->arch.host_cpu_context;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
@@ -747,7 +808,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
vcpu = kern_hyp_va(vcpu);
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
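
Putting the pieces together, the lazy PtrAuth handling after this change
can be illustrated as:

/*
 * Illustrative flow (not code from this patch):
 *
 *   guest executes a ptrauth insn / accesses a key register
 *     -> trap to EL2 (HCR_EL2.{API,APK} are clear)
 *       -> fixup_guest_exit()
 *          -> __hyp_handle_ptrauth(): save the host keys into the
 *             per-CPU host context, set HCR_EL2.{API,APK}, and return
 *             to the guest so the instruction re-executes trap-free.
 *
 * If the vcpu has no ptrauth, the trap instead reaches
 * kvm_handle_ptrauth()/trap_ptrauth(), which inject an UNDEF.
 */
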
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index ea5d22fbdacf..cc7e957f5b2c 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -39,7 +39,6 @@ static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
- ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR);
ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0);
ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1);
@@ -123,7 +122,6 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
isb();
}
- write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR);
write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0);
write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1);
@@ -267,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+ struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
__sysreg_save_user_state(host_ctxt);
/*
@@ -303,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+ struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
deactivate_traps_vhe_put();
__sysreg_save_el1_state(guest_ctxt);
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index e71d00bb5271..b5ae3a5d509e 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
*/
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
struct kvm_host_data *host;
u32 events_guest, events_host;
if (!has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
@@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
struct kvm_host_data *host;
u32 events_guest, events_host;
if (!has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 80985439bfb2..baf5ce9225ce 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -78,7 +78,6 @@ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
switch (reg) {
case CSSELR_EL1: *val = read_sysreg_s(SYS_CSSELR_EL1); break;
case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break;
- case ACTLR_EL1: *val = read_sysreg_s(SYS_ACTLR_EL1); break;
case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break;
case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break;
case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break;
@@ -118,7 +117,6 @@ static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
switch (reg) {
case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); break;
case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
- case ACTLR_EL1: write_sysreg_s(val, SYS_ACTLR_EL1); break;
case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
@@ -1034,16 +1032,13 @@ static bool trap_ptrauth(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- kvm_arm_vcpu_ptrauth_trap(vcpu);
-
/*
- * Return false for both cases as we never skip the trapped
- * instruction:
- *
- * - Either we re-execute the same key register access instruction
- * after enabling ptrauth.
- * - Or an UNDEF is injected as ptrauth is not supported/enabled.
+ * If we land here, it is because we didn't fix up the access on exit
+ * by allowing the PtrAuth sysregs. The only way this can happen is if
+ * the guest does not have PtrAuth support enabled.
*/
+ kvm_inject_undefined(vcpu);
+
return false;
}
@@ -1319,10 +1314,16 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
+ int reg = r->reg;
+
+ /* See the 32bit mapping in kvm_host.h */
+ if (p->is_aarch32)
+ reg = r->reg / 2;
+
if (p->is_write)
- vcpu_write_sys_reg(vcpu, p->regval, r->reg);
+ vcpu_write_sys_reg(vcpu, p->regval, reg);
else
- p->regval = vcpu_read_sys_reg(vcpu, r->reg);
+ p->regval = vcpu_read_sys_reg(vcpu, reg);
return true;
}
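
The index mapping used here works out as in this worked example:

/*
 * The cp15 table entries index the 32-bit copro[] view, which is
 * twice the 64-bit sys_regs[] index. If CSSELR_EL1 lives at
 * sys_regs[n], its AArch32 alias is copro[2n] (with the big-endian
 * CPx_BIAS applied by vcpu_cp15()); an AArch32 access arriving with
 * r->reg == 2n is therefore halved before being handed to
 * vcpu_{read,write}_sys_reg(), which expect the 64-bit index n.
 */
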
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 9cb6b4c8355a..aa9d356451eb 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -27,6 +27,14 @@ static bool access_actlr(struct kvm_vcpu *vcpu,
return ignore_write(vcpu, p);
p->regval = vcpu_read_sys_reg(vcpu, ACTLR_EL1);
+
+ if (p->is_aarch32) {
+ if (r->Op2 & 2)
+ p->regval = upper_32_bits(p->regval);
+ else
+ p->regval = lower_32_bits(p->regval);
+ }
+
return true;
}
@@ -47,6 +55,8 @@ static const struct sys_reg_desc genericv8_cp15_regs[] = {
/* ACTLR */
{ Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
access_actlr },
+ { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b011),
+ access_actlr },
};
static struct kvm_sys_reg_target_table genericv8_target_table = {
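
The 32-bit ACTLR/ACTLR2 split handled by access_actlr() can be checked
with a small standalone sketch (the helper name is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the Op2-based selection in access_actlr(): Op2 bit 1
 * distinguishes ACTLR2 (upper word) from ACTLR (lower word) of the
 * 64-bit ACTLR_EL1 value. */
static uint32_t actlr32_read(uint64_t actlr_el1, unsigned int op2)
{
	return (op2 & 2) ? (uint32_t)(actlr_el1 >> 32)	/* ACTLR2 */
			 : (uint32_t)actlr_el1;		/* ACTLR  */
}

int main(void)
{
	uint64_t v = 0xdeadbeef00c0ffeeULL;

	printf("ACTLR  = %08x\n", actlr32_read(v, 0b001)); /* 00c0ffee */
	printf("ACTLR2 = %08x\n", actlr32_read(v, 0b011)); /* deadbeef */
	return 0;
}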