author		Anup Patel <apatel@ventanamicro.com>	2024-10-21 01:17:33 +0530
committer	Anup Patel <anup@brainfault.org>	2024-10-28 16:44:05 +0530
commit		3e7d154ad89be46b41bb47a0a8a19ecf8e0ca3f3 (patch)
tree		258c42aec91c5e3bd0f3bd4fb62c6b404020375d
parent		68c72a6557b072bff79658b9c0fdb0e69148e32d (diff)
RISC-V: KVM: Save trap CSRs in kvm_riscv_vcpu_enter_exit()
Save trap CSRs in the kvm_riscv_vcpu_enter_exit() function instead of
the kvm_arch_vcpu_ioctl_run() function so that the HTVAL and HTINST
CSRs are accessed in a more optimized manner while running under some
other hypervisor.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://lore.kernel.org/r/20241020194734.58686-13-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
-rw-r--r--	arch/riscv/kvm/vcpu.c	34
1 file changed, 21 insertions(+), 13 deletions(-)
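As background for the commit message above, here is a minimal standalone sketch (not kernel code) of the optimization being targeted: when this KVM host itself runs under another hypervisor with SBI nested acceleration (NACL), trap CSRs such as HTVAL and HTINST can be read from a shared-memory scratch area synced by the outer hypervisor instead of through trapping CSR instructions. The scratch layout and the read_trap_csr() helper below are hypothetical illustrations of that idea, not the kernel's actual nacl_csr_read()/ncsr_read() implementation.

/* Standalone illustration only; build with: cc -std=c99 -Wall sketch.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CSR_HTVAL	0x643
#define CSR_HTINST	0x64a

/* Hypothetical shared-memory scratch area synced by the outer hypervisor. */
struct nacl_scratch {
	uint64_t csr[0x1000];
};

/*
 * Without nested acceleration, every hypervisor CSR read performed by a
 * nested hypervisor traps to the outer hypervisor and is emulated (slow).
 */
static uint64_t emulated_csr_read(unsigned int num)
{
	printf("trap-and-emulate read of CSR 0x%x\n", num);
	return 0;
}

/*
 * Hypothetical helper mirroring the spirit of nacl_csr_read(): prefer the
 * shared-memory copy when nested acceleration is available, so reading
 * HTVAL/HTINST after a guest exit is just a memory load.
 */
static uint64_t read_trap_csr(struct nacl_scratch *nsh, bool nacl_available,
			      unsigned int num)
{
	if (nacl_available && nsh)
		return nsh->csr[num];		/* plain memory load   */
	return emulated_csr_read(num);		/* trapping CSR access */
}

int main(void)
{
	static struct nacl_scratch scratch = { .csr[CSR_HTVAL] = 0x1234 };

	/* Running under a NACL-capable hypervisor: cheap memory read. */
	printf("htval  = 0x%llx\n",
	       (unsigned long long)read_trap_csr(&scratch, true, CSR_HTVAL));

	/* No NACL available: fall back to the trapping CSR access. */
	printf("htinst = 0x%llx\n",
	       (unsigned long long)read_trap_csr(NULL, false, CSR_HTINST));
	return 0;
}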
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 113eb8957472..dc3f76f6e46c 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -764,12 +764,21 @@ static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *v
* This must be noinstr as instrumentation may make use of RCU, and this is not
* safe during the EQS.
*/
-static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+					      struct kvm_cpu_trap *trap)
{
void *nsh;
struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context;
struct kvm_cpu_context *hcntx = &vcpu->arch.host_context;
+	/*
+	 * We save trap CSRs (such as SEPC, SCAUSE, STVAL, HTVAL, and
+	 * HTINST) here because we do local_irq_enable() after this
+	 * function in kvm_arch_vcpu_ioctl_run() which can result in
+	 * an interrupt immediately after local_irq_enable() and can
+	 * potentially change trap CSRs.
+	 */
+
kvm_riscv_vcpu_swap_in_guest_state(vcpu);
guest_state_enter_irqoff();
@@ -812,14 +821,24 @@ static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
} else {
gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
}
+
+		trap->htval = nacl_csr_read(nsh, CSR_HTVAL);
+		trap->htinst = nacl_csr_read(nsh, CSR_HTINST);
} else {
hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
__kvm_riscv_switch_to(&vcpu->arch);
gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
+
+		trap->htval = csr_read(CSR_HTVAL);
+		trap->htinst = csr_read(CSR_HTINST);
}
+	trap->sepc = gcntx->sepc;
+	trap->scause = csr_read(CSR_SCAUSE);
+	trap->stval = csr_read(CSR_STVAL);
+
vcpu->arch.last_exit_cpu = vcpu->cpu;
guest_state_exit_irqoff();
kvm_riscv_vcpu_swap_in_host_state(vcpu);
@@ -936,22 +955,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
guest_timing_enter_irqoff();
- kvm_riscv_vcpu_enter_exit(vcpu);
+ kvm_riscv_vcpu_enter_exit(vcpu, &trap);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
- /*
- * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
- * get an interrupt between __kvm_riscv_switch_to() and
- * local_irq_enable() which can potentially change CSRs.
- */
- trap.sepc = vcpu->arch.guest_context.sepc;
- trap.scause = csr_read(CSR_SCAUSE);
- trap.stval = csr_read(CSR_STVAL);
- trap.htval = ncsr_read(CSR_HTVAL);
- trap.htinst = ncsr_read(CSR_HTINST);
-
/* Syncup interrupts state with HW */
kvm_riscv_vcpu_sync_interrupts(vcpu);
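To illustrate the ordering constraint spelled out in the comment added by this patch, here is a second standalone sketch (simplified stand-ins, not the actual KVM helpers): the trap CSRs have to be captured inside kvm_riscv_vcpu_enter_exit(), while interrupts are still disabled, because the first interrupt taken after local_irq_enable() in kvm_arch_vcpu_ioctl_run() overwrites SCAUSE, STVAL, and SEPC.

#include <stdint.h>
#include <stdio.h>

/* Simulated trap CSRs as they would appear in hardware after an event. */
static uint64_t hw_scause, hw_stval, hw_sepc;

struct cpu_trap {
	uint64_t sepc, scause, stval;
};

/* Stand-in for kvm_riscv_vcpu_enter_exit(): runs with interrupts disabled. */
static void vcpu_enter_exit(struct cpu_trap *trap)
{
	/* The guest ran and trapped back; CSRs now describe the guest exit. */
	hw_scause = 0xa;			/* environment call from VS-mode */
	hw_stval  = 0x0;
	hw_sepc   = 0x80200000;

	/* Capture the trap CSRs here, before interrupts are re-enabled. */
	trap->scause = hw_scause;
	trap->stval  = hw_stval;
	trap->sepc   = hw_sepc;
}

/* Stand-in for local_irq_enable() followed by an immediate interrupt. */
static void irq_enable_and_take_interrupt(void)
{
	hw_scause = 0x8000000000000005ULL;	/* supervisor timer interrupt */
	hw_stval  = 0;
	hw_sepc   = 0xffffffff80001000ULL;
}

int main(void)
{
	struct cpu_trap trap;

	vcpu_enter_exit(&trap);
	irq_enable_and_take_interrupt();

	/* The exit handler still sees the guest's trap, not the interrupt's. */
	printf("scause = 0x%llx, sepc = 0x%llx\n",
	       (unsigned long long)trap.scause,
	       (unsigned long long)trap.sepc);
	return 0;
}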