 arch/x86/kvm/vmx/main.c    | 18 ++++++++++++++++--
 arch/x86/kvm/vmx/tdx.c     | 13 +++++++++++++
 arch/x86/kvm/vmx/x86_ops.h |  2 ++
 3 files changed, 31 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index e19e3579ce3e..0be6abc77f4c 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -118,8 +118,10 @@ static void vt_vcpu_free(struct kvm_vcpu *vcpu)
 
 static void vt_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
-	if (is_td_vcpu(vcpu))
+	if (is_td_vcpu(vcpu)) {
+		tdx_vcpu_reset(vcpu, init_event);
 		return;
+	}
 
 	vmx_vcpu_reset(vcpu, init_event);
 }
@@ -226,6 +228,18 @@ static void vt_enable_smi_window(struct kvm_vcpu *vcpu)
 }
 #endif
 
+static bool vt_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * INIT and SIPI are always blocked for TDX, i.e., INIT handling and
+	 * the OP vcpu_deliver_sipi_vector() won't be called.
+	 */
+	if (is_td_vcpu(vcpu))
+		return true;
+
+	return vmx_apic_init_signal_blocked(vcpu);
+}
+
 static void vt_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
 {
 	struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
@@ -591,7 +605,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 #endif
 
 	.check_emulate_instruction = vmx_check_emulate_instruction,
-	.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
+	.apic_init_signal_blocked = vt_apic_init_signal_blocked,
 	.migrate_timers = vmx_migrate_timers,
 
 	.msr_filter_changed = vmx_msr_filter_changed,
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index d1848b02c3b9..d1edf11c18fe 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -2626,6 +2626,19 @@ static int tdx_vcpu_init(struct kvm_vcpu *vcpu, struct kvm_tdx_cmd *cmd)
 	return 0;
 }
 
+void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+{
+	/*
+	 * Yell on INIT, as TDX doesn't support INIT, i.e. KVM should drop all
+	 * INIT events.
+	 *
+	 * Defer initializing vCPU for RESET state until KVM_TDX_INIT_VCPU, as
+	 * userspace needs to define the vCPU model before KVM can initialize
+	 * vCPU state, e.g. to enable x2APIC.
+	 */
+	WARN_ON_ONCE(init_event);
+}
+
 struct tdx_gmem_post_populate_arg {
 	struct kvm_vcpu *vcpu;
 	__u32 flags;
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 8de276285214..c792ea66a45f 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -128,6 +128,7 @@ void tdx_vm_destroy(struct kvm *kvm);
 int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
 
 int tdx_vcpu_create(struct kvm_vcpu *vcpu);
+void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
 void tdx_vcpu_free(struct kvm_vcpu *vcpu);
 void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu);
@@ -167,6 +168,7 @@ static inline void tdx_vm_destroy(struct kvm *kvm) {}
 static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
 
 static inline int tdx_vcpu_create(struct kvm_vcpu *vcpu) { return -EOPNOTSUPP; }
+static inline void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) {}
+static inline void tdx_vcpu_free(struct kvm_vcpu *vcpu) {}
 static inline void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) {}
 static inline int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu) { return -EOPNOTSUPP; }
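
For context on the comment in tdx_vcpu_reset() above: RESET-state initialization is deferred to the KVM_TDX_INIT_VCPU command, which userspace issues on the vCPU fd via the KVM_MEMORY_ENCRYPT_OP ioctl after defining the vCPU model. The sketch below only illustrates that ordering; it is not part of the patch, it assumes TDX-enabled uapi headers that provide struct kvm_tdx_cmd and KVM_TDX_INIT_VCPU, and the meaning given to the data field (initial guest RCX) is an assumption rather than something this diff defines.

/*
 * Illustrative userspace sketch, not part of the patch.  Assumes the TDX
 * uapi definitions (struct kvm_tdx_cmd, KVM_TDX_INIT_VCPU) are available
 * from the kernel headers; treating .data as the initial guest RCX value
 * is an assumption about the TDX uapi, not something this diff defines.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int tdx_init_vcpu(int vcpu_fd, uint64_t init_rcx)
{
	struct kvm_tdx_cmd cmd = {
		.id   = KVM_TDX_INIT_VCPU,
		.data = init_rcx,
	};

	/*
	 * Must run after the vCPU model is defined (KVM_SET_CPUID2 etc.)
	 * and before the first KVM_RUN; until then the vCPU has no
	 * architectural RESET state, per tdx_vcpu_reset() above.
	 */
	return ioctl(vcpu_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}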