Diffstat (limited to 'arch/x86/kvm/cpuid.c')
 arch/x86/kvm/cpuid.c | 118 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 74 insertions(+), 44 deletions(-)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8a294f9747aa..3fd6eec202d7 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -54,28 +54,38 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
#define F feature_bit
-int kvm_update_cpuid(struct kvm_vcpu *vcpu)
+static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
- struct kvm_lapic *apic = vcpu->arch.apic;
- best = kvm_find_cpuid_entry(vcpu, 1, 0);
- if (!best)
- return 0;
+ /*
+ * The existing code assumes virtual address is 48-bit or 57-bit in the
+ * canonical address checks; exit if it is ever changed.
+ */
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ if (best) {
+ int vaddr_bits = (best->eax & 0xff00) >> 8;
+
+ if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
+ return -EINVAL;
+ }
- /* Update OSXSAVE bit */
- if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1)
- cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
+ return 0;
+}
+
+void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ if (best) {
+ /* Update OSXSAVE bit */
+ if (boot_cpu_has(X86_FEATURE_XSAVE))
+ cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));
- cpuid_entry_change(best, X86_FEATURE_APIC,
+ cpuid_entry_change(best, X86_FEATURE_APIC,
vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
-
- if (apic) {
- if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
- apic->lapic_timer.timer_mode_mask = 3 << 17;
- else
- apic->lapic_timer.timer_mode_mask = 1 << 17;
}
best = kvm_find_cpuid_entry(vcpu, 7, 0);
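As a standalone reference, a sketch of the address-width decoding that kvm_check_cpuid() now performs up front (hypothetical helper name; only the shift/mask logic is taken from the hunk above):

/*
 * CPUID.80000008H:EAX[15:8] reports the number of linear address
 * bits. KVM's canonical-address checks only cope with 48 (4-level
 * paging), 57 (5-level paging), or 0 (field not populated), so
 * anything else is rejected before the table is accepted.
 */
static bool vaddr_width_is_supported(u32 eax_80000008)
{
	int vaddr_bits = (eax_80000008 & 0xff00) >> 8;

	return vaddr_bits == 0 || vaddr_bits == 48 || vaddr_bits == 57;
}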
@@ -84,31 +94,14 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
kvm_read_cr4_bits(vcpu, X86_CR4_PKE));
best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
- if (!best) {
- vcpu->arch.guest_supported_xcr0 = 0;
- } else {
- vcpu->arch.guest_supported_xcr0 =
- (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
+ if (best)
best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
- }
best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- /*
- * The existing code assumes virtual address is 48-bit or 57-bit in the
- * canonical address checks; exit if it is ever changed.
- */
- best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
- if (best) {
- int vaddr_bits = (best->eax & 0xff00) >> 8;
-
- if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
- return -EINVAL;
- }
-
best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
if (kvm_hlt_in_guest(vcpu->kvm) && best &&
(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
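For context on the two best->ebx updates above, a simplified sketch of xstate_required_size() (named in the first hunk header; alignment handling omitted, constants spelled out):

/*
 * For each extended state component enabled in xstate_bv,
 * CPUID.0DH:ECX=i reports the component's size in EAX and its
 * standard-format offset in EBX. The standard format uses the
 * fixed EBX offsets; the compacted format packs components back
 * to back, so the running total is the next offset.
 */
static u32 xstate_size_sketch(u64 xstate_bv, bool compacted)
{
	u32 ret = 576 + 64;		/* legacy region + XSAVE header */
	int i;

	for (i = 2; i < 64; i++) {	/* components above x87/SSE */
		u32 eax, ebx, ecx, edx;

		if (!(xstate_bv & BIT_ULL(i)))
			continue;

		cpuid_count(0xD, i, &eax, &ebx, &ecx, &edx);
		ret = max(ret, (compacted ? ret : ebx) + eax);
	}

	return ret;
}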
@@ -121,14 +114,39 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
vcpu->arch.ia32_misc_enable_msr &
MSR_IA32_MISC_ENABLE_MWAIT);
}
+}
+
+static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ struct kvm_cpuid_entry2 *best;
+
+ kvm_x86_ops.vcpu_after_set_cpuid(vcpu);
+
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ if (best && apic) {
+ if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
+ apic->lapic_timer.timer_mode_mask = 3 << 17;
+ else
+ apic->lapic_timer.timer_mode_mask = 1 << 17;
+
+ kvm_apic_set_version(vcpu);
+ }
+
+ best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
+ if (!best)
+ vcpu->arch.guest_supported_xcr0 = 0;
+ else
+ vcpu->arch.guest_supported_xcr0 =
+ (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
- /* Note, maxphyaddr must be updated before tdp_level. */
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
- vcpu->arch.tdp_level = kvm_x86_ops.get_tdp_level(vcpu);
kvm_mmu_reset_context(vcpu);
kvm_pmu_refresh(vcpu);
- return 0;
+ vcpu->arch.cr4_guest_rsvd_bits =
+ __cr4_reserved_bits(guest_cpuid_has, vcpu);
+ kvm_x86_ops.update_exception_bitmap(vcpu);
}
static int is_efer_nx(void)
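Why the timer_mode_mask assignment above picks 3 << 17 versus 1 << 17: bits 18:17 of the APIC LVT Timer register select the timer mode, and bit 18 is only meaningful when the guest's CPUID advertises TSC-deadline support. The mode encodings (mirroring asm/apicdef.h):

#define APIC_LVT_TIMER_ONESHOT		(0 << 17)	/* 00: one-shot */
#define APIC_LVT_TIMER_PERIODIC		(1 << 17)	/* 01: periodic */
#define APIC_LVT_TIMER_TSCDEADLINE	(2 << 17)	/* 10: TSC-deadline */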
@@ -203,10 +221,16 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
vcpu->arch.cpuid_entries[i].padding[2] = 0;
}
vcpu->arch.cpuid_nent = cpuid->nent;
+ r = kvm_check_cpuid(vcpu);
+ if (r) {
+ vcpu->arch.cpuid_nent = 0;
+ kvfree(cpuid_entries);
+ goto out;
+ }
+
cpuid_fix_nx_cap(vcpu);
- kvm_apic_set_version(vcpu);
- kvm_x86_ops.cpuid_update(vcpu);
- r = kvm_update_cpuid(vcpu);
+ kvm_update_cpuid_runtime(vcpu);
+ kvm_vcpu_after_set_cpuid(vcpu);
kvfree(cpuid_entries);
out:
@@ -227,9 +251,14 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
goto out;
vcpu->arch.cpuid_nent = cpuid->nent;
- kvm_apic_set_version(vcpu);
- kvm_x86_ops.cpuid_update(vcpu);
- r = kvm_update_cpuid(vcpu);
+ r = kvm_check_cpuid(vcpu);
+ if (r) {
+ vcpu->arch.cpuid_nent = 0;
+ goto out;
+ }
+
+ kvm_update_cpuid_runtime(vcpu);
+ kvm_vcpu_after_set_cpuid(vcpu);
out:
return r;
}
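From userspace, the effect of the reordering above is that a bad table is rejected before any state is committed. A minimal sketch of exercising KVM_SET_CPUID2 (error handling trimmed; the wrapper struct is the usual zero-length-array idiom):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

int set_one_cpuid_entry(int vcpu_fd)
{
	struct {
		struct kvm_cpuid2 hdr;
		struct kvm_cpuid_entry2 entries[1];
	} cpuid;

	memset(&cpuid, 0, sizeof(cpuid));
	cpuid.hdr.nent = 1;
	cpuid.entries[0].function = 0x80000008;
	/* EAX[7:0] = physical bits, EAX[15:8] = virtual bits. */
	cpuid.entries[0].eax = 52 | (48 << 8);

	/*
	 * Returns 0 on success; a virtual width other than 0/48/57
	 * now fails with EINVAL instead of being accepted.
	 */
	return ioctl(vcpu_fd, KVM_SET_CPUID2, &cpuid.hdr);
}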
@@ -341,7 +370,8 @@ void kvm_set_cpu_caps(void)
kvm_cpu_cap_mask(CPUID_7_EDX,
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
- F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM)
+ F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
+ F(SERIALIZE)
);
/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
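A note on the F() entries in the mask above: F is #defined to feature_bit at the top of this diff. Each X86_FEATURE_* constant encodes word * 32 + bit, and a per-word mask only needs the bit's position within its own 32-bit CPUID word; a sketch of the expansion (mirroring arch/x86/kvm/cpuid.h):

#define feature_bit(name)	(1 << (X86_FEATURE_##name & 31))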
@@ -604,7 +634,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
eax.split.bit_width = cap.bit_width_gp;
eax.split.mask_length = cap.events_mask_len;
- edx.split.num_counters_fixed = cap.num_counters_fixed;
+ edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
edx.split.bit_width_fixed = cap.bit_width_fixed;
edx.split.reserved = 0;
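The final hunk's clamp in context: CPUID.0AH:EDX packs the fixed-function counter count into bits 4:0 (the layout below mirrors union cpuid10_edx from asm/perf_event.h of this era). Capping num_counters_fixed at MAX_FIXED_COUNTERS keeps the guest from being told about fixed counters KVM's vPMU does not emulate:

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};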