Diffstat (limited to 'tools')
18 files changed, 870 insertions, 253 deletions
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 1750f91dd936..844417601618 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -67,6 +67,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/fix_hypercall_test TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid TEST_GEN_PROGS_x86_64 += x86_64/hyperv_evmcs +TEST_GEN_PROGS_x86_64 += x86_64/hyperv_extended_hypercalls TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features TEST_GEN_PROGS_x86_64 += x86_64/hyperv_ipi TEST_GEN_PROGS_x86_64 += x86_64/hyperv_svm_test diff --git a/tools/testing/selftests/kvm/include/x86_64/hyperv.h b/tools/testing/selftests/kvm/include/x86_64/hyperv.h index 9218bb5f44bf..fa65b908b13e 100644 --- a/tools/testing/selftests/kvm/include/x86_64/hyperv.h +++ b/tools/testing/selftests/kvm/include/x86_64/hyperv.h @@ -85,61 +85,110 @@ #define HV_X64_MSR_SYNDBG_OPTIONS 0x400000FF /* HYPERV_CPUID_FEATURES.EAX */ -#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0) -#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1) -#define HV_MSR_SYNIC_AVAILABLE BIT(2) -#define HV_MSR_SYNTIMER_AVAILABLE BIT(3) -#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4) -#define HV_MSR_HYPERCALL_AVAILABLE BIT(5) -#define HV_MSR_VP_INDEX_AVAILABLE BIT(6) -#define HV_MSR_RESET_AVAILABLE BIT(7) -#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8) -#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9) -#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10) -#define HV_ACCESS_FREQUENCY_MSRS BIT(11) -#define HV_ACCESS_REENLIGHTENMENT BIT(13) -#define HV_ACCESS_TSC_INVARIANT BIT(15) +#define HV_MSR_VP_RUNTIME_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 0) +#define HV_MSR_TIME_REF_COUNT_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 1) +#define HV_MSR_SYNIC_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 2) +#define HV_MSR_SYNTIMER_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 3) +#define HV_MSR_APIC_ACCESS_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 4) +#define HV_MSR_HYPERCALL_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 5) +#define HV_MSR_VP_INDEX_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 6) +#define HV_MSR_RESET_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 7) +#define HV_MSR_STAT_PAGES_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 8) +#define HV_MSR_REFERENCE_TSC_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 9) +#define HV_MSR_GUEST_IDLE_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 10) +#define HV_ACCESS_FREQUENCY_MSRS \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 11) +#define HV_ACCESS_REENLIGHTENMENT \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 13) +#define HV_ACCESS_TSC_INVARIANT \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 15) /* HYPERV_CPUID_FEATURES.EBX */ -#define HV_CREATE_PARTITIONS BIT(0) -#define HV_ACCESS_PARTITION_ID BIT(1) -#define HV_ACCESS_MEMORY_POOL BIT(2) -#define HV_ADJUST_MESSAGE_BUFFERS BIT(3) -#define HV_POST_MESSAGES BIT(4) -#define HV_SIGNAL_EVENTS BIT(5) -#define HV_CREATE_PORT BIT(6) -#define HV_CONNECT_PORT BIT(7) -#define HV_ACCESS_STATS BIT(8) -#define HV_DEBUGGING BIT(11) -#define HV_CPU_MANAGEMENT BIT(12) -#define HV_ISOLATION BIT(22) +#define HV_CREATE_PARTITIONS \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 0) +#define HV_ACCESS_PARTITION_ID \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 1) +#define 
HV_ACCESS_MEMORY_POOL \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 2) +#define HV_ADJUST_MESSAGE_BUFFERS \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 3) +#define HV_POST_MESSAGES \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 4) +#define HV_SIGNAL_EVENTS \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 5) +#define HV_CREATE_PORT \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 6) +#define HV_CONNECT_PORT \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 7) +#define HV_ACCESS_STATS \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 8) +#define HV_DEBUGGING \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 11) +#define HV_CPU_MANAGEMENT \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 12) +#define HV_ENABLE_EXTENDED_HYPERCALLS \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 20) +#define HV_ISOLATION \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 22) /* HYPERV_CPUID_FEATURES.EDX */ -#define HV_X64_MWAIT_AVAILABLE BIT(0) -#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1) -#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2) -#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3) -#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4) -#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5) -#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8) -#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10) -#define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11) -#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19) +#define HV_X64_MWAIT_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 0) +#define HV_X64_GUEST_DEBUGGING_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 1) +#define HV_X64_PERF_MONITOR_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 2) +#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 3) +#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 4) +#define HV_X64_GUEST_IDLE_STATE_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 5) +#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 8) +#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 10) +#define HV_FEATURE_DEBUG_MSRS_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 11) +#define HV_STIMER_DIRECT_MODE_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 19) /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */ -#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0) -#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1) -#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2) -#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3) -#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4) -#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5) -#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9) -#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10) -#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11) -#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14) +#define HV_X64_AS_SWITCH_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 0) +#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 1) +#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 2) +#define HV_X64_APIC_ACCESS_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 3) +#define HV_X64_SYSTEM_RESET_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 4) 
+#define HV_X64_RELAXED_TIMING_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 5) +#define HV_DEPRECATING_AEOI_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 9) +#define HV_X64_CLUSTER_IPI_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 10) +#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 11) +#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 14) /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */ -#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING BIT(1) +#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0, EAX, 1) /* Hypercalls */ #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 @@ -166,6 +215,9 @@ #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0 +/* Extended hypercalls */ +#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001 + #define HV_FLUSH_ALL_PROCESSORS BIT(0) #define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1) #define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2) @@ -288,4 +340,7 @@ struct hyperv_test_pages { struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, vm_vaddr_t *p_hv_pages_gva); +/* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */ +#define HV_INVARIANT_TSC_EXPOSED BIT_ULL(0) + #endif /* !SELFTEST_KVM_HYPERV_H */ diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index b1a31de7108a..53ffa43c90db 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -19,6 +19,9 @@ #include "../kvm_util.h" +extern bool host_cpu_is_intel; +extern bool host_cpu_is_amd; + #define NMI_VECTOR 0x02 #define X86_EFLAGS_FIXED (1u << 1) @@ -137,6 +140,7 @@ struct kvm_x86_cpu_feature { #define X86_FEATURE_GBPAGES KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26) #define X86_FEATURE_RDTSCP KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27) #define X86_FEATURE_LM KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 29) +#define X86_FEATURE_INVTSC KVM_X86_CPU_FEATURE(0x80000007, 0, EDX, 8) #define X86_FEATURE_RDPRU KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 4) #define X86_FEATURE_AMD_IBPB KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 12) #define X86_FEATURE_NPT KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 0) @@ -554,6 +558,28 @@ static inline uint32_t this_cpu_model(void) return x86_model(this_cpu_fms()); } +static inline bool this_cpu_vendor_string_is(const char *vendor) +{ + const uint32_t *chunk = (const uint32_t *)vendor; + uint32_t eax, ebx, ecx, edx; + + cpuid(0, &eax, &ebx, &ecx, &edx); + return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]); +} + +static inline bool this_cpu_is_intel(void) +{ + return this_cpu_vendor_string_is("GenuineIntel"); +} + +/* + * Exclude early K5 samples with a vendor string of "AMDisbetter!" 
+ */ +static inline bool this_cpu_is_amd(void) +{ + return this_cpu_vendor_string_is("AuthenticAMD"); +} + static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index, uint8_t reg, uint8_t lo, uint8_t hi) { @@ -690,9 +716,6 @@ static inline void cpu_relax(void) "hlt\n" \ ) -bool is_intel_cpu(void); -bool is_amd_cpu(void); - struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu); void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state); void kvm_x86_state_cleanup(struct kvm_x86_state *state); @@ -716,7 +739,7 @@ static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs) int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs); TEST_ASSERT(r == msrs->nmsrs, - "KVM_GET_MSRS failed, r: %i (failed on MSR %x)", + "KVM_SET_MSRS failed, r: %i (failed on MSR %x)", r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index); } static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu, diff --git a/tools/testing/selftests/kvm/kvm_binary_stats_test.c b/tools/testing/selftests/kvm/kvm_binary_stats_test.c index 0b45ac593387..a7001e29dc06 100644 --- a/tools/testing/selftests/kvm/kvm_binary_stats_test.c +++ b/tools/testing/selftests/kvm/kvm_binary_stats_test.c @@ -19,6 +19,7 @@ #include "kvm_util.h" #include "asm/kvm.h" #include "linux/kvm.h" +#include "kselftest.h" static void stats_test(int stats_fd) { @@ -51,7 +52,7 @@ static void stats_test(int stats_fd) /* Sanity check for other fields in header */ if (header.num_desc == 0) { - printf("No KVM stats defined!"); + ksft_print_msg("No KVM stats defined!\n"); return; } /* @@ -133,7 +134,7 @@ static void stats_test(int stats_fd) "Bucket size of stats (%s) is not zero", pdesc->name); } - size_data += pdesc->size * sizeof(*stats_data); + size_data = max(size_data, pdesc->offset + pdesc->size * sizeof(*stats_data)); } /* @@ -148,14 +149,6 @@ static void stats_test(int stats_fd) TEST_ASSERT(size_data >= header.num_desc * sizeof(*stats_data), "Data size is not correct"); - /* Check stats offset */ - for (i = 0; i < header.num_desc; ++i) { - pdesc = get_stats_descriptor(stats_desc, i, &header); - TEST_ASSERT(pdesc->offset < size_data, - "Invalid offset (%u) for stats: %s", - pdesc->offset, pdesc->name); - } - /* Allocate memory for stats data */ stats_data = malloc(size_data); TEST_ASSERT(stats_data, "Allocate memory for stats data"); @@ -224,9 +217,13 @@ int main(int argc, char *argv[]) max_vcpu = DEFAULT_NUM_VCPU; } + ksft_print_header(); + /* Check the extension for binary stats */ TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD)); + ksft_set_plan(max_vm); + /* Create VMs and VCPUs */ vms = malloc(sizeof(vms[0]) * max_vm); TEST_ASSERT(vms, "Allocate memory for storing VM pointers"); @@ -245,10 +242,12 @@ int main(int argc, char *argv[]) vm_stats_test(vms[i]); for (j = 0; j < max_vcpu; ++j) vcpu_stats_test(vcpus[i * max_vcpu + j]); + ksft_test_result_pass("vm%i\n", i); } for (i = 0; i < max_vm; ++i) kvm_vm_free(vms[i]); free(vms); - return 0; + + ksft_finished(); /* Print results and exit() accordingly */ } diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c index 820ac2d08c98..266f3876e10a 100644 --- a/tools/testing/selftests/kvm/lib/elf.c +++ b/tools/testing/selftests/kvm/lib/elf.c @@ -90,6 +90,7 @@ static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp) " hdrp->e_shentsize: %x\n" " expected: %zx", hdrp->e_shentsize, sizeof(Elf64_Shdr)); + close(fd); } /* VM ELF Load @@ -190,4 +191,5 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename) phdr.p_filesz); } } 
+ close(fd); } diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c index 99a575bbbc52..1df3ce4b16fd 100644 --- a/tools/testing/selftests/kvm/lib/guest_modes.c +++ b/tools/testing/selftests/kvm/lib/guest_modes.c @@ -127,7 +127,7 @@ void guest_modes_cmdline(const char *arg) mode_selected = true; } - mode = strtoul(optarg, NULL, 10); + mode = atoi_non_negative("Guest mode ID", arg); TEST_ASSERT(mode < NUM_VM_MODES, "Guest mode ID %d too big", mode); guest_modes[mode].enabled = true; } diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 56d5ea949cbb..3ea24a5f4c43 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1844,6 +1844,7 @@ static struct exit_reason { {KVM_EXIT_X86_RDMSR, "RDMSR"}, {KVM_EXIT_X86_WRMSR, "WRMSR"}, {KVM_EXIT_XEN, "XEN"}, + {KVM_EXIT_HYPERV, "HYPERV"}, #ifdef KVM_EXIT_MEMORY_NOT_PRESENT {KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"}, #endif @@ -1941,9 +1942,6 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); } -/* Arbitrary minimum physical address used for virtual translation tables. */ -#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000 - vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) { return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index acfa1d01e7df..ae1e573d94ce 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -19,6 +19,8 @@ #define MAX_NR_CPUID_ENTRIES 100 vm_vaddr_t exception_handlers; +bool host_cpu_is_amd; +bool host_cpu_is_intel; static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent) { @@ -113,7 +115,7 @@ static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent) bool kvm_is_tdp_enabled(void) { - if (is_intel_cpu()) + if (host_cpu_is_intel) return get_kvm_intel_param_bool("ept"); else return get_kvm_amd_param_bool("npt"); @@ -555,6 +557,8 @@ static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu) void kvm_arch_vm_post_create(struct kvm_vm *vm) { vm_create_irqchip(vm); + sync_global_to_guest(vm, host_cpu_is_intel); + sync_global_to_guest(vm, host_cpu_is_amd); } struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, @@ -1006,28 +1010,6 @@ void kvm_x86_state_cleanup(struct kvm_x86_state *state) free(state); } -static bool cpu_vendor_string_is(const char *vendor) -{ - const uint32_t *chunk = (const uint32_t *)vendor; - uint32_t eax, ebx, ecx, edx; - - cpuid(0, &eax, &ebx, &ecx, &edx); - return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]); -} - -bool is_intel_cpu(void) -{ - return cpu_vendor_string_is("GenuineIntel"); -} - -/* - * Exclude early K5 samples with a vendor string of "AMDisbetter!" 
- */ -bool is_amd_cpu(void) -{ - return cpu_vendor_string_is("AuthenticAMD"); -} - void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits) { if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) { @@ -1162,9 +1144,15 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, { uint64_t r; - asm volatile("vmcall" + asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" + "jnz 1f\n\t" + "vmcall\n\t" + "jmp 2f\n\t" + "1: vmmcall\n\t" + "2:" : "=a"(r) - : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3)); + : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3), + [use_vmmcall] "r" (host_cpu_is_amd)); return r; } @@ -1236,7 +1224,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm) max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1; /* Avoid reserved HyperTransport region on AMD processors. */ - if (!is_amd_cpu()) + if (!host_cpu_is_amd) return max_gfn; /* On parts with <40 physical address bits, the area is fully hidden */ @@ -1276,3 +1264,9 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm) return get_kvm_intel_param_bool("unrestricted_guest"); } + +void kvm_selftest_arch_init(void) +{ + host_cpu_is_intel = this_cpu_is_intel(); + host_cpu_is_amd = this_cpu_is_amd(); +} diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c index e6587e193490..4210cd21d159 100644 --- a/tools/testing/selftests/kvm/memslot_perf_test.c +++ b/tools/testing/selftests/kvm/memslot_perf_test.c @@ -308,8 +308,6 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots); TEST_ASSERT(data->hva_slots, "malloc() fail"); - data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code); - pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n", data->nslots, data->pages_per_slot, rempages); @@ -349,6 +347,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, virt_map(data->vm, MEM_GPA, MEM_GPA, data->npages); sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); + sync->guest_page_size = data->vm->page_size; atomic_init(&sync->start_flag, false); atomic_init(&sync->exit_flag, false); atomic_init(&sync->sync_flag, false); @@ -810,8 +809,6 @@ static bool test_execute(int nslots, uint64_t *maxslots, } sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); - - sync->guest_page_size = data->vm->page_size; if (tdata->prepare && !tdata->prepare(data, sync, maxslots)) { ret = false; diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c index 32f7e09ef67c..0f728f05ea82 100644 --- a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c +++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c @@ -48,10 +48,10 @@ static void guest_main(void) const uint8_t *other_hypercall_insn; uint64_t ret; - if (is_intel_cpu()) { + if (host_cpu_is_intel) { native_hypercall_insn = vmx_vmcall; other_hypercall_insn = svm_vmmcall; - } else if (is_amd_cpu()) { + } else if (host_cpu_is_amd) { native_hypercall_insn = svm_vmmcall; other_hypercall_insn = vmx_vmcall; } else { diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c index d576bc8ce823..2ee0af0d449e 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c @@ -104,7 +104,7 @@ static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t 
tsc_page_ /* Set Guest OS id to enable Hyper-V emulation */ GUEST_SYNC(1); - wrmsr(HV_X64_MSR_GUEST_OS_ID, (u64)0x8100 << 48); + wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); GUEST_SYNC(2); check_tsc_msr_rdtsc(); diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c new file mode 100644 index 000000000000..73af44d2167f --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Test Hyper-V extended hypercall, HV_EXT_CALL_QUERY_CAPABILITIES (0x8001), + * exit to userspace and receive result in guest. + * + * Negative tests are present in hyperv_features.c + * + * Copyright 2022 Google LLC + * Author: Vipin Sharma <vipinsh@google.com> + */ + +#include "kvm_util.h" +#include "processor.h" +#include "hyperv.h" + +/* Any value is fine */ +#define EXT_CAPABILITIES 0xbull + +static void guest_code(vm_paddr_t in_pg_gpa, vm_paddr_t out_pg_gpa, + vm_vaddr_t out_pg_gva) +{ + uint64_t *output_gva; + + wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); + wrmsr(HV_X64_MSR_HYPERCALL, in_pg_gpa); + + output_gva = (uint64_t *)out_pg_gva; + + hyperv_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, in_pg_gpa, out_pg_gpa); + + /* TLFS states output will be a uint64_t value */ + GUEST_ASSERT_EQ(*output_gva, EXT_CAPABILITIES); + + GUEST_DONE(); +} + +int main(void) +{ + vm_vaddr_t hcall_out_page; + vm_vaddr_t hcall_in_page; + struct kvm_vcpu *vcpu; + struct kvm_run *run; + struct kvm_vm *vm; + uint64_t *outval; + struct ucall uc; + + /* Verify if extended hypercalls are supported */ + if (!kvm_cpuid_has(kvm_get_supported_hv_cpuid(), + HV_ENABLE_EXTENDED_HYPERCALLS)) { + print_skip("Extended calls not supported by the kernel"); + exit(KSFT_SKIP); + } + + vm = vm_create_with_one_vcpu(&vcpu, guest_code); + run = vcpu->run; + vcpu_set_hv_cpuid(vcpu); + + /* Hypercall input */ + hcall_in_page = vm_vaddr_alloc_pages(vm, 1); + memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size); + + /* Hypercall output */ + hcall_out_page = vm_vaddr_alloc_pages(vm, 1); + memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size); + + vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page), + addr_gva2gpa(vm, hcall_out_page), hcall_out_page); + + vcpu_run(vcpu); + + TEST_ASSERT(run->exit_reason == KVM_EXIT_HYPERV, + "Unexpected exit reason: %u (%s)", + run->exit_reason, exit_reason_str(run->exit_reason)); + + outval = addr_gpa2hva(vm, run->hyperv.u.hcall.params[1]); + *outval = EXT_CAPABILITIES; + run->hyperv.u.hcall.result = HV_STATUS_SUCCESS; + + vcpu_run(vcpu); + + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Unexpected exit reason: %u (%s)", + run->exit_reason, exit_reason_str(run->exit_reason)); + + switch (get_ucall(vcpu, &uc)) { + case UCALL_ABORT: + REPORT_GUEST_ASSERT_2(uc, "arg1 = %ld, arg2 = %ld"); + break; + case UCALL_DONE: + break; + default: + TEST_FAIL("Unhandled ucall: %ld", uc.cmd); + } + + kvm_vm_free(vm); + return 0; +} diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c index 3163c3e8db0a..c5e3b39edd07 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c @@ -13,9 +13,17 @@ #include "processor.h" #include "hyperv.h" +/* + * HYPERV_CPUID_ENLIGHTMENT_INFO.EBX is not a 'feature' CPUID leaf + * but to activate the feature it is sufficient to set it to a non-zero + * value. Use BIT(0) for that. 
+ */ +#define HV_PV_SPINLOCKS_TEST \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0) + struct msr_data { uint32_t idx; - bool available; + bool fault_expected; bool write; u64 write_val; }; @@ -26,22 +34,46 @@ struct hcall_data { bool ud_expected; }; +static bool is_write_only_msr(uint32_t msr) +{ + return msr == HV_X64_MSR_EOI; +} + static void guest_msr(struct msr_data *msr) { - uint64_t ignored; - uint8_t vector; + uint8_t vector = 0; + uint64_t msr_val = 0; GUEST_ASSERT(msr->idx); - if (!msr->write) - vector = rdmsr_safe(msr->idx, &ignored); - else + if (msr->write) vector = wrmsr_safe(msr->idx, msr->write_val); - if (msr->available) - GUEST_ASSERT_2(!vector, msr->idx, vector); + if (!vector && (!msr->write || !is_write_only_msr(msr->idx))) + vector = rdmsr_safe(msr->idx, &msr_val); + + if (msr->fault_expected) + GUEST_ASSERT_3(vector == GP_VECTOR, msr->idx, vector, GP_VECTOR); else - GUEST_ASSERT_2(vector == GP_VECTOR, msr->idx, vector); + GUEST_ASSERT_3(!vector, msr->idx, vector, 0); + + if (vector || is_write_only_msr(msr->idx)) + goto done; + + if (msr->write) + GUEST_ASSERT_3(msr_val == msr->write_val, msr->idx, + msr_val, msr->write_val); + + /* Invariant TSC bit appears when TSC invariant control MSR is written to */ + if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) { + if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT)) + GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC)); + else + GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC) == + !!(msr_val & HV_INVARIANT_TSC_EXPOSED)); + } + +done: GUEST_DONE(); } @@ -89,7 +121,6 @@ static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu) static void guest_test_msrs_access(void) { struct kvm_cpuid2 *prev_cpuid = NULL; - struct kvm_cpuid_entry2 *feat, *dbg; struct kvm_vcpu *vcpu; struct kvm_run *run; struct kvm_vm *vm; @@ -97,6 +128,7 @@ static void guest_test_msrs_access(void) int stage = 0; vm_vaddr_t msr_gva; struct msr_data *msr; + bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC); while (true) { vm = vm_create_with_one_vcpu(&vcpu, guest_msr); @@ -116,9 +148,6 @@ static void guest_test_msrs_access(void) vcpu_init_cpuid(vcpu, prev_cpuid); } - feat = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES); - dbg = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES); - vm_init_descriptor_tables(vm); vcpu_init_descriptor_tables(vcpu); @@ -134,133 +163,139 @@ static void guest_test_msrs_access(void) * Only available when Hyper-V identification is set */ msr->idx = HV_X64_MSR_GUEST_OS_ID; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 1: msr->idx = HV_X64_MSR_HYPERCALL; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 2: - feat->eax |= HV_MSR_HYPERCALL_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE); /* * HV_X64_MSR_GUEST_OS_ID has to be written first to make * HV_X64_MSR_HYPERCALL available. 
*/ msr->idx = HV_X64_MSR_GUEST_OS_ID; - msr->write = 1; + msr->write = true; msr->write_val = HYPERV_LINUX_OS_ID; - msr->available = 1; + msr->fault_expected = false; break; case 3: msr->idx = HV_X64_MSR_GUEST_OS_ID; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 4: msr->idx = HV_X64_MSR_HYPERCALL; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 5: msr->idx = HV_X64_MSR_VP_RUNTIME; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 6: - feat->eax |= HV_MSR_VP_RUNTIME_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_RUNTIME_AVAILABLE); msr->idx = HV_X64_MSR_VP_RUNTIME; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 7: /* Read only */ msr->idx = HV_X64_MSR_VP_RUNTIME; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 0; + msr->fault_expected = true; break; case 8: msr->idx = HV_X64_MSR_TIME_REF_COUNT; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 9: - feat->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_TIME_REF_COUNT_AVAILABLE); msr->idx = HV_X64_MSR_TIME_REF_COUNT; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 10: /* Read only */ msr->idx = HV_X64_MSR_TIME_REF_COUNT; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 0; + msr->fault_expected = true; break; case 11: msr->idx = HV_X64_MSR_VP_INDEX; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 12: - feat->eax |= HV_MSR_VP_INDEX_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_INDEX_AVAILABLE); msr->idx = HV_X64_MSR_VP_INDEX; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 13: /* Read only */ msr->idx = HV_X64_MSR_VP_INDEX; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 0; + msr->fault_expected = true; break; case 14: msr->idx = HV_X64_MSR_RESET; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 15: - feat->eax |= HV_MSR_RESET_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_RESET_AVAILABLE); msr->idx = HV_X64_MSR_RESET; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 16: msr->idx = HV_X64_MSR_RESET; - msr->write = 1; + msr->write = true; + /* + * TODO: the test only writes '0' to HV_X64_MSR_RESET + * at the moment, writing some other value there will + * trigger real vCPU reset and the code is not prepared + * to handle it yet. 
+ */ msr->write_val = 0; - msr->available = 1; + msr->fault_expected = false; break; case 17: msr->idx = HV_X64_MSR_REFERENCE_TSC; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 18: - feat->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_REFERENCE_TSC_AVAILABLE); msr->idx = HV_X64_MSR_REFERENCE_TSC; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 19: msr->idx = HV_X64_MSR_REFERENCE_TSC; - msr->write = 1; + msr->write = true; msr->write_val = 0; - msr->available = 1; + msr->fault_expected = false; break; case 20: msr->idx = HV_X64_MSR_EOM; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 21: /* @@ -268,149 +303,185 @@ static void guest_test_msrs_access(void) * capability enabled and guest visible CPUID bit unset. */ msr->idx = HV_X64_MSR_EOM; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 22: - feat->eax |= HV_MSR_SYNIC_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNIC_AVAILABLE); msr->idx = HV_X64_MSR_EOM; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 23: msr->idx = HV_X64_MSR_EOM; - msr->write = 1; + msr->write = true; msr->write_val = 0; - msr->available = 1; + msr->fault_expected = false; break; case 24: msr->idx = HV_X64_MSR_STIMER0_CONFIG; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 25: - feat->eax |= HV_MSR_SYNTIMER_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNTIMER_AVAILABLE); msr->idx = HV_X64_MSR_STIMER0_CONFIG; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 26: msr->idx = HV_X64_MSR_STIMER0_CONFIG; - msr->write = 1; + msr->write = true; msr->write_val = 0; - msr->available = 1; + msr->fault_expected = false; break; case 27: /* Direct mode test */ msr->idx = HV_X64_MSR_STIMER0_CONFIG; - msr->write = 1; + msr->write = true; msr->write_val = 1 << 12; - msr->available = 0; + msr->fault_expected = true; break; case 28: - feat->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_STIMER_DIRECT_MODE_AVAILABLE); msr->idx = HV_X64_MSR_STIMER0_CONFIG; - msr->write = 1; + msr->write = true; msr->write_val = 1 << 12; - msr->available = 1; + msr->fault_expected = false; break; case 29: msr->idx = HV_X64_MSR_EOI; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 30: - feat->eax |= HV_MSR_APIC_ACCESS_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_APIC_ACCESS_AVAILABLE); msr->idx = HV_X64_MSR_EOI; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 1; + msr->fault_expected = false; break; case 31: msr->idx = HV_X64_MSR_TSC_FREQUENCY; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 32: - feat->eax |= HV_ACCESS_FREQUENCY_MSRS; + vcpu_set_cpuid_feature(vcpu, HV_ACCESS_FREQUENCY_MSRS); msr->idx = HV_X64_MSR_TSC_FREQUENCY; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 33: /* Read only */ msr->idx = HV_X64_MSR_TSC_FREQUENCY; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 0; + msr->fault_expected = true; break; case 34: msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL; - msr->write = 0; - msr->available = 0; + msr->write = 
false; + msr->fault_expected = true; break; case 35: - feat->eax |= HV_ACCESS_REENLIGHTENMENT; + vcpu_set_cpuid_feature(vcpu, HV_ACCESS_REENLIGHTENMENT); msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 36: msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 1; + msr->fault_expected = false; break; case 37: /* Can only write '0' */ msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 0; + msr->fault_expected = true; break; case 38: msr->idx = HV_X64_MSR_CRASH_P0; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 39: - feat->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE); msr->idx = HV_X64_MSR_CRASH_P0; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 40: msr->idx = HV_X64_MSR_CRASH_P0; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 1; + msr->fault_expected = false; break; case 41: msr->idx = HV_X64_MSR_SYNDBG_STATUS; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 42: - feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; - dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; + vcpu_set_cpuid_feature(vcpu, HV_FEATURE_DEBUG_MSRS_AVAILABLE); + vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING); msr->idx = HV_X64_MSR_SYNDBG_STATUS; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 43: msr->idx = HV_X64_MSR_SYNDBG_STATUS; - msr->write = 1; + msr->write = true; msr->write_val = 0; - msr->available = 1; + msr->fault_expected = false; break; case 44: + /* MSR is not available when CPUID feature bit is unset */ + if (!has_invtsc) + continue; + msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL; + msr->write = false; + msr->fault_expected = true; + break; + case 45: + /* MSR is vailable when CPUID feature bit is set */ + if (!has_invtsc) + continue; + vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT); + msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL; + msr->write = false; + msr->fault_expected = false; + break; + case 46: + /* Writing bits other than 0 is forbidden */ + if (!has_invtsc) + continue; + msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL; + msr->write = true; + msr->write_val = 0xdeadbeef; + msr->fault_expected = true; + break; + case 47: + /* Setting bit 0 enables the feature */ + if (!has_invtsc) + continue; + msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL; + msr->write = true; + msr->write_val = 1; + msr->fault_expected = false; + break; + + default: kvm_vm_free(vm); return; } @@ -429,7 +500,7 @@ static void guest_test_msrs_access(void) switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: - REPORT_GUEST_ASSERT_2(uc, "MSR = %lx, vector = %lx"); + REPORT_GUEST_ASSERT_3(uc, "MSR = %lx, arg1 = %lx, arg2 = %lx"); return; case UCALL_DONE: break; @@ -445,7 +516,6 @@ static void guest_test_msrs_access(void) static void guest_test_hcalls_access(void) { - struct kvm_cpuid_entry2 *feat, *recomm, *dbg; struct kvm_cpuid2 *prev_cpuid = NULL; struct kvm_vcpu *vcpu; struct kvm_run *run; @@ -480,15 +550,11 @@ static void guest_test_hcalls_access(void) vcpu_init_cpuid(vcpu, prev_cpuid); } - feat = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES); - 
recomm = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO); - dbg = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES); - run = vcpu->run; switch (stage) { case 0: - feat->eax |= HV_MSR_HYPERCALL_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE); hcall->control = 0xbeef; hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE; break; @@ -498,7 +564,7 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 2: - feat->ebx |= HV_POST_MESSAGES; + vcpu_set_cpuid_feature(vcpu, HV_POST_MESSAGES); hcall->control = HVCALL_POST_MESSAGE; hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT; break; @@ -508,7 +574,7 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 4: - feat->ebx |= HV_SIGNAL_EVENTS; + vcpu_set_cpuid_feature(vcpu, HV_SIGNAL_EVENTS); hcall->control = HVCALL_SIGNAL_EVENT; hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT; break; @@ -518,12 +584,12 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE; break; case 6: - dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; + vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING); hcall->control = HVCALL_RESET_DEBUG_SESSION; hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 7: - feat->ebx |= HV_DEBUGGING; + vcpu_set_cpuid_feature(vcpu, HV_DEBUGGING); hcall->control = HVCALL_RESET_DEBUG_SESSION; hcall->expect = HV_STATUS_OPERATION_DENIED; break; @@ -533,7 +599,7 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 9: - recomm->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; + vcpu_set_cpuid_feature(vcpu, HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED); hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE; hcall->expect = HV_STATUS_SUCCESS; break; @@ -542,7 +608,7 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 11: - recomm->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; + vcpu_set_cpuid_feature(vcpu, HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED); hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX; hcall->expect = HV_STATUS_SUCCESS; break; @@ -552,7 +618,7 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 13: - recomm->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; + vcpu_set_cpuid_feature(vcpu, HV_X64_CLUSTER_IPI_RECOMMENDED); hcall->control = HVCALL_SEND_IPI; hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT; break; @@ -567,7 +633,7 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 16: - recomm->ebx = 0xfff; + vcpu_set_cpuid_feature(vcpu, HV_PV_SPINLOCKS_TEST); hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT; hcall->expect = HV_STATUS_SUCCESS; break; @@ -577,12 +643,21 @@ static void guest_test_hcalls_access(void) hcall->ud_expected = true; break; case 18: - feat->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE); hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT; hcall->ud_expected = false; hcall->expect = HV_STATUS_SUCCESS; break; case 19: + hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES; + hcall->expect = HV_STATUS_ACCESS_DENIED; + break; + case 20: + vcpu_set_cpuid_feature(vcpu, HV_ENABLE_EXTENDED_HYPERCALLS); + hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES | HV_HYPERCALL_FAST_BIT; + hcall->expect = HV_STATUS_INVALID_PARAMETER; + break; + case 21: kvm_vm_free(vm); return; } diff --git 
a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c index fb02581953a3..ce1ccc4c1503 100644 --- a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c +++ b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c @@ -93,7 +93,7 @@ int main(void) { int warnings_before, warnings_after; - TEST_REQUIRE(is_intel_cpu()); + TEST_REQUIRE(host_cpu_is_intel); TEST_REQUIRE(!vm_is_unrestricted_guest(NULL)); diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index 2de98fce7edd..bad7ef8c5b92 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -198,14 +198,15 @@ static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents) static struct kvm_pmu_event_filter * -create_pmu_event_filter(const uint64_t event_list[], - int nevents, uint32_t action) +create_pmu_event_filter(const uint64_t event_list[], int nevents, + uint32_t action, uint32_t flags) { struct kvm_pmu_event_filter *f; int i; f = alloc_pmu_event_filter(nevents); f->action = action; + f->flags = flags; for (i = 0; i < nevents; i++) f->events[i] = event_list[i]; @@ -216,7 +217,7 @@ static struct kvm_pmu_event_filter *event_filter(uint32_t action) { return create_pmu_event_filter(event_list, ARRAY_SIZE(event_list), - action); + action, 0); } /* @@ -263,7 +264,7 @@ static void test_amd_deny_list(struct kvm_vcpu *vcpu) struct kvm_pmu_event_filter *f; uint64_t count; - f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY); + f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0); count = test_with_filter(vcpu, f); free(f); @@ -363,7 +364,7 @@ static void test_pmu_config_disable(void (*guest_code)(void)) */ static bool use_intel_pmu(void) { - return is_intel_cpu() && + return host_cpu_is_intel && kvm_cpu_property(X86_PROPERTY_PMU_VERSION) && kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) && kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED); @@ -397,19 +398,378 @@ static bool use_amd_pmu(void) uint32_t family = kvm_cpu_family(); uint32_t model = kvm_cpu_model(); - return is_amd_cpu() && + return host_cpu_is_amd && (is_zen1(family, model) || is_zen2(family, model) || is_zen3(family, model)); } +/* + * "MEM_INST_RETIRED.ALL_LOADS", "MEM_INST_RETIRED.ALL_STORES", and + * "MEM_INST_RETIRED.ANY" from https://perfmon-events.intel.com/ + * supported on Intel Xeon processors: + * - Sapphire Rapids, Ice Lake, Cascade Lake, Skylake. + */ +#define MEM_INST_RETIRED 0xD0 +#define MEM_INST_RETIRED_LOAD EVENT(MEM_INST_RETIRED, 0x81) +#define MEM_INST_RETIRED_STORE EVENT(MEM_INST_RETIRED, 0x82) +#define MEM_INST_RETIRED_LOAD_STORE EVENT(MEM_INST_RETIRED, 0x83) + +static bool supports_event_mem_inst_retired(void) +{ + uint32_t eax, ebx, ecx, edx; + + cpuid(1, &eax, &ebx, &ecx, &edx); + if (x86_family(eax) == 0x6) { + switch (x86_model(eax)) { + /* Sapphire Rapids */ + case 0x8F: + /* Ice Lake */ + case 0x6A: + /* Skylake */ + /* Cascade Lake */ + case 0x55: + return true; + } + } + + return false; +} + +/* + * "LS Dispatch", from Processor Programming Reference + * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors, + * Preliminary Processor Programming Reference (PPR) for AMD Family + * 17h Model 31h, Revision B0 Processors, and Preliminary Processor + * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision + * B1 Processors Volume 1 of 2. 
+ */ +#define LS_DISPATCH 0x29 +#define LS_DISPATCH_LOAD EVENT(LS_DISPATCH, BIT(0)) +#define LS_DISPATCH_STORE EVENT(LS_DISPATCH, BIT(1)) +#define LS_DISPATCH_LOAD_STORE EVENT(LS_DISPATCH, BIT(2)) + +#define INCLUDE_MASKED_ENTRY(event_select, mask, match) \ + KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, false) +#define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \ + KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true) + +struct perf_counter { + union { + uint64_t raw; + struct { + uint64_t loads:22; + uint64_t stores:22; + uint64_t loads_stores:20; + }; + }; +}; + +static uint64_t masked_events_guest_test(uint32_t msr_base) +{ + uint64_t ld0, ld1, st0, st1, ls0, ls1; + struct perf_counter c; + int val; + + /* + * The acutal value of the counters don't determine the outcome of + * the test. Only that they are zero or non-zero. + */ + ld0 = rdmsr(msr_base + 0); + st0 = rdmsr(msr_base + 1); + ls0 = rdmsr(msr_base + 2); + + __asm__ __volatile__("movl $0, %[v];" + "movl %[v], %%eax;" + "incl %[v];" + : [v]"+m"(val) :: "eax"); + + ld1 = rdmsr(msr_base + 0); + st1 = rdmsr(msr_base + 1); + ls1 = rdmsr(msr_base + 2); + + c.loads = ld1 - ld0; + c.stores = st1 - st0; + c.loads_stores = ls1 - ls0; + + return c.raw; +} + +static void intel_masked_events_guest_code(void) +{ + uint64_t r; + + for (;;) { + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); + + wrmsr(MSR_P6_EVNTSEL0 + 0, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_LOAD); + wrmsr(MSR_P6_EVNTSEL0 + 1, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_STORE); + wrmsr(MSR_P6_EVNTSEL0 + 2, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_LOAD_STORE); + + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x7); + + r = masked_events_guest_test(MSR_IA32_PMC0); + + GUEST_SYNC(r); + } +} + +static void amd_masked_events_guest_code(void) +{ + uint64_t r; + + for (;;) { + wrmsr(MSR_K7_EVNTSEL0, 0); + wrmsr(MSR_K7_EVNTSEL1, 0); + wrmsr(MSR_K7_EVNTSEL2, 0); + + wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD); + wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_STORE); + wrmsr(MSR_K7_EVNTSEL2, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD_STORE); + + r = masked_events_guest_test(MSR_K7_PERFCTR0); + + GUEST_SYNC(r); + } +} + +static struct perf_counter run_masked_events_test(struct kvm_vcpu *vcpu, + const uint64_t masked_events[], + const int nmasked_events) +{ + struct kvm_pmu_event_filter *f; + struct perf_counter r; + + f = create_pmu_event_filter(masked_events, nmasked_events, + KVM_PMU_EVENT_ALLOW, + KVM_PMU_EVENT_FLAG_MASKED_EVENTS); + r.raw = test_with_filter(vcpu, f); + free(f); + + return r; +} + +/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */ +#define MAX_FILTER_EVENTS 300 +#define MAX_TEST_EVENTS 10 + +#define ALLOW_LOADS BIT(0) +#define ALLOW_STORES BIT(1) +#define ALLOW_LOADS_STORES BIT(2) + +struct masked_events_test { + uint64_t intel_events[MAX_TEST_EVENTS]; + uint64_t intel_event_end; + uint64_t amd_events[MAX_TEST_EVENTS]; + uint64_t amd_event_end; + const char *msg; + uint32_t flags; +}; + +/* + * These are the test cases for the masked events tests. + * + * For each test, the guest enables 3 PMU counters (loads, stores, + * loads + stores). The filter is then set in KVM with the masked events + * provided. 
The test then verifies that the counters agree with which + * ones should be counting and which ones should be filtered. + */ +const struct masked_events_test test_cases[] = { + { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x81), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(0)), + }, + .msg = "Only allow loads.", + .flags = ALLOW_LOADS, + }, { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x82), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(1)), + }, + .msg = "Only allow stores.", + .flags = ALLOW_STORES, + }, { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x83), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(2)), + }, + .msg = "Only allow loads + stores.", + .flags = ALLOW_LOADS_STORES, + }, { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0), + EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x83), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, ~(BIT(0) | BIT(1)), 0), + }, + .msg = "Only allow loads and stores.", + .flags = ALLOW_LOADS | ALLOW_STORES, + }, { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0), + EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x82), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0), + EXCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(1)), + }, + .msg = "Only allow loads and loads + stores.", + .flags = ALLOW_LOADS | ALLOW_LOADS_STORES + }, { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFE, 0x82), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0), + EXCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(0)), + }, + .msg = "Only allow stores and loads + stores.", + .flags = ALLOW_STORES | ALLOW_LOADS_STORES + }, { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0), + }, + .msg = "Only allow loads, stores, and loads + stores.", + .flags = ALLOW_LOADS | ALLOW_STORES | ALLOW_LOADS_STORES + }, +}; + +static int append_test_events(const struct masked_events_test *test, + uint64_t *events, int nevents) +{ + const uint64_t *evts; + int i; + + evts = use_intel_pmu() ? test->intel_events : test->amd_events; + for (i = 0; i < MAX_TEST_EVENTS; i++) { + if (evts[i] == 0) + break; + + events[nevents + i] = evts[i]; + } + + return nevents + i; +} + +static bool bool_eq(bool a, bool b) +{ + return a == b; +} + +static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events, + int nevents) +{ + int ntests = ARRAY_SIZE(test_cases); + struct perf_counter c; + int i, n; + + for (i = 0; i < ntests; i++) { + const struct masked_events_test *test = &test_cases[i]; + + /* Do any test case events overflow MAX_TEST_EVENTS? 
*/ + assert(test->intel_event_end == 0); + assert(test->amd_event_end == 0); + + n = append_test_events(test, events, nevents); + + c = run_masked_events_test(vcpu, events, n); + TEST_ASSERT(bool_eq(c.loads, test->flags & ALLOW_LOADS) && + bool_eq(c.stores, test->flags & ALLOW_STORES) && + bool_eq(c.loads_stores, + test->flags & ALLOW_LOADS_STORES), + "%s loads: %u, stores: %u, loads + stores: %u", + test->msg, c.loads, c.stores, c.loads_stores); + } +} + +static void add_dummy_events(uint64_t *events, int nevents) +{ + int i; + + for (i = 0; i < nevents; i++) { + int event_select = i % 0xFF; + bool exclude = ((i % 4) == 0); + + if (event_select == MEM_INST_RETIRED || + event_select == LS_DISPATCH) + event_select++; + + events[i] = KVM_PMU_ENCODE_MASKED_ENTRY(event_select, 0, + 0, exclude); + } +} + +static void test_masked_events(struct kvm_vcpu *vcpu) +{ + int nevents = MAX_FILTER_EVENTS - MAX_TEST_EVENTS; + uint64_t events[MAX_FILTER_EVENTS]; + + /* Run the test cases against a sparse PMU event filter. */ + run_masked_events_tests(vcpu, events, 0); + + /* Run the test cases against a dense PMU event filter. */ + add_dummy_events(events, MAX_FILTER_EVENTS); + run_masked_events_tests(vcpu, events, nevents); +} + +static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events, + int nevents, uint32_t flags) +{ + struct kvm_pmu_event_filter *f; + int r; + + f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags); + r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f); + free(f); + + return r; +} + +static void test_filter_ioctl(struct kvm_vcpu *vcpu) +{ + uint64_t e = ~0ul; + int r; + + /* + * Unfortunately having invalid bits set in event data is expected to + * pass when flags == 0 (bits other than eventsel+umask). + */ + r = run_filter_test(vcpu, &e, 1, 0); + TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing"); + + r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS); + TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail"); + + e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf); + r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS); + TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing"); +} + int main(int argc, char *argv[]) { void (*guest_code)(void); - struct kvm_vcpu *vcpu; + struct kvm_vcpu *vcpu, *vcpu2 = NULL; struct kvm_vm *vm; TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_FILTER)); + TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_MASKED_EVENTS)); TEST_REQUIRE(use_intel_pmu() || use_amd_pmu()); guest_code = use_intel_pmu() ? 
intel_guest_code : amd_guest_code; @@ -430,6 +790,17 @@ int main(int argc, char *argv[]) test_not_member_deny_list(vcpu); test_not_member_allow_list(vcpu); + if (use_intel_pmu() && + supports_event_mem_inst_retired() && + kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) >= 3) + vcpu2 = vm_vcpu_add(vm, 2, intel_masked_events_guest_code); + else if (use_amd_pmu()) + vcpu2 = vm_vcpu_add(vm, 2, amd_masked_events_guest_code); + + if (vcpu2) + test_masked_events(vcpu2); + test_filter_ioctl(vcpu); + kvm_vm_free(vm); test_pmu_config_disable(guest_code); diff --git a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c index 22d366c697f7..c9f67702f657 100644 --- a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c +++ b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c @@ -72,11 +72,16 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage) switch (get_ucall(vcpu, &uc)) { case UCALL_SYNC: - TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") && - uc.args[1] == stage + 1, "Stage %d: Unexpected register values vmexit, got %lx", - stage + 1, (ulong)uc.args[1]); + if (!strcmp((const char *)uc.args[0], "hello") && + uc.args[1] == stage + 1) + ksft_test_result_pass("stage %d passed\n", stage + 1); + else + ksft_test_result_fail( + "stage %d: Unexpected register values vmexit, got %lx", + stage + 1, (ulong)uc.args[1]); return; case UCALL_DONE: + ksft_test_result_pass("stage %d passed\n", stage + 1); return; case UCALL_ABORT: REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx"); @@ -92,6 +97,9 @@ int main(void) struct kvm_vm *vm; uint64_t val; + ksft_print_header(); + ksft_set_plan(5); + vm = vm_create_with_one_vcpu(&vcpu, guest_code); val = 0; @@ -149,5 +157,5 @@ int main(void) kvm_vm_free(vm); - return 0; + ksft_finished(); /* Print results and exit() accordingly */ } diff --git a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c index 2641b286b4ed..ccdfa5dc1a4d 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c @@ -111,7 +111,7 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_vm *vm; - TEST_REQUIRE(is_intel_cpu()); + TEST_REQUIRE(host_cpu_is_intel); TEST_REQUIRE(!vm_is_unrestricted_guest(NULL)); vm = vm_create_with_one_vcpu(&vcpu, guest_code); diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index 13c75dc18c10..5a3bf8f61417 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -19,9 +19,6 @@ #include <sys/eventfd.h> -/* Defined in include/linux/kvm_types.h */ -#define GPA_INVALID (~(ulong)0) - #define SHINFO_REGION_GVA 0xc0000000ULL #define SHINFO_REGION_GPA 0xc0000000ULL #define SHINFO_REGION_SLOT 10 @@ -412,21 +409,21 @@ static void *juggle_shinfo_state(void *arg) { struct kvm_vm *vm = (struct kvm_vm *)arg; - struct kvm_xen_hvm_attr cache_init = { + struct kvm_xen_hvm_attr cache_activate = { .type = KVM_XEN_ATTR_TYPE_SHARED_INFO, .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE }; - struct kvm_xen_hvm_attr cache_destroy = { + struct kvm_xen_hvm_attr cache_deactivate = { .type = KVM_XEN_ATTR_TYPE_SHARED_INFO, - .u.shared_info.gfn = GPA_INVALID + .u.shared_info.gfn = KVM_XEN_INVALID_GFN }; for (;;) { - __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_init); - 
__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_destroy); + __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate); + __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate); pthread_testcancel(); - }; + } return NULL; } |
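Note on the masked-events cases added to pmu_event_filter_test.c above: they depend on the matching rules of KVM_PMU_EVENT_FLAG_MASKED_EVENTS entries. Below is a minimal, self-contained C sketch of those rules as assumed by the test cases (the struct layout and helper names are illustrative, not the kernel's uAPI encoding): an include entry admits an event when the event selects are equal and (unit_mask & mask) == match, and any matching exclude entry vetoes the event.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: simplified masked-event entry, not the uAPI bit layout. */
struct masked_event {
	uint16_t event_select;
	uint8_t  mask;
	uint8_t  match;
	bool     exclude;
};

static bool entry_matches(const struct masked_event *e,
			  uint16_t event_select, uint8_t unit_mask)
{
	return e->event_select == event_select &&
	       (unit_mask & e->mask) == e->match;
}

/*
 * For an allow-list filter, an event is permitted if at least one include
 * entry matches it and no exclude entry matches it.
 */
static bool event_allowed(const struct masked_event *filter, int nentries,
			  uint16_t event_select, uint8_t unit_mask)
{
	bool included = false;
	int i;

	for (i = 0; i < nentries; i++) {
		if (!entry_matches(&filter[i], event_select, unit_mask))
			continue;
		if (filter[i].exclude)
			return false;
		included = true;
	}

	return included;
}

Worked against the "Only allow loads and stores." Intel case above: INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0) matches unit masks 0x81, 0x82 and 0x83 (all have the 0x7C bits clear), and EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x83) then removes the combined loads+stores event, leaving exactly ALLOW_LOADS | ALLOW_STORES counting, which is what run_masked_events_tests() asserts.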