| author | Sean Christopherson <seanjc@google.com> | 2023-11-03 16:05:36 -0700 |
|---|---|---|
| committer | Sean Christopherson <seanjc@google.com> | 2023-11-30 12:52:54 -0800 |
| commit | cbb359d81a2695bb5e63ec9de06fcbef28518891 (patch) | |
| tree | bb62c8d182ef0b047648edeb48aea6279f1fe1b4 /arch/x86/kvm/pmu.c | |
| parent | e9e60c82fe391d04db55a91c733df4a017c28b2f (diff) | |
KVM: x86/pmu: Move PMU reset logic to common x86 code
Move the common (or at least "ignored") aspects of resetting the vPMU to
common x86 code, along with the stop/release helpers that are now used only
by the common pmu.c.
There is no need to manually handle fixed counters as all_valid_pmc_idx
tracks both fixed and general purpose counters, and resetting the vPMU is
far from a hot path, i.e. the extra bit of overhead to look up the PMC from
the index is a non-issue.
Zero fixed_ctr_ctrl in common code even though it's Intel specific.
Ensuring it's zero doesn't harm AMD/SVM in any way, and stopping the fixed
counters via all_valid_pmc_idx, but not clearing the associated control
bits, would be odd/confusing.
Make the .reset() hook optional as SVM no longer needs vendor specific
handling.
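As a rough illustration of what "optional" means here (a simplified userspace
analog, not kernel code): the common reset path tolerates a vendor that leaves
the hook unset, which in the kernel is expressed via static_call_cond() rather
than the explicit NULL check used below.

```c
/* Userspace analog of an optional vendor hook; types and names are made up. */
#include <stddef.h>
#include <stdio.h>

struct pmu_ops {
	void (*reset)(void);	/* optional: may be NULL */
};

static void intel_reset(void) { puts("intel: vendor-specific reset"); }

static const struct pmu_ops intel_ops = { .reset = intel_reset };
static const struct pmu_ops amd_ops   = { .reset = NULL };	/* SVM drops its hook */

static void common_pmu_reset(const struct pmu_ops *ops)
{
	/* ... common counter teardown happens here ... */
	if (ops->reset)		/* models static_call_cond(): no-op when the op is NULL */
		ops->reset();
}

int main(void)
{
	common_pmu_reset(&intel_ops);
	common_pmu_reset(&amd_ops);	/* quietly skips the missing hook */
	return 0;
}
```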
Cc: stable@vger.kernel.org
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20231103230541.352265-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Diffstat (limited to 'arch/x86/kvm/pmu.c')
-rw-r--r-- | arch/x86/kvm/pmu.c | 40 |
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 9ae07db6f0f6..027e9c3c2b93 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -250,6 +250,24 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
 	return true;
 }
 
+static void pmc_release_perf_event(struct kvm_pmc *pmc)
+{
+	if (pmc->perf_event) {
+		perf_event_release_kernel(pmc->perf_event);
+		pmc->perf_event = NULL;
+		pmc->current_config = 0;
+		pmc_to_pmu(pmc)->event_count--;
+	}
+}
+
+static void pmc_stop_counter(struct kvm_pmc *pmc)
+{
+	if (pmc->perf_event) {
+		pmc->counter = pmc_read_counter(pmc);
+		pmc_release_perf_event(pmc);
+	}
+}
+
 static int filter_cmp(const void *pa, const void *pb, u64 mask)
 {
 	u64 a = *(u64 *)pa & mask;
@@ -654,7 +672,27 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 {
-	static_call(kvm_x86_pmu_reset)(vcpu);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct kvm_pmc *pmc;
+	int i;
+
+	bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);
+
+	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
+		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
+		if (!pmc)
+			continue;
+
+		pmc_stop_counter(pmc);
+		pmc->counter = 0;
+
+		if (pmc_is_gp(pmc))
+			pmc->eventsel = 0;
+	}
+
+	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
+
+	static_call_cond(kvm_x86_pmu_reset)(vcpu);
 }
 
 void kvm_pmu_init(struct kvm_vcpu *vcpu)