author    Linus Torvalds <torvalds@linux-foundation.org>  2023-09-07 13:52:20 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-09-07 13:52:20 -0700
commit    0c02183427b4d2002992f26d4917c1263c5d4a7f (patch)
tree      426a0b282af3f309934cf0ff813b02c385e7ea04 /drivers
parent    4a0fc73da97efd23a383ca839e6fe86410268f6b (diff)
parent    d011151616e73de20c139580b73fa4c7042bd861 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
 "ARM:

   - Clean up vCPU targets, always returning generic v8 as the
     preferred target

   - Trap forwarding infrastructure for nested virtualization (used for
     traps that are taken from an L2 guest and are needed by the L1
     hypervisor)

   - FEAT_TLBIRANGE support to only invalidate specific ranges of
     addresses when collapsing a table PTE to a block PTE. This avoids
     the guest having to refill the TLBs again for addresses that
     aren't covered by the table PTE.

   - Fix vPMU issues related to handling of PMUver.

   - Don't unnecessarily align non-stack allocations in the EL2 VA space

   - Drop HCR_VIRT_EXCP_MASK, which was never used...

   - Don't use smp_processor_id() in kvm_arch_vcpu_load(), but the cpu
     parameter instead

   - Drop redundant call to kvm_set_pfn_accessed() in user_mem_abort()

   - Remove prototypes without implementations

  RISC-V:

   - Zba, Zbs, Zicntr, Zicsr, Zifencei, and Zihpm support for guests

   - Added ONE_REG interface for SATP mode

   - Added ONE_REG interface to enable/disable multiple ISA extensions

   - Improved error codes returned by ONE_REG interfaces

   - Added KVM_GET_REG_LIST ioctl() implementation for KVM RISC-V

   - Added get-reg-list selftest for KVM RISC-V

  s390:

   - PV crypto passthrough enablement (Tony, Steffen, Viktor, Janosch):
     allows a PV guest to use crypto cards. Card access is governed by
     the firmware, and once a crypto queue is "bound" to a PV VM every
     other entity (PV or not) loses access until it is unbound.
     Enablement is done via flags when creating the PV VM.

   - Guest debug fixes (Ilya)

  x86:

   - Clean up KVM's handling of Intel architectural events

   - Intel bugfixes

   - Add support for SEV-ES DebugSwap, allowing SEV-ES guests to use
     debug registers and generate/handle #DBs

   - Clean up LBR virtualization code

   - Fix a bug where KVM fails to set the target pCPU during an IRTE
     update

   - Fix fatal bugs in SEV-ES intrahost migration

   - Fix a bug where the recent (architecturally correct) change to
     reinject #BP and skip INT3 broke SEV guests (can't decode INT3 to
     skip it)

   - Retry APIC map recalculation if a vCPU is added/enabled

   - Overhaul emergency reboot code to bring SVM up to par with VMX,
     tie the "emergency disabling" behavior to KVM actually being
     loaded, and move all of the logic within KVM

   - Fix user-triggerable WARNs in SVM where KVM incorrectly assumes
     the TSC ratio MSR cannot diverge from the default when TSC scaling
     is disabled, and clean up related code

   - Add a framework to allow "caching" feature flags so that KVM can
     check if the guest can use a feature without needing to search
     guest CPUID

   - Rip out the ancient MMU_DEBUG crud and replace the useful bits
     with CONFIG_KVM_PROVE_MMU

   - Fix KVM's handling of !visible guest roots to avoid premature
     triple fault injection

   - Overhaul KVM's page-track APIs, and KVMGT's usage, to reduce the
     API surface that is needed by external users (currently only
     KVMGT), and fix a variety of issues in the process

  Generic:

   - Wrap kvm_{gfn,hva}_range.pte in a union to allow mmu_notifier
     events to pass action-specific data without needing to constantly
     update the main handlers.

   - Drop unused function declarations

  Selftests:

   - Add testcases to x86's sync_regs_test for detecting KVM TOCTOU
     bugs

   - Add support for printf() in guest code and convert all guest
     asserts to use printf-based reporting

   - Clean up the PMU event filter test and add new testcases

   - Include x86 selftests in the KVM x86 MAINTAINERS entry"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (279 commits)
  KVM: x86/mmu: Include mmu.h in spte.h
  KVM: x86/mmu: Use dummy root, backed by zero page, for !visible guest roots
  KVM: x86/mmu: Disallow guest from using !visible slots for page tables
  KVM: x86/mmu: Harden TDP MMU iteration against root w/o shadow page
  KVM: x86/mmu: Harden new PGD against roots without shadow pages
  KVM: x86/mmu: Add helper to convert root hpa to shadow page
  drm/i915/gvt: Drop final dependencies on KVM internal details
  KVM: x86/mmu: Handle KVM bookkeeping in page-track APIs, not callers
  KVM: x86/mmu: Drop @slot param from exported/external page-track APIs
  KVM: x86/mmu: Bug the VM if write-tracking is used but not enabled
  KVM: x86/mmu: Assert that correct locks are held for page write-tracking
  KVM: x86/mmu: Rename page-track APIs to reflect the new reality
  KVM: x86/mmu: Drop infrastructure for multiple page-track modes
  KVM: x86/mmu: Use page-track notifiers iff there are external users
  KVM: x86/mmu: Move KVM-only page-track declarations to internal header
  KVM: x86: Remove the unused page-track hook track_flush_slot()
  drm/i915/gvt: switch from ->track_flush_slot() to ->track_remove_region()
  KVM: x86: Add a new page-track hook to handle memslot deletion
  drm/i915/gvt: Don't bother removing write-protection on to-be-deleted slot
  KVM: x86: Reject memslot MOVE operations if KVMGT is attached
  ...
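Editor's note: the x86 page-track overhaul summarized above reshapes the API used by KVMGT, as the kvmgt.c hunks below show. Notifier callbacks no longer receive a struct kvm_vcpu or memslot, registration can fail, and callers write-protect pages via kvm_write_track_add_gfn()/kvm_write_track_remove_gfn() instead of taking kvm->mmu_lock and looking up memslots themselves. A minimal sketch of an external user under the new API; my_dev, my_dev_attach() and the example gfn are hypothetical, only the kvm_page_track_* calls and callback signatures come from the diff:

    #include <linux/kvm_host.h>
    #include <asm/kvm_page_track.h>

    /* Hypothetical external page-track user, modeled on KVMGT below. */
    struct my_dev {
            struct kvm *kvm;        /* e.g. taken from vfio_device.kvm */
            struct kvm_page_track_notifier_node track_node;
    };

    static void my_track_write(gpa_t gpa, const u8 *val, int len,
                               struct kvm_page_track_notifier_node *node)
    {
            struct my_dev *dev = container_of(node, struct my_dev, track_node);

            /* Emulate the guest write of len bytes at gpa against dev's
             * shadow state; no vCPU or memslot is passed in anymore. */
    }

    static void my_track_remove_region(gfn_t gfn, unsigned long nr_pages,
                                       struct kvm_page_track_notifier_node *node)
    {
            struct my_dev *dev = container_of(node, struct my_dev, track_node);

            /* The memslot covering gfn..gfn+nr_pages-1 is being deleted;
             * drop dev's own bookkeeping only, KVM zaps the region itself. */
    }

    static int my_dev_attach(struct my_dev *dev)
    {
            int ret;

            dev->track_node.track_write = my_track_write;
            dev->track_node.track_remove_region = my_track_remove_region;

            /* Registration can fail now, e.g. if the VM is unusable. */
            ret = kvm_page_track_register_notifier(dev->kvm, &dev->track_node);
            if (ret)
                    return ret;

            /* Write-protect one guest page; memslot lookup, SRCU and
             * mmu_lock are all handled inside KVM, not by the caller. */
            return kvm_write_track_add_gfn(dev->kvm, 0x1000);
    }

Teardown mirrors this with kvm_page_track_unregister_notifier(), as in intel_vgpu_close_device() below.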
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c         102
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h           1
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h           3
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c       120
-rw-r--r--  drivers/gpu/drm/i915/gvt/page_track.c   10
-rw-r--r--  drivers/perf/arm_pmuv3.c                 2
6 files changed, 65 insertions, 173 deletions
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4ec85308379a..094fca9b0e73 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -49,22 +49,6 @@
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
-static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
-{
- struct kvm *kvm = vgpu->vfio_device.kvm;
- int idx;
- bool ret;
-
- if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
- return false;
-
- idx = srcu_read_lock(&kvm->srcu);
- ret = kvm_is_visible_gfn(kvm, gfn);
- srcu_read_unlock(&kvm->srcu, idx);
-
- return ret;
-}
-
/*
* validate a gm address and related range size,
* translate it to host gm address
@@ -1161,31 +1145,6 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
ops->set_pfn(se, s->shadow_page.mfn);
}
-/*
- * Check if can do 2M page
- * @vgpu: target vgpu
- * @entry: target pfn's gtt entry
- *
- * Return 1 if 2MB huge gtt shadowing is possible, 0 if miscondition,
- * negative if found err.
- */
-static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
- struct intel_gvt_gtt_entry *entry)
-{
- const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- kvm_pfn_t pfn;
-
- if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
- return 0;
-
- if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
- return -EINVAL;
- pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
- if (is_error_noslot_pfn(pfn))
- return -EINVAL;
- return PageTransHuge(pfn_to_page(pfn));
-}
-
static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
struct intel_gvt_gtt_entry *se)
@@ -1279,7 +1238,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
{
const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry se = *ge;
- unsigned long gfn, page_size = PAGE_SIZE;
+ unsigned long gfn;
dma_addr_t dma_addr;
int ret;
@@ -1291,6 +1250,9 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
switch (ge->type) {
case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
gvt_vdbg_mm("shadow 4K gtt entry\n");
+ ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr);
+ if (ret)
+ return -ENXIO;
break;
case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
gvt_vdbg_mm("shadow 64K gtt entry\n");
@@ -1302,25 +1264,20 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
return split_64KB_gtt_entry(vgpu, spt, index, &se);
case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
gvt_vdbg_mm("shadow 2M gtt entry\n");
- ret = is_2MB_gtt_possible(vgpu, ge);
- if (ret == 0)
+ if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M) ||
+ intel_gvt_dma_map_guest_page(vgpu, gfn,
+ I915_GTT_PAGE_SIZE_2M, &dma_addr))
return split_2MB_gtt_entry(vgpu, spt, index, &se);
- else if (ret < 0)
- return ret;
- page_size = I915_GTT_PAGE_SIZE_2M;
break;
case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
gvt_vgpu_err("GVT doesn't support 1GB entry\n");
return -EINVAL;
default:
GEM_BUG_ON(1);
+ return -EINVAL;
}
- /* direct shadow */
- ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
- if (ret)
- return -ENXIO;
-
+ /* Successfully shadowed a 4K or 2M page (without splitting). */
pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
ppgtt_set_shadow_entry(spt, &se, index);
return 0;
@@ -1329,11 +1286,9 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
struct intel_vgpu *vgpu = spt->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
- const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
struct intel_gvt_gtt_entry se, ge;
- unsigned long gfn, i;
+ unsigned long i;
int ret;
trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1350,13 +1305,6 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
ppgtt_generate_shadow_entry(&se, s, &ge);
ppgtt_set_shadow_entry(spt, &se, i);
} else {
- gfn = ops->get_pfn(&ge);
- if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
- ops->set_pfn(&se, gvt->gtt.scratch_mfn);
- ppgtt_set_shadow_entry(spt, &se, i);
- continue;
- }
-
ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
if (ret)
goto fail;
@@ -1845,6 +1793,9 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
if (mm->ppgtt_mm.shadowed)
return 0;
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+ return -EINVAL;
+
mm->ppgtt_mm.shadowed = true;
for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
@@ -2331,14 +2282,6 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
m.val64 = e.val64;
m.type = e.type;
- /* one PTE update may be issued in multiple writes and the
- * first write may not construct a valid gfn
- */
- if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
- ops->set_pfn(&m, gvt->gtt.scratch_mfn);
- goto out;
- }
-
ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
&dma_addr);
if (ret) {
@@ -2355,7 +2298,6 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
ops->clear_present(&m);
}
-out:
ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
@@ -2876,24 +2818,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
}
/**
- * intel_vgpu_reset_gtt - reset the all GTT related status
- * @vgpu: a vGPU
- *
- * This function is called from vfio core to reset reset all
- * GTT related status, including GGTT, PPGTT, scratch page.
- *
- */
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
-{
- /* Shadow pages are only created when there is no page
- * table tracking data, so remove page tracking data after
- * removing the shadow pages.
- */
- intel_vgpu_destroy_all_ppgtt_mm(vgpu);
- intel_vgpu_reset_ggtt(vgpu, true);
-}
-
-/**
* intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
* @gvt: intel gvt device
*
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index a3b0f59ec8bd..4cb183e06e95 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -224,7 +224,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
int intel_gvt_init_gtt(struct intel_gvt *gvt);
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
void intel_gvt_clean_gtt(struct intel_gvt *gvt);
struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 2d65800d8e93..53a0a42a50db 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -34,10 +34,11 @@
#define _GVT_H_
#include <uapi/linux/pci_regs.h>
-#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
+#include <asm/kvm_page_track.h>
+
#include "i915_drv.h"
#include "intel_gvt.h"
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 9cd9e9da60dd..42ce20e72db7 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -106,12 +106,10 @@ struct gvt_dma {
#define vfio_dev_to_vgpu(vfio_dev) \
container_of((vfio_dev), struct intel_vgpu, vfio_device)
-static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *val, int len,
- struct kvm_page_track_notifier_node *node);
-static void kvmgt_page_track_flush_slot(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- struct kvm_page_track_notifier_node *node);
+static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node);
+static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
+ struct kvm_page_track_notifier_node *node);
static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf)
{
@@ -161,8 +159,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
if (npage == 0)
base_page = cur_page;
- else if (base_page + npage != cur_page) {
- gvt_vgpu_err("The pages are not continuous\n");
+ else if (page_to_pfn(base_page) + npage != page_to_pfn(cur_page)) {
ret = -EINVAL;
npage++;
goto err;
@@ -172,7 +169,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
*page = base_page;
return 0;
err:
- gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
+ if (npage)
+ gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
return ret;
}
@@ -352,6 +350,8 @@ __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p, *res = NULL;
+ lockdep_assert_held(&info->vgpu_lock);
+
hash_for_each_possible(info->ptable, p, hnode, gfn) {
if (gfn == p->gfn) {
res = p;
@@ -654,21 +654,19 @@ out:
static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
-
- if (!vgpu->vfio_device.kvm ||
- vgpu->vfio_device.kvm->mm != current->mm) {
- gvt_vgpu_err("KVM is required to use Intel vGPU\n");
- return -ESRCH;
- }
+ int ret;
if (__kvmgt_vgpu_exist(vgpu))
return -EEXIST;
vgpu->track_node.track_write = kvmgt_page_track_write;
- vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
- kvm_get_kvm(vgpu->vfio_device.kvm);
- kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
- &vgpu->track_node);
+ vgpu->track_node.track_remove_region = kvmgt_page_track_remove_region;
+ ret = kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
+ &vgpu->track_node);
+ if (ret) {
+ gvt_vgpu_err("KVM is required to use Intel vGPU\n");
+ return ret;
+ }
set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
@@ -703,7 +701,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
&vgpu->track_node);
- kvm_put_kvm(vgpu->vfio_device.kvm);
kvmgt_protect_table_destroy(vgpu);
gvt_cache_destroy(vgpu);
@@ -1547,95 +1544,70 @@ static struct mdev_driver intel_vgpu_mdev_driver = {
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
{
- struct kvm *kvm = info->vfio_device.kvm;
- struct kvm_memory_slot *slot;
- int idx;
+ int r;
if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
return -ESRCH;
- idx = srcu_read_lock(&kvm->srcu);
- slot = gfn_to_memslot(kvm, gfn);
- if (!slot) {
- srcu_read_unlock(&kvm->srcu, idx);
- return -EINVAL;
- }
-
- write_lock(&kvm->mmu_lock);
-
if (kvmgt_gfn_is_write_protected(info, gfn))
- goto out;
+ return 0;
- kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
- kvmgt_protect_table_add(info, gfn);
+ r = kvm_write_track_add_gfn(info->vfio_device.kvm, gfn);
+ if (r)
+ return r;
-out:
- write_unlock(&kvm->mmu_lock);
- srcu_read_unlock(&kvm->srcu, idx);
+ kvmgt_protect_table_add(info, gfn);
return 0;
}
int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
{
- struct kvm *kvm = info->vfio_device.kvm;
- struct kvm_memory_slot *slot;
- int idx;
+ int r;
if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
return -ESRCH;
- idx = srcu_read_lock(&kvm->srcu);
- slot = gfn_to_memslot(kvm, gfn);
- if (!slot) {
- srcu_read_unlock(&kvm->srcu, idx);
- return -EINVAL;
- }
-
- write_lock(&kvm->mmu_lock);
-
if (!kvmgt_gfn_is_write_protected(info, gfn))
- goto out;
+ return 0;
- kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
- kvmgt_protect_table_del(info, gfn);
+ r = kvm_write_track_remove_gfn(info->vfio_device.kvm, gfn);
+ if (r)
+ return r;
-out:
- write_unlock(&kvm->mmu_lock);
- srcu_read_unlock(&kvm->srcu, idx);
+ kvmgt_protect_table_del(info, gfn);
return 0;
}
-static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *val, int len,
- struct kvm_page_track_notifier_node *node)
+static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node)
{
struct intel_vgpu *info =
container_of(node, struct intel_vgpu, track_node);
- if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
+ mutex_lock(&info->vgpu_lock);
+
+ if (kvmgt_gfn_is_write_protected(info, gpa >> PAGE_SHIFT))
intel_vgpu_page_track_handler(info, gpa,
(void *)val, len);
+
+ mutex_unlock(&info->vgpu_lock);
}
-static void kvmgt_page_track_flush_slot(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- struct kvm_page_track_notifier_node *node)
+static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
+ struct kvm_page_track_notifier_node *node)
{
- int i;
- gfn_t gfn;
+ unsigned long i;
struct intel_vgpu *info =
container_of(node, struct intel_vgpu, track_node);
- write_lock(&kvm->mmu_lock);
- for (i = 0; i < slot->npages; i++) {
- gfn = slot->base_gfn + i;
- if (kvmgt_gfn_is_write_protected(info, gfn)) {
- kvm_slot_page_track_remove_page(kvm, slot, gfn,
- KVM_PAGE_TRACK_WRITE);
- kvmgt_protect_table_del(info, gfn);
- }
+ mutex_lock(&info->vgpu_lock);
+
+ for (i = 0; i < nr_pages; i++) {
+ if (kvmgt_gfn_is_write_protected(info, gfn + i))
+ kvmgt_protect_table_del(info, gfn + i);
}
- write_unlock(&kvm->mmu_lock);
+
+ mutex_unlock(&info->vgpu_lock);
}
void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index df34e73cba41..60a65435556d 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -162,13 +162,9 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
struct intel_vgpu_page_track *page_track;
int ret = 0;
- mutex_lock(&vgpu->vgpu_lock);
-
page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
- if (!page_track) {
- ret = -ENXIO;
- goto out;
- }
+ if (!page_track)
+ return -ENXIO;
if (unlikely(vgpu->failsafe)) {
/* Remove write protection to prevent future traps. */
@@ -179,7 +175,5 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
gvt_err("guest page write error, gpa %llx\n", gpa);
}
-out:
- mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index e5a2ac4155f6..8fcaa26f0f8a 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -749,6 +749,8 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+
+ kvm_vcpu_pmu_resync_el0();
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
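
Editor's note on the gvt_pin_guest_page() hunk in kvmgt.c above: the contiguity check was switched from struct page pointer arithmetic to pfn arithmetic. The likely reasoning (an inference from the change, stated here as an assumption): with classic CONFIG_SPARSEMEM (without VMEMMAP) the memmap is not virtually contiguous across section boundaries, so "base_page + npage != cur_page" can misjudge physically contiguous pages, whereas pfns test physical contiguity directly. A minimal sketch of the predicate, with a hypothetical helper name:

    #include <linux/mm.h>

    /* Hypothetical helper: true iff cur is the npage'th physically
     * contiguous page after base. Comparing struct page pointers only
     * works when the memmap is virtually contiguous (CONFIG_FLATMEM or
     * CONFIG_SPARSEMEM_VMEMMAP); comparing pfns is always correct. */
    static bool pages_physically_contiguous(struct page *base,
                                            struct page *cur,
                                            unsigned long npage)
    {
            return page_to_pfn(base) + npage == page_to_pfn(cur);
    }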