Diffstat (limited to 'virt')

 virt/kvm/kvm_main.c | 69
 virt/kvm/kvm_mm.h   |  6
 virt/kvm/pfncache.c | 50
 3 files changed, 53 insertions(+), 72 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6c09fe40948f..0583ce03dace 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -401,12 +401,17 @@ static void kvm_flush_shadow_all(struct kvm *kvm)
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
gfp_t gfp_flags)
{
+ void *page;
+
gfp_flags |= mc->gfp_zero;
if (mc->kmem_cache)
return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
- else
- return (void *)__get_free_page(gfp_flags);
+
+ page = (void *)__get_free_page(gfp_flags);
+ if (page && mc->init_value)
+ memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));
+ return page;
}
int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
@@ -421,6 +426,13 @@ int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity,
if (WARN_ON_ONCE(!capacity))
return -EIO;
+ /*
+ * Custom init values can be used only for page allocations,
+ * and obviously conflict with __GFP_ZERO.
+ */
+ if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
+ return -EIO;
+
mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
if (!mc->objects)
return -ENOMEM;
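
For illustration, a minimal sketch of the contract the two hunks above establish; the cache name and init pattern below are assumptions, not taken from this patch. Pages topped up for such a cache come back pre-filled with a 64-bit pattern instead of zeroes, and combining .init_value with .kmem_cache or .gfp_zero trips the new WARN:

	/* Hypothetical arch-side cache; the pattern is illustrative only. */
	static struct kvm_mmu_memory_cache pte_cache = {
		.init_value = 0xdead0000dead0000ull,
		/* .kmem_cache and .gfp_zero must stay unset, or topup WARNs
		 * and returns -EIO. */
	};

	static int topup_example(void)
	{
		/* Reserve at least 4 pre-initialized pages (capacity 40). */
		return __kvm_mmu_topup_memory_cache(&pte_cache, 40, 4);
	}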
@@ -583,8 +595,6 @@ static void kvm_null_fn(void)
}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
-static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
-
/* Iterate over each memslot intersecting [start, last] (inclusive) range */
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \
for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
@@ -670,14 +680,12 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
- union kvm_mmu_notifier_arg arg,
gfn_handler_t handler)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const struct kvm_mmu_notifier_range range = {
.start = start,
.end = end,
- .arg = arg,
.handler = handler,
.on_lock = (void *)kvm_null_fn,
.flush_on_ret = true,
@@ -705,48 +713,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
return __kvm_handle_hva_range(kvm, &range).ret;
}
-static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- /*
- * Skipping invalid memslots is correct if and only if change_pte() is
- * surrounded by invalidate_range_{start,end}(), which is currently
- * guaranteed by the primary MMU. If that ever changes, KVM needs to
- * unmap the memslot instead of skipping the memslot to ensure that KVM
- * doesn't hold references to the old PFN.
- */
- WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
-
- if (range->slot->flags & KVM_MEMSLOT_INVALID)
- return false;
-
- return kvm_set_spte_gfn(kvm, range);
-}
-
-static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address,
- pte_t pte)
-{
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
- const union kvm_mmu_notifier_arg arg = { .pte = pte };
-
- trace_kvm_set_spte_hva(address);
-
- /*
- * .change_pte() must be surrounded by .invalidate_range_{start,end}().
- * If mmu_invalidate_in_progress is zero, then no in-progress
- * invalidations, including this one, found a relevant memslot at
- * start(); rechecking memslots here is unnecessary. Note, a false
- * positive (count elevated by a different invalidation) is sub-optimal
- * but functionally ok.
- */
- WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
- if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
- return;
-
- kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
-}
-
void kvm_mmu_invalidate_begin(struct kvm *kvm)
{
lockdep_assert_held_write(&kvm->mmu_lock);
@@ -832,8 +798,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
* mn_active_invalidate_count (see above) instead of
* mmu_invalidate_in_progress.
*/
- gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
- hva_range.may_block);
+ gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
/*
* If one or more memslots were found and thus zapped, notify arch code
@@ -910,8 +875,7 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
{
trace_kvm_age_hva(start, end);
- return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
- kvm_age_gfn);
+ return kvm_handle_hva_range(mn, start, end, kvm_age_gfn);
}
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
@@ -964,7 +928,6 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
.clear_young = kvm_mmu_notifier_clear_young,
.test_young = kvm_mmu_notifier_test_young,
- .change_pte = kvm_mmu_notifier_change_pte,
.release = kvm_mmu_notifier_release,
};
diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h
index ecefc7ec51af..715f19669d01 100644
--- a/virt/kvm/kvm_mm.h
+++ b/virt/kvm/kvm_mm.h
@@ -26,13 +26,11 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
unsigned long start,
- unsigned long end,
- bool may_block);
+ unsigned long end);
#else
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
unsigned long start,
- unsigned long end,
- bool may_block)
+ unsigned long end)
{
}
#endif /* HAVE_KVM_PFNCACHE */
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 4e07112a24c2..e3453e869e92 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -23,7 +23,7 @@
* MMU notifier 'invalidate_range_start' hook.
*/
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
- unsigned long end, bool may_block)
+ unsigned long end)
{
struct gfn_to_pfn_cache *gpc;
@@ -57,6 +57,19 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
spin_unlock(&kvm->gpc_lock);
}
+static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva,
+ unsigned long len)
+{
+ unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
+ offset_in_page(gpa);
+
+ /*
+ * The cached access must fit within a single page. The 'len' argument
+ * to activate() and refresh() exists only to enforce that.
+ */
+ return offset + len <= PAGE_SIZE;
+}
+
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
@@ -74,7 +87,7 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
if (kvm_is_error_hva(gpc->uhva))
return false;
- if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
+ if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
return false;
if (!gpc->valid)
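
Concretely, with a 4KiB page the new helper rejects any access that would straddle a page boundary, for GPA-based and HVA-based caches alike; the addresses below are assumed for illustration:

	/* GPA-based cache: offset_in_page(0x1000f80) = 0xf80, and
	 * 0xf80 + 0x100 = 0x1080 > PAGE_SIZE, so this is rejected: */
	kvm_gpc_is_valid_len(0x1000f80, KVM_HVA_ERR_BAD, 0x100);	/* false */

	/* The same GPA with a length that stays inside the page passes: */
	kvm_gpc_is_valid_len(0x1000f80, KVM_HVA_ERR_BAD, 0x80);	/* true */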
@@ -232,8 +245,7 @@ out_error:
return -EFAULT;
}
-static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
- unsigned long len)
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva)
{
unsigned long page_offset;
bool unmap_old = false;
@@ -247,15 +259,6 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
return -EINVAL;
- /*
- * The cached access must fit within a single page. The 'len' argument
- * exists only to enforce that.
- */
- page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
- offset_in_page(gpa);
- if (page_offset + len > PAGE_SIZE)
- return -EINVAL;
-
lockdep_assert_held(&gpc->refresh_lock);
write_lock_irq(&gpc->lock);
@@ -270,6 +273,8 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
if (kvm_is_error_gpa(gpa)) {
+ page_offset = offset_in_page(uhva);
+
gpc->gpa = INVALID_GPA;
gpc->memslot = NULL;
gpc->uhva = PAGE_ALIGN_DOWN(uhva);
@@ -279,6 +284,8 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
} else {
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
+ page_offset = offset_in_page(gpa);
+
if (gpc->gpa != gpa || gpc->generation != slots->generation ||
kvm_is_error_hva(gpc->uhva)) {
gfn_t gfn = gpa_to_gfn(gpa);
@@ -354,6 +361,9 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
guard(mutex)(&gpc->refresh_lock);
+ if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
+ return -EINVAL;
+
/*
* If the GPA is valid then ignore the HVA, as a cache can be GPA-based
* or HVA-based, not both. For GPA-based caches, the HVA will be
@@ -361,7 +371,7 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
*/
uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;
- return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len);
+ return __kvm_gpc_refresh(gpc, gpc->gpa, uhva);
}
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
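
As a usage sketch (the consumer function is hypothetical, but the loop mirrors how existing gfn_to_pfn_cache users are structured): kvm_gpc_check() runs under gpc->lock and kvm_gpc_refresh() outside it, with the same 'len' passed to both so the single-page rule above is enforced consistently:

	static void *map_cached(struct gfn_to_pfn_cache *gpc, unsigned long len)
	{
		read_lock_irq(&gpc->lock);
		while (!kvm_gpc_check(gpc, len)) {
			read_unlock_irq(&gpc->lock);
			/* Fails with -EINVAL if 'len' spans a page boundary. */
			if (kvm_gpc_refresh(gpc, len))
				return NULL;
			read_lock_irq(&gpc->lock);
		}
		return gpc->khva;	/* caller drops gpc->lock when done */
	}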
@@ -381,6 +391,9 @@ static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned
{
struct kvm *kvm = gpc->kvm;
+ if (!kvm_gpc_is_valid_len(gpa, uhva, len))
+ return -EINVAL;
+
guard(mutex)(&gpc->refresh_lock);
if (!gpc->active) {
@@ -400,11 +413,18 @@ static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned
gpc->active = true;
write_unlock_irq(&gpc->lock);
}
- return __kvm_gpc_refresh(gpc, gpa, uhva, len);
+ return __kvm_gpc_refresh(gpc, gpa, uhva);
}
int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
+ /*
+ * Explicitly disallow INVALID_GPA so that the magic value can be used
+ * by KVM to differentiate between GPA-based and HVA-based caches.
+ */
+ if (WARN_ON_ONCE(kvm_is_error_gpa(gpa)))
+ return -EINVAL;
+
return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
}