path: root/arch/x86/kvm/mmu/tdp_iter.h

// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_ITER_H
#define __KVM_X86_MMU_TDP_ITER_H

#include <linux/kvm_host.h>

#include "mmu.h"
#include "spte.h"

/*
 * TDP MMU SPTEs are RCU protected to allow paging structures (non-leaf SPTEs)
 * to be zapped while holding mmu_lock for read, and to allow TLB flushes to be
 * batched without having to collect the list of zapped SPs.  Flows that can
 * remove SPs must service pending TLB flushes prior to dropping RCU protection.
 */
static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
{
	return READ_ONCE(*rcu_dereference(sptep));
}
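
/*
 * Illustrative sketch, not part of the upstream header: the minimal RCU
 * protection needed around a single SPTE read.  In practice the TDP MMU
 * holds rcu_read_lock() across an entire walk rather than per read; the
 * function name below is hypothetical.
 */
static inline u64 tdp_example_read_one_spte(tdp_ptep_t sptep)
{
	u64 spte;

	rcu_read_lock();
	spte = kvm_tdp_mmu_read_spte(sptep);
	rcu_read_unlock();

	return spte;
}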

static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
{
	KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
	return xchg(rcu_dereference(sptep), new_spte);
}

static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
{
	KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
	WRITE_ONCE(*rcu_dereference(sptep), new_spte);
}

/*
 * SPTEs must be modified atomically if they are shadow-present leaf
 * SPTEs and have volatile bits, i.e. bits that can be set outside of
 * mmu_lock.  The Writable bit can be set by KVM's fast page fault
 * handler, and the Accessed and Dirty bits can be set by the CPU.
 *
 * Note, non-leaf SPTEs do have Accessed bits and those bits are
 * technically volatile, but KVM doesn't consume the Accessed bit of
 * non-leaf SPTEs, i.e. KVM doesn't care if it clobbers the bit.  This
 * logic would need to be reassessed if KVM were to use non-leaf Accessed
 * bits, e.g. to skip stepping down into child SPTEs when aging SPTEs.
 */
static inline bool kvm_tdp_mmu_spte_need_atomic_write(u64 old_spte, int level)
{
	return is_shadow_present_pte(old_spte) &&
	       is_last_spte(old_spte, level) &&
	       spte_has_volatile_bits(old_spte);
}

static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
					 u64 new_spte, int level)
{
	if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level))
		return kvm_tdp_mmu_write_spte_atomic(sptep, new_spte);

	__kvm_tdp_mmu_write_spte(sptep, new_spte);
	return old_spte;
}
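
/*
 * Illustrative sketch, not part of the upstream header: write-protect an
 * SPTE.  kvm_tdp_mmu_write_spte() picks an atomic or plain store based on
 * the old value and returns the value that was actually overwritten, so
 * the caller's snapshot stays accurate.  Assumes the caller holds mmu_lock
 * for write and RCU protection; the function name is hypothetical.
 */
static inline u64 tdp_example_wrprot_spte(tdp_ptep_t sptep, u64 old_spte,
					  int level)
{
	return kvm_tdp_mmu_write_spte(sptep, old_spte,
				      old_spte & ~PT_WRITABLE_MASK, level);
}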

static inline u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte,
					  u64 mask, int level)
{
	atomic64_t *sptep_atomic;

	if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level)) {
		sptep_atomic = (atomic64_t *)rcu_dereference(sptep);
		return (u64)atomic64_fetch_and(~mask, sptep_atomic);
	}

	__kvm_tdp_mmu_write_spte(sptep, old_spte & ~mask);
	return old_spte;
}
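
/*
 * Illustrative sketch, not part of the upstream header: clear the hardware
 * Dirty bit of a leaf SPTE while honoring the atomicity rules above.
 * shadow_dirty_mask comes from spte.h; the function name is hypothetical.
 */
static inline u64 tdp_example_clear_dirty_bit(tdp_ptep_t sptep, u64 old_spte,
					      int level)
{
	return tdp_mmu_clear_spte_bits(sptep, old_spte, shadow_dirty_mask,
				       level);
}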

/*
 * A TDP iterator performs a pre-order walk over a TDP paging structure.
 */
struct tdp_iter {
	/*
	 * The iterator will traverse the paging structure towards the mapping
	 * for this GFN.
	 */
	gfn_t next_last_level_gfn;
	/*
	 * The next_last_level_gfn recorded when the thread last yielded.
	 * Yielding only when next_last_level_gfn != yielded_gfn helps
	 * ensure forward progress.
	 */
	gfn_t yielded_gfn;
	/* Pointers to the page tables traversed to reach the current SPTE */
	tdp_ptep_t pt_path[PT64_ROOT_MAX_LEVEL];
	/* A pointer to the current SPTE */
	tdp_ptep_t sptep;
	/* The lowest GFN mapped by the current SPTE */
	gfn_t gfn;
	/* The level of the root page given to the iterator */
	int root_level;
	/* The lowest level the iterator should traverse to */
	int min_level;
	/* The iterator's current level within the paging structure */
	int level;
	/* The address space ID, i.e. SMM vs. regular. */
	int as_id;
	/* A snapshot of the value at sptep */
	u64 old_spte;
	/*
	 * Whether the iterator has a valid state. This will be false if the
	 * iterator walks off the end of the paging structure.
	 */
	bool valid;
	/*
	 * True if KVM dropped mmu_lock and yielded in the middle of a walk, in
	 * which case tdp_iter_next() needs to restart the walk at the root
	 * level instead of advancing to the next entry.
	 */
	bool yielded;
};

/*
 * Iterates over every SPTE mapping the GFN range [start, end) in a
 * preorder traversal.
 */
#define for_each_tdp_pte_min_level(iter, root, min_level, start, end) \
	for (tdp_iter_start(&iter, root, min_level, start); \
	     iter.valid && iter.gfn < end;		     \
	     tdp_iter_next(&iter))

#define for_each_tdp_pte(iter, root, start, end) \
	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end)

tdp_ptep_t spte_to_child_pt(u64 pte, int level);

void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
		    int min_level, gfn_t next_last_level_gfn);
void tdp_iter_next(struct tdp_iter *iter);
void tdp_iter_restart(struct tdp_iter *iter);
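
/*
 * Illustrative sketch, not part of the upstream header: walk [start, end)
 * and count shadow-present leaf SPTEs using the iterator above.  Assumes
 * the caller holds mmu_lock and has entered an RCU read-side critical
 * section, as real TDP MMU walks do; the function name is hypothetical.
 */
static inline unsigned long tdp_example_count_leaf_sptes(struct kvm_mmu_page *root,
							 gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	unsigned long leaves = 0;

	for_each_tdp_pte(iter, root, start, end) {
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_last_spte(iter.old_spte, iter.level))
			leaves++;
	}

	return leaves;
}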

#endif /* __KVM_X86_MMU_TDP_ITER_H */