This source file includes the following definitions:
- rsvd_bits
- kvm_mmu_available_pages
- kvm_mmu_reload
- kvm_get_pcid
- kvm_get_active_pcid
- kvm_mmu_load_cr3
- is_writable_pte
- is_write_protection
- permission_fault
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

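/*
 * Build a mask with bits s..e (inclusive) set; returns 0 for an empty
 * range (e < s).  Used to describe reserved-bit ranges in PTEs.
 */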
static inline u64 rsvd_bits(int s, int e)
{
        if (e < s)
                return 0;

        return ((1ULL << (e - s + 1)) - 1) << s;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                             bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                          u64 fault_address, char *insn, int insn_len);

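/*
 * Number of shadow MMU pages the VM may still allocate before hitting
 * its n_max_mmu_pages limit; 0 if the limit has already been reached.
 */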
static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
        if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
                return kvm->arch.n_max_mmu_pages -
                        kvm->arch.n_used_mmu_pages;

        return 0;
}

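/*
 * Ensure the vCPU has a valid MMU root: reload it via kvm_mmu_load()
 * only when root_hpa is currently invalid.
 */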
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
                return 0;

        return kvm_mmu_load(vcpu);
}

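/*
 * Extract the PCID from a guest CR3 value.  PCIDs are only meaningful
 * when CR4.PCIDE is set; otherwise the active PCID is 0.
 */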
static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
        BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

        return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
               ? cr3 & X86_CR3_PCID_MASK
               : 0;
}

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
        return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

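/*
 * Load the current MMU root, tagged with the guest's active PCID, via
 * the set_cr3 hook, provided a valid root has been allocated.
 */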
static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
{
        if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
                vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
                                              kvm_get_active_pcid(vcpu));
}

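/* True if the writable bit (bit 1) is set in a PTE/SPTE value. */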
static inline int is_writable_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

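/* True if CR0.WP is set, i.e. supervisor writes respect read-only PTEs. */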
static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

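/*
 * Check whether an access described by the page fault error code @pfec
 * (W/R, U/S, I/D bits) is permitted by a PTE with access rights
 * @pte_access and protection key @pte_pkey.
 *
 * Returns 0 if the access is allowed, otherwise a page fault error code.
 */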
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                  unsigned pte_access, unsigned pte_pkey,
                                  unsigned pfec)
{
        int cpl = kvm_x86_ops->get_cpl(vcpu);
        unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
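        /*
         * If CPL < 3, explicit supervisor accesses are exempt from SMAP when
         * EFLAGS.AC = 1.  If CPL = 3, any supervisor-mode access is implicit
         * and SMAP applies regardless of EFLAGS.AC.
         *
         * The check is kept branchless: (cpl - 3) is negative only when
         * CPL < 3, so "smap" holds X86_EFLAGS_AC exactly when SMAP is being
         * overridden.  That bit is then shifted into the PFERR_RSVD position
         * of the index (always zero in pfec), so a single mmu->permissions[]
         * lookup covers both cases.
         */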
        unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
        int index = (pfec >> 1) +
                    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
        bool fault = (mmu->permissions[index] >> pte_access) & 1;
        u32 errcode = PFERR_PRESENT_MASK;

        WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
        if (unlikely(mmu->pkru_mask)) {
                u32 pkru_bits, offset;
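                /*
                 * PKRU holds two attribute bits (AD, WD) for each of the 16
                 * protection keys; pte_pkey * 2 is the bit offset of this
                 * key's pair within vcpu->arch.pkru.
                 */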
                pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
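                /*
                 * Clear the present bit from pfec and fold the PTE's
                 * user/supervisor bit into the PFERR_RSVD position,
                 * matching the layout used to build mmu->pkru_mask.
                 */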
                offset = (pfec & ~1) +
                        ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

                pkru_bits &= mmu->pkru_mask >> offset;
                errcode |= -pkru_bits & PFERR_PK_MASK;
                fault |= (pkru_bits != 0);
        }

        return -(u32)fault & errcode;
}

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn);
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

#endif