Lines Matching refs:spte

161 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\  argument
164 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
165 __shadow_walk_next(&(_walker), spte))
178 static void mmu_spte_set(u64 *sptep, u64 spte);
215 static unsigned int get_mmio_spte_generation(u64 spte) in get_mmio_spte_generation() argument
219 spte &= ~shadow_mmio_mask; in get_mmio_spte_generation()
221 gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK; in get_mmio_spte_generation()
222 gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT; in get_mmio_spte_generation()
244 static bool is_mmio_spte(u64 spte) in is_mmio_spte() argument
246 return (spte & shadow_mmio_mask) == shadow_mmio_mask; in is_mmio_spte()
249 static gfn_t get_mmio_spte_gfn(u64 spte) in get_mmio_spte_gfn() argument
252 return (spte & ~mask) >> PAGE_SHIFT; in get_mmio_spte_gfn()
255 static unsigned get_mmio_spte_access(u64 spte) in get_mmio_spte_access() argument
258 return (spte & ~mask) & ~PAGE_MASK; in get_mmio_spte_access()
272 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) in check_mmio_spte() argument
277 spte_gen = get_mmio_spte_generation(spte); in check_mmio_spte()
279 trace_check_mmio_spte(spte, kvm_gen, spte_gen); in check_mmio_spte()
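The fragments at 215-279 show the MMIO generation number being scattered across a low and a high bit field of the spte and reassembled on the way out. A minimal user-space sketch of that round trip; the shift and mask values below are invented for illustration and are not the kernel's actual MMIO_SPTE_GEN_* constants, and the kernel additionally distinguishes the bit position inside the spte from the width of the low part.

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    /* Illustrative values only; the real constants live in arch/x86/kvm/mmu.c. */
    #define MMIO_SPTE_GEN_LOW_SHIFT   2
    #define MMIO_SPTE_GEN_HIGH_SHIFT  52
    #define MMIO_GEN_LOW_BITS         10
    #define MMIO_GEN_LOW_MASK         ((1ull << MMIO_GEN_LOW_BITS) - 1)

    /* Scatter the generation into two disjoint bit ranges of the spte. */
    static uint64_t pack_mmio_gen(uint64_t gen)
    {
        uint64_t spte = 0;
        spte |= (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
        spte |= (gen >> MMIO_GEN_LOW_BITS) << MMIO_SPTE_GEN_HIGH_SHIFT;
        return spte;
    }

    /* Same shape as get_mmio_spte_generation(): reassemble low and high parts. */
    static uint64_t unpack_mmio_gen(uint64_t spte)
    {
        uint64_t gen;
        gen  = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
        gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_BITS;
        return gen;
    }

    int main(void)
    {
        uint64_t gen = 0x2a5;
        assert(unpack_mmio_gen(pack_mmio_gen(gen)) == gen);
        printf("generation %#llx survives the round trip\n",
               (unsigned long long)gen);
        return 0;
    }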
341 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
343 *sptep = spte; in __set_spte()
346 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
348 *sptep = spte; in __update_clear_spte_fast()
351 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
353 return xchg(sptep, spte); in __update_clear_spte_slow()
366 u64 spte; member
369 static void count_spte_clear(u64 *sptep, u64 spte) in count_spte_clear() argument
373 if (is_shadow_present_pte(spte)) in count_spte_clear()
381 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
386 sspte = (union split_spte)spte; in __set_spte()
400 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
405 sspte = (union split_spte)spte; in __update_clear_spte_fast()
416 count_spte_clear(sptep, spte); in __update_clear_spte_fast()
419 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
424 sspte = (union split_spte)spte; in __update_clear_spte_slow()
430 count_spte_clear(sptep, spte); in __update_clear_spte_slow()
432 return orig.spte; in __update_clear_spte_slow()
456 union split_spte spte, *orig = (union split_spte *)sptep; in __get_spte_lockless() local
463 spte.spte_low = orig->spte_low; in __get_spte_lockless()
466 spte.spte_high = orig->spte_high; in __get_spte_lockless()
469 if (unlikely(spte.spte_low != orig->spte_low || in __get_spte_lockless()
473 return spte.spte; in __get_spte_lockless()
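On 32-bit hosts the 64-bit spte is stored as two 32-bit halves (the split_spte union at 366 onward), so __get_spte_lockless() reads the low word, then the high word, then re-checks that the low word (and a clear counter, omitted here) did not change underneath it. A simplified sketch of that retry loop, with the kernel's smp_rmb() barriers reduced to comments; it shows the shape of the pattern, not a faithful reimplementation.

    #include <stdint.h>

    union split_spte {
        struct {
            uint32_t spte_low;
            uint32_t spte_high;
        };
        uint64_t spte;
    };

    /*
     * Read a 64-bit spte that another context may update half-by-half.
     * If the low word changed between the two reads, the high word we
     * read may belong to a different spte value, so retry.
     */
    static uint64_t get_spte_lockless(volatile union split_spte *orig)
    {
        union split_spte spte;

    retry:
        spte.spte_low = orig->spte_low;
        /* smp_rmb() in the kernel: order the low read before the high read */
        spte.spte_high = orig->spte_high;
        /* smp_rmb() again before the consistency check */
        if (spte.spte_low != orig->spte_low)
            goto retry;

        return spte.spte;
    }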
477 static bool spte_is_locklessly_modifiable(u64 spte) in spte_is_locklessly_modifiable() argument
479 return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) == in spte_is_locklessly_modifiable()
483 static bool spte_has_volatile_bits(u64 spte) in spte_has_volatile_bits() argument
491 if (spte_is_locklessly_modifiable(spte)) in spte_has_volatile_bits()
497 if (!is_shadow_present_pte(spte)) in spte_has_volatile_bits()
500 if ((spte & shadow_accessed_mask) && in spte_has_volatile_bits()
501 (!is_writable_pte(spte) || (spte & shadow_dirty_mask))) in spte_has_volatile_bits()
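spte_is_locklessly_modifiable() at 477 only admits sptes that carry both software writable marks, and spte_has_volatile_bits() at 483 flags sptes whose accessed/dirty bits the hardware may still set behind KVM's back. A compact sketch of the two predicates; the bit positions are placeholders and the EPT-specific check from the original is dropped.

    #include <stdint.h>
    #include <stdbool.h>

    /* Placeholder bit assignments, not the kernel's. */
    #define SPTE_HOST_WRITEABLE  (1ull << 57)
    #define SPTE_MMU_WRITEABLE   (1ull << 58)
    #define SHADOW_ACCESSED_BIT  (1ull << 5)
    #define SHADOW_DIRTY_BIT     (1ull << 6)
    #define PT_WRITABLE_MASK     (1ull << 1)
    #define SPTE_PRESENT         (1ull << 0)

    /* Writable may be restored without mmu_lock only if both marks are set. */
    static bool spte_is_locklessly_modifiable(uint64_t spte)
    {
        return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
               (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
    }

    /*
     * If the accessed bit is already set and nothing more can be set
     * (not writable, or dirty already set), the spte is stable; otherwise
     * updates must go through an atomic exchange rather than a plain store.
     */
    static bool spte_has_volatile_bits(uint64_t spte)
    {
        if (spte_is_locklessly_modifiable(spte))
            return true;
        if (!(spte & SPTE_PRESENT))
            return false;
        if ((spte & SHADOW_ACCESSED_BIT) &&
            (!(spte & PT_WRITABLE_MASK) || (spte & SHADOW_DIRTY_BIT)))
            return false;
        return true;
    }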
923 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, in pte_list_add() argument
930 rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); in pte_list_add()
931 *pte_list = (unsigned long)spte; in pte_list_add()
933 rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); in pte_list_add()
936 desc->sptes[1] = spte; in pte_list_add()
940 rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); in pte_list_add()
952 desc->sptes[i] = spte; in pte_list_add()
979 static void pte_list_remove(u64 *spte, unsigned long *pte_list) in pte_list_remove() argument
986 printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte); in pte_list_remove()
989 rmap_printk("pte_list_remove: %p 1->0\n", spte); in pte_list_remove()
990 if ((u64 *)*pte_list != spte) { in pte_list_remove()
991 printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte); in pte_list_remove()
996 rmap_printk("pte_list_remove: %p many->many\n", spte); in pte_list_remove()
1001 if (desc->sptes[i] == spte) { in pte_list_remove()
1010 pr_err("pte_list_remove: %p many->many\n", spte); in pte_list_remove()
1015 typedef void (*pte_list_walk_fn) (u64 *spte);
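pte_list_add() at 923 keeps an rmap head that is either empty, a single spte pointer, or a chain of descriptors once more than one spte maps the gfn (the 0->1, 1->many and many->many cases in the printks). A stripped-down user-space sketch of that representation, assuming spte pointers are at least 2-byte aligned so bit 0 can tag the head; pte_list_desc holds three entries like the kernel's, but allocation failure handling and the remove path are omitted.

    #include <stdint.h>
    #include <stdlib.h>

    #define PTE_LIST_EXT 3          /* entries per descriptor, as in the kernel */

    struct pte_list_desc {
        uint64_t *sptes[PTE_LIST_EXT];
        struct pte_list_desc *more;
    };

    /*
     * The head is a single tagged word: 0 means no mapping, an untagged
     * pointer means exactly one spte, and a pointer with bit 0 set means a
     * descriptor chain.  Returns how many sptes were already on the list.
     */
    static int pte_list_add(uint64_t *spte, unsigned long *pte_list)
    {
        struct pte_list_desc *desc;
        int i, count = 0;

        if (!*pte_list) {                               /* 0 -> 1 */
            *pte_list = (unsigned long)spte;
            return 0;
        }

        if (!(*pte_list & 1)) {                         /* 1 -> many */
            desc = calloc(1, sizeof(*desc));
            desc->sptes[0] = (uint64_t *)*pte_list;
            desc->sptes[1] = spte;
            *pte_list = (unsigned long)desc | 1;
            return 1;
        }

        /* many -> many: walk to the last descriptor, then find a free slot */
        desc = (struct pte_list_desc *)(*pte_list & ~1ul);
        while (desc->sptes[PTE_LIST_EXT - 1] && desc->more) {
            desc = desc->more;
            count += PTE_LIST_EXT;
        }
        if (desc->sptes[PTE_LIST_EXT - 1]) {            /* last one is full */
            desc->more = calloc(1, sizeof(*desc));
            desc = desc->more;
            count += PTE_LIST_EXT;
        }
        for (i = 0; desc->sptes[i]; ++i)
            ++count;
        desc->sptes[i] = spte;
        return count;
    }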
1065 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_add() argument
1070 sp = page_header(__pa(spte)); in rmap_add()
1071 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); in rmap_add()
1073 return pte_list_add(vcpu, spte, rmapp); in rmap_add()
1076 static void rmap_remove(struct kvm *kvm, u64 *spte) in rmap_remove() argument
1082 sp = page_header(__pa(spte)); in rmap_remove()
1083 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); in rmap_remove()
1085 pte_list_remove(spte, rmapp); in rmap_remove()
1195 u64 spte = *sptep; in spte_write_protect() local
1197 if (!is_writable_pte(spte) && in spte_write_protect()
1198 !(pt_protect && spte_is_locklessly_modifiable(spte))) in spte_write_protect()
1204 spte &= ~SPTE_MMU_WRITEABLE; in spte_write_protect()
1205 spte = spte & ~PT_WRITABLE_MASK; in spte_write_protect()
1207 return mmu_spte_update(sptep, spte); in spte_write_protect()
1225 u64 spte = *sptep; in spte_clear_dirty() local
1229 spte &= ~shadow_dirty_mask; in spte_clear_dirty()
1231 return mmu_spte_update(sptep, spte); in spte_clear_dirty()
1248 u64 spte = *sptep; in spte_set_dirty() local
1252 spte |= shadow_dirty_mask; in spte_set_dirty()
1254 return mmu_spte_update(sptep, spte); in spte_set_dirty()
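spte_write_protect() at 1195 drops the hardware writable bit and, when pt_protect is set, also the MMU-writable mark so the fast path cannot restore it; spte_clear_dirty() and spte_set_dirty() toggle only the dirty bit. All three feed the new value back through mmu_spte_update(). A minimal sketch of the bit manipulation with placeholder masks; the early bail-out for already non-writable sptes and the atomic update machinery are omitted, with mmu_spte_update() reduced to a plain "did it change" store.

    #include <stdint.h>
    #include <stdbool.h>

    /* Placeholder bit layout for illustration. */
    #define PT_WRITABLE_MASK     (1ull << 1)
    #define SHADOW_DIRTY_MASK    (1ull << 6)
    #define SPTE_MMU_WRITEABLE   (1ull << 58)

    /* Stand-in for mmu_spte_update(): store and report whether it changed. */
    static bool spte_update(uint64_t *sptep, uint64_t new_spte)
    {
        bool changed = *sptep != new_spte;
        *sptep = new_spte;
        return changed;
    }

    static bool spte_write_protect(uint64_t *sptep, bool pt_protect)
    {
        uint64_t spte = *sptep;

        if (pt_protect)
            spte &= ~SPTE_MMU_WRITEABLE;   /* forbid lockless re-enable too */
        spte &= ~PT_WRITABLE_MASK;         /* future guest writes will fault */

        return spte_update(sptep, spte);
    }

    static bool spte_clear_dirty(uint64_t *sptep)
    {
        return spte_update(sptep, *sptep & ~SHADOW_DIRTY_MASK);
    }

    static bool spte_set_dirty(uint64_t *sptep)
    {
        return spte_update(sptep, *sptep | SHADOW_DIRTY_MASK);
    }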
1614 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_recycle() argument
1619 sp = page_header(__pa(spte)); in rmap_recycle()
1746 static void mark_unsync(u64 *spte);
1752 static void mark_unsync(u64 *spte) in mark_unsync() argument
1757 sp = page_header(__pa(spte)); in mark_unsync()
1758 index = spte - sp->spt; in mark_unsync()
1777 struct kvm_mmu_page *sp, u64 *spte, in nonpaging_update_pte() argument
2069 static void clear_sp_write_flooding_count(u64 *spte) in clear_sp_write_flooding_count() argument
2071 struct kvm_mmu_page *sp = page_header(__pa(spte)); in clear_sp_write_flooding_count()
2185 u64 spte) in __shadow_walk_next() argument
2187 if (is_last_spte(spte, iterator->level)) { in __shadow_walk_next()
2192 iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK; in __shadow_walk_next()
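__shadow_walk_next() at 2185 terminates the walk once it reaches a last-level spte and otherwise descends by taking the child table's physical address out of the current spte and dropping one level. A small sketch of that iterator step; the struct and the is_last flag (standing in for is_last_spte(spte, level)) are simplifications, and the address mask is illustrative.

    #include <stdint.h>
    #include <stdbool.h>

    #define PT64_BASE_ADDR_MASK  0x000ffffffffff000ull  /* bits 12..51, illustrative */

    struct shadow_walk_iterator {
        uint64_t shadow_addr;   /* physical address of the current table */
        int level;              /* 4 = top level ... 1 = leaf PTE level */
    };

    static void shadow_walk_next(struct shadow_walk_iterator *it,
                                 uint64_t spte, bool is_last)
    {
        if (is_last) {
            it->level = 0;      /* level 0 terminates the walk */
            return;
        }
        it->shadow_addr = spte & PT64_BASE_ADDR_MASK;   /* child page table */
        --it->level;
    }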
2203 u64 spte; in link_shadow_page() local
2208 spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK | in link_shadow_page()
2212 spte |= shadow_accessed_mask; in link_shadow_page()
2214 mmu_spte_set(sptep, spte); in link_shadow_page()
2240 u64 *spte) in mmu_page_zap_pte() argument
2245 pte = *spte; in mmu_page_zap_pte()
2248 drop_spte(kvm, spte); in mmu_page_zap_pte()
2253 drop_parent_pte(child, spte); in mmu_page_zap_pte()
2259 mmu_spte_clear_no_track(spte); in mmu_page_zap_pte()
2492 u64 spte; in set_spte() local
2498 spte = PT_PRESENT_MASK; in set_spte()
2500 spte |= shadow_accessed_mask; in set_spte()
2503 spte |= shadow_x_mask; in set_spte()
2505 spte |= shadow_nx_mask; in set_spte()
2508 spte |= shadow_user_mask; in set_spte()
2511 spte |= PT_PAGE_SIZE_MASK; in set_spte()
2513 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, in set_spte()
2517 spte |= SPTE_HOST_WRITEABLE; in set_spte()
2521 spte |= (u64)pfn << PAGE_SHIFT; in set_spte()
2535 spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; in set_spte()
2551 spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); in set_spte()
2557 spte |= shadow_dirty_mask; in set_spte()
2561 if (mmu_spte_update(sptep, spte)) in set_spte()
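The set_spte() fragments from 2492 to 2561 show the leaf spte being assembled bit by bit: present, accessed, NX or executable, user, large-page, the memory type from get_mt_mask, the host/MMU writable marks, the pfn, and finally the dirty bit, before mmu_spte_update() installs it. A compressed sketch of that composition with invented mask values and only a few of the original decisions kept.

    #include <stdint.h>
    #include <stdbool.h>

    /* Invented bit layout, for shape only. */
    #define PT_PRESENT_MASK      (1ull << 0)
    #define PT_WRITABLE_MASK     (1ull << 1)
    #define PT_USER_MASK         (1ull << 2)
    #define PT_PAGE_SIZE_MASK    (1ull << 7)
    #define SHADOW_ACCESSED_MASK (1ull << 5)
    #define SHADOW_DIRTY_MASK    (1ull << 6)
    #define SHADOW_NX_MASK       (1ull << 63)
    #define SPTE_HOST_WRITEABLE  (1ull << 57)
    #define SPTE_MMU_WRITEABLE   (1ull << 58)
    #define PAGE_SHIFT           12

    struct spte_req {
        uint64_t pfn;
        bool user, exec, write, host_writable, dirty, large;
    };

    static uint64_t make_leaf_spte(const struct spte_req *r)
    {
        uint64_t spte = PT_PRESENT_MASK | SHADOW_ACCESSED_MASK;

        if (!r->exec)
            spte |= SHADOW_NX_MASK;
        if (r->user)
            spte |= PT_USER_MASK;
        if (r->large)
            spte |= PT_PAGE_SIZE_MASK;
        if (r->host_writable)
            spte |= SPTE_HOST_WRITEABLE;

        spte |= r->pfn << PAGE_SHIFT;              /* target physical frame */

        if (r->write) {
            spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
            if (r->dirty)
                spte |= SHADOW_DIRTY_MASK;
        }
        return spte;
    }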
2671 u64 *spte, *start = NULL; in __direct_pte_prefetch() local
2677 spte = sp->spt + i; in __direct_pte_prefetch()
2679 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { in __direct_pte_prefetch()
2680 if (is_shadow_present_pte(*spte) || spte == sptep) { in __direct_pte_prefetch()
2683 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) in __direct_pte_prefetch()
2687 start = spte; in __direct_pte_prefetch()
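__direct_pte_prefetch() at 2671 scans the PTE_PREFETCH_NUM-aligned window of sptes around the faulting one and batches up contiguous empty entries, flushing the batch whenever it hits a present spte or the faulting spte itself. A sketch of that scan with the actual prefetch replaced by a callback; the end-of-window handling is simplified relative to the original, and the names here are stand-ins.

    #include <stdint.h>
    #include <stddef.h>

    #define PTE_PREFETCH_NUM 8

    /* Called for each run [start, end) of not-yet-present sptes. */
    typedef void (*prefetch_run_fn)(uint64_t *start, uint64_t *end);

    static int is_present(uint64_t spte)
    {
        return spte & 1;            /* placeholder present bit */
    }

    /*
     * Walk the PTE_PREFETCH_NUM-aligned window containing sptep and hand
     * maximal runs of empty entries to the callback, skipping the faulting
     * spte itself (it is installed by the regular fault path).
     */
    static void direct_pte_prefetch(uint64_t *spt, uint64_t *sptep,
                                    prefetch_run_fn prefetch)
    {
        size_t idx = (sptep - spt) & ~(PTE_PREFETCH_NUM - 1);
        uint64_t *spte = spt + idx;
        uint64_t *start = NULL;
        int i;

        for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
            if (is_present(*spte) || spte == sptep) {
                if (start)
                    prefetch(start, spte);
                start = NULL;
            } else if (!start) {
                start = spte;
            }
        }
        if (start)
            prefetch(start, spte);
    }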
2864 u64 *sptep, u64 spte) in fast_pf_fix_direct_spte() argument
2888 if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte) in fast_pf_fix_direct_spte()
2905 u64 spte = 0ull; in fast_page_fault() local
2914 for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) in fast_page_fault()
2915 if (!is_shadow_present_pte(spte) || iterator.level < level) in fast_page_fault()
2922 if (!is_rmap_spte(spte)) { in fast_page_fault()
2928 if (!is_last_spte(spte, sp->role.level)) in fast_page_fault()
2937 if (is_writable_pte(spte)) { in fast_page_fault()
2946 if (!spte_is_locklessly_modifiable(spte)) in fast_page_fault()
2967 ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte); in fast_page_fault()
2970 spte, ret); in fast_page_fault()
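fast_pf_fix_direct_spte() at 2864 restores the writable bit without taking mmu_lock: the fix only takes effect if a cmpxchg finds the spte unchanged since the lockless walk, so a concurrent zap or write-protect simply makes the fast path fall back. A sketch of that compare-and-swap step using GCC/Clang atomic builtins in place of the kernel's cmpxchg64.

    #include <stdint.h>
    #include <stdbool.h>

    #define PT_WRITABLE_MASK (1ull << 1)   /* placeholder writable bit */

    /*
     * Try to flip the spte from the value observed during the lockless walk
     * to the same value with the writable bit set.  If anything changed the
     * spte in the meantime, the exchange fails and the caller retries or
     * takes the slow path under mmu_lock.
     */
    static bool fast_pf_fix_direct_spte(uint64_t *sptep, uint64_t old_spte)
    {
        uint64_t expected = old_spte;

        return __atomic_compare_exchange_n(sptep, &expected,
                                           old_spte | PT_WRITABLE_MASK,
                                           false /* strong */,
                                           __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
    }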
3303 static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) in is_shadow_zero_bits_set() argument
3305 return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); in is_shadow_zero_bits_set()
3321 u64 sptes[PT64_ROOT_LEVEL], spte = 0ull; in walk_shadow_page_get_mmio_spte() local
3333 __shadow_walk_next(&iterator, spte)) { in walk_shadow_page_get_mmio_spte()
3334 spte = mmu_spte_get_lockless(iterator.sptep); in walk_shadow_page_get_mmio_spte()
3336 sptes[leaf - 1] = spte; in walk_shadow_page_get_mmio_spte()
3339 if (!is_shadow_present_pte(spte)) in walk_shadow_page_get_mmio_spte()
3342 reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, in walk_shadow_page_get_mmio_spte()
3358 *sptep = spte; in walk_shadow_page_get_mmio_spte()
3364 u64 spte; in handle_mmio_page_fault() local
3370 reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); in handle_mmio_page_fault()
3374 if (is_mmio_spte(spte)) { in handle_mmio_page_fault()
3375 gfn_t gfn = get_mmio_spte_gfn(spte); in handle_mmio_page_fault()
3376 unsigned access = get_mmio_spte_access(spte); in handle_mmio_page_fault()
3378 if (!check_mmio_spte(vcpu, spte)) in handle_mmio_page_fault()
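handle_mmio_page_fault() at 3364 walks the shadow page table locklessly, and if the spte it finds carries the MMIO marker it recovers the gfn and access bits that were stashed in it when the MMIO spte was created, then validates the generation. A decoding sketch mirroring the shape of is_mmio_spte()/get_mmio_spte_gfn()/get_mmio_spte_access(); the marker and generation masks are invented for illustration.

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT        12
    #define PAGE_MASK         (~((1ull << PAGE_SHIFT) - 1))
    /* Invented: marker bits identifying an MMIO spte plus the generation bits. */
    #define SHADOW_MMIO_MASK  (3ull << 52)
    #define MMIO_GEN_MASK     (0xffull << 54)

    static bool is_mmio_spte(uint64_t spte)
    {
        return (spte & SHADOW_MMIO_MASK) == SHADOW_MMIO_MASK;
    }

    /* Everything outside the marker/generation bits encodes gfn and access. */
    static uint64_t get_mmio_spte_gfn(uint64_t spte)
    {
        uint64_t mask = SHADOW_MMIO_MASK | MMIO_GEN_MASK;
        return (spte & ~mask) >> PAGE_SHIFT;
    }

    static unsigned get_mmio_spte_access(uint64_t spte)
    {
        uint64_t mask = SHADOW_MMIO_MASK | MMIO_GEN_MASK;
        return (unsigned)((spte & ~mask) & ~PAGE_MASK);
    }

    int main(void)
    {
        uint64_t gfn = 0xfee00, access = 0x6;             /* example values */
        uint64_t spte = SHADOW_MMIO_MASK | (gfn << PAGE_SHIFT) | access;

        if (is_mmio_spte(spte))
            printf("gfn=%#llx access=%#x\n",
                   (unsigned long long)get_mmio_spte_gfn(spte),
                   get_mmio_spte_access(spte));
        return 0;
    }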
4136 struct kvm_mmu_page *sp, u64 *spte, in mmu_pte_write_new_pte() argument
4145 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); in mmu_pte_write_new_pte()
4256 u64 *spte; in get_written_sptes() local
4280 spte = &sp->spt[page_offset / sizeof(*spte)]; in get_written_sptes()
4281 return spte; in get_written_sptes()
4290 u64 entry, gentry, *spte; in kvm_mmu_pte_write() local
4335 spte = get_written_sptes(sp, gpa, &npte); in kvm_mmu_pte_write()
4336 if (!spte) in kvm_mmu_pte_write()
4341 entry = *spte; in kvm_mmu_pte_write()
4342 mmu_page_zap_pte(vcpu->kvm, sp, spte); in kvm_mmu_pte_write()
4346 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); in kvm_mmu_pte_write()
4347 if (need_remote_flush(entry, *spte)) in kvm_mmu_pte_write()
4349 ++spte; in kvm_mmu_pte_write()
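The kvm_mmu_pte_write() fragments from 4290 to 4349 show the guest-PTE write-tracking path: locate the shadowed sptes covering the written gpa, and for each one remember the old value, zap it, re-derive it from the new guest entry, and request a remote TLB flush if the change matters to other vcpus. A schematic of that per-spte loop; the helpers below are stand-ins for mmu_page_zap_pte(), mmu_pte_write_new_pte() and need_remote_flush(), which in the kernel need vcpu and shadow-page context and a more careful flush decision.

    #include <stdint.h>
    #include <stdbool.h>

    /* Stand-ins for the real helpers; names and bodies are illustrative. */
    static void zap_spte(uint64_t *sptep)                { *sptep = 0; }
    static void refill_spte(uint64_t *sptep, uint64_t g) { *sptep = g; }
    static bool need_remote_flush(uint64_t old, uint64_t new) { return old != new; }

    /*
     * npte sptes starting at spte shadow the guest PTE that was just
     * written; gentry is the new guest entry value.
     */
    static bool pte_write_update(uint64_t *spte, int npte, uint64_t gentry)
    {
        bool remote_flush = false;

        while (npte--) {
            uint64_t entry = *spte;          /* old value, for the flush check */

            zap_spte(spte);
            if (gentry)
                refill_spte(spte, gentry);
            if (need_remote_flush(entry, *spte))
                remote_flush = true;
            ++spte;
        }
        return remote_flush;
    }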