Lines Matching refs:spte
161 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \ argument
164 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
165 __shadow_walk_next(&(_walker), spte))
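
The macro at lines 161-165 is the usual iterator idiom: initialize a walker, keep looping while the walker is still valid, load the current entry as a side effect of the loop condition, then step down one level. Below is a minimal user-space sketch of that shape over a toy multi-level table; the struct, helper names and bit layout are invented for illustration and are not the kernel's shadow_walk_* definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define LEVELS     4
#define ENTRIES    512                       /* 9 index bits per level */
#define PRESENT    (1ull << 0)
#define ADDR_MASK  0x000ffffffffff000ull     /* bits 12..51 */

struct walker {
        uint64_t *table;     /* current table page, NULL terminates the walk */
        uint64_t addr;       /* address being translated */
        int level;           /* counts down from LEVELS to 1 */
};

static int walk_index(const struct walker *w)
{
        return (w->addr >> (12 + 9 * (w->level - 1))) & (ENTRIES - 1);
}

static int walk_okay(const struct walker *w)
{
        return w->table != NULL && w->level >= 1;
}

static void walk_next(struct walker *w, uint64_t spte)
{
        if (!(spte & PRESENT)) {
                w->table = NULL;             /* nothing below this entry: stop */
                return;
        }
        w->table = (uint64_t *)(uintptr_t)(spte & ADDR_MASK);
        w->level--;
}

/* Same shape as for_each_shadow_entry_lockless(): the loop condition both
 * validates the walker and loads the current entry into 'spte'. */
#define for_each_entry(w, root, a, spte)                                   \
        for ((w) = (struct walker){ .table = (root), .addr = (a),          \
                                    .level = LEVELS };                     \
             walk_okay(&(w)) &&                                            \
                (((spte) = (w).table[walk_index(&(w))]), 1);               \
             walk_next(&(w), (spte)))

int main(void)
{
        uint64_t addr = 0x7f1234567000ull;
        uint64_t *l4 = aligned_alloc(4096, 4096);
        uint64_t *l3 = aligned_alloc(4096, 4096);
        struct walker w;
        uint64_t spte;

        memset(l4, 0, 4096);
        memset(l3, 0, 4096);
        /* Link the level-4 entry covering 'addr' to the level-3 table. */
        l4[(addr >> (12 + 9 * 3)) & (ENTRIES - 1)] =
                ((uint64_t)(uintptr_t)l3 & ADDR_MASK) | PRESENT;

        for_each_entry(w, l4, addr, spte)
                printf("level %d: spte = %#llx\n", w.level,
                       (unsigned long long)spte);

        free(l3);
        free(l4);
        return 0;
}
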
178 static void mmu_spte_set(u64 *sptep, u64 spte);
215 static unsigned int get_mmio_spte_generation(u64 spte) in get_mmio_spte_generation() argument
219 spte &= ~shadow_mmio_mask; in get_mmio_spte_generation()
221 gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK; in get_mmio_spte_generation()
222 gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT; in get_mmio_spte_generation()
244 static bool is_mmio_spte(u64 spte) in is_mmio_spte() argument
246 return (spte & shadow_mmio_mask) == shadow_mmio_mask; in is_mmio_spte()
249 static gfn_t get_mmio_spte_gfn(u64 spte) in get_mmio_spte_gfn() argument
252 return (spte & ~mask) >> PAGE_SHIFT; in get_mmio_spte_gfn()
255 static unsigned get_mmio_spte_access(u64 spte) in get_mmio_spte_access() argument
258 return (spte & ~mask) & ~PAGE_MASK; in get_mmio_spte_access()
272 static bool check_mmio_spte(struct kvm *kvm, u64 spte) in check_mmio_spte() argument
277 spte_gen = get_mmio_spte_generation(spte); in check_mmio_spte()
279 trace_check_mmio_spte(spte, kvm_gen, spte_gen); in check_mmio_spte()
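
Lines 215-222 undo the packing done when an MMIO spte is created: the generation counter is split into a low slice stored just above the low control bits and a high slice stored in the high software-available bits. The sketch below round-trips a generation through that split; the shift and width constants are stand-ins, not the kernel's exact MMIO_* values (which changed across releases).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only; not the kernel's exact constants. */
#define GEN_LOW_SHIFT    3    /* where the low generation bits live */
#define GEN_LOW_BITS     9    /* how many of them there are         */
#define GEN_HIGH_SHIFT   52   /* where the remaining bits live      */
#define GEN_LOW_MASK     ((1u << GEN_LOW_BITS) - 1)

static uint64_t generation_mmio_spte_mask(unsigned int gen)
{
        uint64_t mask;

        /* Scatter the generation into the two reserved ranges. */
        mask  = (uint64_t)(gen & GEN_LOW_MASK) << GEN_LOW_SHIFT;
        mask |= (uint64_t)(gen >> GEN_LOW_BITS) << GEN_HIGH_SHIFT;
        return mask;
}

static unsigned int get_mmio_spte_generation(uint64_t spte)
{
        unsigned int gen;

        /* Gather the two ranges back into one number (cf. lines 221-222). */
        gen  = (spte >> GEN_LOW_SHIFT) & GEN_LOW_MASK;
        gen |= (spte >> GEN_HIGH_SHIFT) << GEN_LOW_BITS;
        return gen;
}

int main(void)
{
        unsigned int gen = 0x1abc;   /* arbitrary test value */
        uint64_t spte = generation_mmio_spte_mask(gen);

        assert(get_mmio_spte_generation(spte) == gen);
        printf("gen %#x round-trips through spte %#llx\n",
               gen, (unsigned long long)spte);
        return 0;
}
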
341 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
343 *sptep = spte; in __set_spte()
346 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
348 *sptep = spte; in __update_clear_spte_fast()
351 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
353 return xchg(sptep, spte); in __update_clear_spte_slow()
366 u64 spte; member
369 static void count_spte_clear(u64 *sptep, u64 spte) in count_spte_clear() argument
373 if (is_shadow_present_pte(spte)) in count_spte_clear()
381 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
386 sspte = (union split_spte)spte; in __set_spte()
400 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
405 sspte = (union split_spte)spte; in __update_clear_spte_fast()
416 count_spte_clear(sptep, spte); in __update_clear_spte_fast()
419 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
424 sspte = (union split_spte)spte; in __update_clear_spte_slow()
430 count_spte_clear(sptep, spte); in __update_clear_spte_slow()
432 return orig.spte; in __update_clear_spte_slow()
456 union split_spte spte, *orig = (union split_spte *)sptep; in __get_spte_lockless() local
463 spte.spte_low = orig->spte_low; in __get_spte_lockless()
466 spte.spte_high = orig->spte_high; in __get_spte_lockless()
469 if (unlikely(spte.spte_low != orig->spte_low || in __get_spte_lockless()
473 return spte.spte; in __get_spte_lockless()
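
On 32-bit hosts the union split_spte code above stores a 64-bit spte as two 32-bit halves, and __get_spte_lockless() (lines 456-473) retries whenever the low half or the page's clear counter changed underneath it. The user-space sketch below reproduces that retry loop with C11 atomics; the counter is a single global here rather than per shadow page, and the barriers are simplified relative to mmu.c.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

union split_spte {
        struct {
                uint32_t spte_low;
                uint32_t spte_high;
        };
        uint64_t spte;
};

/* Bumped by writers that clear an spte, checked by lockless readers. */
static atomic_int clear_spte_count;

static uint64_t get_spte_lockless(volatile union split_spte *orig)
{
        union split_spte spte;
        int count;

retry:
        count = atomic_load(&clear_spte_count);
        atomic_thread_fence(memory_order_acquire);

        spte.spte_low = orig->spte_low;
        atomic_thread_fence(memory_order_acquire);
        spte.spte_high = orig->spte_high;
        atomic_thread_fence(memory_order_acquire);

        /* A racing writer shows up as a changed low half or counter. */
        if (spte.spte_low != orig->spte_low ||
            count != atomic_load(&clear_spte_count))
                goto retry;

        return spte.spte;
}

int main(void)
{
        union split_spte s = { .spte = 0x12345678deadbeefull };

        printf("read back %#llx\n",
               (unsigned long long)get_spte_lockless(&s));
        return 0;
}
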
477 static bool spte_is_locklessly_modifiable(u64 spte) in spte_is_locklessly_modifiable() argument
479 return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) == in spte_is_locklessly_modifiable()
483 static bool spte_has_volatile_bits(u64 spte) in spte_has_volatile_bits() argument
491 if (spte_is_locklessly_modifiable(spte)) in spte_has_volatile_bits()
497 if (!is_shadow_present_pte(spte)) in spte_has_volatile_bits()
500 if ((spte & shadow_accessed_mask) && in spte_has_volatile_bits()
501 (!is_writable_pte(spte) || (spte & shadow_dirty_mask))) in spte_has_volatile_bits()
898 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, in pte_list_add() argument
905 rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); in pte_list_add()
906 *pte_list = (unsigned long)spte; in pte_list_add()
908 rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); in pte_list_add()
911 desc->sptes[1] = spte; in pte_list_add()
915 rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); in pte_list_add()
927 desc->sptes[i] = spte; in pte_list_add()
954 static void pte_list_remove(u64 *spte, unsigned long *pte_list) in pte_list_remove() argument
961 printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte); in pte_list_remove()
964 rmap_printk("pte_list_remove: %p 1->0\n", spte); in pte_list_remove()
965 if ((u64 *)*pte_list != spte) { in pte_list_remove()
966 printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte); in pte_list_remove()
971 rmap_printk("pte_list_remove: %p many->many\n", spte); in pte_list_remove()
976 if (desc->sptes[i] == spte) { in pte_list_remove()
985 pr_err("pte_list_remove: %p many->many\n", spte); in pte_list_remove()
990 typedef void (*pte_list_walk_fn) (u64 *spte);
1038 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_add() argument
1043 sp = page_header(__pa(spte)); in rmap_add()
1044 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); in rmap_add()
1046 return pte_list_add(vcpu, spte, rmapp); in rmap_add()
1049 static void rmap_remove(struct kvm *kvm, u64 *spte) in rmap_remove() argument
1055 sp = page_header(__pa(spte)); in rmap_remove()
1056 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); in rmap_remove()
1058 pte_list_remove(spte, rmapp); in rmap_remove()
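
pte_list_add()/pte_list_remove() and the rmap_add()/rmap_remove() wrappers keep the reverse map compact: a single spte is stored directly in the head word, and only when a second spte appears is the head switched to a descriptor holding a small array, tagged by its low bit. A self-contained sketch of that one-vs-many encoding follows; the descriptor size is illustrative and descriptor chaining and removal are omitted.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DESC_SPTES 3                 /* entries per descriptor (illustrative) */

struct pte_list_desc {
        uint64_t *sptes[DESC_SPTES];
        struct pte_list_desc *more;  /* chaining, unused in this sketch */
};

/* Low bit set: head points at a descriptor; clear: head is a single spte. */
static int is_desc(unsigned long head) { return head & 1ul; }
static struct pte_list_desc *to_desc(unsigned long head)
{
        return (struct pte_list_desc *)(head & ~1ul);
}

static void pte_list_add(unsigned long *head, uint64_t *spte)
{
        struct pte_list_desc *desc;
        int i;

        if (!*head) {                            /* 0 -> 1 */
                *head = (unsigned long)spte;
                return;
        }
        if (!is_desc(*head)) {                   /* 1 -> many */
                desc = calloc(1, sizeof(*desc));
                desc->sptes[0] = (uint64_t *)*head;
                desc->sptes[1] = spte;
                *head = (unsigned long)desc | 1ul;
                return;
        }
        desc = to_desc(*head);                   /* many -> many */
        for (i = 0; i < DESC_SPTES; i++) {
                if (!desc->sptes[i]) {
                        desc->sptes[i] = spte;
                        return;
                }
        }
        abort();  /* a real implementation would chain via desc->more */
}

int main(void)
{
        unsigned long head = 0;
        uint64_t a, b;

        pte_list_add(&head, &a);
        assert(!is_desc(head));
        pte_list_add(&head, &b);
        assert(is_desc(head) && to_desc(head)->sptes[1] == &b);
        printf("one-or-many rmap encoding works\n");
        free(to_desc(head));
        return 0;
}
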
1163 u64 spte = *sptep; in spte_write_protect() local
1165 if (!is_writable_pte(spte) && in spte_write_protect()
1166 !(pt_protect && spte_is_locklessly_modifiable(spte))) in spte_write_protect()
1172 spte &= ~SPTE_MMU_WRITEABLE; in spte_write_protect()
1173 spte = spte & ~PT_WRITABLE_MASK; in spte_write_protect()
1175 return mmu_spte_update(sptep, spte); in spte_write_protect()
1197 u64 spte = *sptep; in spte_clear_dirty() local
1201 spte &= ~shadow_dirty_mask; in spte_clear_dirty()
1203 return mmu_spte_update(sptep, spte); in spte_clear_dirty()
1224 u64 spte = *sptep; in spte_set_dirty() local
1228 spte |= shadow_dirty_mask; in spte_set_dirty()
1230 return mmu_spte_update(sptep, spte); in spte_set_dirty()
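
spte_write_protect(), spte_clear_dirty() and spte_set_dirty() share one shape: derive a new spte value, pass it to mmu_spte_update(), and report whether anything actually changed so the caller knows a TLB flush may be needed. A reduced sketch of that shape, with placeholder bit masks and a trivialized mmu_spte_update():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions, not the kernel's. */
#define PT_WRITABLE_MASK     (1ull << 1)
#define SPTE_MMU_WRITEABLE   (1ull << 61)
#define SHADOW_DIRTY_MASK    (1ull << 6)

/* Returns true if the stored spte changed (i.e. a flush may be due). */
static bool mmu_spte_update(uint64_t *sptep, uint64_t new_spte)
{
        uint64_t old = *sptep;

        *sptep = new_spte;
        return old != new_spte;
}

static bool spte_write_protect(uint64_t *sptep)
{
        uint64_t spte = *sptep;

        if (!(spte & PT_WRITABLE_MASK))
                return false;                    /* already read-only */

        spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
        return mmu_spte_update(sptep, spte);
}

static bool spte_clear_dirty(uint64_t *sptep)
{
        return mmu_spte_update(sptep, *sptep & ~SHADOW_DIRTY_MASK);
}

int main(void)
{
        uint64_t spte = PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE | SHADOW_DIRTY_MASK;

        printf("write-protect changed spte: %d\n", spte_write_protect(&spte));
        printf("clear-dirty changed spte:   %d\n", spte_clear_dirty(&spte));
        return 0;
}
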
1543 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_recycle() argument
1548 sp = page_header(__pa(spte)); in rmap_recycle()
1675 static void mark_unsync(u64 *spte);
1681 static void mark_unsync(u64 *spte) in mark_unsync() argument
1686 sp = page_header(__pa(spte)); in mark_unsync()
1687 index = spte - sp->spt; in mark_unsync()
1706 struct kvm_mmu_page *sp, u64 *spte, in nonpaging_update_pte() argument
1998 static void clear_sp_write_flooding_count(u64 *spte) in clear_sp_write_flooding_count() argument
2000 struct kvm_mmu_page *sp = page_header(__pa(spte)); in clear_sp_write_flooding_count()
2114 u64 spte) in __shadow_walk_next() argument
2116 if (is_last_spte(spte, iterator->level)) { in __shadow_walk_next()
2121 iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK; in __shadow_walk_next()
2132 u64 spte; in link_shadow_page() local
2137 spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK | in link_shadow_page()
2141 spte |= shadow_accessed_mask; in link_shadow_page()
2143 mmu_spte_set(sptep, spte); in link_shadow_page()
2169 u64 *spte) in mmu_page_zap_pte() argument
2174 pte = *spte; in mmu_page_zap_pte()
2177 drop_spte(kvm, spte); in mmu_page_zap_pte()
2182 drop_parent_pte(child, spte); in mmu_page_zap_pte()
2188 mmu_spte_clear_no_track(spte); in mmu_page_zap_pte()
2518 u64 spte; in set_spte() local
2524 spte = PT_PRESENT_MASK; in set_spte()
2526 spte |= shadow_accessed_mask; in set_spte()
2529 spte |= shadow_x_mask; in set_spte()
2531 spte |= shadow_nx_mask; in set_spte()
2534 spte |= shadow_user_mask; in set_spte()
2537 spte |= PT_PAGE_SIZE_MASK; in set_spte()
2539 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, in set_spte()
2543 spte |= SPTE_HOST_WRITEABLE; in set_spte()
2547 spte |= (u64)pfn << PAGE_SHIFT; in set_spte()
2561 spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; in set_spte()
2577 spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); in set_spte()
2583 spte |= shadow_dirty_mask; in set_spte()
2587 if (mmu_spte_update(sptep, spte)) in set_spte()
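
set_spte() (lines 2518-2587) assembles the new entry by OR-ing together present, accessed, execute/NX, user, large-page, pfn and writable bits before handing the result to mmu_spte_update(). The sketch below shows the same composition in miniature; the mask values are placeholders rather than the real PT_*_MASK and shadow_*_mask definitions, and the memory-type and write-protection logic is omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder masks standing in for PT_*_MASK / shadow_*_mask. */
#define PT_PRESENT_MASK      (1ull << 0)
#define PT_WRITABLE_MASK     (1ull << 1)
#define PT_USER_MASK         (1ull << 2)
#define PT_PAGE_SIZE_MASK    (1ull << 7)
#define ACCESSED_MASK        (1ull << 5)
#define DIRTY_MASK           (1ull << 6)
#define NX_MASK              (1ull << 63)
#define PAGE_SHIFT           12

struct spte_attrs {
        bool user;
        bool writable;
        bool exec;
        bool huge;
};

static uint64_t make_spte(uint64_t pfn, struct spte_attrs a)
{
        uint64_t spte = PT_PRESENT_MASK | ACCESSED_MASK;

        spte |= a.exec ? 0 : NX_MASK;            /* X vs NX, cf. lines 2529-2531 */
        if (a.user)
                spte |= PT_USER_MASK;
        if (a.huge)
                spte |= PT_PAGE_SIZE_MASK;       /* large page, cf. line 2537 */
        spte |= pfn << PAGE_SHIFT;               /* physical frame, cf. line 2547 */
        if (a.writable)
                spte |= PT_WRITABLE_MASK | DIRTY_MASK;
        return spte;
}

int main(void)
{
        uint64_t spte = make_spte(0x1234,
                        (struct spte_attrs){ .user = true, .writable = true,
                                             .exec = true });

        printf("spte = %#llx\n", (unsigned long long)spte);
        return 0;
}
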
2695 u64 *spte, *start = NULL; in __direct_pte_prefetch() local
2701 spte = sp->spt + i; in __direct_pte_prefetch()
2703 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { in __direct_pte_prefetch()
2704 if (is_shadow_present_pte(*spte) || spte == sptep) { in __direct_pte_prefetch()
2707 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) in __direct_pte_prefetch()
2711 start = spte; in __direct_pte_prefetch()
2888 u64 *sptep, u64 spte) in fast_pf_fix_direct_spte() argument
2912 if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte) in fast_pf_fix_direct_spte()
2929 u64 spte = 0ull; in fast_page_fault() local
2938 for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) in fast_page_fault()
2939 if (!is_shadow_present_pte(spte) || iterator.level < level) in fast_page_fault()
2946 if (!is_rmap_spte(spte)) { in fast_page_fault()
2952 if (!is_last_spte(spte, sp->role.level)) in fast_page_fault()
2961 if (is_writable_pte(spte)) { in fast_page_fault()
2970 if (!spte_is_locklessly_modifiable(spte)) in fast_page_fault()
2991 ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte); in fast_page_fault()
2994 spte, ret); in fast_page_fault()
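
fast_pf_fix_direct_spte() (line 2912) repairs a write-protected spte without taking mmu_lock: it flips the writable bit only if the spte still holds the exact value read earlier, so a concurrent change makes the cmpxchg fail and the fault falls back to the slow path. A user-space sketch of that compare-and-swap with C11 atomics; the writable bit position is a placeholder.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK (1ull << 1)   /* placeholder bit position */

/*
 * Make the spte writable only if nobody changed it since we looked at it;
 * on failure the caller retries the fault the slow way.
 */
static bool fast_pf_fix_direct_spte(_Atomic uint64_t *sptep, uint64_t old_spte)
{
        uint64_t expected = old_spte;

        return atomic_compare_exchange_strong(sptep, &expected,
                                              old_spte | PT_WRITABLE_MASK);
}

int main(void)
{
        _Atomic uint64_t spte = 0x1234000ull;
        uint64_t seen = atomic_load(&spte);

        printf("fixed: %d, spte now %#llx\n",
               fast_pf_fix_direct_spte(&spte, seen),
               (unsigned long long)atomic_load(&spte));
        return 0;
}
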
3326 u64 spte = 0ull; in walk_shadow_page_get_mmio_spte() local
3329 return spte; in walk_shadow_page_get_mmio_spte()
3332 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) in walk_shadow_page_get_mmio_spte()
3333 if (!is_shadow_present_pte(spte)) in walk_shadow_page_get_mmio_spte()
3337 return spte; in walk_shadow_page_get_mmio_spte()
3342 u64 spte; in handle_mmio_page_fault_common() local
3347 spte = walk_shadow_page_get_mmio_spte(vcpu, addr); in handle_mmio_page_fault_common()
3349 if (is_mmio_spte(spte)) { in handle_mmio_page_fault_common()
3350 gfn_t gfn = get_mmio_spte_gfn(spte); in handle_mmio_page_fault_common()
3351 unsigned access = get_mmio_spte_access(spte); in handle_mmio_page_fault_common()
3353 if (!check_mmio_spte(vcpu->kvm, spte)) in handle_mmio_page_fault_common()
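
handle_mmio_page_fault_common() decodes what was cached in the MMIO spte: get_mmio_spte_gfn() masks off the control/generation bits and shifts out the page offset (line 252), while get_mmio_spte_access() keeps only the low permission bits (line 258). A small sketch of that encode/decode pair; the control mask here is illustrative, not the kernel's shadow_mmio_mask.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_MASK   (~((1ull << PAGE_SHIFT) - 1))
/* Illustrative control bits standing in for shadow_mmio_mask + generation. */
#define CTRL_MASK   ((1ull << 62) | (1ull << 61))

static uint64_t make_mmio_spte(uint64_t gfn, unsigned int access)
{
        return CTRL_MASK | (gfn << PAGE_SHIFT) | access;
}

static uint64_t get_mmio_spte_gfn(uint64_t spte)
{
        return (spte & ~CTRL_MASK) >> PAGE_SHIFT;    /* cf. line 252 */
}

static unsigned int get_mmio_spte_access(uint64_t spte)
{
        return (spte & ~CTRL_MASK) & ~PAGE_MASK;     /* cf. line 258 */
}

int main(void)
{
        uint64_t spte = make_mmio_spte(0xfee00, 0x3);

        assert(get_mmio_spte_gfn(spte) == 0xfee00);
        assert(get_mmio_spte_access(spte) == 0x3);
        printf("gfn %#llx access %#x\n",
               (unsigned long long)get_mmio_spte_gfn(spte),
               get_mmio_spte_access(spte));
        return 0;
}
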
4016 struct kvm_mmu_page *sp, u64 *spte, in mmu_pte_write_new_pte() argument
4025 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); in mmu_pte_write_new_pte()
4136 u64 *spte; in get_written_sptes() local
4160 spte = &sp->spt[page_offset / sizeof(*spte)]; in get_written_sptes()
4161 return spte; in get_written_sptes()
4170 u64 entry, gentry, *spte; in kvm_mmu_pte_write() local
4214 spte = get_written_sptes(sp, gpa, &npte); in kvm_mmu_pte_write()
4215 if (!spte) in kvm_mmu_pte_write()
4220 entry = *spte; in kvm_mmu_pte_write()
4221 mmu_page_zap_pte(vcpu->kvm, sp, spte); in kvm_mmu_pte_write()
4225 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); in kvm_mmu_pte_write()
4226 if (need_remote_flush(entry, *spte)) in kvm_mmu_pte_write()
4228 ++spte; in kvm_mmu_pte_write()
4843 u64 spte; in kvm_mmu_get_spte_hierarchy() local
4850 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { in kvm_mmu_get_spte_hierarchy()
4851 sptes[iterator.level-1] = spte; in kvm_mmu_get_spte_hierarchy()
4853 if (!is_shadow_present_pte(spte)) in kvm_mmu_get_spte_hierarchy()