Searched refs:spte (Results 1 - 12 of 12) sorted by relevance

/linux-4.4.14/arch/x86/kvm/
mmu.c
161 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
164 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
165 __shadow_walk_next(&(_walker), spte))
178 static void mmu_spte_set(u64 *sptep, u64 spte);
193 * spte bits 3-11 are used as bits 1-9 of the generation number,
215 static unsigned int get_mmio_spte_generation(u64 spte) get_mmio_spte_generation() argument
219 spte &= ~shadow_mmio_mask; get_mmio_spte_generation()
221 gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK; get_mmio_spte_generation()
222 gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT; get_mmio_spte_generation()
244 static bool is_mmio_spte(u64 spte) is_mmio_spte() argument
246 return (spte & shadow_mmio_mask) == shadow_mmio_mask; is_mmio_spte()
249 static gfn_t get_mmio_spte_gfn(u64 spte) get_mmio_spte_gfn() argument
252 return (spte & ~mask) >> PAGE_SHIFT; get_mmio_spte_gfn()
255 static unsigned get_mmio_spte_access(u64 spte) get_mmio_spte_access() argument
258 return (spte & ~mask) & ~PAGE_MASK; get_mmio_spte_access()
272 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) check_mmio_spte() argument
277 spte_gen = get_mmio_spte_generation(spte); check_mmio_spte()
279 trace_check_mmio_spte(spte, kvm_gen, spte_gen); check_mmio_spte()
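
The helpers above implement KVM's cached MMIO sptes: a not-present spte is tagged with an MMIO marker plus the gfn, the access bits and a generation number, so stale entries left over from memslot changes can be detected on the next fault. A minimal standalone sketch of that packing follows; the masks and the exact bit split are illustrative assumptions, not the shadow_mmio_mask/MMIO_*_SHIFT constants from mmu.c.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define SHADOW_MMIO_MASK	(3ull << 52)	/* assumed marker bits for an MMIO spte */
#define GEN_LOW_SHIFT		3		/* low generation bits in spte bits 3-11 */
#define GEN_LOW_BITS		9
#define GEN_LOW_MASK		((1u << GEN_LOW_BITS) - 1)
#define GEN_HIGH_SHIFT		54		/* remaining generation bits, above the marker */
#define PAGE_SHIFT		12

/* build a not-present MMIO spte carrying gfn, access bits and generation */
static uint64_t mark_mmio_spte(uint64_t gfn, unsigned int access, unsigned int gen)
{
	uint64_t spte = SHADOW_MMIO_MASK | (gfn << PAGE_SHIFT) | access;

	spte |= (uint64_t)(gen & GEN_LOW_MASK) << GEN_LOW_SHIFT;
	spte |= (uint64_t)(gen >> GEN_LOW_BITS) << GEN_HIGH_SHIFT;
	return spte;
}

static bool is_mmio_spte(uint64_t spte)
{
	return (spte & SHADOW_MMIO_MASK) == SHADOW_MMIO_MASK;
}

static unsigned int get_mmio_spte_generation(uint64_t spte)
{
	unsigned int gen;

	gen  = (spte >> GEN_LOW_SHIFT) & GEN_LOW_MASK;
	gen |= (spte >> GEN_HIGH_SHIFT) << GEN_LOW_BITS;
	return gen;
}

int main(void)
{
	uint64_t spte = mark_mmio_spte(0x1234, 0x3, 0x1ff);

	/* a fault on this spte can recover the marker and the generation */
	printf("mmio=%d gen=%#x\n", is_mmio_spte(spte), get_mmio_spte_generation(spte));
	return 0;
}
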
341 static void __set_spte(u64 *sptep, u64 spte) __set_spte() argument
343 *sptep = spte; __set_spte()
346 static void __update_clear_spte_fast(u64 *sptep, u64 spte) __update_clear_spte_fast() argument
348 *sptep = spte; __update_clear_spte_fast()
351 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) __update_clear_spte_slow() argument
353 return xchg(sptep, spte); __update_clear_spte_slow()
366 u64 spte; member in union:split_spte
369 static void count_spte_clear(u64 *sptep, u64 spte) count_spte_clear() argument
373 if (is_shadow_present_pte(spte)) count_spte_clear()
376 /* Ensure the spte is completely set before we increase the count */ count_spte_clear()
381 static void __set_spte(u64 *sptep, u64 spte) __set_spte() argument
386 sspte = (union split_spte)spte; __set_spte()
391 * If we map the spte from nonpresent to present, We should store __set_spte()
393 * fetch this spte while we are setting the spte. __set_spte()
400 static void __update_clear_spte_fast(u64 *sptep, u64 spte) __update_clear_spte_fast() argument
405 sspte = (union split_spte)spte; __update_clear_spte_fast()
410 * If we map the spte from present to nonpresent, we should clear __update_clear_spte_fast()
416 count_spte_clear(sptep, spte); __update_clear_spte_fast()
419 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) __update_clear_spte_slow() argument
424 sspte = (union split_spte)spte; __update_clear_spte_slow()
430 count_spte_clear(sptep, spte); __update_clear_spte_slow()
432 return orig.spte; __update_clear_spte_slow()
436 * The idea using the light way get the spte on x86_32 guest is from
439 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
441 * we need to protect against in-progress updates of the spte.
443 * Reading the spte while an update is in progress may get the old value
444 * for the high part of the spte. The race is fine for a present->non-present
445 * change (because the high part of the spte is ignored for non-present spte),
446 * but for a present->present change we must reread the spte.
450 * present->non-present updates: if it changed while reading the spte,
456 union split_spte spte, *orig = (union split_spte *)sptep; __get_spte_lockless() local
463 spte.spte_low = orig->spte_low; __get_spte_lockless()
466 spte.spte_high = orig->spte_high; __get_spte_lockless()
469 if (unlikely(spte.spte_low != orig->spte_low || __get_spte_lockless()
473 return spte.spte; __get_spte_lockless()
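
The split_spte code above is the 32-bit host path, where a 64-bit spte cannot be read or written atomically: writers store the low (present) half last when installing an spte and clear it first when tearing one down, and readers retry whenever the low half or a clear counter changed underneath them. Below is a simplified user-space sketch of the same protocol, with the barriers and the per-page clear_spte_count reduced to their essentials; it is not the mmu.c code.

#include <stdint.h>

union split_spte {
	struct {
		uint32_t spte_low;
		uint32_t spte_high;
	};
	uint64_t spte;
};

static volatile int clear_spte_count;		/* bumped on present -> non-present  */

static void set_spte_split(union split_spte *sptep, uint64_t val)
{
	union split_spte s = { .spte = val };

	sptep->spte_high = s.spte_high;		/* high word first ...               */
	__atomic_thread_fence(__ATOMIC_RELEASE);
	sptep->spte_low = s.spte_low;		/* ... present (low) word last       */
}

static uint64_t get_spte_lockless(union split_spte *orig)
{
	union split_spte spte;
	int count;

retry:
	count = clear_spte_count;		/* snapshot the writer's clear counter */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
	spte.spte_low = orig->spte_low;		/* low word first ...                */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
	spte.spte_high = orig->spte_high;	/* ... then the high word            */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);

	/* a concurrent update raced with us: the low word moved or a clear ran */
	if (spte.spte_low != orig->spte_low || count != clear_spte_count)
		goto retry;

	return spte.spte;
}

int main(void)
{
	union split_spte s = { .spte = 0 };

	set_spte_split(&s, 0x123456789abcdef0ull);
	return get_spte_lockless(&s) == 0x123456789abcdef0ull ? 0 : 1;
}
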
477 static bool spte_is_locklessly_modifiable(u64 spte) spte_is_locklessly_modifiable() argument
479 return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) == spte_is_locklessly_modifiable()
483 static bool spte_has_volatile_bits(u64 spte) spte_has_volatile_bits() argument
486 * Always atomicly update spte if it can be updated spte_has_volatile_bits()
491 if (spte_is_locklessly_modifiable(spte)) spte_has_volatile_bits()
497 if (!is_shadow_present_pte(spte)) spte_has_volatile_bits()
500 if ((spte & shadow_accessed_mask) && spte_has_volatile_bits()
501 (!is_writable_pte(spte) || (spte & shadow_dirty_mask))) spte_has_volatile_bits()
521 * the spte.
532 * Whenever we overwrite a writable spte with a read-only one we
534 * will find a read-only spte, even though the writable spte
556 * For the spte updated out of mmu-lock is safe, since mmu_spte_update()
619 * Directly clear spte without caring the state bits of sptep,
620 * it is used to set the upper level spte.
641 * Make sure a following spte read is not reordered ahead of the write walk_shadow_page_lockless_begin()
914 * If pte_list bit zero is zero, then pte_list point to the spte.
919 * Returns the number of pte entries before the spte was added or zero if
920 * the spte was not added.
923 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, pte_list_add() argument
930 rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); pte_list_add()
931 *pte_list = (unsigned long)spte; pte_list_add()
933 rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); pte_list_add()
936 desc->sptes[1] = spte; pte_list_add()
940 rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); pte_list_add()
952 desc->sptes[i] = spte; pte_list_add()
979 static void pte_list_remove(u64 *spte, unsigned long *pte_list) pte_list_remove() argument
986 printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte); pte_list_remove()
989 rmap_printk("pte_list_remove: %p 1->0\n", spte); pte_list_remove()
990 if ((u64 *)*pte_list != spte) { pte_list_remove()
991 printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte); pte_list_remove()
996 rmap_printk("pte_list_remove: %p many->many\n", spte); pte_list_remove()
1001 if (desc->sptes[i] == spte) { pte_list_remove()
1010 pr_err("pte_list_remove: %p many->many\n", spte); pte_list_remove()
1015 typedef void (*pte_list_walk_fn) (u64 *spte); pte_list_walk()
1065 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) rmap_add() argument
1070 sp = page_header(__pa(spte)); rmap_add()
1071 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); rmap_add()
1073 return pte_list_add(vcpu, spte, rmapp); rmap_add()
1076 static void rmap_remove(struct kvm *kvm, u64 *spte) rmap_remove() argument
1082 sp = page_header(__pa(spte)); rmap_remove()
1083 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); rmap_remove()
1085 pte_list_remove(spte, rmapp); rmap_remove()
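
pte_list_add() and pte_list_remove() above implement the reverse map as a tagged pointer: with bit 0 clear the rmap head points directly at a single spte, with bit 0 set it points at a pte_list_desc holding several sptes. A simplified sketch of the add path under those rules (descriptor chaining omitted, PTE_LIST_EXT value assumed):

#include <stdint.h>
#include <stdlib.h>

#define PTE_LIST_EXT 3				/* sptes per descriptor (assumed)     */

struct pte_list_desc {
	uint64_t *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;		/* chaining, unused in this sketch    */
};

/* returns the number of entries present before the new spte was added */
static int pte_list_add(uint64_t *spte, unsigned long *pte_list)
{
	struct pte_list_desc *desc;

	if (!*pte_list) {			/* 0 -> 1: point directly at the spte */
		*pte_list = (unsigned long)spte;
		return 0;
	}

	if (!(*pte_list & 1)) {			/* 1 -> many: switch to a descriptor  */
		desc = calloc(1, sizeof(*desc));
		desc->sptes[0] = (uint64_t *)*pte_list;
		desc->sptes[1] = spte;
		*pte_list = (unsigned long)desc | 1;
		return 1;
	}

	/* many -> many: use the first free slot (a full desc would need chaining) */
	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
	for (int i = 0; i < PTE_LIST_EXT; i++) {
		if (!desc->sptes[i]) {
			desc->sptes[i] = spte;
			return i;
		}
	}
	return PTE_LIST_EXT;
}

int main(void)
{
	unsigned long rmap_head = 0;
	uint64_t a, b, c;

	pte_list_add(&a, &rmap_head);		/* 0 -> 1       */
	pte_list_add(&b, &rmap_head);		/* 1 -> many    */
	pte_list_add(&c, &rmap_head);		/* many -> many */
	return 0;
}
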
1182 * spte write-protection is caused by protecting shadow page table.
1184 * Note: write protection is difference between dirty logging and spte
1186 * - for dirty logging, the spte can be set to writable at anytime if
1188 * - for spte protection, the spte can be writable only after unsync-ing
1195 u64 spte = *sptep; spte_write_protect() local
1197 if (!is_writable_pte(spte) && spte_write_protect()
1198 !(pt_protect && spte_is_locklessly_modifiable(spte))) spte_write_protect()
1201 rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); spte_write_protect()
1204 spte &= ~SPTE_MMU_WRITEABLE; spte_write_protect()
1205 spte = spte & ~PT_WRITABLE_MASK; spte_write_protect()
1207 return mmu_spte_update(sptep, spte); spte_write_protect()
1225 u64 spte = *sptep; spte_clear_dirty() local
1227 rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep); spte_clear_dirty()
1229 spte &= ~shadow_dirty_mask; spte_clear_dirty()
1231 return mmu_spte_update(sptep, spte); spte_clear_dirty()
1248 u64 spte = *sptep; spte_set_dirty() local
1250 rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep); spte_set_dirty()
1252 spte |= shadow_dirty_mask; spte_set_dirty()
1254 return mmu_spte_update(sptep, spte); spte_set_dirty()
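
spte_write_protect() and the dirty-bit helpers above encode the distinction called out in the comment at line 1184: dirty logging clears only the hardware writable bit and leaves the software-writable marker set, so the fast page fault path may later restore write access, while shadow-page protection (pt_protect) also clears that marker and forces the slow path. A compact sketch with assumed bit positions; the real code also checks SPTE_HOST_WRITEABLE and goes through mmu_spte_update():

#include <stdint.h>
#include <stdbool.h>

#define PT_WRITABLE_MASK	(1ull << 1)	/* hardware R/W bit                */
#define SPTE_MMU_WRITEABLE	(1ull << 10)	/* assumed software-writable bit   */

static bool spte_write_protect(uint64_t *sptep, bool pt_protect)
{
	uint64_t spte = *sptep;

	/* nothing to do if the spte is neither hardware- nor software-writable */
	if (!(spte & PT_WRITABLE_MASK) &&
	    !(pt_protect && (spte & SPTE_MMU_WRITEABLE)))
		return false;

	if (pt_protect)
		spte &= ~SPTE_MMU_WRITEABLE;	/* forbid the lockless fast fix    */
	spte &= ~PT_WRITABLE_MASK;		/* always drop the hardware W bit  */

	*sptep = spte;				/* mmu_spte_update() in the kernel */
	return true;
}

int main(void)
{
	uint64_t spte = PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;

	spte_write_protect(&spte, false);		/* dirty-logging flavour   */
	return (spte & SPTE_MMU_WRITEABLE) ? 0 : 1;	/* software bit survives   */
}
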
1367 rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep); kvm_zap_rmapp()
1399 rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n", kvm_set_pte_rmapp()
1614 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) rmap_recycle() argument
1619 sp = page_header(__pa(spte)); rmap_recycle()
1746 static void mark_unsync(u64 *spte); kvm_mmu_mark_parents_unsync()
1752 static void mark_unsync(u64 *spte) mark_unsync() argument
1757 sp = page_header(__pa(spte)); mark_unsync()
1758 index = spte - sp->spt; mark_unsync()
1777 struct kvm_mmu_page *sp, u64 *spte, nonpaging_update_pte()
2069 static void clear_sp_write_flooding_count(u64 *spte) clear_sp_write_flooding_count() argument
2071 struct kvm_mmu_page *sp = page_header(__pa(spte)); clear_sp_write_flooding_count()
2185 u64 spte) __shadow_walk_next()
2187 if (is_last_spte(spte, iterator->level)) { __shadow_walk_next()
2192 iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK; __shadow_walk_next()
2203 u64 spte; link_shadow_page() local
2208 spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK | link_shadow_page()
2212 spte |= shadow_accessed_mask; link_shadow_page()
2214 mmu_spte_set(sptep, spte); link_shadow_page()
2227 * so we should update the spte at this point to get validate_direct_spte()
2240 u64 *spte) mmu_page_zap_pte()
2245 pte = *spte; mmu_page_zap_pte()
2248 drop_spte(kvm, spte); mmu_page_zap_pte()
2253 drop_parent_pte(child, spte); mmu_page_zap_pte()
2259 mmu_spte_clear_no_track(spte); mmu_page_zap_pte()
2492 u64 spte; set_spte() local
2498 spte = PT_PRESENT_MASK; set_spte()
2500 spte |= shadow_accessed_mask; set_spte()
2503 spte |= shadow_x_mask; set_spte()
2505 spte |= shadow_nx_mask; set_spte()
2508 spte |= shadow_user_mask; set_spte()
2511 spte |= PT_PAGE_SIZE_MASK; set_spte()
2513 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, set_spte()
2517 spte |= SPTE_HOST_WRITEABLE; set_spte()
2521 spte |= (u64)pfn << PAGE_SHIFT; set_spte()
2535 spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; set_spte()
2538 * Optimization: for pte sync, if spte was writable the hash set_spte()
2551 spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); set_spte()
2557 spte |= shadow_dirty_mask; set_spte()
2561 if (mmu_spte_update(sptep, spte)) set_spte()
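
set_spte() above assembles a shadow pte from the host pfn plus present, accessed, exec/NX, user, page-size, memory-type, writable and dirty bits; the real masks are the shadow_*_mask variables configured at init time (EPT and legacy paging use different layouts). A reduced sketch of that composition with assumed bit positions:

#include <stdint.h>
#include <stdbool.h>

#define SPTE_PRESENT	(1ull << 0)
#define SPTE_WRITABLE	(1ull << 1)
#define SPTE_USER	(1ull << 2)
#define SPTE_ACCESSED	(1ull << 5)
#define SPTE_DIRTY	(1ull << 6)
#define SPTE_LARGE	(1ull << 7)
#define SPTE_NX		(1ull << 63)
#define PAGE_SHIFT	12

static uint64_t make_spte(uint64_t pfn, bool write, bool user, bool exec, bool large)
{
	uint64_t spte = SPTE_PRESENT | SPTE_ACCESSED;

	if (!exec)
		spte |= SPTE_NX;		/* executable mappings omit NX        */
	if (user)
		spte |= SPTE_USER;
	if (large)
		spte |= SPTE_LARGE;		/* 2M/1G mapping                      */

	spte |= pfn << PAGE_SHIFT;		/* physical frame of the backing page */

	if (write)
		spte |= SPTE_WRITABLE | SPTE_DIRTY;

	return spte;
}

int main(void)
{
	uint64_t spte = make_spte(0x1234, true, false, true, false);

	return (spte & SPTE_WRITABLE) ? 0 : 1;
}
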
2575 pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, mmu_set_spte()
2610 pgprintk("%s: setting spte %llx\n", __func__, *sptep); mmu_set_spte()
2671 u64 *spte, *start = NULL; __direct_pte_prefetch() local
2677 spte = sp->spt + i; __direct_pte_prefetch()
2679 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { __direct_pte_prefetch()
2680 if (is_shadow_present_pte(*spte) || spte == sptep) { __direct_pte_prefetch()
2683 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) __direct_pte_prefetch()
2687 start = spte; __direct_pte_prefetch()
2766 * into the spte otherwise read access on readonly gfn also can kvm_handle_bad_page()
2844 * Do not fix the mmio spte with invalid generation number which page_fault_can_be_fast()
2853 * W bit of the spte which can be done out of mmu-lock. page_fault_can_be_fast()
2864 u64 *sptep, u64 spte) fast_pf_fix_direct_spte()
2871 * The gfn of direct spte is stable since it is calculated fast_pf_fix_direct_spte()
2888 if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte) fast_pf_fix_direct_spte()
2905 u64 spte = 0ull; fast_page_fault() local
2914 for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) for_each_shadow_entry_lockless()
2915 if (!is_shadow_present_pte(spte) || iterator.level < level) for_each_shadow_entry_lockless()
2922 if (!is_rmap_spte(spte)) { for_each_shadow_entry_lockless()
2928 if (!is_last_spte(spte, sp->role.level))
2937 if (is_writable_pte(spte)) {
2943 * Currently, to simplify the code, only the spte write-protected
2946 if (!spte_is_locklessly_modifiable(spte))
2950 * Do not fix write-permission on the large spte since we only dirty
2954 * Instead, we let the slow page fault path create a normal spte to
2967 ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte);
2970 spte, ret);
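
fast_page_fault() above repairs write access without taking the mmu lock: it only touches sptes marked locklessly modifiable and sets the W bit with cmpxchg64, so the update is silently dropped if the spte changed or was zapped in the meantime. A minimal sketch of that compare-and-exchange step, using compiler atomics in place of the kernel's cmpxchg64():

#include <stdint.h>
#include <stdbool.h>

#define PT_WRITABLE_MASK	(1ull << 1)

/* set the W bit only if the spte still holds the value we inspected */
static bool fast_pf_fix_direct_spte(uint64_t *sptep, uint64_t old_spte)
{
	uint64_t new_spte = old_spte | PT_WRITABLE_MASK;

	return __atomic_compare_exchange_n(sptep, &old_spte, new_spte,
					   false, __ATOMIC_SEQ_CST,
					   __ATOMIC_SEQ_CST);
}

int main(void)
{
	uint64_t spte = 0x1000;			/* read-only spte in this sketch      */

	return fast_pf_fix_direct_spte(&spte, 0x1000) ? 0 : 1;
}
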
3303 static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) is_shadow_zero_bits_set() argument
3305 return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); is_shadow_zero_bits_set()
3316 /* return true if reserved bit is detected on spte. */
3321 u64 sptes[PT64_ROOT_LEVEL], spte = 0ull; walk_shadow_page_get_mmio_spte() local
3333 __shadow_walk_next(&iterator, spte)) { walk_shadow_page_get_mmio_spte()
3334 spte = mmu_spte_get_lockless(iterator.sptep); walk_shadow_page_get_mmio_spte()
3336 sptes[leaf - 1] = spte; walk_shadow_page_get_mmio_spte()
3339 if (!is_shadow_present_pte(spte)) walk_shadow_page_get_mmio_spte()
3342 reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, walk_shadow_page_get_mmio_spte()
3349 pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n", walk_shadow_page_get_mmio_spte()
3352 pr_err("------ spte 0x%llx level %d.\n", walk_shadow_page_get_mmio_spte()
3358 *sptep = spte; walk_shadow_page_get_mmio_spte()
3364 u64 spte; handle_mmio_page_fault() local
3370 reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); handle_mmio_page_fault()
3374 if (is_mmio_spte(spte)) { handle_mmio_page_fault()
3375 gfn_t gfn = get_mmio_spte_gfn(spte); handle_mmio_page_fault()
3376 unsigned access = get_mmio_spte_access(spte); handle_mmio_page_fault()
3378 if (!check_mmio_spte(vcpu, spte)) handle_mmio_page_fault()
4136 struct kvm_mmu_page *sp, u64 *spte, mmu_pte_write_new_pte()
4145 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); mmu_pte_write_new_pte()
4256 u64 *spte; get_written_sptes() local
4280 spte = &sp->spt[page_offset / sizeof(*spte)]; get_written_sptes()
4281 return spte; get_written_sptes()
4290 u64 entry, gentry, *spte; kvm_mmu_pte_write() local
4335 spte = get_written_sptes(sp, gpa, &npte); kvm_mmu_pte_write()
4336 if (!spte) kvm_mmu_pte_write()
4341 entry = *spte; kvm_mmu_pte_write()
4342 mmu_page_zap_pte(vcpu->kvm, sp, spte); kvm_mmu_pte_write()
4346 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); kvm_mmu_pte_write()
4347 if (need_remote_flush(entry, *spte)) kvm_mmu_pte_write()
4349 ++spte; kvm_mmu_pte_write()
4618 * corruption since we just change the spte from writable to kvm_mmu_slot_remove_write_access()
4620 * spte from present to present (changing the spte from present kvm_mmu_slot_remove_write_access()
1776 nonpaging_update_pte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *pte) nonpaging_update_pte() argument
2184 __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, u64 spte) __shadow_walk_next() argument
2239 mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *spte) mmu_page_zap_pte() argument
2863 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *sptep, u64 spte) fast_pf_fix_direct_spte() argument
4135 mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *new) mmu_pte_write_new_pte() argument
mmutrace.h
305 TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
306 TP_ARGS(spte, kvm_gen, spte_gen),
311 __field(u64, spte)
317 __entry->spte = spte;
320 TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
mmu.h
62 * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
63 * fault path update the mmio spte.
116 * it flush tlb if we try to write-protect a spte whose SPTE_MMU_WRITEABLE bit
119 * Anyway, whenever a spte is updated (only permission and status bits are
120 * changed) we need to check whether the spte with SPTE_MMU_WRITEABLE becomes
125 * - if we want to see if it has writable tlb entry or if the spte can be
128 * - if we fix page fault on the spte or do write-protection by dirty logging,
paging_tmpl.h
164 struct kvm_mmu_page *sp, u64 *spte, prefetch_invalid_gpte()
180 drop_spte(vcpu->kvm, spte); prefetch_invalid_gpte()
455 u64 *spte, pt_element_t gpte, bool no_dirty_log) prefetch_gpte()
461 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) prefetch_gpte()
464 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); prefetch_gpte()
478 mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL, prefetch_gpte()
485 u64 *spte, const void *pte) update_pte()
489 FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false); update_pte()
520 u64 *spte; pte_prefetch() local
532 spte = sp->spt + i; pte_prefetch()
534 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { pte_prefetch()
535 if (spte == sptep) pte_prefetch()
538 if (is_shadow_present_pte(*spte)) pte_prefetch()
541 if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true)) pte_prefetch()
767 * we will cache the incorrect access into mmio spte. page_fault()
915 * - The spte has a reference to the struct page, so the pfn for a given gfn
919 * We should flush all tlbs if spte is dropped even though guest is
163 prefetch_invalid_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, u64 gpte) prefetch_invalid_gpte() argument
454 prefetch_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, pt_element_t gpte, bool no_dirty_log) prefetch_gpte() argument
484 update_pte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *pte) update_pte() argument
mmu_audit.c
157 audit_printk(kvm, "no rmap for writable spte %llx\n", inspect_spte_has_rmap()
x86.c
7913 * spte on this slot until the end of the logging. kvm_mmu_slot_apply_flags()
7951 * which can be collapsed into a single large-page spte. Later kvm_arch_commit_memory_region()
vmx.c
4760 * spte. ept_set_mmio_spte_mask()
/linux-4.4.14/drivers/lguest/
page_tables.c
224 * fit in spte.pfn. get_pfn() finds the real physical number of the gpte_to_spte()
398 pte_t *spte; demand_page() local
499 spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd)); demand_page()
500 if (!spte) demand_page()
507 release_pte(*spte); demand_page()
514 *spte = gpte_to_spte(cpu, gpte, 1); demand_page()
522 set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); demand_page()
553 pte_t *spte; page_writable() local
561 spte = find_spte(cpu, vaddr, false, 0, 0); page_writable()
562 if (!spte) page_writable()
569 flags = pte_flags(*spte); page_writable()
948 pte_t *spte = spte_addr(cpu, *spgd, vaddr); __guest_set_pte() local
949 release_pte(*spte); __guest_set_pte()
961 set_pte(spte, __guest_set_pte()
969 set_pte(spte, __pte(0)); __guest_set_pte()
/linux-4.4.14/arch/x86/include/asm/
kvm_host.h
228 /* hold the gfn of each spte inside spt */
242 * Used out of the mmu-lock to avoid reading spte values while an
285 u64 *spte, const void *pte);
305 * the bits spte never used.
/linux-4.4.14/mm/
hugetlb.c
4196 pte_t *spte = NULL; huge_pmd_share() local
4210 spte = huge_pte_offset(svma->vm_mm, saddr); huge_pmd_share()
4211 if (spte) { huge_pmd_share()
4213 get_page(virt_to_page(spte)); huge_pmd_share()
4219 if (!spte) huge_pmd_share()
4222 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte); huge_pmd_share()
4226 (pmd_t *)((unsigned long)spte & PAGE_MASK)); huge_pmd_share()
4228 put_page(virt_to_page(spte)); huge_pmd_share()
/linux-4.4.14/drivers/infiniband/core/
umem_odp.c
71 * the page that is going to be mapped in the spte could have ib_umem_notifier_end_account()
/linux-4.4.14/virt/kvm/
kvm_main.c
284 * here the kvm page fault will notice if the spte can't be kvm_mmu_notifier_invalidate_page()
286 * instead the kvm page fault establishes the spte before kvm_mmu_notifier_invalidate_page()
342 * spte can be established without taking the mmu_lock and kvm_mmu_notifier_invalidate_range_start()
366 * the page that is going to be mapped in the spte could have kvm_mmu_notifier_invalidate_range_end()
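
The kvm_main.c comments above refer to the MMU-notifier retry protocol: the page fault path samples a notifier sequence count before translating the gfn, and re-checks it (together with an in-progress counter) under the mmu lock before installing the spte, so an spte is never established against a page an invalidation has just freed. A simplified sketch; the field names follow the kernel's mmu_notifier_seq/mmu_notifier_count, but the code is an illustration, not the KVM implementation:

#include <stdbool.h>

struct kvm_lite {
	unsigned long mmu_notifier_seq;		/* bumped in invalidate_range_end     */
	long mmu_notifier_count;		/* non-zero while an invalidate runs  */
};

/* called under the mmu lock, just before committing a new spte */
static bool mmu_notifier_retry(struct kvm_lite *kvm, unsigned long seq)
{
	if (kvm->mmu_notifier_count)		/* invalidation still in progress     */
		return true;
	if (kvm->mmu_notifier_seq != seq)	/* one completed since we sampled     */
		return true;
	return false;
}

int main(void)
{
	struct kvm_lite kvm = { .mmu_notifier_seq = 7, .mmu_notifier_count = 0 };
	unsigned long seq = kvm.mmu_notifier_seq;	/* sampled before gfn translation */

	kvm.mmu_notifier_seq++;			/* an invalidation completed meanwhile */
	return mmu_notifier_retry(&kvm, seq) ? 0 : 1;	/* the fault must be retried   */
}
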
