/linux-4.4.14/Documentation/virtual/kvm/
D | locking.txt
     17  we just need change the W bit of the spte.
     20  SPTE_MMU_WRITEABLE bit on the spte:
     26  On fast page fault path, we will use cmpxchg to atomically set the spte W
     27  bit if spte.SPTE_HOST_WRITEABLE = 1 and spte.SPTE_WRITE_PROTECT = 1, this
     39  spte is the shadow page table entry corresponding with gpte and
     40  spte = pfn1
     45  old_spte = *spte;
     47  spte = 0;
     53  spte = pfn1;
     55  if (cmpxchg(spte, old_spte, old_spte+W)
     [all …]
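The locking.txt lines above describe KVM's lockless fast page fault: when only the W bit needs restoring, the spte is updated with cmpxchg so that any concurrent change to the entry makes the update fail and the fault falls back to the slow path. Below is a minimal user-space sketch of that pattern, assuming simplified flag positions; SPTE_W, SPTE_HOST_WRITEABLE and SPTE_MMU_WRITEABLE are illustrative placeholders for the software bits named at lines 20 and 26-27, not the kernel's real masks.

/*
 * Minimal user-space model of the lockless fast-page-fault update
 * described in locking.txt.  Flag positions are placeholders, not the
 * real KVM bit layout.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SPTE_W              (1ULL << 1)   /* hardware write-enable (placeholder) */
#define SPTE_HOST_WRITEABLE (1ULL << 57)  /* software bit (placeholder)          */
#define SPTE_MMU_WRITEABLE  (1ULL << 58)  /* software bit (placeholder)          */

/*
 * Try to restore the W bit without taking mmu_lock.  Returns true if the
 * spte is now writable; false means the slow path must handle the fault.
 */
bool fast_fix_writable(_Atomic uint64_t *sptep)
{
	uint64_t old_spte = atomic_load(sptep);

	/* Only entries write-protected purely for tracking purposes qualify. */
	if (!(old_spte & SPTE_HOST_WRITEABLE) || !(old_spte & SPTE_MMU_WRITEABLE))
		return false;

	/*
	 * If the spte changed under us (zapped, or now pointing at another
	 * pfn), the exchange fails and we give up, like the
	 * "if (cmpxchg(spte, old_spte, old_spte+W))" check quoted above.
	 */
	return atomic_compare_exchange_strong(sptep, &old_spte,
					      old_spte | SPTE_W);
}

The compare-and-swap is what makes the check-then-set safe without mmu_lock: if the entry was modified between the load and the update, nothing is changed and the caller retries under the lock.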
D | mmu.txt
     42  spte shadow pte (referring to pfns)
    109  A nonleaf spte allows the hardware mmu to reach the leaf pages and
    112  A leaf spte corresponds to either one or two translations encoded into
    206  parent_ptes bit 0 is zero, only one spte points at this pages and
    207  parent_ptes points at this single spte, otherwise, there exists multiple
    228  Only present on 32-bit hosts, where a 64-bit spte cannot be written
    292  - check for valid generation number in the spte (see "Fast invalidation of
    305  - walk the shadow page table to find the spte for the translation,
    307  - If this is an mmio request, cache the mmio info to the spte and set some
    308  reserved bit on the spte (see callers of kvm_mmu_set_mmio_spte_mask)
    [all …]
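Lines 206-207 describe a tagged-pointer trick: parent_ptes either points directly at the single spte that references the page, or, with bit 0 set, at a list of such back-pointers. The following is a toy sketch of that encoding with invented struct and helper names; it is not the kernel's pte_list/rmap code.

/*
 * Toy model of the parent_ptes encoding quoted from mmu.txt: bit 0 of
 * the word tells whether it holds a single back-pointer or a list head.
 * All names here are illustrative, not the kernel's.
 */
#include <stdint.h>

struct parent_list {
	uint64_t *sptep;              /* one spte that points at the page */
	struct parent_list *next;
};

/* bit 0 clear: the value is a single uint64_t *sptep
 * bit 0 set:   the value (tag stripped) is a struct parent_list *      */
typedef uintptr_t parent_ptes_t;

void for_each_parent_spte(parent_ptes_t parents, void (*fn)(uint64_t *sptep))
{
	if (!parents)
		return;                                   /* no parents yet */

	if ((parents & 1) == 0) {
		fn((uint64_t *)parents);                  /* common single-parent case */
		return;
	}

	for (struct parent_list *l = (struct parent_list *)(parents & ~(uintptr_t)1);
	     l; l = l->next)
		fn(l->sptep);                             /* shared page: walk the list */
}

Packing the discriminator into bit 0 keeps the common single-parent case to one pointer-sized field with no extra allocation.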
/linux-4.4.14/arch/x86/kvm/
D | mmu.c
    161  #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
    164          ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
    165          __shadow_walk_next(&(_walker), spte))
    178  static void mmu_spte_set(u64 *sptep, u64 spte);
    215  static unsigned int get_mmio_spte_generation(u64 spte)
    219          spte &= ~shadow_mmio_mask;
    221          gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
    222          gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
    244  static bool is_mmio_spte(u64 spte)
    246          return (spte & shadow_mmio_mask) == shadow_mmio_mask;
    [all …]
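Lines 215-222 show the MMIO generation number being reassembled from two bit ranges of the spte. The stand-alone sketch below round-trips that packing; the shift and mask values are placeholders chosen for illustration, not necessarily the constants defined in mmu.c, and the model omits the shadow_mmio_mask handling of line 219.

/*
 * Stand-alone model of how mmu.c splits the MMIO generation number
 * across two spare bit ranges of an MMIO spte.  Constants are placeholders.
 */
#include <assert.h>
#include <stdint.h>

#define MMIO_SPTE_GEN_LOW_SHIFT   2    /* placeholder */
#define MMIO_SPTE_GEN_HIGH_SHIFT  52   /* placeholder */
#define MMIO_GEN_LOW_SHIFT        10   /* placeholder: bits kept in the low field */
#define MMIO_GEN_LOW_MASK         ((1ULL << MMIO_GEN_LOW_SHIFT) - 1)

/* Pack a generation number into the two bit ranges of an MMIO spte. */
static uint64_t generation_mmio_spte_mask(uint64_t gen)
{
	uint64_t mask;

	mask  = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
	mask |= (gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
	return mask;
}

/* Inverse of the packing above; mirrors the shifts on lines 221-222. */
static uint64_t get_mmio_spte_generation(uint64_t spte)
{
	uint64_t gen;

	gen  = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
	gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
	return gen;
}

int main(void)
{
	uint64_t gen = 0x5ab;	/* an arbitrary generation number */

	/* The generation survives a pack/unpack round trip. */
	assert(get_mmio_spte_generation(generation_mmio_spte_mask(gen)) == gen);
	return 0;
}

Splitting the generation across a low and a high field lets it share the spte with the other MMIO bits; when the cached generation no longer matches the current one, the MMIO spte is treated as stale (the mmutrace.h tracepoint below logs exactly that comparison).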
D | mmutrace.h
    305  TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
    306  TP_ARGS(spte, kvm_gen, spte_gen),
    311  __field(u64, spte)
    317  __entry->spte = spte;
    320  TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
D | paging_tmpl.h
    164  struct kvm_mmu_page *sp, u64 *spte,
    180  drop_spte(vcpu->kvm, spte);
    455  u64 *spte, pt_element_t gpte, bool no_dirty_log)
    461  if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
    464  pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
    478  mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
    485  u64 *spte, const void *pte)
    489  FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
    520  u64 *spte;
    532  spte = sp->spt + i;
    [all …]
/linux-4.4.14/drivers/lguest/
D | page_tables.c
    398  pte_t *spte;
    499  spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
    500  if (!spte)
    507  release_pte(*spte);
    514  *spte = gpte_to_spte(cpu, gpte, 1);
    522  set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
    553  pte_t *spte;
    561  spte = find_spte(cpu, vaddr, false, 0, 0);
    562  if (!spte)
    569  flags = pte_flags(*spte);
    [all …]
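Lines 499-522 are the heart of lguest's demand_page(): locate the shadow pte slot, release whatever it held, then install the guest pte either writable (line 514, write fault) or write-protected (line 522) so the next guest write still traps into the host. The sketch below models only that decision with stand-in types; gpte_to_spte(), the pte_t layout and the flag value are simplified placeholders, not the lguest definitions.

/*
 * Simplified model of the install-writable-or-write-protected choice in
 * lguest's demand_page().  Types, helpers and flag values are stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>

#define PTE_W (1u << 1)		/* writable bit (placeholder position) */

typedef struct { uint32_t flags; uint32_t pfn; } pte_t;

/* Stand-in for gpte_to_spte(): mirror a guest pte into the shadow table,
 * optionally granting write access. */
static pte_t gpte_to_spte(pte_t gpte, bool writable)
{
	pte_t spte = gpte;

	if (!writable)
		spte.flags &= ~PTE_W;	/* like pte_wrprotect(gpte) on line 522 */
	return spte;
}

void install_shadow_pte(pte_t *spte, pte_t gpte, bool write_fault)
{
	/*
	 * Write fault: give the guest a directly writable mapping (line 514).
	 * Read fault: map it read-only so a later write faults again and the
	 * host gets a chance to handle it (line 522).
	 */
	*spte = gpte_to_spte(gpte, write_fault);
}

page_writable() (lines 553-569) is the read side of the same scheme: it looks up the shadow pte and inspects its flags.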
/linux-4.4.14/mm/
D | hugetlb.c
    4196  pte_t *spte = NULL;
    4210  spte = huge_pte_offset(svma->vm_mm, saddr);
    4211  if (spte) {
    4213  get_page(virt_to_page(spte));
    4219  if (!spte)
    4222  ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
    4226  (pmd_t *)((unsigned long)spte & PAGE_MASK));
    4228  put_page(virt_to_page(spte));
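Here spte is not a KVM shadow pte but the "shared pte" of huge_pmd_share(): a PMD page already set up by a sibling mapping of the same file is pinned with get_page() (line 4213), installed under the page-table lock (lines 4222-4226), and unpinned again (line 4228) if another thread populated the entry first. The toy sketch below captures that pin/install-or-drop pattern; for brevity it compresses the locked emptiness check into one compare-and-swap, which is not how hugetlb.c itself does it, and every name is a stand-in.

/*
 * Toy version of the sharing pattern in huge_pmd_share(): pin a page
 * found elsewhere, publish it unless someone beat us to it, and drop
 * the pin if we lost the race.  Not mm/hugetlb.c code.
 */
#include <stdatomic.h>
#include <stddef.h>

struct page { atomic_int refcount; };

static void get_page(struct page *p) { atomic_fetch_add(&p->refcount, 1); }
static void put_page(struct page *p) { atomic_fetch_sub(&p->refcount, 1); }

/* 'slot' plays the role of the pud entry, 'found' the pmd page located
 * through a sibling vma (cf. lines 4210-4213). */
struct page *share_pmd_page(struct page *_Atomic *slot, struct page *found)
{
	struct page *expected = NULL;

	get_page(found);			/* pin before publishing             */
	if (atomic_compare_exchange_strong(slot, &expected, found))
		return found;			/* installed (cf. line 4226)         */

	put_page(found);			/* raced: drop our pin (line 4228)   */
	return expected;			/* someone else's page is in place   */
}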
/linux-4.4.14/arch/x86/include/asm/
D | kvm_host.h
    285  u64 *spte, const void *pte);