Home
last modified time | relevance | path

Searched refs:gpte (Results 1 – 7 of 7) sorted by relevance

/linux-4.1.27/drivers/lguest/
page_tables.c:206 static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) in gpte_to_spte() argument
216 flags = (pte_flags(gpte) & ~_PAGE_GLOBAL); in gpte_to_spte()
227 pfn = get_pfn(base + pte_pfn(gpte), write); in gpte_to_spte()
229 kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte)); in gpte_to_spte()
253 static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte) in gpte_in_iomem() argument
256 if (pte_flags(gpte) & _PAGE_PSE) in gpte_in_iomem()
259 return (pte_pfn(gpte) >= cpu->lg->pfn_limit in gpte_in_iomem()
260 && pte_pfn(gpte) < cpu->lg->device_limit); in gpte_in_iomem()
263 static bool check_gpte(struct lg_cpu *cpu, pte_t gpte) in check_gpte() argument
265 if ((pte_flags(gpte) & _PAGE_PSE) || in check_gpte()
[all …]
/linux-4.1.27/arch/powerpc/kvm/
book3s_64_mmu.c:209 struct kvmppc_pte *gpte, bool data, in kvmppc_mmu_book3s_64_xlate() argument
230 gpte->eaddr = eaddr; in kvmppc_mmu_book3s_64_xlate()
231 gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); in kvmppc_mmu_book3s_64_xlate()
232 gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); in kvmppc_mmu_book3s_64_xlate()
233 gpte->raddr &= KVM_PAM; in kvmppc_mmu_book3s_64_xlate()
234 gpte->may_execute = true; in kvmppc_mmu_book3s_64_xlate()
235 gpte->may_read = true; in kvmppc_mmu_book3s_64_xlate()
236 gpte->may_write = true; in kvmppc_mmu_book3s_64_xlate()
237 gpte->page_size = MMU_PAGE_4K; in kvmppc_mmu_book3s_64_xlate()
309 gpte->eaddr = eaddr; in kvmppc_mmu_book3s_64_xlate()
[all …]
book3s_64_mmu_hv.c:308 struct kvmppc_pte *gpte, bool data, bool iswrite) in kvmppc_mmu_book3s_64_hv_xlate() argument
345 gpte->eaddr = eaddr; in kvmppc_mmu_book3s_64_hv_xlate()
346 gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); in kvmppc_mmu_book3s_64_hv_xlate()
354 gpte->may_read = hpte_read_permission(pp, key); in kvmppc_mmu_book3s_64_hv_xlate()
355 gpte->may_write = hpte_write_permission(pp, key); in kvmppc_mmu_book3s_64_hv_xlate()
356 gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G)); in kvmppc_mmu_book3s_64_hv_xlate()
362 gpte->may_read = 0; in kvmppc_mmu_book3s_64_hv_xlate()
364 gpte->may_write = 0; in kvmppc_mmu_book3s_64_hv_xlate()
368 gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr); in kvmppc_mmu_book3s_64_hv_xlate()
/linux-4.1.27/arch/x86/kvm/
paging_tmpl.h:109 static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl) in gpte_to_gfn_lvl() argument
111 return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT; in gpte_to_gfn_lvl()
114 static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte) in FNAME()
126 mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & in FNAME()
131 static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level) in FNAME()
133 int bit7 = (gpte >> 7) & 1, low6 = gpte & 0x3f; in FNAME()
135 return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) | in FNAME()
173 u64 gpte) in FNAME()
175 if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) in FNAME()
178 if (!FNAME(is_present_gpte)(gpte)) in FNAME()
[all …]
mmu.c:333 static gfn_t pse36_gfn_delta(u32 gpte) in pse36_gfn_delta() argument
337 return (gpte & PT32_DIR_PSE36_MASK) << shift; in pse36_gfn_delta()
3565 static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte) in is_last_gpte() argument
3570 index |= (gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2); in is_last_gpte()
/linux-4.1.27/Documentation/virtual/kvm/
locking.txt:37 gpte = gfn1
39 spte is the shadow page table entry corresponding with gpte and
51 gpte is changed to point to
mmu.txt:41 gpte guest pte (referring to gfns)
149 first or second 512-gpte block in the guest page table. For second-level
150 page tables, each 32-bit gpte is converted to two 64-bit sptes
250 protected, and synchronize sptes to gptes when a gpte is written.
304 - if successful, we can let the guest continue and modify the gpte
338 we cannot map the permissions for gpte.u=1, gpte.w=0 to any spte (the
355 spte.nx=gpte.nx back. For this to work, KVM forces EFER.NX to 1 when