Identifier search results for "gpte" in the linux-4.4.14 source tree.

/linux-4.4.14/arch/powerpc/kvm/
book3s_64_mmu.c - all hits in kvmppc_mmu_book3s_64_xlate():
  208  kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite)  [argument]
  209  struct kvmppc_pte *gpte, bool data,
  230  gpte->eaddr = eaddr;
  231  gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
  232  gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
  233  gpte->raddr &= KVM_PAM;
  234  gpte->may_execute = true;
  235  gpte->may_read = true;
  236  gpte->may_write = true;
  237  gpte->page_size = MMU_PAGE_4K;
  309  gpte->eaddr = eaddr;
  310  gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
  313  gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
  314  gpte->page_size = pgsize;
  315  gpte->may_execute = ((r & HPTE_R_N) ? false : true);
  318  gpte->may_execute = true;
  319  gpte->may_read = false;
  320  gpte->may_write = false;
  327  gpte->may_write = true;
  333  gpte->may_read = true;
  339  eaddr, avpn, gpte->vpage, gpte->raddr);
  343  if (gpte->may_read && !(r & HPTE_R_R)) {
  355  if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
  365  if (!gpte->may_read || (iswrite && !gpte->may_write))
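The hits at lines 230-237 are the magic-page fast path: the translation result is filled in directly, without consulting the guest hash table. A minimal sketch of that pattern, assuming only the struct kvmppc_pte field names and identifiers visible above (the wrapper function name is hypothetical, and the condensed flow is illustrative rather than the kernel's exact code):

    /* Illustrative sketch only - condensed from the book3s_64_mmu.c hits above. */
    static void fill_magic_page_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
                                    struct kvmppc_pte *gpte, bool data)
    {
            gpte->eaddr = eaddr;
            gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
            /* Real address: the magic page, keeping the low 12-bit page offset,
             * clipped to the physical address mask. */
            gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
            gpte->raddr &= KVM_PAM;
            /* The magic page is always readable, writable and executable. */
            gpte->may_read = true;
            gpte->may_write = true;
            gpte->may_execute = true;
            gpte->page_size = MMU_PAGE_4K;
    }

The second group of hits (lines 309-365) is the normal path: raddr combines the HPTE's real page number (HPTE_R_RPN) with the effective-address offset, may_execute is cleared when the no-execute bit HPTE_R_N is set, and the checks at lines 343 and 355 guard the code that updates the HPTE's referenced (R) and changed (C) bits.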
book3s_64_mmu_hv.c - all hits in kvmppc_mmu_book3s_64_hv_xlate():
  308  kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite)  [argument]
  309  struct kvmppc_pte *gpte, bool data, bool iswrite)
  346  gpte->eaddr = eaddr;
  347  gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
  355  gpte->may_read = hpte_read_permission(pp, key);
  356  gpte->may_write = hpte_write_permission(pp, key);
  357  gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
  363  gpte->may_read = 0;
  365  gpte->may_write = 0;
  369  gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
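In the HV (hypervisor-mode) variant, read/write permission comes from the HPTE's pp and key values, and execute permission is withheld for no-execute or guarded pages. A minimal sketch assuming only the helpers and flag names that appear in the hits above (the wrapper function is hypothetical):

    /* Illustrative sketch only - condensed from the book3s_64_mmu_hv.c hits above. */
    static void fill_hv_pte(struct kvmppc_pte *gpte, gva_t eaddr,
                            unsigned long v, unsigned long gr, int pp, int key)
    {
            gpte->eaddr = eaddr;
            /* Virtual page: AVPN from the HPTE plus the low EA bits. */
            gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
            gpte->may_read  = hpte_read_permission(pp, key);
            gpte->may_write = hpte_write_permission(pp, key);
            /* No-execute (N) or guarded (G) pages are never executable. */
            gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
            gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
    }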
/linux-4.4.14/arch/x86/kvm/
paging_tmpl.h:
  109  static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)  gpte_to_gfn_lvl() [argument]
  111  return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;  gpte_to_gfn_lvl()
  114  static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)  protect_clean_gpte() [argument]
  126  mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &  protect_clean_gpte()
  163  prefetch_invalid_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, u64 gpte)  prefetch_invalid_gpte() [argument]
  165  u64 gpte)  prefetch_invalid_gpte()
  167  if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))  prefetch_invalid_gpte()
  170  if (!FNAME(is_present_gpte)(gpte))  prefetch_invalid_gpte()
  173  /* if accessed bit is not supported prefetch non accessed gpte */  prefetch_invalid_gpte()
  174  if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK))  prefetch_invalid_gpte()
  184  static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)  gpte_access() [argument]
  188  access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |  gpte_access()
  189  ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |  gpte_access()
  192  access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;  gpte_access()
  193  access &= ~(gpte >> PT64_NX_SHIFT);  gpte_access()
  454  prefetch_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, pt_element_t gpte, bool no_dirty_log)  prefetch_gpte() [argument]
  455  u64 *spte, pt_element_t gpte, bool no_dirty_log)  prefetch_gpte()
  461  if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))  prefetch_gpte()
  464  pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);  prefetch_gpte()
  466  gfn = gpte_to_gfn(gpte);  prefetch_gpte()
  467  pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);  prefetch_gpte()
  468  FNAME(protect_clean_gpte)(&pte_access, gpte);  prefetch_gpte()
  487  pt_element_t gpte = *(const pt_element_t *)pte;  update_pte() [local]
  489  FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);  update_pte()
  567  * Verify that the top-level gpte is still there. Since the page  fetch()
  594  * Verify that the gpte in the page we've just write  fetch()
  846  pt_element_t gpte;  for_each_shadow_entry() [local]
  861  if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,  for_each_shadow_entry()
  865  FNAME(update_pte)(vcpu, sp, sptep, &gpte);  for_each_shadow_entry()
  939  pt_element_t gpte;  sync_page() [local]
  948  if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,  sync_page()
  952  if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {  sync_page()
  957  gfn = gpte_to_gfn(gpte);  sync_page()
  959  pte_access &= FNAME(gpte_access)(vcpu, gpte);  sync_page()
  960  FNAME(protect_clean_gpte)(&pte_access, gpte);  sync_page()
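The gpte_access() hits (lines 184-193) show how a guest PTE's permission bits are folded into KVM's internal access mask: the EPT branch reads the VMX_EPT_* bits, while the legacy branch takes the writable and user bits straight from the PTE and strips execute permission when NX (bit 63) is set. A self-contained sketch of the legacy branch; the numeric values of the constants below are assumptions chosen for the example, not taken from the kernel headers:

    /* Illustrative sketch only - constants chosen so the NX trick works as in the hits. */
    #define SK_PT_WRITABLE_MASK  (1ULL << 1)
    #define SK_PT_USER_MASK      (1ULL << 2)
    #define SK_PT64_NX_SHIFT     63
    #define SK_ACC_EXEC_MASK     1ULL

    static unsigned sketch_gpte_access(unsigned long long gpte)
    {
            /* W and U come straight from the PTE; exec is granted by default. */
            unsigned access = (gpte & (SK_PT_WRITABLE_MASK | SK_PT_USER_MASK)) |
                              SK_ACC_EXEC_MASK;

            /* Shifting the NX bit down lands it on the exec bit of the access
             * mask, so the AND-NOT removes execute permission when NX is set. */
            access &= ~(gpte >> SK_PT64_NX_SHIFT);
            return access;
    }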
mmu.c:
  333  static gfn_t pse36_gfn_delta(u32 gpte)  pse36_gfn_delta() [argument]
  337  return (gpte & PT32_DIR_PSE36_MASK) << shift;  pse36_gfn_delta()
  3298 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)  is_rsvd_bits_set() [argument]
  3300 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level);  is_rsvd_bits_set()
  3594 static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)  is_last_gpte() [argument]
  3599 index |= (gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2);  is_last_gpte()
  4185 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */  mmu_pte_write_fetch_gpte()
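pse36_gfn_delta() (lines 333-337) handles the PSE-36 case, where a 32-bit guest's 4MB page-directory entry stores the upper physical-address bits in low PTE bits. A self-contained sketch of that computation; the bit positions used here are assumptions for illustration:

    /* Illustrative sketch only - PSE-36 stores physical bits 32..35 in
     * PTE bits 13..16; the constants are assumptions for this example. */
    #define SK_PAGE_SHIFT   12
    #define SK_PSE36_SHIFT  13
    #define SK_PSE36_MASK   (0xfULL << SK_PSE36_SHIFT)

    static unsigned long long sketch_pse36_gfn_delta(unsigned int gpte)
    {
            /* Move the stored bits from PTE position 13 to the position they
             * occupy in a guest frame number (physical bit 32 is gfn bit 20),
             * i.e. shift left by 32 - 13 - 12 = 7. */
            int shift = 32 - SK_PSE36_SHIFT - SK_PAGE_SHIFT;

            return ((unsigned long long)gpte & SK_PSE36_MASK) << shift;
    }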
/linux-4.4.14/drivers/lguest/
page_tables.c:
  206  static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)  gpte_to_spte() [argument]
  216  flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);  gpte_to_spte()
  227  pfn = get_pfn(base + pte_pfn(gpte), write);  gpte_to_spte()
  229  kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));  gpte_to_spte()
  253  static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte)  gpte_in_iomem() [argument]
  256  if (pte_flags(gpte) & _PAGE_PSE)  gpte_in_iomem()
  259  return (pte_pfn(gpte) >= cpu->lg->pfn_limit  gpte_in_iomem()
  260  && pte_pfn(gpte) < cpu->lg->device_limit);  gpte_in_iomem()
  263  static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)  check_gpte() [argument]
  265  if ((pte_flags(gpte) & _PAGE_PSE) ||  check_gpte()
  266  pte_pfn(gpte) >= cpu->lg->pfn_limit) {  check_gpte()
  397  pte_t gpte;  demand_page() [local]
  459  gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);  demand_page()
  462  gpte = lgread(cpu, gpte_ptr, pte_t);  demand_page()
  466  if (!(pte_flags(gpte) & _PAGE_PRESENT))  demand_page()
  473  if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))  demand_page()
  477  if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))  demand_page()
  481  if (gpte_in_iomem(cpu, gpte)) {  demand_page()
  482  *iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);  demand_page()
  490  if (!check_gpte(cpu, gpte))  demand_page()
  494  gpte = pte_mkyoung(gpte);  demand_page()
  496  gpte = pte_mkdirty(gpte);  demand_page()
  513  if (pte_dirty(gpte))  demand_page()
  514  *spte = gpte_to_spte(cpu, gpte, 1);  demand_page()
  522  set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));  demand_page()
  529  lgwrite(cpu, gpte_ptr, pte_t, gpte);  demand_page()
  679  pte_t gpte;  __guest_pa() [local]
  700  gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);  __guest_pa()
  702  gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);  __guest_pa()
  704  if (!(pte_flags(gpte) & _PAGE_PRESENT))  __guest_pa()
  707  *paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);  __guest_pa()
  932  __guest_set_pte(struct lg_cpu *cpu, int idx, unsigned long vaddr, pte_t gpte)  __guest_set_pte() [argument]
  933  unsigned long vaddr, pte_t gpte)  __guest_set_pte()
  957  if ((pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED))  __guest_set_pte()
  958  && !gpte_in_iomem(cpu, gpte)) {  __guest_set_pte()
  959  if (!check_gpte(cpu, gpte))  __guest_set_pte()
  962  gpte_to_spte(cpu, gpte,  __guest_set_pte()
  963  pte_flags(gpte) & _PAGE_DIRTY));  __guest_set_pte()
  989  guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, unsigned long vaddr, pte_t gpte)  guest_set_pte() [argument]
  990  unsigned long gpgdir, unsigned long vaddr, pte_t gpte)  guest_set_pte()
  1006 __guest_set_pte(cpu, i, vaddr, gpte);  guest_set_pte()
  1012 __guest_set_pte(cpu, pgdir, vaddr, gpte);  guest_set_pte()
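The demand_page() hits show lguest's dirty-bit handling: the guest PTE is marked young (and dirty on a write fault), and the shadow PTE is installed writable only if the guest PTE is already dirty, otherwise write-protected so the next write faults and the dirty bit can be recorded. A condensed, commented fragment of that flow using only identifiers that appear in the hits above (not a standalone program; it summarizes the demand_page() steps):

    /* Condensed from the demand_page() hits above - illustrative only. */
    gpte = pte_mkyoung(gpte);                 /* the guest page was accessed   */
    if (errcode & 2)                          /* bit 1 of errcode: write fault */
            gpte = pte_mkdirty(gpte);

    if (pte_dirty(gpte))
            /* Already dirty: map the shadow PTE writable. */
            *spte = gpte_to_spte(cpu, gpte, 1);
    else
            /* Clean: install a write-protected shadow mapping so the first
             * write traps and the dirty bit can be set then. */
            set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));

    /* Write the accessed/dirty updates back into the guest page table. */
    lgwrite(cpu, gpte_ptr, pte_t, gpte);

gpte_to_spte() itself (lines 206-229) copies the guest PTE's flags minus _PAGE_GLOBAL and rewrites the frame number through get_pfn(), killing the guest if the page cannot be obtained.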