/linux-4.1.27/arch/x86/kvm/ |
D | iommu.c |
    46   static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,  in kvm_pin_pages() argument
    52   pfn = gfn_to_pfn_memslot(slot, gfn);  in kvm_pin_pages()
    53   end_gfn = gfn + npages;  in kvm_pin_pages()
    54   gfn += 1;  in kvm_pin_pages()
    59   while (gfn < end_gfn)  in kvm_pin_pages()
    60   gfn_to_pfn_memslot(slot, gfn++);  in kvm_pin_pages()
    75   gfn_t gfn, end_gfn;  in kvm_iommu_map_pages() local
    85   gfn = slot->base_gfn;  in kvm_iommu_map_pages()
    86   end_gfn = gfn + slot->npages;  in kvm_iommu_map_pages()
    95   while (gfn < end_gfn) {  in kvm_iommu_map_pages()
    [all …]
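
The iommu.c hits above happen to cover the whole body of the gfn pinning loop. Reassembled as a sketch; the error check between pinning the first page and pinning the rest is an assumption, not one of the listed lines:

/* Sketch of kvm_pin_pages() reassembled from the hits above; the
 * is_error_noslot_pfn() check in the middle is assumed. */
static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
                           unsigned long npages)
{
        gfn_t end_gfn;
        pfn_t pfn;

        pfn     = gfn_to_pfn_memslot(slot, gfn);        /* pin the first page */
        end_gfn = gfn + npages;
        gfn    += 1;

        if (is_error_noslot_pfn(pfn))                   /* assumed error check */
                return pfn;

        while (gfn < end_gfn)                           /* pin the rest of the range */
                gfn_to_pfn_memslot(slot, gfn++);

        return pfn;
}
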
|
D | mmutrace.h |
    12   __field(__u64, gfn) \
    19   __entry->gfn = sp->gfn; \
    35   __entry->gfn, role.level, \
    202  TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
    203  TP_ARGS(sptep, gfn, access, gen),
    207  __field(gfn_t, gfn)
    214  __entry->gfn = gfn;
    220  __entry->gfn, __entry->access, __entry->gen)
    225  TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
    226  TP_ARGS(addr, gfn, access),
    [all …]
|
D | mmu.c |
    231  static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn,  in mark_mmio_spte() argument
    238  mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT;  in mark_mmio_spte()
    240  trace_mark_mmio_spte(sptep, gfn, access, gen);  in mark_mmio_spte()
    261  static bool set_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,  in set_mmio_spte() argument
    265  mark_mmio_spte(kvm, sptep, gfn, access);  in set_mmio_spte()
    759  return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));  in kvm_mmu_page_get_gfn()
    762  static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)  in kvm_mmu_page_set_gfn() argument
    765  BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));  in kvm_mmu_page_set_gfn()
    767  sp->gfns[index] = gfn;  in kvm_mmu_page_set_gfn()
    774  static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,  in lpage_info_slot() argument
    [all …]
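
Lines 759-767 above are the per-index gfn accessors of a shadow page. A hedged reconstruction, assuming the usual role.direct branches around the visible statements:

/* Hedged reconstruction of the accessors around mmu.c lines 759-767 above;
 * the role.direct branches are assumptions, the arithmetic and the BUG_ON
 * are the listed lines. */
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
        if (!sp->role.direct)
                return sp->gfns[index];         /* indirect sp: stored per spte */

        /* direct sp: the gfn follows from the page's base gfn and the index */
        return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
        if (sp->role.direct)
                BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
        else
                sp->gfns[index] = gfn;
}
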
|
D | mmu_audit.c |
    99   gfn_t gfn;  in audit_mappings() local
    116  gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);  in audit_mappings()
    117  pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);  in audit_mappings()
    134  gfn_t gfn;  in inspect_spte_has_rmap() local
    137  gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);  in inspect_spte_has_rmap()
    139  if (!gfn_to_memslot(kvm, gfn)) {  in inspect_spte_has_rmap()
    142  audit_printk(kvm, "no memslot for gfn %llx\n", gfn);  in inspect_spte_has_rmap()
    144  (long int)(sptep - rev_sp->spt), rev_sp->gfn);  in inspect_spte_has_rmap()
    149  rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);  in inspect_spte_has_rmap()
    198  rmapp = gfn_to_rmap(kvm, sp->gfn, PT_PAGE_TABLE_LEVEL);  in audit_write_protection()
    [all …]
|
D | paging_tmpl.h |
    105  gfn_t gfn;  member
    284  gfn_t gfn;  in FNAME() local
    373  gfn = gpte_to_gfn_lvl(pte, walker->level);  in FNAME()
    374  gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;  in FNAME()
    377  gfn += pse36_gfn_delta(pte);  in FNAME()
    379  real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);  in FNAME()
    383  walker->gfn = real_gpa >> PAGE_SHIFT;  in FNAME()
    467  gfn_t gfn;  in FNAME() local
    475  gfn = gpte_to_gfn(gpte);  in FNAME()
    478  pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,  in FNAME()
    [all …]
|
D | x86.h |
    86   gva_t gva, gfn_t gfn, unsigned access)  in vcpu_cache_mmio_info() argument
    90   vcpu->arch.mmio_gfn = gfn;  in vcpu_cache_mmio_info()
|
D | x86.c |
    500   static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,  in kvm_read_nested_guest_page() argument
    503   return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,  in kvm_read_nested_guest_page()
    550   gfn_t gfn;  in pdptrs_changed() local
    560   gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;  in pdptrs_changed()
    562   r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),  in pdptrs_changed()
    1990  u64 gfn;  in set_msr_hyperv_pw() local
    2001  gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;  in set_msr_hyperv_pw()
    2002  addr = gfn_to_hva(kvm, gfn);  in set_msr_hyperv_pw()
    2010  mark_page_dirty(kvm, gfn);  in set_msr_hyperv_pw()
    2014  u64 gfn;  in set_msr_hyperv_pw() local
    [all …]
|
D | vmx.c |
    8505  static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)  in vmx_get_mt_mask() argument
    8523  ret = kvm_get_guest_memory_type(vcpu, gfn) <<  in vmx_get_mt_mask()
|
D | svm.c |
    4078  static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)  in svm_get_mt_mask() argument
|
/linux-4.1.27/include/trace/events/ |
D | kvm.h |
    249  TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
    250  TP_ARGS(gfn, level, slot, ref),
    254  __field( u64, gfn )
    260  __entry->gfn = gfn;
    262  __entry->hva = ((gfn - slot->base_gfn) <<
    268  __entry->hva, __entry->gfn, __entry->level,
    275  TP_PROTO(u64 gva, u64 gfn),
    277  TP_ARGS(gva, gfn),
    281  __field(u64, gfn)
    286  __entry->gfn = gfn;
    [all …]
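
The hva computation at line 262 above is cut off mid-expression. What it is evidently doing is the standard memslot translation, sketched here with the assumed tail of the expression:

/* Assumed completion of the truncated hva computation at line 262 above:
 * offset of the gfn within its memslot, scaled to bytes, plus the slot's
 * userspace base address. */
static inline unsigned long gfn_to_hva_sketch(struct kvm_memory_slot *slot,
                                              gfn_t gfn)
{
        return slot->userspace_addr + ((gfn - slot->base_gfn) << PAGE_SHIFT);
}
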
|
/linux-4.1.27/include/linux/ |
D | kvm_host.h |
    538  int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
    541  struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
    542  unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
    543  unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
    544  unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
    545  unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
    551  pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
    552  pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
    554  pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
    555  pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
    [all …]
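
A minimal usage sketch of the gfn_to_* family declared above; kvm_is_visible_gfn(), kvm_is_error_hva(), is_error_noslot_pfn() and kvm_release_pfn_clean() live in the same header but are not among the listed hits, so treat their use here as an assumption:

/* Minimal usage sketch of the gfn_to_* helpers; the checks and the pfn
 * release outside the listed hits are assumptions. */
static void inspect_gfn(struct kvm *kvm, gfn_t gfn)
{
        unsigned long hva;
        pfn_t pfn;

        if (!kvm_is_visible_gfn(kvm, gfn))      /* no memslot backs this gfn */
                return;

        hva = gfn_to_hva(kvm, gfn);             /* host userspace address */
        if (kvm_is_error_hva(hva))
                return;

        pfn = gfn_to_pfn(kvm, gfn);             /* host frame; takes a reference */
        if (!is_error_noslot_pfn(pfn))
                kvm_release_pfn_clean(pfn);     /* drop the reference again */
}
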
|
/linux-4.1.27/arch/mips/kvm/ |
D | tlb.c |
    41   pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
    144  static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)  in kvm_mips_map_page() argument
    149  if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)  in kvm_mips_map_page()
    153  pfn = kvm_mips_gfn_to_pfn(kvm, gfn);  in kvm_mips_map_page()
    156  kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);  in kvm_mips_map_page()
    161  kvm->arch.guest_pmap[gfn] = pfn;  in kvm_mips_map_page()
    171  gfn_t gfn;  in kvm_mips_translate_guest_kseg0_to_hpa() local
    181  gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);  in kvm_mips_translate_guest_kseg0_to_hpa()
    183  if (gfn >= kvm->arch.guest_pmap_npages) {  in kvm_mips_translate_guest_kseg0_to_hpa()
    184  kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,  in kvm_mips_translate_guest_kseg0_to_hpa()
    [all …]
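
The kvm_mips_map_page() hits above show a simple gfn-to-pfn cache kept in kvm->arch.guest_pmap[]. A simplified sketch of that flow; the error test on the freshly resolved pfn is a placeholder, since the real check is not among the listed lines:

/* Simplified sketch of the guest_pmap[] gfn -> pfn cache from the hits
 * above; the KVM_INVALID_PAGE test on the resolved pfn is a placeholder
 * for the real error check, and locking is omitted. */
static int kvm_mips_map_page_sketch(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn;

        if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
                return 0;                       /* translation already cached */

        pfn = kvm_mips_gfn_to_pfn(kvm, gfn);    /* resolve via the arch hook */
        if (pfn == KVM_INVALID_PAGE) {          /* placeholder error check */
                kvm_err("Couldn't get pfn for gfn %#llx!\n", (u64)gfn);
                return -EFAULT;
        }

        kvm->arch.guest_pmap[gfn] = pfn;        /* cache for later faults */
        return 0;
}
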
|
D | emulate.c |
    1538  gfn_t gfn;  in kvm_mips_sync_icache() local
    1541  gfn = va >> PAGE_SHIFT;  in kvm_mips_sync_icache()
    1543  if (gfn >= kvm->arch.guest_pmap_npages) {  in kvm_mips_sync_icache()
    1544  kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);  in kvm_mips_sync_icache()
    1549  pfn = kvm->arch.guest_pmap[gfn];  in kvm_mips_sync_icache()
|
/linux-4.1.27/virt/kvm/ |
D | kvm_main.c |
    107   struct kvm_memory_slot *memslot, gfn_t gfn);
    1089  struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)  in gfn_to_memslot() argument
    1091  return __gfn_to_memslot(kvm_memslots(kvm), gfn);  in gfn_to_memslot()
    1095  int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)  in kvm_is_visible_gfn() argument
    1097  struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);  in kvm_is_visible_gfn()
    1107  unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)  in kvm_host_page_size() argument
    1114  addr = gfn_to_hva(kvm, gfn);  in kvm_host_page_size()
    1136  static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,  in __gfn_to_hva_many() argument
    1146  *nr_pages = slot->npages - (gfn - slot->base_gfn);  in __gfn_to_hva_many()
    1148  return __gfn_to_hva_memslot(slot, gfn);  in __gfn_to_hva_many()
    [all …]
|
/linux-4.1.27/arch/powerpc/kvm/ |
D | book3s_64_mmu_hv.c |
    443  unsigned long gpa, gfn, hva, pfn;  in kvmppc_book3s_hv_page_fault() local
    483  gfn = gpa >> PAGE_SHIFT;  in kvmppc_book3s_hv_page_fault()
    484  memslot = gfn_to_memslot(kvm, gfn);  in kvmppc_book3s_hv_page_fault()
    512  hva = gfn_to_hva_memslot(memslot, gfn);  in kvmppc_book3s_hv_page_fault()
    670  unsigned long gfn))  in kvm_handle_hva_range() argument
    680  gfn_t gfn, gfn_end;  in kvm_handle_hva_range() local
    691  gfn = hva_to_gfn_memslot(hva_start, memslot);  in kvm_handle_hva_range()
    694  for (; gfn < gfn_end; ++gfn) {  in kvm_handle_hva_range()
    695  gfn_t gfn_offset = gfn - memslot->base_gfn;  in kvm_handle_hva_range()
    697  ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);  in kvm_handle_hva_range()
    [all …]
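
kvm_handle_hva_range() above turns an hva back into a gfn with hva_to_gfn_memslot() before walking memslot->arch.rmap[]. The helper's body is not among the hits; a sketch of the inverse translation, assuming the usual memslot layout:

/* Sketch of the hva -> gfn direction used by kvm_handle_hva_range() above;
 * the body is assumed, it is simply the inverse of the gfn -> hva
 * computation over a memslot. */
static inline gfn_t hva_to_gfn_sketch(unsigned long hva,
                                      struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}
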
|
D | e500_mmu_host.c |
    325  u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,  in kvmppc_e500_shadow_map() argument
    355  slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);  in kvmppc_e500_shadow_map()
    356  hva = gfn_to_hva_memslot(slot, gfn);  in kvmppc_e500_shadow_map()
    383  slot_start = pfn - (gfn - slot->base_gfn);  in kvmppc_e500_shadow_map()
    411  gfn_start = gfn & ~(tsize_pages - 1);  in kvmppc_e500_shadow_map()
    414  if (gfn_start + pfn - gfn < start)  in kvmppc_e500_shadow_map()
    416  if (gfn_end + pfn - gfn > end)  in kvmppc_e500_shadow_map()
    418  if ((gfn & (tsize_pages - 1)) !=  in kvmppc_e500_shadow_map()
    451  pfn = gfn_to_pfn_memslot(slot, gfn);  in kvmppc_e500_shadow_map()
    455  __func__, (long)gfn);  in kvmppc_e500_shadow_map()
    [all …]
|
D | book3s_hv_rm_mmu.c |
    106  unsigned long gfn, ptel, head;  in remove_revmap_chain() local
    113  gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));  in remove_revmap_chain()
    114  memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);  in remove_revmap_chain()
    118  rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);  in remove_revmap_chain()
    141  unsigned long i, pa, gpa, gfn, psize;  in kvmppc_do_h_enter() local
    169  gfn = gpa >> PAGE_SHIFT;  in kvmppc_do_h_enter()
    170  memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);  in kvmppc_do_h_enter()
    184  slot_fn = gfn - memslot->base_gfn;  in kvmppc_do_h_enter()
    188  hva = __gfn_to_hva_memslot(memslot, gfn);  in kvmppc_do_h_enter()
|
D | book3s_64_mmu_host.c |
    99   unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;  in kvmppc_mmu_map_page() local
    138  mark_page_dirty(vcpu->kvm, gfn);  in kvmppc_mmu_map_page()
|
D | booke.c |
    1228  gfn_t gfn;  in kvmppc_handle_exit() local
    1257  gfn = gpaddr >> PAGE_SHIFT;  in kvmppc_handle_exit()
    1259  if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {  in kvmppc_handle_exit()
    1285  gfn_t gfn;  in kvmppc_handle_exit() local
    1305  gfn = gpaddr >> PAGE_SHIFT;  in kvmppc_handle_exit()
    1307  if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {  in kvmppc_handle_exit()
|
D | book3s.c |
    371  gfn_t gfn = gpa >> PAGE_SHIFT;  in kvmppc_gpa_to_pfn() local
    389  return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);  in kvmppc_gpa_to_pfn()
|
D | book3s_pr.c |
    262  gfn_t gfn, gfn_end;  in do_kvm_unmap_hva() local
    273  gfn = hva_to_gfn_memslot(hva_start, memslot);  in do_kvm_unmap_hva()
    276  kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,  in do_kvm_unmap_hva()
|
/linux-4.1.27/include/xen/ |
D | xen-ops.h |
    51  xen_pfn_t *gfn, int nr,
    70  xen_pfn_t gfn, int nr,
    77  xen_pfn_t *gfn, int nr,
|
/linux-4.1.27/Documentation/virtual/kvm/ |
D | locking.txt |
    21  - SPTE_HOST_WRITEABLE means the gfn is writable on host.
    22  - SPTE_MMU_WRITEABLE means the gfn is writable on mmu. The bit is set when
    23  the gfn is writable on guest mmu and it is not write-protected by shadow
    31  1): The mapping from gfn to pfn
    32  The mapping from gfn to pfn may be changed since we can only ensure the pfn
    62  to gfn. For indirect sp, before we do cmpxchg, we call gfn_to_pfn_atomic()
    63  to pin gfn to pfn, because after gfn_to_pfn_atomic():
    65  be reused for another gfn.
    69  Then, we can ensure the dirty bitmaps is correctly set for a gfn.
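
The excerpt describes the ordering that keeps a lockless spte update safe for an indirect sp: pin the gfn-to-pfn mapping, then cmpxchg the spte, then update the dirty bitmap. A hedged sketch of that ordering only; it is not the kernel's fast_page_fault() implementation, and the writable-bit constant used below is illustrative:

/* Hedged sketch of the ordering described in locking.txt above: pin the
 * gfn -> pfn mapping, recheck the spte, cmpxchg the writable bit, then
 * mark the page dirty.  Illustration of the ordering, not the real code. */
static bool fix_spte_sketch(struct kvm_vcpu *vcpu, gfn_t gfn, u64 *sptep)
{
        u64 old_spte = *sptep;
        bool ok = false;
        pfn_t pfn;

        pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);        /* pin: pfn cannot be freed/reused */
        if (spte_to_pfn(old_spte) == pfn &&
            cmpxchg64(sptep, old_spte, old_spte | PT_WRITABLE_MASK) == old_spte) {
                mark_page_dirty(vcpu->kvm, gfn);        /* dirty bitmap stays correct */
                ok = true;
        }

        kvm_release_pfn_clean(pfn);                     /* drop the pin again */
        return ok;
}
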
|
D | mmu.txt |
    34   gfn  guest frame number
    139  The linear range starts at (gfn << PAGE_SHIFT) and its size is determined
    142  If clear, this page corresponds to a guest page table denoted by the gfn
    176  gfn:
    191  perform a reverse map from a pte to a gfn. When role.direct is set, any
    192  element of this array can be calculated from the gfn field when used, in
    193  this case, the array of gfns is not allocated. See role.direct and gfn.
    240  reached given its gfn. This is used, for example, when swapping out a page.
|
/linux-4.1.27/arch/arm/kvm/ |
D | mmu.c |
    1003  gfn_t gfn = *ipap >> PAGE_SHIFT;  in transparent_hugepage_adjust() local
    1026  VM_BUG_ON((gfn & mask) != (pfn & mask));  in transparent_hugepage_adjust()
    1221  gfn_t gfn = fault_ipa >> PAGE_SHIFT;  in user_mem_abort() local
    1248  gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;  in user_mem_abort()
    1283  pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);  in user_mem_abort()
    1331  mark_page_dirty(kvm, gfn);  in user_mem_abort()
    1403  gfn_t gfn;  in kvm_handle_guest_abort() local
    1425  gfn = fault_ipa >> PAGE_SHIFT;  in kvm_handle_guest_abort()
    1426  memslot = gfn_to_memslot(vcpu->kvm, gfn);  in kvm_handle_guest_abort()
    1427  hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);  in kvm_handle_guest_abort()
    [all …]
|
/linux-4.1.27/drivers/pinctrl/ |
D | pinctrl-tegra.c |
    634  int fn, gn, gfn;  in tegra_pinctrl_probe() local
    666  for (gfn = 0; gfn < 4; gfn++)  in tegra_pinctrl_probe()
    667  if (g->funcs[gfn] == fn)  in tegra_pinctrl_probe()
    669  if (gfn == 4)  in tegra_pinctrl_probe()
|
/linux-4.1.27/arch/x86/include/asm/ |
D | kvm_host.h |
    77    static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)  in gfn_to_index() argument
    80    return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -  in gfn_to_index()
    222   gfn_t gfn;  member
    783   u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
    843   gfn_t gfn;  member
    893   u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
    983   gfn_t gfn, void *data, int offset, int len,
    1009  int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
    1167  extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
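
gfn_to_index() at lines 77-80 above is cut off after the first shift. An assumed completion based on the visible half: both the gfn and the memslot's base gfn are reduced to large-page granularity before subtracting:

/* Assumed completion of gfn_to_index() (lines 77-80 above); the second
 * operand of the subtraction is not among the listed lines. */
static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0 */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
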
|
/linux-4.1.27/drivers/scsi/bfa/ |
D | bfa_fcbuild.c |
    1452  fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);  in fc_gfn_req_build() local
    1459  memset(gfn, 0, sizeof(fcgs_gfn_req_t));  in fc_gfn_req_build()
    1460  gfn->wwn = wwn;  in fc_gfn_req_build()
|
/linux-4.1.27/arch/mips/include/asm/ |
D | kvm_host.h |
    103  extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
|