
Searched for refs:gfn (results 1 – 43 of 43), sorted by relevance

/linux-4.4.14/arch/x86/kvm/
iommu.c
46 static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, in kvm_pin_pages() argument
52 pfn = gfn_to_pfn_memslot(slot, gfn); in kvm_pin_pages()
53 end_gfn = gfn + npages; in kvm_pin_pages()
54 gfn += 1; in kvm_pin_pages()
59 while (gfn < end_gfn) in kvm_pin_pages()
60 gfn_to_pfn_memslot(slot, gfn++); in kvm_pin_pages()
75 gfn_t gfn, end_gfn; in kvm_iommu_map_pages() local
85 gfn = slot->base_gfn; in kvm_iommu_map_pages()
86 end_gfn = gfn + slot->npages; in kvm_iommu_map_pages()
95 while (gfn < end_gfn) { in kvm_iommu_map_pages()
[all …]
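
Pieced together from the fragments above, the pinning helper resolves the
first gfn of the range (keeping that pfn as its return value), then walks
the remaining gfns so every backing page gets resolved. A minimal
self-contained sketch of that loop shape, with stand-in types and a
stand-in for gfn_to_pfn_memslot() (not the verbatim kernel code):

    typedef unsigned long long gfn_t;
    typedef unsigned long long pfn_t;

    /* resolve_gfn stands in for gfn_to_pfn_memslot(slot, gfn);
     * error handling is elided for brevity */
    static pfn_t pin_range(gfn_t gfn, unsigned long npages,
                           pfn_t (*resolve_gfn)(gfn_t))
    {
            gfn_t end_gfn = gfn + npages;
            pfn_t pfn = resolve_gfn(gfn);   /* first page: keep the pfn */

            gfn += 1;
            while (gfn < end_gfn)           /* rest of range: resolve to pin */
                    resolve_gfn(gfn++);
            return pfn;
    }
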
mmutrace.h
12 __field(__u64, gfn) \
19 __entry->gfn = sp->gfn; \
35 __entry->gfn, role.level, \
202 TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
203 TP_ARGS(sptep, gfn, access, gen),
207 __field(gfn_t, gfn)
214 __entry->gfn = gfn;
220 __entry->gfn, __entry->access, __entry->gen)
225 TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
226 TP_ARGS(addr, gfn, access),
[all …]
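
The lines above are pieces of TRACE_EVENT() definitions that record a gfn
together with its access bits and generation. A hedged sketch of the macro
shape those fragments come from (field layout reconstructed from the
fragments; kernel trace-header context assumed, so this only builds inside
a trace header, and it is not the exact kernel definition):

    TRACE_EVENT(mmio_spte_sketch,
            TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
            TP_ARGS(sptep, gfn, access, gen),
            TP_STRUCT__entry(
                    __field(void *, sptep)
                    __field(gfn_t, gfn)
                    __field(unsigned, access)
                    __field(unsigned int, gen)
            ),
            TP_fast_assign(
                    __entry->sptep = sptep;
                    __entry->gfn = gfn;
                    __entry->access = access;
                    __entry->gen = gen;
            ),
            TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
                      __entry->gfn, __entry->access, __entry->gen)
    );
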
mmu.c
231 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
238 mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT; in mark_mmio_spte()
240 trace_mark_mmio_spte(sptep, gfn, access, gen); in mark_mmio_spte()
261 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument
265 mark_mmio_spte(vcpu, sptep, gfn, access); in set_mmio_spte()
759 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
762 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn() argument
765 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index)); in kvm_mmu_page_set_gfn()
767 sp->gfns[index] = gfn; in kvm_mmu_page_set_gfn()
774 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, in lpage_info_slot() argument
[all …]
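
mark_mmio_spte() above caches an MMIO gfn inside the shadow PTE itself:
the frame number is shifted up by PAGE_SHIFT into the PTE's address bits
and OR-ed with the MMIO mask and access bits. A self-contained model of
that packing arithmetic (the mask value here is illustrative; the real
shadow_mmio_mask is configured at runtime):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define MMIO_MASK   (3ULL << 52)    /* illustrative mask bits */

    static uint64_t pack_mmio_spte(uint64_t gfn, unsigned access)
    {
            return MMIO_MASK | access | (gfn << PAGE_SHIFT);
    }

    static uint64_t spte_to_gfn(uint64_t spte)
    {
            /* drop the mask and the low access bits, then unshift */
            return (spte & ~MMIO_MASK & ~0xfffULL) >> PAGE_SHIFT;
    }

    int main(void)
    {
            uint64_t spte = pack_mmio_spte(0xfee00ULL, 0x3);
            printf("spte=%#llx gfn=%#llx\n", (unsigned long long)spte,
                   (unsigned long long)spte_to_gfn(spte));
            return 0;
    }
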
mmu_audit.c
99 gfn_t gfn; in audit_mappings() local
116 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); in audit_mappings()
117 pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn); in audit_mappings()
136 gfn_t gfn; in inspect_spte_has_rmap() local
139 gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt); in inspect_spte_has_rmap()
142 slot = __gfn_to_memslot(slots, gfn); in inspect_spte_has_rmap()
146 audit_printk(kvm, "no memslot for gfn %llx\n", gfn); in inspect_spte_has_rmap()
148 (long int)(sptep - rev_sp->spt), rev_sp->gfn); in inspect_spte_has_rmap()
153 rmapp = __gfn_to_rmap(gfn, rev_sp->role.level, slot); in inspect_spte_has_rmap()
205 slot = __gfn_to_memslot(slots, sp->gfn); in audit_write_protection()
[all …]
hyperv.c
121 u64 gfn; in kvm_hv_set_msr_pw() local
132 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; in kvm_hv_set_msr_pw()
133 addr = gfn_to_hva(kvm, gfn); in kvm_hv_set_msr_pw()
141 mark_page_dirty(kvm, gfn); in kvm_hv_set_msr_pw()
145 u64 gfn; in kvm_hv_set_msr_pw() local
152 gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; in kvm_hv_set_msr_pw()
155 gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, in kvm_hv_set_msr_pw()
158 mark_page_dirty(kvm, gfn); in kvm_hv_set_msr_pw()
196 u64 gfn; in kvm_hv_set_msr() local
205 gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT; in kvm_hv_set_msr()
[all …]
paging_tmpl.h
105 gfn_t gfn; member
276 gfn_t gfn; in FNAME() local
364 gfn = gpte_to_gfn_lvl(pte, walker->level); in FNAME()
365 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT; in FNAME()
368 gfn += pse36_gfn_delta(pte); in FNAME()
370 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
374 walker->gfn = real_gpa >> PAGE_SHIFT; in FNAME()
458 gfn_t gfn; in FNAME() local
466 gfn = gpte_to_gfn(gpte); in FNAME()
469 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, in FNAME()
[all …]
x86.h
88 gva_t gva, gfn_t gfn, unsigned access) in vcpu_cache_mmio_info() argument
92 vcpu->arch.mmio_gfn = gfn; in vcpu_cache_mmio_info()
176 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
180 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
mtrr.c
624 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_mtrr_get_guest_memory_type() argument
633 start = gfn_to_gpa(gfn); in kvm_mtrr_get_guest_memory_type()
700 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_mtrr_check_gfn_range_consistency() argument
708 start = gfn_to_gpa(gfn); in kvm_mtrr_check_gfn_range_consistency()
709 end = gfn_to_gpa(gfn + page_num); in kvm_mtrr_check_gfn_range_consistency()
x86.c
510 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_read_nested_guest_page() argument
513 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, in kvm_read_nested_guest_page()
561 gfn_t gfn; in pdptrs_changed() local
571 gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; in pdptrs_changed()
573 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), in pdptrs_changed()
8086 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) in kvm_async_pf_hash_fn() argument
8088 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); in kvm_async_pf_hash_fn()
8096 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_add_async_pf_gfn() argument
8098 u32 key = kvm_async_pf_hash_fn(gfn); in kvm_add_async_pf_gfn()
8103 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
[all …]
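
The async page-fault fragments above keep outstanding gfns in a small
per-vcpu hash table: hash_32() over the gfn's low 32 bits picks a slot,
and collisions are resolved by probing (the loop between lines 8098 and
8103 is elided in the search output). A self-contained model of that
insert path (the probe step and hash constant are assumptions in this
sketch):

    #include <stdint.h>

    #define NSLOTS  64              /* an ASYNC_PF_PER_VCPU-sized table */
    #define EMPTY   (~0ULL)

    static uint64_t gfns[NSLOTS];

    /* stand-in for hash_32(gfn & 0xffffffff, order_base_2(NSLOTS)):
     * multiply, then keep the top log2(NSLOTS) = 6 bits */
    static uint32_t hash_gfn(uint64_t gfn)
    {
            return ((uint32_t)gfn * 0x9e370001u) >> (32 - 6);
    }

    static void add_gfn(uint64_t gfn)
    {
            uint32_t key = hash_gfn(gfn);

            while (gfns[key] != EMPTY)              /* probe on collision */
                    key = (key + 1) & (NSLOTS - 1);
            gfns[key] = gfn;
    }

    int main(void)
    {
            for (int i = 0; i < NSLOTS; i++)
                    gfns[i] = EMPTY;
            add_gfn(0x1000);
            add_gfn(0x2000);
            return 0;
    }
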
vmx.c
8868 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) in vmx_get_mt_mask() argument
8904 cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); in vmx_get_mt_mask()
svm.c
3993 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) in svm_get_mt_mask() argument
/linux-4.4.14/include/linux/
kvm_host.h
592 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
595 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
596 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
597 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
598 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
599 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
605 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
606 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
607 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
609 pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
[all …]
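
The prototypes above are KVM's gfn translation surface: a gfn resolves to
a struct page, a host virtual address (hva), or a host pfn; _memslot
variants skip the slot lookup, _atomic variants avoid sleeping, and _prot
variants also report writability. A hedged kernel-context sketch of the
typical call pattern (read_guest_byte() is a hypothetical helper, not a
kernel function; only gfn_to_hva() from the list above plus
kvm_is_error_hva()/copy_from_user() are assumed):

    static int read_guest_byte(struct kvm *kvm, gfn_t gfn, u8 *out)
    {
            unsigned long hva = gfn_to_hva(kvm, gfn);  /* gfn -> host VA */

            if (kvm_is_error_hva(hva))      /* no memslot backs this gfn */
                    return -EFAULT;
            if (copy_from_user(out, (void __user *)hva, 1))
                    return -EFAULT;
            return 0;
    }
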
/linux-4.4.14/include/trace/events/
kvm.h
249 TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
250 TP_ARGS(gfn, level, slot, ref),
254 __field( u64, gfn )
260 __entry->gfn = gfn;
262 __entry->hva = ((gfn - slot->base_gfn) <<
268 __entry->hva, __entry->gfn, __entry->level,
275 TP_PROTO(u64 gva, u64 gfn),
277 TP_ARGS(gva, gfn),
281 __field(u64, gfn)
286 __entry->gfn = gfn;
[all …]
/linux-4.4.14/virt/kvm/
kvm_main.c
115 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
1157 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
1159 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
1163 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_memslot() argument
1165 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn); in kvm_vcpu_gfn_to_memslot()
1168 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
1170 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
1180 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) in kvm_host_page_size() argument
1187 addr = gfn_to_hva(kvm, gfn); in kvm_host_page_size()
1209 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, in __gfn_to_hva_many() argument
[all …]
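
gfn_to_memslot() above dispatches to __gfn_to_memslot(), which returns the
slot whose [base_gfn, base_gfn + npages) interval contains the gfn; a gfn
outside every slot (what kvm_is_visible_gfn() checks for) is MMIO or
invalid. A self-contained model of the containment test (the kernel
searches a sorted slot array rather than scanning linearly):

    #include <stddef.h>

    typedef unsigned long long gfn_t;

    struct memslot_model {
            gfn_t base_gfn;
            unsigned long npages;
    };

    static struct memslot_model *find_slot(struct memslot_model *slots,
                                           size_t nslots, gfn_t gfn)
    {
            for (size_t i = 0; i < nslots; i++)
                    if (gfn >= slots[i].base_gfn &&
                        gfn < slots[i].base_gfn + slots[i].npages)
                            return &slots[i];
            return NULL;    /* no memslot for this gfn */
    }
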
/linux-4.4.14/arch/mips/kvm/
tlb.c
41 pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
144 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) in kvm_mips_map_page() argument
149 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) in kvm_mips_map_page()
153 pfn = kvm_mips_gfn_to_pfn(kvm, gfn); in kvm_mips_map_page()
156 kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); in kvm_mips_map_page()
161 kvm->arch.guest_pmap[gfn] = pfn; in kvm_mips_map_page()
171 gfn_t gfn; in kvm_mips_translate_guest_kseg0_to_hpa() local
181 gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT); in kvm_mips_translate_guest_kseg0_to_hpa()
183 if (gfn >= kvm->arch.guest_pmap_npages) { in kvm_mips_translate_guest_kseg0_to_hpa()
184 kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn, in kvm_mips_translate_guest_kseg0_to_hpa()
[all …]
emulate.c
1538 gfn_t gfn; in kvm_mips_sync_icache() local
1541 gfn = va >> PAGE_SHIFT; in kvm_mips_sync_icache()
1543 if (gfn >= kvm->arch.guest_pmap_npages) { in kvm_mips_sync_icache()
1544 kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn); in kvm_mips_sync_icache()
1549 pfn = kvm->arch.guest_pmap[gfn]; in kvm_mips_sync_icache()
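
The MIPS fragments above resolve gfns through a flat guest_pmap[] array
indexed directly by gfn: out-of-range gfns are rejected against
guest_pmap_npages, and missing entries are filled lazily through the
kvm_mips_gfn_to_pfn hook. A self-contained model of that pattern (names
are stand-ins):

    #define INVALID_PAGE  (~0UL)

    static unsigned long map_page(unsigned long *guest_pmap,
                                  unsigned long npages, unsigned long gfn,
                                  unsigned long (*lookup_pfn)(unsigned long))
    {
            if (gfn >= npages)
                    return INVALID_PAGE;    /* bounds check, as above */
            if (guest_pmap[gfn] == INVALID_PAGE)
                    guest_pmap[gfn] = lookup_pfn(gfn);  /* lazy fill */
            return guest_pmap[gfn];
    }
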
/linux-4.4.14/drivers/xen/
xlate_mmu.c
41 typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
80 static void setup_hparams(unsigned long gfn, void *data) in setup_hparams() argument
85 info->h_gpfns[info->h_iter] = gfn; in setup_hparams()
142 xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument
155 data.fgfn = gfn; in xen_xlate_remap_gfn_array()
171 static void unmap_gfn(unsigned long gfn, void *data) in unmap_gfn() argument
176 xrp.gpfn = gfn; in unmap_gfn()
privcmd.c
350 xen_pfn_t gfn; in mmap_return_error() local
352 ret = get_user(gfn, st->user_gfn); in mmap_return_error()
360 gfn |= (err == -ENOENT) ? in mmap_return_error()
363 return __put_user(gfn, st->user_gfn++); in mmap_return_error()
/linux-4.4.14/arch/powerpc/kvm/
book3s_64_mmu_hv.c
444 unsigned long gpa, gfn, hva, pfn; in kvmppc_book3s_hv_page_fault() local
484 gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_hv_page_fault()
485 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
513 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_hv_page_fault()
671 unsigned long gfn)) in kvm_handle_hva_range() argument
681 gfn_t gfn, gfn_end; in kvm_handle_hva_range() local
692 gfn = hva_to_gfn_memslot(hva_start, memslot); in kvm_handle_hva_range()
695 for (; gfn < gfn_end; ++gfn) { in kvm_handle_hva_range()
696 gfn_t gfn_offset = gfn - memslot->base_gfn; in kvm_handle_hva_range()
698 ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn); in kvm_handle_hva_range()
[all …]
e500_mmu_host.c
325 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_shadow_map() argument
355 slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); in kvmppc_e500_shadow_map()
356 hva = gfn_to_hva_memslot(slot, gfn); in kvmppc_e500_shadow_map()
383 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
411 gfn_start = gfn & ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
414 if (gfn_start + pfn - gfn < start) in kvmppc_e500_shadow_map()
416 if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map()
418 if ((gfn & (tsize_pages - 1)) != in kvmppc_e500_shadow_map()
451 pfn = gfn_to_pfn_memslot(slot, gfn); in kvmppc_e500_shadow_map()
455 __func__, (long)gfn); in kvmppc_e500_shadow_map()
[all …]
book3s_hv_rm_mmu.c
121 unsigned long gfn; in revmap_for_hpte() local
123 gfn = hpte_rpn(hpte_gr, hpte_page_size(hpte_v, hpte_gr)); in revmap_for_hpte()
124 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in revmap_for_hpte()
128 rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); in revmap_for_hpte()
171 unsigned long i, pa, gpa, gfn, psize; in kvmppc_do_h_enter() local
199 gfn = gpa >> PAGE_SHIFT; in kvmppc_do_h_enter()
200 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in kvmppc_do_h_enter()
214 slot_fn = gfn - memslot->base_gfn; in kvmppc_do_h_enter()
218 hva = __gfn_to_hva_memslot(memslot, gfn); in kvmppc_do_h_enter()
book3s_64_mmu_host.c
100 unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT; in kvmppc_mmu_map_page() local
139 mark_page_dirty(vcpu->kvm, gfn); in kvmppc_mmu_map_page()
booke.c
1230 gfn_t gfn; in kvmppc_handle_exit() local
1259 gfn = gpaddr >> PAGE_SHIFT; in kvmppc_handle_exit()
1261 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { in kvmppc_handle_exit()
1287 gfn_t gfn; in kvmppc_handle_exit() local
1307 gfn = gpaddr >> PAGE_SHIFT; in kvmppc_handle_exit()
1309 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { in kvmppc_handle_exit()
book3s.c
373 gfn_t gfn = gpa >> PAGE_SHIFT; in kvmppc_gpa_to_pfn() local
391 return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable); in kvmppc_gpa_to_pfn()
book3s_pr.c
262 gfn_t gfn, gfn_end; in do_kvm_unmap_hva() local
273 gfn = hva_to_gfn_memslot(hva_start, memslot); in do_kvm_unmap_hva()
276 kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT, in do_kvm_unmap_hva()
/linux-4.4.14/include/xen/
xen-ops.h
51 xen_pfn_t *gfn, int nr,
70 xen_pfn_t gfn, int nr,
77 xen_pfn_t *gfn, int nr,
grant_table.h
246 typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
/linux-4.4.14/Documentation/virtual/kvm/
locking.txt
21 - SPTE_HOST_WRITEABLE means the gfn is writable on the host.
22 - SPTE_MMU_WRITEABLE means the gfn is writable on the mmu. The bit is set when
23 the gfn is writable in the guest mmu and it is not write-protected by shadow
31 1): The mapping from gfn to pfn
32 The mapping from gfn to pfn may be changed since we can only ensure the pfn
62 to gfn. For indirect sp, before we do cmpxchg, we call gfn_to_pfn_atomic()
63 to pin gfn to pfn, because after gfn_to_pfn_atomic():
65 be reused for another gfn.
69 Then, we can ensure the dirty bitmap is correctly set for a gfn.
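
The ordering the excerpt describes, in outline: the fast path snapshots
the spte, pins the gfn's backing page with gfn_to_pfn_atomic() so the pfn
cannot be reused for another gfn, and only then publishes the new spte
with cmpxchg (pseudocode, not the kernel's actual fast-path code):

    /* pseudocode outline of the pin-then-cmpxchg ordering */
    old = READ_ONCE(*sptep);             /* 1. snapshot the spte          */
    pfn = gfn_to_pfn_atomic(kvm, gfn);   /* 2. pin the gfn's page         */
    if (spte_to_pfn(old) == pfn)         /* 3. mapping still the same?    */
            cmpxchg64(sptep, old, new);  /* 4. publish atomically         */
    kvm_release_pfn_clean(pfn);          /* 5. drop the pin               */
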
mmu.txt
34 gfn guest frame number
139 The linear range starts at (gfn << PAGE_SHIFT) and its size is determined
142 If clear, this page corresponds to a guest page table denoted by the gfn
182 gfn:
197 perform a reverse map from a pte to a gfn. When role.direct is set, any
198 element of this array can be calculated from the gfn field when used; in
199 this case, the array of gfns is not allocated. See role.direct and gfn.
246 reached given its gfn. This is used, for example, when swapping out a page.
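
The role.direct case described at lines 197-199 matches
kvm_mmu_page_get_gfn() in mmu.c above (line 759): for a direct shadow
page, the gfn of any entry is derivable from the page's own gfn and the
entry index, so the gfns[] reverse-map array can be skipped. The
arithmetic, worked as a self-contained example:

    #include <assert.h>

    #define PT64_LEVEL_BITS  9      /* 512 entries per 64-bit page table */

    typedef unsigned long long gfn_t;

    /* formula from mmu.c line 759 above */
    static gfn_t direct_gfn(gfn_t base_gfn, int index, int level)
    {
            return base_gfn + ((gfn_t)index << ((level - 1) * PT64_LEVEL_BITS));
    }

    int main(void)
    {
            /* level 1 (4 KiB ptes): consecutive indexes, consecutive gfns */
            assert(direct_gfn(0x1000, 5, 1) == 0x1005);
            /* level 2 (2 MiB entries): each index spans 512 gfns */
            assert(direct_gfn(0x1000, 1, 2) == 0x1200);
            return 0;
    }
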
/linux-4.4.14/arch/arm/include/asm/xen/
page.h
51 static inline unsigned long gfn_to_pfn(unsigned long gfn) in gfn_to_pfn() argument
53 return gfn; in gfn_to_pfn()
/linux-4.4.14/arch/x86/include/asm/xen/
page.h
200 static inline unsigned long gfn_to_pfn(unsigned long gfn) in gfn_to_pfn() argument
203 return gfn; in gfn_to_pfn()
205 return mfn_to_pfn(gfn); in gfn_to_pfn()
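
Both gfn_to_pfn() variants above turn on whether the domain is
auto-translated: the ARM version is a pure identity (gfn == pfn), while
the x86 version falls back to mfn_to_pfn() for PV guests. The predicate
between the two returns is elided in the search output; it is presumably
a feature check, roughly (kernel-context sketch, not the verbatim source):

    static inline unsigned long gfn_to_pfn_sketch(unsigned long gfn)
    {
            if (xen_feature(XENFEAT_auto_translated_physmap))
                    return gfn;             /* HVM/auto-translated: identity */
            return mfn_to_pfn(gfn);         /* PV: the gfn is an mfn */
    }
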
/linux-4.4.14/arch/arm/xen/
enlighten.c
54 xen_pfn_t *gfn, int nr, in xen_remap_domain_gfn_array() argument
59 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, in xen_remap_domain_gfn_array()
67 xen_pfn_t gfn, int nr, in xen_remap_domain_gfn_range() argument
/linux-4.4.14/arch/arm/kvm/
mmu.c
1003 gfn_t gfn = *ipap >> PAGE_SHIFT; in transparent_hugepage_adjust() local
1026 VM_BUG_ON((gfn & mask) != (pfn & mask)); in transparent_hugepage_adjust()
1222 gfn_t gfn = fault_ipa >> PAGE_SHIFT; in user_mem_abort() local
1249 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; in user_mem_abort()
1284 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); in user_mem_abort()
1332 mark_page_dirty(kvm, gfn); in user_mem_abort()
1404 gfn_t gfn; in kvm_handle_guest_abort() local
1426 gfn = fault_ipa >> PAGE_SHIFT; in kvm_handle_guest_abort()
1427 memslot = gfn_to_memslot(vcpu->kvm, gfn); in kvm_handle_guest_abort()
1428 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); in kvm_handle_guest_abort()
[all …]
/linux-4.4.14/drivers/pinctrl/
pinctrl-tegra.c
650 int fn, gn, gfn; in tegra_pinctrl_probe() local
682 for (gfn = 0; gfn < 4; gfn++) in tegra_pinctrl_probe()
683 if (g->funcs[gfn] == fn) in tegra_pinctrl_probe()
685 if (gfn == 4) in tegra_pinctrl_probe()
/linux-4.4.14/arch/x86/include/asm/
kvm_host.h
79 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) in gfn_to_index() argument
82 return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - in gfn_to_index()
224 gfn_t gfn; member
844 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
918 gfn_t gfn; member
1048 gfn_t gfn, void *data, int offset, int len,
1072 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
1241 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
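
gfn_to_index() at line 79 above maps a gfn to an index in a memslot's
per-large-page metadata: both the gfn and the slot's base_gfn are reduced
to large-page granularity before subtracting. A worked, self-contained
example assuming the x86 geometry of 9 gfn bits per level:

    #include <assert.h>

    typedef unsigned long long gfn_t;

    #define HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)  /* assumed x86 */

    static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
            return (gfn >> HPAGE_GFN_SHIFT(level)) -
                   (base_gfn >> HPAGE_GFN_SHIFT(level));
    }

    int main(void)
    {
            /* slot based at gfn 0x400; level 2 == 2 MiB == 512 gfns */
            assert(gfn_to_index(0x5ff, 0x400, 2) == 0);  /* first 2 MiB  */
            assert(gfn_to_index(0x600, 0x400, 2) == 1);  /* second 2 MiB */
            return 0;
    }
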
/linux-4.4.14/drivers/xen/xenbus/
xenbus_client.c
608 static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn, in xenbus_map_ring_setup_grant_hvm() argument
614 unsigned long vaddr = (unsigned long)gfn_to_virt(gfn); in xenbus_map_ring_setup_grant_hvm()
816 static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn, in xenbus_unmap_ring_setup_grant_hvm() argument
823 info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn); in xenbus_unmap_ring_setup_grant_hvm()
/linux-4.4.14/drivers/tty/hvc/
hvc_xen.c
203 unsigned long gfn; in xen_hvm_console_init() local
232 gfn = v; in xen_hvm_console_init()
233 info->intf = xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE); in xen_hvm_console_init()
/linux-4.4.14/arch/x86/xen/
mmu.c
2812 xen_pfn_t *gfn, int nr, in do_remap_gfn() argument
2828 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, in do_remap_gfn()
2835 rmd.mfn = gfn; in do_remap_gfn()
2897 xen_pfn_t gfn, int nr, in xen_remap_domain_gfn_range() argument
2901 return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages); in xen_remap_domain_gfn_range()
2907 xen_pfn_t *gfn, int nr, in xen_remap_domain_gfn_array() argument
2916 return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages); in xen_remap_domain_gfn_array()
/linux-4.4.14/drivers/block/
xen-blkfront.c
291 unsigned long gfn, in get_grant() argument
308 gfn, 0); in get_grant()
500 static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset, in blkif_setup_rw_req_grant() argument
525 gnt_list_entry = get_grant(&setup->gref_head, gfn, info); in blkif_setup_rw_req_grant()
1193 static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset, in blkif_copy_from_grant() argument
/linux-4.4.14/drivers/net/xen-netback/
netback.c
287 static void xenvif_setup_copy_gop(unsigned long gfn, in xenvif_setup_copy_gop() argument
318 copy_gop->source.u.gmfn = gfn; in xenvif_setup_copy_gop()
336 static void xenvif_gop_frag_copy_grant(unsigned long gfn, in xenvif_gop_frag_copy_grant() argument
345 xenvif_setup_copy_gop(gfn, offset, &bytes, data); in xenvif_gop_frag_copy_grant()
/linux-4.4.14/drivers/net/
xen-netfront.c
425 static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, in xennet_tx_setup_grant() argument
443 gfn, GNTMAP_readonly); in xennet_tx_setup_grant()
475 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset, in xennet_make_one_txreq() argument
482 xennet_tx_setup_grant(gfn, offset, len, data); in xennet_make_one_txreq()
/linux-4.4.14/drivers/scsi/bfa/
bfa_fcbuild.c
1452 fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1); in fc_gfn_req_build() local
1459 memset(gfn, 0, sizeof(fcgs_gfn_req_t)); in fc_gfn_req_build()
1460 gfn->wwn = wwn; in fc_gfn_req_build()
/linux-4.4.14/arch/mips/include/asm/
kvm_host.h
104 extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);