Home
last modified time | relevance | path

Searched refs: gfn_t (Results 1 – 25 of 25) sorted by relevance

/linux-4.4.14/include/linux/
Dkvm_host.h299 gfn_t base_gfn;
592 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
595 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
596 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
597 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
598 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
599 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
605 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
606 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
607 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
[all …]
Dkvm_types.h50 typedef u64 gfn_t; typedef
/linux-4.4.14/virt/kvm/
Dkvm_main.c115 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
822 gfn_t base_gfn; in __kvm_set_memory_region()
1116 gfn_t offset; in kvm_get_dirty_log_protect()
1157 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot()
1163 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_memslot()
1168 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn()
1180 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) in kvm_host_page_size()
1209 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, in __gfn_to_hva_many()
1210 gfn_t *nr_pages, bool write) in __gfn_to_hva_many()
1224 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, in gfn_to_hva_many()
[all …]
/linux-4.4.14/arch/x86/kvm/
Diommu.c44 gfn_t base_gfn, unsigned long npages);
46 static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, in kvm_pin_pages()
49 gfn_t end_gfn; in kvm_pin_pages()
75 gfn_t gfn, end_gfn; in kvm_iommu_map_pages()
274 gfn_t base_gfn, unsigned long npages) in kvm_iommu_put_pages()
277 gfn_t end_gfn, gfn; in kvm_iommu_put_pages()
Dmmu.c249 static gfn_t get_mmio_spte_gfn(u64 spte) in get_mmio_spte_gfn()
261 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte()
333 static gfn_t pse36_gfn_delta(u32 gpte) in pse36_gfn_delta()
754 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) in kvm_mmu_page_get_gfn()
762 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn()
774 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, in lpage_info_slot()
789 gfn_t gfn; in account_shadowed()
807 gfn_t gfn; in unaccount_shadowed()
821 static int __has_wrprotected_page(gfn_t gfn, int level, in __has_wrprotected_page()
834 static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level) in has_wrprotected_page()
[all …]
Dpaging_tmpl.h97 gfn_t table_gfn[PT_MAX_FULL_LEVELS];
105 gfn_t gfn;
109 static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl) in gpte_to_gfn_lvl()
207 gfn_t table_gfn; in FNAME()
267 gfn_t table_gfn; in FNAME()
276 gfn_t gfn; in FNAME()
300 gfn_t real_gfn; in FNAME()
458 gfn_t gfn; in FNAME()
581 gfn_t table_gfn; in FNAME()
607 gfn_t direct_gfn; in FNAME()
[all …]
Dx86.h88 gva_t gva, gfn_t gfn, unsigned access) in vcpu_cache_mmio_info()
176 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
180 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
Dmmu.h176 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
Dmmutrace.h202 TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
207 __field(gfn_t, gfn)
225 TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
230 __field(gfn_t, gfn)
Dmmu_audit.c99 gfn_t gfn; in audit_mappings()
136 gfn_t gfn; in inspect_spte_has_rmap()
Dmtrr.c319 gfn_t start, end; in update_mtrr()
624 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_mtrr_get_guest_memory_type()
700 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_mtrr_check_gfn_range_consistency()
Dx86.c492 gfn_t ngfn, void *data, int offset, int len, in kvm_read_guest_page_mmu()
496 gfn_t real_gfn; in kvm_read_guest_page_mmu()
510 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_read_nested_guest_page()
522 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; in load_pdptrs()
561 gfn_t gfn; in pdptrs_changed()
8086 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) in kvm_async_pf_hash_fn()
8096 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_add_async_pf_gfn()
8106 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_async_pf_gfn_slot()
8119 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_find_async_pf_gfn()
8124 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_del_async_pf_gfn()
Dvmx.c4185 gfn_t fn; in init_rmode_tss()
8868 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) in vmx_get_mt_mask()
10575 gfn_t offset, unsigned long mask) in vmx_enable_log_dirty_pt_masked()
Dsvm.c3993 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) in svm_get_mt_mask()
/linux-4.4.14/arch/x86/include/asm/
Dkvm_host.h79 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) in gfn_to_index()
224 gfn_t gfn;
229 gfn_t *gfns;
533 gfn_t mmio_gfn;
550 gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
844 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
897 gfn_t offset, unsigned long mask);
918 gfn_t gfn;
947 gfn_t gfn_offset, unsigned long mask);
1048 gfn_t gfn, void *data, int offset, int len,
[all …]
/linux-4.4.14/arch/mips/kvm/
Dtlb.c41 pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
144 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) in kvm_mips_map_page()
171 gfn_t gfn; in kvm_mips_translate_guest_kseg0_to_hpa()
264 gfn_t gfn; in kvm_mips_handle_kseg0_tlb_fault()
Demulate.c1538 gfn_t gfn; in kvm_mips_sync_icache()
/linux-4.4.14/arch/powerpc/kvm/
De500_mmu_host.c325 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_shadow_map()
562 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_tlb1_map()
614 gfn_t gfn = gpaddr >> PAGE_SHIFT; in kvmppc_mmu_map()
Dbook3s_64_mmu_hv.c681 gfn_t gfn, gfn_end; in kvm_handle_hva_range()
696 gfn_t gfn_offset = gfn - memslot->base_gfn; in kvm_handle_hva_range()
Dbook3s.c373 gfn_t gfn = gpa >> PAGE_SHIFT; in kvmppc_gpa_to_pfn()
Dbooke.c1230 gfn_t gfn; in kvmppc_handle_exit()
1287 gfn_t gfn; in kvmppc_handle_exit()
Dbook3s_pr.c262 gfn_t gfn, gfn_end; in do_kvm_unmap_hva()
/linux-4.4.14/arch/arm/kvm/
Dmmu.c1003 gfn_t gfn = *ipap >> PAGE_SHIFT; in transparent_hugepage_adjust()
1186 gfn_t gfn_offset, unsigned long mask) in kvm_mmu_write_protect_pt_masked()
1204 gfn_t gfn_offset, unsigned long mask) in kvm_arch_mmu_enable_log_dirty_pt_masked()
1222 gfn_t gfn = fault_ipa >> PAGE_SHIFT; in user_mem_abort()
1404 gfn_t gfn; in kvm_handle_guest_abort()
1482 gfn_t gfn, gfn_end; in handle_hva_to_gpa()
/linux-4.4.14/arch/mips/include/asm/
Dkvm_host.h104 extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
/linux-4.4.14/arch/s390/kvm/
Dkvm-s390.c269 gfn_t cur_gfn, last_gfn; in kvm_s390_sync_dirty_log()