/linux-4.1.27/arch/nios2/mm/ |
D | init.c |
    106  unsigned long vpage;  in alloc_kuser_page() local
    108  vpage = get_zeroed_page(GFP_ATOMIC);  in alloc_kuser_page()
    109  if (!vpage)  in alloc_kuser_page()
    113  memcpy((void *)vpage, __kuser_helper_start, kuser_sz);  in alloc_kuser_page()
    115  flush_icache_range(vpage, vpage + KUSER_SIZE);  in alloc_kuser_page()
    116  kuser_page[0] = virt_to_page(vpage);  in alloc_kuser_page()
|
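The init.c hits above cover nearly the whole body of the nios2 alloc_kuser_page() routine: allocate one zeroed page, copy the kuser helpers into it, flush the instruction cache over it, and remember its struct page. Below is a sketch reassembled from those hits; the kuser_sz computation, the extern symbol declarations, the includes and the return values are assumptions, not quotes from the file.

/* Sketch of alloc_kuser_page() as suggested by the init.c hits above.
 * kuser_sz, the extern declarations and the return values are assumed. */
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

static struct page *kuser_page[1];

static int alloc_kuser_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];	/* assumed symbols */
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;	/* assumed */
	unsigned long vpage;

	vpage = get_zeroed_page(GFP_ATOMIC);
	if (!vpage)
		return -ENOMEM;

	/* Copy the kuser helpers into the fresh page ... */
	memcpy((void *)vpage, __kuser_helper_start, kuser_sz);

	/* ... make sure the I-cache sees the new code (KUSER_SIZE comes from
	 * the nios2 headers) ... */
	flush_icache_range(vpage, vpage + KUSER_SIZE);

	/* ... and remember the struct page so it can be mapped into user space. */
	kuser_page[0] = virt_to_page(vpage);

	return 0;
}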
/linux-4.1.27/arch/powerpc/kvm/ |
D | trace_pr.h |
    43   __field( unsigned long long, vpage )
    53   __entry->vpage = orig_pte->vpage;
    59   __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
    72   __field( u64, vpage )
    81   __entry->vpage = pte->pte.vpage;
    90   __entry->vpage, __entry->raddr, __entry->flags)
    101  __field( u64, vpage )
    110  __entry->vpage = pte->pte.vpage;
    119  __entry->vpage, __entry->raddr, __entry->flags)
|
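The trace_pr.h hits show vpage being captured as a tracepoint field three times, each time via a __field() declaration plus an __entry->vpage assignment in TP_fast_assign(). A minimal TRACE_EVENT sketch of that pattern follows; the event name, the TP_PROTO arguments and the companion fields are illustrative assumptions, and only the handling of vpage mirrors the hits above.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_vpage_demo

#if !defined(_TRACE_KVM_VPAGE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_VPAGE_DEMO_H

#include <linux/tracepoint.h>

/* Hypothetical event name and arguments; only the vpage field/assignment
 * follows the pattern visible in trace_pr.h above. */
TRACE_EVENT(kvm_book3s_vpage_demo,
	TP_PROTO(u64 vpage, u64 raddr, u8 flags),
	TP_ARGS(vpage, raddr, flags),

	TP_STRUCT__entry(
		__field(	u64,	vpage	)
		__field(	u64,	raddr	)
		__field(	u8,	flags	)
	),

	TP_fast_assign(
		__entry->vpage = vpage;
		__entry->raddr = raddr;
		__entry->flags = flags;
	),

	TP_printk("vpage=0x%llx raddr=0x%llx flags=%x",
		  __entry->vpage, __entry->raddr, __entry->flags)
);

#endif /* _TRACE_KVM_VPAGE_DEMO_H */

#include <trace/define_trace.h>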
D | book3s_mmu_hpte.c |
    48   static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)  in kvmppc_mmu_hash_vpte() argument
    50   return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);  in kvmppc_mmu_hash_vpte()
    53   static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)  in kvmppc_mmu_hash_vpte_long() argument
    55   return hash_64((vpage & 0xffffff000ULL) >> 12,  in kvmppc_mmu_hash_vpte_long()
    60   static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)  in kvmppc_mmu_hash_vpte_64k() argument
    62   return hash_64((vpage & 0xffffffff0ULL) >> 4,  in kvmppc_mmu_hash_vpte_64k()
    86   index = kvmppc_mmu_hash_vpte(pte->pte.vpage);  in kvmppc_mmu_hpte_cache_map()
    90   index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);  in kvmppc_mmu_hpte_cache_map()
    96   index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);  in kvmppc_mmu_hpte_cache_map()
    236  if ((pte->pte.vpage & vp_mask) == guest_vp)  in kvmppc_mmu_pte_vflush_short()
    [all …]
|
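The first six book3s_mmu_hpte.c hits give the three vpage hash helpers almost verbatim; they differ only in how much of the virtual page number is masked and shifted before hashing. Reassembled they look like the sketch below. The second argument of the two truncated hash_64() calls (HPTEG_HASH_BITS_VPTE_LONG / _VPTE_64K) is filled in here as an assumption based on the naming pattern, not quoted from the file.

/* Reassembled from the book3s_mmu_hpte.c hits above. */
#include <linux/hash.h>

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	/* 4K case: hash the low 36 bits of the virtual page number. */
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	/* "Long" variant: drop the low 12 bits, so 4096 consecutive 4K
	 * virtual pages land in the same bucket. */
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);	/* assumed constant */
}

static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
	/* 64K variant: drop the low 4 bits (16 x 4K sub-pages per 64K page). */
	return hash_64((vpage & 0xffffffff0ULL) >> 4,
		       HPTEG_HASH_BITS_VPTE_64K);	/* assumed constant */
}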
D | book3s_32_mmu.c |
    99   return pte.vpage;  in kvmppc_mmu_book3s_32_ea_to_vp()
    177  pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;  in kvmppc_mmu_book3s_32_xlate_bat()
    216  pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);  in kvmppc_mmu_book3s_32_xlate_pte()
    319  pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);  in kvmppc_mmu_book3s_32_xlate()
|
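Line 177 is the interesting hit here: on the BAT translation path the virtual page number is built directly from the effective address and the VSID. A small, self-contained user-space demo of that arithmetic (no kernel context; the example values are made up):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the expression in the book3s_32_mmu.c hit at line 177: the low
 * 16 bits of the 4K page index ORed with a VSID that already occupies the
 * higher bits. The VSID value below is invented for the demo. */
static uint64_t bat_vpage(uint32_t eaddr, uint64_t vsid)
{
	return (((uint64_t)eaddr >> 12) & 0xffff) | vsid;
}

int main(void)
{
	uint32_t eaddr = 0x12345678;		/* arbitrary effective address */
	uint64_t vsid  = 0xabcdULL << 16;	/* made-up VSID, pre-shifted */

	printf("vpage = 0x%llx\n",
	       (unsigned long long)bat_vpage(eaddr, vsid));
	return 0;
}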
D | book3s_64_mmu.c |
    231  gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);  in kvmppc_mmu_book3s_64_xlate()
    310  gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);  in kvmppc_mmu_book3s_64_xlate()
    339  eaddr, avpn, gpte->vpage, gpte->raddr);  in kvmppc_mmu_book3s_64_xlate()
|
D | book3s_32_mmu_host.c |
    255  orig_pte->vpage, hpaddr);  in kvmppc_mmu_map_page()
    271  kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);  in kvmppc_mmu_unmap_page()
|
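kvmppc_mmu_unmap_page() flushes shadow entries by virtual page with an explicit mask (0xfffffffffULL here), and the book3s_mmu_hpte.c hit at line 236 shows the matching test on the other side: (pte->pte.vpage & vp_mask) == guest_vp. A tiny user-space demo of that masked-match predicate, with invented values:

#include <stdint.h>
#include <stdio.h>

/* The predicate used when flushing shadow PTEs by virtual page, as seen in
 * the book3s_mmu_hpte.c hit at line 236. All values below are made up. */
static int vpage_matches(uint64_t pte_vpage, uint64_t guest_vp, uint64_t vp_mask)
{
	return (pte_vpage & vp_mask) == guest_vp;
}

int main(void)
{
	uint64_t vp_mask  = 0xfffffffffULL;	/* mask used by the 32-bit host MMU above */
	uint64_t guest_vp = 0x000012345ULL;	/* target virtual page (made up) */

	printf("%d\n", vpage_matches(0x7000012345ULL, guest_vp, vp_mask)); /* 1: same page */
	printf("%d\n", vpage_matches(0x0000054321ULL, guest_vp, vp_mask)); /* 0: different page */
	return 0;
}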
D | book3s_pr.c |
    557  pte.vpage = eaddr >> 12;  in kvmppc_handle_pagefault()
    563  pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));  in kvmppc_handle_pagefault()
    575  pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));  in kvmppc_handle_pagefault()
    577  pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));  in kvmppc_handle_pagefault()
    578  pte.vpage |= vsid;  in kvmppc_handle_pagefault()
|
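The book3s_pr.c hits show kvmppc_handle_pagefault() starting from eaddr >> 12 and then ORing a marker into the upper bits: VSID_REAL, VSID_REAL_DR or VSID_REAL_IR shifted up by SID_SHIFT - 12 for the real-mode cases, plus the translated vsid at line 578. A user-space demo of that composition; the marker constant below is a placeholder, not the kernel's definition.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: SID_SHIFT is the powerpc 256MB segment shift (28); the
 * marker value below is invented, the real VSID_REAL_DR lives in the Book3S
 * headers. */
#define SID_SHIFT		28
#define FAKE_VSID_REAL_DR	0x123ULL	/* stand-in for VSID_REAL_DR */

int main(void)
{
	uint64_t eaddr = 0x00000000deadb000ULL;	/* made-up faulting address */
	uint64_t vpage;

	vpage  = eaddr >> 12;				/* as at line 557 */
	vpage |= FAKE_VSID_REAL_DR << (SID_SHIFT - 12);	/* like line 575 */

	printf("vpage = 0x%llx\n", (unsigned long long)vpage);
	return 0;
}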
D | book3s_64_mmu_host.c |
    224  kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);  in kvmppc_mmu_unmap_page()
|
D | book3s.c |
    406  pte->vpage = VSID_REAL | eaddr >> 12;  in kvmppc_xlate()
|
D | booke.c |
    1943  pte->vpage = eaddr >> PAGE_SHIFT;  in kvmppc_xlate()
    1972  pte->vpage = eaddr >> PAGE_SHIFT;  in kvmppc_xlate()
|
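Book E has no hashed page table, so its kvmppc_xlate() derives the virtual page number directly from the effective address: pte->vpage = eaddr >> PAGE_SHIFT. Compare the book3s.c hit at line 406, which additionally tags the real-mode value with VSID_REAL. A one-line demo of the booke form, with 4K pages assumed:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4K pages assumed for the demo */

int main(void)
{
	uint64_t eaddr = 0xc0001234ULL;			/* made-up address */
	uint64_t vpage = eaddr >> PAGE_SHIFT;		/* as in booke.c lines 1943/1972 */

	printf("eaddr 0x%llx -> vpage 0x%llx\n",
	       (unsigned long long)eaddr, (unsigned long long)vpage);
	return 0;
}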
D | book3s_64_mmu_hv.c |
    346  gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);  in kvmppc_mmu_book3s_64_hv_xlate()
|
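Line 346 rebuilds the guest virtual page number from an HPTE: the abbreviated virtual page number (AVPN) field of the valid doubleword is shifted back up, and the low 12 bits of the 4K page index are taken from the effective address. A user-space sketch of that bit manipulation; HPTE_V_AVPN is given a placeholder definition here rather than the one from the powerpc headers.

#include <stdint.h>
#include <stdio.h>

/* Placeholder: the real HPTE_V_AVPN mask is defined in the powerpc MMU
 * headers; the value below exists only to make the demo self-contained. */
#define HPTE_V_AVPN 0x3fffffffffffff80ULL

int main(void)
{
	uint64_t v     = 0x0000123456789a81ULL;	/* made-up HPTE valid word */
	uint64_t eaddr = 0x00000000deadbeefULL;	/* made-up effective address */
	uint64_t vpage;

	/* As in book3s_64_mmu_hv.c line 346: AVPN from the HPTE, low page-index
	 * bits from the effective address. */
	vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	printf("vpage = 0x%llx\n", (unsigned long long)vpage);
	return 0;
}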
/linux-4.1.27/arch/arm64/kernel/ |
D | vdso.c |
    65  unsigned long vpage;  in alloc_vectors_page() local
    67  vpage = get_zeroed_page(GFP_ATOMIC);  in alloc_vectors_page()
    69  if (!vpage)  in alloc_vectors_page()
    73  memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,  in alloc_vectors_page()
    77  memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,  in alloc_vectors_page()
    80  flush_icache_range(vpage, vpage + PAGE_SIZE);  in alloc_vectors_page()
    81  vectors_page[0] = virt_to_page(vpage);  in alloc_vectors_page()
|
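The arm64 vdso.c hits outline alloc_vectors_page(): one zeroed page receives both the AArch32 kuser helpers (placed so they end exactly at the 4K boundary) and the compat sigreturn code at its fixed offset; the instruction cache is then flushed and the struct page is stashed for later mapping. A sketch reconstructed from those hits; the size variables, the sigreturn symbol names, the includes and the return values are assumptions.

/* Sketch of alloc_vectors_page() based on the vdso.c hits above. The extern
 * symbols for the sigreturn code, the *_sz variables and the return values
 * are assumed rather than quoted. */
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

static struct page *vectors_page[1];

static int alloc_vectors_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];		/* assumed */
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];/* assumed */
	size_t kuser_sz  = __kuser_helper_end - __kuser_helper_start;
	size_t sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long vpage;

	vpage = get_zeroed_page(GFP_ATOMIC);
	if (!vpage)
		return -ENOMEM;

	/* kuser helpers end exactly at the top of the page ... */
	memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

	/* ... and the AArch32 sigreturn code sits at its fixed offset. */
	memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
	       __aarch32_sigret_code_start, sigret_sz);

	flush_icache_range(vpage, vpage + PAGE_SIZE);
	vectors_page[0] = virt_to_page(vpage);

	return 0;
}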
/linux-4.1.27/drivers/infiniband/hw/ehca/ |
D | ehca_eq.c |
    61   void *vpage;  in ehca_create_eq() local
    100  vpage = ipz_qpageit_get_inc(&eq->ipz_queue);  in ehca_create_eq()
    101  if (!vpage)  in ehca_create_eq()
    104  rpage = __pa(vpage);  in ehca_create_eq()
    112  vpage = ipz_qpageit_get_inc(&eq->ipz_queue);  in ehca_create_eq()
    113  if (h_ret != H_SUCCESS || vpage)  in ehca_create_eq()
|
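The ehca_eq.c hits show the queue-construction pattern shared by the ehca (and, below, ehea) code: walk the freshly allocated internal queue one kernel page at a time with ipz_qpageit_get_inc(), convert each virtual page to a physical address with __pa(), and register it with the hypervisor; afterwards one extra ipz_qpageit_get_inc() must return NULL and the last hypervisor status must be H_SUCCESS, otherwise the page count was wrong (lines 112-113). A hedged sketch of that loop; register_eq_page() is a hypothetical stand-in for the driver's hypervisor-call wrapper, and nr_pages and the error handling are assumptions.

/* Sketch of the page-registration loop in ehca_create_eq(), reconstructed
 * from the hits above. register_eq_page() is hypothetical. */
static int register_eq_pages(struct ehca_eq *eq, u32 nr_pages)
{
	u64 h_ret = H_SUCCESS;
	void *vpage;
	u64 rpage;
	u32 i;

	for (i = 0; i < nr_pages; i++) {
		/* Next virtual page of the internal queue ... */
		vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
		if (!vpage)
			return -EINVAL;

		/* ... handed to the hypervisor by physical address. */
		rpage = __pa(vpage);
		h_ret = register_eq_page(eq, i, rpage);	/* hypothetical helper */
	}

	/* After the last page the iterator must be exhausted and the last
	 * hypervisor call must have reported success (compare lines 112-113). */
	vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
	if (h_ret != H_SUCCESS || vpage)
		return -EINVAL;

	return 0;
}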
D | ehca_cq.c |
    128  void *vpage;  in ehca_create_cq() local
    203  vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);  in ehca_create_cq()
    204  if (!vpage) {  in ehca_create_cq()
    210  rpage = __pa(vpage);  in ehca_create_cq()
    232  vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);  in ehca_create_cq()
    233  if ((h_ret != H_SUCCESS) || vpage) {  in ehca_create_cq()
|
D | ehca_qp.c |
    289  void *vpage;  in init_qp_queue() local
    317  vpage = ipz_qpageit_get_inc(queue);  in init_qp_queue()
    318  if (!vpage) {  in init_qp_queue()
    320  "failed p_vpage= %p", vpage);  in init_qp_queue()
    324  rpage = __pa(vpage);  in init_qp_queue()
    338  vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);  in init_qp_queue()
    339  if (vpage) {  in init_qp_queue()
    341  "should not succeed vpage=%p", vpage);  in init_qp_queue()
|
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/ |
D | ehea_qmr.c |
    130  void *vpage;  in ehea_create_cq() local
    159  vpage = hw_qpageit_get_inc(&cq->hw_queue);  in ehea_create_cq()
    160  if (!vpage) {  in ehea_create_cq()
    165  rpage = __pa(vpage);  in ehea_create_cq()
    176  vpage = hw_qpageit_get_inc(&cq->hw_queue);  in ehea_create_cq()
    178  if ((hret != H_SUCCESS) || (vpage)) {  in ehea_create_cq()
    255  void *vpage;  in ehea_create_eq() local
    283  vpage = hw_qpageit_get_inc(&eq->hw_queue);  in ehea_create_eq()
    284  if (!vpage) {  in ehea_create_eq()
    290  rpage = __pa(vpage);  in ehea_create_eq()
    [all …]
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | kvm_host.h |
    327  u64 vpage;  member
|
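The single kvm_host.h hit is where the field lives: vpage is a u64 member of the guest translation structure that all of the Book3S and booke xlate paths above fill in, alongside the translated real address raddr. A minimal sketch of that structure, restricted to the members these hits actually touch; the name of the kernel's struct (kvmppc_pte) and its remaining members are not reproduced here, and the sketch uses its own name to make that explicit.

#include <linux/types.h>

/* Minimal sketch of the guest translation result used throughout the KVM
 * hits above; only vpage (line 327) and raddr are taken from the listing. */
struct kvmppc_pte_sketch {
	u64		vpage;	/* guest virtual page number, filled by the xlate paths above */
	unsigned long	raddr;	/* translated real (guest-physical) address */
	/* ... permission bits and page-size information in the real struct ... */
};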