vpage              94 arch/nios2/mm/init.c 	unsigned long vpage;
vpage              96 arch/nios2/mm/init.c 	vpage = get_zeroed_page(GFP_ATOMIC);
vpage              97 arch/nios2/mm/init.c 	if (!vpage)
vpage             101 arch/nios2/mm/init.c 	memcpy((void *)vpage, __kuser_helper_start, kuser_sz);
vpage             103 arch/nios2/mm/init.c 	flush_icache_range(vpage, vpage + KUSER_SIZE);
vpage             104 arch/nios2/mm/init.c 	kuser_page[0] = virt_to_page(vpage);
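The arch/nios2/mm/init.c hits above are the kuser helper page setup: a zeroed kernel page is allocated, the helper code is copied into it, the instruction cache is flushed over that range, and the backing struct page is recorded so it can later be mapped into user space. A hedged reconstruction of that flow follows; the function name alloc_kuser_page and the error path are assumed from context, while kuser_sz, KUSER_SIZE, kuser_page and the helper symbols come straight from the excerpted lines.

static int alloc_kuser_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vpage;

	vpage = get_zeroed_page(GFP_ATOMIC);
	if (!vpage)
		return -ENOMEM;

	/* copy the kuser helpers into the fresh page */
	memcpy((void *)vpage, __kuser_helper_start, kuser_sz);

	/* the page is executed from user space, so sync the icache */
	flush_icache_range(vpage, vpage + KUSER_SIZE);

	/* remember the struct page so it can be mapped on demand */
	kuser_page[0] = virt_to_page(vpage);

	return 0;
}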
vpage             380 arch/powerpc/include/asm/kvm_host.h 	u64 vpage;
vpage             465 arch/powerpc/kvm/book3s.c 		pte->vpage = VSID_REAL | eaddr >> 12;
vpage              87 arch/powerpc/kvm/book3s_32_mmu.c 		return pte.vpage;
vpage             165 arch/powerpc/kvm/book3s_32_mmu.c 			pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;
vpage             204 arch/powerpc/kvm/book3s_32_mmu.c 	pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
vpage             309 arch/powerpc/kvm/book3s_32_mmu.c 		pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
vpage             244 arch/powerpc/kvm/book3s_32_mmu_host.c 		    orig_pte->vpage, hpaddr);
vpage             260 arch/powerpc/kvm/book3s_32_mmu_host.c 	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
vpage             228 arch/powerpc/kvm/book3s_64_mmu.c 		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
vpage             309 arch/powerpc/kvm/book3s_64_mmu.c 	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
vpage             339 arch/powerpc/kvm/book3s_64_mmu.c 		eaddr, avpn, gpte->vpage, gpte->raddr);
vpage             220 arch/powerpc/kvm/book3s_64_mmu_host.c 	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
vpage             384 arch/powerpc/kvm/book3s_64_mmu_hv.c 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
vpage              37 arch/powerpc/kvm/book3s_mmu_hpte.c static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
vpage              39 arch/powerpc/kvm/book3s_mmu_hpte.c 	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
vpage              42 arch/powerpc/kvm/book3s_mmu_hpte.c static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
vpage              44 arch/powerpc/kvm/book3s_mmu_hpte.c 	return hash_64((vpage & 0xffffff000ULL) >> 12,
vpage              49 arch/powerpc/kvm/book3s_mmu_hpte.c static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
vpage              51 arch/powerpc/kvm/book3s_mmu_hpte.c 	return hash_64((vpage & 0xffffffff0ULL) >> 4,
vpage              75 arch/powerpc/kvm/book3s_mmu_hpte.c 	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
vpage              79 arch/powerpc/kvm/book3s_mmu_hpte.c 	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
vpage              85 arch/powerpc/kvm/book3s_mmu_hpte.c 	index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
vpage             225 arch/powerpc/kvm/book3s_mmu_hpte.c 		if ((pte->pte.vpage & vp_mask) == guest_vp)
vpage             247 arch/powerpc/kvm/book3s_mmu_hpte.c 		if ((pte->pte.vpage & vp_mask) == guest_vp)
vpage             269 arch/powerpc/kvm/book3s_mmu_hpte.c 		if ((pte->pte.vpage & vp_mask) == guest_vp)
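The book3s_mmu_hpte.c hits show how PR KVM's shadow-PTE cache is keyed on the guest virtual page number: one hash buckets entries by the exact 36-bit vpage, another drops the low 12 bits so a wide aligned range lands in a single list, a third handles the 64k-page layout, and the flush loops compare vpage under a caller-supplied mask. A hedged sketch of a flush by virtual page, assuming the hpte_hash_vpte array field, the list_vpte list member, and the invalidate_pte() helper from context:

/* Sketch, not the literal kvmppc_mmu_pte_vflush(): drop every shadow PTE
 * whose guest virtual page matches guest_vp under vp_mask.  A mask of
 * 0xfffffffffULL (as in book3s_32_mmu_host.c above) selects one exact
 * page; coarser masks flush a whole aligned range in a single walk. */
static void flush_vpte_sketch(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* bucket chosen by hashing the vpage, as in the helpers above */
	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();
	hlist_for_each_entry_rcu(pte, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);	/* assumed helper name */
	rcu_read_unlock();
}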
vpage             693 arch/powerpc/kvm/book3s_pr.c 		pte.vpage = eaddr >> 12;
vpage             700 arch/powerpc/kvm/book3s_pr.c 		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
vpage             712 arch/powerpc/kvm/book3s_pr.c 			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
vpage             714 arch/powerpc/kvm/book3s_pr.c 			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
vpage             715 arch/powerpc/kvm/book3s_pr.c 		pte.vpage |= vsid;
vpage            1975 arch/powerpc/kvm/booke.c 		pte->vpage = eaddr >> PAGE_SHIFT;
vpage            2004 arch/powerpc/kvm/booke.c 	pte->vpage = eaddr >> PAGE_SHIFT;
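Across book3s.c, book3s_pr.c and booke.c the vpage field is the lookup key for a guest translation: the low bits are the effective page index and the high bits carry a segment identifier. When the guest MMU is off, one of the reserved VSID_REAL* pseudo-VSIDs is used so real-mode and translated mappings of the same effective address never alias in the shadow cache. A simplified, hedged sketch of that composition; the split-mode predicates are assumptions standing in for the MSR_DR/MSR_IR checks made in book3s_pr.c:

pte.vpage = eaddr >> 12;				/* effective page number */

if (!(msr & (MSR_DR | MSR_IR))) {
	/* guest translation fully off: tag with the reserved real-mode VSID */
	pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
} else if (data_side_real) {				/* assumed predicate, see above */
	/* split mode, data accesses untranslated */
	pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
	pte.vpage |= vsid;
} else if (inst_side_real) {				/* assumed predicate, see above */
	/* split mode, instruction fetches untranslated */
	pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
	pte.vpage |= vsid;
}
/* with translation fully on, the mmu.xlate paths excerpted above fill
 * vpage from the guest segment's VSID (book3s_32/64_mmu.c); booke simply
 * uses eaddr >> PAGE_SHIFT since it has no segments */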
vpage              42 arch/powerpc/kvm/trace_pr.h 		__field(	unsigned long long,	vpage		)
vpage              52 arch/powerpc/kvm/trace_pr.h 		__entry->vpage	= orig_pte->vpage;
vpage              58 arch/powerpc/kvm/trace_pr.h 		  __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
vpage              71 arch/powerpc/kvm/trace_pr.h 		__field(	u64,		vpage		)
vpage              80 arch/powerpc/kvm/trace_pr.h 		__entry->vpage		= pte->pte.vpage;
vpage              89 arch/powerpc/kvm/trace_pr.h 		  __entry->vpage, __entry->raddr, __entry->flags)
vpage             100 arch/powerpc/kvm/trace_pr.h 		__field(	u64,		vpage		)
vpage             109 arch/powerpc/kvm/trace_pr.h 		__entry->vpage		= pte->pte.vpage;
vpage             118 arch/powerpc/kvm/trace_pr.h 		  __entry->vpage, __entry->raddr, __entry->flags)
vpage             115 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	void *vpage;
vpage             140 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		vpage = hw_qpageit_get_inc(&cq->hw_queue);
vpage             141 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		if (!vpage) {
vpage             146 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		rpage = __pa(vpage);
vpage             157 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 			vpage = hw_qpageit_get_inc(&cq->hw_queue);
vpage             159 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 			if ((hret != H_SUCCESS) || (vpage)) {
vpage             235 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	void *vpage;
vpage             263 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		vpage = hw_qpageit_get_inc(&eq->hw_queue);
vpage             264 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		if (!vpage) {
vpage             270 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		rpage = __pa(vpage);
vpage             278 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 			vpage = hw_qpageit_get_inc(&eq->hw_queue);
vpage             279 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 			if ((hret != H_SUCCESS) || (vpage))
vpage             363 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	void *vpage;
vpage             370 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		vpage = hw_qpageit_get_inc(hw_queue);
vpage             371 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		if (!vpage) {
vpage             375 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		rpage = __pa(vpage);
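In drivers/net/ethernet/ibm/ehea/ehea_qmr.c, vpage is the kernel-virtual address of each page backing a hardware queue: the CQ/EQ constructors walk the queue with hw_qpageit_get_inc(), convert every page to a real address with __pa(), and register it with the hypervisor; after the last page the iterator must come back empty, or registration did not complete. A hedged sketch of that shared pattern, where register_page_with_hv() is a hypothetical stand-in for the ehea_h_register_rpage*() hypervisor call made at this point:

u64 rpage, hret;
void *vpage;
int i;

for (i = 0; i < nr_pages; i++) {
	vpage = hw_qpageit_get_inc(&cq->hw_queue);	/* next backing page */
	if (!vpage)
		goto out_kill_hwq;			/* queue ran out of pages early */

	rpage = __pa(vpage);				/* hypervisor wants the real address */
	hret = register_page_with_hv(rpage);		/* hypothetical wrapper, see above */

	if (i == nr_pages - 1) {
		/* after the final page the iterator must be exhausted,
		 * otherwise registration did not complete (cf. the
		 * "(hret != H_SUCCESS) || (vpage)" checks above) */
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (hret != H_SUCCESS || vpage)
			goto out_kill_hwq;
	} else if (hret < H_SUCCESS) {
		goto out_kill_hwq;
	}
}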