Identifier cross-reference for __va — columns: symbol, line number within the file, file path, matching source line.
__va              572 arch/alpha/include/asm/io.h #define xlate_dev_mem_ptr(p)	__va(p)
__va               67 arch/alpha/include/asm/mmzone.h     ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn  \
__va               95 arch/alpha/include/asm/mmzone.h 	kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT));	\
__va              587 arch/alpha/kernel/pci_iommu.c 		     __va(paddr), size, out->dma_address);
__va              599 arch/alpha/kernel/pci_iommu.c 		     __va(paddr), size, out->dma_address);
__va              625 arch/alpha/kernel/pci_iommu.c 	     __va(paddr), size, out->dma_address, npages);
__va              444 arch/alpha/kernel/setup.c 	hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
__va             1260 arch/alpha/kernel/setup.c 	read_mem_block(__va(0), stride, size);
__va             1264 arch/alpha/kernel/setup.c 		cycles = read_mem_block(__va(0), stride, size);
__va             1273 arch/alpha/kernel/setup.c 		read_mem_block(__va(size), stride, size);
__va              268 arch/alpha/kernel/sys_nautilus.c 		free_reserved_area(__va(alpha_mv.min_mem_address),
__va              269 arch/alpha/kernel/sys_nautilus.c 				   __va(memtop), -1, NULL);
__va              284 arch/alpha/mm/init.c 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
__va              133 arch/alpha/mm/numa.c 	node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
__va              125 arch/arc/mm/init.c 		initrd_start = (unsigned long)__va(phys_initrd_start);
__va               28 arch/arm/common/sharpsl_param.c #define param_start(x)	__va(x)
__va              446 arch/arm/include/asm/io.h #define xlate_dev_mem_ptr(p)	__va(p)
__va              297 arch/arm/include/asm/memory.h #define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)
__va              152 arch/arm/include/asm/pgtable-3level.h 	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
__va              191 arch/arm/include/asm/pgtable.h 	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
__va               80 arch/arm/mm/dma-mapping-nommu.c 	dmac_map_area(__va(paddr), size, dir);
__va               93 arch/arm/mm/dma-mapping-nommu.c 		dmac_unmap_area(__va(paddr), size, dir);
__va              484 arch/arm/mm/init.c 	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
__va             1240 arch/arm/mm/mmu.c 	high_memory = __va(arm_lowmem_limit - 1) + 1;
__va             1546 arch/arm/mm/mmu.c 	boot_data = __va(__atags_pointer);
__va              141 arch/arm/mm/nommu.c 	high_memory = __va(end - 1) + 1;
__va              308 arch/arm64/include/asm/memory.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
__va              502 arch/arm64/include/asm/pgtable.h #define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))
__va              561 arch/arm64/include/asm/pgtable.h #define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
__va              619 arch/arm64/include/asm/pgtable.h #define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))
__va              431 arch/arm64/mm/init.c 	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
__va              111 arch/arm64/mm/kasan_init.c 			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
__va              233 arch/arm64/mm/numa.c 	nd = __va(nd_pa);
__va              286 arch/arm64/mm/numa.c 	numa_distance = __va(phys);
__va              263 arch/c6x/kernel/setup.c 	void *dtb = __va(dt_ptr);
__va               38 arch/csky/include/asm/page.h #define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)
__va               91 arch/csky/include/asm/page.h #define pfn_to_kaddr(x)	__va(PFN_PHYS(x))
__va              133 arch/csky/include/asm/pgtable.h 	return __va(ptr);
__va               21 arch/csky/mm/dma-mapping.c 	void *start          = __va(page_to_phys(page));
__va               85 arch/csky/mm/fault.c 		pgd_base = (unsigned long)__va(get_pgd());
__va               48 arch/csky/mm/init.c 	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
__va               60 arch/hexagon/include/asm/io.h 	return __va(address);
__va               67 arch/hexagon/include/asm/io.h #define xlate_dev_kmem_ptr(p)    __va(p)
__va               68 arch/hexagon/include/asm/io.h #define xlate_dev_mem_ptr(p)    __va(p)
__va               62 arch/hexagon/include/asm/mem-layout.h #define VMALLOC_START ((unsigned long) __va(high_memory + VMALLOC_OFFSET))
__va              131 arch/hexagon/include/asm/page.h #define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)
__va              133 arch/hexagon/include/asm/page.h #define page_to_virt(page)	__va(page_to_phys(page))
__va              426 arch/hexagon/include/asm/pgtable.h 	((pte_t *) (unsigned long) __va(pmd_val(*dir) & PAGE_MASK) \
__va               58 arch/ia64/hp/common/aml_nfw.c 	return __va(address);
__va              127 arch/ia64/include/asm/page.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
__va              275 arch/ia64/include/asm/pgtable.h #define pmd_page_vaddr(pmd)		((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
__va              282 arch/ia64/include/asm/pgtable.h #define pud_page_vaddr(pud)		((unsigned long) __va(pud_val(pud) & _PFN_MASK))
__va              290 arch/ia64/include/asm/pgtable.h #define pgd_page_vaddr(pgd)		((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
__va              653 arch/ia64/include/asm/processor.h 	return __va(result);
__va              271 arch/ia64/include/asm/uaccess.h 		ptr = __va(p);
__va              168 arch/ia64/include/asm/uv/uv_hub.h 	return __va(gpa & uv_hub_info->gpa_mask);
__va              174 arch/ia64/include/asm/uv/uv_hub.h 	return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
__va              185 arch/ia64/include/asm/uv/uv_hub.h 	return __va(UV_GLOBAL_MMR32_BASE |
__va              208 arch/ia64/include/asm/uv/uv_hub.h 	return __va(UV_GLOBAL_MMR64_BASE |
__va              230 arch/ia64/include/asm/uv/uv_hub.h 	return __va(UV_LOCAL_MMR_BASE | offset);
__va               75 arch/ia64/kernel/acpi.c 	return __va(phys);
__va               42 arch/ia64/kernel/crash_dump.c 	vaddr = __va(pfn<<PAGE_SHIFT);
__va               76 arch/ia64/kernel/efi.c 	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time),    \
__va               90 arch/ia64/kernel/efi.c 	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time),    \
__va              106 arch/ia64/kernel/efi.c 		(efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time),      \
__va              124 arch/ia64/kernel/efi.c 		(efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time),      \
__va              143 arch/ia64/kernel/efi.c 		(efi_get_variable_t *) __va(runtime->get_variable),	       \
__va              160 arch/ia64/kernel/efi.c 		(efi_get_next_variable_t *) __va(runtime->get_next_variable),  \
__va              177 arch/ia64/kernel/efi.c 		(efi_set_variable_t *) __va(runtime->set_variable),	       \
__va              193 arch/ia64/kernel/efi.c 				__va(runtime->get_next_high_mono_count),       \
__va              212 arch/ia64/kernel/efi.c 		(efi_reset_system_t *) __va(runtime->reset_system),	       \
__va              360 arch/ia64/kernel/efi.c 	efi_map_start = __va(ia64_boot_param->efi_memmap);
__va              412 arch/ia64/kernel/efi.c 		return __va(md->phys_addr);
__va              437 arch/ia64/kernel/efi.c 	struct palo_table *palo = __va(phys_addr);
__va              511 arch/ia64/kernel/efi.c 	efi.systab = __va(ia64_boot_param->efi_systab);
__va              527 arch/ia64/kernel/efi.c 	c16 = __va(efi.systab->fw_vendor);
__va              546 arch/ia64/kernel/efi.c 	runtime = __va(efi.systab->runtime);
__va              557 arch/ia64/kernel/efi.c 	efi_map_start = __va(ia64_boot_param->efi_memmap);
__va              612 arch/ia64/kernel/efi.c 	efi_map_start = __va(ia64_boot_param->efi_memmap);
__va              624 arch/ia64/kernel/efi.c 				md->virt_addr = (u64) __va(md->phys_addr);
__va              657 arch/ia64/kernel/efi.c 	status = efi_call_phys(__va(runtime->set_virtual_address_map),
__va              696 arch/ia64/kernel/efi.c 	efi_map_start = __va(ia64_boot_param->efi_memmap);
__va              729 arch/ia64/kernel/efi.c 	efi_map_start = __va(ia64_boot_param->efi_memmap);
__va              750 arch/ia64/kernel/efi.c 	efi_map_start = __va(ia64_boot_param->efi_memmap);
__va              982 arch/ia64/kernel/efi.c 	efi_map_start = __va(ia64_boot_param->efi_memmap);
__va             1040 arch/ia64/kernel/efi.c 	return __va(as);
__va             1061 arch/ia64/kernel/efi.c 	efi_map_start = __va(ia64_boot_param->efi_memmap);
__va             1184 arch/ia64/kernel/efi.c 	efi_map_start = __va(ia64_boot_param->efi_memmap);
__va             1293 arch/ia64/kernel/efi.c 	efi_map_start = __va(ia64_boot_param->efi_memmap);
__va             1335 arch/ia64/kernel/efi.c 	efi_map_start = __va(ia64_boot_param->efi_memmap);
__va               59 arch/ia64/kernel/esi.c 	config_tables = __va(efi.systab->tables);
__va               71 arch/ia64/kernel/esi.c 	systab = __va(esi);
__va              122 arch/ia64/kernel/esi.c 				pdesc.addr = __va(esi->esi_proc);
__va              123 arch/ia64/kernel/esi.c 				pdesc.gp = __va(esi->gp);
__va             1619 arch/ia64/kernel/mca.c 		t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
__va             1848 arch/ia64/kernel/mca.c 		data = __va(__per_cpu_mca[cpu]);
__va              119 arch/ia64/kernel/sal.c 	ia64_pal_handler_init(__va(ep->pal_proc));
__va              120 arch/ia64/kernel/sal.c 	ia64_sal_handler_init(__va(ep->sal_proc), __va(ep->gp));
__va              307 arch/ia64/kernel/setup.c 				(unsigned long)__va(base);
__va              309 arch/ia64/kernel/setup.c 				(unsigned long)__va(base + size);
__va              347 arch/ia64/kernel/setup.c 	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
__va              351 arch/ia64/kernel/setup.c 	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
__va              353 arch/ia64/kernel/setup.c 				+ strlen(__va(ia64_boot_param->command_line)) + 1);
__va              362 arch/ia64/kernel/setup.c 		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
__va              411 arch/ia64/kernel/setup.c 		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
__va              543 arch/ia64/kernel/setup.c 	*start = (unsigned long)__va(elfcorehdr_addr);
__va              557 arch/ia64/kernel/setup.c 	*cmdline_p = __va(ia64_boot_param->command_line);
__va              589 arch/ia64/kernel/setup.c 	ia64_sal_init(__va(sal_systab_phys));
__va               39 arch/ia64/kernel/traps.c 		fpswa_interface = __va(ia64_boot_param->fpswa);
__va              147 arch/ia64/mm/discontig.c 		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
__va              148 arch/ia64/mm/discontig.c 		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
__va              267 arch/ia64/mm/discontig.c 	memset(__va(pernode), 0, pernodesize);
__va              273 arch/ia64/mm/discontig.c 	pgdat_list[node] = __va(pernode);
__va              276 arch/ia64/mm/discontig.c 	mem_data[node].node_data = __va(pernode);
__va              652 arch/ia64/mm/init.c 	high_memory = __va(max_low_pfn * PAGE_SIZE);
__va               66 arch/ia64/uv/kernel/setup.c 	rsdp = (struct acpi_table_rsdp *)__va(efi.acpi20);
__va               72 arch/ia64/uv/kernel/setup.c 	xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
__va              398 arch/m68k/include/asm/io_mm.h #define xlate_dev_mem_ptr(p)	__va(p)
__va              125 arch/m68k/include/asm/motorola_pgtable.h #define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
__va              126 arch/m68k/include/asm/motorola_pgtable.h #define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
__va              127 arch/m68k/include/asm/motorola_pgtable.h #define __pgd_page(pgd) ((unsigned long)__va(pgd_val(pgd) & _TABLE_MASK))
__va              134 arch/m68k/include/asm/motorola_pgtable.h #define pte_page(pte)		virt_to_page(__va(pte_val(pte)))
__va              147 arch/m68k/include/asm/motorola_pgtable.h #define pmd_page(pmd)		virt_to_page(__va(pmd_val(pmd)))
__va              154 arch/m68k/include/asm/motorola_pgtable.h #define pgd_page(pgd)		(mem_map + ((unsigned long)(__va(pgd_val(pgd)) - PAGE_OFFSET) >> PAGE_SHIFT))
__va              125 arch/m68k/include/asm/page_mm.h #define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
__va               24 arch/m68k/include/asm/page_no.h #define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
__va               27 arch/m68k/include/asm/page_no.h #define page_to_virt(page)	__va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET))
__va               19 arch/m68k/include/asm/sun3_pgtable.h #define PTOV(addr)	__va(addr)
__va              119 arch/m68k/include/asm/sun3_pgtable.h ((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
__va              121 arch/m68k/include/asm/sun3_pgtable.h ((unsigned long) __va (pmd_val (pmd) & PAGE_MASK))
__va               28 arch/m68k/include/asm/virtconvert.h 	return __va(address);
__va              541 arch/m68k/kernel/traps.c 			 __va(desc), *(unsigned long *)__va(desc));
__va              649 arch/m68k/kernel/traps.c 		__va(desc), *(unsigned long *)__va(desc));
__va               77 arch/m68k/mm/sun3mmu.c 		pg_table = (pte_t *) __va ((unsigned long) pg_table);
__va              395 arch/m68k/sun3/mmu_emu.c 	pte = (pte_t *) __va ((unsigned long)(pte + offset));
__va              149 arch/microblaze/include/asm/page.h # define pfn_to_virt(pfn)	__va(pfn_to_phys((pfn)))
__va              154 arch/microblaze/include/asm/page.h #  define page_to_virt(page)   __va(page_to_pfn(page) << PAGE_SHIFT)
__va              326 arch/microblaze/include/asm/pgtable.h 	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
__va              191 arch/microblaze/mm/init.c 	high_memory = (void *)__va(memory_start + lowmem_size - 1);
__va              652 arch/mips/include/asm/io.h #define xlate_dev_mem_ptr(p)	__va(p)
__va              219 arch/mips/include/asm/page.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
__va              206 arch/mips/kernel/setup.c 	initrd_end = (unsigned long)__va(end);
__va              207 arch/mips/kernel/setup.c 	initrd_start = (unsigned long)__va(__pa(initrd_start));
__va              252 arch/mips/loongson64/loongson-3/numa.c 	high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
__va               59 arch/mips/mm/dma-noncoherent.c 	return __va(addr) - UNCAC_BASE;
__va              463 arch/mips/mm/init.c 	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
__va              394 arch/mips/sgi-ip27/ip27-memory.c 	__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
__va              463 arch/mips/sgi-ip27/ip27-memory.c 	high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
__va              198 arch/nds32/include/asm/pgtable.h #define pmd_page_kernel(pmd)	  	     ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
__va              331 arch/nds32/include/asm/pgtable.h #define pmd_page(pmd)        virt_to_page(__va(pmd_val(pmd)))
__va               21 arch/nds32/kernel/pm.c 	pgdv = (pgd_t *)__va((__nds32__mfsr(NDS32_SR_L1_PPTB) &
__va              276 arch/nds32/kernel/setup.c 	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
__va              366 arch/nds32/mm/fault.c 		pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
__va               67 arch/nds32/mm/init.c 	v = (u32) __va(p);
__va              222 arch/nds32/mm/init.c 		(unsigned long)__va(memory_start), (unsigned long)high_memory,
__va              224 arch/nds32/mm/init.c 		 (unsigned long)__va(memory_start)) >> 20,
__va               87 arch/nios2/include/asm/page.h # define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
__va               73 arch/nios2/mm/init.c 	high_memory = __va(end_mem);
__va              134 arch/nios2/mm/ioremap.c 		t_addr = __va(phys_addr);
__va               76 arch/openrisc/include/asm/page.h #define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)
__va              348 arch/openrisc/include/asm/pgtable.h 	return (unsigned long)__va(pte_val(pte) & PAGE_MASK);
__va              369 arch/openrisc/include/asm/pgtable.h #define pmd_page_kernel(pmd)    ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
__va               88 arch/openrisc/mm/init.c 		v = (u32) __va(p);
__va              149 arch/openrisc/mm/init.c 	end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
__va              165 arch/openrisc/mm/init.c 		unsigned long *dtlb_vector = __va(0x900);
__va              166 arch/openrisc/mm/init.c 		unsigned long *itlb_vector = __va(0xa00);
__va              207 arch/openrisc/mm/init.c 	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
__va                9 arch/parisc/include/asm/io.h #define phys_to_virt(a) __va(a)
__va              327 arch/parisc/include/asm/io.h #define xlate_dev_mem_ptr(p)	__va(p)
__va              346 arch/parisc/include/asm/pgtable.h #define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
__va              436 arch/parisc/include/asm/pgtable.h #define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_address(pmd)))
__va              438 arch/parisc/include/asm/pgtable.h #define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
__va               84 arch/parisc/kernel/cache.c #define pfn_va(pfn)	__va(PFN_PHYS(pfn))
__va              439 arch/parisc/kernel/pci-dma.c 	free_pages((unsigned long)__va(dma_handle), order);
__va               59 arch/parisc/kernel/setup.c 		strlcpy(boot_command_line, (char *)__va(boot_args[1]),
__va               65 arch/parisc/kernel/setup.c 		    initrd_start = (unsigned long)__va(boot_args[2]);
__va               66 arch/parisc/kernel/setup.c 		    initrd_end = (unsigned long)__va(boot_args[3]);
__va              404 arch/parisc/mm/init.c 		pgd_populate(NULL, pg_dir, __va(pmd));
__va              410 arch/parisc/mm/init.c 		pmd = (pmd_t *)__va(pmd) + start_pmd;
__va              426 arch/parisc/mm/init.c 			pmd_populate_kernel(NULL, pmd, __va(pg_table));
__va              430 arch/parisc/mm/init.c 			pg_table = (pte_t *) __va(pg_table) + start_pte;
__va              590 arch/parisc/mm/init.c 	high_memory = __va((max_pfn << PAGE_SHIFT));
__va              624 arch/parisc/mm/init.c 	       __va(0), high_memory,
__va              625 arch/parisc/mm/init.c 	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
__va              664 arch/parisc/mm/init.c 		map_pages((unsigned long)__va(start_paddr), start_paddr,
__va               57 arch/parisc/mm/ioremap.c 		t_addr = __va(phys_addr);
__va              348 arch/powerpc/include/asm/book3s/32/pgtable.h 	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
__va               75 arch/powerpc/include/asm/book3s/64/hugetlb.h 	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
__va              997 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_page_vaddr(pmd)	__va(pmd_val(pmd) & ~PMD_MASKED_BITS)
__va              998 arch/powerpc/include/asm/book3s/64/pgtable.h #define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
__va              999 arch/powerpc/include/asm/book3s/64/pgtable.h #define pgd_page_vaddr(pgd)	__va(pgd_val(pgd) & ~PGD_MASKED_BITS)
__va              622 arch/powerpc/include/asm/io.h #define xlate_dev_mem_ptr(p)	__va(p)
__va              794 arch/powerpc/include/asm/io.h 	return (void *)__va(address);
__va              828 arch/powerpc/include/asm/io.h         return __va(address - PCI_DRAM_OFFSET);
__va               11 arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h 	return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
__va              347 arch/powerpc/include/asm/nohash/32/pgtable.h 	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
__va              133 arch/powerpc/include/asm/page.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
__va               68 arch/powerpc/include/asm/sections.h 	return start < (unsigned long)__va(real_end) &&
__va               69 arch/powerpc/include/asm/sections.h 		(unsigned long)__va(real_start) < end;
__va              108 arch/powerpc/kernel/crash_dump.c 		vaddr = __va(paddr);
__va              626 arch/powerpc/kernel/fadump.c 	fdh = __va(fw_dump.fadumphdr_addr);
__va             1031 arch/powerpc/kernel/fadump.c 		phdr->p_vaddr = (unsigned long)__va(mbase);
__va             1049 arch/powerpc/kernel/fadump.c 	fdh = __va(addr);
__va             1082 arch/powerpc/kernel/fadump.c 	vaddr = __va(addr);
__va               71 arch/powerpc/kernel/ima_kexec.c 	*addr = __va(tmp_addr);
__va               92 arch/powerpc/kernel/machine_kexec_64.c 		addr = __va(entry & PAGE_MASK);
__va              129 arch/powerpc/kernel/machine_kexec_64.c 		flush_icache_range((unsigned long)__va(ranges[i].mem),
__va              130 arch/powerpc/kernel/machine_kexec_64.c 			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
__va               92 arch/powerpc/kernel/setup_32.c 	early_init_devtree(__va(dt_ptr));
__va              319 arch/powerpc/kernel/setup_64.c 	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
__va              333 arch/powerpc/kernel/setup_64.c 	early_init_devtree(__va(dt_ptr));
__va              383 arch/powerpc/kvm/book3s_32_mmu_host.c 	htab = (ulong)__va(sdr1 & 0xffff0000);
__va               39 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	return __va(addr);
__va              901 arch/powerpc/mm/book3s64/hash_utils.c 		htab_address = __va(table);
__va              932 arch/powerpc/mm/book3s64/hash_utils.c 		base = (unsigned long)__va(reg->base);
__va              956 arch/powerpc/mm/book3s64/hash_utils.c 		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
__va              957 arch/powerpc/mm/book3s64/hash_utils.c 		tce_alloc_end = (unsigned long)__va(tce_alloc_end);
__va              284 arch/powerpc/mm/book3s64/radix_pgtable.c 		vaddr = (unsigned long)__va(addr);
__va              782 arch/powerpc/mm/book3s64/radix_pgtable.c 				(unsigned long)__va(memblock_end_of_DRAM()));
__va              323 arch/powerpc/mm/init_64.c 			free_pages((unsigned long)(__va(addr)), page_order);
__va              151 arch/powerpc/mm/kasan/kasan_init_32.c 		ret = kasan_init_region(__va(base), top - base);
__va              137 arch/powerpc/mm/mem.c 	start = (unsigned long)__va(start);
__va              160 arch/powerpc/mm/mem.c 	start = (unsigned long)__va(start);
__va              296 arch/powerpc/mm/mem.c 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
__va               52 arch/powerpc/mm/nohash/8xx.c 		return (unsigned long)__va(pa);
__va               73 arch/powerpc/mm/nohash/8xx.c 			mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
__va               94 arch/powerpc/mm/nohash/8xx.c 	modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
__va              294 arch/powerpc/mm/nohash/fsl_booke.c 	early_get_first_memblock_info(__va(dt_ptr), NULL);
__va              315 arch/powerpc/mm/nohash/fsl_booke.c 		restore_to_as0(n, offset, __va(dt_ptr), 1);
__va              814 arch/powerpc/mm/numa.c 	nd = __va(nd_pa);
__va              314 arch/powerpc/oprofile/op_model_power4.c 		return (unsigned long)__va(pc);
__va              108 arch/powerpc/platforms/44x/iss4xx.c 	spin_table = (u32 *)__va(*spin_table_addr_prop);
__va              179 arch/powerpc/platforms/44x/ppc476.c 	spin_table = (u32 *)__va(*spin_table_addr_prop);
__va              198 arch/powerpc/platforms/powernv/opal-core.c 			memcpy(to, __va(addr), tsz);
__va              231 arch/powerpc/platforms/powernv/opal-core.c 	bufp = __va(oc_conf->cpu_state_destination_vaddr);
__va              483 arch/powerpc/platforms/powernv/opal-core.c 	opalc_metadata = __va(addr);
__va              494 arch/powerpc/platforms/powernv/opal-core.c 	opalc_cpu_metadata = __va(addr);
__va              542 arch/powerpc/platforms/powernv/opal-core.c 	oc_conf->cpu_state_destination_vaddr = (u64)__va(addr);
__va              191 arch/powerpc/platforms/powernv/opal-fadump.c 	opal_fdm = __va(fadump_conf->kernel_metadata);
__va              236 arch/powerpc/platforms/powernv/opal-fadump.c 	opal_fdm = __va(fadump_conf->kernel_metadata);
__va              374 arch/powerpc/platforms/powernv/opal-fadump.c 		(u64)__va(be64_to_cpu(opal_cpu_metadata->region[0].dest));
__va              423 arch/powerpc/platforms/powernv/opal-fadump.c 		bufp = __va(fadump_conf->cpu_state_dest_vaddr);
__va              509 arch/powerpc/platforms/powernv/opal-fadump.c 	fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr));
__va              522 arch/powerpc/platforms/powernv/opal-fadump.c 	fdh = __va(fadump_conf->fadumphdr_addr);
__va              692 arch/powerpc/platforms/powernv/opal-fadump.c 	opal_fdm_active = __va(addr);
__va              709 arch/powerpc/platforms/powernv/opal-fadump.c 		opal_cpu_metadata = __va(addr);
__va              739 arch/powerpc/platforms/powernv/opal.c 	symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
__va              800 arch/powerpc/platforms/powernv/opal.c 		attr->private = __va(vals[0]);
__va             1102 arch/powerpc/platforms/powernv/opal.c 			sg = __va(next);
__va               87 arch/powerpc/platforms/powernv/pci-ioda-tce.c 		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
__va              205 arch/powerpc/platforms/powernv/pci-ioda-tce.c 			pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
__va              736 arch/powerpc/platforms/ps3/os-area.c 	header = (struct os_area_header *)__va(lpar_addr);
__va              737 arch/powerpc/platforms/ps3/os-area.c 	params = (struct os_area_params *)__va(lpar_addr
__va              749 arch/powerpc/platforms/ps3/os-area.c 	db = (struct os_area_db *)__va(lpar_addr
__va              411 arch/powerpc/platforms/pseries/iommu.c 		void *uaddr = __va(start_pfn << PAGE_SHIFT);
__va              498 arch/powerpc/platforms/pseries/iommu.c 	tbl->it_base = (unsigned long)__va(*basep);
__va              436 arch/powerpc/platforms/pseries/ras.c 	savep = __va(regs->gpr[3]);
__va              316 arch/powerpc/platforms/pseries/rtas-fadump.c 	vaddr = __va(addr);
__va              342 arch/powerpc/platforms/pseries/rtas-fadump.c 		fdh = __va(fadump_conf->fadumphdr_addr);
__va              374 arch/powerpc/platforms/pseries/rtas-fadump.c 		fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr));
__va              412 arch/powerpc/platforms/pseries/rtas-fadump.c 	fdh = __va(fadump_conf->fadumphdr_addr);
__va              104 arch/riscv/include/asm/page.h #define pfn_to_virt(pfn)	(__va(pfn_to_phys(pfn)))
__va               54 arch/riscv/mm/init.c 	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
__va              273 arch/riscv/mm/init.c #define alloc_pgd_next(__va)	alloc_pmd(__va)
__va              275 arch/riscv/mm/init.c #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
__va              276 arch/riscv/mm/init.c 	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
__va              281 arch/riscv/mm/init.c #define alloc_pgd_next(__va)	alloc_pte(__va)
__va              283 arch/riscv/mm/init.c #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
__va              284 arch/riscv/mm/init.c 	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
__va              439 arch/riscv/mm/init.c 			va = (uintptr_t)__va(pa);
__va              165 arch/s390/include/asm/page.h #define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
__va              231 arch/s390/kernel/ftrace.c 	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
__va              239 arch/s390/kernel/ftrace.c 	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
__va              113 arch/s390/mm/fault.c 	unsigned long *table = __va(asce & _ASCE_ORIGIN);
__va              182 arch/s390/mm/init.c         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
__va              247 arch/sh/include/asm/io.h #define phys_to_virt(address)	(__va(address))
__va              382 arch/sh/include/asm/io.h #define xlate_dev_mem_ptr(p)	__va(p)
__va              168 arch/sh/include/asm/page.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
__va               69 arch/sh/include/asm/pgtable_64.h 	((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))
__va              165 arch/sh/kernel/setup.c 	initrd_start = (unsigned long)__va(start);
__va              235 arch/sh/kernel/setup.c 	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
__va              307 arch/sh/mm/init.c 	memory_start = (unsigned long)__va(__MEMORY_START);
__va              349 arch/sh/mm/init.c 				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
__va               36 arch/sh/mm/numa.c 	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
__va              452 arch/sparc/include/asm/io_64.h #define xlate_dev_mem_ptr(p)	__va(p)
__va              128 arch/sparc/include/asm/page_32.h #define phys_to_virt		__va
__va              150 arch/sparc/include/asm/page_64.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
__va              157 arch/sparc/include/asm/page_64.h #define phys_to_virt __va
__va              844 arch/sparc/include/asm/pgtable_64.h 	return ((unsigned long) __va(pfn << PAGE_SHIFT));
__va              854 arch/sparc/include/asm/pgtable_64.h 	return ((unsigned long) __va(pfn << PAGE_SHIFT));
__va              863 arch/sparc/include/asm/pgtable_64.h 	((unsigned long) __va(pgd_val(pgd)))
__va              145 arch/sparc/include/asm/pgtsrmmu.h #define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
__va              146 arch/sparc/include/asm/pgtsrmmu.h #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
__va              268 arch/sparc/kernel/irq_64.c 		bucket = (struct ino_bucket *) __va(cookie);
__va              176 arch/sparc/kernel/mdesc.c 		hp = __va(paddr);
__va              473 arch/sparc/kernel/smp_64.c 	cpu_list = __va(tb->cpu_list_pa);
__va              474 arch/sparc/kernel/smp_64.c 	mondo = __va(tb->cpu_mondo_block_pa);
__va              492 arch/sparc/kernel/smp_64.c 	cpu_list = __va(tb->cpu_list_pa);
__va              493 arch/sparc/kernel/smp_64.c 	mondo = __va(tb->cpu_mondo_block_pa);
__va              661 arch/sparc/kernel/smp_64.c 	cpu_list = __va(tb->cpu_list_pa);
__va              812 arch/sparc/kernel/smp_64.c 	mondo = __va(tb->cpu_mondo_block_pa);
__va              818 arch/sparc/kernel/smp_64.c 	cpu_list = __va(tb->cpu_list_pa);
__va             2092 arch/sparc/kernel/traps_64.c 	ent = __va(paddr);
__va             2210 arch/sparc/kernel/traps_64.c 	ent = __va(paddr);
__va              277 arch/sparc/mm/init_32.c 	high_memory = __va(max_low_pfn << PAGE_SHIFT);
__va             1271 arch/sparc/mm/init_64.c 	mlgroups = __va(paddr);
__va             1312 arch/sparc/mm/init_64.c 	mblocks = __va(paddr);
__va             2515 arch/sparc/mm/init_64.c 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
__va             2566 arch/sparc/mm/init_64.c 			((unsigned long) __va(kern_base)) -
__va              897 arch/sparc/mm/srmmu.c 		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
__va               91 arch/um/include/asm/pgtable-3level.h #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
__va              327 arch/um/include/asm/pgtable.h #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
__va              331 arch/um/include/asm/pgtable.h 	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
__va               81 arch/unicore32/include/asm/memory.h #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
__va              219 arch/unicore32/include/asm/pgtable.h #define pmd_page_vaddr(pmd) ((pte_t *)__va(pmd_val(pmd) & PAGE_MASK))
__va              185 arch/unicore32/mm/init.c 	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
__va               84 arch/x86/include/asm/dma.h #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
__va              150 arch/x86/include/asm/io.h 	return __va(address);
__va               41 arch/x86/include/asm/numachip/numachip_csr.h 	return __va(NUMACHIP_LCSR_BASE | (1UL << 15) |
__va               69 arch/x86/include/asm/numachip/numachip_csr.h 	return (void __iomem *)__va(NUMACHIP2_LCSR_BASE |
__va               58 arch/x86/include/asm/page.h #ifndef __va
__va               62 arch/x86/include/asm/page.h #define __boot_va(x)		__va(x)
__va               70 arch/x86/include/asm/page.h #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
__va              798 arch/x86/include/asm/pgtable.h 	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
__va              866 arch/x86/include/asm/pgtable.h 	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
__va              916 arch/x86/include/asm/pgtable.h 	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
__va              957 arch/x86/include/asm/pgtable.h 	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
__va               81 arch/x86/include/asm/realmode.h 	real_mode_header = (struct real_mode_header *) __va(mem);
__va              272 arch/x86/include/asm/tlbflush.h 	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
__va              594 arch/x86/include/asm/uv/uv_hub.h 		return __va(((unsigned long)pnode << m_val) | offset);
__va              602 arch/x86/include/asm/uv/uv_hub.h 		return __va((unsigned long)offset);
__va              605 arch/x86/include/asm/uv/uv_hub.h 	return __va(base << UV_GAM_RANGE_SHFT | offset);
__va              632 arch/x86/include/asm/uv/uv_hub.h 	return __va(UV_GLOBAL_MMR32_BASE |
__va              652 arch/x86/include/asm/uv/uv_hub.h 	return __va(UV_GLOBAL_MMR64_BASE |
__va              682 arch/x86/include/asm/uv/uv_hub.h 	return __va(UV_LOCAL_MMR_BASE | offset);
__va              309 arch/x86/include/asm/xen/page.h #define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
__va              313 arch/x86/include/asm/xen/page.h #define gfn_to_virt(g)		(__va(gfn_to_pfn(g) << PAGE_SHIFT))
__va               49 arch/x86/kernel/acpi/sleep.c 		(struct wakeup_header *) __va(real_mode_header->wakeup_header);
__va              777 arch/x86/kernel/amd_gart_64.c 	set_memory_np((unsigned long)__va(iommu_bus_base),
__va              429 arch/x86/kernel/apm_32.c 			(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
__va             2342 arch/x86/kernel/apm_32.c 		 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
__va             2344 arch/x86/kernel/apm_32.c 		 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
__va             2346 arch/x86/kernel/apm_32.c 		 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
__va              129 arch/x86/kernel/check.c 		memset(__va(start), 0, end - start);
__va              149 arch/x86/kernel/check.c 		unsigned long *addr = __va(scan_areas[i].addr);
__va              510 arch/x86/kernel/cpu/amd.c 				set_memory_4k((unsigned long)__va(tseg), 1);
__va              150 arch/x86/kernel/cpu/bugs.c 		set_memory_4k((unsigned long)__va(0), 1);
__va              219 arch/x86/kernel/cpu/hygon.c 			set_memory_4k((unsigned long)__va(tseg), 1);
__va               74 arch/x86/kernel/ebda.c 	bios_start = *(unsigned short *)__va(BIOS_RAM_SIZE_KB_PTR);
__va              107 arch/x86/kernel/ftrace.c 		ip = (unsigned long)__va(__pa_symbol(ip));
__va              412 arch/x86/kernel/head64.c 		command_line = __va(cmd_line_ptr);
__va              461 arch/x86/kernel/head64.c 	copy_bootdata(__va(real_mode_data));
__va              478 arch/x86/kernel/head64.c 		copy_bootdata(__va(real_mode_data));
__va              207 arch/x86/kernel/machine_kexec_64.c 	level4p = (pgd_t *)__va(start_pgtable);
__va             1330 arch/x86/kernel/pci-calgary_64.c 			info->tce_space = (u64 *)__va(tce_space);
__va              607 arch/x86/kernel/reboot.c 	*((unsigned short *)__va(0x472)) = mode;
__va             1099 arch/x86/kernel/setup.c 	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
__va             2682 arch/x86/kvm/mmu.c 	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
__va              252 arch/x86/mm/fault.c 	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
__va              293 arch/x86/mm/fault.c 	pgd_t *base = __va(read_cr3_pa());
__va              370 arch/x86/mm/fault.c 	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
__va              448 arch/x86/mm/fault.c 	pgd_t *base = __va(read_cr3_pa());
__va              618 arch/x86/mm/fault.c 		pgd = __va(read_cr3_pa());
__va              131 arch/x86/mm/init.c 		adr = __va((pfn + i) << PAGE_SHIFT);
__va              135 arch/x86/mm/init.c 	return __va(pfn << PAGE_SHIFT);
__va              690 arch/x86/mm/init_32.c 	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
__va              692 arch/x86/mm/init_32.c 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
__va              816 arch/x86/mm/init_32.c 		(unsigned long)__va(0), (unsigned long)high_memory,
__va              817 arch/x86/mm/init_32.c 		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
__va              373 arch/x86/mm/init_64.c 		pgd = pgd_offset_k((unsigned long)__va(phys));
__va              379 arch/x86/mm/init_64.c 		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
__va              385 arch/x86/mm/init_64.c 		pud = pud_offset(p4d, (unsigned long)__va(phys));
__va              592 arch/x86/mm/init_64.c 	unsigned long vaddr = (unsigned long)__va(paddr);
__va              600 arch/x86/mm/init_64.c 		vaddr = (unsigned long)__va(paddr);
__va              677 arch/x86/mm/init_64.c 	vaddr = (unsigned long)__va(paddr);
__va              678 arch/x86/mm/init_64.c 	vaddr_end = (unsigned long)__va(paddr_end);
__va              731 arch/x86/mm/init_64.c 	vaddr = (unsigned long)__va(paddr_start);
__va              732 arch/x86/mm/init_64.c 	vaddr_end = (unsigned long)__va(paddr_end);
__va              842 arch/x86/mm/init_64.c 		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
__va             1204 arch/x86/mm/init_64.c 	start = (unsigned long)__va(start);
__va             1205 arch/x86/mm/init_64.c 	end = (unsigned long)__va(end);
__va              804 arch/x86/mm/ioremap.c 	pgd_t *base = __va(read_cr3_pa());
__va              168 arch/x86/mm/kaslr.c 	vaddr = (unsigned long)__va(paddr);
__va              157 arch/x86/mm/mem_encrypt.c 	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
__va              177 arch/x86/mm/mem_encrypt.c 	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
__va              240 arch/x86/mm/mem_encrypt.c 	clflush_cache_range(__va(pa), size);
__va              205 arch/x86/mm/numa.c 	nd = __va(nd_pa);
__va              370 arch/x86/mm/numa.c 	numa_distance = __va(phys);
__va               76 arch/x86/mm/numa_32.c 	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
__va               78 arch/x86/mm/numa_32.c 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
__va              456 arch/x86/mm/numa_emulation.c 		phys_dist = __va(phys);
__va               59 arch/x86/mm/pageattr-test.c 		unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
__va              142 arch/x86/mm/pageattr-test.c 		addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
__va             1568 arch/x86/mm/pageattr.c 	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
__va              864 arch/x86/mm/pat.c 	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
__va              100 arch/x86/pci/irq.c 		rt = pirq_check_routing_table((u8 *) __va(pirq_table_addr));
__va              105 arch/x86/pci/irq.c 	for (addr = (u8 *) __va(0xf0000); addr < (u8 *) __va(0x100000); addr += 16) {
__va              301 arch/x86/pci/pcbios.c 	for (check = (union bios32 *) __va(0xe0000);
__va              302 arch/x86/pci/pcbios.c 	     check <= (union bios32 *) __va(0xffff0);
__va              624 arch/x86/platform/efi/efi.c 		va = __va(md->phys_addr);
__va              107 arch/x86/platform/efi/efi_64.c 		vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
__va              133 arch/x86/platform/efi/efi_64.c 				vaddr = (unsigned long)__va(addr_pud);
__va              507 arch/x86/platform/efi/efi_64.c 		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
__va              509 arch/x86/platform/efi/efi_64.c 	return (void __iomem *)__va(phys_addr);
__va               70 arch/x86/platform/intel-quark/imr_selftest.c 	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
__va               75 arch/x86/platform/intel-quark/imr_selftest.c 	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
__va               80 arch/x86/platform/intel-quark/imr_selftest.c 	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
__va               76 arch/x86/platform/olpc/olpc-xo1-pm.c 	void *pgd_addr = __va(read_cr3_pa());
__va               51 arch/x86/platform/pvh/enlighten.c 		ep = __va(pvh_start_info.memmap_paddr);
__va               83 arch/x86/platform/pvh/enlighten.c 			__va(pvh_start_info.modlist_paddr);
__va               38 arch/x86/platform/uv/bios_uv.c 		ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
__va              226 arch/x86/power/hibernate.c 	pgd = (pgd_t *)__va(read_cr3_pa()) +
__va               86 arch/x86/realmode/init.c 		__va(real_mode_header->trampoline_header);
__va              108 arch/x86/realmode/init.c 	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
__va              136 arch/x86/realmode/init.c 		(unsigned long) __va(real_mode_header->text_start);
__va               69 arch/x86/xen/enlighten_hvm.c 	HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
__va              393 arch/x86/xen/enlighten_pv.c 		void *av = __va(PFN_PHYS(pfn));
__va              475 arch/x86/xen/enlighten_pv.c 	virt = __va(PFN_PHYS(pfn));
__va             1118 arch/x86/xen/mmu_pv.c 	void *vaddr = __va(paddr);
__va             1133 arch/x86/xen/mmu_pv.c 	ClearPagePinned(virt_to_page(__va(pa)));
__va             1274 arch/x86/xen/mmu_pv.c 	xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
__va             1435 arch/x86/xen/mmu_pv.c 		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
__va             1581 arch/x86/xen/mmu_pv.c 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
__va             1591 arch/x86/xen/mmu_pv.c 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
__va             1599 arch/x86/xen/mmu_pv.c 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
__va             1604 arch/x86/xen/mmu_pv.c 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
__va             1623 arch/x86/xen/mmu_pv.c 	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
__va             1739 arch/x86/xen/mmu_pv.c 	return __va(paddr);
__va             1990 arch/x86/xen/mmu_pv.c 	xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
__va             2093 arch/x86/xen/mmu_pv.c 	pgd = __va(read_cr3_pa());
__va             2115 arch/x86/xen/mmu_pv.c 				make_lowmem_page_readonly(__va(pt_phys));
__va             2123 arch/x86/xen/mmu_pv.c 			make_lowmem_page_readonly(__va(pmd_phys));
__va             2131 arch/x86/xen/mmu_pv.c 		make_lowmem_page_readonly(__va(pud_phys));
__va             2162 arch/x86/xen/mmu_pv.c 		make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
__va              313 arch/x86/xen/setup.c 	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
__va              437 arch/x86/xen/setup.c 			(unsigned long)__va(pfn << PAGE_SHIFT),
__va              189 arch/xtensa/include/asm/page.h #define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
__va              119 arch/xtensa/kernel/setup.c 	initrd_start = (unsigned long)__va(mi->start);
__va              120 arch/xtensa/kernel/setup.c 	initrd_end = (unsigned long)__va(mi->end);
__va              133 arch/xtensa/kernel/setup.c 	dtb_start = __va(tag->data[0]);
__va              153 arch/xtensa/mm/init.c 	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
__va               30 drivers/char/agp/alpha-agp.c 	page = virt_to_page(__va(pa));
__va              190 drivers/char/agp/uninorth-agp.c 		flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
__va              191 drivers/char/agp/uninorth-agp.c 				   (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
__va               67 drivers/char/tpm/eventlog/of.c 	log->bios_event_log = kmemdup(__va(base), size, GFP_KERNEL);
__va             1065 drivers/crypto/n2_core.c 		final_iv_addr = __va(iv_paddr);
__va             1083 drivers/crypto/n2_core.c 				memcpy(rctx->temp_iv, __va(pa),
__va              770 drivers/infiniband/hw/hfi1/user_exp_rcv.c 			      __va(page_to_phys(pages[0])),
__va              173 drivers/iommu/io-pgtable-arm.c #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
__va              510 drivers/macintosh/smu.c 	smu->cmd_buf = __va(smu_cmdbuf_abs);
__va              522 drivers/misc/sgi-gru/grufile.c 	gru_start_vaddr = __va(gru_start_paddr);
__va               49 drivers/misc/sgi-xp/xp_uv.c 	unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));
__va              147 drivers/misc/sgi-xp/xpc_partition.c 	rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
__va              790 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	return __va(ret << SECTION_SIZE_BITS);
__va              865 drivers/of/fdt.c 		initrd_start = (unsigned long)__va(start);
__va              866 drivers/of/fdt.c 		initrd_end = (unsigned long)__va(end);
__va              536 drivers/platform/x86/dcdbas.c 	for (addr = (u8 *)__va(0xf0000);
__va              537 drivers/platform/x86/dcdbas.c 	     addr < (u8 *)__va(0x100000 - sizeof(struct smm_eps_table));
__va               64 drivers/pnp/pnpbios/bioscalls.c 			(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
__va              458 drivers/pnp/pnpbios/bioscalls.c 			       0, data, 65536, __va(nvram_base), 65536);
__va              487 drivers/pnp/pnpbios/bioscalls.c 			 (unsigned long)__va(header->fields.pm16cseg));
__va              489 drivers/pnp/pnpbios/bioscalls.c 			 (unsigned long)__va(header->fields.pm16dseg));
__va              438 drivers/pnp/pnpbios/core.c 	for (check = (union pnp_bios_install_struct *)__va(0xf0000);
__va              439 drivers/pnp/pnpbios/core.c 	     check < (union pnp_bios_install_struct *)__va(0xffff0);
__va             1406 drivers/s390/net/lcs.c 		index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
__va              795 drivers/video/fbdev/ssd1307fb.c 	__free_pages(__va(info->fix.smem_start), get_order(info->fix.smem_len));
__va               54 drivers/xen/mem-reservation.c 					(unsigned long)__va(pfn << PAGE_SHIFT),
__va               82 drivers/xen/mem-reservation.c 					(unsigned long)__va(pfn << PAGE_SHIFT),
__va              133 fs/proc/kcore.c 	ent->addr = (unsigned long)__va(0);
__va              427 fs/proc/vmcore.c 		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
__va              918 include/asm-generic/io.h 	return __va(address);
__va             1045 include/asm-generic/io.h 	return __va(addr);
__va               82 include/asm-generic/page.h #define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
__va              118 include/linux/mm.h #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
__va              122 include/linux/mm.h #define lm_alias(x)	__va(__pa_symbol(x))
__va               80 include/xen/arm/page.h #define gfn_to_virt(m)		(__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
__va              539 init/initramfs.c 	unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
__va              540 init/initramfs.c 	unsigned long crashk_end   = (unsigned long)__va(crashk_res.end);
__va               38 kernel/iomem.c 		return __va(offset);
__va              720 kernel/kexec_file.c 	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
__va             1324 kernel/kexec_file.c 		phdr->p_vaddr = (unsigned long long) __va(mstart);
__va             1122 mm/kmemleak.c  		kmemleak_alloc(__va(phys), size, min_count, gfp);
__va             1136 mm/kmemleak.c  		kmemleak_free_part(__va(phys), size);
__va             1148 mm/kmemleak.c  		kmemleak_not_leak(__va(phys));
__va             1160 mm/kmemleak.c  		kmemleak_ignore(__va(phys));
__va              462 mm/memblock.c  		new_array = addr ? __va(addr) : NULL;
__va              128 mm/memremap.c  		kasan_remove_zero_shadow(__va(res->start), resource_size(res));
__va              277 mm/memremap.c  		error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
__va              307 mm/memremap.c  	return __va(res->start);
__va              310 mm/memremap.c  	kasan_remove_zero_shadow(__va(res->start), resource_size(res));
__va               43 mm/memtest.c   	start = __va(start_phys_aligned);
__va             1354 mm/page_alloc.c 		set_page_address(page, __va(pfn << PAGE_SHIFT));
__va              129 mm/sparse-vmemmap.c 	return __va(__pfn_to_phys(pfn));
__va             2333 virt/kvm/kvm_main.c 	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
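Every hit above uses __va() the same way: it takes a physical address, or a pfn shifted left by PAGE_SHIFT (often wrapped as pfn_to_kaddr()/pfn_to_virt()), and yields the corresponding kernel virtual address in the linear/direct mapping. The sketch below is a minimal userspace model of that arithmetic under an assumed flat offset mapping; MODEL_PAGE_OFFSET and MODEL_PAGE_SHIFT are made-up stand-ins, not any real architecture's configuration, and real kernels define __va()/__pa() per architecture as the files above show.

/*
 * Userspace model of the pattern shown throughout the listing above; this is
 * NOT kernel code.  On linear-mapped architectures __va(pa) is essentially
 * pa + PAGE_OFFSET, __pa(va) is the inverse, and pfn_to_kaddr(pfn) /
 * pfn_to_virt(pfn) are __va(pfn << PAGE_SHIFT).  The two constants below are
 * illustrative assumptions only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT   12UL                    /* assumed 4 KiB pages */
#define MODEL_PAGE_OFFSET  0xffff888000000000UL    /* assumed direct-map base */

static uint64_t model_va(uint64_t pa)              /* models __va() */
{
	return pa + MODEL_PAGE_OFFSET;
}

static uint64_t model_pa(uint64_t va)              /* models __pa() */
{
	return va - MODEL_PAGE_OFFSET;
}

static uint64_t model_pfn_to_kaddr(uint64_t pfn)   /* models pfn_to_kaddr() */
{
	return model_va(pfn << MODEL_PAGE_SHIFT);
}

int main(void)
{
	uint64_t pa = 0x1234000;

	/* __pa(__va(pa)) round-trips, which callers above rely on. */
	assert(model_pa(model_va(pa)) == pa);

	/*
	 * pfn_to_kaddr(pfn) == __va(pfn << PAGE_SHIFT), matching the per-arch
	 * page.h definitions quoted in the listing.
	 */
	assert(model_pfn_to_kaddr(pa >> MODEL_PAGE_SHIFT) == model_va(pa));

	printf("phys 0x%llx -> direct-map virt 0x%llx\n",
	       (unsigned long long)pa, (unsigned long long)model_va(pa));
	return 0;
}

Built with any C compiler (for example, cc -o va_model va_model.c), the program only asserts the round-trip identities that the call sites above depend on; it does not reproduce the highmem, sparse-memory, or non-linear cases where __va() is valid only for lowmem/direct-mapped pages.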