vaddr 80 arch/alpha/boot/bootpz.c unsigned long vaddr, kaddr;
vaddr 87 arch/alpha/boot/bootpz.c for (vaddr = vstart; vaddr <= vend; vaddr += PAGE_SIZE)
vaddr 89 arch/alpha/boot/bootpz.c kaddr = (find_pa(vaddr) | PAGE_OFFSET);
vaddr 95 arch/alpha/boot/bootpz.c vaddr, kaddr, kstart, kend);
vaddr 71 arch/alpha/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vaddr 73 arch/alpha/include/asm/cacheflush.h flush_icache_user_range(vma, page, vaddr, len); \
vaddr 75 arch/alpha/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vaddr 18 arch/alpha/include/asm/page.h #define clear_user_page(page, vaddr, pg) clear_page(page)
vaddr 20 arch/alpha/include/asm/page.h #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
vaddr 25 arch/alpha/include/asm/page.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
vaddr 158 arch/alpha/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(ZERO_PGE))
vaddr 317 arch/alpha/kernel/core_irongate.c unsigned long vaddr;
vaddr 383 arch/alpha/kernel/core_irongate.c for(baddr = addr, vaddr = (unsigned long)area->addr;
vaddr 385 arch/alpha/kernel/core_irongate.c baddr += PAGE_SIZE, vaddr += PAGE_SIZE)
vaddr 390 arch/alpha/kernel/core_irongate.c if (__alpha_remap_area_pages(vaddr,
vaddr 400 arch/alpha/kernel/core_irongate.c vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
vaddr 403 arch/alpha/kernel/core_irongate.c addr, size, vaddr);
vaddr 405 arch/alpha/kernel/core_irongate.c return (void __iomem *)vaddr;
vaddr 692 arch/alpha/kernel/core_marvel.c unsigned long vaddr;
vaddr 749 arch/alpha/kernel/core_marvel.c for (vaddr = (unsigned long)area->addr;
vaddr 751 arch/alpha/kernel/core_marvel.c baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
vaddr 760 arch/alpha/kernel/core_marvel.c if (__alpha_remap_area_pages(vaddr,
vaddr 771 arch/alpha/kernel/core_marvel.c vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
vaddr 773 arch/alpha/kernel/core_marvel.c return (void __iomem *) vaddr;
vaddr 777 arch/alpha/kernel/core_marvel.c vaddr = baddr + hose->mem_space->start;
vaddr 778 arch/alpha/kernel/core_marvel.c return (void __iomem *) vaddr;
vaddr 463 arch/alpha/kernel/core_titan.c unsigned long vaddr;
vaddr 491 arch/alpha/kernel/core_titan.c vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
vaddr 492 arch/alpha/kernel/core_titan.c return (void __iomem *) vaddr;
vaddr 520 arch/alpha/kernel/core_titan.c for (vaddr = (unsigned long)area->addr;
vaddr 522 arch/alpha/kernel/core_titan.c baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
vaddr 531 arch/alpha/kernel/core_titan.c if (__alpha_remap_area_pages(vaddr,
vaddr 542 arch/alpha/kernel/core_titan.c vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
vaddr 543 arch/alpha/kernel/core_titan.c return (void __iomem *) vaddr;
vaddr 201 arch/alpha/kernel/core_tsunami.c tsunami_probe_read(volatile unsigned long *vaddr)
vaddr 210 arch/alpha/kernel/core_tsunami.c dont_care = *vaddr;
vaddr 223 arch/alpha/kernel/core_tsunami.c tsunami_probe_write(volatile unsigned long *vaddr)
vaddr 228 arch/alpha/kernel/core_tsunami.c true_contents = *vaddr;
vaddr 229 arch/alpha/kernel/core_tsunami.c *vaddr = 0;
vaddr 236 arch/alpha/kernel/core_tsunami.c (unsigned long)vaddr);
vaddr 239 arch/alpha/kernel/core_tsunami.c *vaddr = true_contents;
vaddr 194 arch/alpha/mm/init.c unsigned long vaddr;
vaddr 206 arch/alpha/mm/init.c vaddr = (unsigned long)console_remap_vm.addr;
vaddr 212 arch/alpha/mm/init.c crb->map[i].va = vaddr;
vaddr 217 arch/alpha/mm/init.c if (pmd != pmd_offset(pgd, vaddr)) {
vaddr 219 arch/alpha/mm/init.c pmd = pmd_offset(pgd, vaddr);
vaddr 223 arch/alpha/mm/init.c set_pte(pte_offset_kernel(pmd, vaddr),
vaddr 226 arch/alpha/mm/init.c vaddr += PAGE_SIZE;
vaddr 32 arch/arc/include/asm/cacheflush.h void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
vaddr 33 arch/arc/include/asm/cacheflush.h void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
vaddr 34 arch/arc/include/asm/cacheflush.h void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
vaddr 107 arch/arc/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vaddr 111 arch/arc/include/asm/cacheflush.h __sync_icache_dcache((unsigned long)(dst), vaddr, len); \
vaddr 114 arch/arc/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vaddr 98 arch/arc/include/asm/page.h #define __pa(vaddr) ((unsigned long)(vaddr))
vaddr 250 arch/arc/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
vaddr 31 arch/arc/mm/cache.c void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
vaddr 250 arch/arc/mm/cache.c void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
vaddr 272 arch/arc/mm/cache.c vaddr &= CACHE_LINE_MASK;
vaddr 278 arch/arc/mm/cache.c paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
vaddr 295 arch/arc/mm/cache.c void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
vaddr 318 arch/arc/mm/cache.c vaddr &= CACHE_LINE_MASK;
vaddr 345 arch/arc/mm/cache.c write_aux_reg(aux_cmd, vaddr);
vaddr 346 arch/arc/mm/cache.c vaddr += L1_CACHE_BYTES;
vaddr 366 arch/arc/mm/cache.c void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
vaddr 420 arch/arc/mm/cache.c void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
vaddr 580 arch/arc/mm/cache.c static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
vaddr 590 arch/arc/mm/cache.c __cache_line_loop(paddr, vaddr, sz, op, full_page);
vaddr 602 arch/arc/mm/cache.c #define __dc_line_op(paddr, vaddr, sz, op)
vaddr 616 arch/arc/mm/cache.c __ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
vaddr 623 arch/arc/mm/cache.c (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
vaddr 634 arch/arc/mm/cache.c phys_addr_t paddr, vaddr;
vaddr 642 arch/arc/mm/cache.c __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
vaddr 645 arch/arc/mm/cache.c static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
vaddr 650 arch/arc/mm/cache.c .vaddr = vaddr,
vaddr 861 arch/arc/mm/cache.c unsigned long vaddr = page->index << PAGE_SHIFT;
vaddr 863 arch/arc/mm/cache.c if (addr_not_cache_congruent(paddr, vaddr))
vaddr 864 arch/arc/mm/cache.c __flush_dcache_page(paddr, vaddr);
vaddr 999 arch/arc/mm/cache.c void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
vaddr 1001 arch/arc/mm/cache.c __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
vaddr 1002 arch/arc/mm/cache.c __ic_line_inv_vaddr(paddr, vaddr, len);
vaddr 1006 arch/arc/mm/cache.c void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
vaddr 1008 arch/arc/mm/cache.c __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
vaddr 1015 arch/arc/mm/cache.c void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
vaddr 1017 arch/arc/mm/cache.c __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
vaddr 65 arch/arc/mm/highmem.c unsigned long vaddr;
vaddr 74 arch/arc/mm/highmem.c vaddr = FIXMAP_ADDR(idx);
vaddr 76 arch/arc/mm/highmem.c set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
vaddr 79 arch/arc/mm/highmem.c return (void *)vaddr;
vaddr 56 arch/arc/mm/ioremap.c unsigned long vaddr;
vaddr 85 arch/arc/mm/ioremap.c vaddr = (unsigned long)area->addr;
vaddr 86 arch/arc/mm/ioremap.c if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
vaddr 87 arch/arc/mm/ioremap.c vunmap((void __force *)vaddr);
vaddr 90 arch/arc/mm/ioremap.c return (void __iomem *)(off + (char __iomem *)vaddr);
vaddr 515 arch/arc/mm/tlb.c void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
vaddr 552 arch/arc/mm/tlb.c tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);
vaddr 554 arch/arc/mm/tlb.c vaddr &= PAGE_MASK;
vaddr 564 arch/arc/mm/tlb.c pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
vaddr 599 arch/arc/mm/tlb.c unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
vaddr 603 arch/arc/mm/tlb.c create_tlb(vma, vaddr, ptep);
vaddr 619 arch/arc/mm/tlb.c addr_not_cache_congruent(paddr, vaddr)) {
vaddr 628 arch/arc/mm/tlb.c __inv_icache_page(paddr, vaddr);
vaddr 17 arch/arm/include/asm/cacheflush.h #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
vaddr 170 arch/arm/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vaddr 46 arch/arm/include/asm/mach/map.h extern void debug_ll_addr(unsigned long *paddr, unsigned long *vaddr);
vaddr 14 arch/arm/include/asm/page-nommu.h #define clear_user_page(page, vaddr, pg) clear_page(page)
vaddr 15 arch/arm/include/asm/page-nommu.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
vaddr 111 arch/arm/include/asm/page.h void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
vaddr 113 arch/arm/include/asm/page.h unsigned long vaddr, struct vm_area_struct *vma);
vaddr 127 arch/arm/include/asm/page.h extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
vaddr 129 arch/arm/include/asm/page.h unsigned long vaddr, struct vm_area_struct *vma);
vaddr 132 arch/arm/include/asm/page.h #define clear_user_highpage(page,vaddr) \
vaddr 133 arch/arm/include/asm/page.h __cpu_clear_user_highpage(page, vaddr)
vaddr 136 arch/arm/include/asm/page.h #define copy_user_highpage(to,from,vaddr,vma) \
vaddr 137 arch/arm/include/asm/page.h __cpu_copy_user_highpage(to, from, vaddr, vma)
vaddr 58 arch/arm/include/asm/pgtable-nommu.h #define ZERO_PAGE(vaddr) (virt_to_page(0))
vaddr 174 arch/arm/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (empty_zero_page)
vaddr 34 arch/arm/kernel/crash_dump.c void *vaddr;
vaddr 39 arch/arm/kernel/crash_dump.c vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
vaddr 40 arch/arm/kernel/crash_dump.c if (!vaddr)
vaddr 44 arch/arm/kernel/crash_dump.c if (copy_to_user(buf, vaddr + offset, csize)) {
vaddr 45 arch/arm/kernel/crash_dump.c iounmap(vaddr);
vaddr 49 arch/arm/kernel/crash_dump.c memcpy(buf, vaddr + offset, csize);
vaddr 52 arch/arm/kernel/crash_dump.c iounmap(vaddr);
vaddr 76 arch/arm/kernel/tcm.c unsigned long vaddr;
vaddr 81 arch/arm/kernel/tcm.c vaddr = gen_pool_alloc(tcm_pool, len);
vaddr 82 arch/arm/kernel/tcm.c if (!vaddr)
vaddr 85 arch/arm/kernel/tcm.c return (void *) vaddr;
vaddr 97 arch/arm/mach-ixp4xx/include/mach/io.h const u8 *vaddr = p;
vaddr 100 arch/arm/mach-ixp4xx/include/mach/io.h writeb(*vaddr++, bus_addr);
vaddr 122 arch/arm/mach-ixp4xx/include/mach/io.h const u16 *vaddr = p;
vaddr 125 arch/arm/mach-ixp4xx/include/mach/io.h writew(*vaddr++, bus_addr);
vaddr 143 arch/arm/mach-ixp4xx/include/mach/io.h const u32 *vaddr = p;
vaddr 145 arch/arm/mach-ixp4xx/include/mach/io.h writel(*vaddr++, bus_addr);
vaddr 167 arch/arm/mach-ixp4xx/include/mach/io.h u8 *vaddr = p;
vaddr 170 arch/arm/mach-ixp4xx/include/mach/io.h *vaddr++ = readb(bus_addr);
vaddr 192 arch/arm/mach-ixp4xx/include/mach/io.h u16 *vaddr = p;
vaddr 195 arch/arm/mach-ixp4xx/include/mach/io.h *vaddr++ = readw(bus_addr);
vaddr 215 arch/arm/mach-ixp4xx/include/mach/io.h u32 *vaddr = p;
vaddr 218 arch/arm/mach-ixp4xx/include/mach/io.h *vaddr++ = readl(bus_addr);
vaddr 258 arch/arm/mach-ixp4xx/include/mach/io.h const u8 *vaddr = p;
vaddr 261 arch/arm/mach-ixp4xx/include/mach/io.h outb(*vaddr++, io_addr);
vaddr 277 arch/arm/mach-ixp4xx/include/mach/io.h const u16 *vaddr = p;
vaddr 279 arch/arm/mach-ixp4xx/include/mach/io.h outw(cpu_to_le16(*vaddr++), io_addr);
vaddr 291 arch/arm/mach-ixp4xx/include/mach/io.h const u32 *vaddr = p;
vaddr 293 arch/arm/mach-ixp4xx/include/mach/io.h outl(cpu_to_le32(*vaddr++), io_addr);
vaddr 311 arch/arm/mach-ixp4xx/include/mach/io.h u8 *vaddr = p;
vaddr 313 arch/arm/mach-ixp4xx/include/mach/io.h *vaddr++ = inb(io_addr);
vaddr 331 arch/arm/mach-ixp4xx/include/mach/io.h u16 *vaddr = p;
vaddr 333 arch/arm/mach-ixp4xx/include/mach/io.h *vaddr++ = le16_to_cpu(inw(io_addr));
vaddr 349 arch/arm/mach-ixp4xx/include/mach/io.h u32 *vaddr = p;
vaddr 351 arch/arm/mach-ixp4xx/include/mach/io.h *vaddr++ = le32_to_cpu(inl(io_addr));
vaddr 375 arch/arm/mach-ixp4xx/include/mach/io.h static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
vaddr 379 arch/arm/mach-ixp4xx/include/mach/io.h insb(port & PIO_MASK, vaddr, count);
vaddr 382 arch/arm/mach-ixp4xx/include/mach/io.h __raw_readsb(addr, vaddr, count);
vaddr 384 arch/arm/mach-ixp4xx/include/mach/io.h __indirect_readsb(addr, vaddr, count);
vaddr 403 arch/arm/mach-ixp4xx/include/mach/io.h static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
vaddr 408 arch/arm/mach-ixp4xx/include/mach/io.h insw(port & PIO_MASK, vaddr, count);
vaddr 411 arch/arm/mach-ixp4xx/include/mach/io.h __raw_readsw(addr, vaddr, count);
vaddr 413 arch/arm/mach-ixp4xx/include/mach/io.h __indirect_readsw(addr, vaddr, count);
vaddr 433 arch/arm/mach-ixp4xx/include/mach/io.h static inline void ioread32_rep(const void __iomem *addr, void *vaddr,
vaddr 438 arch/arm/mach-ixp4xx/include/mach/io.h insl(port & PIO_MASK, vaddr, count);
vaddr 441 arch/arm/mach-ixp4xx/include/mach/io.h __raw_readsl(addr, vaddr, count);
vaddr 443 arch/arm/mach-ixp4xx/include/mach/io.h __indirect_readsl(addr, vaddr, count);
vaddr 462 arch/arm/mach-ixp4xx/include/mach/io.h static inline void iowrite8_rep(void __iomem *addr, const void *vaddr,
vaddr 467 arch/arm/mach-ixp4xx/include/mach/io.h outsb(port & PIO_MASK, vaddr, count);
vaddr 470 arch/arm/mach-ixp4xx/include/mach/io.h __raw_writesb(addr, vaddr, count);
vaddr 472 arch/arm/mach-ixp4xx/include/mach/io.h __indirect_writesb(addr, vaddr, count);
vaddr 491 arch/arm/mach-ixp4xx/include/mach/io.h static inline void iowrite16_rep(void __iomem *addr, const void *vaddr,
vaddr 496 arch/arm/mach-ixp4xx/include/mach/io.h outsw(port & PIO_MASK, vaddr, count);
vaddr 499 arch/arm/mach-ixp4xx/include/mach/io.h __raw_writesw(addr, vaddr, count);
vaddr 501 arch/arm/mach-ixp4xx/include/mach/io.h __indirect_writesw(addr, vaddr, count);
vaddr 520 arch/arm/mach-ixp4xx/include/mach/io.h static inline void iowrite32_rep(void __iomem *addr, const void *vaddr,
vaddr 525 arch/arm/mach-ixp4xx/include/mach/io.h outsl(port & PIO_MASK, vaddr, count);
vaddr 528 arch/arm/mach-ixp4xx/include/mach/io.h __raw_writesl(addr, vaddr, count);
vaddr 530 arch/arm/mach-ixp4xx/include/mach/io.h __indirect_writesl(addr, vaddr, count);
vaddr 54 arch/arm/mm/cache-feroceon-l2.c void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
vaddr 55 arch/arm/mm/cache-feroceon-l2.c return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
vaddr 61 arch/arm/mm/cache-feroceon-l2.c static inline void l2_put_va(unsigned long vaddr)
vaddr 64 arch/arm/mm/cache-feroceon-l2.c kunmap_atomic((void *)vaddr);
vaddr 88 arch/arm/mm/cache-xsc3l2.c unsigned long vaddr;
vaddr 95 arch/arm/mm/cache-xsc3l2.c vaddr = -1; /* to force the first mapping */
vaddr 101 arch/arm/mm/cache-xsc3l2.c vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
vaddr 102 arch/arm/mm/cache-xsc3l2.c xsc3_l2_clean_mva(vaddr);
vaddr 103 arch/arm/mm/cache-xsc3l2.c xsc3_l2_inv_mva(vaddr);
vaddr 111 arch/arm/mm/cache-xsc3l2.c vaddr = l2_map_va(start, vaddr);
vaddr 112 arch/arm/mm/cache-xsc3l2.c xsc3_l2_inv_mva(vaddr);
vaddr 120 arch/arm/mm/cache-xsc3l2.c vaddr = l2_map_va(start, vaddr);
vaddr 121 arch/arm/mm/cache-xsc3l2.c xsc3_l2_clean_mva(vaddr);
vaddr 122 arch/arm/mm/cache-xsc3l2.c xsc3_l2_inv_mva(vaddr);
vaddr 125 arch/arm/mm/cache-xsc3l2.c l2_unmap_va(vaddr);
vaddr 132 arch/arm/mm/cache-xsc3l2.c unsigned long vaddr;
vaddr 134 arch/arm/mm/cache-xsc3l2.c vaddr = -1; /* to force the first mapping */
vaddr 138 arch/arm/mm/cache-xsc3l2.c vaddr = l2_map_va(start, vaddr);
vaddr 139 arch/arm/mm/cache-xsc3l2.c xsc3_l2_clean_mva(vaddr);
vaddr 143 arch/arm/mm/cache-xsc3l2.c l2_unmap_va(vaddr);
vaddr 170 arch/arm/mm/cache-xsc3l2.c unsigned long vaddr;
vaddr 177 arch/arm/mm/cache-xsc3l2.c vaddr = -1; /* to force the first mapping */
vaddr 181 arch/arm/mm/cache-xsc3l2.c vaddr = l2_map_va(start, vaddr);
vaddr 182 arch/arm/mm/cache-xsc3l2.c xsc3_l2_clean_mva(vaddr);
vaddr 183 arch/arm/mm/cache-xsc3l2.c xsc3_l2_inv_mva(vaddr);
vaddr 187 arch/arm/mm/cache-xsc3l2.c l2_unmap_va(vaddr);
vaddr 39 arch/arm/mm/copypage-fa.c unsigned long vaddr, struct vm_area_struct *vma)
vaddr 55 arch/arm/mm/copypage-fa.c void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
vaddr 66 arch/arm/mm/copypage-feroceon.c unsigned long vaddr, struct vm_area_struct *vma)
vaddr 72 arch/arm/mm/copypage-feroceon.c flush_cache_page(vma, vaddr, page_to_pfn(from));
vaddr 78 arch/arm/mm/copypage-feroceon.c void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
vaddr 65 arch/arm/mm/copypage-v4mc.c unsigned long vaddr, struct vm_area_struct *vma)
vaddr 86 arch/arm/mm/copypage-v4mc.c void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
vaddr 48 arch/arm/mm/copypage-v4wb.c unsigned long vaddr, struct vm_area_struct *vma)
vaddr 54 arch/arm/mm/copypage-v4wb.c flush_cache_page(vma, vaddr, page_to_pfn(from));
vaddr 65 arch/arm/mm/copypage-v4wb.c void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
vaddr 44 arch/arm/mm/copypage-v4wt.c unsigned long vaddr, struct vm_area_struct *vma)
vaddr 60 arch/arm/mm/copypage-v4wt.c void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
vaddr 31 arch/arm/mm/copypage-v6.c struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
vaddr 46 arch/arm/mm/copypage-v6.c static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
vaddr 70 arch/arm/mm/copypage-v6.c struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
vaddr 72 arch/arm/mm/copypage-v6.c unsigned int offset = CACHE_COLOUR(vaddr);
vaddr 103 arch/arm/mm/copypage-v6.c static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
vaddr 105 arch/arm/mm/copypage-v6.c unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
vaddr 64 arch/arm/mm/copypage-xsc3.c unsigned long vaddr, struct vm_area_struct *vma)
vaddr 70 arch/arm/mm/copypage-xsc3.c flush_cache_page(vma, vaddr, page_to_pfn(from));
vaddr 79 arch/arm/mm/copypage-xsc3.c void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
vaddr 85 arch/arm/mm/copypage-xscale.c unsigned long vaddr, struct vm_area_struct *vma)
vaddr 107 arch/arm/mm/copypage-xscale.c xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
vaddr 905 arch/arm/mm/dma-mapping.c void *vaddr;
vaddr 914 arch/arm/mm/dma-mapping.c vaddr = kmap_atomic(page);
vaddr 915 arch/arm/mm/dma-mapping.c op(vaddr + offset, len, dir);
vaddr 916 arch/arm/mm/dma-mapping.c kunmap_atomic(vaddr);
vaddr 918 arch/arm/mm/dma-mapping.c vaddr = kmap_high_get(page);
vaddr 919 arch/arm/mm/dma-mapping.c if (vaddr) {
vaddr 920 arch/arm/mm/dma-mapping.c op(vaddr + offset, len, dir);
vaddr 925 arch/arm/mm/dma-mapping.c vaddr = page_address(page) + offset;
vaddr 926 arch/arm/mm/dma-mapping.c op(vaddr, len, dir);
vaddr 38 arch/arm/mm/flush.c static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
vaddr 40 arch/arm/mm/flush.c unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
vaddr 52 arch/arm/mm/flush.c static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
vaddr 54 arch/arm/mm/flush.c unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
vaddr 55 arch/arm/mm/flush.c unsigned long offset = vaddr & (PAGE_SIZE - 1);
vaddr 115 arch/arm/mm/flush.c #define flush_pfn_alias(pfn,vaddr) do { } while (0)
vaddr 116 arch/arm/mm/flush.c #define flush_icache_alias(pfn,vaddr,len) do { } while (0)
vaddr 20 arch/arm/mm/highmem.c unsigned long vaddr = __fix_to_virt(idx);
vaddr 21 arch/arm/mm/highmem.c pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
vaddr 24 arch/arm/mm/highmem.c local_flush_tlb_kernel_page(vaddr);
vaddr 27 arch/arm/mm/highmem.c static inline pte_t get_fixmap_pte(unsigned long vaddr)
vaddr 29 arch/arm/mm/highmem.c pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
vaddr 55 arch/arm/mm/highmem.c unsigned long vaddr;
vaddr 80 arch/arm/mm/highmem.c vaddr = __fix_to_virt(idx);
vaddr 86 arch/arm/mm/highmem.c BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
vaddr 95 arch/arm/mm/highmem.c return (void *)vaddr;
vaddr 101 arch/arm/mm/highmem.c unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
vaddr 109 arch/arm/mm/highmem.c __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
vaddr 111 arch/arm/mm/highmem.c BUG_ON(vaddr != __fix_to_virt(idx));
vaddr 117 arch/arm/mm/highmem.c } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
vaddr 119 arch/arm/mm/highmem.c kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
vaddr 128 arch/arm/mm/highmem.c unsigned long vaddr;
vaddr 139 arch/arm/mm/highmem.c vaddr = __fix_to_virt(idx);
vaddr 141 arch/arm/mm/highmem.c BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
vaddr 145 arch/arm/mm/highmem.c return (void *)vaddr;
vaddr 70 arch/arm/mm/ioremap.c struct static_vm *find_static_vm_vaddr(void *vaddr)
vaddr 79 arch/arm/mm/ioremap.c if (vm->addr > vaddr)
vaddr 82 arch/arm/mm/ioremap.c if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
vaddr 93 arch/arm/mm/ioremap.c void *vaddr;
vaddr 97 arch/arm/mm/ioremap.c vaddr = vm->addr;
vaddr 102 arch/arm/mm/ioremap.c if (vm->addr > vaddr)
vaddr 80 arch/arm/mm/mm.h extern struct static_vm *find_static_vm_vaddr(void *vaddr);
vaddr 408 arch/arm/mm/mmu.c unsigned long vaddr = __fix_to_virt(idx);
vaddr 409 arch/arm/mm/mmu.c pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
vaddr 422 arch/arm/mm/mmu.c set_pte_at(NULL, vaddr, pte,
vaddr 425 arch/arm/mm/mmu.c pte_clear(NULL, vaddr, pte);
vaddr 426 arch/arm/mm/mmu.c local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
vaddr 55 arch/arm/plat-samsung/pm-debug.c unsigned long vaddr;
vaddr 57 arch/arm/plat-samsung/pm-debug.c debug_ll_addr(&paddr, &vaddr);
vaddr 59 arch/arm/plat-samsung/pm-debug.c return (void __iomem *)vaddr;
vaddr 30 arch/arm/probes/uprobes/core.c unsigned long vaddr)
vaddr 32 arch/arm/probes/uprobes/core.c return uprobe_write_opcode(auprobe, mm, vaddr,
vaddr 113 arch/arm/probes/uprobes/core.c void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
vaddr 117 arch/arm/probes/uprobes/core.c void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);
vaddr 125 arch/arm/probes/uprobes/core.c flush_uprobe_xol_access(page, vaddr, dst, len);
vaddr 154 arch/arm/probes/uprobes/core.c regs->ARM_pc = utask->vaddr + 4;
vaddr 175 arch/arm/probes/uprobes/core.c instruction_pointer_set(regs, utask->vaddr);
vaddr 367 arch/arm/xen/enlighten.c &xen_auto_xlat_grant_frames.vaddr,
vaddr 126 arch/arm64/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vaddr 24 arch/arm64/include/asm/page.h #define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr)
vaddr 25 arch/arm64/include/asm/page.h #define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
vaddr 48 arch/arm64/include/asm/pgtable.h #define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page))
vaddr 32 arch/arm64/kernel/crash_dump.c void *vaddr;
vaddr 37 arch/arm64/kernel/crash_dump.c vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
vaddr 38 arch/arm64/kernel/crash_dump.c if (!vaddr)
vaddr 42 arch/arm64/kernel/crash_dump.c if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
vaddr 43 arch/arm64/kernel/crash_dump.c memunmap(vaddr);
vaddr 47 arch/arm64/kernel/crash_dump.c memcpy(buf, vaddr + offset, csize);
vaddr 50 arch/arm64/kernel/crash_dump.c memunmap(vaddr);
vaddr 14 arch/arm64/kernel/probes/uprobes.c void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
vaddr 18 arch/arm64/kernel/probes/uprobes.c void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);
vaddr 84 arch/arm64/kernel/probes/uprobes.c instruction_pointer_set(regs, utask->vaddr + 4);
vaddr 128 arch/arm64/kernel/probes/uprobes.c instruction_pointer_set(regs, utask->vaddr);
vaddr 14 arch/arm64/mm/copypage.c void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
vaddr 22 arch/arm64/mm/copypage.c void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
vaddr 53 arch/c6x/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vaddr 59 arch/c6x/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vaddr 57 arch/c6x/include/asm/pgtable.h #define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
vaddr 102 arch/c6x/mm/dma-coherent.c void arch_dma_free(struct device *dev, size_t size, void *vaddr,
vaddr 112 arch/c6x/mm/dma-coherent.c __free_dma_pages(virt_to_phys(vaddr), order);
vaddr 55 arch/csky/abiv1/inc/abi/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vaddr 60 arch/csky/abiv1/inc/abi/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vaddr 14 arch/csky/abiv1/inc/abi/page.h static inline void clear_user_page(void *addr, unsigned long vaddr,
vaddr 18 arch/csky/abiv1/inc/abi/page.h if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
vaddr 22 arch/csky/abiv1/inc/abi/page.h static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
vaddr 26 arch/csky/abiv1/inc/abi/page.h if (pages_do_alias((unsigned long) to, vaddr & PAGE_MASK))
vaddr 21 arch/csky/abiv2/cacheflush.c unsigned long vaddr, int len)
vaddr 25 arch/csky/abiv2/cacheflush.c kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
vaddr 33 arch/csky/abiv2/inc/abi/cacheflush.h unsigned long vaddr, int len);
vaddr 38 arch/csky/abiv2/inc/abi/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vaddr 43 arch/csky/abiv2/inc/abi/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vaddr 4 arch/csky/abiv2/inc/abi/page.h static inline void clear_user_page(void *addr, unsigned long vaddr,
vaddr 10 arch/csky/abiv2/inc/abi/page.h static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
vaddr 78 arch/csky/include/asm/bitops.h #define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
vaddr 108 arch/csky/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
vaddr 309 arch/csky/include/asm/pgtable.h #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
vaddr 310 arch/csky/include/asm/pgtable.h remap_pfn_range(vma, vaddr, pfn, size, prot)
vaddr 23 arch/csky/include/asm/tlbflush.h extern void flush_tlb_one(unsigned long vaddr);
vaddr 41 arch/csky/mm/highmem.c unsigned long vaddr;
vaddr 51 arch/csky/mm/highmem.c vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
vaddr 56 arch/csky/mm/highmem.c flush_tlb_one((unsigned long)vaddr);
vaddr 58 arch/csky/mm/highmem.c return (void *)vaddr;
vaddr 64 arch/csky/mm/highmem.c unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
vaddr 67 arch/csky/mm/highmem.c if (vaddr < FIXADDR_START)
vaddr 73 arch/csky/mm/highmem.c BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
vaddr 75 arch/csky/mm/highmem.c pte_clear(&init_mm, vaddr, kmap_pte - idx);
vaddr 76 arch/csky/mm/highmem.c flush_tlb_one(vaddr);
vaddr 93 arch/csky/mm/highmem.c unsigned long vaddr;
vaddr 100 arch/csky/mm/highmem.c vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
vaddr 102 arch/csky/mm/highmem.c flush_tlb_one(vaddr);
vaddr 104 arch/csky/mm/highmem.c return (void *) vaddr;
vaddr 109 arch/csky/mm/highmem.c unsigned long idx, vaddr = (unsigned long)ptr;
vaddr 112 arch/csky/mm/highmem.c if (vaddr < FIXADDR_START)
vaddr 115 arch/csky/mm/highmem.c idx = virt_to_fix(vaddr);
vaddr 129 arch/csky/mm/highmem.c unsigned long vaddr;
vaddr 131 arch/csky/mm/highmem.c vaddr = start;
vaddr 132 arch/csky/mm/highmem.c i = __pgd_offset(vaddr);
vaddr 133 arch/csky/mm/highmem.c j = __pud_offset(vaddr);
vaddr 134 arch/csky/mm/highmem.c k = __pmd_offset(vaddr);
vaddr 137 arch/csky/mm/highmem.c for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
vaddr 139 arch/csky/mm/highmem.c for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
vaddr 141 arch/csky/mm/highmem.c for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
vaddr 152 arch/csky/mm/highmem.c vaddr += PMD_SIZE;
vaddr 163 arch/csky/mm/highmem.c unsigned long vaddr;
vaddr 176 arch/csky/mm/highmem.c vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
vaddr 177 arch/csky/mm/highmem.c fixrange_init(vaddr, 0, pgd_base);
vaddr 183 arch/csky/mm/highmem.c vaddr = PKMAP_BASE;
vaddr 184 arch/csky/mm/highmem.c fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
vaddr 186 arch/csky/mm/highmem.c pgd = swapper_pg_dir + __pgd_offset(vaddr);
vaddr 188 arch/csky/mm/highmem.c pmd = pmd_offset(pud, vaddr);
vaddr 189 arch/csky/mm/highmem.c pte = pte_offset_kernel(pmd, vaddr);
vaddr 196 arch/csky/mm/highmem.c unsigned long vaddr;
vaddr 200 arch/csky/mm/highmem.c vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
vaddr 202 arch/csky/mm/highmem.c kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
vaddr 15 arch/csky/mm/ioremap.c unsigned long offset, vaddr;
vaddr 30 arch/csky/mm/ioremap.c vaddr = (unsigned long)area->addr;
vaddr 32 arch/csky/mm/ioremap.c if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
vaddr 37 arch/csky/mm/ioremap.c return (void __iomem *)(vaddr + offset);
vaddr 28 arch/h8300/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(0))
vaddr 42 arch/h8300/mm/memory.c void cache_push_v(unsigned long vaddr, int len)
vaddr 80 arch/hexagon/include/asm/cacheflush.h unsigned long vaddr, void *dst, void *src, int len);
vaddr 82 arch/hexagon/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vaddr 18 arch/hexagon/include/asm/fixmap.h #define kmap_get_fixmap_pte(vaddr) \
vaddr 19 arch/hexagon/include/asm/fixmap.h pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \
vaddr 20 arch/hexagon/include/asm/fixmap.h (vaddr)), (vaddr)), (vaddr))
vaddr 121 arch/hexagon/include/asm/page.h #define clear_user_page(page, vaddr, pg) clear_page(page)
vaddr 122 arch/hexagon/include/asm/page.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
vaddr 430 arch/hexagon/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
vaddr 52 arch/hexagon/kernel/dma.c void arch_dma_free(struct device *dev, size_t size, void *vaddr,
vaddr 55 arch/hexagon/kernel/dma.c gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
vaddr 119 arch/hexagon/mm/cache.c unsigned long vaddr, void *dst, void *src, int len)
vaddr 37 arch/hexagon/mm/vm_tlb.c void flush_tlb_one(unsigned long vaddr)
vaddr 39 arch/hexagon/mm/vm_tlb.c __vmclrmap((void *)vaddr, PAGE_SIZE);
vaddr 66 arch/hexagon/mm/vm_tlb.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr)
vaddr 71 arch/hexagon/mm/vm_tlb.c __vmclrmap((void *)vaddr, PAGE_SIZE);
vaddr 1167 arch/ia64/hp/common/sba_iommu.c static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
vaddr 1171 arch/ia64/hp/common/sba_iommu.c free_pages((unsigned long) vaddr, get_order(size));
vaddr 1240 arch/ia64/hp/common/sba_iommu.c unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
vaddr 1251 arch/ia64/hp/common/sba_iommu.c sba_io_pdir_entry(pdirp, vaddr);
vaddr 1252 arch/ia64/hp/common/sba_iommu.c vaddr += iovp_size;
vaddr 1308 arch/ia64/hp/common/sba_iommu.c unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
vaddr 1315 arch/ia64/hp/common/sba_iommu.c vcontig_end += vaddr;
vaddr 1316 arch/ia64/hp/common/sba_iommu.c dma_offset = vaddr & ~iovp_mask;
vaddr 1326 arch/ia64/hp/common/sba_iommu.c unsigned long vaddr; /* tmp */
vaddr 1353 arch/ia64/hp/common/sba_iommu.c vaddr = (unsigned long) sba_sg_address(startsg);
vaddr 1354 arch/ia64/hp/common/sba_iommu.c if (vcontig_end == vaddr)
vaddr 1386 arch/ia64/hp/common/sba_iommu.c if (DMA_CONTIG(vcontig_end, vaddr))
vaddr 1388 arch/ia64/hp/common/sba_iommu.c vcontig_end = vcontig_len + vaddr;
vaddr 48 arch/ia64/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vaddr 50 arch/ia64/include/asm/cacheflush.h flush_icache_user_range(vma, page, vaddr, len); \
vaddr 52 arch/ia64/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vaddr 72 arch/ia64/include/asm/page.h #define clear_user_page(addr, vaddr, page) \
vaddr 78 arch/ia64/include/asm/page.h #define copy_user_page(to, from, vaddr, page) \
vaddr 85 arch/ia64/include/asm/page.h #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
vaddr 88 arch/ia64/include/asm/page.h GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr); \
vaddr 488 arch/ia64/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)
vaddr 38 arch/ia64/kernel/crash_dump.c void *vaddr;
vaddr 42 arch/ia64/kernel/crash_dump.c vaddr = __va(pfn<<PAGE_SHIFT);
vaddr 44 arch/ia64/kernel/crash_dump.c if (copy_to_user(buf, (vaddr + offset), csize)) {
vaddr 48 arch/ia64/kernel/crash_dump.c memcpy(buf, (vaddr + offset), csize);
vaddr 358 arch/ia64/kernel/efi.c u64 vaddr, mask;
vaddr 379 arch/ia64/kernel/efi.c vaddr = PAGE_OFFSET + md->phys_addr;
vaddr 394 arch/ia64/kernel/efi.c if ((vaddr & mask) == (KERNEL_START & mask)) {
vaddr 410 arch/ia64/kernel/efi.c vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
vaddr 1403 arch/ia64/kernel/perfmon.c pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
vaddr 1409 arch/ia64/kernel/perfmon.c if (task->mm == NULL || size == 0UL || vaddr == NULL) {
vaddr 1414 arch/ia64/kernel/perfmon.c DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
vaddr 1419 arch/ia64/kernel/perfmon.c r = vm_munmap((unsigned long)vaddr, size);
vaddr 1422 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
vaddr 1425 arch/ia64/kernel/perfmon.c DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
vaddr 31 arch/m68k/include/asm/bitops.h static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
vaddr 33 arch/m68k/include/asm/bitops.h char *p = (char *)vaddr + (nr ^ 31) / 8;
vaddr 41 arch/m68k/include/asm/bitops.h static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
vaddr 43 arch/m68k/include/asm/bitops.h char *p = (char *)vaddr + (nr ^ 31) / 8;
vaddr 50 arch/m68k/include/asm/bitops.h static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
vaddr 54 arch/m68k/include/asm/bitops.h : "d" (nr ^ 31), "o" (*vaddr)
vaddr 59 arch/m68k/include/asm/bitops.h #define set_bit(nr, vaddr) bset_reg_set_bit(nr, vaddr)
vaddr 61 arch/m68k/include/asm/bitops.h #define set_bit(nr, vaddr) bset_mem_set_bit(nr, vaddr)
vaddr 63 arch/m68k/include/asm/bitops.h #define set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
vaddr 64 arch/m68k/include/asm/bitops.h bset_mem_set_bit(nr, vaddr) : \
vaddr 65 arch/m68k/include/asm/bitops.h bfset_mem_set_bit(nr, vaddr))
vaddr 68 arch/m68k/include/asm/bitops.h #define __set_bit(nr, vaddr) set_bit(nr, vaddr)
vaddr 71 arch/m68k/include/asm/bitops.h static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
vaddr 73 arch/m68k/include/asm/bitops.h char *p = (char *)vaddr + (nr ^ 31) / 8;
vaddr 81 arch/m68k/include/asm/bitops.h static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
vaddr 83 arch/m68k/include/asm/bitops.h char *p = (char *)vaddr + (nr ^ 31) / 8;
vaddr 90 arch/m68k/include/asm/bitops.h static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
vaddr 94 arch/m68k/include/asm/bitops.h : "d" (nr ^ 31), "o" (*vaddr)
vaddr 99 arch/m68k/include/asm/bitops.h #define clear_bit(nr, vaddr) bclr_reg_clear_bit(nr, vaddr)
vaddr 101 arch/m68k/include/asm/bitops.h #define clear_bit(nr, vaddr) bclr_mem_clear_bit(nr, vaddr)
vaddr 103 arch/m68k/include/asm/bitops.h #define clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
vaddr 104 arch/m68k/include/asm/bitops.h bclr_mem_clear_bit(nr, vaddr) : \
vaddr 105 arch/m68k/include/asm/bitops.h bfclr_mem_clear_bit(nr, vaddr))
vaddr 108 arch/m68k/include/asm/bitops.h #define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
vaddr 111 arch/m68k/include/asm/bitops.h static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
vaddr 113 arch/m68k/include/asm/bitops.h char *p = (char *)vaddr + (nr ^ 31) / 8;
vaddr 121 arch/m68k/include/asm/bitops.h static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
vaddr 123 arch/m68k/include/asm/bitops.h char *p = (char *)vaddr + (nr ^ 31) / 8;
vaddr 130 arch/m68k/include/asm/bitops.h static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
vaddr 134 arch/m68k/include/asm/bitops.h : "d" (nr ^ 31), "o" (*vaddr)
vaddr 139 arch/m68k/include/asm/bitops.h #define change_bit(nr, vaddr) bchg_reg_change_bit(nr, vaddr)
vaddr 141 arch/m68k/include/asm/bitops.h #define change_bit(nr, vaddr) bchg_mem_change_bit(nr, vaddr)
vaddr 143 arch/m68k/include/asm/bitops.h #define change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
vaddr 144 arch/m68k/include/asm/bitops.h bchg_mem_change_bit(nr, vaddr) : \
vaddr 145 arch/m68k/include/asm/bitops.h bfchg_mem_change_bit(nr, vaddr))
vaddr 148 arch/m68k/include/asm/bitops.h #define __change_bit(nr, vaddr) change_bit(nr, vaddr)
vaddr 151 arch/m68k/include/asm/bitops.h static inline int test_bit(int nr, const volatile unsigned long *vaddr)
vaddr 153 arch/m68k/include/asm/bitops.h return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
vaddr 158 arch/m68k/include/asm/bitops.h volatile unsigned long *vaddr)
vaddr 160 arch/m68k/include/asm/bitops.h char *p = (char *)vaddr + (nr ^ 31) / 8;
vaddr 171 arch/m68k/include/asm/bitops.h volatile unsigned long *vaddr)
vaddr 173 arch/m68k/include/asm/bitops.h char *p = (char *)vaddr + (nr ^ 31) / 8;
vaddr 183 arch/m68k/include/asm/bitops.h volatile unsigned long *vaddr)
vaddr 189 arch/m68k/include/asm/bitops.h : "d" (nr ^ 31), "o" (*vaddr)
vaddr 195 arch/m68k/include/asm/bitops.h #define test_and_set_bit(nr, vaddr) bset_reg_test_and_set_bit(nr, vaddr)
vaddr 197 arch/m68k/include/asm/bitops.h #define test_and_set_bit(nr, vaddr) bset_mem_test_and_set_bit(nr, vaddr)
vaddr 199 arch/m68k/include/asm/bitops.h #define test_and_set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
vaddr 200 arch/m68k/include/asm/bitops.h bset_mem_test_and_set_bit(nr, vaddr) : \
vaddr 201 arch/m68k/include/asm/bitops.h bfset_mem_test_and_set_bit(nr, vaddr))
vaddr 204 arch/m68k/include/asm/bitops.h #define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
vaddr 208 arch/m68k/include/asm/bitops.h volatile unsigned long *vaddr)
vaddr 210 arch/m68k/include/asm/bitops.h char *p = (char *)vaddr + (nr ^ 31) / 8;
vaddr 221 arch/m68k/include/asm/bitops.h volatile unsigned long *vaddr)
vaddr 223 arch/m68k/include/asm/bitops.h char *p = (char *)vaddr + (nr ^ 31) / 8;
vaddr 233 arch/m68k/include/asm/bitops.h volatile unsigned long *vaddr)
vaddr 239 arch/m68k/include/asm/bitops.h : "d" (nr ^ 31), "o" (*vaddr)
vaddr 245 arch/m68k/include/asm/bitops.h #define test_and_clear_bit(nr, vaddr) bclr_reg_test_and_clear_bit(nr, vaddr)
vaddr 247 arch/m68k/include/asm/bitops.h #define test_and_clear_bit(nr, vaddr) bclr_mem_test_and_clear_bit(nr, vaddr)
vaddr 249 arch/m68k/include/asm/bitops.h #define test_and_clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
vaddr 250 arch/m68k/include/asm/bitops.h bclr_mem_test_and_clear_bit(nr, vaddr) : \
vaddr 251 arch/m68k/include/asm/bitops.h bfclr_mem_test_and_clear_bit(nr, vaddr))
vaddr 254 arch/m68k/include/asm/bitops.h #define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
vaddr 258 arch/m68k/include/asm/bitops.h volatile unsigned long *vaddr)
vaddr 260 arch/m68k/include/asm/bitops.h char *p = (char *)vaddr + (nr ^ 31) / 8;
vaddr 271 arch/m68k/include/asm/bitops.h volatile unsigned long *vaddr)
vaddr 273 arch/m68k/include/asm/bitops.h char *p = (char *)vaddr + (nr ^ 31) / 8;
vaddr 283 arch/m68k/include/asm/bitops.h volatile unsigned long *vaddr)
vaddr 289 arch/m68k/include/asm/bitops.h : "d" (nr ^ 31), "o" (*vaddr)
vaddr 295 arch/m68k/include/asm/bitops.h #define test_and_change_bit(nr, vaddr) bchg_reg_test_and_change_bit(nr, vaddr)
vaddr 297 arch/m68k/include/asm/bitops.h #define test_and_change_bit(nr, vaddr) bchg_mem_test_and_change_bit(nr, vaddr)
vaddr 299 arch/m68k/include/asm/bitops.h #define test_and_change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
vaddr 300 arch/m68k/include/asm/bitops.h bchg_mem_test_and_change_bit(nr, vaddr) : \
vaddr 301 arch/m68k/include/asm/bitops.h bfchg_mem_test_and_change_bit(nr, vaddr))
vaddr 304 arch/m68k/include/asm/bitops.h #define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
vaddr 317 arch/m68k/include/asm/bitops.h static inline int find_first_zero_bit(const unsigned long *vaddr,
vaddr 320 arch/m68k/include/asm/bitops.h const unsigned long *p = vaddr;
vaddr 338 arch/m68k/include/asm/bitops.h res += ((long)p - (long)vaddr - 4) * 8;
vaddr 343 arch/m68k/include/asm/bitops.h static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
vaddr 346 arch/m68k/include/asm/bitops.h const unsigned long *p = vaddr + (offset >> 5);
vaddr 373 arch/m68k/include/asm/bitops.h static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
vaddr 375 arch/m68k/include/asm/bitops.h const unsigned long *p = vaddr;
vaddr 393 arch/m68k/include/asm/bitops.h res += ((long)p - (long)vaddr - 4) * 8;
vaddr 398 arch/m68k/include/asm/bitops.h static inline int find_next_bit(const unsigned long *vaddr, int size,
vaddr 401 arch/m68k/include/asm/bitops.h const unsigned long *p = vaddr + (offset >> 5);
vaddr 156 arch/m68k/include/asm/cacheflush_mm.h extern void cache_push_v(unsigned long vaddr, int len);
vaddr 223 arch/m68k/include/asm/cacheflush_mm.h static inline void __flush_page_to_ram(void *vaddr)
vaddr 227 arch/m68k/include/asm/cacheflush_mm.h addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
vaddr 240 arch/m68k/include/asm/cacheflush_mm.h : : "a" (__pa(vaddr)));
vaddr 262 arch/m68k/include/asm/cacheflush_mm.h struct page *page, unsigned long vaddr,
vaddr 265 arch/m68k/include/asm/cacheflush_mm.h flush_cache_page(vma, vaddr, page_to_pfn(page));
vaddr 267 arch/m68k/include/asm/cacheflush_mm.h flush_icache_user_range(vma, page, vaddr, len);
vaddr 270 arch/m68k/include/asm/cacheflush_mm.h struct page *page, unsigned long vaddr,
vaddr 273 arch/m68k/include/asm/cacheflush_mm.h flush_cache_page(vma, vaddr, page_to_pfn(page));
vaddr 27 arch/m68k/include/asm/cacheflush_no.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vaddr 29 arch/m68k/include/asm/cacheflush_no.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vaddr 32 arch/m68k/include/asm/dvma.h extern void dvma_free(void *vaddr);
vaddr 61 arch/m68k/include/asm/dvma.h static inline int dvma_map_cpu(unsigned long kaddr, unsigned long vaddr,
vaddr 81 arch/m68k/include/asm/dvma.h extern int dvma_map_cpu(unsigned long kaddr, unsigned long vaddr, int len);
vaddr 360 arch/m68k/include/asm/mcf_pgtable.h static inline void nocache_page(void *vaddr)
vaddr 365 arch/m68k/include/asm/mcf_pgtable.h unsigned long addr = (unsigned long) vaddr;
vaddr 376 arch/m68k/include/asm/mcf_pgtable.h static inline void cache_page(void *vaddr)
vaddr 381 arch/m68k/include/asm/mcf_pgtable.h unsigned long addr = (unsigned long) vaddr;
vaddr 236 arch/m68k/include/asm/motorola_pgtable.h static inline void nocache_page(void *vaddr)
vaddr 238 arch/m68k/include/asm/motorola_pgtable.h unsigned long addr = (unsigned long)vaddr;
vaddr 252 arch/m68k/include/asm/motorola_pgtable.h static inline void cache_page(void *vaddr)
vaddr 254 arch/m68k/include/asm/motorola_pgtable.h unsigned long addr = (unsigned long)vaddr;
vaddr 56 arch/m68k/include/asm/page_mm.h #define clear_user_page(addr, vaddr, page) \
vaddr 60 arch/m68k/include/asm/page_mm.h #define copy_user_page(to, from, vaddr, page) \
vaddr 71 arch/m68k/include/asm/page_mm.h static inline unsigned long ___pa(void *vaddr)
vaddr 78 arch/m68k/include/asm/page_mm.h : "0" (vaddr), "i" (m68k_fixup_memoffset));
vaddr 81 arch/m68k/include/asm/page_mm.h #define __pa(vaddr) ___pa((void *)(long)(vaddr))
vaddr 84 arch/m68k/include/asm/page_mm.h void *vaddr;
vaddr 88 arch/m68k/include/asm/page_mm.h : "=r" (vaddr)
vaddr 90 arch/m68k/include/asm/page_mm.h return vaddr;
vaddr 13 arch/m68k/include/asm/page_no.h #define clear_user_page(page, vaddr, pg) clear_page(page)
vaddr 14 arch/m68k/include/asm/page_no.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
vaddr 16 arch/m68k/include/asm/page_no.h #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
vaddr 17 arch/m68k/include/asm/page_no.h alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
vaddr 20 arch/m68k/include/asm/page_no.h #define __pa(vaddr) ((unsigned long)(vaddr))
vaddr 113 arch/m68k/include/asm/pgtable_mm.h #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
vaddr 45 arch/m68k/include/asm/pgtable_no.h #define ZERO_PAGE(vaddr) (virt_to_page(0))
vaddr 56 arch/m68k/kernel/dma.c void arch_dma_free(struct device *dev, size_t size, void *vaddr,
vaddr 59 arch/m68k/kernel/dma.c free_pages((unsigned long)vaddr, get_order(size));
vaddr 112 arch/m68k/kernel/signal.c static inline void push_cache (unsigned long vaddr)
vaddr 132 arch/m68k/kernel/signal.c : "a" (vaddr));
vaddr 135 arch/m68k/kernel/signal.c temp |= vaddr & ~PAGE_MASK;
vaddr 149 arch/m68k/kernel/signal.c : "0" (vaddr));
vaddr 165 arch/m68k/kernel/signal.c : : "r" (vaddr), "r" (temp));
vaddr 168 arch/m68k/kernel/signal.c : : "r" (vaddr + 4), "r" (temp));
vaddr 175 arch/m68k/kernel/signal.c clear_cf_icache(vaddr, vaddr + 8);
vaddr 212 arch/m68k/kernel/signal.c static inline void push_cache(unsigned long vaddr)
vaddr 53 arch/m68k/kernel/sys_m68k.c #define virt_to_phys_040(vaddr) \
vaddr 62 arch/m68k/kernel/sys_m68k.c : "a" (vaddr)); \
vaddr 216 arch/m68k/kernel/sys_m68k.c #define virt_to_phys_060(vaddr) \
vaddr 223 arch/m68k/kernel/sys_m68k.c : "0" (vaddr)); \
vaddr 15 arch/m68k/mm/cache.c static unsigned long virt_to_phys_slow(unsigned long vaddr)
vaddr 37 arch/m68k/mm/cache.c : "0" (vaddr));
vaddr 47 arch/m68k/mm/cache.c : "a" (vaddr));
vaddr 50 arch/m68k/mm/cache.c return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
vaddr 58 arch/m68k/mm/cache.c : "a" (vaddr), "d" (get_fs().seg));
vaddr 64 arch/m68k/mm/cache.c return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
vaddr 66 arch/m68k/mm/cache.c return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
vaddr 68 arch/m68k/mm/cache.c return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
vaddr 25 arch/m68k/mm/sun3kmap.c extern void print_pte_vaddr(unsigned long vaddr);
vaddr 28 arch/m68k/mm/sun3kmap.c extern void mmu_emu_map_pmeg (int context, int vaddr);
vaddr 24 arch/m68k/sun3/dvma.c static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
vaddr 36 arch/m68k/sun3/dvma.c if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
vaddr 37 arch/m68k/sun3/dvma.c sun3_put_pte(vaddr, pte);
vaddr 38 arch/m68k/sun3/dvma.c ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte;
vaddr 41 arch/m68k/sun3/dvma.c return (vaddr + (kaddr & ~PAGE_MASK));
vaddr 50 arch/m68k/sun3/dvma.c unsigned long vaddr;
vaddr 52 arch/m68k/sun3/dvma.c vaddr = dvma_btov(baddr);
vaddr 54 arch/m68k/sun3/dvma.c end = vaddr + len;
vaddr 56 arch/m68k/sun3/dvma.c while(vaddr < end) {
vaddr 57 arch/m68k/sun3/dvma.c dvma_page(kaddr, vaddr);
vaddr 59 arch/m68k/sun3/dvma.c vaddr += PAGE_SIZE;
vaddr 119 arch/m68k/sun3/mmu_emu.c void print_pte_vaddr (unsigned long vaddr)
vaddr 121 arch/m68k/sun3/mmu_emu.c pr_cont(" vaddr=%lx [%02lx]", vaddr, sun3_get_segmap (vaddr));
vaddr 122 arch/m68k/sun3/mmu_emu.c print_pte (__pte (sun3_get_pte (vaddr)));
vaddr 284 arch/m68k/sun3/mmu_emu.c inline void mmu_emu_map_pmeg (int context, int vaddr)
vaddr 290 arch/m68k/sun3/mmu_emu.c vaddr &= ~SUN3_PMEG_MASK;
vaddr 299 arch/m68k/sun3/mmu_emu.c curr_pmeg, context, vaddr);
vaddr 311 arch/m68k/sun3/mmu_emu.c if(vaddr >= PAGE_OFFSET) {
vaddr 317 arch/m68k/sun3/mmu_emu.c sun3_put_segmap (vaddr, curr_pmeg);
vaddr 327 arch/m68k/sun3/mmu_emu.c sun3_put_segmap (vaddr, curr_pmeg);
vaddr 330 arch/m68k/sun3/mmu_emu.c pmeg_vaddr[curr_pmeg] = vaddr;
vaddr 334 arch/m68k/sun3/mmu_emu.c sun3_put_pte (vaddr + i, SUN3_PAGE_SYSTEM);
vaddr 356 arch/m68k/sun3/mmu_emu.c int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
vaddr 376 arch/m68k/sun3/mmu_emu.c vaddr, read_flag ? "read" : "write", crp);
vaddr 379 arch/m68k/sun3/mmu_emu.c segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF;
vaddr 380 arch/m68k/sun3/mmu_emu.c offset = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF;
vaddr 402 arch/m68k/sun3/mmu_emu.c if (sun3_get_segmap (vaddr&~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG)
vaddr 403 arch/m68k/sun3/mmu_emu.c mmu_emu_map_pmeg (context, vaddr);
vaddr 406 arch/m68k/sun3/mmu_emu.c sun3_put_pte (vaddr&PAGE_MASK, pte_val (*pte));
vaddr 424 arch/m68k/sun3/mmu_emu.c print_pte_vaddr (vaddr);
vaddr 343 arch/m68k/sun3/sun3dvma.c unsigned long vaddr;
vaddr 359 arch/m68k/sun3/sun3dvma.c vaddr = dvma_btov(baddr);
vaddr 361 arch/m68k/sun3/sun3dvma.c if(dvma_map_cpu(kaddr, vaddr, len) < 0) {
vaddr 370 arch/m68k/sun3/sun3dvma.c return (void *)vaddr;
vaddr 375 arch/m68k/sun3/sun3dvma.c void dvma_free(void *vaddr)
vaddr 80 arch/m68k/sun3x/dvma.c unsigned long vaddr, int len)
vaddr 87 arch/m68k/sun3x/dvma.c vaddr &= PAGE_MASK;
vaddr 89 arch/m68k/sun3x/dvma.c end = PAGE_ALIGN(vaddr + len);
vaddr 91 arch/m68k/sun3x/dvma.c pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
vaddr 92 arch/m68k/sun3x/dvma.c pgd = pgd_offset_k(vaddr);
vaddr 98 arch/m68k/sun3x/dvma.c if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) {
vaddr 103 arch/m68k/sun3x/dvma.c if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
vaddr 104 arch/m68k/sun3x/dvma.c end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
vaddr 112 arch/m68k/sun3x/dvma.c if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
vaddr 117 arch/m68k/sun3x/dvma.c if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
vaddr 118 arch/m68k/sun3x/dvma.c end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
vaddr 124 arch/m68k/sun3x/dvma.c __pa(kaddr), vaddr);
vaddr 129 arch/m68k/sun3x/dvma.c vaddr += PAGE_SIZE;
vaddr 130 arch/m68k/sun3x/dvma.c } while(vaddr < end3);
vaddr 132 arch/m68k/sun3x/dvma.c } while(vaddr < end2);
vaddr 134 arch/m68k/sun3x/dvma.c } while(vaddr < end);
vaddr 106 arch/microblaze/include/asm/cacheflush.h struct page *page, unsigned long vaddr,
vaddr 118 arch/microblaze/include/asm/cacheflush.h struct page *page, unsigned long vaddr,
vaddr 81 arch/microblaze/include/asm/page.h # define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
vaddr 82 arch/microblaze/include/asm/page.h # define copy_user_page(vto, vfrom, vaddr, topg) \
vaddr 148 arch/microblaze/include/asm/page.h # define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
vaddr 158 arch/microblaze/include/asm/page.h # define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
vaddr 176 arch/microblaze/include/asm/page.h #define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
vaddr 45 arch/microblaze/include/asm/pgtable.h #define ZERO_PAGE(vaddr) ({ BUG(); NULL; })
vaddr 292 arch/microblaze/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
vaddr 38 arch/microblaze/mm/highmem.c unsigned long vaddr;
vaddr 49 arch/microblaze/mm/highmem.c vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
vaddr 53 arch/microblaze/mm/highmem.c set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
vaddr 54 arch/microblaze/mm/highmem.c local_flush_tlb_page(NULL, vaddr);
vaddr 56 arch/microblaze/mm/highmem.c return (void *) vaddr;
vaddr 62 arch/microblaze/mm/highmem.c unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
vaddr 66 arch/microblaze/mm/highmem.c if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
vaddr 76 arch/microblaze/mm/highmem.c BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
vaddr 82 arch/microblaze/mm/highmem.c pte_clear(&init_mm, vaddr, kmap_pte-idx);
vaddr 83 arch/microblaze/mm/highmem.c local_flush_tlb_page(NULL, vaddr);
vaddr 54 arch/microblaze/mm/init.c static inline pte_t *virt_to_kpte(unsigned long vaddr)
vaddr 56 arch/microblaze/mm/init.c return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
vaddr 57 arch/microblaze/mm/init.c vaddr), vaddr);
vaddr 60 arch/mips/boot/elf2ecoff.c uint32_t vaddr;
vaddr 107 arch/mips/boot/elf2ecoff.c if (base->vaddr + base->len != new->vaddr) {
vaddr 109 arch/mips/boot/elf2ecoff.c base->len = new->vaddr - base->vaddr;
vaddr 284 arch/mips/boot/elf2ecoff.c text.vaddr = data.vaddr = bss.vaddr = 0;
vaddr 364 arch/mips/boot/elf2ecoff.c ndata.vaddr = ph[i].p_vaddr;
vaddr 366 arch/mips/boot/elf2ecoff.c nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz;
vaddr 374 arch/mips/boot/elf2ecoff.c ntxt.vaddr = ph[i].p_vaddr;
vaddr 394 arch/mips/boot/elf2ecoff.c if (text.vaddr > data.vaddr || data.vaddr > bss.vaddr ||
vaddr 395 arch/mips/boot/elf2ecoff.c text.vaddr + text.len > data.vaddr
vaddr 396 arch/mips/boot/elf2ecoff.c || data.vaddr + data.len > bss.vaddr) {
vaddr 408 arch/mips/boot/elf2ecoff.c data.vaddr = text.vaddr + text.len;
vaddr 416 arch/mips/boot/elf2ecoff.c if (text.vaddr + text.len < data.vaddr)
vaddr 417 arch/mips/boot/elf2ecoff.c text.len = data.vaddr - text.vaddr;
vaddr 426 arch/mips/boot/elf2ecoff.c eah.text_start = text.vaddr;
vaddr 427 arch/mips/boot/elf2ecoff.c eah.data_start = data.vaddr;
vaddr 428 arch/mips/boot/elf2ecoff.c eah.bss_start = bss.vaddr;
vaddr 79 arch/mips/dec/kn01-berr.c long asid, entryhi, vaddr;
vaddr 101 arch/mips/dec/kn01-berr.c vaddr = regs->regs[insn.i_format.rs] +
vaddr 104 arch/mips/dec/kn01-berr.c vaddr = (long)pc;
vaddr 105 arch/mips/dec/kn01-berr.c if (KSEGX(vaddr) == CKSEG0 || KSEGX(vaddr) == CKSEG1)
vaddr 106 arch/mips/dec/kn01-berr.c address = CPHYSADDR(vaddr);
vaddr 111 arch/mips/dec/kn01-berr.c entryhi |= vaddr & ~(PAGE_SIZE - 1);
vaddr 119 arch/mips/dec/kn01-berr.c offset = vaddr & (PAGE_SIZE - 1);
vaddr 105 arch/mips/include/asm/cacheflush.h struct page *page, unsigned long vaddr, void *dst, const void *src,
vaddr 109 arch/mips/include/asm/cacheflush.h struct page *page, unsigned long vaddr, void *dst, const void *src,
vaddr 139 arch/mips/include/asm/cacheflush.h extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
vaddr 141 arch/mips/include/asm/cacheflush.h static inline void flush_kernel_vmap_range(void *vaddr, int size)
vaddr 144 arch/mips/include/asm/cacheflush.h __flush_kernel_vmap_range((unsigned long) vaddr, size);
vaddr 147 arch/mips/include/asm/cacheflush.h static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
vaddr 150 arch/mips/include/asm/cacheflush.h __flush_kernel_vmap_range((unsigned long) vaddr, size);
vaddr 72 arch/mips/include/asm/fixmap.h #define kmap_get_fixmap_pte(vaddr) \
vaddr 73 arch/mips/include/asm/fixmap.h pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
vaddr 103 arch/mips/include/asm/page.h static inline void clear_user_page(void *addr, unsigned long vaddr,
vaddr 109 arch/mips/include/asm/page.h if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
vaddr 115 arch/mips/include/asm/page.h unsigned long vaddr, struct vm_area_struct *vma);
vaddr 84 arch/mips/include/asm/pgtable.h #define ZERO_PAGE(vaddr) \
vaddr 85 arch/mips/include/asm/pgtable.h (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
vaddr 498 arch/mips/include/asm/pgtable.h unsigned long vaddr,
vaddr 504 arch/mips/include/asm/pgtable.h return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
vaddr 23 arch/mips/include/asm/tlbflush.h extern void local_flush_tlb_one(unsigned long vaddr);
vaddr 35 arch/mips/include/asm/tlbflush.h extern void flush_tlb_one(unsigned long vaddr);
vaddr 45 arch/mips/include/asm/tlbflush.h #define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
vaddr 581 arch/mips/jazz/jazzdma.c static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
vaddr 585 arch/mips/jazz/jazzdma.c dma_direct_free_pages(dev, size, vaddr, dma_handle, attrs);
vaddr 30 arch/mips/kernel/crash_dump.c void *vaddr;
vaddr 35 arch/mips/kernel/crash_dump.c vaddr = kmap_atomic_pfn(pfn);
vaddr 38 arch/mips/kernel/crash_dump.c memcpy(buf, (vaddr + offset), csize);
vaddr 39 arch/mips/kernel/crash_dump.c kunmap_atomic(vaddr);
vaddr 46 arch/mips/kernel/crash_dump.c copy_page(kdump_buf_page, vaddr);
vaddr 47 arch/mips/kernel/crash_dump.c kunmap_atomic(vaddr);
vaddr 909 arch/mips/kernel/mips-r2-to-r6-emul.c unsigned long vaddr;
vaddr 1207 arch/mips/kernel/mips-r2-to-r6-emul.c vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
vaddr 1208 arch/mips/kernel/mips-r2-to-r6-emul.c if (!access_ok((void __user *)vaddr, 4)) {
vaddr 1209 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 1268 arch/mips/kernel/mips-r2-to-r6-emul.c "+&r"(vaddr), "+&r"(err)
vaddr 1280 arch/mips/kernel/mips-r2-to-r6-emul.c vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
vaddr 1281 arch/mips/kernel/mips-r2-to-r6-emul.c if (!access_ok((void __user *)vaddr, 4)) {
vaddr 1282 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 1343 arch/mips/kernel/mips-r2-to-r6-emul.c "+&r"(vaddr), "+&r"(err)
vaddr 1354 arch/mips/kernel/mips-r2-to-r6-emul.c vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
vaddr 1355 arch/mips/kernel/mips-r2-to-r6-emul.c if (!access_ok((void __user *)vaddr, 4)) {
vaddr 1356 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 1414 arch/mips/kernel/mips-r2-to-r6-emul.c "+&r"(vaddr), "+&r"(err)
vaddr 1424 arch/mips/kernel/mips-r2-to-r6-emul.c vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
vaddr 1425 arch/mips/kernel/mips-r2-to-r6-emul.c if (!access_ok((void __user *)vaddr, 4)) {
vaddr 1426 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 1484 arch/mips/kernel/mips-r2-to-r6-emul.c "+&r"(vaddr), "+&r"(err)
vaddr 1499 arch/mips/kernel/mips-r2-to-r6-emul.c vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
vaddr 1500 arch/mips/kernel/mips-r2-to-r6-emul.c if (!access_ok((void __user *)vaddr, 8)) {
vaddr 1501 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 1603 arch/mips/kernel/mips-r2-to-r6-emul.c "+&r"(vaddr), "+&r"(err)
vaddr 1618 arch/mips/kernel/mips-r2-to-r6-emul.c vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
vaddr 1619 arch/mips/kernel/mips-r2-to-r6-emul.c if (!access_ok((void __user *)vaddr, 8)) {
vaddr 1620 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 1722 arch/mips/kernel/mips-r2-to-r6-emul.c "+&r"(vaddr), "+&r"(err)
vaddr 1737 arch/mips/kernel/mips-r2-to-r6-emul.c vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
vaddr 1738 arch/mips/kernel/mips-r2-to-r6-emul.c if (!access_ok((void __user *)vaddr, 8)) {
vaddr 1739 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 1841 arch/mips/kernel/mips-r2-to-r6-emul.c "+&r"(vaddr), "+&r"(err)
vaddr 1855 arch/mips/kernel/mips-r2-to-r6-emul.c vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
vaddr 1856 arch/mips/kernel/mips-r2-to-r6-emul.c if (!access_ok((void __user *)vaddr, 8)) {
vaddr 1857 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 1959 arch/mips/kernel/mips-r2-to-r6-emul.c "+&r"(vaddr), "+&r"(err)
vaddr 1967 arch/mips/kernel/mips-r2-to-r6-emul.c vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
vaddr 1968 arch/mips/kernel/mips-r2-to-r6-emul.c if (vaddr & 0x3) {
vaddr 1969 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 1973 arch/mips/kernel/mips-r2-to-r6-emul.c if (!access_ok((void __user *)vaddr, 4)) {
vaddr 1974 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 2013 arch/mips/kernel/mips-r2-to-r6-emul.c : "r"(vaddr), "i"(SIGSEGV)
vaddr 2023 arch/mips/kernel/mips-r2-to-r6-emul.c vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
vaddr 2024 arch/mips/kernel/mips-r2-to-r6-emul.c if (vaddr & 0x3) {
vaddr 2025 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 2029 arch/mips/kernel/mips-r2-to-r6-emul.c if (!access_ok((void __user *)vaddr, 4)) {
vaddr 2030 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 2071 arch/mips/kernel/mips-r2-to-r6-emul.c : "r"(vaddr), "i"(SIGSEGV));
vaddr 2086 arch/mips/kernel/mips-r2-to-r6-emul.c vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
vaddr 2087 arch/mips/kernel/mips-r2-to-r6-emul.c if (vaddr & 0x7) {
vaddr 2088 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 2092 arch/mips/kernel/mips-r2-to-r6-emul.c if (!access_ok((void __user *)vaddr, 8)) {
vaddr 2093 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 2132 arch/mips/kernel/mips-r2-to-r6-emul.c : "r"(vaddr), "i"(SIGSEGV)
vaddr 2147 arch/mips/kernel/mips-r2-to-r6-emul.c vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
vaddr 2148 arch/mips/kernel/mips-r2-to-r6-emul.c if (vaddr & 0x7) {
vaddr 2149 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 2153 arch/mips/kernel/mips-r2-to-r6-emul.c if (!access_ok((void __user *)vaddr, 8)) {
vaddr 2154 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr;
vaddr 2195 arch/mips/kernel/mips-r2-to-r6-emul.c : "r"(vaddr), "i"(SIGSEGV));
vaddr 684 arch/mips/kernel/smp.c unsigned long vaddr = (unsigned long) info;
vaddr 686 arch/mips/kernel/smp.c local_flush_tlb_one(vaddr);
vaddr 689 arch/mips/kernel/smp.c void flush_tlb_one(unsigned long vaddr)
vaddr 691 arch/mips/kernel/smp.c smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
vaddr 523 arch/mips/kernel/traps.c unsigned long value, __user *vaddr;
vaddr 536 arch/mips/kernel/traps.c vaddr = (unsigned long __user *)
vaddr 539 arch/mips/kernel/traps.c if ((unsigned long)vaddr & 3)
vaddr 541 arch/mips/kernel/traps.c if (get_user(value, vaddr))
vaddr 562 arch/mips/kernel/traps.c unsigned long __user *vaddr;
vaddr 576 arch/mips/kernel/traps.c vaddr = (unsigned long __user *)
vaddr 580 arch/mips/kernel/traps.c if ((unsigned long)vaddr & 3)
vaddr 593 arch/mips/kernel/traps.c if (put_user(regs->regs[reg], vaddr))
vaddr 193 arch/mips/kernel/uprobes.c instruction_pointer_set(regs, utask->vaddr);
vaddr 222 arch/mips/kernel/uprobes.c unsigned long vaddr)
vaddr 224 arch/mips/kernel/uprobes.c return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
vaddr 227 arch/mips/kernel/uprobes.c void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
vaddr 234 arch/mips/kernel/uprobes.c kstart = kaddr + (vaddr & ~PAGE_MASK);
vaddr 33 arch/mips/kvm/dyntrans.c unsigned long vaddr = (unsigned long)opc;
vaddr 49 arch/mips/kvm/dyntrans.c err = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
vaddr 63 arch/mips/kvm/dyntrans.c __local_flush_icache_user_range(vaddr, vaddr + 4);
vaddr 159 arch/mips/mm/c-octeon.c static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
vaddr 277 arch/mips/mm/c-r3k.c static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
vaddr 656 arch/mips/mm/c-r4k.c void *vaddr;
vaddr 679 arch/mips/mm/c-r4k.c vaddr = NULL;
vaddr 689 arch/mips/mm/c-r4k.c vaddr = kmap_coherent(page, addr);
vaddr 691 arch/mips/mm/c-r4k.c vaddr = kmap_atomic(page);
vaddr 692 arch/mips/mm/c-r4k.c addr = (unsigned long)vaddr;
vaddr 696 arch/mips/mm/c-r4k.c vaddr ? r4k_blast_dcache_page(addr) :
vaddr 702 arch/mips/mm/c-r4k.c if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
vaddr 705 arch/mips/mm/c-r4k.c vaddr ?
r4k_blast_icache_page(addr) : vaddr 709 arch/mips/mm/c-r4k.c if (vaddr) { vaddr 713 arch/mips/mm/c-r4k.c kunmap_atomic(vaddr); vaddr 947 arch/mips/mm/c-r4k.c unsigned long vaddr; vaddr 963 arch/mips/mm/c-r4k.c unsigned long vaddr = vmra->vaddr; vaddr 971 arch/mips/mm/c-r4k.c blast_dcache_range(vaddr, vaddr + size); vaddr 974 arch/mips/mm/c-r4k.c static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size) vaddr 978 arch/mips/mm/c-r4k.c args.vaddr = (unsigned long) vaddr; vaddr 254 arch/mips/mm/c-tx39.c static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size) vaddr 46 arch/mips/mm/cache.c void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size); vaddr 136 arch/mips/mm/dma-noncoherent.c void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, vaddr 141 arch/mips/mm/dma-noncoherent.c dma_sync_virt(vaddr, size, direction); vaddr 49 arch/mips/mm/highmem.c unsigned long vaddr; vaddr 59 arch/mips/mm/highmem.c vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr 64 arch/mips/mm/highmem.c local_flush_tlb_one((unsigned long)vaddr); vaddr 66 arch/mips/mm/highmem.c return (void*) vaddr; vaddr 72 arch/mips/mm/highmem.c unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; vaddr 75 arch/mips/mm/highmem.c if (vaddr < FIXADDR_START) { // FIXME vaddr 86 arch/mips/mm/highmem.c BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); vaddr 92 arch/mips/mm/highmem.c pte_clear(&init_mm, vaddr, kmap_pte-idx); vaddr 93 arch/mips/mm/highmem.c local_flush_tlb_one(vaddr); vaddr 108 arch/mips/mm/highmem.c unsigned long vaddr; vaddr 116 arch/mips/mm/highmem.c vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr 118 arch/mips/mm/highmem.c flush_tlb_one(vaddr); vaddr 120 arch/mips/mm/highmem.c return (void*) vaddr; vaddr 88 arch/mips/mm/init.c unsigned long vaddr, flags, entrylo; vaddr 99 arch/mips/mm/init.c vaddr = __fix_to_virt(FIX_CMAP_END - idx); vaddr 111 arch/mips/mm/init.c write_c0_entryhi(vaddr & (PAGE_MASK << 1)); vaddr 136 arch/mips/mm/init.c return (void*) vaddr; vaddr 172 arch/mips/mm/init.c unsigned long vaddr, struct vm_area_struct *vma) vaddr 179 arch/mips/mm/init.c vfrom = kmap_coherent(from, vaddr); vaddr 188 arch/mips/mm/init.c pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) vaddr 196 arch/mips/mm/init.c struct page *page, unsigned long vaddr, void *dst, const void *src, vaddr 201 arch/mips/mm/init.c void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); vaddr 210 arch/mips/mm/init.c flush_cache_page(vma, vaddr, page_to_pfn(page)); vaddr 214 arch/mips/mm/init.c struct page *page, unsigned long vaddr, void *dst, const void *src, vaddr 219 arch/mips/mm/init.c void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); vaddr 239 arch/mips/mm/init.c unsigned long vaddr; vaddr 241 arch/mips/mm/init.c vaddr = start; vaddr 242 arch/mips/mm/init.c i = __pgd_offset(vaddr); vaddr 243 arch/mips/mm/init.c j = __pud_offset(vaddr); vaddr 244 arch/mips/mm/init.c k = __pmd_offset(vaddr); vaddr 247 arch/mips/mm/init.c for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) { vaddr 249 arch/mips/mm/init.c for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) { vaddr 251 arch/mips/mm/init.c for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) { vaddr 263 arch/mips/mm/init.c vaddr += PMD_SIZE; vaddr 122 arch/mips/mm/mmap.c unsigned long vaddr = (unsigned long)kaddr; vaddr 124 arch/mips/mm/mmap.c if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE)) vaddr 55 arch/mips/mm/pgtable-32.c unsigned long vaddr; vaddr 74 arch/mips/mm/pgtable-32.c vaddr = 
__fix_to_virt(__end_of_fixed_addresses - 1); vaddr 75 arch/mips/mm/pgtable-32.c fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base); vaddr 81 arch/mips/mm/pgtable-32.c vaddr = PKMAP_BASE; vaddr 82 arch/mips/mm/pgtable-32.c fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); vaddr 84 arch/mips/mm/pgtable-32.c pgd = swapper_pg_dir + __pgd_offset(vaddr); vaddr 85 arch/mips/mm/pgtable-32.c pud = pud_offset(pgd, vaddr); vaddr 86 arch/mips/mm/pgtable-32.c pmd = pmd_offset(pud, vaddr); vaddr 87 arch/mips/mm/pgtable-32.c pte = pte_offset_kernel(pmd, vaddr); vaddr 109 arch/mips/mm/pgtable-64.c unsigned long vaddr; vaddr 124 arch/mips/mm/pgtable-64.c vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; vaddr 125 arch/mips/mm/pgtable-64.c fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); vaddr 304 arch/mips/sgi-ip22/ip28-berr.c static int check_microtlb(u32 hi, u32 lo, unsigned long vaddr) vaddr 308 arch/mips/sgi-ip22/ip28-berr.c vaddr &= 0x7fffffff; /* Doc. states that top bit is ignored */ vaddr 311 arch/mips/sgi-ip22/ip28-berr.c if ((lo & 2) && (vaddr >> 21) == ((hi<<1) >> 22)) { vaddr 317 arch/mips/sgi-ip22/ip28-berr.c pte += 8*((vaddr >> pgsz) & 0x1ff); vaddr 327 arch/mips/sgi-ip22/ip28-berr.c a += vaddr & ((1 << pgsz) - 1); vaddr 31 arch/nds32/include/asm/cacheflush.h unsigned long vaddr, void *dst, void *src, int len); vaddr 33 arch/nds32/include/asm/cacheflush.h unsigned long vaddr, void *dst, void *src, int len); vaddr 37 arch/nds32/include/asm/cacheflush.h struct page *page, unsigned long vaddr); vaddr 27 arch/nds32/include/asm/page.h unsigned long vaddr, struct vm_area_struct *vma); vaddr 28 arch/nds32/include/asm/page.h extern void clear_user_highpage(struct page *page, unsigned long vaddr); vaddr 30 arch/nds32/include/asm/page.h void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, vaddr 32 arch/nds32/include/asm/page.h void clear_user_page(void *addr, unsigned long vaddr, struct page *page); vaddr 36 arch/nds32/include/asm/page.h #define clear_user_page(page, vaddr, pg) clear_page(page) vaddr 37 arch/nds32/include/asm/page.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) vaddr 185 arch/nds32/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (empty_zero_page) vaddr 178 arch/nds32/mm/cacheflush.c void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, vaddr 181 arch/nds32/mm/cacheflush.c cpu_dcache_wbinval_page((unsigned long)vaddr); vaddr 182 arch/nds32/mm/cacheflush.c cpu_icache_inval_page((unsigned long)vaddr); vaddr 188 arch/nds32/mm/cacheflush.c void clear_user_page(void *addr, unsigned long vaddr, struct page *page) vaddr 190 arch/nds32/mm/cacheflush.c cpu_dcache_wbinval_page((unsigned long)vaddr); vaddr 191 arch/nds32/mm/cacheflush.c cpu_icache_inval_page((unsigned long)vaddr); vaddr 198 arch/nds32/mm/cacheflush.c unsigned long vaddr, struct vm_area_struct *vma) vaddr 207 arch/nds32/mm/cacheflush.c if (aliasing(vaddr, (unsigned long)kfrom)) vaddr 209 arch/nds32/mm/cacheflush.c vto = kremap0(vaddr, pto); vaddr 210 arch/nds32/mm/cacheflush.c vfrom = kremap1(vaddr, pfrom); vaddr 219 arch/nds32/mm/cacheflush.c void clear_user_highpage(struct page *page, unsigned long vaddr) vaddr 226 arch/nds32/mm/cacheflush.c if (aliasing(kto, vaddr) && kto != 0) { vaddr 230 arch/nds32/mm/cacheflush.c vto = kremap0(vaddr, page_to_phys(page)); vaddr 252 arch/nds32/mm/cacheflush.c unsigned long vaddr, kto; vaddr 254 arch/nds32/mm/cacheflush.c vaddr = page->index << PAGE_SHIFT; vaddr 255 arch/nds32/mm/cacheflush.c if 
(aliasing(vaddr, kaddr)) { vaddr 256 arch/nds32/mm/cacheflush.c kto = kremap0(vaddr, page_to_phys(page)); vaddr 267 arch/nds32/mm/cacheflush.c unsigned long vaddr, void *dst, void *src, int len) vaddr 272 arch/nds32/mm/cacheflush.c vto = kremap0(vaddr, page_to_phys(page)); vaddr 273 arch/nds32/mm/cacheflush.c dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1))); vaddr 288 arch/nds32/mm/cacheflush.c unsigned long vaddr, void *dst, void *src, int len) vaddr 293 arch/nds32/mm/cacheflush.c vto = kremap0(vaddr, page_to_phys(page)); vaddr 294 arch/nds32/mm/cacheflush.c src = (void *)(vto | (vaddr & (PAGE_SIZE - 1))); vaddr 301 arch/nds32/mm/cacheflush.c struct page *page, unsigned long vaddr) vaddr 312 arch/nds32/mm/cacheflush.c cpu_icache_inval_page(vaddr & PAGE_MASK); vaddr 314 arch/nds32/mm/cacheflush.c if (aliasing(vaddr, kaddr)) { vaddr 315 arch/nds32/mm/cacheflush.c ktmp = kremap0(vaddr, page_to_phys(page)); vaddr 15 arch/nds32/mm/highmem.c unsigned long vaddr; vaddr 19 arch/nds32/mm/highmem.c vaddr = (unsigned long)kmap_high(page); vaddr 20 arch/nds32/mm/highmem.c return (void *)vaddr; vaddr 38 arch/nds32/mm/highmem.c unsigned long vaddr, pte; vaddr 50 arch/nds32/mm/highmem.c vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr 52 arch/nds32/mm/highmem.c ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); vaddr 55 arch/nds32/mm/highmem.c __nds32__tlbop_inv(vaddr); vaddr 56 arch/nds32/mm/highmem.c __nds32__mtsr_dsb(vaddr, NDS32_SR_TLB_VPN); vaddr 59 arch/nds32/mm/highmem.c return (void *)vaddr; vaddr 67 arch/nds32/mm/highmem.c unsigned long vaddr = (unsigned long)kvaddr; vaddr 70 arch/nds32/mm/highmem.c __nds32__tlbop_inv(vaddr); vaddr 72 arch/nds32/mm/highmem.c ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); vaddr 101 arch/nds32/mm/init.c unsigned long vaddr; vaddr 112 arch/nds32/mm/init.c vaddr = __fix_to_virt(__end_of_fixed_addresses - 1); vaddr 113 arch/nds32/mm/init.c pgd = swapper_pg_dir + pgd_index(vaddr); vaddr 114 arch/nds32/mm/init.c pud = pud_offset(pgd, vaddr); vaddr 115 arch/nds32/mm/init.c pmd = pmd_offset(pud, vaddr); vaddr 126 arch/nds32/mm/init.c vaddr = PKMAP_BASE; vaddr 128 arch/nds32/mm/init.c pgd = swapper_pg_dir + pgd_index(vaddr); vaddr 129 arch/nds32/mm/init.c pud = pud_offset(pgd, vaddr); vaddr 130 arch/nds32/mm/init.c pmd = pmd_offset(pud, vaddr); vaddr 56 arch/nios2/include/asm/io.h #define phys_to_virt(vaddr) \ vaddr 57 arch/nios2/include/asm/io.h ((void *)((unsigned long)(vaddr) | CONFIG_NIOS2_KERNEL_REGION_BASE)) vaddr 59 arch/nios2/include/asm/io.h #define virt_to_phys(vaddr) \ vaddr 60 arch/nios2/include/asm/io.h ((unsigned long)((unsigned long)(vaddr) & ~0xE0000000)) vaddr 53 arch/nios2/include/asm/page.h extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page); vaddr 54 arch/nios2/include/asm/page.h extern void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, vaddr 98 arch/nios2/include/asm/page.h # define virt_to_page(vaddr) pfn_to_page(PFN_DOWN(virt_to_phys(vaddr))) vaddr 99 arch/nios2/include/asm/page.h # define virt_addr_valid(vaddr) pfn_valid(PFN_DOWN(virt_to_phys(vaddr))) vaddr 92 arch/nios2/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) vaddr 233 arch/nios2/mm/cacheflush.c void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, vaddr 236 arch/nios2/mm/cacheflush.c __flush_dcache(vaddr, vaddr + PAGE_SIZE); vaddr 237 arch/nios2/mm/cacheflush.c __flush_icache(vaddr, vaddr + PAGE_SIZE); vaddr 243 arch/nios2/mm/cacheflush.c void clear_user_page(void *addr, unsigned long vaddr, struct 
page *page) vaddr 245 arch/nios2/mm/cacheflush.c __flush_dcache(vaddr, vaddr + PAGE_SIZE); vaddr 246 arch/nios2/mm/cacheflush.c __flush_icache(vaddr, vaddr + PAGE_SIZE); vaddr 24 arch/nios2/mm/dma-mapping.c void *vaddr = phys_to_virt(paddr); vaddr 28 arch/nios2/mm/dma-mapping.c invalidate_dcache_range((unsigned long)vaddr, vaddr 29 arch/nios2/mm/dma-mapping.c (unsigned long)(vaddr + size)); vaddr 37 arch/nios2/mm/dma-mapping.c flush_dcache_range((unsigned long)vaddr, vaddr 38 arch/nios2/mm/dma-mapping.c (unsigned long)(vaddr + size)); vaddr 48 arch/nios2/mm/dma-mapping.c void *vaddr = phys_to_virt(paddr); vaddr 53 arch/nios2/mm/dma-mapping.c invalidate_dcache_range((unsigned long)vaddr, vaddr 54 arch/nios2/mm/dma-mapping.c (unsigned long)(vaddr + size)); vaddr 82 arch/openrisc/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ vaddr 89 arch/openrisc/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ vaddr 78 arch/openrisc/include/asm/fixmap.h static inline unsigned long virt_to_fix(const unsigned long vaddr) vaddr 80 arch/openrisc/include/asm/fixmap.h BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); vaddr 81 arch/openrisc/include/asm/fixmap.h return __virt_to_fix(vaddr); vaddr 42 arch/openrisc/include/asm/page.h #define clear_user_page(page, vaddr, pg) clear_page(page) vaddr 43 arch/openrisc/include/asm/page.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) vaddr 201 arch/openrisc/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) vaddr 116 arch/openrisc/kernel/dma.c arch_dma_free(struct device *dev, size_t size, void *vaddr, vaddr 119 arch/openrisc/kernel/dma.c unsigned long va = (unsigned long)vaddr; vaddr 125 arch/openrisc/kernel/dma.c free_pages_exact(vaddr, size); vaddr 394 arch/openrisc/kernel/traps.c unsigned long __user *vaddr; vaddr 407 arch/openrisc/kernel/traps.c vaddr = (unsigned long __user *)(regs->gpr[ra] + imm); vaddr 409 arch/openrisc/kernel/traps.c if (!lwa_flag || vaddr != lwa_addr) { vaddr 414 arch/openrisc/kernel/traps.c if ((unsigned long)vaddr & 0x3) { vaddr 419 arch/openrisc/kernel/traps.c if (put_user(regs->gpr[rb], vaddr)) { vaddr 431 arch/openrisc/kernel/traps.c *((unsigned long *)vaddr) = regs->gpr[rb]; vaddr 49 arch/parisc/include/asm/cacheflush.h void flush_kernel_vmap_range(void *vaddr, int size); vaddr 50 arch/parisc/include/asm/cacheflush.h void invalidate_kernel_vmap_range(void *vaddr, int size); vaddr 71 arch/parisc/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ vaddr 73 arch/parisc/include/asm/cacheflush.h flush_cache_page(vma, vaddr, page_to_pfn(page)); \ vaddr 78 arch/parisc/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ vaddr 80 arch/parisc/include/asm/cacheflush.h flush_cache_page(vma, vaddr, page_to_pfn(page)); \ vaddr 89 arch/parisc/include/asm/cacheflush.h void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); vaddr 32 arch/parisc/include/asm/page.h #define clear_user_page(vto, vaddr, page) clear_page_asm(vto) vaddr 33 arch/parisc/include/asm/page.h void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, vaddr 312 arch/parisc/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) vaddr 37 arch/parisc/kernel/cache.c void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); vaddr 39 arch/parisc/kernel/cache.c void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); 
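The nds32 and nios2 cacheflush.c entries above all trace the same aliasing-cache discipline: before a page is touched through a kernel mapping, the user-space alias named by vaddr is written back and invalidated. A minimal sketch of that shape, assuming range-flush helpers spelled like the nios2 ones indexed above:

	/* Sketch only: __flush_dcache()/__flush_icache() stand in for the
	 * arch-specific range flushes shown in the entries above. */
	static void sketch_copy_user_page(void *vto, void *vfrom, unsigned long vaddr)
	{
		__flush_dcache(vaddr, vaddr + PAGE_SIZE);	/* write back the user alias */
		__flush_icache(vaddr, vaddr + PAGE_SIZE);	/* drop stale i-cache lines */
		copy_page(vto, vfrom);				/* copy via the kernel mapping */
	}

On a physically tagged cache the flushes collapse to nothing, which is why several of the page.h entries above define copy_user_page() straight to copy_page().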
vaddr 40 arch/parisc/kernel/cache.c void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr); vaddr 469 arch/parisc/kernel/cache.c void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, vaddr 477 arch/parisc/kernel/cache.c flush_dcache_page_asm(__pa(vfrom), vaddr); vaddr 648 arch/parisc/kernel/cache.c void flush_kernel_vmap_range(void *vaddr, int size) vaddr 650 arch/parisc/kernel/cache.c unsigned long start = (unsigned long)vaddr; vaddr 665 arch/parisc/kernel/cache.c void invalidate_kernel_vmap_range(void *vaddr, int size) vaddr 667 arch/parisc/kernel/cache.c unsigned long start = (unsigned long)vaddr; vaddr 78 arch/parisc/kernel/pci-dma.c unsigned long vaddr, vaddr 82 arch/parisc/kernel/pci-dma.c unsigned long orig_vaddr = vaddr; vaddr 84 arch/parisc/kernel/pci-dma.c vaddr &= ~PMD_MASK; vaddr 85 arch/parisc/kernel/pci-dma.c end = vaddr + size; vaddr 97 arch/parisc/kernel/pci-dma.c vaddr += PAGE_SIZE; vaddr 101 arch/parisc/kernel/pci-dma.c } while (vaddr < end); vaddr 105 arch/parisc/kernel/pci-dma.c static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr, vaddr 109 arch/parisc/kernel/pci-dma.c unsigned long orig_vaddr = vaddr; vaddr 111 arch/parisc/kernel/pci-dma.c vaddr &= ~PGDIR_MASK; vaddr 112 arch/parisc/kernel/pci-dma.c end = vaddr + size; vaddr 116 arch/parisc/kernel/pci-dma.c pte_t * pte = pte_alloc_kernel(pmd, vaddr); vaddr 119 arch/parisc/kernel/pci-dma.c if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr)) vaddr 121 arch/parisc/kernel/pci-dma.c vaddr = (vaddr + PMD_SIZE) & PMD_MASK; vaddr 124 arch/parisc/kernel/pci-dma.c } while (vaddr < end); vaddr 128 arch/parisc/kernel/pci-dma.c static inline int map_uncached_pages(unsigned long vaddr, unsigned long size, vaddr 132 arch/parisc/kernel/pci-dma.c unsigned long end = vaddr + size; vaddr 134 arch/parisc/kernel/pci-dma.c dir = pgd_offset_k(vaddr); vaddr 138 arch/parisc/kernel/pci-dma.c pmd = pmd_alloc(NULL, dir, vaddr); vaddr 141 arch/parisc/kernel/pci-dma.c if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr)) vaddr 143 arch/parisc/kernel/pci-dma.c vaddr = vaddr + PGDIR_SIZE; vaddr 145 arch/parisc/kernel/pci-dma.c } while (vaddr && (vaddr < end)); vaddr 149 arch/parisc/kernel/pci-dma.c static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr, vaddr 154 arch/parisc/kernel/pci-dma.c unsigned long orig_vaddr = vaddr; vaddr 163 arch/parisc/kernel/pci-dma.c pte = pte_offset_map(pmd, vaddr); vaddr 164 arch/parisc/kernel/pci-dma.c vaddr &= ~PMD_MASK; vaddr 165 arch/parisc/kernel/pci-dma.c end = vaddr + size; vaddr 172 arch/parisc/kernel/pci-dma.c pte_clear(&init_mm, vaddr, pte); vaddr 176 arch/parisc/kernel/pci-dma.c vaddr += PAGE_SIZE; vaddr 182 arch/parisc/kernel/pci-dma.c } while (vaddr < end); vaddr 185 arch/parisc/kernel/pci-dma.c static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr, vaddr 190 arch/parisc/kernel/pci-dma.c unsigned long orig_vaddr = vaddr; vaddr 199 arch/parisc/kernel/pci-dma.c pmd = pmd_offset(dir, vaddr); vaddr 200 arch/parisc/kernel/pci-dma.c vaddr &= ~PGDIR_MASK; vaddr 201 arch/parisc/kernel/pci-dma.c end = vaddr + size; vaddr 205 arch/parisc/kernel/pci-dma.c unmap_uncached_pte(pmd, orig_vaddr, end - vaddr); vaddr 206 arch/parisc/kernel/pci-dma.c vaddr = (vaddr + PMD_SIZE) & PMD_MASK; vaddr 209 arch/parisc/kernel/pci-dma.c } while (vaddr < end); vaddr 212 arch/parisc/kernel/pci-dma.c static void unmap_uncached_pages(unsigned long vaddr, unsigned long size) vaddr 215 arch/parisc/kernel/pci-dma.c unsigned long end = vaddr + size; 
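The parisc pci-dma.c entries above are fragments of a textbook top-down page-table walk: each level masks vaddr down to its own span, fills one level of entries, and hands the remainder to the level below. Reassembled as a hedged sketch of just the outer loop (error unwinding elided; map_pmd_uncached() is the next level down, as indexed above):

	static int sketch_map_uncached_pages(unsigned long vaddr, unsigned long size,
					     unsigned long *paddr)
	{
		unsigned long end = vaddr + size;
		pgd_t *dir = pgd_offset_k(vaddr);	/* top-level entry for vaddr */

		do {
			pmd_t *pmd = pmd_alloc(NULL, dir, vaddr);
			if (!pmd)
				return -ENOMEM;
			if (map_pmd_uncached(pmd, vaddr, end - vaddr, paddr))
				return -ENOMEM;
			vaddr = vaddr + PGDIR_SIZE;	/* advance to the next top-level slot */
			dir++;
		} while (vaddr && (vaddr < end));	/* vaddr == 0 catches wraparound */
		return 0;
	}

The unmap_uncached_* entries that follow run the same walk in reverse, clearing PTEs with pte_clear() instead of allocating them.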
vaddr 217 arch/parisc/kernel/pci-dma.c dir = pgd_offset_k(vaddr); vaddr 219 arch/parisc/kernel/pci-dma.c unmap_uncached_pmd(dir, vaddr, end - vaddr); vaddr 220 arch/parisc/kernel/pci-dma.c vaddr = vaddr + PGDIR_SIZE; vaddr 222 arch/parisc/kernel/pci-dma.c } while (vaddr && (vaddr < end)); vaddr 301 arch/parisc/kernel/pci-dma.c pcxl_free_range(unsigned long vaddr, size_t size) vaddr 304 arch/parisc/kernel/pci-dma.c unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3); vaddr 400 arch/parisc/kernel/pci-dma.c unsigned long vaddr; vaddr 409 arch/parisc/kernel/pci-dma.c vaddr = pcxl_alloc_range(size); vaddr 413 arch/parisc/kernel/pci-dma.c map_uncached_pages(vaddr, size, paddr); vaddr 424 arch/parisc/kernel/pci-dma.c return (void *)vaddr; vaddr 427 arch/parisc/kernel/pci-dma.c void arch_dma_free(struct device *dev, size_t size, void *vaddr, vaddr 436 arch/parisc/kernel/pci-dma.c unmap_uncached_pages((unsigned long)vaddr, size); vaddr 437 arch/parisc/kernel/pci-dma.c pcxl_free_range((unsigned long)vaddr, size); vaddr 454 arch/parisc/kernel/pci-dma.c void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, vaddr 457 arch/parisc/kernel/pci-dma.c flush_kernel_dcache_range((unsigned long)vaddr, size); vaddr 15 arch/parisc/mm/fixmap.c unsigned long vaddr = __fix_to_virt(idx); vaddr 16 arch/parisc/mm/fixmap.c pgd_t *pgd = pgd_offset_k(vaddr); vaddr 17 arch/parisc/mm/fixmap.c pmd_t *pmd = pmd_offset(pgd, vaddr); vaddr 21 arch/parisc/mm/fixmap.c pmd = pmd_alloc(NULL, pgd, vaddr); vaddr 23 arch/parisc/mm/fixmap.c pte = pte_offset_kernel(pmd, vaddr); vaddr 25 arch/parisc/mm/fixmap.c pte = pte_alloc_kernel(pmd, vaddr); vaddr 27 arch/parisc/mm/fixmap.c set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX)); vaddr 28 arch/parisc/mm/fixmap.c flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); vaddr 33 arch/parisc/mm/fixmap.c unsigned long vaddr = __fix_to_virt(idx); vaddr 34 arch/parisc/mm/fixmap.c pgd_t *pgd = pgd_offset_k(vaddr); vaddr 35 arch/parisc/mm/fixmap.c pmd_t *pmd = pmd_offset(pgd, vaddr); vaddr 36 arch/parisc/mm/fixmap.c pte_t *pte = pte_offset_kernel(pmd, vaddr); vaddr 41 arch/parisc/mm/fixmap.c pte_clear(&init_mm, vaddr, pte); vaddr 43 arch/parisc/mm/fixmap.c flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); vaddr 363 arch/parisc/mm/init.c unsigned long vaddr; vaddr 385 arch/parisc/mm/init.c vaddr = start_vaddr; vaddr 464 arch/parisc/mm/init.c vaddr += PAGE_SIZE; vaddr 118 arch/powerpc/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ vaddr 121 arch/powerpc/include/asm/cacheflush.h flush_icache_user_range(vma, page, vaddr, len); \ vaddr 123 arch/powerpc/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ vaddr 267 arch/powerpc/include/asm/iommu.h void *vaddr, dma_addr_t dma_handle); vaddr 321 arch/powerpc/include/asm/page.h extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); vaddr 322 arch/powerpc/include/asm/page.h extern void copy_user_page(void *to, void *from, unsigned long vaddr, vaddr 65 arch/powerpc/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) vaddr 89 arch/powerpc/kernel/btext.c unsigned long vaddr = PAGE_OFFSET + 0x10000000; vaddr 102 arch/powerpc/kernel/btext.c disp_BAT[0] = vaddr | (BL_16M<<2) | 2; vaddr 108 arch/powerpc/kernel/btext.c disp_BAT[0] = vaddr | (_PAGE_NO_CACHE | PP_RWXX) | 4; vaddr 111 arch/powerpc/kernel/btext.c logicalDisplayBase = (void *) (vaddr + lowbits); vaddr 70 arch/powerpc/kernel/crash_dump.c 
static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize, vaddr 74 arch/powerpc/kernel/crash_dump.c if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) vaddr 77 arch/powerpc/kernel/crash_dump.c memcpy(buf, (vaddr + offset), csize); vaddr 98 arch/powerpc/kernel/crash_dump.c void *vaddr; vaddr 108 arch/powerpc/kernel/crash_dump.c vaddr = __va(paddr); vaddr 109 arch/powerpc/kernel/crash_dump.c csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); vaddr 111 arch/powerpc/kernel/crash_dump.c vaddr = ioremap_cache(paddr, PAGE_SIZE); vaddr 112 arch/powerpc/kernel/crash_dump.c csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); vaddr 113 arch/powerpc/kernel/crash_dump.c iounmap(vaddr); vaddr 50 arch/powerpc/kernel/dma-iommu.c void *vaddr, dma_addr_t dma_handle, vaddr 54 arch/powerpc/kernel/dma-iommu.c dma_direct_free(dev, size, vaddr, dma_handle, attrs); vaddr 56 arch/powerpc/kernel/dma-iommu.c iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, vaddr 679 arch/powerpc/kernel/fadump.c void *vaddr; vaddr 681 arch/powerpc/kernel/fadump.c vaddr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); vaddr 682 arch/powerpc/kernel/fadump.c if (!vaddr) vaddr 686 arch/powerpc/kernel/fadump.c page = virt_to_page(vaddr); vaddr 689 arch/powerpc/kernel/fadump.c return vaddr; vaddr 692 arch/powerpc/kernel/fadump.c static void fadump_free_buffer(unsigned long vaddr, unsigned long size) vaddr 694 arch/powerpc/kernel/fadump.c free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL); vaddr 1064 arch/powerpc/kernel/fadump.c void *vaddr; vaddr 1082 arch/powerpc/kernel/fadump.c vaddr = __va(addr); vaddr 1085 arch/powerpc/kernel/fadump.c fadump_create_elfcore_headers(vaddr); vaddr 27 arch/powerpc/kernel/io-workarounds.c static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr) vaddr 37 arch/powerpc/kernel/io-workarounds.c if (vaddr) { vaddr 40 arch/powerpc/kernel/io-workarounds.c if ((vaddr >= vstart) && (vaddr <= vend)) vaddr 67 arch/powerpc/kernel/io-workarounds.c unsigned long vaddr, paddr; vaddr 70 arch/powerpc/kernel/io-workarounds.c vaddr = (unsigned long)PCI_FIX_ADDR(addr); vaddr 71 arch/powerpc/kernel/io-workarounds.c if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END) vaddr 77 arch/powerpc/kernel/io-workarounds.c ptep = find_init_mm_pte(vaddr, &hugepage_shift); vaddr 84 arch/powerpc/kernel/io-workarounds.c bus = iowa_pci_find(vaddr, paddr); vaddr 102 arch/powerpc/kernel/io-workarounds.c unsigned long vaddr = (unsigned long)pci_io_base + port; vaddr 103 arch/powerpc/kernel/io-workarounds.c return iowa_pci_find(vaddr, 0); vaddr 447 arch/powerpc/kernel/iommu.c unsigned long vaddr, npages, entry, slen; vaddr 456 arch/powerpc/kernel/iommu.c vaddr = (unsigned long) sg_virt(s); vaddr 457 arch/powerpc/kernel/iommu.c npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl)); vaddr 460 arch/powerpc/kernel/iommu.c (vaddr & ~PAGE_MASK) == 0) vaddr 465 arch/powerpc/kernel/iommu.c DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen); vaddr 472 arch/powerpc/kernel/iommu.c "vaddr %lx npages %lu\n", tbl, vaddr, vaddr 487 arch/powerpc/kernel/iommu.c vaddr & IOMMU_PAGE_MASK(tbl), vaddr 547 arch/powerpc/kernel/iommu.c unsigned long vaddr, npages; vaddr 549 arch/powerpc/kernel/iommu.c vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl); vaddr 552 arch/powerpc/kernel/iommu.c __iommu_free(tbl, vaddr, npages); vaddr 800 arch/powerpc/kernel/iommu.c void *vaddr; vaddr 806 arch/powerpc/kernel/iommu.c vaddr = page_address(page) + offset; vaddr 807 
arch/powerpc/kernel/iommu.c uaddr = (unsigned long)vaddr; vaddr 813 arch/powerpc/kernel/iommu.c ((unsigned long)vaddr & ~PAGE_MASK) == 0) vaddr 816 arch/powerpc/kernel/iommu.c dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, vaddr 823 arch/powerpc/kernel/iommu.c "vaddr %p npages %d\n", tbl, vaddr, vaddr 900 arch/powerpc/kernel/iommu.c void *vaddr, dma_addr_t dma_handle) vaddr 909 arch/powerpc/kernel/iommu.c free_pages((unsigned long)vaddr, get_order(size)); vaddr 114 arch/powerpc/kernel/uprobes.c regs->nip = utask->vaddr + MAX_UINSN_BYTES; vaddr 159 arch/powerpc/kernel/uprobes.c instruction_pointer_set(regs, utask->vaddr); vaddr 455 arch/powerpc/kvm/book3s_emulate.c ulong addr, vaddr; vaddr 466 arch/powerpc/kvm/book3s_emulate.c vaddr = addr; vaddr 471 arch/powerpc/kvm/book3s_emulate.c kvmppc_set_dar(vcpu, vaddr); vaddr 472 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.fault_dar = vaddr; vaddr 248 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vaddr, paddr; vaddr 260 arch/powerpc/mm/book3s64/hash_utils.c for (vaddr = vstart, paddr = pstart; vaddr < vend; vaddr 261 arch/powerpc/mm/book3s64/hash_utils.c vaddr += step, paddr += step) { vaddr 263 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vsid = get_kernel_vsid(vaddr, ssize); vaddr 264 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); vaddr 273 arch/powerpc/mm/book3s64/hash_utils.c if (overlaps_kernel_text(vaddr, vaddr + step)) vaddr 287 arch/powerpc/mm/book3s64/hash_utils.c overlaps_interrupt_vector_text(vaddr, vaddr + step)) vaddr 321 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vaddr; vaddr 332 arch/powerpc/mm/book3s64/hash_utils.c for (vaddr = vstart; vaddr < vend; vaddr += step) { vaddr 333 arch/powerpc/mm/book3s64/hash_utils.c rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize); vaddr 1871 arch/powerpc/mm/book3s64/hash_utils.c static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) vaddr 1874 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); vaddr 1875 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); vaddr 1885 arch/powerpc/mm/book3s64/hash_utils.c ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode, vaddr 1896 arch/powerpc/mm/book3s64/hash_utils.c static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) vaddr 1899 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); vaddr 1900 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); vaddr 1919 arch/powerpc/mm/book3s64/hash_utils.c unsigned long flags, vaddr, lmi; vaddr 1924 arch/powerpc/mm/book3s64/hash_utils.c vaddr = (unsigned long)page_address(page); vaddr 1925 arch/powerpc/mm/book3s64/hash_utils.c lmi = __pa(vaddr) >> PAGE_SHIFT; vaddr 1929 arch/powerpc/mm/book3s64/hash_utils.c kernel_map_linear_page(vaddr, lmi); vaddr 1931 arch/powerpc/mm/book3s64/hash_utils.c kernel_unmap_linear_page(vaddr, lmi); vaddr 257 arch/powerpc/mm/book3s64/radix_pgtable.c unsigned long vaddr, addr, mapping_size = 0; vaddr 284 arch/powerpc/mm/book3s64/radix_pgtable.c vaddr = (unsigned long)__va(addr); vaddr 286 arch/powerpc/mm/book3s64/radix_pgtable.c if (overlaps_kernel_text(vaddr, vaddr + mapping_size) || vaddr 287 arch/powerpc/mm/book3s64/radix_pgtable.c overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) { vaddr 300 arch/powerpc/mm/book3s64/radix_pgtable.c rc = __map_kernel_page(vaddr, addr, 
prot, mapping_size, nid, start, end); vaddr 22 arch/powerpc/mm/dma-noncoherent.c static void __dma_sync(void *vaddr, size_t size, int direction) vaddr 24 arch/powerpc/mm/dma-noncoherent.c unsigned long start = (unsigned long)vaddr; vaddr 35 arch/powerpc/mm/highmem.c unsigned long vaddr; vaddr 45 arch/powerpc/mm/highmem.c vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr 47 arch/powerpc/mm/highmem.c __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1); vaddr 48 arch/powerpc/mm/highmem.c local_flush_tlb_page(NULL, vaddr); vaddr 50 arch/powerpc/mm/highmem.c return (void*) vaddr; vaddr 56 arch/powerpc/mm/highmem.c unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; vaddr 58 arch/powerpc/mm/highmem.c if (vaddr < __fix_to_virt(FIX_KMAP_END)) { vaddr 69 arch/powerpc/mm/highmem.c WARN_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); vaddr 75 arch/powerpc/mm/highmem.c pte_clear(&init_mm, vaddr, kmap_pte-idx); vaddr 76 arch/powerpc/mm/highmem.c local_flush_tlb_page(NULL, vaddr); vaddr 68 arch/powerpc/mm/mem.c static inline pte_t *virt_to_kpte(unsigned long vaddr) vaddr 70 arch/powerpc/mm/mem.c return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr 71 arch/powerpc/mm/mem.c vaddr), vaddr), vaddr); vaddr 539 arch/powerpc/mm/mem.c void clear_user_page(void *page, unsigned long vaddr, struct page *pg) vaddr 552 arch/powerpc/mm/mem.c void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, vaddr 53 arch/powerpc/platforms/44x/canyonlands.c void __iomem *vaddr; vaddr 78 arch/powerpc/platforms/44x/canyonlands.c vaddr = of_iomap(np, 0); vaddr 81 arch/powerpc/platforms/44x/canyonlands.c if (!vaddr) { vaddr 101 arch/powerpc/platforms/44x/canyonlands.c setbits32((vaddr + GPIO0_OSRH), 0x42000000); vaddr 102 arch/powerpc/platforms/44x/canyonlands.c setbits32((vaddr + GPIO0_TSRH), 0x42000000); vaddr 104 arch/powerpc/platforms/44x/canyonlands.c iounmap(vaddr); vaddr 222 arch/powerpc/platforms/512x/mpc512x_shared.c void __iomem *vaddr; vaddr 246 arch/powerpc/platforms/512x/mpc512x_shared.c vaddr = ioremap(desc, sizeof(struct diu_ad)); vaddr 247 arch/powerpc/platforms/512x/mpc512x_shared.c if (!vaddr) { vaddr 251 arch/powerpc/platforms/512x/mpc512x_shared.c memcpy(&diu_shared_fb.ad0, vaddr, sizeof(struct diu_ad)); vaddr 257 arch/powerpc/platforms/512x/mpc512x_shared.c pix_fmt = in_le32(vaddr); vaddr 259 arch/powerpc/platforms/512x/mpc512x_shared.c diu_shared_fb.fb_phys = in_le32(vaddr + 4); vaddr 262 arch/powerpc/platforms/512x/mpc512x_shared.c iounmap(vaddr); vaddr 265 arch/powerpc/platforms/512x/mpc512x_shared.c vaddr = ioremap(desc, sizeof(diu_shared_fb.gamma)); vaddr 266 arch/powerpc/platforms/512x/mpc512x_shared.c if (!vaddr) { vaddr 271 arch/powerpc/platforms/512x/mpc512x_shared.c memcpy(&diu_shared_fb.gamma, vaddr, sizeof(diu_shared_fb.gamma)); vaddr 276 arch/powerpc/platforms/512x/mpc512x_shared.c iounmap(vaddr); vaddr 545 arch/powerpc/platforms/ps3/system-bus.c static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, vaddr 551 arch/powerpc/platforms/ps3/system-bus.c free_pages((unsigned long)vaddr, get_order(size)); vaddr 79 arch/powerpc/platforms/pseries/ibmebus.c size_t size, void *vaddr, vaddr 83 arch/powerpc/platforms/pseries/ibmebus.c kfree(vaddr); vaddr 313 arch/powerpc/platforms/pseries/rtas-fadump.c void *vaddr; vaddr 316 arch/powerpc/platforms/pseries/rtas-fadump.c vaddr = __va(addr); vaddr 318 arch/powerpc/platforms/pseries/rtas-fadump.c reg_header = vaddr; vaddr 329 arch/powerpc/platforms/pseries/rtas-fadump.c vaddr += 
be32_to_cpu(reg_header->num_cpu_offset); vaddr 330 arch/powerpc/platforms/pseries/rtas-fadump.c num_cpus = be32_to_cpu(*((__be32 *)(vaddr))); vaddr 332 arch/powerpc/platforms/pseries/rtas-fadump.c vaddr += sizeof(u32); vaddr 333 arch/powerpc/platforms/pseries/rtas-fadump.c reg_entry = (struct rtas_fadump_reg_entry *)vaddr; vaddr 503 arch/powerpc/platforms/pseries/vio.c void *vaddr, dma_addr_t dma_handle, vaddr 508 arch/powerpc/platforms/pseries/vio.c iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); vaddr 62 arch/riscv/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ vaddr 65 arch/riscv/include/asm/cacheflush.h flush_icache_user_range(vma, page, vaddr, len); \ vaddr 67 arch/riscv/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ vaddr 53 arch/riscv/include/asm/page.h #define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) vaddr 54 arch/riscv/include/asm/page.h #define copy_user_page(vto, vfrom, vaddr, topg) \ vaddr 103 arch/riscv/include/asm/page.h #define virt_to_pfn(vaddr) (phys_to_pfn(__pa(vaddr))) vaddr 106 arch/riscv/include/asm/page.h #define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) vaddr 122 arch/riscv/include/asm/page.h #define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr))) vaddr 121 arch/riscv/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) vaddr 27 arch/riscv/mm/ioremap.c unsigned long offset, vaddr; vaddr 43 arch/riscv/mm/ioremap.c vaddr = (unsigned long)area->addr; vaddr 45 arch/riscv/mm/ioremap.c if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) { vaddr 50 arch/riscv/mm/ioremap.c return (void __iomem *)(vaddr + offset); vaddr 30 arch/s390/include/asm/idals.h idal_is_needed(void *vaddr, unsigned int length) vaddr 32 arch/s390/include/asm/idals.h return ((__pa(vaddr) + length - 1) >> 31) != 0; vaddr 39 arch/s390/include/asm/idals.h static inline unsigned int idal_nr_words(void *vaddr, unsigned int length) vaddr 41 arch/s390/include/asm/idals.h return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length + vaddr 49 arch/s390/include/asm/idals.h void *vaddr, unsigned int length) vaddr 54 arch/s390/include/asm/idals.h paddr = __pa(vaddr); vaddr 71 arch/s390/include/asm/idals.h set_normalized_cda(struct ccw1 * ccw, void *vaddr) vaddr 78 arch/s390/include/asm/idals.h nridaws = idal_nr_words(vaddr, ccw->count); vaddr 84 arch/s390/include/asm/idals.h idal_create_words(idal, vaddr, ccw->count); vaddr 86 arch/s390/include/asm/idals.h vaddr = idal; vaddr 88 arch/s390/include/asm/idals.h ccw->cda = (__u32)(unsigned long) vaddr; vaddr 68 arch/s390/include/asm/page.h #define clear_user_page(page, vaddr, pg) clear_page(page) vaddr 69 arch/s390/include/asm/page.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) vaddr 71 arch/s390/include/asm/page.h #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ vaddr 72 arch/s390/include/asm/page.h alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) vaddr 59 arch/s390/include/asm/pgtable.h #define ZERO_PAGE(vaddr) \ vaddr 61 arch/s390/include/asm/pgtable.h (((unsigned long)(vaddr)) &zero_page_mask)))) vaddr 396 arch/s390/kernel/sthyi.c static int sthyi(u64 vaddr, u64 *rc) vaddr 399 arch/s390/kernel/sthyi.c register u64 addr asm("2") = vaddr; vaddr 89 arch/s390/kernel/uprobes.c regs->psw.addr += utask->vaddr - utask->xol_vaddr; vaddr 93 arch/s390/kernel/uprobes.c regs->gprs[reg] += utask->vaddr - utask->xol_vaddr; vaddr 99 
arch/s390/kernel/uprobes.c regs->psw.addr = utask->vaddr + ilen; vaddr 104 arch/s390/kernel/uprobes.c current->thread.per_event.address = utask->vaddr; vaddr 139 arch/s390/kernel/uprobes.c regs->psw.addr = current->utask->vaddr; vaddr 140 arch/s390/kernel/uprobes.c current->thread.per_event.address = current->utask->vaddr; vaddr 618 arch/s390/kvm/gaccess.c union vaddress vaddr = {.addr = gva}; vaddr 636 arch/s390/kvm/gaccess.c if (vaddr.rfx01 > asce.tl) vaddr 638 arch/s390/kvm/gaccess.c ptr += vaddr.rfx * 8; vaddr 641 arch/s390/kvm/gaccess.c if (vaddr.rfx) vaddr 643 arch/s390/kvm/gaccess.c if (vaddr.rsx01 > asce.tl) vaddr 645 arch/s390/kvm/gaccess.c ptr += vaddr.rsx * 8; vaddr 648 arch/s390/kvm/gaccess.c if (vaddr.rfx || vaddr.rsx) vaddr 650 arch/s390/kvm/gaccess.c if (vaddr.rtx01 > asce.tl) vaddr 652 arch/s390/kvm/gaccess.c ptr += vaddr.rtx * 8; vaddr 655 arch/s390/kvm/gaccess.c if (vaddr.rfx || vaddr.rsx || vaddr.rtx) vaddr 657 arch/s390/kvm/gaccess.c if (vaddr.sx01 > asce.tl) vaddr 659 arch/s390/kvm/gaccess.c ptr += vaddr.sx * 8; vaddr 674 arch/s390/kvm/gaccess.c if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl) vaddr 678 arch/s390/kvm/gaccess.c ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8; vaddr 692 arch/s390/kvm/gaccess.c if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl) vaddr 696 arch/s390/kvm/gaccess.c ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8; vaddr 718 arch/s390/kvm/gaccess.c if (vaddr.sx01 < rtte.fc0.tf) vaddr 720 arch/s390/kvm/gaccess.c if (vaddr.sx01 > rtte.fc0.tl) vaddr 724 arch/s390/kvm/gaccess.c ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8; vaddr 747 arch/s390/kvm/gaccess.c ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8; vaddr 988 arch/s390/kvm/gaccess.c union vaddress vaddr; vaddr 995 arch/s390/kvm/gaccess.c vaddr.addr = saddr; vaddr 1005 arch/s390/kvm/gaccess.c if (vaddr.rfx01 > asce.tl && !*fake) vaddr 1009 arch/s390/kvm/gaccess.c if (vaddr.rfx) vaddr 1011 arch/s390/kvm/gaccess.c if (vaddr.rsx01 > asce.tl) vaddr 1015 arch/s390/kvm/gaccess.c if (vaddr.rfx || vaddr.rsx) vaddr 1017 arch/s390/kvm/gaccess.c if (vaddr.rtx01 > asce.tl) vaddr 1021 arch/s390/kvm/gaccess.c if (vaddr.rfx || vaddr.rsx || vaddr.rtx) vaddr 1023 arch/s390/kvm/gaccess.c if (vaddr.sx01 > asce.tl) vaddr 1033 arch/s390/kvm/gaccess.c ptr += vaddr.rfx * _REGION1_SIZE; vaddr 1037 arch/s390/kvm/gaccess.c rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val); vaddr 1044 arch/s390/kvm/gaccess.c if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl) vaddr 1058 arch/s390/kvm/gaccess.c ptr += vaddr.rsx * _REGION2_SIZE; vaddr 1062 arch/s390/kvm/gaccess.c rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val); vaddr 1069 arch/s390/kvm/gaccess.c if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl) vaddr 1084 arch/s390/kvm/gaccess.c ptr += vaddr.rtx * _REGION3_SIZE; vaddr 1088 arch/s390/kvm/gaccess.c rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val); vaddr 1104 arch/s390/kvm/gaccess.c if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl) vaddr 1119 arch/s390/kvm/gaccess.c ptr += vaddr.sx * _SEGMENT_SIZE; vaddr 1123 arch/s390/kvm/gaccess.c rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val); vaddr 1167 arch/s390/kvm/gaccess.c union vaddress vaddr; vaddr 1186 arch/s390/kvm/gaccess.c vaddr.addr = saddr; vaddr 1188 arch/s390/kvm/gaccess.c pte.val = pgt + vaddr.px * PAGE_SIZE; vaddr 1192 arch/s390/kvm/gaccess.c rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val); vaddr 875 arch/s390/kvm/vsie.c static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr, vaddr 882 
arch/s390/kvm/vsie.c (vaddr & 0xfffffffffffff000UL) | vaddr 1272 arch/s390/mm/gmap.c static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr) vaddr 1276 arch/s390/mm/gmap.c : : "a" (asce), "a" (vaddr) : "cc", "memory"); vaddr 22 arch/sh/boards/mach-sdk7786/sram.c void __iomem *vaddr; vaddr 50 arch/sh/boards/mach-sdk7786/sram.c vaddr = ioremap(phys, SZ_2K); vaddr 51 arch/sh/boards/mach-sdk7786/sram.c if (unlikely(!vaddr)) { vaddr 60 arch/sh/boards/mach-sdk7786/sram.c ret = gen_pool_add(sram_pool, (unsigned long)vaddr, SZ_2K, -1); vaddr 63 arch/sh/boards/mach-sdk7786/sram.c iounmap(vaddr); vaddr 83 arch/sh/include/asm/cacheflush.h struct page *page, unsigned long vaddr, void *dst, const void *src, vaddr 87 arch/sh/include/asm/cacheflush.h struct page *page, unsigned long vaddr, void *dst, const void *src, vaddr 104 arch/sh/include/asm/cacheflush.h static inline void *sh_cacheop_vaddr(void *vaddr) vaddr 107 arch/sh/include/asm/cacheflush.h vaddr = (void *)CAC_ADDR((unsigned long)vaddr); vaddr 108 arch/sh/include/asm/cacheflush.h return vaddr; vaddr 63 arch/sh/include/asm/page.h #define copy_user_page(to, from, vaddr, pg) __copy_user(to, from, PAGE_SIZE) vaddr 69 arch/sh/include/asm/page.h unsigned long vaddr, struct vm_area_struct *vma); vaddr 71 arch/sh/include/asm/page.h extern void clear_user_highpage(struct page *page, unsigned long vaddr); vaddr 29 arch/sh/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) vaddr 30 arch/sh/include/cpu-sh4/cpu/sq.h void sq_unmap(unsigned long vaddr); vaddr 206 arch/sh/kernel/cpu/sh4/sq.c void sq_unmap(unsigned long vaddr) vaddr 212 arch/sh/kernel/cpu/sh4/sq.c if (map->sq_addr == vaddr) vaddr 217 arch/sh/kernel/cpu/sh4/sq.c __func__, vaddr); vaddr 29 arch/sh/kernel/crash_dump.c void *vaddr; vaddr 34 arch/sh/kernel/crash_dump.c vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); vaddr 37 arch/sh/kernel/crash_dump.c if (copy_to_user(buf, (vaddr + offset), csize)) { vaddr 38 arch/sh/kernel/crash_dump.c iounmap(vaddr); vaddr 42 arch/sh/kernel/crash_dump.c memcpy(buf, (vaddr + offset), csize); vaddr 44 arch/sh/kernel/crash_dump.c iounmap(vaddr); vaddr 46 arch/sh/kernel/dma-coherent.c void arch_dma_free(struct device *dev, size_t size, void *vaddr, vaddr 59 arch/sh/kernel/dma-coherent.c iounmap(vaddr); vaddr 166 arch/sh/kernel/io_trapped.c unsigned long vaddr = (unsigned long)tiop->virt_base; vaddr 173 arch/sh/kernel/io_trapped.c if (address < (vaddr + len)) vaddr 174 arch/sh/kernel/io_trapped.c return res->start + (address - vaddr); vaddr 175 arch/sh/kernel/io_trapped.c vaddr += len; vaddr 462 arch/sh/kernel/smp.c void flush_tlb_one(unsigned long asid, unsigned long vaddr) vaddr 467 arch/sh/kernel/smp.c fd.addr2 = vaddr; vaddr 470 arch/sh/kernel/smp.c local_flush_tlb_one(asid, vaddr); vaddr 215 arch/sh/mm/cache-sh4.c void *vaddr; vaddr 236 arch/sh/mm/cache-sh4.c vaddr = NULL; vaddr 246 arch/sh/mm/cache-sh4.c vaddr = kmap_coherent(page, address); vaddr 248 arch/sh/mm/cache-sh4.c vaddr = kmap_atomic(page); vaddr 250 arch/sh/mm/cache-sh4.c address = (unsigned long)vaddr; vaddr 259 arch/sh/mm/cache-sh4.c if (vaddr) { vaddr 261 arch/sh/mm/cache-sh4.c kunmap_coherent(vaddr); vaddr 263 arch/sh/mm/cache-sh4.c kunmap_atomic(vaddr); vaddr 597 arch/sh/mm/cache-sh5.c static void sh5_flush_cache_sigtramp(void *vaddr) vaddr 599 arch/sh/mm/cache-sh5.c unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES; vaddr 601 arch/sh/mm/cache-sh5.c __flush_wback_region(vaddr, L1_CACHE_BYTES); vaddr 603 arch/sh/mm/cache-sh5.c 
sh64_icache_inv_current_user_range((unsigned long)vaddr, end); vaddr 61 arch/sh/mm/cache.c unsigned long vaddr, void *dst, const void *src, vaddr 66 arch/sh/mm/cache.c void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); vaddr 76 arch/sh/mm/cache.c flush_cache_page(vma, vaddr, page_to_pfn(page)); vaddr 80 arch/sh/mm/cache.c unsigned long vaddr, void *dst, const void *src, vaddr 85 arch/sh/mm/cache.c void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); vaddr 96 arch/sh/mm/cache.c unsigned long vaddr, struct vm_area_struct *vma) vaddr 104 arch/sh/mm/cache.c vfrom = kmap_coherent(from, vaddr); vaddr 113 arch/sh/mm/cache.c if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) || vaddr 123 arch/sh/mm/cache.c void clear_user_highpage(struct page *page, unsigned long vaddr) vaddr 129 arch/sh/mm/cache.c if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) vaddr 159 arch/sh/mm/init.c unsigned long vaddr, pte_t *lastpte) vaddr 172 arch/sh/mm/init.c unsigned long vaddr; vaddr 174 arch/sh/mm/init.c vaddr = start; vaddr 175 arch/sh/mm/init.c i = __pgd_offset(vaddr); vaddr 176 arch/sh/mm/init.c j = __pud_offset(vaddr); vaddr 177 arch/sh/mm/init.c k = __pmd_offset(vaddr); vaddr 180 arch/sh/mm/init.c for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { vaddr 182 arch/sh/mm/init.c for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { vaddr 187 arch/sh/mm/init.c for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { vaddr 189 arch/sh/mm/init.c pmd, vaddr, pte); vaddr 190 arch/sh/mm/init.c vaddr += PMD_SIZE; vaddr 281 arch/sh/mm/init.c unsigned long vaddr, end; vaddr 329 arch/sh/mm/init.c vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; vaddr 331 arch/sh/mm/init.c page_table_range_init(vaddr, end, swapper_pg_dir); vaddr 108 arch/sh/mm/ioremap.c unsigned long vaddr = (unsigned long __force)addr; vaddr 114 arch/sh/mm/ioremap.c if (iomapping_nontranslatable(vaddr)) vaddr 129 arch/sh/mm/ioremap.c p = remove_vm_area((void *)(vaddr & PAGE_MASK)); vaddr 17 arch/sh/mm/kmap.c #define kmap_get_fixmap_pte(vaddr) \ vaddr 18 arch/sh/mm/kmap.c pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) vaddr 24 arch/sh/mm/kmap.c unsigned long vaddr; vaddr 27 arch/sh/mm/kmap.c vaddr = __fix_to_virt(FIX_CMAP_BEGIN); vaddr 28 arch/sh/mm/kmap.c kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); vaddr 34 arch/sh/mm/kmap.c unsigned long vaddr; vaddr 45 arch/sh/mm/kmap.c vaddr = __fix_to_virt(idx); vaddr 50 arch/sh/mm/kmap.c return (void *)vaddr; vaddr 56 arch/sh/mm/kmap.c unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK; vaddr 57 arch/sh/mm/kmap.c enum fixed_addresses idx = __virt_to_fix(vaddr); vaddr 60 arch/sh/mm/kmap.c __flush_purge_region((void *)vaddr, PAGE_SIZE); vaddr 62 arch/sh/mm/kmap.c pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx); vaddr 63 arch/sh/mm/kmap.c local_flush_tlb_one(get_asid(), vaddr); vaddr 137 arch/sh/mm/pmb.c static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys, vaddr 156 arch/sh/mm/pmb.c if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size))) vaddr 335 arch/sh/mm/pmb.c int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys, vaddr 345 arch/sh/mm/pmb.c if (!pmb_addr_valid(vaddr, size)) vaddr 347 arch/sh/mm/pmb.c if (pmb_mapping_exists(vaddr, phys, size)) vaddr 350 arch/sh/mm/pmb.c orig_addr = vaddr; vaddr 353 arch/sh/mm/pmb.c flush_tlb_kernel_range(vaddr, vaddr + size); vaddr 363 arch/sh/mm/pmb.c pmbe = pmb_alloc(vaddr, phys, pmb_flags | vaddr 377 arch/sh/mm/pmb.c vaddr += 
pmbe->size; vaddr 413 arch/sh/mm/pmb.c unsigned long vaddr; vaddr 452 arch/sh/mm/pmb.c vaddr = (unsigned long)area->addr; vaddr 454 arch/sh/mm/pmb.c ret = pmb_bolt_mapping(vaddr, phys, size, prot); vaddr 458 arch/sh/mm/pmb.c return (void __iomem *)(offset + (char *)vaddr); vaddr 464 arch/sh/mm/pmb.c unsigned long vaddr = (unsigned long __force)addr; vaddr 472 arch/sh/mm/pmb.c if (pmbe->vpn == vaddr) { vaddr 22 arch/sparc/include/asm/cacheflush_32.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ vaddr 24 arch/sparc/include/asm/cacheflush_32.h flush_cache_page(vma, vaddr, page_to_pfn(page));\ vaddr 27 arch/sparc/include/asm/cacheflush_32.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ vaddr 29 arch/sparc/include/asm/cacheflush_32.h flush_cache_page(vma, vaddr, page_to_pfn(page));\ vaddr 58 arch/sparc/include/asm/cacheflush_64.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ vaddr 60 arch/sparc/include/asm/cacheflush_64.h flush_cache_page(vma, vaddr, page_to_pfn(page)); \ vaddr 62 arch/sparc/include/asm/cacheflush_64.h flush_ptrace_access(vma, page, vaddr, src, len, 0); \ vaddr 65 arch/sparc/include/asm/cacheflush_64.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ vaddr 67 arch/sparc/include/asm/cacheflush_64.h flush_cache_page(vma, vaddr, page_to_pfn(page)); \ vaddr 69 arch/sparc/include/asm/cacheflush_64.h flush_ptrace_access(vma, page, vaddr, dst, len, 1); \ vaddr 202 arch/sparc/include/asm/floppy_64.h unsigned char *vaddr = pdma_vaddr; vaddr 209 arch/sparc/include/asm/floppy_64.h pdma_vaddr = vaddr; vaddr 214 arch/sparc/include/asm/floppy_64.h pdma_vaddr = vaddr; vaddr 221 arch/sparc/include/asm/floppy_64.h *vaddr++ = readb(stat + 1); vaddr 223 arch/sparc/include/asm/floppy_64.h unsigned char data = *vaddr++; vaddr 231 arch/sparc/include/asm/floppy_64.h pdma_vaddr = vaddr; vaddr 10 arch/sparc/include/asm/hvtramp.h __u64 vaddr; vaddr 770 arch/sparc/include/asm/hypervisor.h unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr, vaddr 200 arch/sparc/include/asm/leon.h unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr); vaddr 156 arch/sparc/include/asm/oplib_64.h unsigned long vaddr); vaddr 160 arch/sparc/include/asm/oplib_64.h unsigned long vaddr); vaddr 176 arch/sparc/include/asm/oplib_64.h unsigned long vaddr, unsigned long paddr); vaddr 177 arch/sparc/include/asm/oplib_64.h void prom_unmap(unsigned long size, unsigned long vaddr); vaddr 22 arch/sparc/include/asm/page_32.h #define clear_user_page(addr, vaddr, page) \ vaddr 26 arch/sparc/include/asm/page_32.h #define copy_user_page(to, from, vaddr, page) \ vaddr 48 arch/sparc/include/asm/page_64.h void clear_user_page(void *addr, unsigned long vaddr, struct page *page); vaddr 50 arch/sparc/include/asm/page_64.h void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage); vaddr 54 arch/sparc/include/asm/page_64.h unsigned long vaddr, struct vm_area_struct *vma); vaddr 97 arch/sparc/include/asm/pgtable_32.h #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) vaddr 232 arch/sparc/include/asm/pgtable_64.h #define ZERO_PAGE(vaddr) (mem_map_zero) vaddr 919 arch/sparc/include/asm/pgtable_64.h void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, vaddr 923 arch/sparc/include/asm/pgtable_64.h static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, vaddr 934 arch/sparc/include/asm/pgtable_64.h tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift); vaddr 21 arch/sparc/include/asm/tlbflush_64.h void 
flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, vaddr 51 arch/sparc/include/asm/tlbflush_64.h void __flush_tlb_page(unsigned long context, unsigned long vaddr); vaddr 56 arch/sparc/include/asm/tlbflush_64.h static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) vaddr 58 arch/sparc/include/asm/tlbflush_64.h __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); vaddr 64 arch/sparc/include/asm/tlbflush_64.h void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); vaddr 66 arch/sparc/include/asm/tlbflush_64.h #define global_flush_tlb_page(mm, vaddr) \ vaddr 67 arch/sparc/include/asm/tlbflush_64.h smp_flush_tlb_page(mm, vaddr) vaddr 213 arch/sparc/include/asm/viking.h static inline unsigned long viking_hwprobe(unsigned long vaddr) vaddr 217 arch/sparc/include/asm/viking.h vaddr &= PAGE_MASK; vaddr 221 arch/sparc/include/asm/viking.h : "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE)); vaddr 228 arch/sparc/include/asm/viking.h : "r" (vaddr | 0x200), "i" (ASI_M_FLUSH_PROBE)); vaddr 230 arch/sparc/include/asm/viking.h vaddr &= ~SRMMU_PGDIR_MASK; vaddr 231 arch/sparc/include/asm/viking.h vaddr >>= PAGE_SHIFT; vaddr 232 arch/sparc/include/asm/viking.h return val | (vaddr << 8); vaddr 238 arch/sparc/include/asm/viking.h : "r" (vaddr | 0x100), "i" (ASI_M_FLUSH_PROBE)); vaddr 240 arch/sparc/include/asm/viking.h vaddr &= ~SRMMU_REAL_PMD_MASK; vaddr 241 arch/sparc/include/asm/viking.h vaddr >>= PAGE_SHIFT; vaddr 242 arch/sparc/include/asm/viking.h return val | (vaddr << 8); vaddr 248 arch/sparc/include/asm/viking.h : "r" (vaddr), "i" (ASI_M_FLUSH_PROBE)); vaddr 321 arch/sparc/kernel/iommu.c u32 vaddr, unsigned long ctx, unsigned long npages, vaddr 356 arch/sparc/kernel/iommu.c for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE) vaddr 357 arch/sparc/kernel/iommu.c iommu_write(strbuf->strbuf_pflush, vaddr); vaddr 383 arch/sparc/kernel/iommu.c vaddr, ctx, npages); vaddr 559 arch/sparc/kernel/iommu.c unsigned long vaddr, npages, entry, j; vaddr 562 arch/sparc/kernel/iommu.c vaddr = s->dma_address & IO_PAGE_MASK; vaddr 566 arch/sparc/kernel/iommu.c entry = (vaddr - iommu->tbl.table_map_base) vaddr 573 arch/sparc/kernel/iommu.c iommu_tbl_range_free(&iommu->tbl, vaddr, npages, vaddr 138 arch/sparc/kernel/ioport.c unsigned long vaddr = (unsigned long) virtual & PAGE_MASK; vaddr 145 arch/sparc/kernel/ioport.c if ((res = lookup_resource(&sparc_iomap, vaddr)) == NULL) { vaddr 146 arch/sparc/kernel/ioport.c printk("free_io/iounmap: cannot free %lx\n", vaddr); vaddr 606 arch/sparc/kernel/pci_sun4v.c unsigned long vaddr, npages; vaddr 608 arch/sparc/kernel/pci_sun4v.c vaddr = s->dma_address & IO_PAGE_MASK; vaddr 611 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, vaddr, npages, vaddr 328 arch/sparc/kernel/smp_64.c hdesc->maps[i].vaddr = tte_vaddr; vaddr 1134 arch/sparc/kernel/smp_64.c void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) vaddr 1143 arch/sparc/kernel/smp_64.c context, vaddr, 0, vaddr 1145 arch/sparc/kernel/smp_64.c __flush_tlb_page(context, vaddr); vaddr 1466 arch/sparc/kernel/traps_64.c unsigned long vaddr = PAGE_OFFSET + paddr; vaddr 1468 arch/sparc/kernel/traps_64.c if (vaddr > (unsigned long) high_memory) vaddr 1471 arch/sparc/kernel/traps_64.c return kern_addr_valid(vaddr); vaddr 31 arch/sparc/kernel/uprobes.c static void copy_to_page(struct page *page, unsigned long vaddr, vaddr 36 arch/sparc/kernel/uprobes.c memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); vaddr 47 arch/sparc/kernel/uprobes.c void arch_uprobe_copy_ixol(struct 
vaddr 67 arch/sparc/kernel/uprobes.c copy_to_page(page, vaddr, &insn, len);
vaddr 68 arch/sparc/kernel/uprobes.c copy_to_page(page, vaddr+len, &stp_insn, 4);
vaddr 104 arch/sparc/kernel/uprobes.c unsigned long real_pc = (unsigned long) utask->vaddr;
vaddr 226 arch/sparc/kernel/uprobes.c rc = retpc_fixup(regs, insn, (unsigned long) utask->vaddr);
vaddr 228 arch/sparc/kernel/uprobes.c regs->tnpc = utask->vaddr+4;
vaddr 297 arch/sparc/kernel/uprobes.c instruction_pointer_set(regs, utask->vaddr);
vaddr 62 arch/sparc/mm/fault_64.c static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
vaddr 68 arch/sparc/mm/fault_64.c printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
vaddr 54 arch/sparc/mm/highmem.c unsigned long vaddr;
vaddr 64 arch/sparc/mm/highmem.c vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
vaddr 68 arch/sparc/mm/highmem.c __flush_cache_one(vaddr);
vaddr 79 arch/sparc/mm/highmem.c __flush_tlb_one(vaddr);
vaddr 84 arch/sparc/mm/highmem.c return (void*) vaddr;
vaddr 90 arch/sparc/mm/highmem.c unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
vaddr 93 arch/sparc/mm/highmem.c if (vaddr < FIXADDR_START) { // FIXME
vaddr 106 arch/sparc/mm/highmem.c BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
vaddr 110 arch/sparc/mm/highmem.c __flush_cache_one(vaddr);
vaddr 119 arch/sparc/mm/highmem.c pte_clear(&init_mm, vaddr, kmap_pte-idx);
vaddr 122 arch/sparc/mm/highmem.c __flush_tlb_one(vaddr);
vaddr 298 arch/sparc/mm/init_32.c unsigned long vaddr = (unsigned long)page_address(page);
vaddr 300 arch/sparc/mm/init_32.c if (vaddr)
vaddr 301 arch/sparc/mm/init_32.c __flush_page_to_ram(vaddr);
vaddr 594 arch/sparc/mm/init_64.c static inline int in_obp_range(unsigned long vaddr)
vaddr 596 arch/sparc/mm/init_64.c return (vaddr >= LOW_OBP_ADDRESS &&
vaddr 597 arch/sparc/mm/init_64.c vaddr < HI_OBP_ADDRESS);
vaddr 678 arch/sparc/mm/init_64.c static void __init hypervisor_tlb_lock(unsigned long vaddr,
vaddr 682 arch/sparc/mm/init_64.c unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
vaddr 686 arch/sparc/mm/init_64.c "errors with %lx\n", vaddr, 0, pte, mmu, ret);
vaddr 3129 arch/sparc/mm/init_64.c unsigned long vaddr, struct vm_area_struct *vma)
vaddr 3135 arch/sparc/mm/init_64.c copy_user_page(vto, vfrom, vaddr, to);
vaddr 95 arch/sparc/mm/io-unit.c static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
vaddr 101 arch/sparc/mm/io-unit.c npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
vaddr 110 arch/sparc/mm/io-unit.c IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
vaddr 125 arch/sparc/mm/io-unit.c panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
vaddr 133 arch/sparc/mm/io-unit.c iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
vaddr 134 arch/sparc/mm/io-unit.c vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
vaddr 139 arch/sparc/mm/io-unit.c IOD(("%08lx\n", vaddr));
vaddr 140 arch/sparc/mm/io-unit.c return vaddr;
vaddr 147 arch/sparc/mm/io-unit.c void *vaddr = page_address(page) + offset;
vaddr 156 arch/sparc/mm/io-unit.c ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
vaddr 179 arch/sparc/mm/io-unit.c static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
vaddr 186 arch/sparc/mm/io-unit.c len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
vaddr 187 arch/sparc/mm/io-unit.c vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
vaddr 188 arch/sparc/mm/io-unit.c IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
vaddr 189 arch/sparc/mm/io-unit.c for (len += vaddr; vaddr < len; vaddr++)
vaddr 190 arch/sparc/mm/io-unit.c clear_bit(vaddr, iounit->bmap);
vaddr 198 arch/sparc/mm/io-unit.c unsigned long flags, vaddr, len;
vaddr 205 arch/sparc/mm/io-unit.c vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
vaddr 206 arch/sparc/mm/io-unit.c IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
vaddr 207 arch/sparc/mm/io-unit.c for (len += vaddr; vaddr < len; vaddr++)
vaddr 208 arch/sparc/mm/io-unit.c clear_bit(vaddr, iounit->bmap);
vaddr 200 arch/sparc/mm/iommu.c unsigned long vaddr, p;
vaddr 202 arch/sparc/mm/iommu.c vaddr = (unsigned long)page_address(page) + offset;
vaddr 203 arch/sparc/mm/iommu.c for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
vaddr 36 arch/sparc/mm/leon_mm.c unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
vaddr 89 arch/sparc/mm/leon_mm.c ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4);
vaddr 112 arch/sparc/mm/leon_mm.c ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
vaddr 140 arch/sparc/mm/leon_mm.c ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
vaddr 161 arch/sparc/mm/leon_mm.c (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
vaddr 165 arch/sparc/mm/leon_mm.c (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
vaddr 169 arch/sparc/mm/leon_mm.c (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
vaddr 173 arch/sparc/mm/leon_mm.c paddr_calc = vaddr;
vaddr 221 arch/sparc/mm/srmmu.c unsigned long vaddr;
vaddr 224 arch/sparc/mm/srmmu.c vaddr = (unsigned long)addr;
vaddr 225 arch/sparc/mm/srmmu.c if (vaddr < SRMMU_NOCACHE_VADDR) {
vaddr 227 arch/sparc/mm/srmmu.c vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
vaddr 230 arch/sparc/mm/srmmu.c if (vaddr + size > srmmu_nocache_end) {
vaddr 232 arch/sparc/mm/srmmu.c vaddr, srmmu_nocache_end);
vaddr 243 arch/sparc/mm/srmmu.c if (vaddr & (size - 1)) {
vaddr 244 arch/sparc/mm/srmmu.c printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
vaddr 248 arch/sparc/mm/srmmu.c offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
vaddr 301 arch/sparc/mm/srmmu.c unsigned long paddr, vaddr;
vaddr 328 arch/sparc/mm/srmmu.c vaddr = SRMMU_NOCACHE_VADDR;
vaddr 330 arch/sparc/mm/srmmu.c while (vaddr < srmmu_nocache_end) {
vaddr 331 arch/sparc/mm/srmmu.c pgd = pgd_offset_k(vaddr);
vaddr 332 arch/sparc/mm/srmmu.c pmd = pmd_offset(__nocache_fix(pgd), vaddr);
vaddr 333 arch/sparc/mm/srmmu.c pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
vaddr 342 arch/sparc/mm/srmmu.c vaddr += PAGE_SIZE;
vaddr 755 arch/sparc/mm/srmmu.c static inline unsigned long srmmu_probe(unsigned long vaddr)
vaddr 761 arch/sparc/mm/srmmu.c vaddr &= PAGE_MASK;
vaddr 764 arch/sparc/mm/srmmu.c "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
vaddr 766 arch/sparc/mm/srmmu.c retval = leon_swprobe(vaddr, NULL);
vaddr 856 arch/sparc/mm/srmmu.c static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
vaddr 858 arch/sparc/mm/srmmu.c pgd_t *pgdp = pgd_offset_k(vaddr);
vaddr 70 arch/sparc/mm/tlb.c static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
vaddr 76 arch/sparc/mm/tlb.c vaddr &= PAGE_MASK;
vaddr 78 arch/sparc/mm/tlb.c vaddr |= 0x1UL;
vaddr 88 arch/sparc/mm/tlb.c flush_tsb_user_page(mm, vaddr, hugepage_shift);
vaddr 89 arch/sparc/mm/tlb.c global_flush_tlb_page(mm, vaddr);
vaddr 104 arch/sparc/mm/tlb.c tb->vaddrs[nr] = vaddr;
vaddr 113 arch/sparc/mm/tlb.c void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
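Aside: the highmem entries above (arch/sparc/mm/highmem.c, and later arch/x86/mm/highmem_32.c and arch/xtensa/mm/highmem.c) all derive the temporary mapping address from a fixmap slot index via __fix_to_virt(FIX_KMAP_BEGIN + idx). A minimal userspace sketch of that arithmetic follows; the FIXADDR_TOP and PAGE_SHIFT values here are invented for illustration, not taken from any real architecture:

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical constants; real values are per-architecture. */
    #define PAGE_SHIFT   12UL
    #define FIXADDR_TOP  0xfffff000UL

    /* __fix_to_virt(): fixmap slots grow downward from FIXADDR_TOP. */
    static unsigned long fix_to_virt(unsigned long idx)
    {
            return FIXADDR_TOP - (idx << PAGE_SHIFT);
    }

    /* virt_to_fix(): the inverse, as declared in arch/xtensa/include/asm/fixmap.h. */
    static unsigned long virt_to_fix(unsigned long vaddr)
    {
            return (FIXADDR_TOP - vaddr) >> PAGE_SHIFT;
    }

    int main(void)
    {
            unsigned long idx = 5;
            unsigned long vaddr = fix_to_virt(idx);

            assert(virt_to_fix(vaddr) == idx);  /* round-trips exactly */
            printf("slot %lu -> vaddr %#lx\n", idx, vaddr);
            return 0;
    }

The round-trip property is what lets kunmap-style paths (and ghes_unmap() further down) recover the slot index from nothing but the virtual address.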
vaddr 136 arch/sparc/mm/tlb.c if ((paddr ^ vaddr) & (1 << 13))
vaddr 142 arch/sparc/mm/tlb.c tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
vaddr 146 arch/sparc/mm/tlb.c static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
vaddr 152 arch/sparc/mm/tlb.c pte = pte_offset_map(&pmd, vaddr);
vaddr 153 arch/sparc/mm/tlb.c end = vaddr + HPAGE_SIZE;
vaddr 154 arch/sparc/mm/tlb.c while (vaddr < end) {
vaddr 158 arch/sparc/mm/tlb.c tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
vaddr 161 arch/sparc/mm/tlb.c vaddr += PAGE_SIZE;
vaddr 22 arch/sparc/mm/tsb.c static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
vaddr 24 arch/sparc/mm/tsb.c vaddr >>= hash_shift;
vaddr 25 arch/sparc/mm/tsb.c return vaddr & (nentries - 1);
vaddr 28 arch/sparc/mm/tsb.c static inline int tag_compare(unsigned long tag, unsigned long vaddr)
vaddr 30 arch/sparc/mm/tsb.c return (tag == (vaddr >> 22));
vaddr 152 arch/sparc/mm/tsb.c void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
vaddr 165 arch/sparc/mm/tsb.c __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
vaddr 169 arch/sparc/mm/tsb.c __flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
vaddr 179 arch/sparc/mm/tsb.c __flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
vaddr 204 arch/sparc/prom/misc_64.c unsigned long tte_data, unsigned long vaddr)
vaddr 213 arch/sparc/prom/misc_64.c args[5] = vaddr;
vaddr 225 arch/sparc/prom/misc_64.c unsigned long vaddr)
vaddr 227 arch/sparc/prom/misc_64.c return tlb_load("SUNW,itlb-load", index, tte_data, vaddr);
vaddr 232 arch/sparc/prom/misc_64.c unsigned long vaddr)
vaddr 234 arch/sparc/prom/misc_64.c return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr);
vaddr 238 arch/sparc/prom/misc_64.c unsigned long vaddr, unsigned long paddr)
vaddr 250 arch/sparc/prom/misc_64.c args[7] = vaddr;
vaddr 263 arch/sparc/prom/misc_64.c void prom_unmap(unsigned long size, unsigned long vaddr)
vaddr 273 arch/sparc/prom/misc_64.c args[6] = vaddr;
vaddr 32 arch/um/include/asm/page.h #define clear_user_page(page, vaddr, pg) clear_page(page)
vaddr 33 arch/um/include/asm/page.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
vaddr 93 arch/um/include/asm/pgtable.h #define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
vaddr 366 arch/um/include/asm/pgtable.h #define kpte_clear_flush(ptep, vaddr) \
vaddr 368 arch/um/include/asm/pgtable.h pte_clear(&init_mm, (vaddr), (ptep)); \
vaddr 369 arch/um/include/asm/pgtable.h __flush_tlb_one((vaddr)); \
vaddr 102 arch/um/kernel/mem.c unsigned long vaddr;
vaddr 104 arch/um/kernel/mem.c vaddr = start;
vaddr 105 arch/um/kernel/mem.c i = pgd_index(vaddr);
vaddr 106 arch/um/kernel/mem.c j = pmd_index(vaddr);
vaddr 109 arch/um/kernel/mem.c for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
vaddr 110 arch/um/kernel/mem.c pud = pud_offset(pgd, vaddr);
vaddr 113 arch/um/kernel/mem.c pmd = pmd_offset(pud, vaddr);
vaddr 114 arch/um/kernel/mem.c for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
vaddr 116 arch/um/kernel/mem.c vaddr += PMD_SIZE;
vaddr 131 arch/um/kernel/mem.c unsigned long v, vaddr = FIXADDR_USER_START;
vaddr 144 arch/um/kernel/mem.c for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
vaddr 146 arch/um/kernel/mem.c pgd = swapper_pg_dir + pgd_index(vaddr);
vaddr 147 arch/um/kernel/mem.c pud = pud_offset(pgd, vaddr);
vaddr 148 arch/um/kernel/mem.c pmd = pmd_offset(pud, vaddr);
vaddr 149 arch/um/kernel/mem.c pte = pte_offset_kernel(pmd, vaddr);
vaddr 157 arch/um/kernel/mem.c unsigned long zones_size[MAX_NR_ZONES], vaddr;
vaddr 177 arch/um/kernel/mem.c vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
vaddr 178 arch/um/kernel/mem.c fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);
vaddr 16 arch/unicore32/include/asm/cacheflush.h #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
vaddr 108 arch/unicore32/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vaddr 25 arch/unicore32/include/asm/page.h #define clear_user_page(page, vaddr, pg) clear_page(page)
vaddr 26 arch/unicore32/include/asm/page.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
vaddr 148 arch/unicore32/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (empty_zero_page)
vaddr 369 arch/x86/include/asm/io.h extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
vaddr 197 arch/x86/include/asm/kexec.h extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
vaddr 201 arch/x86/include/asm/kexec.h extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
vaddr 43 arch/x86/include/asm/mem_encrypt.h int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
vaddr 44 arch/x86/include/asm/mem_encrypt.h int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
vaddr 76 arch/x86/include/asm/mem_encrypt.h early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
vaddr 78 arch/x86/include/asm/mem_encrypt.h early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
vaddr 25 arch/x86/include/asm/page.h static inline void clear_user_page(void *page, unsigned long vaddr,
vaddr 31 arch/x86/include/asm/page.h static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
vaddr 37 arch/x86/include/asm/page.h #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
vaddr 38 arch/x86/include/asm/page.h alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
vaddr 51 arch/x86/include/asm/pgtable.h #define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
vaddr 684 arch/x86/include/asm/pgtable.h pmd_t *populate_extra_pmd(unsigned long vaddr);
vaddr 685 arch/x86/include/asm/pgtable.h pte_t *populate_extra_pte(unsigned long vaddr);
vaddr 60 arch/x86/include/asm/pgtable_32.h #define kpte_clear_flush(ptep, vaddr) \
vaddr 62 arch/x86/include/asm/pgtable_32.h pte_clear(&init_mm, (vaddr), (ptep)); \
vaddr 63 arch/x86/include/asm/pgtable_32.h __flush_tlb_one_kernel((vaddr)); \
vaddr 56 arch/x86/include/asm/pgtable_64.h void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
vaddr 57 arch/x86/include/asm/pgtable_64.h void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
vaddr 529 arch/x86/include/asm/pgtable_types.h void set_pte_vaddr(unsigned long vaddr, pte_t pte);
vaddr 669 arch/x86/include/asm/uv/uv_bau.h unsigned long (*bau_gpa_to_offset)(unsigned long vaddr);
vaddr 355 arch/x86/include/asm/xen/page.h unsigned long arbitrary_virt_to_mfn(void *vaddr);
vaddr 356 arch/x86/include/asm/xen/page.h void make_lowmem_page_readonly(void *vaddr);
vaddr 357 arch/x86/include/asm/xen/page.h void make_lowmem_page_readwrite(void *vaddr);
vaddr 470 arch/x86/kernel/amd_gart_64.c void *vaddr;
vaddr 472 arch/x86/kernel/amd_gart_64.c vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs);
vaddr 473 arch/x86/kernel/amd_gart_64.c if (!vaddr ||
vaddr 475 arch/x86/kernel/amd_gart_64.c return vaddr;
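Aside: the arch/sparc/mm/tsb.c lines above show the whole of the TSB's direct-mapped lookup arithmetic. A self-contained sketch of tsb_hash() as quoted there, with illustrative sizes (the real hash_shift and nentries depend on page size and TSB configuration):

    #include <assert.h>
    #include <stdio.h>

    /* Direct-mapped TSB index, as in arch/sparc/mm/tsb.c: shift out the
     * page offset, then mask by the table size (a power of two). */
    static unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift,
                                  unsigned long nentries)
    {
            vaddr >>= hash_shift;
            return vaddr & (nentries - 1);
    }

    int main(void)
    {
            /* 8K entries and a 13-bit shift: illustrative values only. */
            unsigned long nentries = 8192, hash_shift = 13;
            unsigned long a = 0x7f0000002000UL;

            printf("entry %lu\n", tsb_hash(a, hash_shift, nentries));
            /* Addresses one mapping unit apart land in adjacent slots. */
            assert(tsb_hash(a + (1UL << hash_shift), hash_shift, nentries) ==
                   (tsb_hash(a, hash_shift, nentries) + 1) % nentries);
            return 0;
    }

The companion tag_compare() entry (tag == vaddr >> 22) is the hit test: because many addresses fold onto one slot, the stored tag must be checked before the translation is trusted.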
vaddr 477 arch/x86/kernel/amd_gart_64.c *dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
vaddr 482 arch/x86/kernel/amd_gart_64.c return vaddr;
vaddr 484 arch/x86/kernel/amd_gart_64.c dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs);
vaddr 490 arch/x86/kernel/amd_gart_64.c gart_free_coherent(struct device *dev, size_t size, void *vaddr,
vaddr 494 arch/x86/kernel/amd_gart_64.c dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
vaddr 54 arch/x86/kernel/crash_dump_32.c void *vaddr;
vaddr 62 arch/x86/kernel/crash_dump_32.c vaddr = kmap_atomic_pfn(pfn);
vaddr 65 arch/x86/kernel/crash_dump_32.c memcpy(buf, (vaddr + offset), csize);
vaddr 66 arch/x86/kernel/crash_dump_32.c kunmap_atomic(vaddr);
vaddr 71 arch/x86/kernel/crash_dump_32.c kunmap_atomic(vaddr);
vaddr 74 arch/x86/kernel/crash_dump_32.c copy_page(kdump_buf_page, vaddr);
vaddr 75 arch/x86/kernel/crash_dump_32.c kunmap_atomic(vaddr);
vaddr 18 arch/x86/kernel/crash_dump_64.c void *vaddr;
vaddr 24 arch/x86/kernel/crash_dump_64.c vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
vaddr 26 arch/x86/kernel/crash_dump_64.c vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
vaddr 28 arch/x86/kernel/crash_dump_64.c if (!vaddr)
vaddr 32 arch/x86/kernel/crash_dump_64.c if (copy_to_user((void __user *)buf, vaddr + offset, csize)) {
vaddr 33 arch/x86/kernel/crash_dump_64.c iounmap((void __iomem *)vaddr);
vaddr 37 arch/x86/kernel/crash_dump_64.c memcpy(buf, vaddr + offset, csize);
vaddr 40 arch/x86/kernel/crash_dump_64.c iounmap((void __iomem *)vaddr);
vaddr 200 arch/x86/kernel/early_printk.c u32 __iomem *vaddr = (u32 __iomem *)addr;
vaddr 202 arch/x86/kernel/early_printk.c writel(value, vaddr + offset);
vaddr 207 arch/x86/kernel/early_printk.c u32 __iomem *vaddr = (u32 __iomem *)addr;
vaddr 209 arch/x86/kernel/early_printk.c return readl(vaddr + offset);
vaddr 116 arch/x86/kernel/head64.c unsigned long vaddr, vaddr_end;
vaddr 267 arch/x86/kernel/head64.c vaddr = (unsigned long)__start_bss_decrypted;
vaddr 269 arch/x86/kernel/head64.c for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
vaddr 270 arch/x86/kernel/head64.c i = pmd_index(vaddr);
vaddr 93 arch/x86/kernel/machine_kexec_32.c unsigned long vaddr, unsigned long paddr)
vaddr 98 arch/x86/kernel/machine_kexec_32.c pgd += pgd_index(vaddr);
vaddr 103 arch/x86/kernel/machine_kexec_32.c p4d = p4d_offset(pgd, vaddr);
vaddr 104 arch/x86/kernel/machine_kexec_32.c pud = pud_offset(p4d, vaddr);
vaddr 105 arch/x86/kernel/machine_kexec_32.c pmd = pmd_offset(pud, vaddr);
vaddr 108 arch/x86/kernel/machine_kexec_32.c pte = pte_offset_kernel(pmd, vaddr);
vaddr 127 arch/x86/kernel/machine_kexec_64.c unsigned long vaddr, paddr;
vaddr 134 arch/x86/kernel/machine_kexec_64.c vaddr = (unsigned long)relocate_kernel;
vaddr 136 arch/x86/kernel/machine_kexec_64.c pgd += pgd_index(vaddr);
vaddr 144 arch/x86/kernel/machine_kexec_64.c p4d = p4d_offset(pgd, vaddr);
vaddr 152 arch/x86/kernel/machine_kexec_64.c pud = pud_offset(p4d, vaddr);
vaddr 160 arch/x86/kernel/machine_kexec_64.c pmd = pmd_offset(pud, vaddr);
vaddr 168 arch/x86/kernel/machine_kexec_64.c pte = pte_offset_kernel(pmd, vaddr);
vaddr 666 arch/x86/kernel/machine_kexec_64.c int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
vaddr 676 arch/x86/kernel/machine_kexec_64.c return set_memory_decrypted((unsigned long)vaddr, pages);
vaddr 679 arch/x86/kernel/machine_kexec_64.c void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
vaddr 688 arch/x86/kernel/machine_kexec_64.c set_memory_encrypted((unsigned long)vaddr, pages);
vaddr 255 arch/x86/kernel/pci-calgary_64.c void *vaddr, unsigned int npages, int direction)
vaddr 268 arch/x86/kernel/pci-calgary_64.c ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);
vaddr 271 arch/x86/kernel/pci-calgary_64.c tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
vaddr 355 arch/x86/kernel/pci-calgary_64.c unsigned long vaddr;
vaddr 363 arch/x86/kernel/pci-calgary_64.c vaddr = (unsigned long) sg_virt(s);
vaddr 364 arch/x86/kernel/pci-calgary_64.c npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
vaddr 376 arch/x86/kernel/pci-calgary_64.c tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir);
vaddr 396 arch/x86/kernel/pci-calgary_64.c void *vaddr = page_address(page) + offset;
vaddr 401 arch/x86/kernel/pci-calgary_64.c uaddr = (unsigned long)vaddr;
vaddr 404 arch/x86/kernel/pci-calgary_64.c return iommu_alloc(dev, tbl, vaddr, npages, dir);
vaddr 450 arch/x86/kernel/pci-calgary_64.c void *vaddr, dma_addr_t dma_handle,
vaddr 460 arch/x86/kernel/pci-calgary_64.c free_pages((unsigned long)vaddr, get_order(size));
vaddr 104 arch/x86/kernel/tboot.c static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
vaddr 113 arch/x86/kernel/tboot.c pgd = pgd_offset(&tboot_mm, vaddr);
vaddr 114 arch/x86/kernel/tboot.c p4d = p4d_alloc(&tboot_mm, pgd, vaddr);
vaddr 117 arch/x86/kernel/tboot.c pud = pud_alloc(&tboot_mm, p4d, vaddr);
vaddr 120 arch/x86/kernel/tboot.c pmd = pmd_alloc(&tboot_mm, pud, vaddr);
vaddr 123 arch/x86/kernel/tboot.c pte = pte_alloc_map(&tboot_mm, pmd, vaddr);
vaddr 126 arch/x86/kernel/tboot.c set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
vaddr 142 arch/x86/kernel/tboot.c static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn,
vaddr 150 arch/x86/kernel/tboot.c for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) {
vaddr 151 arch/x86/kernel/tboot.c if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC))
vaddr 476 arch/x86/kernel/uprobes.c *sr = utask->vaddr + auprobe->defparam.ilen;
vaddr 559 arch/x86/kernel/uprobes.c long correction = utask->vaddr - utask->xol_vaddr;
vaddr 563 arch/x86/kernel/uprobes.c if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
vaddr 976 arch/x86/kernel/uprobes.c regs->ip = utask->vaddr;
vaddr 1038 arch/x86/kernel/uprobes.c regs->ip = utask->vaddr;
vaddr 3990 arch/x86/kvm/mmu.c static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
vaddr 3995 arch/x86/kvm/mmu.c return vaddr;
vaddr 3998 arch/x86/kvm/mmu.c static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
vaddr 4004 arch/x86/kvm/mmu.c return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
vaddr 149 arch/x86/kvm/paging_tmpl.h unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
vaddr 154 arch/x86/kvm/paging_tmpl.h vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
vaddr 159 arch/x86/kvm/paging_tmpl.h pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
vaddr 969 arch/x86/kvm/paging_tmpl.h static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
vaddr 979 arch/x86/kvm/paging_tmpl.h WARN_ON_ONCE(vaddr >> 32);
vaddr 982 arch/x86/kvm/paging_tmpl.h r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
vaddr 986 arch/x86/kvm/paging_tmpl.h gpa |= vaddr & ~PAGE_MASK;
vaddr 6518 arch/x86/kvm/svm.c unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
vaddr 6535 arch/x86/kvm/svm.c vaddr = params.uaddr;
vaddr 6537 arch/x86/kvm/svm.c vaddr_end = vaddr + size;
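Aside: the machine_kexec_32.c, machine_kexec_64.c, and tboot.c entries above all repeat one descent: index the pgd with pgd_index(vaddr), then move down through p4d, pud, and pmd to the pte. A userspace sketch of the index arithmetic only, using x86-64 4-level 4KB-paging constants; the real walk also dereferences each table and checks present bits, which is omitted here:

    #include <stdio.h>

    /* x86-64 4-level, 4KB-page layout: 9 index bits per level. */
    #define PAGE_SHIFT 12
    #define PTE_SHIFT  PAGE_SHIFT        /* bits 12..20 */
    #define PMD_SHIFT  (PTE_SHIFT + 9)   /* bits 21..29 */
    #define PUD_SHIFT  (PMD_SHIFT + 9)   /* bits 30..38 */
    #define PGD_SHIFT  (PUD_SHIFT + 9)   /* bits 39..47 */
    #define IDX_MASK   0x1ffUL           /* 9 bits per table */

    /* Table index for vaddr at the level selected by shift. */
    static unsigned long idx(unsigned long vaddr, int shift)
    {
            return (vaddr >> shift) & IDX_MASK;
    }

    int main(void)
    {
            unsigned long vaddr = 0x00007f1234567000UL;

            printf("pgd=%lu pud=%lu pmd=%lu pte=%lu offset=%lu\n",
                   idx(vaddr, PGD_SHIFT), idx(vaddr, PUD_SHIFT),
                   idx(vaddr, PMD_SHIFT), idx(vaddr, PTE_SHIFT),
                   vaddr & ((1UL << PAGE_SHIFT) - 1));
            return 0;
    }

The same decomposition explains entries like pci-calgary's "(entry << PAGE_SHIFT) | (vaddr & ~PAGE_MASK)": translation swaps the upper bits while the low PAGE_SHIFT bits ride along unchanged.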
vaddr 6540 arch/x86/kvm/svm.c inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
vaddr 6554 arch/x86/kvm/svm.c for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
vaddr 6561 arch/x86/kvm/svm.c offset = vaddr & (PAGE_SIZE - 1);
vaddr 6576 arch/x86/kvm/svm.c next_vaddr = vaddr + len;
vaddr 6789 arch/x86/kvm/svm.c unsigned long __user vaddr,
vaddr 6799 arch/x86/kvm/svm.c if (!IS_ALIGNED(vaddr, 16)) {
vaddr 6805 arch/x86/kvm/svm.c (void __user *)(uintptr_t)vaddr, size)) {
vaddr 6844 arch/x86/kvm/svm.c (void __user *)(uintptr_t)vaddr, size)) {
vaddr 6867 arch/x86/kvm/svm.c unsigned long vaddr, vaddr_end, next_vaddr;
vaddr 6886 arch/x86/kvm/svm.c vaddr = debug.src_uaddr;
vaddr 6888 arch/x86/kvm/svm.c vaddr_end = vaddr + size;
vaddr 6891 arch/x86/kvm/svm.c for (; vaddr < vaddr_end; vaddr = next_vaddr) {
vaddr 6895 arch/x86/kvm/svm.c src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
vaddr 6918 arch/x86/kvm/svm.c s_off = vaddr & ~PAGE_MASK;
vaddr 6931 arch/x86/kvm/svm.c vaddr,
vaddr 6942 arch/x86/kvm/svm.c next_vaddr = vaddr + len;
vaddr 9026 arch/x86/kvm/x86.c unsigned long vaddr = tr->linear_address;
vaddr 9033 arch/x86/kvm/x86.c gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
vaddr 882 arch/x86/mm/fault.c static bool is_vsyscall_vaddr(unsigned long vaddr)
vaddr 884 arch/x86/mm/fault.c return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
vaddr 36 arch/x86/mm/highmem_32.c unsigned long vaddr;
vaddr 47 arch/x86/mm/highmem_32.c vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
vaddr 52 arch/x86/mm/highmem_32.c return (void *)vaddr;
vaddr 74 arch/x86/mm/highmem_32.c unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
vaddr 76 arch/x86/mm/highmem_32.c if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
vaddr 77 arch/x86/mm/highmem_32.c vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
vaddr 84 arch/x86/mm/highmem_32.c WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
vaddr 92 arch/x86/mm/highmem_32.c kpte_clear_flush(kmap_pte-idx, vaddr);
vaddr 98 arch/x86/mm/highmem_32.c BUG_ON(vaddr < PAGE_OFFSET);
vaddr 99 arch/x86/mm/highmem_32.c BUG_ON(vaddr >= (unsigned long)high_memory);
vaddr 109 arch/x86/mm/init_32.c pmd_t * __init populate_extra_pmd(unsigned long vaddr)
vaddr 111 arch/x86/mm/init_32.c int pgd_idx = pgd_index(vaddr);
vaddr 112 arch/x86/mm/init_32.c int pmd_idx = pmd_index(vaddr);
vaddr 117 arch/x86/mm/init_32.c pte_t * __init populate_extra_pte(unsigned long vaddr)
vaddr 119 arch/x86/mm/init_32.c int pte_idx = pte_index(vaddr);
vaddr 122 arch/x86/mm/init_32.c pmd = populate_extra_pmd(vaddr);
vaddr 134 arch/x86/mm/init_32.c unsigned long vaddr;
vaddr 139 arch/x86/mm/init_32.c vaddr = start;
vaddr 140 arch/x86/mm/init_32.c pgd_idx = pgd_index(vaddr);
vaddr 141 arch/x86/mm/init_32.c pmd_idx = pmd_index(vaddr);
vaddr 143 arch/x86/mm/init_32.c for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
vaddr 144 arch/x86/mm/init_32.c for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
vaddr 146 arch/x86/mm/init_32.c if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
vaddr 147 arch/x86/mm/init_32.c (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
vaddr 149 arch/x86/mm/init_32.c vaddr += PMD_SIZE;
vaddr 158 arch/x86/mm/init_32.c unsigned long vaddr, pte_t *lastpte,
vaddr 172 arch/x86/mm/init_32.c && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
vaddr 173 arch/x86/mm/init_32.c && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
vaddr 191 arch/x86/mm/init_32.c BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
vaddr 192 arch/x86/mm/init_32.c && vaddr > fix_to_virt(FIX_KMAP_END)
vaddr 211 arch/x86/mm/init_32.c unsigned long vaddr;
vaddr 221 arch/x86/mm/init_32.c vaddr = start;
vaddr 222 arch/x86/mm/init_32.c pgd_idx = pgd_index(vaddr);
vaddr 223 arch/x86/mm/init_32.c pmd_idx = pmd_index(vaddr);
vaddr 226 arch/x86/mm/init_32.c for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
vaddr 228 arch/x86/mm/init_32.c pmd = pmd + pmd_index(vaddr);
vaddr 229 arch/x86/mm/init_32.c for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
vaddr 232 arch/x86/mm/init_32.c pmd, vaddr, pte, &adr);
vaddr 234 arch/x86/mm/init_32.c vaddr += PMD_SIZE;
vaddr 393 arch/x86/mm/init_32.c static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
vaddr 395 arch/x86/mm/init_32.c pgd_t *pgd = pgd_offset_k(vaddr);
vaddr 396 arch/x86/mm/init_32.c p4d_t *p4d = p4d_offset(pgd, vaddr);
vaddr 397 arch/x86/mm/init_32.c pud_t *pud = pud_offset(p4d, vaddr);
vaddr 398 arch/x86/mm/init_32.c pmd_t *pmd = pmd_offset(pud, vaddr);
vaddr 399 arch/x86/mm/init_32.c return pte_offset_kernel(pmd, vaddr);
vaddr 416 arch/x86/mm/init_32.c unsigned long vaddr;
vaddr 423 arch/x86/mm/init_32.c vaddr = PKMAP_BASE;
vaddr 424 arch/x86/mm/init_32.c page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
vaddr 426 arch/x86/mm/init_32.c pgd = swapper_pg_dir + pgd_index(vaddr);
vaddr 427 arch/x86/mm/init_32.c p4d = p4d_offset(pgd, vaddr);
vaddr 428 arch/x86/mm/init_32.c pud = pud_offset(p4d, vaddr);
vaddr 429 arch/x86/mm/init_32.c pmd = pmd_offset(pud, vaddr);
vaddr 430 arch/x86/mm/init_32.c pte = pte_offset_kernel(pmd, vaddr);
vaddr 542 arch/x86/mm/init_32.c unsigned long vaddr, end;
vaddr 548 arch/x86/mm/init_32.c vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
vaddr 550 arch/x86/mm/init_32.c page_table_range_init(vaddr, end, pgd_base);
vaddr 243 arch/x86/mm/init_64.c static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
vaddr 252 arch/x86/mm/init_64.c return p4d_offset(pgd, vaddr);
vaddr 255 arch/x86/mm/init_64.c static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
vaddr 264 arch/x86/mm/init_64.c return pud_offset(p4d, vaddr);
vaddr 267 arch/x86/mm/init_64.c static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
vaddr 276 arch/x86/mm/init_64.c return pmd_offset(pud, vaddr);
vaddr 279 arch/x86/mm/init_64.c static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
vaddr 287 arch/x86/mm/init_64.c return pte_offset_kernel(pmd, vaddr);
vaddr 290 arch/x86/mm/init_64.c static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
vaddr 292 arch/x86/mm/init_64.c pmd_t *pmd = fill_pmd(pud, vaddr);
vaddr 293 arch/x86/mm/init_64.c pte_t *pte = fill_pte(pmd, vaddr);
vaddr 301 arch/x86/mm/init_64.c __flush_tlb_one_kernel(vaddr);
vaddr 304 arch/x86/mm/init_64.c void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
vaddr 306 arch/x86/mm/init_64.c p4d_t *p4d = p4d_page + p4d_index(vaddr);
vaddr 307 arch/x86/mm/init_64.c pud_t *pud = fill_pud(p4d, vaddr);
vaddr 309 arch/x86/mm/init_64.c __set_pte_vaddr(pud, vaddr, new_pte);
vaddr 312 arch/x86/mm/init_64.c void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
vaddr 314 arch/x86/mm/init_64.c pud_t *pud = pud_page + pud_index(vaddr);
vaddr 316 arch/x86/mm/init_64.c __set_pte_vaddr(pud, vaddr, new_pte);
vaddr 319 arch/x86/mm/init_64.c void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
vaddr 324 arch/x86/mm/init_64.c pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));
vaddr 326 arch/x86/mm/init_64.c pgd = pgd_offset_k(vaddr);
vaddr 334 arch/x86/mm/init_64.c set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
vaddr 337 arch/x86/mm/init_64.c pmd_t * __init populate_extra_pmd(unsigned long vaddr)
vaddr 343 arch/x86/mm/init_64.c pgd = pgd_offset_k(vaddr);
vaddr 344 arch/x86/mm/init_64.c p4d = fill_p4d(pgd, vaddr);
vaddr 345 arch/x86/mm/init_64.c pud = fill_pud(p4d, vaddr);
vaddr 346 arch/x86/mm/init_64.c return fill_pmd(pud, vaddr);
vaddr 349 arch/x86/mm/init_64.c pte_t * __init populate_extra_pte(unsigned long vaddr)
vaddr 353 arch/x86/mm/init_64.c pmd = populate_extra_pmd(vaddr);
vaddr 354 arch/x86/mm/init_64.c return fill_pte(pmd, vaddr);
vaddr 422 arch/x86/mm/init_64.c unsigned long vaddr = __START_KERNEL_map;
vaddr 435 arch/x86/mm/init_64.c for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
vaddr 438 arch/x86/mm/init_64.c if (vaddr < (unsigned long) _text || vaddr > end)
vaddr 592 arch/x86/mm/init_64.c unsigned long vaddr = (unsigned long)__va(paddr);
vaddr 593 arch/x86/mm/init_64.c int i = pud_index(vaddr);
vaddr 600 arch/x86/mm/init_64.c vaddr = (unsigned long)__va(paddr);
vaddr 601 arch/x86/mm/init_64.c pud = pud_page + pud_index(vaddr);
vaddr 674 arch/x86/mm/init_64.c unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;
vaddr 677 arch/x86/mm/init_64.c vaddr = (unsigned long)__va(paddr);
vaddr 684 arch/x86/mm/init_64.c for (; vaddr < vaddr_end; vaddr = vaddr_next) {
vaddr 685 arch/x86/mm/init_64.c p4d_t *p4d = p4d_page + p4d_index(vaddr);
vaddr 688 arch/x86/mm/init_64.c vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
vaddr 689 arch/x86/mm/init_64.c paddr = __pa(vaddr);
vaddr 728 arch/x86/mm/init_64.c unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
vaddr 731 arch/x86/mm/init_64.c vaddr = (unsigned long)__va(paddr_start);
vaddr 733 arch/x86/mm/init_64.c vaddr_start = vaddr;
vaddr 735 arch/x86/mm/init_64.c for (; vaddr < vaddr_end; vaddr = vaddr_next) {
vaddr 736 arch/x86/mm/init_64.c pgd_t *pgd = pgd_offset_k(vaddr);
vaddr 739 arch/x86/mm/init_64.c vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
vaddr 743 arch/x86/mm/init_64.c paddr_last = phys_p4d_init(p4d, __pa(vaddr),
vaddr 751 arch/x86/mm/init_64.c paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
vaddr 758 arch/x86/mm/init_64.c p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
vaddr 49 arch/x86/mm/iomap_32.c unsigned long vaddr;
vaddr 57 arch/x86/mm/iomap_32.c vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
vaddr 61 arch/x86/mm/iomap_32.c return (void *)vaddr;
vaddr 91 arch/x86/mm/iomap_32.c unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
vaddr 93 arch/x86/mm/iomap_32.c if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
vaddr 94 arch/x86/mm/iomap_32.c vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
vaddr 101 arch/x86/mm/iomap_32.c WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
vaddr 109 arch/x86/mm/iomap_32.c kpte_clear_flush(kmap_pte-idx, vaddr);
vaddr 43 arch/x86/mm/ioremap.c int ioremap_change_attr(unsigned long vaddr, unsigned long size,
vaddr 52 arch/x86/mm/ioremap.c err = _set_memory_uc(vaddr, nrpages);
vaddr 55 arch/x86/mm/ioremap.c err = _set_memory_wc(vaddr, nrpages);
vaddr 58 arch/x86/mm/ioremap.c err = _set_memory_wt(vaddr, nrpages);
vaddr 61 arch/x86/mm/ioremap.c err = _set_memory_wb(vaddr, nrpages);
vaddr 179 arch/x86/mm/ioremap.c unsigned long offset, vaddr;
vaddr 277 arch/x86/mm/ioremap.c vaddr = (unsigned long) area->addr;
vaddr 282 arch/x86/mm/ioremap.c if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
vaddr 285 arch/x86/mm/ioremap.c ret_addr = (void __iomem *) (vaddr + offset);
vaddr 511 arch/x86/mm/ioremap.c void *vaddr;
vaddr 514 arch/x86/mm/ioremap.c vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);
vaddr 517 arch/x86/mm/ioremap.c if (vaddr)
vaddr 518 arch/x86/mm/ioremap.c vaddr += offset;
vaddr 520 arch/x86/mm/ioremap.c return vaddr;
vaddr 77 arch/x86/mm/kaslr.c unsigned long vaddr_start, vaddr;
vaddr 84 arch/x86/mm/kaslr.c vaddr = vaddr_start;
vaddr 138 arch/x86/mm/kaslr.c vaddr += entropy;
vaddr 139 arch/x86/mm/kaslr.c *kaslr_regions[i].base = vaddr;
vaddr 145 arch/x86/mm/kaslr.c vaddr += get_padding(&kaslr_regions[i]);
vaddr 146 arch/x86/mm/kaslr.c vaddr = round_up(vaddr + 1, PUD_SIZE);
vaddr 155 arch/x86/mm/kaslr.c unsigned long paddr, vaddr;
vaddr 168 arch/x86/mm/kaslr.c vaddr = (unsigned long)__va(paddr);
vaddr 169 arch/x86/mm/kaslr.c pgd = pgd_offset_k(vaddr);
vaddr 171 arch/x86/mm/kaslr.c p4d = p4d_offset(pgd, vaddr);
vaddr 172 arch/x86/mm/kaslr.c pud = pud_offset(p4d, vaddr);
vaddr 119 arch/x86/mm/mem_encrypt.c static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
vaddr 122 arch/x86/mm/mem_encrypt.c unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
vaddr 130 arch/x86/mm/mem_encrypt.c __early_make_pgtable((unsigned long)vaddr, pmd);
vaddr 132 arch/x86/mm/mem_encrypt.c vaddr += PMD_SIZE;
vaddr 253 arch/x86/mm/mem_encrypt.c static int __init early_set_memory_enc_dec(unsigned long vaddr,
vaddr 262 arch/x86/mm/mem_encrypt.c vaddr_next = vaddr;
vaddr 263 arch/x86/mm/mem_encrypt.c vaddr_end = vaddr + size;
vaddr 265 arch/x86/mm/mem_encrypt.c for (; vaddr < vaddr_end; vaddr = vaddr_next) {
vaddr 266 arch/x86/mm/mem_encrypt.c kpte = lookup_address(vaddr, &level);
vaddr 274 arch/x86/mm/mem_encrypt.c vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
vaddr 287 arch/x86/mm/mem_encrypt.c if (vaddr == (vaddr & pmask) &&
vaddr 288 arch/x86/mm/mem_encrypt.c ((vaddr_end - vaddr) >= psize)) {
vaddr 290 arch/x86/mm/mem_encrypt.c vaddr_next = (vaddr & pmask) + psize;
vaddr 309 arch/x86/mm/mem_encrypt.c kernel_physical_mapping_change(__pa(vaddr & pmask),
vaddr 321 arch/x86/mm/mem_encrypt.c int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
vaddr 323 arch/x86/mm/mem_encrypt.c return early_set_memory_enc_dec(vaddr, size, false);
vaddr 326 arch/x86/mm/mem_encrypt.c int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
vaddr 328 arch/x86/mm/mem_encrypt.c return early_set_memory_enc_dec(vaddr, size, true);
vaddr 382 arch/x86/mm/mem_encrypt.c unsigned long vaddr, vaddr_end, npages;
vaddr 385 arch/x86/mm/mem_encrypt.c vaddr = (unsigned long)__start_bss_decrypted_unused;
vaddr 387 arch/x86/mm/mem_encrypt.c npages = (vaddr_end - vaddr) >> PAGE_SHIFT;
vaddr 394 arch/x86/mm/mem_encrypt.c r = set_memory_encrypted(vaddr, npages);
vaddr 401 arch/x86/mm/mem_encrypt.c free_init_pages("unused decrypted", vaddr, vaddr_end);
vaddr 69 arch/x86/mm/mem_encrypt_identity.c unsigned long vaddr;
vaddr 95 arch/x86/mm/mem_encrypt_identity.c pgd_start = ppd->vaddr & PGDIR_MASK;
vaddr 100 arch/x86/mm/mem_encrypt_identity.c pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
vaddr 112 arch/x86/mm/mem_encrypt_identity.c pgd = ppd->pgd + pgd_index(ppd->vaddr);
vaddr 120 arch/x86/mm/mem_encrypt_identity.c p4d = p4d_offset(pgd, ppd->vaddr);
vaddr 128 arch/x86/mm/mem_encrypt_identity.c pud = pud_offset(p4d, ppd->vaddr);
vaddr 151 arch/x86/mm/mem_encrypt_identity.c pmd = pmd_offset(pud, ppd->vaddr);
vaddr 168 arch/x86/mm/mem_encrypt_identity.c pmd = pmd_offset(pud, ppd->vaddr);
vaddr 179 arch/x86/mm/mem_encrypt_identity.c pte = pte_offset_map(pmd, ppd->vaddr);
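Aside: the early_set_memory_enc_dec() lines above advance vaddr by whatever the current mapping covers: a whole large-page block when vaddr is block-aligned and the remaining range is big enough, one 4K page otherwise. A simplified userspace model of that stepping logic; the mapping lookup is faked here (in the kernel it comes from lookup_address()), and the sizes are x86 defaults:

    #include <stdio.h>

    #define PAGE_SIZE (4096UL)
    #define PMD_SIZE  (2UL * 1024 * 1024)

    /* Stand-in for lookup_address(): pretend everything below 16M is
     * mapped with 2M pages and the rest with 4K pages. Illustrative only. */
    static unsigned long mapping_size(unsigned long vaddr)
    {
            return (vaddr < 8 * PMD_SIZE) ? PMD_SIZE : PAGE_SIZE;
    }

    static void walk(unsigned long vaddr, unsigned long size)
    {
            unsigned long vaddr_end = vaddr + size, vaddr_next;

            for (; vaddr < vaddr_end; vaddr = vaddr_next) {
                    unsigned long psize = mapping_size(vaddr);
                    unsigned long pmask = ~(psize - 1);

                    /* Take the big step only if the block is fully covered. */
                    if (vaddr == (vaddr & pmask) && (vaddr_end - vaddr) >= psize)
                            vaddr_next = (vaddr & pmask) + psize;
                    else
                            vaddr_next = (vaddr & ~(PAGE_SIZE - 1)) + PAGE_SIZE;

                    printf("step %#lx..%#lx\n", vaddr, vaddr_next);
            }
    }

    int main(void)
    {
            /* Misaligned start: a few 4K steps, then 2M strides, then 4K. */
            walk(3 * PMD_SIZE - 2 * PAGE_SIZE, PMD_SIZE + 4 * PAGE_SIZE);
            return 0;
    }

Stepping at the mapped level is what lets the real code change an attribute on a 2M/1G mapping in one operation instead of splitting it into 4K PTEs.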
vaddr 186 arch/x86/mm/mem_encrypt_identity.c while (ppd->vaddr < ppd->vaddr_end) {
vaddr 189 arch/x86/mm/mem_encrypt_identity.c ppd->vaddr += PMD_PAGE_SIZE;
vaddr 196 arch/x86/mm/mem_encrypt_identity.c while (ppd->vaddr < ppd->vaddr_end) {
vaddr 199 arch/x86/mm/mem_encrypt_identity.c ppd->vaddr += PAGE_SIZE;
vaddr 216 arch/x86/mm/mem_encrypt_identity.c ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
vaddr 385 arch/x86/mm/mem_encrypt_identity.c ppd.vaddr = workarea_start;
vaddr 419 arch/x86/mm/mem_encrypt_identity.c ppd.vaddr = kernel_start;
vaddr 425 arch/x86/mm/mem_encrypt_identity.c ppd.vaddr = kernel_start + decrypted_base;
vaddr 432 arch/x86/mm/mem_encrypt_identity.c ppd.vaddr = initrd_start;
vaddr 439 arch/x86/mm/mem_encrypt_identity.c ppd.vaddr = initrd_start + decrypted_base;
vaddr 446 arch/x86/mm/mem_encrypt_identity.c ppd.vaddr = workarea_start;
vaddr 451 arch/x86/mm/mem_encrypt_identity.c ppd.vaddr = workarea_start + decrypted_base;
vaddr 469 arch/x86/mm/mem_encrypt_identity.c ppd.vaddr = kernel_start + decrypted_base;
vaddr 474 arch/x86/mm/mem_encrypt_identity.c ppd.vaddr = initrd_start + decrypted_base;
vaddr 479 arch/x86/mm/mem_encrypt_identity.c ppd.vaddr = workarea_start + decrypted_base;
vaddr 36 arch/x86/mm/pageattr.c unsigned long *vaddr;
vaddr 270 arch/x86/mm/pageattr.c return cpa->vaddr[idx];
vaddr 272 arch/x86/mm/pageattr.c return *cpa->vaddr + idx * PAGE_SIZE;
vaddr 279 arch/x86/mm/pageattr.c static void clflush_cache_range_opt(void *vaddr, unsigned int size)
vaddr 282 arch/x86/mm/pageattr.c void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
vaddr 283 arch/x86/mm/pageattr.c void *vend = vaddr + size;
vaddr 300 arch/x86/mm/pageattr.c void clflush_cache_range(void *vaddr, unsigned int size)
vaddr 303 arch/x86/mm/pageattr.c clflush_cache_range_opt(vaddr, size);
vaddr 1445 arch/x86/mm/pageattr.c static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
vaddr 1454 arch/x86/mm/pageattr.c return populate_pgd(cpa, vaddr);
vaddr 1472 arch/x86/mm/pageattr.c if (within(vaddr, PAGE_OFFSET,
vaddr 1475 arch/x86/mm/pageattr.c cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
vaddr 1483 arch/x86/mm/pageattr.c "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
vaddr 1484 arch/x86/mm/pageattr.c *cpa->vaddr);
vaddr 1569 arch/x86/mm/pageattr.c unsigned long vaddr;
vaddr 1579 arch/x86/mm/pageattr.c vaddr = __cpa_addr(cpa, cpa->curpage);
vaddr 1580 arch/x86/mm/pageattr.c if (!(within(vaddr, PAGE_OFFSET,
vaddr 1584 arch/x86/mm/pageattr.c alias_cpa.vaddr = &laddr;
vaddr 1601 arch/x86/mm/pageattr.c if (!within(vaddr, (unsigned long)_text, _brk_end) &&
vaddr 1606 arch/x86/mm/pageattr.c alias_cpa.vaddr = &temp_cpa_vaddr;
vaddr 1715 arch/x86/mm/pageattr.c cpa.vaddr = addr;
vaddr 1957 arch/x86/mm/pageattr.c cpa.vaddr = &addr;
vaddr 2120 arch/x86/mm/pageattr.c struct cpa_data cpa = { .vaddr = &tempaddr,
vaddr 2139 arch/x86/mm/pageattr.c struct cpa_data cpa = { .vaddr = &tempaddr,
vaddr 2217 arch/x86/mm/pageattr.c .vaddr = &address,
vaddr 2260 arch/x86/mm/pageattr.c .vaddr = &address,
vaddr 28 arch/x86/mm/pgtable_32.c void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
vaddr 36 arch/x86/mm/pgtable_32.c pgd = swapper_pg_dir + pgd_index(vaddr);
vaddr 41 arch/x86/mm/pgtable_32.c p4d = p4d_offset(pgd, vaddr);
vaddr 46 arch/x86/mm/pgtable_32.c pud = pud_offset(p4d, vaddr);
vaddr 51 arch/x86/mm/pgtable_32.c pmd = pmd_offset(pud, vaddr);
vaddr 56 arch/x86/mm/pgtable_32.c pte = pte_offset_kernel(pmd, vaddr);
vaddr 58 arch/x86/mm/pgtable_32.c set_pte_at(&init_mm, vaddr, pte, pteval);
vaddr 60 arch/x86/mm/pgtable_32.c pte_clear(&init_mm, vaddr, pte);
vaddr 66 arch/x86/mm/pgtable_32.c __flush_tlb_one_kernel(vaddr);
vaddr 77 arch/x86/platform/efi/efi_64.c unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
vaddr 107 arch/x86/platform/efi/efi_64.c vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
vaddr 133 arch/x86/platform/efi/efi_64.c vaddr = (unsigned long)__va(addr_pud);
vaddr 135 arch/x86/platform/efi/efi_64.c pgd_k = pgd_offset_k(vaddr);
vaddr 136 arch/x86/platform/efi/efi_64.c p4d_k = p4d_offset(pgd_k, vaddr);
vaddr 137 arch/x86/platform/efi/efi_64.c pud[j] = *pud_offset(p4d_k, vaddr);
vaddr 151 arch/x86/xen/grant-table.c &xen_auto_xlat_grant_frames.vaddr,
vaddr 11 arch/x86/xen/mmu.c unsigned long arbitrary_virt_to_mfn(void *vaddr)
vaddr 13 arch/x86/xen/mmu.c xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
vaddr 18 arch/x86/xen/mmu.c xmaddr_t arbitrary_virt_to_machine(void *vaddr)
vaddr 20 arch/x86/xen/mmu.c unsigned long address = (unsigned long)vaddr;
vaddr 29 arch/x86/xen/mmu.c if (virt_addr_valid(vaddr))
vaddr 30 arch/x86/xen/mmu.c return virt_to_machine(vaddr);
vaddr 18 arch/x86/xen/mmu.h void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
vaddr 136 arch/x86/xen/mmu_pv.c void make_lowmem_page_readonly(void *vaddr)
vaddr 139 arch/x86/xen/mmu_pv.c unsigned long address = (unsigned long)vaddr;
vaddr 152 arch/x86/xen/mmu_pv.c void make_lowmem_page_readwrite(void *vaddr)
vaddr 155 arch/x86/xen/mmu_pv.c unsigned long address = (unsigned long)vaddr;
vaddr 255 arch/x86/xen/mmu_pv.c void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
vaddr 257 arch/x86/xen/mmu_pv.c set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
vaddr 1093 arch/x86/xen/mmu_pv.c static void __init xen_cleanhighmap(unsigned long vaddr,
vaddr 1097 arch/x86/xen/mmu_pv.c pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
vaddr 1101 arch/x86/xen/mmu_pv.c for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
vaddr 1102 arch/x86/xen/mmu_pv.c pmd++, vaddr += PMD_SIZE) {
vaddr 1105 arch/x86/xen/mmu_pv.c if (vaddr < (unsigned long) _text || vaddr > kernel_end)
vaddr 1118 arch/x86/xen/mmu_pv.c void *vaddr = __va(paddr);
vaddr 1119 arch/x86/xen/mmu_pv.c void *vaddr_end = vaddr + size;
vaddr 1121 arch/x86/xen/mmu_pv.c for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
vaddr 1122 arch/x86/xen/mmu_pv.c make_lowmem_page_readwrite(vaddr);
vaddr 1208 arch/x86/xen/mmu_pv.c static void __init xen_cleanmfnmap(unsigned long vaddr)
vaddr 1214 arch/x86/xen/mmu_pv.c unpin = (vaddr == 2 * PGDIR_SIZE);
vaddr 1215 arch/x86/xen/mmu_pv.c vaddr &= PMD_MASK;
vaddr 1216 arch/x86/xen/mmu_pv.c pgd = pgd_offset_k(vaddr);
vaddr 1998 arch/x86/xen/mmu_pv.c unsigned long *vaddr;
vaddr 2001 arch/x86/xen/mmu_pv.c vaddr = early_memremap_ro(addr, sizeof(val));
vaddr 2002 arch/x86/xen/mmu_pv.c val = *vaddr;
vaddr 2003 arch/x86/xen/mmu_pv.c early_memunmap(vaddr, sizeof(val));
vaddr 2012 arch/x86/xen/mmu_pv.c static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
vaddr 2021 arch/x86/xen/mmu_pv.c pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
vaddr 2027 arch/x86/xen/mmu_pv.c pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
vaddr 2033 arch/x86/xen/mmu_pv.c return pa + (vaddr & ~PUD_MASK);
vaddr 2035 arch/x86/xen/mmu_pv.c pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
vaddr 2041 arch/x86/xen/mmu_pv.c return pa + (vaddr & ~PMD_MASK);
vaddr 2043 arch/x86/xen/mmu_pv.c pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
vaddr 2049 arch/x86/xen/mmu_pv.c return pa | (vaddr & ~PAGE_MASK);
vaddr 2349 arch/x86/xen/mmu_pv.c unsigned long vaddr = __fix_to_virt(idx);
vaddr 2350 arch/x86/xen/mmu_pv.c set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
vaddr 2475 arch/x86/xen/mmu_pv.c static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
vaddr 2483 arch/x86/xen/mmu_pv.c for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
vaddr 2487 arch/x86/xen/mmu_pv.c in_frames[i] = virt_to_mfn(vaddr);
vaddr 2489 arch/x86/xen/mmu_pv.c MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
vaddr 2490 arch/x86/xen/mmu_pv.c __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
vaddr 2493 arch/x86/xen/mmu_pv.c out_frames[i] = virt_to_pfn(vaddr);
vaddr 2503 arch/x86/xen/mmu_pv.c static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
vaddr 2513 arch/x86/xen/mmu_pv.c for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
vaddr 2532 arch/x86/xen/mmu_pv.c MULTI_update_va_mapping(mcs.mc, vaddr,
vaddr 2535 arch/x86/xen/mmu_pv.c set_phys_to_machine(virt_to_pfn(vaddr), mfn);
vaddr 479 arch/x86/xen/p2m.c unsigned long vaddr;
vaddr 493 arch/x86/xen/p2m.c vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);
vaddr 499 arch/x86/xen/p2m.c pmdp = lookup_pmd_address(vaddr);
vaddr 504 arch/x86/xen/p2m.c ptechk = lookup_address(vaddr, &level);
vaddr 522 arch/x86/xen/p2m.c vaddr += PMD_SIZE;
vaddr 170 arch/xtensa/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vaddr 177 arch/xtensa/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vaddr 71 arch/xtensa/include/asm/fixmap.h static inline unsigned long virt_to_fix(const unsigned long vaddr)
vaddr 73 arch/xtensa/include/asm/fixmap.h BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
vaddr 74 arch/xtensa/include/asm/fixmap.h return __virt_to_fix(vaddr);
vaddr 79 arch/xtensa/include/asm/fixmap.h #define kmap_get_fixmap_pte(vaddr) \
vaddr 81 arch/xtensa/include/asm/fixmap.h pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
vaddr 82 arch/xtensa/include/asm/fixmap.h (vaddr) \
vaddr 141 arch/xtensa/include/asm/page.h extern void clear_page_alias(void *vaddr, unsigned long paddr);
vaddr 146 arch/xtensa/include/asm/page.h void clear_user_highpage(struct page *page, unsigned long vaddr);
vaddr 149 arch/xtensa/include/asm/page.h unsigned long vaddr, struct vm_area_struct *vma);
vaddr 151 arch/xtensa/include/asm/page.h # define clear_user_page(page, vaddr, pg) clear_page(page)
vaddr 152 arch/xtensa/include/asm/page.h # define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
vaddr 232 arch/xtensa/include/asm/pgtable.h #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
vaddr 37 arch/xtensa/kernel/pci-dma.c void *vaddr = kmap_atomic(page);
vaddr 39 arch/xtensa/kernel/pci-dma.c fn((unsigned long)vaddr + off, sz);
vaddr 40 arch/xtensa/kernel/pci-dma.c kunmap_atomic(vaddr);
vaddr 185 arch/xtensa/kernel/pci-dma.c void arch_dma_free(struct device *dev, size_t size, void *vaddr,
vaddr 191 arch/xtensa/kernel/pci-dma.c if (platform_vaddr_uncached(vaddr)) {
vaddr 192 arch/xtensa/kernel/pci-dma.c page = virt_to_page(platform_vaddr_to_cached(vaddr));
vaddr 195 arch/xtensa/kernel/pci-dma.c dma_common_free_remap(vaddr, size);
vaddr 440 arch/xtensa/kernel/setup.c unsigned long vaddr = (unsigned long)cpu_reset;
vaddr 441 arch/xtensa/kernel/setup.c unsigned long paddr = __pa(vaddr);
vaddr 442 arch/xtensa/kernel/setup.c unsigned long tmpaddr = vaddr + SZ_512M;
vaddr 525 arch/xtensa/kernel/setup.c : "a"(tmpaddr - vaddr),
vaddr 526 arch/xtensa/kernel/setup.c "a"(paddr - vaddr),
vaddr 61 arch/xtensa/mm/cache.c unsigned long vaddr)
vaddr 63 arch/xtensa/mm/cache.c if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
vaddr 81 arch/xtensa/mm/cache.c unsigned long vaddr, unsigned long *paddr)
vaddr 83 arch/xtensa/mm/cache.c if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
vaddr 85 arch/xtensa/mm/cache.c return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
vaddr 92 arch/xtensa/mm/cache.c void clear_user_highpage(struct page *page, unsigned long vaddr)
vaddr 95 arch/xtensa/mm/cache.c void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
vaddr 98 arch/xtensa/mm/cache.c kmap_invalidate_coherent(page, vaddr);
vaddr 106 arch/xtensa/mm/cache.c unsigned long vaddr, struct vm_area_struct *vma)
vaddr 109 arch/xtensa/mm/cache.c void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
vaddr 111 arch/xtensa/mm/cache.c void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
vaddr 115 arch/xtensa/mm/cache.c kmap_invalidate_coherent(dst, vaddr);
vaddr 259 arch/xtensa/mm/cache.c unsigned long vaddr, void *dst, const void *src,
vaddr 263 arch/xtensa/mm/cache.c unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
vaddr 268 arch/xtensa/mm/cache.c unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
vaddr 282 arch/xtensa/mm/cache.c unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
vaddr 295 arch/xtensa/mm/cache.c unsigned long vaddr, void *dst, const void *src,
vaddr 299 arch/xtensa/mm/cache.c unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
vaddr 307 arch/xtensa/mm/cache.c unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
vaddr 43 arch/xtensa/mm/highmem.c unsigned long vaddr;
vaddr 52 arch/xtensa/mm/highmem.c vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
vaddr 58 arch/xtensa/mm/highmem.c return (void *)vaddr;
vaddr 20 arch/xtensa/mm/ioremap.c unsigned long vaddr;
vaddr 33 arch/xtensa/mm/ioremap.c vaddr = (unsigned long)area->addr;
vaddr 36 arch/xtensa/mm/ioremap.c err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
vaddr 39 arch/xtensa/mm/ioremap.c vunmap((void *)vaddr);
vaddr 43 arch/xtensa/mm/ioremap.c flush_cache_vmap(vaddr, vaddr + size);
vaddr 44 arch/xtensa/mm/ioremap.c return (void __iomem *)(offset + vaddr);
vaddr 21 arch/xtensa/mm/kasan_init.c unsigned long vaddr = KASAN_SHADOW_START;
vaddr 22 arch/xtensa/mm/kasan_init.c pgd_t *pgd = pgd_offset_k(vaddr);
vaddr 23 arch/xtensa/mm/kasan_init.c pmd_t *pmd = pmd_offset(pgd, vaddr);
vaddr 31 arch/xtensa/mm/kasan_init.c for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
vaddr 43 arch/xtensa/mm/kasan_init.c unsigned long vaddr = (unsigned long)start;
vaddr 44 arch/xtensa/mm/kasan_init.c pgd_t *pgd = pgd_offset_k(vaddr);
vaddr 45 arch/xtensa/mm/kasan_init.c pmd_t *pmd = pmd_offset(pgd, vaddr);
vaddr 22 arch/xtensa/mm/mmu.c static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
vaddr 24 arch/xtensa/mm/mmu.c pgd_t *pgd = pgd_offset_k(vaddr);
vaddr 25 arch/xtensa/mm/mmu.c pmd_t *pmd = pmd_offset(pgd, vaddr);
vaddr 32 arch/xtensa/mm/mmu.c __func__, vaddr, n_pages);
vaddr 167 arch/xtensa/mm/tlb.c static unsigned get_pte_for_vaddr(unsigned vaddr)
vaddr 177 arch/xtensa/mm/tlb.c pgd = pgd_offset(mm, vaddr);
vaddr 180 arch/xtensa/mm/tlb.c pmd = pmd_offset(pgd, vaddr);
vaddr 183 arch/xtensa/mm/tlb.c pte = pte_offset_map(pmd, vaddr);
vaddr 31 crypto/scatterwalk.c u8 *vaddr;
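Aside: the arch/xtensa/mm/cache.c lines above revolve around virtual-alias colouring: two mappings of the same physical page can land in different cache lines unless they agree in the alias bits, so the kernel picks a temporary mapping address whose colour matches the user's vaddr (TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK)). A small sketch of the colour arithmetic; the cache geometry and scratch base here are invented for illustration:

    #include <stdio.h>

    #define PAGE_SHIFT        12UL
    #define PAGE_SIZE         (1UL << PAGE_SHIFT)
    /* Hypothetical: an 8-page virtually indexed cache way -> 3 colour bits. */
    #define DCACHE_WAY_SIZE   (8 * PAGE_SIZE)
    #define DCACHE_ALIAS_MASK (DCACHE_WAY_SIZE - PAGE_SIZE)
    #define TLBTEMP_BASE      0xc8000000UL  /* invented scratch window */

    /* Same test the xtensa code applies via its DCACHE_ALIAS_EQ() macro:
     * two addresses alias safely when their colour bits match. */
    static int alias_eq(unsigned long a, unsigned long b)
    {
            return ((a ^ b) & DCACHE_ALIAS_MASK) == 0;
    }

    int main(void)
    {
            unsigned long user_vaddr = 0x40003000UL;
            /* Pick a kernel scratch address with the same colour. */
            unsigned long t = TLBTEMP_BASE + (user_vaddr & DCACHE_ALIAS_MASK);

            printf("colour %#lx, temp mapping at %#lx\n",
                   user_vaddr & DCACHE_ALIAS_MASK, t);
            printf("aliases equal: %d\n", alias_eq(t, user_vaddr));
            return 0;
    }

Matching the colour is what lets clear_user_highpage() and copy_user_highpage() touch the page through a kernel mapping without leaving stale user-visible cache lines behind.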
vaddr 37 crypto/scatterwalk.c vaddr = scatterwalk_map(walk);
vaddr 38 crypto/scatterwalk.c memcpy_dir(buf, vaddr, len_this_page, out);
vaddr 39 crypto/scatterwalk.c scatterwalk_unmap(vaddr);
vaddr 44 crypto/skcipher.c static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
vaddr 47 crypto/skcipher.c kunmap_atomic(vaddr);
vaddr 69 drivers/acpi/apei/erst.c void __iomem *vaddr;
vaddr 792 drivers/acpi/apei/erst.c memcpy(erst_erange.vaddr, record, record->record_length);
vaddr 793 drivers/acpi/apei/erst.c rcd_erange = erst_erange.vaddr;
vaddr 830 drivers/acpi/apei/erst.c rcd_tmp = erst_erange.vaddr + offset;
vaddr 1159 drivers/acpi/apei/erst.c erst_erange.vaddr = ioremap_cache(erst_erange.base,
vaddr 1161 drivers/acpi/apei/erst.c if (!erst_erange.vaddr)
vaddr 145 drivers/acpi/apei/ghes.c static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
vaddr 147 drivers/acpi/apei/ghes.c int _idx = virt_to_fix((unsigned long)vaddr);
vaddr 291 drivers/acpi/apei/ghes.c void __iomem *vaddr;
vaddr 297 drivers/acpi/apei/ghes.c vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
vaddr 301 drivers/acpi/apei/ghes.c memcpy_fromio(buffer, vaddr + offset, trunk);
vaddr 303 drivers/acpi/apei/ghes.c memcpy_toio(vaddr + offset, buffer, trunk);
vaddr 307 drivers/acpi/apei/ghes.c ghes_unmap(vaddr, fixmap_idx);
vaddr 970 drivers/acpi/cppc_acpi.c void __iomem *vaddr = 0;
vaddr 981 drivers/acpi/cppc_acpi.c vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
vaddr 983 drivers/acpi/cppc_acpi.c vaddr = reg_res->sys_mem_vaddr;
vaddr 992 drivers/acpi/cppc_acpi.c *val = readb_relaxed(vaddr);
vaddr 995 drivers/acpi/cppc_acpi.c *val = readw_relaxed(vaddr);
vaddr 998 drivers/acpi/cppc_acpi.c *val = readl_relaxed(vaddr);
vaddr 1001 drivers/acpi/cppc_acpi.c *val = readq_relaxed(vaddr);
vaddr 1015 drivers/acpi/cppc_acpi.c void __iomem *vaddr = 0;
vaddr 1020 drivers/acpi/cppc_acpi.c vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
vaddr 1022 drivers/acpi/cppc_acpi.c vaddr = reg_res->sys_mem_vaddr;
vaddr 1031 drivers/acpi/cppc_acpi.c writeb_relaxed(val, vaddr);
vaddr 1034 drivers/acpi/cppc_acpi.c writew_relaxed(val, vaddr);
vaddr 1037 drivers/acpi/cppc_acpi.c writel_relaxed(val, vaddr);
vaddr 1040 drivers/acpi/cppc_acpi.c writeq_relaxed(val, vaddr);
vaddr 294 drivers/acpi/osl.c static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
vaddr 302 drivers/acpi/osl.c iounmap(vaddr);
vaddr 2622 drivers/block/skd_main.c void *vaddr, dma_addr_t dma_handle,
vaddr 2625 drivers/block/skd_main.c if (!vaddr)
vaddr 2630 drivers/block/skd_main.c kmem_cache_free(s, vaddr);
vaddr 366 drivers/block/xen-blkback/blkback.c vaddr(persistent_gnt->page),
vaddr 711 drivers/block/xen-blkback/blkback.c gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
vaddr 846 drivers/block/xen-blkback/blkback.c addr = vaddr(pages[i]->page);
vaddr 177 drivers/block/z2ram.c unsigned long size, paddr, vaddr;
vaddr 194 drivers/block/z2ram.c vaddr = (unsigned long)ioremap_wt(paddr, size);
vaddr 197 drivers/block/z2ram.c vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size);
vaddr 211 drivers/block/z2ram.c z2ram_map[ z2ram_size++ ] = vaddr;
vaddr 213 drivers/block/z2ram.c vaddr += Z2RAM_CHUNKSIZE;
vaddr 268 drivers/crypto/cavium/nitrox/nitrox_aead.c nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
vaddr 219 drivers/crypto/cavium/nitrox/nitrox_lib.c void *vaddr;
vaddr 226 drivers/crypto/cavium/nitrox/nitrox_lib.c vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
vaddr 227 drivers/crypto/cavium/nitrox/nitrox_lib.c if (!vaddr) {
vaddr 233 drivers/crypto/cavium/nitrox/nitrox_lib.c ctx = vaddr;
vaddr 240 drivers/crypto/cavium/nitrox/nitrox_lib.c chdr->vaddr = vaddr;
vaddr 257 drivers/crypto/cavium/nitrox/nitrox_lib.c dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
vaddr 196 drivers/crypto/cavium/nitrox/nitrox_req.h void *vaddr;
vaddr 67 drivers/crypto/cavium/nitrox/nitrox_skcipher.c nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
vaddr 692 drivers/crypto/hisilicon/sec/sec_drv.c outorder_msg = cq_ring->vaddr + ooo_read;
vaddr 694 drivers/crypto/hisilicon/sec/sec_drv.c msg = msg_ring->vaddr + q_id;
vaddr 705 drivers/crypto/hisilicon/sec/sec_drv.c msg = msg_ring->vaddr + queue->expected;
vaddr 718 drivers/crypto/hisilicon/sec/sec_drv.c outorder_msg = cq_ring->vaddr + ooo_read;
vaddr 720 drivers/crypto/hisilicon/sec/sec_drv.c msg = msg_ring->vaddr + q_id;
vaddr 870 drivers/crypto/hisilicon/sec/sec_drv.c memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
vaddr 1085 drivers/crypto/hisilicon/sec/sec_drv.c ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
vaddr 1087 drivers/crypto/hisilicon/sec/sec_drv.c if (!ring_cmd->vaddr)
vaddr 1094 drivers/crypto/hisilicon/sec/sec_drv.c ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
vaddr 1096 drivers/crypto/hisilicon/sec/sec_drv.c if (!ring_cq->vaddr) {
vaddr 1101 drivers/crypto/hisilicon/sec/sec_drv.c ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
vaddr 1103 drivers/crypto/hisilicon/sec/sec_drv.c if (!ring_db->vaddr) {
vaddr 1117 drivers/crypto/hisilicon/sec/sec_drv.c dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
vaddr 1120 drivers/crypto/hisilicon/sec/sec_drv.c dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
vaddr 1123 drivers/crypto/hisilicon/sec/sec_drv.c dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
vaddr 1133 drivers/crypto/hisilicon/sec/sec_drv.c dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
vaddr 1135 drivers/crypto/hisilicon/sec/sec_drv.c dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
vaddr 1137 drivers/crypto/hisilicon/sec/sec_drv.c dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
vaddr 183 drivers/crypto/hisilicon/sec/sec_drv.h struct sec_bd_info *vaddr;
vaddr 190 drivers/crypto/hisilicon/sec/sec_drv.h struct sec_debug_bd_info *vaddr;
vaddr 196 drivers/crypto/hisilicon/sec/sec_drv.h struct sec_out_bd_info *vaddr;
vaddr 1016 drivers/dma-buf/dma-buf.c void *vaddr)
vaddr 1021 drivers/dma-buf/dma-buf.c dmabuf->ops->unmap(dmabuf, page_num, vaddr);
vaddr 1134 drivers/dma-buf/dma-buf.c void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
vaddr 1141 drivers/dma-buf/dma-buf.c BUG_ON(dmabuf->vmap_ptr != vaddr);
vaddr 1146 drivers/dma-buf/dma-buf.c dmabuf->ops->vunmap(dmabuf, vaddr);
vaddr 105 drivers/dma-buf/udmabuf.c void *vaddr)
vaddr 107 drivers/dma-buf/udmabuf.c kunmap(vaddr);
vaddr 926 drivers/dma/at_hdmac.c void __iomem *vaddr;
vaddr 943 drivers/dma/at_hdmac.c vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
vaddr 944 drivers/dma/at_hdmac.c if (!vaddr) {
vaddr 949 drivers/dma/at_hdmac.c *(u32*)vaddr = value;
vaddr 959 drivers/dma/at_hdmac.c desc->memset_vaddr = vaddr;
vaddr 973 drivers/dma/at_hdmac.c dma_pool_free(atdma->memset_pool, vaddr, paddr);
vaddr 987 drivers/dma/at_hdmac.c void __iomem *vaddr;
vaddr 1001 drivers/dma/at_hdmac.c vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
vaddr 1002 drivers/dma/at_hdmac.c if (!vaddr) {
vaddr 1007 drivers/dma/at_hdmac.c *(u32*)vaddr = value;
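Aside: the drivers/acpi/cppc_acpi.c lines above dispatch on the register's declared bit width and call the matching read*/write* accessor. A userspace approximation of that pattern, using volatile loads in place of the kernel's read{b,w,l,q}_relaxed(); the function name and u64-widening convention mirror the shape of the code, not its exact API:

    #include <stdint.h>
    #include <stdio.h>

    /* Read a register of the given width from a mapped base, widening to
     * 64 bits, in the style of the cppc_acpi.c register accessors. */
    static int reg_read(volatile void *vaddr, int bit_width, uint64_t *val)
    {
            switch (bit_width) {
            case 8:  *val = *(volatile uint8_t  *)vaddr; break;
            case 16: *val = *(volatile uint16_t *)vaddr; break;
            case 32: *val = *(volatile uint32_t *)vaddr; break;
            case 64: *val = *(volatile uint64_t *)vaddr; break;
            default: return -1;  /* unsupported width */
            }
            return 0;
    }

    int main(void)
    {
            uint64_t backing = 0x1122334455667788ULL, val = 0;

            /* Stand-in for a mapped register; real code uses ioremap'd
             * memory and ordered accessors instead of a plain variable. */
            if (reg_read(&backing, 16, &val) == 0)
                    printf("16-bit read: %#llx\n", (unsigned long long)val);
            return 0;
    }

Dispatching on width matters because a too-wide MMIO access can fault or read adjacent registers; the ACPI table tells the driver exactly how big each access must be.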
vaddr 1036 drivers/dma/at_hdmac.c desc->memset_vaddr = vaddr;
vaddr 84 drivers/dma/dw-edma/dw-edma-core.c chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;
vaddr 712 drivers/dma/dw-edma/dw-edma-core.c dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
vaddr 53 drivers/dma/dw-edma/dw-edma-core.h void __iomem *vaddr;
vaddr 133 drivers/dma/dw-edma/dw-edma-pcie.c dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
vaddr 134 drivers/dma/dw-edma/dw-edma-pcie.c dw->rg_region.vaddr += pdata->rg_off;
vaddr 139 drivers/dma/dw-edma/dw-edma-pcie.c dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
vaddr 140 drivers/dma/dw-edma/dw-edma-pcie.c dw->ll_region.vaddr += pdata->ll_off;
vaddr 145 drivers/dma/dw-edma/dw-edma-pcie.c dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
vaddr 146 drivers/dma/dw-edma/dw-edma-pcie.c dw->dt_region.vaddr += pdata->dt_off;
vaddr 163 drivers/dma/dw-edma/dw-edma-pcie.c dw->rg_region.vaddr, &dw->rg_region.paddr);
vaddr 167 drivers/dma/dw-edma/dw-edma-pcie.c dw->ll_region.vaddr, &dw->ll_region.paddr);
vaddr 171 drivers/dma/dw-edma/dw-edma-pcie.c dw->dt_region.vaddr, &dw->dt_region.paddr);
vaddr 28 drivers/dma/dw-edma/dw-edma-v0-core.c return dw->rg_region.vaddr;
vaddr 200 drivers/dma/dw-edma/dw-edma-v0-core.c lli = chunk->ll_region.vaddr;
vaddr 292 drivers/dma/dw-edma/dw-edma-v0-debugfs.c regs = dw->rg_region.vaddr;
vaddr 91 drivers/firmware/stratix10-svc.c void *vaddr;
vaddr 182 drivers/firmware/stratix10-svc.c return pmem->vaddr;
vaddr 604 drivers/firmware/stratix10-svc.c unsigned long vaddr;
vaddr 623 drivers/firmware/stratix10-svc.c vaddr = (unsigned long)va;
vaddr 627 drivers/firmware/stratix10-svc.c if ((vaddr & page_mask) || (paddr & page_mask) ||
vaddr 638 drivers/firmware/stratix10-svc.c ret = gen_pool_add_virt(genpool, vaddr, paddr, size, -1);
vaddr 839 drivers/firmware/stratix10-svc.c if (p_mem->vaddr == p_msg->payload) {
vaddr 916 drivers/firmware/stratix10-svc.c pmem->vaddr = (void *)va;
vaddr 921 drivers/firmware/stratix10-svc.c pmem->vaddr, (unsigned int)pmem->paddr);
vaddr 940 drivers/firmware/stratix10-svc.c if (pmem->vaddr == kaddr) {
vaddr 946 drivers/firmware/stratix10-svc.c pmem->vaddr = NULL;
vaddr 60 drivers/gpio/gpio-ts5500.c #define TS5500_DIO_IN_OUT(vaddr, vbit, caddr, cbit) \
vaddr 62 drivers/gpio/gpio-ts5500.c .value_addr = vaddr, \
vaddr 95 drivers/gpio/gpio-ts5500.c #define TS5500_DIO_GROUP(vaddr, vbitfrom, caddr, cbit) \
vaddr 96 drivers/gpio/gpio-ts5500.c TS5500_DIO_IN_OUT(vaddr, vbitfrom + 0, caddr, cbit), \
vaddr 97 drivers/gpio/gpio-ts5500.c TS5500_DIO_IN_OUT(vaddr, vbitfrom + 1, caddr, cbit), \
vaddr 98 drivers/gpio/gpio-ts5500.c TS5500_DIO_IN_OUT(vaddr, vbitfrom + 2, caddr, cbit), \
vaddr 99 drivers/gpio/gpio-ts5500.c TS5500_DIO_IN_OUT(vaddr, vbitfrom + 3, caddr, cbit)
vaddr 86 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 38 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
vaddr 132 drivers/gpu/drm/ati_pcigart.c pci_gart = gart_info->table_handle->vaddr;
vaddr 133 drivers/gpu/drm/ati_pcigart.c address = gart_info->table_handle->vaddr;
vaddr 332 drivers/gpu/drm/drm_bufs.c map->handle = dmah->vaddr;
vaddr 557 drivers/gpu/drm/drm_bufs.c dmah.vaddr = map->handle;
vaddr 996 drivers/gpu/drm/drm_bufs.c (unsigned long)dmah->vaddr + PAGE_SIZE * i);
vaddr 998 drivers/gpu/drm/drm_bufs.c = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
vaddr 1009 drivers/gpu/drm/drm_bufs.c buf->address = (void *)(dmah->vaddr + offset);
vaddr 1009 drivers/gpu/drm/drm_bufs.c buf->address = (void *)(dmah->vaddr + offset);
vaddr 237 drivers/gpu/drm/drm_client.c drm_gem_vunmap(buffer->gem, buffer->vaddr);
vaddr 307 drivers/gpu/drm/drm_client.c void *vaddr;
vaddr 309 drivers/gpu/drm/drm_client.c if (buffer->vaddr)
vaddr 310 drivers/gpu/drm/drm_client.c return buffer->vaddr;
vaddr 320 drivers/gpu/drm/drm_client.c vaddr = drm_gem_vmap(buffer->gem);
vaddr 321 drivers/gpu/drm/drm_client.c if (IS_ERR(vaddr))
vaddr 322 drivers/gpu/drm/drm_client.c return vaddr;
vaddr 324 drivers/gpu/drm/drm_client.c buffer->vaddr = vaddr;
vaddr 326 drivers/gpu/drm/drm_client.c return vaddr;
vaddr 340 drivers/gpu/drm/drm_client.c drm_gem_vunmap(buffer->gem, buffer->vaddr);
vaddr 341 drivers/gpu/drm/drm_client.c buffer->vaddr = NULL;
vaddr 388 drivers/gpu/drm/drm_fb_helper.c void *dst = fb_helper->buffer->vaddr + offset;
vaddr 406 drivers/gpu/drm/drm_fb_helper.c void *vaddr;
vaddr 419 drivers/gpu/drm/drm_fb_helper.c vaddr = drm_client_buffer_vmap(helper->buffer);
vaddr 420 drivers/gpu/drm/drm_fb_helper.c if (IS_ERR(vaddr))
vaddr 2207 drivers/gpu/drm/drm_fb_helper.c void *vaddr;
vaddr 2257 drivers/gpu/drm/drm_fb_helper.c vaddr = drm_client_buffer_vmap(fb_helper->buffer);
vaddr 2258 drivers/gpu/drm/drm_fb_helper.c if (IS_ERR(vaddr))
vaddr 2259 drivers/gpu/drm/drm_fb_helper.c return PTR_ERR(vaddr);
vaddr 2261 drivers/gpu/drm/drm_fb_helper.c fbi->screen_buffer = vaddr;
vaddr 36 drivers/gpu/drm/drm_format_helper.c void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
vaddr 43 drivers/gpu/drm/drm_format_helper.c vaddr += clip_offset(clip, fb->pitches[0], cpp);
vaddr 45 drivers/gpu/drm/drm_format_helper.c memcpy(dst, vaddr, len);
vaddr 46 drivers/gpu/drm/drm_format_helper.c vaddr += fb->pitches[0];
vaddr 62 drivers/gpu/drm/drm_format_helper.c void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
vaddr 71 drivers/gpu/drm/drm_format_helper.c vaddr += offset;
vaddr 74 drivers/gpu/drm/drm_format_helper.c memcpy_toio(dst, vaddr, len);
vaddr 75 drivers/gpu/drm/drm_format_helper.c vaddr += fb->pitches[0];
vaddr 88 drivers/gpu/drm/drm_format_helper.c void drm_fb_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
vaddr 104 drivers/gpu/drm/drm_format_helper.c src = vaddr + (y * fb->pitches[0]);
vaddr 148 drivers/gpu/drm/drm_format_helper.c void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
vaddr 166 drivers/gpu/drm/drm_format_helper.c vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
vaddr 168 drivers/gpu/drm/drm_format_helper.c memcpy(sbuf, vaddr, src_len);
vaddr 170 drivers/gpu/drm/drm_format_helper.c vaddr += fb->pitches[0];
vaddr 194 drivers/gpu/drm/drm_format_helper.c void *vaddr, struct drm_framebuffer *fb,
vaddr 206 drivers/gpu/drm/drm_format_helper.c vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
vaddr 209 drivers/gpu/drm/drm_format_helper.c drm_fb_xrgb8888_to_rgb565_line(dbuf, vaddr, linepixels, swab);
vaddr 211 drivers/gpu/drm/drm_format_helper.c vaddr += fb->pitches[0];
vaddr 246 drivers/gpu/drm/drm_format_helper.c void *vaddr, struct drm_framebuffer *fb,
vaddr 258 drivers/gpu/drm/drm_format_helper.c vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
vaddr 261 drivers/gpu/drm/drm_format_helper.c drm_fb_xrgb8888_to_rgb888_line(dbuf, vaddr, linepixels);
vaddr 263 drivers/gpu/drm/drm_format_helper.c vaddr += fb->pitches[0];
vaddr 287 drivers/gpu/drm/drm_format_helper.c void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
vaddr 306 drivers/gpu/drm/drm_format_helper.c src = vaddr + (y * fb->pitches[0]);
vaddr 1242 drivers/gpu/drm/drm_gem.c void *vaddr;
vaddr 1245 drivers/gpu/drm/drm_gem.c vaddr = obj->funcs->vmap(obj);
vaddr 1247 drivers/gpu/drm/drm_gem.c vaddr = obj->dev->driver->gem_prime_vmap(obj);
vaddr 1249 drivers/gpu/drm/drm_gem.c vaddr = ERR_PTR(-EOPNOTSUPP);
vaddr 1251 drivers/gpu/drm/drm_gem.c if (!vaddr)
vaddr 1252 drivers/gpu/drm/drm_gem.c vaddr = ERR_PTR(-ENOMEM);
vaddr 1254 drivers/gpu/drm/drm_gem.c return vaddr;
vaddr 1257 drivers/gpu/drm/drm_gem.c void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 1259 drivers/gpu/drm/drm_gem.c if (!vaddr)
vaddr 1263 drivers/gpu/drm/drm_gem.c obj->funcs->vunmap(obj, vaddr);
vaddr 1265 drivers/gpu/drm/drm_gem.c obj->dev->driver->gem_prime_vunmap(obj, vaddr);
vaddr 105 drivers/gpu/drm/drm_gem_cma_helper.c cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
vaddr 107 drivers/gpu/drm/drm_gem_cma_helper.c if (!cma_obj->vaddr) {
vaddr 183 drivers/gpu/drm/drm_gem_cma_helper.c if (cma_obj->vaddr)
vaddr 184 drivers/gpu/drm/drm_gem_cma_helper.c dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
vaddr 186 drivers/gpu/drm/drm_gem_cma_helper.c } else if (cma_obj->vaddr) {
vaddr 188 drivers/gpu/drm/drm_gem_cma_helper.c cma_obj->vaddr, cma_obj->paddr);
vaddr 282 drivers/gpu/drm/drm_gem_cma_helper.c ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
vaddr 391 drivers/gpu/drm/drm_gem_cma_helper.c return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
vaddr 411 drivers/gpu/drm/drm_gem_cma_helper.c drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
vaddr 437 drivers/gpu/drm/drm_gem_cma_helper.c ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
vaddr 554 drivers/gpu/drm/drm_gem_cma_helper.c return cma_obj->vaddr;
vaddr 569 drivers/gpu/drm/drm_gem_cma_helper.c void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 637 drivers/gpu/drm/drm_gem_cma_helper.c void *vaddr;
vaddr 639 drivers/gpu/drm/drm_gem_cma_helper.c vaddr = dma_buf_vmap(attach->dmabuf);
vaddr 640 drivers/gpu/drm/drm_gem_cma_helper.c if (!vaddr) {
vaddr 647 drivers/gpu/drm/drm_gem_cma_helper.c dma_buf_vunmap(attach->dmabuf, vaddr);
vaddr 652 drivers/gpu/drm/drm_gem_cma_helper.c cma_obj->vaddr = vaddr;
vaddr 251 drivers/gpu/drm/drm_gem_shmem_helper.c return shmem->vaddr;
vaddr 258 drivers/gpu/drm/drm_gem_shmem_helper.c shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
vaddr 260 drivers/gpu/drm/drm_gem_shmem_helper.c shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
vaddr 263 drivers/gpu/drm/drm_gem_shmem_helper.c if (!shmem->vaddr) {
vaddr 269 drivers/gpu/drm/drm_gem_shmem_helper.c return shmem->vaddr;
vaddr 292 drivers/gpu/drm/drm_gem_shmem_helper.c void *vaddr;
vaddr 298 drivers/gpu/drm/drm_gem_shmem_helper.c vaddr = drm_gem_shmem_vmap_locked(shmem);
vaddr 301 drivers/gpu/drm/drm_gem_shmem_helper.c return vaddr;
vaddr 316 drivers/gpu/drm/drm_gem_shmem_helper.c dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
vaddr 318 drivers/gpu/drm/drm_gem_shmem_helper.c vunmap(shmem->vaddr);
vaddr 320 drivers/gpu/drm/drm_gem_shmem_helper.c shmem->vaddr = NULL;
vaddr 330 drivers/gpu/drm/drm_gem_shmem_helper.c void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 572 drivers/gpu/drm/drm_gem_shmem_helper.c drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
vaddr 619 drivers/gpu/drm/drm_gem_vram_helper.c void *vaddr)
vaddr 139 drivers/gpu/drm/drm_internal.h void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr);
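drm_client.c above (lines 307-341) caches the result of drm_gem_vmap() in buffer->vaddr so repeat callers get the live mapping for free, and failure travels back as an ERR_PTR rather than NULL. A hedged sketch of that caching pattern; demo_buffer is a hypothetical stand-in for struct drm_client_buffer, and drm_gem_vmap()/drm_gem_vunmap() are the DRM-internal helpers declared in drm_internal.h per the listing:

	#include <linux/err.h>
	#include <drm/drm_gem.h>

	/* Hypothetical stand-in for struct drm_client_buffer. */
	struct demo_buffer {
		struct drm_gem_object *gem;
		void *vaddr;
	};

	static void *demo_buffer_vmap(struct demo_buffer *buffer)
	{
		void *vaddr;

		/* Reuse a live mapping instead of vmapping again. */
		if (buffer->vaddr)
			return buffer->vaddr;

		vaddr = drm_gem_vmap(buffer->gem);
		if (IS_ERR(vaddr))
			return vaddr;	/* propagate the ERR_PTR */

		buffer->vaddr = vaddr;
		return vaddr;
	}

	static void demo_buffer_vunmap(struct demo_buffer *buffer)
	{
		drm_gem_vunmap(buffer->gem, buffer->vaddr);
		buffer->vaddr = NULL;
	}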
vaddr 207 drivers/gpu/drm/drm_mipi_dbi.c void *src = cma_obj->vaddr;
vaddr 271 drivers/gpu/drm/drm_mipi_dbi.c tr = cma_obj->vaddr;
vaddr 67 drivers/gpu/drm/drm_pci.c dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
vaddr 71 drivers/gpu/drm/drm_pci.c if (dmah->vaddr == NULL) {
vaddr 88 drivers/gpu/drm/drm_pci.c if (dmah->vaddr)
vaddr 89 drivers/gpu/drm/drm_pci.c dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
vaddr 672 drivers/gpu/drm/drm_prime.c void *vaddr;
vaddr 674 drivers/gpu/drm/drm_prime.c vaddr = drm_gem_vmap(obj);
vaddr 675 drivers/gpu/drm/drm_prime.c if (IS_ERR(vaddr))
vaddr 676 drivers/gpu/drm/drm_prime.c vaddr = NULL;
vaddr 678 drivers/gpu/drm/drm_prime.c return vaddr;
vaddr 690 drivers/gpu/drm/drm_prime.c void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
vaddr 694 drivers/gpu/drm/drm_prime.c drm_gem_vunmap(obj, vaddr);
vaddr 287 drivers/gpu/drm/drm_vm.c dmah.vaddr = map->handle;
vaddr 26 drivers/gpu/drm/etnaviv/etnaviv_buffer.c u32 *vaddr = (u32 *)buffer->vaddr;
vaddr 30 drivers/gpu/drm/etnaviv/etnaviv_buffer.c vaddr[buffer->user_size / 4] = data;
vaddr 118 drivers/gpu/drm/etnaviv/etnaviv_buffer.c u32 *ptr = buf->vaddr + off;
vaddr 138 drivers/gpu/drm/etnaviv/etnaviv_buffer.c u32 *lw = buffer->vaddr + wl_offset;
vaddr 466 drivers/gpu/drm/etnaviv/etnaviv_buffer.c cmdbuf->vaddr);
vaddr 470 drivers/gpu/drm/etnaviv/etnaviv_buffer.c cmdbuf->vaddr, cmdbuf->size, 0);
vaddr 472 drivers/gpu/drm/etnaviv/etnaviv_buffer.c pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
vaddr 23 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c void *vaddr;
vaddr 48 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c suballoc->vaddr = dma_alloc_wc(dev, SUBALLOC_SIZE,
vaddr 50 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c if (!suballoc->vaddr) {
vaddr 80 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c dma_free_wc(suballoc->dev, SUBALLOC_SIZE, suballoc->vaddr,
vaddr 113 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
vaddr 22 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h void *vaddr;
vaddr 163 drivers/gpu/drm/etnaviv/etnaviv_drv.c u32 *ptr = buf->vaddr;
vaddr 167 drivers/gpu/drm/etnaviv/etnaviv_drv.c buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
vaddr 55 drivers/gpu/drm/etnaviv/etnaviv_drv.h void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
vaddr 173 drivers/gpu/drm/etnaviv/etnaviv_dump.c etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
vaddr 179 drivers/gpu/drm/etnaviv/etnaviv_dump.c submit->cmdbuf.vaddr, submit->cmdbuf.size,
vaddr 199 drivers/gpu/drm/etnaviv/etnaviv_dump.c void *vaddr;
vaddr 218 drivers/gpu/drm/etnaviv/etnaviv_dump.c vaddr = etnaviv_gem_vmap(&obj->base);
vaddr 219 drivers/gpu/drm/etnaviv/etnaviv_dump.c if (vaddr)
vaddr 220 drivers/gpu/drm/etnaviv/etnaviv_dump.c memcpy(iter.data, vaddr, obj->base.size);
vaddr 336 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (etnaviv_obj->vaddr)
vaddr 337 drivers/gpu/drm/etnaviv/etnaviv_gem.c return etnaviv_obj->vaddr;
vaddr 344 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (!etnaviv_obj->vaddr)
vaddr 345 drivers/gpu/drm/etnaviv/etnaviv_gem.c etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
vaddr 348 drivers/gpu/drm/etnaviv/etnaviv_gem.c return etnaviv_obj->vaddr;
vaddr 464 drivers/gpu/drm/etnaviv/etnaviv_gem.c off, etnaviv_obj->vaddr, obj->size);
vaddr 507 drivers/gpu/drm/etnaviv/etnaviv_gem.c vunmap(etnaviv_obj->vaddr);
vaddr 48 drivers/gpu/drm/etnaviv/etnaviv_gem.h void *vaddr;
vaddr 30 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 73 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c if (etnaviv_obj->vaddr)
vaddr 75 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c etnaviv_obj->vaddr);
vaddr 574 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
vaddr 531 drivers/gpu/drm/exynos/exynos_drm_gem.c void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 119 drivers/gpu/drm/exynos/exynos_drm_gem.h void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
vaddr 181 drivers/gpu/drm/i915/display/intel_fbdev.c void __iomem *vaddr;
vaddr 243 drivers/gpu/drm/i915/display/intel_fbdev.c vaddr = i915_vma_pin_iomap(vma);
vaddr 244 drivers/gpu/drm/i915/display/intel_fbdev.c if (IS_ERR(vaddr)) {
vaddr 246 drivers/gpu/drm/i915/display/intel_fbdev.c ret = PTR_ERR(vaddr);
vaddr 249 drivers/gpu/drm/i915/display/intel_fbdev.c info->screen_base = vaddr;
vaddr 88 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
vaddr 247 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c unsigned long vaddr; /** Current kmap address */
vaddr 901 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->vaddr = 0;
vaddr 950 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c void *vaddr;
vaddr 955 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (!cache->vaddr)
vaddr 958 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = unmask_page(cache->vaddr);
vaddr 959 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->vaddr & KMAP) {
vaddr 960 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->vaddr & CLFLUSH_AFTER)
vaddr 963 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c kunmap_atomic(vaddr);
vaddr 969 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c io_mapping_unmap_atomic((void __iomem *)vaddr);
vaddr 981 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->vaddr = 0;
vaddr 989 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c void *vaddr;
vaddr 991 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->vaddr) {
vaddr 992 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c kunmap_atomic(unmask_page(cache->vaddr));
vaddr 1004 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->vaddr = flushes | KMAP;
vaddr 1010 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
vaddr 1011 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
vaddr 1014 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return vaddr;
vaddr 1023 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c void *vaddr;
vaddr 1025 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->vaddr) {
vaddr 1027 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
vaddr 1072 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
vaddr 1075 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->vaddr = (unsigned long)vaddr;
vaddr 1077 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return vaddr;
vaddr 1084 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c void *vaddr;
vaddr 1087 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = unmask_page(cache->vaddr);
vaddr 1089 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = NULL;
vaddr 1090 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if ((cache->vaddr & KMAP) == 0)
vaddr 1091 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = reloc_iomap(obj, cache, page);
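The i915_gem_execbuffer.c entries above keep one unsigned long cache->vaddr that encodes both the mapped address and status flags (KMAP, CLFLUSH_AFTER) in the low, page-offset bits, recovered with unmask_page()/unmask_flags(). A standalone sketch of that tagging trick with made-up flag names; only the idea of stuffing flags into the free low bits of a page-aligned pointer is taken from the listing:

	#include <linux/kernel.h>
	#include <linux/bits.h>
	#include <linux/mm.h>

	/* Illustrative flags living in the low bits of a page-aligned address. */
	#define DEMO_KMAP		BIT(0)
	#define DEMO_FLUSH_AFTER	BIT(1)

	static inline void *demo_unmask_page(unsigned long tagged)
	{
		return (void *)(tagged & PAGE_MASK);	/* drop the flag bits */
	}

	static inline unsigned long demo_unmask_flags(unsigned long tagged)
	{
		return tagged & ~PAGE_MASK;		/* keep only the flags */
	}

	static unsigned long demo_pack(void *vaddr, unsigned long flags)
	{
		/* The mapping must be page aligned for the low bits to be free. */
		WARN_ON((unsigned long)vaddr & ~PAGE_MASK);
		return (unsigned long)vaddr | (flags & ~PAGE_MASK);
	}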
vaddr 1092 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (!vaddr)
vaddr 1093 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = reloc_kmap(obj, cache, page);
vaddr 1096 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return vaddr;
vaddr 1267 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c void *vaddr;
vaddr 1269 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (!eb->reloc_cache.vaddr &&
vaddr 1329 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
vaddr 1330 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (IS_ERR(vaddr))
vaddr 1331 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return PTR_ERR(vaddr);
vaddr 1333 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c clflush_write32(vaddr + offset_in_page(offset),
vaddr 1335 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->reloc_cache.vaddr);
vaddr 27 drivers/gpu/drm/i915/gem/i915_gem_phys.c void *vaddr;
vaddr 39 drivers/gpu/drm/i915/gem/i915_gem_phys.c vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
vaddr 42 drivers/gpu/drm/i915/gem/i915_gem_phys.c if (!vaddr)
vaddr 56 drivers/gpu/drm/i915/gem/i915_gem_phys.c sg_assign_page(sg, (struct page *)vaddr);
vaddr 60 drivers/gpu/drm/i915/gem/i915_gem_phys.c dst = vaddr;
vaddr 89 drivers/gpu/drm/i915/gem/i915_gem_phys.c vaddr, dma);
vaddr 98 drivers/gpu/drm/i915/gem/i915_gem_phys.c void *vaddr = sg_page(pages->sgl);
vaddr 104 drivers/gpu/drm/i915/gem/i915_gem_phys.c void *src = vaddr;
vaddr 135 drivers/gpu/drm/i915/gem/i915_gem_phys.c vaddr, dma);
vaddr 370 drivers/gpu/drm/i915/gem/i915_gem_shmem.c void *data, *vaddr;
vaddr 393 drivers/gpu/drm/i915/gem/i915_gem_shmem.c vaddr = kmap_atomic(page);
vaddr 394 drivers/gpu/drm/i915/gem/i915_gem_shmem.c unwritten = __copy_from_user_inatomic(vaddr + pg,
vaddr 397 drivers/gpu/drm/i915/gem/i915_gem_shmem.c kunmap_atomic(vaddr);
vaddr 551 drivers/gpu/drm/i915/gem/i915_gem_shmem.c void *pgdata, *vaddr;
vaddr 559 drivers/gpu/drm/i915/gem/i915_gem_shmem.c vaddr = kmap(page);
vaddr 560 drivers/gpu/drm/i915/gem/i915_gem_shmem.c memcpy(vaddr, data, len);
vaddr 1454 drivers/gpu/drm/i915/gem/selftests/huge_pages.c u32 *vaddr;
vaddr 1471 drivers/gpu/drm/i915/gem/selftests/huge_pages.c vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
vaddr 1472 drivers/gpu/drm/i915/gem/selftests/huge_pages.c if (IS_ERR(vaddr)) {
vaddr 1473 drivers/gpu/drm/i915/gem/selftests/huge_pages.c err = PTR_ERR(vaddr);
vaddr 1476 drivers/gpu/drm/i915/gem/selftests/huge_pages.c *vaddr = 0xdeadbeaf;
vaddr 22 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c u32 *vaddr;
vaddr 46 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
vaddr 47 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c if (IS_ERR(vaddr)) {
vaddr 48 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c err = PTR_ERR(vaddr);
vaddr 62 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c memset32(vaddr, val ^ 0xdeadbeaf,
vaddr 81 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c if (vaddr[i] != val) {
vaddr 83 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c vaddr[i], val);
vaddr 22 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c u32 *vaddr;
vaddr 52 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
vaddr 53 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c if (IS_ERR(vaddr)) {
vaddr 54 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c err = PTR_ERR(vaddr);
vaddr 62 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c memset32(vaddr, val ^ 0xdeadbeaf,
vaddr 81 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c if (vaddr[i] != val) {
vaddr 83 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c vaddr[i], val);
vaddr 113 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c u32 *vaddr;
vaddr 137 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c vaddr = i915_gem_object_pin_map(src, I915_MAP_WB);
vaddr 138 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c if (IS_ERR(vaddr)) {
vaddr 139 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c err = PTR_ERR(vaddr);
vaddr 143 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c memset32(vaddr, val,
vaddr 157 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c vaddr = i915_gem_object_pin_map(dst, I915_MAP_WB);
vaddr 158 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c if (IS_ERR(vaddr)) {
vaddr 159 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c err = PTR_ERR(vaddr);
vaddr 163 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c memset32(vaddr, val ^ 0xdeadbeaf,
vaddr 182 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c if (vaddr[i] != val) {
vaddr 184 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c vaddr[i], val);
vaddr 72 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
vaddr 76 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c vm_unmap_ram(vaddr, mock->npages);
vaddr 245 drivers/gpu/drm/i915/gt/intel_engine.h GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
vaddr 277 drivers/gpu/drm/i915/gt/intel_engine.h u32 offset = addr - rq->ring->vaddr;
vaddr 539 drivers/gpu/drm/i915/gt/intel_engine_cs.c void *vaddr;
vaddr 563 drivers/gpu/drm/i915/gt/intel_engine_cs.c vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
vaddr 564 drivers/gpu/drm/i915/gt/intel_engine_cs.c if (IS_ERR(vaddr)) {
vaddr 565 drivers/gpu/drm/i915/gt/intel_engine_cs.c ret = PTR_ERR(vaddr);
vaddr 569 drivers/gpu/drm/i915/gt/intel_engine_cs.c engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
vaddr 683 drivers/gpu/drm/i915/gt/intel_engine_cs.c frame->ring.vaddr = frame->cs;
vaddr 1338 drivers/gpu/drm/i915/gt/intel_engine_cs.c const void *vaddr = rq->ring->vaddr;
vaddr 1344 drivers/gpu/drm/i915/gt/intel_engine_cs.c memcpy(ring, vaddr + head, len);
vaddr 1347 drivers/gpu/drm/i915/gt/intel_engine_cs.c memcpy(ring + len, vaddr + head, size - len);
vaddr 90 drivers/gpu/drm/i915/gt/intel_engine_types.h void *vaddr;
vaddr 1726 drivers/gpu/drm/i915/gt/intel_lrc.c set_redzone(void *vaddr, const struct intel_engine_cs *engine)
vaddr 1731 drivers/gpu/drm/i915/gt/intel_lrc.c vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
vaddr 1732 drivers/gpu/drm/i915/gt/intel_lrc.c vaddr += engine->context_size;
vaddr 1734 drivers/gpu/drm/i915/gt/intel_lrc.c memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
vaddr 1738 drivers/gpu/drm/i915/gt/intel_lrc.c check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
vaddr 1743 drivers/gpu/drm/i915/gt/intel_lrc.c vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
vaddr 1744 drivers/gpu/drm/i915/gt/intel_lrc.c vaddr += engine->context_size;
vaddr 1746 drivers/gpu/drm/i915/gt/intel_lrc.c if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE))
vaddr 1789 drivers/gpu/drm/i915/gt/intel_lrc.c void *vaddr;
vaddr 1799 drivers/gpu/drm/i915/gt/intel_lrc.c vaddr = i915_gem_object_pin_map(ce->state->obj,
vaddr 1802 drivers/gpu/drm/i915/gt/intel_lrc.c if (IS_ERR(vaddr)) {
vaddr 1803 drivers/gpu/drm/i915/gt/intel_lrc.c ret = PTR_ERR(vaddr);
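Nearly every i915 block above follows the same return-value idiom: i915_gem_object_pin_map() never returns NULL, it returns either a usable vaddr or an error encoded in the pointer itself, which callers unpack with IS_ERR()/PTR_ERR() (or re-wrap with ERR_CAST()). A generic sketch of that idiom around a hypothetical mapping helper:

	#include <linux/err.h>
	#include <linux/slab.h>

	/* Hypothetical helper that reports failure via ERR_PTR(), not NULL. */
	static void *demo_map(size_t size)
	{
		void *vaddr = kzalloc(size, GFP_KERNEL);

		if (!vaddr)
			return ERR_PTR(-ENOMEM);  /* errno travels in the pointer */
		return vaddr;
	}

	static int demo_use(size_t size)
	{
		void *vaddr = demo_map(size);

		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);	/* unpack the negative errno */

		/* ... use vaddr ... */
		kfree(vaddr);
		return 0;
	}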
vaddr 1812 drivers/gpu/drm/i915/gt/intel_lrc.c ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
vaddr 3297 drivers/gpu/drm/i915/gt/intel_lrc.c void *vaddr;
vaddr 3301 drivers/gpu/drm/i915/gt/intel_lrc.c vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
vaddr 3302 drivers/gpu/drm/i915/gt/intel_lrc.c if (IS_ERR(vaddr)) {
vaddr 3303 drivers/gpu/drm/i915/gt/intel_lrc.c ret = PTR_ERR(vaddr);
vaddr 3308 drivers/gpu/drm/i915/gt/intel_lrc.c set_redzone(vaddr, engine);
vaddr 3326 drivers/gpu/drm/i915/gt/intel_lrc.c memcpy(vaddr + start, defaults + start, engine->context_size);
vaddr 3332 drivers/gpu/drm/i915/gt/intel_lrc.c regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
vaddr 1223 drivers/gpu/drm/i915/gt/intel_ringbuffer.c GEM_BUG_ON(ring->vaddr);
vaddr 1224 drivers/gpu/drm/i915/gt/intel_ringbuffer.c ring->vaddr = addr;
vaddr 1260 drivers/gpu/drm/i915/gt/intel_ringbuffer.c GEM_BUG_ON(!ring->vaddr);
vaddr 1261 drivers/gpu/drm/i915/gt/intel_ringbuffer.c ring->vaddr = NULL;
vaddr 1433 drivers/gpu/drm/i915/gt/intel_ringbuffer.c void *defaults, *vaddr;
vaddr 1435 drivers/gpu/drm/i915/gt/intel_ringbuffer.c vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
vaddr 1436 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(vaddr)) {
vaddr 1437 drivers/gpu/drm/i915/gt/intel_ringbuffer.c err = PTR_ERR(vaddr);
vaddr 1448 drivers/gpu/drm/i915/gt/intel_ringbuffer.c memcpy(vaddr, defaults, engine->context_size);
vaddr 1969 drivers/gpu/drm/i915/gt/intel_ringbuffer.c memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
vaddr 1976 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = ring->vaddr + ring->emit;
vaddr 29 drivers/gpu/drm/i915/gt/intel_timeline.c void *vaddr;
vaddr 133 drivers/gpu/drm/i915/gt/intel_timeline.c __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
vaddr 145 drivers/gpu/drm/i915/gt/intel_timeline.c if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
vaddr 162 drivers/gpu/drm/i915/gt/intel_timeline.c void *vaddr;
vaddr 170 drivers/gpu/drm/i915/gt/intel_timeline.c vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
vaddr 171 drivers/gpu/drm/i915/gt/intel_timeline.c if (IS_ERR(vaddr)) {
vaddr 173 drivers/gpu/drm/i915/gt/intel_timeline.c return ERR_CAST(vaddr);
vaddr 178 drivers/gpu/drm/i915/gt/intel_timeline.c cl->vaddr = page_pack_bits(vaddr, cacheline);
vaddr 200 drivers/gpu/drm/i915/gt/intel_timeline.c GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
vaddr 201 drivers/gpu/drm/i915/gt/intel_timeline.c cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
vaddr 211 drivers/gpu/drm/i915/gt/intel_timeline.c void *vaddr;
vaddr 238 drivers/gpu/drm/i915/gt/intel_timeline.c vaddr = page_mask_bits(cl->vaddr);
vaddr 242 drivers/gpu/drm/i915/gt/intel_timeline.c vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
vaddr 243 drivers/gpu/drm/i915/gt/intel_timeline.c if (IS_ERR(vaddr))
vaddr 244 drivers/gpu/drm/i915/gt/intel_timeline.c return PTR_ERR(vaddr);
vaddr 248 drivers/gpu/drm/i915/gt/intel_timeline.c memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);
vaddr 398 drivers/gpu/drm/i915/gt/intel_timeline.c void *vaddr;
vaddr 457 drivers/gpu/drm/i915/gt/intel_timeline.c vaddr = page_mask_bits(cl->vaddr);
vaddr 460 drivers/gpu/drm/i915/gt/intel_timeline.c memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
vaddr 518 drivers/gpu/drm/i915/gt/intel_timeline.c ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
vaddr 58 drivers/gpu/drm/i915/gt/mock_engine.c ring->vaddr = (void *)(ring + 1);
vaddr 73 drivers/gpu/drm/i915/gt/selftest_context.c void *vaddr;
vaddr 84 drivers/gpu/drm/i915/gt/selftest_context.c vaddr = i915_gem_object_pin_map(ce->state->obj,
vaddr 86 drivers/gpu/drm/i915/gt/selftest_context.c if (IS_ERR(vaddr)) {
vaddr 87 drivers/gpu/drm/i915/gt/selftest_context.c err = PTR_ERR(vaddr);
vaddr 105 drivers/gpu/drm/i915/gt/selftest_context.c vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
vaddr 107 drivers/gpu/drm/i915/gt/selftest_context.c vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
vaddr 108 drivers/gpu/drm/i915/gt/selftest_context.c memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
vaddr 131 drivers/gpu/drm/i915/gt/selftest_context.c if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
vaddr 55 drivers/gpu/drm/i915/gt/selftest_hangcheck.c void *vaddr;
vaddr 80 drivers/gpu/drm/i915/gt/selftest_hangcheck.c vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
vaddr 81 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (IS_ERR(vaddr)) {
vaddr 82 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = PTR_ERR(vaddr);
vaddr 85 drivers/gpu/drm/i915/gt/selftest_hangcheck.c h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr 87 drivers/gpu/drm/i915/gt/selftest_hangcheck.c vaddr = i915_gem_object_pin_map(h->obj,
vaddr 89 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (IS_ERR(vaddr)) {
vaddr 90 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = PTR_ERR(vaddr);
vaddr 93 drivers/gpu/drm/i915/gt/selftest_hangcheck.c h->batch = vaddr;
vaddr 139 drivers/gpu/drm/i915/gt/selftest_hangcheck.c void *vaddr;
vaddr 147 drivers/gpu/drm/i915/gt/selftest_hangcheck.c vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
vaddr 148 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (IS_ERR(vaddr)) {
vaddr 150 drivers/gpu/drm/i915/gt/selftest_hangcheck.c return ERR_CAST(vaddr);
vaddr 157 drivers/gpu/drm/i915/gt/selftest_hangcheck.c h->batch = vaddr;
vaddr 230 drivers/gpu/drm/i915/gt/selftest_lrc.c void *vaddr;
vaddr 258 drivers/gpu/drm/i915/gt/selftest_lrc.c vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
vaddr 259 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(vaddr)) {
vaddr 260 drivers/gpu/drm/i915/gt/selftest_lrc.c err = PTR_ERR(vaddr);
vaddr 276 drivers/gpu/drm/i915/gt/selftest_lrc.c memset(vaddr, 0, PAGE_SIZE);
vaddr 184 drivers/gpu/drm/i915/gt/selftest_workarounds.c u32 *vaddr;
vaddr 202 drivers/gpu/drm/i915/gt/selftest_workarounds.c vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
vaddr 203 drivers/gpu/drm/i915/gt/selftest_workarounds.c if (IS_ERR(vaddr)) {
vaddr 204 drivers/gpu/drm/i915/gt/selftest_workarounds.c err = PTR_ERR(vaddr);
vaddr 210 drivers/gpu/drm/i915/gt/selftest_workarounds.c u32 actual = vaddr[i];
vaddr 213 drivers/gpu/drm/i915/gt/selftest_workarounds.c print_results(engine, vaddr);
vaddr 88 drivers/gpu/drm/i915/gt/uc/intel_guc.c void *vaddr;
vaddr 94 drivers/gpu/drm/i915/gt/uc/intel_guc.c vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
vaddr 95 drivers/gpu/drm/i915/gt/uc/intel_guc.c if (IS_ERR(vaddr)) {
vaddr 97 drivers/gpu/drm/i915/gt/uc/intel_guc.c return PTR_ERR(vaddr);
vaddr 101 drivers/gpu/drm/i915/gt/uc/intel_guc.c guc->shared_data_vaddr = vaddr;
vaddr 331 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c void *vaddr;
vaddr 343 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
vaddr 344 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c if (IS_ERR(vaddr))
vaddr 345 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c return PTR_ERR(vaddr);
vaddr 347 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c log->relay.buf_addr = vaddr;
vaddr 183 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c return client->vaddr + client->doorbell_offset;
vaddr 276 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c return client->vaddr + client->proc_desc_offset;
vaddr 314 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c void *vaddr;
vaddr 322 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
vaddr 323 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c if (IS_ERR(vaddr)) {
vaddr 325 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c return PTR_ERR(vaddr);
vaddr 329 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c guc->stage_desc_pool_vaddr = vaddr;
vaddr 422 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c wqi = client->vaddr + wq_off + GUC_DB_SIZE;
vaddr 802 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c void *vaddr;
vaddr 831 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
vaddr 832 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c if (IS_ERR(vaddr)) {
vaddr 833 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c ret = PTR_ERR(vaddr);
vaddr 836 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c client->vaddr = vaddr;
vaddr 41 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h void *vaddr;
vaddr 35 drivers/gpu/drm/i915/gt/uc/intel_huc.c void *vaddr;
vaddr 57 drivers/gpu/drm/i915/gt/uc/intel_huc.c vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
vaddr 58 drivers/gpu/drm/i915/gt/uc/intel_huc.c if (IS_ERR(vaddr)) {
vaddr 60 drivers/gpu/drm/i915/gt/uc/intel_huc.c return PTR_ERR(vaddr);
vaddr 63 drivers/gpu/drm/i915/gt/uc/intel_huc.c copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
vaddr 703 drivers/gpu/drm/i915/gvt/gtt.c ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
vaddr 707 drivers/gpu/drm/i915/gvt/gtt.c ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
vaddr 852 drivers/gpu/drm/i915/gvt/gtt.c spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
vaddr 1084 drivers/gpu/drm/i915/gvt/gtt.c clear_page(spt->shadow_page.vaddr);
vaddr 239 drivers/gpu/drm/i915/gvt/gtt.h void *vaddr;
vaddr 1149 drivers/gpu/drm/i915/i915_drv.h u8 *vaddr;
vaddr 139 drivers/gpu/drm/i915/i915_gem.c void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
vaddr 148 drivers/gpu/drm/i915/i915_gem.c if (copy_from_user(vaddr, user_data, args->size))
vaddr 151 drivers/gpu/drm/i915/i915_gem.c drm_clflush_virt_range(vaddr, args->size);
vaddr 247 drivers/gpu/drm/i915/i915_gem.c char *vaddr;
vaddr 250 drivers/gpu/drm/i915/i915_gem.c vaddr = kmap(page);
vaddr 253 drivers/gpu/drm/i915/i915_gem.c drm_clflush_virt_range(vaddr + offset, len);
vaddr 255 drivers/gpu/drm/i915/i915_gem.c ret = __copy_to_user(user_data, vaddr + offset, len);
vaddr 308 drivers/gpu/drm/i915/i915_gem.c void __iomem *vaddr;
vaddr 312 drivers/gpu/drm/i915/i915_gem.c vaddr = io_mapping_map_atomic_wc(mapping, base);
vaddr 314 drivers/gpu/drm/i915/i915_gem.c (void __force *)vaddr + offset,
vaddr 316 drivers/gpu/drm/i915/i915_gem.c io_mapping_unmap_atomic(vaddr);
vaddr 318 drivers/gpu/drm/i915/i915_gem.c vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
vaddr 320 drivers/gpu/drm/i915/i915_gem.c (void __force *)vaddr + offset,
vaddr 322 drivers/gpu/drm/i915/i915_gem.c io_mapping_unmap(vaddr);
vaddr 495 drivers/gpu/drm/i915/i915_gem.c void __iomem *vaddr;
vaddr 499 drivers/gpu/drm/i915/i915_gem.c vaddr = io_mapping_map_atomic_wc(mapping, base);
vaddr 500 drivers/gpu/drm/i915/i915_gem.c unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
vaddr 502 drivers/gpu/drm/i915/i915_gem.c io_mapping_unmap_atomic(vaddr);
vaddr 504 drivers/gpu/drm/i915/i915_gem.c vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
vaddr 505 drivers/gpu/drm/i915/i915_gem.c unwritten = copy_from_user((void __force *)vaddr + offset,
vaddr 507 drivers/gpu/drm/i915/i915_gem.c io_mapping_unmap(vaddr);
vaddr 662 drivers/gpu/drm/i915/i915_gem.c char *vaddr;
vaddr 665 drivers/gpu/drm/i915/i915_gem.c vaddr = kmap(page);
vaddr 668 drivers/gpu/drm/i915/i915_gem.c drm_clflush_virt_range(vaddr + offset, len);
vaddr 670 drivers/gpu/drm/i915/i915_gem.c ret = __copy_from_user(vaddr + offset, user_data, len);
vaddr 672 drivers/gpu/drm/i915/i915_gem.c drm_clflush_virt_range(vaddr + offset, len);
vaddr 1327 drivers/gpu/drm/i915/i915_gem.c void *vaddr;
vaddr 1361 drivers/gpu/drm/i915/i915_gem.c vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
vaddr 1362 drivers/gpu/drm/i915/i915_gem.c if (IS_ERR(vaddr)) {
vaddr 1363 drivers/gpu/drm/i915/i915_gem.c err = PTR_ERR(vaddr);
vaddr 732 drivers/gpu/drm/i915/i915_gem_fence_reg.c char *vaddr;
vaddr 735 drivers/gpu/drm/i915/i915_gem_fence_reg.c vaddr = kmap(page);
vaddr 738 drivers/gpu/drm/i915/i915_gem_fence_reg.c memcpy(temp, &vaddr[i], 64);
vaddr 739 drivers/gpu/drm/i915/i915_gem_fence_reg.c memcpy(&vaddr[i], &vaddr[i + 64], 64);
vaddr 740 drivers/gpu/drm/i915/i915_gem_fence_reg.c memcpy(&vaddr[i + 64], temp, 64);
vaddr 763 drivers/gpu/drm/i915/i915_gem_gtt.c u64 * const vaddr = kmap_atomic(pdma->page);
vaddr 765 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr[idx] = encoded_entry;
vaddr 766 drivers/gpu/drm/i915/i915_gem_gtt.c kunmap_atomic(vaddr);
vaddr 996 drivers/gpu/drm/i915/i915_gem_gtt.c u64 *vaddr;
vaddr 1005 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr = kmap_atomic_px(pt);
vaddr 1006 drivers/gpu/drm/i915/i915_gem_gtt.c memset64(vaddr + gen8_pd_index(start, 0),
vaddr 1009 drivers/gpu/drm/i915/i915_gem_gtt.c kunmap_atomic(vaddr);
vaddr 1176 drivers/gpu/drm/i915/i915_gem_gtt.c gen8_pte_t *vaddr;
vaddr 1179 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
vaddr 1182 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
vaddr 1205 drivers/gpu/drm/i915/i915_gem_gtt.c kunmap_atomic(vaddr);
vaddr 1206 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
vaddr 1209 drivers/gpu/drm/i915/i915_gem_gtt.c kunmap_atomic(vaddr);
vaddr 1233 drivers/gpu/drm/i915/i915_gem_gtt.c gen8_pte_t *vaddr;
vaddr 1244 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr = kmap_atomic_px(pd);
vaddr 1259 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr = kmap_atomic_px(pt);
vaddr 1264 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr[index++] = encode | iter->dma;
vaddr 1289 drivers/gpu/drm/i915/i915_gem_gtt.c kunmap_atomic(vaddr);
vaddr 1303 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr = kmap_atomic_px(pd);
vaddr 1304 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
vaddr 1305 drivers/gpu/drm/i915/i915_gem_gtt.c kunmap_atomic(vaddr);
vaddr 1321 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
vaddr 1324 drivers/gpu/drm/i915/i915_gem_gtt.c memset64(vaddr + i, encode, 15);
vaddr 1326 drivers/gpu/drm/i915/i915_gem_gtt.c kunmap_atomic(vaddr);
vaddr 1618 drivers/gpu/drm/i915/i915_gem_gtt.c gen6_pte_t *vaddr;
vaddr 1635 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr = kmap_atomic_px(pt);
vaddr 1636 drivers/gpu/drm/i915/i915_gem_gtt.c memset32(vaddr + pte, scratch_pte, count);
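The i915_gem_gtt.c entries above write GPU page-table entries through short-lived kmap_atomic() mappings: map the page-table page, store the encoded PTEs (memset64() for runs of identical entries), and unmap immediately. A reduced sketch of that access pattern; the PTE encoding and the page argument are placeholders:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Write one 64-bit entry into a page-table page via a transient map. */
	static void demo_set_pte(struct page *pt_page, unsigned int idx,
				 u64 encoded)
	{
		u64 *vaddr = kmap_atomic(pt_page);  /* short-lived, no sleeping */

		vaddr[idx] = encoded;
		kunmap_atomic(vaddr);
	}

	/* Fill a run of entries with the same (e.g. scratch) encoding. */
	static void demo_clear_range(struct page *pt_page, unsigned int first,
				     unsigned int count, u64 scratch)
	{
		u64 *vaddr = kmap_atomic(pt_page);

		memset64(vaddr + first, scratch, count);
		kunmap_atomic(vaddr);
	}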
vaddr 1637 drivers/gpu/drm/i915/i915_gem_gtt.c kunmap_atomic(vaddr);
vaddr 1655 drivers/gpu/drm/i915/i915_gem_gtt.c gen6_pte_t *vaddr;
vaddr 1659 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
vaddr 1662 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
vaddr 1675 drivers/gpu/drm/i915/i915_gem_gtt.c kunmap_atomic(vaddr);
vaddr 1676 drivers/gpu/drm/i915/i915_gem_gtt.c vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
vaddr 1680 drivers/gpu/drm/i915/i915_gem_gtt.c kunmap_atomic(vaddr);
vaddr 660 drivers/gpu/drm/i915/i915_perf.c u8 *oa_buf_base = stream->oa_buffer.vaddr;
vaddr 871 drivers/gpu/drm/i915/i915_perf.c if (WARN_ON(!stream->oa_buffer.vaddr))
vaddr 948 drivers/gpu/drm/i915/i915_perf.c u8 *oa_buf_base = stream->oa_buffer.vaddr;
vaddr 1082 drivers/gpu/drm/i915/i915_perf.c if (WARN_ON(!stream->oa_buffer.vaddr))
vaddr 1359 drivers/gpu/drm/i915/i915_perf.c stream->oa_buffer.vaddr = NULL;
vaddr 1435 drivers/gpu/drm/i915/i915_perf.c memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
vaddr 1494 drivers/gpu/drm/i915/i915_perf.c memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
vaddr 1537 drivers/gpu/drm/i915/i915_perf.c stream->oa_buffer.vaddr =
vaddr 1539 drivers/gpu/drm/i915/i915_perf.c if (IS_ERR(stream->oa_buffer.vaddr)) {
vaddr 1540 drivers/gpu/drm/i915/i915_perf.c ret = PTR_ERR(stream->oa_buffer.vaddr);
vaddr 1546 drivers/gpu/drm/i915/i915_perf.c stream->oa_buffer.vaddr);
vaddr 1556 drivers/gpu/drm/i915/i915_perf.c stream->oa_buffer.vaddr = NULL;
vaddr 436 drivers/gpu/drm/i915/i915_request.c request->ring->vaddr + request->postfix);
vaddr 1102 drivers/gpu/drm/i915/i915_request.c void *vaddr = rq->ring->vaddr;
vaddr 1118 drivers/gpu/drm/i915/i915_request.c memset(vaddr + head, 0, rq->ring->size - head);
vaddr 1121 drivers/gpu/drm/i915/i915_request.c memset(vaddr + head, 0, rq->postfix - head);
vaddr 1190 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c u32 __iomem *vaddr;
vaddr 1192 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
vaddr 1193 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c iowrite32(n, vaddr + n);
vaddr 1194 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c io_mapping_unmap_atomic(vaddr);
vaddr 1201 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c u32 __iomem *vaddr;
vaddr 1204 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
vaddr 1205 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c val = ioread32(vaddr + n);
vaddr 1206 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c io_mapping_unmap_atomic(vaddr);
vaddr 15 drivers/gpu/drm/i915/selftests/igt_spinner.c void *vaddr;
vaddr 36 drivers/gpu/drm/i915/selftests/igt_spinner.c vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
vaddr 37 drivers/gpu/drm/i915/selftests/igt_spinner.c if (IS_ERR(vaddr)) {
vaddr 38 drivers/gpu/drm/i915/selftests/igt_spinner.c err = PTR_ERR(vaddr);
vaddr 41 drivers/gpu/drm/i915/selftests/igt_spinner.c spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr 44 drivers/gpu/drm/i915/selftests/igt_spinner.c vaddr = i915_gem_object_pin_map(spin->obj, mode);
vaddr 45 drivers/gpu/drm/i915/selftests/igt_spinner.c if (IS_ERR(vaddr)) {
vaddr 46 drivers/gpu/drm/i915/selftests/igt_spinner.c err = PTR_ERR(vaddr);
vaddr 49 drivers/gpu/drm/i915/selftests/igt_spinner.c spin->batch = vaddr;
vaddr 17 drivers/gpu/drm/lima/lima_object.h void *vaddr;
vaddr 279 drivers/gpu/drm/mediatek/mtk_drm_gem.c void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 286 drivers/gpu/drm/mediatek/mtk_drm_gem.c vunmap(vaddr);
vaddr 49 drivers/gpu/drm/mediatek/mtk_drm_gem.h void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
vaddr 290 drivers/gpu/drm/msm/msm_drv.h void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
vaddr 574 drivers/gpu/drm/msm/msm_gem.c if (!msm_obj->vaddr) {
vaddr 580 drivers/gpu/drm/msm/msm_gem.c msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
vaddr 582 drivers/gpu/drm/msm/msm_gem.c if (msm_obj->vaddr == NULL) {
vaddr 589 drivers/gpu/drm/msm/msm_gem.c return msm_obj->vaddr;
vaddr 685 drivers/gpu/drm/msm/msm_gem.c if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
vaddr 688 drivers/gpu/drm/msm/msm_gem.c vunmap(msm_obj->vaddr);
vaddr 689 drivers/gpu/drm/msm/msm_gem.c msm_obj->vaddr = NULL;
vaddr 827 drivers/gpu/drm/msm/msm_gem.c off, msm_obj->vaddr);
vaddr 910 drivers/gpu/drm/msm/msm_gem.c if (msm_obj->vaddr)
vaddr 911 drivers/gpu/drm/msm/msm_gem.c dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
vaddr 1160 drivers/gpu/drm/msm/msm_gem.c void *vaddr;
vaddr 1173 drivers/gpu/drm/msm/msm_gem.c vaddr = msm_gem_get_vaddr(obj);
vaddr 1174 drivers/gpu/drm/msm/msm_gem.c if (IS_ERR(vaddr)) {
vaddr 1176 drivers/gpu/drm/msm/msm_gem.c ret = PTR_ERR(vaddr);
vaddr 1183 drivers/gpu/drm/msm/msm_gem.c return vaddr;
vaddr 72 drivers/gpu/drm/msm/msm_gem.h void *vaddr;
vaddr 102 drivers/gpu/drm/msm/msm_gem.h return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
vaddr 30 drivers/gpu/drm/msm/msm_gem_prime.c void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 51 drivers/gpu/drm/nouveau/nouveau_prime.c void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 57 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c u32 *vaddr;
vaddr 149 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c vunmap(obj->base.vaddr);
vaddr 150 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c obj->base.vaddr = NULL;
vaddr 182 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c return node->vaddr;
vaddr 197 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c if (node->base.vaddr) {
vaddr 209 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
vaddr 211 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c if (!node->base.vaddr) {
vaddr 225 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c return node->base.vaddr;
vaddr 269 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c return node->vaddr[offset / 4];
vaddr 277 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c node->vaddr[offset / 4] = data;
vaddr 301 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c if (unlikely(!node->base.vaddr))
vaddr 305 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c node->base.vaddr, node->handle, imem->attrs);
vaddr 326 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c if (node->base.vaddr)
vaddr 397 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
vaddr 400 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c if (!node->base.vaddr) {
vaddr 4601 drivers/gpu/drm/omapdrm/dss/dispc.c void *vaddr;
vaddr 4612 drivers/gpu/drm/omapdrm/dss/dispc.c i734_buf.vaddr = dma_alloc_wc(&dispc->pdev->dev, i734_buf.size,
vaddr 4614 drivers/gpu/drm/omapdrm/dss/dispc.c if (!i734_buf.vaddr) {
vaddr 4628 drivers/gpu/drm/omapdrm/dss/dispc.c dma_free_wc(&dispc->pdev->dev, i734_buf.size, i734_buf.vaddr,
vaddr 95 drivers/gpu/drm/omapdrm/omap_gem.c void *vaddr;
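msm_gem.c and gk20a.c above (and vkms_gem.c further down) build one contiguous kernel view over a scattered page array with vmap(pages, n, VM_MAP, PAGE_KERNEL), cache it in the object's vaddr field, and tear it down with vunmap(). A small sketch of the call pair; the object layout is illustrative:

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Illustrative object: backing pages plus a cached kernel mapping. */
	struct demo_obj {
		struct page **pages;
		unsigned int npages;
		void *vaddr;
	};

	static void *demo_obj_vmap(struct demo_obj *obj)
	{
		if (obj->vaddr)
			return obj->vaddr;

		/* One contiguous kernel virtual range over scattered pages. */
		obj->vaddr = vmap(obj->pages, obj->npages, VM_MAP, PAGE_KERNEL);
		return obj->vaddr;	/* NULL on failure, as with vmap() */
	}

	static void demo_obj_vunmap(struct demo_obj *obj)
	{
		vunmap(obj->vaddr);
		obj->vaddr = NULL;
	}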
vaddr 382 drivers/gpu/drm/omapdrm/omap_gem.c unsigned long vaddr;
vaddr 414 drivers/gpu/drm/omapdrm/omap_gem.c vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
vaddr 435 drivers/gpu/drm/omapdrm/omap_gem.c vaddr += off << PAGE_SHIFT;
vaddr 464 drivers/gpu/drm/omapdrm/omap_gem.c vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
vaddr 468 drivers/gpu/drm/omapdrm/omap_gem.c vaddr += PAGE_SIZE * m;
vaddr 958 drivers/gpu/drm/omapdrm/omap_gem.c void *vaddr;
vaddr 963 drivers/gpu/drm/omapdrm/omap_gem.c if (!omap_obj->vaddr) {
vaddr 966 drivers/gpu/drm/omapdrm/omap_gem.c vaddr = ERR_PTR(ret);
vaddr 970 drivers/gpu/drm/omapdrm/omap_gem.c omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
vaddr 974 drivers/gpu/drm/omapdrm/omap_gem.c vaddr = omap_obj->vaddr;
vaddr 978 drivers/gpu/drm/omapdrm/omap_gem.c return vaddr;
vaddr 1034 drivers/gpu/drm/omapdrm/omap_gem.c omap_obj->vaddr, omap_obj->roll);
vaddr 1106 drivers/gpu/drm/omapdrm/omap_gem.c dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
vaddr 1108 drivers/gpu/drm/omapdrm/omap_gem.c } else if (omap_obj->vaddr) {
vaddr 1109 drivers/gpu/drm/omapdrm/omap_gem.c vunmap(omap_obj->vaddr);
vaddr 1208 drivers/gpu/drm/omapdrm/omap_gem.c omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
vaddr 1211 drivers/gpu/drm/omapdrm/omap_gem.c if (!omap_obj->vaddr)
vaddr 454 drivers/gpu/drm/qxl/qxl_drv.h void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
vaddr 70 drivers/gpu/drm/qxl/qxl_prime.c void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 158 drivers/gpu/drm/radeon/radeon_drv.c void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
vaddr 55 drivers/gpu/drm/radeon/radeon_prime.c void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 194 drivers/gpu/drm/rcar-du/rcar_du_vsp.c ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr, gem->paddr,
vaddr 556 drivers/gpu/drm/rockchip/rockchip_drm_gem.c void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 561 drivers/gpu/drm/rockchip/rockchip_drm_gem.c vunmap(vaddr);
vaddr 35 drivers/gpu/drm/rockchip/rockchip_drm_gem.h void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
vaddr 278 drivers/gpu/drm/sti/sti_cursor.c sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
vaddr 61 drivers/gpu/drm/tegra/falcon.c u32 *firmware_vaddr = falcon->firmware.vaddr;
vaddr 86 drivers/gpu/drm/tegra/falcon.c struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr;
vaddr 107 drivers/gpu/drm/tegra/falcon.c os = falcon->firmware.vaddr + bin->os_header_offset;
vaddr 139 drivers/gpu/drm/tegra/falcon.c falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
vaddr 141 drivers/gpu/drm/tegra/falcon.c if (IS_ERR(falcon->firmware.vaddr)) {
vaddr 143 drivers/gpu/drm/tegra/falcon.c return PTR_ERR(falcon->firmware.vaddr);
vaddr 163 drivers/gpu/drm/tegra/falcon.c falcon->firmware.paddr, falcon->firmware.vaddr);
vaddr 174 drivers/gpu/drm/tegra/falcon.c falcon->firmware.vaddr = NULL;
vaddr 186 drivers/gpu/drm/tegra/falcon.c if (falcon->firmware.vaddr) {
vaddr 189 drivers/gpu/drm/tegra/falcon.c falcon->firmware.vaddr);
vaddr 190 drivers/gpu/drm/tegra/falcon.c falcon->firmware.vaddr = NULL;
vaddr 200 drivers/gpu/drm/tegra/falcon.c if (!falcon->firmware.vaddr)
vaddr 83 drivers/gpu/drm/tegra/falcon.h dma_addr_t paddr, void *vaddr);
vaddr 97 drivers/gpu/drm/tegra/falcon.h void *vaddr;
vaddr 263 drivers/gpu/drm/tegra/fb.c bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
vaddr 265 drivers/gpu/drm/tegra/fb.c if (!bo->vaddr) {
vaddr 273 drivers/gpu/drm/tegra/fb.c info->screen_base = (void __iomem *)bo->vaddr + offset;
vaddr 353 drivers/gpu/drm/tegra/fb.c vunmap(bo->vaddr);
vaddr 354 drivers/gpu/drm/tegra/fb.c bo->vaddr = NULL;
vaddr 47 drivers/gpu/drm/tegra/gem.c if (obj->vaddr)
vaddr 48 drivers/gpu/drm/tegra/gem.c return obj->vaddr;
vaddr 60 drivers/gpu/drm/tegra/gem.c if (obj->vaddr)
vaddr 72 drivers/gpu/drm/tegra/gem.c if (obj->vaddr)
vaddr 73 drivers/gpu/drm/tegra/gem.c return obj->vaddr + page * PAGE_SIZE;
vaddr 86 drivers/gpu/drm/tegra/gem.c if (obj->vaddr)
vaddr 211 drivers/gpu/drm/tegra/gem.c } else if (bo->vaddr) {
vaddr 212 drivers/gpu/drm/tegra/gem.c dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
vaddr 267 drivers/gpu/drm/tegra/gem.c bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
vaddr 269 drivers/gpu/drm/tegra/gem.c if (!bo->vaddr) {
vaddr 464 drivers/gpu/drm/tegra/gem.c err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
vaddr 612 drivers/gpu/drm/tegra/gem.c return bo->vaddr;
vaddr 615 drivers/gpu/drm/tegra/gem.c static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
vaddr 35 drivers/gpu/drm/tegra/gem.h void *vaddr;
vaddr 135 drivers/gpu/drm/tegra/vic.c hdr = vic->falcon.firmware.vaddr;
vaddr 137 drivers/gpu/drm/tegra/vic.c hdr = vic->falcon.firmware.vaddr +
vaddr 267 drivers/gpu/drm/tiny/gm12u320.c void *vaddr;
vaddr 281 drivers/gpu/drm/tiny/gm12u320.c vaddr = drm_gem_shmem_vmap(fb->obj[0]);
vaddr 282 drivers/gpu/drm/tiny/gm12u320.c if (IS_ERR(vaddr)) {
vaddr 283 drivers/gpu/drm/tiny/gm12u320.c GM12U320_ERR("failed to vmap fb: %ld\n", PTR_ERR(vaddr));
vaddr 296 drivers/gpu/drm/tiny/gm12u320.c src = vaddr + y1 * fb->pitches[0] + x1 * 4;
vaddr 337 drivers/gpu/drm/tiny/gm12u320.c drm_gem_shmem_vunmap(fb->obj[0], vaddr);
vaddr 109 drivers/gpu/drm/tiny/ili9225.c tr = cma_obj->vaddr;
vaddr 570 drivers/gpu/drm/tiny/repaper.c drm_fb_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
vaddr 64 drivers/gpu/drm/tiny/st7586.c static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
vaddr 76 drivers/gpu/drm/tiny/st7586.c drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, clip);
vaddr 96 drivers/gpu/drm/tiny/st7586.c void *src = cma_obj->vaddr;
vaddr 124 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c unsigned long vaddr;
vaddr 290 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
vaddr 294 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);
vaddr 303 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c void *vaddr;
vaddr 312 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
vaddr 314 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c if (vaddr) {
vaddr 315 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c if (is_vmalloc_addr(vaddr))
vaddr 316 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c d_page->p = vmalloc_to_page(vaddr);
vaddr 318 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c d_page->p = virt_to_page(vaddr);
vaddr 319 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c d_page->vaddr = (unsigned long)vaddr;
vaddr 321 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
vaddr 936 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
vaddr 973 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
vaddr 1011 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
vaddr 1015 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
vaddr 1018 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
vaddr 1044 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
vaddr 1047 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
vaddr 296 drivers/gpu/drm/vc4/vc4_bo.c dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
vaddr 297 drivers/gpu/drm/vc4/vc4_bo.c bo->base.vaddr = NULL;
vaddr 421 drivers/gpu/drm/vc4/vc4_bo.c memset(bo->base.vaddr, 0, bo->base.base.size);
vaddr 554 drivers/gpu/drm/vc4/vc4_bo.c if (!bo->base.vaddr) {
vaddr 748 drivers/gpu/drm/vc4/vc4_bo.c ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
vaddr 897 drivers/gpu/drm/vc4/vc4_bo.c if (copy_from_user(bo->base.vaddr,
vaddr 906 drivers/gpu/drm/vc4/vc4_bo.c memset(bo->base.vaddr + args->size, 0,
vaddr 905 drivers/gpu/drm/vc4/vc4_gem.c exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
vaddr 909 drivers/gpu/drm/vc4/vc4_gem.c exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
vaddr 914 drivers/gpu/drm/vc4/vc4_gem.c exec->exec_bo->vaddr + bin_offset,
vaddr 58 drivers/gpu/drm/vc4/vc4_render_cl.c *(u8 *)(setup->rcl->vaddr + setup->next_offset) = val;
vaddr 64 drivers/gpu/drm/vc4/vc4_render_cl.c *(u16 *)(setup->rcl->vaddr + setup->next_offset) = val;
vaddr 70 drivers/gpu/drm/vc4/vc4_render_cl.c *(u32 *)(setup->rcl->vaddr + setup->next_offset) = val;
vaddr 789 drivers/gpu/drm/vc4/vc4_validate_shaders.c validation_state.shader = shader_obj->vaddr;
vaddr 76 drivers/gpu/drm/vgem/vgem_drv.c unsigned long vaddr = vmf->address;
vaddr 80 drivers/gpu/drm/vgem/vgem_drv.c page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
vaddr 398 drivers/gpu/drm/vgem/vgem_drv.c static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 402 drivers/gpu/drm/vgem/vgem_drv.c vunmap(vaddr);
vaddr 375 drivers/gpu/drm/virtio/virtgpu_drv.h void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
vaddr 63 drivers/gpu/drm/virtio/virtgpu_prime.c void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vaddr 104 drivers/gpu/drm/vkms/vkms_composer.c if (WARN_ON(!cursor_vkms_obj->vaddr))
vaddr 107 drivers/gpu/drm/vkms/vkms_composer.c blend(vaddr_out, cursor_vkms_obj->vaddr,
vaddr 125 drivers/gpu/drm/vkms/vkms_composer.c if (WARN_ON(!vkms_obj->vaddr)) {
vaddr 130 drivers/gpu/drm/vkms/vkms_composer.c memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);
vaddr 92 drivers/gpu/drm/vkms/vkms_drv.h void *vaddr;
vaddr 36 drivers/gpu/drm/vkms/vkms_gem.c WARN_ON(gem->vaddr);
vaddr 47 drivers/gpu/drm/vkms/vkms_gem.c unsigned long vaddr = vmf->address;
vaddr 52 drivers/gpu/drm/vkms/vkms_gem.c page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
vaddr 172 drivers/gpu/drm/vkms/vkms_gem.c WARN_ON(vkms_obj->vaddr);
vaddr 181 drivers/gpu/drm/vkms/vkms_gem.c vunmap(vkms_obj->vaddr);
vaddr 182 drivers/gpu/drm/vkms/vkms_gem.c vkms_obj->vaddr = NULL;
vaddr 197 drivers/gpu/drm/vkms/vkms_gem.c if (!vkms_obj->vaddr) {
vaddr 206 drivers/gpu/drm/vkms/vkms_gem.c vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
vaddr 207 drivers/gpu/drm/vkms/vkms_gem.c if (!vkms_obj->vaddr)
vaddr 70 drivers/gpu/drm/vmwgfx/vmwgfx_prime.c static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
vaddr 288 drivers/gpu/drm/xen/xen_drm_front_gem.c void *vaddr)
vaddr 290 drivers/gpu/drm/xen/xen_drm_front_gem.c vunmap(vaddr);
vaddr 40 drivers/gpu/drm/xen/xen_drm_front_gem.h void *vaddr);
vaddr 25 drivers/hwtracing/coresight/coresight-tmc-etr.c void *vaddr;
vaddr 605 drivers/hwtracing/coresight/coresight-tmc-etr.c flat_buf->vaddr = dma_alloc_coherent(real_dev, etr_buf->size,
vaddr 607 drivers/hwtracing/coresight/coresight-tmc-etr.c if (!flat_buf->vaddr) {
vaddr 628 drivers/hwtracing/coresight/coresight-tmc-etr.c flat_buf->vaddr, flat_buf->daddr);
vaddr 651 drivers/hwtracing/coresight/coresight-tmc-etr.c *bufpp = (char *)flat_buf->vaddr + offset;
vaddr 82 drivers/i2c/busses/i2c-ibm_iic.c volatile struct iic_regs __iomem *iic = dev->vaddr;
vaddr 126 drivers/i2c/busses/i2c-ibm_iic.c out_8(&dev->vaddr->intmsk, enable ? INTRMSK_EIMTC : 0);
vaddr 134 drivers/i2c/busses/i2c-ibm_iic.c volatile struct iic_regs __iomem *iic = dev->vaddr;
vaddr 179 drivers/i2c/busses/i2c-ibm_iic.c volatile struct iic_regs __iomem *iic = dev->vaddr;
vaddr 240 drivers/i2c/busses/i2c-ibm_iic.c volatile struct iic_regs __iomem *iic = dev->vaddr;
vaddr 326 drivers/i2c/busses/i2c-ibm_iic.c volatile struct iic_regs __iomem *iic = dev->vaddr;
vaddr 344 drivers/i2c/busses/i2c-ibm_iic.c volatile struct iic_regs __iomem *iic = dev->vaddr;
vaddr 377 drivers/i2c/busses/i2c-ibm_iic.c volatile struct iic_regs __iomem *iic = dev->vaddr;
vaddr 409 drivers/i2c/busses/i2c-ibm_iic.c volatile struct iic_regs __iomem *iic = dev->vaddr;
vaddr 460 drivers/i2c/busses/i2c-ibm_iic.c volatile struct iic_regs __iomem *iic = dev->vaddr;
vaddr 518 drivers/i2c/busses/i2c-ibm_iic.c volatile struct iic_regs __iomem *iic = dev->vaddr;
vaddr 554 drivers/i2c/busses/i2c-ibm_iic.c volatile struct iic_regs __iomem *iic = dev->vaddr;
vaddr 704 drivers/i2c/busses/i2c-ibm_iic.c dev->vaddr = of_iomap(np, 0);
vaddr 705 drivers/i2c/busses/i2c-ibm_iic.c if (dev->vaddr == NULL) {
vaddr 762 drivers/i2c/busses/i2c-ibm_iic.c if (dev->vaddr)
vaddr 763 drivers/i2c/busses/i2c-ibm_iic.c iounmap(dev->vaddr);
vaddr 783 drivers/i2c/busses/i2c-ibm_iic.c iounmap(dev->vaddr);
vaddr 42 drivers/i2c/busses/i2c-ibm_iic.h volatile struct iic_regs __iomem *vaddr;
vaddr 102 drivers/iio/buffer/industrialio-buffer-dma.c block->vaddr, block->phys_addr);
vaddr 175 drivers/iio/buffer/industrialio-buffer-dma.c block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
vaddr 177 drivers/iio/buffer/industrialio-buffer-dma.c if (!block->vaddr) {
vaddr 510 drivers/iio/buffer/industrialio-buffer-dma.c if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
vaddr 2018 drivers/infiniband/hw/hfi1/hfi.h int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
vaddr 2076 drivers/infiniband/hw/hfi1/rc.c u64 *vaddr = wqe->sg_list[0].vaddr;
vaddr 2077 drivers/infiniband/hw/hfi1/rc.c *vaddr = val;
vaddr 2637 drivers/infiniband/hw/hfi1/rc.c u64 vaddr = get_ib_reth_vaddr(reth);
vaddr 2640 drivers/infiniband/hw/hfi1/rc.c ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
vaddr 2645 drivers/infiniband/hw/hfi1/rc.c e->rdma_sge.vaddr = NULL;
vaddr 3025 drivers/infiniband/hw/hfi1/rc.c u64 vaddr = get_ib_reth_vaddr(reth);
vaddr 3029 drivers/infiniband/hw/hfi1/rc.c ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
vaddr 3037 drivers/infiniband/hw/hfi1/rc.c qp->r_sge.sge.vaddr = NULL;
vaddr 3080 drivers/infiniband/hw/hfi1/rc.c u64 vaddr = get_ib_reth_vaddr(reth);
vaddr 3084 drivers/infiniband/hw/hfi1/rc.c ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
vaddr 3095 drivers/infiniband/hw/hfi1/rc.c e->rdma_sge.vaddr = NULL;
vaddr 3128 drivers/infiniband/hw/hfi1/rc.c u64 vaddr = get_ib_ateth_vaddr(ateth);
vaddr 3130 drivers/infiniband/hw/hfi1/rc.c vaddr == HFI1_VERBS_E_ATOMIC_VADDR;
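In the i2c-ibm_iic.c entries above, vaddr is not RAM at all but a register block: of_iomap() maps the device's reg window, the result is only touched through __iomem accessors (out_8() and friends), and it is released with iounmap() on remove. A sketch of that probe/remove pairing; the register layout is a placeholder:

	#include <linux/io.h>
	#include <linux/of_address.h>

	/* Placeholder register layout for a memory-mapped peripheral. */
	struct demo_regs {
		u8 ctrl;
		u8 intmsk;
	};

	static struct demo_regs __iomem *demo_map_regs(struct device_node *np)
	{
		/* Map the first "reg" window of the node; NULL on failure. */
		return of_iomap(np, 0);
	}

	static void demo_unmap_regs(struct demo_regs __iomem *regs)
	{
		if (regs)
			iounmap(regs);
	}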
vaddr 3156 drivers/infiniband/hw/hfi1/rc.c if (unlikely(vaddr & (sizeof(u64) - 1)))
vaddr 3161 drivers/infiniband/hw/hfi1/rc.c vaddr, rkey,
vaddr 3165 drivers/infiniband/hw/hfi1/rc.c maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
vaddr 3169 drivers/infiniband/hw/hfi1/rc.c (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
vaddr 881 drivers/infiniband/hw/hfi1/tid_rdma.c void *vaddr, *this_vaddr;
vaddr 891 drivers/infiniband/hw/hfi1/tid_rdma.c vaddr = page_address(pages[0]);
vaddr 892 drivers/infiniband/hw/hfi1/tid_rdma.c trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
vaddr 901 drivers/infiniband/hw/hfi1/tid_rdma.c if (this_vaddr != (vaddr + PAGE_SIZE)) {
vaddr 938 drivers/infiniband/hw/hfi1/tid_rdma.c vaddr = this_vaddr;
vaddr 940 drivers/infiniband/hw/hfi1/tid_rdma.c vaddr += PAGE_SIZE;
vaddr 1091 drivers/infiniband/hw/hfi1/tid_rdma.c pages[i++] = virt_to_page(sge->vaddr);
vaddr 1093 drivers/infiniband/hw/hfi1/tid_rdma.c sge->vaddr += len;
vaddr 1104 drivers/infiniband/hw/hfi1/tid_rdma.c sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
vaddr 1720 drivers/infiniband/hw/hfi1/tid_rdma.c wpriv->ss.sge.vaddr = req_addr;
vaddr 1741 drivers/infiniband/hw/hfi1/tid_rdma.c rreq->reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr +
vaddr 1886 drivers/infiniband/hw/hfi1/tid_rdma.c u32 bth0, u32 psn, u64 vaddr, u32 len)
vaddr 2020 drivers/infiniband/hw/hfi1/tid_rdma.c u64 vaddr;
vaddr 2036 drivers/infiniband/hw/hfi1/tid_rdma.c vaddr = get_ib_reth_vaddr(reth);
vaddr 2039 drivers/infiniband/hw/hfi1/tid_rdma.c ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
vaddr 2056 drivers/infiniband/hw/hfi1/tid_rdma.c vaddr, len))
vaddr 2234 drivers/infiniband/hw/hfi1/tid_rdma.c u64 vaddr;
vaddr 2253 drivers/infiniband/hw/hfi1/tid_rdma.c vaddr = be64_to_cpu(reth->vaddr);
vaddr 2283 drivers/infiniband/hw/hfi1/tid_rdma.c if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
vaddr 2288 drivers/infiniband/hw/hfi1/tid_rdma.c if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr,
vaddr 3262 drivers/infiniband/hw/hfi1/tid_rdma.c if ((u64)sge->vaddr & ~PAGE_MASK ||
vaddr 3372 drivers/infiniband/hw/hfi1/tid_rdma.c ohdr->u.tid_rdma.w_req.reth.vaddr =
vaddr 3658 drivers/infiniband/hw/hfi1/tid_rdma.c u64 vaddr;
vaddr 3676 drivers/infiniband/hw/hfi1/tid_rdma.c vaddr = be64_to_cpu(reth->vaddr);
vaddr 3733 drivers/infiniband/hw/hfi1/tid_rdma.c if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
vaddr 3881 drivers/infiniband/hw/hfi1/tid_rdma.c epriv->ss.sge.vaddr = resp_addr;
vaddr 340 drivers/infiniband/hw/hfi1/trace.c ib_u64_get(&eh->tid_rdma.w_req.reth.vaddr),
vaddr 379 drivers/infiniband/hw/hfi1/trace.c ib_u64_get(&eh->tid_rdma.r_req.reth.vaddr),
vaddr 417 drivers/infiniband/hw/hfi1/trace_tid.h char mtu8k, char v1, void *vaddr),
vaddr 418 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(qp, flow, index, mtu8k, v1, vaddr),
vaddr 426 drivers/infiniband/hw/hfi1/trace_tid.h __field(u64, vaddr)
vaddr 434 drivers/infiniband/hw/hfi1/trace_tid.h __entry->page = vaddr ? (u64)virt_to_page(vaddr) : 0ULL;
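The hfi1 RC path above (and the qib RC path below) treats the RETH/AtomicETH vaddr as an untrusted wire value: byte-swap it with be64_to_cpu(), reject unaligned targets, validate it against the rkey with rvt_rkey_ok(), and only then touch memory, e.g. via cmpxchg() for a compare-and-swap atomic. A condensed sketch of the checks; the request struct, helper name, and the pre-translated target pointer are illustrative:

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <linux/atomic.h>

	/* Illustrative subset of an RDMA atomic request as seen on the wire. */
	struct demo_atomic_eth {
		__be64 vaddr;		/* remote address, big-endian on the wire */
		__be32 rkey;
		__be64 swap_data;
		__be64 compare_data;
	};

	static int demo_handle_atomic(struct demo_atomic_eth *ateth, u64 *target)
	{
		u64 vaddr = be64_to_cpu(ateth->vaddr);

		/* A 64-bit atomic must land on a naturally aligned address. */
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			return -EINVAL;

		/*
		 * The in-tree drivers validate (vaddr, rkey) against the
		 * registered MR via rvt_rkey_ok() before dereferencing;
		 * 'target' stands in for the translated local address.
		 */
		(void)cmpxchg(target, be64_to_cpu(ateth->compare_data),
			      be64_to_cpu(ateth->swap_data));
		return 0;
	}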
(u64)virt_to_page(vaddr) : 0ULL; vaddr 435 drivers/infiniband/hw/hfi1/trace_tid.h __entry->vaddr = (u64)vaddr; vaddr 444 drivers/infiniband/hw/hfi1/trace_tid.h __entry->vaddr vaddr 451 drivers/infiniband/hw/hfi1/trace_tid.h char mtu8k, char v1, void *vaddr), vaddr 452 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(qp, flow, index, mtu8k, v1, vaddr) vaddr 1330 drivers/infiniband/hw/hfi1/trace_tid.h __field(u64, vaddr) vaddr 1337 drivers/infiniband/hw/hfi1/trace_tid.h __entry->vaddr = (u64)sge->vaddr; vaddr 1345 drivers/infiniband/hw/hfi1/trace_tid.h __entry->vaddr, vaddr 185 drivers/infiniband/hw/hfi1/uc.c ohdr->u.rc.reth.vaddr = vaddr 496 drivers/infiniband/hw/hfi1/uc.c u64 vaddr = be64_to_cpu(reth->vaddr); vaddr 501 drivers/infiniband/hw/hfi1/uc.c vaddr, rkey, IB_ACCESS_REMOTE_WRITE); vaddr 508 drivers/infiniband/hw/hfi1/uc.c qp->r_sge.sge.vaddr = NULL; vaddr 228 drivers/infiniband/hw/hfi1/ud.c rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false); vaddr 219 drivers/infiniband/hw/hfi1/user_exp_rcv.c unsigned long vaddr = tidbuf->vaddr; vaddr 224 drivers/infiniband/hw/hfi1/user_exp_rcv.c npages = num_user_pages(vaddr, tidbuf->length); vaddr 234 drivers/infiniband/hw/hfi1/user_exp_rcv.c if (!access_ok((void __user *)vaddr, vaddr 237 drivers/infiniband/hw/hfi1/user_exp_rcv.c (void *)vaddr, npages); vaddr 255 drivers/infiniband/hw/hfi1/user_exp_rcv.c pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages); vaddr 326 drivers/infiniband/hw/hfi1/user_exp_rcv.c if (!PAGE_ALIGNED(tinfo->vaddr)) vaddr 333 drivers/infiniband/hw/hfi1/user_exp_rcv.c tidbuf->vaddr = tinfo->vaddr; vaddr 779 drivers/infiniband/hw/hfi1/user_exp_rcv.c node->mmu.addr = tbuf->vaddr + (pageidx * PAGE_SIZE); vaddr 59 drivers/infiniband/hw/hfi1/user_exp_rcv.h unsigned long vaddr; vaddr 103 drivers/infiniband/hw/hfi1/user_pages.c int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages, vaddr 109 drivers/infiniband/hw/hfi1/user_pages.c ret = get_user_pages_fast(vaddr, npages, gup_flags, pages); vaddr 713 drivers/infiniband/hw/hfi1/verbs.c ss->sge.vaddr, vaddr 1081 drivers/infiniband/hw/hfi1/verbs.c void *addr = ss->sge.vaddr; vaddr 156 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset); vaddr 159 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr); vaddr 181 drivers/infiniband/hw/i40iw/i40iw_pble.c vfree(chunk->vaddr); vaddr 182 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->vaddr = NULL; vaddr 206 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->vaddr = vmalloc(size); vaddr 207 drivers/infiniband/hw/i40iw/i40iw_pble.c if (!chunk->vaddr) { vaddr 213 drivers/infiniband/hw/i40iw/i40iw_pble.c addr = (u8 *)chunk->vaddr; vaddr 284 drivers/infiniband/hw/i40iw/i40iw_pble.c addr = chunk->vaddr; vaddr 385 drivers/infiniband/hw/i40iw/i40iw_pble.c if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr, vaddr 93 drivers/infiniband/hw/i40iw/i40iw_pble.h void *vaddr; vaddr 84 drivers/infiniband/hw/qedr/qedr_roce_cm.c dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr, vaddr 211 drivers/infiniband/hw/qedr/qedr_roce_cm.c pkt->header.vaddr, pkt->header.baddr); vaddr 516 drivers/infiniband/hw/qedr/qedr_roce_cm.c packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size, vaddr 519 drivers/infiniband/hw/qedr/qedr_roce_cm.c if (!packet->header.vaddr) { vaddr 530 drivers/infiniband/hw/qedr/qedr_roce_cm.c memcpy(packet->header.vaddr, ud_header_buffer, header_size); vaddr 
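
hfi1_acquire_user_pages() in the user_pages.c entries above is a thin wrapper around get_user_pages_fast(): validate with access_ok(), pin, and release with put_page() if the pin was only partial. A sketch of the same shape under a hypothetical name (my_pin_user_buf); error handling is kept minimal:

#include <linux/mm.h>

/*
 * Pin a user buffer before DMA. 'vaddr' is a user virtual address; on
 * success the pages stay pinned until released with put_page().
 */
static int my_pin_user_buf(unsigned long vaddr, int npages,
                           bool writable, struct page **pages)
{
        int pinned;

        pinned = get_user_pages_fast(vaddr, npages,
                                     writable ? FOLL_WRITE : 0, pages);
        if (pinned < 0)
                return pinned;          /* fault or invalid range */
        if (pinned != npages) {
                while (pinned)
                        put_page(pages[--pinned]);
                return -EFAULT;         /* partial pin: back out */
        }
        return 0;
}
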
2659 drivers/infiniband/hw/qedr/verbs.c mr->hw_mr.vaddr = usr_addr; vaddr 2755 drivers/infiniband/hw/qedr/verbs.c mr->hw_mr.vaddr = 0; vaddr 2992 drivers/infiniband/hw/qedr/verbs.c #define RQ_SGE_SET(sge, vaddr, vlength, vflags) \ vaddr 2994 drivers/infiniband/hw/qedr/verbs.c DMA_REGPAIR_LE(sge->addr, vaddr); \ vaddr 3005 drivers/infiniband/hw/qedr/verbs.c #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \ vaddr 3007 drivers/infiniband/hw/qedr/verbs.c DMA_REGPAIR_LE(sge->addr, vaddr); \ vaddr 292 drivers/infiniband/hw/qib/qib_file_ops.c unsigned long vaddr; vaddr 345 drivers/infiniband/hw/qib/qib_file_ops.c vaddr = ti->tidvaddr; vaddr 346 drivers/infiniband/hw/qib/qib_file_ops.c if (!access_ok((void __user *) vaddr, vaddr 351 drivers/infiniband/hw/qib/qib_file_ops.c ret = qib_get_user_pages(vaddr, cnt, pagep); vaddr 363 drivers/infiniband/hw/qib/qib_file_ops.c (void *) vaddr, cnt, -ret); vaddr 366 drivers/infiniband/hw/qib/qib_file_ops.c for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { vaddr 347 drivers/infiniband/hw/qib/qib_rc.c ohdr->u.rc.reth.vaddr = vaddr 390 drivers/infiniband/hw/qib/qib_rc.c ohdr->u.rc.reth.vaddr = vaddr 552 drivers/infiniband/hw/qib/qib_rc.c ohdr->u.rc.reth.vaddr = vaddr 1078 drivers/infiniband/hw/qib/qib_rc.c u64 *vaddr = wqe->sg_list[0].vaddr; vaddr 1079 drivers/infiniband/hw/qib/qib_rc.c *vaddr = val; vaddr 1606 drivers/infiniband/hw/qib/qib_rc.c u64 vaddr = be64_to_cpu(reth->vaddr); vaddr 1609 drivers/infiniband/hw/qib/qib_rc.c ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, vaddr 1614 drivers/infiniband/hw/qib/qib_rc.c e->rdma_sge.vaddr = NULL; vaddr 1904 drivers/infiniband/hw/qib/qib_rc.c u64 vaddr = be64_to_cpu(reth->vaddr); vaddr 1908 drivers/infiniband/hw/qib/qib_rc.c ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, vaddr 1916 drivers/infiniband/hw/qib/qib_rc.c qp->r_sge.sge.vaddr = NULL; vaddr 1962 drivers/infiniband/hw/qib/qib_rc.c u64 vaddr = be64_to_cpu(reth->vaddr); vaddr 1966 drivers/infiniband/hw/qib/qib_rc.c ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, vaddr 1977 drivers/infiniband/hw/qib/qib_rc.c e->rdma_sge.vaddr = NULL; vaddr 2007 drivers/infiniband/hw/qib/qib_rc.c u64 vaddr; vaddr 2030 drivers/infiniband/hw/qib/qib_rc.c vaddr = get_ib_ateth_vaddr(ateth); vaddr 2031 drivers/infiniband/hw/qib/qib_rc.c if (unlikely(vaddr & (sizeof(u64) - 1))) vaddr 2036 drivers/infiniband/hw/qib/qib_rc.c vaddr, rkey, vaddr 2040 drivers/infiniband/hw/qib/qib_rc.c maddr = (atomic64_t *) qp->r_sge.sge.vaddr; vaddr 2044 drivers/infiniband/hw/qib/qib_rc.c (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, vaddr 571 drivers/infiniband/hw/qib/qib_sdma.c addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr, vaddr 130 drivers/infiniband/hw/qib/qib_uc.c ohdr->u.rc.reth.vaddr = vaddr 421 drivers/infiniband/hw/qib/qib_uc.c u64 vaddr = be64_to_cpu(reth->vaddr); vaddr 426 drivers/infiniband/hw/qib/qib_uc.c vaddr, rkey, IB_ACCESS_REMOTE_WRITE); vaddr 433 drivers/infiniband/hw/qib/qib_uc.c qp->r_sge.sge.vaddr = NULL; vaddr 178 drivers/infiniband/hw/qib/qib_ud.c rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false); vaddr 179 drivers/infiniband/hw/qib/qib_ud.c sge->vaddr += len; vaddr 191 drivers/infiniband/hw/qib/qib_ud.c sge->vaddr = vaddr 192 drivers/infiniband/hw/qib/qib_ud.c sge->mr->map[sge->m]->segs[sge->n].vaddr; vaddr 149 drivers/infiniband/hw/qib/qib_verbs.c if (((long) sge.vaddr & (sizeof(u32) - 1)) || vaddr 155 drivers/infiniband/hw/qib/qib_verbs.c sge.vaddr += len; vaddr 167 drivers/infiniband/hw/qib/qib_verbs.c sge.vaddr = vaddr 168 
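
The hfi1/qib rc.c entries above all guard remote atomics the same way: the target vaddr must be naturally aligned for a 64-bit access before atomic64 ops or cmpxchg() may touch it. A sketch of that guard; my_do_atomic and 'kvaddr' (the already-translated kernel mapping) are hypothetical, and a 64-bit build is assumed for cmpxchg() on a u64:

#include <linux/atomic.h>
#include <linux/errno.h>

static int my_do_atomic(u64 *kvaddr, u64 compare, u64 swap,
                        bool is_cswap, u64 *orig)
{
        if ((unsigned long)kvaddr & (sizeof(u64) - 1))
                return -EINVAL;         /* misaligned: the real drivers NAK here */

        if (is_cswap)
                *orig = cmpxchg(kvaddr, compare, swap); /* compare-and-swap */
        else
                *orig = atomic64_fetch_add(swap, (atomic64_t *)kvaddr);
        return 0;
}
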
drivers/infiniband/hw/qib/qib_verbs.c sge.mr->map[sge.m]->segs[sge.n].vaddr; vaddr 187 drivers/infiniband/hw/qib/qib_verbs.c memcpy(data, sge->vaddr, len); vaddr 188 drivers/infiniband/hw/qib/qib_verbs.c sge->vaddr += len; vaddr 200 drivers/infiniband/hw/qib/qib_verbs.c sge->vaddr = vaddr 201 drivers/infiniband/hw/qib/qib_verbs.c sge->mr->map[sge->m]->segs[sge->n].vaddr; vaddr 441 drivers/infiniband/hw/qib/qib_verbs.c off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); vaddr 443 drivers/infiniband/hw/qib/qib_verbs.c u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & vaddr 474 drivers/infiniband/hw/qib/qib_verbs.c u32 *addr = (u32 *) ss->sge.vaddr; vaddr 527 drivers/infiniband/hw/qib/qib_verbs.c qib_pio_copy(piobuf, ss->sge.vaddr, w - 1); vaddr 529 drivers/infiniband/hw/qib/qib_verbs.c last = ((u32 *) ss->sge.vaddr)[w - 1]; vaddr 534 drivers/infiniband/hw/qib/qib_verbs.c qib_pio_copy(piobuf, ss->sge.vaddr, w); vaddr 539 drivers/infiniband/hw/qib/qib_verbs.c u32 v = ((u32 *) ss->sge.vaddr)[w]; vaddr 963 drivers/infiniband/hw/qib/qib_verbs.c !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { vaddr 964 drivers/infiniband/hw/qib/qib_verbs.c u32 *addr = (u32 *) ss->sge.vaddr; vaddr 708 drivers/infiniband/hw/usnic/usnic_ib_verbs.c &bus_addr, bar->vaddr, bar->len); vaddr 107 drivers/infiniband/hw/usnic/usnic_vnic.c bar0->vaddr, bar0->len); vaddr 361 drivers/infiniband/hw/usnic/usnic_vnic.c vnic->bar[i].vaddr = pci_iomap(pdev, i, vnic->bar[i].len); vaddr 362 drivers/infiniband/hw/usnic/usnic_vnic.c if (!vnic->bar[i].vaddr) { vaddr 398 drivers/infiniband/hw/usnic/usnic_vnic.c if (!vnic->bar[i].vaddr) vaddr 401 drivers/infiniband/hw/usnic/usnic_vnic.c iounmap(vnic->bar[i].vaddr); vaddr 435 drivers/infiniband/hw/usnic/usnic_vnic.c iounmap(vnic->bar[i].vaddr); vaddr 416 drivers/infiniband/sw/rdmavt/mr.c void *vaddr; vaddr 418 drivers/infiniband/sw/rdmavt/mr.c vaddr = page_address(sg_page_iter_page(&sg_iter)); vaddr 419 drivers/infiniband/sw/rdmavt/mr.c if (!vaddr) { vaddr 423 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].vaddr = vaddr; vaddr 425 drivers/infiniband/sw/rdmavt/mr.c trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE); vaddr 613 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].vaddr = (void *)addr; vaddr 643 drivers/infiniband/sw/rdmavt/mr.c mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr; vaddr 811 drivers/infiniband/sw/rdmavt/mr.c fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i]; vaddr 883 drivers/infiniband/sw/rdmavt/mr.c ((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) { vaddr 941 drivers/infiniband/sw/rdmavt/mr.c isge->vaddr = (void *)sge->addr; vaddr 995 drivers/infiniband/sw/rdmavt/mr.c isge->vaddr = mr->map[m]->segs[n].vaddr + off; vaddr 1025 drivers/infiniband/sw/rdmavt/mr.c u32 len, u64 vaddr, u32 rkey, int acc) vaddr 1051 drivers/infiniband/sw/rdmavt/mr.c sge->vaddr = (void *)vaddr; vaddr 1070 drivers/infiniband/sw/rdmavt/mr.c off = vaddr - mr->iova; vaddr 1071 drivers/infiniband/sw/rdmavt/mr.c if (unlikely(vaddr < mr->iova || off + len > mr->length || vaddr 1102 drivers/infiniband/sw/rdmavt/mr.c sge->vaddr = mr->map[m]->segs[n].vaddr + off; vaddr 2895 drivers/infiniband/sw/rdmavt/qp.c wss_insert(wss, sge->vaddr); vaddr 2897 drivers/infiniband/sw/rdmavt/qp.c wss_insert(wss, (sge->vaddr + PAGE_SIZE)); vaddr 2922 drivers/infiniband/sw/rdmavt/qp.c ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i]; vaddr 2924 drivers/infiniband/sw/rdmavt/qp.c cacheless_memcpy(sge->vaddr, data, len); vaddr 2926 drivers/infiniband/sw/rdmavt/qp.c 
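
The usnic_vnic.c (and later enic) entries map each PCI BAR once with pci_iomap(), remember the __iomem cookie as the BAR's vaddr, and unmap on teardown. A sketch of that loop with a hypothetical struct my_bar; the real drivers also record bus_addr and skip flag checks omitted here:

#include <linux/pci.h>

struct my_bar {
        void __iomem *vaddr;
        resource_size_t len;
};

static int my_map_bars(struct pci_dev *pdev, struct my_bar *bar, int nbars)
{
        int i;

        for (i = 0; i < nbars; i++) {
                bar[i].len = pci_resource_len(pdev, i);
                if (!bar[i].len)
                        continue;       /* unimplemented BAR */
                bar[i].vaddr = pci_iomap(pdev, i, bar[i].len);
                if (!bar[i].vaddr)
                        goto unwind;
        }
        return 0;
unwind:
        while (i--)
                if (bar[i].vaddr)
                        pci_iounmap(pdev, bar[i].vaddr);
        return -ENOMEM;
}
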
memcpy(sge->vaddr, data, len); vaddr 3142 drivers/infiniband/sw/rdmavt/qp.c maddr = (atomic64_t *)qp->r_sge.sge.vaddr; vaddr 3144 drivers/infiniband/sw/rdmavt/qp.c *(u64 *)sqp->s_sge.sge.vaddr = vaddr 3147 drivers/infiniband/sw/rdmavt/qp.c (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr, vaddr 3163 drivers/infiniband/sw/rdmavt/qp.c rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, vaddr 67 drivers/infiniband/sw/rdmavt/trace_mr.h __field(void *, vaddr) vaddr 80 drivers/infiniband/sw/rdmavt/trace_mr.h __entry->vaddr = v; vaddr 98 drivers/infiniband/sw/rdmavt/trace_mr.h (unsigned long long)__entry->vaddr, vaddr 131 drivers/infiniband/sw/rdmavt/trace_mr.h __field(void *, vaddr) vaddr 146 drivers/infiniband/sw/rdmavt/trace_mr.h __entry->vaddr = sge->vaddr; vaddr 162 drivers/infiniband/sw/rdmavt/trace_mr.h __entry->vaddr, vaddr 169 drivers/infiniband/sw/rxe/rxe_mr.c void *vaddr; vaddr 207 drivers/infiniband/sw/rxe/rxe_mr.c vaddr = page_address(sg_page_iter_page(&sg_iter)); vaddr 208 drivers/infiniband/sw/rxe/rxe_mr.c if (!vaddr) { vaddr 214 drivers/infiniband/sw/rxe/rxe_mr.c buf->addr = (uintptr_t)vaddr; vaddr 549 drivers/infiniband/sw/rxe/rxe_resp.c u64 *vaddr; vaddr 558 drivers/infiniband/sw/rxe/rxe_resp.c vaddr = iova_to_vaddr(mr, iova, sizeof(u64)); vaddr 561 drivers/infiniband/sw/rxe/rxe_resp.c if (!vaddr || (uintptr_t)vaddr & 7) { vaddr 568 drivers/infiniband/sw/rxe/rxe_resp.c qp->resp.atomic_orig = *vaddr; vaddr 572 drivers/infiniband/sw/rxe/rxe_resp.c if (*vaddr == atmeth_comp(pkt)) vaddr 573 drivers/infiniband/sw/rxe/rxe_resp.c *vaddr = atmeth_swap_add(pkt); vaddr 575 drivers/infiniband/sw/rxe/rxe_resp.c *vaddr += atmeth_swap_add(pkt); vaddr 37 drivers/infiniband/sw/siw/siw_verbs.c static u32 siw_create_uobj(struct siw_ucontext *uctx, void *vaddr, u32 size) vaddr 53 drivers/infiniband/sw/siw/siw_verbs.c uobj->addr = vaddr; vaddr 84 drivers/iommu/amd_iommu_proto.h static inline u64 iommu_virt_to_phys(void *vaddr) vaddr 86 drivers/iommu/amd_iommu_proto.h return (u64)__sme_set(virt_to_phys(vaddr)); vaddr 577 drivers/iommu/dma-iommu.c void *vaddr; vaddr 617 drivers/iommu/dma-iommu.c vaddr = dma_common_pages_remap(pages, size, prot, vaddr 619 drivers/iommu/dma-iommu.c if (!vaddr) vaddr 624 drivers/iommu/dma-iommu.c return vaddr; vaddr 517 drivers/iommu/intel-iommu.c void *vaddr = NULL; vaddr 521 drivers/iommu/intel-iommu.c vaddr = page_address(page); vaddr 522 drivers/iommu/intel-iommu.c return vaddr; vaddr 525 drivers/iommu/intel-iommu.c void free_pgtable_page(void *vaddr) vaddr 527 drivers/iommu/intel-iommu.c free_page((unsigned long)vaddr); vaddr 535 drivers/iommu/intel-iommu.c static void free_domain_mem(void *vaddr) vaddr 537 drivers/iommu/intel-iommu.c kmem_cache_free(iommu_domain_cache, vaddr); vaddr 545 drivers/iommu/intel-iommu.c static inline void free_devinfo_mem(void *vaddr) vaddr 547 drivers/iommu/intel-iommu.c kmem_cache_free(iommu_devinfo_cache, vaddr); vaddr 3670 drivers/iommu/intel-iommu.c static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, vaddr 3674 drivers/iommu/intel-iommu.c struct page *page = virt_to_page(vaddr); vaddr 3677 drivers/iommu/intel-iommu.c return dma_direct_free(dev, size, vaddr, dma_handle, attrs); vaddr 181 drivers/irqchip/qcom-irq-combiner.c void __iomem *vaddr; vaddr 196 drivers/irqchip/qcom-irq-combiner.c vaddr = devm_ioremap(ctx->dev, reg->address, REG_SIZE); vaddr 197 drivers/irqchip/qcom-irq-combiner.c if (!vaddr) { vaddr 203 drivers/irqchip/qcom-irq-combiner.c ctx->combiner->regs[ctx->combiner->nregs].addr = vaddr; vaddr 821 
drivers/mailbox/bcm-pdc-mailbox.c void *vaddr; vaddr 832 drivers/mailbox/bcm-pdc-mailbox.c vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr); vaddr 833 drivers/mailbox/bcm-pdc-mailbox.c if (unlikely(!vaddr)) vaddr 854 drivers/mailbox/bcm-pdc-mailbox.c rx_ctx->resp_hdr = vaddr; vaddr 99 drivers/mailbox/pcc.c static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width) vaddr 105 drivers/mailbox/pcc.c *val = readb(vaddr); vaddr 108 drivers/mailbox/pcc.c *val = readw(vaddr); vaddr 111 drivers/mailbox/pcc.c *val = readl(vaddr); vaddr 114 drivers/mailbox/pcc.c *val = readq(vaddr); vaddr 125 drivers/mailbox/pcc.c static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width) vaddr 131 drivers/mailbox/pcc.c writeb(val, vaddr); vaddr 134 drivers/mailbox/pcc.c writew(val, vaddr); vaddr 137 drivers/mailbox/pcc.c writel(val, vaddr); vaddr 140 drivers/mailbox/pcc.c writeq(val, vaddr); vaddr 907 drivers/media/common/videobuf2/videobuf2-core.c return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv); vaddr 2238 drivers/media/common/videobuf2/videobuf2-core.c void *vaddr; vaddr 2255 drivers/media/common/videobuf2/videobuf2-core.c vaddr = vb2_plane_vaddr(vb, plane); vaddr 2256 drivers/media/common/videobuf2/videobuf2-core.c return vaddr ? (unsigned long)vaddr : -EINVAL; vaddr 2404 drivers/media/common/videobuf2/videobuf2-core.c void *vaddr; vaddr 2468 drivers/media/common/videobuf2/videobuf2-core.c if (!q->mem_ops->vaddr) vaddr 2518 drivers/media/common/videobuf2/videobuf2-core.c fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0); vaddr 2519 drivers/media/common/videobuf2/videobuf2-core.c if (fileio->bufs[i].vaddr == NULL) { vaddr 2684 drivers/media/common/videobuf2/videobuf2-core.c ret = copy_to_user(data, buf->vaddr + buf->pos, count); vaddr 2686 drivers/media/common/videobuf2/videobuf2-core.c ret = copy_from_user(buf->vaddr + buf->pos, data, count); vaddr 27 drivers/media/common/videobuf2/videobuf2-dma-contig.c void *vaddr; vaddr 80 drivers/media/common/videobuf2/videobuf2-dma-contig.c if (!buf->vaddr && buf->db_attach) vaddr 81 drivers/media/common/videobuf2/videobuf2-dma-contig.c buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf); vaddr 83 drivers/media/common/videobuf2/videobuf2-dma-contig.c return buf->vaddr; vaddr 163 drivers/media/common/videobuf2/videobuf2-dma-contig.c buf->vaddr = buf->cookie; vaddr 342 drivers/media/common/videobuf2/videobuf2-dma-contig.c return buf->vaddr ? 
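
read_register()/write_register() in the pcc.c entries above dispatch on the register's bit width, since the ACPI descriptor only tells the driver the width at run time. A sketch of the read side; my_read_register is a hypothetical name, and readq() assumes a 64-bit build (or one of the io-64-nonatomic headers):

#include <linux/io.h>
#include <linux/errno.h>

static int my_read_register(void __iomem *vaddr, u64 *val,
                            unsigned int bit_width)
{
        switch (bit_width) {
        case 8:
                *val = readb(vaddr);
                break;
        case 16:
                *val = readw(vaddr);
                break;
        case 32:
                *val = readl(vaddr);
                break;
        case 64:
                *val = readq(vaddr);
                break;
        default:
                return -EFAULT;         /* unsupported access width */
        }
        return 0;
}
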
buf->vaddr + pgnum * PAGE_SIZE : NULL; vaddr 349 drivers/media/common/videobuf2/videobuf2-dma-contig.c return buf->vaddr; vaddr 453 drivers/media/common/videobuf2/videobuf2-dma-contig.c static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr, vaddr 466 drivers/media/common/videobuf2/videobuf2-dma-contig.c if (!IS_ALIGNED(vaddr | size, dma_align)) { vaddr 486 drivers/media/common/videobuf2/videobuf2-dma-contig.c offset = lower_32_bits(offset_in_page(vaddr)); vaddr 487 drivers/media/common/videobuf2/videobuf2-dma-contig.c vec = vb2_create_framevec(vaddr, size); vaddr 612 drivers/media/common/videobuf2/videobuf2-dma-contig.c buf->vaddr = NULL; vaddr 632 drivers/media/common/videobuf2/videobuf2-dma-contig.c if (buf->vaddr) { vaddr 633 drivers/media/common/videobuf2/videobuf2-dma-contig.c dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr); vaddr 634 drivers/media/common/videobuf2/videobuf2-dma-contig.c buf->vaddr = NULL; vaddr 696 drivers/media/common/videobuf2/videobuf2-dma-contig.c .vaddr = vb2_dc_vaddr, vaddr 36 drivers/media/common/videobuf2/videobuf2-dma-sg.c void *vaddr; vaddr 115 drivers/media/common/videobuf2/videobuf2-dma-sg.c buf->vaddr = NULL; vaddr 185 drivers/media/common/videobuf2/videobuf2-dma-sg.c if (buf->vaddr) vaddr 186 drivers/media/common/videobuf2/videobuf2-dma-sg.c vm_unmap_ram(buf->vaddr, buf->num_pages); vaddr 221 drivers/media/common/videobuf2/videobuf2-dma-sg.c static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr, vaddr 236 drivers/media/common/videobuf2/videobuf2-dma-sg.c buf->vaddr = NULL; vaddr 239 drivers/media/common/videobuf2/videobuf2-dma-sg.c buf->offset = vaddr & ~PAGE_MASK; vaddr 242 drivers/media/common/videobuf2/videobuf2-dma-sg.c vec = vb2_create_framevec(vaddr, size); vaddr 291 drivers/media/common/videobuf2/videobuf2-dma-sg.c if (buf->vaddr) vaddr 292 drivers/media/common/videobuf2/videobuf2-dma-sg.c vm_unmap_ram(buf->vaddr, buf->num_pages); vaddr 308 drivers/media/common/videobuf2/videobuf2-dma-sg.c if (!buf->vaddr) { vaddr 310 drivers/media/common/videobuf2/videobuf2-dma-sg.c buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf); vaddr 312 drivers/media/common/videobuf2/videobuf2-dma-sg.c buf->vaddr = vm_map_ram(buf->pages, vaddr 317 drivers/media/common/videobuf2/videobuf2-dma-sg.c return buf->vaddr ? buf->vaddr + buf->offset : NULL; vaddr 477 drivers/media/common/videobuf2/videobuf2-dma-sg.c return buf->vaddr ? 
buf->vaddr + pgnum * PAGE_SIZE : NULL; vaddr 555 drivers/media/common/videobuf2/videobuf2-dma-sg.c buf->vaddr = NULL; vaddr 575 drivers/media/common/videobuf2/videobuf2-dma-sg.c if (buf->vaddr) { vaddr 576 drivers/media/common/videobuf2/videobuf2-dma-sg.c dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr); vaddr 577 drivers/media/common/videobuf2/videobuf2-dma-sg.c buf->vaddr = NULL; vaddr 643 drivers/media/common/videobuf2/videobuf2-dma-sg.c .vaddr = vb2_dma_sg_vaddr, vaddr 26 drivers/media/common/videobuf2/videobuf2-vmalloc.c void *vaddr; vaddr 48 drivers/media/common/videobuf2/videobuf2-vmalloc.c buf->vaddr = vmalloc_user(buf->size); vaddr 49 drivers/media/common/videobuf2/videobuf2-vmalloc.c if (!buf->vaddr) { vaddr 69 drivers/media/common/videobuf2/videobuf2-vmalloc.c vfree(buf->vaddr); vaddr 74 drivers/media/common/videobuf2/videobuf2-vmalloc.c static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr, vaddr 88 drivers/media/common/videobuf2/videobuf2-vmalloc.c offset = vaddr & ~PAGE_MASK; vaddr 90 drivers/media/common/videobuf2/videobuf2-vmalloc.c vec = vb2_create_framevec(vaddr, size); vaddr 107 drivers/media/common/videobuf2/videobuf2-vmalloc.c buf->vaddr = (__force void *) vaddr 110 drivers/media/common/videobuf2/videobuf2-vmalloc.c buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1, vaddr 114 drivers/media/common/videobuf2/videobuf2-vmalloc.c if (!buf->vaddr) vaddr 116 drivers/media/common/videobuf2/videobuf2-vmalloc.c buf->vaddr += offset; vaddr 130 drivers/media/common/videobuf2/videobuf2-vmalloc.c unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK; vaddr 138 drivers/media/common/videobuf2/videobuf2-vmalloc.c if (vaddr) vaddr 139 drivers/media/common/videobuf2/videobuf2-vmalloc.c vm_unmap_ram((void *)vaddr, n_pages); vaddr 145 drivers/media/common/videobuf2/videobuf2-vmalloc.c iounmap((__force void __iomem *)buf->vaddr); vaddr 155 drivers/media/common/videobuf2/videobuf2-vmalloc.c if (!buf->vaddr) { vaddr 160 drivers/media/common/videobuf2/videobuf2-vmalloc.c return buf->vaddr; vaddr 179 drivers/media/common/videobuf2/videobuf2-vmalloc.c ret = remap_vmalloc_range(vma, buf->vaddr, 0); vaddr 219 drivers/media/common/videobuf2/videobuf2-vmalloc.c void *vaddr = buf->vaddr; vaddr 234 drivers/media/common/videobuf2/videobuf2-vmalloc.c struct page *page = vmalloc_to_page(vaddr); vaddr 242 drivers/media/common/videobuf2/videobuf2-vmalloc.c vaddr += PAGE_SIZE; vaddr 326 drivers/media/common/videobuf2/videobuf2-vmalloc.c return buf->vaddr + pgnum * PAGE_SIZE; vaddr 333 drivers/media/common/videobuf2/videobuf2-vmalloc.c return buf->vaddr; vaddr 364 drivers/media/common/videobuf2/videobuf2-vmalloc.c if (WARN_ON(!buf->vaddr)) vaddr 387 drivers/media/common/videobuf2/videobuf2-vmalloc.c buf->vaddr = dma_buf_vmap(buf->dbuf); vaddr 389 drivers/media/common/videobuf2/videobuf2-vmalloc.c return buf->vaddr ? 
0 : -EFAULT; vaddr 396 drivers/media/common/videobuf2/videobuf2-vmalloc.c dma_buf_vunmap(buf->dbuf, buf->vaddr); vaddr 397 drivers/media/common/videobuf2/videobuf2-vmalloc.c buf->vaddr = NULL; vaddr 404 drivers/media/common/videobuf2/videobuf2-vmalloc.c if (buf->vaddr) vaddr 405 drivers/media/common/videobuf2/videobuf2-vmalloc.c dma_buf_vunmap(buf->dbuf, buf->vaddr); vaddr 442 drivers/media/common/videobuf2/videobuf2-vmalloc.c .vaddr = vb2_vmalloc_vaddr, vaddr 77 drivers/media/pci/cx23885/cx23885-alsa.c buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); vaddr 78 drivers/media/pci/cx23885/cx23885-alsa.c if (NULL == buf->vaddr) { vaddr 84 drivers/media/pci/cx23885/cx23885-alsa.c buf->vaddr, nr_pages << PAGE_SHIFT); vaddr 86 drivers/media/pci/cx23885/cx23885-alsa.c memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT); vaddr 95 drivers/media/pci/cx23885/cx23885-alsa.c pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE); vaddr 106 drivers/media/pci/cx23885/cx23885-alsa.c vfree(buf->vaddr); vaddr 107 drivers/media/pci/cx23885/cx23885-alsa.c buf->vaddr = NULL; vaddr 141 drivers/media/pci/cx23885/cx23885-alsa.c vfree(buf->vaddr); vaddr 142 drivers/media/pci/cx23885/cx23885-alsa.c buf->vaddr = NULL; vaddr 402 drivers/media/pci/cx23885/cx23885-alsa.c substream->runtime->dma_area = chip->buf->vaddr; vaddr 326 drivers/media/pci/cx23885/cx23885.h void *vaddr; vaddr 54 drivers/media/pci/cx25821/cx25821-alsa.c void *vaddr; vaddr 140 drivers/media/pci/cx25821/cx25821-alsa.c buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); vaddr 141 drivers/media/pci/cx25821/cx25821-alsa.c if (NULL == buf->vaddr) { vaddr 147 drivers/media/pci/cx25821/cx25821-alsa.c buf->vaddr, vaddr 150 drivers/media/pci/cx25821/cx25821-alsa.c memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT); vaddr 159 drivers/media/pci/cx25821/cx25821-alsa.c pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE); vaddr 170 drivers/media/pci/cx25821/cx25821-alsa.c vfree(buf->vaddr); vaddr 171 drivers/media/pci/cx25821/cx25821-alsa.c buf->vaddr = NULL; vaddr 205 drivers/media/pci/cx25821/cx25821-alsa.c vfree(buf->vaddr); vaddr 206 drivers/media/pci/cx25821/cx25821-alsa.c buf->vaddr = NULL; vaddr 546 drivers/media/pci/cx25821/cx25821-alsa.c substream->runtime->dma_area = chip->buf->vaddr; vaddr 47 drivers/media/pci/cx88/cx88-alsa.c void *vaddr; vaddr 280 drivers/media/pci/cx88/cx88-alsa.c buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); vaddr 281 drivers/media/pci/cx88/cx88-alsa.c if (!buf->vaddr) { vaddr 287 drivers/media/pci/cx88/cx88-alsa.c buf->vaddr, nr_pages << PAGE_SHIFT); vaddr 289 drivers/media/pci/cx88/cx88-alsa.c memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT); vaddr 298 drivers/media/pci/cx88/cx88-alsa.c pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE); vaddr 309 drivers/media/pci/cx88/cx88-alsa.c vfree(buf->vaddr); vaddr 310 drivers/media/pci/cx88/cx88-alsa.c buf->vaddr = NULL; vaddr 345 drivers/media/pci/cx88/cx88-alsa.c vfree(buf->vaddr); vaddr 346 drivers/media/pci/cx88/cx88-alsa.c buf->vaddr = NULL; vaddr 493 drivers/media/pci/cx88/cx88-alsa.c substream->runtime->dma_area = chip->buf->vaddr; vaddr 261 drivers/media/pci/saa7134/saa7134-alsa.c dma->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); vaddr 262 drivers/media/pci/saa7134/saa7134-alsa.c if (NULL == dma->vaddr) { vaddr 268 drivers/media/pci/saa7134/saa7134-alsa.c dma->vaddr, nr_pages << PAGE_SHIFT); vaddr 270 drivers/media/pci/saa7134/saa7134-alsa.c memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT); vaddr 279 drivers/media/pci/saa7134/saa7134-alsa.c pg = vmalloc_to_page(dma->vaddr + i * PAGE_SIZE); vaddr 290 
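
The cx23885/cx25821/cx88 ALSA entries above (and saa7134 below) all build their audio rings the same way: vmalloc_32() for a 32-bit-DMA-safe buffer, then a per-page vmalloc_to_page() walk to feed the scatter list. A condensed sketch of the page walk; my_ring_pages is a hypothetical name:

#include <linux/vmalloc.h>
#include <linux/slab.h>

static struct page **my_ring_pages(void *vaddr, int nr_pages)
{
        struct page **pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        int i;

        if (!pages)
                return NULL;
        for (i = 0; i < nr_pages; i++)
                pages[i] = vmalloc_to_page(vaddr + i * PAGE_SIZE);
        return pages;   /* caller builds the sg table / RISC program from these */
}
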
drivers/media/pci/saa7134/saa7134-alsa.c vfree(dma->vaddr); vaddr 291 drivers/media/pci/saa7134/saa7134-alsa.c dma->vaddr = NULL; vaddr 325 drivers/media/pci/saa7134/saa7134-alsa.c vfree(dma->vaddr); vaddr 326 drivers/media/pci/saa7134/saa7134-alsa.c dma->vaddr = NULL; vaddr 727 drivers/media/pci/saa7134/saa7134-alsa.c substream->runtime->dma_area = dev->dmasound.vaddr; vaddr 509 drivers/media/pci/saa7134/saa7134.h void *vaddr; vaddr 215 drivers/media/platform/coda/coda-bit.c kfifo_init(&ctx->bitstream_fifo, ctx->bitstream.vaddr, vaddr 233 drivers/media/platform/coda/coda-bit.c u8 *vaddr = vb2_plane_vaddr(&src_buf->vb2_buf, 0); vaddr 238 drivers/media/platform/coda/coda-bit.c size = coda_mpeg2_parse_headers(ctx, vaddr, payload); vaddr 241 drivers/media/platform/coda/coda-bit.c size = coda_mpeg4_parse_headers(ctx, vaddr, payload); vaddr 254 drivers/media/platform/coda/coda-bit.c u8 *vaddr = vb2_plane_vaddr(&src_buf->vb2_buf, 0); vaddr 262 drivers/media/platform/coda/coda-bit.c if (!vaddr) { vaddr 282 drivers/media/platform/coda/coda-bit.c ret = coda_bitstream_queue(ctx, vaddr, vaddr 303 drivers/media/platform/coda/coda-bit.c ret = coda_bitstream_queue(ctx, vaddr, payload); vaddr 451 drivers/media/platform/coda/coda-bit.c u32 *p = ctx->parabuf.vaddr; vaddr 574 drivers/media/platform/coda/coda-bit.c if (!ctx->parabuf.vaddr) { vaddr 584 drivers/media/platform/coda/coda-bit.c if (!ctx->slicebuf.vaddr && q_data->fourcc == V4L2_PIX_FMT_H264) { vaddr 594 drivers/media/platform/coda/coda-bit.c if (!ctx->psbuf.vaddr && (dev->devtype->product == CODA_HX4 || vaddr 602 drivers/media/platform/coda/coda-bit.c if (!ctx->workbuf.vaddr) { vaddr 804 drivers/media/platform/coda/coda-bit.c if (!dev->iram.vaddr) vaddr 1715 drivers/media/platform/coda/coda-bit.c ctx->bitstream.vaddr, ctx->bitstream.size); vaddr 1752 drivers/media/platform/coda/coda-bit.c if (ctx->bitstream.vaddr) vaddr 1756 drivers/media/platform/coda/coda-bit.c ctx->bitstream.vaddr = dma_alloc_wc(ctx->dev->dev, ctx->bitstream.size, vaddr 1758 drivers/media/platform/coda/coda-bit.c if (!ctx->bitstream.vaddr) { vaddr 1764 drivers/media/platform/coda/coda-bit.c ctx->bitstream.vaddr, ctx->bitstream.size); vaddr 1771 drivers/media/platform/coda/coda-bit.c if (ctx->bitstream.vaddr == NULL) vaddr 1774 drivers/media/platform/coda/coda-bit.c dma_free_wc(ctx->dev->dev, ctx->bitstream.size, ctx->bitstream.vaddr, vaddr 1776 drivers/media/platform/coda/coda-bit.c ctx->bitstream.vaddr = NULL; vaddr 2279 drivers/media/platform/coda/coda-bit.c ctx->bitstream.vaddr, ctx->bitstream.size); vaddr 1798 drivers/media/platform/coda/coda-common.c buf->vaddr = dma_alloc_coherent(dev->dev, size, &buf->paddr, vaddr 1800 drivers/media/platform/coda/coda-common.c if (!buf->vaddr) { vaddr 1810 drivers/media/platform/coda/coda-common.c buf->blob.data = buf->vaddr; vaddr 1825 drivers/media/platform/coda/coda-common.c if (buf->vaddr) { vaddr 1826 drivers/media/platform/coda/coda-common.c dma_free_coherent(dev->dev, buf->size, buf->vaddr, buf->paddr); vaddr 1827 drivers/media/platform/coda/coda-common.c buf->vaddr = NULL; vaddr 1986 drivers/media/platform/coda/coda-common.c ctx->bitstream.vaddr, ctx->bitstream.size); vaddr 2605 drivers/media/platform/coda/coda-common.c p = (u16 *)dev->codebuf.vaddr; vaddr 2728 drivers/media/platform/coda/coda-common.c u32 *dst = dev->codebuf.vaddr; vaddr 2743 drivers/media/platform/coda/coda-common.c memcpy(dev->codebuf.vaddr, src, size); vaddr 3051 drivers/media/platform/coda/coda-common.c dev->iram.vaddr = gen_pool_dma_alloc(dev->iram_pool, 
dev->iram.size, vaddr 3053 drivers/media/platform/coda/coda-common.c if (!dev->iram.vaddr) { vaddr 3056 drivers/media/platform/coda/coda-common.c memset(dev->iram.vaddr, 0, dev->iram.size); vaddr 3057 drivers/media/platform/coda/coda-common.c dev->iram.blob.data = dev->iram.vaddr; vaddr 3108 drivers/media/platform/coda/coda-common.c if (dev->iram.vaddr) vaddr 3109 drivers/media/platform/coda/coda-common.c gen_pool_free(dev->iram_pool, (unsigned long)dev->iram.vaddr, vaddr 3125 drivers/media/platform/coda/coda-common.c if (dev->pm_domain && cdev->codebuf.vaddr) { vaddr 168 drivers/media/platform/coda/coda-jpeg.c coda_memcpy_parabuf(ctx->parabuf.vaddr, huff + i); vaddr 172 drivers/media/platform/coda/coda-jpeg.c coda_memcpy_parabuf(ctx->parabuf.vaddr, qmat + i); vaddr 179 drivers/media/platform/coda/coda-jpeg.c void *vaddr = vb2_plane_vaddr(vb, 0); vaddr 183 drivers/media/platform/coda/coda-jpeg.c soi = be16_to_cpup((__be16 *)vaddr); vaddr 188 drivers/media/platform/coda/coda-jpeg.c vaddr += len - 2; vaddr 190 drivers/media/platform/coda/coda-jpeg.c eoi = be16_to_cpup((__be16 *)(vaddr - i)); vaddr 63 drivers/media/platform/coda/coda.h void *vaddr; vaddr 223 drivers/media/platform/exynos4-is/fimc-capture.c void *vaddr; vaddr 228 drivers/media/platform/exynos4-is/fimc-capture.c vaddr = vb2_plane_vaddr(&v_buf->vb.vb2_buf, plane); vaddr 230 drivers/media/platform/exynos4-is/fimc-capture.c vaddr, &size); vaddr 242 drivers/media/platform/exynos4-is/fimc-is.c buf = is->memory.vaddr + is->setfile.base; vaddr 247 drivers/media/platform/exynos4-is/fimc-is.c pr_debug("mem vaddr: %p, setfile buf: %p\n", is->memory.vaddr, buf); vaddr 318 drivers/media/platform/exynos4-is/fimc-is.c memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size); vaddr 338 drivers/media/platform/exynos4-is/fimc-is.c is->memory.vaddr = dma_alloc_coherent(dev, FIMC_IS_CPU_MEM_SIZE, vaddr 340 drivers/media/platform/exynos4-is/fimc-is.c if (is->memory.vaddr == NULL) vaddr 350 drivers/media/platform/exynos4-is/fimc-is.c dma_free_coherent(dev, is->memory.size, is->memory.vaddr, vaddr 355 drivers/media/platform/exynos4-is/fimc-is.c is->is_p_region = (struct is_region *)(is->memory.vaddr + vaddr 361 drivers/media/platform/exynos4-is/fimc-is.c is->is_shared_region = (struct is_share_region *)(is->memory.vaddr + vaddr 370 drivers/media/platform/exynos4-is/fimc-is.c if (is->memory.vaddr == NULL) vaddr 373 drivers/media/platform/exynos4-is/fimc-is.c dma_free_coherent(dev, is->memory.size, is->memory.vaddr, vaddr 403 drivers/media/platform/exynos4-is/fimc-is.c memcpy(is->memory.vaddr, fw->data, fw->size); vaddr 407 drivers/media/platform/exynos4-is/fimc-is.c buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_DESC_LEN); vaddr 411 drivers/media/platform/exynos4-is/fimc-is.c buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_VER_LEN); vaddr 740 drivers/media/platform/exynos4-is/fimc-is.c const u8 *buf = is->memory.vaddr + FIMC_IS_DEBUG_REGION_OFFSET; vaddr 742 drivers/media/platform/exynos4-is/fimc-is.c if (is->memory.vaddr == NULL) { vaddr 178 drivers/media/platform/exynos4-is/fimc-is.h void *vaddr; vaddr 191 drivers/media/platform/exynos4-is/fimc-is.h void *vaddr; vaddr 382 drivers/media/platform/fsl-viu.c void *vaddr = NULL; vaddr 388 drivers/media/platform/fsl-viu.c if (vq->int_ops && vq->int_ops->vaddr) vaddr 389 drivers/media/platform/fsl-viu.c vaddr = vq->int_ops->vaddr(vb); vaddr 391 drivers/media/platform/fsl-viu.c if (vaddr) vaddr 41 drivers/media/platform/sti/delta/delta-ipc.c static inline dma_addr_t to_paddr(struct 
delta_ipc_ctx *ctx, void *vaddr) vaddr 43 drivers/media/platform/sti/delta/delta-ipc.c return (ctx->ipc_buf->paddr + (vaddr - ctx->ipc_buf->vaddr)); vaddr 49 drivers/media/platform/sti/delta/delta-ipc.c return ((data >= ctx->ipc_buf->vaddr) && vaddr 50 drivers/media/platform/sti/delta/delta-ipc.c ((data + size) <= (ctx->ipc_buf->vaddr + ctx->ipc_buf->size))); vaddr 182 drivers/media/platform/sti/delta/delta-ipc.c memcpy(ctx->ipc_buf->vaddr, param->data, msg.param_size); vaddr 273 drivers/media/platform/sti/delta/delta-ipc.c ctx->ipc_buf->vaddr, vaddr 274 drivers/media/platform/sti/delta/delta-ipc.c ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1); vaddr 373 drivers/media/platform/sti/delta/delta-ipc.c ctx->ipc_buf->vaddr, vaddr 374 drivers/media/platform/sti/delta/delta-ipc.c ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1); vaddr 384 drivers/media/platform/sti/delta/delta-ipc.c ctx->ipc_buf->vaddr, vaddr 385 drivers/media/platform/sti/delta/delta-ipc.c ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1); vaddr 30 drivers/media/platform/sti/delta/delta-mem.c buf->vaddr = addr; vaddr 36 drivers/media/platform/sti/delta/delta-mem.c ctx->name, size, buf->vaddr, &buf->paddr, buf->name); vaddr 47 drivers/media/platform/sti/delta/delta-mem.c ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name); vaddr 50 drivers/media/platform/sti/delta/delta-mem.c buf->vaddr, buf->paddr, buf->attrs); vaddr 247 drivers/media/platform/sti/delta/delta-mjpeg-dec.c struct jpeg_decode_params_t *params = ctx->ipc_buf->vaddr; vaddr 249 drivers/media/platform/sti/delta/delta-mjpeg-dec.c ctx->ipc_buf->vaddr + sizeof(*params); vaddr 382 drivers/media/platform/sti/delta/delta-mjpeg-dec.c ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size, vaddr 408 drivers/media/platform/sti/delta/delta-mjpeg-dec.c ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size, vaddr 416 drivers/media/platform/sti/delta/delta-mjpeg-dec.c au.vaddr += data_offset; vaddr 65 drivers/media/platform/sti/delta/delta-v4l2.c u8 *data = (u8 *)(au->vaddr); vaddr 81 drivers/media/platform/sti/delta/delta-v4l2.c u8 *data = (u8 *)(frame->vaddr); vaddr 1125 drivers/media/platform/sti/delta/delta-v4l2.c au->vaddr = vb2_plane_vaddr(&au->vbuf.vb2_buf, 0); vaddr 1130 drivers/media/platform/sti/delta/delta-v4l2.c ctx->name, vb->index, au->vaddr, &au->paddr); vaddr 1472 drivers/media/platform/sti/delta/delta-v4l2.c frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0); vaddr 1486 drivers/media/platform/sti/delta/delta-v4l2.c ctx->name, vb->index, frame->vaddr, vaddr 105 drivers/media/platform/sti/delta/delta.h void *vaddr; vaddr 175 drivers/media/platform/sti/delta/delta.h void *vaddr; vaddr 205 drivers/media/platform/sti/delta/delta.h void *vaddr; vaddr 823 drivers/media/platform/sti/hva/hva-h264.c slice_header_vaddr = seq_info->vaddr + (td->addr_slice_header - vaddr 854 drivers/media/platform/sti/hva/hva-h264.c (u8 *)stream->vaddr, vaddr 1009 drivers/media/platform/sti/hva/hva-h264.c struct hva_h264_task *task = (struct hva_h264_task *)ctx->task->vaddr; vaddr 1028 drivers/media/platform/sti/hva/hva-h264.c (u8 *)stream->vaddr, vaddr 37 drivers/media/platform/sti/hva/hva-mem.c b->vaddr = base; vaddr 42 drivers/media/platform/sti/hva/hva-mem.c ctx->name, size, b->vaddr, &b->paddr, b->name); vaddr 56 drivers/media/platform/sti/hva/hva-mem.c ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name); vaddr 58 drivers/media/platform/sti/hva/hva-mem.c dma_free_attrs(dev, buf->size, buf->vaddr, buf->paddr, vaddr 22 drivers/media/platform/sti/hva/hva-mem.h void *vaddr; vaddr 953 
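
to_paddr() in the delta-ipc entries above converts between the two views of one coherent buffer by plain offset arithmetic: vaddr and paddr advance in step, so paddr = base_paddr + (vaddr - base_vaddr). A sketch with hypothetical parameter names:

static dma_addr_t my_to_paddr(void *vaddr, void *base_vaddr,
                              dma_addr_t base_paddr)
{
        /* valid only for pointers inside the buffer that base_* describes */
        return base_paddr + (vaddr - base_vaddr);
}
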
drivers/media/platform/sti/hva/hva-v4l2.c frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0); vaddr 962 drivers/media/platform/sti/hva/hva-v4l2.c frame->vaddr, &frame->paddr); vaddr 969 drivers/media/platform/sti/hva/hva-v4l2.c stream->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0); vaddr 978 drivers/media/platform/sti/hva/hva-v4l2.c stream->vaddr, &stream->paddr); vaddr 120 drivers/media/platform/sti/hva/hva.h void *vaddr; vaddr 145 drivers/media/platform/sti/hva/hva.h void *vaddr; vaddr 121 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h void *vaddr; vaddr 259 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c csi->scratch.vaddr = dma_alloc_coherent(csi->dev, vaddr 263 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c if (!csi->scratch.vaddr) { vaddr 336 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr, vaddr 364 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr, vaddr 133 drivers/media/v4l2-core/videobuf-core.c if (q->int_ops->vaddr) vaddr 134 drivers/media/v4l2-core/videobuf-core.c return q->int_ops->vaddr(buf); vaddr 835 drivers/media/v4l2-core/videobuf-core.c void *vaddr = CALLPTR(q, vaddr, buf); vaddr 841 drivers/media/v4l2-core/videobuf-core.c if (copy_to_user(data, vaddr + q->read_off, count)) vaddr 852 drivers/media/v4l2-core/videobuf-core.c unsigned int *fc = CALLPTR(q, vaddr, buf); vaddr 25 drivers/media/v4l2-core/videobuf-dma-contig.c void *vaddr; vaddr 42 drivers/media/v4l2-core/videobuf-dma-contig.c mem->vaddr = dma_alloc_coherent(dev, mem->size, vaddr 45 drivers/media/v4l2-core/videobuf-dma-contig.c if (!mem->vaddr) { vaddr 50 drivers/media/v4l2-core/videobuf-dma-contig.c dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size); vaddr 58 drivers/media/v4l2-core/videobuf-dma-contig.c dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle); vaddr 60 drivers/media/v4l2-core/videobuf-dma-contig.c mem->vaddr = NULL; vaddr 114 drivers/media/v4l2-core/videobuf-dma-contig.c i, mem->vaddr); vaddr 117 drivers/media/v4l2-core/videobuf-dma-contig.c mem->vaddr = NULL; vaddr 231 drivers/media/v4l2-core/videobuf-dma-contig.c return mem->vaddr; vaddr 248 drivers/media/v4l2-core/videobuf-dma-contig.c if (!mem->vaddr) { vaddr 318 drivers/media/v4l2-core/videobuf-dma-contig.c mem->vaddr, mem->dma_handle); vaddr 344 drivers/media/v4l2-core/videobuf-dma-contig.c .vaddr = __videobuf_to_vaddr, vaddr 399 drivers/media/v4l2-core/videobuf-dma-contig.c if (mem->vaddr) { vaddr 401 drivers/media/v4l2-core/videobuf-dma-contig.c mem->vaddr = NULL; vaddr 238 drivers/media/v4l2-core/videobuf-dma-sg.c dma->vaddr = vmap(dma->vaddr_pages, nr_pages, VM_MAP | VM_IOREMAP, vaddr 240 drivers/media/v4l2-core/videobuf-dma-sg.c if (NULL == dma->vaddr) { vaddr 246 drivers/media/v4l2-core/videobuf-dma-sg.c dma->vaddr, nr_pages << PAGE_SHIFT); vaddr 248 drivers/media/v4l2-core/videobuf-dma-sg.c memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT); vaddr 294 drivers/media/v4l2-core/videobuf-dma-sg.c if (dma->vaddr) { vaddr 295 drivers/media/v4l2-core/videobuf-dma-sg.c dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr, vaddr 373 drivers/media/v4l2-core/videobuf-dma-sg.c vunmap(dma->vaddr); vaddr 374 drivers/media/v4l2-core/videobuf-dma-sg.c dma->vaddr = NULL; vaddr 499 drivers/media/v4l2-core/videobuf-dma-sg.c return mem->dma.vaddr; vaddr 657 drivers/media/v4l2-core/videobuf-dma-sg.c .vaddr = __videobuf_to_vaddr, vaddr 102 drivers/media/v4l2-core/videobuf-vmalloc.c __func__, i, 
mem->vaddr); vaddr 104 drivers/media/v4l2-core/videobuf-vmalloc.c vfree(mem->vaddr); vaddr 105 drivers/media/v4l2-core/videobuf-vmalloc.c mem->vaddr = NULL; vaddr 170 drivers/media/v4l2-core/videobuf-vmalloc.c if (!mem->vaddr) { vaddr 189 drivers/media/v4l2-core/videobuf-vmalloc.c mem->vaddr = vmalloc_user(pages); vaddr 190 drivers/media/v4l2-core/videobuf-vmalloc.c if (!mem->vaddr) { vaddr 195 drivers/media/v4l2-core/videobuf-vmalloc.c mem->vaddr, pages); vaddr 234 drivers/media/v4l2-core/videobuf-vmalloc.c mem->vaddr = vmalloc_user(pages); vaddr 235 drivers/media/v4l2-core/videobuf-vmalloc.c if (!mem->vaddr) { vaddr 239 drivers/media/v4l2-core/videobuf-vmalloc.c dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages); vaddr 242 drivers/media/v4l2-core/videobuf-vmalloc.c retval = remap_vmalloc_range(vma, mem->vaddr, 0); vaddr 245 drivers/media/v4l2-core/videobuf-vmalloc.c vfree(mem->vaddr); vaddr 274 drivers/media/v4l2-core/videobuf-vmalloc.c .vaddr = videobuf_to_vmalloc, vaddr 298 drivers/media/v4l2-core/videobuf-vmalloc.c return mem->vaddr; vaddr 320 drivers/media/v4l2-core/videobuf-vmalloc.c vfree(mem->vaddr); vaddr 321 drivers/media/v4l2-core/videobuf-vmalloc.c mem->vaddr = NULL; vaddr 54 drivers/misc/cxl/cxllib.c u64 buf, vaddr; vaddr 66 drivers/misc/cxl/cxllib.c vaddr = (buf + (1ull << CXL_DUMMY_READ_ALIGN) - 1) & vaddr 69 drivers/misc/cxl/cxllib.c WARN((vaddr + CXL_DUMMY_READ_SIZE) > (buf + buf_size), vaddr 71 drivers/misc/cxl/cxllib.c dummy_read_addr = virt_to_phys((void *) vaddr); vaddr 536 drivers/misc/genwqe/card_base.h void *vaddr, dma_addr_t dma_handle); vaddr 218 drivers/misc/genwqe/card_utils.c void *vaddr, dma_addr_t dma_handle) vaddr 220 drivers/misc/genwqe/card_utils.c if (vaddr == NULL) vaddr 223 drivers/misc/genwqe/card_utils.c dma_free_coherent(&cd->pci_dev->dev, size, vaddr, dma_handle); vaddr 260 drivers/misc/habanalabs/debugfs.c hnode->vaddr, userptr->size); vaddr 265 drivers/misc/habanalabs/debugfs.c hnode->vaddr, phys_pg_pack->total_size, vaddr 173 drivers/misc/habanalabs/firmware_if.c void *vaddr) vaddr 175 drivers/misc/habanalabs/firmware_if.c gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr, vaddr 3080 drivers/misc/habanalabs/goya/goya.c static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr, vaddr 3086 drivers/misc/habanalabs/goya/goya.c dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr); vaddr 3092 drivers/misc/habanalabs/goya/goya.c void *vaddr; vaddr 3094 drivers/misc/habanalabs/goya/goya.c vaddr = hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle); vaddr 3098 drivers/misc/habanalabs/goya/goya.c return vaddr; vaddr 3102 drivers/misc/habanalabs/goya/goya.c void *vaddr) vaddr 3104 drivers/misc/habanalabs/goya/goya.c hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr); vaddr 233 drivers/misc/habanalabs/goya/goyaP.h void *vaddr); vaddr 532 drivers/misc/habanalabs/habanalabs.h void (*asic_dma_pool_free)(struct hl_device *hdev, void *vaddr, vaddr 537 drivers/misc/habanalabs/habanalabs.h size_t size, void *vaddr); vaddr 819 drivers/misc/habanalabs/habanalabs.h u64 vaddr; vaddr 1541 drivers/misc/habanalabs/habanalabs.h void *vaddr); vaddr 745 drivers/misc/habanalabs/memory.c static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr, vaddr 749 drivers/misc/habanalabs/memory.c u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i; vaddr 772 drivers/misc/habanalabs/memory.c next_vaddr = vaddr; vaddr 927 drivers/misc/habanalabs/memory.c hnode->vaddr = ret_vaddr; vaddr 974 drivers/misc/habanalabs/memory.c static 
int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free) vaddr 989 drivers/misc/habanalabs/memory.c hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr) vaddr 990 drivers/misc/habanalabs/memory.c if (vaddr == hnode->vaddr) vaddr 997 drivers/misc/habanalabs/memory.c vaddr); vaddr 1015 drivers/misc/habanalabs/memory.c vaddr); vaddr 1025 drivers/misc/habanalabs/memory.c vaddr); vaddr 1031 drivers/misc/habanalabs/memory.c dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr); vaddr 1037 drivers/misc/habanalabs/memory.c vaddr &= ~(((u64) page_size) - 1); vaddr 1039 drivers/misc/habanalabs/memory.c next_vaddr = vaddr; vaddr 1064 drivers/misc/habanalabs/memory.c rc = add_va_block(hdev, va_range, vaddr, vaddr 1065 drivers/misc/habanalabs/memory.c vaddr + phys_pg_pack->total_size - 1); vaddr 1069 drivers/misc/habanalabs/memory.c vaddr); vaddr 1087 drivers/misc/habanalabs/memory.c hash_add(ctx->mem_hash, &hnode->node, vaddr); vaddr 1632 drivers/misc/habanalabs/memory.c hnode->vaddr, ctx->asid); vaddr 1633 drivers/misc/habanalabs/memory.c unmap_device_va(ctx, hnode->vaddr, true); vaddr 174 drivers/misc/habanalabs/mmu.c static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) vaddr 176 drivers/misc/habanalabs/mmu.c return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP0_MASK, HOP0_SHIFT); vaddr 179 drivers/misc/habanalabs/mmu.c static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) vaddr 181 drivers/misc/habanalabs/mmu.c return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP1_MASK, HOP1_SHIFT); vaddr 184 drivers/misc/habanalabs/mmu.c static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) vaddr 186 drivers/misc/habanalabs/mmu.c return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP2_MASK, HOP2_SHIFT); vaddr 189 drivers/misc/habanalabs/mmu.c static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) vaddr 191 drivers/misc/habanalabs/mmu.c return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP3_MASK, HOP3_SHIFT); vaddr 194 drivers/misc/habanalabs/mmu.c static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) vaddr 196 drivers/misc/habanalabs/mmu.c return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT); vaddr 306 drivers/misc/ibmvmc.c static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr, vaddr 313 drivers/misc/ibmvmc.c kzfree(vaddr); vaddr 220 drivers/misc/kgdbts.c static void break_helper(char *bp_type, char *arg, unsigned long vaddr) vaddr 227 drivers/misc/kgdbts.c addr = vaddr; vaddr 30 drivers/misc/mei/dma-ring.c if (dscr->vaddr) vaddr 33 drivers/misc/mei/dma-ring.c dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr, vaddr 35 drivers/misc/mei/dma-ring.c if (!dscr->vaddr) vaddr 50 drivers/misc/mei/dma-ring.c if (!dscr->vaddr) vaddr 53 drivers/misc/mei/dma-ring.c dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr); vaddr 54 drivers/misc/mei/dma-ring.c dscr->vaddr = NULL; vaddr 98 drivers/misc/mei/dma-ring.c return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr; vaddr 104 drivers/misc/mei/dma-ring.c return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr; vaddr 131 drivers/misc/mei/dma-ring.c unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr; vaddr 151 drivers/misc/mei/dma-ring.c unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr; vaddr 123 drivers/misc/mei/mei_dev.h void *vaddr; vaddr 154 drivers/misc/mic/host/mic_boot.c static void __mic_dma_free(struct device *dev, size_t size, 
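
The habanalabs mmu.c entries above compute each hop's PTE slot from the same mask/shift recipe: extract this hop's index bits from the vaddr, then scale by the entry size to get a byte address inside the hop's table. A generic sketch; MY_PTE_SIZE and my_hop_pte_addr are assumptions, not the driver's names:

#define MY_PTE_SIZE 8   /* bytes per page-table entry (assumption) */

static inline u64 my_hop_pte_addr(u64 hop_base, u64 vaddr, u64 mask, u64 shift)
{
        /* index into this hop's table, then scale to a byte address */
        return hop_base + ((vaddr & mask) >> shift) * MY_PTE_SIZE;
}
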
void *vaddr, vaddr 161 drivers/misc/mic/host/mic_boot.c kfree(vaddr); vaddr 127 drivers/misc/mic/scif/scif_nodeqp.h unsigned long vaddr; vaddr 205 drivers/misc/mic/scif/scif_rma.c msg.payload[1] = window->alloc_handle.vaddr; vaddr 634 drivers/misc/mic/scif/scif_rma.c msg.payload[0] = window->alloc_handle.vaddr; vaddr 812 drivers/misc/mic/scif/scif_rma.c msg.payload[1] = window->alloc_handle.vaddr; vaddr 929 drivers/misc/mic/scif/scif_rma.c window->peer_window = alloc->vaddr; vaddr 950 drivers/misc/mic/scif/scif_rma.c msg.payload[1] = window->alloc_handle.vaddr; vaddr 1084 drivers/misc/mic/scif/scif_rma.c handle->vaddr = msg->payload[0]; vaddr 732 drivers/misc/sgi-gru/gru_instructions.h static inline int gru_get_tri(void *vaddr) vaddr 734 drivers/misc/sgi-gru/gru_instructions.h return ((unsigned long)vaddr & (GRU_GSEG_PAGESIZE - 1)) - GRU_DS_BASE; vaddr 48 drivers/misc/sgi-gru/grufault.c struct vm_area_struct *gru_find_vma(unsigned long vaddr) vaddr 52 drivers/misc/sgi-gru/grufault.c vma = find_vma(current->mm, vaddr); vaddr 53 drivers/misc/sgi-gru/grufault.c if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops) vaddr 66 drivers/misc/sgi-gru/grufault.c static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr) vaddr 73 drivers/misc/sgi-gru/grufault.c vma = gru_find_vma(vaddr); vaddr 75 drivers/misc/sgi-gru/grufault.c gts = gru_find_thread_state(vma, TSID(vaddr, vma)); vaddr 83 drivers/misc/sgi-gru/grufault.c static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr) vaddr 90 drivers/misc/sgi-gru/grufault.c vma = gru_find_vma(vaddr); vaddr 94 drivers/misc/sgi-gru/grufault.c gts = gru_alloc_thread_state(vma, TSID(vaddr, vma)); vaddr 178 drivers/misc/sgi-gru/grufault.c unsigned long vaddr, int write, vaddr 188 drivers/misc/sgi-gru/grufault.c if (get_user_pages(vaddr, 1, write ? 
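
gru_find_vma() in the grufault.c entries above resolves a user vaddr back to a VMA and then verifies the mapping really belongs to this driver by comparing vm_ops. A sketch; my_vm_ops is a hypothetical stand-in for the driver's own ops table, and the caller is assumed to hold mmap_sem:

#include <linux/mm.h>

static struct vm_area_struct *
my_find_vma(unsigned long vaddr, const struct vm_operations_struct *my_vm_ops)
{
        struct vm_area_struct *vma = find_vma(current->mm, vaddr);

        /* find_vma() may return a VMA entirely above vaddr; check vm_start too */
        if (vma && vma->vm_start <= vaddr && vma->vm_ops == my_vm_ops)
                return vma;
        return NULL;
}
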
FOLL_WRITE : 0, &page, NULL) <= 0) vaddr 205 drivers/misc/sgi-gru/grufault.c static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, vaddr 214 drivers/misc/sgi-gru/grufault.c pgdp = pgd_offset(vma->vm_mm, vaddr); vaddr 218 drivers/misc/sgi-gru/grufault.c p4dp = p4d_offset(pgdp, vaddr); vaddr 222 drivers/misc/sgi-gru/grufault.c pudp = pud_offset(p4dp, vaddr); vaddr 226 drivers/misc/sgi-gru/grufault.c pmdp = pmd_offset(pudp, vaddr); vaddr 234 drivers/misc/sgi-gru/grufault.c pte = *pte_offset_kernel(pmdp, vaddr); vaddr 252 drivers/misc/sgi-gru/grufault.c static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr, vaddr 260 drivers/misc/sgi-gru/grufault.c vma = find_vma(mm, vaddr); vaddr 269 drivers/misc/sgi-gru/grufault.c ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps); vaddr 273 drivers/misc/sgi-gru/grufault.c if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps)) vaddr 316 drivers/misc/sgi-gru/grufault.c unsigned long vaddr = 0, gpa; vaddr 323 drivers/misc/sgi-gru/grufault.c vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1; vaddr 325 drivers/misc/sgi-gru/grufault.c vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1; vaddr 328 drivers/misc/sgi-gru/grufault.c vaddr &= PAGE_MASK; vaddr 329 drivers/misc/sgi-gru/grufault.c vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE); vaddr 331 drivers/misc/sgi-gru/grufault.c while (vaddr > fault_vaddr) { vaddr 332 drivers/misc/sgi-gru/grufault.c ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); vaddr 333 drivers/misc/sgi-gru/grufault.c if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write, vaddr 339 drivers/misc/sgi-gru/grufault.c vaddr, asid, write, pageshift, gpa); vaddr 340 drivers/misc/sgi-gru/grufault.c vaddr -= PAGE_SIZE; vaddr 363 drivers/misc/sgi-gru/grufault.c unsigned long gpa = 0, vaddr = 0; vaddr 398 drivers/misc/sgi-gru/grufault.c vaddr = tfh->missvaddr; vaddr 413 drivers/misc/sgi-gru/grufault.c ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); vaddr 428 drivers/misc/sgi-gru/grufault.c gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe); vaddr 434 drivers/misc/sgi-gru/grufault.c tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write, vaddr 439 drivers/misc/sgi-gru/grufault.c atomic ? 
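
atomic_pte_lookup() in the grufault.c entries above is a textbook software walk of the five-level page tables: pgd, p4d, pud, pmd, then the pte itself. An illustrative skeleton with a hypothetical name; huge-page handling and locking are omitted (the real code runs under mmap_sem and special-cases pmd-mapped pages):

#include <linux/mm.h>
#include <asm/pgtable.h>

static int my_walk(struct mm_struct *mm, unsigned long vaddr, unsigned long *pfn)
{
        pgd_t *pgd = pgd_offset(mm, vaddr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t pte;

        if (pgd_none(*pgd))
                return -EFAULT;
        p4d = p4d_offset(pgd, vaddr);
        if (p4d_none(*p4d))
                return -EFAULT;
        pud = pud_offset(p4d, vaddr);
        if (pud_none(*pud))
                return -EFAULT;
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd))
                return -EFAULT;
        pte = *pte_offset_kernel(pmd, vaddr);
        if (!pte_present(pte))
                return -EFAULT;         /* not resident: fall back to a faulting path */
        *pfn = pte_pfn(pte);
        return 0;
}
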
"atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid, vaddr 447 drivers/misc/sgi-gru/grufault.c gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); vaddr 460 drivers/misc/sgi-gru/grufault.c gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); vaddr 497 drivers/misc/sgi-gru/grufault.c gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); vaddr 509 drivers/misc/sgi-gru/grufault.c tfh, vaddr); vaddr 802 drivers/misc/sgi-gru/grufault.c req.vaddr, req.len); vaddr 810 drivers/misc/sgi-gru/grufault.c gru_flush_tlb_range(gms, req.vaddr, req.len); vaddr 230 drivers/misc/sgi-gru/grufile.c void *vaddr, int blade_id, int chiplet_id) vaddr 235 drivers/misc/sgi-gru/grufile.c gru->gs_gru_base_vaddr = vaddr; vaddr 259 drivers/misc/sgi-gru/grufile.c void *vaddr; vaddr 281 drivers/misc/sgi-gru/grufile.c vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); vaddr 282 drivers/misc/sgi-gru/grufile.c gru_init_chiplet(gru, paddr, vaddr, bid, chip); vaddr 139 drivers/misc/sgi-gru/gruhandles.c unsigned long vaddr, unsigned long vaddrmask, vaddr 143 drivers/misc/sgi-gru/gruhandles.c tgh->vaddr = vaddr; vaddr 157 drivers/misc/sgi-gru/gruhandles.c unsigned long vaddr, int asid, int dirty, vaddr 161 drivers/misc/sgi-gru/gruhandles.c tfh->fillvaddr = vaddr; vaddr 173 drivers/misc/sgi-gru/gruhandles.c unsigned long vaddr, int asid, int dirty, vaddr 177 drivers/misc/sgi-gru/gruhandles.c tfh->fillvaddr = vaddr; vaddr 149 drivers/misc/sgi-gru/gruhandles.h static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet) vaddr 151 drivers/misc/sgi-gru/gruhandles.h return vaddr + GRU_SIZE * (2 * pnode + chiplet); vaddr 199 drivers/misc/sgi-gru/gruhandles.h unsigned long vaddr:64; /* DW 1 */ vaddr 507 drivers/misc/sgi-gru/gruhandles.h int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr, vaddr 511 drivers/misc/sgi-gru/gruhandles.h int gaa, unsigned long vaddr, int asid, int dirty, int pagesize); vaddr 513 drivers/misc/sgi-gru/gruhandles.h int gaa, unsigned long vaddr, int asid, int dirty, int pagesize); vaddr 143 drivers/misc/sgi-gru/grukdump.c hdr.vaddr = gts->ts_vma->vm_start; vaddr 143 drivers/misc/sgi-gru/grukservices.c void *vaddr; vaddr 174 drivers/misc/sgi-gru/grukservices.c vaddr = gru->gs_gru_base_vaddr; vaddr 176 drivers/misc/sgi-gru/grukservices.c bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0); vaddr 177 drivers/misc/sgi-gru/grukservices.c bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0); vaddr 106 drivers/misc/sgi-gru/grulib.h unsigned long vaddr; vaddr 137 drivers/misc/sgi-gru/grulib.h unsigned long vaddr; vaddr 920 drivers/misc/sgi-gru/grumain.c unsigned long paddr, vaddr; vaddr 923 drivers/misc/sgi-gru/grumain.c vaddr = vmf->address; vaddr 925 drivers/misc/sgi-gru/grumain.c vma, vaddr, GSEG_BASE(vaddr)); vaddr 929 drivers/misc/sgi-gru/grumain.c gts = gru_find_thread_state(vma, TSID(vaddr, vma)); vaddr 953 drivers/misc/sgi-gru/grumain.c remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1), vaddr 642 drivers/misc/sgi-gru/grutables.h extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); vaddr 48 drivers/mtd/nand/raw/davinci_nand.c void __iomem *vaddr; vaddr 111 drivers/mtd/nand/raw/davinci_nand.c info->current_cs = info->vaddr; vaddr 694 drivers/mtd/nand/raw/davinci_nand.c void __iomem *vaddr; vaddr 725 drivers/mtd/nand/raw/davinci_nand.c vaddr = devm_ioremap_resource(&pdev->dev, res1); vaddr 726 drivers/mtd/nand/raw/davinci_nand.c if (IS_ERR(vaddr)) vaddr 727 
drivers/mtd/nand/raw/davinci_nand.c return PTR_ERR(vaddr); vaddr 743 drivers/mtd/nand/raw/davinci_nand.c info->vaddr = vaddr; vaddr 749 drivers/mtd/nand/raw/davinci_nand.c info->chip.legacy.IO_ADDR_R = vaddr; vaddr 750 drivers/mtd/nand/raw/davinci_nand.c info->chip.legacy.IO_ADDR_W = vaddr; vaddr 762 drivers/mtd/nand/raw/davinci_nand.c info->current_cs = info->vaddr; vaddr 199 drivers/mtd/nand/raw/qcom_nandc.c #define reg_buf_dma_addr(chip, vaddr) \ vaddr 201 drivers/mtd/nand/raw/qcom_nandc.c ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf)) vaddr 807 drivers/mtd/nand/raw/qcom_nandc.c int reg_off, const void *vaddr, vaddr 824 drivers/mtd/nand/raw/qcom_nandc.c (__le32 *)vaddr + i)); vaddr 829 drivers/mtd/nand/raw/qcom_nandc.c *((__le32 *)vaddr + i)); vaddr 862 drivers/mtd/nand/raw/qcom_nandc.c const void *vaddr, vaddr 870 drivers/mtd/nand/raw/qcom_nandc.c vaddr, size); vaddr 874 drivers/mtd/nand/raw/qcom_nandc.c vaddr, size); vaddr 893 drivers/mtd/nand/raw/qcom_nandc.c int reg_off, const void *vaddr, int size, vaddr 909 drivers/mtd/nand/raw/qcom_nandc.c sg_init_one(sgl, vaddr, size); vaddr 974 drivers/mtd/nand/raw/qcom_nandc.c void *vaddr; vaddr 976 drivers/mtd/nand/raw/qcom_nandc.c vaddr = nandc->reg_read_buf + nandc->reg_read_pos; vaddr 983 drivers/mtd/nand/raw/qcom_nandc.c return prep_bam_dma_desc_cmd(nandc, true, first, vaddr, vaddr 989 drivers/mtd/nand/raw/qcom_nandc.c return prep_adm_dma_desc(nandc, true, first, vaddr, vaddr 1006 drivers/mtd/nand/raw/qcom_nandc.c void *vaddr; vaddr 1008 drivers/mtd/nand/raw/qcom_nandc.c vaddr = offset_to_nandc_reg(regs, first); vaddr 1012 drivers/mtd/nand/raw/qcom_nandc.c vaddr = &regs->erased_cw_detect_cfg_set; vaddr 1014 drivers/mtd/nand/raw/qcom_nandc.c vaddr = &regs->erased_cw_detect_cfg_clr; vaddr 1027 drivers/mtd/nand/raw/qcom_nandc.c return prep_bam_dma_desc_cmd(nandc, false, first, vaddr, vaddr 1033 drivers/mtd/nand/raw/qcom_nandc.c return prep_adm_dma_desc(nandc, false, first, vaddr, vaddr 1047 drivers/mtd/nand/raw/qcom_nandc.c const u8 *vaddr, int size, unsigned int flags) vaddr 1050 drivers/mtd/nand/raw/qcom_nandc.c return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags); vaddr 1052 drivers/mtd/nand/raw/qcom_nandc.c return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false); vaddr 1065 drivers/mtd/nand/raw/qcom_nandc.c const u8 *vaddr, int size, unsigned int flags) vaddr 1068 drivers/mtd/nand/raw/qcom_nandc.c return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags); vaddr 1070 drivers/mtd/nand/raw/qcom_nandc.c return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false); vaddr 136 drivers/net/caif/caif_virtio.c u8 *vaddr; vaddr 152 drivers/net/caif/caif_virtio.c gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr, vaddr 506 drivers/net/caif/caif_virtio.c buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size); vaddr 507 drivers/net/caif/caif_virtio.c if (unlikely(!buf_info->vaddr)) vaddr 511 drivers/net/caif/caif_virtio.c skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len); vaddr 512 drivers/net/caif/caif_virtio.c sg_init_one(sg, buf_info->vaddr + pad_len, vaddr 802 drivers/net/ethernet/amd/au1000_eth.c (unsigned char *)pDB->vaddr, frmlen); vaddr 994 drivers/net/ethernet/amd/au1000_eth.c skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len); vaddr 997 drivers/net/ethernet/amd/au1000_eth.c ((char *)pDB->vaddr)[i] = 0; vaddr 1152 drivers/net/ethernet/amd/au1000_eth.c aup->vaddr = (u32)dma_alloc_attrs(&pdev->dev, MAX_BUF_SIZE * vaddr 1156
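
The caif_virtio entries above (and the coda iram entries earlier) carve buffers out of a pre-mapped region with the genalloc API; the pool hands back plain unsigned longs that the drivers cast to their vaddr, with 0 playing the role of NULL. A sketch under hypothetical my_* names:

#include <linux/genalloc.h>

static void *my_pool_get(struct gen_pool *pool, size_t size)
{
        return (void *)gen_pool_alloc(pool, size);      /* 0 on failure */
}

static void my_pool_put(struct gen_pool *pool, void *vaddr, size_t size)
{
        if (vaddr)
                gen_pool_free(pool, (unsigned long)vaddr, size);
}
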
drivers/net/ethernet/amd/au1000_eth.c if (!aup->vaddr) { vaddr 1256 drivers/net/ethernet/amd/au1000_eth.c pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); vaddr 1257 drivers/net/ethernet/amd/au1000_eth.c pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); vaddr 1335 drivers/net/ethernet/amd/au1000_eth.c (void *)aup->vaddr, aup->dma_addr, vaddr 1369 drivers/net/ethernet/amd/au1000_eth.c (void *)aup->vaddr, aup->dma_addr, vaddr 31 drivers/net/ethernet/amd/au1000_eth.h u32 *vaddr; vaddr 109 drivers/net/ethernet/amd/au1000_eth.h u32 vaddr; /* virtual address of rx/tx buffers */ vaddr 563 drivers/net/ethernet/apple/bmac.c void *vaddr; vaddr 568 drivers/net/ethernet/apple/bmac.c vaddr = skb->data; vaddr 569 drivers/net/ethernet/apple/bmac.c baddr = virt_to_bus(vaddr); vaddr 110 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c int vaddr, rc; vaddr 112 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE); vaddr 113 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c if (vaddr < 0) vaddr 114 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c return vaddr; vaddr 116 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c rc = pci_read_vpd(padap->pdev, vaddr, len, dest); vaddr 1115 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); vaddr 1117 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c if (vaddr >= 0) vaddr 1118 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); vaddr 1119 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c return vaddr < 0 ? vaddr : 0; vaddr 1124 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); vaddr 1126 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c if (vaddr >= 0) vaddr 1127 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); vaddr 1128 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c return vaddr < 0 ? 
vaddr : 0; vaddr 2682 drivers/net/ethernet/cisco/enic/enic_main.c if (enic->bar[i].vaddr) vaddr 2683 drivers/net/ethernet/cisco/enic/enic_main.c iounmap(enic->bar[i].vaddr); vaddr 2768 drivers/net/ethernet/cisco/enic/enic_main.c enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); vaddr 2769 drivers/net/ethernet/cisco/enic/enic_main.c if (!enic->bar[i].vaddr) { vaddr 60 drivers/net/ethernet/cisco/enic/vnic_dev.c rh = bar->vaddr; vaddr 61 drivers/net/ethernet/cisco/enic/vnic_dev.c mrh = bar->vaddr; vaddr 98 drivers/net/ethernet/cisco/enic/vnic_dev.c if (!bar[bar_num].len || !bar[bar_num].vaddr) vaddr 125 drivers/net/ethernet/cisco/enic/vnic_dev.c vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr + vaddr 143 drivers/net/ethernet/cisco/enic/vnic_dev.c if (!vdev->res[type].vaddr) vaddr 151 drivers/net/ethernet/cisco/enic/vnic_dev.c return (char __iomem *)vdev->res[type].vaddr + vaddr 154 drivers/net/ethernet/cisco/enic/vnic_dev.c return (char __iomem *)vdev->res[type].vaddr; vaddr 55 drivers/net/ethernet/cisco/enic/vnic_dev.h void __iomem *vaddr; vaddr 80 drivers/net/ethernet/cisco/enic/vnic_dev.h void __iomem *vaddr; vaddr 1325 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c void *vaddr; vaddr 1335 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c vaddr = phys_to_virt(qm_fd_addr(fd)); vaddr 1336 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c sgt = vaddr + qm_fd_get_offset(fd); vaddr 1343 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size, vaddr 1701 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c void *vaddr; vaddr 1703 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c vaddr = phys_to_virt(addr); vaddr 1704 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); vaddr 1710 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c skb = build_skb(vaddr, dpaa_bp->size + vaddr 1723 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c skb_free_frag(vaddr); vaddr 1740 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c void *vaddr, *sg_vaddr; vaddr 1749 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c vaddr = phys_to_virt(addr); vaddr 1750 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); vaddr 1753 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c sgt = vaddr + fd_off; vaddr 1823 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c skb_free_frag(vaddr); vaddr 1851 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c skb_free_frag(vaddr); vaddr 2272 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c void *vaddr; vaddr 2319 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c vaddr = phys_to_virt(addr); vaddr 2320 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c prefetch(vaddr + qm_fd_get_offset(fd)); vaddr 2342 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns)) vaddr 2358 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c skb_set_hash(skb, be32_to_cpu(*(u32 *)(vaddr + hash_offset)), vaddr 98 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h void *vaddr, vaddr 107 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), vaddr 115 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h __field(void *, vaddr) vaddr 127 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h __entry->vaddr = vaddr; vaddr 140 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h __entry->vaddr, vaddr 66 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c void *vaddr) vaddr 85 
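The enic entries above map each PCI BAR once at probe time and keep the __iomem cookie alongside its length. A condensed sketch of that map/unmap pairing, with a hypothetical per-BAR struct:

#include <linux/errno.h>
#include <linux/pci.h>

struct demo_bar {
	void __iomem *vaddr;
	resource_size_t len;
};

static int demo_map_bar(struct pci_dev *pdev, struct demo_bar *bar, int i)
{
	bar->len = pci_resource_len(pdev, i);
	bar->vaddr = pci_iomap(pdev, i, bar->len);	/* NULL on failure */
	if (!bar->vaddr)
		return -ENODEV;
	return 0;
}

static void demo_unmap_bar(struct pci_dev *pdev, struct demo_bar *bar)
{
	if (bar->vaddr)
		pci_iounmap(pdev, bar->vaddr);
	bar->vaddr = NULL;
}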
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c sgt = vaddr + dpaa2_fd_get_offset(fd); vaddr 98 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c free_pages((unsigned long)vaddr, 0); vaddr 209 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c void *vaddr; vaddr 213 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); vaddr 216 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c free_pages((unsigned long)vaddr, 0); vaddr 278 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_fd *fd, void *vaddr) vaddr 295 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp.data = vaddr + dpaa2_fd_get_offset(fd); vaddr 304 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c dpaa2_fd_set_offset(fd, xdp.data - vaddr); vaddr 311 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid); vaddr 336 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp.data_hard_start = vaddr; vaddr 359 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c void *vaddr; vaddr 372 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); vaddr 376 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fas = dpaa2_get_fas(vaddr, false); vaddr 378 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c buf_data = vaddr + dpaa2_fd_get_offset(fd); vaddr 385 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); vaddr 394 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c skb = build_linear_skb(ch, fd, vaddr); vaddr 401 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c free_pages((unsigned long)vaddr, 0); vaddr 417 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c __le64 *ts = dpaa2_get_ts(vaddr, false); vaddr 443 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c free_rx_fd(priv, fd, vaddr); vaddr 109 drivers/net/ethernet/freescale/enetc/enetc.h void *vaddr; vaddr 76 drivers/net/ethernet/freescale/enetc/enetc_msg.c msg->vaddr = dma_alloc_coherent(dev, msg->size, &msg->dma, vaddr 78 drivers/net/ethernet/freescale/enetc/enetc_msg.c if (!msg->vaddr) { vaddr 100 drivers/net/ethernet/freescale/enetc/enetc_msg.c dma_free_coherent(&si->pdev->dev, msg->size, msg->vaddr, msg->dma); vaddr 587 drivers/net/ethernet/freescale/enetc/enetc_pf.c cmd = (struct enetc_msg_cmd_set_primary_mac *)msg->vaddr; vaddr 610 drivers/net/ethernet/freescale/enetc/enetc_pf.c cmd_hdr = (struct enetc_msg_cmd_header *)msg->vaddr; vaddr 63 drivers/net/ethernet/freescale/enetc/enetc_vf.c msg.vaddr = dma_alloc_coherent(priv->dev, msg.size, &msg.dma, vaddr 65 drivers/net/ethernet/freescale/enetc/enetc_vf.c if (!msg.vaddr) { vaddr 71 drivers/net/ethernet/freescale/enetc/enetc_vf.c cmd = (struct enetc_msg_cmd_set_primary_mac *)msg.vaddr; vaddr 79 drivers/net/ethernet/freescale/enetc/enetc_vf.c dma_free_coherent(priv->dev, msg.size, msg.vaddr, msg.dma); vaddr 47 drivers/net/ethernet/freescale/fman/fman_muram.c unsigned long vaddr) vaddr 49 drivers/net/ethernet/freescale/fman/fman_muram.c return vaddr - (unsigned long)muram->vbase; vaddr 68 drivers/net/ethernet/freescale/fman/fman_muram.c void __iomem *vaddr; vaddr 81 drivers/net/ethernet/freescale/fman/fman_muram.c vaddr = ioremap(base, size); vaddr 82 drivers/net/ethernet/freescale/fman/fman_muram.c if (!vaddr) { vaddr 87 drivers/net/ethernet/freescale/fman/fman_muram.c ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr, vaddr 91 drivers/net/ethernet/freescale/fman/fman_muram.c iounmap(vaddr); vaddr 95 
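The enetc_msg entries above are the canonical dma_alloc_coherent() usage: one call yields both the CPU virtual address and the bus address the device must be programmed with. A minimal sketch (struct and names hypothetical):

#include <linux/dma-mapping.h>

struct demo_msg {
	void *vaddr;		/* CPU view of the buffer */
	dma_addr_t dma;		/* device view of the same buffer */
	size_t size;
};

static int demo_msg_alloc(struct device *dev, struct demo_msg *msg,
			  size_t size)
{
	msg->size = size;
	msg->vaddr = dma_alloc_coherent(dev, size, &msg->dma, GFP_KERNEL);
	if (!msg->vaddr)
		return -ENOMEM;
	/* msg->dma goes into device registers; msg->vaddr is what
	 * the CPU reads and writes. */
	return 0;
}

static void demo_msg_free(struct device *dev, struct demo_msg *msg)
{
	dma_free_coherent(dev, msg->size, msg->vaddr, msg->dma);
	msg->vaddr = NULL;
}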
drivers/net/ethernet/freescale/fman/fman_muram.c memset_io(vaddr, 0, (int)size); vaddr 97 drivers/net/ethernet/freescale/fman/fman_muram.c muram->vbase = vaddr; vaddr 134 drivers/net/ethernet/freescale/fman/fman_muram.c unsigned long vaddr; vaddr 136 drivers/net/ethernet/freescale/fman/fman_muram.c vaddr = gen_pool_alloc(muram->pool, size); vaddr 137 drivers/net/ethernet/freescale/fman/fman_muram.c if (!vaddr) vaddr 140 drivers/net/ethernet/freescale/fman/fman_muram.c memset_io((void __iomem *)vaddr, 0, size); vaddr 142 drivers/net/ethernet/freescale/fman/fman_muram.c return fman_muram_vbase_to_offset(muram, vaddr); vaddr 58 drivers/net/ethernet/freescale/fman/mac.c void __iomem *vaddr; vaddr 703 drivers/net/ethernet/freescale/fman/mac.c priv->vaddr = devm_ioremap(dev, mac_dev->res->start, vaddr 705 drivers/net/ethernet/freescale/fman/mac.c if (!priv->vaddr) { vaddr 1359 drivers/net/ethernet/freescale/gianfar.c void *vaddr; vaddr 1376 drivers/net/ethernet/freescale/gianfar.c vaddr = dma_alloc_coherent(dev, vaddr 1382 drivers/net/ethernet/freescale/gianfar.c if (!vaddr) vaddr 1387 drivers/net/ethernet/freescale/gianfar.c tx_queue->tx_bd_base = vaddr; vaddr 1392 drivers/net/ethernet/freescale/gianfar.c vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; vaddr 1398 drivers/net/ethernet/freescale/gianfar.c rx_queue->rx_bd_base = vaddr; vaddr 1403 drivers/net/ethernet/freescale/gianfar.c vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; vaddr 736 drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c mac_drv->io_base = mac_param->vaddr; vaddr 369 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c param->vaddr = mac_cb->vaddr; vaddr 1025 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx); vaddr 186 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h u8 __iomem *vaddr; /*virtual address*/ vaddr 308 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h u8 __iomem *vaddr; vaddr 809 drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c mac_drv->io_base = mac_param->vaddr; vaddr 102 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr, vaddr 108 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr, vaddr 110 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c if (!*vaddr) { vaddr 125 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr); vaddr 426 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c void **vaddr = &wq->shadow_block_vaddr[i]; vaddr 431 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr, vaddr 472 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c void **vaddr = &wq->shadow_block_vaddr[i]; vaddr 476 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size, vaddr 478 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c if (!*vaddr) { vaddr 311 drivers/net/ethernet/ibm/ehea/ehea.h u64 vaddr; vaddr 489 drivers/net/ethernet/ibm/ehea/ehea_main.c rwqe->sg_list[0].vaddr = tmp_addr; vaddr 1627 drivers/net/ethernet/ibm/ehea/ehea_main.c sg1entry->vaddr = vaddr 1660 drivers/net/ethernet/ibm/ehea/ehea_main.c sg1entry->vaddr = vaddr 1673 drivers/net/ethernet/ibm/ehea/ehea_main.c sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag)); vaddr 2594 drivers/net/ethernet/ibm/ehea/ehea_main.c rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); vaddr 2603 
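The gianfar entries above make a single coherent allocation and advance a cursor through it, giving each TX and RX queue its slice of descriptor memory. A simplified sketch of that carving, with hypothetical descriptor types and a single queue of each kind:

#include <linux/dma-mapping.h>

struct demo_txbd { __le32 status, buf; };
struct demo_rxbd { __le32 status, buf; };

static int demo_alloc_rings(struct device *dev, int ntx, int nrx,
			    struct demo_txbd **tx_base,
			    struct demo_rxbd **rx_base,
			    dma_addr_t *dma_base)
{
	size_t size = ntx * sizeof(struct demo_txbd) +
		      nrx * sizeof(struct demo_rxbd);
	void *vaddr = dma_alloc_coherent(dev, size, dma_base, GFP_KERNEL);

	if (!vaddr)
		return -ENOMEM;

	/* Hand out slices by walking a cursor through the block; the
	 * matching DMA addresses advance by the same offsets. */
	*tx_base = vaddr;
	vaddr += ntx * sizeof(struct demo_txbd);
	*rx_base = vaddr;
	return 0;
}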
drivers/net/ethernet/ibm/ehea/ehea_main.c rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); vaddr 477 drivers/net/ethernet/ibm/ehea/ehea_phyp.c u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, vaddr 488 drivers/net/ethernet/ibm/ehea/ehea_phyp.c vaddr, /* R6 */ vaddr 392 drivers/net/ethernet/ibm/ehea/ehea_phyp.h u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, vaddr 571 drivers/net/ethernet/ibm/ehea/ehea_qmr.c u64 vaddr = EHEA_BUSMAP_START; vaddr 593 drivers/net/ethernet/ibm/ehea/ehea_qmr.c ehea_dir->ent[idx] = vaddr; vaddr 594 drivers/net/ethernet/ibm/ehea/ehea_qmr.c vaddr += EHEA_SECTSIZE; vaddr 909 drivers/net/ethernet/ibm/ehea/ehea_qmr.c mr->vaddr = EHEA_BUSMAP_START; vaddr 940 drivers/net/ethernet/ibm/ehea/ehea_qmr.c old_mr->vaddr, EHEA_MR_ACC_CTRL, vaddr 59 drivers/net/ethernet/ibm/ehea/ehea_qmr.h u64 vaddr; vaddr 4221 drivers/net/ethernet/intel/e1000/e1000_main.c u8 *vaddr; vaddr 4230 drivers/net/ethernet/intel/e1000/e1000_main.c vaddr = kmap_atomic(p); vaddr 4231 drivers/net/ethernet/intel/e1000/e1000_main.c memcpy(skb_tail_pointer(skb), vaddr, vaddr 4233 drivers/net/ethernet/intel/e1000/e1000_main.c kunmap_atomic(vaddr); vaddr 1389 drivers/net/ethernet/intel/e1000e/netdev.c u8 *vaddr; vaddr 1401 drivers/net/ethernet/intel/e1000e/netdev.c vaddr = kmap_atomic(ps_page->page); vaddr 1402 drivers/net/ethernet/intel/e1000e/netdev.c memcpy(skb_tail_pointer(skb), vaddr, l1); vaddr 1403 drivers/net/ethernet/intel/e1000e/netdev.c kunmap_atomic(vaddr); vaddr 1604 drivers/net/ethernet/intel/e1000e/netdev.c u8 *vaddr; vaddr 1605 drivers/net/ethernet/intel/e1000e/netdev.c vaddr = kmap_atomic(buffer_info->page); vaddr 1606 drivers/net/ethernet/intel/e1000e/netdev.c memcpy(skb_tail_pointer(skb), vaddr, vaddr 1608 drivers/net/ethernet/intel/e1000e/netdev.c kunmap_atomic(vaddr); vaddr 230 drivers/net/ethernet/marvell/mvneta_bm.c u32 *vaddr; vaddr 241 drivers/net/ethernet/marvell/mvneta_bm.c vaddr = phys_to_virt(buf_phys_addr); vaddr 242 drivers/net/ethernet/marvell/mvneta_bm.c if (!vaddr) vaddr 247 drivers/net/ethernet/marvell/mvneta_bm.c hwbm_buf_free(&bm_pool->hwbm_pool, vaddr); vaddr 909 drivers/net/ethernet/marvell/skge.c static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) vaddr 919 drivers/net/ethernet/marvell/skge.c for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { vaddr 493 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c struct vport_addr *vaddr); vaddr 495 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) vaddr 497 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c u8 *mac = vaddr->node.addr; vaddr 498 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c u16 vport = vaddr->vport; vaddr 514 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c vaddr->mpfs = true; vaddr 519 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); vaddr 522 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c vport, mac, vaddr->flow_rule); vaddr 527 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) vaddr 529 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c u8 *mac = vaddr->node.addr; vaddr 530 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c u16 vport = vaddr->vport; vaddr 536 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c if (!vaddr->mpfs || esw->manager_vport == vport) vaddr 544 
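The e1000/e1000e entries above copy received data out of a page through a temporary atomic mapping. The pattern in isolation, assuming offset + len stays within one page:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy 'len' bytes out of a (possibly highmem) page. kmap_atomic()
 * cannot sleep, and the mapping must be torn down in the same
 * context it was created in. */
static void demo_copy_from_page(void *dst, struct page *page,
				unsigned int offset, unsigned int len)
{
	u8 *vaddr = kmap_atomic(page);

	memcpy(dst, vaddr + offset, len);
	kunmap_atomic(vaddr);
}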
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c vaddr->mpfs = false; vaddr 547 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c if (vaddr->flow_rule) vaddr 548 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c mlx5_del_flow_rules(vaddr->flow_rule); vaddr 549 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c vaddr->flow_rule = NULL; vaddr 555 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c struct vport_addr *vaddr, vaddr 558 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c u8 *mac = vaddr->node.addr; vaddr 570 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c vaddr->vport == vport_num) vaddr 572 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c switch (vaddr->action) { vaddr 602 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) vaddr 606 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c u8 *mac = vaddr->node.addr; vaddr 607 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c u16 vport = vaddr->vport; vaddr 624 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c update_allmulti_vports(esw, vaddr, esw_mc); vaddr 630 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c if (!vaddr->mc_promisc) vaddr 634 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); vaddr 637 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c vport, mac, vaddr->flow_rule, vaddr 642 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) vaddr 646 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c u8 *mac = vaddr->node.addr; vaddr 647 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c u16 vport = vaddr->vport; vaddr 661 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c vport, mac, vaddr->flow_rule, esw_mc->refcnt, vaddr 664 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c if (vaddr->flow_rule) vaddr 665 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c mlx5_del_flow_rules(vaddr->flow_rule); vaddr 666 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c vaddr->flow_rule = NULL; vaddr 671 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c if (vaddr->mc_promisc || (--esw_mc->refcnt > 0)) vaddr 675 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c update_allmulti_vports(esw, vaddr, esw_mc); vaddr 2307 drivers/net/ethernet/neterion/vxge/vxge-config.c void *vaddr; vaddr 2314 drivers/net/ethernet/neterion/vxge/vxge-config.c vaddr = kmalloc((size), flags); vaddr 2316 drivers/net/ethernet/neterion/vxge/vxge-config.c vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); vaddr 1903 drivers/net/ethernet/neterion/vxge/vxge-config.h void *vaddr; vaddr 1913 drivers/net/ethernet/neterion/vxge/vxge-config.h vaddr = kmalloc((size), flags); vaddr 1914 drivers/net/ethernet/neterion/vxge/vxge-config.h if (vaddr == NULL) vaddr 1915 drivers/net/ethernet/neterion/vxge/vxge-config.h return vaddr; vaddr 1916 drivers/net/ethernet/neterion/vxge/vxge-config.h misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr, vaddr 1925 drivers/net/ethernet/neterion/vxge/vxge-config.h kfree(vaddr); vaddr 1932 drivers/net/ethernet/neterion/vxge/vxge-config.h vaddr = (void *)((u8 *)vaddr + misaligned); vaddr 1933 drivers/net/ethernet/neterion/vxge/vxge-config.h return vaddr; vaddr 1936 drivers/net/ethernet/neterion/vxge/vxge-config.h static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr, vaddr 1940 drivers/net/ethernet/neterion/vxge/vxge-config.h u8 *tmp = (u8 *)vaddr; vaddr 60 drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c 
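The vxge helpers above over-allocate with kmalloc() and bump the returned pointer up to the required alignment, keeping enough state to free the original pointer later. A simplified sketch that stores the raw pointer explicitly rather than recomputing the offset the way vxge does (all names hypothetical; align must be a power of two for PTR_ALIGN()):

#include <linux/kernel.h>
#include <linux/slab.h>

struct demo_aligned_buf {
	void *raw;	/* pointer kmalloc() returned; needed for kfree() */
	void *aligned;	/* raw, rounded up to 'align' */
};

static int demo_alloc_aligned(struct demo_aligned_buf *b, size_t size,
			      size_t align)
{
	/* Worst case wastes align - 1 bytes at the front. */
	b->raw = kmalloc(size + align - 1, GFP_KERNEL);
	if (!b->raw)
		return -ENOMEM;
	b->aligned = PTR_ALIGN(b->raw, align);
	return 0;
}

static void demo_free_aligned(struct demo_aligned_buf *b)
{
	kfree(b->raw);
	b->raw = b->aligned = NULL;
}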
bars[j].vaddr = NULL; vaddr 62 drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c bars[j].vaddr = pci_iomap(pdev, i, bars[j].len); vaddr 63 drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c if (!bars[j].vaddr) { vaddr 86 drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c if (bars[i].vaddr) { vaddr 87 drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c iounmap(bars[i].vaddr); vaddr 89 drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c bars[i].vaddr = NULL; vaddr 55 drivers/net/ethernet/pensando/ionic/ionic_dev.c idev->dev_info_regs = bar->vaddr + IONIC_BAR0_DEV_INFO_REGS_OFFSET; vaddr 56 drivers/net/ethernet/pensando/ionic/ionic_dev.c idev->dev_cmd_regs = bar->vaddr + IONIC_BAR0_DEV_CMD_REGS_OFFSET; vaddr 57 drivers/net/ethernet/pensando/ionic/ionic_dev.c idev->intr_status = bar->vaddr + IONIC_BAR0_INTR_STATUS_OFFSET; vaddr 58 drivers/net/ethernet/pensando/ionic/ionic_dev.c idev->intr_ctrl = bar->vaddr + IONIC_BAR0_INTR_CTRL_OFFSET; vaddr 75 drivers/net/ethernet/pensando/ionic/ionic_dev.c idev->db_pages = bar->vaddr; vaddr 26 drivers/net/ethernet/pensando/ionic/ionic_dev.h void __iomem *vaddr; vaddr 1543 drivers/net/ethernet/qlogic/qed/qed_rdma.c DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); vaddr 273 drivers/net/ethernet/sfc/tx.c u8 *vaddr; vaddr 275 drivers/net/ethernet/sfc/tx.c vaddr = kmap_atomic(skb_frag_page(f)); vaddr 277 drivers/net/ethernet/sfc/tx.c efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f), vaddr 279 drivers/net/ethernet/sfc/tx.c kunmap_atomic(vaddr); vaddr 282 drivers/net/ethernet/socionext/netsec.c void *vaddr; vaddr 609 drivers/net/ethernet/socionext/netsec.c struct netsec_de *de = dring->vaddr + DESC_SZ * idx; vaddr 639 drivers/net/ethernet/socionext/netsec.c entry = dring->vaddr + DESC_SZ * tail; vaddr 680 drivers/net/ethernet/socionext/netsec.c entry = dring->vaddr + DESC_SZ * tail; vaddr 783 drivers/net/ethernet/socionext/netsec.c de = dring->vaddr + (DESC_SZ * idx); vaddr 941 drivers/net/ethernet/socionext/netsec.c struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); vaddr 1184 drivers/net/ethernet/socionext/netsec.c if (!dring->vaddr || !dring->desc) vaddr 1210 drivers/net/ethernet/socionext/netsec.c memset(dring->vaddr, 0, DESC_SZ * DESC_NUM); vaddr 1223 drivers/net/ethernet/socionext/netsec.c if (dring->vaddr) { vaddr 1225 drivers/net/ethernet/socionext/netsec.c dring->vaddr, dring->desc_dma); vaddr 1226 drivers/net/ethernet/socionext/netsec.c dring->vaddr = NULL; vaddr 1237 drivers/net/ethernet/socionext/netsec.c dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM, vaddr 1239 drivers/net/ethernet/socionext/netsec.c if (!dring->vaddr) vaddr 1261 drivers/net/ethernet/socionext/netsec.c de = dring->vaddr + (DESC_SZ * i); vaddr 1085 drivers/net/ethernet/sun/sunvnet_common.c u8 *vaddr; vaddr 1088 drivers/net/ethernet/sun/sunvnet_common.c vaddr = kmap_atomic(skb_frag_page(f)); vaddr 1091 drivers/net/ethernet/sun/sunvnet_common.c err = ldc_map_single(lp, vaddr + skb_frag_off(f), vaddr 1094 drivers/net/ethernet/sun/sunvnet_common.c kunmap_atomic(vaddr); vaddr 155 drivers/net/wireless/ath/ath10k/core.h void *vaddr; vaddr 24 drivers/net/wireless/ath/ath10k/hif.h void *vaddr; /* for debugging mostly */ vaddr 144 drivers/net/wireless/ath/ath10k/htc.c sg_item.vaddr = skb->data; vaddr 1942 drivers/net/wireless/ath/ath10k/htt.h __le32 *vaddr; vaddr 2010 drivers/net/wireless/ath/ath10k/htt.h struct htt_q_state *vaddr; vaddr 2104 drivers/net/wireless/ath/ath10k/htt.h void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr); vaddr 2123 
drivers/net/wireless/ath/ath10k/htt.h void *vaddr) vaddr 2126 drivers/net/wireless/ath/ath10k/htt.h htt->rx_ops->htt_config_paddrs_ring(htt, vaddr); vaddr 86 drivers/net/wireless/ath/ath10k/htt_rx.c void *vaddr) vaddr 88 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.paddrs_ring_32 = vaddr; vaddr 92 drivers/net/wireless/ath/ath10k/htt_rx.c void *vaddr) vaddr 94 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.paddrs_ring_64 = vaddr; vaddr 144 drivers/net/wireless/ath/ath10k/htt_rx.c idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); vaddr 194 drivers/net/wireless/ath/ath10k/htt_rx.c *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx); vaddr 293 drivers/net/wireless/ath/ath10k/htt_rx.c sizeof(*htt->rx_ring.alloc_idx.vaddr), vaddr 294 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.alloc_idx.vaddr, vaddr 748 drivers/net/wireless/ath/ath10k/htt_rx.c void *vaddr, *vaddr_ring; vaddr 784 drivers/net/wireless/ath/ath10k/htt_rx.c vaddr = dma_alloc_coherent(htt->ar->dev, vaddr 785 drivers/net/wireless/ath/ath10k/htt_rx.c sizeof(*htt->rx_ring.alloc_idx.vaddr), vaddr 787 drivers/net/wireless/ath/ath10k/htt_rx.c if (!vaddr) vaddr 790 drivers/net/wireless/ath/ath10k/htt_rx.c htt->rx_ring.alloc_idx.vaddr = vaddr; vaddr 793 drivers/net/wireless/ath/ath10k/htt_rx.c *htt->rx_ring.alloc_idx.vaddr = 0; vaddr 80 drivers/net/wireless/ath/ath10k/htt_tx.c ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count; vaddr 81 drivers/net/wireless/ath/ath10k/htt_tx.c ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit; vaddr 82 drivers/net/wireless/ath/ath10k/htt_tx.c ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0; vaddr 101 drivers/net/wireless/ath/ath10k/htt_tx.c seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq); vaddr 103 drivers/net/wireless/ath/ath10k/htt_tx.c ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq); vaddr 108 drivers/net/wireless/ath/ath10k/htt_tx.c size = sizeof(*ar->htt.tx_q_state.vaddr); vaddr 377 drivers/net/wireless/ath/ath10k/htt_tx.c size = sizeof(*htt->tx_q_state.vaddr); vaddr 380 drivers/net/wireless/ath/ath10k/htt_tx.c kfree(htt->tx_q_state.vaddr); vaddr 397 drivers/net/wireless/ath/ath10k/htt_tx.c size = sizeof(*htt->tx_q_state.vaddr); vaddr 398 drivers/net/wireless/ath/ath10k/htt_tx.c htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL); vaddr 399 drivers/net/wireless/ath/ath10k/htt_tx.c if (!htt->tx_q_state.vaddr) vaddr 402 drivers/net/wireless/ath/ath10k/htt_tx.c htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr, vaddr 407 drivers/net/wireless/ath/ath10k/htt_tx.c kfree(htt->tx_q_state.vaddr); vaddr 841 drivers/net/wireless/ath/ath10k/htt_tx.c fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); vaddr 911 drivers/net/wireless/ath/ath10k/htt_tx.c fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); vaddr 1514 drivers/net/wireless/ath/ath10k/htt_tx.c sg_items[0].vaddr = &txbuf->htc_hdr; vaddr 1523 drivers/net/wireless/ath/ath10k/htt_tx.c sg_items[1].vaddr = msdu->data; vaddr 1721 drivers/net/wireless/ath/ath10k/htt_tx.c sg_items[0].vaddr = &txbuf->htc_hdr; vaddr 1730 drivers/net/wireless/ath/ath10k/htt_tx.c sg_items[1].vaddr = msdu->data; vaddr 1379 drivers/net/wireless/ath/ath10k/pci.c items[i].vaddr, items[i].len); vaddr 1397 drivers/net/wireless/ath/ath10k/pci.c items[i].vaddr, items[i].len); vaddr 2852 drivers/net/wireless/ath/ath10k/wmi-tlv.c pkt_addr->vaddr = skb; vaddr 1602 drivers/net/wireless/ath/ath10k/wmi-tlv.h void *vaddr; vaddr 2364 drivers/net/wireless/ath/ath10k/wmi.c msdu = pkt_addr->vaddr; vaddr 5251 
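The ath10k rx-ring entries above keep a firmware-shared ring index in its own coherent allocation and byte-swap on every access, since the device side is little-endian regardless of the host. A stand-alone sketch (struct hypothetical):

#include <linux/dma-mapping.h>
#include <linux/types.h>

struct demo_ring_idx {
	__le32 *vaddr;
	dma_addr_t paddr;
};

static int demo_idx_alloc(struct device *dev, struct demo_ring_idx *ri)
{
	ri->vaddr = dma_alloc_coherent(dev, sizeof(*ri->vaddr),
				       &ri->paddr, GFP_KERNEL);
	if (!ri->vaddr)
		return -ENOMEM;
	*ri->vaddr = 0;
	return 0;
}

/* Both sides agree on little-endian, so every CPU access converts. */
static void demo_idx_set(struct demo_ring_idx *ri, u32 idx)
{
	*ri->vaddr = __cpu_to_le32(idx);
}

static u32 demo_idx_get(struct demo_ring_idx *ri)
{
	return __le32_to_cpu(*ri->vaddr);
}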
drivers/net/wireless/ath/ath10k/wmi.c void *vaddr; vaddr 5254 drivers/net/wireless/ath/ath10k/wmi.c vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); vaddr 5256 drivers/net/wireless/ath/ath10k/wmi.c if (!vaddr) vaddr 5259 drivers/net/wireless/ath/ath10k/wmi.c ar->wmi.mem_chunks[idx].vaddr = vaddr; vaddr 9406 drivers/net/wireless/ath/ath10k/wmi.c ar->wmi.mem_chunks[i].vaddr, vaddr 9423 drivers/net/wireless/ath/ath10k/wmi.c msdu = pkt_addr->vaddr; vaddr 333 drivers/net/wireless/ath/ath6kl/target.h #define AR6003_VTOP(vaddr) ((vaddr) & 0x001fffff) vaddr 334 drivers/net/wireless/ath/ath6kl/target.h #define AR6004_VTOP(vaddr) (vaddr) vaddr 336 drivers/net/wireless/ath/ath6kl/target.h #define TARG_VTOP(target_type, vaddr) \ vaddr 337 drivers/net/wireless/ath/ath6kl/target.h (((target_type) == TARGET_TYPE_AR6003) ? AR6003_VTOP(vaddr) : \ vaddr 338 drivers/net/wireless/ath/ath6kl/target.h (((target_type) == TARGET_TYPE_AR6004) ? AR6004_VTOP(vaddr) : 0)) vaddr 29 drivers/net/wireless/intersil/orinoco/airport.c void __iomem *vaddr; vaddr 101 drivers/net/wireless/intersil/orinoco/airport.c if (card->vaddr) vaddr 102 drivers/net/wireless/intersil/orinoco/airport.c iounmap(card->vaddr); vaddr 103 drivers/net/wireless/intersil/orinoco/airport.c card->vaddr = NULL; vaddr 184 drivers/net/wireless/intersil/orinoco/airport.c card->vaddr = ioremap(phys_addr, AIRPORT_IO_LEN); vaddr 185 drivers/net/wireless/intersil/orinoco/airport.c if (!card->vaddr) { vaddr 190 drivers/net/wireless/intersil/orinoco/airport.c hermes_struct_init(hw, card->vaddr, HERMES_16BIT_REGSPACING); vaddr 67 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c struct sk_buff **vaddr; vaddr 72 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL); vaddr 74 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c if (!vaddr) vaddr 77 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c priv->tx_skb = vaddr; vaddr 79 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c vaddr += priv->tx_bd_num; vaddr 80 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c priv->rx_skb = vaddr; vaddr 213 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c void __iomem *vaddr; vaddr 224 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c vaddr = pcim_iomap_table(pdev)[index]; vaddr 225 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c if (!vaddr) vaddr 229 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c index, vaddr, &busaddr, (int)len); vaddr 231 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c return vaddr; vaddr 235 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c void *vaddr; vaddr 241 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL); vaddr 242 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c if (!vaddr) vaddr 247 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c memset(vaddr, 0, len); vaddr 249 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c ps->bd_table_vaddr = vaddr; vaddr 253 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c ps->tx_bd_vbase = vaddr; vaddr 256 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); vaddr 263 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num; vaddr 266 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c ps->rx_bd_vbase = vaddr; vaddr 278 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 
pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); vaddr 188 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c void *vaddr; vaddr 198 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL); vaddr 199 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c if (!vaddr) vaddr 202 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c memset(vaddr, 0, len); vaddr 206 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c ts->tx_bd_vbase = vaddr; vaddr 212 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); vaddr 219 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c vaddr = ((struct qtnf_topaz_tx_bd *)vaddr) + priv->tx_bd_num; vaddr 222 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c ts->rx_bd_vbase = vaddr; vaddr 225 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); vaddr 229 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c vaddr = ((struct qtnf_topaz_rx_bd *)vaddr) + priv->rx_bd_num; vaddr 232 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c extra_params = (struct qtnf_extra_bd_params __iomem *)vaddr; vaddr 31 drivers/parisc/iommu-helpers.h unsigned long vaddr; vaddr 70 drivers/parisc/iommu-helpers.h vaddr = (unsigned long)sg_virt(startsg); vaddr 79 drivers/parisc/iommu-helpers.h vaddr, hint); vaddr 80 drivers/parisc/iommu-helpers.h vaddr += IOVP_SIZE; vaddr 904 drivers/parisc/sba_iommu.c sba_free(struct device *hwdev, size_t size, void *vaddr, vaddr 908 drivers/parisc/sba_iommu.c free_pages((unsigned long) vaddr, get_order(size)); vaddr 317 drivers/pci/controller/vmd.c static void vmd_free(struct device *dev, size_t size, void *vaddr, vaddr 320 drivers/pci/controller/vmd.c return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs); vaddr 3941 drivers/pci/pci.c unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; vaddr 3949 drivers/pci/pci.c return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr, vaddr 3973 drivers/pci/pci.c unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; vaddr 3975 drivers/pci/pci.c unmap_kernel_range(vaddr, resource_size(res)); vaddr 137 drivers/rapidio/devices/rio_mport_cdev.c void *vaddr; vaddr 543 drivers/scsi/aic7xxx/aic79xx.h uint8_t *vaddr; vaddr 523 drivers/scsi/aic7xxx/aic79xx_core.c /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr, vaddr 6551 drivers/scsi/aic7xxx/aic79xx_core.c sns_map->vaddr, sns_map->dmamap); vaddr 6566 drivers/scsi/aic7xxx/aic79xx_core.c sg_map->vaddr, sg_map->dmamap); vaddr 6581 drivers/scsi/aic7xxx/aic79xx_core.c hscb_map->vaddr, hscb_map->dmamap); vaddr 6818 drivers/scsi/aic7xxx/aic79xx_core.c hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset]; vaddr 6828 drivers/scsi/aic7xxx/aic79xx_core.c (void **)&hscb_map->vaddr, vaddr 6837 drivers/scsi/aic7xxx/aic79xx_core.c hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, vaddr 6840 drivers/scsi/aic7xxx/aic79xx_core.c hscb = (struct hardware_scb *)hscb_map->vaddr; vaddr 6851 drivers/scsi/aic7xxx/aic79xx_core.c segs = sg_map->vaddr + offset; vaddr 6861 drivers/scsi/aic7xxx/aic79xx_core.c (void **)&sg_map->vaddr, vaddr 6870 drivers/scsi/aic7xxx/aic79xx_core.c sg_map->vaddr, ahd_sglist_allocsize(ahd), vaddr 6873 drivers/scsi/aic7xxx/aic79xx_core.c segs = sg_map->vaddr; vaddr 6888 drivers/scsi/aic7xxx/aic79xx_core.c sense_data = sense_map->vaddr + offset; vaddr 6898 
drivers/scsi/aic7xxx/aic79xx_core.c (void **)&sense_map->vaddr, vaddr 6907 drivers/scsi/aic7xxx/aic79xx_core.c sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, vaddr 6910 drivers/scsi/aic7xxx/aic79xx_core.c sense_data = sense_map->vaddr; vaddr 7101 drivers/scsi/aic7xxx/aic79xx_core.c (void **)&ahd->shared_data_map.vaddr, vaddr 7111 drivers/scsi/aic7xxx/aic79xx_core.c ahd->shared_data_map.vaddr, driver_data_size, vaddr 7114 drivers/scsi/aic7xxx/aic79xx_core.c ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr; vaddr 965 drivers/scsi/aic7xxx/aic79xx_osm.c ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr, vaddr 968 drivers/scsi/aic7xxx/aic79xx_osm.c *vaddr = pci_alloc_consistent(ahd->dev_softc, vaddr 970 drivers/scsi/aic7xxx/aic79xx_osm.c if (*vaddr == NULL) vaddr 977 drivers/scsi/aic7xxx/aic79xx_osm.c void* vaddr, bus_dmamap_t map) vaddr 980 drivers/scsi/aic7xxx/aic79xx_osm.c vaddr, map); vaddr 860 drivers/scsi/aic7xxx/aic7xxx_osm.c ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr, vaddr 864 drivers/scsi/aic7xxx/aic7xxx_osm.c *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC); vaddr 865 drivers/scsi/aic7xxx/aic7xxx_osm.c if (*vaddr == NULL) vaddr 872 drivers/scsi/aic7xxx/aic7xxx_osm.c void* vaddr, bus_dmamap_t map) vaddr 874 drivers/scsi/aic7xxx/aic7xxx_osm.c dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map); vaddr 917 drivers/scsi/aic94xx/aic94xx_dump.c ind, ascb->dma_scb.vaddr, vaddr 55 drivers/scsi/aic94xx/aic94xx_hwi.c phy->identify_frame = phy->id_frm_tok->vaddr; vaddr 223 drivers/scsi/aic94xx/aic94xx_hwi.c seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL, vaddr 225 drivers/scsi/aic94xx/aic94xx_hwi.c if (!seq->next_scb.vaddr) { vaddr 261 drivers/scsi/aic94xx/aic94xx_hwi.c asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr; vaddr 287 drivers/scsi/aic94xx/aic94xx_hwi.c memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE); vaddr 1042 drivers/scsi/aic94xx/aic94xx_hwi.c ascb->dma_scb.vaddr = dma_pool_zalloc(asd_ha->scb_pool, vaddr 1045 drivers/scsi/aic94xx/aic94xx_hwi.c if (!ascb->dma_scb.vaddr) { vaddr 1062 drivers/scsi/aic94xx/aic94xx_hwi.c dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr, vaddr 1138 drivers/scsi/aic94xx/aic94xx_hwi.c memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb)); vaddr 1140 drivers/scsi/aic94xx/aic94xx_hwi.c ascb->scb = ascb->dma_scb.vaddr; vaddr 80 drivers/scsi/aic94xx/aic94xx_hwi.h void *vaddr; vaddr 248 drivers/scsi/aic94xx/aic94xx_hwi.h token->vaddr = dma_alloc_coherent(&asd_ha->pcidev->dev, vaddr 252 drivers/scsi/aic94xx/aic94xx_hwi.h if (!token->vaddr) { vaddr 265 drivers/scsi/aic94xx/aic94xx_hwi.h token->vaddr, token->dma_handle); vaddr 274 drivers/scsi/aic94xx/aic94xx_hwi.h ascb->scb = ascb->dma_scb.vaddr; vaddr 329 drivers/scsi/aic94xx/aic94xx_hwi.h dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr, vaddr 595 drivers/scsi/aic94xx/aic94xx_init.c if (asd_ha->seq.next_scb.vaddr) { vaddr 596 drivers/scsi/aic94xx/aic94xx_init.c dma_pool_free(asd_ha->scb_pool, asd_ha->seq.next_scb.vaddr, vaddr 598 drivers/scsi/aic94xx/aic94xx_init.c asd_ha->seq.next_scb.vaddr = NULL; vaddr 231 drivers/scsi/aic94xx/aic94xx_scb.c memcpy(phy->sas_phy.frame_rcvd, edb->vaddr, size); vaddr 371 drivers/scsi/aic94xx/aic94xx_scb.c memset(edb->vaddr, 0, ASD_EDB_SIZE); vaddr 416 drivers/scsi/aic94xx/aic94xx_scb.c ascb->dma_scb.vaddr, vaddr 582 drivers/scsi/aic94xx/aic94xx_scb.c ascb->dma_scb.vaddr, vaddr 309 drivers/scsi/aic94xx/aic94xx_seq.c memcpy(token->vaddr, prog + 
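The aic94xx entries above (and the megaraid ones further down) draw fixed-size, DMA-able control blocks from a dma_pool instead of calling dma_alloc_coherent() per command. The lifecycle reduced to its core, with hypothetical size and alignment values:

#include <linux/dmapool.h>

static int demo_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* Fixed-size, fixed-alignment allocations suitable for DMA. */
	pool = dma_pool_create("demo_scb", dev, 128 /* size */,
			       64 /* align */, 0 /* no boundary */);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &dma);	/* zeroed */
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'dma' to the controller, use 'vaddr' from the CPU ... */

	dma_pool_free(pool, vaddr, dma);
	dma_pool_destroy(pool);
	return 0;
}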
page*MAX_DMA_OVLY_COUNT, left); vaddr 81 drivers/scsi/aic94xx/aic94xx_task.c &((struct sg_el *)ascb->sg_arr->vaddr)[i]; vaddr 171 drivers/scsi/aic94xx/aic94xx_task.c r = edb->vaddr; vaddr 286 drivers/scsi/aic94xx/aic94xx_tmf.c ascb->tag = *(__be16 *)(edb->vaddr+4); vaddr 287 drivers/scsi/aic94xx/aic94xx_tmf.c fh = edb->vaddr + 16; vaddr 288 drivers/scsi/aic94xx/aic94xx_tmf.c ru = edb->vaddr + 16 + sizeof(*fh); vaddr 3856 drivers/scsi/csiostor/csio_hw.c fl_sg->flbufs[n].vaddr, vaddr 278 drivers/scsi/csiostor/csio_lnode.c cmd = fdmi_req->dma_buf.vaddr; vaddr 312 drivers/scsi/csiostor/csio_lnode.c cmd = fdmi_req->dma_buf.vaddr; vaddr 428 drivers/scsi/csiostor/csio_lnode.c cmd = fdmi_req->dma_buf.vaddr; vaddr 529 drivers/scsi/csiostor/csio_lnode.c cmd = fdmi_req->dma_buf.vaddr; vaddr 588 drivers/scsi/csiostor/csio_lnode.c cmd = fdmi_req->dma_buf.vaddr; vaddr 1775 drivers/scsi/csiostor/csio_lnode.c csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len); vaddr 1851 drivers/scsi/csiostor/csio_lnode.c dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len, vaddr 1853 drivers/scsi/csiostor/csio_lnode.c if (!dma_buf->vaddr) { vaddr 1878 drivers/scsi/csiostor/csio_lnode.c if (dma_buf->vaddr) vaddr 1879 drivers/scsi/csiostor/csio_lnode.c dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr, vaddr 1521 drivers/scsi/csiostor/csio_scsi.c buf_addr = dma_buf->vaddr + buf_off; vaddr 1582 drivers/scsi/csiostor/csio_scsi.c fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr; vaddr 2040 drivers/scsi/csiostor/csio_scsi.c fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr; vaddr 2347 drivers/scsi/csiostor/csio_scsi.c ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size, vaddr 2349 drivers/scsi/csiostor/csio_scsi.c if (!ddp_desc->vaddr) { vaddr 2371 drivers/scsi/csiostor/csio_scsi.c ddp_desc->vaddr, ddp_desc->paddr); vaddr 2398 drivers/scsi/csiostor/csio_scsi.c ddp_desc->vaddr, ddp_desc->paddr); vaddr 2443 drivers/scsi/csiostor/csio_scsi.c dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL, vaddr 2445 drivers/scsi/csiostor/csio_scsi.c if (!dma_buf->vaddr) { vaddr 2483 drivers/scsi/csiostor/csio_scsi.c dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr, vaddr 2514 drivers/scsi/csiostor/csio_scsi.c dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr, vaddr 127 drivers/scsi/csiostor/csio_wr.c buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len, vaddr 129 drivers/scsi/csiostor/csio_wr.c if (!buf->vaddr) { vaddr 1078 drivers/scsi/csiostor/csio_wr.c fbuf->vaddr = buf->vaddr; vaddr 1702 drivers/scsi/csiostor/csio_wr.c if (!buf->vaddr) vaddr 1705 drivers/scsi/csiostor/csio_wr.c buf->len, buf->vaddr, vaddr 233 drivers/scsi/csiostor/csio_wr.h void *vaddr; /* Virtual address */ vaddr 538 drivers/scsi/fnic/fnic_main.c if (fnic->bar0.vaddr) vaddr 539 drivers/scsi/fnic/fnic_main.c iounmap(fnic->bar0.vaddr); vaddr 635 drivers/scsi/fnic/fnic_main.c fnic->bar0.vaddr = pci_iomap(pdev, 0, 0); vaddr 639 drivers/scsi/fnic/fnic_main.c if (!fnic->bar0.vaddr) { vaddr 50 drivers/scsi/fnic/vnic_dev.c void __iomem *vaddr; vaddr 99 drivers/scsi/fnic/vnic_dev.c rh = bar->vaddr; vaddr 155 drivers/scsi/fnic/vnic_dev.c vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset; vaddr 170 drivers/scsi/fnic/vnic_dev.c if (!vdev->res[type].vaddr) vaddr 178 drivers/scsi/fnic/vnic_dev.c return (char __iomem *)vdev->res[type].vaddr + vaddr 181 drivers/scsi/fnic/vnic_dev.c return (char __iomem *)vdev->res[type].vaddr; vaddr 91 drivers/scsi/fnic/vnic_dev.h void __iomem *vaddr; vaddr 297 
drivers/scsi/hpsa.c static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, vaddr 307 drivers/scsi/hpsa.c static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, vaddr 1150 drivers/scsi/hpsa.c writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); vaddr 1154 drivers/scsi/hpsa.c writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); vaddr 1158 drivers/scsi/hpsa.c writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); vaddr 7052 drivers/scsi/hpsa.c void __iomem *vaddr; vaddr 7055 drivers/scsi/hpsa.c vaddr = pci_ioremap_bar(pdev, 0); vaddr 7056 drivers/scsi/hpsa.c if (vaddr == NULL) vaddr 7065 drivers/scsi/hpsa.c iounmap(vaddr); vaddr 7071 drivers/scsi/hpsa.c iounmap(vaddr); vaddr 7098 drivers/scsi/hpsa.c writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); vaddr 7101 drivers/scsi/hpsa.c tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); vaddr 7107 drivers/scsi/hpsa.c iounmap(vaddr); vaddr 7134 drivers/scsi/hpsa.c void __iomem *vaddr, u32 use_doorbell) vaddr 7143 drivers/scsi/hpsa.c writel(use_doorbell, vaddr + SA5_DOORBELL); vaddr 7246 drivers/scsi/hpsa.c void __iomem *vaddr; vaddr 7284 drivers/scsi/hpsa.c vaddr = remap_pci_mem(paddr, 0x250); vaddr 7285 drivers/scsi/hpsa.c if (!vaddr) vaddr 7289 drivers/scsi/hpsa.c rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, vaddr 7320 drivers/scsi/hpsa.c rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); vaddr 7331 drivers/scsi/hpsa.c rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); vaddr 7338 drivers/scsi/hpsa.c rc = controller_reset_failed(vaddr); vaddr 7353 drivers/scsi/hpsa.c iounmap(vaddr); vaddr 7540 drivers/scsi/hpsa.c static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, vaddr 7551 drivers/scsi/hpsa.c scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); vaddr 7565 drivers/scsi/hpsa.c static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, vaddr 7569 drivers/scsi/hpsa.c *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); vaddr 7570 drivers/scsi/hpsa.c *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); vaddr 7603 drivers/scsi/hpsa.c rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, vaddr 7725 drivers/scsi/hpsa.c dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); vaddr 7727 drivers/scsi/hpsa.c writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); vaddr 7738 drivers/scsi/hpsa.c doorbell_value = readl(h->vaddr + SA5_DOORBELL); vaddr 7764 drivers/scsi/hpsa.c doorbell_value = readl(h->vaddr + SA5_DOORBELL); vaddr 7790 drivers/scsi/hpsa.c writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); vaddr 7807 drivers/scsi/hpsa.c iounmap(h->vaddr); /* pci_init 3 */ vaddr 7808 drivers/scsi/hpsa.c h->vaddr = NULL; vaddr 7860 drivers/scsi/hpsa.c h->vaddr = remap_pci_mem(h->paddr, 0x250); vaddr 7861 drivers/scsi/hpsa.c if (!h->vaddr) { vaddr 7866 drivers/scsi/hpsa.c err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); vaddr 7888 drivers/scsi/hpsa.c iounmap(h->vaddr); vaddr 7889 drivers/scsi/hpsa.c h->vaddr = NULL; vaddr 7921 drivers/scsi/hpsa.c void __iomem *vaddr; vaddr 7945 drivers/scsi/hpsa.c vaddr = pci_ioremap_bar(pdev, 0); vaddr 7946 drivers/scsi/hpsa.c if (vaddr == NULL) { vaddr 7950 drivers/scsi/hpsa.c writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 7951 drivers/scsi/hpsa.c iounmap(vaddr); vaddr 8124 drivers/scsi/hpsa.c rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); vaddr 8131 drivers/scsi/hpsa.c rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); vaddr 8227 drivers/scsi/hpsa.c 
lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); vaddr 8241 drivers/scsi/hpsa.c writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL); vaddr 8379 drivers/scsi/hpsa.c writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); vaddr 8386 drivers/scsi/hpsa.c writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); vaddr 9204 drivers/scsi/hpsa.c writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); vaddr 9227 drivers/scsi/hpsa.c writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); vaddr 9229 drivers/scsi/hpsa.c readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); vaddr 9267 drivers/scsi/hpsa.c rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, vaddr 9285 drivers/scsi/hpsa.c writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); vaddr 171 drivers/scsi/hpsa.h void __iomem *vaddr; vaddr 422 drivers/scsi/hpsa.h writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); vaddr 423 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); vaddr 429 drivers/scsi/hpsa.h writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); vaddr 435 drivers/scsi/hpsa.h writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); vaddr 447 drivers/scsi/hpsa.h writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 448 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 452 drivers/scsi/hpsa.h h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 453 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 464 drivers/scsi/hpsa.h writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 465 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 469 drivers/scsi/hpsa.h h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 470 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 478 drivers/scsi/hpsa.h writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 479 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 483 drivers/scsi/hpsa.h h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 484 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); vaddr 498 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_OUTDB_STATUS); vaddr 499 drivers/scsi/hpsa.h writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); vaddr 503 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_OUTDB_STATUS); vaddr 529 drivers/scsi/hpsa.h = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); vaddr 550 drivers/scsi/hpsa.h readl(h->vaddr + SA5_INTR_STATUS); vaddr 556 drivers/scsi/hpsa.h unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); vaddr 562 drivers/scsi/hpsa.h register_value = readl(h->vaddr + SA5_OUTDB_STATUS); vaddr 570 drivers/scsi/hpsa.h unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); vaddr 581 drivers/scsi/hpsa.h return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING; vaddr 608 drivers/scsi/hpsa.h writel((q << 24) | rq->current_entry, h->vaddr + vaddr 280 drivers/scsi/megaraid/mega_common.h caddr_t vaddr; vaddr 238 drivers/scsi/megaraid/megaraid_ioctl.h caddr_t vaddr; vaddr 1042 drivers/scsi/megaraid/megaraid_mbox.c ccb->mbox = (mbox_t *)(mbox_pci_blk[i].vaddr + 16); vaddr 1044 drivers/scsi/megaraid/megaraid_mbox.c ccb->mbox64 = (mbox64_t *)(mbox_pci_blk[i].vaddr + 8); vaddr 1056 drivers/scsi/megaraid/megaraid_mbox.c epthru_pci_blk[i].vaddr; vaddr 1062 drivers/scsi/megaraid/megaraid_mbox.c ccb->sgl64 = (mbox_sgl64 *)sg_pci_blk[i].vaddr; vaddr 1153 drivers/scsi/megaraid/megaraid_mbox.c mbox_pci_blk[i].vaddr = dma_pool_alloc( vaddr 1157 drivers/scsi/megaraid/megaraid_mbox.c if 
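The hpsa entries above submit a command by writing its bus address to a doorbell register and then reading a register back; on PCI, the read forces out the posted MMIO write before execution continues. Schematically, with hypothetical register offsets:

#include <linux/io.h>

#define DEMO_REQUEST_PORT	0x40	/* hypothetical offsets */
#define DEMO_SCRATCHPAD		0xB0

static void demo_submit(void __iomem *regs, u32 cmd_busaddr)
{
	writel(cmd_busaddr, regs + DEMO_REQUEST_PORT);

	/* MMIO writes may be posted; reading any register on the
	 * same device flushes them to the hardware. */
	(void)readl(regs + DEMO_SCRATCHPAD);
}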
(!mbox_pci_blk[i].vaddr) { vaddr 1179 drivers/scsi/megaraid/megaraid_mbox.c epthru_pci_blk[i].vaddr = dma_pool_alloc( vaddr 1183 drivers/scsi/megaraid/megaraid_mbox.c if (!epthru_pci_blk[i].vaddr) { vaddr 1202 drivers/scsi/megaraid/megaraid_mbox.c sg_pci_blk[i].vaddr = dma_pool_alloc( vaddr 1206 drivers/scsi/megaraid/megaraid_mbox.c if (!sg_pci_blk[i].vaddr) { vaddr 1237 drivers/scsi/megaraid/megaraid_mbox.c for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) { vaddr 1238 drivers/scsi/megaraid/megaraid_mbox.c dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr, vaddr 1245 drivers/scsi/megaraid/megaraid_mbox.c for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) { vaddr 1247 drivers/scsi/megaraid/megaraid_mbox.c epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr); vaddr 1253 drivers/scsi/megaraid/megaraid_mbox.c for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) { vaddr 1255 drivers/scsi/megaraid/megaraid_mbox.c mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr); vaddr 1541 drivers/scsi/megaraid/megaraid_mbox.c caddr_t vaddr; vaddr 1545 drivers/scsi/megaraid/megaraid_mbox.c vaddr = (caddr_t) sg_virt(&sgl[0]); vaddr 1547 drivers/scsi/megaraid/megaraid_mbox.c memset(vaddr, 0, scp->cmnd[4]); vaddr 545 drivers/scsi/megaraid/megaraid_mm.c kioc->buf_vaddr = pool->vaddr; vaddr 1093 drivers/scsi/megaraid/megaraid_mm.c pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL, vaddr 1096 drivers/scsi/megaraid/megaraid_mm.c if (!pool->vaddr) vaddr 1193 drivers/scsi/megaraid/megaraid_mm.c if (pool->vaddr) vaddr 1194 drivers/scsi/megaraid/megaraid_mm.c dma_pool_free(pool->handle, pool->vaddr, vaddr 203 drivers/scsi/ncr53c8xx.c m_addr_t vaddr; vaddr 384 drivers/scsi/ncr53c8xx.c vbp->vaddr = vp; vaddr 403 drivers/scsi/ncr53c8xx.c while (*vbpp && (*vbpp)->vaddr != m) vaddr 409 drivers/scsi/ncr53c8xx.c (void *)vbp->vaddr, (dma_addr_t)vbp->baddr); vaddr 494 drivers/scsi/ncr53c8xx.c while (vp && (m_addr_t) vp->vaddr != a) vaddr 1648 drivers/scsi/ncr53c8xx.c void __iomem *vaddr; /* Virtual and bus address of */ vaddr 8359 drivers/scsi/ncr53c8xx.c np->vaddr = device->slot.base_v; vaddr 8361 drivers/scsi/ncr53c8xx.c np->vaddr = ioremap(device->slot.base_c, 128); vaddr 8363 drivers/scsi/ncr53c8xx.c if (!np->vaddr) { vaddr 8370 drivers/scsi/ncr53c8xx.c "%s: using memory mapped IO at virtual address 0x%lx\n", ncr_name(np), (u_long) np->vaddr); vaddr 8377 drivers/scsi/ncr53c8xx.c np->reg = (struct ncr_reg __iomem *)np->vaddr; vaddr 274 drivers/scsi/snic/snic_main.c if (snic->bar0.vaddr) vaddr 275 drivers/scsi/snic/snic_main.c iounmap(snic->bar0.vaddr); vaddr 451 drivers/scsi/snic/snic_main.c snic->bar0.vaddr = pci_iomap(pdev, 0, 0); vaddr 452 drivers/scsi/snic/snic_main.c if (!snic->bar0.vaddr) { vaddr 46 drivers/scsi/snic/vnic_dev.c void __iomem *vaddr; vaddr 98 drivers/scsi/snic/vnic_dev.c rh = bar->vaddr; vaddr 128 drivers/scsi/snic/vnic_dev.c if (!bar[bar_num].len || !bar[bar_num].vaddr) vaddr 159 drivers/scsi/snic/vnic_dev.c vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset; vaddr 174 drivers/scsi/snic/vnic_dev.c if (!vdev->res[type].vaddr) vaddr 182 drivers/scsi/snic/vnic_dev.c return (char __iomem *)vdev->res[type].vaddr + vaddr 186 drivers/scsi/snic/vnic_dev.c return (char __iomem *)vdev->res[type].vaddr; vaddr 49 drivers/scsi/snic/vnic_dev.h void __iomem *vaddr; vaddr 389 drivers/scsi/sun3_scsi.c unsigned char *vaddr; vaddr 391 drivers/scsi/sun3_scsi.c vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); vaddr 393 drivers/scsi/sun3_scsi.c 
vaddr += (sun3_dma_orig_count - fifo); vaddr 394 drivers/scsi/sun3_scsi.c vaddr--; vaddr 398 drivers/scsi/sun3_scsi.c *vaddr = (dregs->bpack_lo & 0xff00) >> 8; vaddr 399 drivers/scsi/sun3_scsi.c vaddr--; vaddr 403 drivers/scsi/sun3_scsi.c *vaddr = (dregs->bpack_hi & 0x00ff); vaddr 404 drivers/scsi/sun3_scsi.c vaddr--; vaddr 408 drivers/scsi/sun3_scsi.c *vaddr = (dregs->bpack_hi & 0xff00) >> 8; vaddr 440 drivers/scsi/sun3_scsi.c unsigned char *vaddr; vaddr 443 drivers/scsi/sun3_scsi.c vaddr = (unsigned char *)dvma_btov(sun3_dma_orig_addr); vaddr 445 drivers/scsi/sun3_scsi.c vaddr += (sun3_dma_orig_count - fifo); vaddr 447 drivers/scsi/sun3_scsi.c vaddr[-2] = (data & 0xff00) >> 8; vaddr 448 drivers/scsi/sun3_scsi.c vaddr[-1] = (data & 0xff); vaddr 1130 drivers/scsi/sym53c8xx_2/sym_hipd.h void *vaddr; /* Virtual address */ vaddr 1195 drivers/scsi/sym53c8xx_2/sym_hipd.h void *vaddr = NULL; vaddr 1198 drivers/scsi/sym53c8xx_2/sym_hipd.h vaddr = dma_alloc_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, &baddr, vaddr 1200 drivers/scsi/sym53c8xx_2/sym_hipd.h if (vaddr) { vaddr 1201 drivers/scsi/sym53c8xx_2/sym_hipd.h vbp->vaddr = vaddr; vaddr 1204 drivers/scsi/sym53c8xx_2/sym_hipd.h return vaddr; vaddr 1209 drivers/scsi/sym53c8xx_2/sym_hipd.h dma_free_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, vbp->vaddr, vaddr 213 drivers/scsi/sym53c8xx_2/sym_malloc.c void *vaddr; vaddr 219 drivers/scsi/sym53c8xx_2/sym_malloc.c vaddr = sym_m_get_dma_mem_cluster(mp, vbp); vaddr 220 drivers/scsi/sym53c8xx_2/sym_malloc.c if (vaddr) { vaddr 221 drivers/scsi/sym53c8xx_2/sym_malloc.c int hc = VTOB_HASH_CODE(vaddr); vaddr 226 drivers/scsi/sym53c8xx_2/sym_malloc.c return vaddr; vaddr 239 drivers/scsi/sym53c8xx_2/sym_malloc.c while (*vbpp && (*vbpp)->vaddr != m) vaddr 357 drivers/scsi/sym53c8xx_2/sym_malloc.c while (vp && vp->vaddr != a) vaddr 36 drivers/soc/fsl/dpio/dpio-service.c struct dpaa2_dq *vaddr; vaddr 360 drivers/soc/fsl/dpio/dpio-service.c qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); vaddr 391 drivers/soc/fsl/dpio/dpio-service.c qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); vaddr 559 drivers/soc/fsl/dpio/dpio-service.c ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64); vaddr 560 drivers/soc/fsl/dpio/dpio-service.c ret->paddr = dma_map_single(dev, ret->vaddr, vaddr 610 drivers/soc/fsl/dpio/dpio-service.c struct dpaa2_dq *ret = &s->vaddr[s->idx]; vaddr 631 drivers/soc/fsl/dpio/dpio-service.c prefetch(&s->vaddr[s->idx]); vaddr 87 drivers/spi/spi-orion.c void __iomem *vaddr; vaddr 431 drivers/spi/spi-orion.c void __iomem *vaddr; vaddr 442 drivers/spi/spi-orion.c vaddr = orion_spi->child[cs].direct_access.vaddr; vaddr 444 drivers/spi/spi-orion.c if (vaddr && xfer->tx_buf && word_len == 8) { vaddr 452 drivers/spi/spi-orion.c iowrite32_rep(vaddr, xfer->tx_buf, cnt); vaddr 456 drivers/spi/spi-orion.c iowrite8_rep(vaddr, &buf[cnt], rem); vaddr 756 drivers/spi/spi-orion.c dir_acc->vaddr = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); vaddr 757 drivers/spi/spi-orion.c if (!dir_acc->vaddr) { vaddr 114 drivers/staging/android/ion/ion.c void *vaddr; vaddr 118 drivers/staging/android/ion/ion.c return buffer->vaddr; vaddr 120 drivers/staging/android/ion/ion.c vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); vaddr 121 drivers/staging/android/ion/ion.c if (WARN_ONCE(!vaddr, vaddr 124 drivers/staging/android/ion/ion.c if (IS_ERR(vaddr)) vaddr 125 drivers/staging/android/ion/ion.c return vaddr; vaddr 126 drivers/staging/android/ion/ion.c buffer->vaddr = vaddr; vaddr 128 
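The spi-orion entries above stream a transmit buffer into a fixed device window with iowrite32_rep() and finish the non-multiple-of-four tail bytewise. The same split, stand-alone:

#include <linux/io.h>

static void demo_stream_tx(void __iomem *win, const u8 *buf, size_t len)
{
	size_t cnt = len / 4;	/* whole 32-bit words */
	size_t rem = len & 3;	/* 0..3 trailing bytes */

	if (cnt)
		iowrite32_rep(win, buf, cnt);
	if (rem)
		iowrite8_rep(win, buf + cnt * 4, rem);
}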
drivers/staging/android/ion/ion.c return vaddr; vaddr 136 drivers/staging/android/ion/ion.c buffer->vaddr = NULL; vaddr 281 drivers/staging/android/ion/ion.c return buffer->vaddr + offset * PAGE_SIZE; vaddr 293 drivers/staging/android/ion/ion.c void *vaddr; vaddr 302 drivers/staging/android/ion/ion.c vaddr = ion_buffer_kmap_get(buffer); vaddr 303 drivers/staging/android/ion/ion.c if (IS_ERR(vaddr)) { vaddr 304 drivers/staging/android/ion/ion.c ret = PTR_ERR(vaddr); vaddr 50 drivers/staging/android/ion/ion.h void *vaddr; vaddr 51 drivers/staging/android/ion/ion_cma_heap.c void *vaddr = kmap_atomic(page); vaddr 53 drivers/staging/android/ion/ion_cma_heap.c memset(vaddr, 0, PAGE_SIZE); vaddr 54 drivers/staging/android/ion/ion_cma_heap.c kunmap_atomic(vaddr); vaddr 25 drivers/staging/android/ion/ion_heap.c void *vaddr; vaddr 49 drivers/staging/android/ion/ion_heap.c vaddr = vmap(pages, npages, VM_MAP, pgprot); vaddr 52 drivers/staging/android/ion/ion_heap.c if (!vaddr) vaddr 55 drivers/staging/android/ion/ion_heap.c return vaddr; vaddr 61 drivers/staging/android/ion/ion_heap.c vunmap(buffer->vaddr); vaddr 97 drivers/staging/media/allegro-dvt/allegro-core.c void *vaddr; vaddr 731 drivers/staging/media/allegro-dvt/allegro-core.c buffer->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size, vaddr 733 drivers/staging/media/allegro-dvt/allegro-core.c if (!buffer->vaddr) vaddr 743 drivers/staging/media/allegro-dvt/allegro-core.c if (buffer->vaddr) { vaddr 745 drivers/staging/media/allegro-dvt/allegro-core.c buffer->vaddr, buffer->paddr); vaddr 746 drivers/staging/media/allegro-dvt/allegro-core.c buffer->vaddr = NULL; vaddr 1837 drivers/staging/media/allegro-dvt/allegro-core.c memcpy(dev->firmware.vaddr, buf, size); vaddr 251 drivers/staging/media/ipu3/ipu3-css-fw.c memcpy(css->binary[i].vaddr, blob, size); vaddr 13 drivers/staging/media/ipu3/ipu3-css-pool.c if (map->size < size && map->vaddr) { vaddr 41 drivers/staging/media/ipu3/ipu3-css-pool.c pool->entry[i].param.vaddr = NULL; vaddr 23 drivers/staging/media/ipu3/ipu3-css-pool.h void *vaddr; vaddr 572 drivers/staging/media/ipu3/ipu3-css.c memset(css->xmem_sp_group_ptrs.vaddr, 0, vaddr 720 drivers/staging/media/ipu3/ipu3-css.c void *vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr; vaddr 730 drivers/staging/media/ipu3/ipu3-css.c sizeof(*cfg_iter), vaddr); vaddr 796 drivers/staging/media/ipu3/ipu3-css.c sizeof(*cfg_ref), vaddr); vaddr 826 drivers/staging/media/ipu3/ipu3-css.c vaddr); vaddr 843 drivers/staging/media/ipu3/ipu3-css.c vaddr); vaddr 869 drivers/staging/media/ipu3/ipu3-css.c vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr; vaddr 874 drivers/staging/media/ipu3/ipu3-css.c vaddr); vaddr 887 drivers/staging/media/ipu3/ipu3-css.c vaddr); vaddr 901 drivers/staging/media/ipu3/ipu3-css.c isp_stage = css_pipe->xmem_isp_stage_ptrs[pipe][stage].vaddr; vaddr 916 drivers/staging/media/ipu3/ipu3-css.c sp_stage = css_pipe->xmem_sp_stage_ptrs[pipe][stage].vaddr; vaddr 1046 drivers/staging/media/ipu3/ipu3-css.c sp_group = css->xmem_sp_group_ptrs.vaddr; vaddr 1951 drivers/staging/media/ipu3/ipu3-css.c abi_buf = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].vaddr; vaddr 2175 drivers/staging/media/ipu3/ipu3-css.c 0)->vaddr; vaddr 2179 drivers/staging/media/ipu3/ipu3-css.c if (set_params || !map->vaddr) { vaddr 2182 drivers/staging/media/ipu3/ipu3-css.c acc = map->vaddr; vaddr 2188 drivers/staging/media/ipu3/ipu3-css.c if (!map->vaddr || (set_params && (set_params->use.lin_vmem_params || vaddr 2193 drivers/staging/media/ipu3/ipu3-css.c vmem0 
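The ion_heap entries above build one contiguous kernel mapping over a scattered page array with vmap(). Reduced to the essentials (wrapper names hypothetical):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map 'npages' discontiguous pages into one virtually contiguous
 * range; vmap() returns NULL on failure. */
static void *demo_map_pages(struct page **pages, unsigned int npages)
{
	return vmap(pages, npages, VM_MAP, PAGE_KERNEL);
}

static void demo_unmap_pages(void *vaddr)
{
	vunmap(vaddr);
}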
= map->vaddr; vaddr 2199 drivers/staging/media/ipu3/ipu3-css.c if (!map->vaddr || (set_params && (set_params->use.tnr3_dmem_params || vaddr 2203 drivers/staging/media/ipu3/ipu3-css.c dmem0 = map->vaddr; vaddr 2211 drivers/staging/media/ipu3/ipu3-css.c r = imgu_css_cfg_acc(css, pipe, use, acc, map->vaddr, vaddr 2222 drivers/staging/media/ipu3/ipu3-css.c map->vaddr, set_params); vaddr 2231 drivers/staging/media/ipu3/ipu3-css.c map->vaddr, set_params); vaddr 2243 drivers/staging/media/ipu3/ipu3-css.c if (!map->vaddr) { vaddr 2246 drivers/staging/media/ipu3/ipu3-css.c gdc = map->vaddr; vaddr 2247 drivers/staging/media/ipu3/ipu3-css.c imgu_css_cfg_gdc_table(map->vaddr, vaddr 2260 drivers/staging/media/ipu3/ipu3-css.c if (!map->vaddr || (set_params && set_params->use.obgrid_param)) { vaddr 2263 drivers/staging/media/ipu3/ipu3-css.c obgrid = map->vaddr; vaddr 138 drivers/staging/media/ipu3/ipu3-dmamap.c map->vaddr = map->vma->addr; vaddr 183 drivers/staging/media/ipu3/ipu3-dmamap.c __func__, map->size, &map->daddr, map->vaddr); vaddr 185 drivers/staging/media/ipu3/ipu3-dmamap.c if (!map->vaddr) vaddr 194 drivers/staging/media/ipu3/ipu3-dmamap.c vunmap(map->vaddr); vaddr 195 drivers/staging/media/ipu3/ipu3-dmamap.c map->vaddr = NULL; vaddr 162 drivers/staging/media/ipu3/ipu3.c if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr)) vaddr 86 drivers/staging/media/meson/vdec/esparser.c u8 *vaddr = vb2_plane_vaddr(vb, 0) + payload_size; vaddr 90 drivers/staging/media/meson/vdec/esparser.c memset(vaddr, 0, pad_size); vaddr 93 drivers/staging/media/meson/vdec/esparser.c memset(vaddr + pad_size, 0, SEARCH_PATTERN_LEN); vaddr 94 drivers/staging/media/meson/vdec/esparser.c vaddr[pad_size] = 0x00; vaddr 95 drivers/staging/media/meson/vdec/esparser.c vaddr[pad_size + 1] = 0x00; vaddr 96 drivers/staging/media/meson/vdec/esparser.c vaddr[pad_size + 2] = 0x01; vaddr 97 drivers/staging/media/meson/vdec/esparser.c vaddr[pad_size + 3] = 0xff; vaddr 693 drivers/staging/media/tegra-vde/vde.c unsigned long vaddr) vaddr 713 drivers/staging/media/tegra-vde/vde.c if (copy_from_user(&ctx, (void __user *)vaddr, sizeof(ctx))) vaddr 596 drivers/target/target_core_user.c static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) vaddr 598 drivers/target/target_core_user.c unsigned long offset = offset_in_page(vaddr); vaddr 599 drivers/target/target_core_user.c void *start = vaddr - offset; vaddr 447 drivers/tee/optee/core.c unsigned long vaddr; vaddr 484 drivers/tee/optee/core.c vaddr = (unsigned long)va; vaddr 486 drivers/tee/optee/core.c rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz, vaddr 492 drivers/tee/optee/core.c vaddr += sz; vaddr 496 drivers/tee/optee/core.c rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT); vaddr 74 drivers/tee/tee_shm_pool.c rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr, vaddr 84 drivers/tee/tee_shm_pool.c rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr, vaddr 106 drivers/tee/tee_shm_pool.c struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, vaddr 116 drivers/tee/tee_shm_pool.c if (vaddr & page_mask || paddr & page_mask || size & page_mask) vaddr 130 drivers/tee/tee_shm_pool.c rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1); vaddr 35 drivers/tty/serial/8250/8250_acorn.c void __iomem *vaddr; vaddr 54 drivers/tty/serial/8250/8250_acorn.c info->vaddr = ecardm_iomap(ec, type->type, 0, 0); vaddr 55 drivers/tty/serial/8250/8250_acorn.c if (!info->vaddr) { vaddr 71 
drivers/tty/serial/8250/8250_acorn.c uart.port.membase = info->vaddr + type->offset[i]; vaddr 366 drivers/tty/serial/jsm/jsm_tty.c void __iomem *vaddr; vaddr 401 drivers/tty/serial/jsm/jsm_tty.c vaddr = brd->re_map_membase; vaddr 412 drivers/tty/serial/jsm/jsm_tty.c ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i); vaddr 414 drivers/tty/serial/jsm/jsm_tty.c ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i); vaddr 1275 drivers/usb/core/hcd.c unsigned char *vaddr; vaddr 1282 drivers/usb/core/hcd.c vaddr = hcd_buffer_alloc(bus, size + sizeof(vaddr), vaddr 1284 drivers/usb/core/hcd.c if (!vaddr) vaddr 1296 drivers/usb/core/hcd.c (unsigned long *)(vaddr + size)); vaddr 1299 drivers/usb/core/hcd.c memcpy(vaddr, *vaddr_handle, size); vaddr 1301 drivers/usb/core/hcd.c *vaddr_handle = vaddr; vaddr 1309 drivers/usb/core/hcd.c unsigned char *vaddr = *vaddr_handle; vaddr 1311 drivers/usb/core/hcd.c vaddr = (void *)get_unaligned((unsigned long *)(vaddr + size)); vaddr 1314 drivers/usb/core/hcd.c memcpy(vaddr, *vaddr_handle, size); vaddr 1316 drivers/usb/core/hcd.c hcd_buffer_free(bus, size + sizeof(vaddr), *vaddr_handle, *dma_handle); vaddr 1318 drivers/usb/core/hcd.c *vaddr_handle = vaddr; vaddr 767 drivers/usb/gadget/function/f_fs.c void *vaddr, *ptr; vaddr 771 drivers/usb/gadget/function/f_fs.c vaddr = vmalloc(sz); vaddr 772 drivers/usb/gadget/function/f_fs.c if (!vaddr) vaddr 778 drivers/usb/gadget/function/f_fs.c vfree(vaddr); vaddr 782 drivers/usb/gadget/function/f_fs.c for (i = 0, ptr = vaddr; i < n_pages; ++i, ptr += PAGE_SIZE) vaddr 787 drivers/usb/gadget/function/f_fs.c vfree(vaddr); vaddr 793 drivers/usb/gadget/function/f_fs.c return vaddr; vaddr 814 drivers/usb/gadget/udc/fsl_qe_udc.c u32 vaddr; vaddr 836 drivers/usb/gadget/udc/fsl_qe_udc.c vaddr = (u32)phys_to_virt(in_be32(&bd->buf)); vaddr 837 drivers/usb/gadget/udc/fsl_qe_udc.c frame_set_data(pframe, (u8 *)vaddr); vaddr 934 drivers/usb/gadget/udc/fsl_qe_udc.c u32 vaddr, i; vaddr 964 drivers/usb/gadget/udc/fsl_qe_udc.c vaddr = (u32)phys_to_virt(in_be32(&bd->buf)); vaddr 965 drivers/usb/gadget/udc/fsl_qe_udc.c frame_set_data(pframe, (u8 *)vaddr); vaddr 1469 drivers/usb/gadget/udc/fsl_qe_udc.c u32 vaddr, fsize; vaddr 1490 drivers/usb/gadget/udc/fsl_qe_udc.c vaddr = (u32)phys_to_virt(in_be32(&bd->buf)); vaddr 1491 drivers/usb/gadget/udc/fsl_qe_udc.c frame_set_data(pframe, (u8 *)vaddr); vaddr 21 drivers/usb/host/xhci-dbgcap.c void *vaddr; vaddr 23 drivers/usb/host/xhci-dbgcap.c vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev, vaddr 25 drivers/usb/host/xhci-dbgcap.c return vaddr; vaddr 1328 drivers/usb/mon/mon_bin.c unsigned long vaddr; vaddr 1331 drivers/usb/mon/mon_bin.c vaddr = get_zeroed_page(GFP_KERNEL); vaddr 1332 drivers/usb/mon/mon_bin.c if (vaddr == 0) { vaddr 1337 drivers/usb/mon/mon_bin.c map[n].ptr = (unsigned char *) vaddr; vaddr 1338 drivers/usb/mon/mon_bin.c map[n].pg = virt_to_page((void *) vaddr); vaddr 103 drivers/vfio/vfio_iommu_spapr_tce.c __u64 vaddr, __u64 size) vaddr 110 drivers/vfio/vfio_iommu_spapr_tce.c if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK)) vaddr 113 drivers/vfio/vfio_iommu_spapr_tce.c mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT); vaddr 135 drivers/vfio/vfio_iommu_spapr_tce.c __u64 vaddr, __u64 size) vaddr 142 drivers/vfio/vfio_iommu_spapr_tce.c if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) || vaddr 143 drivers/vfio/vfio_iommu_spapr_tce.c ((vaddr + size) < vaddr)) vaddr 146 drivers/vfio/vfio_iommu_spapr_tce.c mem = mm_iommu_get(container->mm, vaddr, entries); vaddr 155 
drivers/vfio/vfio_iommu_spapr_tce.c ret = mm_iommu_new(container->mm, vaddr, entries, &mem); vaddr 885 drivers/vfio/vfio_iommu_spapr_tce.c (param.vaddr & ~IOMMU_PAGE_MASK(tbl))) vaddr 901 drivers/vfio/vfio_iommu_spapr_tce.c ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr); vaddr 908 drivers/vfio/vfio_iommu_spapr_tce.c param.vaddr, vaddr 914 drivers/vfio/vfio_iommu_spapr_tce.c param.vaddr, vaddr 990 drivers/vfio/vfio_iommu_spapr_tce.c ret = tce_iommu_register_pages(container, param.vaddr, vaddr 1019 drivers/vfio/vfio_iommu_spapr_tce.c ret = tce_iommu_unregister_pages(container, param.vaddr, vaddr 86 drivers/vfio/vfio_iommu_type1.c unsigned long vaddr; /* Process virtual addr */ vaddr 338 drivers/vfio/vfio_iommu_type1.c static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, vaddr 352 drivers/vfio/vfio_iommu_type1.c ret = get_user_pages(vaddr, 1, flags | FOLL_LONGTERM, page, vaddr 355 drivers/vfio/vfio_iommu_type1.c ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, vaddr 378 drivers/vfio/vfio_iommu_type1.c vaddr = untagged_addr(vaddr); vaddr 380 drivers/vfio/vfio_iommu_type1.c vma = find_vma_intersection(mm, vaddr, vaddr + 1); vaddr 383 drivers/vfio/vfio_iommu_type1.c if (!follow_pfn(vma, vaddr, pfn) && vaddr 397 drivers/vfio/vfio_iommu_type1.c static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, vaddr 404 drivers/vfio/vfio_iommu_type1.c dma_addr_t iova = vaddr - dma->vaddr + dma->iova; vaddr 410 drivers/vfio/vfio_iommu_type1.c ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base); vaddr 435 drivers/vfio/vfio_iommu_type1.c for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage; vaddr 436 drivers/vfio/vfio_iommu_type1.c pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { vaddr 437 drivers/vfio/vfio_iommu_type1.c ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn); vaddr 497 drivers/vfio/vfio_iommu_type1.c static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, vaddr 507 drivers/vfio/vfio_iommu_type1.c ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); vaddr 596 drivers/vfio/vfio_iommu_type1.c remote_vaddr = dma->vaddr + (iova - dma->iova); vaddr 1013 drivers/vfio/vfio_iommu_type1.c unsigned long vaddr = dma->vaddr; vaddr 1021 drivers/vfio/vfio_iommu_type1.c npage = vfio_pin_pages_remote(dma, vaddr + dma->size, vaddr 1075 drivers/vfio/vfio_iommu_type1.c unsigned long vaddr = map->vaddr; vaddr 1082 drivers/vfio/vfio_iommu_type1.c if (map->size != size || map->vaddr != vaddr || map->iova != iova) vaddr 1095 drivers/vfio/vfio_iommu_type1.c if (!prot || !size || (size | iova | vaddr) & mask) vaddr 1099 drivers/vfio/vfio_iommu_type1.c if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) vaddr 1127 drivers/vfio/vfio_iommu_type1.c dma->vaddr = vaddr; vaddr 1232 drivers/vfio/vfio_iommu_type1.c unsigned long vaddr = dma->vaddr + vaddr 1237 drivers/vfio/vfio_iommu_type1.c npage = vfio_pin_pages_remote(dma, vaddr, vaddr 331 drivers/video/fbdev/hpfb.c unsigned long paddr, vaddr; vaddr 338 drivers/video/fbdev/hpfb.c vaddr = (unsigned long)ioremap(paddr, resource_size(&d->resource)); vaddr 340 drivers/video/fbdev/hpfb.c vaddr = paddr + DIO_VIRADDRBASE; vaddr 344 drivers/video/fbdev/hpfb.c if (hpfb_init_one(paddr, vaddr)) { vaddr 346 drivers/video/fbdev/hpfb.c iounmap((void *)vaddr); vaddr 465 drivers/video/fbdev/matrox/matroxfb_accel.c fb_writel((*chardata) << 24, mmio.vaddr); vaddr 467 drivers/video/fbdev/matrox/matroxfb_accel.c fb_writel(*chardata, mmio.vaddr); vaddr 475 
drivers/video/fbdev/matrox/matroxfb_accel.c fb_writel((*(u_int16_t*)chardata) << 16, mmio.vaddr); vaddr 477 drivers/video/fbdev/matrox/matroxfb_accel.c fb_writel(*(u_int16_t*)chardata, mmio.vaddr); vaddr 488 drivers/video/fbdev/matrox/matroxfb_accel.c fb_writel(get_unaligned((u_int32_t*)(chardata + i)),mmio.vaddr); vaddr 375 drivers/video/fbdev/matrox/matroxfb_base.c iounmap(minfo->mmio.vbase.vaddr); vaddr 376 drivers/video/fbdev/matrox/matroxfb_base.c iounmap(minfo->video.vbase.vaddr); vaddr 1713 drivers/video/fbdev/matrox/matroxfb_base.c minfo->mmio.vbase.vaddr = ioremap_nocache(ctrlptr_phys, 16384); vaddr 1714 drivers/video/fbdev/matrox/matroxfb_base.c if (!minfo->mmio.vbase.vaddr) { vaddr 1721 drivers/video/fbdev/matrox/matroxfb_base.c minfo->video.vbase.vaddr = ioremap_wc(video_base_phys, memsize); vaddr 1722 drivers/video/fbdev/matrox/matroxfb_base.c if (!minfo->video.vbase.vaddr) { vaddr 1940 drivers/video/fbdev/matrox/matroxfb_base.c iounmap(minfo->video.vbase.vaddr); vaddr 1942 drivers/video/fbdev/matrox/matroxfb_base.c iounmap(minfo->mmio.vbase.vaddr); vaddr 125 drivers/video/fbdev/matrox/matroxfb_base.h void __iomem* vaddr; vaddr 129 drivers/video/fbdev/matrox/matroxfb_base.h return readb(va.vaddr + offs); vaddr 133 drivers/video/fbdev/matrox/matroxfb_base.h writeb(value, va.vaddr + offs); vaddr 137 drivers/video/fbdev/matrox/matroxfb_base.h writew(value, va.vaddr + offs); vaddr 141 drivers/video/fbdev/matrox/matroxfb_base.h return readl(va.vaddr + offs); vaddr 145 drivers/video/fbdev/matrox/matroxfb_base.h writel(value, va.vaddr + offs); vaddr 157 drivers/video/fbdev/matrox/matroxfb_base.h iowrite32_rep(va.vaddr, src, len >> 2); vaddr 159 drivers/video/fbdev/matrox/matroxfb_base.h u_int32_t __iomem* addr = va.vaddr; vaddr 180 drivers/video/fbdev/matrox/matroxfb_base.h va->vaddr += offs; vaddr 184 drivers/video/fbdev/matrox/matroxfb_base.h return va.vaddr; vaddr 628 drivers/video/fbdev/matrox/matroxfb_crtc2.c m2info->video.vbase.vaddr = vaddr_va(minfo->video.vbase) + m2info->video.offbase; vaddr 640 drivers/video/fbdev/omap/lcdc.c region->vaddr = lcdc.vram_virt; vaddr 41 drivers/video/fbdev/omap/omapfb.h void __iomem *vaddr; vaddr 166 drivers/video/fbdev/omap/omapfb_main.c fbdev->mem_desc.region[i].vaddr, vaddr 379 drivers/video/fbdev/omap/omapfb_main.c fbi->screen_base = rg->vaddr; vaddr 1031 drivers/video/fbdev/omap/omapfb_main.c *(u16 *)fbdev->mem_desc.region[0].vaddr = pixval; vaddr 199 drivers/video/fbdev/omap2/omapfb/omapfb-main.c return ofbi->region->vrfb.vaddr[0]; vaddr 201 drivers/video/fbdev/omap2/omapfb/omapfb-main.c return ofbi->region->vaddr; vaddr 531 drivers/video/fbdev/omap2/omapfb/omapfb-main.c if (vrfb->vaddr[0] && reconf) { vaddr 535 drivers/video/fbdev/omap2/omapfb/omapfb-main.c iounmap(vrfb->vaddr[0]); vaddr 536 drivers/video/fbdev/omap2/omapfb/omapfb-main.c vrfb->vaddr[0] = NULL; vaddr 540 drivers/video/fbdev/omap2/omapfb/omapfb-main.c if (vrfb->vaddr[0]) vaddr 554 drivers/video/fbdev/omap2/omapfb/omapfb-main.c fbi->screen_base = ofbi->region->vrfb.vaddr[0]; vaddr 1316 drivers/video/fbdev/omap2/omapfb/omapfb-main.c if (rg->vrfb.vaddr[0]) { vaddr 1317 drivers/video/fbdev/omap2/omapfb/omapfb-main.c iounmap(rg->vrfb.vaddr[0]); vaddr 1318 drivers/video/fbdev/omap2/omapfb/omapfb-main.c rg->vrfb.vaddr[0] = NULL; vaddr 1328 drivers/video/fbdev/omap2/omapfb/omapfb-main.c rg->vaddr = NULL; vaddr 1370 drivers/video/fbdev/omap2/omapfb/omapfb-main.c rg->vaddr = NULL; vaddr 1412 drivers/video/fbdev/omap2/omapfb/omapfb-main.c rg->vaddr = (void __iomem *)token; vaddr 1563 
drivers/video/fbdev/omap2/omapfb/omapfb-main.c rg->vaddr, vaddr 504 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr); vaddr 46 drivers/video/fbdev/omap2/omapfb/omapfb.h void __iomem *vaddr; vaddr 235 drivers/video/fbdev/omap2/omapfb/vrfb.c vrfb->vaddr[rot] = ioremap_wc(vrfb->paddr[rot], size); vaddr 237 drivers/video/fbdev/omap2/omapfb/vrfb.c if (!vrfb->vaddr[rot]) { vaddr 243 drivers/video/fbdev/omap2/omapfb/vrfb.c vrfb->vaddr[rot]); vaddr 1517 drivers/vme/bridges/vme_ca91cx42.c void *vaddr, dma_addr_t dma) vaddr 1524 drivers/vme/bridges/vme_ca91cx42.c pci_free_consistent(pdev, size, vaddr, dma); vaddr 1015 drivers/vme/bridges/vme_fake.c void *vaddr, dma_addr_t dma) vaddr 1017 drivers/vme/bridges/vme_fake.c kfree(vaddr); vaddr 2169 drivers/vme/bridges/vme_tsi148.c void *vaddr, dma_addr_t dma) vaddr 2176 drivers/vme/bridges/vme_tsi148.c pci_free_consistent(pdev, size, vaddr, dma); vaddr 127 drivers/vme/vme.c void *vaddr, dma_addr_t dma) vaddr 153 drivers/vme/vme.c bridge->free_consistent(bridge->parent, size, vaddr, dma); vaddr 175 drivers/vme/vme_bridge.h void *vaddr, dma_addr_t dma); vaddr 108 drivers/xen/gntdev.c args.vaddr = map->dma_vaddr; vaddr 180 drivers/xen/gntdev.c add->dma_vaddr = args.vaddr; vaddr 737 drivers/xen/gntdev.c pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr); vaddr 740 drivers/xen/gntdev.c vma = find_vma(current->mm, op.vaddr); vaddr 736 drivers/xen/grant-table.c void *vaddr; vaddr 741 drivers/xen/grant-table.c vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes); vaddr 742 drivers/xen/grant-table.c if (vaddr == NULL) { vaddr 749 drivers/xen/grant-table.c xen_unmap(vaddr); vaddr 755 drivers/xen/grant-table.c xen_auto_xlat_grant_frames.vaddr = vaddr; vaddr 768 drivers/xen/grant-table.c xen_unmap(xen_auto_xlat_grant_frames.vaddr); vaddr 772 drivers/xen/grant-table.c xen_auto_xlat_grant_frames.vaddr = NULL; vaddr 858 drivers/xen/grant-table.c args->vaddr = dma_alloc_coherent(args->dev, size, vaddr 862 drivers/xen/grant-table.c args->vaddr = dma_alloc_wc(args->dev, size, vaddr 865 drivers/xen/grant-table.c if (!args->vaddr) { vaddr 929 drivers/xen/grant-table.c args->vaddr, args->dev_bus_addr); vaddr 932 drivers/xen/grant-table.c args->vaddr, args->dev_bus_addr); vaddr 1364 drivers/xen/grant-table.c gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr; vaddr 332 drivers/xen/swiotlb-xen.c xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, vaddr 351 drivers/xen/swiotlb-xen.c TestClearPageXenRemapped(virt_to_page(vaddr))) vaddr 354 drivers/xen/swiotlb-xen.c xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); vaddr 111 drivers/xen/xen-pciback/xenbus.c void *vaddr; vaddr 117 drivers/xen/xen-pciback/xenbus.c err = xenbus_map_ring_valloc(pdev->xdev, &gnt_ref, 1, &vaddr); vaddr 124 drivers/xen/xen-pciback/xenbus.c pdev->sh_info = vaddr; vaddr 288 drivers/xen/xen-scsiback.c gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i), vaddr 547 drivers/xen/xen-scsiback.c vaddr(pending_req, i) + ring_req->seg[i].offset); vaddr 557 drivers/xen/xen-scsiback.c end_seg = vaddr(pending_req, 0) + ring_req->seg[0].offset; vaddr 569 drivers/xen/xen-scsiback.c end_seg = vaddr(pending_req, i_seg) + vaddr 78 drivers/xen/xenbus/xenbus_client.c void **vaddr); vaddr 79 drivers/xen/xenbus/xenbus_client.c int (*unmap)(struct xenbus_device *dev, void *vaddr); vaddr 359 drivers/xen/xenbus/xenbus_client.c int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, vaddr 367 
drivers/xen/xenbus/xenbus_client.c virt_to_gfn(vaddr), 0); vaddr 375 drivers/xen/xenbus/xenbus_client.c vaddr = vaddr + XEN_PAGE_SIZE; vaddr 449 drivers/xen/xenbus/xenbus_client.c unsigned int nr_grefs, void **vaddr) vaddr 453 drivers/xen/xenbus/xenbus_client.c err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); vaddr 542 drivers/xen/xenbus/xenbus_client.c unsigned long vaddr = (unsigned long)gfn_to_virt(gfn); vaddr 544 drivers/xen/xenbus/xenbus_client.c info->phys_addrs[info->idx] = vaddr; vaddr 545 drivers/xen/xenbus/xenbus_client.c info->addrs[info->idx] = vaddr; vaddr 553 drivers/xen/xenbus/xenbus_client.c void **vaddr) vaddr 567 drivers/xen/xenbus/xenbus_client.c *vaddr = NULL; vaddr 601 drivers/xen/xenbus/xenbus_client.c *vaddr = addr; vaddr 669 drivers/xen/xenbus/xenbus_client.c int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) vaddr 671 drivers/xen/xenbus/xenbus_client.c return ring_ops->unmap(dev, vaddr); vaddr 679 drivers/xen/xenbus/xenbus_client.c void **vaddr) vaddr 689 drivers/xen/xenbus/xenbus_client.c *vaddr = NULL; vaddr 721 drivers/xen/xenbus/xenbus_client.c *vaddr = area->addr; vaddr 734 drivers/xen/xenbus/xenbus_client.c static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) vaddr 745 drivers/xen/xenbus/xenbus_client.c if (node->pv.area->addr == vaddr) { vaddr 756 drivers/xen/xenbus/xenbus_client.c "can't find mapped virtual address %p", vaddr); vaddr 764 drivers/xen/xenbus/xenbus_client.c addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i); vaddr 821 drivers/xen/xenbus/xenbus_client.c static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) vaddr 834 drivers/xen/xenbus/xenbus_client.c if (addr == vaddr) { vaddr 845 drivers/xen/xenbus/xenbus_client.c "can't find mapped virtual address %p", vaddr); vaddr 858 drivers/xen/xenbus/xenbus_client.c vunmap(vaddr); vaddr 862 drivers/xen/xenbus/xenbus_client.c WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages); vaddr 219 drivers/xen/xlate_mmu.c void *vaddr; vaddr 248 drivers/xen/xlate_mmu.c vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL); vaddr 249 drivers/xen/xlate_mmu.c if (!vaddr) { vaddr 260 drivers/xen/xlate_mmu.c *virt = vaddr; vaddr 581 fs/binfmt_elf.c unsigned long vaddr = 0; vaddr 584 fs/binfmt_elf.c vaddr = eppnt->p_vaddr; vaddr 588 fs/binfmt_elf.c load_addr = -vaddr; vaddr 590 fs/binfmt_elf.c map_addr = elf_map(interpreter, load_addr + vaddr, vaddr 601 fs/binfmt_elf.c load_addr = map_addr - ELF_PAGESTART(vaddr); vaddr 883 fs/binfmt_elf.c unsigned long k, vaddr; vaddr 920 fs/binfmt_elf.c vaddr = elf_ppnt->p_vaddr; vaddr 973 fs/binfmt_elf.c load_bias = ELF_PAGESTART(load_bias - vaddr); vaddr 983 fs/binfmt_elf.c error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, vaddr 996 fs/binfmt_elf.c ELF_PAGESTART(load_bias + vaddr); vaddr 685 fs/dax.c unsigned long vaddr) vaddr 703 fs/dax.c copy_user_page(vto, (void __force *)kaddr, vaddr, to); vaddr 1035 fs/dax.c unsigned long vaddr = vmf->address; vaddr 1036 fs/dax.c pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); vaddr 1042 fs/dax.c ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); vaddr 1252 fs/dax.c unsigned long vaddr = vmf->address; vaddr 1317 fs/dax.c clear_user_highpage(vmf->cow_page, vaddr); vaddr 1321 fs/dax.c sector, PAGE_SIZE, vmf->cow_page, vaddr); vaddr 1372 fs/dax.c ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn); vaddr 1374 fs/dax.c ret = vmf_insert_mixed(vma, vaddr, pfn); vaddr 2832 fs/fuse/file.c void *vaddr; vaddr 2852 fs/fuse/file.c vaddr = kmap_atomic(ap.pages[0]); vaddr 2853 fs/fuse/file.c err = 
fuse_copy_ioctl_iovec(fc, iov_page, vaddr, vaddr 2856 fs/fuse/file.c kunmap_atomic(vaddr); vaddr 125 fs/minix/minix.h static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size) vaddr 127 fs/minix/minix.h const unsigned short *p = vaddr, *addr = vaddr; vaddr 150 fs/minix/minix.h static inline int minix_test_bit(int nr, const void *vaddr) vaddr 152 fs/minix/minix.h const unsigned short *p = vaddr; vaddr 386 fs/proc/kcore.c phdr->p_vaddr = (size_t)m->vaddr; vaddr 409 fs/pstore/ram_core.c void *vaddr; vaddr 430 fs/pstore/ram_core.c vaddr = vmap(pages, page_count, VM_MAP, prot); vaddr 438 fs/pstore/ram_core.c return vaddr + offset_in_page(start); vaddr 473 fs/pstore/ram_core.c prz->vaddr = persistent_ram_vmap(start, size, memtype); vaddr 475 fs/pstore/ram_core.c prz->vaddr = persistent_ram_iomap(start, size, memtype, vaddr 478 fs/pstore/ram_core.c if (!prz->vaddr) { vaddr 484 fs/pstore/ram_core.c prz->buffer = prz->vaddr; vaddr 539 fs/pstore/ram_core.c if (prz->vaddr) { vaddr 542 fs/pstore/ram_core.c vunmap(prz->vaddr - offset_in_page(prz->paddr)); vaddr 544 fs/pstore/ram_core.c iounmap(prz->vaddr); vaddr 547 fs/pstore/ram_core.c prz->vaddr = NULL; vaddr 100 include/asm-generic/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ vaddr 103 include/asm-generic/cacheflush.h flush_icache_user_range(vma, page, vaddr, len); \ vaddr 108 include/asm-generic/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ vaddr 36 include/asm-generic/fixmap.h static inline unsigned long virt_to_fix(const unsigned long vaddr) vaddr 38 include/asm-generic/fixmap.h BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); vaddr 39 include/asm-generic/fixmap.h return __virt_to_fix(vaddr); vaddr 31 include/asm-generic/page.h #define clear_user_page(page, vaddr, pg) clear_page(page) vaddr 32 include/asm-generic/page.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) vaddr 936 include/asm-generic/vmlinux.lds.h #define PERCPU_VADDR(cacheline, vaddr, phdr) \ vaddr 938 include/asm-generic/vmlinux.lds.h .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \ vaddr 59 include/crypto/scatterwalk.h static inline void scatterwalk_unmap(void *vaddr) vaddr 61 include/crypto/scatterwalk.h kunmap_atomic(vaddr); vaddr 141 include/drm/drm_client.h void *vaddr; vaddr 642 include/drm/drm_drv.h void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); vaddr 12 include/drm/drm_format_helper.h void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, vaddr 14 include/drm/drm_format_helper.h void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr, vaddr 17 include/drm/drm_format_helper.h void drm_fb_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb, vaddr 19 include/drm/drm_format_helper.h void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr, vaddr 23 include/drm/drm_format_helper.h void *vaddr, struct drm_framebuffer *fb, vaddr 26 include/drm/drm_format_helper.h void *vaddr, struct drm_framebuffer *fb, vaddr 28 include/drm/drm_format_helper.h void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, vaddr 151 include/drm/drm_gem.h void (*vunmap)(struct drm_gem_object *obj, void *vaddr); vaddr 26 include/drm/drm_gem_cma_helper.h void *vaddr; vaddr 107 include/drm/drm_gem_cma_helper.h void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr); vaddr 90 include/drm/drm_gem_shmem_helper.h void *vaddr; vaddr 138 include/drm/drm_gem_shmem_helper.h void drm_gem_shmem_vunmap(struct drm_gem_object *obj, 
void *vaddr); vaddr 82 include/drm/drm_legacy.h void *vaddr; vaddr 88 include/drm/drm_prime.h void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr); vaddr 263 include/linux/dma-buf.h void (*vunmap)(struct dma_buf *, void *vaddr); vaddr 422 include/linux/dma-buf.h void dma_buf_vunmap(struct dma_buf *, void *vaddr); vaddr 84 include/linux/dma-mapping.h void *vaddr, dma_addr_t dma_handle, vaddr 129 include/linux/dma-mapping.h void (*cache_sync)(struct device *dev, void *vaddr, size_t size, vaddr 160 include/linux/dma-mapping.h int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr); vaddr 166 include/linux/dma-mapping.h int dma_release_from_global_coherent(int order, void *vaddr); vaddr 172 include/linux/dma-mapping.h #define dma_release_from_dev_coherent(dev, order, vaddr) (0) vaddr 173 include/linux/dma-mapping.h #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) vaddr 181 include/linux/dma-mapping.h static inline int dma_release_from_global_coherent(int order, void *vaddr) vaddr 451 include/linux/dma-mapping.h void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, vaddr 453 include/linux/dma-mapping.h void dma_cache_sync(struct device *dev, void *vaddr, size_t size, vaddr 534 include/linux/dma-mapping.h void *vaddr, dma_addr_t dma_handle) vaddr 537 include/linux/dma-mapping.h static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, vaddr 68 include/linux/dma-noncoherent.h void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, vaddr 71 include/linux/dma-noncoherent.h static inline void arch_dma_cache_sync(struct device *dev, void *vaddr, vaddr 28 include/linux/dmapool.h void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); vaddr 44 include/linux/dmapool.h static inline void dma_pool_free(struct dma_pool *pool, void *vaddr, vaddr 24 include/linux/highmem.h static inline void flush_kernel_vmap_range(void *vaddr, int size) vaddr 27 include/linux/highmem.h static inline void invalidate_kernel_vmap_range(void *vaddr, int size) vaddr 158 include/linux/highmem.h static inline void clear_user_highpage(struct page *page, unsigned long vaddr) vaddr 161 include/linux/highmem.h clear_user_page(addr, vaddr, page); vaddr 184 include/linux/highmem.h unsigned long vaddr) vaddr 187 include/linux/highmem.h vma, vaddr); vaddr 190 include/linux/highmem.h clear_user_highpage(page, vaddr); vaddr 206 include/linux/highmem.h unsigned long vaddr) vaddr 208 include/linux/highmem.h return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); vaddr 251 include/linux/highmem.h unsigned long vaddr, struct vm_area_struct *vma) vaddr 257 include/linux/highmem.h copy_user_page(vto, vfrom, vaddr, to); vaddr 61 include/linux/iio/buffer-dma.h void *vaddr; vaddr 655 include/linux/intel-iommu.h void free_pgtable_page(void *vaddr); vaddr 76 include/linux/io-mapping.h io_mapping_unmap_atomic(void __iomem *vaddr) vaddr 78 include/linux/io-mapping.h iounmap_atomic(vaddr); vaddr 95 include/linux/io-mapping.h io_mapping_unmap(void __iomem *vaddr) vaddr 97 include/linux/io-mapping.h iounmap(vaddr); vaddr 141 include/linux/io-mapping.h io_mapping_unmap(void __iomem *vaddr) vaddr 156 include/linux/io-mapping.h io_mapping_unmap_atomic(void __iomem *vaddr) vaddr 158 include/linux/io-mapping.h io_mapping_unmap(vaddr); vaddr 21 include/linux/kcore.h unsigned long vaddr; vaddr 386 include/linux/kexec.h static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) { return 0; } vaddr 390 
include/linux/kexec.h static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { } vaddr 143 include/linux/net.h struct sockaddr *vaddr, vaddr 32 include/linux/pci-dma-compat.h void *vaddr, dma_addr_t dma_handle) vaddr 34 include/linux/pci-dma-compat.h dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle); vaddr 1425 include/linux/pci.h #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) vaddr 82 include/linux/pstore_ram.h void *vaddr; vaddr 266 include/linux/qed/qed_rdma_if.h u64 vaddr; vaddr 560 include/linux/qed/qed_rdma_if.h void *vaddr; vaddr 247 include/linux/tee_drv.h struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, vaddr 268 include/linux/tee_drv.h unsigned long vaddr; vaddr 67 include/linux/uprobes.h unsigned long vaddr; vaddr 106 include/linux/uprobes.h extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); vaddr 107 include/linux/uprobes.h extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); vaddr 112 include/linux/uprobes.h extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); vaddr 139 include/linux/uprobes.h extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, vaddr 121 include/media/videobuf-core.h void *(*vaddr) (struct videobuf_buffer *buf); vaddr 52 include/media/videobuf-dma-sg.h void *vaddr; vaddr 22 include/media/videobuf-vmalloc.h void *vaddr; vaddr 126 include/media/videobuf2-core.h void *(*get_userptr)(struct device *dev, unsigned long vaddr, vaddr 142 include/media/videobuf2-core.h void *(*vaddr)(void *buf_priv); vaddr 492 include/net/ip_vs.h const union nf_inet_addr *vaddr; vaddr 512 include/net/ip_vs.h union nf_inet_addr vaddr; /* virtual address */ vaddr 690 include/net/ip_vs.h union nf_inet_addr vaddr; /* virtual IP address */ vaddr 1184 include/net/ip_vs.h const union nf_inet_addr *vaddr, vaddr 1193 include/net/ip_vs.h p->vaddr = vaddr; vaddr 1252 include/net/ip_vs.h IP_VS_DBG_ADDR(cp->af, &cp->vaddr), vaddr 1271 include/net/ip_vs.h IP_VS_DBG_ADDR(cp->af, &cp->vaddr), vaddr 1287 include/net/ip_vs.h IP_VS_DBG_ADDR(cp->af, &cp->vaddr), vaddr 1404 include/net/ip_vs.h const union nf_inet_addr *vaddr, __be16 vport); vaddr 1425 include/net/ip_vs.h const union nf_inet_addr *vaddr, __be16 vport, vaddr 91 include/rdma/ib_hdrs.h __be64 vaddr; /* potentially unaligned */ vaddr 97 include/rdma/ib_hdrs.h __be64 vaddr; /* potentially unaligned */ vaddr 164 include/rdma/ib_hdrs.h return ib_u64_get(&reth->vaddr); vaddr 169 include/rdma/ib_hdrs.h ib_u64_put(val, &reth->vaddr); vaddr 174 include/rdma/ib_hdrs.h return ib_u64_get(&ateth->vaddr); vaddr 179 include/rdma/ib_hdrs.h ib_u64_put(val, &ateth->vaddr); vaddr 567 include/rdma/rdma_vt.h u32 len, u64 vaddr, u32 rkey, int acc); vaddr 62 include/rdma/rdmavt_mr.h void *vaddr; vaddr 111 include/rdma/rdmavt_mr.h void *vaddr; /* kernel virtual address of segment */ vaddr 161 include/rdma/rdmavt_mr.h sge->vaddr += length; vaddr 175 include/rdma/rdmavt_mr.h sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; vaddr 21 include/uapi/linux/netfilter/xt_ipvs.h union nf_inet_addr vaddr, vmask; vaddr 764 include/uapi/linux/vfio.h __u64 vaddr; /* Process virtual address */ vaddr 895 include/uapi/linux/vfio.h __u64 vaddr; /* Process virtual address */ vaddr 101 include/uapi/rdma/hfi/hfi1_ioctl.h __aligned_u64 vaddr; vaddr 100 include/uapi/xen/gntdev.h __u64 vaddr; vaddr 16 include/video/omapvrfb.h void 
__iomem *vaddr[4]; vaddr 83 include/xen/arm/page.h static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr) vaddr 189 include/xen/grant_table.h void *vaddr; vaddr 211 include/xen/grant_table.h void *vaddr; vaddr 210 include/xen/xen-ops.h int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr, vaddr 204 include/xen/xenbus.h int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, vaddr 207 include/xen/xenbus.h unsigned int nr_grefs, void **vaddr); vaddr 213 include/xen/xenbus.h int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr); vaddr 380 kernel/debug/kdb/kdb_support.c void *vaddr; vaddr 387 kernel/debug/kdb/kdb_support.c vaddr = kmap_atomic(page); vaddr 388 kernel/debug/kdb/kdb_support.c memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size); vaddr 389 kernel/debug/kdb/kdb_support.c kunmap_atomic(vaddr); vaddr 195 kernel/dma/coherent.c int order, void *vaddr) vaddr 197 kernel/dma/coherent.c if (mem && vaddr >= mem->virt_base && vaddr < vaddr 199 kernel/dma/coherent.c int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; vaddr 222 kernel/dma/coherent.c int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) vaddr 226 kernel/dma/coherent.c return __dma_release_from_coherent(mem, order, vaddr); vaddr 229 kernel/dma/coherent.c int dma_release_from_global_coherent(int order, void *vaddr) vaddr 235 kernel/dma/coherent.c vaddr); vaddr 239 kernel/dma/coherent.c struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) vaddr 241 kernel/dma/coherent.c if (mem && vaddr >= mem->virt_base && vaddr + size <= vaddr 244 kernel/dma/coherent.c int start = (vaddr - mem->virt_base) >> PAGE_SHIFT; vaddr 276 kernel/dma/coherent.c void *vaddr, size_t size, int *ret) vaddr 280 kernel/dma/coherent.c return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); vaddr 283 kernel/dma/coherent.c int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, vaddr 290 kernel/dma/coherent.c vaddr, size, ret); vaddr 23 kernel/dma/mapping.c void *vaddr; vaddr 32 kernel/dma/mapping.c dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle, vaddr 40 kernel/dma/mapping.c if (this->vaddr == match->vaddr) { vaddr 57 kernel/dma/mapping.c void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, vaddr 60 kernel/dma/mapping.c struct dma_devres match_data = { size, vaddr, dma_handle }; vaddr 62 kernel/dma/mapping.c dma_free_coherent(dev, size, vaddr, dma_handle); vaddr 85 kernel/dma/mapping.c void *vaddr; vaddr 91 kernel/dma/mapping.c vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); vaddr 92 kernel/dma/mapping.c if (!vaddr) { vaddr 97 kernel/dma/mapping.c dr->vaddr = vaddr; vaddr 104 kernel/dma/mapping.c return vaddr; vaddr 403 kernel/dma/mapping.c void dma_cache_sync(struct device *dev, void *vaddr, size_t size, vaddr 411 kernel/dma/mapping.c arch_dma_cache_sync(dev, vaddr, size, dir); vaddr 413 kernel/dma/mapping.c ops->cache_sync(dev, vaddr, size, dir); vaddr 251 kernel/dma/remap.c void arch_dma_free(struct device *dev, size_t size, void *vaddr, vaddr 254 kernel/dma/remap.c if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) { vaddr 258 kernel/dma/remap.c vunmap(vaddr); vaddr 188 kernel/dma/swiotlb.c void *vaddr; vaddr 194 kernel/dma/swiotlb.c vaddr = phys_to_virt(io_tlb_start); vaddr 196 kernel/dma/swiotlb.c set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); vaddr 197 kernel/dma/swiotlb.c memset(vaddr, 0, bytes); vaddr 412 kernel/dma/swiotlb.c unsigned char *vaddr = phys_to_virt(tlb_addr); vaddr 427 
kernel/dma/swiotlb.c memcpy(vaddr, buffer + offset, sz); vaddr 429 kernel/dma/swiotlb.c memcpy(buffer + offset, vaddr, sz); vaddr 435 kernel/dma/swiotlb.c vaddr += sz; vaddr 439 kernel/dma/swiotlb.c memcpy(vaddr, phys_to_virt(orig_addr), size); vaddr 441 kernel/dma/swiotlb.c memcpy(phys_to_virt(orig_addr), vaddr, size); vaddr 110 kernel/events/uprobes.c unsigned long vaddr; /* Page(s) of instruction slots */ vaddr 136 kernel/events/uprobes.c static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) vaddr 138 kernel/events/uprobes.c return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); vaddr 250 kernel/events/uprobes.c static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len) vaddr 253 kernel/events/uprobes.c memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len); vaddr 257 kernel/events/uprobes.c static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len) vaddr 260 kernel/events/uprobes.c memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); vaddr 264 kernel/events/uprobes.c static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode) vaddr 278 kernel/events/uprobes.c copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE); vaddr 351 kernel/events/uprobes.c unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset); vaddr 357 kernel/events/uprobes.c vma->vm_start <= vaddr && vaddr 358 kernel/events/uprobes.c vma->vm_end > vaddr; vaddr 374 kernel/events/uprobes.c __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) vaddr 382 kernel/events/uprobes.c if (!vaddr || !d) vaddr 385 kernel/events/uprobes.c ret = get_user_pages_remote(NULL, mm, vaddr, 1, vaddr 396 kernel/events/uprobes.c ptr = kaddr + (vaddr & ~PAGE_MASK); vaddr 400 kernel/events/uprobes.c "curr val: %d, delta: %d\n", vaddr, *ptr, d); vaddr 470 kernel/events/uprobes.c unsigned long vaddr, uprobe_opcode_t opcode) vaddr 486 kernel/events/uprobes.c ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags, vaddr 491 kernel/events/uprobes.c ret = verify_opcode(old_page, vaddr, &opcode); vaddr 519 kernel/events/uprobes.c new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); vaddr 525 kernel/events/uprobes.c copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); vaddr 533 kernel/events/uprobes.c index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT; vaddr 551 kernel/events/uprobes.c ret = __replace_page(vma, vaddr, old_page, new_page); vaddr 566 kernel/events/uprobes.c collapse_pte_mapped_thp(mm, vaddr); vaddr 580 kernel/events/uprobes.c int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) vaddr 582 kernel/events/uprobes.c return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN); vaddr 595 kernel/events/uprobes.c set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) vaddr 597 kernel/events/uprobes.c return uprobe_write_opcode(auprobe, mm, vaddr, vaddr 846 kernel/events/uprobes.c struct mm_struct *mm, unsigned long vaddr) vaddr 866 kernel/events/uprobes.c ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); vaddr 904 kernel/events/uprobes.c struct vm_area_struct *vma, unsigned long vaddr) vaddr 909 kernel/events/uprobes.c ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); vaddr 921 kernel/events/uprobes.c ret = set_swbp(&uprobe->arch, mm, vaddr); vaddr 931 kernel/events/uprobes.c remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) vaddr 934 
kernel/events/uprobes.c return set_orig_insn(&uprobe->arch, mm, vaddr); vaddr 961 kernel/events/uprobes.c unsigned long vaddr; vaddr 1011 kernel/events/uprobes.c info->vaddr = offset_to_vaddr(vma, offset); vaddr 1064 kernel/events/uprobes.c vma = find_vma(mm, info->vaddr); vaddr 1069 kernel/events/uprobes.c if (vma->vm_start > info->vaddr || vaddr 1070 kernel/events/uprobes.c vaddr_to_offset(vma, info->vaddr) != uprobe->offset) vaddr 1077 kernel/events/uprobes.c err = install_breakpoint(uprobe, mm, vma, info->vaddr); vaddr 1081 kernel/events/uprobes.c err |= remove_breakpoint(uprobe, mm, info->vaddr); vaddr 1251 kernel/events/uprobes.c unsigned long vaddr; vaddr 1263 kernel/events/uprobes.c vaddr = offset_to_vaddr(vma, uprobe->offset); vaddr 1264 kernel/events/uprobes.c err |= remove_breakpoint(uprobe, mm, vaddr); vaddr 1338 kernel/events/uprobes.c unsigned long vaddr; vaddr 1349 kernel/events/uprobes.c vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset); vaddr 1350 kernel/events/uprobes.c ret = __update_ref_ctr(vma->vm_mm, vaddr, 1); vaddr 1399 kernel/events/uprobes.c unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); vaddr 1400 kernel/events/uprobes.c install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); vaddr 1461 kernel/events/uprobes.c if (!area->vaddr) { vaddr 1463 kernel/events/uprobes.c area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, vaddr 1465 kernel/events/uprobes.c if (area->vaddr & ~PAGE_MASK) { vaddr 1466 kernel/events/uprobes.c ret = area->vaddr; vaddr 1471 kernel/events/uprobes.c vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE, vaddr 1488 kernel/events/uprobes.c static struct xol_area *__create_xol_area(unsigned long vaddr) vaddr 1511 kernel/events/uprobes.c area->vaddr = vaddr; vaddr 1607 kernel/events/uprobes.c slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES); vaddr 1655 kernel/events/uprobes.c vma_end = area->vaddr + PAGE_SIZE; vaddr 1656 kernel/events/uprobes.c if (area->vaddr <= slot_addr && slot_addr < vma_end) { vaddr 1660 kernel/events/uprobes.c offset = slot_addr - area->vaddr; vaddr 1675 kernel/events/uprobes.c void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, vaddr 1679 kernel/events/uprobes.c copy_to_page(page, vaddr, src, len); vaddr 1706 kernel/events/uprobes.c return utask->vaddr; vaddr 1830 kernel/events/uprobes.c t->utask->dup_xol_addr = area->vaddr; vaddr 1849 kernel/events/uprobes.c trampoline_vaddr = area->vaddr; vaddr 1950 kernel/events/uprobes.c utask->vaddr = bp_vaddr; vaddr 2016 kernel/events/uprobes.c static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) vaddr 2022 kernel/events/uprobes.c if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE))) vaddr 2026 kernel/events/uprobes.c result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); vaddr 2038 kernel/events/uprobes.c result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page, vaddr 2043 kernel/events/uprobes.c copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); vaddr 27 kernel/trace/trace_uprobe.c unsigned long vaddr[]; vaddr 133 kernel/trace/trace_uprobe.c void __user *vaddr = (void __force __user *)src; vaddr 135 kernel/trace/trace_uprobe.c return copy_from_user(dest, vaddr, size) ? 
-EFAULT : 0; vaddr 191 kernel/trace/trace_uprobe.c void __user *vaddr = (void __force __user *) addr; vaddr 196 kernel/trace/trace_uprobe.c len = strnlen_user(vaddr, MAX_STRING_SIZE); vaddr 212 kernel/trace/trace_uprobe.c udd = (void *) current->utask->vaddr; vaddr 957 kernel/trace/trace_uprobe.c entry->vaddr[0] = func; vaddr 958 kernel/trace/trace_uprobe.c entry->vaddr[1] = instruction_pointer(regs); vaddr 961 kernel/trace/trace_uprobe.c entry->vaddr[0] = instruction_pointer(regs); vaddr 1017 kernel/trace/trace_uprobe.c entry->vaddr[1], entry->vaddr[0]); vaddr 1022 kernel/trace/trace_uprobe.c entry->vaddr[0]); vaddr 1173 kernel/trace/trace_uprobe.c DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0); vaddr 1174 kernel/trace/trace_uprobe.c DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0); vaddr 1177 kernel/trace/trace_uprobe.c DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0); vaddr 1356 kernel/trace/trace_uprobe.c entry->vaddr[0] = func; vaddr 1357 kernel/trace/trace_uprobe.c entry->vaddr[1] = instruction_pointer(regs); vaddr 1360 kernel/trace/trace_uprobe.c entry->vaddr[0] = instruction_pointer(regs); vaddr 1470 kernel/trace/trace_uprobe.c current->utask->vaddr = (unsigned long) &udd; vaddr 1505 kernel/trace/trace_uprobe.c current->utask->vaddr = (unsigned long) &udd; vaddr 363 lib/genalloc.c unsigned long vaddr; vaddr 368 lib/genalloc.c vaddr = gen_pool_alloc_algo(pool, size, algo, data); vaddr 369 lib/genalloc.c if (!vaddr) vaddr 373 lib/genalloc.c *dma = gen_pool_virt_to_phys(pool, vaddr); vaddr 375 lib/genalloc.c return (void *)vaddr; vaddr 441 lib/genalloc.c void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data); vaddr 443 lib/genalloc.c if (vaddr) vaddr 444 lib/genalloc.c memset(vaddr, 0, size); vaddr 446 lib/genalloc.c return vaddr; vaddr 55 mm/dmapool.c void *vaddr; vaddr 216 mm/dmapool.c *(int *)(page->vaddr + offset) = next; vaddr 228 mm/dmapool.c page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, vaddr 230 mm/dmapool.c if (page->vaddr) { vaddr 232 mm/dmapool.c memset(page->vaddr, POOL_POISON_FREED, pool->allocation); vaddr 254 mm/dmapool.c memset(page->vaddr, POOL_POISON_FREED, pool->allocation); vaddr 256 mm/dmapool.c dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); vaddr 294 mm/dmapool.c pool->name, page->vaddr); vaddr 297 mm/dmapool.c pool->name, page->vaddr); vaddr 348 mm/dmapool.c page->offset = *(int *)(page->vaddr + offset); vaddr 349 mm/dmapool.c retval = offset + page->vaddr; vaddr 410 mm/dmapool.c void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) vaddr 423 mm/dmapool.c pool->name, vaddr, (unsigned long)dma); vaddr 426 mm/dmapool.c pool->name, vaddr, (unsigned long)dma); vaddr 430 mm/dmapool.c offset = vaddr - page->vaddr; vaddr 432 mm/dmapool.c memset(vaddr, 0, pool->size); vaddr 439 mm/dmapool.c pool->name, vaddr, &dma); vaddr 442 mm/dmapool.c pool->name, vaddr, &dma); vaddr 449 mm/dmapool.c chain = *(int *)(page->vaddr + chain); vaddr 462 mm/dmapool.c memset(vaddr, POOL_POISON_FREED, pool->size); vaddr 466 mm/dmapool.c *(int *)vaddr = page->offset; vaddr 150 mm/highmem.c struct page *kmap_to_page(void *vaddr) vaddr 152 mm/highmem.c unsigned long addr = (unsigned long)vaddr; vaddr 215 mm/highmem.c unsigned long vaddr; vaddr 257 mm/highmem.c vaddr = PKMAP_ADDR(last_pkmap_nr); vaddr 258 mm/highmem.c set_pte_at(&init_mm, vaddr, vaddr 262 mm/highmem.c set_page_address(page, (void *)vaddr); vaddr 264 mm/highmem.c return vaddr; vaddr 277 mm/highmem.c unsigned long vaddr; vaddr 284 
mm/highmem.c vaddr = (unsigned long)page_address(page); vaddr 285 mm/highmem.c if (!vaddr) vaddr 286 mm/highmem.c vaddr = map_new_virtual(page); vaddr 287 mm/highmem.c pkmap_count[PKMAP_NR(vaddr)]++; vaddr 288 mm/highmem.c BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2); vaddr 290 mm/highmem.c return (void*) vaddr; vaddr 308 mm/highmem.c unsigned long vaddr, flags; vaddr 311 mm/highmem.c vaddr = (unsigned long)page_address(page); vaddr 312 mm/highmem.c if (vaddr) { vaddr 313 mm/highmem.c BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1); vaddr 314 mm/highmem.c pkmap_count[PKMAP_NR(vaddr)]++; vaddr 317 mm/highmem.c return (void*) vaddr; vaddr 330 mm/highmem.c unsigned long vaddr; vaddr 338 mm/highmem.c vaddr = (unsigned long)page_address(page); vaddr 339 mm/highmem.c BUG_ON(!vaddr); vaddr 340 mm/highmem.c nr = PKMAP_NR(vaddr); vaddr 4393 mm/hugetlb.c unsigned long vaddr = *position; vaddr 4398 mm/hugetlb.c while (vaddr < vma->vm_end && remainder) { vaddr 4420 mm/hugetlb.c pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), vaddr 4434 mm/hugetlb.c !hugetlbfs_pagecache_present(h, vma, vaddr)) { vaddr 4471 mm/hugetlb.c ret = hugetlb_fault(mm, vma, vaddr, fault_flags); vaddr 4496 mm/hugetlb.c pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; vaddr 4520 mm/hugetlb.c vaddr += PAGE_SIZE; vaddr 4524 mm/hugetlb.c if (vaddr < vma->vm_end && remainder && vaddr 4540 mm/hugetlb.c *position = vaddr; vaddr 1464 mm/vmalloc.c void *vaddr; vaddr 1488 mm/vmalloc.c vaddr = vmap_block_vaddr(va->va_start, 0); vaddr 1512 mm/vmalloc.c return vaddr; vaddr 1577 mm/vmalloc.c void *vaddr = NULL; vaddr 1604 mm/vmalloc.c vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); vaddr 1620 mm/vmalloc.c if (!vaddr) vaddr 1621 mm/vmalloc.c vaddr = new_vmap_block(order, gfp_mask); vaddr 1623 mm/vmalloc.c return vaddr; vaddr 2850 mm/vmalloc.c char *vaddr, *buf_start = buf; vaddr 2867 mm/vmalloc.c vaddr = (char *) vm->addr; vaddr 2868 mm/vmalloc.c if (addr >= vaddr + get_vm_area_size(vm)) vaddr 2870 mm/vmalloc.c while (addr < vaddr) { vaddr 2878 mm/vmalloc.c n = vaddr + get_vm_area_size(vm) - addr; vaddr 2929 mm/vmalloc.c char *vaddr; vaddr 2947 mm/vmalloc.c vaddr = (char *) vm->addr; vaddr 2948 mm/vmalloc.c if (addr >= vaddr + get_vm_area_size(vm)) vaddr 2950 mm/vmalloc.c while (addr < vaddr) { vaddr 2957 mm/vmalloc.c n = vaddr + get_vm_area_size(vm) - addr; vaddr 992 mm/zsmalloc.c void *vaddr; vaddr 996 mm/zsmalloc.c vaddr = kmap_atomic(page); vaddr 997 mm/zsmalloc.c link = (struct link_free *)vaddr + off / sizeof(*link); vaddr 1019 mm/zsmalloc.c kunmap_atomic(vaddr); vaddr 1416 mm/zsmalloc.c void *vaddr; vaddr 1429 mm/zsmalloc.c vaddr = kmap_atomic(m_page); vaddr 1430 mm/zsmalloc.c link = (struct link_free *)vaddr + m_offset / sizeof(*link); vaddr 1439 mm/zsmalloc.c kunmap_atomic(vaddr); vaddr 1522 mm/zsmalloc.c void *vaddr; vaddr 1529 mm/zsmalloc.c vaddr = kmap_atomic(f_page); vaddr 1532 mm/zsmalloc.c link = (struct link_free *)(vaddr + f_offset); vaddr 1534 mm/zsmalloc.c kunmap_atomic(vaddr); vaddr 951 net/appletalk/ddp.c u8 *vaddr; vaddr 955 net/appletalk/ddp.c vaddr = kmap_atomic(skb_frag_page(frag)); vaddr 956 net/appletalk/ddp.c sum = atalk_sum_partial(vaddr + skb_frag_off(frag) + vaddr 958 net/appletalk/ddp.c kunmap_atomic(vaddr); vaddr 448 net/core/datagram.c u8 *vaddr = kmap(page); vaddr 453 net/core/datagram.c vaddr + skb_frag_off(frag) + offset - start, vaddr 786 net/core/skbuff.c u8 *vaddr; vaddr 792 net/core/skbuff.c vaddr = kmap_atomic(p); vaddr 795 net/core/skbuff.c 16, 1, vaddr + p_off, seg_len, false); vaddr 796 
net/core/skbuff.c kunmap_atomic(vaddr); vaddr 1376 net/core/skbuff.c u8 *vaddr; vaddr 1381 net/core/skbuff.c vaddr = kmap_atomic(p); vaddr 1390 net/core/skbuff.c vaddr + p_off + done, copy); vaddr 1394 net/core/skbuff.c kunmap_atomic(vaddr); vaddr 2218 net/core/skbuff.c u8 *vaddr; vaddr 2226 net/core/skbuff.c vaddr = kmap_atomic(p); vaddr 2227 net/core/skbuff.c memcpy(to + copied, vaddr + p_off, p_len); vaddr 2228 net/core/skbuff.c kunmap_atomic(vaddr); vaddr 2579 net/core/skbuff.c u8 *vaddr; vaddr 2587 net/core/skbuff.c vaddr = kmap_atomic(p); vaddr 2588 net/core/skbuff.c memcpy(vaddr + p_off, from + copied, p_len); vaddr 2589 net/core/skbuff.c kunmap_atomic(vaddr); vaddr 2659 net/core/skbuff.c u8 *vaddr; vaddr 2667 net/core/skbuff.c vaddr = kmap_atomic(p); vaddr 2670 net/core/skbuff.c vaddr + p_off, p_len, 0); vaddr 2671 net/core/skbuff.c kunmap_atomic(vaddr); vaddr 2758 net/core/skbuff.c u8 *vaddr; vaddr 2766 net/core/skbuff.c vaddr = kmap_atomic(p); vaddr 2767 net/core/skbuff.c csum2 = csum_partial_copy_nocheck(vaddr + p_off, vaddr 2770 net/core/skbuff.c kunmap_atomic(vaddr); vaddr 275 net/ipv4/esp4.c u8 *vaddr; vaddr 317 net/ipv4/esp4.c vaddr = kmap_atomic(page); vaddr 319 net/ipv4/esp4.c tail = vaddr + pfrag->offset; vaddr 323 net/ipv4/esp4.c kunmap_atomic(vaddr); vaddr 229 net/ipv6/esp6.c u8 *vaddr; vaddr 262 net/ipv6/esp6.c vaddr = kmap_atomic(page); vaddr 264 net/ipv6/esp6.c tail = vaddr + pfrag->offset; vaddr 268 net/ipv6/esp6.c kunmap_atomic(vaddr); vaddr 136 net/netfilter/ipvs/ip_vs_conn.c addr = p->vaddr; vaddr 276 net/netfilter/ipvs/ip_vs_conn.c ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) && vaddr 307 net/netfilter/ipvs/ip_vs_conn.c IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport), vaddr 374 net/netfilter/ipvs/ip_vs_conn.c p->af, p->vaddr, &cp->vaddr) && vaddr 391 net/netfilter/ipvs/ip_vs_conn.c IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport), vaddr 416 net/netfilter/ipvs/ip_vs_conn.c ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) && vaddr 433 net/netfilter/ipvs/ip_vs_conn.c IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport), vaddr 607 net/netfilter/ipvs/ip_vs_conn.c IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), vaddr 651 net/netfilter/ipvs/ip_vs_conn.c cp->dport, &cp->vaddr, cp->vport, vaddr 704 net/netfilter/ipvs/ip_vs_conn.c IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), vaddr 773 net/netfilter/ipvs/ip_vs_conn.c IP_VS_DBG_ADDR(ct->af, &ct->vaddr), vaddr 926 net/netfilter/ipvs/ip_vs_conn.c &cp->vaddr, p->vaddr); vaddr 1118 net/netfilter/ipvs/ip_vs_conn.c &cp->vaddr.in6, ntohs(cp->vport), vaddr 1131 net/netfilter/ipvs/ip_vs_conn.c ntohl(cp->vaddr.ip), ntohs(cp->vport), vaddr 1184 net/netfilter/ipvs/ip_vs_conn.c &cp->vaddr.in6, ntohs(cp->vport), vaddr 1197 net/netfilter/ipvs/ip_vs_conn.c ntohl(cp->vaddr.ip), ntohs(cp->vport), vaddr 245 net/netfilter/ipvs/ip_vs_core.c const union nf_inet_addr *vaddr, __be16 vport, vaddr 248 net/netfilter/ipvs/ip_vs_core.c ip_vs_conn_fill_param(svc->ipvs, svc->af, protocol, caddr, cport, vaddr, vaddr 319 net/netfilter/ipvs/ip_vs_core.c const union nf_inet_addr *vaddr = dst_addr; vaddr 340 net/netfilter/ipvs/ip_vs_core.c vaddr = &fwmark; vaddr 345 net/netfilter/ipvs/ip_vs_core.c vaddr, vport, ¶m) < 0) { vaddr 462 net/netfilter/ipvs/ip_vs_core.c const void *caddr, *vaddr; vaddr 477 net/netfilter/ipvs/ip_vs_core.c vaddr = &iph->daddr; vaddr 482 net/netfilter/ipvs/ip_vs_core.c vaddr = &iph->saddr; vaddr 560 net/netfilter/ipvs/ip_vs_core.c caddr, cport, vaddr, vport, &p); vaddr 574 net/netfilter/ipvs/ip_vs_core.c 
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), vaddr 776 net/netfilter/ipvs/ip_vs_core.c iph->saddr = cp->vaddr.ip; vaddr 778 net/netfilter/ipvs/ip_vs_core.c ciph->daddr = cp->vaddr.ip; vaddr 831 net/netfilter/ipvs/ip_vs_core.c iph->saddr = cp->vaddr.in6; vaddr 832 net/netfilter/ipvs/ip_vs_core.c ciph->daddr = cp->vaddr.in6; vaddr 1157 net/netfilter/ipvs/ip_vs_core.c const union nf_inet_addr *vaddr, *daddr, *caddr; vaddr 1163 net/netfilter/ipvs/ip_vs_core.c vaddr = &svc->addr; vaddr 1186 net/netfilter/ipvs/ip_vs_core.c &snet, 0, vaddr, vaddr 1209 net/netfilter/ipvs/ip_vs_core.c caddr, cport, vaddr, vport, ¶m); vaddr 1227 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), vaddr 1297 net/netfilter/ipvs/ip_vs_core.c ipv6_hdr(skb)->saddr = cp->vaddr.in6; vaddr 1301 net/netfilter/ipvs/ip_vs_core.c ip_hdr(skb)->saddr = cp->vaddr.ip; vaddr 356 net/netfilter/ipvs/ip_vs_ctl.c const union nf_inet_addr *vaddr, __be16 vport) vaddr 362 net/netfilter/ipvs/ip_vs_ctl.c hash = ip_vs_svc_hashkey(ipvs, af, protocol, vaddr, vport); vaddr 366 net/netfilter/ipvs/ip_vs_ctl.c && ip_vs_addr_equal(af, &svc->addr, vaddr) vaddr 405 net/netfilter/ipvs/ip_vs_ctl.c const union nf_inet_addr *vaddr, __be16 vport) vaddr 422 net/netfilter/ipvs/ip_vs_ctl.c svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, vport); vaddr 431 net/netfilter/ipvs/ip_vs_ctl.c svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, FTPPORT); vaddr 439 net/netfilter/ipvs/ip_vs_ctl.c svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, 0); vaddr 445 net/netfilter/ipvs/ip_vs_ctl.c IP_VS_DBG_ADDR(af, vaddr), ntohs(vport), vaddr 679 net/netfilter/ipvs/ip_vs_ctl.c const union nf_inet_addr *vaddr, vaddr 687 net/netfilter/ipvs/ip_vs_ctl.c svc = ip_vs_service_find(ipvs, svc_af, fwmark, protocol, vaddr, vport); vaddr 754 net/netfilter/ipvs/ip_vs_ctl.c (ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) && vaddr 995 net/netfilter/ipvs/ip_vs_ctl.c dest->vaddr = svc->addr; vaddr 1077 net/netfilter/ipvs/ip_vs_ctl.c IP_VS_DBG_ADDR(svc->af, &dest->vaddr), vaddr 330 net/netfilter/ipvs/ip_vs_ftp.c 0, &cp->vaddr, port, &p); vaddr 344 net/netfilter/ipvs/ip_vs_ftp.c from.ip = n_cp->vaddr.ip; vaddr 354 net/netfilter/ipvs/ip_vs_ftp.c from = n_cp->vaddr; vaddr 504 net/netfilter/ipvs/ip_vs_ftp.c &to.ip, ntohs(port), &cp->vaddr.ip, vaddr 520 net/netfilter/ipvs/ip_vs_ftp.c IP_VS_DBG_ADDR(cp->af, &cp->vaddr), vaddr 532 net/netfilter/ipvs/ip_vs_ftp.c ipvsh->protocol, &to, port, &cp->vaddr, vaddr 64 net/netfilter/ipvs/ip_vs_nfct.c IP_VS_DBG_ADDR((C)->af, &((C)->vaddr)), \ vaddr 114 net/netfilter/ipvs/ip_vs_nfct.c new_tuple.dst.u3 = cp->vaddr; vaddr 171 net/netfilter/ipvs/ip_vs_nfct.c new_reply.dst.u3 = cp->vaddr; vaddr 226 net/netfilter/ipvs/ip_vs_nfct.c from_rs ? 
&cp->caddr : &cp->vaddr, vaddr 256 net/netfilter/ipvs/ip_vs_nfct.c tuple.dst.u3 = cp->vaddr; vaddr 118 net/netfilter/ipvs/ip_vs_pe_sip.c p->vaddr, &ct->vaddr) && vaddr 129 net/netfilter/ipvs/ip_vs_pe_sip.c IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport), vaddr 542 net/netfilter/ipvs/ip_vs_proto_sctp.c IP_VS_DBG_ADDR(cp->af, &cp->vaddr), vaddr 187 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, vaddr 192 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, vaddr 203 net/netfilter/ipvs/ip_vs_proto_tcp.c tcph->check = csum_ipv6_magic(&cp->vaddr.in6, vaddr 209 net/netfilter/ipvs/ip_vs_proto_tcp.c tcph->check = csum_tcpudp_magic(cp->vaddr.ip, vaddr 270 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_partial_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr, vaddr 275 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr, vaddr 673 net/netfilter/ipvs/ip_vs_proto_tcp.c IP_VS_DBG_ADDR(cp->af, &cp->vaddr), vaddr 180 net/netfilter/ipvs/ip_vs_proto_udp.c udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, vaddr 185 net/netfilter/ipvs/ip_vs_proto_udp.c udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, vaddr 196 net/netfilter/ipvs/ip_vs_proto_udp.c udph->check = csum_ipv6_magic(&cp->vaddr.in6, vaddr 202 net/netfilter/ipvs/ip_vs_proto_udp.c udph->check = csum_tcpudp_magic(cp->vaddr.ip, vaddr 264 net/netfilter/ipvs/ip_vs_proto_udp.c udp_partial_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr, vaddr 269 net/netfilter/ipvs/ip_vs_proto_udp.c udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr, vaddr 419 net/netfilter/ipvs/ip_vs_proto_udp.c IP_VS_DBG_ADDR(cp->af, &cp->vaddr), vaddr 80 net/netfilter/ipvs/ip_vs_sync.c __be32 vaddr; /* virtual address */ vaddr 147 net/netfilter/ipvs/ip_vs_sync.c __be32 vaddr; /* virtual address */ vaddr 169 net/netfilter/ipvs/ip_vs_sync.c struct in6_addr vaddr; /* virtual address */ vaddr 602 net/netfilter/ipvs/ip_vs_sync.c s->vaddr = cp->vaddr.ip; vaddr 741 net/netfilter/ipvs/ip_vs_sync.c s->v6.vaddr = cp->vaddr.in6; vaddr 748 net/netfilter/ipvs/ip_vs_sync.c s->v4.vaddr = cp->vaddr.ip; vaddr 802 net/netfilter/ipvs/ip_vs_sync.c (const union nf_inet_addr *)&sc->v6.vaddr, vaddr 809 net/netfilter/ipvs/ip_vs_sync.c (const union nf_inet_addr *)&sc->v4.vaddr, vaddr 912 net/netfilter/ipvs/ip_vs_sync.c param->vaddr, param->vport, protocol, vaddr 1018 net/netfilter/ipvs/ip_vs_sync.c (const union nf_inet_addr *)&s->vaddr, vaddr 653 net/netfilter/ipvs/ip_vs_xmit.c !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr)) vaddr 140 net/netfilter/xt_ipvs.c if (ipvs_mt_addrcmp(&cp->vaddr, &data->vaddr, vaddr 903 samples/vfio-mdev/mbochs.c void *vaddr) vaddr 905 samples/vfio-mdev/mbochs.c kunmap(vaddr); vaddr 3357 sound/core/pcm_native.c void *vaddr = substream->runtime->dma_area + ofs; vaddr 3358 sound/core/pcm_native.c return virt_to_page(vaddr); vaddr 39 sound/pci/asihpi/hpios.c p_mem_area->vaddr = vaddr 43 sound/pci/asihpi/hpios.c if (p_mem_area->vaddr) { vaddr 46 sound/pci/asihpi/hpios.c p_mem_area->vaddr); vaddr 62 sound/pci/asihpi/hpios.c p_mem_area->vaddr, p_mem_area->dma_handle); vaddr 66 sound/pci/asihpi/hpios.c p_mem_area->vaddr); vaddr 40 sound/pci/asihpi/hpios.h void *vaddr; vaddr 54 sound/pci/asihpi/hpios.h *pp_virtual_addr = locked_mem_handle->vaddr; vaddr 133 sound/usb/usx2y/us122l.c void *vaddr; vaddr 144 sound/usb/usx2y/us122l.c vaddr = (char *)s + offset; vaddr 150 sound/usb/usx2y/us122l.c vaddr = us122l->sk.write_page + offset; 
vaddr 25 sound/usb/usx2y/usX2Yhwdep.c void *vaddr;
vaddr 32 sound/usb/usx2y/usX2Yhwdep.c vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;
vaddr 33 sound/usb/usx2y/usX2Yhwdep.c page = virt_to_page(vaddr);
vaddr 38 sound/usb/usx2y/usX2Yhwdep.c vaddr, page);
vaddr 648 sound/usb/usx2y/usx2yhwdeppcm.c void *vaddr;
vaddr 651 sound/usb/usx2y/usx2yhwdeppcm.c vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->hwdep_pcm_shm + offset;
vaddr 652 sound/usb/usx2y/usx2yhwdeppcm.c vmf->page = virt_to_page(vaddr);
vaddr 106 tools/testing/selftests/kvm/include/kvm_util.h void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
vaddr 144 tools/testing/selftests/kvm/include/kvm_util.h void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
vaddr 88 tools/testing/selftests/kvm/lib/aarch64/processor.c void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
vaddr 94 tools/testing/selftests/kvm/lib/aarch64/processor.c TEST_ASSERT((vaddr % vm->page_size) == 0,
vaddr 96 tools/testing/selftests/kvm/lib/aarch64/processor.c " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
vaddr 98 tools/testing/selftests/kvm/lib/aarch64/processor.c (vaddr >> vm->page_shift)),
vaddr 99 tools/testing/selftests/kvm/lib/aarch64/processor.c "Invalid virtual address, vaddr: 0x%lx", vaddr);
vaddr 108 tools/testing/selftests/kvm/lib/aarch64/processor.c ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
vaddr 116 tools/testing/selftests/kvm/lib/aarch64/processor.c ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
vaddr 123 tools/testing/selftests/kvm/lib/aarch64/processor.c ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
vaddr 130 tools/testing/selftests/kvm/lib/aarch64/processor.c ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
vaddr 140 tools/testing/selftests/kvm/lib/aarch64/processor.c void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
vaddr 145 tools/testing/selftests/kvm/lib/aarch64/processor.c _virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx);
vaddr 167 tools/testing/selftests/kvm/lib/elf.c vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart,
vaddr 169 tools/testing/selftests/kvm/lib/elf.c TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
vaddr 174 tools/testing/selftests/kvm/lib/elf.c n1, seg_vstart, vaddr);
vaddr 175 tools/testing/selftests/kvm/lib/elf.c memset(addr_gva2hva(vm, vaddr), 0, seg_size);
vaddr 962 tools/testing/selftests/kvm/lib/kvm_util.c for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
vaddr 963 tools/testing/selftests/kvm/lib/kvm_util.c pages--, vaddr += vm->page_size) {
vaddr 969 tools/testing/selftests/kvm/lib/kvm_util.c virt_pg_map(vm, vaddr, paddr, pgd_memslot);
vaddr 972 tools/testing/selftests/kvm/lib/kvm_util.c vaddr >> vm->page_shift);
vaddr 995 tools/testing/selftests/kvm/lib/kvm_util.c void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
vaddr 1001 tools/testing/selftests/kvm/lib/kvm_util.c TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
vaddr 1005 tools/testing/selftests/kvm/lib/kvm_util.c virt_pg_map(vm, vaddr, paddr, pgd_memslot);
vaddr 1006 tools/testing/selftests/kvm/lib/kvm_util.c vaddr += page_size;
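Aside: the kvm_util.c entries above show the shape of virt_map(): assert that vaddr + size does not wrap, then call virt_pg_map() once per page while stepping vaddr by the page size. A hypothetical userspace model of that loop is sketched below; map_page() and the constants are stand-ins, not the selftest API, and only the overflow guard is visible in the listing (the alignment check is assumed).

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Stand-in for virt_pg_map(): record one page-granular mapping. */
    static void map_page(uint64_t vaddr, uint64_t paddr)
    {
        printf("map 0x%llx -> 0x%llx\n",
               (unsigned long long)vaddr, (unsigned long long)paddr);
    }

    static void virt_map_model(uint64_t vaddr, uint64_t paddr, uint64_t npages)
    {
        uint64_t size = npages * PAGE_SIZE;

        /* Guard the range: page-aligned and no wraparound, in the spirit
         * of TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow"). */
        assert(vaddr % PAGE_SIZE == 0 && paddr % PAGE_SIZE == 0);
        assert(vaddr + size > vaddr);

        while (npages--) {
            map_page(vaddr, paddr);   /* one page per iteration */
            vaddr += PAGE_SIZE;
            paddr += PAGE_SIZE;
        }
    }

    int main(void)
    {
        virt_map_model(0x400000, 0x1000, 3);
        return 0;
    }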
vaddr 258 tools/testing/selftests/kvm/lib/x86_64/processor.c void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
vaddr 267 tools/testing/selftests/kvm/lib/x86_64/processor.c TEST_ASSERT((vaddr % vm->page_size) == 0,
vaddr 270 tools/testing/selftests/kvm/lib/x86_64/processor.c vaddr, vm->page_size);
vaddr 272 tools/testing/selftests/kvm/lib/x86_64/processor.c (vaddr >> vm->page_shift)),
vaddr 274 tools/testing/selftests/kvm/lib/x86_64/processor.c vaddr);
vaddr 284 tools/testing/selftests/kvm/lib/x86_64/processor.c index[0] = (vaddr >> 12) & 0x1ffu;
vaddr 285 tools/testing/selftests/kvm/lib/x86_64/processor.c index[1] = (vaddr >> 21) & 0x1ffu;
vaddr 286 tools/testing/selftests/kvm/lib/x86_64/processor.c index[2] = (vaddr >> 30) & 0x1ffu;
vaddr 287 tools/testing/selftests/kvm/lib/x86_64/processor.c index[3] = (vaddr >> 39) & 0x1ffu;
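Aside: the last four hits are the x86_64 selftest splitting a guest virtual address into its 4-level page-table indices. With 4 KiB pages each table holds 512 entries, so each level consumes 9 bits (mask 0x1ff) above the 12-bit page offset, at shifts 12, 21, 30, and 39. A standalone sketch of the same decomposition follows; the level names are the conventional x86_64 ones, not taken from the listing.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t vaddr = 0x7f1234567000ull;  /* example canonical address */
        unsigned int index[4];

        /* Same shifts and mask as the processor.c lines above. */
        index[0] = (vaddr >> 12) & 0x1ffu;   /* page table (PT) */
        index[1] = (vaddr >> 21) & 0x1ffu;   /* page directory (PD) */
        index[2] = (vaddr >> 30) & 0x1ffu;   /* PDPT */
        index[3] = (vaddr >> 39) & 0x1ffu;   /* PML4 */

        printf("PML4 %u, PDPT %u, PD %u, PT %u, offset 0x%llx\n",
               index[3], index[2], index[1], index[0],
               (unsigned long long)(vaddr & 0xfffu));
        return 0;
    }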