Searched refs:vaddr (Results 1 - 200 of 739) sorted by relevance


/linux-4.4.14/arch/m68k/include/asm/
bitops.h
31 static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr) bset_reg_set_bit() argument
33 char *p = (char *)vaddr + (nr ^ 31) / 8; bset_reg_set_bit()
41 static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr) bset_mem_set_bit() argument
43 char *p = (char *)vaddr + (nr ^ 31) / 8; bset_mem_set_bit()
50 static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr) bfset_mem_set_bit() argument
54 : "d" (nr ^ 31), "o" (*vaddr) bfset_mem_set_bit()
59 #define set_bit(nr, vaddr) bset_reg_set_bit(nr, vaddr)
61 #define set_bit(nr, vaddr) bset_mem_set_bit(nr, vaddr)
63 #define set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
64 bset_mem_set_bit(nr, vaddr) : \
65 bfset_mem_set_bit(nr, vaddr))
68 #define __set_bit(nr, vaddr) set_bit(nr, vaddr)
71 static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr) bclr_reg_clear_bit() argument
73 char *p = (char *)vaddr + (nr ^ 31) / 8; bclr_reg_clear_bit()
81 static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr) bclr_mem_clear_bit() argument
83 char *p = (char *)vaddr + (nr ^ 31) / 8; bclr_mem_clear_bit()
90 static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr) bfclr_mem_clear_bit() argument
94 : "d" (nr ^ 31), "o" (*vaddr) bfclr_mem_clear_bit()
99 #define clear_bit(nr, vaddr) bclr_reg_clear_bit(nr, vaddr)
101 #define clear_bit(nr, vaddr) bclr_mem_clear_bit(nr, vaddr)
103 #define clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
104 bclr_mem_clear_bit(nr, vaddr) : \
105 bfclr_mem_clear_bit(nr, vaddr))
108 #define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
111 static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr) bchg_reg_change_bit() argument
113 char *p = (char *)vaddr + (nr ^ 31) / 8; bchg_reg_change_bit()
121 static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr) bchg_mem_change_bit() argument
123 char *p = (char *)vaddr + (nr ^ 31) / 8; bchg_mem_change_bit()
130 static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr) bfchg_mem_change_bit() argument
134 : "d" (nr ^ 31), "o" (*vaddr) bfchg_mem_change_bit()
139 #define change_bit(nr, vaddr) bchg_reg_change_bit(nr, vaddr)
141 #define change_bit(nr, vaddr) bchg_mem_change_bit(nr, vaddr)
143 #define change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
144 bchg_mem_change_bit(nr, vaddr) : \
145 bfchg_mem_change_bit(nr, vaddr))
148 #define __change_bit(nr, vaddr) change_bit(nr, vaddr)
151 static inline int test_bit(int nr, const unsigned long *vaddr) test_bit() argument
153 return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; test_bit()
158 volatile unsigned long *vaddr) bset_reg_test_and_set_bit()
160 char *p = (char *)vaddr + (nr ^ 31) / 8; bset_reg_test_and_set_bit()
171 volatile unsigned long *vaddr) bset_mem_test_and_set_bit()
173 char *p = (char *)vaddr + (nr ^ 31) / 8; bset_mem_test_and_set_bit()
183 volatile unsigned long *vaddr) bfset_mem_test_and_set_bit()
189 : "d" (nr ^ 31), "o" (*vaddr) bfset_mem_test_and_set_bit()
195 #define test_and_set_bit(nr, vaddr) bset_reg_test_and_set_bit(nr, vaddr)
197 #define test_and_set_bit(nr, vaddr) bset_mem_test_and_set_bit(nr, vaddr)
199 #define test_and_set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
200 bset_mem_test_and_set_bit(nr, vaddr) : \
201 bfset_mem_test_and_set_bit(nr, vaddr))
204 #define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
208 volatile unsigned long *vaddr) bclr_reg_test_and_clear_bit()
210 char *p = (char *)vaddr + (nr ^ 31) / 8; bclr_reg_test_and_clear_bit()
221 volatile unsigned long *vaddr) bclr_mem_test_and_clear_bit()
223 char *p = (char *)vaddr + (nr ^ 31) / 8; bclr_mem_test_and_clear_bit()
233 volatile unsigned long *vaddr) bfclr_mem_test_and_clear_bit()
239 : "d" (nr ^ 31), "o" (*vaddr) bfclr_mem_test_and_clear_bit()
245 #define test_and_clear_bit(nr, vaddr) bclr_reg_test_and_clear_bit(nr, vaddr)
247 #define test_and_clear_bit(nr, vaddr) bclr_mem_test_and_clear_bit(nr, vaddr)
249 #define test_and_clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
250 bclr_mem_test_and_clear_bit(nr, vaddr) : \
251 bfclr_mem_test_and_clear_bit(nr, vaddr))
254 #define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
258 volatile unsigned long *vaddr) bchg_reg_test_and_change_bit()
260 char *p = (char *)vaddr + (nr ^ 31) / 8; bchg_reg_test_and_change_bit()
271 volatile unsigned long *vaddr) bchg_mem_test_and_change_bit()
273 char *p = (char *)vaddr + (nr ^ 31) / 8; bchg_mem_test_and_change_bit()
283 volatile unsigned long *vaddr) bfchg_mem_test_and_change_bit()
289 : "d" (nr ^ 31), "o" (*vaddr) bfchg_mem_test_and_change_bit()
295 #define test_and_change_bit(nr, vaddr) bchg_reg_test_and_change_bit(nr, vaddr)
297 #define test_and_change_bit(nr, vaddr) bchg_mem_test_and_change_bit(nr, vaddr)
299 #define test_and_change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
300 bchg_mem_test_and_change_bit(nr, vaddr) : \
301 bfchg_mem_test_and_change_bit(nr, vaddr))
304 #define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
318 static inline int find_first_zero_bit(const unsigned long *vaddr, find_first_zero_bit() argument
321 const unsigned long *p = vaddr; find_first_zero_bit()
339 res += ((long)p - (long)vaddr - 4) * 8; find_first_zero_bit()
344 static inline int find_next_zero_bit(const unsigned long *vaddr, int size, find_next_zero_bit() argument
347 const unsigned long *p = vaddr + (offset >> 5); find_next_zero_bit()
374 static inline int find_first_bit(const unsigned long *vaddr, unsigned size) find_first_bit() argument
376 const unsigned long *p = vaddr; find_first_bit()
394 res += ((long)p - (long)vaddr - 4) * 8; find_first_bit()
399 static inline int find_next_bit(const unsigned long *vaddr, int size, find_next_bit() argument
402 const unsigned long *p = vaddr + (offset >> 5); find_next_bit()
157 bset_reg_test_and_set_bit(int nr, volatile unsigned long *vaddr) bset_reg_test_and_set_bit() argument
170 bset_mem_test_and_set_bit(int nr, volatile unsigned long *vaddr) bset_mem_test_and_set_bit() argument
182 bfset_mem_test_and_set_bit(int nr, volatile unsigned long *vaddr) bfset_mem_test_and_set_bit() argument
207 bclr_reg_test_and_clear_bit(int nr, volatile unsigned long *vaddr) bclr_reg_test_and_clear_bit() argument
220 bclr_mem_test_and_clear_bit(int nr, volatile unsigned long *vaddr) bclr_mem_test_and_clear_bit() argument
232 bfclr_mem_test_and_clear_bit(int nr, volatile unsigned long *vaddr) bfclr_mem_test_and_clear_bit() argument
257 bchg_reg_test_and_change_bit(int nr, volatile unsigned long *vaddr) bchg_reg_test_and_change_bit() argument
270 bchg_mem_test_and_change_bit(int nr, volatile unsigned long *vaddr) bchg_mem_test_and_change_bit() argument
282 bfchg_mem_test_and_change_bit(int nr, volatile unsigned long *vaddr) bfchg_mem_test_and_change_bit() argument
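Note: all of the m68k helpers above address the bitmap one byte at a time, because the 68k bset/bclr/bchg instructions operate on bytes: `(nr ^ 31) / 8` picks the byte of the big-endian bitmap that holds bit `nr`, and `nr & 7` is the bit within that byte. A stand-alone sketch of that arithmetic (plain C, runnable anywhere; on m68k it selects exactly the bit that test_bit() reads via `vaddr[nr >> 5] & (1UL << (nr & 31))`):

#include <stdio.h>

int main(void)
{
	/* byte offset and bit-in-byte used by the m68k bit helpers */
	for (int nr = 0; nr < 40; nr++)
		printf("nr=%2d -> byte %d, bit %d\n",
		       nr, (nr ^ 31) / 8, nr & 7);
	return 0;
}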
page_no.h
9 #define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
15 #define clear_user_page(page, vaddr, pg) clear_page(page)
16 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
18 #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
19 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
22 #define __pa(vaddr) ((unsigned long)(vaddr))
page_mm.h
9 #define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
58 #define clear_user_page(addr, vaddr, page) \
62 #define copy_user_page(to, from, vaddr, page) \
73 static inline unsigned long ___pa(void *vaddr) ___pa() argument
80 : "0" (vaddr), "i" (m68k_fixup_memoffset)); ___pa()
83 #define __pa(vaddr) ___pa((void *)(long)(vaddr)) __va()
86 void *vaddr; __va() local
90 : "=r" (vaddr) __va()
92 return vaddr; __va()
cacheflush_mm.h
155 extern void cache_push_v(unsigned long vaddr, int len);
222 static inline void __flush_page_to_ram(void *vaddr) __flush_page_to_ram() argument
226 addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1); __flush_page_to_ram()
239 : : "a" (__pa(vaddr))); __flush_page_to_ram()
261 struct page *page, unsigned long vaddr, copy_to_user_page()
264 flush_cache_page(vma, vaddr, page_to_pfn(page)); copy_to_user_page()
266 flush_icache_user_range(vma, page, vaddr, len); copy_to_user_page()
269 struct page *page, unsigned long vaddr, copy_from_user_page()
272 flush_cache_page(vma, vaddr, page_to_pfn(page)); copy_from_user_page()
260 copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, void *src, int len) copy_to_user_page() argument
268 copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, void *src, int len) copy_from_user_page() argument
/linux-4.4.14/arch/m68k/sun3/
dvma.c
23 static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr) dvma_page() argument
35 // kaddr, vaddr, pte, len); dvma_page()
36 if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) { dvma_page()
37 sun3_put_pte(vaddr, pte); dvma_page()
38 ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte; dvma_page()
41 return (vaddr + (kaddr & ~PAGE_MASK)); dvma_page()
50 unsigned long vaddr; dvma_map_iommu() local
52 vaddr = dvma_btov(baddr); dvma_map_iommu()
54 end = vaddr + len; dvma_map_iommu()
56 while(vaddr < end) { dvma_map_iommu()
57 dvma_page(kaddr, vaddr); dvma_map_iommu()
59 vaddr += PAGE_SIZE; dvma_map_iommu()
mmu_emu.c
117 void print_pte_vaddr (unsigned long vaddr) print_pte_vaddr() argument
119 printk (" vaddr=%lx [%02lx]", vaddr, sun3_get_segmap (vaddr)); print_pte_vaddr()
120 print_pte (__pte (sun3_get_pte (vaddr))); print_pte_vaddr()
275 * Dynamically select a `spare' PMEG and use it to map virtual `vaddr' in
282 inline void mmu_emu_map_pmeg (int context, int vaddr) mmu_emu_map_pmeg() argument
288 vaddr &= ~SUN3_PMEG_MASK; mmu_emu_map_pmeg()
296 printk("mmu_emu_map_pmeg: pmeg %x to context %d vaddr %x\n", mmu_emu_map_pmeg()
297 curr_pmeg, context, vaddr); mmu_emu_map_pmeg()
309 if(vaddr >= PAGE_OFFSET) { mmu_emu_map_pmeg()
315 sun3_put_segmap (vaddr, curr_pmeg); mmu_emu_map_pmeg()
325 sun3_put_segmap (vaddr, curr_pmeg); mmu_emu_map_pmeg()
328 pmeg_vaddr[curr_pmeg] = vaddr; mmu_emu_map_pmeg()
332 sun3_put_pte (vaddr + i, SUN3_PAGE_SYSTEM); mmu_emu_map_pmeg()
339 * Handle a pagefault at virtual address `vaddr'; check if there should be a
354 int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault) mmu_emu_handle_fault() argument
373 printk ("mmu_emu_handle_fault: vaddr=%lx type=%s crp=%p\n", mmu_emu_handle_fault()
374 vaddr, read_flag ? "read" : "write", crp); mmu_emu_handle_fault()
377 segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF; mmu_emu_handle_fault()
378 offset = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF; mmu_emu_handle_fault()
399 if (sun3_get_segmap (vaddr&~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG) mmu_emu_handle_fault()
400 mmu_emu_map_pmeg (context, vaddr); mmu_emu_handle_fault()
403 sun3_put_pte (vaddr&PAGE_MASK, pte_val (*pte)); mmu_emu_handle_fault()
421 print_pte_vaddr (vaddr); mmu_emu_handle_fault()
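Note: mmu_emu_handle_fault() splits the faulting address into a segment number (which PMEG) and a PTE slot within that PMEG using plain shifts and masks. A hedged stand-alone sketch; the two shift constants below are assumptions matching the Sun3's 8 KiB pages and 128 KiB segments, the real values live in the sun3 headers:

#include <stdio.h>

#define SUN3_PTE_SIZE_BITS  13	/* assumed: 8 KiB pages */
#define SUN3_PMEG_SIZE_BITS 17	/* assumed: 128 KiB segments */

int main(void)
{
	unsigned long vaddr = 0x0e012345UL;	/* arbitrary example */
	unsigned long segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF;
	unsigned long offset  = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF;

	printf("vaddr=%#lx -> segment=%#lx pte-offset=%lu\n",
	       vaddr, segment, offset);
	return 0;
}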
/linux-4.4.14/arch/sh/mm/
kmap.c
18 #define kmap_get_fixmap_pte(vaddr) \
19 pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
25 unsigned long vaddr; kmap_coherent_init() local
28 vaddr = __fix_to_virt(FIX_CMAP_BEGIN); kmap_coherent_init()
29 kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); kmap_coherent_init()
35 unsigned long vaddr; kmap_coherent() local
46 vaddr = __fix_to_virt(idx); kmap_coherent()
51 return (void *)vaddr; kmap_coherent()
57 unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK; kunmap_coherent() local
58 enum fixed_addresses idx = __virt_to_fix(vaddr); kunmap_coherent()
61 __flush_purge_region((void *)vaddr, PAGE_SIZE); kunmap_coherent()
63 pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx); kunmap_coherent()
64 local_flush_tlb_one(get_asid(), vaddr); kunmap_coherent() local
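Note: kmap_coherent() manufactures its mapping addresses with __fix_to_virt(), which is pure arithmetic counting page-sized slots down from the top of the fixmap region. A sketch of the round trip, assuming the asm-generic form of the macros and illustrative FIXADDR_TOP/PAGE_SHIFT values:

#include <stdio.h>

#define PAGE_SHIFT  12			/* illustrative */
#define FIXADDR_TOP 0xfffff000UL	/* illustrative */
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) \
	((FIXADDR_TOP - ((x) & ~((1UL << PAGE_SHIFT) - 1))) >> PAGE_SHIFT)

int main(void)
{
	unsigned long idx = 5;
	unsigned long vaddr = __fix_to_virt(idx);

	printf("idx %lu -> vaddr %#lx -> idx %lu\n",
	       idx, vaddr, __virt_to_fix(vaddr));
	return 0;
}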
cache.c
59 unsigned long vaddr, void *dst, const void *src, copy_to_user_page()
64 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); copy_to_user_page()
74 flush_cache_page(vma, vaddr, page_to_pfn(page)); copy_to_user_page()
78 unsigned long vaddr, void *dst, const void *src, copy_from_user_page()
83 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); copy_from_user_page()
94 unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage()
102 vfrom = kmap_coherent(from, vaddr); copy_user_highpage()
111 if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) || copy_user_highpage()
121 void clear_user_highpage(struct page *page, unsigned long vaddr) clear_user_highpage() argument
127 if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) clear_user_highpage()
58 copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) copy_to_user_page() argument
77 copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) copy_from_user_page() argument
93 copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage() argument
consistent.c
68 void *vaddr, dma_addr_t dma_handle, dma_generic_free_coherent()
78 iounmap(vaddr); dma_generic_free_coherent()
81 void dma_cache_sync(struct device *dev, void *vaddr, size_t size, dma_cache_sync() argument
87 (void *)CAC_ADDR((unsigned long)vaddr) : vaddr; dma_cache_sync()
67 dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) dma_generic_free_coherent() argument
init.c
153 unsigned long vaddr, pte_t *lastpte) page_table_kmap_check()
166 unsigned long vaddr; page_table_range_init() local
168 vaddr = start; page_table_range_init()
169 i = __pgd_offset(vaddr); page_table_range_init()
170 j = __pud_offset(vaddr); page_table_range_init()
171 k = __pmd_offset(vaddr); page_table_range_init()
174 for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { page_table_range_init()
176 for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { page_table_range_init()
181 for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { page_table_range_init()
183 pmd, vaddr, pte); page_table_range_init()
184 vaddr += PMD_SIZE; page_table_range_init()
324 unsigned long vaddr, end; paging_init() local
373 vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; paging_init()
375 page_table_range_init(vaddr, end, swapper_pg_dir); paging_init()
152 page_table_kmap_check(pte_t *pte, pmd_t *pmd, unsigned long vaddr, pte_t *lastpte) page_table_kmap_check() argument
ioremap.c
108 unsigned long vaddr = (unsigned long __force)addr; __iounmap() local
114 if (iomapping_nontranslatable(vaddr)) __iounmap()
129 p = remove_vm_area((void *)(vaddr & PAGE_MASK)); __iounmap()
pmb.c
137 static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys, pmb_mapping_exists() argument
156 if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size))) pmb_mapping_exists()
335 int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys, pmb_bolt_mapping() argument
345 if (!pmb_addr_valid(vaddr, size)) pmb_bolt_mapping()
347 if (pmb_mapping_exists(vaddr, phys, size)) pmb_bolt_mapping()
350 orig_addr = vaddr; pmb_bolt_mapping()
353 flush_tlb_kernel_range(vaddr, vaddr + size); pmb_bolt_mapping()
363 pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_bolt_mapping()
377 vaddr += pmbe->size; pmb_bolt_mapping()
413 unsigned long vaddr; pmb_remap_caller() local
452 vaddr = (unsigned long)area->addr; pmb_remap_caller()
454 ret = pmb_bolt_mapping(vaddr, phys, size, prot); pmb_remap_caller()
458 return (void __iomem *)(offset + (char *)vaddr); pmb_remap_caller()
464 unsigned long vaddr = (unsigned long __force)addr; pmb_unmap() local
472 if (pmbe->vpn == vaddr) { pmb_unmap()
/linux-4.4.14/arch/frv/include/asm/
virtconvert.h
24 #define phys_to_virt(vaddr) ((void *) ((unsigned long)(vaddr) + PAGE_OFFSET))
25 #define virt_to_phys(vaddr) ((unsigned long) (vaddr) - PAGE_OFFSET)
29 #define phys_to_virt(vaddr) ((void *) (vaddr))
30 #define virt_to_phys(vaddr) ((unsigned long) (vaddr))
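Note: with an MMU, frv's conversion is a pure linear offset, so the two macros are exact inverses. A minimal round-trip check (PAGE_OFFSET below is a made-up value for illustration):

#include <assert.h>
#include <stdio.h>

#define PAGE_OFFSET 0xc0000000UL	/* illustrative only */
#define phys_to_virt(paddr) ((void *)((unsigned long)(paddr) + PAGE_OFFSET))
#define virt_to_phys(vaddr) ((unsigned long)(vaddr) - PAGE_OFFSET)

int main(void)
{
	unsigned long paddr = 0x00123456UL;
	void *vaddr = phys_to_virt(paddr);

	assert(virt_to_phys(vaddr) == paddr);	/* exact inverse */
	printf("paddr %#lx <-> vaddr %p\n", paddr, vaddr);
	return 0;
}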
page.h
11 #define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
17 #define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
18 #define copy_user_page(vto, vfrom, vaddr, topg) memcpy((vto), (vfrom), PAGE_SIZE)
45 #define __pa(vaddr) virt_to_phys((void *) (unsigned long) (vaddr))
pci.h
29 extern void consistent_free(void *vaddr);
30 extern void consistent_sync(void *vaddr, size_t size, int direction);
39 void *vaddr, dma_addr_t dma_handle);
cacheflush.h
96 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
99 flush_icache_user_range((vma), (page), (vaddr), (len)); \
102 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.4.14/arch/arm/mm/
highmem.c
23 unsigned long vaddr = __fix_to_virt(idx); set_fixmap_pte() local
24 pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); set_fixmap_pte()
27 local_flush_tlb_kernel_page(vaddr); set_fixmap_pte()
30 static inline pte_t get_fixmap_pte(unsigned long vaddr) get_fixmap_pte() argument
32 pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); get_fixmap_pte()
58 unsigned long vaddr; kmap_atomic() local
83 vaddr = __fix_to_virt(idx); kmap_atomic()
89 BUG_ON(!pte_none(get_fixmap_pte(vaddr))); kmap_atomic()
98 return (void *)vaddr; kmap_atomic()
104 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; __kunmap_atomic() local
112 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); __kunmap_atomic()
114 BUG_ON(vaddr != __fix_to_virt(idx)); __kunmap_atomic()
120 } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { __kunmap_atomic()
122 kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); __kunmap_atomic()
131 unsigned long vaddr; kmap_atomic_pfn() local
142 vaddr = __fix_to_virt(idx); kmap_atomic_pfn()
144 BUG_ON(!pte_none(get_fixmap_pte(vaddr))); kmap_atomic_pfn()
148 return (void *)vaddr; kmap_atomic_pfn()
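Note: the usual calling pattern for the atomic kmap above is map, touch the page through the returned address, unmap, without sleeping in between. A kernel-style sketch; kmap_atomic()/kunmap_atomic() are the real <linux/highmem.h> API, while fill_page() is a hypothetical caller:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: zero one (possibly highmem) page atomically. */
static void fill_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* temporary kernel mapping */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);			/* release the fixmap slot */
}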
cache-xsc3l2.c
100 unsigned long vaddr; xsc3_l2_inv_range() local
107 vaddr = -1; /* to force the first mapping */ xsc3_l2_inv_range()
113 vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr); xsc3_l2_inv_range()
114 xsc3_l2_clean_mva(vaddr); xsc3_l2_inv_range()
115 xsc3_l2_inv_mva(vaddr); xsc3_l2_inv_range()
123 vaddr = l2_map_va(start, vaddr); xsc3_l2_inv_range()
124 xsc3_l2_inv_mva(vaddr); xsc3_l2_inv_range()
132 vaddr = l2_map_va(start, vaddr); xsc3_l2_inv_range()
133 xsc3_l2_clean_mva(vaddr); xsc3_l2_inv_range()
134 xsc3_l2_inv_mva(vaddr); xsc3_l2_inv_range()
137 l2_unmap_va(vaddr); xsc3_l2_inv_range()
144 unsigned long vaddr; xsc3_l2_clean_range() local
146 vaddr = -1; /* to force the first mapping */ xsc3_l2_clean_range()
150 vaddr = l2_map_va(start, vaddr); xsc3_l2_clean_range()
151 xsc3_l2_clean_mva(vaddr); xsc3_l2_clean_range()
155 l2_unmap_va(vaddr); xsc3_l2_clean_range()
182 unsigned long vaddr; xsc3_l2_flush_range() local
189 vaddr = -1; /* to force the first mapping */ xsc3_l2_flush_range()
193 vaddr = l2_map_va(start, vaddr); xsc3_l2_flush_range()
194 xsc3_l2_clean_mva(vaddr); xsc3_l2_flush_range()
195 xsc3_l2_inv_mva(vaddr); xsc3_l2_flush_range()
199 l2_unmap_va(vaddr); xsc3_l2_flush_range()
copypage-v6.c
34 struct page *from, unsigned long vaddr, struct vm_area_struct *vma) v6_copy_user_highpage_nonaliasing()
49 static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) v6_clear_user_highpage_nonaliasing() argument
73 struct page *from, unsigned long vaddr, struct vm_area_struct *vma) v6_copy_user_highpage_aliasing()
75 unsigned int offset = CACHE_COLOUR(vaddr); v6_copy_user_highpage_aliasing()
106 static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr) v6_clear_user_highpage_aliasing() argument
108 unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); v6_clear_user_highpage_aliasing()
33 v6_copy_user_highpage_nonaliasing(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) v6_copy_user_highpage_nonaliasing() argument
72 v6_copy_user_highpage_aliasing(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) v6_copy_user_highpage_aliasing() argument
copypage-xsc3.c
74 unsigned long vaddr, struct vm_area_struct *vma) xsc3_mc_copy_user_highpage()
80 flush_cache_page(vma, vaddr, page_to_pfn(from)); xsc3_mc_copy_user_highpage()
91 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) xsc3_mc_clear_user_highpage() argument
73 xsc3_mc_copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) xsc3_mc_copy_user_highpage() argument
copypage-feroceon.c
71 unsigned long vaddr, struct vm_area_struct *vma) feroceon_copy_user_highpage()
77 flush_cache_page(vma, vaddr, page_to_pfn(from)); feroceon_copy_user_highpage()
83 void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) feroceon_clear_user_highpage() argument
70 feroceon_copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) feroceon_copy_user_highpage() argument
copypage-v4wb.c
51 unsigned long vaddr, struct vm_area_struct *vma) v4wb_copy_user_highpage()
57 flush_cache_page(vma, vaddr, page_to_pfn(from)); v4wb_copy_user_highpage()
68 void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) v4wb_clear_user_highpage() argument
50 v4wb_copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) v4wb_copy_user_highpage() argument
/linux-4.4.14/arch/mips/mm/
pgtable-32.c
35 unsigned long vaddr; pagetable_init() local
54 vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; pagetable_init()
55 fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); pagetable_init()
61 vaddr = PKMAP_BASE; pagetable_init()
62 fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); pagetable_init()
64 pgd = swapper_pg_dir + __pgd_offset(vaddr); pagetable_init()
65 pud = pud_offset(pgd, vaddr); pagetable_init()
66 pmd = pmd_offset(pud, vaddr); pagetable_init()
67 pte = pte_offset_kernel(pmd, vaddr); pagetable_init()
highmem.c
47 unsigned long vaddr; kmap_atomic() local
57 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_atomic()
62 local_flush_tlb_one((unsigned long)vaddr); kmap_atomic()
64 return (void*) vaddr; kmap_atomic()
70 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; __kunmap_atomic() local
73 if (vaddr < FIXADDR_START) { // FIXME __kunmap_atomic()
84 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); __kunmap_atomic()
90 pte_clear(&init_mm, vaddr, kmap_pte-idx); __kunmap_atomic()
91 local_flush_tlb_one(vaddr); __kunmap_atomic()
106 unsigned long vaddr; kmap_atomic_pfn() local
114 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_atomic_pfn()
116 flush_tlb_one(vaddr); kmap_atomic_pfn()
118 return (void*) vaddr; kmap_atomic_pfn()
pgtable-64.c
97 unsigned long vaddr; pagetable_init() local
109 vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; pagetable_init()
110 fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); pagetable_init()
init.c
88 unsigned long vaddr, flags, entrylo; __kmap_pgprot() local
99 vaddr = __fix_to_virt(FIX_CMAP_END - idx); __kmap_pgprot()
109 write_c0_entryhi(vaddr & (PAGE_MASK << 1)); __kmap_pgprot()
126 return (void*) vaddr; __kmap_pgprot()
162 unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage()
169 vfrom = kmap_coherent(from, vaddr); copy_user_highpage()
178 pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) copy_user_highpage()
186 struct page *page, unsigned long vaddr, void *dst, const void *src, copy_to_user_page()
191 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); copy_to_user_page()
200 flush_cache_page(vma, vaddr, page_to_pfn(page)); copy_to_user_page()
204 struct page *page, unsigned long vaddr, void *dst, const void *src, copy_from_user_page()
209 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); copy_from_user_page()
229 unsigned long vaddr; fixrange_init() local
231 vaddr = start; fixrange_init()
232 i = __pgd_offset(vaddr); fixrange_init()
233 j = __pud_offset(vaddr); fixrange_init()
234 k = __pmd_offset(vaddr); fixrange_init()
237 for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) { fixrange_init()
239 for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) { fixrange_init()
241 for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) { fixrange_init()
247 vaddr += PMD_SIZE; fixrange_init()
161 copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage() argument
185 copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) copy_to_user_page() argument
203 copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) copy_from_user_page() argument
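Note: fixrange_init() walks the page tables by splitting vaddr into per-level indices, and each __pgd_offset()/__pud_offset()/__pmd_offset() is just a shift and mask. A stand-alone sketch of that split with illustrative shift values (the real ones are set by each architecture's page-table layout):

#include <stdio.h>

#define PAGE_SHIFT   12		/* illustrative two-level layout */
#define PGDIR_SHIFT  22
#define PTRS_PER_PTE 1024

int main(void)
{
	unsigned long vaddr = 0xc0123456UL;

	printf("vaddr %#lx -> pgd index %lu, pte index %lu\n", vaddr,
	       vaddr >> PGDIR_SHIFT,
	       (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
	return 0;
}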
/linux-4.4.14/arch/powerpc/mm/
highmem.c
34 unsigned long vaddr; kmap_atomic_prot() local
44 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_atomic_prot()
48 __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1); kmap_atomic_prot()
49 local_flush_tlb_page(NULL, vaddr); kmap_atomic_prot()
51 return (void*) vaddr; kmap_atomic_prot()
57 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; __kunmap_atomic() local
60 if (vaddr < __fix_to_virt(FIX_KMAP_END)) { __kunmap_atomic()
73 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); __kunmap_atomic()
79 pte_clear(&init_mm, vaddr, kmap_pte-idx); __kunmap_atomic()
80 local_flush_tlb_page(NULL, vaddr); __kunmap_atomic()
/linux-4.4.14/arch/microblaze/mm/
highmem.c
37 unsigned long vaddr; kmap_atomic_prot() local
48 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_atomic_prot()
52 set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); kmap_atomic_prot()
53 local_flush_tlb_page(NULL, vaddr); kmap_atomic_prot()
55 return (void *) vaddr; kmap_atomic_prot()
61 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; __kunmap_atomic() local
64 if (vaddr < __fix_to_virt(FIX_KMAP_END)) { __kunmap_atomic()
76 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); __kunmap_atomic()
82 pte_clear(&init_mm, vaddr, kmap_pte-idx); __kunmap_atomic()
83 local_flush_tlb_page(NULL, vaddr); __kunmap_atomic()
consistent.c
64 unsigned long order, vaddr; consistent_alloc() local
82 vaddr = __get_free_pages(gfp, order); consistent_alloc()
83 if (!vaddr) consistent_alloc()
90 flush_dcache_range(virt_to_phys((void *)vaddr), consistent_alloc()
91 virt_to_phys((void *)vaddr) + size); consistent_alloc()
94 ret = (void *)vaddr; consistent_alloc()
113 free_pages(vaddr, order); consistent_alloc()
120 *dma_handle = pa = __virt_to_phys(vaddr); consistent_alloc()
129 page = virt_to_page(vaddr); consistent_alloc()
151 free_pages(vaddr, order); consistent_alloc()
160 static pte_t *consistent_virt_to_pte(void *vaddr) consistent_virt_to_pte() argument
162 unsigned long addr = (unsigned long)vaddr; consistent_virt_to_pte()
167 unsigned long consistent_virt_to_pfn(void *vaddr) consistent_virt_to_pfn() argument
169 pte_t *ptep = consistent_virt_to_pte(vaddr); consistent_virt_to_pfn()
181 void consistent_free(size_t size, void *vaddr) consistent_free() argument
193 vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK); consistent_free()
195 page = virt_to_page(vaddr); consistent_free()
203 pte_t *ptep = consistent_virt_to_pte(vaddr); consistent_free()
208 pte_clear(&init_mm, (unsigned int)vaddr, ptep); consistent_free()
214 vaddr += PAGE_SIZE; consistent_free()
226 void consistent_sync(void *vaddr, size_t size, int direction) consistent_sync() argument
231 start = (unsigned long)vaddr; consistent_sync()
/linux-4.4.14/arch/parisc/kernel/
pci-dma.c
83 unsigned long vaddr, map_pte_uncached()
87 unsigned long orig_vaddr = vaddr; map_pte_uncached()
89 vaddr &= ~PMD_MASK; map_pte_uncached()
90 end = vaddr + size; map_pte_uncached()
102 vaddr += PAGE_SIZE; map_pte_uncached()
106 } while (vaddr < end); map_pte_uncached()
110 static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr, map_pmd_uncached() argument
114 unsigned long orig_vaddr = vaddr; map_pmd_uncached()
116 vaddr &= ~PGDIR_MASK; map_pmd_uncached()
117 end = vaddr + size; map_pmd_uncached()
121 pte_t * pte = pte_alloc_kernel(pmd, vaddr); map_pmd_uncached()
124 if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr)) map_pmd_uncached()
126 vaddr = (vaddr + PMD_SIZE) & PMD_MASK; map_pmd_uncached()
129 } while (vaddr < end); map_pmd_uncached()
133 static inline int map_uncached_pages(unsigned long vaddr, unsigned long size, map_uncached_pages() argument
137 unsigned long end = vaddr + size; map_uncached_pages()
139 dir = pgd_offset_k(vaddr); map_uncached_pages()
143 pmd = pmd_alloc(NULL, dir, vaddr); map_uncached_pages()
146 if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr)) map_uncached_pages()
148 vaddr = vaddr + PGDIR_SIZE; map_uncached_pages()
150 } while (vaddr && (vaddr < end)); map_uncached_pages()
154 static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr, unmap_uncached_pte() argument
159 unsigned long orig_vaddr = vaddr; unmap_uncached_pte()
168 pte = pte_offset_map(pmd, vaddr); unmap_uncached_pte()
169 vaddr &= ~PMD_MASK; unmap_uncached_pte()
170 end = vaddr + size; unmap_uncached_pte()
177 pte_clear(&init_mm, vaddr, pte); unmap_uncached_pte()
181 vaddr += PAGE_SIZE; unmap_uncached_pte()
187 } while (vaddr < end); unmap_uncached_pte()
190 static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr, unmap_uncached_pmd() argument
195 unsigned long orig_vaddr = vaddr; unmap_uncached_pmd()
204 pmd = pmd_offset(dir, vaddr); unmap_uncached_pmd()
205 vaddr &= ~PGDIR_MASK; unmap_uncached_pmd()
206 end = vaddr + size; unmap_uncached_pmd()
210 unmap_uncached_pte(pmd, orig_vaddr, end - vaddr); unmap_uncached_pmd()
211 vaddr = (vaddr + PMD_SIZE) & PMD_MASK; unmap_uncached_pmd()
214 } while (vaddr < end); unmap_uncached_pmd()
217 static void unmap_uncached_pages(unsigned long vaddr, unsigned long size) unmap_uncached_pages() argument
220 unsigned long end = vaddr + size; unmap_uncached_pages()
222 dir = pgd_offset_k(vaddr); unmap_uncached_pages()
224 unmap_uncached_pmd(dir, vaddr, end - vaddr); unmap_uncached_pages()
225 vaddr = vaddr + PGDIR_SIZE; unmap_uncached_pages()
227 } while (vaddr && (vaddr < end)); unmap_uncached_pages()
292 ** return the corresponding vaddr in the pcxl dma map pcxl_alloc_range()
306 pcxl_free_range(unsigned long vaddr, size_t size) pcxl_free_range() argument
309 unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3); pcxl_free_range()
418 unsigned long vaddr; pa11_dma_alloc_consistent() local
424 vaddr = pcxl_alloc_range(size); pa11_dma_alloc_consistent()
428 map_uncached_pages(vaddr, size, paddr); pa11_dma_alloc_consistent()
439 return (void *)vaddr; pa11_dma_alloc_consistent()
442 static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) pa11_dma_free_consistent() argument
448 unmap_uncached_pages((unsigned long)vaddr, size); pa11_dma_free_consistent()
449 pcxl_free_range((unsigned long)vaddr, size); pa11_dma_free_consistent()
486 unsigned long vaddr = (unsigned long)sg_virt(sg); for_each_sg() local
488 sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr); for_each_sg()
490 flush_kernel_dcache_range(vaddr, sg->length); for_each_sg()
582 void *vaddr, dma_addr_t iova) pa11_dma_free_noncoherent()
584 free_pages((unsigned long)vaddr, get_order(size)); pa11_dma_free_noncoherent()
82 map_pte_uncached(pte_t * pte, unsigned long vaddr, unsigned long size, unsigned long *paddr_ptr) map_pte_uncached() argument
581 pa11_dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, dma_addr_t iova) pa11_dma_free_noncoherent() argument
/linux-4.4.14/arch/metag/mm/
highmem.c
43 unsigned long vaddr; kmap_atomic() local
53 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_atomic()
59 return (void *)vaddr; kmap_atomic()
65 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; __kunmap_atomic() local
78 pte_clear(&init_mm, vaddr, kmap_pte-idx); __kunmap_atomic()
79 flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); __kunmap_atomic()
96 unsigned long vaddr; kmap_atomic_pfn() local
104 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_atomic_pfn()
109 flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); kmap_atomic_pfn()
111 return (void *)vaddr; kmap_atomic_pfn()
init.c
270 unsigned long vaddr; allocate_pgtables() local
272 vaddr = start; allocate_pgtables()
273 i = pgd_index(vaddr); allocate_pgtables()
274 j = pmd_index(vaddr); allocate_pgtables()
277 for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { allocate_pgtables()
279 for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) { allocate_pgtables()
280 vaddr += PMD_SIZE; allocate_pgtables()
294 unsigned long vaddr, end; fixedrange_init() local
303 vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; fixedrange_init()
305 allocate_pgtables(vaddr, end); fixedrange_init()
310 vaddr = PKMAP_BASE; fixedrange_init()
311 allocate_pgtables(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP); fixedrange_init()
313 pgd = swapper_pg_dir + pgd_index(vaddr); fixedrange_init()
314 pud = pud_offset(pgd, vaddr); fixedrange_init()
315 pmd = pmd_offset(pud, vaddr); fixedrange_init()
316 pte = pte_offset_kernel(pmd, vaddr); fixedrange_init()
mmu-meta2.c
18 unsigned long mmu_read_first_level_page(unsigned long vaddr) mmu_read_first_level_page() argument
25 if (is_global_space(vaddr)) mmu_read_first_level_page()
26 vaddr &= ~0x80000000; mmu_read_first_level_page()
28 offset = vaddr >> PGDIR_SHIFT; mmu_read_first_level_page()
53 unsigned long mmu_read_second_level_page(unsigned long vaddr) mmu_read_second_level_page() argument
55 return __builtin_meta2_cacherd((void *)(vaddr & PAGE_MASK)); mmu_read_second_level_page()
mmu-meta1.c
111 unsigned long mmu_read_first_level_page(unsigned long vaddr) mmu_read_first_level_page() argument
113 return metag_in32(pgd_entry_addr(vaddr)); mmu_read_first_level_page()
116 unsigned long mmu_read_second_level_page(unsigned long vaddr) mmu_read_second_level_page() argument
118 return metag_in32(pgtable_entry_addr(vaddr)); mmu_read_second_level_page()
/linux-4.4.14/arch/frv/mm/
cache-page.c
25 void *vaddr; flush_dcache_page() local
29 vaddr = kmap_atomic_primary(page); flush_dcache_page()
31 frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE); flush_dcache_page()
33 kunmap_atomic_primary(vaddr); flush_dcache_page()
53 void *vaddr; flush_icache_user_range() local
57 vaddr = kmap_atomic_primary(page); flush_icache_user_range()
59 start = (start & ~PAGE_MASK) | (unsigned long) vaddr; flush_icache_user_range()
62 kunmap_atomic_primary(vaddr); flush_icache_user_range()
/linux-4.4.14/arch/avr32/include/asm/
cacheflush.h
20 * Invalidate any cacheline containing virtual address vaddr without
26 static inline void invalidate_dcache_line(void *vaddr) invalidate_dcache_line() argument
30 : "r"(vaddr), "n"(CACHE_OP_DCACHE_INVALIDATE) invalidate_dcache_line()
35 * Make sure any cacheline containing virtual address vaddr is written
38 static inline void clean_dcache_line(void *vaddr) clean_dcache_line() argument
42 : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN) clean_dcache_line()
47 * Make sure any cacheline containing virtual address vaddr is written
50 static inline void flush_dcache_line(void *vaddr) flush_dcache_line() argument
54 : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN_INVAL) flush_dcache_line()
60 * vaddr.
62 static inline void invalidate_icache_line(void *vaddr) invalidate_icache_line() argument
66 : "r"(vaddr), "n"(CACHE_OP_ICACHE_INVALIDATE) invalidate_icache_line()
122 unsigned long vaddr, void *dst, const void *src,
126 struct page *page, unsigned long vaddr, void *dst, copy_from_user_page()
125 copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) copy_from_user_page() argument
/linux-4.4.14/arch/hexagon/include/asm/
fixmap.h
31 #define kmap_get_fixmap_pte(vaddr) \
32 pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \
33 (vaddr)), (vaddr)), (vaddr))
/linux-4.4.14/arch/sparc/mm/
io-unit.c
94 static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size) iounit_get_area() argument
100 npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT; iounit_get_area()
109 IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages)); iounit_get_area()
124 panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size); iounit_get_area()
132 iopte = MKIOPTE(__pa(vaddr & PAGE_MASK)); iounit_get_area()
133 vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK); iounit_get_area()
138 IOD(("%08lx\n", vaddr)); iounit_get_area()
139 return vaddr; iounit_get_area()
142 static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len) iounit_get_scsi_one() argument
148 ret = iounit_get_area(iounit, (unsigned long)vaddr, len); iounit_get_scsi_one()
169 static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len) iounit_release_scsi_one() argument
175 len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT; iounit_release_scsi_one()
176 vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT; iounit_release_scsi_one()
177 IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); iounit_release_scsi_one()
178 for (len += vaddr; vaddr < len; vaddr++) iounit_release_scsi_one()
179 clear_bit(vaddr, iounit->bmap); iounit_release_scsi_one()
187 unsigned long vaddr, len; iounit_release_scsi_sgl() local
193 vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; iounit_release_scsi_sgl()
194 IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); iounit_release_scsi_sgl()
195 for (len += vaddr; vaddr < len; vaddr++) iounit_release_scsi_sgl()
196 clear_bit(vaddr, iounit->bmap); iounit_release_scsi_sgl()
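Note: iounit_get_area() first converts an arbitrary (address, length) pair into a page count, rounding up at both ends; iounit_release_scsi_one() repeats the same idiom. A stand-alone check of the rounding (4 KiB pages assumed for the demo):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* pages spanned by [vaddr, vaddr + size): the in-page offset is added
 * back so a range crossing a page boundary counts both pages */
static unsigned long span_pages(unsigned long vaddr, unsigned long size)
{
	return ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
}

int main(void)
{
	printf("%lu\n", span_pages(0x1ffc, 8));		/* straddles -> 2 */
	printf("%lu\n", span_pages(0x2000, PAGE_SIZE));	/* aligned  -> 1 */
	return 0;
}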
highmem.c
53 unsigned long vaddr; kmap_atomic() local
63 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_atomic()
67 __flush_cache_one(vaddr); kmap_atomic()
78 __flush_tlb_one(vaddr); kmap_atomic()
83 return (void*) vaddr; kmap_atomic()
89 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; __kunmap_atomic() local
92 if (vaddr < FIXADDR_START) { // FIXME __kunmap_atomic()
105 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx)); __kunmap_atomic()
109 __flush_cache_one(vaddr); __kunmap_atomic()
118 pte_clear(&init_mm, vaddr, kmap_pte-idx); __kunmap_atomic()
121 __flush_tlb_one(vaddr); __kunmap_atomic()
tlb.c
69 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, tlb_batch_add_one() argument
75 vaddr &= PAGE_MASK; tlb_batch_add_one()
77 vaddr |= 0x1UL; tlb_batch_add_one()
87 flush_tsb_user_page(mm, vaddr, huge); tlb_batch_add_one()
88 global_flush_tlb_page(mm, vaddr); tlb_batch_add_one()
103 tb->vaddrs[nr] = vaddr; tlb_batch_add_one()
112 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, tlb_batch_add() argument
136 if ((paddr ^ vaddr) & (1 << 13)) tlb_batch_add()
142 tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge); tlb_batch_add()
146 static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, tlb_batch_pmd_scan() argument
152 pte = pte_offset_map(&pmd, vaddr); tlb_batch_pmd_scan()
153 end = vaddr + HPAGE_SIZE; tlb_batch_pmd_scan()
154 while (vaddr < end) { tlb_batch_pmd_scan()
158 tlb_batch_add_one(mm, vaddr, exec, false); tlb_batch_pmd_scan()
161 vaddr += PAGE_SIZE; tlb_batch_pmd_scan()
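Note: tlb_batch_add_one() reuses bit 0 of the page-aligned address to carry the "executable" flag, since page alignment guarantees the low bits are free. A minimal sketch of that encode/decode (page size illustrative):

#include <stdio.h>

#define PAGE_MASK (~0xfffUL)	/* illustrative 4 KiB pages */

int main(void)
{
	unsigned long vaddr = 0x12345abcUL;
	int exec = 1;

	/* encode: page-align, then tag bit 0 with the exec flag */
	unsigned long tagged = (vaddr & PAGE_MASK) | (exec ? 0x1UL : 0);

	/* decode both fields back out */
	printf("page %#lx exec %lu\n", tagged & PAGE_MASK, tagged & 0x1UL);
	return 0;
}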
iommu.c
207 static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len) iommu_get_scsi_one() argument
214 off = (unsigned long)vaddr & ~PAGE_MASK; iommu_get_scsi_one()
216 page = virt_to_page((unsigned long)vaddr & PAGE_MASK); iommu_get_scsi_one()
221 static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len) iommu_get_scsi_one_gflush() argument
224 return iommu_get_scsi_one(dev, vaddr, len); iommu_get_scsi_one_gflush()
227 static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len) iommu_get_scsi_one_pflush() argument
229 unsigned long page = ((unsigned long) vaddr) & PAGE_MASK; iommu_get_scsi_one_pflush()
231 while(page < ((unsigned long)(vaddr + len))) { iommu_get_scsi_one_pflush()
235 return iommu_get_scsi_one(dev, vaddr, len); iommu_get_scsi_one_pflush()
299 static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len) iommu_release_scsi_one() argument
304 off = vaddr & ~PAGE_MASK; iommu_release_scsi_one()
306 iommu_release_one(dev, vaddr & PAGE_MASK, npages); iommu_release_scsi_one()
leon_mm.c
35 unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr) leon_swprobe() argument
88 ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4); leon_swprobe()
111 ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4); leon_swprobe()
139 ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4); leon_swprobe()
160 (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4); leon_swprobe()
164 (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4); leon_swprobe()
168 (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4); leon_swprobe()
172 paddr_calc = vaddr; leon_swprobe()
/linux-4.4.14/arch/um/kernel/
mem.c
94 unsigned long vaddr; fixrange_init() local
96 vaddr = start; fixrange_init()
97 i = pgd_index(vaddr); fixrange_init()
98 j = pmd_index(vaddr); fixrange_init()
101 for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) { fixrange_init()
102 pud = pud_offset(pgd, vaddr); fixrange_init()
105 pmd = pmd_offset(pud, vaddr); fixrange_init()
106 for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) { fixrange_init()
108 vaddr += PMD_SIZE; fixrange_init()
123 unsigned long v, vaddr = FIXADDR_USER_START; fixaddr_user_init() local
132 for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE, fixaddr_user_init()
134 pgd = swapper_pg_dir + pgd_index(vaddr); fixaddr_user_init()
135 pud = pud_offset(pgd, vaddr); fixaddr_user_init()
136 pmd = pmd_offset(pud, vaddr); fixaddr_user_init()
137 pte = pte_offset_kernel(pmd, vaddr); fixaddr_user_init()
145 unsigned long zones_size[MAX_NR_ZONES], vaddr; paging_init() local
161 vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; paging_init()
162 fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir); paging_init()
/linux-4.4.14/arch/x86/kernel/
crash_dump_64.c
29 void *vaddr; copy_oldmem_page() local
34 vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE); copy_oldmem_page()
35 if (!vaddr) copy_oldmem_page()
39 if (copy_to_user(buf, vaddr + offset, csize)) { copy_oldmem_page()
40 iounmap(vaddr); copy_oldmem_page()
44 memcpy(buf, vaddr + offset, csize); copy_oldmem_page()
47 iounmap(vaddr); copy_oldmem_page()
crash_dump_32.c
53 void *vaddr; copy_oldmem_page() local
61 vaddr = kmap_atomic_pfn(pfn); copy_oldmem_page()
64 memcpy(buf, (vaddr + offset), csize); copy_oldmem_page()
65 kunmap_atomic(vaddr); copy_oldmem_page()
70 kunmap_atomic(vaddr); copy_oldmem_page()
73 copy_page(kdump_buf_page, vaddr); copy_oldmem_page()
74 kunmap_atomic(vaddr); copy_oldmem_page()
pci-swiotlb.c
21 void *vaddr; x86_swiotlb_alloc_coherent() local
30 vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags, x86_swiotlb_alloc_coherent()
32 if (vaddr) x86_swiotlb_alloc_coherent()
33 return vaddr; x86_swiotlb_alloc_coherent()
39 void *vaddr, dma_addr_t dma_addr, x86_swiotlb_free_coherent()
43 swiotlb_free_coherent(dev, size, vaddr, dma_addr); x86_swiotlb_free_coherent()
45 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); x86_swiotlb_free_coherent()
38 x86_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, struct dma_attrs *attrs) x86_swiotlb_free_coherent() argument
/linux-4.4.14/arch/sparc/include/asm/
tlbflush_64.h
20 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
49 void __flush_tlb_page(unsigned long context, unsigned long vaddr);
54 static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) global_flush_tlb_page() argument
56 __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); global_flush_tlb_page()
62 void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
64 #define global_flush_tlb_page(mm, vaddr) \
65 smp_flush_tlb_page(mm, vaddr)
cacheflush_64.h
57 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
59 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
61 flush_ptrace_access(vma, page, vaddr, src, len, 0); \
64 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
66 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
68 flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
cacheflush_32.h
21 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
23 flush_cache_page(vma, vaddr, page_to_pfn(page));\
26 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
28 flush_cache_page(vma, vaddr, page_to_pfn(page));\
hvtramp.h
9 __u64 vaddr; member in struct:hvtramp_mapping
viking.h
212 static inline unsigned long viking_hwprobe(unsigned long vaddr) viking_hwprobe() argument
216 vaddr &= PAGE_MASK; viking_hwprobe()
220 : "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE)); viking_hwprobe()
227 : "r" (vaddr | 0x200), "i" (ASI_M_FLUSH_PROBE)); viking_hwprobe()
229 vaddr &= ~SRMMU_PGDIR_MASK; viking_hwprobe()
230 vaddr >>= PAGE_SHIFT; viking_hwprobe()
231 return val | (vaddr << 8); viking_hwprobe()
237 : "r" (vaddr | 0x100), "i" (ASI_M_FLUSH_PROBE)); viking_hwprobe()
239 vaddr &= ~SRMMU_REAL_PMD_MASK; viking_hwprobe()
240 vaddr >>= PAGE_SHIFT; viking_hwprobe()
241 return val | (vaddr << 8); viking_hwprobe()
247 : "r" (vaddr), "i" (ASI_M_FLUSH_PROBE)); viking_hwprobe()
/linux-4.4.14/arch/frv/mb93090-mb00/
pci-dma.c
34 void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) dma_free_coherent() argument
36 consistent_free(vaddr); dma_free_coherent()
57 void *vaddr; dma_map_sg() local
66 vaddr = kmap_atomic_primary(sg_page(sg)); for_each_sg()
68 frv_dcache_writeback((unsigned long) vaddr, for_each_sg()
69 (unsigned long) vaddr + PAGE_SIZE); for_each_sg()
73 kunmap_atomic_primary(vaddr);
/linux-4.4.14/arch/arm/kernel/
crash_dump.c
37 void *vaddr; copy_oldmem_page() local
42 vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE); copy_oldmem_page()
43 if (!vaddr) copy_oldmem_page()
47 if (copy_to_user(buf, vaddr + offset, csize)) { copy_oldmem_page()
48 iounmap(vaddr); copy_oldmem_page()
52 memcpy(buf, vaddr + offset, csize); copy_oldmem_page()
55 iounmap(vaddr); copy_oldmem_page()
/linux-4.4.14/arch/xtensa/include/asm/
fixmap.h
66 static inline unsigned long virt_to_fix(const unsigned long vaddr) virt_to_fix() argument
68 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); virt_to_fix()
69 return __virt_to_fix(vaddr); virt_to_fix()
74 #define kmap_get_fixmap_pte(vaddr) \
76 pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
77 (vaddr) \
cacheflush.h
39 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
40 * __invalidate_dcache_page_alias(vaddr,paddr)
41 * __invalidate_icache_page_alias(vaddr,paddr)
170 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
177 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page.h
149 extern void clear_page_alias(void *vaddr, unsigned long paddr);
154 void clear_user_highpage(struct page *page, unsigned long vaddr);
157 unsigned long vaddr, struct vm_area_struct *vma);
159 # define clear_user_page(page, vaddr, pg) clear_page(page)
160 # define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
dma-mapping.h
35 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
/linux-4.4.14/include/asm-generic/
dma-coherent.h
11 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
28 #define dma_release_from_coherent(dev, order, vaddr) (0)
29 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
cacheflush.h
26 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
29 flush_icache_user_range(vma, page, vaddr, len); \
31 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
fixmap.h
35 static inline unsigned long virt_to_fix(const unsigned long vaddr) virt_to_fix() argument
37 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); virt_to_fix()
38 return __virt_to_fix(vaddr); virt_to_fix()
page.h
27 #define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
33 #define clear_user_page(page, vaddr, pg) clear_page(page)
34 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/linux-4.4.14/arch/arm/mach-ixp4xx/include/mach/
io.h
98 const u8 *vaddr, int count) __indirect_writesb()
101 writeb(*vaddr++, bus_addr); __indirect_writesb()
121 const u16 *vaddr, int count) __indirect_writesw()
124 writew(*vaddr++, bus_addr); __indirect_writesw()
140 const u32 *vaddr, int count) __indirect_writesl()
143 writel(*vaddr++, bus_addr); __indirect_writesl()
163 u8 *vaddr, u32 count) __indirect_readsb()
166 *vaddr++ = readb(bus_addr); __indirect_readsb()
186 u16 *vaddr, u32 count) __indirect_readsw()
189 *vaddr++ = readw(bus_addr); __indirect_readsw()
207 u32 *vaddr, u32 count) __indirect_readsl()
210 *vaddr++ = readl(bus_addr); __indirect_readsl()
250 const u8 *vaddr = p; outsb() local
253 outb(*vaddr++, io_addr); outsb()
269 const u16 *vaddr = p; outsw() local
271 outw(cpu_to_le16(*vaddr++), io_addr); outsw()
283 const u32 *vaddr = p; outsl() local
285 outl(cpu_to_le32(*vaddr++), io_addr); outsl()
303 u8 *vaddr = p; insb() local
305 *vaddr++ = inb(io_addr); insb()
323 u16 *vaddr = p; insw() local
325 *vaddr++ = le16_to_cpu(inw(io_addr)); insw()
341 u32 *vaddr = p; insl() local
343 *vaddr++ = le32_to_cpu(inl(io_addr)); insl()
367 static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count) ioread8_rep() argument
371 insb(port & PIO_MASK, vaddr, count); ioread8_rep()
374 __raw_readsb(addr, vaddr, count); ioread8_rep()
376 __indirect_readsb(addr, vaddr, count); ioread8_rep()
395 static inline void ioread16_rep(const void __iomem *addr, void *vaddr, ioread16_rep() argument
400 insw(port & PIO_MASK, vaddr, count); ioread16_rep()
403 __raw_readsw(addr, vaddr, count); ioread16_rep()
405 __indirect_readsw(addr, vaddr, count); ioread16_rep()
425 static inline void ioread32_rep(const void __iomem *addr, void *vaddr, ioread32_rep() argument
430 insl(port & PIO_MASK, vaddr, count); ioread32_rep()
433 __raw_readsl(addr, vaddr, count); ioread32_rep()
435 __indirect_readsl(addr, vaddr, count); ioread32_rep()
454 static inline void iowrite8_rep(void __iomem *addr, const void *vaddr, iowrite8_rep() argument
459 outsb(port & PIO_MASK, vaddr, count); iowrite8_rep()
462 __raw_writesb(addr, vaddr, count); iowrite8_rep()
464 __indirect_writesb(addr, vaddr, count); iowrite8_rep()
483 static inline void iowrite16_rep(void __iomem *addr, const void *vaddr, iowrite16_rep() argument
488 outsw(port & PIO_MASK, vaddr, count); iowrite16_rep()
491 __raw_writesw(addr, vaddr, count); iowrite16_rep()
493 __indirect_writesw(addr, vaddr, count); iowrite16_rep()
512 static inline void iowrite32_rep(void __iomem *addr, const void *vaddr, iowrite32_rep() argument
517 outsl(port & PIO_MASK, vaddr, count); iowrite32_rep()
520 __raw_writesl(addr, vaddr, count); iowrite32_rep()
522 __indirect_writesl(addr, vaddr, count); iowrite32_rep()
97 __indirect_writesb(volatile void __iomem *bus_addr, const u8 *vaddr, int count) __indirect_writesb() argument
120 __indirect_writesw(volatile void __iomem *bus_addr, const u16 *vaddr, int count) __indirect_writesw() argument
139 __indirect_writesl(volatile void __iomem *bus_addr, const u32 *vaddr, int count) __indirect_writesl() argument
162 __indirect_readsb(const volatile void __iomem *bus_addr, u8 *vaddr, u32 count) __indirect_readsb() argument
185 __indirect_readsw(const volatile void __iomem *bus_addr, u16 *vaddr, u32 count) __indirect_readsw() argument
206 __indirect_readsl(const volatile void __iomem *bus_addr, u32 *vaddr, u32 count) __indirect_readsl() argument
/linux-4.4.14/drivers/media/v4l2-core/
videobuf2-vmalloc.c
25 void *vaddr; member in struct:vb2_vmalloc_buf
46 buf->vaddr = vmalloc_user(buf->size); vb2_vmalloc_alloc()
52 if (!buf->vaddr) { vb2_vmalloc_alloc()
67 vfree(buf->vaddr); vb2_vmalloc_put()
72 static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, vb2_vmalloc_get_userptr() argument
85 offset = vaddr & ~PAGE_MASK; vb2_vmalloc_get_userptr()
87 vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE); vb2_vmalloc_get_userptr()
102 buf->vaddr = (__force void *) vb2_vmalloc_get_userptr()
105 buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1, vb2_vmalloc_get_userptr()
109 if (!buf->vaddr) vb2_vmalloc_get_userptr()
111 buf->vaddr += offset; vb2_vmalloc_get_userptr()
125 unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK; vb2_vmalloc_put_userptr() local
133 if (vaddr) vb2_vmalloc_put_userptr()
134 vm_unmap_ram((void *)vaddr, n_pages); vb2_vmalloc_put_userptr()
139 iounmap((__force void __iomem *)buf->vaddr); vb2_vmalloc_put_userptr()
149 if (!buf->vaddr) { vb2_vmalloc_vaddr()
155 return buf->vaddr; vb2_vmalloc_vaddr()
174 ret = remap_vmalloc_range(vma, buf->vaddr, 0); vb2_vmalloc_mmap()
214 void *vaddr = buf->vaddr; vb2_vmalloc_dmabuf_ops_attach() local
229 struct page *page = vmalloc_to_page(vaddr); vb2_vmalloc_dmabuf_ops_attach()
237 vaddr += PAGE_SIZE; vb2_vmalloc_dmabuf_ops_attach()
321 return buf->vaddr + pgnum * PAGE_SIZE; vb2_vmalloc_dmabuf_ops_kmap()
328 return buf->vaddr; vb2_vmalloc_dmabuf_ops_vmap()
360 if (WARN_ON(!buf->vaddr)) vb2_vmalloc_get_dmabuf()
383 buf->vaddr = dma_buf_vmap(buf->dbuf); vb2_vmalloc_map_dmabuf()
385 return buf->vaddr ? 0 : -EFAULT; vb2_vmalloc_map_dmabuf()
392 dma_buf_vunmap(buf->dbuf, buf->vaddr); vb2_vmalloc_unmap_dmabuf()
393 buf->vaddr = NULL; vb2_vmalloc_unmap_dmabuf()
400 if (buf->vaddr) vb2_vmalloc_detach_dmabuf()
401 dma_buf_vunmap(buf->dbuf, buf->vaddr); vb2_vmalloc_detach_dmabuf()
438 .vaddr = vb2_vmalloc_vaddr,
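Note: vb2_vmalloc pairs vmalloc_user(), which returns zeroed kernel memory that may legally be mapped to userspace, with remap_vmalloc_range() in the mmap handler. A condensed kernel-style sketch of that pairing; the APIs are the real <linux/vmalloc.h> ones, while struct demo_buf and the helpers are hypothetical:

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>

struct demo_buf {			/* hypothetical buffer object */
	void *vaddr;
	unsigned long size;
};

static int demo_buf_alloc(struct demo_buf *buf, unsigned long size)
{
	buf->size = size;
	buf->vaddr = vmalloc_user(size);	/* zeroed, mmap-able */
	return buf->vaddr ? 0 : -ENOMEM;
}

static int demo_buf_mmap(struct demo_buf *buf, struct vm_area_struct *vma)
{
	/* hand the whole buffer to userspace, as vb2_vmalloc_mmap() does */
	return remap_vmalloc_range(vma, buf->vaddr, 0);
}

static void demo_buf_free(struct demo_buf *buf)
{
	vfree(buf->vaddr);
}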
videobuf-vmalloc.c
105 __func__, i, mem->vaddr); videobuf_vm_close()
107 vfree(mem->vaddr); videobuf_vm_close()
108 mem->vaddr = NULL; videobuf_vm_close()
173 if (!mem->vaddr) { __videobuf_iolock()
192 mem->vaddr = vmalloc_user(pages); __videobuf_iolock()
193 if (!mem->vaddr) { __videobuf_iolock()
198 mem->vaddr, pages); __videobuf_iolock()
257 mem->vaddr = vmalloc_user(pages); __videobuf_mmap_mapper()
258 if (!mem->vaddr) { __videobuf_mmap_mapper()
262 dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages); __videobuf_mmap_mapper()
265 retval = remap_vmalloc_range(vma, mem->vaddr, 0); __videobuf_mmap_mapper()
268 vfree(mem->vaddr); __videobuf_mmap_mapper()
297 .vaddr = videobuf_to_vmalloc,
321 return mem->vaddr; videobuf_to_vmalloc()
343 vfree(mem->vaddr); videobuf_vmalloc_free()
344 mem->vaddr = NULL; videobuf_vmalloc_free()
videobuf2-dma-sg.c
39 void *vaddr; member in struct:vb2_dma_sg_buf
120 buf->vaddr = NULL; vb2_dma_sg_alloc()
193 if (buf->vaddr) vb2_dma_sg_put()
194 vm_unmap_ram(buf->vaddr, buf->num_pages); vb2_dma_sg_put()
229 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, vb2_dma_sg_get_userptr() argument
244 buf->vaddr = NULL; vb2_dma_sg_get_userptr()
247 buf->offset = vaddr & ~PAGE_MASK; vb2_dma_sg_get_userptr()
250 vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE); vb2_dma_sg_get_userptr()
302 if (buf->vaddr) vb2_dma_sg_put_userptr()
303 vm_unmap_ram(buf->vaddr, buf->num_pages); vb2_dma_sg_put_userptr()
319 if (!buf->vaddr) { vb2_dma_sg_vaddr()
321 buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf); vb2_dma_sg_vaddr()
323 buf->vaddr = vm_map_ram(buf->pages, vb2_dma_sg_vaddr()
328 return buf->vaddr ? buf->vaddr + buf->offset : NULL; vb2_dma_sg_vaddr()
498 return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL; vb2_dma_sg_dmabuf_ops_kmap()
577 buf->vaddr = NULL; vb2_dma_sg_map_dmabuf()
597 if (buf->vaddr) { vb2_dma_sg_unmap_dmabuf()
598 dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr); vb2_dma_sg_unmap_dmabuf()
599 buf->vaddr = NULL; vb2_dma_sg_unmap_dmabuf()
663 .vaddr = vb2_dma_sg_vaddr,
videobuf-dma-contig.c
28 void *vaddr; member in struct:videobuf_dma_contig_memory
45 mem->vaddr = dma_alloc_coherent(dev, mem->size, __videobuf_dc_alloc()
48 if (!mem->vaddr) { __videobuf_dc_alloc()
53 dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size); __videobuf_dc_alloc()
61 dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle); __videobuf_dc_free()
63 mem->vaddr = NULL; __videobuf_dc_free()
117 i, mem->vaddr); videobuf_vm_close()
120 mem->vaddr = NULL; videobuf_vm_close()
233 return mem->vaddr; __videobuf_to_vaddr()
250 if (!mem->vaddr) { __videobuf_iolock()
322 mem->vaddr, mem->dma_handle); __videobuf_mmap_mapper()
348 .vaddr = __videobuf_to_vaddr,
403 if (mem->vaddr) { videobuf_dma_contig_free()
405 mem->vaddr = NULL; videobuf_dma_contig_free()
videobuf2-dma-contig.c:30 void *vaddr; member in struct:vb2_dc_buf
81 if (!buf->vaddr && buf->db_attach) vb2_dc_vaddr()
82 buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf); vb2_dc_vaddr()
84 return buf->vaddr; vb2_dc_vaddr()
134 dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr); vb2_dc_put()
150 buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, vb2_dc_alloc()
152 if (!buf->vaddr) { vb2_dc_alloc()
188 ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr, vb2_dc_mmap()
332 return buf->vaddr + pgnum * PAGE_SIZE; vb2_dc_dmabuf_ops_kmap()
339 return buf->vaddr; vb2_dc_dmabuf_ops_vmap()
371 ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr, vb2_dc_get_base_sgt()
473 static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, vb2_dc_get_userptr() argument
490 if (!IS_ALIGNED(vaddr | size, dma_align)) { vb2_dc_get_userptr()
507 offset = vaddr & ~PAGE_MASK; vb2_dc_get_userptr()
508 vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE); vb2_dc_get_userptr()
628 buf->vaddr = NULL; vb2_dc_map_dmabuf()
648 if (buf->vaddr) { vb2_dc_unmap_dmabuf()
649 dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr); vb2_dc_unmap_dmabuf()
650 buf->vaddr = NULL; vb2_dc_unmap_dmabuf()
710 .vaddr = vb2_dc_vaddr,
/linux-4.4.14/drivers/base/
dma-mapping.c:22 void *vaddr; member in struct:dma_devres
30 dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle); dmam_coherent_release()
37 dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle); dmam_noncoherent_release()
44 if (this->vaddr == match->vaddr) { dmam_match()
69 void *vaddr; dmam_alloc_coherent() local
75 vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp); dmam_alloc_coherent()
76 if (!vaddr) { dmam_alloc_coherent()
81 dr->vaddr = vaddr; dmam_alloc_coherent()
87 return vaddr; dmam_alloc_coherent()
95 * @vaddr: Virtual address of the memory to free
100 void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, dmam_free_coherent() argument
103 struct dma_devres match_data = { size, vaddr, dma_handle }; dmam_free_coherent()
105 dma_free_coherent(dev, size, vaddr, dma_handle); dmam_free_coherent()
128 void *vaddr; dmam_alloc_noncoherent() local
134 vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp); dmam_alloc_noncoherent()
135 if (!vaddr) { dmam_alloc_noncoherent()
140 dr->vaddr = vaddr; dmam_alloc_noncoherent()
146 return vaddr; dmam_alloc_noncoherent()
154 * @vaddr: Virtual address of the memory to free
159 void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, dmam_free_noncoherent() argument
162 struct dma_devres match_data = { size, vaddr, dma_handle }; dmam_free_noncoherent()
164 dma_free_noncoherent(dev, size, vaddr, dma_handle); dmam_free_noncoherent()
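Editor's note: dmam_alloc_coherent() above is the managed (devres) variant of dma_alloc_coherent(): the allocation is recorded against the device and released automatically at unbind, with dmam_free_coherent() available for early release. A hedged usage sketch in a probe routine; my_probe and the size are illustrative only.

    #include <linux/dma-mapping.h>

    static int my_probe(struct device *dev)
    {
            dma_addr_t dma;
            void *vaddr;

            /* freed automatically by devres when the driver detaches */
            vaddr = dmam_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
            if (!vaddr)
                    return -ENOMEM;

            /* ... program the device with 'dma', touch 'vaddr' from the CPU ... */
            return 0;
    }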
dma-coherent.c:204 * @vaddr: virtual address of allocated pages
213 int dma_release_from_coherent(struct device *dev, int order, void *vaddr) dma_release_from_coherent() argument
217 if (mem && vaddr >= mem->virt_base && vaddr < dma_release_from_coherent()
219 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; dma_release_from_coherent()
236 * @vaddr: cpu address returned by dma_alloc_from_coherent
247 void *vaddr, size_t size, int *ret) dma_mmap_from_coherent()
251 if (mem && vaddr >= mem->virt_base && vaddr + size <= dma_mmap_from_coherent()
254 int start = (vaddr - mem->virt_base) >> PAGE_SHIFT; dma_mmap_from_coherent()
246 dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) dma_mmap_from_coherent() argument
/linux-4.4.14/arch/m68k/sun3x/
dvma.c:83 unsigned long vaddr, int len) dvma_map_cpu()
90 vaddr &= PAGE_MASK; dvma_map_cpu()
92 end = PAGE_ALIGN(vaddr + len); dvma_map_cpu()
96 kaddr, vaddr); dvma_map_cpu()
98 pgd = pgd_offset_k(vaddr); dvma_map_cpu()
104 if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) { dvma_map_cpu()
109 if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK)) dvma_map_cpu()
110 end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK; dvma_map_cpu()
118 if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) { dvma_map_cpu()
123 if((end2 & PMD_MASK) > (vaddr & PMD_MASK)) dvma_map_cpu()
124 end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK; dvma_map_cpu()
131 __pa(kaddr), vaddr); dvma_map_cpu()
137 vaddr += PAGE_SIZE; dvma_map_cpu()
138 } while(vaddr < end3); dvma_map_cpu()
140 } while(vaddr < end2); dvma_map_cpu()
142 } while(vaddr < end); dvma_map_cpu()
82 dvma_map_cpu(unsigned long kaddr, unsigned long vaddr, int len) dvma_map_cpu() argument
/linux-4.4.14/arch/x86/mm/
pgtable_32.c:27 void set_pte_vaddr(unsigned long vaddr, pte_t pteval) set_pte_vaddr() argument
34 pgd = swapper_pg_dir + pgd_index(vaddr); set_pte_vaddr()
39 pud = pud_offset(pgd, vaddr); set_pte_vaddr()
44 pmd = pmd_offset(pud, vaddr); set_pte_vaddr()
49 pte = pte_offset_kernel(pmd, vaddr); set_pte_vaddr()
51 set_pte_at(&init_mm, vaddr, pte, pteval); set_pte_vaddr()
53 pte_clear(&init_mm, vaddr, pte); set_pte_vaddr()
59 __flush_tlb_one(vaddr); set_pte_vaddr()
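Editor's note: set_pte_vaddr() above shows the canonical descent pgd -> pud -> pmd -> pte for a kernel vaddr. A read-only sketch of the same walk, using only the offset helpers that appear in the listing; lookup_kernel_pte is an invented name, and huge mappings are deliberately not handled.

    static pte_t *lookup_kernel_pte(unsigned long vaddr)
    {
            pgd_t *pgd = pgd_offset_k(vaddr);       /* entry in swapper_pg_dir */
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none(*pgd))
                    return NULL;
            pud = pud_offset(pgd, vaddr);
            if (pud_none(*pud))
                    return NULL;
            pmd = pmd_offset(pud, vaddr);
            if (pmd_none(*pmd))
                    return NULL;
            return pte_offset_kernel(pmd, vaddr);   /* may still be pte_none() */
    }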
highmem_32.c:35 unsigned long vaddr; kmap_atomic_prot() local
46 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_atomic_prot()
51 return (void *)vaddr; kmap_atomic_prot()
73 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; __kunmap_atomic() local
75 if (vaddr >= __fix_to_virt(FIX_KMAP_END) && __kunmap_atomic()
76 vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) { __kunmap_atomic()
83 WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); __kunmap_atomic()
91 kpte_clear_flush(kmap_pte-idx, vaddr); __kunmap_atomic()
97 BUG_ON(vaddr < PAGE_OFFSET); __kunmap_atomic()
98 BUG_ON(vaddr >= (unsigned long)high_memory); __kunmap_atomic()
iomap_32.c:59 unsigned long vaddr; kmap_atomic_prot_pfn() local
67 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_atomic_prot_pfn()
71 return (void *)vaddr; kmap_atomic_prot_pfn()
98 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; iounmap_atomic() local
100 if (vaddr >= __fix_to_virt(FIX_KMAP_END) && iounmap_atomic()
101 vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) { iounmap_atomic()
108 WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); iounmap_atomic()
116 kpte_clear_flush(kmap_pte-idx, vaddr); iounmap_atomic()
init_32.c:108 pmd_t * __init populate_extra_pmd(unsigned long vaddr) populate_extra_pmd() argument
110 int pgd_idx = pgd_index(vaddr); populate_extra_pmd()
111 int pmd_idx = pmd_index(vaddr); populate_extra_pmd()
116 pte_t * __init populate_extra_pte(unsigned long vaddr) populate_extra_pte() argument
118 int pte_idx = pte_index(vaddr); populate_extra_pte()
121 pmd = populate_extra_pmd(vaddr); populate_extra_pte()
133 unsigned long vaddr; page_table_range_init_count() local
138 vaddr = start; page_table_range_init_count()
139 pgd_idx = pgd_index(vaddr); page_table_range_init_count()
140 pmd_idx = pmd_index(vaddr); page_table_range_init_count()
142 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) { page_table_range_init_count()
143 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); page_table_range_init_count()
145 if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin && page_table_range_init_count()
146 (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) page_table_range_init_count()
148 vaddr += PMD_SIZE; page_table_range_init_count()
157 unsigned long vaddr, pte_t *lastpte, page_table_kmap_check()
171 && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin page_table_kmap_check()
172 && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) { page_table_kmap_check()
190 BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1) page_table_kmap_check()
191 && vaddr > fix_to_virt(FIX_KMAP_END) page_table_kmap_check()
210 unsigned long vaddr; page_table_range_init() local
220 vaddr = start; page_table_range_init()
221 pgd_idx = pgd_index(vaddr); page_table_range_init()
222 pmd_idx = pmd_index(vaddr); page_table_range_init()
225 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { page_table_range_init()
227 pmd = pmd + pmd_index(vaddr); page_table_range_init()
228 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); page_table_range_init()
231 pmd, vaddr, pte, &adr); page_table_range_init()
233 vaddr += PMD_SIZE; page_table_range_init()
393 static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr) kmap_get_fixmap_pte() argument
395 return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), kmap_get_fixmap_pte()
396 vaddr), vaddr), vaddr); kmap_get_fixmap_pte()
415 unsigned long vaddr; permanent_kmaps_init() local
421 vaddr = PKMAP_BASE; permanent_kmaps_init()
422 page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); permanent_kmaps_init()
424 pgd = swapper_pg_dir + pgd_index(vaddr); permanent_kmaps_init()
425 pud = pud_offset(pgd, vaddr); permanent_kmaps_init()
426 pmd = pmd_offset(pud, vaddr); permanent_kmaps_init()
427 pte = pte_offset_kernel(pmd, vaddr); permanent_kmaps_init()
522 unsigned long vaddr, end; early_ioremap_page_table_range_init() local
528 vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; early_ioremap_page_table_range_init()
530 page_table_range_init(vaddr, end, pgd_base); early_ioremap_page_table_range_init()
156 page_table_kmap_check(pte_t *pte, pmd_t *pmd, unsigned long vaddr, pte_t *lastpte, void **adr) page_table_kmap_check() argument
ioremap.c:31 int ioremap_change_attr(unsigned long vaddr, unsigned long size, ioremap_change_attr() argument
40 err = _set_memory_uc(vaddr, nrpages); ioremap_change_attr()
43 err = _set_memory_wc(vaddr, nrpages); ioremap_change_attr()
46 err = _set_memory_wt(vaddr, nrpages); ioremap_change_attr()
49 err = _set_memory_wb(vaddr, nrpages); ioremap_change_attr()
86 unsigned long offset, vaddr; __ioremap_caller() local
182 vaddr = (unsigned long) area->addr; __ioremap_caller()
187 if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) __ioremap_caller()
190 ret_addr = (void __iomem *) (vaddr + offset); __ioremap_caller()
400 void *vaddr; xlate_dev_mem_ptr() local
406 vaddr = ioremap_cache(start, PAGE_SIZE); xlate_dev_mem_ptr()
408 if (vaddr) xlate_dev_mem_ptr()
409 vaddr += offset; xlate_dev_mem_ptr()
411 return vaddr; xlate_dev_mem_ptr()
init_64.c:233 static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr) fill_pud() argument
242 return pud_offset(pgd, vaddr); fill_pud()
245 static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr) fill_pmd() argument
254 return pmd_offset(pud, vaddr); fill_pmd()
257 static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) fill_pte() argument
265 return pte_offset_kernel(pmd, vaddr); fill_pte()
268 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) set_pte_vaddr_pud() argument
274 pud = pud_page + pud_index(vaddr); set_pte_vaddr_pud()
275 pmd = fill_pmd(pud, vaddr); set_pte_vaddr_pud()
276 pte = fill_pte(pmd, vaddr); set_pte_vaddr_pud()
284 __flush_tlb_one(vaddr); set_pte_vaddr_pud()
287 void set_pte_vaddr(unsigned long vaddr, pte_t pteval) set_pte_vaddr() argument
292 pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval)); set_pte_vaddr()
294 pgd = pgd_offset_k(vaddr); set_pte_vaddr()
301 set_pte_vaddr_pud(pud_page, vaddr, pteval); set_pte_vaddr()
304 pmd_t * __init populate_extra_pmd(unsigned long vaddr) populate_extra_pmd() argument
309 pgd = pgd_offset_k(vaddr); populate_extra_pmd()
310 pud = fill_pud(pgd, vaddr); populate_extra_pmd()
311 return fill_pmd(pud, vaddr); populate_extra_pmd()
314 pte_t * __init populate_extra_pte(unsigned long vaddr) populate_extra_pte() argument
318 pmd = populate_extra_pmd(vaddr); populate_extra_pte()
319 return fill_pte(pmd, vaddr); populate_extra_pte()
380 unsigned long vaddr = __START_KERNEL_map; cleanup_highmap() local
393 for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { cleanup_highmap()
396 if (vaddr < (unsigned long) _text || vaddr > end) cleanup_highmap()
/linux-4.4.14/arch/score/include/asm/
page.h:36 #define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
37 #define copy_user_page(vto, vfrom, vaddr, topg) \
67 #define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
70 #define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
83 #define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
cacheflush.h:38 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
41 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
45 flush_cache_page(vma, vaddr, page_to_pfn(page));\
tlbflush.h:23 extern void local_flush_tlb_one(unsigned long vaddr);
32 #define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
/linux-4.4.14/arch/sh/include/asm/
dma-mapping.h:16 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
24 void *vaddr, dma_addr_t dma_handle,
cacheflush.h:20 * - flush_cache_sigtramp(vaddr) flushes the signal trampoline
82 struct page *page, unsigned long vaddr, void *dst, const void *src,
86 struct page *page, unsigned long vaddr, void *dst, const void *src,
/linux-4.4.14/arch/m68k/mm/
cache.c:14 static unsigned long virt_to_phys_slow(unsigned long vaddr) virt_to_phys_slow() argument
36 : "0" (vaddr)); virt_to_phys_slow()
46 : "a" (vaddr)); virt_to_phys_slow()
49 return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK); virt_to_phys_slow()
57 : "a" (vaddr), "d" (get_fs().seg)); virt_to_phys_slow()
63 return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff); virt_to_phys_slow()
65 return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff); virt_to_phys_slow()
67 return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK); virt_to_phys_slow()
/linux-4.4.14/arch/mips/kernel/
crash_dump.c:29 void *vaddr; copy_oldmem_page() local
34 vaddr = kmap_atomic_pfn(pfn); copy_oldmem_page()
37 memcpy(buf, (vaddr + offset), csize); copy_oldmem_page()
38 kunmap_atomic(vaddr); copy_oldmem_page()
45 copy_page(kdump_buf_page, vaddr); copy_oldmem_page()
46 kunmap_atomic(vaddr); copy_oldmem_page()
uprobes.c:249 instruction_pointer_set(regs, utask->vaddr); arch_uprobe_abort_xol()
269 * @vaddr: the virtual address to insert the opcode.
271 * For mm @mm, store the breakpoint instruction at @vaddr.
278 unsigned long vaddr) set_swbp()
280 return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN); set_swbp()
287 * @vaddr: the virtual address to insert the opcode.
289 * For mm @mm, restore the original opcode (opcode) at @vaddr.
295 unsigned long vaddr) set_orig_insn()
297 return uprobe_write_opcode(mm, vaddr, set_orig_insn()
301 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, arch_uprobe_copy_ixol() argument
308 memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); arch_uprobe_copy_ixol()
316 flush_icache_range(vaddr, vaddr + len); arch_uprobe_copy_ixol()
277 set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) set_swbp() argument
294 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) set_orig_insn() argument
mips-r2-to-r6-emul.c:908 unsigned long vaddr; mipsr2_decoder() local
1201 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); mipsr2_decoder()
1202 if (!access_ok(VERIFY_READ, vaddr, 4)) { mipsr2_decoder()
1203 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
1262 "+&r"(vaddr), "+&r"(err) mipsr2_decoder()
1274 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); mipsr2_decoder()
1275 if (!access_ok(VERIFY_READ, vaddr, 4)) { mipsr2_decoder()
1276 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
1337 "+&r"(vaddr), "+&r"(err) mipsr2_decoder()
1348 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); mipsr2_decoder()
1349 if (!access_ok(VERIFY_WRITE, vaddr, 4)) { mipsr2_decoder()
1350 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
1408 "+&r"(vaddr), "+&r"(err) mipsr2_decoder()
1418 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); mipsr2_decoder()
1419 if (!access_ok(VERIFY_WRITE, vaddr, 4)) { mipsr2_decoder()
1420 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
1478 "+&r"(vaddr), "+&r"(err) mipsr2_decoder()
1493 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); mipsr2_decoder()
1494 if (!access_ok(VERIFY_READ, vaddr, 8)) { mipsr2_decoder()
1495 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
1597 "+&r"(vaddr), "+&r"(err) mipsr2_decoder()
1612 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); mipsr2_decoder()
1613 if (!access_ok(VERIFY_READ, vaddr, 8)) { mipsr2_decoder()
1614 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
1716 "+&r"(vaddr), "+&r"(err) mipsr2_decoder()
1731 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); mipsr2_decoder()
1732 if (!access_ok(VERIFY_WRITE, vaddr, 8)) { mipsr2_decoder()
1733 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
1835 "+&r"(vaddr), "+&r"(err) mipsr2_decoder()
1849 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); mipsr2_decoder()
1850 if (!access_ok(VERIFY_WRITE, vaddr, 8)) { mipsr2_decoder()
1851 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
1953 "+&r"(vaddr), "+&r"(err) mipsr2_decoder()
1961 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); mipsr2_decoder()
1962 if (vaddr & 0x3) { mipsr2_decoder()
1963 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
1967 if (!access_ok(VERIFY_READ, vaddr, 4)) { mipsr2_decoder()
1968 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
2007 : "r"(vaddr), "i"(SIGSEGV) mipsr2_decoder()
2017 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); mipsr2_decoder()
2018 if (vaddr & 0x3) { mipsr2_decoder()
2019 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
2023 if (!access_ok(VERIFY_WRITE, vaddr, 4)) { mipsr2_decoder()
2024 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
2065 : "r"(vaddr), "i"(SIGSEGV)); mipsr2_decoder()
2080 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); mipsr2_decoder()
2081 if (vaddr & 0x7) { mipsr2_decoder()
2082 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
2086 if (!access_ok(VERIFY_READ, vaddr, 8)) { mipsr2_decoder()
2087 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
2126 : "r"(vaddr), "i"(SIGSEGV) mipsr2_decoder()
2141 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); mipsr2_decoder()
2142 if (vaddr & 0x7) { mipsr2_decoder()
2143 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
2147 if (!access_ok(VERIFY_WRITE, vaddr, 8)) { mipsr2_decoder()
2148 current->thread.cp0_baduaddr = vaddr; mipsr2_decoder()
2189 : "r"(vaddr), "i"(SIGSEGV)); mipsr2_decoder()
/linux-4.4.14/arch/sh/kernel/
crash_dump.c:28 void *vaddr; copy_oldmem_page() local
33 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); copy_oldmem_page()
36 if (copy_to_user(buf, (vaddr + offset), csize)) { copy_oldmem_page()
37 iounmap(vaddr); copy_oldmem_page()
41 memcpy(buf, (vaddr + offset), csize); copy_oldmem_page()
43 iounmap(vaddr); copy_oldmem_page()
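Editor's note: both copy_oldmem_page() hits above follow the same recipe for reading the crashed kernel's memory: map the old pfn somewhere (kmap_atomic_pfn() on mips, ioremap() on sh), copy csize bytes at offset, unmap. A condensed sketch of the sh flavour; read_oldmem_bytes is an invented wrapper name and the userspace-copy branch is omitted.

    #include <linux/io.h>

    static ssize_t read_oldmem_bytes(unsigned long pfn, char *buf,
                                     size_t csize, unsigned long offset)
    {
            void __iomem *vaddr;

            vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
            if (!vaddr)
                    return -ENOMEM;
            memcpy_fromio(buf, vaddr + offset, csize);  /* kernel-space copy */
            iounmap(vaddr);
            return csize;
    }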
/linux-4.4.14/include/linux/
highmem.h:23 static inline void flush_kernel_vmap_range(void *vaddr, int size) flush_kernel_vmap_range() argument
26 static inline void invalidate_kernel_vmap_range(void *vaddr, int size) invalidate_kernel_vmap_range() argument
133 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
136 clear_user_page(addr, vaddr, page);
146 * @vaddr: The virtual address the page will be inserted into
159 unsigned long vaddr)
162 vma, vaddr);
165 clear_user_highpage(page, vaddr);
174 * @vaddr: The virtual address the page will be inserted into
181 unsigned long vaddr)
183 return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
226 unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage()
232 copy_user_page(vto, vfrom, vaddr, to); copy_user_highpage()
225 copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage() argument
io-mapping.h:97 io_mapping_unmap_atomic(void __iomem *vaddr) io_mapping_unmap_atomic() argument
99 iounmap_atomic(vaddr); io_mapping_unmap_atomic()
114 io_mapping_unmap(void __iomem *vaddr) io_mapping_unmap() argument
116 iounmap(vaddr); io_mapping_unmap()
150 io_mapping_unmap_atomic(void __iomem *vaddr) io_mapping_unmap_atomic() argument
164 io_mapping_unmap(void __iomem *vaddr) io_mapping_unmap() argument
dmapool.h:33 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
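Editor's note: dmapool.h above declares the small-allocation counterpart to dma_alloc_coherent(): fixed-size, aligned blocks carved out of coherent pages (see the mm/dmapool.c hits later in this listing). A lifecycle sketch; my_pool_demo, the pool name, and the sizes are illustrative only.

    #include <linux/dmapool.h>

    static int my_pool_demo(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t handle;
            void *vaddr;

            /* 64-byte blocks, 64-byte aligned, no boundary-crossing limit */
            pool = dma_pool_create("my-descs", dev, 64, 64, 0);
            if (!pool)
                    return -ENOMEM;

            vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
            if (vaddr) {
                    /* give 'handle' to the device, use 'vaddr' from the CPU */
                    dma_pool_free(pool, vaddr, handle);
            }
            dma_pool_destroy(pool);     /* all blocks must be freed first */
            return 0;
    }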
/linux-4.4.14/arch/sh/boards/mach-sdk7786/
sram.c:25 void __iomem *vaddr; fpga_sram_init() local
53 vaddr = ioremap(phys, SZ_2K); fpga_sram_init()
54 if (unlikely(!vaddr)) { fpga_sram_init()
63 ret = gen_pool_add(sram_pool, (unsigned long)vaddr, SZ_2K, -1); fpga_sram_init()
66 iounmap(vaddr); fpga_sram_init()
/linux-4.4.14/arch/metag/include/asm/
fixmap.h:56 #define kmap_get_fixmap_pte(vaddr) \
58 pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
59 (vaddr) \
mmu.h:30 unsigned long mmu_read_first_level_page(unsigned long vaddr);
35 unsigned long mmu_read_second_level_page(unsigned long vaddr);
/linux-4.4.14/arch/xtensa/mm/
cache.c:64 unsigned long vaddr) kmap_invalidate_coherent()
66 if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { kmap_invalidate_coherent()
84 unsigned long vaddr, unsigned long *paddr) coherent_kvaddr()
86 if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { coherent_kvaddr()
88 return (void *)(base + (vaddr & DCACHE_ALIAS_MASK)); coherent_kvaddr()
95 void clear_user_highpage(struct page *page, unsigned long vaddr) clear_user_highpage() argument
98 void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr); clear_user_highpage()
101 kmap_invalidate_coherent(page, vaddr); clear_user_highpage()
108 unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage()
111 void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr, copy_user_highpage()
113 void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr, copy_user_highpage()
117 kmap_invalidate_coherent(dst, vaddr); copy_user_highpage()
262 unsigned long vaddr, void *dst, const void *src, copy_to_user_page()
266 unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys)); copy_to_user_page()
271 unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); copy_to_user_page()
285 unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); copy_to_user_page()
298 unsigned long vaddr, void *dst, const void *src, copy_from_user_page()
302 unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys)); copy_from_user_page()
310 unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); copy_from_user_page()
63 kmap_invalidate_coherent(struct page *page, unsigned long vaddr) kmap_invalidate_coherent() argument
83 coherent_kvaddr(struct page *page, unsigned long base, unsigned long vaddr, unsigned long *paddr) coherent_kvaddr() argument
107 copy_user_highpage(struct page *dst, struct page *src, unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage() argument
261 copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) copy_to_user_page() argument
297 copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) copy_from_user_page() argument
mmu.c:21 static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) init_pmd() argument
23 pgd_t *pgd = pgd_offset_k(vaddr); init_pmd()
24 pmd_t *pmd = pmd_offset(pgd, vaddr); init_pmd()
30 pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n", init_pmd()
31 __func__, vaddr, n_pages); init_pmd()
highmem.c:43 unsigned long vaddr; kmap_atomic() local
52 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_atomic()
58 return (void *)vaddr; kmap_atomic()
/linux-4.4.14/arch/x86/include/asm/
pmem.h:71 * @vaddr: virtual start address
78 static inline void __arch_wb_cache_pmem(void *vaddr, size_t size) __arch_wb_cache_pmem() argument
82 void *vend = vaddr + size; __arch_wb_cache_pmem()
85 for (p = (void *)((unsigned long)vaddr & ~clflush_mask); __arch_wb_cache_pmem()
111 void *vaddr = (void __force *)addr; arch_copy_from_iter_pmem() local
115 len = copy_from_iter_nocache(vaddr, bytes, i); arch_copy_from_iter_pmem()
118 __arch_wb_cache_pmem(vaddr, bytes); arch_copy_from_iter_pmem()
133 void *vaddr = (void __force *)addr; arch_clear_pmem() local
136 if (size == PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0) arch_clear_pmem()
137 clear_page(vaddr); arch_clear_pmem()
139 memset(vaddr, 0, size); arch_clear_pmem()
141 __arch_wb_cache_pmem(vaddr, size); arch_clear_pmem()
page.h:24 static inline void clear_user_page(void *page, unsigned long vaddr, clear_user_page() argument
30 static inline void copy_user_page(void *to, void *from, unsigned long vaddr, copy_user_page() argument
36 #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
37 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
pgtable_32.h:60 #define kpte_clear_flush(ptep, vaddr) \
62 pte_clear(&init_mm, (vaddr), (ptep)); \
63 __flush_tlb_one((vaddr)); \
swiotlb.h:36 void *vaddr, dma_addr_t dma_addr,
/linux-4.4.14/arch/cris/arch-v32/drivers/pci/
dma.c:43 void *vaddr, dma_addr_t dma_handle) dma_free_coherent()
47 if (!dma_release_from_coherent(dev, order, vaddr)) dma_free_coherent()
48 free_pages((unsigned long)vaddr, order); dma_free_coherent()
42 dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) dma_free_coherent() argument
/linux-4.4.14/arch/powerpc/kernel/
io-workarounds.c:28 static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr) iowa_pci_find() argument
38 if (vaddr) { iowa_pci_find()
41 if ((vaddr >= vstart) && (vaddr <= vend)) iowa_pci_find()
68 unsigned long vaddr, paddr; iowa_mem_find_bus() local
71 vaddr = (unsigned long)PCI_FIX_ADDR(addr); iowa_mem_find_bus()
72 if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END) iowa_mem_find_bus()
78 ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr, iowa_mem_find_bus()
86 bus = iowa_pci_find(vaddr, paddr); iowa_mem_find_bus()
104 unsigned long vaddr = (unsigned long)pci_io_base + port; iowa_pio_find_bus() local
105 return iowa_pci_find(vaddr, 0); iowa_pio_find_bus()
crash_dump.c:72 static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize, copy_oldmem_vaddr() argument
76 if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) copy_oldmem_vaddr()
79 memcpy(buf, (vaddr + offset), csize); copy_oldmem_vaddr()
100 void *vaddr; copy_oldmem_page() local
110 vaddr = __va(paddr); copy_oldmem_page()
111 csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); copy_oldmem_page()
113 vaddr = __ioremap(paddr, PAGE_SIZE, 0); copy_oldmem_page()
114 csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); copy_oldmem_page()
115 iounmap(vaddr); copy_oldmem_page()
/linux-4.4.14/arch/arm/include/asm/
page-nommu.h:20 #define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
26 #define clear_user_page(page, vaddr, pg) clear_page(page)
27 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
page.h:114 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
116 unsigned long vaddr, struct vm_area_struct *vma);
130 extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
132 unsigned long vaddr, struct vm_area_struct *vma);
135 #define clear_user_highpage(page,vaddr) \
136 __cpu_clear_user_highpage(page, vaddr)
139 #define copy_user_highpage(to,from,vaddr,vma) \
140 __cpu_copy_user_highpage(to, from, vaddr, vma)
/linux-4.4.14/arch/arc/mm/
H A Dioremap.c44 void __iomem *vaddr; ioremap_prot() local
73 vaddr = (void __iomem *)area->addr; ioremap_prot()
74 if (ioremap_page_range((unsigned long)vaddr, ioremap_prot()
75 (unsigned long)vaddr + size, paddr, prot)) { ioremap_prot()
76 vunmap((void __force *)vaddr); ioremap_prot()
79 return (void __iomem *)(off + (char __iomem *)vaddr); ioremap_prot()
cache.c:28 void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
179 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
191 * The solution was to provide CDU with these additional vaddr bits. These __cache_line_loop_v2()
195 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
212 * meaning more vaddr bits needed to disambiguate the cache-line-op ;
219 void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr, __cache_line_loop_v2() argument
242 vaddr &= CACHE_LINE_MASK; __cache_line_loop_v2()
248 paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; __cache_line_loop_v2()
261 void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr, __cache_line_loop_v3() argument
285 vaddr &= CACHE_LINE_MASK; __cache_line_loop_v3()
312 write_aux_reg(aux_cmd, vaddr); __cache_line_loop_v3()
313 vaddr += L1_CACHE_BYTES; __cache_line_loop_v3()
323 * - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
331 void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr, __cache_line_loop_v4() argument
451 static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr, __dc_line_op() argument
460 __cache_line_loop(paddr, vaddr, sz, op); __dc_line_op()
470 #define __dc_line_op(paddr, vaddr, sz, op)
484 __ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr, __ic_line_inv_vaddr_local() argument
490 (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC); __ic_line_inv_vaddr_local()
501 phys_addr_t paddr, vaddr; member in struct:ic_inv_args
509 __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); __ic_line_inv_vaddr_helper()
512 static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr, __ic_line_inv_vaddr() argument
517 .vaddr = vaddr, __ic_line_inv_vaddr()
624 unsigned long vaddr = page->index << PAGE_CACHE_SHIFT; flush_dcache_page() local
626 if (addr_not_cache_congruent(paddr, vaddr)) flush_dcache_page()
627 __flush_dcache_page(paddr, vaddr); flush_dcache_page()
714 WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__); flush_icache_range()
740 * handling of kernel vaddr. flush_icache_range()
764 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
766 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
769 * kprobe on loadable module will be kernel vaddr.
771 void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len) __sync_icache_dcache() argument
773 __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV); __sync_icache_dcache()
774 __ic_line_inv_vaddr(paddr, vaddr, len); __sync_icache_dcache()
778 void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr) __inv_icache_page() argument
780 __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE); __inv_icache_page()
785 * For kernel mappings @vaddr == @paddr
787 void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr) __flush_dcache_page() argument
789 __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV); __flush_dcache_page()
854 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is copy_user_highpage()
870 * here as well (given that both vaddr/paddr are available). copy_user_highpage()
933 * pair to provide vaddr/paddr respectively, just as in MMU v3 arc_cache_init()
highmem.c:36 * - the kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/module)
68 unsigned long vaddr; kmap_atomic() local
77 vaddr = FIXMAP_ADDR(idx); kmap_atomic()
79 set_pte_at(&init_mm, vaddr, fixmap_page_table + idx, kmap_atomic()
82 return (void *)vaddr; kmap_atomic()
93 * Because preemption is disabled, this vaddr can be associated __kunmap_atomic()
/linux-4.4.14/arch/nios2/include/asm/
page.h:53 extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
54 extern void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
93 # define virt_to_page(vaddr) pfn_to_page(PFN_DOWN(virt_to_phys(vaddr)))
94 # define virt_addr_valid(vaddr) pfn_valid(PFN_DOWN(virt_to_phys(vaddr)))
io.h:56 #define phys_to_virt(vaddr) \
57 ((void *)((unsigned long)(vaddr) | CONFIG_NIOS2_KERNEL_REGION_BASE))
59 #define virt_to_phys(vaddr) \
60 ((unsigned long)((unsigned long)(vaddr) & ~0xE0000000))
dma-mapping.h:17 static inline void __dma_sync_for_device(void *vaddr, size_t size, __dma_sync_for_device() argument
22 invalidate_dcache_range((unsigned long)vaddr, __dma_sync_for_device()
23 (unsigned long)(vaddr + size)); __dma_sync_for_device()
31 flush_dcache_range((unsigned long)vaddr, __dma_sync_for_device()
32 (unsigned long)(vaddr + size)); __dma_sync_for_device()
39 static inline void __dma_sync_for_cpu(void *vaddr, size_t size, __dma_sync_for_cpu() argument
45 invalidate_dcache_range((unsigned long)vaddr, __dma_sync_for_cpu()
46 (unsigned long)(vaddr + size)); __dma_sync_for_cpu()
62 void *vaddr, dma_addr_t dma_handle);
125 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, dma_cache_sync() argument
/linux-4.4.14/arch/mn10300/include/asm/
highmem.h:75 unsigned long vaddr; kmap_atomic() local
85 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_atomic()
91 local_flush_tlb_one(vaddr); kmap_atomic()
93 return (void *)vaddr; kmap_atomic()
96 static inline void __kunmap_atomic(unsigned long vaddr) __kunmap_atomic() argument
100 if (vaddr < FIXADDR_START) { /* FIXME */ __kunmap_atomic()
113 if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)) __kunmap_atomic()
121 local_flush_tlb_one(vaddr); __kunmap_atomic()
/linux-4.4.14/drivers/block/
cciss.h:66 void __iomem *vaddr; member in struct:ctlr_info
226 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); SA5_submit_command()
227 readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); SA5_submit_command()
243 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_intr_mask()
244 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_intr_mask()
249 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_intr_mask()
250 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_intr_mask()
263 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5B_intr_mask()
264 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5B_intr_mask()
269 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5B_intr_mask()
270 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5B_intr_mask()
279 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_performant_intr_mask()
280 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_performant_intr_mask()
284 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_performant_intr_mask()
285 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_performant_intr_mask()
308 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); SA5_completed()
334 register_value = readl(h->vaddr + SA5_OUTDB_STATUS); SA5_performant_completed()
337 writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); SA5_performant_completed()
341 register_value = readl(h->vaddr + SA5_OUTDB_STATUS); SA5_performant_completed()
365 readl(h->vaddr + SA5_INTR_STATUS); SA5_intr_pending()
380 readl(h->vaddr + SA5_INTR_STATUS); SA5B_intr_pending()
391 unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); SA5_performant_intr_pending()
400 register_value = readl(h->vaddr + SA5_OUTDB_STATUS); SA5_performant_intr_pending()
smart1,2.h:44 writel(c->busaddr, h->vaddr + S42XX_REQUEST_PORT_OFFSET); smart4_submit_command()
56 writel(0, h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET); smart4_intr_mask()
60 h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET); smart4_intr_mask()
72 return (!readl(h->vaddr + S42XX_REQUEST_PORT_OFFSET)); smart4_fifo_full()
82 = readl(h->vaddr + S42XX_REPLY_PORT_OFFSET); smart4_completed()
90 writel(0, h->vaddr + S42XX_REPLY_PORT_OFFSET); smart4_completed()
103 readl(h->vaddr + S42XX_INTR_STATUS); smart4_intr_pending()
123 writel(c->busaddr, h->vaddr + COMMAND_FIFO); smart2_submit_command()
128 writel(val, h->vaddr + INTR_MASK); smart2_intr_mask()
133 return readl(h->vaddr + COMMAND_FIFO); smart2_fifo_full()
138 return readl(h->vaddr + COMMAND_COMPLETE_FIFO); smart2_completed()
143 return readl(h->vaddr + INTR_PENDING); smart2_intr_pending()
/linux-4.4.14/arch/s390/include/asm/
idals.h:29 idal_is_needed(void *vaddr, unsigned int length) idal_is_needed() argument
31 return ((__pa(vaddr) + length - 1) >> 31) != 0; idal_is_needed()
38 static inline unsigned int idal_nr_words(void *vaddr, unsigned int length) idal_nr_words() argument
40 return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length + idal_nr_words()
48 void *vaddr, unsigned int length) idal_create_words()
53 paddr = __pa(vaddr); idal_create_words()
70 set_normalized_cda(struct ccw1 * ccw, void *vaddr) set_normalized_cda() argument
77 nridaws = idal_nr_words(vaddr, ccw->count); set_normalized_cda()
83 idal_create_words(idal, vaddr, ccw->count); set_normalized_cda()
85 vaddr = idal; set_normalized_cda()
87 ccw->cda = (__u32)(unsigned long) vaddr; set_normalized_cda()
47 idal_create_words(unsigned long *idaws, void *vaddr, unsigned int length) idal_create_words() argument
dma-mapping.h:21 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, dma_cache_sync() argument
page.h:59 #define clear_user_page(page, vaddr, pg) clear_page(page)
60 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
62 #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
63 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
/linux-4.4.14/arch/parisc/include/asm/
H A Dcacheflush.h51 static inline void flush_kernel_vmap_range(void *vaddr, int size) flush_kernel_vmap_range() argument
53 unsigned long start = (unsigned long)vaddr; flush_kernel_vmap_range()
57 static inline void invalidate_kernel_vmap_range(void *vaddr, int size) invalidate_kernel_vmap_range() argument
59 unsigned long start = (unsigned long)vaddr; invalidate_kernel_vmap_range()
60 void *cursor = vaddr; invalidate_kernel_vmap_range()
62 for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) { invalidate_kernel_vmap_range()
92 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
94 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
99 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
101 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
110 void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
dma-mapping.h:13 void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
68 void *vaddr, dma_addr_t dma_handle) dma_free_coherent()
70 hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle); dma_free_coherent()
75 void *vaddr, dma_addr_t dma_handle) dma_free_noncoherent()
77 hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle); dma_free_noncoherent()
191 dma_cache_sync(struct device *dev, void *vaddr, size_t size, dma_cache_sync() argument
195 flush_kernel_dcache_range((unsigned long)vaddr, size); dma_cache_sync()
67 dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) dma_free_coherent() argument
74 dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) dma_free_noncoherent() argument
/linux-4.4.14/include/uapi/linux/netfilter/
xt_ipvs.h:19 union nf_inet_addr vaddr, vmask; member in struct:xt_ipvs_mtinfo
/linux-4.4.14/arch/mips/include/asm/
tlbflush.h:23 extern void local_flush_tlb_one(unsigned long vaddr);
33 extern void flush_tlb_one(unsigned long vaddr);
43 #define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
fixmap.h:72 #define kmap_get_fixmap_pte(vaddr) \
73 pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
cacheflush.h:101 struct page *page, unsigned long vaddr, void *dst, const void *src,
105 struct page *page, unsigned long vaddr, void *dst, const void *src,
135 extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
137 static inline void flush_kernel_vmap_range(void *vaddr, int size) flush_kernel_vmap_range() argument
140 __flush_kernel_vmap_range((unsigned long) vaddr, size); flush_kernel_vmap_range()
143 static inline void invalidate_kernel_vmap_range(void *vaddr, int size) invalidate_kernel_vmap_range() argument
146 __flush_kernel_vmap_range((unsigned long) vaddr, size); invalidate_kernel_vmap_range()
dma-mapping.h:34 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
/linux-4.4.14/arch/ia64/kernel/
crash_dump.c:37 void *vaddr; copy_oldmem_page() local
41 vaddr = __va(pfn<<PAGE_SHIFT); copy_oldmem_page()
43 if (copy_to_user(buf, (vaddr + offset), csize)) { copy_oldmem_page()
47 memcpy(buf, (vaddr + offset), csize); copy_oldmem_page()
pci-swiotlb.c:27 void *vaddr, dma_addr_t dma_addr, ia64_swiotlb_free_coherent()
30 swiotlb_free_coherent(dev, size, vaddr, dma_addr); ia64_swiotlb_free_coherent()
26 ia64_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, struct dma_attrs *attrs) ia64_swiotlb_free_coherent() argument
/linux-4.4.14/arch/arm64/include/asm/
page.h:50 #define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr)
51 #define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
/linux-4.4.14/arch/arc/include/asm/
cacheflush.h:26 * However ARC Cache flush requires paddr as well as vaddr, latter not available
35 void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
36 void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
37 void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
108 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
112 __sync_icache_dcache((unsigned long)(dst), vaddr, len); \
115 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page.h:16 #define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
93 * So the old macro __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
96 #define __pa(vaddr) ((unsigned long)vaddr)
/linux-4.4.14/sound/pci/asihpi/
hpios.c:50 p_mem_area->vaddr = hpios_locked_mem_alloc()
54 if (p_mem_area->vaddr) { hpios_locked_mem_alloc()
57 p_mem_area->vaddr); hpios_locked_mem_alloc()
73 p_mem_area->vaddr, p_mem_area->dma_handle); hpios_locked_mem_free()
77 p_mem_area->vaddr); hpios_locked_mem_free()
/linux-4.4.14/arch/xtensa/kernel/
pci-dma.c:28 void dma_cache_sync(struct device *dev, void *vaddr, size_t size, dma_cache_sync() argument
33 __flush_invalidate_dcache_range((unsigned long)vaddr, size); dma_cache_sync()
37 __invalidate_dcache_range((unsigned long)vaddr, size); dma_cache_sync()
41 __flush_dcache_range((unsigned long)vaddr, size); dma_cache_sync()
63 void *vaddr = kmap_atomic(page); do_cache_op() local
65 fn((unsigned long)vaddr + off, sz); do_cache_op()
66 kunmap_atomic(vaddr); do_cache_op()
173 static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, xtensa_dma_free() argument
176 unsigned long addr = (unsigned long)vaddr + xtensa_dma_free()
/linux-4.4.14/arch/mn10300/mm/
pgtable.c:32 * vaddr is what the page gets mapped to - both must be properly aligned.
35 void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) set_pmd_pfn() argument
41 if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */ set_pmd_pfn()
42 printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n"); set_pmd_pfn()
49 pgd = swapper_pg_dir + pgd_index(vaddr); set_pmd_pfn()
54 pud = pud_offset(pgd, vaddr); set_pmd_pfn()
55 pmd = pmd_offset(pud, vaddr); set_pmd_pfn()
61 local_flush_tlb_one(vaddr); set_pmd_pfn()
dma-alloc.c:66 void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_free_coherent() argument
69 unsigned long addr = (unsigned long) vaddr & ~0x20000000; dma_free_coherent()
/linux-4.4.14/arch/tile/mm/
highmem.c:20 #define kmap_get_pte(vaddr) \
21 pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
22 (vaddr)), (vaddr))
200 unsigned long vaddr; kmap_atomic_prot() local
215 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_atomic_prot()
216 pte = kmap_get_pte(vaddr); kmap_atomic_prot()
220 kmap_atomic_register(page, type, vaddr, pte, mk_pte(page, prot)); kmap_atomic_prot()
222 return (void *)vaddr; kmap_atomic_prot()
235 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; __kunmap_atomic() local
237 if (vaddr >= __fix_to_virt(FIX_KMAP_END) && __kunmap_atomic()
238 vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) { __kunmap_atomic()
239 pte_t *pte = kmap_get_pte(vaddr); __kunmap_atomic()
252 kmap_atomic_unregister(pte_page(pteval), vaddr); __kunmap_atomic() local
253 kpte_clear_flush(pte, vaddr); __kunmap_atomic()
257 BUG_ON(vaddr < PAGE_OFFSET); __kunmap_atomic()
258 BUG_ON(vaddr >= (unsigned long)high_memory); __kunmap_atomic()
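Editor's note: every kmap_atomic() user in the x86/tile/xtensa/mn10300 hits above obeys the same contract: the mapping is per-CPU, the section between map and unmap must not sleep, and kunmap_atomic() takes the pointer the map call returned. A minimal sketch; copy_from_page is an invented name.

    #include <linux/highmem.h>

    static void copy_from_page(struct page *page, size_t off,
                               void *dst, size_t len)
    {
            void *vaddr = kmap_atomic(page);    /* no sleeping from here on */

            memcpy(dst, vaddr + off, len);
            kunmap_atomic(vaddr);               /* pass the returned pointer */
    }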
/linux-4.4.14/drivers/misc/sgi-gru/
grufault.c:61 struct vm_area_struct *gru_find_vma(unsigned long vaddr) gru_find_vma() argument
65 vma = find_vma(current->mm, vaddr); gru_find_vma()
66 if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops) gru_find_vma()
72 * Find and lock the gts that contains the specified user vaddr.
76 * - NULL if vaddr invalid OR is not a valid GSEG vaddr.
79 static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr) gru_find_lock_gts() argument
86 vma = gru_find_vma(vaddr); gru_find_lock_gts()
88 gts = gru_find_thread_state(vma, TSID(vaddr, vma)); gru_find_lock_gts()
96 static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr) gru_alloc_locked_gts() argument
103 vma = gru_find_vma(vaddr); gru_alloc_locked_gts()
107 gts = gru_alloc_thread_state(vma, TSID(vaddr, vma)); gru_alloc_locked_gts()
183 * convert a vaddr into a physical address. The size of the page
191 unsigned long vaddr, int write, non_atomic_pte_lookup()
202 (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0) non_atomic_pte_lookup()
219 static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, atomic_pte_lookup() argument
227 pgdp = pgd_offset(vma->vm_mm, vaddr); atomic_pte_lookup()
231 pudp = pud_offset(pgdp, vaddr); atomic_pte_lookup()
235 pmdp = pmd_offset(pudp, vaddr); atomic_pte_lookup()
243 pte = *pte_offset_kernel(pmdp, vaddr); atomic_pte_lookup()
261 static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr, gru_vtop() argument
269 vma = find_vma(mm, vaddr); gru_vtop()
278 ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps); gru_vtop()
282 if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps)) gru_vtop()
325 unsigned long vaddr = 0, gpa; gru_preload_tlb() local
332 vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1; gru_preload_tlb()
334 vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1; gru_preload_tlb()
337 vaddr &= PAGE_MASK; gru_preload_tlb()
338 vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE); gru_preload_tlb()
340 while (vaddr > fault_vaddr) { gru_preload_tlb()
341 ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); gru_preload_tlb()
342 if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write, gru_preload_tlb()
346 "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n", gru_preload_tlb()
348 vaddr, asid, write, pageshift, gpa); gru_preload_tlb()
349 vaddr -= PAGE_SIZE; gru_preload_tlb()
372 unsigned long gpa = 0, vaddr = 0; gru_try_dropin() local
407 vaddr = tfh->missvaddr; gru_try_dropin()
422 ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); gru_try_dropin()
437 gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe); gru_try_dropin()
443 tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write, gru_try_dropin()
446 "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x," gru_try_dropin()
448 atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid, gru_try_dropin()
456 gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); gru_try_dropin()
469 gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); gru_try_dropin()
506 gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); gru_try_dropin()
517 gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n", gru_try_dropin()
518 tfh, vaddr); gru_try_dropin()
810 gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg, gru_user_flush_tlb()
811 req.vaddr, req.len); gru_user_flush_tlb()
819 gru_flush_tlb_range(gms, req.vaddr, req.len); gru_user_flush_tlb()
190 non_atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, int write, unsigned long *paddr, int *pageshift) non_atomic_pte_lookup() argument
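Editor's note: gru_find_vma() above illustrates a standard find_vma() gotcha: it returns the first vma whose vm_end lies above vaddr, so the caller must still check vm_start before trusting the result. A hedged sketch of the check; vaddr_in_vma is an invented name.

    static struct vm_area_struct *vaddr_in_vma(struct mm_struct *mm,
                                               unsigned long vaddr)
    {
            /* caller is assumed to hold mm->mmap_sem */
            struct vm_area_struct *vma = find_vma(mm, vaddr);

            /* find_vma() only guarantees vaddr < vma->vm_end */
            if (vma && vma->vm_start <= vaddr)
                    return vma;
            return NULL;
    }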
gruhandles.c:152 unsigned long vaddr, unsigned long vaddrmask, tgh_invalidate()
156 tgh->vaddr = vaddr; tgh_invalidate()
170 unsigned long vaddr, int asid, int dirty, tfh_write_only()
174 tfh->fillvaddr = vaddr; tfh_write_only()
186 unsigned long vaddr, int asid, int dirty, tfh_write_restart()
190 tfh->fillvaddr = vaddr; tfh_write_restart()
151 tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr, unsigned long vaddrmask, int asid, int pagesize, int global, int n, unsigned short ctxbitmap) tgh_invalidate() argument
168 tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr, int gaa, unsigned long vaddr, int asid, int dirty, int pagesize) tfh_write_only() argument
184 tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr, int gaa, unsigned long vaddr, int asid, int dirty, int pagesize) tfh_write_restart() argument
/linux-4.4.14/mm/
dmapool.c:58 void *vaddr; member in struct:dma_page
217 *(int *)(page->vaddr + offset) = next; pool_initialise_page()
229 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, pool_alloc_page()
231 if (page->vaddr) { pool_alloc_page()
233 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); pool_alloc_page()
255 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); pool_free_page()
257 dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); pool_free_page()
295 pool->name, page->vaddr); dma_pool_destroy()
299 pool->name, page->vaddr); dma_pool_destroy()
350 page->offset = *(int *)(page->vaddr + offset); dma_pool_alloc()
351 retval = offset + page->vaddr; dma_pool_alloc()
406 * @vaddr: virtual address of block
412 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) dma_pool_free() argument
425 pool->name, vaddr, (unsigned long)dma); dma_pool_free()
428 pool->name, vaddr, (unsigned long)dma); dma_pool_free()
432 offset = vaddr - page->vaddr; dma_pool_free()
438 "dma_pool_free %s, %p (bad vaddr)/%Lx\n", dma_pool_free()
439 pool->name, vaddr, (unsigned long long)dma); dma_pool_free()
442 "dma_pool_free %s, %p (bad vaddr)/%Lx\n", dma_pool_free()
443 pool->name, vaddr, (unsigned long long)dma); dma_pool_free()
450 chain = *(int *)(page->vaddr + chain); dma_pool_free()
465 memset(vaddr, POOL_POISON_FREED, pool->size); dma_pool_free()
469 *(int *)vaddr = page->offset; dma_pool_free()
highmem.c:154 struct page *kmap_to_page(void *vaddr) kmap_to_page() argument
156 unsigned long addr = (unsigned long)vaddr; kmap_to_page()
219 unsigned long vaddr; map_new_virtual() local
261 vaddr = PKMAP_ADDR(last_pkmap_nr); map_new_virtual()
262 set_pte_at(&init_mm, vaddr, map_new_virtual()
266 set_page_address(page, (void *)vaddr); map_new_virtual()
268 return vaddr; map_new_virtual()
281 unsigned long vaddr; kmap_high() local
288 vaddr = (unsigned long)page_address(page); kmap_high()
289 if (!vaddr) kmap_high()
290 vaddr = map_new_virtual(page); kmap_high()
291 pkmap_count[PKMAP_NR(vaddr)]++; kmap_high()
292 BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2); kmap_high()
294 return (void*) vaddr; kmap_high()
312 unsigned long vaddr, flags; kmap_high_get() local
315 vaddr = (unsigned long)page_address(page); kmap_high_get()
316 if (vaddr) { kmap_high_get()
317 BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1); kmap_high_get()
318 pkmap_count[PKMAP_NR(vaddr)]++; kmap_high_get()
321 return (void*) vaddr; kmap_high_get()
334 unsigned long vaddr; kunmap_high() local
342 vaddr = (unsigned long)page_address(page); kunmap_high()
343 BUG_ON(!vaddr); kunmap_high()
344 nr = PKMAP_NR(vaddr); kunmap_high()
/linux-4.4.14/arch/powerpc/platforms/44x/
canyonlands.c:69 void __iomem *vaddr; ppc460ex_canyonlands_fixup() local
94 vaddr = of_iomap(np, 0); ppc460ex_canyonlands_fixup()
97 if (!vaddr) { ppc460ex_canyonlands_fixup()
117 setbits32((vaddr + GPIO0_OSRH), 0x42000000); ppc460ex_canyonlands_fixup()
118 setbits32((vaddr + GPIO0_TSRH), 0x42000000); ppc460ex_canyonlands_fixup()
120 iounmap(vaddr); ppc460ex_canyonlands_fixup()
/linux-4.4.14/arch/unicore32/mm/
dma-swiotlb.c:28 void *vaddr, dma_addr_t dma_addr, unicore_swiotlb_free_coherent()
31 swiotlb_free_coherent(dev, size, vaddr, dma_addr); unicore_swiotlb_free_coherent()
27 unicore_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, struct dma_attrs *attrs) unicore_swiotlb_free_coherent() argument
/linux-4.4.14/arch/mips/dec/
kn01-berr.c:83 long asid, entryhi, vaddr; dec_kn01_be_backend() local
105 vaddr = regs->regs[insn.i_format.rs] + dec_kn01_be_backend()
108 vaddr = (long)pc; dec_kn01_be_backend()
109 if (KSEGX(vaddr) == CKSEG0 || KSEGX(vaddr) == CKSEG1) dec_kn01_be_backend()
110 address = CPHYSADDR(vaddr); dec_kn01_be_backend()
115 entryhi |= vaddr & ~(PAGE_SIZE - 1); dec_kn01_be_backend()
123 offset = vaddr & (PAGE_SIZE - 1); dec_kn01_be_backend()
/linux-4.4.14/arch/cris/include/asm/
cacheflush.h:25 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
27 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
page.h:15 #define clear_user_page(page, vaddr, pg) clear_page(page)
16 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
18 #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
19 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
/linux-4.4.14/arch/arm64/mm/
H A Dcopypage.c25 void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) __cpu_copy_user_page() argument
32 void __cpu_clear_user_page(void *kaddr, unsigned long vaddr) __cpu_clear_user_page() argument
/linux-4.4.14/drivers/scsi/
hpsa.h:163 void __iomem *vaddr; member in struct:ctlr_info
411 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); SA5_submit_command()
412 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); SA5_submit_command()
418 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); SA5_submit_command_no_read()
424 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); SA5_submit_command_ioaccel2()
436 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_intr_mask()
437 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_intr_mask()
441 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_intr_mask()
442 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_intr_mask()
450 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_performant_intr_mask()
451 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_performant_intr_mask()
455 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_performant_intr_mask()
456 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); SA5_performant_intr_mask()
470 (void) readl(h->vaddr + SA5_OUTDB_STATUS); SA5_performant_completed()
471 writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); SA5_performant_completed()
475 (void) readl(h->vaddr + SA5_OUTDB_STATUS); SA5_performant_completed()
501 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); SA5_completed()
522 readl(h->vaddr + SA5_INTR_STATUS); SA5_intr_pending()
528 unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); SA5_performant_intr_pending()
534 register_value = readl(h->vaddr + SA5_OUTDB_STATUS); SA5_performant_intr_pending()
542 unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); SA5_ioaccel_mode1_intr_pending()
572 writel((q << 24) | rq->current_entry, h->vaddr + SA5_ioaccel_mode1_completed()
NCR_Q720.c:79 int irq, int slot, __u32 paddr, void __iomem *vaddr) NCR_Q720_probe_one()
84 __u8 scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1); NCR_Q720_probe_one()
85 __u8 differential = readb(vaddr + NCR_Q720_SCSR_OFFSET) & 0x20; NCR_Q720_probe_one()
94 writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1); NCR_Q720_probe_one()
96 version = readb(vaddr + 0x18) >> 4; NCR_Q720_probe_one()
106 device.slot.base_v = vaddr; NCR_Q720_probe_one()
118 scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1); NCR_Q720_probe_one()
121 writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1); NCR_Q720_probe_one()
78 NCR_Q720_probe_one(struct NCR_Q720_private *p, int siop, int irq, int slot, __u32 paddr, void __iomem *vaddr) NCR_Q720_probe_one() argument
sun3_scsi.c:402 unsigned char *vaddr; sun3scsi_dma_finish() local
404 vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); sun3scsi_dma_finish()
406 vaddr += (sun3_dma_orig_count - fifo); sun3scsi_dma_finish()
407 vaddr--; sun3scsi_dma_finish()
411 *vaddr = (dregs->bpack_lo & 0xff00) >> 8; sun3scsi_dma_finish()
412 vaddr--; sun3scsi_dma_finish()
415 *vaddr = (dregs->bpack_hi & 0x00ff); sun3scsi_dma_finish()
416 vaddr--; sun3scsi_dma_finish()
419 *vaddr = (dregs->bpack_hi & 0xff00) >> 8; sun3scsi_dma_finish()
448 unsigned char *vaddr; sun3scsi_dma_finish() local
451 vaddr = (unsigned char *)dvma_btov(sun3_dma_orig_addr); sun3scsi_dma_finish()
453 vaddr += (sun3_dma_orig_count - fifo); sun3scsi_dma_finish()
455 vaddr[-2] = (data & 0xff00) >> 8; sun3scsi_dma_finish()
456 vaddr[-1] = (data & 0xff); sun3scsi_dma_finish()
/linux-4.4.14/drivers/tty/serial/8250/
H A D8250_acorn.c38 void __iomem *vaddr; member in struct:serial_card_info
57 info->vaddr = ecardm_iomap(ec, type->type, 0, 0); serial_card_probe()
58 if (!info->vaddr) { serial_card_probe()
74 uart.port.membase = info->vaddr + type->offset[i]; serial_card_probe()
/linux-4.4.14/arch/sparc/lib/
H A DNG4clear_page.S17 NG4clear_user_page: /* %o0=dest, %o1=vaddr */
H A Dclear_page.S38 clear_user_page: /* %o0=dest, %o1=vaddr */
49 and %o1, %o4, %o0 ! vaddr D-cache alias bit
55 add %o0, %o3, %o0 ! TTE vaddr
77 mov %o0, %g1 ! remember vaddr for tlbflush
/linux-4.4.14/arch/x86/xen/
H A Dmmu.h16 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
H A Dgrant-table.c121 void *vaddr; xlated_setup_gnttab_pages() local
147 vaddr = vmap(pages, nr_grant_frames, 0, PAGE_KERNEL); xlated_setup_gnttab_pages()
148 if (!vaddr) { xlated_setup_gnttab_pages()
160 xen_auto_xlat_grant_frames.vaddr = vaddr; xlated_setup_gnttab_pages()
/linux-4.4.14/arch/m32r/include/asm/
H A Dpage.h16 #define clear_user_page(page, vaddr, pg) clear_page(page)
17 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
19 #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
20 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
H A Dcacheflush.h64 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
67 flush_icache_user_range(vma, page, vaddr, len); \
69 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.4.14/arch/alpha/include/asm/
H A Dpage.h17 #define clear_user_page(page, vaddr, pg) clear_page(page)
19 #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
24 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
H A Dcacheflush.h70 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
72 flush_icache_user_range(vma, page, vaddr, len); \
74 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.4.14/include/drm/
H A Ddrm_gem_cma_helper.h12 * @vaddr: kernel virtual address of the backing memory
20 void *vaddr; member in struct:drm_gem_cma_object
68 void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
/linux-4.4.14/arch/ia64/include/asm/
H A Dcacheflush.h47 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
49 flush_icache_user_range(vma, page, vaddr, len); \
51 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.4.14/arch/avr32/mm/
H A Ddma-coherent.c16 void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction) dma_cache_sync() argument
21 if (PXSEG(vaddr) == P2SEG) dma_cache_sync()
26 invalidate_dcache_region(vaddr, size); dma_cache_sync()
29 clean_dcache_region(vaddr, size); dma_cache_sync()
32 flush_dcache_region(vaddr, size); dma_cache_sync()
/linux-4.4.14/drivers/staging/rdma/ipath/
H A Dipath_keys.c143 isge->vaddr = (void *) sge->addr; ipath_lkey_ok()
176 isge->vaddr = mr->map[m]->segs[n].vaddr + off; ipath_lkey_ok()
193 * @vaddr: virtual address to place data
200 u32 len, u64 vaddr, u32 rkey, int acc) ipath_rkey_ok()
223 sge->vaddr = (void *) vaddr; ipath_rkey_ok()
239 off = vaddr - mr->iova; ipath_rkey_ok()
240 if (unlikely(vaddr < mr->iova || off + len > mr->length || ipath_rkey_ok()
258 sge->vaddr = mr->map[m]->segs[n].vaddr + off; ipath_rkey_ok()
199 ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, u32 len, u64 vaddr, u32 rkey, int acc) ipath_rkey_ok() argument
/linux-4.4.14/arch/nios2/mm/
H A Dtlb.c58 pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr); flush_tlb_one_pid()
77 unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE + flush_tlb_one_pid() local
81 vaddr, way, (pid_misc >> TLBMISC_PID_SHIFT)); flush_tlb_one_pid()
83 WRCTL(CTL_PTEADDR, (vaddr >> 12) << 2); flush_tlb_one_pid()
122 pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr); flush_tlb_one()
139 unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE + flush_tlb_one() local
144 vaddr, way, (pid_misc >> TLBMISC_PID_SHIFT)); flush_tlb_one()
148 WRCTL(CTL_PTEADDR, (vaddr >> 12) << 2); flush_tlb_one()
247 unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE; flush_tlb_all() local
260 WRCTL(CTL_PTEADDR, ((vaddr) >> PAGE_SHIFT) << 2); flush_tlb_all()
263 vaddr += 1UL << 12; flush_tlb_all()
H A Dcacheflush.c230 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, copy_user_page() argument
233 __flush_dcache(vaddr, vaddr + PAGE_SIZE); copy_user_page()
234 __flush_icache(vaddr, vaddr + PAGE_SIZE); copy_user_page()
240 void clear_user_page(void *addr, unsigned long vaddr, struct page *page) clear_user_page() argument
242 __flush_dcache(vaddr, vaddr + PAGE_SIZE); clear_user_page()
243 __flush_icache(vaddr, vaddr + PAGE_SIZE); clear_user_page()
/linux-4.4.14/arch/mips/boot/
H A Delf2ecoff.c58 unsigned long vaddr; member in struct:sect
105 if (base->vaddr + base->len != new->vaddr) { combine()
107 base->len = new->vaddr - base->vaddr; combine()
282 text.vaddr = data.vaddr = bss.vaddr = 0; main()
362 ndata.vaddr = ph[i].p_vaddr; main()
364 nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz; main()
372 ntxt.vaddr = ph[i].p_vaddr; main()
392 if (text.vaddr > data.vaddr || data.vaddr > bss.vaddr || main()
393 text.vaddr + text.len > data.vaddr main()
394 || data.vaddr + data.len > bss.vaddr) { main()
406 data.vaddr = text.vaddr + text.len; main()
414 if (text.vaddr + text.len < data.vaddr) main()
415 text.len = data.vaddr - text.vaddr; main()
424 eah.text_start = text.vaddr; main()
425 eah.data_start = data.vaddr; main()
426 eah.bss_start = bss.vaddr; main()
/linux-4.4.14/arch/arm/probes/uprobes/
H A Dcore.c33 unsigned long vaddr) set_swbp()
35 return uprobe_write_opcode(mm, vaddr, set_swbp()
116 void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, arch_uprobe_copy_ixol() argument
120 void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK); arch_uprobe_copy_ixol()
128 flush_uprobe_xol_access(page, vaddr, dst, len); arch_uprobe_copy_ixol()
157 regs->ARM_pc = utask->vaddr + 4; arch_uprobe_post_xol()
178 instruction_pointer_set(regs, utask->vaddr); arch_uprobe_abort_xol()
32 set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) set_swbp() argument
/linux-4.4.14/drivers/lguest/
H A Dpage_tables.c83 static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) spgd_addr() argument
85 unsigned int index = pgd_index(vaddr); spgd_addr()
97 static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) spmd_addr() argument
99 unsigned int index = pmd_index(vaddr); spmd_addr()
115 static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) spte_addr() argument
118 pmd_t *pmd = spmd_addr(cpu, spgd, vaddr); spte_addr()
129 return &page[pte_index(vaddr)]; spte_addr()
136 static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) gpgd_addr() argument
138 unsigned int index = vaddr >> (PGDIR_SHIFT); gpgd_addr()
144 static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) gpmd_addr() argument
148 return gpage + pmd_index(vaddr) * sizeof(pmd_t); gpmd_addr()
153 pmd_t gpmd, unsigned long vaddr) gpte_addr()
158 return gpage + pte_index(vaddr) * sizeof(pte_t); gpte_addr()
163 pgd_t gpgd, unsigned long vaddr) gpte_addr()
168 return gpage + pte_index(vaddr) * sizeof(pte_t); gpte_addr()
303 static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate, find_spte() argument
313 spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); find_spte()
344 spmd = spmd_addr(cpu, *spgd, vaddr); find_spte()
374 return spte_addr(cpu, *spgd, vaddr); find_spte()
393 bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode, demand_page() argument
405 if (vaddr >= switcher_addr) demand_page()
413 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); demand_page()
431 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); demand_page()
448 gpte_ptr = gpte_addr(cpu, gpmd, vaddr); demand_page()
454 gpte_ptr = gpte_addr(cpu, gpgd, vaddr); demand_page()
459 gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT); demand_page()
482 *iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK); demand_page()
499 spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd)); demand_page()
551 static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) page_writable() argument
557 if (vaddr >= switcher_addr) page_writable()
561 spte = find_spte(cpu, vaddr, false, 0, 0); page_writable()
578 void pin_page(struct lg_cpu *cpu, unsigned long vaddr) pin_page() argument
582 if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem)) pin_page()
583 kill_guest(cpu, "bad stack page %#lx", vaddr); pin_page()
676 bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr) __guest_pa() argument
686 *paddr = vaddr; __guest_pa()
691 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); __guest_pa()
697 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); __guest_pa()
700 gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t); __guest_pa()
702 gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t); __guest_pa()
707 *paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); __guest_pa()
719 unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) guest_pa() argument
723 if (!__guest_pa(cpu, vaddr, &paddr)) guest_pa()
724 kill_guest(cpu, "Bad address %#lx", vaddr); guest_pa()
933 unsigned long vaddr, pte_t gpte) __guest_set_pte()
936 pgd_t *spgd = spgd_addr(cpu, idx, vaddr); __guest_set_pte()
944 spmd = spmd_addr(cpu, *spgd, vaddr); __guest_set_pte()
948 pte_t *spte = spte_addr(cpu, *spgd, vaddr); __guest_set_pte()
990 unsigned long gpgdir, unsigned long vaddr, pte_t gpte) guest_set_pte()
993 if (vaddr >= switcher_addr) { guest_set_pte()
1002 if (vaddr >= cpu->lg->kernel_address) { guest_set_pte()
1006 __guest_set_pte(cpu, i, vaddr, gpte); guest_set_pte()
1012 __guest_set_pte(cpu, pgdir, vaddr, gpte); guest_set_pte()
152 gpte_addr(struct lg_cpu *cpu, pmd_t gpmd, unsigned long vaddr) gpte_addr() argument
162 gpte_addr(struct lg_cpu *cpu, pgd_t gpgd, unsigned long vaddr) gpte_addr() argument
932 __guest_set_pte(struct lg_cpu *cpu, int idx, unsigned long vaddr, pte_t gpte) __guest_set_pte() argument
989 guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, unsigned long vaddr, pte_t gpte) guest_set_pte() argument
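The guest_pa()/__guest_pa() hits above resolve a guest virtual address by walking the guest's own page tables with lgread(); condensed to the non-PAE path, with the present/validity checks elided, the walk is:
	/* Condensed __guest_pa(), two-level case, from the lines above. */
	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);	/* top-level entry */
	gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);	/* leaf PTE */
	*paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);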
/linux-4.4.14/drivers/staging/android/ion/
H A Dion_test.c68 void *vaddr = vmap(&page, 1, VM_MAP, pgprot); ion_handle_test_dma() local
72 if (!vaddr) { ion_handle_test_dma()
78 ret = copy_from_user(vaddr + offset, ptr, to_copy); ion_handle_test_dma()
80 ret = copy_to_user(ptr, vaddr + offset, to_copy); ion_handle_test_dma()
82 vunmap(vaddr); ion_handle_test_dma()
118 void *vaddr = dma_buf_kmap(dma_buf, page_offset); ion_handle_test_kernel() local
120 if (!vaddr) ion_handle_test_kernel()
126 ret = copy_from_user(vaddr + copy_offset, ptr, to_copy); ion_handle_test_kernel()
128 ret = copy_to_user(ptr, vaddr + copy_offset, to_copy); ion_handle_test_kernel()
130 dma_buf_kunmap(dma_buf, page_offset, vaddr); ion_handle_test_kernel()
/linux-4.4.14/arch/sparc/prom/
H A Dmisc_64.c203 unsigned long tte_data, unsigned long vaddr) tlb_load()
212 args[5] = vaddr; tlb_load()
224 unsigned long vaddr) prom_itlb_load()
226 return tlb_load("SUNW,itlb-load", index, tte_data, vaddr); prom_itlb_load()
231 unsigned long vaddr) prom_dtlb_load()
233 return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr); prom_dtlb_load()
237 unsigned long vaddr, unsigned long paddr) prom_map()
249 args[7] = vaddr; prom_map()
262 void prom_unmap(unsigned long size, unsigned long vaddr) prom_unmap() argument
272 args[6] = vaddr; prom_unmap()
202 tlb_load(const char *type, unsigned long index, unsigned long tte_data, unsigned long vaddr) tlb_load() argument
222 prom_itlb_load(unsigned long index, unsigned long tte_data, unsigned long vaddr) prom_itlb_load() argument
229 prom_dtlb_load(unsigned long index, unsigned long tte_data, unsigned long vaddr) prom_dtlb_load() argument
236 prom_map(int mode, unsigned long size, unsigned long vaddr, unsigned long paddr) prom_map() argument
/linux-4.4.14/arch/metag/kernel/
H A Ddma.c225 unsigned long vaddr = c->vm_start; dma_alloc_coherent() local
226 pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr); dma_alloc_coherent()
241 set_pte_at(&init_mm, vaddr, dma_alloc_coherent()
247 vaddr += PAGE_SIZE; dma_alloc_coherent()
272 void *vaddr, dma_addr_t dma_handle) dma_free_coherent()
282 c = metag_vm_region_find(&consistent_head, (unsigned long)vaddr); dma_free_coherent()
329 __func__, vaddr); dma_free_coherent()
426 void dma_sync_for_device(void *vaddr, size_t size, int dma_direction) dma_sync_for_device() argument
444 flush_dcache_region(vaddr, size); dma_sync_for_device()
452 writeback_dcache_region(vaddr, size); dma_sync_for_device()
460 invalidate_dcache_region(vaddr, size); dma_sync_for_device()
473 void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction) dma_sync_for_cpu() argument
488 invalidate_dcache_region(vaddr, size); dma_sync_for_cpu()
271 dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) dma_free_coherent() argument
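dma_sync_for_device() above picks the cache operation from the transfer direction; the mapping, read straight off the hits, is:
	/* Direction-to-cacheop mapping in metag's dma_sync_for_device(). */
	switch (dma_direction) {
	case DMA_BIDIRECTIONAL:
		flush_dcache_region(vaddr, size);	/* writeback + invalidate */
		break;
	case DMA_TO_DEVICE:
		writeback_dcache_region(vaddr, size);
		break;
	case DMA_FROM_DEVICE:
		invalidate_dcache_region(vaddr, size);
		break;
	}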
H A Dtcm.c51 unsigned long vaddr; tcm_alloc() local
58 vaddr = gen_pool_alloc(pool->pool, len); tcm_alloc()
59 if (!vaddr) tcm_alloc()
62 return vaddr; tcm_alloc()
/linux-4.4.14/kernel/events/
H A Duprobes.c110 unsigned long vaddr; /* Page(s) of instruction slots */ member in struct:xol_area
136 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) vaddr_to_offset() argument
138 return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); vaddr_to_offset()
233 static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len) copy_from_page() argument
236 memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len); copy_from_page()
240 static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len) copy_to_page() argument
243 memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); copy_to_page()
247 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode) verify_opcode() argument
261 copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE); verify_opcode()
286 * @vaddr: the virtual address to store the opcode.
287 * @opcode: opcode to be written at @vaddr.
292 int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_write_opcode() argument
300 /* Read the page with vaddr into memory */ uprobe_write_opcode()
301 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); uprobe_write_opcode()
305 ret = verify_opcode(old_page, vaddr, &opcode); uprobe_write_opcode()
314 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); uprobe_write_opcode()
320 copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); uprobe_write_opcode()
322 ret = __replace_page(vma, vaddr, old_page, new_page); uprobe_write_opcode()
336 * @vaddr: the virtual address to insert the opcode.
338 * For mm @mm, store the breakpoint instruction at @vaddr.
341 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) set_swbp() argument
343 return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN); set_swbp()
350 * @vaddr: the virtual address to insert the opcode.
352 * For mm @mm, restore the original opcode (opcode) at @vaddr.
356 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) set_orig_insn() argument
358 return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn); set_orig_insn()
580 struct mm_struct *mm, unsigned long vaddr) prepare_uprobe()
600 ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); prepare_uprobe()
642 struct vm_area_struct *vma, unsigned long vaddr) install_breakpoint()
647 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); install_breakpoint()
659 ret = set_swbp(&uprobe->arch, mm, vaddr); install_breakpoint()
669 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) remove_breakpoint() argument
672 return set_orig_insn(&uprobe->arch, mm, vaddr); remove_breakpoint()
700 unsigned long vaddr; member in struct:map_info
750 info->vaddr = offset_to_vaddr(vma, offset); build_map_info()
803 vma = find_vma(mm, info->vaddr); register_for_each_vma()
808 if (vma->vm_start > info->vaddr || register_for_each_vma()
809 vaddr_to_offset(vma, info->vaddr) != uprobe->offset) register_for_each_vma()
816 err = install_breakpoint(uprobe, mm, vma, info->vaddr); register_for_each_vma()
820 err |= remove_breakpoint(uprobe, mm, info->vaddr); register_for_each_vma()
967 unsigned long vaddr; unapply_uprobe() local
979 vaddr = offset_to_vaddr(vma, uprobe->offset); unapply_uprobe()
980 err |= remove_breakpoint(uprobe, mm, vaddr); unapply_uprobe()
1078 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); uprobe_mmap() local
1079 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); uprobe_mmap()
1138 if (!area->vaddr) { xol_add_vma()
1140 area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, xol_add_vma()
1142 if (area->vaddr & ~PAGE_MASK) { xol_add_vma()
1143 ret = area->vaddr; xol_add_vma()
1148 vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE, xol_add_vma()
1165 static struct xol_area *__create_xol_area(unsigned long vaddr) __create_xol_area() argument
1186 area->vaddr = vaddr; __create_xol_area()
1280 slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES); xol_take_insn_slot()
1328 vma_end = area->vaddr + PAGE_SIZE; xol_free_insn_slot()
1329 if (area->vaddr <= slot_addr && slot_addr < vma_end) { xol_free_insn_slot()
1333 offset = slot_addr - area->vaddr; xol_free_insn_slot()
1348 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, arch_uprobe_copy_ixol() argument
1352 copy_to_page(page, vaddr, src, len); arch_uprobe_copy_ixol()
1379 return utask->vaddr; uprobe_get_trap_addr()
1502 t->utask->dup_xol_addr = area->vaddr; uprobe_copy_process()
1508 * Current area->vaddr notion assume the trampoline address is always
1509 * equal area->vaddr.
1521 trampoline_vaddr = area->vaddr; get_trampoline_vaddr()
1622 utask->vaddr = bp_vaddr; pre_ssout()
1688 static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) is_trap_at_addr() argument
1695 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr, is_trap_at_addr()
1702 result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL); is_trap_at_addr()
1706 copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); is_trap_at_addr()
579 prepare_uprobe(struct uprobe *uprobe, struct file *file, struct mm_struct *mm, unsigned long vaddr) prepare_uprobe() argument
641 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long vaddr) install_breakpoint() argument
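The uprobe_write_opcode() hits above show that a breakpoint is never poked through the existing mapping: the page containing vaddr is read, verified, duplicated, patched, then swapped in. A condensed sketch of that flow (the copy between alloc_page_vma() and copy_to_page() is not in the hits; copy_highpage() is an assumption about that step):
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
	ret = verify_opcode(old_page, vaddr, &opcode);	/* opcode already as desired? */
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	copy_highpage(new_page, old_page);		/* assumed copy step */
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	ret = __replace_page(vma, vaddr, old_page, new_page);
set_swbp() and set_orig_insn() are then one-liners over this, writing UPROBE_SWBP_INSN or the saved original instruction respectively.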
/linux-4.4.14/drivers/infiniband/hw/qib/
H A Dqib_keys.c180 isge->vaddr = (void *) sge->addr; qib_lkey_ok()
227 isge->vaddr = mr->map[m]->segs[n].vaddr + off; qib_lkey_ok()
244 * @vaddr: virtual address to place data
253 u32 len, u64 vaddr, u32 rkey, int acc) qib_rkey_ok()
279 sge->vaddr = (void *) vaddr; qib_rkey_ok()
292 off = vaddr - mr->iova; qib_rkey_ok()
293 if (unlikely(vaddr < mr->iova || off + len > mr->length || qib_rkey_ok()
326 sge->vaddr = mr->map[m]->segs[n].vaddr + off; qib_rkey_ok()
380 mrg->map[m]->segs[n].vaddr = (void *) page_list[i]; qib_reg_mr()
252 qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, u32 len, u64 vaddr, u32 rkey, int acc) qib_rkey_ok() argument
/linux-4.4.14/arch/powerpc/include/asm/
H A Dcacheflush.h60 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
63 flush_icache_user_range(vma, page, vaddr, len); \
65 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Ddma-mapping.h30 void *vaddr, dma_addr_t dma_handle,
48 extern void __dma_free_coherent(size_t size, void *vaddr);
49 extern void __dma_sync(void *vaddr, size_t size, int direction);
160 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, dma_cache_sync() argument
164 __dma_sync(vaddr, size, (int)direction); dma_cache_sync()
/linux-4.4.14/arch/openrisc/include/asm/
H A Dpage.h43 #define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
49 #define clear_user_page(page, vaddr, pg) clear_page(page)
50 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
H A Dfixmap.h81 static inline unsigned long virt_to_fix(const unsigned long vaddr) virt_to_fix() argument
83 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); virt_to_fix()
84 return __virt_to_fix(vaddr); virt_to_fix()
/linux-4.4.14/arch/arm/plat-samsung/
H A Dpm-debug.c59 unsigned long vaddr; s3c_pm_uart_base() local
61 debug_ll_addr(&paddr, &vaddr); s3c_pm_uart_base()
63 return (void __iomem *)vaddr; s3c_pm_uart_base()
/linux-4.4.14/crypto/
H A Dscatterwalk.c85 u8 *vaddr; scatterwalk_copychunks() local
90 vaddr = scatterwalk_map(walk); scatterwalk_copychunks()
91 memcpy_dir(buf, vaddr, len_this_page, out); scatterwalk_copychunks()
92 scatterwalk_unmap(vaddr); scatterwalk_copychunks()
/linux-4.4.14/arch/hexagon/mm/
H A Dvm_tlb.c49 void flush_tlb_one(unsigned long vaddr) flush_tlb_one() argument
51 __vmclrmap((void *)vaddr, PAGE_SIZE); flush_tlb_one()
78 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr) flush_tlb_page() argument
83 __vmclrmap((void *)vaddr, PAGE_SIZE); flush_tlb_page()
/linux-4.4.14/drivers/xen/xenbus/
H A Dxenbus_client.c78 void **vaddr);
79 int (*unmap)(struct xenbus_device *dev, void *vaddr);
371 * @vaddr: starting virtual address of the ring
375 * Grant access to the given @vaddr to the peer of the given device.
380 int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, xenbus_grant_ring() argument
388 virt_to_gfn(vaddr), 0); xenbus_grant_ring()
396 vaddr = vaddr + XEN_PAGE_SIZE; xenbus_grant_ring()
459 * @vaddr: pointer to address to be filled out by mapping
464 * sets *vaddr to that address. Returns 0 on success, and GNTST_*
470 unsigned int nr_grefs, void **vaddr) xenbus_map_ring_valloc()
472 return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); xenbus_map_ring_valloc()
544 void **vaddr) xenbus_map_ring_valloc_pv()
554 *vaddr = NULL; xenbus_map_ring_valloc_pv()
586 *vaddr = area->addr; xenbus_map_ring_valloc_pv()
614 unsigned long vaddr = (unsigned long)gfn_to_virt(gfn); xenbus_map_ring_setup_grant_hvm() local
616 info->phys_addrs[info->idx] = vaddr; xenbus_map_ring_setup_grant_hvm()
617 info->addrs[info->idx] = vaddr; xenbus_map_ring_setup_grant_hvm()
625 void **vaddr) xenbus_map_ring_valloc_hvm()
639 *vaddr = NULL; xenbus_map_ring_valloc_hvm()
673 *vaddr = addr; xenbus_map_ring_valloc_hvm()
698 * @leaked: fail to clean up a failed map, caller should not free vaddr
708 * should not free the address space of @vaddr.
732 * @vaddr: addr to unmap
741 int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) xenbus_unmap_ring_vfree() argument
743 return ring_ops->unmap(dev, vaddr); xenbus_unmap_ring_vfree()
747 static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) xenbus_unmap_ring_vfree_pv() argument
758 if (node->pv.area->addr == vaddr) { xenbus_unmap_ring_vfree_pv()
769 "can't find mapped virtual address %p", vaddr); xenbus_unmap_ring_vfree_pv()
777 addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i); xenbus_unmap_ring_vfree_pv()
828 static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) xenbus_unmap_ring_vfree_hvm() argument
841 if (addr == vaddr) { xenbus_unmap_ring_vfree_hvm()
852 "can't find mapped virtual address %p", vaddr); xenbus_unmap_ring_vfree_hvm()
865 vunmap(vaddr); xenbus_unmap_ring_vfree_hvm()
869 WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages); xenbus_unmap_ring_vfree_hvm()
469 xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs, unsigned int nr_grefs, void **vaddr) xenbus_map_ring_valloc() argument
541 xenbus_map_ring_valloc_pv(struct xenbus_device *dev, grant_ref_t *gnt_refs, unsigned int nr_grefs, void **vaddr) xenbus_map_ring_valloc_pv() argument
622 xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, grant_ref_t *gnt_ref, unsigned int nr_grefs, void **vaddr) xenbus_map_ring_valloc_hvm() argument
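The kernel-doc in the hits above implies the usual pairing: the frontend shares its ring with xenbus_grant_ring(), the backend maps it by grant reference and frees it by vaddr. A hedged sketch (the trailing arguments of xenbus_grant_ring() are truncated in the hits; the multi-page form below is an assumption):
	grant_ref_t gref;
	err = xenbus_grant_ring(dev, ring_vaddr, 1, &gref);	/* frontend */

	void *vaddr;
	err = xenbus_map_ring_valloc(dev, &gref, 1, &vaddr);	/* backend map */
	/* ... use the shared ring ... */
	err = xenbus_unmap_ring_vfree(dev, vaddr);		/* backend unmap */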
/linux-4.4.14/drivers/staging/rdma/hfi1/
H A Dkeys.c203 isge->vaddr = (void *) sge->addr; hfi1_lkey_ok()
249 isge->vaddr = mr->map[m]->segs[n].vaddr + off; hfi1_lkey_ok()
266 * @vaddr: virtual address to place data
275 u32 len, u64 vaddr, u32 rkey, int acc) hfi1_rkey_ok()
300 sge->vaddr = (void *) vaddr; hfi1_rkey_ok()
313 off = vaddr - mr->iova; hfi1_rkey_ok()
314 if (unlikely(vaddr < mr->iova || off + len > mr->length || hfi1_rkey_ok()
346 sge->vaddr = mr->map[m]->segs[n].vaddr + off; hfi1_rkey_ok()
274 hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge, u32 len, u64 vaddr, u32 rkey, int acc) hfi1_rkey_ok() argument
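ipath, qib and hfi1 all gate a remote access the same way in their *_rkey_ok() above: the requested window must sit inside the registered MR before sge->vaddr is pointed into it. The shared check, as it appears in the hits:
	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length))
		goto bail;	/* window escapes the registered region */
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;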
/linux-4.4.14/arch/unicore32/include/asm/
H A Ddma-mapping.h53 static inline void dma_cache_sync(struct device *dev, void *vaddr, dma_cache_sync() argument
56 unsigned long start = (unsigned long)vaddr; dma_cache_sync()
H A Dpage.h28 #define clear_user_page(page, vaddr, pg) clear_page(page)
29 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/linux-4.4.14/arch/c6x/include/asm/
H A Dcacheflush.h56 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
62 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.4.14/arch/microblaze/include/asm/
H A Ddma-mapping.h65 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, dma_cache_sync() argument
69 __dma_sync(virt_to_phys(vaddr), size, (int)direction); dma_cache_sync()
H A Dpage.h81 # define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
82 # define copy_user_page(vto, vfrom, vaddr, topg) \
147 # define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
157 # define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
175 #define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
/linux-4.4.14/arch/h8300/kernel/
H A Ddma.c34 void *vaddr, dma_addr_t dma_handle, dma_free()
38 free_pages((unsigned long)vaddr, get_order(size)); dma_free()
33 dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) dma_free() argument
/linux-4.4.14/fs/minix/
H A Dminix.h124 static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size) minix_find_first_zero_bit() argument
126 const unsigned short *p = vaddr, *addr = vaddr; minix_find_first_zero_bit()
149 static inline int minix_test_bit(int nr, const void *vaddr) minix_test_bit() argument
151 const unsigned short *p = vaddr; minix_test_bit()
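minix_test_bit() above indexes the bitmap as an array of 16-bit words: bit nr lives at bit (nr & 15) of word nr >> 4. An equivalent restatement (the example_ prefix is not from the tree):
	static inline int example_minix_test_bit(int nr, const void *vaddr)
	{
		const unsigned short *p = vaddr;
		return (p[nr >> 4] >> (nr & 15)) & 1;
	}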
/linux-4.4.14/drivers/scsi/sym53c8xx_2/
H A Dsym_malloc.c226 void *vaddr; ___get_dma_mem_cluster() local
232 vaddr = sym_m_get_dma_mem_cluster(mp, vbp); ___get_dma_mem_cluster()
233 if (vaddr) { ___get_dma_mem_cluster()
234 int hc = VTOB_HASH_CODE(vaddr); ___get_dma_mem_cluster()
239 return vaddr; ___get_dma_mem_cluster()
252 while (*vbpp && (*vbpp)->vaddr != m) ___free_dma_mem_cluster()
370 while (vp && vp->vaddr != a) __vtobus()
/linux-4.4.14/drivers/net/wireless/orinoco/
H A Dairport.c29 void __iomem *vaddr; member in struct:airport
101 if (card->vaddr) airport_detach()
102 iounmap(card->vaddr); airport_detach()
103 card->vaddr = NULL; airport_detach()
184 card->vaddr = ioremap(phys_addr, AIRPORT_IO_LEN); airport_attach()
185 if (!card->vaddr) { airport_attach()
190 hermes_struct_init(hw, card->vaddr, HERMES_16BIT_REGSPACING); airport_attach()
/linux-4.4.14/drivers/net/wireless/ath/ath6kl/
H A Dtarget.h333 #define AR6003_VTOP(vaddr) ((vaddr) & 0x001fffff)
334 #define AR6004_VTOP(vaddr) (vaddr)
336 #define TARG_VTOP(target_type, vaddr) \
337 (((target_type) == TARGET_TYPE_AR6003) ? AR6003_VTOP(vaddr) : \
338 (((target_type) == TARGET_TYPE_AR6004) ? AR6004_VTOP(vaddr) : 0))
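TARG_VTOP() above strips a firmware virtual address down to what the bus expects: AR6003 keeps only the low 21 bits, AR6004 passes it through unchanged. A usage sketch (the call site below is hypothetical, not from the hits):
	u32 addr = TARG_VTOP(ar->target_type, vaddr);
	ath6kl_diag_write(ar, addr, buf, len);	/* hypothetical call site */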
/linux-4.4.14/arch/alpha/mm/
H A Dinit.c193 unsigned long vaddr; callback_init() local
205 vaddr = (unsigned long)console_remap_vm.addr; callback_init()
211 crb->map[i].va = vaddr; callback_init()
216 if (pmd != pmd_offset(pgd, vaddr)) { callback_init()
218 pmd = pmd_offset(pgd, vaddr); callback_init()
222 set_pte(pte_offset_kernel(pmd, vaddr), callback_init()
225 vaddr += PAGE_SIZE; callback_init()
/linux-4.4.14/net/netfilter/ipvs/
H A Dip_vs_pe_sip.c117 p->vaddr, &ct->vaddr) && ip_vs_sip_ct_match()
128 IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport), ip_vs_sip_ct_match()
/linux-4.4.14/drivers/i2c/busses/
H A Di2c-ibm_iic.c85 volatile struct iic_regs __iomem *iic = dev->vaddr; dump_iic_regs()
129 out_8(&dev->vaddr->intmsk, enable ? INTRMSK_EIMTC : 0); iic_interrupt_mode()
137 volatile struct iic_regs __iomem *iic = dev->vaddr; iic_dev_init()
182 volatile struct iic_regs __iomem *iic = dev->vaddr; iic_dev_reset()
243 volatile struct iic_regs __iomem *iic = dev->vaddr; iic_smbus_quick()
329 volatile struct iic_regs __iomem *iic = dev->vaddr; iic_handler()
347 volatile struct iic_regs __iomem *iic = dev->vaddr; iic_xfer_result()
380 volatile struct iic_regs __iomem *iic = dev->vaddr; iic_abort_xfer()
412 volatile struct iic_regs __iomem *iic = dev->vaddr; iic_wait_for_tc()
463 volatile struct iic_regs __iomem *iic = dev->vaddr; iic_xfer_bytes()
521 volatile struct iic_regs __iomem *iic = dev->vaddr; iic_address()
557 volatile struct iic_regs __iomem *iic = dev->vaddr; iic_xfer()
710 dev->vaddr = of_iomap(np, 0); iic_probe()
711 if (dev->vaddr == NULL) { iic_probe()
770 if (dev->vaddr) iic_probe()
771 iounmap(dev->vaddr); iic_probe()
791 iounmap(dev->vaddr); iic_remove()
/linux-4.4.14/drivers/media/platform/exynos4-is/
H A Dfimc-is.c241 buf = is->memory.vaddr + is->setfile.base; fimc_is_load_setfile()
246 pr_debug("mem vaddr: %p, setfile buf: %p\n", is->memory.vaddr, buf); fimc_is_load_setfile()
317 memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size); fimc_is_start_firmware()
337 is->memory.vaddr = dma_alloc_coherent(dev, FIMC_IS_CPU_MEM_SIZE, fimc_is_alloc_cpu_memory()
339 if (is->memory.vaddr == NULL) fimc_is_alloc_cpu_memory()
343 memset(is->memory.vaddr, 0, is->memory.size); fimc_is_alloc_cpu_memory()
350 dma_free_coherent(dev, is->memory.size, is->memory.vaddr, fimc_is_alloc_cpu_memory()
355 is->is_p_region = (struct is_region *)(is->memory.vaddr + fimc_is_alloc_cpu_memory()
361 is->is_shared_region = (struct is_share_region *)(is->memory.vaddr + fimc_is_alloc_cpu_memory()
370 if (is->memory.vaddr == NULL) fimc_is_free_cpu_memory()
373 dma_free_coherent(dev, is->memory.size, is->memory.vaddr, fimc_is_free_cpu_memory()
403 memcpy(is->memory.vaddr, fw->data, fw->size); fimc_is_load_firmware()
407 buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_DESC_LEN); fimc_is_load_firmware()
411 buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_VER_LEN); fimc_is_load_firmware()
734 const u8 *buf = is->memory.vaddr + FIMC_IS_DEBUG_REGION_OFFSET; fimc_is_log_show()
736 if (is->memory.vaddr == NULL) { fimc_is_log_show()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
H A Dgk20a.c58 u32 *vaddr; member in struct:gk20a_instobj
93 /* protects vaddr_* and gk20a_instobj::vaddr* */
182 vunmap(obj->vaddr); gk20a_instmem_vaddr_gc()
183 obj->vaddr = NULL; gk20a_instmem_vaddr_gc()
185 nvkm_debug(&imem->base.subdev, "(GC) vaddr used: %x/%x\n", gk20a_instmem_vaddr_gc()
204 if (node->vaddr) { gk20a_instobj_acquire()
214 node->vaddr = imem->cpu_map(memory); gk20a_instobj_acquire()
216 if (!node->vaddr) { gk20a_instobj_acquire()
223 nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", gk20a_instobj_acquire()
229 return node->vaddr; gk20a_instobj_acquire()
256 return node->vaddr[offset / 4]; gk20a_instobj_rd32()
264 node->vaddr[offset / 4] = data; gk20a_instobj_wr32()
287 if (!node->vaddr) gk20a_instobj_dtor()
296 vunmap(node->vaddr); gk20a_instobj_dtor()
297 node->vaddr = NULL; gk20a_instobj_dtor()
299 nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", gk20a_instobj_dtor()
/linux-4.4.14/drivers/parisc/
H A Diommu-helpers.h30 unsigned long vaddr; iommu_fill_pdir() local
69 vaddr = (unsigned long)sg_virt(startsg); iommu_fill_pdir()
78 vaddr, hint); iommu_fill_pdir()
79 vaddr += IOVP_SIZE; iommu_fill_pdir()
/linux-4.4.14/drivers/net/wireless/ath/ath10k/
H A Dhtt_tx.c112 htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, ath10k_htt_tx_alloc()
115 if (!htt->txbuf.vaddr) { ath10k_htt_tx_alloc()
125 htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size, ath10k_htt_tx_alloc()
128 if (!htt->frag_desc.vaddr) { ath10k_htt_tx_alloc()
140 dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr, ath10k_htt_tx_alloc()
170 if (htt->txbuf.vaddr) { ath10k_htt_tx_free()
173 dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr, ath10k_htt_tx_free()
177 if (htt->frag_desc.vaddr) { ath10k_htt_tx_free()
180 dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr, ath10k_htt_tx_free()
361 fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); ath10k_htt_send_rx_ring_cfg_ll()
570 skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id]; ath10k_htt_tx()
600 memset(&htt->frag_desc.vaddr[msdu_id], 0, ath10k_htt_tx()
603 &htt->frag_desc.vaddr[msdu_id].frags; ath10k_htt_tx()
604 ext_desc = &htt->frag_desc.vaddr[msdu_id]; ath10k_htt_tx()
699 sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr; ath10k_htt_tx()
708 sg_items[1].vaddr = msdu->data; ath10k_htt_tx()
/linux-4.4.14/drivers/acpi/apei/
H A Dghes.c149 unsigned long vaddr; ghes_ioremap_pfn_nmi() local
151 vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr); ghes_ioremap_pfn_nmi()
152 ioremap_page_range(vaddr, vaddr + PAGE_SIZE, ghes_ioremap_pfn_nmi()
155 return (void __iomem *)vaddr; ghes_ioremap_pfn_nmi()
160 unsigned long vaddr, paddr; ghes_ioremap_pfn_irq() local
163 vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr); ghes_ioremap_pfn_irq()
168 ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot); ghes_ioremap_pfn_irq()
170 return (void __iomem *)vaddr; ghes_ioremap_pfn_irq()
175 unsigned long vaddr = (unsigned long __force)vaddr_ptr; ghes_iounmap_nmi() local
178 BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base)); ghes_iounmap_nmi()
179 unmap_kernel_range_noflush(vaddr, PAGE_SIZE); ghes_iounmap_nmi()
180 arch_apei_flush_tlb_one(vaddr); ghes_iounmap_nmi()
185 unsigned long vaddr = (unsigned long __force)vaddr_ptr; ghes_iounmap_irq() local
188 BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base)); ghes_iounmap_irq()
189 unmap_kernel_range_noflush(vaddr, PAGE_SIZE); ghes_iounmap_irq()
190 arch_apei_flush_tlb_one(vaddr); ghes_iounmap_irq()
299 void __iomem *vaddr; ghes_copy_tofrom_phys() local
309 vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT); ghes_copy_tofrom_phys()
312 vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT); ghes_copy_tofrom_phys()
317 memcpy_fromio(buffer, vaddr + offset, trunk); ghes_copy_tofrom_phys()
319 memcpy_toio(vaddr + offset, buffer, trunk); ghes_copy_tofrom_phys()
324 ghes_iounmap_nmi(vaddr); ghes_copy_tofrom_phys()
327 ghes_iounmap_irq(vaddr); ghes_copy_tofrom_phys()
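ghes_copy_tofrom_phys() above copies through a fixed per-context window, one pfn at a time, so it stays usable from NMI context. The per-chunk body, condensed from the hits:
	vaddr = in_nmi ? ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT)
		       : ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
	if (from_phys)
		memcpy_fromio(buffer, vaddr + offset, trunk);
	else
		memcpy_toio(vaddr + offset, buffer, trunk);
	/* then ghes_iounmap_nmi()/ghes_iounmap_irq() and advance to the next chunk */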
/linux-4.4.14/include/media/
H A Dvideobuf-vmalloc.h25 void *vaddr; member in struct:videobuf_vmalloc_memory
/linux-4.4.14/arch/s390/kvm/
H A Dgaccess.c543 union vaddress vaddr = {.addr = gva}; guest_translate() local
559 if (vaddr.rfx01 > asce.tl) guest_translate()
561 ptr += vaddr.rfx * 8; guest_translate()
564 if (vaddr.rfx) guest_translate()
566 if (vaddr.rsx01 > asce.tl) guest_translate()
568 ptr += vaddr.rsx * 8; guest_translate()
571 if (vaddr.rfx || vaddr.rsx) guest_translate()
573 if (vaddr.rtx01 > asce.tl) guest_translate()
575 ptr += vaddr.rtx * 8; guest_translate()
578 if (vaddr.rfx || vaddr.rsx || vaddr.rtx) guest_translate()
580 if (vaddr.sx01 > asce.tl) guest_translate()
582 ptr += vaddr.sx * 8; guest_translate()
597 if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl) guest_translate()
601 ptr = rfte.rto * 4096 + vaddr.rsx * 8; guest_translate()
615 if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl) guest_translate()
619 ptr = rste.rto * 4096 + vaddr.rtx * 8; guest_translate()
640 if (vaddr.sx01 < rtte.fc0.tf) guest_translate()
642 if (vaddr.sx01 > rtte.fc0.tl) guest_translate()
646 ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8; guest_translate()
668 ptr = ste.fc0.pto * 2048 + vaddr.px * 8; guest_translate()
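guest_translate() above indexes each table level with the matching field of union vaddress. Assuming the standard z/Architecture split (11-bit region and segment indexes, 8-bit page index, 12-bit byte offset), the decomposition is:
	rfx = (gva >> 53) & 0x7ff;	/* region-first index  */
	rsx = (gva >> 42) & 0x7ff;	/* region-second index */
	rtx = (gva >> 31) & 0x7ff;	/* region-third index  */
	sx  = (gva >> 20) & 0x7ff;	/* segment index       */
	px  = (gva >> 12) & 0xff;	/* page index          */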
/linux-4.4.14/arch/sh/include/cpu-sh4/cpu/
H A Dsq.h33 void sq_unmap(unsigned long vaddr);
