Lines matching refs:addr in mm/vmalloc.c

59 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)  in vunmap_pte_range()  argument
63 pte = pte_offset_kernel(pmd, addr); in vunmap_pte_range()
65 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); in vunmap_pte_range()
67 } while (pte++, addr += PAGE_SIZE, addr != end); in vunmap_pte_range()
70 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) in vunmap_pmd_range() argument
75 pmd = pmd_offset(pud, addr); in vunmap_pmd_range()
77 next = pmd_addr_end(addr, end); in vunmap_pmd_range()
82 vunmap_pte_range(pmd, addr, next); in vunmap_pmd_range()
83 } while (pmd++, addr = next, addr != end); in vunmap_pmd_range()
86 static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) in vunmap_pud_range() argument
91 pud = pud_offset(pgd, addr); in vunmap_pud_range()
93 next = pud_addr_end(addr, end); in vunmap_pud_range()
98 vunmap_pmd_range(pud, addr, next); in vunmap_pud_range()
99 } while (pud++, addr = next, addr != end); in vunmap_pud_range()
102 static void vunmap_page_range(unsigned long addr, unsigned long end) in vunmap_page_range() argument
107 BUG_ON(addr >= end); in vunmap_page_range()
108 pgd = pgd_offset_k(addr); in vunmap_page_range()
110 next = pgd_addr_end(addr, end); in vunmap_page_range()
113 vunmap_pud_range(pgd, addr, next); in vunmap_page_range()
114 } while (pgd++, addr = next, addr != end); in vunmap_page_range()
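
The four functions above are the standard top-down page-table teardown: each level clamps its sub-range with a *_addr_end() helper, recurses one level down, and the do/while comma expression steps the table pointer and addr together. A minimal runnable model of the clamping helper and loop shape, assuming an illustrative 2 MB span per pmd entry (the real macros live in the asm-generic pgtable headers):

	#include <stdio.h>

	#define PMD_SIZE	(1UL << 21)		/* illustrative: 2 MB per pmd entry */
	#define PMD_MASK	(~(PMD_SIZE - 1))

	/* Same shape as the kernel's pmd_addr_end(): the next pmd boundary,
	 * clamped to 'end'. The "- 1" comparison keeps a boundary that wraps
	 * to 0 at the top of the address space from looking smaller than end. */
	static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
	{
		unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

		return (boundary - 1 < end - 1) ? boundary : end;
	}

	int main(void)
	{
		unsigned long addr = 0x1ff000, end = 0x600000, next;

		do {	/* same loop shape as vunmap_pmd_range() */
			next = pmd_addr_end(addr, end);
			printf("range [%#lx, %#lx)\n", addr, next);
		} while (addr = next, addr != end);
		return 0;
	}
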
117 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, in vmap_pte_range() argument
127 pte = pte_alloc_kernel(pmd, addr); in vmap_pte_range()
137 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); in vmap_pte_range()
139 } while (pte++, addr += PAGE_SIZE, addr != end); in vmap_pte_range()
143 static int vmap_pmd_range(pud_t *pud, unsigned long addr, in vmap_pmd_range() argument
149 pmd = pmd_alloc(&init_mm, pud, addr); in vmap_pmd_range()
153 next = pmd_addr_end(addr, end); in vmap_pmd_range()
154 if (vmap_pte_range(pmd, addr, next, prot, pages, nr)) in vmap_pmd_range()
156 } while (pmd++, addr = next, addr != end); in vmap_pmd_range()
160 static int vmap_pud_range(pgd_t *pgd, unsigned long addr, in vmap_pud_range() argument
166 pud = pud_alloc(&init_mm, pgd, addr); in vmap_pud_range()
170 next = pud_addr_end(addr, end); in vmap_pud_range()
171 if (vmap_pmd_range(pud, addr, next, prot, pages, nr)) in vmap_pud_range()
173 } while (pud++, addr = next, addr != end); in vmap_pud_range()
188 unsigned long addr = start; in vmap_page_range_noflush() local
192 BUG_ON(addr >= end); in vmap_page_range_noflush()
193 pgd = pgd_offset_k(addr); in vmap_page_range_noflush()
195 next = pgd_addr_end(addr, end); in vmap_page_range_noflush()
196 err = vmap_pud_range(pgd, addr, next, prot, pages, &nr); in vmap_page_range_noflush()
199 } while (pgd++, addr = next, addr != end); in vmap_page_range_noflush()
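
The mapping side mirrors the teardown walk, but allocates tables on the way down and threads an error code back up; *nr indexes the caller's pages[] array so each new PTE consumes the next page. A hedged reconstruction of the leaf level (the WARN_ON checks and error codes are from memory of kernels of this vintage):

	static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page **pages, int *nr)
	{
		pte_t *pte;

		/* Allocate the pte page for this pmd slot if not already present. */
		pte = pte_alloc_kernel(pmd, addr);
		if (!pte)
			return -ENOMEM;
		do {
			struct page *page = pages[*nr];

			if (WARN_ON(!pte_none(*pte)))
				return -EBUSY;	/* slot already mapped */
			if (WARN_ON(!page))
				return -ENOMEM;	/* caller handed us a hole */
			set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
			(*nr)++;
		} while (pte++, addr += PAGE_SIZE, addr != end);
		return 0;
	}
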
222 unsigned long addr = (unsigned long)x; in is_vmalloc_or_module_addr() local
223 if (addr >= MODULES_VADDR && addr < MODULES_END) in is_vmalloc_or_module_addr()
234 unsigned long addr = (unsigned long) vmalloc_addr; in vmalloc_to_page() local
236 pgd_t *pgd = pgd_offset_k(addr); in vmalloc_to_page()
245 pud_t *pud = pud_offset(pgd, addr); in vmalloc_to_page()
247 pmd_t *pmd = pmd_offset(pud, addr); in vmalloc_to_page()
251 ptep = pte_offset_map(pmd, addr); in vmalloc_to_page()
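
vmalloc pages are not physically contiguous, so turning a vmalloc address back into its struct page means walking init_mm's page tables by hand, exactly as the hits above suggest. A hedged reconstruction of the whole function (pre-p4d layout, matching this listing; later kernels add huge-mapping checks):

	struct page *vmalloc_to_page(const void *vmalloc_addr)
	{
		unsigned long addr = (unsigned long)vmalloc_addr;
		struct page *page = NULL;
		pgd_t *pgd = pgd_offset_k(addr);	/* kernel (init_mm) tables */

		if (!pgd_none(*pgd)) {
			pud_t *pud = pud_offset(pgd, addr);
			if (!pud_none(*pud)) {
				pmd_t *pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd)) {
					pte_t *ptep, pte;

					ptep = pte_offset_map(pmd, addr);
					pte = *ptep;
					if (pte_present(pte))
						page = pte_page(pte);
					pte_unmap(ptep);
				}
			}
		}
		return page;
	}
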
292 static struct vmap_area *__find_vmap_area(unsigned long addr) in __find_vmap_area() argument
300 if (addr < va->va_start) in __find_vmap_area()
302 else if (addr >= va->va_end) in __find_vmap_area()
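
vmap_areas live in an rbtree keyed on the half-open interval [va_start, va_end); the two comparisons above are the branch decisions of an ordinary binary search. A hedged reconstruction:

	static struct vmap_area *__find_vmap_area(unsigned long addr)
	{
		struct rb_node *n = vmap_area_root.rb_node;

		while (n) {
			struct vmap_area *va = rb_entry(n, struct vmap_area, rb_node);

			if (addr < va->va_start)
				n = n->rb_left;		/* area lies entirely above addr */
			else if (addr >= va->va_end)
				n = n->rb_right;	/* area lies entirely below addr */
			else
				return va;		/* va_start <= addr < va_end */
		}
		return NULL;
	}
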
356 unsigned long addr; in alloc_vmap_area() local
401 addr = ALIGN(first->va_end, align); in alloc_vmap_area()
402 if (addr < vstart) in alloc_vmap_area()
404 if (addr + size < addr) in alloc_vmap_area()
408 addr = ALIGN(vstart, align); in alloc_vmap_area()
409 if (addr + size < addr) in alloc_vmap_area()
418 if (tmp->va_end >= addr) { in alloc_vmap_area()
420 if (tmp->va_start <= addr) in alloc_vmap_area()
432 while (addr + size > first->va_start && addr + size <= vend) { in alloc_vmap_area()
433 if (addr + cached_hole_size < first->va_start) in alloc_vmap_area()
434 cached_hole_size = first->va_start - addr; in alloc_vmap_area()
435 addr = ALIGN(first->va_end, align); in alloc_vmap_area()
436 if (addr + size < addr) in alloc_vmap_area()
447 if (addr + size > vend) in alloc_vmap_area()
450 va->va_start = addr; in alloc_vmap_area()
451 va->va_end = addr + size; in alloc_vmap_area()
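
The repeated `addr + size < addr` tests in alloc_vmap_area() guard against unsigned wraparound: if the candidate range spills past the top of the address space, the sum wraps and compares smaller than addr itself. A runnable illustration:

	#include <stdio.h>
	#include <limits.h>

	int main(void)
	{
		unsigned long addr = ULONG_MAX - 0x1000 + 1;	/* last 4 KB of the space */
		unsigned long size = 0x2000;			/* request spills past the top */

		if (addr + size < addr)				/* wrapped: sum < addr */
			printf("overflow: %#lx + %#lx wraps\n", addr, size);
		return 0;
	}
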
706 static struct vmap_area *find_vmap_area(unsigned long addr) in find_vmap_area() argument
711 va = __find_vmap_area(addr); in find_vmap_area()
717 static void free_unmap_vmap_area_addr(unsigned long addr) in free_unmap_vmap_area_addr() argument
721 va = find_vmap_area(addr); in free_unmap_vmap_area_addr()
792 static unsigned long addr_to_vb_idx(unsigned long addr) in addr_to_vb_idx() argument
794 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); in addr_to_vb_idx()
795 addr /= VMAP_BLOCK_SIZE; in addr_to_vb_idx()
796 return addr; in addr_to_vb_idx()
801 unsigned long addr; in vmap_block_vaddr() local
803 addr = va_start + (pages_off << PAGE_SHIFT); in vmap_block_vaddr()
804 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); in vmap_block_vaddr()
805 return (void *)addr; in vmap_block_vaddr()
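
addr_to_vb_idx() maps any address to the index of its vmap block: subtract VMALLOC_START rounded *down* to a block boundary, then divide by the block size, so every address inside one block yields the same index (which the BUG_ON in vmap_block_vaddr() asserts). A runnable sketch with illustrative constants (the real VMAP_BLOCK_SIZE is derived from VMAP_BBMAP_BITS and PAGE_SIZE, so both values below are assumptions):

	#include <stdio.h>

	#define VMALLOC_START	0xffffc90000000000UL	/* x86-64 value, illustrative */
	#define VMAP_BLOCK_SIZE	(64UL << 12)		/* assumed: 64 pages per block */

	static unsigned long addr_to_vb_idx(unsigned long addr)
	{
		/* Round the base down to a block boundary so indices stay
		 * stable even if VMALLOC_START itself is not block aligned. */
		addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE - 1);
		addr /= VMAP_BLOCK_SIZE;
		return addr;
	}

	int main(void)
	{
		unsigned long a = VMALLOC_START + 5 * VMAP_BLOCK_SIZE + 123;

		/* Same index for every address in the block... */
		printf("idx=%lu idx=%lu\n", addr_to_vb_idx(a),
		       addr_to_vb_idx(a + VMAP_BLOCK_SIZE - 200));
		/* ...and the mask vb_free() uses gives the offset inside it. */
		printf("offset=%#lx\n", a & (VMAP_BLOCK_SIZE - 1));
		return 0;
	}
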
985 static void vb_free(const void *addr, unsigned long size) in vb_free() argument
995 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size); in vb_free()
999 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1); in vb_free()
1002 vb_idx = addr_to_vb_idx((unsigned long)addr); in vb_free()
1008 vunmap_page_range((unsigned long)addr, (unsigned long)addr + size); in vb_free()
1083 unsigned long addr = (unsigned long)mem; in vm_unmap_ram() local
1085 BUG_ON(!addr); in vm_unmap_ram()
1086 BUG_ON(addr < VMALLOC_START); in vm_unmap_ram()
1087 BUG_ON(addr > VMALLOC_END); in vm_unmap_ram()
1088 BUG_ON(addr & (PAGE_SIZE-1)); in vm_unmap_ram()
1091 vmap_debug_free_range(addr, addr+size); in vm_unmap_ram()
1096 free_unmap_vmap_area_addr(addr); in vm_unmap_ram()
1118 unsigned long addr; in vm_map_ram() local
1125 addr = (unsigned long)mem; in vm_map_ram()
1133 addr = va->va_start; in vm_map_ram()
1134 mem = (void *)addr; in vm_map_ram()
1136 if (vmap_page_range(addr, addr + size, prot, pages) < 0) { in vm_map_ram()
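
vm_map_ram()/vm_unmap_ram() are the fast, block-based counterparts of vmap()/vunmap() for short-lived mappings. A hedged usage sketch for kernels of this era, where vm_map_ram() still takes a pgprot_t (the helper name is hypothetical and the pages are assumed to come from alloc_page()):

	/* Map 'count' pages as one virtually contiguous region, touch it,
	 * and tear the mapping down again. */
	static void touch_pages(struct page **pages, unsigned int count)
	{
		/* node = -1: no NUMA preference; PAGE_KERNEL: normal RW mapping */
		void *mem = vm_map_ram(pages, count, -1, PAGE_KERNEL);

		if (!mem)
			return;
		memset(mem, 0, count * PAGE_SIZE);
		vm_unmap_ram(mem, count);	/* count must match the map call */
	}
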
1161 if (tmp->addr >= vm->addr) { in vm_area_add_early()
1162 BUG_ON(tmp->addr < vm->addr + vm->size); in vm_area_add_early()
1165 BUG_ON(tmp->addr + tmp->size > vm->addr); in vm_area_add_early()
1186 unsigned long addr; in vm_area_register_early() local
1188 addr = ALIGN(VMALLOC_START + vm_init_off, align); in vm_area_register_early()
1189 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; in vm_area_register_early()
1191 vm->addr = (void *)addr; in vm_area_register_early()
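
vm_area_register_early() is a simple bump allocator over the vmalloc range, usable before the real allocator is initialized: align the running cursor, place the area, then advance the cursor page-aligned past it. A runnable userspace model of that arithmetic (constants are illustrative; PFN_ALIGN rounds up to a page boundary):

	#include <stdio.h>

	#define VMALLOC_START	0x100000UL	/* illustrative base */
	#define PAGE_SIZE	4096UL
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	static unsigned long vm_init_off;	/* running offset, as in vmalloc.c */

	static unsigned long register_early(unsigned long size, unsigned long align)
	{
		unsigned long addr = ALIGN_UP(VMALLOC_START + vm_init_off, align);

		vm_init_off = ALIGN_UP(addr + size, PAGE_SIZE) - VMALLOC_START;
		return addr;
	}

	int main(void)
	{
		printf("%#lx\n", register_early(100, 64));
		printf("%#lx\n", register_early(100, 64));	/* lands on the next page */
		return 0;
	}
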
1218 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
1248 int map_kernel_range_noflush(unsigned long addr, unsigned long size, in map_kernel_range_noflush() argument
1251 return vmap_page_range_noflush(addr, addr + size, prot, pages); in map_kernel_range_noflush()
1268 void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) in unmap_kernel_range_noflush() argument
1270 vunmap_page_range(addr, addr + size); in unmap_kernel_range_noflush()
1282 void unmap_kernel_range(unsigned long addr, unsigned long size) in unmap_kernel_range() argument
1284 unsigned long end = addr + size; in unmap_kernel_range()
1286 flush_cache_vunmap(addr, end); in unmap_kernel_range()
1287 vunmap_page_range(addr, end); in unmap_kernel_range()
1288 flush_tlb_kernel_range(addr, end); in unmap_kernel_range()
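
unmap_kernel_range() shows the required ordering around a teardown: flush the (possibly virtually indexed) cache while the mapping still exists, clear the page tables, then flush the TLB. Callers of the _noflush variants inherit both flushes; a hedged sketch of such a caller (the wrapper name is hypothetical, the body mirrors unmap_kernel_range() above):

	/* Undo a mapping made with map_kernel_range_noflush(), doing the
	 * cache and TLB maintenance the _noflush API leaves to the caller. */
	static void my_unmap_kernel_range(unsigned long addr, unsigned long size)
	{
		flush_cache_vunmap(addr, addr + size);		/* before the PTEs go away */
		unmap_kernel_range_noflush(addr, size);
		flush_tlb_kernel_range(addr, addr + size);	/* after they are gone */
	}
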
1294 unsigned long addr = (unsigned long)area->addr; in map_vm_area() local
1295 unsigned long end = addr + get_vm_area_size(area); in map_vm_area()
1298 err = vmap_page_range(addr, end, prot, pages); in map_vm_area()
1309 vm->addr = (void *)va->va_start; in setup_vmalloc_vm()
1409 struct vm_struct *find_vm_area(const void *addr) in find_vm_area() argument
1413 va = find_vmap_area((unsigned long)addr); in find_vm_area()
1428 struct vm_struct *remove_vm_area(const void *addr) in remove_vm_area() argument
1432 va = find_vmap_area((unsigned long)addr); in remove_vm_area()
1451 static void __vunmap(const void *addr, int deallocate_pages) in __vunmap() argument
1455 if (!addr) in __vunmap()
1458 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", in __vunmap()
1459 addr)) in __vunmap()
1462 area = remove_vm_area(addr); in __vunmap()
1465 addr); in __vunmap()
1469 debug_check_no_locks_freed(addr, area->size); in __vunmap()
1470 debug_check_no_obj_freed(addr, area->size); in __vunmap()
1506 void vfree(const void *addr) in vfree() argument
1510 kmemleak_free(addr); in vfree()
1512 if (!addr) in vfree()
1516 if (llist_add((struct llist_node *)addr, &p->list)) in vfree()
1519 __vunmap(addr, 1); in vfree()
1532 void vunmap(const void *addr) in vunmap() argument
1536 if (addr) in vunmap()
1537 __vunmap(addr, 0); in vunmap()
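
vfree() cannot take the locks __vunmap() needs from interrupt context, so it defers: the llist_add() call above reuses the first word of the dead allocation itself as a lock-free list node, and a per-cpu workqueue frees the batch later. A hedged reconstruction of that path, from memory of kernels of this vintage:

	void vfree(const void *addr)
	{
		BUG_ON(in_nmi());
		kmemleak_free(addr);

		if (!addr)
			return;
		if (unlikely(in_interrupt())) {
			struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);

			/* The freed block's own memory becomes the list node. */
			if (llist_add((struct llist_node *)addr, &p->list))
				schedule_work(&p->wq);	/* first entry: kick the worker */
		} else
			__vunmap(addr, 1);
	}
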
1567 vunmap(area->addr); in vmap()
1571 return area->addr; in vmap()
1601 remove_vm_area(area->addr); in __vmalloc_area_node()
1626 return area->addr; in __vmalloc_area_node()
1632 vfree(area->addr); in __vmalloc_area_node()
1658 void *addr; in __vmalloc_node_range() local
1670 addr = __vmalloc_area_node(area, gfp_mask, prot, node); in __vmalloc_node_range()
1671 if (!addr) in __vmalloc_node_range()
1686 kmemleak_alloc(addr, real_size, 2, gfp_mask); in __vmalloc_node_range()
1688 return addr; in __vmalloc_node_range()
1897 static int aligned_vread(char *buf, char *addr, unsigned long count) in aligned_vread() argument
1905 offset = (unsigned long)addr & ~PAGE_MASK; in aligned_vread()
1909 p = vmalloc_to_page(addr); in aligned_vread()
1928 addr += length; in aligned_vread()
1936 static int aligned_vwrite(char *buf, char *addr, unsigned long count) in aligned_vwrite() argument
1944 offset = (unsigned long)addr & ~PAGE_MASK; in aligned_vwrite()
1948 p = vmalloc_to_page(addr); in aligned_vwrite()
1965 addr += length; in aligned_vwrite()
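
aligned_vread()/aligned_vwrite() copy through vmalloc_to_page() one page at a time: the `& ~PAGE_MASK` offset locates the start within the first page, each iteration copies at most up to the next page boundary, and unmapped holes read back as zeroes. A hedged reconstruction of the read side:

	static int aligned_vread(char *buf, char *addr, unsigned long count)
	{
		struct page *p;
		int copied = 0;

		while (count) {
			unsigned long offset, length;

			offset = (unsigned long)addr & ~PAGE_MASK;
			length = PAGE_SIZE - offset;	/* rest of this page... */
			if (length > count)
				length = count;		/* ...or what is left to copy */
			p = vmalloc_to_page(addr);
			if (p) {
				/* kmap_atomic() gives a safe kernel alias even if
				 * the vmalloc mapping vanishes under us. */
				void *map = kmap_atomic(p);
				memcpy(buf, map + offset, length);
				kunmap_atomic(map);
			} else
				memset(buf, 0, length);	/* hole: reads as zeroes */

			addr += length;
			buf += length;
			copied += length;
			count -= length;
		}
		return copied;
	}
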
1999 long vread(char *buf, char *addr, unsigned long count) in vread() argument
2008 if ((unsigned long) addr + count < count) in vread()
2009 count = -(unsigned long) addr; in vread()
2020 vaddr = (char *) vm->addr; in vread()
2021 if (addr >= vaddr + get_vm_area_size(vm)) in vread()
2023 while (addr < vaddr) { in vread()
2028 addr++; in vread()
2031 n = vaddr + get_vm_area_size(vm) - addr; in vread()
2035 aligned_vread(buf, addr, n); in vread()
2039 addr += n; in vread()
2080 long vwrite(char *buf, char *addr, unsigned long count) in vwrite() argument
2089 if ((unsigned long) addr + count < count) in vwrite()
2090 count = -(unsigned long) addr; in vwrite()
2102 vaddr = (char *) vm->addr; in vwrite()
2103 if (addr >= vaddr + get_vm_area_size(vm)) in vwrite()
2105 while (addr < vaddr) { in vwrite()
2109 addr++; in vwrite()
2112 n = vaddr + get_vm_area_size(vm) - addr; in vwrite()
2116 aligned_vwrite(buf, addr, n); in vwrite()
2120 addr += n; in vwrite()
2163 if (kaddr + size > area->addr + area->size) in remap_vmalloc_range_partial()
2199 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, in remap_vmalloc_range() argument
2203 addr + (pgoff << PAGE_SHIFT), in remap_vmalloc_range()
2217 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) in f() argument
2255 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in alloc_vm_area()
2268 ret = remove_vm_area(area->addr); in free_vm_area()
2343 unsigned long addr; in pvm_determine_end() local
2346 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end); in pvm_determine_end()
2348 addr = vmalloc_end; in pvm_determine_end()
2350 while (*pprev && (*pprev)->va_end > addr) { in pvm_determine_end()
2355 return addr; in pvm_determine_end()
2630 v->addr, v->addr + v->size, v->size); in s_show()
2710 unsigned long addr = va->va_start; in get_vmalloc_info() local
2715 if (addr < VMALLOC_START) in get_vmalloc_info()
2717 if (addr >= VMALLOC_END) in get_vmalloc_info()
2725 free_area_size = addr - prev_end; in get_vmalloc_info()
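
get_vmalloc_info() accounts free space as the gaps between consecutive vmap_areas inside [VMALLOC_START, VMALLOC_END), with prev_end assumed to start at VMALLOC_START. A runnable userspace model of that hole accounting (the ranges are made up for illustration):

	#include <stdio.h>

	struct range { unsigned long start, end; };

	int main(void)
	{
		/* Hypothetical sorted vmap areas inside [VSTART, VEND). */
		const unsigned long VSTART = 0x1000, VEND = 0x10000;
		struct range va[] = { {0x2000, 0x3000}, {0x7000, 0x8000} };
		unsigned long prev_end = VSTART, largest = 0, used = 0;

		for (int i = 0; i < 2; i++) {
			unsigned long hole = va[i].start - prev_end;

			if (hole > largest)
				largest = hole;
			used += va[i].end - va[i].start;
			prev_end = va[i].end;
		}
		if (VEND - prev_end > largest)		/* tail gap after the last area */
			largest = VEND - prev_end;
		printf("used=%#lx largest_hole=%#lx\n", used, largest);
		return 0;
	}
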