Lines Matching refs:addr in mm/vmalloc.c

61 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) in vunmap_pte_range() argument
65 pte = pte_offset_kernel(pmd, addr); in vunmap_pte_range()
67 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); in vunmap_pte_range()
69 } while (pte++, addr += PAGE_SIZE, addr != end); in vunmap_pte_range()
72 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) in vunmap_pmd_range() argument
77 pmd = pmd_offset(pud, addr); in vunmap_pmd_range()
79 next = pmd_addr_end(addr, end); in vunmap_pmd_range()
84 vunmap_pte_range(pmd, addr, next); in vunmap_pmd_range()
85 } while (pmd++, addr = next, addr != end); in vunmap_pmd_range()
88 static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) in vunmap_pud_range() argument
93 pud = pud_offset(pgd, addr); in vunmap_pud_range()
95 next = pud_addr_end(addr, end); in vunmap_pud_range()
100 vunmap_pmd_range(pud, addr, next); in vunmap_pud_range()
101 } while (pud++, addr = next, addr != end); in vunmap_pud_range()
104 static void vunmap_page_range(unsigned long addr, unsigned long end) in vunmap_page_range() argument
109 BUG_ON(addr >= end); in vunmap_page_range()
110 pgd = pgd_offset_k(addr); in vunmap_page_range()
112 next = pgd_addr_end(addr, end); in vunmap_page_range()
115 vunmap_pud_range(pgd, addr, next); in vunmap_page_range()
116 } while (pgd++, addr = next, addr != end); in vunmap_page_range()
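
The four vunmap functions above form one descending page-table walk: vunmap_page_range() iterates the kernel pgd, hands each sub-range down through the pud and pmd levels, and vunmap_pte_range() clears the leaf PTEs one page at a time. A sketch of the top and bottom of that walk, assembled from the matched lines; the empty-entry check on the elided lines is filled in as an assumption from the usual walking pattern:

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		/* clear the slot; warn if it held something unexpected */
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))	/* assumed empty-entry check */
			continue;
		vunmap_pud_range(pgd, addr, next);	/* pud -> pmd -> pte */
	} while (pgd++, addr = next, addr != end);
}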
119 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, in vmap_pte_range() argument
129 pte = pte_alloc_kernel(pmd, addr); in vmap_pte_range()
139 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); in vmap_pte_range()
141 } while (pte++, addr += PAGE_SIZE, addr != end); in vmap_pte_range()
145 static int vmap_pmd_range(pud_t *pud, unsigned long addr, in vmap_pmd_range() argument
151 pmd = pmd_alloc(&init_mm, pud, addr); in vmap_pmd_range()
155 next = pmd_addr_end(addr, end); in vmap_pmd_range()
156 if (vmap_pte_range(pmd, addr, next, prot, pages, nr)) in vmap_pmd_range()
158 } while (pmd++, addr = next, addr != end); in vmap_pmd_range()
162 static int vmap_pud_range(pgd_t *pgd, unsigned long addr, in vmap_pud_range() argument
168 pud = pud_alloc(&init_mm, pgd, addr); in vmap_pud_range()
172 next = pud_addr_end(addr, end); in vmap_pud_range()
173 if (vmap_pmd_range(pud, addr, next, prot, pages, nr)) in vmap_pud_range()
175 } while (pud++, addr = next, addr != end); in vmap_pud_range()
190 unsigned long addr = start; in vmap_page_range_noflush() local
194 BUG_ON(addr >= end); in vmap_page_range_noflush()
195 pgd = pgd_offset_k(addr); in vmap_page_range_noflush()
197 next = pgd_addr_end(addr, end); in vmap_page_range_noflush()
198 err = vmap_pud_range(pgd, addr, next, prot, pages, &nr); in vmap_page_range_noflush()
201 } while (pgd++, addr = next, addr != end); in vmap_page_range_noflush()
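
The vmap side mirrors that walk, but allocates missing intermediate tables on the way down and counts mapped pages through *nr. A sketch of the top level, reconstructed from the matches above; the early return on error is an assumption:

static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;	/* pages mapped so far, advanced at the pte level */

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;	/* assumed early-out on failure */
	} while (pgd++, addr = next, addr != end);

	return nr;
}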
224 unsigned long addr = (unsigned long)x; in is_vmalloc_or_module_addr() local
225 if (addr >= MODULES_VADDR && addr < MODULES_END) in is_vmalloc_or_module_addr()
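
Lines 224-225 are the interesting half of is_vmalloc_or_module_addr(). A sketch of the whole function, assuming the usual CONFIG_MODULES guard and the fall-through to is_vmalloc_addr(), since several architectures place modules in a dedicated region outside the vmalloc range:

int is_vmalloc_or_module_addr(const void *x)
{
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	/* some architectures keep modules in their own region */
	unsigned long addr = (unsigned long)x;

	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}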
236 unsigned long addr = (unsigned long) vmalloc_addr; in vmalloc_to_page() local
238 pgd_t *pgd = pgd_offset_k(addr); in vmalloc_to_page()
247 pud_t *pud = pud_offset(pgd, addr); in vmalloc_to_page()
249 pmd_t *pmd = pmd_offset(pud, addr); in vmalloc_to_page()
253 ptep = pte_offset_map(pmd, addr); in vmalloc_to_page()
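
vmalloc_to_page() performs the same descent read-only, translating one vmalloc address into its backing struct page. A sketch reconstructed from the matched lines; the none-check at each level and the pte_unmap() pairing are assumptions:

struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}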
294 static struct vmap_area *__find_vmap_area(unsigned long addr) in __find_vmap_area() argument
302 if (addr < va->va_start) in __find_vmap_area()
304 else if (addr >= va->va_end) in __find_vmap_area()
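
The comparisons at lines 302 and 304 drive an rbtree descent keyed on the half-open interval [va_start, va_end): addresses left of va_start go left, addresses at or past va_end go right, anything else lies inside the node. A sketch of the loop, assuming the root is this file's vmap_area_root:

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;	/* addr in [va_start, va_end) */
	}

	return NULL;
}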
358 unsigned long addr; in alloc_vmap_area() local
403 addr = ALIGN(first->va_end, align); in alloc_vmap_area()
404 if (addr < vstart) in alloc_vmap_area()
406 if (addr + size < addr) in alloc_vmap_area()
410 addr = ALIGN(vstart, align); in alloc_vmap_area()
411 if (addr + size < addr) in alloc_vmap_area()
420 if (tmp->va_end >= addr) { in alloc_vmap_area()
422 if (tmp->va_start <= addr) in alloc_vmap_area()
434 while (addr + size > first->va_start && addr + size <= vend) { in alloc_vmap_area()
435 if (addr + cached_hole_size < first->va_start) in alloc_vmap_area()
436 cached_hole_size = first->va_start - addr; in alloc_vmap_area()
437 addr = ALIGN(first->va_end, align); in alloc_vmap_area()
438 if (addr + size < addr) in alloc_vmap_area()
449 if (addr + size > vend) in alloc_vmap_area()
452 va->va_start = addr; in alloc_vmap_area()
453 va->va_end = addr + size; in alloc_vmap_area()
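
The recurring `if (addr + size < addr)` tests in alloc_vmap_area() (lines 406, 411, 438) are unsigned-overflow guards: if the candidate range would run past the top of the address space, the sum wraps around below addr. A standalone userspace demonstration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long addr = ~0UL - 4096;	/* just below the top of the space */
	unsigned long size = 2 * 4096;

	if (addr + size < addr)	/* the sum wrapped, so the range is bogus */
		printf("overflow detected: range would wrap\n");
	return 0;
}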
708 static struct vmap_area *find_vmap_area(unsigned long addr) in find_vmap_area() argument
713 va = __find_vmap_area(addr); in find_vmap_area()
719 static void free_unmap_vmap_area_addr(unsigned long addr) in free_unmap_vmap_area_addr() argument
723 va = find_vmap_area(addr); in free_unmap_vmap_area_addr()
794 static unsigned long addr_to_vb_idx(unsigned long addr) in addr_to_vb_idx() argument
796 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); in addr_to_vb_idx()
797 addr /= VMAP_BLOCK_SIZE; in addr_to_vb_idx()
798 return addr; in addr_to_vb_idx()
803 unsigned long addr; in vmap_block_vaddr() local
805 addr = va_start + (pages_off << PAGE_SHIFT); in vmap_block_vaddr()
806 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); in vmap_block_vaddr()
807 return (void *)addr; in vmap_block_vaddr()
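
addr_to_vb_idx() (lines 794-798) turns any address inside a vmap block into that block's hash index: align the region base down to a block boundary, subtract, divide by the block size. A standalone demonstration; the VMALLOC_START and VMAP_BLOCK_SIZE values here are stand-ins for the demo, not taken from the kernel's configuration:

#include <stdio.h>

#define VMALLOC_START	0xffffc90000000000UL	/* example x86-64 value */
#define VMAP_BLOCK_SIZE	(64UL * 4096)		/* stand-in block size */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE - 1);
	return addr / VMAP_BLOCK_SIZE;
}

int main(void)
{
	/* any address inside block 3 maps to index 3 */
	unsigned long a = VMALLOC_START + 3 * VMAP_BLOCK_SIZE + 4096;

	printf("%lu\n", addr_to_vb_idx(a));	/* prints 3 */
	return 0;
}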
987 static void vb_free(const void *addr, unsigned long size) in vb_free() argument
997 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size); in vb_free()
1001 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1); in vb_free()
1004 vb_idx = addr_to_vb_idx((unsigned long)addr); in vb_free()
1010 vunmap_page_range((unsigned long)addr, (unsigned long)addr + size); in vb_free()
1085 unsigned long addr = (unsigned long)mem; in vm_unmap_ram() local
1087 BUG_ON(!addr); in vm_unmap_ram()
1088 BUG_ON(addr < VMALLOC_START); in vm_unmap_ram()
1089 BUG_ON(addr > VMALLOC_END); in vm_unmap_ram()
1090 BUG_ON(addr & (PAGE_SIZE-1)); in vm_unmap_ram()
1093 vmap_debug_free_range(addr, addr+size); in vm_unmap_ram()
1098 free_unmap_vmap_area_addr(addr); in vm_unmap_ram()
1120 unsigned long addr; in vm_map_ram() local
1127 addr = (unsigned long)mem; in vm_map_ram()
1135 addr = va->va_start; in vm_map_ram()
1136 mem = (void *)addr; in vm_map_ram()
1138 if (vmap_page_range(addr, addr + size, prot, pages) < 0) { in vm_map_ram()
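
vm_map_ram() (lines 1120-1138) and vm_unmap_ram() (lines 1085-1098) are the paired fast-path API. A hypothetical caller, assuming this era's four-argument vm_map_ram() signature; the helper name is invented:

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* hypothetical helper: map nr_pages pages, zero them, unmap again */
static int zero_pages_mapped(struct page **pages, unsigned int nr_pages)
{
	void *va = vm_map_ram(pages, nr_pages, -1 /* any node */, PAGE_KERNEL);

	if (!va)
		return -ENOMEM;
	memset(va, 0, nr_pages * PAGE_SIZE);
	vm_unmap_ram(va, nr_pages);	/* count must match the map call */
	return 0;
}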
1163 if (tmp->addr >= vm->addr) { in vm_area_add_early()
1164 BUG_ON(tmp->addr < vm->addr + vm->size); in vm_area_add_early()
1167 BUG_ON(tmp->addr + tmp->size > vm->addr); in vm_area_add_early()
1188 unsigned long addr; in vm_area_register_early() local
1190 addr = ALIGN(VMALLOC_START + vm_init_off, align); in vm_area_register_early()
1191 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; in vm_area_register_early()
1193 vm->addr = (void *)addr; in vm_area_register_early()
1220 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
1250 int map_kernel_range_noflush(unsigned long addr, unsigned long size, in map_kernel_range_noflush() argument
1253 return vmap_page_range_noflush(addr, addr + size, prot, pages); in map_kernel_range_noflush()
1270 void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) in unmap_kernel_range_noflush() argument
1272 vunmap_page_range(addr, addr + size); in unmap_kernel_range_noflush()
1284 void unmap_kernel_range(unsigned long addr, unsigned long size) in unmap_kernel_range() argument
1286 unsigned long end = addr + size; in unmap_kernel_range()
1288 flush_cache_vunmap(addr, end); in unmap_kernel_range()
1289 vunmap_page_range(addr, end); in unmap_kernel_range()
1290 flush_tlb_kernel_range(addr, end); in unmap_kernel_range()
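
Lines 1284-1290 give unmap_kernel_range() almost whole; what matters is the ordering around the page-table teardown, which the _noflush variant at line 1270 skips entirely. The same lines, assembled and annotated:

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);		/* write back cache lines first */
	vunmap_page_range(addr, end);		/* then tear down the PTEs */
	flush_tlb_kernel_range(addr, end);	/* finally evict stale translations */
}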
1296 unsigned long addr = (unsigned long)area->addr; in map_vm_area() local
1297 unsigned long end = addr + get_vm_area_size(area); in map_vm_area()
1300 err = vmap_page_range(addr, end, prot, pages); in map_vm_area()
1311 vm->addr = (void *)va->va_start; in setup_vmalloc_vm()
1411 struct vm_struct *find_vm_area(const void *addr) in find_vm_area() argument
1415 va = find_vmap_area((unsigned long)addr); in find_vm_area()
1430 struct vm_struct *remove_vm_area(const void *addr) in remove_vm_area() argument
1434 va = find_vmap_area((unsigned long)addr); in remove_vm_area()
1452 static void __vunmap(const void *addr, int deallocate_pages) in __vunmap() argument
1456 if (!addr) in __vunmap()
1459 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", in __vunmap()
1460 addr)) in __vunmap()
1463 area = remove_vm_area(addr); in __vunmap()
1466 addr); in __vunmap()
1470 debug_check_no_locks_freed(addr, get_vm_area_size(area)); in __vunmap()
1471 debug_check_no_obj_freed(addr, get_vm_area_size(area)); in __vunmap()
1507 void vfree(const void *addr) in vfree() argument
1511 kmemleak_free(addr); in vfree()
1513 if (!addr) in vfree()
1517 if (llist_add((struct llist_node *)addr, &p->list)) in vfree()
1520 __vunmap(addr, 1); in vfree()
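
The llist_add() at line 1517 is vfree()'s interrupt-context escape hatch: the locks behind __vunmap() cannot be taken from IRQ context, so the address is pushed onto a per-CPU lockless list and a workqueue item does the real free later. A sketch with the surrounding branches filled in as assumptions:

void vfree(const void *addr)
{
	kmemleak_free(addr);

	if (!addr)
		return;
	if (unlikely(in_interrupt())) {
		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);

		/* llist_add() returns true if the list was empty,
		 * i.e. the work is not already queued */
		if (llist_add((struct llist_node *)addr, &p->list))
			schedule_work(&p->wq);
	} else {
		__vunmap(addr, 1);	/* free the pages as well as the mapping */
	}
}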
1533 void vunmap(const void *addr) in vunmap() argument
1537 if (addr) in vunmap()
1538 __vunmap(addr, 0); in vunmap()
1568 vunmap(area->addr); in vmap()
1572 return area->addr; in vmap()
1602 remove_vm_area(area->addr); in __vmalloc_area_node()
1627 return area->addr; in __vmalloc_area_node()
1633 vfree(area->addr); in __vmalloc_area_node()
1659 void *addr; in __vmalloc_node_range() local
1671 addr = __vmalloc_area_node(area, gfp_mask, prot, node); in __vmalloc_node_range()
1672 if (!addr) in __vmalloc_node_range()
1687 kmemleak_alloc(addr, real_size, 2, gfp_mask); in __vmalloc_node_range()
1689 return addr; in __vmalloc_node_range()
1898 static int aligned_vread(char *buf, char *addr, unsigned long count) in aligned_vread() argument
1906 offset = offset_in_page(addr); in aligned_vread()
1910 p = vmalloc_to_page(addr); in aligned_vread()
1929 addr += length; in aligned_vread()
1937 static int aligned_vwrite(char *buf, char *addr, unsigned long count) in aligned_vwrite() argument
1945 offset = offset_in_page(addr); in aligned_vwrite()
1949 p = vmalloc_to_page(addr); in aligned_vwrite()
1966 addr += length; in aligned_vwrite()
2000 long vread(char *buf, char *addr, unsigned long count) in vread() argument
2009 if ((unsigned long) addr + count < count) in vread()
2010 count = -(unsigned long) addr; in vread()
2021 vaddr = (char *) vm->addr; in vread()
2022 if (addr >= vaddr + get_vm_area_size(vm)) in vread()
2024 while (addr < vaddr) { in vread()
2029 addr++; in vread()
2032 n = vaddr + get_vm_area_size(vm) - addr; in vread()
2036 aligned_vread(buf, addr, n); in vread()
2040 addr += n; in vread()
2081 long vwrite(char *buf, char *addr, unsigned long count) in vwrite() argument
2090 if ((unsigned long) addr + count < count) in vwrite()
2091 count = -(unsigned long) addr; in vwrite()
2103 vaddr = (char *) vm->addr; in vwrite()
2104 if (addr >= vaddr + get_vm_area_size(vm)) in vwrite()
2106 while (addr < vaddr) { in vwrite()
2110 addr++; in vwrite()
2113 n = vaddr + get_vm_area_size(vm) - addr; in vwrite()
2117 aligned_vwrite(buf, addr, n); in vwrite()
2121 addr += n; in vwrite()
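
vread() and vwrite() open with the same clamp (lines 2009-2010 and 2090-2091): if addr + count would wrap past the top of the address space, count is shrunk to -(unsigned long)addr, which is exactly the number of bytes remaining below the wrap point. A standalone userspace demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long addr = ~0UL - 100;	/* 101 bytes below the top */
	unsigned long count = 1000;

	if (addr + count < count)	/* addr + count wrapped */
		count = -addr;		/* bytes from addr up to the top */
	printf("clamped count = %lu\n", count);	/* prints 101 */
	return 0;
}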
2164 if (kaddr + size > area->addr + area->size) in remap_vmalloc_range_partial()
2200 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, in remap_vmalloc_range() argument
2204 addr + (pgoff << PAGE_SHIFT), in remap_vmalloc_range()
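
Line 2204 shows remap_vmalloc_range() converting its page offset into a byte offset before delegating to the _partial variant. The typical caller is a driver's mmap handler; a hypothetical sketch, with mydev_buf standing in for a page-aligned vmalloc() buffer:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *mydev_buf;	/* hypothetical page-aligned vmalloc() buffer */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* map the buffer into the calling process, honouring the mmap offset */
	return remap_vmalloc_range(vma, mydev_buf, vma->vm_pgoff);
}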
2218 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) in f() argument
2256 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in alloc_vm_area()
2269 ret = remove_vm_area(area->addr); in free_vm_area()
2344 unsigned long addr; in pvm_determine_end() local
2347 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end); in pvm_determine_end()
2349 addr = vmalloc_end; in pvm_determine_end()
2351 while (*pprev && (*pprev)->va_end > addr) { in pvm_determine_end()
2356 return addr; in pvm_determine_end()
2631 v->addr, v->addr + v->size, v->size); in s_show()