Cross-reference listing: lines in mm/vmalloc.c that reference the identifier `va` (a `struct vmap_area` pointer), apparently from a v4.x-era tree (free_vmap_cache, VM_VM_AREA and get_vmalloc_info all still exist). Each entry gives the kernel source line number, the matching line, and the enclosing function; `local` and `argument` note how `va` is bound there.
297 struct vmap_area *va; in __find_vmap_area() local
299 va = rb_entry(n, struct vmap_area, rb_node); in __find_vmap_area()
300 if (addr < va->va_start) in __find_vmap_area()
302 else if (addr >= va->va_end) in __find_vmap_area()
305 return va; in __find_vmap_area()
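
These first matches (kernel lines 297-305) are the heart of __find_vmap_area(): a binary search over the address-sorted vmap_area rb-tree. A reconstruction of the whole function, with the loop scaffolding filled in from the same-era mm/vmalloc.c (treat it as a sketch; callers must hold vmap_area_lock):

```c
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;		/* addr lies below this area */
		else if (addr >= va->va_end)
			n = n->rb_right;	/* addr lies at or above va_end */
		else
			return va;		/* va_start <= addr < va_end */
	}

	return NULL;
}
```
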
311 static void __insert_vmap_area(struct vmap_area *va) in __insert_vmap_area() argument
322 if (va->va_start < tmp_va->va_end) in __insert_vmap_area()
324 else if (va->va_end > tmp_va->va_start) in __insert_vmap_area()
330 rb_link_node(&va->rb_node, parent, p); in __insert_vmap_area()
331 rb_insert_color(&va->rb_node, &vmap_area_root); in __insert_vmap_area()
334 tmp = rb_prev(&va->rb_node); in __insert_vmap_area()
338 list_add_rcu(&va->list, &prev->list); in __insert_vmap_area()
340 list_add_rcu(&va->list, &vmap_area_list); in __insert_vmap_area()
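
__insert_vmap_area() (lines 311-340) keeps two views of the same areas consistent: the rb-tree for O(log n) lookup, and the address-sorted vmap_area_list linked with RCU primitives so readers can walk it locklessly. Reconstructed from the same source era, comments added; a sketch, not an authoritative quotation:

```c
static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	/* Usual rbtree descent; areas never overlap, so landing on
	 * an equal range indicates a bug. */
	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* Mirror the tree order in the list: link after the rb-tree
	 * predecessor, or at the head if va is now the lowest area. */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;

		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}
```
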
354 struct vmap_area *va; in alloc_vmap_area() local
364 va = kmalloc_node(sizeof(struct vmap_area), in alloc_vmap_area()
366 if (unlikely(!va)) in alloc_vmap_area()
373 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK); in alloc_vmap_area()
450 va->va_start = addr; in alloc_vmap_area()
451 va->va_end = addr + size; in alloc_vmap_area()
452 va->flags = 0; in alloc_vmap_area()
453 __insert_vmap_area(va); in alloc_vmap_area()
454 free_vmap_cache = &va->rb_node; in alloc_vmap_area()
457 BUG_ON(va->va_start & (align-1)); in alloc_vmap_area()
458 BUG_ON(va->va_start < vstart); in alloc_vmap_area()
459 BUG_ON(va->va_end > vend); in alloc_vmap_area()
461 return va; in alloc_vmap_area()
473 kfree(va); in alloc_vmap_area()
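
alloc_vmap_area() shows up only in fragments because its free-range search (between lines 373 and 450) works on cached rb-nodes rather than `va`. The visible shape: kmalloc_node() the vmap_area, tell kmemleak to scan it (the structure holds pointers kmemleak would otherwise miss), search [vstart, vend) for a large-enough hole, then commit. A condensed sketch of the commit path at lines 450-461; the `found:` label and the lock being held are assumptions carried over from the same-era source:

```c
found:
	/* The search above left 'addr' pointing at a hole of at
	 * least 'size' bytes; vmap_area_lock is held here. */
	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;	/* start the next search here */
	spin_unlock(&vmap_area_lock);

	BUG_ON(va->va_start & (align-1));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;
```

On failure (address space exhausted, line 473) the unused vmap_area is simply kfree()d, since it was never inserted anywhere.
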
477 static void __free_vmap_area(struct vmap_area *va) in __free_vmap_area() argument
479 BUG_ON(RB_EMPTY_NODE(&va->rb_node)); in __free_vmap_area()
482 if (va->va_end < cached_vstart) { in __free_vmap_area()
487 if (va->va_start <= cache->va_start) { in __free_vmap_area()
488 free_vmap_cache = rb_prev(&va->rb_node); in __free_vmap_area()
496 rb_erase(&va->rb_node, &vmap_area_root); in __free_vmap_area()
497 RB_CLEAR_NODE(&va->rb_node); in __free_vmap_area()
498 list_del_rcu(&va->list); in __free_vmap_area()
506 if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END) in __free_vmap_area()
507 vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end); in __free_vmap_area()
509 kfree_rcu(va, rcu_head); in __free_vmap_area()
515 static void free_vmap_area(struct vmap_area *va) in free_vmap_area() argument
518 __free_vmap_area(va); in free_vmap_area()
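
__free_vmap_area() (lines 477-509) is the inverse of the insert: drop or rewind the free_vmap_cache search hint if the freed range invalidates it, unlink the node from both the rb-tree and the RCU list, record the highest freed end address as a hint for the percpu allocator, and defer the actual kfree() past an RCU grace period. The free_vmap_area() wrapper at 515-518 is presumably just the usual vmap_area_lock bracket. Reconstructed sketch:

```c
static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;	/* hint no longer trustworthy */
		} else {
			struct vmap_area *cache;

			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start)
				free_vmap_cache = rb_prev(&va->rb_node);
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/* Only end addresses inside the vmalloc range proper are
	 * useful candidates for the percpu allocator's hole hint. */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);	/* readers may still walk the RCU list */
}

static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}
```
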
525 static void unmap_vmap_area(struct vmap_area *va) in unmap_vmap_area() argument
527 vunmap_page_range(va->va_start, va->va_end); in unmap_vmap_area()
605 struct vmap_area *va; in __purge_vmap_area_lazy() local
624 list_for_each_entry_rcu(va, &vmap_area_list, list) { in __purge_vmap_area_lazy()
625 if (va->flags & VM_LAZY_FREE) { in __purge_vmap_area_lazy()
626 if (va->va_start < *start) in __purge_vmap_area_lazy()
627 *start = va->va_start; in __purge_vmap_area_lazy()
628 if (va->va_end > *end) in __purge_vmap_area_lazy()
629 *end = va->va_end; in __purge_vmap_area_lazy()
630 nr += (va->va_end - va->va_start) >> PAGE_SHIFT; in __purge_vmap_area_lazy()
631 list_add_tail(&va->purge_list, &valist); in __purge_vmap_area_lazy()
632 va->flags |= VM_LAZY_FREEING; in __purge_vmap_area_lazy()
633 va->flags &= ~VM_LAZY_FREE; in __purge_vmap_area_lazy()
646 list_for_each_entry_safe(va, n_va, &valist, purge_list) in __purge_vmap_area_lazy()
647 __free_vmap_area(va); in __purge_vmap_area_lazy()
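
unmap_vmap_area() (line 527) only tears down page tables for the range; the expensive part, the TLB flush, is what __purge_vmap_area_lazy() batches. It collects every VM_LAZY_FREE area onto a private list under rcu_read_lock(), widens *start/*end to cover them all, issues one flush_tlb_kernel_range() for the union, and only then frees the areas. A condensed sketch, with the purge_lock/sync plumbing elided:

```c
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/* (purge_lock / trylock handling for 'sync' elided here) */

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			/* widen the flush window to cover this area */
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			/* steal onto a private list; re-flag so nobody
			 * else tries to free it concurrently */
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);	/* one flush for all */

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
}
```
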
679 static void free_vmap_area_noflush(struct vmap_area *va) in free_vmap_area_noflush() argument
681 va->flags |= VM_LAZY_FREE; in free_vmap_area_noflush()
682 atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr); in free_vmap_area_noflush()
691 static void free_unmap_vmap_area_noflush(struct vmap_area *va) in free_unmap_vmap_area_noflush() argument
693 unmap_vmap_area(va); in free_unmap_vmap_area_noflush()
694 free_vmap_area_noflush(va); in free_unmap_vmap_area_noflush()
700 static void free_unmap_vmap_area(struct vmap_area *va) in free_unmap_vmap_area() argument
702 flush_cache_vunmap(va->va_start, va->va_end); in free_unmap_vmap_area()
703 free_unmap_vmap_area_noflush(va); in free_unmap_vmap_area()
708 struct vmap_area *va; in find_vmap_area() local
711 va = __find_vmap_area(addr); in find_vmap_area()
714 return va; in find_vmap_area()
719 struct vmap_area *va; in free_unmap_vmap_area_addr() local
721 va = find_vmap_area(addr); in free_unmap_vmap_area_addr()
722 BUG_ON(!va); in free_unmap_vmap_area_addr()
723 free_unmap_vmap_area(va); in free_unmap_vmap_area_addr()
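
Lines 679-723 are the small helper cascade layering cache flush, unmap, and lazy free on top of one another. Reconstruction below; lazy_max_pages() and try_purge_vmap_area_lazy() do not appear in the matches and are taken from the same-era source:

```c
/* Queue the area for lazy freeing; rb-tree removal and the TLB
 * flush happen later, in batch, via __purge_vmap_area_lazy(). */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/* Unmap the page tables now, free lazily. */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/* Same, but flush the CPU cache for the virtual range first. */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);	/* freeing an address that was never mapped */
	free_unmap_vmap_area(va);
}
```
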
766 struct vmap_area *va; member
820 struct vmap_area *va; in new_vmap_block() local
832 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, in new_vmap_block()
835 if (IS_ERR(va)) { in new_vmap_block()
837 return ERR_CAST(va); in new_vmap_block()
843 free_vmap_area(va); in new_vmap_block()
847 vaddr = vmap_block_vaddr(va->va_start, 0); in new_vmap_block()
849 vb->va = va; in new_vmap_block()
858 vb_idx = addr_to_vb_idx(va->va_start); in new_vmap_block()
879 vb_idx = addr_to_vb_idx(vb->va->va_start); in free_vmap_block()
885 free_vmap_area_noflush(vb->va); in free_vmap_block()
963 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
1055 unsigned long va_start = vb->va->va_start; in vm_unmap_aliases()
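
Lines 766-1055 belong to the per-CPU vmap block allocator behind vm_map_ram(). Each vmap_block owns a single VMAP_BLOCK_SIZE-byte, VMAP_BLOCK_SIZE-aligned vmap_area; vb_alloc() carves page runs out of it, and the block is reachable both through a radix tree keyed by addr_to_vb_idx(va->va_start) and through a per-CPU free list. A lightly condensed reconstruction of new_vmap_block() (the free/dirty counter initialisation is summarised in a comment):

```c
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	/* One aligned block of vmalloc space backs the whole thing. */
	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);	/* propagate the ERR_PTR */
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);	/* line 843: undo the allocation */
		return ERR_PTR(err);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* ... vb->free / vb->dirty / free_list initialisation elided ... */

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	/* Publish on this CPU's free list so vb_alloc() can find it. */
	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}
```

free_vmap_block() (lines 879-885) reverses this: delete the radix-tree entry keyed on vb->va->va_start, then free_vmap_area_noflush() the backing area. vm_unmap_aliases() (line 1055) likewise uses vb->va->va_start to compute flush ranges for dirty sub-regions.
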
1127 struct vmap_area *va; in vm_map_ram() local
1128 va = alloc_vmap_area(size, PAGE_SIZE, in vm_map_ram()
1130 if (IS_ERR(va)) in vm_map_ram()
1133 addr = va->va_start; in vm_map_ram()
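
vm_map_ram() (lines 1127-1133) is the consumer that picks between the two allocators: small requests go through the per-CPU blocks, larger ones straight to alloc_vmap_area(). A reconstruction from the same source era; VMAP_MAX_ALLOC is the cutover point:

```c
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, node);	/* per-cpu block fast path */
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;

		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
```
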
1198 struct vmap_area *va; in vmalloc_init() local
1216 va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT); in vmalloc_init()
1217 va->flags = VM_VM_AREA; in vmalloc_init()
1218 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
1219 va->va_end = va->va_start + tmp->size; in vmalloc_init()
1220 va->vm = tmp; in vmalloc_init()
1221 __insert_vmap_area(va); in vmalloc_init()
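
vmalloc_init() (lines 1198-1221) runs once at boot and imports every vm_struct registered before the allocator existed (the early vmlist) into the rb-tree and sorted list; GFP_NOWAIT is used because nothing may sleep this early. Sketch of the import loop; `tmp` walks vmlist, and the two trailing assignments are from the same-era source rather than the matches above:

```c
	/* Import early-registered areas (vmlist) into the allocator. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;		/* has a vm_struct attached */
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;
	vmap_initialized = true;
```
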
1304 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, in setup_vmalloc_vm() argument
1309 vm->addr = (void *)va->va_start; in setup_vmalloc_vm()
1310 vm->size = va->va_end - va->va_start; in setup_vmalloc_vm()
1312 va->vm = vm; in setup_vmalloc_vm()
1313 va->flags |= VM_VM_AREA; in setup_vmalloc_vm()
1332 struct vmap_area *va; in __get_vm_area_node() local
1351 va = alloc_vmap_area(size, align, start, end, node, gfp_mask); in __get_vm_area_node()
1352 if (IS_ERR(va)) { in __get_vm_area_node()
1357 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
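
setup_vmalloc_vm() (lines 1304-1313) is where a raw vmap_area becomes a visible, vm_struct-backed area: __get_vm_area_node() allocates the vm_struct and the vmap_area separately, then binds them under vmap_area_lock so lookups never observe a half-initialised pairing. Reconstructed sketch:

```c
static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;	/* now visible to find_vm_area() etc. */
	spin_unlock(&vmap_area_lock);
}
```
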
1411 struct vmap_area *va; in find_vm_area() local
1413 va = find_vmap_area((unsigned long)addr); in find_vm_area()
1414 if (va && va->flags & VM_VM_AREA) in find_vm_area()
1415 return va->vm; in find_vm_area()
1430 struct vmap_area *va; in remove_vm_area() local
1432 va = find_vmap_area((unsigned long)addr); in remove_vm_area()
1433 if (va && va->flags & VM_VM_AREA) { in remove_vm_area()
1434 struct vm_struct *vm = va->vm; in remove_vm_area()
1437 va->vm = NULL; in remove_vm_area()
1438 va->flags &= ~VM_VM_AREA; in remove_vm_area()
1441 vmap_debug_free_range(va->va_start, va->va_end); in remove_vm_area()
1443 free_unmap_vmap_area(va); in remove_vm_area()
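
find_vm_area() (lines 1411-1415) and remove_vm_area() (lines 1430-1443) translate an address back into its vm_struct via the rb-tree. Removal detaches the vm_struct under the lock before unmapping, so a concurrent lookup either sees the whole area or none of it. Sketch; a KASAN shadow-release hook present in some versions is omitted:

```c
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}

struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		/* Detach under the lock so lookups never see a
		 * half-removed area. */
		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);	/* unmap now, free lazily */

		return vm;
	}
	return NULL;
}
```
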
2001 struct vmap_area *va; in vread() local
2012 list_for_each_entry(va, &vmap_area_list, list) { in vread()
2016 if (!(va->flags & VM_VM_AREA)) in vread()
2019 vm = va->vm; in vread()
2082 struct vmap_area *va; in vwrite() local
2094 list_for_each_entry(va, &vmap_area_list, list) { in vwrite()
2098 if (!(va->flags & VM_VM_AREA)) in vwrite()
2101 vm = va->vm; in vwrite()
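
vread() and vwrite() (lines 2001-2101) share one walk: take vmap_area_lock, iterate the sorted vmap_area_list, and touch only areas flagged VM_VM_AREA, i.e. those with a fully set up vm_struct; everything else (vb space, areas mid-setup) is skipped rather than risking a fault. Skeleton of the vread() side; aligned_vread() is the per-page copy helper in the same-era source:

```c
	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;	/* request fully satisfied */

		/* No vm_struct attached: nothing safe to copy from. */
		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;	/* area ends before the window */

		/* ... copy the overlap of [addr, addr + count) with this
		 * area via aligned_vread(), zero-filling any gap ... */
	}
	spin_unlock(&vmap_area_lock);
```
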
2297 struct vmap_area *va = NULL; in pvm_find_next_prev() local
2300 va = rb_entry(n, struct vmap_area, rb_node); in pvm_find_next_prev()
2301 if (end < va->va_end) in pvm_find_next_prev()
2303 else if (end > va->va_end) in pvm_find_next_prev()
2309 if (!va) in pvm_find_next_prev()
2312 if (va->va_end > end) { in pvm_find_next_prev()
2313 *pnext = va; in pvm_find_next_prev()
2316 *pprev = va; in pvm_find_next_prev()
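
pvm_find_next_prev() (lines 2297-2316) reuses the rb-tree for a different query: given an address, find the nearest areas ending above and below it, as starting points for the percpu allocator's hole search. Reconstruction, including the node_to_va() helper it leans on:

```c
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}

static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	/* Descend keyed on va_end, unlike __find_vmap_area(). */
	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;	/* tree is empty */

	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}
```
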
2506 struct vmap_area *va = vas[area]; in pcpu_get_vm_areas() local
2508 va->va_start = base + offsets[area]; in pcpu_get_vm_areas()
2509 va->va_end = va->va_start + sizes[area]; in pcpu_get_vm_areas()
2510 __insert_vmap_area(va); in pcpu_get_vm_areas()
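
pcpu_get_vm_areas() (lines 2506-2510) is the payoff: once its search settles on a base address, every preallocated vmap_area is stamped with its final range and inserted in one pass. Sketch of that tail; vmap_area_lock is assumed already held by the surrounding search loop, per the same-era source:

```c
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);
```
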
2558 struct vmap_area *va; in s_start() local
2561 va = list_entry((&vmap_area_list)->next, typeof(*va), list); in s_start()
2562 while (n > 0 && &va->list != &vmap_area_list) { in s_start()
2564 va = list_entry(va->list.next, typeof(*va), list); in s_start()
2566 if (!n && &va->list != &vmap_area_list) in s_start()
2567 return va; in s_start()
2575 struct vmap_area *va = p, *next; in s_next() local
2578 next = list_entry(va->list.next, typeof(*va), list); in s_next()
2617 struct vmap_area *va = p; in s_show() local
2624 if (!(va->flags & VM_VM_AREA)) in s_show()
2627 v = va->vm; in s_show()
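
Lines 2558-2627 implement the /proc/vmallocinfo seq_file walk: s_start() takes vmap_area_lock and skips *pos entries by hand (the list has no O(1) indexing), s_next() steps to the next node, and s_show() prints only VM_VM_AREA entries. Reconstruction of the two iterators:

```c
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_area_lock)
{
	loff_t n = *pos;
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = list_entry((&vmap_area_list)->next, typeof(*va), list);
	while (n > 0 && &va->list != &vmap_area_list) {
		n--;
		va = list_entry(va->list.next, typeof(*va), list);
	}
	if (!n && &va->list != &vmap_area_list)
		return va;

	return NULL;	/* *pos walked past the end of the list */
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vmap_area *va = p, *next;

	++(*pos);
	next = list_entry(va->list.next, typeof(*va), list);
	if (&next->list != &vmap_area_list)
		return next;

	return NULL;
}
```
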
2693 struct vmap_area *va; in get_vmalloc_info() local
2709 list_for_each_entry_rcu(va, &vmap_area_list, list) { in get_vmalloc_info()
2710 unsigned long addr = va->va_start; in get_vmalloc_info()
2720 if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING)) in get_vmalloc_info()
2723 vmi->used += (va->va_end - va->va_start); in get_vmalloc_info()
2729 prev_end = va->va_end; in get_vmalloc_info()
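
get_vmalloc_info() (lines 2693-2729) produces the used/largest_chunk numbers for /proc/meminfo by walking the sorted list under RCU and measuring the gaps between consecutive areas. Condensed sketch of the loop; the empty-list shortcut and the final gap up to VMALLOC_END are summarised in comments:

```c
	prev_end = VMALLOC_START;

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		unsigned long addr = va->va_start;

		/* Some architectures keep module space below VMALLOC_START. */
		if (addr < VMALLOC_START)
			continue;
		if (addr >= VMALLOC_END)
			break;

		/* Areas queued for lazy freeing no longer count as used. */
		if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
			continue;

		vmi->used += (va->va_end - va->va_start);

		/* The hole before this area is a free-chunk candidate. */
		if (vmi->largest_chunk < addr - prev_end)
			vmi->largest_chunk = addr - prev_end;

		prev_end = va->va_end;
	}
	rcu_read_unlock();

	/* The tail gap, prev_end..VMALLOC_END, is checked the same way. */
```
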