Lines Matching refs:region
589 struct vm_region *region, *last; in validate_nommu_regions() local
601 region = rb_entry(p, struct vm_region, vm_rb); in validate_nommu_regions()
604 BUG_ON(unlikely(region->vm_end <= region->vm_start)); in validate_nommu_regions()
605 BUG_ON(unlikely(region->vm_top < region->vm_end)); in validate_nommu_regions()
606 BUG_ON(unlikely(region->vm_start < last->vm_top)); in validate_nommu_regions()
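The three BUG_ON checks above assert, for every region in nommu_region_tree, that vm_start <= vm_end <= vm_top and that each region starts at or after the previous region's vm_top. A minimal sketch of the in-order walk these fragments belong to, reconstructed from the lines listed here rather than copied verbatim from the kernel:

static noinline void validate_nommu_regions_sketch(void)
{
        struct vm_region *region, *last;
        struct rb_node *p, *lastp;

        lastp = rb_first(&nommu_region_tree);
        if (!lastp)
                return;

        /* check the first region on its own */
        last = rb_entry(lastp, struct vm_region, vm_rb);
        BUG_ON(last->vm_end <= last->vm_start);
        BUG_ON(last->vm_top < last->vm_end);

        /* then check each region against its in-order predecessor */
        while ((p = rb_next(lastp))) {
                region = rb_entry(p, struct vm_region, vm_rb);
                last = rb_entry(lastp, struct vm_region, vm_rb);

                BUG_ON(region->vm_end <= region->vm_start);  /* non-empty */
                BUG_ON(region->vm_top < region->vm_end);     /* top covers end */
                BUG_ON(region->vm_start < last->vm_top);     /* no overlap */

                lastp = p;
        }
}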
620 static void add_nommu_region(struct vm_region *region) in add_nommu_region() argument
632 if (region->vm_start < pregion->vm_start) in add_nommu_region()
634 else if (region->vm_start > pregion->vm_start) in add_nommu_region()
636 else if (pregion == region) in add_nommu_region()
 642 rb_link_node(&region->vm_rb, parent, p); in add_nommu_region()
 643 rb_insert_color(&region->vm_rb, &nommu_region_tree); in add_nommu_region()
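add_nommu_region() keys the tree on vm_start: the comparisons at lines 632-636 descend left or right, tolerate re-insertion of the same region, and the rb_link_node()/rb_insert_color() pair at 642-643 links and rebalances. A sketch of that descent assembled from the listed fragments (the real function may differ in details such as extra validation calls):

static void add_nommu_region_sketch(struct vm_region *region)
{
        struct vm_region *pregion;
        struct rb_node **p, *parent;

        parent = NULL;
        p = &nommu_region_tree.rb_node;
        while (*p) {
                parent = *p;
                pregion = rb_entry(parent, struct vm_region, vm_rb);
                if (region->vm_start < pregion->vm_start)
                        p = &(*p)->rb_left;
                else if (region->vm_start > pregion->vm_start)
                        p = &(*p)->rb_right;
                else if (pregion == region)
                        return;         /* already in the tree */
                else
                        BUG();          /* two regions with the same start */
        }

        rb_link_node(&region->vm_rb, parent, p);
        rb_insert_color(&region->vm_rb, &nommu_region_tree);
}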
651 static void delete_nommu_region(struct vm_region *region) in delete_nommu_region() argument
 656 rb_erase(&region->vm_rb, &nommu_region_tree); in delete_nommu_region()
683 static void __put_nommu_region(struct vm_region *region) in __put_nommu_region() argument
686 kenter("%p{%d}", region, region->vm_usage); in __put_nommu_region()
690 if (--region->vm_usage == 0) { in __put_nommu_region()
691 if (region->vm_top > region->vm_start) in __put_nommu_region()
692 delete_nommu_region(region); in __put_nommu_region()
695 if (region->vm_file) in __put_nommu_region()
696 fput(region->vm_file); in __put_nommu_region()
700 if (region->vm_flags & VM_MAPPED_COPY) { in __put_nommu_region()
702 free_page_series(region->vm_start, region->vm_top); in __put_nommu_region()
704 kmem_cache_free(vm_region_jar, region); in __put_nommu_region()
713 static void put_nommu_region(struct vm_region *region) in put_nommu_region() argument
716 __put_nommu_region(region); in put_nommu_region()
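put_nommu_region() is a thin wrapper that takes nommu_region_sem and calls __put_nommu_region(), which drops the usage count; on the last put the region is unlinked, its file reference released, and any pages mapped directly out of the page allocator (VM_MAPPED_COPY) are freed. A condensed sketch of that release path, reconstructed from the fragments above (lock handling is inferred, it is not shown in the listing):

/* caller holds nommu_region_sem for writing; it is dropped here */
static void __put_nommu_region_sketch(struct vm_region *region)
{
        BUG_ON(!nommu_region_tree.rb_node);

        if (--region->vm_usage == 0) {
                if (region->vm_top > region->vm_start)
                        delete_nommu_region(region);
                up_write(&nommu_region_sem);

                if (region->vm_file)
                        fput(region->vm_file);

                /* only pages handed out directly by the page allocator
                 * are released here */
                if (region->vm_flags & VM_MAPPED_COPY)
                        free_page_series(region->vm_start, region->vm_top);
                kmem_cache_free(vm_region_jar, region);
        } else {
                up_write(&nommu_region_sem);
        }
}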
1174 struct vm_region *region, in do_mmap_private() argument
1225 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; in do_mmap_private()
1226 region->vm_start = (unsigned long) base; in do_mmap_private()
1227 region->vm_end = region->vm_start + len; in do_mmap_private()
1228 region->vm_top = region->vm_start + (total << PAGE_SHIFT); in do_mmap_private()
1230 vma->vm_start = region->vm_start; in do_mmap_private()
1231 vma->vm_end = region->vm_start + len; in do_mmap_private()
1258 free_page_series(region->vm_start, region->vm_top); in do_mmap_private()
1259 region->vm_start = vma->vm_start = 0; in do_mmap_private()
1260 region->vm_end = vma->vm_end = 0; in do_mmap_private()
1261 region->vm_top = 0; in do_mmap_private()
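In do_mmap_private() the region records two extents: vm_end covers the length the caller asked for, while vm_top covers the full allocation (total pages, line 1228), so the excess tail can be trimmed or reused later; the failure path at 1258-1261 frees the page series and zeroes both region and VMA bounds. A hypothetical helper (not in mm/nommu.c) stating the invariant those assignments establish:

/* sketch: the bounds a directly-mapped (VM_MAPPED_COPY) region must obey */
static inline bool nommu_region_bounds_ok(const struct vm_region *region)
{
        return region->vm_start <= region->vm_end &&    /* requested extent */
               region->vm_end <= region->vm_top;        /* within allocation */
}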
1283 struct vm_region *region; in do_mmap_pgoff() local
1310 region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); in do_mmap_pgoff()
1311 if (!region) in do_mmap_pgoff()
1318 region->vm_usage = 1; in do_mmap_pgoff()
1319 region->vm_flags = vm_flags; in do_mmap_pgoff()
1320 region->vm_pgoff = pgoff; in do_mmap_pgoff()
1327 region->vm_file = get_file(file); in do_mmap_pgoff()
1401 fput(region->vm_file); in do_mmap_pgoff()
1402 kmem_cache_free(vm_region_jar, region); in do_mmap_pgoff()
1403 region = pregion; in do_mmap_pgoff()
1429 vma->vm_start = region->vm_start = addr; in do_mmap_pgoff()
1430 vma->vm_end = region->vm_end = addr + len; in do_mmap_pgoff()
1435 vma->vm_region = region; in do_mmap_pgoff()
1443 ret = do_mmap_private(vma, region, len, capabilities); in do_mmap_pgoff()
1446 add_nommu_region(region); in do_mmap_pgoff()
1450 memset((void *)region->vm_start, 0, in do_mmap_pgoff()
1451 region->vm_end - region->vm_start); in do_mmap_pgoff()
1463 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { in do_mmap_pgoff()
1464 flush_icache_range(region->vm_start, region->vm_end); in do_mmap_pgoff()
1465 region->vm_icache_flushed = true; in do_mmap_pgoff()
1476 if (region->vm_file) in do_mmap_pgoff()
1477 fput(region->vm_file); in do_mmap_pgoff()
1478 kmem_cache_free(vm_region_jar, region); in do_mmap_pgoff()
1492 kmem_cache_free(vm_region_jar, region); in do_mmap_pgoff()
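do_mmap_pgoff() allocates a fresh vm_region (lines 1310-1327) with a single user, the computed vm_flags, the file offset in pages and its own reference on the backing file; if a compatible shared mapping already exists, lines 1401-1403 discard the new region and reuse the existing pregion instead. A hypothetical helper (not in mm/nommu.c) showing just that initialisation:

static struct vm_region *nommu_alloc_region_sketch(struct file *file,
                                                   unsigned long vm_flags,
                                                   unsigned long pgoff)
{
        struct vm_region *region;

        region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
        if (!region)
                return NULL;

        region->vm_usage = 1;           /* one mapping uses this region */
        region->vm_flags = vm_flags;
        region->vm_pgoff = pgoff;
        if (file)
                region->vm_file = get_file(file);  /* pin the backing file */

        return region;
}

Once the mapping is in place, lines 1463-1465 flush the instruction cache for executable mappings exactly once per region, tracked by vm_icache_flushed.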
1563 struct vm_region *region; in split_vma() local
1576 region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); in split_vma()
1577 if (!region) in split_vma()
1582 kmem_cache_free(vm_region_jar, region); in split_vma()
1588 *region = *vma->vm_region; in split_vma()
1589 new->vm_region = region; in split_vma()
1594 region->vm_top = region->vm_end = new->vm_end = addr; in split_vma()
1596 region->vm_start = new->vm_start = addr; in split_vma()
1597 region->vm_pgoff = new->vm_pgoff += npages; in split_vma()
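split_vma() duplicates the backing region (*region = *vma->vm_region, line 1588) and trims the duplicate so it covers only one side of the split address; when it takes the upper side, vm_pgoff also advances by the number of pages below the split. A hypothetical helper (not in mm/nommu.c) with the same arithmetic as lines 1594-1597:

static void nommu_split_region_sketch(struct vm_region *new_region,
                                      const struct vm_region *old_region,
                                      unsigned long addr, bool new_below)
{
        unsigned long npages = (addr - old_region->vm_start) >> PAGE_SHIFT;

        *new_region = *old_region;
        if (new_below) {
                /* duplicate keeps the lower part: [vm_start, addr) */
                new_region->vm_top = new_region->vm_end = addr;
        } else {
                /* duplicate keeps the upper part: [addr, vm_end) */
                new_region->vm_start = addr;
                new_region->vm_pgoff += npages;
        }
}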
1629 struct vm_region *region; in shrink_vma() local
1643 region = vma->vm_region; in shrink_vma()
1644 BUG_ON(region->vm_usage != 1); in shrink_vma()
1647 delete_nommu_region(region); in shrink_vma()
1648 if (from > region->vm_start) { in shrink_vma()
1649 to = region->vm_top; in shrink_vma()
1650 region->vm_top = region->vm_end = from; in shrink_vma()
1652 region->vm_start = to; in shrink_vma()
1654 add_nommu_region(region); in shrink_vma()
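shrink_vma() only handles a region with a single user (the BUG_ON at 1644): the region is taken out of nommu_region_tree, its bounds pulled in at whichever end is being released, then re-inserted, after which the unused page series can be freed. A hypothetical helper (not in mm/nommu.c) condensing that sequence from the listed fragments:

static void nommu_shrink_region_sketch(struct vm_area_struct *vma,
                                       unsigned long from, unsigned long to)
{
        struct vm_region *region = vma->vm_region;

        BUG_ON(region->vm_usage != 1);  /* region must not be shared */

        down_write(&nommu_region_sem);
        delete_nommu_region(region);
        if (from > region->vm_start) {
                /* trimming the top: keep [vm_start, from) and free
                 * everything up to the old allocation top */
                to = region->vm_top;
                region->vm_top = region->vm_end = from;
        } else {
                /* trimming the bottom: keep [to, vm_end) */
                region->vm_start = to;
        }
        add_nommu_region(region);
        up_write(&nommu_region_sem);

        /* hand the released pages back to the allocator */
        free_page_series(from, to);
}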
2094 struct vm_region *region; in nommu_shrink_inode_mappings() local
2125 region = vma->vm_region; in nommu_shrink_inode_mappings()
2126 r_size = region->vm_top - region->vm_start; in nommu_shrink_inode_mappings()
2127 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size; in nommu_shrink_inode_mappings()
2130 region->vm_top -= r_top - newsize; in nommu_shrink_inode_mappings()
2131 if (region->vm_end > region->vm_top) in nommu_shrink_inode_mappings()
2132 region->vm_end = region->vm_top; in nommu_shrink_inode_mappings()
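nommu_shrink_inode_mappings() walks the shared mappings of a truncated inode and clamps each backing region so it no longer extends past the new file size: r_top is the file offset where the region currently ends, and any excess is subtracted from vm_top, with vm_end clamped to it. A hypothetical helper (not in mm/nommu.c) with the same arithmetic as lines 2126-2132:

static void nommu_trim_region_sketch(struct vm_region *region, size_t newsize)
{
        size_t r_size, r_top;

        r_size = region->vm_top - region->vm_start;         /* bytes backed */
        r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;   /* end, in file terms */

        if (r_top > newsize) {
                region->vm_top -= r_top - newsize;           /* trim the excess */
                if (region->vm_end > region->vm_top)
                        region->vm_end = region->vm_top;
        }
}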