Lines matching refs: vma — a cross-reference listing of every line mentioning vma in the Linux kernel's no-MMU memory manager (mm/nommu.c). Each entry gives the source line number, the code, and the enclosing function; "argument"/"local" marks how the symbol is bound there. Lines that do not mention vma are omitted, so function bodies appear with gaps.
121 struct vm_area_struct *vma; in kobjsize() local
123 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
124 if (vma) in kobjsize()
125 return vma->vm_end - vma->vm_start; in kobjsize()
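The kobjsize() lines above (121-125) show its fallback path: if a pointer lands inside a mapping, the object's size is reported as the whole VMA's span. A minimal userspace model of that fallback, not kernel code; the demo_vma type and demo_* names are invented for illustration:

#include <stddef.h>
#include <stdio.h>

/* Invented stand-in for struct vm_area_struct: just the span. */
struct demo_vma {
	unsigned long vm_start, vm_end;
	struct demo_vma *vm_next;
};

/* Model of the kobjsize() fallback: find the mapping containing
 * objp and report the whole mapping's length as the object size. */
static size_t demo_kobjsize(struct demo_vma *mmap, unsigned long objp)
{
	for (struct demo_vma *vma = mmap; vma; vma = vma->vm_next)
		if (objp >= vma->vm_start && objp < vma->vm_end)
			return vma->vm_end - vma->vm_start;
	return 0;
}

int main(void)
{
	struct demo_vma b = { 0x2000, 0x5000, NULL };
	struct demo_vma a = { 0x1000, 0x2000, &b };

	printf("%zu\n", demo_kobjsize(&a, 0x3004)); /* 12288: span of [0x2000,0x5000) */
	return 0;
}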
140 struct vm_area_struct *vma; in __get_user_pages() local
153 vma = find_vma(mm, start); in __get_user_pages()
154 if (!vma) in __get_user_pages()
158 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || in __get_user_pages()
159 !(vm_flags & vma->vm_flags)) in __get_user_pages()
168 vmas[i] = vma; in __get_user_pages()
245 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
248 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
281 struct vm_area_struct *vma; in vmalloc_user() local
284 vma = find_vma(current->mm, (unsigned long)ret); in vmalloc_user()
285 if (vma) in vmalloc_user()
286 vma->vm_flags |= VM_USERMAP; in vmalloc_user()
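Lines 281-286 show vmalloc_user() tagging the new buffer's VMA with VM_USERMAP; remap_vmalloc_range() (line 1809 below) refuses to map a vmalloc buffer into userspace unless that tag is present. A toy model of that producer/consumer flag handshake; the flag value and names are invented:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_VM_USERMAP 0x1 /* invented flag value for the sketch */

struct demo_vma { unsigned long vm_flags; };

/* Producer: mark the allocation as safe to hand to userspace. */
static void demo_vmalloc_user(struct demo_vma *vma)
{
	vma->vm_flags |= DEMO_VM_USERMAP;
}

/* Consumer: only mappings tagged by the producer may be remapped. */
static bool demo_remap_allowed(const struct demo_vma *vma)
{
	return vma->vm_flags & DEMO_VM_USERMAP;
}

int main(void)
{
	struct demo_vma vma = { 0 };

	printf("before: %d\n", demo_remap_allowed(&vma)); /* 0 */
	demo_vmalloc_user(&vma);
	printf("after:  %d\n", demo_remap_allowed(&vma)); /* 1 */
	return 0;
}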
515 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
698 static void protect_vma(struct vm_area_struct *vma, unsigned long flags) in protect_vma() argument
701 struct mm_struct *mm = vma->vm_mm; in protect_vma()
702 long start = vma->vm_start & PAGE_MASK; in protect_vma()
703 while (start < vma->vm_end) { in protect_vma()
717 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) in add_vma_to_mm() argument
723 BUG_ON(!vma->vm_region); in add_vma_to_mm()
726 vma->vm_mm = mm; in add_vma_to_mm()
728 protect_vma(vma, vma->vm_flags); in add_vma_to_mm()
731 if (vma->vm_file) { in add_vma_to_mm()
732 mapping = vma->vm_file->f_mapping; in add_vma_to_mm()
736 vma_interval_tree_insert(vma, &mapping->i_mmap); in add_vma_to_mm()
750 if (vma->vm_start < pvma->vm_start) in add_vma_to_mm()
752 else if (vma->vm_start > pvma->vm_start) { in add_vma_to_mm()
755 } else if (vma->vm_end < pvma->vm_end) in add_vma_to_mm()
757 else if (vma->vm_end > pvma->vm_end) { in add_vma_to_mm()
760 } else if (vma < pvma) in add_vma_to_mm()
762 else if (vma > pvma) { in add_vma_to_mm()
769 rb_link_node(&vma->vm_rb, parent, p); in add_vma_to_mm()
770 rb_insert_color(&vma->vm_rb, &mm->mm_rb); in add_vma_to_mm()
777 __vma_link_list(mm, vma, prev, parent); in add_vma_to_mm()
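The comparison cascade at lines 750-765 defines the total order add_vma_to_mm() uses for the red-black tree: VMAs sort by vm_start, then vm_end, then by object address so duplicates still compare unequal. The same ordering as a standalone comparator (a sketch; the kernel inlines this into the rb-tree descent rather than calling a comparator):

#include <stdio.h>

struct demo_vma { unsigned long vm_start, vm_end; };

/* Same total order as add_vma_to_mm(): vm_start, then vm_end,
 * then the object addresses themselves as a final tiebreak. */
static int demo_vma_cmp(const struct demo_vma *a, const struct demo_vma *b)
{
	if (a->vm_start != b->vm_start)
		return a->vm_start < b->vm_start ? -1 : 1;
	if (a->vm_end != b->vm_end)
		return a->vm_end < b->vm_end ? -1 : 1;
	if (a != b)
		return a < b ? -1 : 1;
	return 0;
}

int main(void)
{
	struct demo_vma x = { 0x1000, 0x2000 }, y = { 0x1000, 0x3000 };

	printf("%d\n", demo_vma_cmp(&x, &y)); /* -1: same start, shorter end first */
	return 0;
}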
783 static void delete_vma_from_mm(struct vm_area_struct *vma) in delete_vma_from_mm() argument
787 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm()
790 protect_vma(vma, 0); in delete_vma_from_mm()
795 if (curr->vmacache[i] == vma) { in delete_vma_from_mm()
802 if (vma->vm_file) { in delete_vma_from_mm()
803 mapping = vma->vm_file->f_mapping; in delete_vma_from_mm()
807 vma_interval_tree_remove(vma, &mapping->i_mmap); in delete_vma_from_mm()
813 rb_erase(&vma->vm_rb, &mm->mm_rb); in delete_vma_from_mm()
815 if (vma->vm_prev) in delete_vma_from_mm()
816 vma->vm_prev->vm_next = vma->vm_next; in delete_vma_from_mm()
818 mm->mmap = vma->vm_next; in delete_vma_from_mm()
820 if (vma->vm_next) in delete_vma_from_mm()
821 vma->vm_next->vm_prev = vma->vm_prev; in delete_vma_from_mm()
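Lines 815-821 are a classic doubly-linked-list unlink: fix up the predecessor (or the list head at mm->mmap) and the successor around the departing VMA. A self-contained model with invented demo_* types:

#include <stdio.h>

struct demo_vma {
	int id;
	struct demo_vma *vm_prev, *vm_next;
};

struct demo_mm { struct demo_vma *mmap; };

/* Same unlink as lines 815-821: the predecessor, or the list head
 * if there is none, skips over the departing node; the successor's
 * back pointer is repaired to match. */
static void demo_delete_vma_from_mm(struct demo_mm *mm, struct demo_vma *vma)
{
	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;
	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

int main(void)
{
	struct demo_vma a = { 1, NULL, NULL }, b = { 2, NULL, NULL };
	struct demo_mm mm = { &a };

	a.vm_next = &b;
	b.vm_prev = &a;
	demo_delete_vma_from_mm(&mm, &a);
	printf("head id: %d\n", mm.mmap->id); /* 2 */
	return 0;
}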
827 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) in delete_vma() argument
829 if (vma->vm_ops && vma->vm_ops->close) in delete_vma()
830 vma->vm_ops->close(vma); in delete_vma()
831 if (vma->vm_file) in delete_vma()
832 fput(vma->vm_file); in delete_vma()
833 put_nommu_region(vma->vm_region); in delete_vma()
834 kmem_cache_free(vm_area_cachep, vma); in delete_vma()
843 struct vm_area_struct *vma; in find_vma() local
846 vma = vmacache_find(mm, addr); in find_vma()
847 if (likely(vma)) in find_vma()
848 return vma; in find_vma()
852 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma()
853 if (vma->vm_start > addr) in find_vma()
855 if (vma->vm_end > addr) { in find_vma()
856 vmacache_update(addr, vma); in find_vma()
857 return vma; in find_vma()
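Lines 846-857 show the no-MMU find_vma() strategy: probe the small per-task vmacache first, and only on a miss walk the address-ordered list, caching the result for next time. A userspace model of that shape; the cache-slot hash here is invented (the kernel hashes on the faulting address's page number), and all demo_* names are assumptions:

#include <stdio.h>

#define DEMO_VMACACHE_SIZE 4 /* the kernel's per-task cache has 4 slots */

struct demo_vma {
	unsigned long vm_start, vm_end;
	struct demo_vma *vm_next;
};

static struct demo_vma *cache[DEMO_VMACACHE_SIZE];

/* Probe the small cache before touching the list at all. */
static struct demo_vma *demo_cache_find(unsigned long addr)
{
	for (int i = 0; i < DEMO_VMACACHE_SIZE; i++)
		if (cache[i] && addr >= cache[i]->vm_start && addr < cache[i]->vm_end)
			return cache[i];
	return NULL;
}

/* Same shape as find_vma(): cache hit, else a linear walk of the
 * sorted list, remembering the answer for the next lookup. */
static struct demo_vma *demo_find_vma(struct demo_vma *mmap, unsigned long addr)
{
	struct demo_vma *vma = demo_cache_find(addr);

	if (vma)
		return vma;
	for (vma = mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL; /* list is address-ordered: we walked past addr */
		if (vma->vm_end > addr) {
			cache[addr % DEMO_VMACACHE_SIZE] = vma; /* invented hash */
			return vma;
		}
	}
	return NULL;
}

int main(void)
{
	struct demo_vma a = { 0x1000, 0x2000, NULL };

	printf("%p\n", (void *)demo_find_vma(&a, 0x1800)); /* list walk */
	printf("%p\n", (void *)demo_find_vma(&a, 0x1800)); /* cache hit */
	return 0;
}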
878 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
891 struct vm_area_struct *vma; in find_vma_exact() local
895 vma = vmacache_find_exact(mm, addr, end); in find_vma_exact()
896 if (vma) in find_vma_exact()
897 return vma; in find_vma_exact()
901 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma_exact()
902 if (vma->vm_start < addr) in find_vma_exact()
904 if (vma->vm_start > addr) in find_vma_exact()
906 if (vma->vm_end == end) { in find_vma_exact()
907 vmacache_update(addr, vma); in find_vma_exact()
908 return vma; in find_vma_exact()
1118 static int do_mmap_shared_file(struct vm_area_struct *vma) in do_mmap_shared_file() argument
1122 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); in do_mmap_shared_file()
1124 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_shared_file()
1139 static int do_mmap_private(struct vm_area_struct *vma, in do_mmap_private() argument
1153 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); in do_mmap_private()
1156 BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); in do_mmap_private()
1157 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_private()
1187 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; in do_mmap_private()
1192 vma->vm_start = region->vm_start; in do_mmap_private()
1193 vma->vm_end = region->vm_start + len; in do_mmap_private()
1195 if (vma->vm_file) { in do_mmap_private()
1200 fpos = vma->vm_pgoff; in do_mmap_private()
1205 ret = __vfs_read(vma->vm_file, base, len, &fpos); in do_mmap_private()
1221 region->vm_start = vma->vm_start = 0; in do_mmap_private()
1222 region->vm_end = vma->vm_end = 0; in do_mmap_private()
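Lines 1187-1222 show the heart of do_mmap_private(): with no MMU there is no fault-driven paging, so a private file mapping is emulated by allocating memory, reading the file into it up front (__vfs_read at line 1205), and clearing anything the read did not cover. A userspace sketch of that emulation using pread(); demo_mmap_private and its behavior on error (mirroring the reset of vm_start/vm_end at lines 1221-1222) are illustrative assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Model of do_mmap_private()'s file case: allocate a contiguous
 * copy, read the file contents in eagerly, zero the tail past EOF. */
static void *demo_mmap_private(int fd, off_t offset, size_t len)
{
	char *base = malloc(len);
	ssize_t n;

	if (!base)
		return NULL;
	n = pread(fd, base, len, offset);
	if (n < 0) {
		free(base); /* mirrors the error path that tears the mapping down */
		return NULL;
	}
	memset(base + n, 0, len - n); /* clear anything the read didn't cover */
	return base;
}

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);
	char *p;

	if (fd < 0)
		return 1;
	p = demo_mmap_private(fd, 0, 4096);
	if (p)
		printf("%.16s\n", p);
	free(p);
	close(fd);
	return 0;
}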
1245 struct vm_area_struct *vma; in do_mmap() local
1273 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); in do_mmap()
1274 if (!vma) in do_mmap()
1281 INIT_LIST_HEAD(&vma->anon_vma_chain); in do_mmap()
1282 vma->vm_flags = vm_flags; in do_mmap()
1283 vma->vm_pgoff = pgoff; in do_mmap()
1287 vma->vm_file = get_file(file); in do_mmap()
1339 vma->vm_region = pregion; in do_mmap()
1342 vma->vm_start = start; in do_mmap()
1343 vma->vm_end = start + len; in do_mmap()
1346 vma->vm_flags |= VM_MAPPED_COPY; in do_mmap()
1348 ret = do_mmap_shared_file(vma); in do_mmap()
1350 vma->vm_region = NULL; in do_mmap()
1351 vma->vm_start = 0; in do_mmap()
1352 vma->vm_end = 0; in do_mmap()
1386 vma->vm_start = region->vm_start = addr; in do_mmap()
1387 vma->vm_end = region->vm_end = addr + len; in do_mmap()
1392 vma->vm_region = region; in do_mmap()
1397 if (file && vma->vm_flags & VM_SHARED) in do_mmap()
1398 ret = do_mmap_shared_file(vma); in do_mmap()
1400 ret = do_mmap_private(vma, region, len, capabilities); in do_mmap()
1406 if (!vma->vm_file && !(flags & MAP_UNINITIALIZED)) in do_mmap()
1411 result = vma->vm_start; in do_mmap()
1416 add_vma_to_mm(current->mm, vma); in do_mmap()
1420 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { in do_mmap()
1435 if (vma->vm_file) in do_mmap()
1436 fput(vma->vm_file); in do_mmap()
1437 kmem_cache_free(vm_area_cachep, vma); in do_mmap()
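The do_mmap() lines above hint at its error discipline: resources taken on the way in (the zeroed VMA from kmem_cache_zalloc at 1273, the file reference from get_file at 1287) are released on a shared error path (fput and kmem_cache_free at 1435-1437). A sketch of that goto-unwind pattern in plain userspace C; all names and the fopen/fclose stand-ins are invented, not the kernel's API:

#include <stdio.h>
#include <stdlib.h>

struct demo_vma { void *file; };

/* Every resource acquired before a failure point is released, in
 * reverse order, on the single error label, as do_mmap() does. */
static int demo_do_mmap(int should_fail, unsigned long *result)
{
	struct demo_vma *vma = calloc(1, sizeof(*vma)); /* stands in for kmem_cache_zalloc() */
	int ret = -1;

	if (!vma)
		return -1;
	vma->file = fopen("/dev/null", "r"); /* stands in for get_file() */
	if (!vma->file)
		goto error;

	if (should_fail)
		goto error; /* any later failure lands here */

	*result = 0x1000; /* stands in for returning vma->vm_start */
	fclose(vma->file); /* demo only: the real success path keeps the vma */
	free(vma);
	return 0;

error:
	if (vma->file)
		fclose(vma->file); /* stands in for fput() */
	free(vma); /* stands in for kmem_cache_free() */
	return ret;
}

int main(void)
{
	unsigned long addr;

	printf("%d\n", demo_do_mmap(1, &addr)); /* -1, resources released */
	printf("%d\n", demo_do_mmap(0, &addr)); /* 0 */
	return 0;
}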
1512 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
1521 if (vma->vm_file) in split_vma()
1538 *new = *vma; in split_vma()
1539 *region = *vma->vm_region; in split_vma()
1542 npages = (addr - vma->vm_start) >> PAGE_SHIFT; in split_vma()
1554 delete_vma_from_mm(vma); in split_vma()
1556 delete_nommu_region(vma->vm_region); in split_vma()
1558 vma->vm_region->vm_start = vma->vm_start = addr; in split_vma()
1559 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; in split_vma()
1561 vma->vm_region->vm_end = vma->vm_end = addr; in split_vma()
1562 vma->vm_region->vm_top = addr; in split_vma()
1564 add_nommu_region(vma->vm_region); in split_vma()
1567 add_vma_to_mm(mm, vma); in split_vma()
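Lines 1538-1562 carry split_vma()'s arithmetic: clone the VMA, compute how many pages precede the split point, then move one end of each half and advance the later half's pgoff. A standalone model of that arithmetic; the new_below parameter follows the kernel's convention (the new VMA takes the lower half when set), and demo_* names are invented:

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

struct demo_vma { unsigned long vm_start, vm_end, vm_pgoff; };

/* Same arithmetic as split_vma(): 'new' takes one side of addr,
 * 'vma' keeps the other, and whichever half now starts later has
 * its page offset advanced by the pages split off (line 1542). */
static void demo_split_vma(struct demo_vma *vma, struct demo_vma *new,
			   unsigned long addr, int new_below)
{
	unsigned long npages = (addr - vma->vm_start) >> DEMO_PAGE_SHIFT;

	*new = *vma;
	if (new_below) {
		new->vm_end = addr;	/* new half: [vm_start, addr) */
		vma->vm_start = addr;	/* old half: [addr, vm_end)   */
		vma->vm_pgoff += npages;
	} else {
		new->vm_start = addr;	/* new half: [addr, vm_end)   */
		new->vm_pgoff += npages;
		vma->vm_end = addr;	/* old half: [vm_start, addr) */
	}
}

int main(void)
{
	struct demo_vma vma = { 0x10000, 0x20000, 0 }, new;

	demo_split_vma(&vma, &new, 0x14000, 0);
	printf("old [%lx,%lx) new [%lx,%lx) pgoff %lu\n",
	       vma.vm_start, vma.vm_end, new.vm_start, new.vm_end,
	       new.vm_pgoff); /* old [10000,14000) new [14000,20000) pgoff 4 */
	return 0;
}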
1577 struct vm_area_struct *vma, in shrink_vma() argument
1584 delete_vma_from_mm(vma); in shrink_vma()
1585 if (from > vma->vm_start) in shrink_vma()
1586 vma->vm_end = from; in shrink_vma()
1588 vma->vm_start = to; in shrink_vma()
1589 add_vma_to_mm(mm, vma); in shrink_vma()
1592 region = vma->vm_region; in shrink_vma()
1617 struct vm_area_struct *vma; in do_munmap() local
1628 vma = find_vma(mm, start); in do_munmap()
1629 if (!vma) { in do_munmap()
1641 if (vma->vm_file) { in do_munmap()
1643 if (start > vma->vm_start) in do_munmap()
1645 if (end == vma->vm_end) in do_munmap()
1647 vma = vma->vm_next; in do_munmap()
1648 } while (vma); in do_munmap()
1652 if (start == vma->vm_start && end == vma->vm_end) in do_munmap()
1654 if (start < vma->vm_start || end > vma->vm_end) in do_munmap()
1658 if (end != vma->vm_end && offset_in_page(end)) in do_munmap()
1660 if (start != vma->vm_start && end != vma->vm_end) { in do_munmap()
1661 ret = split_vma(mm, vma, start, 1); in do_munmap()
1665 return shrink_vma(mm, vma, start, end); in do_munmap()
1669 delete_vma_from_mm(vma); in do_munmap()
1670 delete_vma(mm, vma); in do_munmap()
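Lines 1652-1665 are do_munmap()'s case analysis for a single VMA: an exact match deletes the whole VMA, a range sharing one end shrinks it, and an interior range first splits the VMA and then shrinks one half. The same decision tree as a small classifier; the enum and function names are invented for the sketch:

#include <stdio.h>

struct demo_vma { unsigned long vm_start, vm_end; };

enum demo_action { NO_OVERLAP, DELETE_WHOLE, SHRINK, SPLIT_THEN_SHRINK };

/* The checks do_munmap() runs at lines 1652-1665; the kernel
 * returns -EINVAL for the out-of-range case modeled as NO_OVERLAP. */
static enum demo_action demo_classify(const struct demo_vma *vma,
				      unsigned long start, unsigned long end)
{
	if (start < vma->vm_start || end > vma->vm_end)
		return NO_OVERLAP;
	if (start == vma->vm_start && end == vma->vm_end)
		return DELETE_WHOLE;
	if (start != vma->vm_start && end != vma->vm_end)
		return SPLIT_THEN_SHRINK;
	return SHRINK;
}

int main(void)
{
	struct demo_vma vma = { 0x1000, 0x4000 };

	printf("%d\n", demo_classify(&vma, 0x1000, 0x4000)); /* 1: DELETE_WHOLE */
	printf("%d\n", demo_classify(&vma, 0x2000, 0x3000)); /* 3: SPLIT_THEN_SHRINK */
	printf("%d\n", demo_classify(&vma, 0x1000, 0x2000)); /* 2: SHRINK */
	return 0;
}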
1697 struct vm_area_struct *vma; in exit_mmap() local
1704 while ((vma = mm->mmap)) { in exit_mmap()
1705 mm->mmap = vma->vm_next; in exit_mmap()
1706 delete_vma_from_mm(vma); in exit_mmap()
1707 delete_vma(mm, vma); in exit_mmap()
1731 struct vm_area_struct *vma; in do_mremap() local
1745 vma = find_vma_exact(current->mm, addr, old_len); in do_mremap()
1746 if (!vma) in do_mremap()
1749 if (vma->vm_end != vma->vm_start + old_len) in do_mremap()
1752 if (vma->vm_flags & VM_MAYSHARE) in do_mremap()
1755 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) in do_mremap()
1759 vma->vm_end = vma->vm_start + new_len; in do_mremap()
1760 return vma->vm_start; in do_mremap()
1775 struct page *follow_page_mask(struct vm_area_struct *vma, in follow_page_mask() argument
1783 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
1789 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in remap_pfn_range()
1794 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
1797 unsigned long vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
1799 pfn += vma->vm_pgoff; in vm_iomap_memory()
1800 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
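The vm_iomap_memory() lines (1794-1800) show the page-frame arithmetic: the physical base becomes a pfn and the mapping's vm_pgoff is added so a partially-mapped resource starts at the right frame. A sketch of that arithmetic; the alignment check mirrors what the full function does on lines the listing omits, and the names are invented:

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

/* Convert a physical base to a page frame number and offset it by
 * the mapping's page offset, rejecting an unaligned base as the
 * kernel would. */
static long demo_iomap_pfn(unsigned long long phys_start, unsigned long vm_pgoff)
{
	if (phys_start & ((1UL << DEMO_PAGE_SHIFT) - 1))
		return -1;
	return (long)((phys_start >> DEMO_PAGE_SHIFT) + vm_pgoff);
}

int main(void)
{
	long pfn = demo_iomap_pfn(0x40000000ULL, 2);

	printf("%#lx\n", (unsigned long)pfn); /* 0x40002 */
	return 0;
}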
1804 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, in remap_vmalloc_range() argument
1807 unsigned int size = vma->vm_end - vma->vm_start; in remap_vmalloc_range()
1809 if (!(vma->vm_flags & VM_USERMAP)) in remap_vmalloc_range()
1812 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); in remap_vmalloc_range()
1813 vma->vm_end = vma->vm_start + size; in remap_vmalloc_range()
1926 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in filemap_fault() argument
1933 void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf) in filemap_map_pages() argument
1942 struct vm_area_struct *vma; in __access_remote_vm() local
1947 vma = find_vma(mm, addr); in __access_remote_vm()
1948 if (vma) { in __access_remote_vm()
1950 if (addr + len >= vma->vm_end) in __access_remote_vm()
1951 len = vma->vm_end - addr; in __access_remote_vm()
1954 if (write && vma->vm_flags & VM_MAYWRITE) in __access_remote_vm()
1955 copy_to_user_page(vma, NULL, addr, in __access_remote_vm()
1957 else if (!write && vma->vm_flags & VM_MAYREAD) in __access_remote_vm()
1958 copy_from_user_page(vma, NULL, addr, in __access_remote_vm()
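Lines 1947-1958 show the core of __access_remote_vm(): clamp the request so it never runs past the mapping, then copy only in the direction the mapping's VM_MAY* flags permit; on no-MMU the "remote" address is directly dereferenceable, which is why plain copies suffice. A userspace model with invented flag values and demo_* names:

#include <stdio.h>
#include <string.h>

#define DEMO_VM_MAYREAD  0x1 /* invented flag values for the sketch */
#define DEMO_VM_MAYWRITE 0x2

struct demo_vma { unsigned long vm_start, vm_end, vm_flags; };

/* Clamp len to the mapping's end (lines 1950-1951), then copy in
 * the permitted direction only (lines 1954-1958); mem is the
 * backing memory that starts at vm_start. */
static size_t demo_access(struct demo_vma *vma, char *mem,
			  unsigned long addr, char *buf, size_t len, int write)
{
	if (addr + len >= vma->vm_end)
		len = vma->vm_end - addr;

	if (write && (vma->vm_flags & DEMO_VM_MAYWRITE))
		memcpy(mem + (addr - vma->vm_start), buf, len);
	else if (!write && (vma->vm_flags & DEMO_VM_MAYREAD))
		memcpy(buf, mem + (addr - vma->vm_start), len);
	else
		len = 0;
	return len;
}

int main(void)
{
	char mem[16] = "hello!", buf[8] = { 0 };
	struct demo_vma vma = { 0x1000, 0x1010, DEMO_VM_MAYREAD };

	size_t n = demo_access(&vma, mem, 0x1008, buf, 32, 0);
	printf("%zu\n", n); /* 8: the 32-byte request was clamped */
	return 0;
}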
2022 struct vm_area_struct *vma; in nommu_shrink_inode_mappings() local
2034 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { in nommu_shrink_inode_mappings()
2037 if (vma->vm_flags & VM_SHARED) { in nommu_shrink_inode_mappings()
2050 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { in nommu_shrink_inode_mappings()
2051 if (!(vma->vm_flags & VM_SHARED)) in nommu_shrink_inode_mappings()
2054 region = vma->vm_region; in nommu_shrink_inode_mappings()