Lines matching refs: vma
53 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument
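These matches appear to come from the Linux kernel's mm/mremap.c (a v4.x-era tree, judging by split_huge_page_pmd() and vma->vm_next below). The first hit, alloc_new_pmd(), allocates the destination page-middle-directory for the new address. A minimal sketch of what that era's implementation roughly looks like, reconstructed from the standard page-table helpers rather than quoted verbatim:

	static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
				    unsigned long addr)
	{
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		/* Walk down from the top-level table, allocating levels on demand. */
		pgd = pgd_offset(mm, addr);
		pud = pud_alloc(mm, pgd, addr);
		if (!pud)
			return NULL;	/* out of memory */
		pmd = pmd_alloc(mm, pud, addr);
		if (!pmd)
			return NULL;

		return pmd;
	}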
89 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, in move_ptes() argument
96 struct mm_struct *mm = vma->vm_mm; in move_ptes()
119 if (vma->vm_file) { in move_ptes()
120 mapping = vma->vm_file->f_mapping; in move_ptes()
123 if (vma->anon_vma) { in move_ptes()
124 anon_vma = vma->anon_vma; in move_ptes()
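move_ptes() first takes the file mapping's rmap lock and the anon_vma lock (lines 119-124) so reverse-map walkers never see a page at both the old and the new address, then shifts each PTE under the page-table locks. A rough sketch of the inner loop using the stock PTE helpers (a reconstruction, not the verbatim source):

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		/* Clear the old slot, then install the entry at the new address. */
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}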
163 unsigned long move_page_tables(struct vm_area_struct *vma, in move_page_tables() argument
175 flush_cache_range(vma, old_addr, old_end); in move_page_tables()
179 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
188 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables()
191 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables()
197 VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma, in move_page_tables()
198 vma); in move_page_tables()
201 anon_vma_lock_write(vma->anon_vma); in move_page_tables()
202 err = move_huge_pmd(vma, new_vma, old_addr, in move_page_tables()
206 anon_vma_unlock_write(vma->anon_vma); in move_page_tables()
212 split_huge_page_pmd(vma, old_addr, old_pmd); in move_page_tables()
224 move_ptes(vma, old_pmd, old_addr, old_addr + extent, in move_page_tables()
229 flush_tlb_range(vma, old_end-len, old_addr); in move_page_tables()
231 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
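move_page_tables() (lines 163-231) walks the range in PMD-sized steps: a transparent huge PMD is either moved whole under the anon_vma write lock (lines 197-206) or split (line 212) so move_ptes() can handle it page by page, and the TLB is flushed once for the whole range at the end (line 229). The stepping logic is roughly:

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		/* Clamp each step to the next PMD boundary and to the range end. */
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		/* ... get_old_pmd()/alloc_new_pmd(), then huge-PMD move,
		   split, or move_ptes() ... */
	}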
236 static unsigned long move_vma(struct vm_area_struct *vma, in move_vma() argument
240 struct mm_struct *mm = vma->vm_mm; in move_vma()
242 unsigned long vm_flags = vma->vm_flags; in move_vma()
265 err = ksm_madvise(vma, old_addr, old_addr + old_len, in move_vma()
270 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); in move_vma()
271 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, in move_vma()
276 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, in move_vma()
280 } else if (vma->vm_ops && vma->vm_ops->mremap) { in move_vma()
281 err = vma->vm_ops->mremap(new_vma); in move_vma()
290 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, in move_vma()
292 vma = new_vma; in move_vma()
303 vma->vm_flags &= ~VM_ACCOUNT; in move_vma()
304 excess = vma->vm_end - vma->vm_start - old_len; in move_vma()
305 if (old_addr > vma->vm_start && in move_vma()
306 old_addr + old_len < vma->vm_end) in move_vma()
320 vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); in move_vma()
331 vma->vm_flags |= VM_ACCOUNT; in move_vma()
333 vma->vm_next->vm_flags |= VM_ACCOUNT; in move_vma()
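move_vma() (lines 236-333) drives the relocation: copy_vma() builds the destination VMA, move_page_tables() shifts the entries, and on a partial failure the same routine runs in reverse (line 290) to move everything back before the function fails. Lines 303-306 and 331-333 are commit accounting: VM_ACCOUNT is cleared on the old VMA so that unmapping the source range does not return the charge now owned by new_vma, then restored on whatever survives. Roughly (a reconstruction):

	vma->vm_flags &= ~VM_ACCOUNT;		/* keep munmap from uncharging */
	excess = vma->vm_end - vma->vm_start - old_len;
	if (old_addr > vma->vm_start &&
	    old_addr + old_len < vma->vm_end)
		split = 1;			/* unmap leaves two fragments */
	/* ... do_munmap() of the old range ... */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;	/* restore on the remainder */
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}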
348 struct vm_area_struct *vma = find_vma(mm, addr); in vma_to_resize() local
351 if (!vma || vma->vm_start > addr) in vma_to_resize()
354 if (is_vm_hugetlb_page(vma)) in vma_to_resize()
358 if (old_len > vma->vm_end - addr) in vma_to_resize()
362 return vma; in vma_to_resize()
365 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; in vma_to_resize()
366 pgoff += vma->vm_pgoff; in vma_to_resize()
370 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) in vma_to_resize()
373 if (vma->vm_flags & VM_LOCKED) { in vma_to_resize()
385 if (vma->vm_flags & VM_ACCOUNT) { in vma_to_resize()
392 return vma; in vma_to_resize()
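vma_to_resize() validates the source range: a VMA must exist and cover addr (line 351), hugetlb mappings are rejected (line 354), old_len must fit inside the VMA (line 358), and VM_DONTEXPAND/VM_PFNMAP mappings cannot be resized (line 370). The VM_LOCKED branch (line 373) charges the growth against RLIMIT_MEMLOCK; a sketch of that check (a reconstruction, not the verbatim source):

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;

		locked = mm->locked_vm << PAGE_SHIFT;	/* bytes already mlocked */
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;		/* the growth joins them */
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}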
399 struct vm_area_struct *vma; in mremap_to() local
425 vma = vma_to_resize(addr, old_len, new_len, &charged); in mremap_to()
426 if (IS_ERR(vma)) { in mremap_to()
427 ret = PTR_ERR(vma); in mremap_to()
432 if (vma->vm_flags & VM_MAYSHARE) in mremap_to()
435 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + in mremap_to()
436 ((addr - vma->vm_start) >> PAGE_SHIFT), in mremap_to()
441 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked); in mremap_to()
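mremap_to() is the MREMAP_FIXED path: the caller chose new_addr, so get_unmapped_area() (lines 435-436) is asked to validate that exact spot rather than pick one. Note how the file offset is re-derived for the sub-range being moved, which keeps a shared mapping coherent after the move:

	/* Worked example (made-up numbers): vm_start = 0x7f0000000000,
	 * vm_pgoff = 16, addr = 0x7f0000003000.  Then
	 *   pgoff = 16 + (0x3000 >> PAGE_SHIFT) = 16 + 3 = 19,
	 * i.e. the moved range still maps the file from offset 19 * 4096. */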
451 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) in vma_expandable() argument
453 unsigned long end = vma->vm_end + delta; in vma_expandable()
454 if (end < vma->vm_end) /* overflow */ in vma_expandable()
456 if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */ in vma_expandable()
458 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, in vma_expandable()
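vma_expandable() answers whether the VMA can grow in place by delta bytes: no address-space wraparound (line 454), no neighbouring VMA in the way (line 456), and finally line 458 asks get_unmapped_area() whether a fixed reservation of the enlarged range would be legal for this architecture and layout. The truncated call roughly completes as (a reconstruction):

	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;	/* layout rejected the enlarged range */
	return 1;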
476 struct vm_area_struct *vma; in SYSCALL_DEFINE5() local
525 vma = vma_to_resize(addr, old_len, new_len, &charged); in SYSCALL_DEFINE5()
526 if (IS_ERR(vma)) { in SYSCALL_DEFINE5()
527 ret = PTR_ERR(vma); in SYSCALL_DEFINE5()
533 if (old_len == vma->vm_end - addr) { in SYSCALL_DEFINE5()
535 if (vma_expandable(vma, new_len - old_len)) { in SYSCALL_DEFINE5()
538 if (vma_adjust(vma, vma->vm_start, addr + new_len, in SYSCALL_DEFINE5()
539 vma->vm_pgoff, NULL)) { in SYSCALL_DEFINE5()
544 vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages); in SYSCALL_DEFINE5()
545 if (vma->vm_flags & VM_LOCKED) { in SYSCALL_DEFINE5()
562 if (vma->vm_flags & VM_MAYSHARE) in SYSCALL_DEFINE5()
565 new_addr = get_unmapped_area(vma->vm_file, 0, new_len, in SYSCALL_DEFINE5()
566 vma->vm_pgoff + in SYSCALL_DEFINE5()
567 ((addr - vma->vm_start) >> PAGE_SHIFT), in SYSCALL_DEFINE5()
574 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked); in SYSCALL_DEFINE5()
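The syscall body ties the pieces together: a pure shrink is just an unmap, an in-place grow goes through vma_expandable() and vma_adjust() (lines 535-539), and only otherwise, with MREMAP_MAYMOVE set, does the call fall through to move_vma() (line 574). A minimal userspace demonstration of both outcomes (assuming Linux and glibc's mremap()):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t old_len = 4096, new_len = 64 * 4096;
		char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) { perror("mmap"); return 1; }
		strcpy(p, "hello");

		/* The kernel grows the mapping in place when vma_expandable()
		 * allows it; otherwise MREMAP_MAYMOVE lets move_vma() relocate
		 * the pages (contents are preserved either way). */
		char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
		if (q == MAP_FAILED) { perror("mremap"); return 1; }
		printf("data=%s moved=%s\n", q, (q == p) ? "no" : "yes");

		munmap(q, new_len);
		return 0;
	}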