mpnt              133 arch/arm/mm/fault-armv.c 	struct vm_area_struct *mpnt;
mpnt              146 arch/arm/mm/fault-armv.c 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
mpnt              152 arch/arm/mm/fault-armv.c 		if (mpnt->vm_mm != mm || mpnt == vma)
mpnt              154 arch/arm/mm/fault-armv.c 		if (!(mpnt->vm_flags & VM_MAYSHARE))
mpnt              156 arch/arm/mm/fault-armv.c 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
mpnt              157 arch/arm/mm/fault-armv.c 		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
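The six arch/arm/mm/fault-armv.c hits above all land in the same function (make_coherent() in mainline trees of this vintage), which walks every other mapping of the aliased file page in the current mm and fixes up its PTE. A hedged sketch of that loop, reconstructed around the quoted lines; the signature, the pgoff computation, the flush_dcache_mmap_lock() bracketing and the tail after the loop are not quoted above and are assumptions from context:

	static void
	make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
		      unsigned long addr, pte_t *ptep, unsigned long pfn)
	{
		struct mm_struct *mm = vma->vm_mm;
		struct vm_area_struct *mpnt;
		unsigned long offset;
		pgoff_t pgoff;
		int aliases = 0;

		/* file page offset of 'addr' within vma (reconstructed) */
		pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
			/* only other shared mappings in this mm can alias the page;
			 * skip the VMA we are already fixing up */
			if (mpnt->vm_mm != mm || mpnt == vma)
				continue;
			if (!(mpnt->vm_flags & VM_MAYSHARE))
				continue;
			/* user virtual address of the page inside mpnt */
			offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
			aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
		}
		flush_dcache_mmap_unlock(mapping);

		/* reconstructed tail: if aliases were found, repair the faulting
		 * PTE too, otherwise a plain cache flush is enough */
		if (aliases)
			do_adjust_pte(vma, addr, pfn, ptep);
		else
			flush_cache_page(vma, addr, pfn);
	}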
mpnt              240 arch/arm/mm/flush.c 	struct vm_area_struct *mpnt;
mpnt              252 arch/arm/mm/flush.c 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
mpnt              258 arch/arm/mm/flush.c 		if (mpnt->vm_mm != mm)
mpnt              260 arch/arm/mm/flush.c 		if (!(mpnt->vm_flags & VM_MAYSHARE))
mpnt              262 arch/arm/mm/flush.c 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
mpnt              263 arch/arm/mm/flush.c 		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
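The arch/arm/mm/flush.c hits use the identical i_mmap walk, but for write-back rather than PTE repair: every user-space alias of the page in the current mm is flushed. In mainline this is __flush_dcache_aliases(); the nios2 flush_aliases() hits listed next are essentially the same loop. A sketch, with the signature, the page->index pgoff and the locking reconstructed rather than quoted:

	static void __flush_dcache_aliases(struct address_space *mapping,
					   struct page *page)
	{
		struct mm_struct *mm = current->active_mm;
		struct vm_area_struct *mpnt;
		pgoff_t pgoff = page->index;	/* offset of the page in its file */

		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
			unsigned long offset;

			/* aliases only matter within the currently active mm */
			if (mpnt->vm_mm != mm)
				continue;
			if (!(mpnt->vm_flags & VM_MAYSHARE))
				continue;
			/* translate file offset back to a user virtual address */
			offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
			flush_cache_page(mpnt, mpnt->vm_start + offset,
					 page_to_pfn(page));
		}
		flush_dcache_mmap_unlock(mapping);
	}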
mpnt               76 arch/nios2/mm/cacheflush.c 	struct vm_area_struct *mpnt;
mpnt               82 arch/nios2/mm/cacheflush.c 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
mpnt               85 arch/nios2/mm/cacheflush.c 		if (mpnt->vm_mm != mm)
mpnt               87 arch/nios2/mm/cacheflush.c 		if (!(mpnt->vm_flags & VM_MAYSHARE))
mpnt               90 arch/nios2/mm/cacheflush.c 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
mpnt               91 arch/nios2/mm/cacheflush.c 		flush_cache_page(mpnt, mpnt->vm_start + offset,
mpnt              328 arch/parisc/kernel/cache.c 	struct vm_area_struct *mpnt;
mpnt              351 arch/parisc/kernel/cache.c 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
mpnt              352 arch/parisc/kernel/cache.c 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
mpnt              353 arch/parisc/kernel/cache.c 		addr = mpnt->vm_start + offset;
mpnt              364 arch/parisc/kernel/cache.c 		flush_tlb_page(mpnt, addr);
mpnt              367 arch/parisc/kernel/cache.c 			__flush_cache_page(mpnt, addr, page_to_phys(page));
mpnt              369 arch/parisc/kernel/cache.c 				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
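The parisc hits sit in its flush_dcache_page(). parisc lays out all mappings of a file congruently, so one flush per cache colour is normally enough, and the INEQUIVALENT ALIASES printk only fires when two mappings of the same page disagree on colour. A hedged sketch of the walk around the quoted lines; the SHM_COLOUR comparison and the old_addr bookkeeping are reconstructed from context:

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* kill the TLB entry so the CPU cannot keep speculating the page */
		flush_tlb_page(mpnt, addr);

		/* flush once per cache colour; congruent aliases share the flush
		 * (reconstructed) */
		if (old_addr == 0 ||
		    (old_addr & (SHM_COLOUR - 1)) != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
				       old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);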
mpnt               92 drivers/oprofile/buffer_sync.c 	struct vm_area_struct *mpnt;
mpnt               96 drivers/oprofile/buffer_sync.c 	mpnt = find_vma(mm, addr);
mpnt               97 drivers/oprofile/buffer_sync.c 	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
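The drivers/oprofile/buffer_sync.c hits (the driver has since been removed from mainline) are a plainer use of the name: mpnt is simply the VMA returned by find_vma() in the munmap notifier, used to decide whether an executable file mapping is about to go away and the per-CPU sample buffer should be drained first. A sketch assuming the usual shape of munmap_notify(); the lock primitive varies by kernel version (down_read(&mm->mmap_sem) on older trees, mmap_read_lock() on newer ones):

	static int munmap_notify(struct notifier_block *self, unsigned long val,
				 void *data)
	{
		unsigned long addr = (unsigned long)data;
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *mpnt;

		mmap_read_lock(mm);
		mpnt = find_vma(mm, addr);
		if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
			mmap_read_unlock(mm);
			/* flush pending samples before the mapping disappears */
			sync_buffer(raw_smp_processor_id());
			return 0;
		}
		mmap_read_unlock(mm);
		return 0;
	}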
mpnt              481 kernel/fork.c  	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
mpnt              518 kernel/fork.c  	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
mpnt              521 kernel/fork.c  		if (mpnt->vm_flags & VM_DONTCOPY) {
mpnt              522 kernel/fork.c  			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
mpnt              534 kernel/fork.c  		if (mpnt->vm_flags & VM_ACCOUNT) {
mpnt              535 kernel/fork.c  			unsigned long len = vma_pages(mpnt);
mpnt              541 kernel/fork.c  		tmp = vm_area_dup(mpnt);
mpnt              544 kernel/fork.c  		retval = vma_dup_policy(mpnt, tmp);
mpnt              556 kernel/fork.c  		} else if (anon_vma_fork(tmp, mpnt))
mpnt              573 kernel/fork.c  			vma_interval_tree_insert_after(tmp, mpnt,
mpnt              601 kernel/fork.c  			retval = copy_page_range(mm, oldmm, mpnt);
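The kernel/fork.c hits are the oldest use of the mpnt name: dup_mmap() iterates the parent's VMA list at fork() and clones each entry into the child. A heavily condensed sketch of that loop built around the quoted lines; the error unwinding, userfaultfd and VM_WIPEONFORK handling, mm->mmap list linkage via pprev, and the security/accounting checks are abbreviated or assumed:

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			/* not inherited: just drop its accounting in the child */
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);
			/* charge the child for the committed pages (abbreviated) */
			charge = len;
		}
		tmp = vm_area_dup(mpnt);		/* copy the VMA itself */
		if (!tmp)
			goto fail_nomem;
		retval = vma_dup_policy(mpnt, tmp);	/* clone the NUMA policy */
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		if (anon_vma_fork(tmp, mpnt))		/* link the child anon_vma chain */
			goto fail_nomem_anon_vma_fork;
		file = tmp->vm_file;
		if (file) {
			/* make the child mapping visible to i_mmap walkers,
			 * directly after the parent's entry */
			i_mmap_lock_write(file->f_mapping);
			vma_interval_tree_insert_after(tmp, mpnt,
					&file->f_mapping->i_mmap);
			i_mmap_unlock_write(file->f_mapping);
		}
		/* finally duplicate the page tables covering this range */
		retval = copy_page_range(mm, oldmm, mpnt);
		if (retval)
			goto out;
	}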