Searched refs:vm_mm (Results 1 – 154 of 154) sorted by relevance

/linux-4.4.14/arch/mips/mm/
tlb-r3k.c:84 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
164 if (cpu_context(cpu, vma->vm_mm) != 0) { in local_flush_tlb_page()
169 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); in local_flush_tlb_page()
171 newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; in local_flush_tlb_page()
199 if (current->active_mm != vma->vm_mm) in __update_tlb()
205 if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { in __update_tlb()
207 (cpu_context(cpu, vma->vm_mm)), pid); in __update_tlb()
tlb-r8k.c:64 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
155 if (!cpu_context(cpu, vma->vm_mm)) in local_flush_tlb_page()
158 newpid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page()
194 if (current->active_mm != vma->vm_mm) in __update_tlb()
203 pgdp = pgd_offset(vma->vm_mm, address); in __update_tlb()
tlb-r4k.c:120 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
215 if (cpu_context(cpu, vma->vm_mm) != 0) { in local_flush_tlb_page()
219 newpid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page()
297 if (current->active_mm != vma->vm_mm) in __update_tlb()
306 pgdp = pgd_offset(vma->vm_mm, address); in __update_tlb()
pgtable-64.c:73 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
c-tx39.c:161 if (!(cpu_context(smp_processor_id(), vma->vm_mm))) in tx39_flush_cache_range()
170 struct mm_struct *mm = vma->vm_mm; in tx39_flush_cache_page()
c-r3k.c:241 struct mm_struct *mm = vma->vm_mm; in r3k_flush_cache_page()
c-octeon.c:84 mask = *mm_cpumask(vma->vm_mm); in octeon_flush_icache_all_cores()
c-r4k.c:492 if (!(has_valid_asid(vma->vm_mm))) in local_r4k_flush_cache_range()
554 struct mm_struct *mm = vma->vm_mm; in local_r4k_flush_cache_page()
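
Note: a minimal sketch (not code from the tree) of the guard that recurs in the __update_tlb() hits above: TLB preload helpers bail out unless the vma's address space is the one currently live on this CPU. The function name example_update_tlb is hypothetical.

    #include <linux/mm.h>
    #include <linux/sched.h>

    static void example_update_tlb(struct vm_area_struct *vma,
                                   unsigned long address, pte_t pte)
    {
            /* Preloading a TLB entry only makes sense for the active context. */
            if (current->active_mm != vma->vm_mm)
                    return;
            /* ... walk pgd/pmd/pte for 'address' and write the hardware entry ... */
    }
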
/linux-4.4.14/mm/
pgtable-generic.c:53 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
76 struct mm_struct *mm = (vma)->vm_mm; in ptep_clear_flush()
109 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_set_access_flags()
136 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_huge_clear_flush()
148 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
195 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry)); in pmdp_invalidate()
212 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_collapse_flush()
mprotect.c:49 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
51 pmdl = pmd_lock(vma->vm_mm, pmd); in lock_pte_protection()
57 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
66 struct mm_struct *mm = vma->vm_mm; in change_pte_range()
141 struct mm_struct *mm = vma->vm_mm; in change_pmd_range()
217 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
261 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
mremap.c:96 struct mm_struct *mm = vma->vm_mm; in move_ptes()
179 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
188 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables()
191 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables()
216 if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma, in move_page_tables()
231 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
240 struct mm_struct *mm = vma->vm_mm; in move_vma()
vmacache.c:62 if (vmacache_valid_mm(newvma->vm_mm)) in vmacache_update()
100 if (WARN_ON_ONCE(vma->vm_mm != mm)) in vmacache_find()
huge_memory.c:875 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pmd()
1277 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd()
1505 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
1557 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
1601 *ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
1675 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_splitting()
1827 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_map()
2006 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
2272 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
2294 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy()
[all …]
madvise.c:50 struct mm_struct *mm = vma->vm_mm; in madvise_behavior()
156 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
179 .mm = vma->vm_mm, in force_swapin_readahead()
rmap.c:176 struct mm_struct *mm = vma->vm_mm; in anon_vma_prepare()
812 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); in page_mapped_in_vma()
832 struct mm_struct *mm = vma->vm_mm; in page_referenced_one()
912 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
979 struct mm_struct *mm = vma->vm_mm; in page_mkclean_one()
1300 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
gup.c:52 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
64 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
186 struct mm_struct *mm = vma->vm_mm; in follow_page_mask()
298 struct mm_struct *mm = vma->vm_mm; in faultin_page()
886 struct mm_struct *mm = vma->vm_mm; in populate_vma_page_range()
memory.c:650 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
1301 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1377 struct mm_struct *mm = vma->vm_mm; in unmap_vmas()
1397 struct mm_struct *mm = vma->vm_mm; in zap_page_range()
1423 struct mm_struct *mm = vma->vm_mm; in zap_page_range_single()
1484 struct mm_struct *mm = vma->vm_mm; in insert_page()
1551 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); in vm_insert_page()
1562 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
1740 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range()
2860 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_set_pte()
[all …]
ksm.c:374 ret = handle_mm_fault(vma->vm_mm, vma, addr, in break_ksm()
689 if (ksm_test_exit(vma->vm_mm)) in unmerge_ksm_pages()
859 struct mm_struct *mm = vma->vm_mm; in write_protect_page()
933 struct mm_struct *mm = vma->vm_mm; in replace_page()
1760 struct mm_struct *mm = vma->vm_mm; in ksm_madvise()
1951 if ((rmap_item->mm == vma->vm_mm) == search_new_forks) in rmap_walk_ksm()
page_idle.c:57 struct mm_struct *mm = vma->vm_mm; in page_idle_clear_pte_refs_one()
debug.c:161 vma->vm_prev, vma->vm_mm, in dump_vma()
mincore.c:188 mincore_walk.mm = vma->vm_mm; in do_mincore()
mlock.c:371 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
501 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
mmap.c:730 struct mm_struct *mm = vma->vm_mm; in vma_adjust()
1603 vma->vm_mm = mm; in mmap_region()
2104 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
2133 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
2153 struct mm_struct *mm = vma->vm_mm; in expand_upwards()
2229 struct mm_struct *mm = vma->vm_mm; in expand_downwards()
2807 vma->vm_mm = mm; in do_brk()
2933 struct mm_struct *mm = vma->vm_mm; in copy_vma()
3085 vma->vm_mm = mm; in __install_special_mapping()
zsmalloc.c:283 enum zs_mapmode vm_mm; /* mapping mode */ member
1101 if (area->vm_mm == ZS_MM_WO) in __zs_map_object()
1126 if (area->vm_mm == ZS_MM_RO) in __zs_unmap_object()
1293 area->vm_mm = mm; in zs_map_object()
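
Note: the zsmalloc.c hits above match a different vm_mm: zsmalloc's mapping area has its own 'enum zs_mapmode vm_mm' (mapping mode) field, unrelated to the vm_area_struct back-pointer; it appears here only because the search is textual.
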
swapfile.c:1148 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) { in unuse_pte()
1153 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in unuse_pte()
1160 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); in unuse_pte()
1161 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); in unuse_pte()
1163 set_pte_at(vma->vm_mm, addr, pte, in unuse_pte()
1283 pgd = pgd_offset(vma->vm_mm, addr); in unuse_vma()
nommu.c:701 struct mm_struct *mm = vma->vm_mm; in protect_vma()
726 vma->vm_mm = mm; in add_vma_to_mm()
787 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm()
memory-failure.c:430 if (vma->vm_mm == t->mm) in collect_procs_anon()
465 if (vma->vm_mm == t->mm) in collect_procs_file()
hugetlb.c:3161 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range()
3281 mm = vma->vm_mm; in unmap_hugepage_range()
3934 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection()
4210 spte = huge_pte_offset(svma->vm_mm, saddr); in huge_pmd_share()
filemap.c:1961 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in filemap_fault()
1969 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { in filemap_fault()
mempolicy.c:733 err = split_vma(vma->vm_mm, vma, vmstart, 1); in mbind_range()
738 err = split_vma(vma->vm_mm, vma, vmend, 0); in mbind_range()
migrate.c:109 struct mm_struct *mm = vma->vm_mm; in remove_migration_pte()
shmem.c:1328 up_read(&vma->vm_mm->mmap_sem); in shmem_fault()
1359 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in shmem_fault()
memcontrol.c:4726 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range()
4947 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_move_charge_pte_range()
/linux-4.4.14/arch/sh/mm/
tlbflush_32.c:19 if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { in local_flush_tlb_page()
24 asid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page()
28 if (vma->vm_mm != current->mm) { in local_flush_tlb_page()
42 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
cache-sh4.c:223 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) in sh4_flush_cache_page()
226 pgd = pgd_offset(vma->vm_mm, address); in sh4_flush_cache_page()
235 if ((vma->vm_mm == current->active_mm)) in sh4_flush_cache_page()
286 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) in sh4_flush_cache_range()
tlbflush_64.c:71 if (vma->vm_mm) { in local_flush_tlb_page()
88 mm = vma->vm_mm; in local_flush_tlb_range()
cache-sh5.c:113 vma_asid = cpu_asid(cpu, vma->vm_mm); in sh64_icache_inv_user_page()
535 sh64_dcache_purge_user_range(vma->vm_mm, start, end); in sh5_flush_cache_range()
536 sh64_icache_inv_user_page_range(vma->vm_mm, start, end); in sh5_flush_cache_range()
tlb-sh3.c:36 if (vma && current->active_mm != vma->vm_mm) in __update_tlb()
tlb-pteaex.c:25 if (vma && current->active_mm != vma->vm_mm) in __update_tlb()
tlb-sh4.c:24 if (vma && current->active_mm != vma->vm_mm) in __update_tlb()
/linux-4.4.14/arch/arm/mm/
fault-armv.c:61 set_pte_at(vma->vm_mm, address, ptep, entry); in do_adjust_pte()
102 pgd = pgd_offset(vma->vm_mm, address); in adjust_pte()
119 ptl = pte_lockptr(vma->vm_mm, pmd); in adjust_pte()
135 struct mm_struct *mm = vma->vm_mm; in make_coherent()
155 if (mpnt->vm_mm != mm || mpnt == vma) in make_coherent()
tlb-v7.S:36 vma_vm_mm r3, r2 @ get vma->vm_mm
37 mmid r3, r3 @ get vm_mm->context.id
tlb-v6.S:37 vma_vm_mm r3, r2 @ get vma->vm_mm
39 mmid r3, r3 @ get vm_mm->context.id
flush.c:166 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) in flush_ptrace_access()
262 if (mpnt->vm_mm != mm) in __flush_dcache_aliases()
426 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
/linux-4.4.14/arch/arc/mm/
tlb.c:337 local_flush_tlb_mm(vma->vm_mm); in local_flush_tlb_range()
350 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_range()
352 tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_range()
407 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_page()
408 tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_page()
471 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1); in flush_tlb_page()
483 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1); in flush_tlb_range()
496 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1); in flush_pmd_tlb_range()
546 if (current->active_mm != vma->vm_mm) in create_tlb()
551 tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr); in create_tlb()
[all …]
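
Note: a sketch of the SMP shootdown pattern in the flush_tlb_page()/flush_tlb_range() hits above: the IPI is sent only to CPUs recorded in mm_cpumask(vma->vm_mm), i.e. CPUs that have ever run this address space. The tlb_args/ipi_flush_tlb_page names follow the arc and arm entries; treat this as illustrative, not the exact tree code.

    #include <linux/cpumask.h>
    #include <linux/mm.h>
    #include <linux/smp.h>

    struct tlb_args {
            struct vm_area_struct *ta_vma;
            unsigned long ta_start;
    };

    static void ipi_flush_tlb_page(void *arg)
    {
            struct tlb_args *ta = arg;

            /* Runs on each targeted CPU: do the arch-local invalidate. */
            local_flush_tlb_page(ta->ta_vma, ta->ta_start);
    }

    void example_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
    {
            struct tlb_args ta = { .ta_vma = vma, .ta_start = uaddr };

            /* Only CPUs in the mm's cpumask can hold stale entries for it. */
            on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
    }
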
/linux-4.4.14/arch/arm/kernel/
smp_tlb.c:162 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, in flush_tlb_page()
166 broadcast_tlb_mm_a15_erratum(vma->vm_mm); in flush_tlb_page()
188 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, in flush_tlb_range()
192 broadcast_tlb_mm_a15_erratum(vma->vm_mm); in flush_tlb_range()
asm-offsets.c:127 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); in main()
/linux-4.4.14/arch/arm64/include/asm/
tlbflush.h:94 unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48); in flush_tlb_page()
111 unsigned long asid = ASID(vma->vm_mm) << 48; in __flush_tlb_range()
115 flush_tlb_mm(vma->vm_mm); in __flush_tlb_range()
tlb.h:40 struct vm_area_struct vma = { .vm_mm = tlb->mm, }; in tlb_flush()
/linux-4.4.14/include/linux/
mmu_notifier.h:316 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
329 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
342 __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
353 __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
361 struct mm_struct *___mm = (__vma)->vm_mm; \
374 struct mm_struct *___mm = (__vma)->vm_mm; \
khugepaged.h:42 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) in khugepaged_enter()
46 if (__khugepaged_enter(vma->vm_mm)) in khugepaged_enter()
mm_types.h:319 struct mm_struct *vm_mm; /* The address space we belong to. */ member
huge_mm.h:137 VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); in pmd_trans_huge_lock()
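
Note: mm_types.h:319 above is the definition that every other hit dereferences. A minimal sketch of the idiom (the helper name is hypothetical):

    #include <linux/mm_types.h>

    static void example_use_vma(struct vm_area_struct *vma)
    {
            struct mm_struct *mm = vma->vm_mm;      /* owning address space */

            /* vm_mm can be NULL for special mappings; compare the gate_vma
             * and "if (vma->vm_mm)" hits elsewhere in these results. */
            if (!mm)
                    return;
            /* ... operate on mm's page tables, counters, or cpumask ... */
    }
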
/linux-4.4.14/arch/s390/include/asm/
hugetlb.h:50 huge_ptep_get_and_clear(vma->vm_mm, address, ptep); in huge_ptep_clear_flush()
59 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); in huge_ptep_set_access_flags()
60 set_huge_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
pgtable.h:1080 if (mm_has_pgste(vma->vm_mm)) { in ptep_test_and_clear_young()
1082 pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste); in ptep_test_and_clear_young()
1086 ptep_flush_direct(vma->vm_mm, addr, ptep); in ptep_test_and_clear_young()
1090 if (mm_has_pgste(vma->vm_mm)) { in ptep_test_and_clear_young()
1091 pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm); in ptep_test_and_clear_young()
1188 if (mm_has_pgste(vma->vm_mm)) { in ptep_clear_flush()
1190 pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); in ptep_clear_flush()
1194 ptep_flush_direct(vma->vm_mm, address, ptep); in ptep_clear_flush()
1197 if (mm_has_pgste(vma->vm_mm)) { in ptep_clear_flush()
1201 pgste = pgste_update_all(&pte, pgste, vma->vm_mm); in ptep_clear_flush()
[all …]
tlbflush.h:192 __tlb_flush_mm_lazy(vma->vm_mm); in flush_tlb_range()
/linux-4.4.14/arch/avr32/mm/
tlb.c:109 if (vma && current->active_mm != vma->vm_mm) in update_mmu_cache()
152 if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) { in flush_tlb_page()
156 asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK; in flush_tlb_page()
160 if (vma->vm_mm != current->mm) { in flush_tlb_page()
176 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
/linux-4.4.14/arch/m68k/include/asm/
tlbflush.h:86 if (vma->vm_mm == current->active_mm) { in flush_tlb_page()
97 if (vma->vm_mm == current->active_mm) in flush_tlb_range()
177 sun3_put_context(vma->vm_mm->context); in flush_tlb_page()
194 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
cacheflush_mm.h:209 if (vma->vm_mm == current->mm) in flush_cache_range()
215 if (vma->vm_mm == current->mm) in flush_cache_page()
/linux-4.4.14/arch/score/mm/
tlb-score.c:83 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
161 if (vma && vma->vm_mm->context != 0) { in local_flush_tlb_page()
164 unsigned long vma_ASID = vma->vm_mm->context; in local_flush_tlb_page()
221 if (current->active_mm != vma->vm_mm) in __update_tlb()
cache.c:175 struct mm_struct *mm = vma->vm_mm; in flush_cache_range()
/linux-4.4.14/arch/powerpc/mm/
tlb_hash32.c:150 flush_range(mp->vm_mm, mp->vm_start, mp->vm_end); in flush_tlb_mm()
163 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; in flush_tlb_page()
178 flush_range(vma->vm_mm, start, end); in flush_tlb_range()
pgtable_64.c:500 assert_spin_locked(&vma->vm_mm->page_table_lock); in pmdp_set_access_flags()
582 flush_tlb_pmd_range(vma->vm_mm, &pmd, address); in pmdp_collapse_flush()
589 return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp); in pmdp_test_and_clear_young()
603 return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp); in pmdp_clear_flush_young()
619 assert_spin_locked(&vma->vm_mm->page_table_lock); in pmdp_splitting_flush()
646 hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old); in pmdp_splitting_flush()
718 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); in pmdp_invalidate()
hugetlbpage-book3e.c:94 mm = vma->vm_mm; in book3e_hugetlb_preload()
152 __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0); in flush_hugetlb_page()
tlb_nohash.c:206 __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, in local_flush_tlb_page()
334 __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, in flush_tlb_page()
378 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
pgtable.c:208 assert_pte_locked(vma->vm_mm, address); in ptep_set_access_flags()
mem.c:515 hash_preload(vma->vm_mm, address, access, trap); in update_mmu_cache()
hugetlbpage.c:794 unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start); in vma_mmu_pagesize()
/linux-4.4.14/arch/metag/include/asm/
tlbflush.h:55 flush_tlb_mm(vma->vm_mm); in flush_tlb_page()
61 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
cacheflush.h:52 flush_cache_mm(vma->vm_mm); in flush_cache_range()
58 flush_cache_mm(vma->vm_mm); in flush_cache_page()
pgtable.h:258 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
/linux-4.4.14/arch/hexagon/mm/
vm_tlb.c:40 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
80 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
/linux-4.4.14/arch/frv/include/asm/
tlbflush.h:45 __flush_tlb_range((vma)->vm_mm->context.id, start, end); \
52 __flush_tlb_page((vma)->vm_mm->context.id, addr); \
/linux-4.4.14/arch/ia64/include/asm/
tlbflush.h:80 if (vma->vm_mm == current->active_mm) in flush_tlb_page()
83 vma->vm_mm->context = 0; in flush_tlb_page()
tlb.h:122 vma.vm_mm = tlb->mm; in ia64_tlb_flush_mmu_tlbonly()
pgtable.h:419 set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); in ptep_test_and_clear_young()
537 set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry); \
/linux-4.4.14/arch/parisc/include/asm/
tlbflush.h:34 __flush_tlb_range((vma)->vm_mm->context, start, end)
83 sid = vma->vm_mm->context; in flush_tlb_page()
pgtable.h:482 purge_tlb_entries(vma->vm_mm, addr); in ptep_test_and_clear_young()
/linux-4.4.14/arch/mips/kernel/
smp.c:356 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
406 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { in flush_tlb_page()
417 if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) in flush_tlb_page()
418 cpu_context(cpu, vma->vm_mm) = 0; in flush_tlb_page()
/linux-4.4.14/arch/sh/kernel/vsyscall/
vsyscall.c:90 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) in arch_vma_name()
/linux-4.4.14/arch/hexagon/kernel/
vdso.c:97 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) in arch_vma_name()
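
Note: the vsyscall.c/vdso.c hits here and in later sections share one pattern; a sketch, with the context.vdso field name taken from the sh and hexagon entries (other arches store the base elsewhere, e.g. context.vdso_base on powerpc and s390):

    static const char *example_arch_vma_name(struct vm_area_struct *vma)
    {
            /* The vdso vma is recognised by the base address the mm
             * recorded when the mapping was set up. */
            if (vma->vm_mm &&
                vma->vm_start == (unsigned long)vma->vm_mm->context.vdso)
                    return "[vdso]";
            return NULL;
    }
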
/linux-4.4.14/arch/x86/include/asm/
tlbflush.h:194 if (vma->vm_mm == current->active_mm) in flush_tlb_page()
201 if (vma->vm_mm == current->active_mm) in flush_tlb_range()
238 flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
/linux-4.4.14/arch/m32r/mm/
fault.c:358 if (vma && current->active_mm != vma->vm_mm) in update_mmu_cache()
460 if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) { in local_flush_tlb_page()
465 page |= (mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK); in local_flush_tlb_page()
479 mm = vma->vm_mm; in local_flush_tlb_range()
/linux-4.4.14/arch/mn10300/include/asm/
tlbflush.h:111 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
138 #define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
/linux-4.4.14/arch/alpha/include/asm/
tlbflush.h:119 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
133 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
cacheflush.h:54 struct mm_struct *mm = vma->vm_mm; in flush_icache_user_range()
/linux-4.4.14/arch/sparc/include/asm/
cacheflush_64.h:24 flush_cache_mm((vma)->vm_mm)
26 flush_cache_mm((vma)->vm_mm)
hugetlb.h:70 set_huge_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
pgtable_32.h:431 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
/linux-4.4.14/arch/sh/kernel/
smp.c:391 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
437 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || in flush_tlb_page()
438 (current->mm != vma->vm_mm)) { in flush_tlb_page()
448 cpu_context(i, vma->vm_mm) = 0; in flush_tlb_page()
/linux-4.4.14/arch/tile/kernel/
tlb.c:64 flush_tlb_page_mm(vma, vma->vm_mm, va); in flush_tlb_page()
72 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
vdso.c:115 if (vma->vm_mm && vma->vm_start == VDSO_BASE) in arch_vma_name()
/linux-4.4.14/arch/x86/mm/
pgtable.c:417 pte_update_defer(vma->vm_mm, address, ptep); in ptep_set_access_flags()
434 pmd_update_defer(vma->vm_mm, address, pmdp); in pmdp_set_access_flags()
457 pte_update(vma->vm_mm, addr, ptep); in ptep_test_and_clear_young()
473 pmd_update(vma->vm_mm, addr, pmdp); in pmdp_test_and_clear_young()
521 pmd_update(vma->vm_mm, address, pmdp); in pmdp_splitting_flush()
tlb.c:247 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
/linux-4.4.14/arch/x86/um/
mem_64.c:6 if (vma->vm_mm && vma->vm_start == um_vdso_addr) in arch_vma_name()
mem_32.c:19 gate_vma.vm_mm = NULL; in gate_vma_init()
/linux-4.4.14/arch/cris/include/asm/
tlbflush.h:31 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
/linux-4.4.14/arch/arm/include/asm/
tlbflush.h:424 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __local_flush_tlb_page()
427 cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { in __local_flush_tlb_page()
445 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in local_flush_tlb_page()
462 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __flush_tlb_page()
cacheflush.h:230 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_range()
240 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_page()
/linux-4.4.14/drivers/xen/
xlate_mmu.c:135 set_pte_at(info->vma->vm_mm, addr, ptep, pte); in remap_pte_fn()
165 err = apply_to_page_range(vma->vm_mm, addr, range, in xen_xlate_remap_gfn_array()
gntdev.c:798 if (use_ptemod && priv->mm != vma->vm_mm) { in gntdev_mmap()
830 err = apply_to_page_range(vma->vm_mm, vma->vm_start, in gntdev_mmap()
862 apply_to_page_range(vma->vm_mm, vma->vm_start, in gntdev_mmap()
privcmd.c:643 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT, in privcmd_vma_range_is_mapped()
/linux-4.4.14/arch/parisc/kernel/
cache.c:578 BUG_ON(!vma->vm_mm->context); in flush_cache_range()
585 if (vma->vm_mm->context == mfsp(3)) { in flush_cache_range()
592 pgd = vma->vm_mm->pgd; in flush_cache_range()
607 BUG_ON(!vma->vm_mm->context); in flush_cache_page()
/linux-4.4.14/arch/sparc/kernel/
asm-offsets.c:53 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); in foo()
/linux-4.4.14/arch/mn10300/mm/
mmu-context.c:43 cnx = mm_context(vma->vm_mm); in update_mmu_cache()
tlb-smp.c:184 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
/linux-4.4.14/arch/xtensa/mm/
tlb.c:92 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
131 struct mm_struct* mm = vma->vm_mm; in local_flush_tlb_page()
/linux-4.4.14/arch/s390/kernel/
vdso.c:249 if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base) in arch_vma_name()
/linux-4.4.14/fs/
dax.c:381 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { in __dax_fault()
411 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in __dax_fault()
614 ptl = pmd_lock(vma->vm_mm, pmd); in __dax_pmd_fault()
622 set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry); in __dax_pmd_fault()
649 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in __dax_pmd_fault()
exec.c:266 vma->vm_mm = mm; in __bprm_mm_init()
584 struct mm_struct *mm = vma->vm_mm; in shift_arg_pages()
userfaultfd.c:263 struct mm_struct *mm = vma->vm_mm; in handle_userfault()
aio.c:314 struct mm_struct *mm = vma->vm_mm; in aio_ring_mremap()
binfmt_elf.c:1205 if (vma == get_gate_vma(vma->vm_mm)) in always_dump_vma()
/linux-4.4.14/fs/proc/
task_mmu.c:273 struct mm_struct *mm = vma->vm_mm; in show_map_vma()
565 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
669 .mm = vma->vm_mm, in show_smap()
795 ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte); in clear_soft_dirty()
798 ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent); in clear_soft_dirty()
801 set_pte_at(vma->vm_mm, addr, pte, ptent); in clear_soft_dirty()
815 pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); in clear_soft_dirty_pmd()
823 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
861 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
1537 struct mm_struct *mm = vma->vm_mm; in show_numa_map()
task_nommu.c:151 struct mm_struct *mm = vma->vm_mm; in nommu_vma_show()
vmcore.c:389 do_munmap(vma->vm_mm, from, len); in remap_oldmem_pfn_checked()
481 do_munmap(vma->vm_mm, vma->vm_start, len); in mmap_vmcore()
/linux-4.4.14/kernel/events/
uprobes.c:155 struct mm_struct *mm = vma->vm_mm; in __replace_page()
164 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg); in __replace_page()
741 if (!atomic_inc_not_zero(&vma->vm_mm->mm_users)) in build_map_info()
749 info->mm = vma->vm_mm; in build_map_info()
1077 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { in uprobe_mmap()
1079 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); in uprobe_mmap()
1115 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ in uprobe_munmap()
1118 if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || in uprobe_munmap()
1119 test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) in uprobe_munmap()
1123 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); in uprobe_munmap()
core.c:4647 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; in perf_mmap_close()
4717 vma->vm_mm->pinned_vm -= mmap_locked; in perf_mmap_close()
4865 locked = vma->vm_mm->pinned_vm + extra; in perf_mmap()
4906 vma->vm_mm->pinned_vm += extra; in perf_mmap()
6080 if (vma->vm_start <= vma->vm_mm->start_brk && in perf_event_mmap_event()
6081 vma->vm_end >= vma->vm_mm->brk) { in perf_event_mmap_event()
6085 if (vma->vm_start <= vma->vm_mm->start_stack && in perf_event_mmap_event()
6086 vma->vm_end >= vma->vm_mm->start_stack) { in perf_event_mmap_event()
/linux-4.4.14/arch/sparc/mm/
srmmu.c:577 if ((ctx1 = vma->vm_mm->context) != -1) {
1232 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_cache_range()
1240 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_cache_page()
1283 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_tlb_range()
1290 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_tlb_page()
1672 struct mm_struct *mm = vma->vm_mm; in smp_flush_cache_range()
1689 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_range()
1704 struct mm_struct *mm = vma->vm_mm; in smp_flush_cache_page()
1719 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_page()
tlb.c:214 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_invalidate()
init_64.c:340 mm = vma->vm_mm; in update_mmu_cache()
2774 mm = vma->vm_mm; in update_mmu_cache_pmd()
/linux-4.4.14/arch/unicore32/include/asm/
tlbflush.h:90 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { in local_flush_tlb_page()
/linux-4.4.14/arch/arm64/mm/
flush.c:111 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
/linux-4.4.14/fs/ncpfs/
mmap.c:93 mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT); in ncp_file_mmap_fault()
/linux-4.4.14/drivers/gpu/drm/ttm/
ttm_bo_vm.c:69 up_read(&vma->vm_mm->mmap_sem); in ttm_bo_vm_fault_idle()
117 up_read(&vma->vm_mm->mmap_sem); in ttm_bo_vm_fault()
/linux-4.4.14/arch/alpha/kernel/
smp.c:699 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
734 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
752 struct mm_struct *mm = vma->vm_mm; in flush_icache_user_range()
/linux-4.4.14/arch/m32r/kernel/
smp.c:295 smp_flush_tlb_mm(vma->vm_mm); in smp_flush_tlb_range()
317 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_page()
/linux-4.4.14/Documentation/
cachetlb.txt:58 modifications for the address space 'vma->vm_mm' in the range
77 address space is available via vma->vm_mm. Also, one may
83 page table modification for address space 'vma->vm_mm' for
86 'vma->vm_mm' for virtual address 'addr'.
96 "vma->vm_mm", in the software page tables.
172 entries in the cache for 'vma->vm_mm' for virtual addresses in
189 address space is available via vma->vm_mm. Also, one may
200 'vma->vm_mm' for virtual address 'addr' which translates
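
Note: a sketch of the contract the quoted cachetlb.txt lines describe for flush_tlb_page(): drop the translation for 'addr' in the address space 'vma->vm_mm'. mm_is_live(), mm_asid() and hw_invalidate_page() are hypothetical stand-ins for per-architecture code (compare the sh and avr32 entries above).

    extern bool mm_is_live(struct mm_struct *mm);                  /* hypothetical */
    extern unsigned long mm_asid(struct mm_struct *mm);            /* hypothetical */
    extern void hw_invalidate_page(unsigned long asid,
                                   unsigned long vaddr);           /* hypothetical */

    static void example_local_flush_tlb_page(struct vm_area_struct *vma,
                                             unsigned long addr)
    {
            struct mm_struct *mm = vma->vm_mm;  /* the address space to flush */

            if (!mm_is_live(mm))    /* no context/ASID allocated: nothing cached */
                    return;
            hw_invalidate_page(mm_asid(mm), addr & PAGE_MASK);
    }
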
/linux-4.4.14/arch/mips/include/asm/
hugetlb.h:98 set_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
/linux-4.4.14/arch/um/kernel/
tlb.c:399 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
520 if (vma->vm_mm == NULL) in flush_tlb_range()
522 else fix_range(vma->vm_mm, start, end, 0); in flush_tlb_range()
/linux-4.4.14/arch/ia64/mm/
init.c:118 vma->vm_mm = current->mm; in ia64_init_addr_space()
137 vma->vm_mm = current->mm; in ia64_init_addr_space()
278 gate_vma.vm_mm = NULL; in gate_vma_init()
tlb.c:304 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
/linux-4.4.14/arch/powerpc/include/asm/
hugetlb.h:135 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); in huge_ptep_clear_flush()
pgtable-ppc64.h:257 __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
292 int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
pgtable-ppc32.h:245 __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
/linux-4.4.14/arch/cris/arch-v10/mm/
tlb.c:101 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
/linux-4.4.14/arch/cris/arch-v32/mm/
tlb.c:130 page_id = vma->vm_mm->context.page_id; in __flush_tlb_page()
/linux-4.4.14/arch/parisc/mm/
hugetlbpage.c:182 __set_huge_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
/linux-4.4.14/include/asm-generic/
pgtable.h:58 set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); in ptep_test_and_clear_young()
74 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); in pmdp_test_and_clear_young()
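
Note: the two asm-generic hits come from the generic young-bit fallbacks; a close sketch of the pte variant (CONFIG guards and the pmd twin omitted): test the accessed bit and, if set, write back an "old" pte through the owning address space.

    static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                                unsigned long address,
                                                pte_t *ptep)
    {
            pte_t pte = *ptep;
            int r = 1;

            if (!pte_young(pte))
                    r = 0;
            else
                    /* Clear the accessed bit via the owning mm. */
                    set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
            return r;
    }
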
/linux-4.4.14/arch/unicore32/kernel/
asm-offsets.c:88 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); in main()
/linux-4.4.14/arch/nios2/mm/
tlb.c:97 unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context); in flush_tlb_range()
cacheflush.c:85 if (mpnt->vm_mm != mm) in flush_aliases()
/linux-4.4.14/arch/powerpc/kernel/
vdso.c:247 if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base) in arch_vma_name()
/linux-4.4.14/arch/arm64/kernel/
asm-offsets.c:65 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); in main()
/linux-4.4.14/drivers/misc/sgi-gru/
grufault.c:227 pgdp = pgd_offset(vma->vm_mm, vaddr); in atomic_pte_lookup()
/linux-4.4.14/arch/arm/mach-rpc/
ecard.c:240 vma.vm_mm = mm; in ecard_init_pgtables()
/linux-4.4.14/arch/s390/mm/
pgtable.c:1278 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_set_access_flags()
/linux-4.4.14/security/selinux/
hooks.c:3395 if (vma->vm_start >= vma->vm_mm->start_brk && in selinux_file_mprotect()
3396 vma->vm_end <= vma->vm_mm->brk) { in selinux_file_mprotect()
3399 vma->vm_start <= vma->vm_mm->start_stack && in selinux_file_mprotect()
3400 vma->vm_end >= vma->vm_mm->start_stack) { in selinux_file_mprotect()
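
Note: the perf_event core.c and SELinux hooks.c hits classify a vma by comparing it against its mm's brk and stack markers; a sketch (the helper name is hypothetical, the comparisons mirror the hits above):

    static const char *example_vma_kind(struct vm_area_struct *vma)
    {
            struct mm_struct *mm = vma->vm_mm;

            /* Heap: the vma overlaps [mm->start_brk, mm->brk]. */
            if (vma->vm_start <= mm->start_brk && vma->vm_end >= mm->brk)
                    return "[heap]";
            /* Stack: the vma covers mm->start_stack. */
            if (vma->vm_start <= mm->start_stack &&
                vma->vm_end >= mm->start_stack)
                    return "[stack]";
            return "";
    }
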
/linux-4.4.14/kernel/
fork.c:456 tmp->vm_mm = mm; in dup_mmap()
/linux-4.4.14/arch/ia64/kernel/
perfmon.c:2289 vma->vm_mm = mm; in pfm_smpl_buffer_alloc()
2335 vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, in pfm_smpl_buffer_alloc()
/linux-4.4.14/arch/x86/xen/
mmu.c:2849 err = apply_to_page_range(vma->vm_mm, addr, range, in do_remap_gfn()
/linux-4.4.14/Documentation/virtual/uml/
UserModeLinux-HOWTO.txt:3295 (gdb) call pgd_offset_proc(vma->vm_mm, address)
3307 $23 = {vm_mm = 0x507d2434, vm_start = 0, vm_end = 134512640,
3313 (gdb) p *vma.vm_mm
/linux-4.4.14/drivers/android/
binder.c:2920 proc->vma_vm_mm = vma->vm_mm; in binder_mmap()
/linux-4.4.14/kernel/sched/
fair.c:2231 if (!vma->vm_mm || in task_numa_work()