Searched refs:vm_mm (Results 1 – 156 of 156) sorted by relevance
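Nearly every hit below refers to the vm_mm back-pointer of struct vm_area_struct, declared in include/linux/mm_types.h: the struct mm_struct describing the address space a VMA belongs to. As a minimal sketch of the idiom these results share (vma_report_mm_users() is a hypothetical name, not a kernel function):

	/* Sketch only: fetch the owning mm from a VMA, as the hits below do. */
	#include <linux/mm_types.h>
	#include <linux/atomic.h>

	static int vma_report_mm_users(struct vm_area_struct *vma)
	{
		struct mm_struct *mm = vma->vm_mm;	/* owning address space */

		if (!mm)	/* special VMAs, e.g. gate_vma, have no mm */
			return 0;
		return atomic_read(&mm->mm_users);	/* cf. the flush_tlb_page() hits */
	}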

/linux-4.1.27/arch/mips/mm/
tlb-r3k.c 81 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
161 if (cpu_context(cpu, vma->vm_mm) != 0) { in local_flush_tlb_page()
166 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); in local_flush_tlb_page()
168 newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; in local_flush_tlb_page()
196 if (current->active_mm != vma->vm_mm) in __update_tlb()
202 if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { in __update_tlb()
204 (cpu_context(cpu, vma->vm_mm)), pid); in __update_tlb()
tlb-r8k.c 64 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
155 if (!cpu_context(cpu, vma->vm_mm)) in local_flush_tlb_page()
158 newpid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page()
194 if (current->active_mm != vma->vm_mm) in __update_tlb()
203 pgdp = pgd_offset(vma->vm_mm, address); in __update_tlb()
tlb-r4k.c 120 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
215 if (cpu_context(cpu, vma->vm_mm) != 0) { in local_flush_tlb_page()
219 newpid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page()
297 if (current->active_mm != vma->vm_mm) in __update_tlb()
306 pgdp = pgd_offset(vma->vm_mm, address); in __update_tlb()
pgtable-64.c 73 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
c-tx39.c 163 if (!(cpu_context(smp_processor_id(), vma->vm_mm))) in tx39_flush_cache_range()
172 struct mm_struct *mm = vma->vm_mm; in tx39_flush_cache_page()
c-r3k.c 241 struct mm_struct *mm = vma->vm_mm; in r3k_flush_cache_page()
c-octeon.c 84 mask = *mm_cpumask(vma->vm_mm); in octeon_flush_icache_all_cores()
c-r4k.c 492 if (!(has_valid_asid(vma->vm_mm))) in local_r4k_flush_cache_range()
554 struct mm_struct *mm = vma->vm_mm; in local_r4k_flush_cache_page()
/linux-4.1.27/arch/sh/mm/
tlbflush_32.c 19 if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { in local_flush_tlb_page()
24 asid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page()
28 if (vma->vm_mm != current->mm) { in local_flush_tlb_page()
42 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
cache-sh4.c 223 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) in sh4_flush_cache_page()
226 pgd = pgd_offset(vma->vm_mm, address); in sh4_flush_cache_page()
235 if ((vma->vm_mm == current->active_mm)) in sh4_flush_cache_page()
286 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) in sh4_flush_cache_range()
tlbflush_64.c 71 if (vma->vm_mm) { in local_flush_tlb_page()
88 mm = vma->vm_mm; in local_flush_tlb_range()
cache-sh5.c 113 vma_asid = cpu_asid(cpu, vma->vm_mm); in sh64_icache_inv_user_page()
535 sh64_dcache_purge_user_range(vma->vm_mm, start, end); in sh5_flush_cache_range()
536 sh64_icache_inv_user_page_range(vma->vm_mm, start, end); in sh5_flush_cache_range()
tlb-sh3.c 36 if (vma && current->active_mm != vma->vm_mm) in __update_tlb()
tlb-sh4.c 24 if (vma && current->active_mm != vma->vm_mm) in __update_tlb()
tlb-pteaex.c 25 if (vma && current->active_mm != vma->vm_mm) in __update_tlb()
/linux-4.1.27/mm/
pgtable-generic.c 53 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
69 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_set_access_flags()
113 struct mm_struct *mm = (vma)->vm_mm; in ptep_clear_flush()
129 pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_clear_flush()
143 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
196 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry)); in pmdp_invalidate()
mprotect.c 47 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
49 pmdl = pmd_lock(vma->vm_mm, pmd); in lock_pte_protection()
55 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
64 struct mm_struct *mm = vma->vm_mm; in change_pte_range()
139 struct mm_struct *mm = vma->vm_mm; in change_pmd_range()
215 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
259 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
mremap.c 95 struct mm_struct *mm = vma->vm_mm; in move_ptes()
178 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
187 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables()
190 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables()
215 if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma, in move_page_tables()
230 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
239 struct mm_struct *mm = vma->vm_mm; in move_vma()
vmacache.c 62 if (vmacache_valid_mm(newvma->vm_mm)) in vmacache_update()
100 if (WARN_ON_ONCE(vma->vm_mm != mm)) in vmacache_find()
rmap.c 171 struct mm_struct *mm = vma->vm_mm; in anon_vma_prepare()
706 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); in page_mapped_in_vma()
726 struct mm_struct *mm = vma->vm_mm; in page_referenced_one()
801 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
868 struct mm_struct *mm = vma->vm_mm; in page_mkclean_one()
1184 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1310 if (down_read_trylock(&vma->vm_mm->mmap_sem)) { in try_to_unmap_one()
1315 up_read(&vma->vm_mm->mmap_sem); in try_to_unmap_one()
huge_memory.c 1208 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd()
1436 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
1488 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
1532 *ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
1606 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_splitting()
1754 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_map()
1933 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
2197 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
2219 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy()
2229 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
[all …]
madvise.c 49 struct mm_struct *mm = vma->vm_mm; in madvise_behavior()
154 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
177 .mm = vma->vm_mm, in force_swapin_readahead()
mmap.c 729 struct mm_struct *mm = vma->vm_mm; in vma_adjust()
1593 vma->vm_mm = mm; in mmap_region()
2095 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
2124 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
2192 spin_lock(&vma->vm_mm->page_table_lock); in expand_upwards()
2199 vma->vm_mm->highest_vm_end = address; in expand_upwards()
2200 spin_unlock(&vma->vm_mm->page_table_lock); in expand_upwards()
2208 validate_mm(vma->vm_mm); in expand_upwards()
2259 spin_lock(&vma->vm_mm->page_table_lock); in expand_downwards()
2265 spin_unlock(&vma->vm_mm->page_table_lock); in expand_downwards()
[all …]
memory.c 649 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
1260 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1336 struct mm_struct *mm = vma->vm_mm; in unmap_vmas()
1356 struct mm_struct *mm = vma->vm_mm; in zap_page_range()
1382 struct mm_struct *mm = vma->vm_mm; in zap_page_range_single()
1443 struct mm_struct *mm = vma->vm_mm; in insert_page()
1510 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); in vm_insert_page()
1521 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
1699 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range()
2802 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_set_pte()
[all …]
gup.c 38 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
152 struct mm_struct *mm = vma->vm_mm; in follow_page_mask()
264 struct mm_struct *mm = vma->vm_mm; in faultin_page()
843 struct mm_struct *mm = vma->vm_mm; in populate_vma_page_range()
ksm.c 374 ret = handle_mm_fault(vma->vm_mm, vma, addr, in break_ksm()
688 if (ksm_test_exit(vma->vm_mm)) in unmerge_ksm_pages()
858 struct mm_struct *mm = vma->vm_mm; in write_protect_page()
932 struct mm_struct *mm = vma->vm_mm; in replace_page()
1741 struct mm_struct *mm = vma->vm_mm; in ksm_madvise()
1930 if ((rmap_item->mm == vma->vm_mm) == search_new_forks) in rmap_walk_ksm()
mlock.c 371 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
501 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
debug.c 161 vma->vm_prev, vma->vm_mm, in dump_vma()
mincore.c 188 mincore_walk.mm = vma->vm_mm; in do_mincore()
zsmalloc.c 279 enum zs_mapmode vm_mm; /* mapping mode */ member
1101 if (area->vm_mm == ZS_MM_WO) in __zs_map_object()
1126 if (area->vm_mm == ZS_MM_RO) in __zs_unmap_object()
1293 area->vm_mm = mm; in zs_map_object()
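(Note: the zsmalloc.c hits above match an unrelated symbol: zsmalloc declares its own vm_mm member, an enum zs_mapmode mapping mode, at line 279. It is not the vm_area_struct field.)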
swapfile.c 1106 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) { in unuse_pte()
1111 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in unuse_pte()
1118 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); in unuse_pte()
1119 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); in unuse_pte()
1121 set_pte_at(vma->vm_mm, addr, pte, in unuse_pte()
1241 pgd = pgd_offset(vma->vm_mm, addr); in unuse_vma()
nommu.c 725 struct mm_struct *mm = vma->vm_mm; in protect_vma()
752 vma->vm_mm = mm; in add_vma_to_mm()
813 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm()
memory-failure.c 439 if (vma->vm_mm == t->mm) in collect_procs_anon()
474 if (vma->vm_mm == t->mm) in collect_procs_file()
hugetlb.c 2740 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range()
2859 mm = vma->vm_mm; in unmap_hugepage_range()
3497 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection()
3743 spte = huge_pte_offset(svma->vm_mm, saddr); in huge_pmd_share()
filemap.c 1895 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in filemap_fault()
1903 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { in filemap_fault()
mempolicy.c 736 err = split_vma(vma->vm_mm, vma, vmstart, 1); in mbind_range()
741 err = split_vma(vma->vm_mm, vma, vmend, 0); in mbind_range()
migrate.c 108 struct mm_struct *mm = vma->vm_mm; in remove_migration_pte()
shmem.c 1312 up_read(&vma->vm_mm->mmap_sem); in shmem_fault()
1343 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in shmem_fault()
memcontrol.c 4908 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range()
5102 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_move_charge_pte_range()
/linux-4.1.27/arch/arm/mm/
fault-armv.c 61 set_pte_at(vma->vm_mm, address, ptep, entry); in do_adjust_pte()
102 pgd = pgd_offset(vma->vm_mm, address); in adjust_pte()
119 ptl = pte_lockptr(vma->vm_mm, pmd); in adjust_pte()
135 struct mm_struct *mm = vma->vm_mm; in make_coherent()
155 if (mpnt->vm_mm != mm || mpnt == vma) in make_coherent()
tlb-v7.S 36 vma_vm_mm r3, r2 @ get vma->vm_mm
37 mmid r3, r3 @ get vm_mm->context.id
tlb-v6.S 37 vma_vm_mm r3, r2 @ get vma->vm_mm
39 mmid r3, r3 @ get vm_mm->context.id
flush.c 151 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) in flush_ptrace_access()
247 if (mpnt->vm_mm != mm) in __flush_dcache_aliases()
411 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
/linux-4.1.27/arch/arc/mm/
tlb.c 288 local_flush_tlb_mm(vma->vm_mm); in local_flush_tlb_range()
301 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_range()
303 tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_range()
358 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_page()
359 tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_page()
413 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1); in flush_tlb_page()
425 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1); in flush_tlb_range()
473 if (current->active_mm != vma->vm_mm) in create_tlb()
478 tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address); in create_tlb()
/linux-4.1.27/arch/arm/kernel/
smp_tlb.c 162 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, in flush_tlb_page()
166 broadcast_tlb_mm_a15_erratum(vma->vm_mm); in flush_tlb_page()
188 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, in flush_tlb_range()
192 broadcast_tlb_mm_a15_erratum(vma->vm_mm); in flush_tlb_range()
asm-offsets.c 127 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); in main()
/linux-4.1.27/arch/arm64/include/asm/
tlbflush.h 89 ((unsigned long)ASID(vma->vm_mm) << 48); in flush_tlb_page()
99 unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48; in __flush_tlb_range()
135 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
tlb.h 43 struct vm_area_struct vma = { .vm_mm = tlb->mm, }; in tlb_flush()
/linux-4.1.27/arch/s390/include/asm/
hugetlb.h 54 huge_ptep_get_and_clear(vma->vm_mm, address, ptep); in huge_ptep_clear_flush()
63 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); in huge_ptep_set_access_flags()
64 set_huge_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
pgtable.h 1018 if (mm_has_pgste(vma->vm_mm)) { in ptep_test_and_clear_young()
1020 pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste); in ptep_test_and_clear_young()
1024 ptep_flush_direct(vma->vm_mm, addr, ptep); in ptep_test_and_clear_young()
1028 if (mm_has_pgste(vma->vm_mm)) { in ptep_test_and_clear_young()
1029 pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm); in ptep_test_and_clear_young()
1126 if (mm_has_pgste(vma->vm_mm)) { in ptep_clear_flush()
1128 pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); in ptep_clear_flush()
1132 ptep_flush_direct(vma->vm_mm, address, ptep); in ptep_clear_flush()
1135 if (mm_has_pgste(vma->vm_mm)) { in ptep_clear_flush()
1139 pgste = pgste_update_all(&pte, pgste, vma->vm_mm); in ptep_clear_flush()
[all …]
tlbflush.h 195 __tlb_flush_mm_lazy(vma->vm_mm); in flush_tlb_range()
/linux-4.1.27/arch/avr32/mm/
tlb.c 109 if (vma && current->active_mm != vma->vm_mm) in update_mmu_cache()
152 if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) { in flush_tlb_page()
156 asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK; in flush_tlb_page()
160 if (vma->vm_mm != current->mm) { in flush_tlb_page()
176 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
/linux-4.1.27/arch/m68k/include/asm/
tlbflush.h 86 if (vma->vm_mm == current->active_mm) { in flush_tlb_page()
97 if (vma->vm_mm == current->active_mm) in flush_tlb_range()
177 sun3_put_context(vma->vm_mm->context); in flush_tlb_page()
194 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
cacheflush_mm.h 209 if (vma->vm_mm == current->mm) in flush_cache_range()
215 if (vma->vm_mm == current->mm) in flush_cache_page()
/linux-4.1.27/arch/score/mm/
tlb-score.c 83 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
161 if (vma && vma->vm_mm->context != 0) { in local_flush_tlb_page()
164 unsigned long vma_ASID = vma->vm_mm->context; in local_flush_tlb_page()
221 if (current->active_mm != vma->vm_mm) in __update_tlb()
cache.c 175 struct mm_struct *mm = vma->vm_mm; in flush_cache_range()
/linux-4.1.27/arch/powerpc/mm/
tlb_hash32.c 150 flush_range(mp->vm_mm, mp->vm_start, mp->vm_end); in flush_tlb_mm()
163 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; in flush_tlb_page()
178 flush_range(vma->vm_mm, start, end); in flush_tlb_range()
pgtable_64.c 510 assert_spin_locked(&vma->vm_mm->page_table_lock); in pmdp_set_access_flags()
564 pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_clear_flush()
596 flush_tlb_pmd_range(vma->vm_mm, &pmd, address); in pmdp_clear_flush()
604 return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp); in pmdp_test_and_clear_young()
618 return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp); in pmdp_clear_flush_young()
634 assert_spin_locked(&vma->vm_mm->page_table_lock); in pmdp_splitting_flush()
661 hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old); in pmdp_splitting_flush()
733 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); in pmdp_invalidate()
hugetlbpage-book3e.c 94 mm = vma->vm_mm; in book3e_hugetlb_preload()
152 __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0); in flush_hugetlb_page()
tlb_nohash.c 205 __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, in local_flush_tlb_page()
333 __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, in flush_tlb_page()
377 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
pgtable.c 208 assert_pte_locked(vma->vm_mm, address); in ptep_set_access_flags()
mem.c 515 hash_preload(vma->vm_mm, address, access, trap); in update_mmu_cache()
hugetlbpage.c 779 unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start); in vma_mmu_pagesize()
/linux-4.1.27/arch/metag/include/asm/
tlbflush.h 55 flush_tlb_mm(vma->vm_mm); in flush_tlb_page()
61 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
cacheflush.h 52 flush_cache_mm(vma->vm_mm); in flush_cache_range()
58 flush_cache_mm(vma->vm_mm); in flush_cache_page()
pgtable.h 258 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
/linux-4.1.27/arch/hexagon/mm/
vm_tlb.c 40 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
80 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
/linux-4.1.27/arch/frv/include/asm/
tlbflush.h 45 __flush_tlb_range((vma)->vm_mm->context.id, start, end); \
52 __flush_tlb_page((vma)->vm_mm->context.id, addr); \
/linux-4.1.27/arch/parisc/include/asm/
tlbflush.h 34 __flush_tlb_range((vma)->vm_mm->context, start, end)
83 sid = vma->vm_mm->context; in flush_tlb_page()
pgtable.h 463 purge_tlb_entries(vma->vm_mm, addr); in ptep_test_and_clear_young()
/linux-4.1.27/include/linux/
khugepaged.h 42 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) in khugepaged_enter()
46 if (__khugepaged_enter(vma->vm_mm)) in khugepaged_enter()
mmu_notifier.h 294 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
307 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
317 struct mm_struct *___mm = (__vma)->vm_mm; \
330 struct mm_struct *___mm = (__vma)->vm_mm; \
mm_types.h 269 struct mm_struct *vm_mm; /* The address space we belong to. */ member
huge_mm.h 135 VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); in pmd_trans_huge_lock()
/linux-4.1.27/arch/ia64/include/asm/
tlbflush.h 80 if (vma->vm_mm == current->active_mm) in flush_tlb_page()
83 vma->vm_mm->context = 0; in flush_tlb_page()
tlb.h 122 vma.vm_mm = tlb->mm; in ia64_tlb_flush_mmu_tlbonly()
pgtable.h 419 set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); in ptep_test_and_clear_young()
537 set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry); \
/linux-4.1.27/arch/mips/kernel/
smp.c 364 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
414 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { in flush_tlb_page()
425 if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) in flush_tlb_page()
426 cpu_context(cpu, vma->vm_mm) = 0; in flush_tlb_page()
vdso.c 119 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) in arch_vma_name()
/linux-4.1.27/arch/hexagon/kernel/
vdso.c 97 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) in arch_vma_name()
/linux-4.1.27/arch/sh/kernel/vsyscall/
vsyscall.c 90 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) in arch_vma_name()
/linux-4.1.27/arch/x86/include/asm/
tlbflush.h 194 if (vma->vm_mm == current->active_mm) in flush_tlb_page()
201 if (vma->vm_mm == current->active_mm) in flush_tlb_range()
238 flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
/linux-4.1.27/arch/m32r/mm/
fault.c 358 if (vma && current->active_mm != vma->vm_mm) in update_mmu_cache()
460 if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) { in local_flush_tlb_page()
465 page |= (mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK); in local_flush_tlb_page()
479 mm = vma->vm_mm; in local_flush_tlb_range()
/linux-4.1.27/arch/mn10300/include/asm/
tlbflush.h 111 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
138 #define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
/linux-4.1.27/arch/alpha/include/asm/
tlbflush.h 119 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
133 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
cacheflush.h 54 struct mm_struct *mm = vma->vm_mm; in flush_icache_user_range()
/linux-4.1.27/arch/sparc/include/asm/
cacheflush_64.h 24 flush_cache_mm((vma)->vm_mm)
26 flush_cache_mm((vma)->vm_mm)
hugetlb.h 74 set_huge_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
pgtable_32.h 431 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
/linux-4.1.27/arch/sh/kernel/
smp.c 391 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
437 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || in flush_tlb_page()
438 (current->mm != vma->vm_mm)) { in flush_tlb_page()
448 cpu_context(i, vma->vm_mm) = 0; in flush_tlb_page()
/linux-4.1.27/arch/tile/kernel/
tlb.c 64 flush_tlb_page_mm(vma, vma->vm_mm, va); in flush_tlb_page()
72 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
vdso.c 115 if (vma->vm_mm && vma->vm_start == VDSO_BASE) in arch_vma_name()
/linux-4.1.27/arch/x86/mm/
pgtable.c 417 pte_update_defer(vma->vm_mm, address, ptep); in ptep_set_access_flags()
434 pmd_update_defer(vma->vm_mm, address, pmdp); in pmdp_set_access_flags()
457 pte_update(vma->vm_mm, addr, ptep); in ptep_test_and_clear_young()
473 pmd_update(vma->vm_mm, addr, pmdp); in pmdp_test_and_clear_young()
521 pmd_update(vma->vm_mm, address, pmdp); in pmdp_splitting_flush()
tlb.c 240 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
/linux-4.1.27/drivers/xen/
xlate_mmu.c 88 set_pte_at(info->vma->vm_mm, addr, ptep, pte); in remap_pte_fn()
120 err = apply_to_page_range(vma->vm_mm, addr, range, in xen_xlate_remap_gfn_array()
gntdev.c 798 if (use_ptemod && priv->mm != vma->vm_mm) { in gntdev_mmap()
830 err = apply_to_page_range(vma->vm_mm, vma->vm_start, in gntdev_mmap()
862 apply_to_page_range(vma->vm_mm, vma->vm_start, in gntdev_mmap()
privcmd.c 641 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT, in privcmd_vma_range_is_mapped()
/linux-4.1.27/arch/x86/um/
mem_64.c 7 if (vma->vm_mm && vma->vm_start == um_vdso_addr) in arch_vma_name()
mem_32.c 20 gate_vma.vm_mm = NULL; in gate_vma_init()
/linux-4.1.27/arch/cris/include/asm/
tlbflush.h 31 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
/linux-4.1.27/arch/arm/include/asm/
tlbflush.h 424 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __local_flush_tlb_page()
427 cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { in __local_flush_tlb_page()
445 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in local_flush_tlb_page()
462 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __flush_tlb_page()
cacheflush.h 234 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_range()
244 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_page()
/linux-4.1.27/arch/parisc/kernel/
cache.c 578 BUG_ON(!vma->vm_mm->context); in flush_cache_range()
585 if (vma->vm_mm->context == mfsp(3)) { in flush_cache_range()
592 pgd = vma->vm_mm->pgd; in flush_cache_range()
607 BUG_ON(!vma->vm_mm->context); in flush_cache_page()
/linux-4.1.27/arch/mn10300/mm/
mmu-context.c 43 cnx = mm_context(vma->vm_mm); in update_mmu_cache()
tlb-smp.c 184 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
/linux-4.1.27/arch/sparc/kernel/
asm-offsets.c 53 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); in foo()
/linux-4.1.27/arch/s390/kernel/
vdso.c 249 if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base) in arch_vma_name()
/linux-4.1.27/arch/xtensa/mm/
tlb.c 92 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
131 struct mm_struct* mm = vma->vm_mm; in local_flush_tlb_page()
/linux-4.1.27/kernel/events/
uprobes.c 163 struct mm_struct *mm = vma->vm_mm; in __replace_page()
172 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg); in __replace_page()
747 if (!atomic_inc_not_zero(&vma->vm_mm->mm_users)) in build_map_info()
755 info->mm = vma->vm_mm; in build_map_info()
1083 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { in uprobe_mmap()
1085 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); in uprobe_mmap()
1121 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ in uprobe_munmap()
1124 if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || in uprobe_munmap()
1125 test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) in uprobe_munmap()
1129 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); in uprobe_munmap()
core.c 4481 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; in perf_mmap_close()
4551 vma->vm_mm->pinned_vm -= mmap_locked; in perf_mmap_close()
4699 locked = vma->vm_mm->pinned_vm + extra; in perf_mmap()
4740 vma->vm_mm->pinned_vm += extra; in perf_mmap()
5895 if (vma->vm_start <= vma->vm_mm->start_brk && in perf_event_mmap_event()
5896 vma->vm_end >= vma->vm_mm->brk) { in perf_event_mmap_event()
5900 if (vma->vm_start <= vma->vm_mm->start_stack && in perf_event_mmap_event()
5901 vma->vm_end >= vma->vm_mm->start_stack) { in perf_event_mmap_event()
/linux-4.1.27/arch/sparc/mm/
srmmu.c 577 if ((ctx1 = vma->vm_mm->context) != -1) {
1232 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_cache_range()
1240 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_cache_page()
1283 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_tlb_range()
1290 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_tlb_page()
1672 struct mm_struct *mm = vma->vm_mm; in smp_flush_cache_range()
1689 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_range()
1704 struct mm_struct *mm = vma->vm_mm; in smp_flush_cache_page()
1719 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_page()
tlb.c 203 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_invalidate()
init_64.c 350 mm = vma->vm_mm; in update_mmu_cache()
2715 mm = vma->vm_mm; in update_mmu_cache_pmd()
/linux-4.1.27/arch/unicore32/include/asm/
tlbflush.h 90 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { in local_flush_tlb_page()
/linux-4.1.27/arch/arm64/mm/
flush.c 116 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
/linux-4.1.27/arch/powerpc/kernel/
vdso.c 167 struct page *upg = (vma && vma->vm_mm) ? in dump_vdso_pages()
178 struct page *upg = (vma && vma->vm_mm) ? in dump_vdso_pages()
288 if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base) in arch_vma_name()
/linux-4.1.27/fs/ncpfs/
mmap.c 93 mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT); in ncp_file_mmap_fault()
/linux-4.1.27/drivers/gpu/drm/ttm/
ttm_bo_vm.c 69 up_read(&vma->vm_mm->mmap_sem); in ttm_bo_vm_fault_idle()
117 up_read(&vma->vm_mm->mmap_sem); in ttm_bo_vm_fault()
/linux-4.1.27/arch/alpha/kernel/
smp.c 699 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
734 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
752 struct mm_struct *mm = vma->vm_mm; in flush_icache_user_range()
/linux-4.1.27/arch/m32r/kernel/
smp.c 295 smp_flush_tlb_mm(vma->vm_mm); in smp_flush_tlb_range()
317 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_page()
/linux-4.1.27/Documentation/
cachetlb.txt 58 modifications for the address space 'vma->vm_mm' in the range
77 address space is available via vma->vm_mm. Also, one may
83 page table modification for address space 'vma->vm_mm' for
86 'vma->vm_mm' for virtual address 'addr'.
96 "vma->vm_mm", in the software page tables.
172 entries in the cache for 'vma->vm_mm' for virtual addresses in
189 address space is available via vma->vm_mm. Also, one may
200 'vma->vm_mm' for virtual address 'addr' which translates
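The cachetlb.txt lines above state the contract behind most hits in this list: the TLB and cache flush primitives receive a vma and derive the affected address space from vma->vm_mm. A minimal sketch of that shape, loosely modeled on the sh and avr32 local_flush_tlb_page() hits above; cpu_context(), cpu_asid() and NO_CONTEXT are per-architecture details, and local_flush_one_entry() is a hypothetical helper:

	/* Sketch: flush one page, but only if this mm ever got an ASID on this CPU. */
	static void local_flush_tlb_page(struct vm_area_struct *vma,
					 unsigned long page)
	{
		unsigned int cpu = smp_processor_id();

		if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
			unsigned long asid = cpu_asid(cpu, vma->vm_mm);

			local_flush_one_entry(asid, page & PAGE_MASK);
		}
	}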
/linux-4.1.27/arch/mips/include/asm/
hugetlb.h 102 set_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
/linux-4.1.27/drivers/media/v4l2-core/
videobuf2-memops.c 52 vma_copy->vm_mm = NULL; in vb2_get_vma()
/linux-4.1.27/arch/ia64/mm/
init.c 119 vma->vm_mm = current->mm; in ia64_init_addr_space()
138 vma->vm_mm = current->mm; in ia64_init_addr_space()
285 gate_vma.vm_mm = NULL; in gate_vma_init()
tlb.c 304 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
/linux-4.1.27/arch/um/kernel/
tlb.c 389 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
506 if (vma->vm_mm == NULL) in flush_tlb_range()
508 else fix_range(vma->vm_mm, start, end, 0); in flush_tlb_range()
/linux-4.1.27/fs/proc/
task_mmu.c 271 struct mm_struct *mm = vma->vm_mm; in show_map_vma()
549 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
619 .mm = vma->vm_mm, in show_smap()
762 set_pte_at(vma->vm_mm, addr, pte, ptent); in clear_soft_dirty()
776 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
820 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
1483 struct mm_struct *mm = vma->vm_mm; in show_numa_map()
task_nommu.c 151 struct mm_struct *mm = vma->vm_mm; in nommu_vma_show()
vmcore.c 389 do_munmap(vma->vm_mm, from, len); in remap_oldmem_pfn_checked()
481 do_munmap(vma->vm_mm, vma->vm_start, len); in mmap_vmcore()
/linux-4.1.27/arch/cris/arch-v32/mm/
tlb.c 130 page_id = vma->vm_mm->context.page_id; in __flush_tlb_page()
/linux-4.1.27/arch/cris/arch-v10/mm/
tlb.c 101 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
/linux-4.1.27/arch/powerpc/include/asm/
hugetlb.h 140 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); in huge_ptep_clear_flush()
pgtable-ppc64.h 269 __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
304 int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
pgtable-ppc32.h 264 __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
/linux-4.1.27/include/asm-generic/
pgtable.h 48 set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); in ptep_test_and_clear_young()
64 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); in pmdp_test_and_clear_young()
/linux-4.1.27/arch/unicore32/kernel/
asm-offsets.c 88 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); in main()
/linux-4.1.27/fs/
dax.c 341 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { in do_dax_fault()
371 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in do_dax_fault()
exec.c 260 vma->vm_mm = mm; in __bprm_mm_init()
578 struct mm_struct *mm = vma->vm_mm; in shift_arg_pages()
aio.c 320 struct mm_struct *mm = vma->vm_mm; in aio_ring_remap()
binfmt_elf.c 1204 if (vma == get_gate_vma(vma->vm_mm)) in always_dump_vma()
/linux-4.1.27/arch/nios2/mm/
tlb.c 97 unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context); in flush_tlb_range()
cacheflush.c 101 if (mpnt->vm_mm != mm) in flush_aliases()
/linux-4.1.27/arch/arm64/kernel/
asm-offsets.c 65 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); in main()
/linux-4.1.27/drivers/gpu/drm/exynos/
exynos_drm_gem.c 397 vma_copy->vm_mm = NULL; in exynos_gem_get_vma()
/linux-4.1.27/drivers/misc/sgi-gru/
grufault.c 227 pgdp = pgd_offset(vma->vm_mm, vaddr); in atomic_pte_lookup()
/linux-4.1.27/arch/arm/mach-rpc/
ecard.c 240 vma.vm_mm = mm; in ecard_init_pgtables()
/linux-4.1.27/fs/hugetlbfs/
inode.c 133 hugetlb_prefault_arch_hook(vma->vm_mm); in hugetlbfs_file_mmap()
/linux-4.1.27/security/selinux/
hooks.c 3361 if (vma->vm_start >= vma->vm_mm->start_brk && in selinux_file_mprotect()
3362 vma->vm_end <= vma->vm_mm->brk) { in selinux_file_mprotect()
3365 vma->vm_start <= vma->vm_mm->start_stack && in selinux_file_mprotect()
3366 vma->vm_end >= vma->vm_mm->start_stack) { in selinux_file_mprotect()
/linux-4.1.27/arch/s390/mm/
pgtable.c 1354 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_set_access_flags()
/linux-4.1.27/kernel/
fork.c 449 tmp->vm_mm = mm; in dup_mmap()
/linux-4.1.27/arch/x86/xen/
mmu.c 2507 err = apply_to_page_range(vma->vm_mm, addr, range, in do_remap_mfn()
/linux-4.1.27/arch/ia64/kernel/
perfmon.c 2289 vma->vm_mm = mm; in pfm_smpl_buffer_alloc()
2335 vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, in pfm_smpl_buffer_alloc()
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/
file.c 247 psize = get_slice_psize(vma->vm_mm, address); in spufs_mem_mmap_fault()
/linux-4.1.27/Documentation/virtual/uml/
UserModeLinux-HOWTO.txt 3295 (gdb) call pgd_offset_proc(vma->vm_mm, address)
3307 $23 = {vm_mm = 0x507d2434, vm_start = 0, vm_end = 134512640,
3313 (gdb) p *vma.vm_mm
/linux-4.1.27/drivers/android/
binder.c 2920 proc->vma_vm_mm = vma->vm_mm; in binder_mmap()
/linux-4.1.27/kernel/sched/
fair.c 2194 if (!vma->vm_mm || in task_numa_work()