/linux-4.1.27/arch/mips/mm/ |
D | tlb-r3k.c | 81 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range() 161 if (cpu_context(cpu, vma->vm_mm) != 0) { in local_flush_tlb_page() 166 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); in local_flush_tlb_page() 168 newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; in local_flush_tlb_page() 196 if (current->active_mm != vma->vm_mm) in __update_tlb() 202 if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { in __update_tlb() 204 (cpu_context(cpu, vma->vm_mm)), pid); in __update_tlb()
|
D | tlb-r8k.c | 64 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range() 155 if (!cpu_context(cpu, vma->vm_mm)) in local_flush_tlb_page() 158 newpid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page() 194 if (current->active_mm != vma->vm_mm) in __update_tlb() 203 pgdp = pgd_offset(vma->vm_mm, address); in __update_tlb()
|
D | tlb-r4k.c | 120 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range() 215 if (cpu_context(cpu, vma->vm_mm) != 0) { in local_flush_tlb_page() 219 newpid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page() 297 if (current->active_mm != vma->vm_mm) in __update_tlb() 306 pgdp = pgd_offset(vma->vm_mm, address); in __update_tlb()
|
D | pgtable-64.c | 73 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
|
D | c-tx39.c | 163 if (!(cpu_context(smp_processor_id(), vma->vm_mm))) in tx39_flush_cache_range() 172 struct mm_struct *mm = vma->vm_mm; in tx39_flush_cache_page()
|
D | c-r3k.c | 241 struct mm_struct *mm = vma->vm_mm; in r3k_flush_cache_page()
|
D | c-octeon.c | 84 mask = *mm_cpumask(vma->vm_mm); in octeon_flush_icache_all_cores()
|
D | c-r4k.c | 492 if (!(has_valid_asid(vma->vm_mm))) in local_r4k_flush_cache_range() 554 struct mm_struct *mm = vma->vm_mm; in local_r4k_flush_cache_page()
|
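The MIPS flush routines above all share one shape: the per-CPU ASID for an address space is cached in cpu_context(cpu, mm), and a page flush is attempted only when the vma's mm has a live context on this CPU. A minimal sketch of that pattern, assembled from the fragments above (invalidate_tlb_entry() is a hypothetical stand-in for the per-core invalidate sequence):

	static void sketch_local_flush_tlb_page(struct vm_area_struct *vma,
						unsigned long page)
	{
		int cpu = smp_processor_id();
		unsigned long newpid;

		/* No context on this CPU: nothing of ours is in the TLB. */
		if (cpu_context(cpu, vma->vm_mm) == 0)
			return;

		/* Tag the address with this mm's ASID so only entries
		 * belonging to vma->vm_mm are dropped. */
		newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
		invalidate_tlb_entry(page & PAGE_MASK, newpid); /* hypothetical */
	}
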
/linux-4.1.27/arch/sh/mm/ |
D | tlbflush_32.c | 19 if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { in local_flush_tlb_page() 24 asid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page() 28 if (vma->vm_mm != current->mm) { in local_flush_tlb_page() 42 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
|
D | cache-sh4.c | 223 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) in sh4_flush_cache_page() 226 pgd = pgd_offset(vma->vm_mm, address); in sh4_flush_cache_page() 235 if ((vma->vm_mm == current->active_mm)) in sh4_flush_cache_page() 286 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) in sh4_flush_cache_range()
|
D | tlbflush_64.c | 71 if (vma->vm_mm) { in local_flush_tlb_page() 88 mm = vma->vm_mm; in local_flush_tlb_range()
|
D | cache-sh5.c | 113 vma_asid = cpu_asid(cpu, vma->vm_mm); in sh64_icache_inv_user_page() 535 sh64_dcache_purge_user_range(vma->vm_mm, start, end); in sh5_flush_cache_range() 536 sh64_icache_inv_user_page_range(vma->vm_mm, start, end); in sh5_flush_cache_range()
|
D | tlb-sh3.c | 36 if (vma && current->active_mm != vma->vm_mm) in __update_tlb()
|
D | tlb-sh4.c | 24 if (vma && current->active_mm != vma->vm_mm) in __update_tlb()
|
D | tlb-pteaex.c | 25 if (vma && current->active_mm != vma->vm_mm) in __update_tlb()
|
/linux-4.1.27/mm/ |
D | pgtable-generic.c | 53 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags() 69 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_set_access_flags() 113 struct mm_struct *mm = (vma)->vm_mm; in ptep_clear_flush() 129 pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_clear_flush() 143 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush() 196 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry)); in pmdp_invalidate()
|
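The pgtable-generic.c hits are the fallback implementations most architectures inherit; line 53 belongs to ptep_set_access_flags(). Filled out around the fragment shown, the generic version reads roughly like this (a sketch reconstructed from the excerpt, not quoted verbatim):

	int ptep_set_access_flags(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep,
				  pte_t entry, int dirty)
	{
		int changed = !pte_same(*ptep, entry);

		if (changed) {
			/* The owning mm is always reached via vma->vm_mm. */
			set_pte_at(vma->vm_mm, address, ptep, entry);
			flush_tlb_fix_spurious_fault(vma, address);
		}
		return changed;
	}
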
D | mprotect.c | 47 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection() 49 pmdl = pmd_lock(vma->vm_mm, pmd); in lock_pte_protection() 55 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection() 64 struct mm_struct *mm = vma->vm_mm; in change_pte_range() 139 struct mm_struct *mm = vma->vm_mm; in change_pmd_range() 215 struct mm_struct *mm = vma->vm_mm; in change_protection_range() 259 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
|
D | mremap.c | 95 struct mm_struct *mm = vma->vm_mm; in move_ptes() 178 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); in move_page_tables() 187 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables() 190 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables() 215 if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma, in move_page_tables() 230 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); in move_page_tables() 239 struct mm_struct *mm = vma->vm_mm; in move_vma()
|
D | vmacache.c | 62 if (vmacache_valid_mm(newvma->vm_mm)) in vmacache_update() 100 if (WARN_ON_ONCE(vma->vm_mm != mm)) in vmacache_find()
|
D | rmap.c | 171 struct mm_struct *mm = vma->vm_mm; in anon_vma_prepare() 706 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); in page_mapped_in_vma() 726 struct mm_struct *mm = vma->vm_mm; in page_referenced_one() 801 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma() 868 struct mm_struct *mm = vma->vm_mm; in page_mkclean_one() 1184 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one() 1310 if (down_read_trylock(&vma->vm_mm->mmap_sem)) { in try_to_unmap_one() 1315 up_read(&vma->vm_mm->mmap_sem); in try_to_unmap_one()
|
D | huge_memory.c | 1208 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd() 1436 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd() 1488 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd() 1532 *ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock() 1606 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_splitting() 1754 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_map() 1933 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise() 2197 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate() 2219 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy() 2229 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy() [all …]
|
D | madvise.c | 49 struct mm_struct *mm = vma->vm_mm; in madvise_behavior() 154 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry() 177 .mm = vma->vm_mm, in force_swapin_readahead()
|
D | mmap.c | 729 struct mm_struct *mm = vma->vm_mm; in vma_adjust() 1593 vma->vm_mm = mm; in mmap_region() 2095 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth() 2124 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth() 2192 spin_lock(&vma->vm_mm->page_table_lock); in expand_upwards() 2199 vma->vm_mm->highest_vm_end = address; in expand_upwards() 2200 spin_unlock(&vma->vm_mm->page_table_lock); in expand_upwards() 2208 validate_mm(vma->vm_mm); in expand_upwards() 2259 spin_lock(&vma->vm_mm->page_table_lock); in expand_downwards() 2265 spin_unlock(&vma->vm_mm->page_table_lock); in expand_downwards() [all …]
|
D | memory.c | 649 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte() 1260 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range() 1336 struct mm_struct *mm = vma->vm_mm; in unmap_vmas() 1356 struct mm_struct *mm = vma->vm_mm; in zap_page_range() 1382 struct mm_struct *mm = vma->vm_mm; in zap_page_range_single() 1443 struct mm_struct *mm = vma->vm_mm; in insert_page() 1510 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); in vm_insert_page() 1521 struct mm_struct *mm = vma->vm_mm; in insert_pfn() 1699 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range() 2802 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_set_pte() [all …]
|
D | gup.c | 38 struct mm_struct *mm = vma->vm_mm; in follow_page_pte() 152 struct mm_struct *mm = vma->vm_mm; in follow_page_mask() 264 struct mm_struct *mm = vma->vm_mm; in faultin_page() 843 struct mm_struct *mm = vma->vm_mm; in populate_vma_page_range()
|
D | ksm.c | 374 ret = handle_mm_fault(vma->vm_mm, vma, addr, in break_ksm() 688 if (ksm_test_exit(vma->vm_mm)) in unmerge_ksm_pages() 858 struct mm_struct *mm = vma->vm_mm; in write_protect_page() 932 struct mm_struct *mm = vma->vm_mm; in replace_page() 1741 struct mm_struct *mm = vma->vm_mm; in ksm_madvise() 1930 if ((rmap_item->mm == vma->vm_mm) == search_new_forks) in rmap_walk_ksm()
|
D | mlock.c | 371 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill() 501 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
|
D | debug.c | 161 vma->vm_prev, vma->vm_mm, in dump_vma()
|
D | mincore.c | 188 mincore_walk.mm = vma->vm_mm; in do_mincore()
|
D | zsmalloc.c | 279 enum zs_mapmode vm_mm; /* mapping mode */ member 1101 if (area->vm_mm == ZS_MM_WO) in __zs_map_object() 1126 if (area->vm_mm == ZS_MM_RO) in __zs_unmap_object() 1293 area->vm_mm = mm; in zs_map_object()
|
D | swapfile.c | 1106 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) { in unuse_pte() 1111 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in unuse_pte() 1118 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); in unuse_pte() 1119 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); in unuse_pte() 1121 set_pte_at(vma->vm_mm, addr, pte, in unuse_pte() 1241 pgd = pgd_offset(vma->vm_mm, addr); in unuse_vma()
|
D | nommu.c | 725 struct mm_struct *mm = vma->vm_mm; in protect_vma() 752 vma->vm_mm = mm; in add_vma_to_mm() 813 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm()
|
D | memory-failure.c | 439 if (vma->vm_mm == t->mm) in collect_procs_anon() 474 if (vma->vm_mm == t->mm) in collect_procs_file()
|
D | hugetlb.c | 2740 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range() 2859 mm = vma->vm_mm; in unmap_hugepage_range() 3497 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection() 3743 spte = huge_pte_offset(svma->vm_mm, saddr); in huge_pmd_share()
|
D | filemap.c | 1895 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in filemap_fault() 1903 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { in filemap_fault()
|
D | mempolicy.c | 736 err = split_vma(vma->vm_mm, vma, vmstart, 1); in mbind_range() 741 err = split_vma(vma->vm_mm, vma, vmend, 0); in mbind_range()
|
D | migrate.c | 108 struct mm_struct *mm = vma->vm_mm; in remove_migration_pte()
|
D | shmem.c | 1312 up_read(&vma->vm_mm->mmap_sem); in shmem_fault() 1343 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in shmem_fault()
|
D | memcontrol.c | 4908 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range() 5102 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_move_charge_pte_range()
|
/linux-4.1.27/arch/arm/mm/ |
D | fault-armv.c | 61 set_pte_at(vma->vm_mm, address, ptep, entry); in do_adjust_pte() 102 pgd = pgd_offset(vma->vm_mm, address); in adjust_pte() 119 ptl = pte_lockptr(vma->vm_mm, pmd); in adjust_pte() 135 struct mm_struct *mm = vma->vm_mm; in make_coherent() 155 if (mpnt->vm_mm != mm || mpnt == vma) in make_coherent()
|
D | tlb-v7.S | 36 vma_vm_mm r3, r2 @ get vma->vm_mm 37 mmid r3, r3 @ get vm_mm->context.id
|
D | tlb-v6.S | 37 vma_vm_mm r3, r2 @ get vma->vm_mm 39 mmid r3, r3 @ get vm_mm->context.id
|
D | flush.c | 151 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) in flush_ptrace_access() 247 if (mpnt->vm_mm != mm) in __flush_dcache_aliases() 411 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
|
/linux-4.1.27/arch/arc/mm/ |
D | tlb.c | 288 local_flush_tlb_mm(vma->vm_mm); in local_flush_tlb_range() 301 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_range() 303 tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_range() 358 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_page() 359 tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_page() 413 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1); in flush_tlb_page() 425 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1); in flush_tlb_range() 473 if (current->active_mm != vma->vm_mm) in create_tlb() 478 tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address); in create_tlb()
|
/linux-4.1.27/arch/arm/kernel/ |
D | smp_tlb.c | 162 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, in flush_tlb_page() 166 broadcast_tlb_mm_a15_erratum(vma->vm_mm); in flush_tlb_page() 188 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, in flush_tlb_range() 192 broadcast_tlb_mm_a15_erratum(vma->vm_mm); in flush_tlb_range()
|
D | asm-offsets.c | 127 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); in main()
|
/linux-4.1.27/arch/arm64/include/asm/ |
D | tlbflush.h | 89 ((unsigned long)ASID(vma->vm_mm) << 48); in flush_tlb_page() 99 unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48; in __flush_tlb_range() 135 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
|
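The arm64 fragment shows why flush_tlb_page() needs the vma at all: the ASID lives in vma->vm_mm and is packed into bits 63:48 of the TLBI operand, so only that address space's translation is invalidated. A sketch consistent with the excerpt (barrier and mnemonic details are from the 4.1-era code and may differ in other trees):

	static inline void flush_tlb_page(struct vm_area_struct *vma,
					  unsigned long uaddr)
	{
		/* page number in the low bits, ASID from vma->vm_mm on top */
		unsigned long addr = uaddr >> 12 |
			((unsigned long)ASID(vma->vm_mm) << 48);

		dsb(ishst);
		asm("tlbi vale1is, %0" : : "r" (addr));
		dsb(ish);
	}
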
D | tlb.h | 43 struct vm_area_struct vma = { .vm_mm = tlb->mm, }; in tlb_flush()
|
/linux-4.1.27/arch/s390/include/asm/ |
D | hugetlb.h | 54 huge_ptep_get_and_clear(vma->vm_mm, address, ptep); in huge_ptep_clear_flush() 63 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); in huge_ptep_set_access_flags() 64 set_huge_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
|
D | pgtable.h | 1018 if (mm_has_pgste(vma->vm_mm)) { in ptep_test_and_clear_young() 1020 pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste); in ptep_test_and_clear_young() 1024 ptep_flush_direct(vma->vm_mm, addr, ptep); in ptep_test_and_clear_young() 1028 if (mm_has_pgste(vma->vm_mm)) { in ptep_test_and_clear_young() 1029 pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm); in ptep_test_and_clear_young() 1126 if (mm_has_pgste(vma->vm_mm)) { in ptep_clear_flush() 1128 pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); in ptep_clear_flush() 1132 ptep_flush_direct(vma->vm_mm, address, ptep); in ptep_clear_flush() 1135 if (mm_has_pgste(vma->vm_mm)) { in ptep_clear_flush() 1139 pgste = pgste_update_all(&pte, pgste, vma->vm_mm); in ptep_clear_flush() [all …]
|
D | tlbflush.h | 195 __tlb_flush_mm_lazy(vma->vm_mm); in flush_tlb_range()
|
/linux-4.1.27/arch/avr32/mm/ |
D | tlb.c | 109 if (vma && current->active_mm != vma->vm_mm) in update_mmu_cache() 152 if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) { in flush_tlb_page() 156 asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK; in flush_tlb_page() 160 if (vma->vm_mm != current->mm) { in flush_tlb_page() 176 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
|
/linux-4.1.27/arch/m68k/include/asm/ |
D | tlbflush.h | 86 if (vma->vm_mm == current->active_mm) { in flush_tlb_page() 97 if (vma->vm_mm == current->active_mm) in flush_tlb_range() 177 sun3_put_context(vma->vm_mm->context); in flush_tlb_page() 194 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
|
D | cacheflush_mm.h | 209 if (vma->vm_mm == current->mm) in flush_cache_range() 215 if (vma->vm_mm == current->mm) in flush_cache_page()
|
/linux-4.1.27/arch/score/mm/ |
D | tlb-score.c | 83 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range() 161 if (vma && vma->vm_mm->context != 0) { in local_flush_tlb_page() 164 unsigned long vma_ASID = vma->vm_mm->context; in local_flush_tlb_page() 221 if (current->active_mm != vma->vm_mm) in __update_tlb()
|
D | cache.c | 175 struct mm_struct *mm = vma->vm_mm; in flush_cache_range()
|
/linux-4.1.27/arch/powerpc/mm/ |
D | tlb_hash32.c | 150 flush_range(mp->vm_mm, mp->vm_start, mp->vm_end); in flush_tlb_mm() 163 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; in flush_tlb_page() 178 flush_range(vma->vm_mm, start, end); in flush_tlb_range()
|
D | pgtable_64.c | 510 assert_spin_locked(&vma->vm_mm->page_table_lock); in pmdp_set_access_flags() 564 pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_clear_flush() 596 flush_tlb_pmd_range(vma->vm_mm, &pmd, address); in pmdp_clear_flush() 604 return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp); in pmdp_test_and_clear_young() 618 return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp); in pmdp_clear_flush_young() 634 assert_spin_locked(&vma->vm_mm->page_table_lock); in pmdp_splitting_flush() 661 hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old); in pmdp_splitting_flush() 733 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); in pmdp_invalidate()
|
D | hugetlbpage-book3e.c | 94 mm = vma->vm_mm; in book3e_hugetlb_preload() 152 __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0); in flush_hugetlb_page()
|
D | tlb_nohash.c | 205 __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, in local_flush_tlb_page() 333 __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, in flush_tlb_page() 377 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
|
D | pgtable.c | 208 assert_pte_locked(vma->vm_mm, address); in ptep_set_access_flags()
|
D | mem.c | 515 hash_preload(vma->vm_mm, address, access, trap); in update_mmu_cache()
|
D | hugetlbpage.c | 779 unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start); in vma_mmu_pagesize()
|
/linux-4.1.27/arch/metag/include/asm/ |
D | tlbflush.h | 55 flush_tlb_mm(vma->vm_mm); in flush_tlb_page() 61 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
|
D | cacheflush.h | 52 flush_cache_mm(vma->vm_mm); in flush_cache_range() 58 flush_cache_mm(vma->vm_mm); in flush_cache_page()
|
D | pgtable.h | 258 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
|
/linux-4.1.27/arch/hexagon/mm/ |
D | vm_tlb.c | 40 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range() 80 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
|
/linux-4.1.27/arch/frv/include/asm/ |
D | tlbflush.h | 45 __flush_tlb_range((vma)->vm_mm->context.id, start, end); \ 52 __flush_tlb_page((vma)->vm_mm->context.id, addr); \
|
/linux-4.1.27/arch/parisc/include/asm/ |
D | tlbflush.h | 34 __flush_tlb_range((vma)->vm_mm->context, start, end) 83 sid = vma->vm_mm->context; in flush_tlb_page()
|
D | pgtable.h | 463 purge_tlb_entries(vma->vm_mm, addr); in ptep_test_and_clear_young()
|
/linux-4.1.27/include/linux/ |
D | khugepaged.h | 42 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) in khugepaged_enter() 46 if (__khugepaged_enter(vma->vm_mm)) in khugepaged_enter()
|
D | mmu_notifier.h | 294 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ 307 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ 317 struct mm_struct *___mm = (__vma)->vm_mm; \ 330 struct mm_struct *___mm = (__vma)->vm_mm; \
|
D | mm_types.h | 269 struct mm_struct *vm_mm; /* The address space we belong to. */ member
|
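Line 269 of mm_types.h is the definition every other hit in this list refers back to; abridged, the field sits in struct vm_area_struct as the back-pointer from a mapping to its owning address space:

	struct vm_area_struct {
		unsigned long vm_start;		/* start address within vm_mm */
		unsigned long vm_end;		/* first byte after our end
						   address within vm_mm */
		/* ... */
		struct mm_struct *vm_mm;	/* The address space we belong to. */
		pgprot_t vm_page_prot;		/* access permissions of this VMA */
		/* ... */
	};
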
D | huge_mm.h | 135 VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); in pmd_trans_huge_lock()
|
/linux-4.1.27/arch/ia64/include/asm/ |
D | tlbflush.h | 80 if (vma->vm_mm == current->active_mm) in flush_tlb_page() 83 vma->vm_mm->context = 0; in flush_tlb_page()
|
D | tlb.h | 122 vma.vm_mm = tlb->mm; in ia64_tlb_flush_mmu_tlbonly()
|
D | pgtable.h | 419 set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); in ptep_test_and_clear_young() 537 set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry); \
|
/linux-4.1.27/arch/mips/kernel/ |
D | smp.c | 364 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range() 414 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { in flush_tlb_page() 425 if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) in flush_tlb_page() 426 cpu_context(cpu, vma->vm_mm) = 0; in flush_tlb_page()
|
D | vdso.c | 119 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) in arch_vma_name()
|
/linux-4.1.27/arch/hexagon/kernel/ |
D | vdso.c | 97 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) in arch_vma_name()
|
/linux-4.1.27/arch/sh/kernel/vsyscall/ |
D | vsyscall.c | 90 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) in arch_vma_name()
|
/linux-4.1.27/arch/x86/include/asm/ |
D | tlbflush.h | 194 if (vma->vm_mm == current->active_mm) in flush_tlb_page() 201 if (vma->vm_mm == current->active_mm) in flush_tlb_range() 238 flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
|
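On x86 the UP variants in this header collapse to a single check: TLB entries can only be live for the address space that is currently active, so a flush for any other mm is a no-op. A sketch matching the fragment at line 194:

	static inline void flush_tlb_page(struct vm_area_struct *vma,
					  unsigned long addr)
	{
		/* Only the active mm can have entries in the TLB on UP. */
		if (vma->vm_mm == current->active_mm)
			__flush_tlb_one(addr);
	}
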
/linux-4.1.27/arch/m32r/mm/ |
D | fault.c | 358 if (vma && current->active_mm != vma->vm_mm) in update_mmu_cache() 460 if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) { in local_flush_tlb_page() 465 page |= (mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK); in local_flush_tlb_page() 479 mm = vma->vm_mm; in local_flush_tlb_range()
|
/linux-4.1.27/arch/mn10300/include/asm/ |
D | tlbflush.h | 111 flush_tlb_mm(vma->vm_mm); in flush_tlb_range() 138 #define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
|
/linux-4.1.27/arch/alpha/include/asm/ |
D | tlbflush.h | 119 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page() 133 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
|
D | cacheflush.h | 54 struct mm_struct *mm = vma->vm_mm; in flush_icache_user_range()
|
/linux-4.1.27/arch/sparc/include/asm/ |
D | cacheflush_64.h | 24 flush_cache_mm((vma)->vm_mm) 26 flush_cache_mm((vma)->vm_mm)
|
D | hugetlb.h | 74 set_huge_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
|
D | pgtable_32.h | 431 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
|
/linux-4.1.27/arch/sh/kernel/ |
D | smp.c | 391 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range() 437 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || in flush_tlb_page() 438 (current->mm != vma->vm_mm)) { in flush_tlb_page() 448 cpu_context(i, vma->vm_mm) = 0; in flush_tlb_page()
|
/linux-4.1.27/arch/tile/kernel/ |
D | tlb.c | 64 flush_tlb_page_mm(vma, vma->vm_mm, va); in flush_tlb_page() 72 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
|
D | vdso.c | 115 if (vma->vm_mm && vma->vm_start == VDSO_BASE) in arch_vma_name()
|
/linux-4.1.27/arch/x86/mm/ |
D | pgtable.c | 417 pte_update_defer(vma->vm_mm, address, ptep); in ptep_set_access_flags() 434 pmd_update_defer(vma->vm_mm, address, pmdp); in pmdp_set_access_flags() 457 pte_update(vma->vm_mm, addr, ptep); in ptep_test_and_clear_young() 473 pmd_update(vma->vm_mm, addr, pmdp); in pmdp_test_and_clear_young() 521 pmd_update(vma->vm_mm, address, pmdp); in pmdp_splitting_flush()
|
D | tlb.c | 240 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
|
/linux-4.1.27/drivers/xen/ |
D | xlate_mmu.c | 88 set_pte_at(info->vma->vm_mm, addr, ptep, pte); in remap_pte_fn() 120 err = apply_to_page_range(vma->vm_mm, addr, range, in xen_xlate_remap_gfn_array()
|
D | gntdev.c | 798 if (use_ptemod && priv->mm != vma->vm_mm) { in gntdev_mmap() 830 err = apply_to_page_range(vma->vm_mm, vma->vm_start, in gntdev_mmap() 862 apply_to_page_range(vma->vm_mm, vma->vm_start, in gntdev_mmap()
|
D | privcmd.c | 641 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT, in privcmd_vma_range_is_mapped()
|
/linux-4.1.27/arch/x86/um/ |
D | mem_64.c | 7 if (vma->vm_mm && vma->vm_start == um_vdso_addr) in arch_vma_name()
|
D | mem_32.c | 20 gate_vma.vm_mm = NULL; in gate_vma_init()
|
/linux-4.1.27/arch/cris/include/asm/ |
D | tlbflush.h | 31 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
|
/linux-4.1.27/arch/arm/include/asm/ |
D | tlbflush.h | 424 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __local_flush_tlb_page() 427 cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { in __local_flush_tlb_page() 445 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in local_flush_tlb_page() 462 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __flush_tlb_page()
|
D | cacheflush.h | 234 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_range() 244 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_page()
|
/linux-4.1.27/arch/parisc/kernel/ |
D | cache.c | 578 BUG_ON(!vma->vm_mm->context); in flush_cache_range() 585 if (vma->vm_mm->context == mfsp(3)) { in flush_cache_range() 592 pgd = vma->vm_mm->pgd; in flush_cache_range() 607 BUG_ON(!vma->vm_mm->context); in flush_cache_page()
|
/linux-4.1.27/arch/mn10300/mm/ |
D | mmu-context.c | 43 cnx = mm_context(vma->vm_mm); in update_mmu_cache()
|
D | tlb-smp.c | 184 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
|
/linux-4.1.27/arch/sparc/kernel/ |
D | asm-offsets.c | 53 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); in foo()
|
/linux-4.1.27/arch/s390/kernel/ |
D | vdso.c | 249 if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base) in arch_vma_name()
|
/linux-4.1.27/arch/xtensa/mm/ |
D | tlb.c | 92 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range() 131 struct mm_struct* mm = vma->vm_mm; in local_flush_tlb_page()
|
/linux-4.1.27/kernel/events/ |
D | uprobes.c | 163 struct mm_struct *mm = vma->vm_mm; in __replace_page() 172 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg); in __replace_page() 747 if (!atomic_inc_not_zero(&vma->vm_mm->mm_users)) in build_map_info() 755 info->mm = vma->vm_mm; in build_map_info() 1083 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { in uprobe_mmap() 1085 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); in uprobe_mmap() 1121 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ in uprobe_munmap() 1124 if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || in uprobe_munmap() 1125 test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) in uprobe_munmap() 1129 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); in uprobe_munmap()
|
D | core.c | 4481 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; in perf_mmap_close() 4551 vma->vm_mm->pinned_vm -= mmap_locked; in perf_mmap_close() 4699 locked = vma->vm_mm->pinned_vm + extra; in perf_mmap() 4740 vma->vm_mm->pinned_vm += extra; in perf_mmap() 5895 if (vma->vm_start <= vma->vm_mm->start_brk && in perf_event_mmap_event() 5896 vma->vm_end >= vma->vm_mm->brk) { in perf_event_mmap_event() 5900 if (vma->vm_start <= vma->vm_mm->start_stack && in perf_event_mmap_event() 5901 vma->vm_end >= vma->vm_mm->start_stack) { in perf_event_mmap_event()
|
/linux-4.1.27/arch/sparc/mm/ |
D | srmmu.c | 577 if ((ctx1 = vma->vm_mm->context) != -1) { 1232 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_cache_range() 1240 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_cache_page() 1283 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_tlb_range() 1290 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_tlb_page() 1672 struct mm_struct *mm = vma->vm_mm; in smp_flush_cache_range() 1689 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_range() 1704 struct mm_struct *mm = vma->vm_mm; in smp_flush_cache_page() 1719 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_page()
|
D | tlb.c | 203 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_invalidate()
|
D | init_64.c | 350 mm = vma->vm_mm; in update_mmu_cache() 2715 mm = vma->vm_mm; in update_mmu_cache_pmd()
|
/linux-4.1.27/arch/unicore32/include/asm/ |
D | tlbflush.h | 90 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { in local_flush_tlb_page()
|
/linux-4.1.27/arch/arm64/mm/ |
D | flush.c | 116 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | vdso.c | 167 struct page *upg = (vma && vma->vm_mm) ? in dump_vdso_pages() 178 struct page *upg = (vma && vma->vm_mm) ? in dump_vdso_pages() 288 if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base) in arch_vma_name()
|
/linux-4.1.27/fs/ncpfs/ |
D | mmap.c | 93 mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT); in ncp_file_mmap_fault()
|
/linux-4.1.27/drivers/gpu/drm/ttm/ |
D | ttm_bo_vm.c | 69 up_read(&vma->vm_mm->mmap_sem); in ttm_bo_vm_fault_idle() 117 up_read(&vma->vm_mm->mmap_sem); in ttm_bo_vm_fault()
|
/linux-4.1.27/arch/alpha/kernel/ |
D | smp.c | 699 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page() 734 flush_tlb_mm(vma->vm_mm); in flush_tlb_range() 752 struct mm_struct *mm = vma->vm_mm; in flush_icache_user_range()
|
/linux-4.1.27/arch/m32r/kernel/ |
D | smp.c | 295 smp_flush_tlb_mm(vma->vm_mm); in smp_flush_tlb_range() 317 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_page()
|
/linux-4.1.27/Documentation/ |
D | cachetlb.txt | 58 modifications for the address space 'vma->vm_mm' in the range 77 address space is available via vma->vm_mm. Also, one may 83 page table modification for address space 'vma->vm_mm' for 86 'vma->vm_mm' for virtual address 'addr'. 96 "vma->vm_mm", in the software page tables. 172 entries in the cache for 'vma->vm_mm' for virtual addresses in 189 address space is available via vma->vm_mm. Also, one may 200 'vma->vm_mm' for virtual address 'addr' which translates
|
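The cachetlb.txt excerpts state the contract all of the arch hooks in this list implement: each hook receives a vma, and the address space whose cache or TLB state must be modified is always vma->vm_mm. A minimal software-managed implementation honouring that contract might look like the following (illustrative only; the NO_CONTEXT test and invalidate_one_entry() are assumptions modelled on the sh/avr32 entries above):

	void flush_tlb_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
	{
		struct mm_struct *mm = vma->vm_mm;	/* the documented target */
		unsigned long addr;

		if (mm->context == NO_CONTEXT)	/* mm never got an ASID */
			return;

		for (addr = start; addr < end; addr += PAGE_SIZE)
			invalidate_one_entry(mm->context, addr); /* hypothetical */
	}
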
/linux-4.1.27/arch/mips/include/asm/ |
D | hugetlb.h | 102 set_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
|
/linux-4.1.27/drivers/media/v4l2-core/ |
D | videobuf2-memops.c | 52 vma_copy->vm_mm = NULL; in vb2_get_vma()
|
/linux-4.1.27/arch/ia64/mm/ |
D | init.c | 119 vma->vm_mm = current->mm; in ia64_init_addr_space() 138 vma->vm_mm = current->mm; in ia64_init_addr_space() 285 gate_vma.vm_mm = NULL; in gate_vma_init()
|
D | tlb.c | 304 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
|
/linux-4.1.27/arch/um/kernel/ |
D | tlb.c | 389 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page() 506 if (vma->vm_mm == NULL) in flush_tlb_range() 508 else fix_range(vma->vm_mm, start, end, 0); in flush_tlb_range()
|
/linux-4.1.27/fs/proc/ |
D | task_mmu.c | 271 struct mm_struct *mm = vma->vm_mm; in show_map_vma() 549 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range() 619 .mm = vma->vm_mm, in show_smap() 762 set_pte_at(vma->vm_mm, addr, pte, ptent); in clear_soft_dirty() 776 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd() 820 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range() 1483 struct mm_struct *mm = vma->vm_mm; in show_numa_map()
|
D | task_nommu.c | 151 struct mm_struct *mm = vma->vm_mm; in nommu_vma_show()
|
D | vmcore.c | 389 do_munmap(vma->vm_mm, from, len); in remap_oldmem_pfn_checked() 481 do_munmap(vma->vm_mm, vma->vm_start, len); in mmap_vmcore()
|
/linux-4.1.27/arch/cris/arch-v32/mm/ |
D | tlb.c | 130 page_id = vma->vm_mm->context.page_id; in __flush_tlb_page()
|
/linux-4.1.27/arch/cris/arch-v10/mm/ |
D | tlb.c | 101 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | hugetlb.h | 140 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); in huge_ptep_clear_flush()
|
D | pgtable-ppc64.h | 269 __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ 304 int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
|
D | pgtable-ppc32.h | 264 __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
|
/linux-4.1.27/include/asm-generic/ |
D | pgtable.h | 48 set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); in ptep_test_and_clear_young() 64 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); in pmdp_test_and_clear_young()
|
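The asm-generic fragments are the default accessed-bit helpers; built out from the set_pte_at() line shown, ptep_test_and_clear_young() is roughly (a sketch per the excerpt):

	static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
						    unsigned long address,
						    pte_t *ptep)
	{
		pte_t pte = *ptep;
		int r = 1;

		if (!pte_young(pte))
			r = 0;
		else
			/* clear the young bit through the owning mm */
			set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
		return r;
	}
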
/linux-4.1.27/arch/unicore32/kernel/ |
D | asm-offsets.c | 88 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); in main()
|
/linux-4.1.27/fs/ |
D | dax.c | 341 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { in do_dax_fault() 371 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in do_dax_fault()
|
D | exec.c | 260 vma->vm_mm = mm; in __bprm_mm_init() 578 struct mm_struct *mm = vma->vm_mm; in shift_arg_pages()
|
D | aio.c | 320 struct mm_struct *mm = vma->vm_mm; in aio_ring_remap()
|
D | binfmt_elf.c | 1204 if (vma == get_gate_vma(vma->vm_mm)) in always_dump_vma()
|
/linux-4.1.27/arch/nios2/mm/ |
D | tlb.c | 97 unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context); in flush_tlb_range()
|
D | cacheflush.c | 101 if (mpnt->vm_mm != mm) in flush_aliases()
|
/linux-4.1.27/arch/arm64/kernel/ |
D | asm-offsets.c | 65 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); in main()
|
/linux-4.1.27/drivers/gpu/drm/exynos/ |
D | exynos_drm_gem.c | 397 vma_copy->vm_mm = NULL; in exynos_gem_get_vma()
|
/linux-4.1.27/drivers/misc/sgi-gru/ |
D | grufault.c | 227 pgdp = pgd_offset(vma->vm_mm, vaddr); in atomic_pte_lookup()
|
/linux-4.1.27/arch/arm/mach-rpc/ |
D | ecard.c | 240 vma.vm_mm = mm; in ecard_init_pgtables()
|
/linux-4.1.27/fs/hugetlbfs/ |
D | inode.c | 133 hugetlb_prefault_arch_hook(vma->vm_mm); in hugetlbfs_file_mmap()
|
/linux-4.1.27/security/selinux/ |
D | hooks.c | 3361 if (vma->vm_start >= vma->vm_mm->start_brk && in selinux_file_mprotect() 3362 vma->vm_end <= vma->vm_mm->brk) { in selinux_file_mprotect() 3365 vma->vm_start <= vma->vm_mm->start_stack && in selinux_file_mprotect() 3366 vma->vm_end >= vma->vm_mm->start_stack) { in selinux_file_mprotect()
|
/linux-4.1.27/arch/s390/mm/ |
D | pgtable.c | 1354 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_set_access_flags()
|
/linux-4.1.27/kernel/ |
D | fork.c | 449 tmp->vm_mm = mm; in dup_mmap()
|
/linux-4.1.27/arch/x86/xen/ |
D | mmu.c | 2507 err = apply_to_page_range(vma->vm_mm, addr, range, in do_remap_mfn()
|
/linux-4.1.27/arch/ia64/kernel/ |
D | perfmon.c | 2289 vma->vm_mm = mm; in pfm_smpl_buffer_alloc() 2335 vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, in pfm_smpl_buffer_alloc()
|
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/ |
D | file.c | 247 psize = get_slice_psize(vma->vm_mm, address); in spufs_mem_mmap_fault()
|
/linux-4.1.27/Documentation/virtual/uml/ |
D | UserModeLinux-HOWTO.txt | 3295 (gdb) call pgd_offset_proc(vma->vm_mm, address) 3307 $23 = {vm_mm = 0x507d2434, vm_start = 0, vm_end = 134512640, 3313 (gdb) p *vma.vm_mm
|
/linux-4.1.27/drivers/android/ |
D | binder.c | 2920 proc->vma_vm_mm = vma->vm_mm; in binder_mmap()
|
/linux-4.1.27/kernel/sched/ |
D | fair.c | 2194 if (!vma->vm_mm || in task_numa_work()
|