vm_mm 55 arch/alpha/include/asm/cacheflush.h struct mm_struct *mm = vma->vm_mm;
vm_mm 120 arch/alpha/include/asm/tlbflush.h struct mm_struct *mm = vma->vm_mm;
vm_mm 134 arch/alpha/include/asm/tlbflush.h flush_tlb_mm(vma->vm_mm);
vm_mm 695 arch/alpha/kernel/smp.c struct mm_struct *mm = vma->vm_mm;
vm_mm 728 arch/alpha/kernel/smp.c flush_tlb_mm(vma->vm_mm);
vm_mm 746 arch/alpha/kernel/smp.c struct mm_struct *mm = vma->vm_mm;
vm_mm 338 arch/arc/mm/tlb.c local_flush_tlb_mm(vma->vm_mm);
vm_mm 351 arch/arc/mm/tlb.c if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
vm_mm 353 arch/arc/mm/tlb.c tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
vm_mm 408 arch/arc/mm/tlb.c if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
vm_mm 409 arch/arc/mm/tlb.c tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
vm_mm 472 arch/arc/mm/tlb.c on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
vm_mm 484 arch/arc/mm/tlb.c on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
vm_mm 497 arch/arc/mm/tlb.c on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
vm_mm 547 arch/arc/mm/tlb.c if (current->active_mm != vma->vm_mm)
vm_mm 552 arch/arc/mm/tlb.c tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);
vm_mm 710 arch/arc/mm/tlb.c if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
vm_mm 711 arch/arc/mm/tlb.c unsigned int asid = hw_pid(vma->vm_mm, cpu);
vm_mm 227 arch/arm/include/asm/cacheflush.h struct mm_struct *mm = vma->vm_mm;
vm_mm 237 arch/arm/include/asm/cacheflush.h struct mm_struct *mm = vma->vm_mm;
vm_mm 425 arch/arm/include/asm/tlbflush.h uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
vm_mm 428 arch/arm/include/asm/tlbflush.h cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
vm_mm 446 arch/arm/include/asm/tlbflush.h uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
vm_mm 463 arch/arm/include/asm/tlbflush.h uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
vm_mm 125 arch/arm/kernel/asm-offsets.c DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
vm_mm 202 arch/arm/kernel/smp_tlb.c on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
vm_mm 206 arch/arm/kernel/smp_tlb.c broadcast_tlb_mm_a15_erratum(vma->vm_mm);
vm_mm 228 arch/arm/kernel/smp_tlb.c on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
vm_mm 232 arch/arm/kernel/smp_tlb.c broadcast_tlb_mm_a15_erratum(vma->vm_mm);
vm_mm 58 arch/arm/mm/fault-armv.c set_pte_at(vma->vm_mm, address, ptep, entry);
vm_mm 99 arch/arm/mm/fault-armv.c pgd = pgd_offset(vma->vm_mm, address);
vm_mm 116 arch/arm/mm/fault-armv.c ptl = pte_lockptr(vma->vm_mm, pmd);
vm_mm 132 arch/arm/mm/fault-armv.c struct mm_struct *mm = vma->vm_mm;
vm_mm 152 arch/arm/mm/fault-armv.c if (mpnt->vm_mm != mm || mpnt == vma)
vm_mm 163 arch/arm/mm/flush.c if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
vm_mm 258 arch/arm/mm/flush.c if (mpnt->vm_mm != mm)
vm_mm 160 arch/arm64/include/asm/tlbflush.h unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
vm_mm 184 arch/arm64/include/asm/tlbflush.h unsigned long asid = ASID(vma->vm_mm);
vm_mm 191 arch/arm64/include/asm/tlbflush.h flush_tlb_mm(vma->vm_mm);
vm_mm 76 arch/arm64/kernel/asm-offsets.c DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
vm_mm 382 arch/arm64/mm/hugetlbpage.c ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
vm_mm 388 arch/arm64/mm/hugetlbpage.c orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
vm_mm 399 arch/arm64/mm/hugetlbpage.c set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
vm_mm 442 arch/arm64/mm/hugetlbpage.c ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
vm_mm 443 arch/arm64/mm/hugetlbpage.c clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
vm_mm 79 arch/csky/kernel/vdso.c if (vma->vm_mm == NULL)
vm_mm 82 arch/csky/kernel/vdso.c if (vma->vm_start == (long)vma->vm_mm->context.vdso)
vm_mm 50 arch/csky/mm/tlb.c unsigned long newpid = cpu_asid(vma->vm_mm);
vm_mm 120 arch/csky/mm/tlb.c int newpid = cpu_asid(vma->vm_mm);
vm_mm 85 arch/hexagon/kernel/vdso.c if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
vm_mm 28 arch/hexagon/mm/vm_tlb.c struct mm_struct *mm = vma->vm_mm;
vm_mm 68 arch/hexagon/mm/vm_tlb.c struct mm_struct *mm = vma->vm_mm;
vm_mm 420 arch/ia64/include/asm/pgtable.h set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
vm_mm 538 arch/ia64/include/asm/pgtable.h set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry); \
vm_mm 106 arch/ia64/include/asm/tlbflush.h if (vma->vm_mm == current->active_mm)
vm_mm 109 arch/ia64/include/asm/tlbflush.h vma->vm_mm->context = 0;
vm_mm 2288 arch/ia64/kernel/perfmon.c vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
vm_mm 315 arch/ia64/mm/tlb.c struct mm_struct *mm = vma->vm_mm;
vm_mm 210 arch/m68k/include/asm/cacheflush_mm.h if (vma->vm_mm == current->mm)
vm_mm 216 arch/m68k/include/asm/cacheflush_mm.h if (vma->vm_mm == current->mm)
vm_mm 87 arch/m68k/include/asm/tlbflush.h if (vma->vm_mm == current->active_mm) {
vm_mm 98 arch/m68k/include/asm/tlbflush.h if (vma->vm_mm == current->active_mm)
vm_mm 178 arch/m68k/include/asm/tlbflush.h sun3_put_context(vma->vm_mm->context);
vm_mm 195 arch/m68k/include/asm/tlbflush.h struct mm_struct *mm = vma->vm_mm;
vm_mm 75 arch/mips/include/asm/hugetlb.h set_pte_at(vma->vm_mm, addr, ptep, pte);
vm_mm 568 arch/mips/kernel/smp.c struct mm_struct *mm = vma->vm_mm;
vm_mm 648 arch/mips/kernel/smp.c write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
vm_mm 655 arch/mips/kernel/smp.c } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
vm_mm 656 arch/mips/kernel/smp.c (current->mm != vma->vm_mm)) {
vm_mm 674 arch/mips/kernel/smp.c if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
vm_mm 675 arch/mips/kernel/smp.c set_cpu_context(cpu, vma->vm_mm, 1);
vm_mm 84 arch/mips/mm/c-octeon.c mask = *mm_cpumask(vma->vm_mm);
vm_mm 242 arch/mips/mm/c-r3k.c struct mm_struct *mm = vma->vm_mm;
vm_mm 581 arch/mips/mm/c-r4k.c if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
vm_mm 650 arch/mips/mm/c-r4k.c struct mm_struct *mm = vma->vm_mm;
vm_mm 162 arch/mips/mm/c-tx39.c if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
vm_mm 171 arch/mips/mm/c-tx39.c struct mm_struct *mm = vma->vm_mm;
vm_mm 74 arch/mips/mm/tlb-r3k.c struct mm_struct *mm = vma->vm_mm;
vm_mm 155 arch/mips/mm/tlb-r3k.c if (cpu_context(cpu, vma->vm_mm) != 0) {
vm_mm 160 arch/mips/mm/tlb-r3k.c printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
vm_mm 162 arch/mips/mm/tlb-r3k.c newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
vm_mm 191 arch/mips/mm/tlb-r3k.c if (current->active_mm != vma->vm_mm)
vm_mm 197 arch/mips/mm/tlb-r3k.c if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
vm_mm 199 arch/mips/mm/tlb-r3k.c (cpu_context(cpu, vma->vm_mm)), pid);
vm_mm 110 arch/mips/mm/tlb-r4k.c struct mm_struct *mm = vma->vm_mm;
vm_mm 216 arch/mips/mm/tlb-r4k.c if (cpu_context(cpu, vma->vm_mm) != 0) {
vm_mm 228 arch/mips/mm/tlb-r4k.c write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
vm_mm 230 arch/mips/mm/tlb-r4k.c write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
vm_mm 306 arch/mips/mm/tlb-r4k.c if (current->active_mm != vma->vm_mm)
vm_mm 319 arch/mips/mm/tlb-r4k.c pgdp = pgd_offset(vma->vm_mm, address);
vm_mm 59 arch/nds32/mm/cacheflush.c if (vma->vm_mm == current->active_mm) {
vm_mm 146 arch/nds32/mm/cacheflush.c if (va_present(vma->vm_mm, start))
vm_mm 307 arch/nds32/mm/cacheflush.c if (vma->vm_mm != current->active_mm)
vm_mm 372 arch/nds32/mm/proc.c if (va_present(vma->vm_mm, start)) {
vm_mm 381 arch/nds32/mm/proc.c if (va_present(vma->vm_mm, start)) {
vm_mm 388 arch/nds32/mm/proc.c if (va_present(vma->vm_mm, end - 1)) {
vm_mm 396 arch/nds32/mm/proc.c if (va_present(vma->vm_mm, t_start)) {
vm_mm 27 arch/nds32/mm/tlb.c ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;
vm_mm 44 arch/nds32/mm/tlb.c ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;
vm_mm 85 arch/nios2/mm/cacheflush.c if (mpnt->vm_mm != mm)
vm_mm 106 arch/nios2/mm/tlb.c unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context);
vm_mm 116 arch/nios2/mm/tlb.c unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context);
vm_mm 506 arch/parisc/include/asm/pgtable.h spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
vm_mm 509 arch/parisc/include/asm/pgtable.h spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
vm_mm 513 arch/parisc/include/asm/pgtable.h purge_tlb_entries(vma->vm_mm, addr);
vm_mm 514 arch/parisc/include/asm/pgtable.h spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
vm_mm 20 arch/parisc/include/asm/tlbflush.h __flush_tlb_range((vma)->vm_mm->context, start, end)
vm_mm 67 arch/parisc/include/asm/tlbflush.h purge_tlb_entries(vma->vm_mm, addr);
vm_mm 603 arch/parisc/kernel/cache.c if (vma->vm_mm->context)
vm_mm 609 arch/parisc/kernel/cache.c if (vma->vm_mm->context == mfsp(3)) {
vm_mm 617 arch/parisc/kernel/cache.c pgd = vma->vm_mm->pgd;
vm_mm 625 arch/parisc/kernel/cache.c if (unlikely(vma->vm_mm->context)) {
vm_mm 639 arch/parisc/kernel/cache.c if (likely(vma->vm_mm->context)) {
vm_mm 181 arch/parisc/mm/hugetlbpage.c struct mm_struct *mm = vma->vm_mm;
vm_mm 315 arch/powerpc/include/asm/book3s/32/pgtable.h __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
vm_mm 386 arch/powerpc/include/asm/book3s/64/pgtable.h __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
vm_mm 146 arch/powerpc/include/asm/book3s/64/tlbflush.h if (atomic_read(&vma->vm_mm->context.copros) > 0)
vm_mm 54 arch/powerpc/include/asm/hugetlb.h huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
vm_mm 296 arch/powerpc/include/asm/nohash/32/pgtable.h __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
vm_mm 256 arch/powerpc/include/asm/nohash/64/pgtable.h __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
vm_mm 284 arch/powerpc/include/asm/nohash/64/pgtable.h int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
vm_mm 224 arch/powerpc/kernel/vdso.c if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
vm_mm 341 arch/powerpc/mm/book3s32/mmu.c hash_preload(vma->vm_mm, address);
vm_mm 134 arch/powerpc/mm/book3s32/tlb.c flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
vm_mm 147 arch/powerpc/mm/book3s32/tlb.c mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
vm_mm 162 arch/powerpc/mm/book3s32/tlb.c flush_range(vma->vm_mm, start, end);
vm_mm 141 arch/powerpc/mm/book3s64/hash_hugetlbpage.c pte_val = pte_update(vma->vm_mm, addr, ptep,
vm_mm 154 arch/powerpc/mm/book3s64/hash_hugetlbpage.c set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
vm_mm 248 arch/powerpc/mm/book3s64/hash_pgtable.c serialize_against_pte_lookup(vma->vm_mm);
vm_mm 258 arch/powerpc/mm/book3s64/hash_pgtable.c flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
vm_mm 1650 arch/powerpc/mm/book3s64/hash_utils.c hash_preload(vma->vm_mm, address, is_exec, trap);
vm_mm 41 arch/powerpc/mm/book3s64/pgtable.c assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
vm_mm 58 arch/powerpc/mm/book3s64/pgtable.c return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
vm_mm 110 arch/powerpc/mm/book3s64/pgtable.c old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
vm_mm 119 arch/powerpc/mm/book3s64/pgtable.c serialize_against_pte_lookup(vma->vm_mm);
vm_mm 430 arch/powerpc/mm/book3s64/pgtable.c pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);
vm_mm 442 arch/powerpc/mm/book3s64/pgtable.c set_pte_at(vma->vm_mm, addr, ptep, pte);
vm_mm 331 arch/powerpc/mm/book3s64/pkeys.c return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);
vm_mm 352 arch/powerpc/mm/book3s64/pkeys.c pkey = execute_only_pkey(vma->vm_mm);
vm_mm 400 arch/powerpc/mm/book3s64/pkeys.c if (current->mm != vma->vm_mm)
vm_mm 18 arch/powerpc/mm/book3s64/radix_hugetlbpage.c radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
vm_mm 27 arch/powerpc/mm/book3s64/radix_hugetlbpage.c radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
vm_mm 37 arch/powerpc/mm/book3s64/radix_hugetlbpage.c radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
vm_mm 99 arch/powerpc/mm/book3s64/radix_hugetlbpage.c struct mm_struct *mm = vma->vm_mm;
vm_mm 109 arch/powerpc/mm/book3s64/radix_hugetlbpage.c set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
vm_mm 956 arch/powerpc/mm/book3s64/radix_pgtable.c serialize_against_pte_lookup(vma->vm_mm);
vm_mm 958 arch/powerpc/mm/book3s64/radix_pgtable.c radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
vm_mm 1035 arch/powerpc/mm/book3s64/radix_pgtable.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1070 arch/powerpc/mm/book3s64/radix_pgtable.c struct mm_struct *mm = vma->vm_mm;
vm_mm 611 arch/powerpc/mm/book3s64/radix_tlb.c radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
vm_mm 780 arch/powerpc/mm/book3s64/radix_tlb.c radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
vm_mm 948 arch/powerpc/mm/book3s64/radix_tlb.c __radix__flush_tlb_range(vma->vm_mm, start, end, false);
vm_mm 1188 arch/powerpc/mm/book3s64/radix_tlb.c radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
vm_mm 505 arch/powerpc/mm/hugetlbpage.c struct mm_struct *mm = vma->vm_mm;
vm_mm 554 arch/powerpc/mm/hugetlbpage.c unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
vm_mm 138 arch/powerpc/mm/nohash/book3e_hugetlbpage.c mm = vma->vm_mm;
vm_mm 203 arch/powerpc/mm/nohash/book3e_hugetlbpage.c __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
vm_mm 220 arch/powerpc/mm/nohash/tlb.c __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
vm_mm 342 arch/powerpc/mm/nohash/tlb.c __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
vm_mm 389 arch/powerpc/mm/nohash/tlb.c flush_tlb_mm(vma->vm_mm);
vm_mm 215 arch/powerpc/mm/pgtable.c assert_pte_locked(vma->vm_mm, address);
vm_mm 247 arch/powerpc/mm/pgtable.c assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
vm_mm 354 arch/riscv/include/asm/pgtable.h set_pte_at(vma->vm_mm, address, ptep, entry);
vm_mm 92 arch/riscv/kernel/vdso.c if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
vm_mm 28 arch/riscv/mm/tlbflush.c __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
vm_mm 34 arch/riscv/mm/tlbflush.c __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
vm_mm 62 arch/s390/include/asm/hugetlb.h huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
vm_mm 71 arch/s390/include/asm/hugetlb.h huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
vm_mm 72 arch/s390/include/asm/hugetlb.h set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
vm_mm 1065 arch/s390/include/asm/pgtable.h pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
vm_mm 1092 arch/s390/include/asm/pgtable.h return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
vm_mm 1132 arch/s390/include/asm/pgtable.h ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
vm_mm 1523 arch/s390/include/asm/pgtable.h pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
vm_mm 1533 arch/s390/include/asm/pgtable.h pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
vm_mm 1585 arch/s390/include/asm/pgtable.h return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
vm_mm 1594 arch/s390/include/asm/pgtable.h return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
vm_mm 1611 arch/s390/include/asm/pgtable.h return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
vm_mm 126 arch/s390/include/asm/tlbflush.h __tlb_flush_mm_lazy(vma->vm_mm);
vm_mm 59 arch/s390/kernel/vdso.c if (vma->vm_mm->context.compat_mm) {
vm_mm 80 arch/s390/kernel/vdso.c if (vma->vm_mm->context.compat_mm)
vm_mm 87 arch/s390/kernel/vdso.c if (WARN_ON_ONCE(current->mm != vma->vm_mm))
vm_mm 310 arch/s390/mm/pgtable.c struct mm_struct *mm = vma->vm_mm;
vm_mm 327 arch/s390/mm/pgtable.c struct mm_struct *mm = vma->vm_mm;
vm_mm 393 arch/sh/kernel/smp.c struct mm_struct *mm = vma->vm_mm;
vm_mm 439 arch/sh/kernel/smp.c if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
vm_mm 440 arch/sh/kernel/smp.c (current->mm != vma->vm_mm)) {
vm_mm 450 arch/sh/kernel/smp.c cpu_context(i, vma->vm_mm) = 0;
vm_mm 89 arch/sh/kernel/vsyscall/vsyscall.c if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
vm_mm 223 arch/sh/mm/cache-sh4.c if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
vm_mm 226 arch/sh/mm/cache-sh4.c pgd = pgd_offset(vma->vm_mm, address);
vm_mm 235 arch/sh/mm/cache-sh4.c if ((vma->vm_mm == current->active_mm))
vm_mm 286 arch/sh/mm/cache-sh4.c if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
vm_mm 113 arch/sh/mm/cache-sh5.c vma_asid = cpu_asid(cpu, vma->vm_mm);
vm_mm 535 arch/sh/mm/cache-sh5.c sh64_dcache_purge_user_range(vma->vm_mm, start, end);
vm_mm 536 arch/sh/mm/cache-sh5.c sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
vm_mm 25 arch/sh/mm/tlb-pteaex.c if (vma && current->active_mm != vma->vm_mm)
vm_mm 35 arch/sh/mm/tlb-sh3.c if (vma && current->active_mm != vma->vm_mm)
vm_mm 23 arch/sh/mm/tlb-sh4.c if (vma && current->active_mm != vma->vm_mm)
vm_mm 19 arch/sh/mm/tlbflush_32.c if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
vm_mm 24 arch/sh/mm/tlbflush_32.c asid = cpu_asid(cpu, vma->vm_mm);
vm_mm 28 arch/sh/mm/tlbflush_32.c if (vma->vm_mm != current->mm) {
vm_mm 42 arch/sh/mm/tlbflush_32.c struct mm_struct *mm = vma->vm_mm;
vm_mm 71 arch/sh/mm/tlbflush_64.c if (vma->vm_mm) {
vm_mm 88 arch/sh/mm/tlbflush_64.c mm = vma->vm_mm;
vm_mm 25 arch/sparc/include/asm/cacheflush_64.h flush_cache_mm((vma)->vm_mm)
vm_mm 27 arch/sparc/include/asm/cacheflush_64.h flush_cache_mm((vma)->vm_mm)
vm_mm 50 arch/sparc/include/asm/hugetlb.h set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
vm_mm 432 arch/sparc/include/asm/pgtable_32.h set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
vm_mm 55 arch/sparc/kernel/asm-offsets.c DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
vm_mm 430 arch/sparc/mm/init_64.c mm = vma->vm_mm;
vm_mm 2959 arch/sparc/mm/init_64.c mm = vma->vm_mm;
vm_mm 610 arch/sparc/mm/srmmu.c if ((ctx1 = vma->vm_mm->context) != -1) {
vm_mm 1265 arch/sparc/mm/srmmu.c FLUSH_BEGIN(vma->vm_mm)
vm_mm 1273 arch/sparc/mm/srmmu.c FLUSH_BEGIN(vma->vm_mm)
vm_mm 1316 arch/sparc/mm/srmmu.c FLUSH_BEGIN(vma->vm_mm)
vm_mm 1323 arch/sparc/mm/srmmu.c FLUSH_BEGIN(vma->vm_mm)
vm_mm 1705 arch/sparc/mm/srmmu.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1722 arch/sparc/mm/srmmu.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1737 arch/sparc/mm/srmmu.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1752 arch/sparc/mm/srmmu.c struct mm_struct *mm = vma->vm_mm;
vm_mm 236 arch/sparc/mm/tlb.c __set_pmd_acct(vma->vm_mm, address, old, pmd);
vm_mm 260 arch/sparc/mm/tlb.c (vma->vm_mm)->context.thp_pte_count--;
vm_mm 430 arch/um/kernel/tlb.c struct mm_struct *mm = vma->vm_mm;
vm_mm 565 arch/um/kernel/tlb.c if (vma->vm_mm == NULL)
vm_mm 567 arch/um/kernel/tlb.c else fix_range(vma->vm_mm, start, end, 0);
vm_mm 87 arch/unicore32/include/asm/tlbflush.h if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
vm_mm 85 arch/unicore32/kernel/asm-offsets.c DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
vm_mm 45 arch/x86/entry/vdso/vma.c const struct vdso_image *image = vma->vm_mm->context.vdso_image;
vm_mm 90 arch/x86/entry/vdso/vma.c const struct vdso_image *image = vma->vm_mm->context.vdso_image;
vm_mm 323 arch/x86/include/asm/mmu_context.h if (current->mm != vma->vm_mm)
vm_mm 572 arch/x86/include/asm/tlbflush.h flush_tlb_mm_range((vma)->vm_mm, start, end, \
vm_mm 585 arch/x86/include/asm/tlbflush.h flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
vm_mm 68 arch/x86/mm/pkeys.c if (vma_pkey(vma) != vma->vm_mm->context.execute_only_pkey)
vm_mm 93 arch/x86/mm/pkeys.c pkey = execute_only_pkey(vma->vm_mm);
vm_mm 7 arch/x86/um/mem_64.c if (vma->vm_mm && vma->vm_start == um_vdso_addr)
vm_mm 313 arch/x86/xen/mmu_pv.c trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
vm_mm 322 arch/x86/xen/mmu_pv.c trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
vm_mm 2745 arch/x86/xen/mmu_pv.c err = apply_to_page_range(vma->vm_mm, addr, range,
vm_mm 92 arch/xtensa/mm/tlb.c struct mm_struct *mm = vma->vm_mm;
vm_mm 129 arch/xtensa/mm/tlb.c struct mm_struct* mm = vma->vm_mm;
vm_mm 318 drivers/android/binder_alloc.c alloc->vma_vm_mm = vma->vm_mm;
vm_mm 70 drivers/gpu/drm/i915/i915_mm.c r.mm = vma->vm_mm;
vm_mm 72 drivers/gpu/drm/ttm/ttm_bo_vm.c up_read(&vmf->vma->vm_mm->mmap_sem);
vm_mm 138 drivers/gpu/drm/ttm/ttm_bo_vm.c up_read(&vmf->vma->vm_mm->mmap_sem);
vm_mm 986 drivers/infiniband/core/uverbs_main.c mm = priv->vma->vm_mm;
vm_mm 1013 drivers/infiniband/core/uverbs_main.c if (vma->vm_mm != mm)
vm_mm 214 drivers/misc/sgi-gru/grufault.c pgdp = pgd_offset(vma->vm_mm, vaddr);
vm_mm 1090 drivers/xen/gntdev.c if (use_ptemod && priv->mm != vma->vm_mm) {
vm_mm 1123 drivers/xen/gntdev.c err = apply_to_page_range(vma->vm_mm, vma->vm_start,
vm_mm 1152 drivers/xen/gntdev.c apply_to_page_range(vma->vm_mm, vma->vm_start,
vm_mm 953 drivers/xen/privcmd.c return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
vm_mm 138 drivers/xen/xlate_mmu.c set_pte_at(info->vma->vm_mm, addr, ptep, pte);
vm_mm 168 drivers/xen/xlate_mmu.c err = apply_to_page_range(vma->vm_mm, addr, range,
vm_mm 289 drivers/xen/xlate_mmu.c .mm = vma->vm_mm,
vm_mm 294 drivers/xen/xlate_mmu.c return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
vm_mm 331 fs/aio.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1278 fs/binfmt_elf.c if (vma == get_gate_vma(vma->vm_mm))
vm_mm 801 fs/dax.c if (follow_pte_pmd(vma->vm_mm, address, &range,
vm_mm 825 fs/dax.c set_pmd_at(vma->vm_mm, address, pmdp, pmd);
vm_mm 839 fs/dax.c set_pte_at(vma->vm_mm, address, ptep, pte);
vm_mm 1345 fs/dax.c count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
vm_mm 1427 fs/dax.c zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
vm_mm 1437 fs/dax.c pgtable = pte_alloc_one(vma->vm_mm);
vm_mm 1442 fs/dax.c ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
vm_mm 1449 fs/dax.c pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
vm_mm 1450 fs/dax.c mm_inc_nr_ptes(vma->vm_mm);
vm_mm 1454 fs/dax.c set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
vm_mm 1461 fs/dax.c pte_free(vma->vm_mm, pgtable);
vm_mm 628 fs/exec.c struct mm_struct *mm = vma->vm_mm;
vm_mm 275 fs/proc/task_mmu.c return vma->vm_start <= vma->vm_mm->start_stack &&
vm_mm 276 fs/proc/task_mmu.c vma->vm_end >= vma->vm_mm->start_stack;
vm_mm 302 fs/proc/task_mmu.c struct mm_struct *mm = vma->vm_mm;
vm_mm 624 fs/proc/task_mmu.c pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vm_mm 1008 fs/proc/task_mmu.c set_pte_at(vma->vm_mm, addr, pte, ptent);
vm_mm 1035 fs/proc/task_mmu.c set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
vm_mm 1038 fs/proc/task_mmu.c set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
vm_mm 1081 fs/proc/task_mmu.c pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vm_mm 1823 fs/proc/task_mmu.c struct mm_struct *mm = vma->vm_mm;
vm_mm 131 fs/proc/task_nommu.c struct mm_struct *mm = vma->vm_mm;
vm_mm 147 fs/proc/task_nommu.c struct mm_struct *mm = vma->vm_mm;
vm_mm 533 fs/proc/vmcore.c do_munmap(vma->vm_mm, from, len, NULL);
vm_mm 661 fs/proc/vmcore.c do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
vm_mm 354 fs/userfaultfd.c struct mm_struct *mm = vmf->vma->vm_mm;
vm_mm 706 fs/userfaultfd.c ctx->mm = vma->vm_mm;
vm_mm 793 fs/userfaultfd.c struct mm_struct *mm = vma->vm_mm;
vm_mm 71 include/asm-generic/pgtable.h set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
vm_mm 87 include/asm-generic/pgtable.h set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
vm_mm 322 include/asm-generic/pgtable.h set_pmd_at(vma->vm_mm, address, pmdp, pmd);
vm_mm 618 include/asm-generic/pgtable.h return ptep_get_and_clear(vma->vm_mm, addr, ptep);
vm_mm 629 include/asm-generic/pgtable.h set_pte_at(vma->vm_mm, addr, ptep, pte);
vm_mm 367 include/asm-generic/tlb.h .vm_mm = tlb->mm,
vm_mm 106 include/linux/huge_mm.h if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
vm_mm 219 include/linux/huge_mm.h VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
vm_mm 228 include/linux/huge_mm.h VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
vm_mm 579 include/linux/hugetlb.h return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
vm_mm 589 include/linux/hugetlb.h set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
vm_mm 57 include/linux/khugepaged.h if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
vm_mm 61 include/linux/khugepaged.h !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
vm_mm 62 include/linux/khugepaged.h if (__khugepaged_enter(vma->vm_mm))
vm_mm 534 include/linux/mm.h vma->vm_mm = mm;
vm_mm 562 include/linux/mm.h #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
vm_mm 314 include/linux/mm_types.h struct mm_struct *vm_mm; /* The address space we belong to. */
vm_mm 428 include/linux/mmu_notifier.h __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
vm_mm 441 include/linux/mmu_notifier.h __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
vm_mm 454 include/linux/mmu_notifier.h __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
vm_mm 465 include/linux/mmu_notifier.h __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
vm_mm 473 include/linux/mmu_notifier.h struct mm_struct *___mm = (__vma)->vm_mm; \
vm_mm 486 include/linux/mmu_notifier.h struct mm_struct *___mm = (__vma)->vm_mm; \
vm_mm 499 include/linux/mmu_notifier.h struct mm_struct *___mm = (__vma)->vm_mm; \
vm_mm 5574 kernel/events/core.c event->pmu->event_mapped(event, vma->vm_mm);
vm_mm 5597 kernel/events/core.c event->pmu->event_unmapped(event, vma->vm_mm);
vm_mm 5616 kernel/events/core.c atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
vm_mm 5690 kernel/events/core.c atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
vm_mm 5858 kernel/events/core.c locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
vm_mm 5899 kernel/events/core.c atomic64_add(extra, &vma->vm_mm->pinned_vm);
vm_mm 5916 kernel/events/core.c event->pmu->event_mapped(event, vma->vm_mm);
vm_mm 7512 kernel/events/core.c if (vma->vm_start <= vma->vm_mm->start_brk &&
vm_mm 7513 kernel/events/core.c vma->vm_end >= vma->vm_mm->brk) {
vm_mm 7517 kernel/events/core.c if (vma->vm_start <= vma->vm_mm->start_stack &&
vm_mm 7518 kernel/events/core.c vma->vm_end >= vma->vm_mm->start_stack) {
vm_mm 157 kernel/events/uprobes.c struct mm_struct *mm = vma->vm_mm;
vm_mm 171 kernel/events/uprobes.c err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
vm_mm 1002 kernel/events/uprobes.c if (!mmget_not_zero(vma->vm_mm))
vm_mm 1010 kernel/events/uprobes.c info->mm = vma->vm_mm;
vm_mm 1345 kernel/events/uprobes.c if (du->mm != vma->vm_mm ||
vm_mm 1350 kernel/events/uprobes.c ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
vm_mm 1352 kernel/events/uprobes.c update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
vm_mm 1379 kernel/events/uprobes.c test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
vm_mm 1398 kernel/events/uprobes.c filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
vm_mm 1400 kernel/events/uprobes.c install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
vm_mm 1436 kernel/events/uprobes.c if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
vm_mm 1439 kernel/events/uprobes.c if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
vm_mm 1440 kernel/events/uprobes.c test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
vm_mm 1444 kernel/events/uprobes.c set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
vm_mm 547 kernel/fork.c tmp->vm_mm = mm;
vm_mm 2564 kernel/sched/fair.c if (!vma->vm_mm ||
vm_mm 128 mm/debug.c vma->vm_prev, vma->vm_mm,
vm_mm 2367 mm/filemap.c up_read(&vmf->vma->vm_mm->mmap_sem);
vm_mm 2506 mm/filemap.c count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
vm_mm 154 mm/gup.c set_pte_at(vma->vm_mm, address, pte, entry);
vm_mm 186 mm/gup.c struct mm_struct *mm = vma->vm_mm;
vm_mm 325 mm/gup.c struct mm_struct *mm = vma->vm_mm;
vm_mm 440 mm/gup.c struct mm_struct *mm = vma->vm_mm;
vm_mm 523 mm/gup.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1206 mm/gup.c struct mm_struct *mm = vma->vm_mm;
vm_mm 927 mm/hmm.c walk_page_range(vma->vm_mm, start, end, &hmm_walk_ops,
vm_mm 931 mm/hmm.c ret = walk_page_range(vma->vm_mm, start, end,
vm_mm 586 mm/huge_memory.c if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
vm_mm 592 mm/huge_memory.c pgtable = pte_alloc_one(vma->vm_mm);
vm_mm 606 mm/huge_memory.c vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vm_mm 612 mm/huge_memory.c ret = check_stable_address_space(vma->vm_mm);
vm_mm 623 mm/huge_memory.c pte_free(vma->vm_mm, pgtable);
vm_mm 634 mm/huge_memory.c pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
vm_mm 635 mm/huge_memory.c set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
vm_mm 636 mm/huge_memory.c add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
vm_mm 637 mm/huge_memory.c mm_inc_nr_ptes(vma->vm_mm);
vm_mm 648 mm/huge_memory.c pte_free(vma->vm_mm, pgtable);
vm_mm 721 mm/huge_memory.c !mm_forbids_zeropage(vma->vm_mm) &&
vm_mm 727 mm/huge_memory.c pgtable = pte_alloc_one(vma->vm_mm);
vm_mm 730 mm/huge_memory.c zero_page = mm_get_huge_zero_page(vma->vm_mm);
vm_mm 732 mm/huge_memory.c pte_free(vma->vm_mm, pgtable);
vm_mm 736 mm/huge_memory.c vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vm_mm 740 mm/huge_memory.c ret = check_stable_address_space(vma->vm_mm);
vm_mm 748 mm/huge_memory.c set_huge_zero_page(pgtable, vma->vm_mm, vma,
vm_mm 756 mm/huge_memory.c pte_free(vma->vm_mm, pgtable);
vm_mm 773 mm/huge_memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 838 mm/huge_memory.c pgtable = pte_alloc_one(vma->vm_mm);
vm_mm 861 mm/huge_memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 939 mm/huge_memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1085 mm/huge_memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1161 mm/huge_memory.c vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
vm_mm 1183 mm/huge_memory.c vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
vm_mm 1222 mm/huge_memory.c mem_cgroup_try_charge_delay(pages[i], vma->vm_mm,
vm_mm 1247 mm/huge_memory.c mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vm_mm 1251 mm/huge_memory.c vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vm_mm 1266 mm/huge_memory.c pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
vm_mm 1267 mm/huge_memory.c pmd_populate(vma->vm_mm, &_pmd, pgtable);
vm_mm 1280 mm/huge_memory.c set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
vm_mm 1286 mm/huge_memory.c pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
vm_mm 1325 mm/huge_memory.c vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
vm_mm 1390 mm/huge_memory.c if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm,
vm_mm 1411 mm/huge_memory.c mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vm_mm 1431 mm/huge_memory.c set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
vm_mm 1434 mm/huge_memory.c add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
vm_mm 1470 mm/huge_memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1547 mm/huge_memory.c vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vm_mm 1636 mm/huge_memory.c if (mm_tlb_flush_pending(vma->vm_mm)) {
vm_mm 1647 mm/huge_memory.c mmu_notifier_invalidate_range(vma->vm_mm, haddr,
vm_mm 1657 mm/huge_memory.c migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
vm_mm 1673 mm/huge_memory.c set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
vm_mm 1870 mm/huge_memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1927 mm/huge_memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 2015 mm/huge_memory.c ptl = pmd_lock(vma->vm_mm, pmd);
vm_mm 2033 mm/huge_memory.c ptl = pud_lock(vma->vm_mm, pud);
vm_mm 2086 mm/huge_memory.c mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vm_mm 2090 mm/huge_memory.c ptl = pud_lock(vma->vm_mm, pud);
vm_mm 2108 mm/huge_memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 2142 mm/huge_memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 2305 mm/huge_memory.c mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vm_mm 2309 mm/huge_memory.c ptl = pmd_lock(vma->vm_mm, pmd);
vm_mm 2352 mm/huge_memory.c pgd = pgd_offset(vma->vm_mm, address);
vm_mm 3024 mm/huge_memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 3049 mm/huge_memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 3523 mm/hugetlb.c struct mm_struct *mm = vma->vm_mm;
vm_mm 3658 mm/hugetlb.c mm = vma->vm_mm;
vm_mm 4556 mm/hugetlb.c struct mm_struct *mm = vma->vm_mm;
vm_mm 4901 mm/hugetlb.c spte = huge_pte_offset(svma->vm_mm, saddr,
vm_mm 381 mm/internal.h up_read(&vmf->vma->vm_mm->mmap_sem);
vm_mm 324 mm/khugepaged.c if (mm_has_pgste(vma->vm_mm))
vm_mm 412 mm/khugepaged.c test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
vm_mm 625 mm/khugepaged.c mmu_notifier_test_young(vma->vm_mm, address))
vm_mm 659 mm/khugepaged.c add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
vm_mm 669 mm/khugepaged.c pte_clear(vma->vm_mm, address, _pte);
vm_mm 687 mm/khugepaged.c pte_clear(vma->vm_mm, address, _pte);
vm_mm 1214 mm/khugepaged.c mmu_notifier_test_young(vma->vm_mm, address))
vm_mm 1379 mm/khugepaged.c add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
vm_mm 1383 mm/khugepaged.c ptl = pmd_lock(vma->vm_mm, pmd);
vm_mm 1448 mm/khugepaged.c pmd = mm_find_pmd(vma->vm_mm, addr);
vm_mm 1458 mm/khugepaged.c if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
vm_mm 1459 mm/khugepaged.c spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
vm_mm 1463 mm/khugepaged.c up_write(&vma->vm_mm->mmap_sem);
vm_mm 1464 mm/khugepaged.c mm_dec_nr_ptes(vma->vm_mm);
vm_mm 1465 mm/khugepaged.c pte_free(vma->vm_mm, pmd_pgtable(_pmd));
vm_mm 1468 mm/khugepaged.c khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
vm_mm 850 mm/ksm.c if (ksm_test_exit(vma->vm_mm))
vm_mm 1035 mm/ksm.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1122 mm/ksm.c struct mm_struct *mm = vma->vm_mm;
vm_mm 2439 mm/ksm.c struct mm_struct *mm = vma->vm_mm;
vm_mm 2641 mm/ksm.c if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
vm_mm 69 mm/madvise.c struct mm_struct *mm = vma->vm_mm;
vm_mm 199 mm/madvise.c orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
vm_mm 263 mm/madvise.c walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
vm_mm 388 mm/madvise.c orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vm_mm 487 mm/madvise.c walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
vm_mm 495 mm/madvise.c struct mm_struct *mm = vma->vm_mm;
vm_mm 520 mm/madvise.c walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
vm_mm 544 mm/madvise.c struct mm_struct *mm = vma->vm_mm;
vm_mm 702 mm/madvise.c struct mm_struct *mm = vma->vm_mm;
vm_mm 725 mm/madvise.c walk_page_range(vma->vm_mm, range.start, range.end,
vm_mm 5700 mm/memcontrol.c pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vm_mm 5922 mm/memcontrol.c pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vm_mm 274 mm/memory-failure.c pgd = pgd_offset(vma->vm_mm, address);
vm_mm 463 mm/memory-failure.c if (vma->vm_mm == t->mm)
vm_mm 498 mm/memory-failure.c if (vma->vm_mm == t->mm)
vm_mm 484 mm/memory.c pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
vm_mm 1232 mm/memory.c pgd = pgd_offset(vma->vm_mm, addr);
vm_mm 1310 mm/memory.c mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
vm_mm 1333 mm/memory.c mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vm_mm 1335 mm/memory.c tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
vm_mm 1336 mm/memory.c update_hiwater_rss(vma->vm_mm);
vm_mm 1360 mm/memory.c mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vm_mm 1362 mm/memory.c tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
vm_mm 1363 mm/memory.c update_hiwater_rss(vma->vm_mm);
vm_mm 1425 mm/memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1492 mm/memory.c BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
vm_mm 1584 mm/memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1902 mm/memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 2324 mm/memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 2475 mm/memory.c vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
vm_mm 2598 mm/memory.c vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vm_mm 2762 mm/memory.c if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
vm_mm 2768 mm/memory.c migration_entry_wait(vma->vm_mm, vmf->pmd,
vm_mm 2813 mm/memory.c vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vm_mm 2824 mm/memory.c count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
vm_mm 2835 mm/memory.c locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
vm_mm 2860 mm/memory.c if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL,
vm_mm 2869 mm/memory.c vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
vm_mm 2889 mm/memory.c inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
vm_mm 2890 mm/memory.c dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
vm_mm 2901 mm/memory.c set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
vm_mm 2902 mm/memory.c arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
vm_mm 2988 mm/memory.c if (pte_alloc(vma->vm_mm, vmf->pmd))
vm_mm 2997 mm/memory.c !mm_forbids_zeropage(vma->vm_mm)) {
vm_mm 3000 mm/memory.c vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vm_mm 3004 mm/memory.c ret = check_stable_address_space(vma->vm_mm);
vm_mm 3022 mm/memory.c if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg,
vm_mm 3037 mm/memory.c vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
vm_mm 3042 mm/memory.c ret = check_stable_address_space(vma->vm_mm);
vm_mm 3054 mm/memory.c inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
vm_mm 3059 mm/memory.c set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
vm_mm 3102 mm/memory.c vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
vm_mm 3147 mm/memory.c vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vm_mm 3153 mm/memory.c mm_inc_nr_ptes(vma->vm_mm);
vm_mm 3154 mm/memory.c pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
vm_mm 3157 mm/memory.c } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
vm_mm 3184 mm/memory.c vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
vm_mm 3194 mm/memory.c pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
vm_mm 3199 mm/memory.c mm_inc_nr_ptes(vma->vm_mm);
vm_mm 3223 mm/memory.c vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
vm_mm 3229 mm/memory.c vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vm_mm 3240 mm/memory.c add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
vm_mm 3248 mm/memory.c set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
vm_mm 3317 mm/memory.c inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
vm_mm 3322 mm/memory.c inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
vm_mm 3325 mm/memory.c set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
vm_mm 3366 mm/memory.c ret = check_stable_address_space(vmf->vma->vm_mm);
vm_mm 3460 mm/memory.c vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
vm_mm 3528 mm/memory.c if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
vm_mm 3601 mm/memory.c struct mm_struct *vm_mm = vma->vm_mm;
vm_mm 3615 mm/memory.c vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
vm_mm 3642 mm/memory.c pte_free(vm_mm, vmf->prealloc_pte);
vm_mm 3680 mm/memory.c vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
vm_mm 3874 mm/memory.c vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
vm_mm 3920 mm/memory.c struct mm_struct *mm = vma->vm_mm;
vm_mm 4006 mm/memory.c count_memcg_event_mm(vma->vm_mm, PGFAULT);
vm_mm 4024 mm/memory.c ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
vm_mm 4245 mm/memory.c ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
vm_mm 4266 mm/memory.c if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
vm_mm 772 mm/mempolicy.c err = split_vma(vma->vm_mm, vma, vmstart, 1);
vm_mm 777 mm/mempolicy.c err = split_vma(vma->vm_mm, vma, vmend, 0);
vm_mm 257 mm/migrate.c set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
vm_mm 265 mm/migrate.c set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
vm_mm 2195 mm/migrate.c struct mm_struct *mm = vma->vm_mm;
vm_mm 2370 mm/migrate.c migrate->vma->vm_mm, migrate->start, migrate->end);
vm_mm 2373 mm/migrate.c walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
vm_mm 2710 mm/migrate.c struct mm_struct *mm = vma->vm_mm;
vm_mm 2758 mm/migrate.c if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
vm_mm 2871 mm/migrate.c migrate->vma->vm_mm,
vm_mm 222 mm/mincore.c err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
vm_mm 386 mm/mlock.c pte = get_locked_pte(vma->vm_mm, start, &ptl);
vm_mm 522 mm/mlock.c struct mm_struct *mm = vma->vm_mm;
vm_mm 721 mm/mmap.c struct mm_struct *mm = vma->vm_mm;
vm_mm 2296 mm/mmap.c struct mm_struct *mm = vma->vm_mm;
vm_mm 2321 mm/mmap.c if (is_hugepage_only_range(vma->vm_mm, new_start, size))
vm_mm 2341 mm/mmap.c struct mm_struct *mm = vma->vm_mm;
vm_mm 2433 mm/mmap.c struct mm_struct *mm = vma->vm_mm;
vm_mm 3214 mm/mmap.c struct mm_struct *mm = vma->vm_mm;
vm_mm 3342 mm/mmap.c if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
vm_mm 61 mm/mprotect.c pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vm_mm 65 mm/mprotect.c atomic_read(&vma->vm_mm->mm_users) == 1)
vm_mm 68 mm/mprotect.c flush_tlb_batched_pending(vma->vm_mm);
vm_mm 138 mm/mprotect.c set_pte_at(vma->vm_mm, addr, pte, newpte);
vm_mm 152 mm/mprotect.c set_pte_at(vma->vm_mm, addr, pte, newpte);
vm_mm 223 mm/mprotect.c vma, vma->vm_mm, addr, end);
vm_mm 305 mm/mprotect.c struct mm_struct *mm = vma->vm_mm;
vm_mm 376 mm/mprotect.c struct mm_struct *mm = vma->vm_mm;
vm_mm 120 mm/mremap.c struct mm_struct *mm = vma->vm_mm;
vm_mm 156 mm/mremap.c flush_tlb_batched_pending(vma->vm_mm);
vm_mm 200 mm/mremap.c struct mm_struct *mm = vma->vm_mm;
vm_mm 218 mm/mremap.c old_ptl = pmd_lock(vma->vm_mm, old_pmd);
vm_mm 252 mm/mremap.c mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
vm_mm 263 mm/mremap.c old_pmd = get_old_pmd(vma->vm_mm, old_addr);
vm_mm 266 mm/mremap.c new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
vm_mm 304 mm/mremap.c if (pte_alloc(new_vma->vm_mm, new_pmd))
vm_mm 324 mm/mremap.c struct mm_struct *mm = vma->vm_mm;
vm_mm 595 mm/nommu.c vma->vm_mm = mm;
vm_mm 654 mm/nommu.c struct mm_struct *mm = vma->vm_mm;
vm_mm 50 mm/page_vma_mapped.c pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
vm_mm 140 mm/page_vma_mapped.c struct mm_struct *mm = pvmw->vma->vm_mm;
vm_mm 361 mm/pagewalk.c .mm = vma->vm_mm,
vm_mm 61 mm/pgtable-generic.c set_pte_at(vma->vm_mm, address, ptep, entry);
vm_mm 84 mm/pgtable-generic.c struct mm_struct *mm = (vma)->vm_mm;
vm_mm 103 mm/pgtable-generic.c set_pmd_at(vma->vm_mm, address, pmdp, entry);
vm_mm 131 mm/pgtable-generic.c pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
vm_mm 144 mm/pgtable-generic.c pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
vm_mm 206 mm/pgtable-generic.c pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
vm_mm 178 mm/rmap.c struct mm_struct *mm = vma->vm_mm;
vm_mm 821 mm/rmap.c if (!mm_match_cgroup(vma->vm_mm, memcg))
vm_mm 901 mm/rmap.c 0, vma, vma->vm_mm, address,
vm_mm 920 mm/rmap.c set_pte_at(vma->vm_mm, address, pte, entry);
vm_mm 934 mm/rmap.c set_pmd_at(vma->vm_mm, address, pmd, entry);
vm_mm 1344 mm/rmap.c struct mm_struct *mm = vma->vm_mm;
vm_mm 1377 mm/rmap.c mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vm_mm 1633 mm/shmem.c struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
vm_mm 1768 mm/shmem.c charge_mm = vma ? vma->vm_mm : current->mm;
vm_mm 2061 mm/shmem.c test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
vm_mm 3998 mm/shmem.c test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
vm_mm 1867 mm/swapfile.c if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
vm_mm 1873 mm/swapfile.c pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vm_mm 1880 mm/swapfile.c dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
vm_mm 1881 mm/swapfile.c inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
vm_mm 1883 mm/swapfile.c set_pte_at(vma->vm_mm, addr, pte,
vm_mm 2054 mm/swapfile.c pgd = pgd_offset(vma->vm_mm, addr);
vm_mm 38 mm/vmacache.c if (vmacache_valid_mm(newvma->vm_mm))
vm_mm 77 mm/vmacache.c if (WARN_ON_ONCE(vma->vm_mm != mm))
vm_mm 302 mm/zsmalloc.c enum zs_mapmode vm_mm; /* mapping mode */
vm_mm 1187 mm/zsmalloc.c if (area->vm_mm == ZS_MM_WO)
vm_mm 1212 mm/zsmalloc.c if (area->vm_mm == ZS_MM_RO)
vm_mm 1328 mm/zsmalloc.c area->vm_mm = mm;
vm_mm 3740 security/selinux/hooks.c if (vma->vm_start >= vma->vm_mm->start_brk &&
vm_mm 3741 security/selinux/hooks.c vma->vm_end <= vma->vm_mm->brk) {
vm_mm 3746 security/selinux/hooks.c ((vma->vm_start <= vma->vm_mm->start_stack &&
vm_mm 3747 security/selinux/hooks.c vma->vm_end >= vma->vm_mm->start_stack) ||
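
Nearly every hit above follows the same idiom: code that is handed only a struct vm_area_struct * reaches the owning address space through vma->vm_mm (the field is declared at include/linux/mm_types.h line 314 above) and passes that mm_struct to the per-mm primitives such as set_pte_at(), pmd_lock(), flush_tlb_mm() or add_mm_counter(). For reference, here is a minimal sketch of that pattern, modeled on the generic ptep_test_and_clear_young() hit at include/asm-generic/pgtable.h line 71; the helper name is hypothetical and the snippet is illustrative, not a copy of any file in the index.

#include <linux/mm.h>

/*
 * Hypothetical helper (not from the tree indexed above): clears the
 * accessed bit of a PTE known to map an address inside @vma.  A VMA
 * carries no page tables of its own, so the PTE update is issued
 * against the owning mm_struct obtained via vma->vm_mm.  Callers are
 * assumed to hold the PTE lock for @ptep, as callers of the real
 * ptep_test_and_clear_young() do.
 */
static inline int example_test_and_clear_young(struct vm_area_struct *vma,
					       unsigned long address,
					       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;	/* owning address space */
	pte_t pte = *ptep;
	int young = pte_young(pte);

	if (young)
		set_pte_at(mm, address, ptep, pte_mkold(pte));

	return young;
}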