mm                  9 arch/alpha/include/asm/cacheflush.h #define flush_cache_mm(mm)			do { } while (0)
mm                 10 arch/alpha/include/asm/cacheflush.h #define flush_cache_dup_mm(mm)			do { } while (0)
mm                 55 arch/alpha/include/asm/cacheflush.h 		struct mm_struct *mm = vma->vm_mm;
mm                 56 arch/alpha/include/asm/cacheflush.h 		if (current->active_mm == mm)
mm                 57 arch/alpha/include/asm/cacheflush.h 			__load_new_mm_context(mm);
mm                 59 arch/alpha/include/asm/cacheflush.h 			mm->context[smp_processor_id()] = 0;
mm                 78 arch/alpha/include/asm/machvec.h 	void (*mv_flush_tlb_current_page)(struct mm_struct * mm,
mm                121 arch/alpha/include/asm/mmu_context.h __get_new_mm_context(struct mm_struct *mm, long cpu)
mm                194 arch/alpha/include/asm/mmu_context.h 		struct mm_struct * mm = current->active_mm;	\
mm                196 arch/alpha/include/asm/mmu_context.h 		if (!mm->context[cpu])			\
mm                197 arch/alpha/include/asm/mmu_context.h 			__load_new_mm_context(mm);		\
mm                217 arch/alpha/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
mm                233 arch/alpha/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm                238 arch/alpha/include/asm/mmu_context.h 		mm->context[i] = 0;
mm                241 arch/alpha/include/asm/mmu_context.h 		  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
mm                246 arch/alpha/include/asm/mmu_context.h destroy_context(struct mm_struct *mm)
mm                252 arch/alpha/include/asm/mmu_context.h enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                255 arch/alpha/include/asm/mmu_context.h 	  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
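
A note on the Alpha convention seen above: mm->context[cpu] caches the address-space number (ASN) last assigned on that CPU, with 0 meaning "stale, allocate anew via __get_new_mm_context()". A hedged sketch of init_new_context() under that convention (the demo_ prefix marks hypothetical names; kernel context assumed):

	static inline int demo_init_new_context(struct task_struct *tsk,
						struct mm_struct *mm)
	{
		int i;

		/* no CPU holds a valid ASN for this new mm yet */
		for_each_possible_cpu(i)
			mm->context[i] = 0;
		return 0;
	}
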
mm                 17 arch/alpha/include/asm/pgalloc.h pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
mm                 24 arch/alpha/include/asm/pgalloc.h pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
mm                 30 arch/alpha/include/asm/pgalloc.h pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
mm                 35 arch/alpha/include/asm/pgalloc.h extern pgd_t *pgd_alloc(struct mm_struct *mm);
mm                 38 arch/alpha/include/asm/pgalloc.h pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 44 arch/alpha/include/asm/pgalloc.h pmd_alloc_one(struct mm_struct *mm, unsigned long address)
mm                 51 arch/alpha/include/asm/pgalloc.h pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                 29 arch/alpha/include/asm/pgtable.h #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
mm                249 arch/alpha/include/asm/pgtable.h extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                288 arch/alpha/include/asm/pgtable.h #define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))
mm                  7 arch/alpha/include/asm/tlb.h #define __pte_free_tlb(tlb, pte, address)		pte_free((tlb)->mm, pte)
mm                  8 arch/alpha/include/asm/tlb.h #define __pmd_free_tlb(tlb, pmd, address)		pmd_free((tlb)->mm, pmd)
mm                 22 arch/alpha/include/asm/tlbflush.h ev4_flush_tlb_current(struct mm_struct *mm)
mm                 24 arch/alpha/include/asm/tlbflush.h 	__load_new_mm_context(mm);
mm                 29 arch/alpha/include/asm/tlbflush.h ev5_flush_tlb_current(struct mm_struct *mm)
mm                 31 arch/alpha/include/asm/tlbflush.h 	__load_new_mm_context(mm);
mm                 39 arch/alpha/include/asm/tlbflush.h ev4_flush_tlb_current_page(struct mm_struct * mm,
mm                 45 arch/alpha/include/asm/tlbflush.h 		__load_new_mm_context(mm);
mm                 52 arch/alpha/include/asm/tlbflush.h ev5_flush_tlb_current_page(struct mm_struct * mm,
mm                 57 arch/alpha/include/asm/tlbflush.h 		__load_new_mm_context(mm);
mm                 90 arch/alpha/include/asm/tlbflush.h flush_tlb_other(struct mm_struct *mm)
mm                 92 arch/alpha/include/asm/tlbflush.h 	unsigned long *mmc = &mm->context[smp_processor_id()];
mm                108 arch/alpha/include/asm/tlbflush.h flush_tlb_mm(struct mm_struct *mm)
mm                110 arch/alpha/include/asm/tlbflush.h 	if (mm == current->active_mm)
mm                111 arch/alpha/include/asm/tlbflush.h 		flush_tlb_current(mm);
mm                113 arch/alpha/include/asm/tlbflush.h 		flush_tlb_other(mm);
mm                120 arch/alpha/include/asm/tlbflush.h 	struct mm_struct *mm = vma->vm_mm;
mm                122 arch/alpha/include/asm/tlbflush.h 	if (mm == current->active_mm)
mm                123 arch/alpha/include/asm/tlbflush.h 		flush_tlb_current_page(mm, vma, addr);
mm                125 arch/alpha/include/asm/tlbflush.h 		flush_tlb_other(mm);
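
tlbflush.h lines 108-125 above repeat one dispatch: flushing the currently active mm must take effect immediately (reload the context, picking up a fresh ASN), while an inactive mm only needs its per-CPU ASN invalidated so the next activation allocates a new one. Restated as a hedged sketch:

	static inline void demo_flush_tlb_mm(struct mm_struct *mm)
	{
		if (mm == current->active_mm)
			flush_tlb_current(mm);	/* reload now: new ASN */
		else
			flush_tlb_other(mm);	/* lazy: mm->context[cpu] = 0 */
	}
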
mm                 73 arch/alpha/kernel/osf_sys.c 	struct mm_struct *mm;
mm                 75 arch/alpha/kernel/osf_sys.c 	mm = current->mm;
mm                 76 arch/alpha/kernel/osf_sys.c 	mm->end_code = bss_start + bss_len;
mm                 77 arch/alpha/kernel/osf_sys.c 	mm->start_brk = bss_start + bss_len;
mm                 78 arch/alpha/kernel/osf_sys.c 	mm->brk = bss_start + bss_len;
mm                640 arch/alpha/kernel/smp.c 	struct mm_struct *mm = (struct mm_struct *) x;
mm                641 arch/alpha/kernel/smp.c 	if (mm == current->active_mm && !asn_locked())
mm                642 arch/alpha/kernel/smp.c 		flush_tlb_current(mm);
mm                644 arch/alpha/kernel/smp.c 		flush_tlb_other(mm);
mm                648 arch/alpha/kernel/smp.c flush_tlb_mm(struct mm_struct *mm)
mm                652 arch/alpha/kernel/smp.c 	if (mm == current->active_mm) {
mm                653 arch/alpha/kernel/smp.c 		flush_tlb_current(mm);
mm                654 arch/alpha/kernel/smp.c 		if (atomic_read(&mm->mm_users) <= 1) {
mm                659 arch/alpha/kernel/smp.c 				if (mm->context[cpu])
mm                660 arch/alpha/kernel/smp.c 					mm->context[cpu] = 0;
mm                667 arch/alpha/kernel/smp.c 	smp_call_function(ipi_flush_tlb_mm, mm, 1);
mm                675 arch/alpha/kernel/smp.c 	struct mm_struct *mm;
mm                683 arch/alpha/kernel/smp.c 	struct mm_struct * mm = data->mm;
mm                685 arch/alpha/kernel/smp.c 	if (mm == current->active_mm && !asn_locked())
mm                686 arch/alpha/kernel/smp.c 		flush_tlb_current_page(mm, data->vma, data->addr);
mm                688 arch/alpha/kernel/smp.c 		flush_tlb_other(mm);
mm                695 arch/alpha/kernel/smp.c 	struct mm_struct *mm = vma->vm_mm;
mm                699 arch/alpha/kernel/smp.c 	if (mm == current->active_mm) {
mm                700 arch/alpha/kernel/smp.c 		flush_tlb_current_page(mm, vma, addr);
mm                701 arch/alpha/kernel/smp.c 		if (atomic_read(&mm->mm_users) <= 1) {
mm                706 arch/alpha/kernel/smp.c 				if (mm->context[cpu])
mm                707 arch/alpha/kernel/smp.c 					mm->context[cpu] = 0;
mm                715 arch/alpha/kernel/smp.c 	data.mm = mm;
mm                735 arch/alpha/kernel/smp.c 	struct mm_struct *mm = (struct mm_struct *) x;
mm                736 arch/alpha/kernel/smp.c 	if (mm == current->active_mm && !asn_locked())
mm                737 arch/alpha/kernel/smp.c 		__load_new_mm_context(mm);
mm                739 arch/alpha/kernel/smp.c 		flush_tlb_other(mm);
mm                746 arch/alpha/kernel/smp.c 	struct mm_struct *mm = vma->vm_mm;
mm                753 arch/alpha/kernel/smp.c 	if (mm == current->active_mm) {
mm                754 arch/alpha/kernel/smp.c 		__load_new_mm_context(mm);
mm                755 arch/alpha/kernel/smp.c 		if (atomic_read(&mm->mm_users) <= 1) {
mm                760 arch/alpha/kernel/smp.c 				if (mm->context[cpu])
mm                761 arch/alpha/kernel/smp.c 					mm->context[cpu] = 0;
mm                768 arch/alpha/kernel/smp.c 	smp_call_function(ipi_flush_icache_page, mm, 1);
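
On SMP (smp.c lines 640-768 above) the same test runs on every CPU: the initiator flushes locally, then broadcasts an IPI with smp_call_function() so remote CPUs either reload their context or invalidate their ASN slot. A condensed sketch that omits the asn_locked() and mm_users short-cuts of the real code (demo_/ipi_demo_ names are hypothetical):

	static void ipi_demo_flush(void *x)
	{
		struct mm_struct *mm = x;

		if (mm == current->active_mm)
			flush_tlb_current(mm);
		else
			flush_tlb_other(mm);
	}

	static void demo_flush_tlb_mm_smp(struct mm_struct *mm)
	{
		preempt_disable();
		if (mm == current->active_mm)
			flush_tlb_current(mm);
		smp_call_function(ipi_demo_flush, mm, 1);	/* wait = 1 */
		preempt_enable();
	}
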
mm                959 arch/alpha/kernel/traps.c 		struct mm_struct *mm = current->mm;
mm                960 arch/alpha/kernel/traps.c 		down_read(&mm->mmap_sem);
mm                961 arch/alpha/kernel/traps.c 		if (find_vma(mm, (unsigned long)va))
mm                965 arch/alpha/kernel/traps.c 		up_read(&mm->mmap_sem);
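
traps.c lines 959-965 take mmap_sem for reading around find_vma() to ask whether a faulting address is mapped. Since find_vma() returns the first VMA whose vm_end lies above the address, a strict containment check also compares vm_start; a hedged sketch:

	static bool demo_user_addr_mapped(unsigned long addr)
	{
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;
		bool mapped;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, addr);
		mapped = vma && vma->vm_start <= addr;
		up_read(&mm->mmap_sem);
		return mapped;
	}
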
mm                 88 arch/alpha/mm/fault.c 	struct mm_struct *mm = current->mm;
mm                110 arch/alpha/mm/fault.c 	if (!mm || faulthandler_disabled())
mm                120 arch/alpha/mm/fault.c 	down_read(&mm->mmap_sem);
mm                121 arch/alpha/mm/fault.c 	vma = find_vma(mm, address);
mm                183 arch/alpha/mm/fault.c 	up_read(&mm->mmap_sem);
mm                190 arch/alpha/mm/fault.c 	up_read(&mm->mmap_sem);
mm                214 arch/alpha/mm/fault.c 	up_read(&mm->mmap_sem);
mm                221 arch/alpha/mm/fault.c 	up_read(&mm->mmap_sem);
mm                 42 arch/alpha/mm/init.c pgd_alloc(struct mm_struct *mm)
mm                 51 arch/arc/include/asm/cacheflush.h #define flush_cache_dup_mm(mm)			/* called on fork (VIVT only) */
mm                 55 arch/arc/include/asm/cacheflush.h #define flush_cache_mm(mm)			/* called on munmap/exit */
mm                 56 arch/arc/include/asm/cacheflush.h #define flush_cache_range(mm, u_vstart, u_vend)
mm                 62 arch/arc/include/asm/cacheflush.h void flush_cache_mm(struct mm_struct *mm);
mm                 53 arch/arc/include/asm/hugepage.h static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm                 64 arch/arc/include/asm/hugepage.h extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm                 68 arch/arc/include/asm/hugepage.h extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
mm                 48 arch/arc/include/asm/mmu_context.h #define asid_mm(mm, cpu)	mm->context.asid[cpu]
mm                 49 arch/arc/include/asm/mmu_context.h #define hw_pid(mm, cpu)		(asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
mm                 58 arch/arc/include/asm/mmu_context.h static inline void get_new_mmu_context(struct mm_struct *mm)
mm                 75 arch/arc/include/asm/mmu_context.h 	if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
mm                 93 arch/arc/include/asm/mmu_context.h 	asid_mm(mm, cpu) = asid_cpu(cpu);
mm                 96 arch/arc/include/asm/mmu_context.h 	write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);
mm                106 arch/arc/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm                111 arch/arc/include/asm/mmu_context.h 		asid_mm(mm, i) = MM_CTXT_NO_ASID;
mm                116 arch/arc/include/asm/mmu_context.h static inline void destroy_context(struct mm_struct *mm)
mm                122 arch/arc/include/asm/mmu_context.h 	asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
mm                171 arch/arc/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)   do { } while (0)
mm                173 arch/arc/include/asm/mmu_context.h #define enter_lazy_tlb(mm, tsk)
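
The ARC scheme above (mmu_context.h lines 48-122) stores an allocation-cycle number in the upper bits of each per-CPU ASID: an mm may keep its hardware PID only if it was assigned in the CPU's current cycle. The fast-path test, isolated as a hedged sketch (demo_ name is hypothetical):

	static inline bool demo_asid_still_valid(struct mm_struct *mm, int cpu)
	{
		/* XOR cancels matching cycle bits; any remainder under
		 * MM_CTXT_CYCLE_MASK means a rollover happened since
		 * this mm last ran on this CPU. */
		return !((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK);
	}
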
mm                 36 arch/arc/include/asm/pgalloc.h pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
mm                 42 arch/arc/include/asm/pgalloc.h pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
mm                 52 arch/arc/include/asm/pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 71 arch/arc/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 90 arch/arc/include/asm/pgalloc.h static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
mm                101 arch/arc/include/asm/pgalloc.h pte_alloc_one(struct mm_struct *mm)
mm                119 arch/arc/include/asm/pgalloc.h static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
mm                124 arch/arc/include/asm/pgalloc.h static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
mm                130 arch/arc/include/asm/pgalloc.h #define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
mm                272 arch/arc/include/asm/pgtable.h #define pte_clear(mm, addr, ptep)	set_pte_at(mm, addr, ptep, __pte(0))
mm                328 arch/arc/include/asm/pgtable.h static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                339 arch/arc/include/asm/pgtable.h #define pgd_offset(mm, addr)	(((mm)->pgd)+pgd_index(addr))
mm                355 arch/arc/include/asm/pgtable.h #define pgd_offset_fast(mm, addr)	\
mm                361 arch/arc/include/asm/pgtable.h #define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
mm                 12 arch/arc/include/asm/tlbflush.h void local_flush_tlb_mm(struct mm_struct *mm);
mm                 27 arch/arc/include/asm/tlbflush.h #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
mm                 37 arch/arc/include/asm/tlbflush.h extern void flush_tlb_mm(struct mm_struct *mm);
mm                 91 arch/arc/kernel/process.c 	down_read(&current->mm->mmap_sem);
mm                 92 arch/arc/kernel/process.c 	ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
mm                 94 arch/arc/kernel/process.c 	up_read(&current->mm->mmap_sem);
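
process.c lines 91-94 pre-fault a user page before an access that must not fault, via fixup_user_fault() under mmap_sem. A self-contained sketch of the pattern (demo_ name hypothetical; v5.x signature with the task argument assumed):

	static int demo_prefault_for_write(void __user *uaddr)
	{
		int ret;

		down_read(&current->mm->mmap_sem);
		ret = fixup_user_fault(current, current->mm,
				       (unsigned long)uaddr,
				       FAULT_FLAG_WRITE, NULL);
		up_read(&current->mm->mmap_sem);
		return ret;	/* 0 on success, negative errno otherwise */
	}
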
mm                168 arch/arc/kernel/smp.c 	struct mm_struct *mm = &init_mm;
mm                174 arch/arc/kernel/smp.c 	mmget(mm);
mm                175 arch/arc/kernel/smp.c 	mmgrab(mm);
mm                176 arch/arc/kernel/smp.c 	current->active_mm = mm;
mm                177 arch/arc/kernel/smp.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
mm                 64 arch/arc/kernel/troubleshoot.c 	struct mm_struct *mm;
mm                 68 arch/arc/kernel/troubleshoot.c 	mm = get_task_mm(tsk);
mm                 69 arch/arc/kernel/troubleshoot.c 	if (!mm)
mm                 72 arch/arc/kernel/troubleshoot.c 	exe_file = get_mm_exe_file(mm);
mm                 73 arch/arc/kernel/troubleshoot.c 	mmput(mm);
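
troubleshoot.c lines 64-73 show the safe pattern for touching another task's mm: get_task_mm() takes a reference (and returns NULL for kernel threads), and each successful get is paired with mmput(). Sketch (demo_ name hypothetical):

	static struct file *demo_task_exe_file(struct task_struct *tsk)
	{
		struct mm_struct *mm = get_task_mm(tsk);	/* NULL for kthreads */
		struct file *exe_file = NULL;

		if (mm) {
			exe_file = get_mm_exe_file(mm);	/* takes its own ref */
			mmput(mm);
		}
		return exe_file;	/* caller fput()s when done */
	}
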
mm               1035 arch/arc/mm/cache.c void flush_cache_mm(struct mm_struct *mm)
mm                 65 arch/arc/mm/fault.c 	struct mm_struct *mm = tsk->mm;
mm                 88 arch/arc/mm/fault.c 	if (faulthandler_disabled() || !mm)
mm                104 arch/arc/mm/fault.c 	down_read(&mm->mmap_sem);
mm                106 arch/arc/mm/fault.c 	vma = find_vma(mm, address);
mm                156 arch/arc/mm/fault.c 	up_read(&mm->mmap_sem);
mm                 32 arch/arc/mm/mmap.c 	struct mm_struct *mm = current->mm;
mm                 63 arch/arc/mm/mmap.c 		vma = find_vma(mm, addr);
mm                 71 arch/arc/mm/mmap.c 	info.low_limit = mm->mmap_base;
mm                293 arch/arc/mm/tlb.c noinline void local_flush_tlb_mm(struct mm_struct *mm)
mm                301 arch/arc/mm/tlb.c 	if (atomic_read(&mm->mm_users) == 0)
mm                311 arch/arc/mm/tlb.c 	destroy_context(mm);
mm                312 arch/arc/mm/tlb.c 	if (current->mm == mm)
mm                313 arch/arc/mm/tlb.c 		get_new_mmu_context(mm);
mm                459 arch/arc/mm/tlb.c void flush_tlb_mm(struct mm_struct *mm)
mm                461 arch/arc/mm/tlb.c 	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
mm                462 arch/arc/mm/tlb.c 			 mm, 1);
mm                663 arch/arc/mm/tlb.c void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm                668 arch/arc/mm/tlb.c 	assert_spin_locked(&mm->page_table_lock);
mm                671 arch/arc/mm/tlb.c 	if (!pmd_huge_pte(mm, pmdp))
mm                674 arch/arc/mm/tlb.c 		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
mm                675 arch/arc/mm/tlb.c 	pmd_huge_pte(mm, pmdp) = pgtable;
mm                678 arch/arc/mm/tlb.c pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
mm                683 arch/arc/mm/tlb.c 	assert_spin_locked(&mm->page_table_lock);
mm                685 arch/arc/mm/tlb.c 	pgtable = pmd_huge_pte(mm, pmdp);
mm                688 arch/arc/mm/tlb.c 		pmd_huge_pte(mm, pmdp) = NULL;
mm                690 arch/arc/mm/tlb.c 		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
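
pgtable_trans_huge_deposit()/..._withdraw() above park a preallocated pte page beneath a huge pmd and hand it back at teardown; both assert page_table_lock, which the caller must hold. A hedged teardown-side sketch of the pairing (demo_ name hypothetical):

	static void demo_thp_pte_reclaim(struct mm_struct *mm, pmd_t *pmdp)
	{
		pgtable_t pgtable;

		spin_lock(&mm->page_table_lock);
		pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
		spin_unlock(&mm->page_table_lock);
		pte_free(mm, pgtable);	/* pte page back to the allocator */
	}
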
mm                 88 arch/arm/include/asm/bug.h void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr);
mm                218 arch/arm/include/asm/cacheflush.h static inline void vivt_flush_cache_mm(struct mm_struct *mm)
mm                220 arch/arm/include/asm/cacheflush.h 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
mm                227 arch/arm/include/asm/cacheflush.h 	struct mm_struct *mm = vma->vm_mm;
mm                229 arch/arm/include/asm/cacheflush.h 	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
mm                237 arch/arm/include/asm/cacheflush.h 	struct mm_struct *mm = vma->vm_mm;
mm                239 arch/arm/include/asm/cacheflush.h 	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
mm                246 arch/arm/include/asm/cacheflush.h #define flush_cache_mm(mm) \
mm                247 arch/arm/include/asm/cacheflush.h 		vivt_flush_cache_mm(mm)
mm                253 arch/arm/include/asm/cacheflush.h extern void flush_cache_mm(struct mm_struct *mm);
mm                258 arch/arm/include/asm/cacheflush.h #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
mm                 22 arch/arm/include/asm/efi.h int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
mm                 23 arch/arm/include/asm/efi.h int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
mm                 39 arch/arm/include/asm/efi.h static inline void efi_set_pgd(struct mm_struct *mm)
mm                 41 arch/arm/include/asm/efi.h 	check_and_switch_context(mm, NULL);
mm                144 arch/arm/include/asm/elf.h 		    (elf_addr_t)current->mm->context.vdso);	\
mm                 17 arch/arm/include/asm/hugetlb.h static inline int is_hugepage_only_range(struct mm_struct *mm,
mm                 42 arch/arm/include/asm/mach/map.h extern void create_mapping_late(struct mm_struct *mm, struct map_desc *md,
mm                 27 arch/arm/include/asm/mmu.h #define ASID(mm)	((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
mm                 29 arch/arm/include/asm/mmu.h #define ASID(mm)	(0)
mm                 24 arch/arm/include/asm/mmu_context.h void __check_vmalloc_seq(struct mm_struct *mm);
mm                 28 arch/arm/include/asm/mmu_context.h void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
mm                 30 arch/arm/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm                 32 arch/arm/include/asm/mmu_context.h 	atomic64_set(&mm->context.id, 0);
mm                 37 arch/arm/include/asm/mmu_context.h void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
mm                 40 arch/arm/include/asm/mmu_context.h static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
mm                 50 arch/arm/include/asm/mmu_context.h static inline void check_and_switch_context(struct mm_struct *mm,
mm                 53 arch/arm/include/asm/mmu_context.h 	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
mm                 54 arch/arm/include/asm/mmu_context.h 		__check_vmalloc_seq(mm);
mm                 64 arch/arm/include/asm/mmu_context.h 		mm->context.switch_pending = 1;
mm                 66 arch/arm/include/asm/mmu_context.h 		cpu_switch_mm(mm->pgd, mm);
mm                 74 arch/arm/include/asm/mmu_context.h 	struct mm_struct *mm = current->mm;
mm                 76 arch/arm/include/asm/mmu_context.h 	if (mm && mm->context.switch_pending) {
mm                 84 arch/arm/include/asm/mmu_context.h 		if (mm->context.switch_pending) {
mm                 85 arch/arm/include/asm/mmu_context.h 			mm->context.switch_pending = 0;
mm                 86 arch/arm/include/asm/mmu_context.h 			cpu_switch_mm(mm->pgd, mm);
mm                 96 arch/arm/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm                104 arch/arm/include/asm/mmu_context.h #define destroy_context(mm)		do { } while(0)
mm                117 arch/arm/include/asm/mmu_context.h enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                152 arch/arm/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
mm                 25 arch/arm/include/asm/pgalloc.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                 30 arch/arm/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                 36 arch/arm/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
mm                 46 arch/arm/include/asm/pgalloc.h #define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
mm                 47 arch/arm/include/asm/pgalloc.h #define pmd_free(mm, pmd)		do { } while (0)
mm                 48 arch/arm/include/asm/pgalloc.h #define pud_populate(mm,pmd,pte)	BUG()
mm                 52 arch/arm/include/asm/pgalloc.h extern pgd_t *pgd_alloc(struct mm_struct *mm);
mm                 53 arch/arm/include/asm/pgalloc.h extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
mm                 82 arch/arm/include/asm/pgalloc.h pte_alloc_one_kernel(struct mm_struct *mm)
mm                 84 arch/arm/include/asm/pgalloc.h 	pte_t *pte = __pte_alloc_one_kernel(mm);
mm                 99 arch/arm/include/asm/pgalloc.h pte_alloc_one(struct mm_struct *mm)
mm                103 arch/arm/include/asm/pgalloc.h 	pte = __pte_alloc_one(mm, GFP_PGTABLE_USER | PGTABLE_HIGHMEM);
mm                129 arch/arm/include/asm/pgalloc.h pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
mm                138 arch/arm/include/asm/pgalloc.h pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
mm                256 arch/arm/include/asm/pgtable-3level.h static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm                182 arch/arm/include/asm/pgtable.h #define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
mm                217 arch/arm/include/asm/pgtable.h #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
mm                226 arch/arm/include/asm/pgtable.h #define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
mm                255 arch/arm/include/asm/pgtable.h static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                 64 arch/arm/include/asm/proc-fns.h 	void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
mm                 90 arch/arm/include/asm/proc-fns.h extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
mm                154 arch/arm/include/asm/proc-fns.h #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
mm                185 arch/arm/include/asm/proc-fns.h #define cpu_switch_mm(pgd,mm)	{ }
mm                 66 arch/arm/include/asm/processor.h 		regs->ARM_r10 = current->mm->start_data;		\
mm                 68 arch/arm/include/asm/processor.h 		regs->ARM_r10 = current->mm->start_data;		\
mm                 17 arch/arm/include/asm/ptdump.h 	struct mm_struct		*mm;
mm                367 arch/arm/include/asm/tlbflush.h static inline void __local_flush_tlb_mm(struct mm_struct *mm)
mm                370 arch/arm/include/asm/tlbflush.h 	const int asid = ASID(mm);
mm                374 arch/arm/include/asm/tlbflush.h 		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
mm                386 arch/arm/include/asm/tlbflush.h static inline void local_flush_tlb_mm(struct mm_struct *mm)
mm                388 arch/arm/include/asm/tlbflush.h 	const int asid = ASID(mm);
mm                394 arch/arm/include/asm/tlbflush.h 	__local_flush_tlb_mm(mm);
mm                401 arch/arm/include/asm/tlbflush.h static inline void __flush_tlb_mm(struct mm_struct *mm)
mm                408 arch/arm/include/asm/tlbflush.h 	__local_flush_tlb_mm(mm);
mm                412 arch/arm/include/asm/tlbflush.h 	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
mm                617 arch/arm/include/asm/tlbflush.h extern void flush_tlb_mm(struct mm_struct *mm);
mm                649 arch/arm/include/asm/tlbflush.h static inline void local_flush_tlb_mm(struct mm_struct *mm)							{ }
mm                657 arch/arm/include/asm/tlbflush.h extern void flush_tlb_mm(struct mm_struct *mm);
mm                 13 arch/arm/include/asm/vdso.h void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
mm                 19 arch/arm/include/asm/vdso.h static inline void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
mm                 24 arch/arm/kernel/efi.c int __init efi_set_mapping_permissions(struct mm_struct *mm,
mm                 40 arch/arm/kernel/efi.c 		return apply_to_page_range(mm, base, size, set_permissions, md);
mm                 45 arch/arm/kernel/efi.c int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
mm                 67 arch/arm/kernel/efi.c 	create_mapping_late(mm, &desc, true);
mm                 73 arch/arm/kernel/efi.c 		return efi_set_mapping_permissions(mm, md);
mm                 74 arch/arm/kernel/perf_callchain.c 	if (!current->mm)
mm                342 arch/arm/kernel/process.c struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
mm                347 arch/arm/kernel/process.c int in_gate_area(struct mm_struct *mm, unsigned long addr)
mm                369 arch/arm/kernel/process.c static unsigned long sigpage_addr(const struct mm_struct *mm,
mm                378 arch/arm/kernel/process.c 	first = PAGE_ALIGN(mm->start_stack);
mm                405 arch/arm/kernel/process.c 	current->mm->context.sigpage = new_vma->vm_start;
mm                417 arch/arm/kernel/process.c 	struct mm_struct *mm = current->mm;
mm                432 arch/arm/kernel/process.c 	if (down_write_killable(&mm->mmap_sem))
mm                434 arch/arm/kernel/process.c 	hint = sigpage_addr(mm, npages);
mm                441 arch/arm/kernel/process.c 	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
mm                450 arch/arm/kernel/process.c 	mm->context.sigpage = addr;
mm                456 arch/arm/kernel/process.c 	arm_install_vdso(mm, addr + PAGE_SIZE);
mm                459 arch/arm/kernel/process.c 	up_write(&mm->mmap_sem);
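
process.c lines 417-459 install the signal page while holding mmap_sem for writing; down_write_killable() lets a fatal signal abort the wait, and _install_special_mapping() returns an ERR_PTR on failure. A condensed sketch (demo_ name, flag choice and the spec argument are illustrative):

	static int demo_install_page(struct mm_struct *mm, unsigned long addr,
				     const struct vm_special_mapping *spec)
	{
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		vma = _install_special_mapping(mm, addr, PAGE_SIZE,
					       VM_READ | VM_EXEC |
					       VM_MAYREAD | VM_MAYEXEC, spec);
		up_write(&mm->mmap_sem);
		return PTR_ERR_OR_ZERO(vma);
	}
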
mm                261 arch/arm/kernel/ptrace.c 		tmp = tsk->mm->start_code;
mm                263 arch/arm/kernel/ptrace.c 		tmp = tsk->mm->start_data;
mm                265 arch/arm/kernel/ptrace.c 		tmp = tsk->mm->end_code;
mm                453 arch/arm/kernel/signal.c 			struct mm_struct *mm = current->mm;
mm                460 arch/arm/kernel/signal.c 			retcode = mm->context.sigpage + signal_return_offset +
mm                386 arch/arm/kernel/smp.c 	struct mm_struct *mm = &init_mm;
mm                395 arch/arm/kernel/smp.c 	cpu_switch_mm(mm->pgd, mm);
mm                397 arch/arm/kernel/smp.c 	enter_lazy_tlb(mm, current);
mm                405 arch/arm/kernel/smp.c 	mmgrab(mm);
mm                406 arch/arm/kernel/smp.c 	current->active_mm = mm;
mm                407 arch/arm/kernel/smp.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
mm                 33 arch/arm/kernel/smp_tlb.c 	struct mm_struct *mm = (struct mm_struct *)arg;
mm                 35 arch/arm/kernel/smp_tlb.c 	local_flush_tlb_mm(mm);
mm                164 arch/arm/kernel/smp_tlb.c static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
mm                173 arch/arm/kernel/smp_tlb.c 	a15_erratum_get_cpumask(this_cpu, mm, &mask);
mm                187 arch/arm/kernel/smp_tlb.c void flush_tlb_mm(struct mm_struct *mm)
mm                190 arch/arm/kernel/smp_tlb.c 		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
mm                192 arch/arm/kernel/smp_tlb.c 		__flush_tlb_mm(mm);
mm                193 arch/arm/kernel/smp_tlb.c 	broadcast_tlb_mm_a15_erratum(mm);
mm                 22 arch/arm/kernel/suspend.c 	struct mm_struct *mm = current->active_mm;
mm                 37 arch/arm/kernel/suspend.c 		cpu_switch_mm(mm->pgd, mm);
mm                100 arch/arm/kernel/swp_emulate.c 	down_read(&current->mm->mmap_sem);
mm                101 arch/arm/kernel/swp_emulate.c 	if (find_vma(current->mm, addr) == NULL)
mm                105 arch/arm/kernel/swp_emulate.c 	up_read(&current->mm->mmap_sem);
mm                729 arch/arm/kernel/traps.c 		show_pte(KERN_ERR, current->mm, addr);
mm                 60 arch/arm/kernel/vdso.c 	current->mm->context.vdso = new_vma->vm_start;
mm                230 arch/arm/kernel/vdso.c static int install_vvar(struct mm_struct *mm, unsigned long addr)
mm                234 arch/arm/kernel/vdso.c 	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
mm                242 arch/arm/kernel/vdso.c void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
mm                247 arch/arm/kernel/vdso.c 	mm->context.vdso = 0;
mm                252 arch/arm/kernel/vdso.c 	if (install_vvar(mm, addr))
mm                259 arch/arm/kernel/vdso.c 	vma = _install_special_mapping(mm, addr, len,
mm                264 arch/arm/kernel/vdso.c 		mm->context.vdso = addr;
mm                 32 arch/arm/lib/uaccess_with_memcpy.c 	pgd = pgd_offset(current->mm, addr);
mm                 55 arch/arm/lib/uaccess_with_memcpy.c 		ptl = &current->mm->page_table_lock;
mm                 71 arch/arm/lib/uaccess_with_memcpy.c 	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
mm                 99 arch/arm/lib/uaccess_with_memcpy.c 		down_read(&current->mm->mmap_sem);
mm                107 arch/arm/lib/uaccess_with_memcpy.c 				up_read(&current->mm->mmap_sem);
mm                111 arch/arm/lib/uaccess_with_memcpy.c 				down_read(&current->mm->mmap_sem);
mm                131 arch/arm/lib/uaccess_with_memcpy.c 		up_read(&current->mm->mmap_sem);
mm                168 arch/arm/lib/uaccess_with_memcpy.c 	down_read(&current->mm->mmap_sem);
mm                175 arch/arm/lib/uaccess_with_memcpy.c 			up_read(&current->mm->mmap_sem);
mm                178 arch/arm/lib/uaccess_with_memcpy.c 			down_read(&current->mm->mmap_sem);
mm                196 arch/arm/lib/uaccess_with_memcpy.c 	up_read(&current->mm->mmap_sem);
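
uaccess_with_memcpy.c lines 32-71 walk the current mm's page tables by hand and take the pte lock on the leaf. A hedged sketch of that descent with classic (pre-p4d) ARM levels; the caller releases with pte_unmap_unlock(pte, *ptlp):

	static pte_t *demo_walk_to_pte(struct mm_struct *mm, unsigned long addr,
				       spinlock_t **ptlp)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return NULL;
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud) || pud_bad(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		return pte_offset_map_lock(mm, pmd, addr, ptlp);
	}
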
mm                140 arch/arm/mach-omap2/pm.h extern void omap_pm_setup_sr_i2c_pcb_length(u32 mm);
mm                144 arch/arm/mach-omap2/pm.h static inline void omap_pm_setup_sr_i2c_pcb_length(u32 mm) { }
mm                762 arch/arm/mach-omap2/vc.c void __init omap_pm_setup_sr_i2c_pcb_length(u32 mm)
mm                764 arch/arm/mach-omap2/vc.c 	sr_i2c_pcb_length = mm;
mm                214 arch/arm/mach-rpc/ecard.c static void ecard_init_pgtables(struct mm_struct *mm)
mm                216 arch/arm/mach-rpc/ecard.c 	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC);
mm                231 arch/arm/mach-rpc/ecard.c 	src_pgd = pgd_offset(mm, (unsigned long)IO_BASE);
mm                232 arch/arm/mach-rpc/ecard.c 	dst_pgd = pgd_offset(mm, IO_START);
mm                236 arch/arm/mach-rpc/ecard.c 	src_pgd = pgd_offset(mm, (unsigned long)EASI_BASE);
mm                237 arch/arm/mach-rpc/ecard.c 	dst_pgd = pgd_offset(mm, EASI_START);
mm                247 arch/arm/mach-rpc/ecard.c 	struct mm_struct * mm = mm_alloc();
mm                250 arch/arm/mach-rpc/ecard.c 	if (!mm)
mm                253 arch/arm/mach-rpc/ecard.c 	current->mm = mm;
mm                254 arch/arm/mach-rpc/ecard.c 	current->active_mm = mm;
mm                255 arch/arm/mach-rpc/ecard.c 	activate_mm(active_mm, mm);
mm                257 arch/arm/mach-rpc/ecard.c 	ecard_init_pgtables(mm);
mm                 51 arch/arm/mm/context.c void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
mm                 59 arch/arm/mm/context.c 	context_id = mm->context.id.counter;
mm                189 arch/arm/mm/context.c static u64 new_context(struct mm_struct *mm, unsigned int cpu)
mm                192 arch/arm/mm/context.c 	u64 asid = atomic64_read(&mm->context.id);
mm                233 arch/arm/mm/context.c 	cpumask_clear(mm_cpumask(mm));
mm                237 arch/arm/mm/context.c void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
mm                243 arch/arm/mm/context.c 	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
mm                244 arch/arm/mm/context.c 		__check_vmalloc_seq(mm);
mm                253 arch/arm/mm/context.c 	asid = atomic64_read(&mm->context.id);
mm                260 arch/arm/mm/context.c 	asid = atomic64_read(&mm->context.id);
mm                262 arch/arm/mm/context.c 		asid = new_context(mm, cpu);
mm                263 arch/arm/mm/context.c 		atomic64_set(&mm->context.id, asid);
mm                272 arch/arm/mm/context.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
mm                276 arch/arm/mm/context.c 	cpu_switch_mm(mm->pgd, mm);
mm                374 arch/arm/mm/dump.c static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
mm                377 arch/arm/mm/dump.c 	pgd_t *pgd = pgd_offset(mm, 0UL);
mm                399 arch/arm/mm/dump.c 	walk_pgd(&st, info->mm, info->base_addr);
mm                421 arch/arm/mm/dump.c 	.mm = &init_mm,
mm                132 arch/arm/mm/fault-armv.c 	struct mm_struct *mm = vma->vm_mm;
mm                152 arch/arm/mm/fault-armv.c 		if (mpnt->vm_mm != mm || mpnt == vma)
mm                 34 arch/arm/mm/fault.c void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
mm                 38 arch/arm/mm/fault.c 	if (!mm)
mm                 39 arch/arm/mm/fault.c 		mm = &init_mm;
mm                 41 arch/arm/mm/fault.c 	printk("%spgd = %p\n", lvl, mm->pgd);
mm                 42 arch/arm/mm/fault.c 	pgd = pgd_offset(mm, addr);
mm                 98 arch/arm/mm/fault.c void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
mm                106 arch/arm/mm/fault.c __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
mm                124 arch/arm/mm/fault.c 	show_pte(KERN_ALERT, mm, addr);
mm                149 arch/arm/mm/fault.c 		show_pte(KERN_ERR, tsk->mm, addr);
mm                169 arch/arm/mm/fault.c 	struct mm_struct *mm = tsk->active_mm;
mm                178 arch/arm/mm/fault.c 		__do_kernel_fault(mm, addr, fsr, regs);
mm                203 arch/arm/mm/fault.c __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
mm                209 arch/arm/mm/fault.c 	vma = find_vma(mm, addr);
mm                241 arch/arm/mm/fault.c 	struct mm_struct *mm;
mm                250 arch/arm/mm/fault.c 	mm  = tsk->mm;
mm                260 arch/arm/mm/fault.c 	if (faulthandler_disabled() || !mm)
mm                273 arch/arm/mm/fault.c 	if (!down_read_trylock(&mm->mmap_sem)) {
mm                277 arch/arm/mm/fault.c 		down_read(&mm->mmap_sem);
mm                292 arch/arm/mm/fault.c 	fault = __do_page_fault(mm, addr, fsr, flags, tsk);
mm                330 arch/arm/mm/fault.c 	up_read(&mm->mmap_sem);
mm                376 arch/arm/mm/fault.c 	__do_kernel_fault(mm, addr, fsr, regs);
mm                539 arch/arm/mm/fault.c 	show_pte(KERN_ALERT, current->mm, addr);
mm                 63 arch/arm/mm/flush.c void flush_cache_mm(struct mm_struct *mm)
mm                 66 arch/arm/mm/flush.c 		vivt_flush_cache_mm(mm);
mm                239 arch/arm/mm/flush.c 	struct mm_struct *mm = current->active_mm;
mm                258 arch/arm/mm/flush.c 		if (mpnt->vm_mm != mm)
mm                569 arch/arm/mm/init.c 				  pmdval_t prot, struct mm_struct *mm)
mm                573 arch/arm/mm/init.c 	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
mm                597 arch/arm/mm/init.c 			struct mm_struct *mm)
mm                618 arch/arm/mm/init.c 				set ? perms[i].prot : perms[i].clear, mm);
mm                636 arch/arm/mm/init.c 			if (s->mm)
mm                637 arch/arm/mm/init.c 				set_section_perms(perms, n, true, s->mm);
mm                116 arch/arm/mm/ioremap.c void __check_vmalloc_seq(struct mm_struct *mm)
mm                122 arch/arm/mm/ioremap.c 		memcpy(pgd_offset(mm, VMALLOC_START),
mm                126 arch/arm/mm/ioremap.c 		mm->context.vmalloc_seq = seq;
mm                 33 arch/arm/mm/mmap.c 	struct mm_struct *mm = current->mm;
mm                 65 arch/arm/mm/mmap.c 		vma = find_vma(mm, addr);
mm                 73 arch/arm/mm/mmap.c 	info.low_limit = mm->mmap_base;
mm                 86 arch/arm/mm/mmap.c 	struct mm_struct *mm = current->mm;
mm                116 arch/arm/mm/mmap.c 		vma = find_vma(mm, addr);
mm                125 arch/arm/mm/mmap.c 	info.high_limit = mm->mmap_base;
mm                139 arch/arm/mm/mmap.c 		info.low_limit = mm->mmap_base;
mm                846 arch/arm/mm/mmu.c static void __init create_36bit_mapping(struct mm_struct *mm,
mm                889 arch/arm/mm/mmu.c 	pgd = pgd_offset(mm, addr);
mm                907 arch/arm/mm/mmu.c static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
mm                923 arch/arm/mm/mmu.c 		create_36bit_mapping(mm, md, type, ng);
mm                938 arch/arm/mm/mmu.c 	pgd = pgd_offset(mm, addr);
mm                975 arch/arm/mm/mmu.c void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
mm                979 arch/arm/mm/mmu.c 	pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
mm                982 arch/arm/mm/mmu.c 	pmd_alloc(mm, pud, 0);
mm                984 arch/arm/mm/mmu.c 	__create_mapping(mm, md, late_alloc, ng);
mm                 30 arch/arm/mm/pgd.c pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 56 arch/arm/mm/pgd.c 	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
mm                 61 arch/arm/mm/pgd.c 	new_pmd = pmd_alloc(mm, new_pud, 0);
mm                 72 arch/arm/mm/pgd.c 		new_pud = pud_alloc(mm, new_pgd, 0);
mm                 76 arch/arm/mm/pgd.c 		new_pmd = pmd_alloc(mm, new_pud, 0);
mm                 80 arch/arm/mm/pgd.c 		new_pte = pte_alloc_map(mm, new_pmd, 0);
mm                106 arch/arm/mm/pgd.c 	pmd_free(mm, new_pmd);
mm                107 arch/arm/mm/pgd.c 	mm_dec_nr_pmds(mm);
mm                109 arch/arm/mm/pgd.c 	pud_free(mm, new_pud);
mm                116 arch/arm/mm/pgd.c void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
mm                140 arch/arm/mm/pgd.c 	pte_free(mm, pte);
mm                141 arch/arm/mm/pgd.c 	mm_dec_nr_ptes(mm);
mm                144 arch/arm/mm/pgd.c 	pmd_free(mm, pmd);
mm                145 arch/arm/mm/pgd.c 	mm_dec_nr_pmds(mm);
mm                148 arch/arm/mm/pgd.c 	pud_free(mm, pud);
mm                164 arch/arm/mm/pgd.c 		pmd_free(mm, pmd);
mm                165 arch/arm/mm/pgd.c 		mm_dec_nr_pmds(mm);
mm                167 arch/arm/mm/pgd.c 		pud_free(mm, pud);
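
pgd.c lines 30-116 above allocate a process's first-level table, leave the user half empty and share the kernel half with init_mm. The core of pgd_alloc(), as a hedged sketch that drops ARM's cache maintenance and LPAE special cases (order-2 allocation matching classic ARM's 16 KiB pgd):

	pgd_t *demo_pgd_alloc(struct mm_struct *mm)
	{
		pgd_t *new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);

		if (!new_pgd)
			return NULL;

		/* user half starts empty; kernel half shared with init_mm */
		memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(new_pgd + USER_PTRS_PER_PGD,
		       pgd_offset_k(0) + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		return new_pgd;
	}
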
mm                 15 arch/arm/mm/proc-v7-bugs.c extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
mm                 16 arch/arm/mm/proc-v7-bugs.c extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
mm                 17 arch/arm/mm/proc-v7-bugs.c extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
mm                 18 arch/arm/mm/proc-v7-bugs.c extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
mm                 29 arch/arm/probes/uprobes/core.c int set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
mm                 32 arch/arm/probes/uprobes/core.c 	return uprobe_write_opcode(auprobe, mm, vaddr,
mm                 72 arch/arm/probes/uprobes/core.c int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
mm                 98 arch/arm64/include/asm/cacheflush.h static inline void flush_cache_mm(struct mm_struct *mm)
mm                131 arch/arm64/include/asm/cacheflush.h #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
mm                 21 arch/arm64/include/asm/efi.h int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
mm                 22 arch/arm64/include/asm/efi.h int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
mm                136 arch/arm64/include/asm/efi.h static inline void efi_set_pgd(struct mm_struct *mm)
mm                138 arch/arm64/include/asm/efi.h 	__switch_mm(mm);
mm                141 arch/arm64/include/asm/efi.h 		if (mm != current->active_mm) {
mm                149 arch/arm64/include/asm/efi.h 			update_saved_ttbr0(current, mm);
mm                146 arch/arm64/include/asm/elf.h 		    (elf_addr_t)current->mm->context.vdso);		\
mm                215 arch/arm64/include/asm/elf.h 			(Elf64_Off)current->mm->context.vdso);		\
mm                 26 arch/arm64/include/asm/hugetlb.h static inline int is_hugepage_only_range(struct mm_struct *mm,
mm                 41 arch/arm64/include/asm/hugetlb.h extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
mm                 48 arch/arm64/include/asm/hugetlb.h extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
mm                 51 arch/arm64/include/asm/hugetlb.h extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
mm                 57 arch/arm64/include/asm/hugetlb.h extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
mm                 59 arch/arm64/include/asm/hugetlb.h extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
mm                 30 arch/arm64/include/asm/mmu.h #define ASID(mm)	((mm)->context.id.counter & 0xffff)
mm                126 arch/arm64/include/asm/mmu.h extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
mm                 49 arch/arm64/include/asm/mmu_context.h static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
mm                 53 arch/arm64/include/asm/mmu_context.h 	cpu_do_switch_mm(virt_to_phys(pgd),mm);
mm                115 arch/arm64/include/asm/mmu_context.h 	struct mm_struct *mm = current->active_mm;
mm                121 arch/arm64/include/asm/mmu_context.h 	if (mm != &init_mm && !system_uses_ttbr0_pan())
mm                122 arch/arm64/include/asm/mmu_context.h 		cpu_switch_mm(mm->pgd, mm);
mm                175 arch/arm64/include/asm/mmu_context.h #define destroy_context(mm)		do { } while(0)
mm                176 arch/arm64/include/asm/mmu_context.h void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
mm                178 arch/arm64/include/asm/mmu_context.h #define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
mm                182 arch/arm64/include/asm/mmu_context.h 				      struct mm_struct *mm)
mm                189 arch/arm64/include/asm/mmu_context.h 	if (mm == &init_mm)
mm                192 arch/arm64/include/asm/mmu_context.h 		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
mm                198 arch/arm64/include/asm/mmu_context.h 				      struct mm_struct *mm)
mm                204 arch/arm64/include/asm/mmu_context.h enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                245 arch/arm64/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
mm                 22 arch/arm64/include/asm/pgalloc.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                 27 arch/arm64/include/asm/pgalloc.h 	if (mm == &init_mm)
mm                 40 arch/arm64/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
mm                 52 arch/arm64/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
mm                 65 arch/arm64/include/asm/pgalloc.h static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                 70 arch/arm64/include/asm/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
mm                 81 arch/arm64/include/asm/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, pud_t *pudp)
mm                 92 arch/arm64/include/asm/pgalloc.h extern pgd_t *pgd_alloc(struct mm_struct *mm);
mm                 93 arch/arm64/include/asm/pgalloc.h extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
mm                106 arch/arm64/include/asm/pgalloc.h pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
mm                115 arch/arm64/include/asm/pgalloc.h pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
mm                 70 arch/arm64/include/asm/pgtable.h #define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
mm                111 arch/arm64/include/asm/pgtable.h #define pte_accessible(mm, pte)	\
mm                112 arch/arm64/include/asm/pgtable.h 	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
mm                243 arch/arm64/include/asm/pgtable.h static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
mm                255 arch/arm64/include/asm/pgtable.h 	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
mm                271 arch/arm64/include/asm/pgtable.h static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                277 arch/arm64/include/asm/pgtable.h 	__check_racy_pte_update(mm, ptep, pte);
mm                402 arch/arm64/include/asm/pgtable.h #define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
mm                650 arch/arm64/include/asm/pgtable.h #define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))
mm                757 arch/arm64/include/asm/pgtable.h static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
mm                765 arch/arm64/include/asm/pgtable.h static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
mm                768 arch/arm64/include/asm/pgtable.h 	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
mm                777 arch/arm64/include/asm/pgtable.h static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
mm                798 arch/arm64/include/asm/pgtable.h static inline void pmdp_set_wrprotect(struct mm_struct *mm,
mm                801 arch/arm64/include/asm/pgtable.h 	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
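
arm64's ptep_get_and_clear() (pgtable.h line 757 above) must clear the pte atomically so hardware updates of the access/dirty bits are not lost, and pmdp_huge_get_and_clear() reuses it by casting. The shape of the implementation, per the arm64 source:

	static inline pte_t demo_ptep_get_and_clear(struct mm_struct *mm,
						    unsigned long address,
						    pte_t *ptep)
	{
		/* atomic exchange: racing HW DBM/AF updates are folded in */
		return __pte(xchg_relaxed(&pte_val(*ptep), 0));
	}
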
mm                 20 arch/arm64/include/asm/proc-fns.h extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
mm                 19 arch/arm64/include/asm/ptdump.h 	struct mm_struct		*mm;
mm                 26 arch/arm64/include/asm/tlb.h 	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
mm                 37 arch/arm64/include/asm/tlb.h 			flush_tlb_mm(tlb->mm);
mm                147 arch/arm64/include/asm/tlbflush.h static inline void flush_tlb_mm(struct mm_struct *mm)
mm                149 arch/arm64/include/asm/tlbflush.h 	unsigned long asid = __TLBI_VADDR(0, ASID(mm));
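
The arm64 flush_tlb_mm() above needs no per-page loop: one broadcast TLBI by ASID (aside1is) invalidates every entry tagged for the mm across the inner-shareable domain. Its body, reproduced as a sketch from arm64's tlbflush.h:

	static inline void demo_flush_tlb_mm(struct mm_struct *mm)
	{
		unsigned long asid = __TLBI_VADDR(0, ASID(mm));

		dsb(ishst);			/* order prior pte updates */
		__tlbi(aside1is, asid);		/* invalidate by ASID, IS */
		__tlbi_user(aside1is, asid);
		dsb(ish);			/* wait for completion */
	}
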
mm                 59 arch/arm64/kernel/efi.c int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
mm                 79 arch/arm64/kernel/efi.c 	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
mm                 98 arch/arm64/kernel/efi.c int __init efi_set_mapping_permissions(struct mm_struct *mm,
mm                111 arch/arm64/kernel/efi.c 	return apply_to_page_range(mm, md->virt_addr,
mm                 34 arch/arm64/kernel/probes/uprobes.c int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
mm                 40 arch/arm64/kernel/probes/uprobes.c 	if (mm->context.flags & MMCF_AARCH32)
mm                339 arch/arm64/kernel/process.c 	if (current->mm)
mm                572 arch/arm64/kernel/process.c 	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
mm               1539 arch/arm64/kernel/ptrace.c 		tmp = tsk->mm->start_code;
mm               1541 arch/arm64/kernel/ptrace.c 		tmp = tsk->mm->start_data;
mm               1543 arch/arm64/kernel/ptrace.c 		tmp = tsk->mm->end_code;
mm                736 arch/arm64/kernel/signal.c 		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
mm                346 arch/arm64/kernel/signal32.c 		void *vdso_base = current->mm->context.vdso;
mm                374 arch/arm64/kernel/signal32.c 		retcode = (unsigned long)current->mm->context.vdso +
mm                196 arch/arm64/kernel/smp.c 	struct mm_struct *mm = &init_mm;
mm                206 arch/arm64/kernel/smp.c 	mmgrab(mm);
mm                207 arch/arm64/kernel/smp.c 	current->active_mm = mm;
mm                386 arch/arm64/kernel/traps.c 	down_read(&current->mm->mmap_sem);
mm                387 arch/arm64/kernel/traps.c 	if (find_vma(current->mm, addr) == NULL)
mm                391 arch/arm64/kernel/traps.c 	up_read(&current->mm->mmap_sem);
mm                 95 arch/arm64/kernel/vdso.c 	current->mm->context.vdso = (void *)new_vma->vm_start;
mm                140 arch/arm64/kernel/vdso.c 				    struct mm_struct *mm,
mm                157 arch/arm64/kernel/vdso.c 	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
mm                164 arch/arm64/kernel/vdso.c 	mm->context.vdso = (void *)vdso_base;
mm                165 arch/arm64/kernel/vdso.c 	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
mm                175 arch/arm64/kernel/vdso.c 	mm->context.vdso = NULL;
mm                295 arch/arm64/kernel/vdso.c static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
mm                306 arch/arm64/kernel/vdso.c 	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
mm                315 arch/arm64/kernel/vdso.c static int aarch32_sigreturn_setup(struct mm_struct *mm)
mm                330 arch/arm64/kernel/vdso.c 	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
mm                337 arch/arm64/kernel/vdso.c 	mm->context.vdso = (void *)addr;
mm                346 arch/arm64/kernel/vdso.c 	struct mm_struct *mm = current->mm;
mm                349 arch/arm64/kernel/vdso.c 	if (down_write_killable(&mm->mmap_sem))
mm                352 arch/arm64/kernel/vdso.c 	ret = aarch32_kuser_helpers_setup(mm);
mm                358 arch/arm64/kernel/vdso.c 				       mm,
mm                362 arch/arm64/kernel/vdso.c 	ret = aarch32_sigreturn_setup(mm);
mm                366 arch/arm64/kernel/vdso.c 	up_write(&mm->mmap_sem);
mm                407 arch/arm64/kernel/vdso.c 	struct mm_struct *mm = current->mm;
mm                410 arch/arm64/kernel/vdso.c 	if (down_write_killable(&mm->mmap_sem))
mm                414 arch/arm64/kernel/vdso.c 				       mm,
mm                418 arch/arm64/kernel/vdso.c 	up_write(&mm->mmap_sem);
mm                 64 arch/arm64/kvm/fpsimd.c 	BUG_ON(!current->mm);
mm                134 arch/arm64/mm/context.c static u64 new_context(struct mm_struct *mm)
mm                137 arch/arm64/mm/context.c 	u64 asid = atomic64_read(&mm->context.id);
mm                183 arch/arm64/mm/context.c void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
mm                191 arch/arm64/mm/context.c 	asid = atomic64_read(&mm->context.id);
mm                216 arch/arm64/mm/context.c 	asid = atomic64_read(&mm->context.id);
mm                218 arch/arm64/mm/context.c 		asid = new_context(mm);
mm                219 arch/arm64/mm/context.c 		atomic64_set(&mm->context.id, asid);
mm                237 arch/arm64/mm/context.c 		cpu_switch_mm(mm->pgd, mm);
mm                344 arch/arm64/mm/dump.c static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
mm                349 arch/arm64/mm/dump.c 	pgd_t *pgdp = pgd_offset(mm, start);
mm                371 arch/arm64/mm/dump.c 	walk_pgd(&st, info->mm, info->base_addr);
mm                387 arch/arm64/mm/dump.c 	.mm		= &init_mm,
mm                116 arch/arm64/mm/fault.c static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
mm                119 arch/arm64/mm/fault.c 	if (mm == &init_mm)
mm                120 arch/arm64/mm/fault.c 		return __pa_symbol(mm->pgd);
mm                122 arch/arm64/mm/fault.c 	return (unsigned long)virt_to_phys(mm->pgd);
mm                130 arch/arm64/mm/fault.c 	struct mm_struct *mm;
mm                136 arch/arm64/mm/fault.c 		mm = current->active_mm;
mm                137 arch/arm64/mm/fault.c 		if (mm == &init_mm) {
mm                144 arch/arm64/mm/fault.c 		mm = &init_mm;
mm                152 arch/arm64/mm/fault.c 		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
mm                153 arch/arm64/mm/fault.c 		 vabits_actual, mm_to_pgd_phys(mm));
mm                154 arch/arm64/mm/fault.c 	pgdp = pgd_offset(mm, addr);
mm                409 arch/arm64/mm/fault.c static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
mm                412 arch/arm64/mm/fault.c 	struct vm_area_struct *vma = find_vma(mm, addr);
mm                455 arch/arm64/mm/fault.c 	struct mm_struct *mm = current->mm;
mm                467 arch/arm64/mm/fault.c 	if (faulthandler_disabled() || !mm)
mm                503 arch/arm64/mm/fault.c 	if (!down_read_trylock(&mm->mmap_sem)) {
mm                507 arch/arm64/mm/fault.c 		down_read(&mm->mmap_sem);
mm                516 arch/arm64/mm/fault.c 			up_read(&mm->mmap_sem);
mm                522 arch/arm64/mm/fault.c 	fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
mm                548 arch/arm64/mm/fault.c 	up_read(&mm->mmap_sem);
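
fault.c lines 455-548 follow the canonical user-fault slow path: take mmap_sem for reading (trylock first to catch faults taken with the lock already held), locate the VMA, and hand off to handle_mm_fault(). Stripped to its skeleton, assuming v5.x-era signatures:

	static vm_fault_t demo_handle_user_fault(struct mm_struct *mm,
						 unsigned long addr,
						 unsigned int flags)
	{
		struct vm_area_struct *vma;
		vm_fault_t fault;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, addr);
		if (!vma || vma->vm_start > addr) {
			/* real handlers try expand_stack() for GROWSDOWN */
			up_read(&mm->mmap_sem);
			return VM_FAULT_SIGSEGV;
		}
		fault = handle_mm_fault(vma, addr, flags);
		up_read(&mm->mmap_sem);
		return fault;
	}
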
mm                 66 arch/arm64/mm/hugetlbpage.c static int find_num_contig(struct mm_struct *mm, unsigned long addr,
mm                 69 arch/arm64/mm/hugetlbpage.c 	pgd_t *pgdp = pgd_offset(mm, addr);
mm                117 arch/arm64/mm/hugetlbpage.c static pte_t get_clear_flush(struct mm_struct *mm,
mm                128 arch/arm64/mm/hugetlbpage.c 		pte_t pte = ptep_get_and_clear(mm, addr, ptep);
mm                143 arch/arm64/mm/hugetlbpage.c 		struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
mm                158 arch/arm64/mm/hugetlbpage.c static void clear_flush(struct mm_struct *mm,
mm                164 arch/arm64/mm/hugetlbpage.c 	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
mm                168 arch/arm64/mm/hugetlbpage.c 		pte_clear(mm, addr, ptep);
mm                173 arch/arm64/mm/hugetlbpage.c void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
mm                189 arch/arm64/mm/hugetlbpage.c 		set_pte_at(mm, addr, ptep, pte);
mm                193 arch/arm64/mm/hugetlbpage.c 	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
mm                198 arch/arm64/mm/hugetlbpage.c 	clear_flush(mm, addr, ptep, pgsize, ncontig);
mm                201 arch/arm64/mm/hugetlbpage.c 		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
mm                204 arch/arm64/mm/hugetlbpage.c void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
mm                216 arch/arm64/mm/hugetlbpage.c pte_t *huge_pte_alloc(struct mm_struct *mm,
mm                224 arch/arm64/mm/hugetlbpage.c 	pgdp = pgd_offset(mm, addr);
mm                225 arch/arm64/mm/hugetlbpage.c 	pudp = pud_alloc(mm, pgdp, addr);
mm                232 arch/arm64/mm/hugetlbpage.c 		pmdp = pmd_alloc(mm, pudp, addr);
mm                244 arch/arm64/mm/hugetlbpage.c 		ptep = pte_alloc_map(mm, pmdp, addr);
mm                248 arch/arm64/mm/hugetlbpage.c 			ptep = huge_pmd_share(mm, addr, pudp);
mm                250 arch/arm64/mm/hugetlbpage.c 			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
mm                252 arch/arm64/mm/hugetlbpage.c 		pmdp = pmd_alloc(mm, pudp, addr);
mm                260 arch/arm64/mm/hugetlbpage.c pte_t *huge_pte_offset(struct mm_struct *mm,
mm                267 arch/arm64/mm/hugetlbpage.c 	pgdp = pgd_offset(mm, addr);
mm                313 arch/arm64/mm/hugetlbpage.c void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
mm                322 arch/arm64/mm/hugetlbpage.c 		pte_clear(mm, addr, ptep);
mm                325 arch/arm64/mm/hugetlbpage.c pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
mm                333 arch/arm64/mm/hugetlbpage.c 		return ptep_get_and_clear(mm, addr, ptep);
mm                335 arch/arm64/mm/hugetlbpage.c 	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
mm                337 arch/arm64/mm/hugetlbpage.c 	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
mm                404 arch/arm64/mm/hugetlbpage.c void huge_ptep_set_wrprotect(struct mm_struct *mm,
mm                414 arch/arm64/mm/hugetlbpage.c 		ptep_set_wrprotect(mm, addr, ptep);
mm                418 arch/arm64/mm/hugetlbpage.c 	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
mm                421 arch/arm64/mm/hugetlbpage.c 	pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
mm                428 arch/arm64/mm/hugetlbpage.c 		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
mm                411 arch/arm64/mm/mmu.c void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
mm                417 arch/arm64/mm/mmu.c 	BUG_ON(mm == &init_mm);
mm                422 arch/arm64/mm/mmu.c 	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
mm                 20 arch/arm64/mm/pgd.c pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 30 arch/arm64/mm/pgd.c void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 23 arch/c6x/include/asm/cacheflush.h #define flush_cache_mm(mm)			do {} while (0)
mm                 24 arch/c6x/include/asm/cacheflush.h #define flush_cache_dup_mm(mm)			do {} while (0)
mm                 25 arch/c6x/include/asm/cacheflush.h #define flush_cache_range(mm, start, end)	do {} while (0)
mm                 51 arch/c6x/include/asm/pgtable.h #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
mm                 14 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_cache_mm(mm)			dcache_wbinv_all()
mm                 16 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_cache_dup_mm(mm)			cache_wbinv_all()
mm                181 arch/csky/abiv1/inc/abi/entry.h 	andi	\rx, (\imm >> 3)
mm                 28 arch/csky/abiv1/mmap.c 	struct mm_struct *mm = current->mm;
mm                 58 arch/csky/abiv1/mmap.c 		vma = find_vma(mm, addr);
mm                 66 arch/csky/abiv1/mmap.c 	info.low_limit = mm->mmap_base;
mm                 14 arch/csky/abiv2/inc/abi/cacheflush.h #define flush_cache_mm(mm)			do { } while (0)
mm                 15 arch/csky/abiv2/inc/abi/cacheflush.h #define flush_cache_dup_mm(mm)			do { } while (0)
mm                249 arch/csky/abiv2/inc/abi/entry.h 	andi	\rx, (\imm >> 3)
mm                 34 arch/csky/include/asm/asid.h 		      unsigned int cpu, struct mm_struct *mm);
mm                 44 arch/csky/include/asm/asid.h 				      struct mm_struct *mm)
mm                 71 arch/csky/include/asm/asid.h 	asid_new_context(info, pasid, cpu, mm);
mm                 24 arch/csky/include/asm/mmu_context.h #define cpu_asid(mm)		(atomic64_read(&mm->context.asid) & ASID_MASK)
mm                 26 arch/csky/include/asm/mmu_context.h #define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.asid, 0); 0; })
mm                 29 arch/csky/include/asm/mmu_context.h #define destroy_context(mm)		do {} while (0)
mm                 30 arch/csky/include/asm/mmu_context.h #define enter_lazy_tlb(mm, tsk)		do {} while (0)
mm                 31 arch/csky/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)		do {} while (0)
mm                 33 arch/csky/include/asm/mmu_context.h void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
mm                 14 arch/csky/include/asm/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
mm                 20 arch/csky/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
mm                 30 arch/csky/include/asm/pgalloc.h static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
mm                 45 arch/csky/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 50 arch/csky/include/asm/pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 47 arch/csky/include/asm/pgtable.h #define pte_clear(mm, addr, ptep)	set_pte((ptep), \
mm                125 arch/csky/include/asm/pgtable.h #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
mm                288 arch/csky/include/asm/pgtable.h static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
mm                290 arch/csky/include/asm/pgtable.h 	return mm->pgd + pgd_index(address);
mm                 87 arch/csky/include/asm/processor.h #define copy_segments(tsk, mm)		do { } while (0)
mm                 88 arch/csky/include/asm/processor.h #define release_segments(mm)		do { } while (0)
mm                 21 arch/csky/include/asm/tlb.h #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
mm                 17 arch/csky/include/asm/tlbflush.h extern void flush_tlb_mm(struct mm_struct *mm);
mm                 17 arch/csky/kernel/asm-offsets.c 	DEFINE(TASK_MM,           offsetof(struct task_struct, mm));
mm                248 arch/csky/kernel/ptrace.c 	if (current->mm) {
mm                250 arch/csky/kernel/ptrace.c 		       (int) current->mm->start_code,
mm                251 arch/csky/kernel/ptrace.c 		       (int) current->mm->end_code,
mm                252 arch/csky/kernel/ptrace.c 		       (int) current->mm->start_data,
mm                253 arch/csky/kernel/ptrace.c 		       (int) current->mm->end_data,
mm                254 arch/csky/kernel/ptrace.c 		       (int) current->mm->end_data,
mm                255 arch/csky/kernel/ptrace.c 		       (int) current->mm->brk);
mm                257 arch/csky/kernel/ptrace.c 		       (int) current->mm->start_stack,
mm                137 arch/csky/kernel/signal.c 	struct csky_vdso *vdso = current->mm->context.vdso;
mm                208 arch/csky/kernel/smp.c 	struct mm_struct *mm = &init_mm;
mm                227 arch/csky/kernel/smp.c 	mmget(mm);
mm                228 arch/csky/kernel/smp.c 	mmgrab(mm);
mm                229 arch/csky/kernel/smp.c 	current->active_mm = mm;
mm                230 arch/csky/kernel/smp.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
mm                 51 arch/csky/kernel/vdso.c 	struct mm_struct *mm = current->mm;
mm                 53 arch/csky/kernel/vdso.c 	down_write(&mm->mmap_sem);
mm                 62 arch/csky/kernel/vdso.c 			mm,
mm                 70 arch/csky/kernel/vdso.c 	mm->context.vdso = (void *)addr;
mm                 73 arch/csky/kernel/vdso.c 	up_write(&mm->mmap_sem);
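The csky vdso.c lines above (and the hexagon ones later in this listing) follow the usual vdso install shape: take mmap_sem for writing, create the mapping, publish its address in mm->context.vdso for the signal code, then drop the lock. A small pthread sketch of that lock, install, publish ordering; map_vdso_page and vdso_base are invented names for illustration.

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
	static void *vdso_base;				/* published mapping address */

	static void *map_vdso_page(void)
	{
		static char page[4096];			/* stand-in for the mapping */
		return page;
	}

	static int setup_vdso(void)
	{
		if (pthread_rwlock_wrlock(&map_lock))	/* down_write(&mm->mmap_sem) */
			return -1;
		vdso_base = map_vdso_page();		/* install, then publish */
		pthread_rwlock_unlock(&map_lock);	/* up_write(&mm->mmap_sem) */
		return 0;
	}

	int main(void)
	{
		setup_vdso();
		printf("vdso at %p\n", vdso_base);
		return 0;
	}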
mm                 80 arch/csky/mm/asid.c 		       struct mm_struct *mm)
mm                126 arch/csky/mm/asid.c 	cpumask_clear(mm_cpumask(mm));
mm                138 arch/csky/mm/asid.c 		      unsigned int cpu, struct mm_struct *mm)
mm                147 arch/csky/mm/asid.c 		asid = new_context(info, pasid, mm);
mm                155 arch/csky/mm/asid.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
mm                 19 arch/csky/mm/context.c void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
mm                 21 arch/csky/mm/context.c 	asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
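The csky asid.h, asid.c and context.c lines describe a generation-tagged ASID allocator: the per-mm context word keeps a generation in its upper bits, the common case is a single generation compare, and exhausting the ASID space bumps the generation and flushes stale translations. A compact single-CPU model of that check; the real allocator also reserves active ASIDs per CPU under a lock, which this sketch omits, and every name here is illustrative.

	#include <stdint.h>
	#include <stdio.h>

	#define ASID_BITS	8
	#define ASID_MASK	((1u << ASID_BITS) - 1)
	#define GEN_STEP	(1u << ASID_BITS)

	static uint32_t generation = GEN_STEP;		/* upper bits of a context */
	static uint32_t next_asid = 1;

	static uint32_t check_context(uint32_t ctx)
	{
		/* Fast path: context already stamped with the live generation. */
		if ((ctx & ~ASID_MASK) == generation)
			return ctx;

		/* Slow path: hand out a fresh ASID; on exhaustion, bump the
		 * generation, which invalidates every old context at once. */
		if (next_asid > ASID_MASK) {
			generation += GEN_STEP;
			next_asid = 1;
			printf("rollover: flush all TLBs\n");
		}
		return generation | next_asid++;
	}

	int main(void)
	{
		uint32_t ctx = 0;

		ctx = check_context(ctx);	/* first switch: allocates an ASID */
		ctx = check_context(ctx);	/* second switch: fast path */
		printf("ctx = 0x%x\n", ctx);
		return 0;
	}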
mm                 51 arch/csky/mm/fault.c 	struct mm_struct *mm = tsk->mm;
mm                116 arch/csky/mm/fault.c 	if (in_atomic() || !mm)
mm                119 arch/csky/mm/fault.c 	down_read(&mm->mmap_sem);
mm                120 arch/csky/mm/fault.c 	vma = find_vma(mm, address);
mm                169 arch/csky/mm/fault.c 	up_read(&mm->mmap_sem);
mm                177 arch/csky/mm/fault.c 	up_read(&mm->mmap_sem);
mm                216 arch/csky/mm/fault.c 	up_read(&mm->mmap_sem);
mm                 25 arch/csky/mm/tlb.c void flush_tlb_mm(struct mm_struct *mm)
mm                 28 arch/csky/mm/tlb.c 	asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
mm                 84 arch/h8300/include/asm/processor.h 	(_regs)->er5 = current->mm->start_data;	/* GOT base */	\
mm                 94 arch/h8300/include/asm/processor.h 	(_regs)->er5 = current->mm->start_data;	/* GOT base */	\
mm                 30 arch/h8300/kernel/asm-offsets.c 	OFFSET(TASK_MM, task_struct, mm);
mm                205 arch/h8300/kernel/signal.c 	regs->er5 = current->mm->start_data;	/* GOT base */
mm                 63 arch/h8300/kernel/traps.c 	if (current->mm) {
mm                 65 arch/h8300/kernel/traps.c 			(int) current->mm->start_code,
mm                 66 arch/h8300/kernel/traps.c 			(int) current->mm->end_code,
mm                 67 arch/h8300/kernel/traps.c 			(int) current->mm->start_data,
mm                 68 arch/h8300/kernel/traps.c 			(int) current->mm->end_data,
mm                 69 arch/h8300/kernel/traps.c 			(int) current->mm->end_data,
mm                 70 arch/h8300/kernel/traps.c 			(int) current->mm->brk);
mm                 72 arch/h8300/kernel/traps.c 			(int) current->mm->start_stack,
mm                 29 arch/hexagon/include/asm/cacheflush.h #define flush_cache_mm(mm)			do { } while (0)
mm                 30 arch/hexagon/include/asm/cacheflush.h #define flush_cache_dup_mm(mm)			do { } while (0)
mm                 18 arch/hexagon/include/asm/mmu_context.h static inline void destroy_context(struct mm_struct *mm)
mm                 27 arch/hexagon/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm,
mm                 36 arch/hexagon/include/asm/mmu_context.h 	struct mm_struct *mm)
mm                 46 arch/hexagon/include/asm/mmu_context.h 					struct mm_struct *mm)
mm                 21 arch/hexagon/include/asm/pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 36 arch/hexagon/include/asm/pgalloc.h 	mm->context.generation = kmap_generation;
mm                 39 arch/hexagon/include/asm/pgalloc.h 	mm->context.ptbase = __pa(pgd);
mm                 44 arch/hexagon/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 49 arch/hexagon/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
mm                 69 arch/hexagon/include/asm/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
mm                 78 arch/hexagon/include/asm/pgalloc.h 	mm->context.generation = kmap_generation;
mm                 88 arch/hexagon/include/asm/pgalloc.h 	pmdindex = (pgd_t *)pmd - mm->pgd;
mm                206 arch/hexagon/include/asm/pgtable.h static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
mm                232 arch/hexagon/include/asm/pgtable.h #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
mm                407 arch/hexagon/include/asm/pgtable.h #define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
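The hexagon pgalloc.h lines above show pgd_alloc() recording the kmap generation and physical table base in the new mm; the new top-level table begins as a copy of the kernel master table so kernel addresses resolve in every address space, and pmd_populate_kernel() re-stamps the generation when the kernel map grows. A self-contained sketch of the seed-from-master half of that; master_table, alloc_top_level and the entry split are assumptions for illustration.

	#include <stdlib.h>
	#include <string.h>
	#include <stdio.h>

	#define ENTRIES		1024
	#define USER_ENTRIES	768	/* low slots: user; high slots: kernel */

	static unsigned long master_table[ENTRIES];	/* kernel mappings */

	static unsigned long *alloc_top_level(void)
	{
		unsigned long *t = calloc(ENTRIES, sizeof(*t));

		if (!t)
			return NULL;
		/* Copy the kernel portion so kernel addresses translate the
		 * same way in every new address space. */
		memcpy(t + USER_ENTRIES, master_table + USER_ENTRIES,
		       (ENTRIES - USER_ENTRIES) * sizeof(*t));
		return t;
	}

	int main(void)
	{
		unsigned long *pgd;

		master_table[ENTRIES - 1] = 0xdeadbeef;	/* pretend kernel entry */
		pgd = alloc_top_level();
		if (pgd)
			printf("kernel entry copied: %lx\n", pgd[ENTRIES - 1]);
		free(pgd);
		return 0;
	}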
mm                 25 arch/hexagon/include/asm/tlbflush.h extern void flush_tlb_mm(struct mm_struct *mm);
mm                 43 arch/hexagon/include/asm/tlbflush.h #define flush_tlb_pgtables(mm, start, end)
mm                101 arch/hexagon/kernel/signal.c 	struct hexagon_vdso *vdso = current->mm->context.vdso;
mm                 53 arch/hexagon/kernel/vdso.c 	struct mm_struct *mm = current->mm;
mm                 55 arch/hexagon/kernel/vdso.c 	if (down_write_killable(&mm->mmap_sem))
mm                 68 arch/hexagon/kernel/vdso.c 	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
mm                 76 arch/hexagon/kernel/vdso.c 	mm->context.vdso = (void *)vdso_base;
mm                 79 arch/hexagon/kernel/vdso.c 	up_write(&mm->mmap_sem);
mm                 39 arch/hexagon/mm/vm_fault.c 	struct mm_struct *mm = current->mm;
mm                 50 arch/hexagon/mm/vm_fault.c 	if (unlikely(in_interrupt() || !mm))
mm                 58 arch/hexagon/mm/vm_fault.c 	down_read(&mm->mmap_sem);
mm                 59 arch/hexagon/mm/vm_fault.c 	vma = find_vma(mm, address);
mm                111 arch/hexagon/mm/vm_fault.c 		up_read(&mm->mmap_sem);
mm                115 arch/hexagon/mm/vm_fault.c 	up_read(&mm->mmap_sem);
mm                142 arch/hexagon/mm/vm_fault.c 	up_read(&mm->mmap_sem);
mm                 28 arch/hexagon/mm/vm_tlb.c 	struct mm_struct *mm = vma->vm_mm;
mm                 30 arch/hexagon/mm/vm_tlb.c 	if (mm->context.ptbase == current->active_mm->context.ptbase)
mm                 56 arch/hexagon/mm/vm_tlb.c void flush_tlb_mm(struct mm_struct *mm)
mm                 59 arch/hexagon/mm/vm_tlb.c 	if (current->active_mm->context.ptbase == mm->context.ptbase)
mm                 68 arch/hexagon/mm/vm_tlb.c 	struct mm_struct *mm = vma->vm_mm;
mm                 70 arch/hexagon/mm/vm_tlb.c 	if (mm->context.ptbase  == current->active_mm->context.ptbase)
mm                 21 arch/ia64/include/asm/cacheflush.h #define flush_cache_mm(mm)			do { } while (0)
mm                 22 arch/ia64/include/asm/cacheflush.h #define flush_cache_dup_mm(mm)			do { } while (0)
mm                 16 arch/ia64/include/asm/hugetlb.h static inline int is_hugepage_only_range(struct mm_struct *mm,
mm                 50 arch/ia64/include/asm/mmu_context.h extern void wrap_mmu_context (struct mm_struct *mm);
mm                 53 arch/ia64/include/asm/mmu_context.h enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
mm                 80 arch/ia64/include/asm/mmu_context.h get_mmu_context (struct mm_struct *mm)
mm                 83 arch/ia64/include/asm/mmu_context.h 	nv_mm_context_t context = mm->context;
mm                 90 arch/ia64/include/asm/mmu_context.h 	context = mm->context;
mm                 92 arch/ia64/include/asm/mmu_context.h 		cpumask_clear(mm_cpumask(mm));
mm                 99 arch/ia64/include/asm/mmu_context.h 				wrap_mmu_context(mm);
mm                101 arch/ia64/include/asm/mmu_context.h 		mm->context = context = ia64_ctx.next++;
mm                120 arch/ia64/include/asm/mmu_context.h init_new_context (struct task_struct *p, struct mm_struct *mm)
mm                122 arch/ia64/include/asm/mmu_context.h 	mm->context = 0;
mm                127 arch/ia64/include/asm/mmu_context.h destroy_context (struct mm_struct *mm)
mm                165 arch/ia64/include/asm/mmu_context.h activate_context (struct mm_struct *mm)
mm                170 arch/ia64/include/asm/mmu_context.h 		context = get_mmu_context(mm);
mm                171 arch/ia64/include/asm/mmu_context.h 		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
mm                172 arch/ia64/include/asm/mmu_context.h 			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
mm                178 arch/ia64/include/asm/mmu_context.h 	} while (unlikely(context != mm->context));
mm                181 arch/ia64/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
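activate_context() in the ia64 lines above is an optimistic retry loop: fetch the context, program it, then re-read mm->context and go around again if a concurrent wrap_mmu_context() invalidated the ID mid-flight. The same shape in miniature, with the race simulated by a one-shot store instead of a second thread; everything here is an illustrative model, not the kernel's code.

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint mm_context = 5;
	static unsigned int programmed;		/* stand-in for the hardware RID */

	static unsigned int get_context(void)
	{
		static int raced;
		unsigned int ctx = atomic_load(&mm_context);

		if (!ctx) {			/* invalidated: allocate a new ID */
			ctx = 7;
			atomic_store(&mm_context, ctx);
		}
		if (!raced++)			/* simulate one concurrent wrap */
			atomic_store(&mm_context, 0);
		return ctx;
	}

	static void activate_context(void)
	{
		unsigned int ctx;

		do {
			ctx = get_context();
			programmed = ctx;	/* program the MMU with ctx */
			/* retry if a concurrent wrap invalidated it meanwhile */
		} while (ctx != atomic_load(&mm_context));
	}

	int main(void)
	{
		activate_context();
		printf("programmed context %u\n", programmed);
		return 0;
	}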
mm                 27 arch/ia64/include/asm/pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 32 arch/ia64/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 39 arch/ia64/include/asm/pgalloc.h pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
mm                 44 arch/ia64/include/asm/pgalloc.h static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                 49 arch/ia64/include/asm/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
mm                 53 arch/ia64/include/asm/pgalloc.h #define __pud_free_tlb(tlb, pud, address)	pud_free((tlb)->mm, pud)
mm                 57 arch/ia64/include/asm/pgalloc.h pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
mm                 62 arch/ia64/include/asm/pgalloc.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                 67 arch/ia64/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                 72 arch/ia64/include/asm/pgalloc.h #define __pmd_free_tlb(tlb, pmd, address)	pmd_free((tlb)->mm, pmd)
mm                 75 arch/ia64/include/asm/pgalloc.h pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
mm                 82 arch/ia64/include/asm/pgalloc.h pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
mm                 87 arch/ia64/include/asm/pgalloc.h #define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, pte)
mm                267 arch/ia64/include/asm/pgtable.h #define pte_clear(mm,addr,pte)		(pte_val(*(pte)) = 0UL)
mm                344 arch/ia64/include/asm/pgtable.h #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
mm                373 arch/ia64/include/asm/pgtable.h pgd_offset (const struct mm_struct *mm, unsigned long address)
mm                375 arch/ia64/include/asm/pgtable.h 	return mm->pgd + pgd_index(address);
mm                386 arch/ia64/include/asm/pgtable.h #define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)
mm                426 arch/ia64/include/asm/pgtable.h ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                432 arch/ia64/include/asm/pgtable.h 	pte_clear(mm, addr, ptep);
mm                438 arch/ia64/include/asm/pgtable.h ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                449 arch/ia64/include/asm/pgtable.h 	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
mm                319 arch/ia64/include/asm/processor.h 	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */		\
mm                321 arch/ia64/include/asm/processor.h 	if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) {	\
mm                 56 arch/ia64/include/asm/tlbflush.h   extern void smp_flush_tlb_mm (struct mm_struct *mm);
mm                 65 arch/ia64/include/asm/tlbflush.h local_finish_flush_tlb_mm (struct mm_struct *mm)
mm                 67 arch/ia64/include/asm/tlbflush.h 	if (mm == current->active_mm)
mm                 68 arch/ia64/include/asm/tlbflush.h 		activate_context(mm);
mm                 77 arch/ia64/include/asm/tlbflush.h flush_tlb_mm (struct mm_struct *mm)
mm                 79 arch/ia64/include/asm/tlbflush.h 	if (!mm)
mm                 82 arch/ia64/include/asm/tlbflush.h 	set_bit(mm->context, ia64_ctx.flushmap);
mm                 83 arch/ia64/include/asm/tlbflush.h 	mm->context = 0;
mm                 85 arch/ia64/include/asm/tlbflush.h 	if (atomic_read(&mm->mm_users) == 0)
mm                 89 arch/ia64/include/asm/tlbflush.h 	smp_flush_tlb_mm(mm);
mm                 91 arch/ia64/include/asm/tlbflush.h 	local_finish_flush_tlb_mm(mm);
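flush_tlb_mm() in the ia64 tlbflush.h lines defers the real work for an mm with no remaining users: it sets the old context's bit in ia64_ctx.flushmap and zeroes mm->context, leaving the actual purge to the next allocator wrap. A toy model of that defer-by-bitmap idea; flushmap handling is simplified to a wholesale scrub at wrap, and the names are illustrative.

	#include <stdio.h>
	#include <string.h>

	#define MAX_CTX 64

	static unsigned char flushmap[MAX_CTX];	/* IDs waiting to be purged */
	static unsigned int next_ctx = 1;

	static void flush_mm(unsigned int *ctx)
	{
		if (!*ctx)
			return;
		flushmap[*ctx] = 1;	/* remember this ID needs a purge */
		*ctx = 0;		/* force reallocation on next activation */
	}

	static unsigned int alloc_ctx(void)
	{
		if (next_ctx == MAX_CTX) {	/* wrap: settle the deferred work */
			printf("wrap: purge TLB for all deferred IDs\n");
			memset(flushmap, 0, sizeof(flushmap));
			next_ctx = 1;
		}
		return next_ctx++;
	}

	int main(void)
	{
		unsigned int ctx = alloc_ctx();

		flush_mm(&ctx);		/* cheap: no TLB traffic here */
		ctx = alloc_ctx();	/* fresh ID the next time the mm runs */
		printf("new ctx %u\n", ctx);
		return 0;
	}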
mm               1409 arch/ia64/kernel/perfmon.c 	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
mm               1410 arch/ia64/kernel/perfmon.c 		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
mm               1873 arch/ia64/kernel/perfmon.c 	if (ctx->ctx_smpl_vaddr && current->mm) {
mm               2198 arch/ia64/kernel/perfmon.c 	struct mm_struct *mm = task->mm;
mm               2234 arch/ia64/kernel/perfmon.c 	vma = vm_area_alloc(mm);
mm               2261 arch/ia64/kernel/perfmon.c 	down_write(&task->mm->mmap_sem);
mm               2267 arch/ia64/kernel/perfmon.c 		up_write(&task->mm->mmap_sem);
mm               2278 arch/ia64/kernel/perfmon.c 		up_write(&task->mm->mmap_sem);
mm               2286 arch/ia64/kernel/perfmon.c 	insert_vm_struct(mm, vma);
mm               2289 arch/ia64/kernel/perfmon.c 	up_write(&task->mm->mmap_sem);
mm               2514 arch/ia64/kernel/perfmon.c 	if (task->mm == NULL) {
mm               1024 arch/ia64/kernel/setup.c 	BUG_ON(current->mm);
mm                293 arch/ia64/kernel/smp.c smp_flush_tlb_mm (struct mm_struct *mm)
mm                298 arch/ia64/kernel/smp.c 	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
mm                300 arch/ia64/kernel/smp.c 		local_finish_flush_tlb_mm(mm);
mm                306 arch/ia64/kernel/smp.c 			mm, 1);
mm                308 arch/ia64/kernel/smp.c 		cpumask_copy(cpus, mm_cpumask(mm));
mm                310 arch/ia64/kernel/smp.c 			(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
mm                314 arch/ia64/kernel/smp.c 	local_finish_flush_tlb_mm(mm);
mm                 32 arch/ia64/kernel/sys_ia64.c 	struct mm_struct *mm = current->mm;
mm                 40 arch/ia64/kernel/sys_ia64.c 		if (is_hugepage_only_range(mm, addr, len))
mm                 65 arch/ia64/mm/fault.c 	struct mm_struct *mm = current->mm;
mm                 74 arch/ia64/mm/fault.c 	prefetchw(&mm->mmap_sem);
mm                 79 arch/ia64/mm/fault.c 	if (faulthandler_disabled() || !mm)
mm                105 arch/ia64/mm/fault.c 	down_read(&mm->mmap_sem);
mm                107 arch/ia64/mm/fault.c 	vma = find_vma_prev(mm, address, &prev_vma);
mm                182 arch/ia64/mm/fault.c 	up_read(&mm->mmap_sem);
mm                213 arch/ia64/mm/fault.c 	up_read(&mm->mmap_sem);
mm                279 arch/ia64/mm/fault.c 	up_read(&mm->mmap_sem);
mm                 29 arch/ia64/mm/hugetlbpage.c huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
mm                 37 arch/ia64/mm/hugetlbpage.c 	pgd = pgd_offset(mm, taddr);
mm                 38 arch/ia64/mm/hugetlbpage.c 	pud = pud_alloc(mm, pgd, taddr);
mm                 40 arch/ia64/mm/hugetlbpage.c 		pmd = pmd_alloc(mm, pud, taddr);
mm                 42 arch/ia64/mm/hugetlbpage.c 			pte = pte_alloc_map(mm, pmd, taddr);
mm                 48 arch/ia64/mm/hugetlbpage.c huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
mm                 56 arch/ia64/mm/hugetlbpage.c 	pgd = pgd_offset(mm, taddr);
mm                 88 arch/ia64/mm/hugetlbpage.c struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
mm                 96 arch/ia64/mm/hugetlbpage.c 	ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
mm                 93 arch/ia64/mm/init.c 	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
mm                114 arch/ia64/mm/init.c 	vma = vm_area_alloc(current->mm);
mm                121 arch/ia64/mm/init.c 		down_write(&current->mm->mmap_sem);
mm                122 arch/ia64/mm/init.c 		if (insert_vm_struct(current->mm, vma)) {
mm                123 arch/ia64/mm/init.c 			up_write(&current->mm->mmap_sem);
mm                127 arch/ia64/mm/init.c 		up_write(&current->mm->mmap_sem);
mm                132 arch/ia64/mm/init.c 		vma = vm_area_alloc(current->mm);
mm                139 arch/ia64/mm/init.c 			down_write(&current->mm->mmap_sem);
mm                140 arch/ia64/mm/init.c 			if (insert_vm_struct(current->mm, vma)) {
mm                141 arch/ia64/mm/init.c 				up_write(&current->mm->mmap_sem);
mm                145 arch/ia64/mm/init.c 			up_write(&current->mm->mmap_sem);
mm                285 arch/ia64/mm/init.c struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
mm                297 arch/ia64/mm/init.c int in_gate_area(struct mm_struct *mm, unsigned long addr)
mm                 79 arch/ia64/mm/tlb.c wrap_mmu_context (struct mm_struct *mm)
mm                250 arch/ia64/mm/tlb.c ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
mm                257 arch/ia64/mm/tlb.c 	if (mm != active_mm) {
mm                259 arch/ia64/mm/tlb.c 		if (mm && active_mm) {
mm                260 arch/ia64/mm/tlb.c 			activate_context(mm);
mm                282 arch/ia64/mm/tlb.c         if (mm != active_mm) {
mm                315 arch/ia64/mm/tlb.c 	struct mm_struct *mm = vma->vm_mm;
mm                320 arch/ia64/mm/tlb.c 	if (mm != current->active_mm) {
mm                321 arch/ia64/mm/tlb.c 		mm->context = 0;
mm                336 arch/ia64/mm/tlb.c 	if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
mm                337 arch/ia64/mm/tlb.c 		ia64_global_tlb_purge(mm, start, end, nbits);
mm                196 arch/m68k/include/asm/cacheflush_mm.h static inline void flush_cache_mm(struct mm_struct *mm)
mm                198 arch/m68k/include/asm/cacheflush_mm.h 	if (mm == current->mm)
mm                202 arch/m68k/include/asm/cacheflush_mm.h #define flush_cache_dup_mm(mm)			flush_cache_mm(mm)
mm                210 arch/m68k/include/asm/cacheflush_mm.h 	if (vma->vm_mm == current->mm)
mm                216 arch/m68k/include/asm/cacheflush_mm.h 	if (vma->vm_mm == current->mm)
mm                 12 arch/m68k/include/asm/cacheflush_no.h #define flush_cache_mm(mm)			do { } while (0)
mm                 13 arch/m68k/include/asm/cacheflush_no.h #define flush_cache_dup_mm(mm)			do { } while (0)
mm                 13 arch/m68k/include/asm/flat.h 		if (current->mm) \
mm                 14 arch/m68k/include/asm/flat.h 			(regs)->d5 = current->mm->start_data; \
mm                  8 arch/m68k/include/asm/mcf_pgalloc.h extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
mm                 15 arch/m68k/include/asm/mcf_pgalloc.h extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
mm                 31 arch/m68k/include/asm/mcf_pgalloc.h #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
mm                 32 arch/m68k/include/asm/mcf_pgalloc.h #define pmd_alloc_one(mm, address)      ({ BUG(); ((pmd_t *)2); })
mm                 34 arch/m68k/include/asm/mcf_pgalloc.h #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
mm                 37 arch/m68k/include/asm/mcf_pgalloc.h #define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
mm                 50 arch/m68k/include/asm/mcf_pgalloc.h static inline struct page *pte_alloc_one(struct mm_struct *mm)
mm                 74 arch/m68k/include/asm/mcf_pgalloc.h static inline void pte_free(struct mm_struct *mm, struct page *page)
mm                 84 arch/m68k/include/asm/mcf_pgalloc.h #define pmd_free(mm, pmd) BUG()
mm                 86 arch/m68k/include/asm/mcf_pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 91 arch/m68k/include/asm/mcf_pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                103 arch/m68k/include/asm/mcf_pgalloc.h #define pgd_populate(mm, pmd, pte) BUG()
mm                185 arch/m68k/include/asm/mcf_pgtable.h static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
mm                335 arch/m68k/include/asm/mcf_pgtable.h #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
mm                  8 arch/m68k/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                 32 arch/m68k/include/asm/mmu_context.h static inline void get_mmu_context(struct mm_struct *mm)
mm                 36 arch/m68k/include/asm/mmu_context.h 	if (mm->context != NO_CONTEXT)
mm                 49 arch/m68k/include/asm/mmu_context.h 	mm->context = ctx;
mm                 50 arch/m68k/include/asm/mmu_context.h 	context_mm[ctx] = mm;
mm                 56 arch/m68k/include/asm/mmu_context.h #define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)
mm                 61 arch/m68k/include/asm/mmu_context.h static inline void destroy_context(struct mm_struct *mm)
mm                 63 arch/m68k/include/asm/mmu_context.h 	if (mm->context != NO_CONTEXT) {
mm                 64 arch/m68k/include/asm/mmu_context.h 		clear_bit(mm->context, context_map);
mm                 65 arch/m68k/include/asm/mmu_context.h 		mm->context = NO_CONTEXT;
mm                 78 arch/m68k/include/asm/mmu_context.h 	get_mmu_context(tsk->mm);
mm                 79 arch/m68k/include/asm/mmu_context.h 	set_context(tsk->mm->context, next->pgd);
mm                 87 arch/m68k/include/asm/mmu_context.h 	struct mm_struct *mm)
mm                 89 arch/m68k/include/asm/mmu_context.h 	get_mmu_context(mm);
mm                 90 arch/m68k/include/asm/mmu_context.h 	set_context(mm->context, mm->pgd);
mm                 93 arch/m68k/include/asm/mmu_context.h #define deactivate_mm(tsk, mm) do { } while (0)
mm                100 arch/m68k/include/asm/mmu_context.h 	struct mm_struct *mm;
mm                117 arch/m68k/include/asm/mmu_context.h 		mm = &init_mm;
mm                119 arch/m68k/include/asm/mmu_context.h 		pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm);
mm                120 arch/m68k/include/asm/mmu_context.h 		mm = task->mm;
mm                123 arch/m68k/include/asm/mmu_context.h 	if (!mm)
mm                126 arch/m68k/include/asm/mmu_context.h 	pgd = pgd_offset(mm, mmuar);
mm                140 arch/m68k/include/asm/mmu_context.h 	asid = mm->context & 0xff;
mm                156 arch/m68k/include/asm/mmu_context.h 	pr_info("ksp load failed: mm=0x%p ksp=0x%08lx\n", mm, mmuar);
mm                165 arch/m68k/include/asm/mmu_context.h extern unsigned long get_free_context(struct mm_struct *mm);
mm                170 arch/m68k/include/asm/mmu_context.h 				   struct mm_struct *mm)
mm                172 arch/m68k/include/asm/mmu_context.h 	mm->context = SUN3_INVALID_CONTEXT;
mm                178 arch/m68k/include/asm/mmu_context.h static inline void get_mmu_context(struct mm_struct *mm)
mm                180 arch/m68k/include/asm/mmu_context.h 	if (mm->context == SUN3_INVALID_CONTEXT)
mm                181 arch/m68k/include/asm/mmu_context.h 		mm->context = get_free_context(mm);
mm                185 arch/m68k/include/asm/mmu_context.h static inline void destroy_context(struct mm_struct *mm)
mm                187 arch/m68k/include/asm/mmu_context.h 	if (mm->context != SUN3_INVALID_CONTEXT)
mm                188 arch/m68k/include/asm/mmu_context.h 		clear_context(mm->context);
mm                191 arch/m68k/include/asm/mmu_context.h static inline void activate_context(struct mm_struct *mm)
mm                193 arch/m68k/include/asm/mmu_context.h 	get_mmu_context(mm);
mm                194 arch/m68k/include/asm/mmu_context.h 	sun3_put_context(mm->context);
mm                200 arch/m68k/include/asm/mmu_context.h 	activate_context(tsk->mm);
mm                203 arch/m68k/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)	do { } while (0)
mm                218 arch/m68k/include/asm/mmu_context.h 				   struct mm_struct *mm)
mm                220 arch/m68k/include/asm/mmu_context.h 	mm->context = virt_to_phys(mm->pgd);
mm                224 arch/m68k/include/asm/mmu_context.h #define destroy_context(mm)		do { } while(0)
mm                226 arch/m68k/include/asm/mmu_context.h static inline void switch_mm_0230(struct mm_struct *mm)
mm                229 arch/m68k/include/asm/mmu_context.h 		0x80000000 | _PAGE_TABLE, mm->context
mm                259 arch/m68k/include/asm/mmu_context.h static inline void switch_mm_0460(struct mm_struct *mm)
mm                267 arch/m68k/include/asm/mmu_context.h 	asm volatile ("movec %0,%%urp" : : "r" (mm->context));
mm                293 arch/m68k/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
mm                310 arch/m68k/include/asm/mmu_context.h static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm                320 arch/m68k/include/asm/mmu_context.h #define destroy_context(mm)	do { } while (0)
mm                321 arch/m68k/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
mm                 11 arch/m68k/include/asm/motorola_pgalloc.h static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
mm                 25 arch/m68k/include/asm/motorola_pgalloc.h static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
mm                 31 arch/m68k/include/asm/motorola_pgalloc.h static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
mm                 52 arch/m68k/include/asm/motorola_pgalloc.h static inline void pte_free(struct mm_struct *mm, pgtable_t page)
mm                 70 arch/m68k/include/asm/motorola_pgalloc.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
mm                 75 arch/m68k/include/asm/motorola_pgalloc.h static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                 87 arch/m68k/include/asm/motorola_pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 89 arch/m68k/include/asm/motorola_pgalloc.h 	pmd_free(mm, (pmd_t *)pgd);
mm                 92 arch/m68k/include/asm/motorola_pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 98 arch/m68k/include/asm/motorola_pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
mm                103 arch/m68k/include/asm/motorola_pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
mm                109 arch/m68k/include/asm/motorola_pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
mm                132 arch/m68k/include/asm/motorola_pgtable.h #define pte_clear(mm,addr,ptep)		({ pte_val(*(ptep)) = 0; })
mm                196 arch/m68k/include/asm/motorola_pgtable.h static inline pgd_t *pgd_offset(const struct mm_struct *mm,
mm                199 arch/m68k/include/asm/motorola_pgtable.h 	return mm->pgd + pgd_index(address);
mm                 29 arch/m68k/include/asm/pgtable_mm.h #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
mm                 20 arch/m68k/include/asm/sun3_pgalloc.h #define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
mm                 28 arch/m68k/include/asm/sun3_pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
mm                 33 arch/m68k/include/asm/sun3_pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
mm                 43 arch/m68k/include/asm/sun3_pgalloc.h #define pmd_free(mm, x)			do { } while (0)
mm                 46 arch/m68k/include/asm/sun3_pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 51 arch/m68k/include/asm/sun3_pgalloc.h static inline pgd_t * pgd_alloc(struct mm_struct *mm)
mm                 61 arch/m68k/include/asm/sun3_pgalloc.h #define pgd_populate(mm, pmd, pte) BUG()
mm                125 arch/m68k/include/asm/sun3_pgtable.h static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                191 arch/m68k/include/asm/sun3_pgtable.h #define pgd_offset(mm, address) \
mm                192 arch/m68k/include/asm/sun3_pgtable.h ((mm)->pgd + pgd_index(address))
mm                 79 arch/m68k/include/asm/tlbflush.h static inline void flush_tlb_mm(struct mm_struct *mm)
mm                 81 arch/m68k/include/asm/tlbflush.h 	if (mm == current->active_mm)
mm                145 arch/m68k/include/asm/tlbflush.h static inline void flush_tlb_mm (struct mm_struct *mm)
mm                152 arch/m68k/include/asm/tlbflush.h      sun3_put_context(mm->context);
mm                195 arch/m68k/include/asm/tlbflush.h 	struct mm_struct *mm = vma->vm_mm;
mm                201 arch/m68k/include/asm/tlbflush.h 	sun3_put_context(mm->context);
mm                207 arch/m68k/include/asm/tlbflush.h 		if(pmeg_ctx[seg] == mm->context) {
mm                256 arch/m68k/include/asm/tlbflush.h static inline void flush_tlb_mm(struct mm_struct *mm)
mm                266 arch/m68k/include/asm/tlbflush.h static inline void flush_tlb_range(struct mm_struct *mm,
mm                 27 arch/m68k/kernel/asm-offsets.c 	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
mm                188 arch/m68k/kernel/ptrace.c 			tmp = child->mm->start_code;
mm                190 arch/m68k/kernel/ptrace.c 			tmp = child->mm->start_data;
mm                192 arch/m68k/kernel/ptrace.c 			tmp = child->mm->end_code;
mm                402 arch/m68k/kernel/sys_m68k.c 		down_read(&current->mm->mmap_sem);
mm                403 arch/m68k/kernel/sys_m68k.c 		vma = find_vma(current->mm, addr);
mm                453 arch/m68k/kernel/sys_m68k.c 	up_read(&current->mm->mmap_sem);
mm                466 arch/m68k/kernel/sys_m68k.c 		struct mm_struct *mm = current->mm;
mm                473 arch/m68k/kernel/sys_m68k.c 		down_read(&mm->mmap_sem);
mm                474 arch/m68k/kernel/sys_m68k.c 		pgd = pgd_offset(mm, (unsigned long)mem);
mm                480 arch/m68k/kernel/sys_m68k.c 		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
mm                496 arch/m68k/kernel/sys_m68k.c 		up_read(&mm->mmap_sem);
mm                500 arch/m68k/kernel/sys_m68k.c 		up_read(&mm->mmap_sem);
mm                537 arch/m68k/kernel/sys_m68k.c 	struct mm_struct *mm = current->mm;
mm                540 arch/m68k/kernel/sys_m68k.c 	down_read(&mm->mmap_sem);
mm                546 arch/m68k/kernel/sys_m68k.c 	up_read(&mm->mmap_sem);
mm                 71 arch/m68k/mm/fault.c 	struct mm_struct *mm = current->mm;
mm                 77 arch/m68k/mm/fault.c 		regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);
mm                 83 arch/m68k/mm/fault.c 	if (faulthandler_disabled() || !mm)
mm                 89 arch/m68k/mm/fault.c 	down_read(&mm->mmap_sem);
mm                 91 arch/m68k/mm/fault.c 	vma = find_vma(mm, address);
mm                180 arch/m68k/mm/fault.c 	up_read(&mm->mmap_sem);
mm                188 arch/m68k/mm/fault.c 	up_read(&mm->mmap_sem);
mm                217 arch/m68k/mm/fault.c 	up_read(&mm->mmap_sem);
mm                 82 arch/m68k/mm/mcfmmu.c 	current->mm = NULL;
mm                 93 arch/m68k/mm/mcfmmu.c 	struct mm_struct *mm;
mm                104 arch/m68k/mm/mcfmmu.c 	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
mm                105 arch/m68k/mm/mcfmmu.c 	if (!mm) {
mm                110 arch/m68k/mm/mcfmmu.c 	pgd = pgd_offset(mm, mmuar);
mm                138 arch/m68k/mm/mcfmmu.c 	asid = mm->context & 0xff;
mm                219 arch/m68k/mm/mcfmmu.c 	struct mm_struct *mm;
mm                226 arch/m68k/mm/mcfmmu.c 	mm = context_mm[next_mmu_context];
mm                227 arch/m68k/mm/mcfmmu.c 	flush_tlb_mm(mm);
mm                228 arch/m68k/mm/mcfmmu.c 	destroy_context(mm);
mm                 89 arch/m68k/mm/sun3mmu.c 	current->mm = NULL;
mm                246 arch/m68k/sun3/mmu_emu.c unsigned long get_free_context(struct mm_struct *mm)
mm                270 arch/m68k/sun3/mmu_emu.c 	ctx_alloc[new] = mm;
mm                363 arch/m68k/sun3/mmu_emu.c 	if(current->mm == NULL) {
mm                367 arch/m68k/sun3/mmu_emu.c 		context = current->mm->context;
mm                371 arch/m68k/sun3/mmu_emu.c 			crp = current->mm->pgd;
mm                 87 arch/microblaze/include/asm/cacheflush.h #define flush_cache_dup_mm(mm)				do { } while (0)
mm                 90 arch/microblaze/include/asm/cacheflush.h #define flush_cache_mm(mm)			do { } while (0)
mm                 39 arch/microblaze/include/asm/mmu_context_mm.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                 84 arch/microblaze/include/asm/mmu_context_mm.h static inline void get_mmu_context(struct mm_struct *mm)
mm                 88 arch/microblaze/include/asm/mmu_context_mm.h 	if (mm->context != NO_CONTEXT)
mm                 99 arch/microblaze/include/asm/mmu_context_mm.h 	mm->context = ctx;
mm                100 arch/microblaze/include/asm/mmu_context_mm.h 	context_mm[ctx] = mm;
mm                106 arch/microblaze/include/asm/mmu_context_mm.h # define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)
mm                111 arch/microblaze/include/asm/mmu_context_mm.h static inline void destroy_context(struct mm_struct *mm)
mm                113 arch/microblaze/include/asm/mmu_context_mm.h 	if (mm->context != NO_CONTEXT) {
mm                114 arch/microblaze/include/asm/mmu_context_mm.h 		clear_bit(mm->context, context_map);
mm                115 arch/microblaze/include/asm/mmu_context_mm.h 		mm->context = NO_CONTEXT;
mm                133 arch/microblaze/include/asm/mmu_context_mm.h 			struct mm_struct *mm)
mm                135 arch/microblaze/include/asm/mmu_context_mm.h 	current->thread.pgdir = mm->pgd;
mm                136 arch/microblaze/include/asm/mmu_context_mm.h 	get_mmu_context(mm);
mm                137 arch/microblaze/include/asm/mmu_context_mm.h 	set_context(mm->context, mm->pgd);
mm                 39 arch/microblaze/include/asm/pgalloc.h #define pgd_free(mm, pgd)	free_pgd(pgd)
mm                 40 arch/microblaze/include/asm/pgalloc.h #define pgd_alloc(mm)		get_pgd()
mm                 48 arch/microblaze/include/asm/pgalloc.h #define pmd_alloc_one_fast(mm, address)	({ BUG(); ((pmd_t *)1); })
mm                 49 arch/microblaze/include/asm/pgalloc.h #define pmd_alloc_one(mm, address)	({ BUG(); ((pmd_t *)2); })
mm                 51 arch/microblaze/include/asm/pgalloc.h extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
mm                 53 arch/microblaze/include/asm/pgalloc.h #define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, (pte))
mm                 55 arch/microblaze/include/asm/pgalloc.h #define pmd_populate(mm, pmd, pte) \
mm                 58 arch/microblaze/include/asm/pgalloc.h #define pmd_populate_kernel(mm, pmd, pte) \
mm                 65 arch/microblaze/include/asm/pgalloc.h #define pmd_alloc_one(mm, address)	({ BUG(); ((pmd_t *)2); })
mm                 66 arch/microblaze/include/asm/pgalloc.h #define pmd_free(mm, x)			do { } while (0)
mm                 67 arch/microblaze/include/asm/pgalloc.h #define __pmd_free_tlb(tlb, x, addr)	pmd_free((tlb)->mm, x)
mm                 68 arch/microblaze/include/asm/pgalloc.h #define pgd_populate(mm, pmd, pte)	BUG()
mm                298 arch/microblaze/include/asm/pgtable.h #define pte_clear(mm, addr, ptep) \
mm                299 arch/microblaze/include/asm/pgtable.h 	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
mm                420 arch/microblaze/include/asm/pgtable.h static inline void set_pte(struct mm_struct *mm, unsigned long addr,
mm                426 arch/microblaze/include/asm/pgtable.h static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                439 arch/microblaze/include/asm/pgtable.h static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
mm                447 arch/microblaze/include/asm/pgtable.h static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
mm                459 arch/microblaze/include/asm/pgtable.h static inline void ptep_mkdirty(struct mm_struct *mm,
mm                480 arch/microblaze/include/asm/pgtable.h #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
mm                129 arch/microblaze/include/asm/processor.h #  define deactivate_mm(tsk, mm)	do { } while (0)
mm                 31 arch/microblaze/include/asm/tlbflush.h static inline void local_flush_tlb_mm(struct mm_struct *mm)
mm                 54 arch/microblaze/include/asm/tlbflush.h static inline void flush_tlb_pgtables(struct mm_struct *mm,
mm                 61 arch/microblaze/include/asm/tlbflush.h #define flush_tlb_mm(mm)			BUG()
mm                 63 arch/microblaze/include/asm/tlbflush.h #define flush_tlb_range(mm, start, end)		BUG()
mm                 64 arch/microblaze/include/asm/tlbflush.h #define flush_tlb_pgtables(mm, start, end)	BUG()
mm                 78 arch/microblaze/kernel/asm-offsets.c 	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
mm                 96 arch/microblaze/kernel/ptrace.c 				val = child->mm->start_code;
mm                 98 arch/microblaze/kernel/ptrace.c 				val = child->mm->start_data;
mm                100 arch/microblaze/kernel/ptrace.c 				val = child->mm->end_code
mm                101 arch/microblaze/kernel/ptrace.c 					- child->mm->start_code;
mm                199 arch/microblaze/kernel/signal.c 			pgd_offset(current->mm, address),
mm                 90 arch/microblaze/mm/fault.c 	struct mm_struct *mm = current->mm;
mm                109 arch/microblaze/mm/fault.c 	if (unlikely(faulthandler_disabled() || !mm)) {
mm                116 arch/microblaze/mm/fault.c 			 mm);
mm                140 arch/microblaze/mm/fault.c 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
mm                145 arch/microblaze/mm/fault.c 		down_read(&mm->mmap_sem);
mm                148 arch/microblaze/mm/fault.c 	vma = find_vma(mm, address);
mm                252 arch/microblaze/mm/fault.c 	up_read(&mm->mmap_sem);
mm                263 arch/microblaze/mm/fault.c 	up_read(&mm->mmap_sem);
mm                282 arch/microblaze/mm/fault.c 	up_read(&mm->mmap_sem);
mm                290 arch/microblaze/mm/fault.c 	up_read(&mm->mmap_sem);
mm                 56 arch/microblaze/mm/mmu_context.c 	struct mm_struct *mm;
mm                 62 arch/microblaze/mm/mmu_context.c 	mm = context_mm[next_mmu_context];
mm                 63 arch/microblaze/mm/mmu_context.c 	flush_tlb_mm(mm);
mm                 64 arch/microblaze/mm/mmu_context.c 	destroy_context(mm);
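This microblaze steal_context() path, like the m68k ColdFire one earlier in the listing, recycles a fixed pool of hardware contexts by theft: when none are free, the next victim is looked up in context_mm[], its TLB entries are flushed, and its context is destroyed so the slot can be reassigned. A compact sketch of that allocate-or-steal loop; the round-robin victim choice mirrors the indexed code, while the names and pool size are illustrative.

	#include <stdio.h>
	#include <stddef.h>

	#define NR_CTX 4

	struct mm { int context; };		/* -1 plays NO_CONTEXT */

	static struct mm *context_mm[NR_CTX];	/* owner of each hardware context */
	static unsigned int next_victim;

	static void steal_context(void)
	{
		struct mm *victim = context_mm[next_victim];

		printf("flush TLB for stolen context %u\n", next_victim);
		victim->context = -1;		/* destroy_context() equivalent */
		context_mm[next_victim] = NULL;
	}

	static void get_mmu_context(struct mm *mm)
	{
		unsigned int ctx;

		if (mm->context != -1)
			return;			/* already owns a context */
		for (ctx = 0; ctx < NR_CTX; ctx++)
			if (!context_mm[ctx])
				goto got;
		ctx = next_victim;		/* pool exhausted: steal one */
		steal_context();
		next_victim = (next_victim + 1) % NR_CTX;
	got:
		mm->context = ctx;
		context_mm[ctx] = mm;
	}

	int main(void)
	{
		struct mm mms[5] = { {-1}, {-1}, {-1}, {-1}, {-1} };
		int i;

		for (i = 0; i < 5; i++)
			get_mmu_context(&mms[i]);	/* fifth call steals */
		printf("mm0 after theft: %d\n", mms[0].context);
		return 0;
	}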
mm                188 arch/microblaze/mm/pgtable.c static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
mm                195 arch/microblaze/mm/pgtable.c 	pgd = pgd_offset(mm, addr & PAGE_MASK);
mm                217 arch/microblaze/mm/pgtable.c 	struct mm_struct *mm;
mm                223 arch/microblaze/mm/pgtable.c 		mm = current->mm;
mm                225 arch/microblaze/mm/pgtable.c 		mm = &init_mm;
mm                228 arch/microblaze/mm/pgtable.c 	if (get_pteptr(mm, addr, &pte))
mm                234 arch/microblaze/mm/pgtable.c __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
mm                 48 arch/mips/include/asm/cacheflush.h extern void (*flush_cache_mm)(struct mm_struct *mm);
mm                 49 arch/mips/include/asm/cacheflush.h #define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
mm                107 arch/mips/include/asm/dsemul.h extern void dsemul_mm_cleanup(struct mm_struct *mm);
mm                109 arch/mips/include/asm/dsemul.h static inline void dsemul_mm_cleanup(struct mm_struct *mm)
mm                476 arch/mips/include/asm/elf.h 		    (unsigned long)current->mm->context.vdso);		\
mm                 14 arch/mips/include/asm/hugetlb.h static inline int is_hugepage_only_range(struct mm_struct *mm,
mm                 41 arch/mips/include/asm/hugetlb.h static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
mm                 48 arch/mips/include/asm/hugetlb.h 	set_pte_at(mm, addr, ptep, clear);
mm                106 arch/mips/include/asm/mmu_context.h static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
mm                109 arch/mips/include/asm/mmu_context.h 		return atomic64_read(&mm->context.mmid);
mm                111 arch/mips/include/asm/mmu_context.h 	return mm->context.asid[cpu];
mm                115 arch/mips/include/asm/mmu_context.h 				   struct mm_struct *mm, u64 ctx)
mm                118 arch/mips/include/asm/mmu_context.h 		atomic64_set(&mm->context.mmid, ctx);
mm                120 arch/mips/include/asm/mmu_context.h 		mm->context.asid[cpu] = ctx;
mm                124 arch/mips/include/asm/mmu_context.h #define cpu_asid(cpu, mm) \
mm                125 arch/mips/include/asm/mmu_context.h 	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
mm                127 arch/mips/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                131 arch/mips/include/asm/mmu_context.h extern void get_new_mmu_context(struct mm_struct *mm);
mm                132 arch/mips/include/asm/mmu_context.h extern void check_mmu_context(struct mm_struct *mm);
mm                133 arch/mips/include/asm/mmu_context.h extern void check_switch_mmu_context(struct mm_struct *mm);
mm                140 arch/mips/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm                145 arch/mips/include/asm/mmu_context.h 		set_cpu_context(0, mm, 0);
mm                148 arch/mips/include/asm/mmu_context.h 			set_cpu_context(i, mm, 0);
mm                151 arch/mips/include/asm/mmu_context.h 	mm->context.bd_emupage_allocmap = NULL;
mm                152 arch/mips/include/asm/mmu_context.h 	spin_lock_init(&mm->context.bd_emupage_lock);
mm                153 arch/mips/include/asm/mmu_context.h 	init_waitqueue_head(&mm->context.bd_emupage_queue);
mm                183 arch/mips/include/asm/mmu_context.h static inline void destroy_context(struct mm_struct *mm)
mm                185 arch/mips/include/asm/mmu_context.h 	dsemul_mm_cleanup(mm);
mm                189 arch/mips/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)	do { } while (0)
mm                192 arch/mips/include/asm/mmu_context.h drop_mmu_context(struct mm_struct *mm)
mm                202 arch/mips/include/asm/mmu_context.h 	ctx = cpu_context(cpu, mm);
mm                223 arch/mips/include/asm/mmu_context.h 	} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
mm                229 arch/mips/include/asm/mmu_context.h 		get_new_mmu_context(mm);
mm                230 arch/mips/include/asm/mmu_context.h 		write_c0_entryhi(cpu_asid(cpu, mm));
mm                234 arch/mips/include/asm/mmu_context.h 		set_cpu_context(cpu, mm, 0);
mm                 18 arch/mips/include/asm/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
mm                 24 arch/mips/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
mm                 38 arch/mips/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
mm                 48 arch/mips/include/asm/pgalloc.h extern pgd_t *pgd_alloc(struct mm_struct *mm);
mm                 50 arch/mips/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 63 arch/mips/include/asm/pgalloc.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
mm                 73 arch/mips/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                 78 arch/mips/include/asm/pgalloc.h #define __pmd_free_tlb(tlb, x, addr)	pmd_free((tlb)->mm, x)
mm                 84 arch/mips/include/asm/pgalloc.h static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
mm                 94 arch/mips/include/asm/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
mm                 99 arch/mips/include/asm/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
mm                104 arch/mips/include/asm/pgalloc.h #define __pud_free_tlb(tlb, x, addr)	pud_free((tlb)->mm, x)
mm                210 arch/mips/include/asm/pgtable-32.h #define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
mm                333 arch/mips/include/asm/pgtable-64.h #define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
mm                133 arch/mips/include/asm/pgtable.h static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                171 arch/mips/include/asm/pgtable.h static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                185 arch/mips/include/asm/pgtable.h 	set_pte_at(mm, addr, ptep, null);
mm                218 arch/mips/include/asm/pgtable.h static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                224 arch/mips/include/asm/pgtable.h 		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
mm                227 arch/mips/include/asm/pgtable.h 		set_pte_at(mm, addr, ptep, __pte(0));
mm                232 arch/mips/include/asm/pgtable.h static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                529 arch/mips/include/asm/pgtable.h extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm                630 arch/mips/include/asm/pgtable.h static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
mm                 40 arch/mips/include/asm/tlbflush.h #define flush_tlb_mm(mm)		drop_mmu_context(mm)
mm                 84 arch/mips/kernel/asm-offsets.c 	OFFSET(TASK_MM, task_struct, mm);
mm                 44 arch/mips/kernel/pm.c 	if (current->mm)
mm                 45 arch/mips/kernel/pm.c 		write_c0_entryhi(cpu_asid(cpu, current->mm));
mm                806 arch/mips/kernel/signal.c 	void *vdso = current->mm->context.vdso;
mm                488 arch/mips/kernel/smp.c static void flush_tlb_mm_ipi(void *mm)
mm                490 arch/mips/kernel/smp.c 	drop_mmu_context((struct mm_struct *)mm);
mm                529 arch/mips/kernel/smp.c void flush_tlb_mm(struct mm_struct *mm)
mm                538 arch/mips/kernel/smp.c 	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
mm                539 arch/mips/kernel/smp.c 		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
mm                544 arch/mips/kernel/smp.c 			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
mm                545 arch/mips/kernel/smp.c 				set_cpu_context(cpu, mm, 0);
mm                548 arch/mips/kernel/smp.c 	drop_mmu_context(mm);
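flush_tlb_mm() in the mips smp.c lines makes the classic shootdown trade-off visible: if the mm may be live elsewhere (more than one user, or not the caller's own mm), broadcast the flush to other CPUs; otherwise just zero the remote CPUs' contexts so they lazily pick up fresh ASIDs at their next switch, and only flush locally. A sketch of that decision; the IPI is reduced to a direct call and all names are illustrative.

	#include <stdio.h>

	#define NR_CPUS 4

	struct mm {
		int users;
		unsigned long context[NR_CPUS];	/* per-CPU ASID, 0 == none */
	};

	static int this_cpu;			/* pretend smp_processor_id() */

	static void drop_context(struct mm *mm, int cpu)
	{
		printf("cpu%d: drop ASID %lu\n", cpu, mm->context[cpu]);
		mm->context[cpu] = 0;
	}

	static void flush_tlb_mm(struct mm *mm, int mm_is_current)
	{
		int cpu;

		if (mm->users != 1 || !mm_is_current) {
			/* Costly path: tell every CPU to drop its entries. */
			for (cpu = 0; cpu < NR_CPUS; cpu++)
				drop_context(mm, cpu);
			return;
		}
		/* Cheap path: invalidate remote ASIDs lazily, flush only here. */
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu != this_cpu && mm->context[cpu])
				mm->context[cpu] = 0;
		drop_context(mm, this_cpu);
	}

	int main(void)
	{
		struct mm mm = { .users = 1, .context = { 7, 9, 0, 0 } };

		flush_tlb_mm(&mm, 1);		/* takes the IPI-free path */
		return 0;
	}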
mm                568 arch/mips/kernel/smp.c 	struct mm_struct *mm = vma->vm_mm;
mm                576 arch/mips/kernel/smp.c 		write_c0_memorymapid(cpu_asid(0, mm));
mm                588 arch/mips/kernel/smp.c 	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
mm                608 arch/mips/kernel/smp.c 			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
mm                609 arch/mips/kernel/smp.c 				set_cpu_context(cpu, mm, !exec);
mm                656 arch/mips/kernel/smp.c 		   (current->mm != vma->vm_mm)) {
mm                757 arch/mips/kernel/traps.c 		down_read(&current->mm->mmap_sem);
mm                758 arch/mips/kernel/traps.c 		vma = find_vma(current->mm, (unsigned long)fault_addr);
mm                763 arch/mips/kernel/traps.c 		up_read(&current->mm->mmap_sem);
mm               2219 arch/mips/kernel/traps.c 	BUG_ON(current->mm);
mm                 28 arch/mips/kernel/uprobes.c 	struct mm_struct *mm, unsigned long addr)
mm                221 arch/mips/kernel/uprobes.c int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
mm                224 arch/mips/kernel/uprobes.c 	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
mm                 90 arch/mips/kernel/vdso.c 	struct mm_struct *mm = current->mm;
mm                 95 arch/mips/kernel/vdso.c 	if (down_write_killable(&mm->mmap_sem))
mm                147 arch/mips/kernel/vdso.c 	vma = _install_special_mapping(mm, base, vvar_size,
mm                173 arch/mips/kernel/vdso.c 	vma = _install_special_mapping(mm, vdso_addr, image->size,
mm                182 arch/mips/kernel/vdso.c 	mm->context.vdso = (void *)vdso_addr;
mm                186 arch/mips/kernel/vdso.c 	up_write(&mm->mmap_sem);
mm                657 arch/mips/kvm/tlb.c 	cpumask_set_cpu(cpu, mm_cpumask(current->mm));
mm                658 arch/mips/kvm/tlb.c 	current->active_mm = current->mm;
mm               1051 arch/mips/kvm/trap_emul.c 	struct mm_struct *mm;
mm               1058 arch/mips/kvm/trap_emul.c 		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
mm               1059 arch/mips/kvm/trap_emul.c 		check_switch_mmu_context(mm);
mm               1073 arch/mips/kvm/trap_emul.c 		check_switch_mmu_context(current->mm);
mm               1086 arch/mips/kvm/trap_emul.c 	struct mm_struct *mm;
mm               1107 arch/mips/kvm/trap_emul.c 			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
mm               1108 arch/mips/kvm/trap_emul.c 			get_new_mmu_context(mm);
mm               1110 arch/mips/kvm/trap_emul.c 			write_c0_entryhi(cpu_asid(cpu, mm));
mm               1111 arch/mips/kvm/trap_emul.c 			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
mm               1187 arch/mips/kvm/trap_emul.c 	struct mm_struct *mm;
mm               1200 arch/mips/kvm/trap_emul.c 		mm = kern_mm;
mm               1202 arch/mips/kvm/trap_emul.c 		mm = user_mm;
mm               1223 arch/mips/kvm/trap_emul.c 	check_mmu_context(mm);
mm               1259 arch/mips/kvm/trap_emul.c 	check_switch_mmu_context(current->mm);
mm                 77 arch/mips/math-emu/dsemul.c 	mm_context_t *mm_ctx = &current->mm->context;
mm                125 arch/mips/math-emu/dsemul.c static void free_emuframe(int idx, struct mm_struct *mm)
mm                127 arch/mips/math-emu/dsemul.c 	mm_context_t *mm_ctx = &mm->context;
mm                166 arch/mips/math-emu/dsemul.c 	if (tsk->mm)
mm                167 arch/mips/math-emu/dsemul.c 		free_emuframe(fr_idx, tsk->mm);
mm                201 arch/mips/math-emu/dsemul.c 	free_emuframe(fr_idx, current->mm);
mm                205 arch/mips/math-emu/dsemul.c void dsemul_mm_cleanup(struct mm_struct *mm)
mm                207 arch/mips/math-emu/dsemul.c 	mm_context_t *mm_ctx = &mm->context;
mm                280 arch/mips/math-emu/dsemul.c 		free_emuframe(fr_idx, current->mm);
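The math-emu dsemul lines manage a per-mm pool of delay-slot emulation frames: a bitmap (bd_emupage_allocmap) tracks which frames are taken, a lock guards it, and a waitqueue parks allocators when the pool is full. A minimal bitmap allocator in that shape, with a pthread mutex standing in for the spinlock and the waitqueue left out; every name is invented for illustration.

	#include <pthread.h>
	#include <stdio.h>

	#define NFRAMES 32

	static unsigned int allocmap;		/* bit set == frame in use */
	static pthread_mutex_t maplock = PTHREAD_MUTEX_INITIALIZER;

	static int alloc_frame(void)
	{
		int i, idx = -1;

		pthread_mutex_lock(&maplock);
		for (i = 0; i < NFRAMES; i++)
			if (!(allocmap & (1u << i))) {
				allocmap |= 1u << i;
				idx = i;
				break;
			}
		pthread_mutex_unlock(&maplock);
		return idx;		/* -1 means full; the kernel would sleep */
	}

	static void free_frame(int idx)
	{
		pthread_mutex_lock(&maplock);
		allocmap &= ~(1u << idx);
		pthread_mutex_unlock(&maplock);
	}

	int main(void)
	{
		int fr = alloc_frame();

		printf("got frame %d\n", fr);
		free_frame(fr);
		return 0;
	}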
mm                111 arch/mips/mm/c-octeon.c static void octeon_flush_cache_mm(struct mm_struct *mm)
mm                228 arch/mips/mm/c-r3k.c static void r3k_flush_cache_mm(struct mm_struct *mm)
mm                242 arch/mips/mm/c-r3k.c 	struct mm_struct *mm = vma->vm_mm;
mm                249 arch/mips/mm/c-r3k.c 		 cpu_context(smp_processor_id(), mm), addr);
mm                252 arch/mips/mm/c-r3k.c 	if (cpu_context(smp_processor_id(), mm) == 0)
mm                255 arch/mips/mm/c-r3k.c 	pgdp = pgd_offset(mm, addr);
mm                538 arch/mips/mm/c-r4k.c static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
mm                544 arch/mips/mm/c-r4k.c 		return cpu_context(0, mm) != 0;
mm                557 arch/mips/mm/c-r4k.c 		if (cpu_context(i, mm))
mm                607 arch/mips/mm/c-r4k.c 	struct mm_struct *mm = args;
mm                609 arch/mips/mm/c-r4k.c 	if (!has_valid_asid(mm, R4K_INDEX))
mm                629 arch/mips/mm/c-r4k.c static void r4k_flush_cache_mm(struct mm_struct *mm)
mm                634 arch/mips/mm/c-r4k.c 	r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
mm                650 arch/mips/mm/c-r4k.c 	struct mm_struct *mm = vma->vm_mm;
mm                662 arch/mips/mm/c-r4k.c 	if (!has_valid_asid(mm, R4K_HIT))
mm                666 arch/mips/mm/c-r4k.c 	pgdp = pgd_offset(mm, addr);
mm                678 arch/mips/mm/c-r4k.c 	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
mm                702 arch/mips/mm/c-r4k.c 		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
mm                703 arch/mips/mm/c-r4k.c 			drop_mmu_context(mm);
mm                148 arch/mips/mm/c-tx39.c static void tx39_flush_cache_mm(struct mm_struct *mm)
mm                153 arch/mips/mm/c-tx39.c 	if (cpu_context(smp_processor_id(), mm) != 0)
mm                171 arch/mips/mm/c-tx39.c 	struct mm_struct *mm = vma->vm_mm;
mm                181 arch/mips/mm/c-tx39.c 	if (cpu_context(smp_processor_id(), mm) == 0)
mm                185 arch/mips/mm/c-tx39.c 	pgdp = pgd_offset(mm, page);
mm                203 arch/mips/mm/c-tx39.c 	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
mm                 29 arch/mips/mm/cache.c void (*flush_cache_mm)(struct mm_struct *mm);
mm                 21 arch/mips/mm/context.c void get_new_mmu_context(struct mm_struct *mm)
mm                 42 arch/mips/mm/context.c 	set_cpu_context(cpu, mm, asid);
mm                 47 arch/mips/mm/context.c void check_mmu_context(struct mm_struct *mm)
mm                 59 arch/mips/mm/context.c 	if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
mm                 60 arch/mips/mm/context.c 		get_new_mmu_context(mm);
mm                124 arch/mips/mm/context.c static u64 get_new_mmid(struct mm_struct *mm)
mm                129 arch/mips/mm/context.c 	mmid = cpu_context(0, mm);
mm                175 arch/mips/mm/context.c 	set_cpu_context(0, mm, mmid);
mm                179 arch/mips/mm/context.c void check_switch_mmu_context(struct mm_struct *mm)
mm                186 arch/mips/mm/context.c 		check_mmu_context(mm);
mm                187 arch/mips/mm/context.c 		write_c0_entryhi(cpu_asid(cpu, mm));
mm                209 arch/mips/mm/context.c 	ctx = cpu_context(cpu, mm);
mm                216 arch/mips/mm/context.c 		ctx = cpu_context(cpu, mm);
mm                218 arch/mips/mm/context.c 			ctx = get_new_mmid(mm);
mm                263 arch/mips/mm/context.c 	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
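
The arch/mips/mm/context.c entries above implement the ASID scheme: cpu_context(cpu, mm) caches a (version | asid) word, check_mmu_context() compares its version bits against the per-cpu asid_cache, and get_new_mmu_context() hands out the next ASID, bumping the version (and flushing the TLB) on wrap. The sketch below is a compilable model of that scheme; the 8-bit ASID width and the simplified single-cpu helpers are assumptions, not MIPS's actual parameters.

    #include <stdio.h>

    #define ASID_BITS    8                      /* assumed width */
    #define ASID_MASK    ((1UL << ASID_BITS) - 1)

    static unsigned long asid_cache = 1UL << ASID_BITS;    /* per-cpu in the kernel */

    struct fake_mm { unsigned long context; };  /* cpu_context(cpu, mm) */

    static void get_new_mmu_context(struct fake_mm *mm)
    {
        unsigned long asid = ++asid_cache;

        if (!(asid & ASID_MASK))                /* asid wrapped: */
            asid = asid_cache = asid + 1;       /* ...would flush the TLB here */
        mm->context = asid;                     /* version | asid */
    }

    static void check_mmu_context(struct fake_mm *mm)
    {
        /* version bits differ => the cached ASID may have been reused */
        if ((mm->context ^ asid_cache) & ~ASID_MASK)
            get_new_mmu_context(mm);
    }

    int main(void)
    {
        struct fake_mm mm = { 0 };

        check_mmu_context(&mm);                 /* stale version: allocates */
        check_mmu_context(&mm);                 /* current version: no-op */
        printf("context %#lx\n", mm.context);
        return 0;
    }
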
mm                 43 arch/mips/mm/fault.c 	struct mm_struct *mm = tsk->mm;
mm                 94 arch/mips/mm/fault.c 	if (faulthandler_disabled() || !mm)
mm                100 arch/mips/mm/fault.c 	down_read(&mm->mmap_sem);
mm                101 arch/mips/mm/fault.c 	vma = find_vma(mm, address);
mm                194 arch/mips/mm/fault.c 	up_read(&mm->mmap_sem);
mm                202 arch/mips/mm/fault.c 	up_read(&mm->mmap_sem);
mm                254 arch/mips/mm/fault.c 	up_read(&mm->mmap_sem);
mm                261 arch/mips/mm/fault.c 	up_read(&mm->mmap_sem);
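
The fault.c entries here (and the nios2, openrisc, and parisc ones further down) share the canonical page-fault skeleton: refuse to service the fault from atomic context or without an mm, take mmap_sem for reading, look up the vma, and drop the lock on every path out. A schematic model follows, with a pthread rwlock standing in for mmap_sem and a linear find_vma(); all types and helpers here are illustrative stand-ins.

    #include <pthread.h>
    #include <stddef.h>

    struct vma { unsigned long vm_start, vm_end; };

    struct fake_mm {
        pthread_rwlock_t mmap_sem;
        struct vma vmas[4];
        int nr;
    };

    static struct vma *find_vma(struct fake_mm *mm, unsigned long addr)
    {
        for (int i = 0; i < mm->nr; i++)        /* first vma ending above addr */
            if (addr < mm->vmas[i].vm_end)
                return &mm->vmas[i];
        return NULL;
    }

    static int handle_fault(struct fake_mm *mm, unsigned long addr, int in_atomic)
    {
        struct vma *vma;

        if (in_atomic || !mm)                   /* faulthandler_disabled() || !mm */
            return -1;                          /* no_context fixup path */

        pthread_rwlock_rdlock(&mm->mmap_sem);   /* down_read() */
        vma = find_vma(mm, addr);
        if (!vma || vma->vm_start > addr) {
            pthread_rwlock_unlock(&mm->mmap_sem);
            return -1;                          /* bad_area: SIGSEGV */
        }
        /* ... access checks and handle_mm_fault() would run here ... */
        pthread_rwlock_unlock(&mm->mmap_sem);   /* up_read() on the good path */
        return 0;
    }

    int main(void)
    {
        struct fake_mm mm = {
            .mmap_sem = PTHREAD_RWLOCK_INITIALIZER,
            .vmas = { { 0x1000, 0x2000 } },
            .nr = 1,
        };

        return handle_fault(&mm, 0x1800, 0);    /* inside the vma: succeeds */
    }
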
mm                 24 arch/mips/mm/hugetlbpage.c pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
mm                 31 arch/mips/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
mm                 32 arch/mips/mm/hugetlbpage.c 	pud = pud_alloc(mm, pgd, addr);
mm                 34 arch/mips/mm/hugetlbpage.c 		pte = (pte_t *)pmd_alloc(mm, pud, addr);
mm                 39 arch/mips/mm/hugetlbpage.c pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
mm                 46 arch/mips/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
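
huge_pte_alloc() above is a top-down allocating page-table walk: pgd_offset() locates the top-level slot, then pud_alloc()/pmd_alloc() populate each intermediate level on demand, failing cleanly if any allocation does. A minimal model with calloc-backed levels; the 512-entry geometry and the shift values are assumed for illustration, not MIPS's actual layout.

    #include <stdlib.h>

    /* one 512-slot table per level; pud_alloc()/pmd_alloc() analogue */
    static void *alloc_level(void **slot)
    {
        if (!*slot)
            *slot = calloc(512, sizeof(void *));
        return *slot;
    }

    static void *huge_pte_alloc(void **pgd, unsigned long addr)
    {
        void **pud, **pmd;

        pud = alloc_level(&pgd[(addr >> 30) & 511]);    /* pgd_offset() slot */
        if (!pud)
            return NULL;
        pmd = alloc_level(&pud[(addr >> 21) & 511]);
        if (!pmd)
            return NULL;
        return &pmd[(addr >> 12) & 511];        /* pte slot for the huge page */
    }

    int main(void)
    {
        void *pgd[512] = { 0 };

        return huge_pte_alloc(pgd, 0x123456789000UL) ? 0 : 1;
    }
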
mm                 33 arch/mips/mm/mmap.c 	struct mm_struct *mm = current->mm;
mm                 68 arch/mips/mm/mmap.c 		vma = find_vma(mm, addr);
mm                 81 arch/mips/mm/mmap.c 		info.high_limit = mm->mmap_base;
mm                 96 arch/mips/mm/mmap.c 	info.low_limit = mm->mmap_base;
mm                 45 arch/mips/mm/pgtable-32.c void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm                100 arch/mips/mm/pgtable-64.c void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm                 11 arch/mips/mm/pgtable.c pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 74 arch/mips/mm/tlb-r3k.c 	struct mm_struct *mm = vma->vm_mm;
mm                 77 arch/mips/mm/tlb-r3k.c 	if (cpu_context(cpu, mm) != 0) {
mm                 82 arch/mips/mm/tlb-r3k.c 			cpu_context(cpu, mm) & asid_mask, start, end);
mm                 88 arch/mips/mm/tlb-r3k.c 			int newpid = cpu_context(cpu, mm) & asid_mask;
mm                108 arch/mips/mm/tlb-r3k.c 			drop_mmu_context(mm);
mm                110 arch/mips/mm/tlb-r4k.c 	struct mm_struct *mm = vma->vm_mm;
mm                113 arch/mips/mm/tlb-r4k.c 	if (cpu_context(cpu, mm) != 0) {
mm                124 arch/mips/mm/tlb-r4k.c 			int newpid = cpu_asid(cpu, mm);
mm                160 arch/mips/mm/tlb-r4k.c 			drop_mmu_context(mm);
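
The tlb-r3k.c/tlb-r4k.c range flushes above apply a common heuristic: if the mm holds a live ASID on this cpu, flush small ranges page by page, but for large ranges simply drop the context so a fresh ASID is allocated lazily on the next switch. A sketch of that policy; the threshold constant is a placeholder, not the kernel's tunable.

    #define PAGE_SHIFT   12
    #define FLUSH_MAX    64                     /* assumed per-page flush limit */

    struct fake_mm { unsigned long context; };

    static void flush_one_page(unsigned long addr) { (void)addr; /* probe+invalidate */ }
    static void drop_mmu_context(struct fake_mm *mm) { mm->context = 0; }

    static void flush_tlb_range(struct fake_mm *mm,
                                unsigned long start, unsigned long end)
    {
        if (!mm->context)                       /* cpu_context(cpu, mm) == 0 */
            return;                             /* no live ASID: nothing cached */

        if (((end - start) >> PAGE_SHIFT) <= FLUSH_MAX)
            for (unsigned long a = start; a < end; a += 1UL << PAGE_SHIFT)
                flush_one_page(a);
        else
            drop_mmu_context(mm);               /* cheaper than many probes */
    }

    int main(void)
    {
        struct fake_mm mm = { .context = 0x101 };

        flush_tlb_range(&mm, 0x1000, 0x5000);   /* 4 pages: per-page flush */
        flush_tlb_range(&mm, 0, 1UL << 20);     /* 256 pages: drop context */
        return mm.context != 0;
    }
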
mm                 18 arch/nds32/include/asm/cacheflush.h void flush_cache_mm(struct mm_struct *mm);
mm                 19 arch/nds32/include/asm/cacheflush.h void flush_cache_dup_mm(struct mm_struct *mm);
mm                175 arch/nds32/include/asm/elf.h 		    (elf_addr_t)current->mm->context.vdso);	\
mm                 13 arch/nds32/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm                 15 arch/nds32/include/asm/mmu_context.h 	mm->context.id = 0;
mm                 19 arch/nds32/include/asm/mmu_context.h #define destroy_context(mm)	do { } while(0)
mm                 25 arch/nds32/include/asm/mmu_context.h static inline void __new_context(struct mm_struct *mm)
mm                 40 arch/nds32/include/asm/mmu_context.h 	mm->context.id = cid;
mm                 43 arch/nds32/include/asm/mmu_context.h static inline void check_context(struct mm_struct *mm)
mm                 46 arch/nds32/include/asm/mmu_context.h 	    ((mm->context.id ^ cpu_last_cid) >> TLB_MISC_offCID >> CID_BITS))
mm                 47 arch/nds32/include/asm/mmu_context.h 		__new_context(mm);
mm                 50 arch/nds32/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                 65 arch/nds32/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
mm                 18 arch/nds32/include/asm/pgalloc.h #define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
mm                 19 arch/nds32/include/asm/pgalloc.h #define pmd_free(mm, pmd)			do { } while (0)
mm                 20 arch/nds32/include/asm/pgalloc.h #define pgd_populate(mm, pmd, pte)	BUG()
mm                 23 arch/nds32/include/asm/pgalloc.h extern pgd_t *pgd_alloc(struct mm_struct *mm);
mm                 24 arch/nds32/include/asm/pgalloc.h extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);
mm                 26 arch/nds32/include/asm/pgalloc.h static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
mm                 30 arch/nds32/include/asm/pgalloc.h 	pte = __pte_alloc_one(mm, GFP_PGTABLE_USER);
mm                 44 arch/nds32/include/asm/pgalloc.h pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmdp, pte_t * ptep)
mm                 49 arch/nds32/include/asm/pgalloc.h 	BUG_ON(mm != &init_mm);
mm                 60 arch/nds32/include/asm/pgalloc.h pmd_populate(struct mm_struct *mm, pmd_t * pmdp, pgtable_t ptep)
mm                 64 arch/nds32/include/asm/pgalloc.h 	BUG_ON(mm == &init_mm);
mm                191 arch/nds32/include/asm/pgtable.h #define pte_clear(mm,addr,ptep)	set_pte_at((mm),(addr),(ptep), __pte(0))
mm                205 arch/nds32/include/asm/pgtable.h #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
mm                365 arch/nds32/include/asm/pgtable.h #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
mm                 16 arch/nds32/include/asm/proc-fns.h extern void cpu_switch_mm(struct mm_struct *mm);
mm                  9 arch/nds32/include/asm/tlb.h #define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
mm                 10 arch/nds32/include/asm/tlb.h #define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
mm                 17 arch/nds32/include/asm/tlbflush.h static inline void local_flush_tlb_mm(struct mm_struct *mm)
mm                262 arch/nds32/kernel/signal.c 	retcode = VDSO_SYMBOL(current->mm->context.vdso, rt_sigtramp);
mm                 34 arch/nds32/kernel/sys_nds32.c 	vma = find_vma(current->mm, start);
mm                 20 arch/nds32/kernel/traps.c extern void show_pte(struct mm_struct *mm, unsigned long addr);
mm                 96 arch/nds32/kernel/vdso.c 	unsigned long start = current->mm->mmap_base, end, offset, addr;
mm                116 arch/nds32/kernel/vdso.c 	struct mm_struct *mm = current->mm;
mm                133 arch/nds32/kernel/vdso.c 	if (down_write_killable(&mm->mmap_sem))
mm                154 arch/nds32/kernel/vdso.c 	vma = _install_special_mapping(mm, vdso_base, vvar_page_num * PAGE_SIZE,
mm                178 arch/nds32/kernel/vdso.c 	mm->context.vdso = (void *)vdso_base;
mm                179 arch/nds32/kernel/vdso.c 	vma = _install_special_mapping(mm, vdso_base, vdso_text_len,
mm                188 arch/nds32/kernel/vdso.c 	up_write(&mm->mmap_sem);
mm                192 arch/nds32/kernel/vdso.c 	mm->context.vdso = NULL;
mm                193 arch/nds32/kernel/vdso.c 	up_write(&mm->mmap_sem);
mm                148 arch/nds32/mm/alignment.c extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
mm                 79 arch/nds32/mm/cacheflush.c extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
mm                119 arch/nds32/mm/cacheflush.c void flush_cache_mm(struct mm_struct *mm)
mm                129 arch/nds32/mm/cacheflush.c void flush_cache_dup_mm(struct mm_struct *mm)
mm                 23 arch/nds32/mm/fault.c void show_pte(struct mm_struct *mm, unsigned long addr)
mm                 26 arch/nds32/mm/fault.c 	if (!mm)
mm                 27 arch/nds32/mm/fault.c 		mm = &init_mm;
mm                 29 arch/nds32/mm/fault.c 	pr_alert("pgd = %p\n", mm->pgd);
mm                 30 arch/nds32/mm/fault.c 	pgd = pgd_offset(mm, addr);
mm                 74 arch/nds32/mm/fault.c 	struct mm_struct *mm;
mm                 83 arch/nds32/mm/fault.c 	mm = tsk->mm;
mm                118 arch/nds32/mm/fault.c 	if (unlikely(faulthandler_disabled() || !mm))
mm                126 arch/nds32/mm/fault.c 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
mm                131 arch/nds32/mm/fault.c 		down_read(&mm->mmap_sem);
mm                145 arch/nds32/mm/fault.c 	vma = find_vma(mm, addr);
mm                256 arch/nds32/mm/fault.c 	up_read(&mm->mmap_sem);
mm                264 arch/nds32/mm/fault.c 	up_read(&mm->mmap_sem);
mm                311 arch/nds32/mm/fault.c 	show_pte(mm, addr);
mm                324 arch/nds32/mm/fault.c 	up_read(&mm->mmap_sem);
mm                331 arch/nds32/mm/fault.c 	up_read(&mm->mmap_sem);
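
The nds32 fault entries show the widespread trylock idiom around mmap_sem: try down_read_trylock() first, and if it fails on a kernel-mode fault with no exception-table fixup, bail out rather than risk sleeping; otherwise take the lock the slow way. A schematic model, with search_exception_tables() stubbed out and the helper name lock_mm_for_fault() invented for illustration.

    #include <pthread.h>

    struct fake_mm { pthread_rwlock_t mmap_sem; };

    static int search_exception_tables(unsigned long pc)
    {
        (void)pc;
        return 1;                               /* stub: a fixup exists */
    }

    static int lock_mm_for_fault(struct fake_mm *mm, int user_mode, unsigned long pc)
    {
        if (pthread_rwlock_tryrdlock(&mm->mmap_sem) == 0)
            return 0;                           /* down_read_trylock() fast path */

        if (!user_mode && !search_exception_tables(pc))
            return -1;                          /* kernel fault, no fixup: die */

        pthread_rwlock_rdlock(&mm->mmap_sem);   /* down_read(): may sleep */
        return 0;
    }

    int main(void)
    {
        struct fake_mm mm = { .mmap_sem = PTHREAD_RWLOCK_INITIALIZER };
        int ret = lock_mm_for_fault(&mm, 1, 0);

        if (ret == 0)
            pthread_rwlock_unlock(&mm.mmap_sem);        /* up_read() */
        return ret;
    }
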
mm                 13 arch/nds32/mm/mm-nds32.c pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 41 arch/nds32/mm/mm-nds32.c void pgd_free(struct mm_struct *mm, pgd_t * pgd)
mm                 61 arch/nds32/mm/mm-nds32.c 	pte_free(mm, pte);
mm                 62 arch/nds32/mm/mm-nds32.c 	mm_dec_nr_ptes(mm);
mm                 63 arch/nds32/mm/mm-nds32.c 	pmd_free(mm, pmd);
mm                 80 arch/nds32/mm/mm-nds32.c 	if (current->mm && current->mm->pgd)
mm                 81 arch/nds32/mm/mm-nds32.c 		pgd = current->mm->pgd;
mm                 26 arch/nds32/mm/mmap.c 	struct mm_struct *mm = current->mm;
mm                 60 arch/nds32/mm/mmap.c 		vma = find_vma(mm, addr);
mm                 68 arch/nds32/mm/mmap.c 	info.low_limit = mm->mmap_base;
mm                 32 arch/nds32/mm/proc.c pte_t va_present(struct mm_struct * mm, unsigned long addr)
mm                 39 arch/nds32/mm/proc.c 	pgd = pgd_offset(mm, addr);
mm                 58 arch/nds32/mm/proc.c 	struct mm_struct *mm = current->mm;
mm                 64 arch/nds32/mm/proc.c 		pte = va_present(mm, addr);
mm                 77 arch/nds32/mm/proc.c 	struct mm_struct *mm = current->mm;
mm                 83 arch/nds32/mm/proc.c 		pte = va_present(mm, addr);
mm                526 arch/nds32/mm/proc.c void cpu_switch_mm(struct mm_struct *mm)
mm                530 arch/nds32/mm/proc.c 	cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
mm                532 arch/nds32/mm/proc.c 	__nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
mm                 24 arch/nios2/include/asm/cacheflush.h extern void flush_cache_mm(struct mm_struct *mm);
mm                 25 arch/nios2/include/asm/cacheflush.h extern void flush_cache_dup_mm(struct mm_struct *mm);
mm                 29 arch/nios2/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                 40 arch/nios2/include/asm/mmu_context.h 					struct mm_struct *mm)
mm                 42 arch/nios2/include/asm/mmu_context.h 	mm->context = 0;
mm                 50 arch/nios2/include/asm/mmu_context.h static inline void destroy_context(struct mm_struct *mm)
mm                 58 arch/nios2/include/asm/mmu_context.h 				struct mm_struct *mm)
mm                 17 arch/nios2/include/asm/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
mm                 23 arch/nios2/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
mm                 35 arch/nios2/include/asm/pgalloc.h extern pgd_t *pgd_alloc(struct mm_struct *mm);
mm                 37 arch/nios2/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                108 arch/nios2/include/asm/pgtable.h #define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
mm                210 arch/nios2/include/asm/pgtable.h static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                227 arch/nios2/include/asm/pgtable.h static inline void pte_clear(struct mm_struct *mm,
mm                234 arch/nios2/include/asm/pgtable.h 	set_pte_at(mm, addr, ptep, null);
mm                 25 arch/nios2/include/asm/tlbflush.h extern void flush_tlb_mm(struct mm_struct *mm);
mm                160 arch/nios2/kernel/process.c 	if (current->mm) {
mm                162 arch/nios2/kernel/process.c 			(int) current->mm->start_code,
mm                163 arch/nios2/kernel/process.c 			(int) current->mm->end_code,
mm                164 arch/nios2/kernel/process.c 			(int) current->mm->start_data,
mm                165 arch/nios2/kernel/process.c 			(int) current->mm->end_data,
mm                166 arch/nios2/kernel/process.c 			(int) current->mm->end_data,
mm                167 arch/nios2/kernel/process.c 			(int) current->mm->brk);
mm                169 arch/nios2/kernel/process.c 			(int) current->mm->start_stack,
mm                 41 arch/nios2/kernel/sys_nios2.c 	vma = find_vma(current->mm, addr);
mm                 75 arch/nios2/mm/cacheflush.c 	struct mm_struct *mm = current->active_mm;
mm                 85 arch/nios2/mm/cacheflush.c 		if (mpnt->vm_mm != mm)
mm                103 arch/nios2/mm/cacheflush.c void flush_cache_mm(struct mm_struct *mm)
mm                108 arch/nios2/mm/cacheflush.c void flush_cache_dup_mm(struct mm_struct *mm)
mm                 47 arch/nios2/mm/fault.c 	struct mm_struct *mm = tsk->mm;
mm                 80 arch/nios2/mm/fault.c 	if (faulthandler_disabled() || !mm)
mm                 86 arch/nios2/mm/fault.c 	if (!down_read_trylock(&mm->mmap_sem)) {
mm                 90 arch/nios2/mm/fault.c 		down_read(&mm->mmap_sem);
mm                 93 arch/nios2/mm/fault.c 	vma = find_vma(mm, address);
mm                175 arch/nios2/mm/fault.c 	up_read(&mm->mmap_sem);
mm                183 arch/nios2/mm/fault.c 	up_read(&mm->mmap_sem);
mm                221 arch/nios2/mm/fault.c 	up_read(&mm->mmap_sem);
mm                228 arch/nios2/mm/fault.c 	up_read(&mm->mmap_sem);
mm                112 arch/nios2/mm/init.c 	struct mm_struct *mm = current->mm;
mm                115 arch/nios2/mm/init.c 	down_write(&mm->mmap_sem);
mm                118 arch/nios2/mm/init.c 	ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
mm                122 arch/nios2/mm/init.c 	up_write(&mm->mmap_sem);
mm                 54 arch/nios2/mm/pgtable.c pgd_t *pgd_alloc(struct mm_struct *mm)
mm                256 arch/nios2/mm/tlb.c void flush_tlb_mm(struct mm_struct *mm)
mm                258 arch/nios2/mm/tlb.c 	if (current->mm == mm) {
mm                259 arch/nios2/mm/tlb.c 		unsigned long mmu_pid = get_pid_from_context(&mm->context);
mm                262 arch/nios2/mm/tlb.c 		memset(&mm->context, 0, sizeof(mm_context_t));
mm                 70 arch/openrisc/include/asm/cacheflush.h #define flush_cache_mm(mm)				do { } while (0)
mm                 71 arch/openrisc/include/asm/cacheflush.h #define flush_cache_dup_mm(mm)				do { } while (0)
mm                 20 arch/openrisc/include/asm/mmu_context.h extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
mm                 21 arch/openrisc/include/asm/mmu_context.h extern void destroy_context(struct mm_struct *mm);
mm                 25 arch/openrisc/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)	do { } while (0)
mm                 35 arch/openrisc/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                 25 arch/openrisc/include/asm/pgalloc.h #define pmd_populate_kernel(mm, pmd, pte) \
mm                 28 arch/openrisc/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
mm                 39 arch/openrisc/include/asm/pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 58 arch/openrisc/include/asm/pgalloc.h extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 64 arch/openrisc/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 69 arch/openrisc/include/asm/pgalloc.h extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
mm                 71 arch/openrisc/include/asm/pgalloc.h static inline struct page *pte_alloc_one(struct mm_struct *mm)
mm                 85 arch/openrisc/include/asm/pgalloc.h static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
mm                 90 arch/openrisc/include/asm/pgalloc.h static inline void pte_free(struct mm_struct *mm, struct page *pte)
mm                 50 arch/openrisc/include/asm/pgtable.h #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
mm                222 arch/openrisc/include/asm/pgtable.h #define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)
mm                376 arch/openrisc/include/asm/pgtable.h #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
mm                 33 arch/openrisc/include/asm/tlbflush.h extern void local_flush_tlb_mm(struct mm_struct *mm);
mm                 47 arch/openrisc/include/asm/tlbflush.h extern void flush_tlb_mm(struct mm_struct *mm);
mm                 55 arch/openrisc/include/asm/tlbflush.h 	flush_tlb_mm(current->mm);
mm                 45 arch/openrisc/kernel/asm-offsets.c 	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
mm                110 arch/openrisc/kernel/smp.c 	struct mm_struct *mm = &init_mm;
mm                116 arch/openrisc/kernel/smp.c 	atomic_inc(&mm->mm_count);
mm                117 arch/openrisc/kernel/smp.c 	current->active_mm = mm;
mm                118 arch/openrisc/kernel/smp.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
mm                231 arch/openrisc/kernel/smp.c void flush_tlb_mm(struct mm_struct *mm)
mm                 49 arch/openrisc/mm/fault.c 	struct mm_struct *mm;
mm                 95 arch/openrisc/mm/fault.c 	mm = tsk->mm;
mm                103 arch/openrisc/mm/fault.c 	if (in_interrupt() || !mm)
mm                107 arch/openrisc/mm/fault.c 	down_read(&mm->mmap_sem);
mm                108 arch/openrisc/mm/fault.c 	vma = find_vma(mm, address);
mm                196 arch/openrisc/mm/fault.c 	up_read(&mm->mmap_sem);
mm                205 arch/openrisc/mm/fault.c 	up_read(&mm->mmap_sem);
mm                264 arch/openrisc/mm/fault.c 	up_read(&mm->mmap_sem);
mm                271 arch/openrisc/mm/fault.c 	up_read(&mm->mmap_sem);
mm                117 arch/openrisc/mm/ioremap.c pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
mm                127 arch/openrisc/mm/tlb.c void local_flush_tlb_mm(struct mm_struct *mm)
mm                163 arch/openrisc/mm/tlb.c int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm                165 arch/openrisc/mm/tlb.c 	mm->context = NO_CONTEXT;
mm                174 arch/openrisc/mm/tlb.c void destroy_context(struct mm_struct *mm)
mm                176 arch/openrisc/mm/tlb.c 	flush_tlb_mm(mm);
mm                 23 arch/parisc/include/asm/cacheflush.h #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
mm                 37 arch/parisc/include/asm/cacheflush.h void flush_cache_mm(struct mm_struct *mm);
mm                  8 arch/parisc/include/asm/hugetlb.h void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
mm                 12 arch/parisc/include/asm/hugetlb.h pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
mm                 15 arch/parisc/include/asm/hugetlb.h static inline int is_hugepage_only_range(struct mm_struct *mm,
mm                 43 arch/parisc/include/asm/hugetlb.h void huge_ptep_set_wrprotect(struct mm_struct *mm,
mm                 12 arch/parisc/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                 23 arch/parisc/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm                 25 arch/parisc/include/asm/mmu_context.h 	BUG_ON(atomic_read(&mm->mm_users) != 1);
mm                 27 arch/parisc/include/asm/mmu_context.h 	mm->context = alloc_sid();
mm                 32 arch/parisc/include/asm/mmu_context.h destroy_context(struct mm_struct *mm)
mm                 34 arch/parisc/include/asm/mmu_context.h 	free_sid(mm->context);
mm                 35 arch/parisc/include/asm/mmu_context.h 	mm->context = 0;
mm                 76 arch/parisc/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
mm                 24 arch/parisc/include/asm/pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 50 arch/parisc/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 62 arch/parisc/include/asm/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
mm                 68 arch/parisc/include/asm/pgalloc.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
mm                 76 arch/parisc/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                 85 arch/parisc/include/asm/pgalloc.h 		mm_inc_nr_pmds(mm);
mm                100 arch/parisc/include/asm/pgalloc.h #define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
mm                101 arch/parisc/include/asm/pgalloc.h #define pmd_free(mm, x)			do { } while (0)
mm                102 arch/parisc/include/asm/pgalloc.h #define pgd_populate(mm, pmd, pte)	BUG()
mm                107 arch/parisc/include/asm/pgalloc.h pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
mm                123 arch/parisc/include/asm/pgalloc.h #define pmd_populate(mm, pmd, pte_page) \
mm                124 arch/parisc/include/asm/pgalloc.h 	pmd_populate_kernel(mm, pmd, page_address(pte_page))
mm                 69 arch/parisc/include/asm/pgtable.h static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
mm                 74 arch/parisc/include/asm/pgtable.h 	mtsp(mm->context, 1);
mm                 89 arch/parisc/include/asm/pgtable.h #define set_pte_at(mm, addr, ptep, pteval)			\
mm                 93 arch/parisc/include/asm/pgtable.h 		spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
mm                 96 arch/parisc/include/asm/pgtable.h 		purge_tlb_entries(mm, addr);			\
mm                 97 arch/parisc/include/asm/pgtable.h 		spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
mm                316 arch/parisc/include/asm/pgtable.h #define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))
mm                444 arch/parisc/include/asm/pgtable.h #define pgd_offset(mm, address) \
mm                445 arch/parisc/include/asm/pgtable.h ((mm)->pgd + ((address) >> PGDIR_SHIFT))
mm                519 arch/parisc/include/asm/pgtable.h static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                524 arch/parisc/include/asm/pgtable.h 	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
mm                527 arch/parisc/include/asm/pgtable.h 	purge_tlb_entries(mm, addr);
mm                528 arch/parisc/include/asm/pgtable.h 	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
mm                533 arch/parisc/include/asm/pgtable.h static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                536 arch/parisc/include/asm/pgtable.h 	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
mm                538 arch/parisc/include/asm/pgtable.h 	purge_tlb_entries(mm, addr);
mm                539 arch/parisc/include/asm/pgtable.h 	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
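
The parisc pgtable.h entries above wrap every pte update in a pgd-level spinlock taken with interrupts disabled, purging the stale TLB entry while the lock is held, so set_pte_at(), ptep_get_and_clear(), and ptep_set_wrprotect() all serialize against each other. Below is a pthread-mutex model of that wrapper; the irqsave half has no userspace analogue and is only noted in comments, and all types are stand-ins.

    #include <pthread.h>

    struct fake_pgd { pthread_mutex_t lock; };  /* pgd_spinlock() analogue */

    static void purge_tlb_entry(unsigned long addr) { (void)addr; /* pdtlb on hw */ }

    static void set_pte_at(struct fake_pgd *pgd, unsigned long addr,
                           unsigned long *ptep, unsigned long pteval)
    {
        pthread_mutex_lock(&pgd->lock);         /* spin_lock_irqsave() */
        *ptep = pteval;                         /* set_pte() */
        purge_tlb_entry(addr);                  /* purge_tlb_entries() */
        pthread_mutex_unlock(&pgd->lock);       /* spin_unlock_irqrestore() */
    }

    int main(void)
    {
        struct fake_pgd pgd = { .lock = PTHREAD_MUTEX_INITIALIZER };
        unsigned long pte = 0;

        set_pte_at(&pgd, 0x1000, &pte, 0x1UL);
        return pte != 0x1UL;
    }
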
mm                252 arch/parisc/include/asm/processor.h 	__u32 spaceid = (__u32)current->mm->context;	\
mm                  7 arch/parisc/include/asm/tlb.h #define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
mm                  8 arch/parisc/include/asm/tlb.h #define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
mm                 36 arch/parisc/include/asm/tlbflush.h static inline void flush_tlb_mm(struct mm_struct *mm)
mm                 38 arch/parisc/include/asm/tlbflush.h 	BUG_ON(mm == &init_mm); /* Should never happen */
mm                 54 arch/parisc/include/asm/tlbflush.h 	if (mm) {
mm                 55 arch/parisc/include/asm/tlbflush.h 		if (mm->context != 0)
mm                 56 arch/parisc/include/asm/tlbflush.h 			free_sid(mm->context);
mm                 57 arch/parisc/include/asm/tlbflush.h 		mm->context = alloc_sid();
mm                 58 arch/parisc/include/asm/tlbflush.h 		if (mm == current->active_mm)
mm                 59 arch/parisc/include/asm/tlbflush.h 			load_context(mm->context);
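
flush_tlb_mm() on parisc, visible above, avoids per-entry purges entirely: it frees the mm's space id, allocates a fresh one, and reloads it if the mm is currently active, which makes every old TLB entry unreachable at once. A model with alloc_sid()/free_sid() reduced to a trivial counter:

    struct fake_mm { unsigned long context; }; /* mm->context holds the sid */

    static unsigned long next_sid = 1;
    static unsigned long alloc_sid(void) { return next_sid++; }
    static void free_sid(unsigned long sid) { (void)sid; /* return to pool */ }
    static void load_context(unsigned long sid) { (void)sid; /* space register */ }

    static void flush_tlb_mm(struct fake_mm *mm, struct fake_mm *active_mm)
    {
        if (mm->context)
            free_sid(mm->context);
        mm->context = alloc_sid();              /* old entries now unreachable */
        if (mm == active_mm)
            load_context(mm->context);          /* reload if currently running */
    }

    int main(void)
    {
        struct fake_mm mm = { 0 };

        flush_tlb_mm(&mm, &mm);
        return mm.context != 1;
    }
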
mm                 49 arch/parisc/kernel/asm-offsets.c 	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
mm                522 arch/parisc/kernel/cache.c static inline unsigned long mm_total_size(struct mm_struct *mm)
mm                527 arch/parisc/kernel/cache.c 	for (vma = mm->mmap; vma; vma = vma->vm_next)
mm                547 arch/parisc/kernel/cache.c void flush_cache_mm(struct mm_struct *mm)
mm                555 arch/parisc/kernel/cache.c 	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
mm                556 arch/parisc/kernel/cache.c 		if (mm->context)
mm                562 arch/parisc/kernel/cache.c 	if (mm->context == mfsp(3)) {
mm                563 arch/parisc/kernel/cache.c 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm                572 arch/parisc/kernel/cache.c 	pgd = mm->pgd;
mm                573 arch/parisc/kernel/cache.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm                585 arch/parisc/kernel/cache.c 			if (unlikely(mm->context)) {
mm                316 arch/parisc/kernel/process.c unsigned long arch_randomize_brk(struct mm_struct *mm)
mm                318 arch/parisc/kernel/process.c 	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
mm                320 arch/parisc/kernel/process.c 	if (ret < mm->brk)
mm                321 arch/parisc/kernel/process.c 		return mm->brk;
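
arch_randomize_brk() above page-aligns a random offset added to the current brk and falls back to the unrandomized value if the sum wrapped below it. A sketch with brk_rnd() modeled by a fixed entropy mask; the real entropy width is per-architecture and this one is assumed.

    #include <stdlib.h>

    #define PAGE_SIZE        4096UL
    #define PAGE_ALIGN(x)    (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static unsigned long brk_rnd(void)
    {
        return ((unsigned long)random() & 0x3ffUL) << 12;   /* assumed width */
    }

    static unsigned long arch_randomize_brk(unsigned long brk)
    {
        unsigned long ret = PAGE_ALIGN(brk + brk_rnd());

        return ret < brk ? brk : ret;           /* guard against wraparound */
    }

    int main(void)
    {
        unsigned long brk = 0x10000000UL;

        return arch_randomize_brk(brk) < brk;   /* never shrinks the heap */
    }
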
mm                282 arch/parisc/kernel/smp.c 	BUG_ON(current->mm);
mm                 85 arch/parisc/kernel/sys_parisc.c 	struct mm_struct *mm = current->mm;
mm                113 arch/parisc/kernel/sys_parisc.c 		vma = find_vma_prev(mm, addr, &prev);
mm                122 arch/parisc/kernel/sys_parisc.c 	info.low_limit = mm->mmap_legacy_base;
mm                141 arch/parisc/kernel/sys_parisc.c 	struct mm_struct *mm = current->mm;
mm                170 arch/parisc/kernel/sys_parisc.c 		vma = find_vma_prev(mm, addr, &prev);
mm                180 arch/parisc/kernel/sys_parisc.c 	info.high_limit = mm->mmap_base;
mm                241 arch/parisc/kernel/sys_parisc.c void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
mm                243 arch/parisc/kernel/sys_parisc.c 	mm->mmap_legacy_base = mmap_legacy_base();
mm                244 arch/parisc/kernel/sys_parisc.c 	mm->mmap_base = mmap_upper_limit(rlim_stack);
mm                247 arch/parisc/kernel/sys_parisc.c 		mm->mmap_base = mm->mmap_legacy_base;
mm                248 arch/parisc/kernel/sys_parisc.c 		mm->get_unmapped_area = arch_get_unmapped_area;
mm                250 arch/parisc/kernel/sys_parisc.c 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
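
arch_pick_mmap_layout() above (like the MIPS mmap.c lines earlier in this listing) chooses between two search strategies: legacy layouts grow bottom-up from mmap_legacy_base, while the default searches top-down from mmap_base, and the chosen allocator is stored in mm->get_unmapped_area. A sketch under assumed base addresses, with mmap_is_legacy() reduced to a boolean parameter:

    #include <stdbool.h>

    struct fake_mm {
        unsigned long mmap_base, mmap_legacy_base;
        unsigned long (*get_unmapped_area)(struct fake_mm *, unsigned long);
    };

    static unsigned long bottom_up(struct fake_mm *mm, unsigned long len)
    {
        (void)len;
        return mm->mmap_legacy_base;            /* search upward from here */
    }

    static unsigned long top_down(struct fake_mm *mm, unsigned long len)
    {
        return mm->mmap_base - len;             /* search downward from here */
    }

    static void pick_mmap_layout(struct fake_mm *mm, bool legacy)
    {
        mm->mmap_legacy_base = 0x10000000UL;    /* assumed bases */
        mm->mmap_base = 0x70000000UL;

        if (legacy) {
            mm->mmap_base = mm->mmap_legacy_base;
            mm->get_unmapped_area = bottom_up;
        } else {
            mm->get_unmapped_area = top_down;
        }
    }

    int main(void)
    {
        struct fake_mm mm;

        pick_mmap_layout(&mm, false);
        return mm.get_unmapped_area(&mm, 4096) == 0;
    }
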
mm                720 arch/parisc/kernel/traps.c 			down_read(&current->mm->mmap_sem);
mm                721 arch/parisc/kernel/traps.c 			vma = find_vma(current->mm,regs->iaoq[0]);
mm                728 arch/parisc/kernel/traps.c 				up_read(&current->mm->mmap_sem);
mm                731 arch/parisc/kernel/traps.c 			up_read(&current->mm->mmap_sem);
mm                264 arch/parisc/mm/fault.c 	struct mm_struct *mm;
mm                273 arch/parisc/mm/fault.c 	mm = tsk->mm;
mm                274 arch/parisc/mm/fault.c 	if (!mm)
mm                285 arch/parisc/mm/fault.c 	down_read(&mm->mmap_sem);
mm                286 arch/parisc/mm/fault.c 	vma = find_vma_prev(mm, address, &prev_vma);
mm                342 arch/parisc/mm/fault.c 	up_read(&mm->mmap_sem);
mm                354 arch/parisc/mm/fault.c 	up_read(&mm->mmap_sem);
mm                426 arch/parisc/mm/fault.c 	up_read(&mm->mmap_sem);
mm                 48 arch/parisc/mm/hugetlbpage.c pte_t *huge_pte_alloc(struct mm_struct *mm,
mm                 63 arch/parisc/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
mm                 64 arch/parisc/mm/hugetlbpage.c 	pud = pud_alloc(mm, pgd, addr);
mm                 66 arch/parisc/mm/hugetlbpage.c 		pmd = pmd_alloc(mm, pud, addr);
mm                 68 arch/parisc/mm/hugetlbpage.c 			pte = pte_alloc_map(mm, pmd, addr);
mm                 73 arch/parisc/mm/hugetlbpage.c pte_t *huge_pte_offset(struct mm_struct *mm,
mm                 83 arch/parisc/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
mm                 99 arch/parisc/mm/hugetlbpage.c static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
mm                111 arch/parisc/mm/hugetlbpage.c 		purge_tlb_entries(mm, addr);
mm                117 arch/parisc/mm/hugetlbpage.c static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
mm                134 arch/parisc/mm/hugetlbpage.c 	purge_tlb_entries_huge(mm, addr_start);
mm                137 arch/parisc/mm/hugetlbpage.c void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
mm                142 arch/parisc/mm/hugetlbpage.c 	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
mm                143 arch/parisc/mm/hugetlbpage.c 	__set_huge_pte_at(mm, addr, ptep, entry);
mm                144 arch/parisc/mm/hugetlbpage.c 	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
mm                148 arch/parisc/mm/hugetlbpage.c pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
mm                154 arch/parisc/mm/hugetlbpage.c 	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
mm                156 arch/parisc/mm/hugetlbpage.c 	__set_huge_pte_at(mm, addr, ptep, __pte(0));
mm                157 arch/parisc/mm/hugetlbpage.c 	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
mm                163 arch/parisc/mm/hugetlbpage.c void huge_ptep_set_wrprotect(struct mm_struct *mm,
mm                169 arch/parisc/mm/hugetlbpage.c 	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
mm                171 arch/parisc/mm/hugetlbpage.c 	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
mm                172 arch/parisc/mm/hugetlbpage.c 	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
mm                181 arch/parisc/mm/hugetlbpage.c 	struct mm_struct *mm = vma->vm_mm;
mm                183 arch/parisc/mm/hugetlbpage.c 	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
mm                186 arch/parisc/mm/hugetlbpage.c 		__set_huge_pte_at(mm, addr, ptep, pte);
mm                188 arch/parisc/mm/hugetlbpage.c 	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
mm                  8 arch/powerpc/include/asm/book3s/32/pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 11 arch/powerpc/include/asm/book3s/32/pgalloc.h 			pgtable_gfp_flags(mm, GFP_KERNEL));
mm                 14 arch/powerpc/include/asm/book3s/32/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 24 arch/powerpc/include/asm/book3s/32/pgalloc.h #define pmd_free(mm, x) 		do { } while (0)
mm                 28 arch/powerpc/include/asm/book3s/32/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
mm                 34 arch/powerpc/include/asm/book3s/32/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
mm                215 arch/powerpc/include/asm/book3s/32/pgtable.h #define pte_clear(mm, addr, ptep) \
mm                239 arch/powerpc/include/asm/book3s/32/pgtable.h extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
mm                318 arch/powerpc/include/asm/book3s/32/pgtable.h static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
mm                325 arch/powerpc/include/asm/book3s/32/pgtable.h static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
mm                357 arch/powerpc/include/asm/book3s/32/pgtable.h #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
mm                525 arch/powerpc/include/asm/book3s/32/pgtable.h static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                556 arch/powerpc/include/asm/book3s/32/pgtable.h 		flush_hash_entry(mm, ptep, addr);
mm                  9 arch/powerpc/include/asm/book3s/32/tlbflush.h extern void flush_tlb_mm(struct mm_struct *mm);
mm                 20 arch/powerpc/include/asm/book3s/32/tlbflush.h static inline void local_flush_tlb_mm(struct mm_struct *mm)
mm                 22 arch/powerpc/include/asm/book3s/32/tlbflush.h 	flush_tlb_mm(mm);
mm                146 arch/powerpc/include/asm/book3s/64/hash-4k.h extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
mm                151 arch/powerpc/include/asm/book3s/64/hash-4k.h extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm                153 arch/powerpc/include/asm/book3s/64/hash-4k.h extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
mm                154 arch/powerpc/include/asm/book3s/64/hash-4k.h extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
mm                157 arch/powerpc/include/asm/book3s/64/hash-64k.h #define pte_pagesize_index(mm, addr, pte)	\
mm                263 arch/powerpc/include/asm/book3s/64/hash-64k.h extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
mm                268 arch/powerpc/include/asm/book3s/64/hash-64k.h extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm                270 arch/powerpc/include/asm/book3s/64/hash-64k.h extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
mm                271 arch/powerpc/include/asm/book3s/64/hash-64k.h extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
mm                146 arch/powerpc/include/asm/book3s/64/hash.h extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
mm                150 arch/powerpc/include/asm/book3s/64/hash.h static inline unsigned long hash__pte_update(struct mm_struct *mm,
mm                173 arch/powerpc/include/asm/book3s/64/hash.h 		assert_pte_locked(mm, addr);
mm                177 arch/powerpc/include/asm/book3s/64/hash.h 		hpte_need_flush(mm, addr, ptep, old, huge);
mm                224 arch/powerpc/include/asm/book3s/64/hash.h static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                235 arch/powerpc/include/asm/book3s/64/hash.h extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
mm                238 arch/powerpc/include/asm/book3s/64/hash.h static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
mm                464 arch/powerpc/include/asm/book3s/64/mmu-hash.h extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
mm                496 arch/powerpc/include/asm/book3s/64/mmu-hash.h extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
mm                686 arch/powerpc/include/asm/book3s/64/mmu-hash.h extern void subpage_prot_free(struct mm_struct *mm);
mm                688 arch/powerpc/include/asm/book3s/64/mmu-hash.h static inline void subpage_prot_free(struct mm_struct *mm) {}
mm                 25 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
mm                 28 arch/powerpc/include/asm/book3s/64/pgalloc.h 	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
mm                 31 arch/powerpc/include/asm/book3s/64/pgalloc.h 	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
mm                 39 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 48 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 53 arch/powerpc/include/asm/book3s/64/pgalloc.h 		return radix__pgd_alloc(mm);
mm                 56 arch/powerpc/include/asm/book3s/64/pgalloc.h 			       pgtable_gfp_flags(mm, GFP_KERNEL));
mm                 81 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 84 arch/powerpc/include/asm/book3s/64/pgalloc.h 		return radix__pgd_free(mm, pgd);
mm                 88 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
mm                 93 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                 98 arch/powerpc/include/asm/book3s/64/pgalloc.h 			       pgtable_gfp_flags(mm, GFP_KERNEL));
mm                110 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
mm                115 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
mm                131 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                133 arch/powerpc/include/asm/book3s/64/pgalloc.h 	return pmd_fragment_alloc(mm, addr);
mm                136 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                152 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
mm                158 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
mm                349 arch/powerpc/include/asm/book3s/64/pgtable.h #define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
mm                353 arch/powerpc/include/asm/book3s/64/pgtable.h static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
mm                358 arch/powerpc/include/asm/book3s/64/pgtable.h 		return radix__pte_update(mm, addr, ptep, clr, set, huge);
mm                359 arch/powerpc/include/asm/book3s/64/pgtable.h 	return hash__pte_update(mm, addr, ptep, clr, set, huge);
mm                371 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
mm                378 arch/powerpc/include/asm/book3s/64/pgtable.h 	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
mm                427 arch/powerpc/include/asm/book3s/64/pgtable.h static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
mm                431 arch/powerpc/include/asm/book3s/64/pgtable.h 		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
mm                433 arch/powerpc/include/asm/book3s/64/pgtable.h 		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
mm                437 arch/powerpc/include/asm/book3s/64/pgtable.h static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
mm                445 arch/powerpc/include/asm/book3s/64/pgtable.h 		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
mm                447 arch/powerpc/include/asm/book3s/64/pgtable.h 		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
mm                451 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
mm                454 arch/powerpc/include/asm/book3s/64/pgtable.h 	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
mm                459 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
mm                468 arch/powerpc/include/asm/book3s/64/pgtable.h 		return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
mm                470 arch/powerpc/include/asm/book3s/64/pgtable.h 	return ptep_get_and_clear(mm, addr, ptep);
mm                474 arch/powerpc/include/asm/book3s/64/pgtable.h static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
mm                477 arch/powerpc/include/asm/book3s/64/pgtable.h 	pte_update(mm, addr, ptep, ~0UL, 0, 0);
mm                815 arch/powerpc/include/asm/book3s/64/pgtable.h static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                819 arch/powerpc/include/asm/book3s/64/pgtable.h 		return radix__set_pte_at(mm, addr, ptep, pte, percpu);
mm                820 arch/powerpc/include/asm/book3s/64/pgtable.h 	return hash__set_pte_at(mm, addr, ptep, pte, percpu);
mm               1011 arch/powerpc/include/asm/book3s/64/pgtable.h #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
mm               1140 arch/powerpc/include/asm/book3s/64/pgtable.h extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm               1154 arch/powerpc/include/asm/book3s/64/pgtable.h pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
mm               1158 arch/powerpc/include/asm/book3s/64/pgtable.h 		return radix__pmd_hugepage_update(mm, addr, pmdp, clr, set);
mm               1159 arch/powerpc/include/asm/book3s/64/pgtable.h 	return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
mm               1179 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
mm               1186 arch/powerpc/include/asm/book3s/64/pgtable.h 	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
mm               1191 arch/powerpc/include/asm/book3s/64/pgtable.h static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
mm               1195 arch/powerpc/include/asm/book3s/64/pgtable.h 		pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
mm               1197 arch/powerpc/include/asm/book3s/64/pgtable.h 		pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
mm               1246 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
mm               1250 arch/powerpc/include/asm/book3s/64/pgtable.h 		return radix__pmdp_huge_get_and_clear(mm, addr, pmdp);
mm               1251 arch/powerpc/include/asm/book3s/64/pgtable.h 	return hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
mm               1264 arch/powerpc/include/asm/book3s/64/pgtable.h static inline void pgtable_trans_huge_deposit(struct mm_struct *mm,
mm               1268 arch/powerpc/include/asm/book3s/64/pgtable.h 		return radix__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
mm               1269 arch/powerpc/include/asm/book3s/64/pgtable.h 	return hash__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
mm               1273 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
mm               1277 arch/powerpc/include/asm/book3s/64/pgtable.h 		return radix__pgtable_trans_huge_withdraw(mm, pmdp);
mm               1278 arch/powerpc/include/asm/book3s/64/pgtable.h 	return hash__pgtable_trans_huge_withdraw(mm, pmdp);
mm               1301 arch/powerpc/include/asm/book3s/64/pgtable.h extern void serialize_against_pte_lookup(struct mm_struct *mm);
mm                150 arch/powerpc/include/asm/book3s/64/radix.h static inline unsigned long radix__pte_update(struct mm_struct *mm,
mm                160 arch/powerpc/include/asm/book3s/64/radix.h 		assert_pte_locked(mm, addr);
mm                165 arch/powerpc/include/asm/book3s/64/radix.h static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
mm                175 arch/powerpc/include/asm/book3s/64/radix.h 		old_pte = radix__pte_update(mm, addr, ptep, ~0ul, 0, 0);
mm                190 arch/powerpc/include/asm/book3s/64/radix.h static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                247 arch/powerpc/include/asm/book3s/64/radix.h extern unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
mm                252 arch/powerpc/include/asm/book3s/64/radix.h extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm                254 arch/powerpc/include/asm/book3s/64/radix.h extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
mm                255 arch/powerpc/include/asm/book3s/64/radix.h extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
mm                 17 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h 	struct mm_struct	*mm;
mm                 62 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
mm                 66 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h static inline void hash__flush_tlb_mm(struct mm_struct *mm)
mm                 70 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h static inline void hash__local_flush_all_mm(struct mm_struct *mm)
mm                 81 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h static inline void hash__flush_all_mm(struct mm_struct *mm)
mm                116 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
mm                118 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
mm                 46 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
mm                 54 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
mm                 55 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__local_flush_all_mm(struct mm_struct *mm);
mm                 57 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
mm                 61 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__flush_tlb_mm(struct mm_struct *mm);
mm                 62 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__flush_all_mm(struct mm_struct *mm);
mm                 64 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
mm                 67 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h #define radix__flush_tlb_mm(mm)		radix__local_flush_tlb_mm(mm)
mm                 68 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h #define radix__flush_all_mm(mm)		radix__local_flush_all_mm(mm)
mm                 70 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h #define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p)
mm                 73 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
mm                 84 arch/powerpc/include/asm/book3s/64/tlbflush.h static inline void local_flush_tlb_mm(struct mm_struct *mm)
mm                 87 arch/powerpc/include/asm/book3s/64/tlbflush.h 		return radix__local_flush_tlb_mm(mm);
mm                 88 arch/powerpc/include/asm/book3s/64/tlbflush.h 	return hash__local_flush_tlb_mm(mm);
mm                 99 arch/powerpc/include/asm/book3s/64/tlbflush.h static inline void local_flush_all_mm(struct mm_struct *mm)
mm                102 arch/powerpc/include/asm/book3s/64/tlbflush.h 		return radix__local_flush_all_mm(mm);
mm                103 arch/powerpc/include/asm/book3s/64/tlbflush.h 	return hash__local_flush_all_mm(mm);
mm                114 arch/powerpc/include/asm/book3s/64/tlbflush.h static inline void flush_tlb_mm(struct mm_struct *mm)
mm                117 arch/powerpc/include/asm/book3s/64/tlbflush.h 		return radix__flush_tlb_mm(mm);
mm                118 arch/powerpc/include/asm/book3s/64/tlbflush.h 	return hash__flush_tlb_mm(mm);
mm                129 arch/powerpc/include/asm/book3s/64/tlbflush.h static inline void flush_all_mm(struct mm_struct *mm)
mm                132 arch/powerpc/include/asm/book3s/64/tlbflush.h 		return radix__flush_all_mm(mm);
mm                133 arch/powerpc/include/asm/book3s/64/tlbflush.h 	return hash__flush_all_mm(mm);
mm                136 arch/powerpc/include/asm/book3s/64/tlbflush.h #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
mm                138 arch/powerpc/include/asm/book3s/64/tlbflush.h #define flush_all_mm(mm)		local_flush_all_mm(mm)
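
Nearly every book3s/64 operation in this stretch of the listing dispatches between the radix and hash back ends behind a single radix_enabled() test, as in pgd_alloc(), pte_update(), and the tlbflush.h wrappers above. A compilable model of that dispatch pattern, with radix_enabled() reduced to a plain global flag:

    #include <stdio.h>

    static int radix_enabled;                   /* mmu feature test as a flag */

    struct fake_mm { int unused; };

    static void radix__flush_tlb_mm(struct fake_mm *mm) { (void)mm; puts("radix"); }
    static void hash__flush_tlb_mm(struct fake_mm *mm)  { (void)mm; puts("hash"); }

    static inline void flush_tlb_mm(struct fake_mm *mm)
    {
        if (radix_enabled)                      /* one test picks the back end */
            radix__flush_tlb_mm(mm);
        else
            hash__flush_tlb_mm(mm);
    }

    int main(void)
    {
        struct fake_mm mm;

        radix_enabled = 1;
        flush_tlb_mm(&mm);                      /* -> radix back end */
        radix_enabled = 0;
        flush_tlb_mm(&mm);                      /* -> hash back end */
        return 0;
    }
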
mm                 16 arch/powerpc/include/asm/book3s/pgtable.h extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
mm                 17 arch/powerpc/include/asm/cacheflush.h #define flush_cache_mm(mm)			do { } while (0)
mm                 18 arch/powerpc/include/asm/cacheflush.h #define flush_cache_dup_mm(mm)			do { } while (0)
mm                 16 arch/powerpc/include/asm/copro.h int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
mm                 19 arch/powerpc/include/asm/copro.h int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
mm                 23 arch/powerpc/include/asm/copro.h void copro_flush_all_slbs(struct mm_struct *mm);
mm                 25 arch/powerpc/include/asm/copro.h static inline void copro_flush_all_slbs(struct mm_struct *mm) {}
mm                174 arch/powerpc/include/asm/elf.h 	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base);	\
mm                 22 arch/powerpc/include/asm/hugetlb.h int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
mm                 25 arch/powerpc/include/asm/hugetlb.h static inline int is_hugepage_only_range(struct mm_struct *mm,
mm                 30 arch/powerpc/include/asm/hugetlb.h 		return slice_is_hugepage_only_range(mm, addr, len);
mm                 40 arch/powerpc/include/asm/hugetlb.h static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
mm                 44 arch/powerpc/include/asm/hugetlb.h 	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
mm                210 arch/powerpc/include/asm/iommu.h extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
mm                213 arch/powerpc/include/asm/iommu.h extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
mm                 12 arch/powerpc/include/asm/mm-arch-hooks.h static inline void arch_remap(struct mm_struct *mm,
mm                 20 arch/powerpc/include/asm/mm-arch-hooks.h 	if (old_start == mm->context.vdso_base)
mm                 21 arch/powerpc/include/asm/mm-arch-hooks.h 		mm->context.vdso_base = new_start;
mm                265 arch/powerpc/include/asm/mmu.h extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
mm                267 arch/powerpc/include/asm/mmu.h static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
mm                295 arch/powerpc/include/asm/mmu.h extern u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address);
mm                297 arch/powerpc/include/asm/mmu.h static inline u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address)
mm                 17 arch/powerpc/include/asm/mmu_context.h extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
mm                 18 arch/powerpc/include/asm/mmu_context.h extern void destroy_context(struct mm_struct *mm);
mm                 23 arch/powerpc/include/asm/mmu_context.h extern bool mm_iommu_preregistered(struct mm_struct *mm);
mm                 24 arch/powerpc/include/asm/mmu_context.h extern long mm_iommu_new(struct mm_struct *mm,
mm                 27 arch/powerpc/include/asm/mmu_context.h extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
mm                 30 arch/powerpc/include/asm/mmu_context.h extern long mm_iommu_put(struct mm_struct *mm,
mm                 32 arch/powerpc/include/asm/mmu_context.h extern void mm_iommu_init(struct mm_struct *mm);
mm                 33 arch/powerpc/include/asm/mmu_context.h extern void mm_iommu_cleanup(struct mm_struct *mm);
mm                 34 arch/powerpc/include/asm/mmu_context.h extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
mm                 37 arch/powerpc/include/asm/mmu_context.h 		struct mm_struct *mm, unsigned long ua, unsigned long size);
mm                 38 arch/powerpc/include/asm/mmu_context.h extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
mm                 44 arch/powerpc/include/asm/mmu_context.h extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
mm                 45 arch/powerpc/include/asm/mmu_context.h extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
mm                 50 arch/powerpc/include/asm/mmu_context.h static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
mm                 55 arch/powerpc/include/asm/mmu_context.h static inline void mm_iommu_init(struct mm_struct *mm) { }
mm                 57 arch/powerpc/include/asm/mmu_context.h extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
mm                 77 arch/powerpc/include/asm/mmu_context.h static inline int alloc_extended_context(struct mm_struct *mm,
mm                 88 arch/powerpc/include/asm/mmu_context.h 	VM_WARN_ON(mm->context.extended_id[index]);
mm                 89 arch/powerpc/include/asm/mmu_context.h 	mm->context.extended_id[index] = context_id;
mm                 93 arch/powerpc/include/asm/mmu_context.h static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
mm                 97 arch/powerpc/include/asm/mmu_context.h 	context_id = get_user_context(&mm->context, ea);
mm                109 arch/powerpc/include/asm/mmu_context.h static inline int alloc_extended_context(struct mm_struct *mm,
mm                117 arch/powerpc/include/asm/mmu_context.h static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
mm                124 arch/powerpc/include/asm/mmu_context.h extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
mm                126 arch/powerpc/include/asm/mmu_context.h static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
mm                130 arch/powerpc/include/asm/mmu_context.h extern int use_cop(unsigned long acop, struct mm_struct *mm);
mm                131 arch/powerpc/include/asm/mmu_context.h extern void drop_cop(unsigned long acop, struct mm_struct *mm);
mm                134 arch/powerpc/include/asm/mmu_context.h static inline void inc_mm_active_cpus(struct mm_struct *mm)
mm                136 arch/powerpc/include/asm/mmu_context.h 	atomic_inc(&mm->context.active_cpus);
mm                139 arch/powerpc/include/asm/mmu_context.h static inline void dec_mm_active_cpus(struct mm_struct *mm)
mm                141 arch/powerpc/include/asm/mmu_context.h 	atomic_dec(&mm->context.active_cpus);
mm                144 arch/powerpc/include/asm/mmu_context.h static inline void mm_context_add_copro(struct mm_struct *mm)
mm                151 arch/powerpc/include/asm/mmu_context.h 	if (atomic_inc_return(&mm->context.copros) == 1)
mm                152 arch/powerpc/include/asm/mmu_context.h 		inc_mm_active_cpus(mm);
mm                155 arch/powerpc/include/asm/mmu_context.h static inline void mm_context_remove_copro(struct mm_struct *mm)
mm                178 arch/powerpc/include/asm/mmu_context.h 		flush_all_mm(mm);
mm                180 arch/powerpc/include/asm/mmu_context.h 		c = atomic_dec_if_positive(&mm->context.copros);
mm                185 arch/powerpc/include/asm/mmu_context.h 			dec_mm_active_cpus(mm);
mm                189 arch/powerpc/include/asm/mmu_context.h static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
mm                190 arch/powerpc/include/asm/mmu_context.h static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
mm                191 arch/powerpc/include/asm/mmu_context.h static inline void mm_context_add_copro(struct mm_struct *mm) { }
mm                192 arch/powerpc/include/asm/mmu_context.h static inline void mm_context_remove_copro(struct mm_struct *mm) { }
mm                211 arch/powerpc/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
mm                223 arch/powerpc/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm,
mm                232 arch/powerpc/include/asm/mmu_context.h extern void arch_exit_mmap(struct mm_struct *mm);
mm                234 arch/powerpc/include/asm/mmu_context.h static inline void arch_unmap(struct mm_struct *mm,
mm                237 arch/powerpc/include/asm/mmu_context.h 	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
mm                238 arch/powerpc/include/asm/mmu_context.h 		mm->context.vdso_base = 0;
mm                241 arch/powerpc/include/asm/mmu_context.h static inline void arch_bprm_mm_init(struct mm_struct *mm,
mm                249 arch/powerpc/include/asm/mmu_context.h void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
mm                258 arch/powerpc/include/asm/mmu_context.h #define pkey_mm_init(mm)
mm                262 arch/powerpc/include/asm/mmu_context.h #define arch_dup_pkeys(oldmm, mm)
mm                272 arch/powerpc/include/asm/mmu_context.h 				struct mm_struct *mm)
mm                274 arch/powerpc/include/asm/mmu_context.h 	arch_dup_pkeys(oldmm, mm);
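
The mm_context_add_copro()/mm_context_remove_copro() entries indexed above (asm/mmu_context.h) keep two atomic counters per context: copros counts attached coprocessor contexts, and the first attach also bumps active_cpus so TLB invalidation stops taking the CPU-local fast path. A minimal userspace sketch of that first-user/last-user pattern using C11 atomics; the field names follow the listing, but the kernel's remove path additionally flushes the mm and uses atomic_dec_if_positive(), which this sketch omits:

#include <stdatomic.h>
#include <stdio.h>

struct ctx {
	atomic_int copros;      /* attached coprocessor contexts */
	atomic_int active_cpus; /* agents that may cache translations */
};

static void ctx_add_copro(struct ctx *c)
{
	/* first copro attach also counts as an extra active agent */
	if (atomic_fetch_add(&c->copros, 1) == 0)
		atomic_fetch_add(&c->active_cpus, 1);
}

static void ctx_remove_copro(struct ctx *c)
{
	/* last copro detach drops that extra reference again */
	if (atomic_fetch_sub(&c->copros, 1) == 1)
		atomic_fetch_sub(&c->active_cpus, 1);
}

int main(void)
{
	struct ctx c = { 0, 0 };
	ctx_add_copro(&c);
	ctx_add_copro(&c);
	ctx_remove_copro(&c);
	ctx_remove_copro(&c);
	printf("copros=%d active_cpus=%d\n",
	       atomic_load(&c.copros), atomic_load(&c.active_cpus));
	return 0;
}
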
mm                 13 arch/powerpc/include/asm/nohash/32/pgalloc.h #define pmd_free(mm, x) 		do { } while (0)
mm                 17 arch/powerpc/include/asm/nohash/32/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
mm                 26 arch/powerpc/include/asm/nohash/32/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
mm                163 arch/powerpc/include/asm/nohash/32/pgtable.h #define pte_clear(mm, addr, ptep) \
mm                299 arch/powerpc/include/asm/nohash/32/pgtable.h static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
mm                306 arch/powerpc/include/asm/nohash/32/pgtable.h static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
mm                362 arch/powerpc/include/asm/nohash/32/pgtable.h #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
mm                 20 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                 23 arch/powerpc/include/asm/nohash/64/pgalloc.h 			pgtable_gfp_flags(mm, GFP_KERNEL));
mm                 26 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
mm                 31 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
mm                 36 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
mm                 42 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
mm                 48 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                 51 arch/powerpc/include/asm/nohash/64/pgalloc.h 			pgtable_gfp_flags(mm, GFP_KERNEL));
mm                 54 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                191 arch/powerpc/include/asm/nohash/64/pgtable.h #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
mm                208 arch/powerpc/include/asm/nohash/64/pgtable.h static inline unsigned long pte_update(struct mm_struct *mm,
mm                232 arch/powerpc/include/asm/nohash/64/pgtable.h 		assert_pte_locked(mm, addr);
mm                242 arch/powerpc/include/asm/nohash/64/pgtable.h static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
mm                249 arch/powerpc/include/asm/nohash/64/pgtable.h 	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
mm                261 arch/powerpc/include/asm/nohash/64/pgtable.h static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
mm                268 arch/powerpc/include/asm/nohash/64/pgtable.h 	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
mm                272 arch/powerpc/include/asm/nohash/64/pgtable.h static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
mm                278 arch/powerpc/include/asm/nohash/64/pgtable.h 	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
mm                290 arch/powerpc/include/asm/nohash/64/pgtable.h static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
mm                293 arch/powerpc/include/asm/nohash/64/pgtable.h 	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
mm                297 arch/powerpc/include/asm/nohash/64/pgtable.h static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
mm                300 arch/powerpc/include/asm/nohash/64/pgtable.h 	pte_update(mm, addr, ptep, ~0UL, 0, 0);
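
The nohash/64 pte_update() calls indexed above all follow one shape: atomically clear some bits, set some bits, and return the old PTE so the caller can test what was there (__ptep_test_and_clear_young checks _PAGE_ACCESSED, ptep_set_wrprotect clears _PAGE_RW, pte_clear clears everything with ~0UL). A compile-and-run analogue of that read-modify-write contract, with illustrative bit values standing in for the real PTE layout:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_ACCESSED 0x100UL  /* illustrative values, not the real layout */
#define _PAGE_RW       0x002UL

/* clear 'clr' bits, set 'set' bits, return the previous value */
static uint64_t pte_update(_Atomic uint64_t *ptep, uint64_t clr, uint64_t set)
{
	uint64_t old = atomic_load(ptep);

	/* retry on concurrent update, like an ldarx/stdcx. loop */
	while (!atomic_compare_exchange_weak(ptep, &old, (old & ~clr) | set))
		;
	return old;
}

/* __ptep_test_and_clear_young() reduces to one call plus a bit test */
static int test_and_clear_young(_Atomic uint64_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

int main(void)
{
	_Atomic uint64_t pte = _PAGE_ACCESSED | _PAGE_RW;

	printf("young=%d\n", test_and_clear_young(&pte)); /* 1 */
	printf("young=%d\n", test_and_clear_young(&pte)); /* 0 */
	return 0;
}
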
mm                 20 arch/powerpc/include/asm/nohash/pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 23 arch/powerpc/include/asm/nohash/pgalloc.h 			pgtable_gfp_flags(mm, GFP_KERNEL));
mm                 26 arch/powerpc/include/asm/nohash/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                184 arch/powerpc/include/asm/nohash/pgtable.h extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
mm                192 arch/powerpc/include/asm/nohash/pgtable.h static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                 36 arch/powerpc/include/asm/nohash/tlbflush.h extern void local_flush_tlb_mm(struct mm_struct *mm);
mm                 39 arch/powerpc/include/asm/nohash/tlbflush.h extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
mm                 43 arch/powerpc/include/asm/nohash/tlbflush.h extern void flush_tlb_mm(struct mm_struct *mm);
mm                 45 arch/powerpc/include/asm/nohash/tlbflush.h extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
mm                 48 arch/powerpc/include/asm/nohash/tlbflush.h #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
mm                 50 arch/powerpc/include/asm/nohash/tlbflush.h #define __flush_tlb_page(mm,addr,p,i)	__local_flush_tlb_page(mm,addr,p,i)
mm                275 arch/powerpc/include/asm/paca.h extern void copy_mm_to_paca(struct mm_struct *mm);
mm                  8 arch/powerpc/include/asm/pgalloc.h static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
mm                 10 arch/powerpc/include/asm/pgalloc.h 	if (unlikely(mm == &init_mm))
mm                 15 arch/powerpc/include/asm/pgalloc.h static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
mm                 23 arch/powerpc/include/asm/pgalloc.h pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel);
mm                 25 arch/powerpc/include/asm/pgalloc.h static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
mm                 27 arch/powerpc/include/asm/pgalloc.h 	return (pte_t *)pte_fragment_alloc(mm, 1);
mm                 30 arch/powerpc/include/asm/pgalloc.h static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
mm                 32 arch/powerpc/include/asm/pgalloc.h 	return (pgtable_t)pte_fragment_alloc(mm, 0);
mm                 38 arch/powerpc/include/asm/pgalloc.h static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
mm                 43 arch/powerpc/include/asm/pgalloc.h static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
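
pgtable_gfp_flags() above decides how page-table allocations are charged: tables for init_mm (kernel mappings) keep the plain gfp mask, while user page tables get an accounting flag added so they count against the task's memory cgroup. A sketch of that branch under those assumptions; the flag value and the struct are stand-ins, and only the init_mm test is visible in the listing:

typedef unsigned int gfp_t;
#define __GFP_ACCOUNT 0x1u      /* stand-in value */

struct mm_struct { int dummy; };
static struct mm_struct init_mm;

/* kernel page tables are never memcg-charged; user ones are */
static gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
{
	if (mm == &init_mm)
		return gfp;
	return gfp | __GFP_ACCOUNT;
}

int main(void)
{
	struct mm_struct user_mm;

	return (pgtable_gfp_flags(&init_mm, 0) == 0 &&
		pgtable_gfp_flags(&user_mm, 0) == __GFP_ACCOUNT) ? 0 : 1;
}
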
mm                 74 arch/powerpc/include/asm/pkeys.h #define mm_pkey_allocation_map(mm) (mm->context.pkey_allocation_map)
mm                 76 arch/powerpc/include/asm/pkeys.h #define __mm_pkey_allocated(mm, pkey) {	\
mm                 77 arch/powerpc/include/asm/pkeys.h 	mm_pkey_allocation_map(mm) |= pkey_alloc_mask(pkey); \
mm                 80 arch/powerpc/include/asm/pkeys.h #define __mm_pkey_free(mm, pkey) {	\
mm                 81 arch/powerpc/include/asm/pkeys.h 	mm_pkey_allocation_map(mm) &= ~pkey_alloc_mask(pkey);	\
mm                 84 arch/powerpc/include/asm/pkeys.h #define __mm_pkey_is_allocated(mm, pkey)	\
mm                 85 arch/powerpc/include/asm/pkeys.h 	(mm_pkey_allocation_map(mm) & pkey_alloc_mask(pkey))
mm                 90 arch/powerpc/include/asm/pkeys.h static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
mm                 99 arch/powerpc/include/asm/pkeys.h 	return __mm_pkey_is_allocated(mm, pkey);
mm                107 arch/powerpc/include/asm/pkeys.h static inline int mm_pkey_alloc(struct mm_struct *mm)
mm                124 arch/powerpc/include/asm/pkeys.h 	if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
mm                127 arch/powerpc/include/asm/pkeys.h 	ret = ffz((u32)mm_pkey_allocation_map(mm));
mm                128 arch/powerpc/include/asm/pkeys.h 	__mm_pkey_allocated(mm, ret);
mm                133 arch/powerpc/include/asm/pkeys.h static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
mm                138 arch/powerpc/include/asm/pkeys.h 	if (!mm_pkey_is_allocated(mm, pkey))
mm                141 arch/powerpc/include/asm/pkeys.h 	__mm_pkey_free(mm, pkey);
mm                150 arch/powerpc/include/asm/pkeys.h extern int __execute_only_pkey(struct mm_struct *mm);
mm                151 arch/powerpc/include/asm/pkeys.h static inline int execute_only_pkey(struct mm_struct *mm)
mm                156 arch/powerpc/include/asm/pkeys.h 	return __execute_only_pkey(mm);
mm                202 arch/powerpc/include/asm/pkeys.h extern void pkey_mm_init(struct mm_struct *mm);
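
The pkeys.h entries above implement protection-key allocation as a plain bitmap: mm_pkey_alloc() refuses when the map equals all_pkeys_mask, otherwise takes the first zero bit via ffz() and marks it allocated; mm_pkey_free() validates and clears. The same logic in standalone C, assuming 32 keys and building ffz from __builtin_ctz:

#include <stdint.h>
#include <stdio.h>

static uint32_t pkey_allocation_map;            /* bit n set => key n in use */
static const uint32_t all_pkeys_mask = 0xffffffffu;

static uint32_t pkey_alloc_mask(int pkey) { return 1u << pkey; }

/* ffz(): index of the first zero bit (caller guarantees one exists) */
static int ffz32(uint32_t x) { return __builtin_ctz(~x); }

static int pkey_alloc(void)
{
	if (pkey_allocation_map == all_pkeys_mask)
		return -1;                      /* no key left */
	int pkey = ffz32(pkey_allocation_map);
	pkey_allocation_map |= pkey_alloc_mask(pkey);
	return pkey;
}

static int pkey_free(int pkey)
{
	if (!(pkey_allocation_map & pkey_alloc_mask(pkey)))
		return -1;                      /* not allocated */
	pkey_allocation_map &= ~pkey_alloc_mask(pkey);
	return 0;
}

int main(void)
{
	int a = pkey_alloc(), b = pkey_alloc();

	printf("a=%d b=%d free(a)=%d\n", a, b, pkey_free(a));
	return 0;
}
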
mm                 44 arch/powerpc/include/asm/pte-walk.h 	VM_WARN(pgdir != current->mm->pgd,
mm                 27 arch/powerpc/include/asm/slice.h unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
mm                 29 arch/powerpc/include/asm/slice.h void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
mm                 32 arch/powerpc/include/asm/slice.h void slice_init_new_context_exec(struct mm_struct *mm);
mm                 37 arch/powerpc/include/asm/slice.h static inline void slice_init_new_context_exec(struct mm_struct *mm) {}
mm                 39 arch/powerpc/include/asm/slice.h static inline unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
mm                128 arch/powerpc/include/asm/spu.h 	struct mm_struct *mm;
mm                197 arch/powerpc/include/asm/spu.h extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
mm                202 arch/powerpc/include/asm/spu.h extern void spu_flush_all_slbs(struct mm_struct *mm);
mm                 44 arch/powerpc/include/asm/tlb.h extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
mm                 52 arch/powerpc/include/asm/tlb.h 		flush_hash_entry(tlb->mm, ptep, address);
mm                 57 arch/powerpc/include/asm/tlb.h static inline int mm_is_core_local(struct mm_struct *mm)
mm                 59 arch/powerpc/include/asm/tlb.h 	return cpumask_subset(mm_cpumask(mm),
mm                 64 arch/powerpc/include/asm/tlb.h static inline int mm_is_thread_local(struct mm_struct *mm)
mm                 66 arch/powerpc/include/asm/tlb.h 	if (atomic_read(&mm->context.active_cpus) > 1)
mm                 68 arch/powerpc/include/asm/tlb.h 	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
mm                 70 arch/powerpc/include/asm/tlb.h static inline void mm_reset_thread_local(struct mm_struct *mm)
mm                 72 arch/powerpc/include/asm/tlb.h 	WARN_ON(atomic_read(&mm->context.copros) > 0);
mm                 78 arch/powerpc/include/asm/tlb.h 	WARN_ON(current->mm != mm);
mm                 79 arch/powerpc/include/asm/tlb.h 	atomic_set(&mm->context.active_cpus, 1);
mm                 80 arch/powerpc/include/asm/tlb.h 	cpumask_clear(mm_cpumask(mm));
mm                 81 arch/powerpc/include/asm/tlb.h 	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
mm                 84 arch/powerpc/include/asm/tlb.h static inline int mm_is_thread_local(struct mm_struct *mm)
mm                 86 arch/powerpc/include/asm/tlb.h 	return cpumask_equal(mm_cpumask(mm),
mm                 92 arch/powerpc/include/asm/tlb.h static inline int mm_is_core_local(struct mm_struct *mm)
mm                 97 arch/powerpc/include/asm/tlb.h static inline int mm_is_thread_local(struct mm_struct *mm)
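
The asm/tlb.h predicates above answer one question at different granularities: may this mm's translations live only on the current thread or core, so a local (tlbiel) flush suffices? mm_is_core_local() asks whether the mm's cpumask is a subset of the current core's siblings; the radix mm_is_thread_local() first rules out any second active CPU or attached coprocessor. A toy rendering with 64-bit integers standing in for cpumasks:

#include <stdbool.h>
#include <stdint.h>

/* toy cpumask: bit n = CPU n, at most 64 CPUs */
static bool mask_subset(uint64_t sub, uint64_t super)
{
	return (sub & ~super) == 0;
}

static bool mm_is_core_local(uint64_t mm_cpus, uint64_t core_siblings)
{
	return mask_subset(mm_cpus, core_siblings);
}

/* radix variant: any second active CPU (or coprocessor, counted in
 * active_cpus) means some other agent may hold translations */
static bool mm_is_thread_local(int active_cpus, uint64_t mm_cpus, int this_cpu)
{
	if (active_cpus > 1)
		return false;
	return (mm_cpus >> this_cpu) & 1;
}

int main(void)
{
	return (mm_is_core_local(0x3, 0xf) &&
		mm_is_thread_local(1, 0x1, 0)) ? 0 : 1;
}
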
mm                 79 arch/powerpc/kernel/asm-offsets.c 	OFFSET(MM, task_struct, mm);
mm               1016 arch/powerpc/kernel/iommu.c extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
mm               1027 arch/powerpc/kernel/iommu.c 			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
mm                 33 arch/powerpc/kernel/mce_power.c 	struct mm_struct *mm;
mm                 36 arch/powerpc/kernel/mce_power.c 		mm = current->mm;
mm                 38 arch/powerpc/kernel/mce_power.c 		mm = &init_mm;
mm                 41 arch/powerpc/kernel/mce_power.c 	ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
mm                305 arch/powerpc/kernel/paca.c void copy_mm_to_paca(struct mm_struct *mm)
mm                308 arch/powerpc/kernel/paca.c 	mm_context_t *context = &mm->context;
mm               2165 arch/powerpc/kernel/process.c unsigned long arch_randomize_brk(struct mm_struct *mm)
mm               2167 arch/powerpc/kernel/process.c 	unsigned long base = mm->brk;
mm               2180 arch/powerpc/kernel/process.c 		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
mm               2185 arch/powerpc/kernel/process.c 	if (ret < mm->brk)
mm               2186 arch/powerpc/kernel/process.c 		return mm->brk;
mm                933 arch/powerpc/kernel/signal_32.c 	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
mm                935 arch/powerpc/kernel/signal_32.c 		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
mm               1395 arch/powerpc/kernel/signal_32.c 	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
mm               1397 arch/powerpc/kernel/signal_32.c 		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
mm                869 arch/powerpc/kernel/signal_64.c 	if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
mm                870 arch/powerpc/kernel/signal_64.c 		regs->link = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
mm                 38 arch/powerpc/kernel/uprobes.c 		struct mm_struct *mm, unsigned long addr)
mm                128 arch/powerpc/kernel/vdso.c 	struct mm_struct *mm = current->mm;
mm                158 arch/powerpc/kernel/vdso.c 	current->mm->context.vdso_base = 0;
mm                174 arch/powerpc/kernel/vdso.c 	if (down_write_killable(&mm->mmap_sem))
mm                193 arch/powerpc/kernel/vdso.c 	current->mm->context.vdso_base = vdso_base;
mm                205 arch/powerpc/kernel/vdso.c 	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
mm                210 arch/powerpc/kernel/vdso.c 		current->mm->context.vdso_base = 0;
mm                214 arch/powerpc/kernel/vdso.c 	up_write(&mm->mmap_sem);
mm                218 arch/powerpc/kernel/vdso.c 	up_write(&mm->mmap_sem);
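
The vdso.c lines above trace the map-in sequence: zero context.vdso_base first so signal code sees "no vDSO" until setup succeeds, take mmap_sem for writing (killable), publish the chosen base, call install_special_mapping(), and on failure zero the base again before dropping the lock. A userspace analogue of that publish-then-roll-back shape; the install step is stubbed and a pthread rwlock stands in for mmap_sem:

#include <pthread.h>
#include <sys/mman.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long vdso_base;         /* 0 => no vDSO mapped */

static int install_special_mapping_stub(void *base, size_t len)
{
	(void)base; (void)len;
	return 0;                       /* pretend success */
}

static int map_vdso(size_t len)
{
	int rc = -1;

	vdso_base = 0;                  /* fail-safe default */
	if (pthread_rwlock_wrlock(&map_lock))   /* down_write_killable() */
		return -1;
	void *base = mmap(NULL, len, PROT_READ,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		goto out;
	vdso_base = (unsigned long)base; /* publish for signal code */
	if (install_special_mapping_stub(base, len) != 0) {
		vdso_base = 0;          /* roll back on failure */
		munmap(base, len);
		goto out;
	}
	rc = 0;
out:
	pthread_rwlock_unlock(&map_lock);
	return rc;
}

int main(void) { return map_vdso(4096) ? 1 : 0; }
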
mm                299 arch/powerpc/kvm/book3s_64_mmu_hv.c 				current->mm->pgd, false, pte_idx_ret);
mm                595 arch/powerpc/kvm/book3s_64_mmu_hv.c 		down_read(&current->mm->mmap_sem);
mm                596 arch/powerpc/kvm/book3s_64_mmu_hv.c 		vma = find_vma(current->mm, hva);
mm                605 arch/powerpc/kvm/book3s_64_mmu_hv.c 		up_read(&current->mm->mmap_sem);
mm                624 arch/powerpc/kvm/book3s_64_mmu_hv.c 			ptep = find_current_mm_pte(current->mm->pgd,
mm                348 arch/powerpc/kvm/book3s_64_mmu_radix.c 	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
mm                494 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pud_free(kvm->mm, pud);
mm                517 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pgd_free(kvm->mm, kvm->arch.pgtable);
mm                580 arch/powerpc/kvm/book3s_64_mmu_radix.c 		new_pud = pud_alloc_one(kvm->mm, gpa);
mm                602 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pgd_populate(kvm->mm, pgd, new_pud);
mm                653 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pud_populate(kvm->mm, pud, new_pmd);
mm                705 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pmd_populate(kvm->mm, pmd, new_ptep);
mm                730 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pud_free(kvm->mm, new_pud);
mm               1136 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvm->arch.pgtable = pgd_alloc(kvm->mm);
mm                258 arch/powerpc/kvm/book3s_64_vio.c 	account_locked_vm(current->mm,
mm                283 arch/powerpc/kvm/book3s_64_vio.c 	ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true);
mm                329 arch/powerpc/kvm/book3s_64_vio.c 	account_locked_vm(current->mm, kvmppc_stt_pages(npages), false);
mm                372 arch/powerpc/kvm/book3s_64_vio.c 		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
mm                413 arch/powerpc/kvm/book3s_64_vio.c static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
mm                419 arch/powerpc/kvm/book3s_64_vio.c 	iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
mm                432 arch/powerpc/kvm/book3s_64_vio.c 	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
mm                450 arch/powerpc/kvm/book3s_64_vio.c 	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
mm                459 arch/powerpc/kvm/book3s_64_vio.c 		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
mm                494 arch/powerpc/kvm/book3s_64_vio.c 	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
mm                505 arch/powerpc/kvm/book3s_64_vio.c 	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
mm                586 arch/powerpc/kvm/book3s_64_vio.c 			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
mm                677 arch/powerpc/kvm/book3s_64_vio.c 				kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
mm                731 arch/powerpc/kvm/book3s_64_vio.c 			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
mm                128 arch/powerpc/kvm/book3s_64_vio_hv.c 		mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
mm                221 arch/powerpc/kvm/book3s_64_vio_hv.c static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
mm                238 arch/powerpc/kvm/book3s_64_vio_hv.c 			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
mm                257 arch/powerpc/kvm/book3s_64_vio_hv.c 	iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
mm                271 arch/powerpc/kvm/book3s_64_vio_hv.c 	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
mm                289 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
mm                301 arch/powerpc/kvm/book3s_64_vio_hv.c 		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
mm                336 arch/powerpc/kvm/book3s_64_vio_hv.c 	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
mm                347 arch/powerpc/kvm/book3s_64_vio_hv.c 	ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
mm                511 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
mm                522 arch/powerpc/kvm/book3s_64_vio_hv.c 		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
mm               4286 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.pgdir = current->mm->pgd;
mm               4618 arch/powerpc/kvm/book3s_hv.c 	down_read(&current->mm->mmap_sem);
mm               4619 arch/powerpc/kvm/book3s_hv.c 	vma = find_vma(current->mm, hva);
mm               4625 arch/powerpc/kvm/book3s_hv.c 	up_read(&current->mm->mmap_sem);
mm               4658 arch/powerpc/kvm/book3s_hv.c 	up_read(&current->mm->mmap_sem);
mm                576 arch/powerpc/kvm/book3s_hv_nested.c 	gp->shadow_pgtable = pgd_alloc(kvm->mm);
mm                590 arch/powerpc/kvm/book3s_hv_nested.c 	pgd_free(kvm->mm, gp->shadow_pgtable);
mm                611 arch/powerpc/kvm/book3s_hv_nested.c 		pgd_free(kvm->mm, gp->shadow_pgtable);
mm                778 arch/powerpc/kvm/booke.c 	vcpu->arch.pgdir = current->mm->pgd;
mm                358 arch/powerpc/kvm/e500_mmu_host.c 		down_read(&current->mm->mmap_sem);
mm                360 arch/powerpc/kvm/e500_mmu_host.c 		vma = find_vma(current->mm, hva);
mm                444 arch/powerpc/kvm/e500_mmu_host.c 		up_read(&current->mm->mmap_sem);
mm                300 arch/powerpc/mm/book3s32/mmu.c void hash_preload(struct mm_struct *mm, unsigned long ea)
mm                306 arch/powerpc/mm/book3s32/mmu.c 	pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
mm                308 arch/powerpc/mm/book3s32/mmu.c 		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
mm                 76 arch/powerpc/mm/book3s32/mmu_context.c int init_new_context(struct task_struct *t, struct mm_struct *mm)
mm                 78 arch/powerpc/mm/book3s32/mmu_context.c 	mm->context.id = __init_new_context();
mm                 95 arch/powerpc/mm/book3s32/mmu_context.c void destroy_context(struct mm_struct *mm)
mm                 98 arch/powerpc/mm/book3s32/mmu_context.c 	if (mm->context.id != NO_CONTEXT) {
mm                 99 arch/powerpc/mm/book3s32/mmu_context.c 		__destroy_context(mm->context.id);
mm                100 arch/powerpc/mm/book3s32/mmu_context.c 		mm->context.id = NO_CONTEXT;
mm                 35 arch/powerpc/mm/book3s32/tlb.c void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
mm                 41 arch/powerpc/mm/book3s32/tlb.c 		flush_hash_pages(mm->context.id, addr, ptephys, 1);
mm                 74 arch/powerpc/mm/book3s32/tlb.c static void flush_range(struct mm_struct *mm, unsigned long start,
mm                 80 arch/powerpc/mm/book3s32/tlb.c 	unsigned int ctx = mm->context.id;
mm                 90 arch/powerpc/mm/book3s32/tlb.c 	pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
mm                118 arch/powerpc/mm/book3s32/tlb.c void flush_tlb_mm(struct mm_struct *mm)
mm                133 arch/powerpc/mm/book3s32/tlb.c 	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
mm                140 arch/powerpc/mm/book3s32/tlb.c 	struct mm_struct *mm;
mm                147 arch/powerpc/mm/book3s32/tlb.c 	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
mm                148 arch/powerpc/mm/book3s32/tlb.c 	pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
mm                150 arch/powerpc/mm/book3s32/tlb.c 		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
mm                189 arch/powerpc/mm/book3s64/hash_pgtable.c unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
mm                198 arch/powerpc/mm/book3s64/hash_pgtable.c 	assert_spin_locked(pmd_lockptr(mm, pmdp));
mm                218 arch/powerpc/mm/book3s64/hash_pgtable.c 		hpte_do_hugepage_flush(mm, addr, pmdp, old);
mm                266 arch/powerpc/mm/book3s64/hash_pgtable.c void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm                271 arch/powerpc/mm/book3s64/hash_pgtable.c 	assert_spin_locked(pmd_lockptr(mm, pmdp));
mm                286 arch/powerpc/mm/book3s64/hash_pgtable.c pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
mm                291 arch/powerpc/mm/book3s64/hash_pgtable.c 	assert_spin_locked(pmd_lockptr(mm, pmdp));
mm                311 arch/powerpc/mm/book3s64/hash_pgtable.c void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
mm                321 arch/powerpc/mm/book3s64/hash_pgtable.c 	psize = get_slice_psize(mm, addr);
mm                331 arch/powerpc/mm/book3s64/hash_pgtable.c 		vsid = get_user_vsid(&mm->context, addr, ssize);
mm                338 arch/powerpc/mm/book3s64/hash_pgtable.c 	if (mm_is_thread_local(mm))
mm                344 arch/powerpc/mm/book3s64/hash_pgtable.c pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
mm                352 arch/powerpc/mm/book3s64/hash_pgtable.c 	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
mm                376 arch/powerpc/mm/book3s64/hash_pgtable.c 	serialize_against_pte_lookup(mm);
mm                 41 arch/powerpc/mm/book3s64/hash_tlb.c void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
mm                 64 arch/powerpc/mm/book3s64/hash_tlb.c 		psize = get_slice_psize(mm, addr);
mm                 73 arch/powerpc/mm/book3s64/hash_tlb.c 		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
mm                 76 arch/powerpc/mm/book3s64/hash_tlb.c 		psize = pte_pagesize_index(mm, addr, pte);
mm                 91 arch/powerpc/mm/book3s64/hash_tlb.c 		vsid = get_user_vsid(&mm->context, addr, ssize);
mm                105 arch/powerpc/mm/book3s64/hash_tlb.c 		flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
mm                120 arch/powerpc/mm/book3s64/hash_tlb.c 	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
mm                126 arch/powerpc/mm/book3s64/hash_tlb.c 		batch->mm = mm;
mm                150 arch/powerpc/mm/book3s64/hash_tlb.c 	local = mm_is_thread_local(batch->mm);
mm                192 arch/powerpc/mm/book3s64/hash_tlb.c void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
mm                202 arch/powerpc/mm/book3s64/hash_tlb.c 	BUG_ON(!mm->pgd);
mm                215 arch/powerpc/mm/book3s64/hash_tlb.c 		pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp,
mm                227 arch/powerpc/mm/book3s64/hash_tlb.c 			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
mm                229 arch/powerpc/mm/book3s64/hash_tlb.c 			hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
mm                235 arch/powerpc/mm/book3s64/hash_tlb.c void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
mm                256 arch/powerpc/mm/book3s64/hash_tlb.c 			hpte_need_flush(mm, addr, pte, pteval, 0);
mm               1142 arch/powerpc/mm/book3s64/hash_utils.c void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
mm               1144 arch/powerpc/mm/book3s64/hash_utils.c 	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
mm               1146 arch/powerpc/mm/book3s64/hash_utils.c 	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
mm               1147 arch/powerpc/mm/book3s64/hash_utils.c 	copro_flush_all_slbs(mm);
mm               1148 arch/powerpc/mm/book3s64/hash_utils.c 	if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
mm               1150 arch/powerpc/mm/book3s64/hash_utils.c 		copy_mm_to_paca(mm);
mm               1164 arch/powerpc/mm/book3s64/hash_utils.c static int subpage_protection(struct mm_struct *mm, unsigned long ea)
mm               1166 arch/powerpc/mm/book3s64/hash_utils.c 	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
mm               1202 arch/powerpc/mm/book3s64/hash_utils.c static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
mm               1220 arch/powerpc/mm/book3s64/hash_utils.c static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
mm               1225 arch/powerpc/mm/book3s64/hash_utils.c 			copy_mm_to_paca(mm);
mm               1243 arch/powerpc/mm/book3s64/hash_utils.c int hash_page_mm(struct mm_struct *mm, unsigned long ea,
mm               1264 arch/powerpc/mm/book3s64/hash_utils.c 		if (! mm) {
mm               1269 arch/powerpc/mm/book3s64/hash_utils.c 		psize = get_slice_psize(mm, ea);
mm               1271 arch/powerpc/mm/book3s64/hash_utils.c 		vsid = get_user_vsid(&mm->context, ea, ssize);
mm               1292 arch/powerpc/mm/book3s64/hash_utils.c 	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
mm               1301 arch/powerpc/mm/book3s64/hash_utils.c 	pgdir = mm->pgd;
mm               1308 arch/powerpc/mm/book3s64/hash_utils.c 	if (user_region && mm_is_thread_local(mm))
mm               1363 arch/powerpc/mm/book3s64/hash_utils.c 		if (current->mm == mm)
mm               1364 arch/powerpc/mm/book3s64/hash_utils.c 			check_paca_psize(ea, mm, psize, user_region);
mm               1379 arch/powerpc/mm/book3s64/hash_utils.c 		demote_segment_4k(mm, ea);
mm               1389 arch/powerpc/mm/book3s64/hash_utils.c 			demote_segment_4k(mm, ea);
mm               1401 arch/powerpc/mm/book3s64/hash_utils.c 			copro_flush_all_slbs(mm);
mm               1407 arch/powerpc/mm/book3s64/hash_utils.c 	if (current->mm == mm)
mm               1408 arch/powerpc/mm/book3s64/hash_utils.c 		check_paca_psize(ea, mm, psize, user_region);
mm               1417 arch/powerpc/mm/book3s64/hash_utils.c 		int spp = subpage_protection(mm, ea);
mm               1450 arch/powerpc/mm/book3s64/hash_utils.c 	struct mm_struct *mm = current->mm;
mm               1454 arch/powerpc/mm/book3s64/hash_utils.c 		mm = &init_mm;
mm               1459 arch/powerpc/mm/book3s64/hash_utils.c 	return hash_page_mm(mm, ea, access, trap, flags);
mm               1468 arch/powerpc/mm/book3s64/hash_utils.c 	struct mm_struct *mm = current->mm;
mm               1472 arch/powerpc/mm/book3s64/hash_utils.c 		mm = &init_mm;
mm               1494 arch/powerpc/mm/book3s64/hash_utils.c 	return hash_page_mm(mm, ea, access, trap, flags);
mm               1498 arch/powerpc/mm/book3s64/hash_utils.c static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
mm               1500 arch/powerpc/mm/book3s64/hash_utils.c 	int psize = get_slice_psize(mm, ea);
mm               1503 arch/powerpc/mm/book3s64/hash_utils.c 	if (unlikely(psize != mm_ctx_user_psize(&mm->context)))
mm               1509 arch/powerpc/mm/book3s64/hash_utils.c 	if (unlikely((psize == MMU_PAGE_4K) && subpage_protection(mm, ea)))
mm               1515 arch/powerpc/mm/book3s64/hash_utils.c static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
mm               1521 arch/powerpc/mm/book3s64/hash_utils.c static void hash_preload(struct mm_struct *mm, unsigned long ea,
mm               1534 arch/powerpc/mm/book3s64/hash_utils.c 	if (!should_hash_preload(mm, ea))
mm               1538 arch/powerpc/mm/book3s64/hash_utils.c 		" trap=%lx\n", mm, mm->pgd, ea, access, trap);
mm               1541 arch/powerpc/mm/book3s64/hash_utils.c 	pgdir = mm->pgd;
mm               1547 arch/powerpc/mm/book3s64/hash_utils.c 	vsid = get_user_vsid(&mm->context, ea, ssize);
mm               1577 arch/powerpc/mm/book3s64/hash_utils.c 	if (mm_is_thread_local(mm))
mm               1582 arch/powerpc/mm/book3s64/hash_utils.c 	if (mm_ctx_user_psize(&mm->context) == MMU_PAGE_64K)
mm               1588 arch/powerpc/mm/book3s64/hash_utils.c 				    ssize, subpage_protection(mm, ea));
mm               1595 arch/powerpc/mm/book3s64/hash_utils.c 				   mm_ctx_user_psize(&mm->context),
mm               1596 arch/powerpc/mm/book3s64/hash_utils.c 				   mm_ctx_user_psize(&mm->context),
mm               1658 arch/powerpc/mm/book3s64/hash_utils.c u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address)
mm               1664 arch/powerpc/mm/book3s64/hash_utils.c 	if (!mm || !mm->pgd)
mm               1668 arch/powerpc/mm/book3s64/hash_utils.c 	ptep = find_linux_pte(mm->pgd, address, NULL, NULL);
mm                 50 arch/powerpc/mm/book3s64/iommu_api.c bool mm_iommu_preregistered(struct mm_struct *mm)
mm                 52 arch/powerpc/mm/book3s64/iommu_api.c 	return !list_empty(&mm->context.iommu_group_mem_list);
mm                 56 arch/powerpc/mm/book3s64/iommu_api.c static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
mm                 66 arch/powerpc/mm/book3s64/iommu_api.c 		ret = account_locked_vm(mm, entries, true);
mm                 99 arch/powerpc/mm/book3s64/iommu_api.c 	down_read(&mm->mmap_sem);
mm                117 arch/powerpc/mm/book3s64/iommu_api.c 	up_read(&mm->mmap_sem);
mm                150 arch/powerpc/mm/book3s64/iommu_api.c 	list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
mm                161 arch/powerpc/mm/book3s64/iommu_api.c 	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
mm                178 arch/powerpc/mm/book3s64/iommu_api.c 	account_locked_vm(mm, locked_entries, false);
mm                183 arch/powerpc/mm/book3s64/iommu_api.c long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
mm                186 arch/powerpc/mm/book3s64/iommu_api.c 	return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA,
mm                191 arch/powerpc/mm/book3s64/iommu_api.c long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
mm                195 arch/powerpc/mm/book3s64/iommu_api.c 	return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem);
mm                245 arch/powerpc/mm/book3s64/iommu_api.c long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
mm                278 arch/powerpc/mm/book3s64/iommu_api.c 	account_locked_vm(mm, unlock_entries, false);
mm                284 arch/powerpc/mm/book3s64/iommu_api.c struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
mm                289 arch/powerpc/mm/book3s64/iommu_api.c 	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
mm                302 arch/powerpc/mm/book3s64/iommu_api.c struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
mm                307 arch/powerpc/mm/book3s64/iommu_api.c 	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
mm                320 arch/powerpc/mm/book3s64/iommu_api.c struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
mm                327 arch/powerpc/mm/book3s64/iommu_api.c 	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
mm                391 arch/powerpc/mm/book3s64/iommu_api.c extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
mm                398 arch/powerpc/mm/book3s64/iommu_api.c 	mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
mm                415 arch/powerpc/mm/book3s64/iommu_api.c bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
mm                421 arch/powerpc/mm/book3s64/iommu_api.c 	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
mm                458 arch/powerpc/mm/book3s64/iommu_api.c void mm_iommu_init(struct mm_struct *mm)
mm                460 arch/powerpc/mm/book3s64/iommu_api.c 	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
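
The iommu_api.c entries above show the preregistration lifecycle: mm_iommu_do_alloc() charges the pinned pages against the locked-vm limit up front, and every later failure path, as well as mm_iommu_put(), uncharges the same amount, so the counter cannot leak. The charge/roll-back skeleton reduced to standalone C; the limit value and error codes are stand-ins:

#include <stdbool.h>

static unsigned long locked_vm;                 /* pages currently pinned */
static const unsigned long locked_vm_limit = 1024;

/* inc=true charges and may fail; inc=false uncharges and cannot */
static long account_locked_vm(unsigned long npages, bool inc)
{
	if (inc) {
		if (locked_vm + npages > locked_vm_limit)
			return -1;              /* over the memlock limit */
		locked_vm += npages;
		return 0;
	}
	locked_vm -= npages;
	return 0;
}

static long register_region(unsigned long entries, bool setup_fails)
{
	if (account_locked_vm(entries, true))
		return -1;
	if (setup_fails) {                      /* pinning/table setup failed */
		account_locked_vm(entries, false); /* undo the charge */
		return -1;
	}
	return 0;
}

int main(void)
{
	return (register_region(8, false) == 0 &&
		register_region(8, true)  != 0 &&
		locked_vm == 8) ? 0 : 1;
}
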
mm                 91 arch/powerpc/mm/book3s64/mmu_context.c static int hash__init_new_context(struct mm_struct *mm)
mm                 95 arch/powerpc/mm/book3s64/mmu_context.c 	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
mm                 97 arch/powerpc/mm/book3s64/mmu_context.c 	if (!mm->context.hash_context)
mm                114 arch/powerpc/mm/book3s64/mmu_context.c 	if (mm->context.id == 0) {
mm                115 arch/powerpc/mm/book3s64/mmu_context.c 		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
mm                116 arch/powerpc/mm/book3s64/mmu_context.c 		slice_init_new_context_exec(mm);
mm                119 arch/powerpc/mm/book3s64/mmu_context.c 		memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
mm                122 arch/powerpc/mm/book3s64/mmu_context.c 		if (current->mm->context.hash_context->spt) {
mm                123 arch/powerpc/mm/book3s64/mmu_context.c 			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
mm                125 arch/powerpc/mm/book3s64/mmu_context.c 			if (!mm->context.hash_context->spt) {
mm                126 arch/powerpc/mm/book3s64/mmu_context.c 				kfree(mm->context.hash_context);
mm                133 arch/powerpc/mm/book3s64/mmu_context.c 	index = realloc_context_ids(&mm->context);
mm                136 arch/powerpc/mm/book3s64/mmu_context.c 		kfree(mm->context.hash_context->spt);
mm                138 arch/powerpc/mm/book3s64/mmu_context.c 		kfree(mm->context.hash_context);
mm                142 arch/powerpc/mm/book3s64/mmu_context.c 	pkey_mm_init(mm);
mm                153 arch/powerpc/mm/book3s64/mmu_context.c static int radix__init_new_context(struct mm_struct *mm)
mm                167 arch/powerpc/mm/book3s64/mmu_context.c 	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
mm                177 arch/powerpc/mm/book3s64/mmu_context.c 	mm->context.hash_context = NULL;
mm                182 arch/powerpc/mm/book3s64/mmu_context.c int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm                187 arch/powerpc/mm/book3s64/mmu_context.c 		index = radix__init_new_context(mm);
mm                189 arch/powerpc/mm/book3s64/mmu_context.c 		index = hash__init_new_context(mm);
mm                194 arch/powerpc/mm/book3s64/mmu_context.c 	mm->context.id = index;
mm                196 arch/powerpc/mm/book3s64/mmu_context.c 	mm->context.pte_frag = NULL;
mm                197 arch/powerpc/mm/book3s64/mmu_context.c 	mm->context.pmd_frag = NULL;
mm                199 arch/powerpc/mm/book3s64/mmu_context.c 	mm_iommu_init(mm);
mm                201 arch/powerpc/mm/book3s64/mmu_context.c 	atomic_set(&mm->context.active_cpus, 0);
mm                202 arch/powerpc/mm/book3s64/mmu_context.c 	atomic_set(&mm->context.copros, 0);
mm                240 arch/powerpc/mm/book3s64/mmu_context.c static void destroy_pagetable_cache(struct mm_struct *mm)
mm                244 arch/powerpc/mm/book3s64/mmu_context.c 	frag = mm->context.pte_frag;
mm                248 arch/powerpc/mm/book3s64/mmu_context.c 	frag = mm->context.pmd_frag;
mm                254 arch/powerpc/mm/book3s64/mmu_context.c void destroy_context(struct mm_struct *mm)
mm                257 arch/powerpc/mm/book3s64/mmu_context.c 	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
mm                273 arch/powerpc/mm/book3s64/mmu_context.c 		process_tb[mm->context.id].prtb0 = 0;
mm                275 arch/powerpc/mm/book3s64/mmu_context.c 		subpage_prot_free(mm);
mm                276 arch/powerpc/mm/book3s64/mmu_context.c 	destroy_contexts(&mm->context);
mm                277 arch/powerpc/mm/book3s64/mmu_context.c 	mm->context.id = MMU_NO_CONTEXT;
mm                280 arch/powerpc/mm/book3s64/mmu_context.c void arch_exit_mmap(struct mm_struct *mm)
mm                282 arch/powerpc/mm/book3s64/mmu_context.c 	destroy_pagetable_cache(mm);
mm                299 arch/powerpc/mm/book3s64/mmu_context.c 		process_tb[mm->context.id].prtb0 = 0;
mm                 64 arch/powerpc/mm/book3s64/pgtable.c void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm                 74 arch/powerpc/mm/book3s64/pgtable.c 	assert_spin_locked(pmd_lockptr(mm, pmdp));
mm                 78 arch/powerpc/mm/book3s64/pgtable.c 	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
mm                 95 arch/powerpc/mm/book3s64/pgtable.c void serialize_against_pte_lookup(struct mm_struct *mm)
mm                 98 arch/powerpc/mm/book3s64/pgtable.c 	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
mm                266 arch/powerpc/mm/book3s64/pgtable.c static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
mm                273 arch/powerpc/mm/book3s64/pgtable.c 	spin_lock(&mm->page_table_lock);
mm                274 arch/powerpc/mm/book3s64/pgtable.c 	ret = mm->context.pmd_frag;
mm                282 arch/powerpc/mm/book3s64/pgtable.c 		mm->context.pmd_frag = pmd_frag;
mm                284 arch/powerpc/mm/book3s64/pgtable.c 	spin_unlock(&mm->page_table_lock);
mm                288 arch/powerpc/mm/book3s64/pgtable.c static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
mm                294 arch/powerpc/mm/book3s64/pgtable.c 	if (mm == &init_mm)
mm                314 arch/powerpc/mm/book3s64/pgtable.c 	spin_lock(&mm->page_table_lock);
mm                320 arch/powerpc/mm/book3s64/pgtable.c 	if (likely(!mm->context.pmd_frag)) {
mm                322 arch/powerpc/mm/book3s64/pgtable.c 		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
mm                324 arch/powerpc/mm/book3s64/pgtable.c 	spin_unlock(&mm->page_table_lock);
mm                329 arch/powerpc/mm/book3s64/pgtable.c pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
mm                333 arch/powerpc/mm/book3s64/pgtable.c 	pmd = get_pmd_from_cache(mm);
mm                337 arch/powerpc/mm/book3s64/pgtable.c 	return __alloc_for_pmdcache(mm);
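
pmd_fragment_alloc() above is a fragment cache: rather than spending a whole page per PMD table, __alloc_for_pmdcache() grabs one page, returns the first fragment, and parks the remainder in mm->context.pmd_frag under page_table_lock for get_pmd_from_cache() to hand out next. A simplified single-page sketch; the kernel additionally refcounts the backing page, whereas here a lost refill race simply drops the remainder:

#include <pthread.h>
#include <stdlib.h>

#define PMD_FRAG_SIZE 4096u
#define PMD_FRAG_NR   4u        /* fragments per backing page, illustrative */

struct mm_ctx {
	pthread_mutex_t page_table_lock;
	char *pmd_frag;             /* next free fragment, or NULL */
	unsigned int frags_left;
};

static void *pmd_fragment_alloc(struct mm_ctx *mm)
{
	char *ret = NULL;

	pthread_mutex_lock(&mm->page_table_lock);
	if (mm->pmd_frag) {         /* get_pmd_from_cache() fast path */
		ret = mm->pmd_frag;
		mm->frags_left--;
		mm->pmd_frag = mm->frags_left ? ret + PMD_FRAG_SIZE : NULL;
	}
	pthread_mutex_unlock(&mm->page_table_lock);
	if (ret)
		return ret;

	/* cache empty: take a fresh page, keep the tail as fragments */
	char *page = aligned_alloc(PMD_FRAG_SIZE, PMD_FRAG_SIZE * PMD_FRAG_NR);
	if (!page)
		return NULL;
	pthread_mutex_lock(&mm->page_table_lock);
	if (!mm->pmd_frag) {        /* nobody refilled meanwhile: stash tail */
		mm->pmd_frag = page + PMD_FRAG_SIZE;
		mm->frags_left = PMD_FRAG_NR - 1;
	}
	pthread_mutex_unlock(&mm->page_table_lock);
	return page;
}

int main(void)
{
	struct mm_ctx mm = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	return (pmd_fragment_alloc(&mm) && pmd_fragment_alloc(&mm)) ? 0 : 1;
}
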
mm                165 arch/powerpc/mm/book3s64/pkeys.c void pkey_mm_init(struct mm_struct *mm)
mm                169 arch/powerpc/mm/book3s64/pkeys.c 	mm_pkey_allocation_map(mm) = initial_allocation_mask;
mm                170 arch/powerpc/mm/book3s64/pkeys.c 	mm->context.execute_only_pkey = execute_only_key;
mm                320 arch/powerpc/mm/book3s64/pkeys.c int __execute_only_pkey(struct mm_struct *mm)
mm                322 arch/powerpc/mm/book3s64/pkeys.c 	return mm->context.execute_only_pkey;
mm                396 arch/powerpc/mm/book3s64/pkeys.c 	if (!current->mm)
mm                400 arch/powerpc/mm/book3s64/pkeys.c 	if (current->mm != vma->vm_mm)
mm                420 arch/powerpc/mm/book3s64/pkeys.c void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
mm                426 arch/powerpc/mm/book3s64/pkeys.c 	mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
mm                427 arch/powerpc/mm/book3s64/pkeys.c 	mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
mm                 50 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	struct mm_struct *mm = current->mm;
mm                 76 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 		vma = find_vma(mm, addr);
mm                 88 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
mm                 99 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	struct mm_struct *mm = vma->vm_mm;
mm                106 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	    (atomic_read(&mm->context.copros) > 0))
mm                923 arch/powerpc/mm/book3s64/radix_pgtable.c unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
mm                931 arch/powerpc/mm/book3s64/radix_pgtable.c 	assert_spin_locked(pmd_lockptr(mm, pmdp));
mm                934 arch/powerpc/mm/book3s64/radix_pgtable.c 	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
mm                969 arch/powerpc/mm/book3s64/radix_pgtable.c void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm                974 arch/powerpc/mm/book3s64/radix_pgtable.c 	assert_spin_locked(pmd_lockptr(mm, pmdp));
mm                977 arch/powerpc/mm/book3s64/radix_pgtable.c 	if (!pmd_huge_pte(mm, pmdp))
mm                980 arch/powerpc/mm/book3s64/radix_pgtable.c 		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
mm                981 arch/powerpc/mm/book3s64/radix_pgtable.c 	pmd_huge_pte(mm, pmdp) = pgtable;
mm                984 arch/powerpc/mm/book3s64/radix_pgtable.c pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
mm                990 arch/powerpc/mm/book3s64/radix_pgtable.c 	assert_spin_locked(pmd_lockptr(mm, pmdp));
mm                993 arch/powerpc/mm/book3s64/radix_pgtable.c 	pgtable = pmd_huge_pte(mm, pmdp);
mm                996 arch/powerpc/mm/book3s64/radix_pgtable.c 		pmd_huge_pte(mm, pmdp) = NULL;
mm                998 arch/powerpc/mm/book3s64/radix_pgtable.c 		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
mm               1008 arch/powerpc/mm/book3s64/radix_pgtable.c pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
mm               1014 arch/powerpc/mm/book3s64/radix_pgtable.c 	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
mm               1026 arch/powerpc/mm/book3s64/radix_pgtable.c 	serialize_against_pte_lookup(mm);
mm               1035 arch/powerpc/mm/book3s64/radix_pgtable.c 	struct mm_struct *mm = vma->vm_mm;
mm               1044 arch/powerpc/mm/book3s64/radix_pgtable.c 	if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
mm               1052 arch/powerpc/mm/book3s64/radix_pgtable.c 		radix__flush_tlb_page_psize(mm, address, psize);
mm               1070 arch/powerpc/mm/book3s64/radix_pgtable.c 	struct mm_struct *mm = vma->vm_mm;
mm               1078 arch/powerpc/mm/book3s64/radix_pgtable.c 	    (atomic_read(&mm->context.copros) > 0))
mm               1081 arch/powerpc/mm/book3s64/radix_pgtable.c 	set_pte_at(mm, addr, ptep, pte);
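
radix__pgtable_trans_huge_deposit()/withdraw() above stash a preallocated page table under the huge PMD so a later split cannot fail on allocation: deposit links the table into the list hanging off pmd_huge_pte(), withdraw pops one back. A freestanding sketch of the stash as a plain intrusive push/pop; the kernel threads a list_head through the deposited page itself, and the exact ordering differs between the hash and radix variants:

#include <stdio.h>

struct pgtable {
	struct pgtable *next;   /* threaded through the deposited table */
};

static struct pgtable *pmd_huge_pte;    /* per-PMD stash head */

static void pgtable_deposit(struct pgtable *pt)
{
	pt->next = pmd_huge_pte;
	pmd_huge_pte = pt;
}

static struct pgtable *pgtable_withdraw(void)
{
	struct pgtable *pt = pmd_huge_pte;

	if (pt)
		pmd_huge_pte = pt->next;
	return pt;
}

int main(void)
{
	struct pgtable a, b;

	pgtable_deposit(&a);
	pgtable_deposit(&b);
	printf("%s\n", pgtable_withdraw() == &b ? "LIFO" : "FIFO");
	return 0;
}
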
mm                352 arch/powerpc/mm/book3s64/radix_tlb.c static inline void _tlbiel_pid_multicast(struct mm_struct *mm,
mm                355 arch/powerpc/mm/book3s64/radix_tlb.c 	struct cpumask *cpus = mm_cpumask(mm);
mm                364 arch/powerpc/mm/book3s64/radix_tlb.c 	if (atomic_read(&mm->context.copros) > 0)
mm                490 arch/powerpc/mm/book3s64/radix_tlb.c static inline void _tlbiel_va_multicast(struct mm_struct *mm,
mm                494 arch/powerpc/mm/book3s64/radix_tlb.c 	struct cpumask *cpus = mm_cpumask(mm);
mm                497 arch/powerpc/mm/book3s64/radix_tlb.c 	if (atomic_read(&mm->context.copros) > 0)
mm                540 arch/powerpc/mm/book3s64/radix_tlb.c static inline void _tlbiel_va_range_multicast(struct mm_struct *mm,
mm                545 arch/powerpc/mm/book3s64/radix_tlb.c 	struct cpumask *cpus = mm_cpumask(mm);
mm                551 arch/powerpc/mm/book3s64/radix_tlb.c 	if (atomic_read(&mm->context.copros) > 0)
mm                566 arch/powerpc/mm/book3s64/radix_tlb.c void radix__local_flush_tlb_mm(struct mm_struct *mm)
mm                571 arch/powerpc/mm/book3s64/radix_tlb.c 	pid = mm->context.id;
mm                579 arch/powerpc/mm/book3s64/radix_tlb.c void radix__local_flush_all_mm(struct mm_struct *mm)
mm                584 arch/powerpc/mm/book3s64/radix_tlb.c 	pid = mm->context.id;
mm                592 arch/powerpc/mm/book3s64/radix_tlb.c void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
mm                598 arch/powerpc/mm/book3s64/radix_tlb.c 	pid = mm->context.id;
mm                615 arch/powerpc/mm/book3s64/radix_tlb.c static bool mm_is_singlethreaded(struct mm_struct *mm)
mm                617 arch/powerpc/mm/book3s64/radix_tlb.c 	if (atomic_read(&mm->context.copros) > 0)
mm                619 arch/powerpc/mm/book3s64/radix_tlb.c 	if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm)
mm                624 arch/powerpc/mm/book3s64/radix_tlb.c static bool mm_needs_flush_escalation(struct mm_struct *mm)
mm                631 arch/powerpc/mm/book3s64/radix_tlb.c 	if (atomic_read(&mm->context.copros) > 0)
mm                639 arch/powerpc/mm/book3s64/radix_tlb.c 	struct mm_struct *mm = arg;
mm                640 arch/powerpc/mm/book3s64/radix_tlb.c 	unsigned long pid = mm->context.id;
mm                642 arch/powerpc/mm/book3s64/radix_tlb.c 	if (current->mm == mm)
mm                645 arch/powerpc/mm/book3s64/radix_tlb.c 	if (current->active_mm == mm) {
mm                649 arch/powerpc/mm/book3s64/radix_tlb.c 		BUG_ON(current->mm);
mm                651 arch/powerpc/mm/book3s64/radix_tlb.c 		switch_mm(mm, &init_mm, current);
mm                653 arch/powerpc/mm/book3s64/radix_tlb.c 		mmdrop(mm);
mm                658 arch/powerpc/mm/book3s64/radix_tlb.c static void exit_flush_lazy_tlbs(struct mm_struct *mm)
mm                667 arch/powerpc/mm/book3s64/radix_tlb.c 	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
mm                668 arch/powerpc/mm/book3s64/radix_tlb.c 				(void *)mm, 1);
mm                669 arch/powerpc/mm/book3s64/radix_tlb.c 	mm_reset_thread_local(mm);
mm                672 arch/powerpc/mm/book3s64/radix_tlb.c void radix__flush_tlb_mm(struct mm_struct *mm)
mm                676 arch/powerpc/mm/book3s64/radix_tlb.c 	pid = mm->context.id;
mm                686 arch/powerpc/mm/book3s64/radix_tlb.c 	if (!mm_is_thread_local(mm)) {
mm                687 arch/powerpc/mm/book3s64/radix_tlb.c 		if (unlikely(mm_is_singlethreaded(mm))) {
mm                688 arch/powerpc/mm/book3s64/radix_tlb.c 			exit_flush_lazy_tlbs(mm);
mm                693 arch/powerpc/mm/book3s64/radix_tlb.c 			if (mm_needs_flush_escalation(mm))
mm                698 arch/powerpc/mm/book3s64/radix_tlb.c 			_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
mm                708 arch/powerpc/mm/book3s64/radix_tlb.c static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
mm                712 arch/powerpc/mm/book3s64/radix_tlb.c 	pid = mm->context.id;
mm                718 arch/powerpc/mm/book3s64/radix_tlb.c 	if (!mm_is_thread_local(mm)) {
mm                719 arch/powerpc/mm/book3s64/radix_tlb.c 		if (unlikely(mm_is_singlethreaded(mm))) {
mm                721 arch/powerpc/mm/book3s64/radix_tlb.c 				exit_flush_lazy_tlbs(mm);
mm                728 arch/powerpc/mm/book3s64/radix_tlb.c 			_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL);
mm                735 arch/powerpc/mm/book3s64/radix_tlb.c void radix__flush_all_mm(struct mm_struct *mm)
mm                737 arch/powerpc/mm/book3s64/radix_tlb.c 	__flush_all_mm(mm, false);
mm                747 arch/powerpc/mm/book3s64/radix_tlb.c void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
mm                752 arch/powerpc/mm/book3s64/radix_tlb.c 	pid = mm->context.id;
mm                758 arch/powerpc/mm/book3s64/radix_tlb.c 	if (!mm_is_thread_local(mm)) {
mm                759 arch/powerpc/mm/book3s64/radix_tlb.c 		if (unlikely(mm_is_singlethreaded(mm))) {
mm                760 arch/powerpc/mm/book3s64/radix_tlb.c 			exit_flush_lazy_tlbs(mm);
mm                766 arch/powerpc/mm/book3s64/radix_tlb.c 			_tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB);
mm                834 arch/powerpc/mm/book3s64/radix_tlb.c static inline void __radix__flush_tlb_range(struct mm_struct *mm,
mm                845 arch/powerpc/mm/book3s64/radix_tlb.c 	pid = mm->context.id;
mm                851 arch/powerpc/mm/book3s64/radix_tlb.c 	if (!mm_is_thread_local(mm)) {
mm                852 arch/powerpc/mm/book3s64/radix_tlb.c 		if (unlikely(mm_is_singlethreaded(mm))) {
mm                854 arch/powerpc/mm/book3s64/radix_tlb.c 				exit_flush_lazy_tlbs(mm);
mm                873 arch/powerpc/mm/book3s64/radix_tlb.c 				if (mm_needs_flush_escalation(mm))
mm                878 arch/powerpc/mm/book3s64/radix_tlb.c 				_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
mm                926 arch/powerpc/mm/book3s64/radix_tlb.c 			_tlbiel_va_range_multicast(mm,
mm                929 arch/powerpc/mm/book3s64/radix_tlb.c 				_tlbiel_va_range_multicast(mm,
mm                932 arch/powerpc/mm/book3s64/radix_tlb.c 				_tlbiel_va_range_multicast(mm,
mm               1006 arch/powerpc/mm/book3s64/radix_tlb.c static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
mm               1012 arch/powerpc/mm/book3s64/radix_tlb.c 	struct mm_struct *mm = tlb->mm;
mm               1025 arch/powerpc/mm/book3s64/radix_tlb.c 		__flush_all_mm(mm, true);
mm               1027 arch/powerpc/mm/book3s64/radix_tlb.c 	} else if (mm_tlb_flush_nested(mm)) {
mm               1055 arch/powerpc/mm/book3s64/radix_tlb.c 			__radix__flush_tlb_range(mm, start, end, true);
mm               1057 arch/powerpc/mm/book3s64/radix_tlb.c 			radix__flush_all_mm(mm);
mm               1061 arch/powerpc/mm/book3s64/radix_tlb.c 			radix__flush_tlb_mm(mm);
mm               1063 arch/powerpc/mm/book3s64/radix_tlb.c 			radix__flush_all_mm(mm);
mm               1066 arch/powerpc/mm/book3s64/radix_tlb.c 			radix__flush_tlb_range_psize(mm, start, end, psize);
mm               1068 arch/powerpc/mm/book3s64/radix_tlb.c 			radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
mm               1073 arch/powerpc/mm/book3s64/radix_tlb.c static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
mm               1083 arch/powerpc/mm/book3s64/radix_tlb.c 	pid = mm->context.id;
mm               1089 arch/powerpc/mm/book3s64/radix_tlb.c 	if (!mm_is_thread_local(mm)) {
mm               1090 arch/powerpc/mm/book3s64/radix_tlb.c 		if (unlikely(mm_is_singlethreaded(mm))) {
mm               1092 arch/powerpc/mm/book3s64/radix_tlb.c 				exit_flush_lazy_tlbs(mm);
mm               1111 arch/powerpc/mm/book3s64/radix_tlb.c 				if (mm_needs_flush_escalation(mm))
mm               1117 arch/powerpc/mm/book3s64/radix_tlb.c 				_tlbiel_pid_multicast(mm, pid,
mm               1128 arch/powerpc/mm/book3s64/radix_tlb.c 			_tlbiel_va_range_multicast(mm,
mm               1134 arch/powerpc/mm/book3s64/radix_tlb.c void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
mm               1137 arch/powerpc/mm/book3s64/radix_tlb.c 	return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
mm               1140 arch/powerpc/mm/book3s64/radix_tlb.c static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
mm               1143 arch/powerpc/mm/book3s64/radix_tlb.c 	__radix__flush_tlb_range_psize(mm, start, end, psize, true);
mm               1147 arch/powerpc/mm/book3s64/radix_tlb.c void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
mm               1151 arch/powerpc/mm/book3s64/radix_tlb.c 	pid = mm->context.id;
mm               1157 arch/powerpc/mm/book3s64/radix_tlb.c 		radix__flush_all_mm(mm);
mm               1166 arch/powerpc/mm/book3s64/radix_tlb.c 	if (!mm_is_thread_local(mm)) {
mm               1167 arch/powerpc/mm/book3s64/radix_tlb.c 		if (unlikely(mm_is_singlethreaded(mm))) {
mm               1168 arch/powerpc/mm/book3s64/radix_tlb.c 			exit_flush_lazy_tlbs(mm);
mm               1174 arch/powerpc/mm/book3s64/radix_tlb.c 			_tlbiel_va_range_multicast(mm,
mm               1217 arch/powerpc/mm/book3s64/radix_tlb.c extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
mm               1219 arch/powerpc/mm/book3s64/radix_tlb.c 	unsigned long pid = mm->context.id;
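
The radix_tlb.c block above repeats one decision tree in every flush entry point (radix__flush_tlb_mm, __flush_all_mm, radix__flush_tlb_page_psize, and the range variants): if the mm is thread-local, a cheap tlbiel on this CPU suffices; if it is not, but has gone back to a single user, exit_flush_lazy_tlbs() first kicks lazy-TLB CPUs off the mm and resets it to local; otherwise the flush must go global via broadcast tlbie or multicast IPIs. A skeleton of just that dispatch, under those assumptions:

enum flush_scope { FLUSH_LOCAL, FLUSH_LOCAL_AFTER_EXIT_LAZY, FLUSH_GLOBAL };

static enum flush_scope pick_flush_scope(int thread_local, int singlethreaded)
{
	if (thread_local)
		return FLUSH_LOCAL;                 /* tlbiel on this CPU only */
	if (singlethreaded)
		return FLUSH_LOCAL_AFTER_EXIT_LAZY; /* evict lazy users, then tlbiel */
	return FLUSH_GLOBAL;                        /* tlbie broadcast or IPIs */
}

int main(void)
{
	return pick_flush_scope(0, 1) == FLUSH_LOCAL_AFTER_EXIT_LAZY ? 0 : 1;
}
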
mm                 33 arch/powerpc/mm/book3s64/slb.c static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
mm                318 arch/powerpc/mm/book3s64/slb.c 	struct mm_struct *mm = current->mm;
mm                350 arch/powerpc/mm/book3s64/slb.c 			slb_allocate_user(mm, exec);
mm                354 arch/powerpc/mm/book3s64/slb.c 	if (!is_kernel_addr(mm->mmap_base)) {
mm                355 arch/powerpc/mm/book3s64/slb.c 		if (preload_add(ti, mm->mmap_base))
mm                356 arch/powerpc/mm/book3s64/slb.c 			slb_allocate_user(mm, mm->mmap_base);
mm                368 arch/powerpc/mm/book3s64/slb.c 	struct mm_struct *mm = current->mm;
mm                369 arch/powerpc/mm/book3s64/slb.c 	unsigned long heap = mm->start_brk;
mm                382 arch/powerpc/mm/book3s64/slb.c 			slb_allocate_user(mm, start);
mm                388 arch/powerpc/mm/book3s64/slb.c 			slb_allocate_user(mm, sp);
mm                394 arch/powerpc/mm/book3s64/slb.c 			slb_allocate_user(mm, heap);
mm                405 arch/powerpc/mm/book3s64/slb.c void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
mm                475 arch/powerpc/mm/book3s64/slb.c 	copy_mm_to_paca(mm);
mm                498 arch/powerpc/mm/book3s64/slb.c 		slb_allocate_user(mm, ea);
mm                733 arch/powerpc/mm/book3s64/slb.c static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
mm                744 arch/powerpc/mm/book3s64/slb.c 	if (ea >= mm_ctx_slb_addr_limit(&mm->context))
mm                747 arch/powerpc/mm/book3s64/slb.c 	context = get_user_context(&mm->context, ea);
mm                758 arch/powerpc/mm/book3s64/slb.c 	bpsize = get_slice_psize(mm, ea);
mm                802 arch/powerpc/mm/book3s64/slb.c 		struct mm_struct *mm = current->mm;
mm                805 arch/powerpc/mm/book3s64/slb.c 		if (unlikely(!mm))
mm                808 arch/powerpc/mm/book3s64/slb.c 		err = slb_allocate_user(mm, ea);
mm                 22 arch/powerpc/mm/book3s64/subpage_prot.c void subpage_prot_free(struct mm_struct *mm)
mm                 24 arch/powerpc/mm/book3s64/subpage_prot.c 	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
mm                 53 arch/powerpc/mm/book3s64/subpage_prot.c static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
mm                 62 arch/powerpc/mm/book3s64/subpage_prot.c 	pgd = pgd_offset(mm, addr);
mm                 71 arch/powerpc/mm/book3s64/subpage_prot.c 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
mm                 74 arch/powerpc/mm/book3s64/subpage_prot.c 		pte_update(mm, addr, pte, 0, 0, 0);
mm                 88 arch/powerpc/mm/book3s64/subpage_prot.c 	struct mm_struct *mm = current->mm;
mm                 95 arch/powerpc/mm/book3s64/subpage_prot.c 	down_write(&mm->mmap_sem);
mm                 97 arch/powerpc/mm/book3s64/subpage_prot.c 	spt = mm_ctx_subpage_prot(&mm->context);
mm                126 arch/powerpc/mm/book3s64/subpage_prot.c 		hpte_flush_range(mm, addr, nw);
mm                130 arch/powerpc/mm/book3s64/subpage_prot.c 	up_write(&mm->mmap_sem);
mm                146 arch/powerpc/mm/book3s64/subpage_prot.c static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
mm                155 arch/powerpc/mm/book3s64/subpage_prot.c 	vma = find_vma(mm, addr);
mm                171 arch/powerpc/mm/book3s64/subpage_prot.c static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
mm                191 arch/powerpc/mm/book3s64/subpage_prot.c 	struct mm_struct *mm = current->mm;
mm                204 arch/powerpc/mm/book3s64/subpage_prot.c 	    addr >= mm->task_size || len >= mm->task_size ||
mm                205 arch/powerpc/mm/book3s64/subpage_prot.c 	    addr + len > mm->task_size)
mm                208 arch/powerpc/mm/book3s64/subpage_prot.c 	if (is_hugepage_only_range(mm, addr, len))
mm                220 arch/powerpc/mm/book3s64/subpage_prot.c 	down_write(&mm->mmap_sem);
mm                222 arch/powerpc/mm/book3s64/subpage_prot.c 	spt = mm_ctx_subpage_prot(&mm->context);
mm                233 arch/powerpc/mm/book3s64/subpage_prot.c 		mm->context.hash_context->spt = spt;
mm                236 arch/powerpc/mm/book3s64/subpage_prot.c 	subpage_mark_vma_nohuge(mm, addr, len);
mm                262 arch/powerpc/mm/book3s64/subpage_prot.c 		demote_segment_4k(mm, addr);
mm                270 arch/powerpc/mm/book3s64/subpage_prot.c 		up_write(&mm->mmap_sem);
mm                274 arch/powerpc/mm/book3s64/subpage_prot.c 		down_write(&mm->mmap_sem);
mm                277 arch/powerpc/mm/book3s64/subpage_prot.c 		hpte_flush_range(mm, addr, nw);
mm                283 arch/powerpc/mm/book3s64/subpage_prot.c 	up_write(&mm->mmap_sem);
mm                 23 arch/powerpc/mm/copro_fault.c int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
mm                 30 arch/powerpc/mm/copro_fault.c 	if (mm == NULL)
mm                 33 arch/powerpc/mm/copro_fault.c 	if (mm->pgd == NULL)
mm                 36 arch/powerpc/mm/copro_fault.c 	down_read(&mm->mmap_sem);
mm                 38 arch/powerpc/mm/copro_fault.c 	vma = find_vma(mm, ea);
mm                 85 arch/powerpc/mm/copro_fault.c 	up_read(&mm->mmap_sem);
mm                 90 arch/powerpc/mm/copro_fault.c int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
mm                 98 arch/powerpc/mm/copro_fault.c 		if (mm == NULL)
mm                100 arch/powerpc/mm/copro_fault.c 		psize = get_slice_psize(mm, ea);
mm                102 arch/powerpc/mm/copro_fault.c 		vsid = get_user_vsid(&mm->context, ea, ssize);
mm                146 arch/powerpc/mm/copro_fault.c void copro_flush_all_slbs(struct mm_struct *mm)
mm                149 arch/powerpc/mm/copro_fault.c 	spu_flush_all_slbs(mm);
mm                151 arch/powerpc/mm/copro_fault.c 	cxl_slbia(mm);
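
copro_handle_mm_fault() above shows the canonical fault-service discipline that also appears in arch/powerpc/mm/fault.c below: bail out early on a missing mm, take mmap_sem for read, look up the VMA covering the effective address, and release the semaphore on every exit path. A condensed model of that pairing, with stub types standing in for the kernel's:

#include <pthread.h>
#include <stddef.h>

struct vma { unsigned long start, end; };
struct mm { pthread_rwlock_t mmap_sem; struct vma *vmas; };

static struct vma *find_vma(struct mm *mm, unsigned long ea)
{
	(void)ea;		/* stand-in: the real lookup walks the VMA tree */
	return mm->vmas;
}

/* Every exit path drops mmap_sem exactly once, mirroring the
 * down_read()/up_read() pairing in copro_handle_mm_fault(). */
static int handle_fault_model(struct mm *mm, unsigned long ea)
{
	struct vma *vma;
	int ret = -1;

	if (mm == NULL)
		return -1;
	pthread_rwlock_rdlock(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (vma && ea >= vma->start && ea < vma->end)
		ret = 0;	/* handle_mm_fault() would run here */
	pthread_rwlock_unlock(&mm->mmap_sem);
	return ret;
}

int main(void)
{
	struct vma text = { 0x1000, 0x2000 };
	struct mm mm = { .vmas = &text };

	pthread_rwlock_init(&mm.mmap_sem, NULL);
	return handle_fault_model(&mm, 0x1800);
}
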
mm                105 arch/powerpc/mm/fault.c 	struct mm_struct *mm = current->mm;
mm                111 arch/powerpc/mm/fault.c 	up_read(&mm->mmap_sem);
mm                440 arch/powerpc/mm/fault.c 	struct mm_struct *mm = current->mm;
mm                475 arch/powerpc/mm/fault.c 	if (unlikely(faulthandler_disabled() || !mm)) {
mm                480 arch/powerpc/mm/fault.c 					   faulthandler_disabled(), mm);
mm                492 arch/powerpc/mm/fault.c 					       get_mm_addr_key(mm, address));
mm                521 arch/powerpc/mm/fault.c 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
mm                526 arch/powerpc/mm/fault.c 		down_read(&mm->mmap_sem);
mm                536 arch/powerpc/mm/fault.c 	vma = find_vma(mm, address);
mm                550 arch/powerpc/mm/fault.c 		up_read(&mm->mmap_sem);
mm                582 arch/powerpc/mm/fault.c 		up_read(&mm->mmap_sem);
mm                613 arch/powerpc/mm/fault.c 	up_read(&current->mm->mmap_sem);
mm                 35 arch/powerpc/mm/hugetlbpage.c pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
mm                 41 arch/powerpc/mm/hugetlbpage.c 	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
mm                 44 arch/powerpc/mm/hugetlbpage.c static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
mm                 60 arch/powerpc/mm/hugetlbpage.c 		new = pte_alloc_one(mm);
mm                 73 arch/powerpc/mm/hugetlbpage.c 		new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
mm                107 arch/powerpc/mm/hugetlbpage.c 			pte_free(mm, new);
mm                119 arch/powerpc/mm/hugetlbpage.c pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
mm                130 arch/powerpc/mm/hugetlbpage.c 	pg = pgd_offset(mm, addr);
mm                140 arch/powerpc/mm/hugetlbpage.c 		ptl = &mm->page_table_lock;
mm                144 arch/powerpc/mm/hugetlbpage.c 		pu = pud_alloc(mm, pg, addr);
mm                150 arch/powerpc/mm/hugetlbpage.c 			ptl = pud_lockptr(mm, pu);
mm                154 arch/powerpc/mm/hugetlbpage.c 			pm = pmd_alloc(mm, pu, addr);
mm                161 arch/powerpc/mm/hugetlbpage.c 				ptl = pmd_lockptr(mm, pm);
mm                168 arch/powerpc/mm/hugetlbpage.c 		ptl = &mm->page_table_lock;
mm                172 arch/powerpc/mm/hugetlbpage.c 		pu = pud_alloc(mm, pg, addr);
mm                176 arch/powerpc/mm/hugetlbpage.c 			ptl = pud_lockptr(mm, pu);
mm                180 arch/powerpc/mm/hugetlbpage.c 			pm = pmd_alloc(mm, pu, addr);
mm                183 arch/powerpc/mm/hugetlbpage.c 			ptl = pmd_lockptr(mm, pm);
mm                193 arch/powerpc/mm/hugetlbpage.c 	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
mm                279 arch/powerpc/mm/hugetlbpage.c 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
mm                280 arch/powerpc/mm/hugetlbpage.c 	    mm_is_thread_local(tlb->mm)) {
mm                390 arch/powerpc/mm/hugetlbpage.c 	mm_dec_nr_pmds(tlb->mm);
mm                441 arch/powerpc/mm/hugetlbpage.c 	mm_dec_nr_puds(tlb->mm);
mm                473 arch/powerpc/mm/hugetlbpage.c 		pgd = pgd_offset(tlb->mm, addr);
mm                505 arch/powerpc/mm/hugetlbpage.c 	struct mm_struct *mm = vma->vm_mm;
mm                512 arch/powerpc/mm/hugetlbpage.c 	ptl = &mm->page_table_lock;
mm                525 arch/powerpc/mm/hugetlbpage.c 			__migration_entry_wait(mm, ptep, ptl);
mm                 94 arch/powerpc/mm/mmap.c 	struct mm_struct *mm = current->mm;
mm                115 arch/powerpc/mm/mmap.c 		vma = find_vma(mm, addr);
mm                123 arch/powerpc/mm/mmap.c 	info.low_limit = mm->mmap_base;
mm                138 arch/powerpc/mm/mmap.c 	struct mm_struct *mm = current->mm;
mm                159 arch/powerpc/mm/mmap.c 		vma = find_vma(mm, addr);
mm                168 arch/powerpc/mm/mmap.c 	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
mm                185 arch/powerpc/mm/mmap.c static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
mm                190 arch/powerpc/mm/mmap.c 		mm->mmap_base = TASK_UNMAPPED_BASE;
mm                191 arch/powerpc/mm/mmap.c 		mm->get_unmapped_area = radix__arch_get_unmapped_area;
mm                193 arch/powerpc/mm/mmap.c 		mm->mmap_base = mmap_base(random_factor, rlim_stack);
mm                194 arch/powerpc/mm/mmap.c 		mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
mm                199 arch/powerpc/mm/mmap.c extern void radix__arch_pick_mmap_layout(struct mm_struct *mm,
mm                207 arch/powerpc/mm/mmap.c void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
mm                215 arch/powerpc/mm/mmap.c 		return radix__arch_pick_mmap_layout(mm, random_factor,
mm                222 arch/powerpc/mm/mmap.c 		mm->mmap_base = TASK_UNMAPPED_BASE;
mm                223 arch/powerpc/mm/mmap.c 		mm->get_unmapped_area = arch_get_unmapped_area;
mm                225 arch/powerpc/mm/mmap.c 		mm->mmap_base = mmap_base(random_factor, rlim_stack);
mm                226 arch/powerpc/mm/mmap.c 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
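
arch_pick_mmap_layout() and its radix variant, grepped above, choose between a legacy bottom-up layout rooted at TASK_UNMAPPED_BASE and a randomized top-down layout whose base depends on the stack rlimit. A toy model of that selection; the constants and the mmap_base() arithmetic are simplified placeholders, not the kernel's values:

#include <stdbool.h>
#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x100000000UL	/* assumed value */
#define STACK_TOP          0x7fffffffffffUL	/* assumed value */

struct mm { unsigned long mmap_base; const char *policy; };

/* Simplified stand-in for mmap_base(random_factor, rlim_stack). */
static unsigned long mmap_base_model(unsigned long random_factor,
				     unsigned long stack_limit)
{
	return STACK_TOP - stack_limit - random_factor;
}

static void pick_mmap_layout_model(struct mm *mm, bool legacy,
				   unsigned long random_factor,
				   unsigned long stack_limit)
{
	if (legacy) {
		mm->mmap_base = TASK_UNMAPPED_BASE;	/* bottom-up, fixed base */
		mm->policy = "bottom-up";
	} else {
		mm->mmap_base = mmap_base_model(random_factor, stack_limit);
		mm->policy = "top-down";
	}
}

int main(void)
{
	struct mm m;

	pick_mmap_layout_model(&m, false, 0x10000, 8UL << 20);
	printf("%s base=%#lx\n", m.policy, m.mmap_base);
	return 0;
}
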
mm                 17 arch/powerpc/mm/mmu_context.c 				   struct mm_struct *mm)
mm                 20 arch/powerpc/mm/mmu_context.c 	tsk->thread.pgdir = mm->pgd;
mm                 24 arch/powerpc/mm/mmu_context.c 				   struct mm_struct *mm)
mm                 27 arch/powerpc/mm/mmu_context.c 	get_paca()->pgd = mm->pgd;
mm                 31 arch/powerpc/mm/mmu_context.c 				   struct mm_struct *mm) { }
mm                 97 arch/powerpc/mm/mmu_context.c void arch_exit_mmap(struct mm_struct *mm)
mm                 99 arch/powerpc/mm/mmu_context.c 	void *frag = pte_frag_get(&mm->context);
mm                 94 arch/powerpc/mm/mmu_decl.h void hash_preload(struct mm_struct *mm, unsigned long ea);
mm                132 arch/powerpc/mm/nohash/book3e_hugetlbpage.c 	struct mm_struct *mm;
mm                138 arch/powerpc/mm/nohash/book3e_hugetlbpage.c 	mm = vma->vm_mm;
mm                151 arch/powerpc/mm/nohash/book3e_hugetlbpage.c 	if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
mm                161 arch/powerpc/mm/nohash/book3e_hugetlbpage.c 	mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
mm                111 arch/powerpc/mm/nohash/mmu_context.c 	struct mm_struct *mm;
mm                119 arch/powerpc/mm/nohash/mmu_context.c 		mm = context_mm[id];
mm                124 arch/powerpc/mm/nohash/mmu_context.c 		if (mm->context.active) {
mm                130 arch/powerpc/mm/nohash/mmu_context.c 		pr_hardcont(" | steal %d from 0x%p", id, mm);
mm                133 arch/powerpc/mm/nohash/mmu_context.c 		mm->context.id = MMU_NO_CONTEXT;
mm                140 arch/powerpc/mm/nohash/mmu_context.c 		for_each_cpu(cpu, mm_cpumask(mm)) {
mm                165 arch/powerpc/mm/nohash/mmu_context.c 	struct mm_struct *mm;
mm                173 arch/powerpc/mm/nohash/mmu_context.c 		mm = context_mm[id];
mm                175 arch/powerpc/mm/nohash/mmu_context.c 		pr_hardcont(" | steal %d from 0x%p", id, mm);
mm                178 arch/powerpc/mm/nohash/mmu_context.c 		mm->context.id = MMU_NO_CONTEXT;
mm                183 arch/powerpc/mm/nohash/mmu_context.c 			mm->context.active = 0;
mm                206 arch/powerpc/mm/nohash/mmu_context.c 	struct mm_struct *mm;
mm                212 arch/powerpc/mm/nohash/mmu_context.c 	mm = context_mm[id];
mm                214 arch/powerpc/mm/nohash/mmu_context.c 	pr_hardcont(" | steal %d from 0x%p", id, mm);
mm                217 arch/powerpc/mm/nohash/mmu_context.c 	local_flush_tlb_mm(mm);
mm                220 arch/powerpc/mm/nohash/mmu_context.c 	mm->context.id = MMU_NO_CONTEXT;
mm                367 arch/powerpc/mm/nohash/mmu_context.c int init_new_context(struct task_struct *t, struct mm_struct *mm)
mm                369 arch/powerpc/mm/nohash/mmu_context.c 	pr_hard("initing context for mm @%p\n", mm);
mm                378 arch/powerpc/mm/nohash/mmu_context.c 	if (mm->context.id == 0)
mm                379 arch/powerpc/mm/nohash/mmu_context.c 		slice_init_new_context_exec(mm);
mm                380 arch/powerpc/mm/nohash/mmu_context.c 	mm->context.id = MMU_NO_CONTEXT;
mm                381 arch/powerpc/mm/nohash/mmu_context.c 	mm->context.active = 0;
mm                382 arch/powerpc/mm/nohash/mmu_context.c 	pte_frag_set(&mm->context, NULL);
mm                389 arch/powerpc/mm/nohash/mmu_context.c void destroy_context(struct mm_struct *mm)
mm                394 arch/powerpc/mm/nohash/mmu_context.c 	if (mm->context.id == MMU_NO_CONTEXT)
mm                397 arch/powerpc/mm/nohash/mmu_context.c 	WARN_ON(mm->context.active != 0);
mm                400 arch/powerpc/mm/nohash/mmu_context.c 	id = mm->context.id;
mm                403 arch/powerpc/mm/nohash/mmu_context.c 		mm->context.id = MMU_NO_CONTEXT;
mm                405 arch/powerpc/mm/nohash/mmu_context.c 		mm->context.active = 0;
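
The nohash context-stealing paths above share one shape: pick a victim id, mark the victim mm as owning no context (MMU_NO_CONTEXT), flush whatever TLB entries it held, and reassign the id. A loose userspace model of that hand-off; the table layout and constants here are illustrative only:

#include <stdio.h>

#define MMU_NO_CONTEXT 0	/* assumed sentinel */
#define LAST_CONTEXT   4

struct mm_ctx { int id; };

static struct mm_ctx *context_mm[LAST_CONTEXT + 1];	/* id -> owning mm */

/* The hand-off shared by the steal_context_*() variants above: evict
 * the victim, flush its stale TLB entries, reassign the id. */
static void steal_context_model(int id, struct mm_ctx *new_mm)
{
	struct mm_ctx *victim = context_mm[id];

	if (victim) {
		victim->id = MMU_NO_CONTEXT;
		/* local_flush_tlb_mm(victim) would run here */
	}
	context_mm[id] = new_mm;
	new_mm->id = id;
}

int main(void)
{
	struct mm_ctx a = { MMU_NO_CONTEXT }, b = { MMU_NO_CONTEXT };

	steal_context_model(1, &a);
	steal_context_model(1, &b);	/* a loses its context to b */
	printf("a.id=%d b.id=%d\n", a.id, b.id);
	return 0;
}
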
mm                194 arch/powerpc/mm/nohash/tlb.c void local_flush_tlb_mm(struct mm_struct *mm)
mm                199 arch/powerpc/mm/nohash/tlb.c 	pid = mm->context.id;
mm                206 arch/powerpc/mm/nohash/tlb.c void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
mm                212 arch/powerpc/mm/nohash/tlb.c 	pid = mm ? mm->context.id : 0;
mm                270 arch/powerpc/mm/nohash/tlb.c void flush_tlb_mm(struct mm_struct *mm)
mm                275 arch/powerpc/mm/nohash/tlb.c 	pid = mm->context.id;
mm                278 arch/powerpc/mm/nohash/tlb.c 	if (!mm_is_core_local(mm)) {
mm                281 arch/powerpc/mm/nohash/tlb.c 		smp_call_function_many(mm_cpumask(mm),
mm                290 arch/powerpc/mm/nohash/tlb.c void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
mm                300 arch/powerpc/mm/nohash/tlb.c 	if (WARN_ON(!mm))
mm                304 arch/powerpc/mm/nohash/tlb.c 	pid = mm->context.id;
mm                307 arch/powerpc/mm/nohash/tlb.c 	cpu_mask = mm_cpumask(mm);
mm                308 arch/powerpc/mm/nohash/tlb.c 	if (!mm_is_core_local(mm)) {
mm                395 arch/powerpc/mm/nohash/tlb.c 	flush_tlb_mm(tlb->mm);
mm                423 arch/powerpc/mm/nohash/tlb.c 			__flush_tlb_page(tlb->mm, start, tsize, 1);
mm                433 arch/powerpc/mm/nohash/tlb.c 		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
mm                 33 arch/powerpc/mm/pgtable-frag.c static pte_t *get_pte_from_cache(struct mm_struct *mm)
mm                 40 arch/powerpc/mm/pgtable-frag.c 	spin_lock(&mm->page_table_lock);
mm                 41 arch/powerpc/mm/pgtable-frag.c 	ret = pte_frag_get(&mm->context);
mm                 49 arch/powerpc/mm/pgtable-frag.c 		pte_frag_set(&mm->context, pte_frag);
mm                 51 arch/powerpc/mm/pgtable-frag.c 	spin_unlock(&mm->page_table_lock);
mm                 55 arch/powerpc/mm/pgtable-frag.c static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
mm                 83 arch/powerpc/mm/pgtable-frag.c 	spin_lock(&mm->page_table_lock);
mm                 89 arch/powerpc/mm/pgtable-frag.c 	if (likely(!pte_frag_get(&mm->context))) {
mm                 91 arch/powerpc/mm/pgtable-frag.c 		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
mm                 93 arch/powerpc/mm/pgtable-frag.c 	spin_unlock(&mm->page_table_lock);
mm                 98 arch/powerpc/mm/pgtable-frag.c pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel)
mm                102 arch/powerpc/mm/pgtable-frag.c 	pte = get_pte_from_cache(mm);
mm                106 arch/powerpc/mm/pgtable-frag.c 	return __alloc_for_ptecache(mm, kernel);
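
pgtable-frag.c, per the hits above, carves several PTE tables out of one page: get_pte_from_cache() hands out the cached fragment under page_table_lock, and __alloc_for_ptecache() allocates a fresh page and caches the remainder only if no other thread refilled the cache in the meantime. A sketch of that allocator under the same assumptions; the fragment and page sizes below are illustrative:

#include <pthread.h>
#include <stdlib.h>

#define FRAG_PAGE_SIZE 4096	/* assumed */
#define PTE_FRAG_SIZE  1024	/* assumed: four fragments per page */

struct mm_ctx {
	pthread_mutex_t page_table_lock;
	char *pte_frag;		/* next free fragment, or NULL */
	char *frag_page_end;	/* end of the page currently being carved */
};

/* Mirrors get_pte_from_cache() + __alloc_for_ptecache(): serve from the
 * per-mm fragment cache when possible; otherwise carve a new page and
 * cache the remainder only if nobody refilled the cache meanwhile. */
static void *pte_fragment_alloc_model(struct mm_ctx *mm)
{
	char *ret;

	pthread_mutex_lock(&mm->page_table_lock);
	ret = mm->pte_frag;
	if (ret) {
		mm->pte_frag = ret + PTE_FRAG_SIZE;
		if (mm->pte_frag >= mm->frag_page_end)
			mm->pte_frag = NULL;	/* page exhausted */
		pthread_mutex_unlock(&mm->page_table_lock);
		return ret;
	}
	pthread_mutex_unlock(&mm->page_table_lock);

	ret = aligned_alloc(FRAG_PAGE_SIZE, FRAG_PAGE_SIZE);
	if (!ret)
		return NULL;
	pthread_mutex_lock(&mm->page_table_lock);
	if (!mm->pte_frag) {	/* the likely(!pte_frag_get()) recheck */
		mm->pte_frag = ret + PTE_FRAG_SIZE;
		mm->frag_page_end = ret + FRAG_PAGE_SIZE;
	}
	pthread_mutex_unlock(&mm->page_table_lock);
	return ret;
}

int main(void)
{
	struct mm_ctx mm = { .page_table_lock = PTHREAD_MUTEX_INITIALIZER };

	return pte_fragment_alloc_model(&mm) ? 0 : 1;
}
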
mm                179 arch/powerpc/mm/pgtable.c void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
mm                198 arch/powerpc/mm/pgtable.c 	__set_pte_at(mm, addr, ptep, pte, 0);
mm                265 arch/powerpc/mm/pgtable.c void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
mm                271 arch/powerpc/mm/pgtable.c 	if (mm == &init_mm)
mm                273 arch/powerpc/mm/pgtable.c 	pgd = mm->pgd + pgd_index(addr);
mm                287 arch/powerpc/mm/pgtable.c 	assert_spin_locked(pte_lockptr(mm, pmd));
mm                133 arch/powerpc/mm/pgtable_32.c get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
mm                141 arch/powerpc/mm/pgtable_32.c         pgd = pgd_offset(mm, addr & PAGE_MASK);
mm                 86 arch/powerpc/mm/slice.c static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
mm                 91 arch/powerpc/mm/slice.c 	if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
mm                 93 arch/powerpc/mm/slice.c 	vma = find_vma(mm, addr);
mm                 97 arch/powerpc/mm/slice.c static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
mm                 99 arch/powerpc/mm/slice.c 	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
mm                103 arch/powerpc/mm/slice.c static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
mm                114 arch/powerpc/mm/slice.c 	return !slice_area_is_free(mm, start, end - start);
mm                117 arch/powerpc/mm/slice.c static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
mm                127 arch/powerpc/mm/slice.c 		if (!slice_low_has_vma(mm, i))
mm                134 arch/powerpc/mm/slice.c 		if (!slice_high_has_vma(mm, i))
mm                138 arch/powerpc/mm/slice.c static bool slice_check_range_fits(struct mm_struct *mm,
mm                173 arch/powerpc/mm/slice.c 	struct mm_struct *mm = parm;
mm                176 arch/powerpc/mm/slice.c 	if (mm != current->active_mm)
mm                187 arch/powerpc/mm/slice.c static void slice_convert(struct mm_struct *mm,
mm                197 arch/powerpc/mm/slice.c 	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
mm                200 arch/powerpc/mm/slice.c 	psize_mask = slice_mask_for_size(&mm->context, psize);
mm                207 arch/powerpc/mm/slice.c 	lpsizes = mm_ctx_low_slices(&mm->context);
mm                217 arch/powerpc/mm/slice.c 		old_mask = slice_mask_for_size(&mm->context, old_psize);
mm                226 arch/powerpc/mm/slice.c 	hpsizes = mm_ctx_high_slices(&mm->context);
mm                227 arch/powerpc/mm/slice.c 	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
mm                236 arch/powerpc/mm/slice.c 		old_mask = slice_mask_for_size(&mm->context, old_psize);
mm                246 arch/powerpc/mm/slice.c 		  (unsigned long)mm_ctx_low_slices(&mm->context),
mm                247 arch/powerpc/mm/slice.c 		  (unsigned long)mm_ctx_high_slices(&mm->context));
mm                251 arch/powerpc/mm/slice.c 	copro_flush_all_slbs(mm);
mm                278 arch/powerpc/mm/slice.c static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
mm                324 arch/powerpc/mm/slice.c static unsigned long slice_find_area_topdown(struct mm_struct *mm,
mm                339 arch/powerpc/mm/slice.c 	addr = mm->mmap_base;
mm                347 arch/powerpc/mm/slice.c 		addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;
mm                380 arch/powerpc/mm/slice.c 	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
mm                384 arch/powerpc/mm/slice.c static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
mm                389 arch/powerpc/mm/slice.c 		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
mm                391 arch/powerpc/mm/slice.c 		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
mm                440 arch/powerpc/mm/slice.c 	struct mm_struct *mm = current->mm;
mm                459 arch/powerpc/mm/slice.c 	if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
mm                465 arch/powerpc/mm/slice.c 		mm_ctx_set_slb_addr_limit(&mm->context, high_limit);
mm                467 arch/powerpc/mm/slice.c 		on_each_cpu(slice_flush_segments, mm, 1);
mm                471 arch/powerpc/mm/slice.c 	BUG_ON(mm->task_size == 0);
mm                472 arch/powerpc/mm/slice.c 	BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
mm                475 arch/powerpc/mm/slice.c 	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
mm                485 arch/powerpc/mm/slice.c 		    !slice_area_is_free(mm, addr, len))
mm                492 arch/powerpc/mm/slice.c 	maskp = slice_mask_for_size(&mm->context, psize);
mm                519 arch/powerpc/mm/slice.c 		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
mm                537 arch/powerpc/mm/slice.c 		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
mm                546 arch/powerpc/mm/slice.c 		newaddr = slice_find_area(mm, len, &good_mask,
mm                560 arch/powerpc/mm/slice.c 	slice_mask_for_free(mm, &potential_mask, high_limit);
mm                565 arch/powerpc/mm/slice.c 		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
mm                582 arch/powerpc/mm/slice.c 		newaddr = slice_find_area(mm, len, &good_mask,
mm                593 arch/powerpc/mm/slice.c 	newaddr = slice_find_area(mm, len, &potential_mask,
mm                600 arch/powerpc/mm/slice.c 		newaddr = slice_find_area(mm, len, &potential_mask,
mm                616 arch/powerpc/mm/slice.c 	if (need_extra_context(mm, newaddr)) {
mm                617 arch/powerpc/mm/slice.c 		if (alloc_extended_context(mm, newaddr) < 0)
mm                627 arch/powerpc/mm/slice.c 		slice_convert(mm, &potential_mask, psize);
mm                629 arch/powerpc/mm/slice.c 			on_each_cpu(slice_flush_segments, mm, 1);
mm                634 arch/powerpc/mm/slice.c 	if (need_extra_context(mm, newaddr)) {
mm                635 arch/powerpc/mm/slice.c 		if (alloc_extended_context(mm, newaddr) < 0)
mm                649 arch/powerpc/mm/slice.c 				       mm_ctx_user_psize(&current->mm->context), 0);
mm                659 arch/powerpc/mm/slice.c 				       mm_ctx_user_psize(&current->mm->context), 1);
mm                662 arch/powerpc/mm/slice.c unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
mm                670 arch/powerpc/mm/slice.c 		psizes = mm_ctx_low_slices(&mm->context);
mm                673 arch/powerpc/mm/slice.c 		psizes = mm_ctx_high_slices(&mm->context);
mm                681 arch/powerpc/mm/slice.c void slice_init_new_context_exec(struct mm_struct *mm)
mm                687 arch/powerpc/mm/slice.c 	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);
mm                694 arch/powerpc/mm/slice.c 	mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
mm                695 arch/powerpc/mm/slice.c 	mm_ctx_set_user_psize(&mm->context, psize);
mm                700 arch/powerpc/mm/slice.c 	lpsizes = mm_ctx_low_slices(&mm->context);
mm                703 arch/powerpc/mm/slice.c 	hpsizes = mm_ctx_high_slices(&mm->context);
mm                709 arch/powerpc/mm/slice.c 	mask = slice_mask_for_size(&mm->context, psize);
mm                718 arch/powerpc/mm/slice.c 	struct mm_struct *mm = current->mm;
mm                720 arch/powerpc/mm/slice.c 	slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);
mm                725 arch/powerpc/mm/slice.c 	mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
mm                729 arch/powerpc/mm/slice.c void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
mm                737 arch/powerpc/mm/slice.c 	slice_convert(mm, &mask, psize);
mm                760 arch/powerpc/mm/slice.c int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
mm                764 arch/powerpc/mm/slice.c 	unsigned int psize = mm_ctx_user_psize(&mm->context);
mm                768 arch/powerpc/mm/slice.c 	maskp = slice_mask_for_size(&mm->context, psize);
mm                775 arch/powerpc/mm/slice.c 		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
mm                777 arch/powerpc/mm/slice.c 		return !slice_check_range_fits(mm, &available, addr, len);
mm                780 arch/powerpc/mm/slice.c 	return !slice_check_range_fits(mm, maskp, addr, len);
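
get_slice_psize() above resolves an address to a page-size index through either the low-slice or the high-slice array of the context. The sketch below keeps that two-array structure but simplifies the encoding: the real kernel packs two 4-bit entries per byte, and the shift constants here are illustrative assumptions:

#include <stdio.h>

#define SLICE_LOW_SHIFT  28	/* assumed: 256MB low slices */
#define SLICE_HIGH_SHIFT 40	/* assumed: 1TB high slices */
#define SLICE_LOW_TOP    (1UL << 32)

struct slice_ctx {
	unsigned char low_slices[16];	/* page-size index per low slice */
	unsigned char high_slices[64];	/* covers up to 64TB in this model */
};

/* The shape of get_slice_psize(): low addresses resolve through the
 * low-slice array, everything else through the high-slice array. */
static unsigned int get_slice_psize_model(const struct slice_ctx *ctx,
					  unsigned long addr)
{
	if (addr < SLICE_LOW_TOP)
		return ctx->low_slices[addr >> SLICE_LOW_SHIFT];
	return ctx->high_slices[addr >> SLICE_HIGH_SHIFT];
}

int main(void)
{
	struct slice_ctx ctx = { .low_slices = { [1] = 4 } };

	printf("psize=%u\n", get_slice_psize_model(&ctx, 1UL << SLICE_LOW_SHIFT));
	return 0;
}
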
mm                323 arch/powerpc/oprofile/cell/spu_task_sync.c 	struct mm_struct *mm = spu->mm;
mm                325 arch/powerpc/oprofile/cell/spu_task_sync.c 	if (!mm)
mm                328 arch/powerpc/oprofile/cell/spu_task_sync.c 	exe_file = get_mm_exe_file(mm);
mm                335 arch/powerpc/oprofile/cell/spu_task_sync.c 	down_read(&mm->mmap_sem);
mm                336 arch/powerpc/oprofile/cell/spu_task_sync.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm                352 arch/powerpc/oprofile/cell/spu_task_sync.c 	up_read(&mm->mmap_sem);
mm                358 arch/powerpc/oprofile/cell/spu_task_sync.c 	up_read(&mm->mmap_sem);
mm                122 arch/powerpc/perf/callchain.c 	pgdir = current->mm->pgd;
mm                209 arch/powerpc/perf/callchain.c 	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
mm                210 arch/powerpc/perf/callchain.c 	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
mm                368 arch/powerpc/perf/callchain.c 	if (vdso32_sigtramp && current->mm->context.vdso_base &&
mm                369 arch/powerpc/perf/callchain.c 	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
mm                379 arch/powerpc/perf/callchain.c 	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
mm                380 arch/powerpc/perf/callchain.c 	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
mm                 81 arch/powerpc/platforms/cell/spu_base.c void spu_flush_all_slbs(struct mm_struct *mm)
mm                 88 arch/powerpc/platforms/cell/spu_base.c 		if (spu->mm == mm)
mm                 97 arch/powerpc/platforms/cell/spu_base.c static inline void mm_needs_global_tlbie(struct mm_struct *mm)
mm                102 arch/powerpc/platforms/cell/spu_base.c 	bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
mm                105 arch/powerpc/platforms/cell/spu_base.c void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
mm                110 arch/powerpc/platforms/cell/spu_base.c 	spu->mm = mm;
mm                112 arch/powerpc/platforms/cell/spu_base.c 	if (mm)
mm                113 arch/powerpc/platforms/cell/spu_base.c 		mm_needs_global_tlbie(mm);
mm                156 arch/powerpc/platforms/cell/spu_base.c 	ret = copro_calculate_slb(spu->mm, ea, &slb);
mm                102 arch/powerpc/platforms/cell/spufs/context.c 	struct mm_struct *mm;
mm                113 arch/powerpc/platforms/cell/spufs/context.c 	mm = ctx->owner;
mm                115 arch/powerpc/platforms/cell/spufs/context.c 	mmput(mm);
mm                128 arch/powerpc/platforms/cell/spufs/fault.c 		ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt);
mm                339 arch/powerpc/platforms/cell/spufs/file.c 		up_read(&current->mm->mmap_sem);
mm                343 arch/powerpc/platforms/cell/spufs/file.c 		down_read(&current->mm->mmap_sem);
mm               1456 arch/powerpc/platforms/cell/spufs/file.c 	if (ctx->owner != current->mm)
mm               3122 arch/powerpc/xmon/xmon.c 	struct mm_struct *mm;
mm               3129 arch/powerpc/xmon/xmon.c 		mm = &init_mm;
mm               3134 arch/powerpc/xmon/xmon.c 		mm = &init_mm;
mm               3136 arch/powerpc/xmon/xmon.c 		mm = tsk->active_mm;
mm               3147 arch/powerpc/xmon/xmon.c 	if (mm == &init_mm) {
mm               3151 arch/powerpc/xmon/xmon.c 		pgdp = pgd_offset(mm, addr);
mm               3152 arch/powerpc/xmon/xmon.c 		pgdir = pgd_offset(mm, 0);
mm               4083 arch/powerpc/xmon/xmon.c 	DUMP_FIELD(spu, "0x%p", mm);
mm                 21 arch/riscv/include/asm/cacheflush.h static inline void flush_cache_mm(struct mm_struct *mm)
mm                 25 arch/riscv/include/asm/cacheflush.h static inline void flush_cache_dup_mm(struct mm_struct *mm)
mm                 93 arch/riscv/include/asm/cacheflush.h #define flush_icache_mm(mm, local) flush_icache_all()
mm                 98 arch/riscv/include/asm/cacheflush.h void flush_icache_mm(struct mm_struct *mm, bool local);
mm                 62 arch/riscv/include/asm/elf.h 		(elf_addr_t)current->mm->context.vdso);		\
mm                  8 arch/riscv/include/asm/hugetlb.h static inline int is_hugepage_only_range(struct mm_struct *mm,
mm                 16 arch/riscv/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm,
mm                 23 arch/riscv/include/asm/mmu_context.h 	struct mm_struct *mm)
mm                 28 arch/riscv/include/asm/mmu_context.h static inline void destroy_context(struct mm_struct *mm)
mm                 42 arch/riscv/include/asm/mmu_context.h 	struct mm_struct *mm)
mm                 15 arch/riscv/include/asm/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm,
mm                 23 arch/riscv/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm,
mm                 32 arch/riscv/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
mm                 42 arch/riscv/include/asm/pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 57 arch/riscv/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 64 arch/riscv/include/asm/pgalloc.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                 70 arch/riscv/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                 75 arch/riscv/include/asm/pgalloc.h #define __pmd_free_tlb(tlb, pmd, addr)  pmd_free((tlb)->mm, pmd)
mm                161 arch/riscv/include/asm/pgtable.h static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
mm                163 arch/riscv/include/asm/pgtable.h 	return mm->pgd + pgd_index(addr);
mm                333 arch/riscv/include/asm/pgtable.h static inline void set_pte_at(struct mm_struct *mm,
mm                342 arch/riscv/include/asm/pgtable.h static inline void pte_clear(struct mm_struct *mm,
mm                345 arch/riscv/include/asm/pgtable.h 	set_pte_at(mm, addr, ptep, __pte(0));
mm                363 arch/riscv/include/asm/pgtable.h static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
mm                380 arch/riscv/include/asm/pgtable.h static inline void ptep_set_wrprotect(struct mm_struct *mm,
mm                 18 arch/riscv/include/asm/tlb.h 	flush_tlb_mm(tlb->mm);
mm                 26 arch/riscv/include/asm/tlbflush.h void flush_tlb_mm(struct mm_struct *mm);
mm                 40 arch/riscv/include/asm/tlbflush.h #define flush_tlb_mm(mm) flush_tlb_all()
mm                193 arch/riscv/kernel/signal.c 		current->mm->context.vdso, rt_sigreturn);
mm                138 arch/riscv/kernel/smpboot.c 	struct mm_struct *mm = &init_mm;
mm                141 arch/riscv/kernel/smpboot.c 	mmgrab(mm);
mm                142 arch/riscv/kernel/smpboot.c 	current->active_mm = mm;
mm                 64 arch/riscv/kernel/sys_riscv.c 	flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);
mm                 58 arch/riscv/kernel/vdso.c 	struct mm_struct *mm = current->mm;
mm                 64 arch/riscv/kernel/vdso.c 	down_write(&mm->mmap_sem);
mm                 76 arch/riscv/kernel/vdso.c 	mm->context.vdso = (void *)vdso_base;
mm                 78 arch/riscv/kernel/vdso.c 	ret = install_special_mapping(mm, vdso_base, vdso_len,
mm                 83 arch/riscv/kernel/vdso.c 		mm->context.vdso = NULL;
mm                 86 arch/riscv/kernel/vdso.c 	up_write(&mm->mmap_sem);
mm                 29 arch/riscv/mm/cacheflush.c void flush_icache_mm(struct mm_struct *mm, bool local)
mm                 37 arch/riscv/mm/cacheflush.c 	mask = &mm->context.icache_stale_mask;
mm                 48 arch/riscv/mm/cacheflush.c 	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
mm                 50 arch/riscv/mm/cacheflush.c 	if (mm != current->active_mm || !local) {
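
flush_icache_mm() above flushes the local icache immediately and records other harts in icache_stale_mask so their flush can be deferred; flush_icache_deferred() in context.c, grepped below, consumes that bit on switch-in. A simplified bitmask model of the defer/consume pair (the real code also sends remote fences to harts currently running the mm):

#include <stdio.h>

typedef unsigned int hartmask_t;	/* toy stand-in for cpumask_t */

struct mm_ctx { hartmask_t icache_stale_mask; };

/* Deferring half of flush_icache_mm(): flush locally now, mark every
 * other hart in the mm stale so it flushes before running user code. */
static void flush_icache_mm_model(struct mm_ctx *ctx, hartmask_t mm_harts,
				  unsigned int this_hart)
{
	/* local fence.i would execute here */
	ctx->icache_stale_mask |= mm_harts & ~(1u << this_hart);
}

/* Consuming half, as in flush_icache_deferred(): clear our stale bit
 * and fence on the way back into the mm. */
static int maybe_flush_on_switch_model(struct mm_ctx *ctx, unsigned int hart)
{
	if (ctx->icache_stale_mask & (1u << hart)) {
		ctx->icache_stale_mask &= ~(1u << hart);
		return 1;	/* fence.i */
	}
	return 0;
}

int main(void)
{
	struct mm_ctx ctx = { 0 };
	int first, again;

	flush_icache_mm_model(&ctx, 0xF, 0);	/* harts 0-3 run the mm */
	first = maybe_flush_on_switch_model(&ctx, 1);
	again = maybe_flush_on_switch_model(&ctx, 1);
	printf("hart1 flush=%d, again=%d\n", first, again);
	return 0;
}
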
mm                 24 arch/riscv/mm/context.c static inline void flush_icache_deferred(struct mm_struct *mm)
mm                 28 arch/riscv/mm/context.c 	cpumask_t *mask = &mm->context.icache_stale_mask;
mm                 31 arch/riscv/mm/fault.c 	struct mm_struct *mm;
mm                 41 arch/riscv/mm/fault.c 	mm = tsk->mm;
mm                 63 arch/riscv/mm/fault.c 	if (unlikely(faulthandler_disabled() || !mm))
mm                 72 arch/riscv/mm/fault.c 	down_read(&mm->mmap_sem);
mm                 73 arch/riscv/mm/fault.c 	vma = find_vma(mm, addr);
mm                163 arch/riscv/mm/fault.c 	up_read(&mm->mmap_sem);
mm                171 arch/riscv/mm/fault.c 	up_read(&mm->mmap_sem);
mm                199 arch/riscv/mm/fault.c 	up_read(&mm->mmap_sem);
mm                206 arch/riscv/mm/fault.c 	up_read(&mm->mmap_sem);
mm                 21 arch/riscv/mm/tlbflush.c void flush_tlb_mm(struct mm_struct *mm)
mm                 23 arch/riscv/mm/tlbflush.c 	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
mm                176 arch/s390/include/asm/elf.h 	    !current->mm->context.alloc_pgste) {		\
mm                276 arch/s390/include/asm/elf.h 			    (unsigned long)current->mm->context.vdso_base); \
mm                 47 arch/s390/include/asm/gmap.h 	struct mm_struct *mm;
mm                101 arch/s390/include/asm/gmap.h struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit);
mm                 18 arch/s390/include/asm/hugetlb.h void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
mm                 21 arch/s390/include/asm/hugetlb.h pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
mm                 24 arch/s390/include/asm/hugetlb.h static inline bool is_hugepage_only_range(struct mm_struct *mm,
mm                 50 arch/s390/include/asm/hugetlb.h static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
mm                 77 arch/s390/include/asm/hugetlb.h static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
mm                 80 arch/s390/include/asm/hugetlb.h 	pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
mm                 81 arch/s390/include/asm/hugetlb.h 	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
mm                 19 arch/s390/include/asm/mmu_context.h 				   struct mm_struct *mm)
mm                 21 arch/s390/include/asm/mmu_context.h 	spin_lock_init(&mm->context.lock);
mm                 22 arch/s390/include/asm/mmu_context.h 	INIT_LIST_HEAD(&mm->context.pgtable_list);
mm                 23 arch/s390/include/asm/mmu_context.h 	INIT_LIST_HEAD(&mm->context.gmap_list);
mm                 24 arch/s390/include/asm/mmu_context.h 	cpumask_clear(&mm->context.cpu_attach_mask);
mm                 25 arch/s390/include/asm/mmu_context.h 	atomic_set(&mm->context.flush_count, 0);
mm                 26 arch/s390/include/asm/mmu_context.h 	mm->context.gmap_asce = 0;
mm                 27 arch/s390/include/asm/mmu_context.h 	mm->context.flush_mm = 0;
mm                 28 arch/s390/include/asm/mmu_context.h 	mm->context.compat_mm = test_thread_flag(TIF_31BIT);
mm                 30 arch/s390/include/asm/mmu_context.h 	mm->context.alloc_pgste = page_table_allocate_pgste ||
mm                 32 arch/s390/include/asm/mmu_context.h 		(current->mm && current->mm->context.alloc_pgste);
mm                 33 arch/s390/include/asm/mmu_context.h 	mm->context.has_pgste = 0;
mm                 34 arch/s390/include/asm/mmu_context.h 	mm->context.uses_skeys = 0;
mm                 35 arch/s390/include/asm/mmu_context.h 	mm->context.uses_cmm = 0;
mm                 36 arch/s390/include/asm/mmu_context.h 	mm->context.allow_gmap_hpage_1m = 0;
mm                 38 arch/s390/include/asm/mmu_context.h 	switch (mm->context.asce_limit) {
mm                 46 arch/s390/include/asm/mmu_context.h 		mm->context.asce_limit = STACK_TOP_MAX;
mm                 47 arch/s390/include/asm/mmu_context.h 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
mm                 52 arch/s390/include/asm/mmu_context.h 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
mm                 57 arch/s390/include/asm/mmu_context.h 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
mm                 62 arch/s390/include/asm/mmu_context.h 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
mm                 65 arch/s390/include/asm/mmu_context.h 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
mm                 69 arch/s390/include/asm/mmu_context.h #define destroy_context(mm)             do { } while (0)
mm                 71 arch/s390/include/asm/mmu_context.h static inline void set_user_asce(struct mm_struct *mm)
mm                 73 arch/s390/include/asm/mmu_context.h 	S390_lowcore.user_asce = mm->context.asce;
mm                112 arch/s390/include/asm/mmu_context.h 	struct mm_struct *mm = tsk->mm;
mm                114 arch/s390/include/asm/mmu_context.h 	if (mm) {
mm                116 arch/s390/include/asm/mmu_context.h 		while (atomic_read(&mm->context.flush_count))
mm                118 arch/s390/include/asm/mmu_context.h 		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
mm                119 arch/s390/include/asm/mmu_context.h 		__tlb_flush_mm_lazy(mm);
mm                125 arch/s390/include/asm/mmu_context.h #define enter_lazy_tlb(mm,tsk)	do { } while (0)
mm                126 arch/s390/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
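
The init_new_context() switch above sizes the ASCE to the mm's asce_limit: a 2GB space gets a plain segment table, and progressively larger limits add region-3, region-2 and region-1 table levels. A sketch of that mapping, using the conventional s390 region sizes (2GB, 4TB, 8PB) as assumed values:

#include <stdio.h>

/* Assumed stand-ins for _REGION3_SIZE/_REGION2_SIZE/_REGION1_SIZE. */
#define REGION3_SIZE (1UL << 31)	/* 2GB */
#define REGION2_SIZE (1UL << 42)	/* 4TB */
#define REGION1_SIZE (1UL << 53)	/* 8PB */

/* The shape of the init_new_context() switch: the address-space limit
 * decides how many region-table levels the ASCE advertises. */
static const char *asce_type_model(unsigned long asce_limit)
{
	if (asce_limit <= REGION3_SIZE)
		return "segment table (2GB address space)";
	if (asce_limit <= REGION2_SIZE)
		return "region-3 table (4TB)";
	if (asce_limit <= REGION1_SIZE)
		return "region-2 table (8PB)";
	return "region-1 table (full 64-bit)";
}

int main(void)
{
	printf("%s\n", asce_type_model(1UL << 42));
	return 0;
}
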
mm                 26 arch/s390/include/asm/pgalloc.h struct page *page_table_alloc_pgste(struct mm_struct *mm);
mm                 37 arch/s390/include/asm/pgalloc.h static inline unsigned long pgd_entry_type(struct mm_struct *mm)
mm                 39 arch/s390/include/asm/pgalloc.h 	if (mm_pmd_folded(mm))
mm                 41 arch/s390/include/asm/pgalloc.h 	if (mm_pud_folded(mm))
mm                 43 arch/s390/include/asm/pgalloc.h 	if (mm_p4d_folded(mm))
mm                 48 arch/s390/include/asm/pgalloc.h int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
mm                 51 arch/s390/include/asm/pgalloc.h static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
mm                 53 arch/s390/include/asm/pgalloc.h 	unsigned long *table = crst_table_alloc(mm);
mm                 60 arch/s390/include/asm/pgalloc.h static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
mm                 62 arch/s390/include/asm/pgalloc.h 	if (!mm_p4d_folded(mm))
mm                 63 arch/s390/include/asm/pgalloc.h 		crst_table_free(mm, (unsigned long *) p4d);
mm                 66 arch/s390/include/asm/pgalloc.h static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
mm                 68 arch/s390/include/asm/pgalloc.h 	unsigned long *table = crst_table_alloc(mm);
mm                 74 arch/s390/include/asm/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
mm                 76 arch/s390/include/asm/pgalloc.h 	if (!mm_pud_folded(mm))
mm                 77 arch/s390/include/asm/pgalloc.h 		crst_table_free(mm, (unsigned long *) pud);
mm                 80 arch/s390/include/asm/pgalloc.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
mm                 82 arch/s390/include/asm/pgalloc.h 	unsigned long *table = crst_table_alloc(mm);
mm                 88 arch/s390/include/asm/pgalloc.h 		crst_table_free(mm, table);
mm                 94 arch/s390/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                 96 arch/s390/include/asm/pgalloc.h 	if (mm_pmd_folded(mm))
mm                 99 arch/s390/include/asm/pgalloc.h 	crst_table_free(mm, (unsigned long *) pmd);
mm                102 arch/s390/include/asm/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
mm                107 arch/s390/include/asm/pgalloc.h static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
mm                112 arch/s390/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
mm                117 arch/s390/include/asm/pgalloc.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                119 arch/s390/include/asm/pgalloc.h 	unsigned long *table = crst_table_alloc(mm);
mm                123 arch/s390/include/asm/pgalloc.h 	if (mm->context.asce_limit == _REGION3_SIZE) {
mm                126 arch/s390/include/asm/pgalloc.h 			crst_table_free(mm, table);
mm                133 arch/s390/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                135 arch/s390/include/asm/pgalloc.h 	if (mm->context.asce_limit == _REGION3_SIZE)
mm                137 arch/s390/include/asm/pgalloc.h 	crst_table_free(mm, (unsigned long *) pgd);
mm                140 arch/s390/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm,
mm                146 arch/s390/include/asm/pgalloc.h #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
mm                154 arch/s390/include/asm/pgalloc.h #define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
mm                155 arch/s390/include/asm/pgalloc.h #define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))
mm                157 arch/s390/include/asm/pgalloc.h #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
mm                158 arch/s390/include/asm/pgalloc.h #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
mm                498 arch/s390/include/asm/pgtable.h static inline bool mm_p4d_folded(struct mm_struct *mm)
mm                500 arch/s390/include/asm/pgtable.h 	return mm->context.asce_limit <= _REGION1_SIZE;
mm                502 arch/s390/include/asm/pgtable.h #define mm_p4d_folded(mm) mm_p4d_folded(mm)
mm                504 arch/s390/include/asm/pgtable.h static inline bool mm_pud_folded(struct mm_struct *mm)
mm                506 arch/s390/include/asm/pgtable.h 	return mm->context.asce_limit <= _REGION2_SIZE;
mm                508 arch/s390/include/asm/pgtable.h #define mm_pud_folded(mm) mm_pud_folded(mm)
mm                510 arch/s390/include/asm/pgtable.h static inline bool mm_pmd_folded(struct mm_struct *mm)
mm                512 arch/s390/include/asm/pgtable.h 	return mm->context.asce_limit <= _REGION3_SIZE;
mm                514 arch/s390/include/asm/pgtable.h #define mm_pmd_folded(mm) mm_pmd_folded(mm)
mm                516 arch/s390/include/asm/pgtable.h static inline int mm_has_pgste(struct mm_struct *mm)
mm                519 arch/s390/include/asm/pgtable.h 	if (unlikely(mm->context.has_pgste))
mm                525 arch/s390/include/asm/pgtable.h static inline int mm_alloc_pgste(struct mm_struct *mm)
mm                528 arch/s390/include/asm/pgtable.h 	if (unlikely(mm->context.alloc_pgste))
mm                539 arch/s390/include/asm/pgtable.h static inline int mm_uses_skeys(struct mm_struct *mm)
mm                542 arch/s390/include/asm/pgtable.h 	if (mm->context.uses_skeys)
mm                913 arch/s390/include/asm/pgtable.h static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm               1077 arch/s390/include/asm/pgtable.h static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
mm               1080 arch/s390/include/asm/pgtable.h 	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
mm               1103 arch/s390/include/asm/pgtable.h static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
mm               1112 arch/s390/include/asm/pgtable.h 	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
mm               1116 arch/s390/include/asm/pgtable.h static inline void ptep_set_wrprotect(struct mm_struct *mm,
mm               1122 arch/s390/include/asm/pgtable.h 		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
mm               1139 arch/s390/include/asm/pgtable.h void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
mm               1141 arch/s390/include/asm/pgtable.h void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
mm               1142 arch/s390/include/asm/pgtable.h void ptep_notify(struct mm_struct *mm, unsigned long addr,
mm               1144 arch/s390/include/asm/pgtable.h int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
mm               1146 arch/s390/include/asm/pgtable.h void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
mm               1148 arch/s390/include/asm/pgtable.h void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
mm               1149 arch/s390/include/asm/pgtable.h int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
mm               1151 arch/s390/include/asm/pgtable.h void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
mm               1153 arch/s390/include/asm/pgtable.h bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
mm               1155 arch/s390/include/asm/pgtable.h int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
mm               1157 arch/s390/include/asm/pgtable.h int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
mm               1160 arch/s390/include/asm/pgtable.h int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
mm               1161 arch/s390/include/asm/pgtable.h int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
mm               1164 arch/s390/include/asm/pgtable.h int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
mm               1166 arch/s390/include/asm/pgtable.h int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
mm               1167 arch/s390/include/asm/pgtable.h int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
mm               1169 arch/s390/include/asm/pgtable.h void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
mm               1170 arch/s390/include/asm/pgtable.h void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
mm               1171 arch/s390/include/asm/pgtable.h void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
mm               1172 arch/s390/include/asm/pgtable.h void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
mm               1179 arch/s390/include/asm/pgtable.h static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
mm               1184 arch/s390/include/asm/pgtable.h 	if (mm_has_pgste(mm))
mm               1185 arch/s390/include/asm/pgtable.h 		ptep_set_pte_at(mm, addr, ptep, entry);
mm               1247 arch/s390/include/asm/pgtable.h #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
mm               1283 arch/s390/include/asm/pgtable.h 	return end <= current->mm->context.asce_limit;
mm               1505 arch/s390/include/asm/pgtable.h void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm               1509 arch/s390/include/asm/pgtable.h pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
mm               1545 arch/s390/include/asm/pgtable.h static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm               1562 arch/s390/include/asm/pgtable.h static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
mm               1565 arch/s390/include/asm/pgtable.h 	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
mm               1569 arch/s390/include/asm/pgtable.h static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
mm               1578 arch/s390/include/asm/pgtable.h 	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
mm               1598 arch/s390/include/asm/pgtable.h static inline void pmdp_set_wrprotect(struct mm_struct *mm,
mm               1604 arch/s390/include/asm/pgtable.h 		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
mm               1686 arch/s390/include/asm/pgtable.h extern void s390_reset_cmma(struct mm_struct *mm);
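
The mm_p4d_folded()/mm_pud_folded()/mm_pmd_folded() hits above are the flip side of that ASCE sizing: a paging level is folded away whenever asce_limit never grew past the region size that level would translate. Modeled with the same assumed region sizes; this is also why the pgalloc.h helpers above only free a p4d/pud/pmd table when the corresponding level is not folded:

#include <stdbool.h>
#include <stdio.h>

#define REGION3_SIZE (1UL << 31)	/* 2GB, assumed */
#define REGION2_SIZE (1UL << 42)	/* 4TB, assumed */
#define REGION1_SIZE (1UL << 53)	/* 8PB, assumed */

struct mm_ctx { unsigned long asce_limit; };

/* Matching the predicates grepped above: a level is folded whenever the
 * address space never outgrew the region that level would translate. */
static bool p4d_folded(const struct mm_ctx *c) { return c->asce_limit <= REGION1_SIZE; }
static bool pud_folded(const struct mm_ctx *c) { return c->asce_limit <= REGION2_SIZE; }
static bool pmd_folded(const struct mm_ctx *c) { return c->asce_limit <= REGION3_SIZE; }

int main(void)
{
	struct mm_ctx c = { .asce_limit = 1UL << 42 };	/* 4TB default */

	printf("p4d=%d pud=%d pmd=%d\n",
	       p4d_folded(&c), pud_folded(&c), pmd_folded(&c));
	return 0;
}
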
mm                182 arch/s390/include/asm/processor.h 	crst_table_downgrade(current->mm);				\
mm                 57 arch/s390/include/asm/tlb.h 	__tlb_flush_mm_lazy(tlb->mm);
mm                 68 arch/s390/include/asm/tlb.h 	tlb->mm->context.flush_mm = 1;
mm                 89 arch/s390/include/asm/tlb.h 	if (mm_pmd_folded(tlb->mm))
mm                 93 arch/s390/include/asm/tlb.h 	tlb->mm->context.flush_mm = 1;
mm                109 arch/s390/include/asm/tlb.h 	if (mm_p4d_folded(tlb->mm))
mm                112 arch/s390/include/asm/tlb.h 	tlb->mm->context.flush_mm = 1;
mm                128 arch/s390/include/asm/tlb.h 	if (mm_pud_folded(tlb->mm))
mm                130 arch/s390/include/asm/tlb.h 	tlb->mm->context.flush_mm = 1;
mm                 51 arch/s390/include/asm/tlbflush.h static inline void __tlb_flush_mm(struct mm_struct *mm)
mm                 61 arch/s390/include/asm/tlbflush.h 	atomic_inc(&mm->context.flush_count);
mm                 63 arch/s390/include/asm/tlbflush.h 	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
mm                 65 arch/s390/include/asm/tlbflush.h 	gmap_asce = READ_ONCE(mm->context.gmap_asce);
mm                 69 arch/s390/include/asm/tlbflush.h 		__tlb_flush_idte(mm->context.asce);
mm                 74 arch/s390/include/asm/tlbflush.h 	atomic_dec(&mm->context.flush_count);
mm                 86 arch/s390/include/asm/tlbflush.h static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
mm                 88 arch/s390/include/asm/tlbflush.h 	spin_lock(&mm->context.lock);
mm                 89 arch/s390/include/asm/tlbflush.h 	if (mm->context.flush_mm) {
mm                 90 arch/s390/include/asm/tlbflush.h 		mm->context.flush_mm = 0;
mm                 91 arch/s390/include/asm/tlbflush.h 		__tlb_flush_mm(mm);
mm                 93 arch/s390/include/asm/tlbflush.h 	spin_unlock(&mm->context.lock);
mm                118 arch/s390/include/asm/tlbflush.h static inline void flush_tlb_mm(struct mm_struct *mm)
mm                120 arch/s390/include/asm/tlbflush.h 	__tlb_flush_mm_lazy(mm);
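
__tlb_flush_mm_lazy() above batches flushes: callers merely set context.flush_mm, and the expensive flush itself runs at most once per pending period, serialized by context.lock. A compact model of that test-and-clear pattern:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mm_ctx {
	pthread_mutex_t lock;
	bool flush_mm;		/* "a flush is pending" */
	int flushes_issued;
};

/* The test-and-clear shape of __tlb_flush_mm_lazy(): many paths may set
 * flush_mm, but the flush runs at most once until it is set again. */
static void tlb_flush_mm_lazy_model(struct mm_ctx *mm)
{
	pthread_mutex_lock(&mm->lock);
	if (mm->flush_mm) {
		mm->flush_mm = false;
		mm->flushes_issued++;	/* __tlb_flush_mm(mm) would run here */
	}
	pthread_mutex_unlock(&mm->lock);
}

int main(void)
{
	struct mm_ctx mm = { .lock = PTHREAD_MUTEX_INITIALIZER, .flush_mm = true };

	tlb_flush_mm_lazy_model(&mm);
	tlb_flush_mm_lazy_model(&mm);	/* second call is a no-op */
	printf("flushes=%d\n", mm.flushes_issued);
	return 0;
}
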
mm                226 arch/s390/kernel/process.c unsigned long arch_randomize_brk(struct mm_struct *mm)
mm                230 arch/s390/kernel/process.c 	ret = PAGE_ALIGN(mm->brk + brk_rnd());
mm                231 arch/s390/kernel/process.c 	return (ret > mm->brk) ? ret : mm->brk;
mm                 90 arch/s390/kernel/processor.c 	BUG_ON(current->mm);
mm                 23 arch/s390/kernel/uprobes.c int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
mm                 87 arch/s390/kernel/vdso.c 	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
mm                 90 arch/s390/kernel/vdso.c 	current->mm->context.vdso_base = vma->vm_start;
mm                203 arch/s390/kernel/vdso.c 	struct mm_struct *mm = current->mm;
mm                214 arch/s390/kernel/vdso.c 	mm->context.compat_mm = is_compat_task();
mm                215 arch/s390/kernel/vdso.c 	if (mm->context.compat_mm)
mm                230 arch/s390/kernel/vdso.c 	if (down_write_killable(&mm->mmap_sem))
mm                248 arch/s390/kernel/vdso.c 	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
mm                257 arch/s390/kernel/vdso.c 	current->mm->context.vdso_base = vdso_base;
mm                261 arch/s390/kernel/vdso.c 	up_write(&mm->mmap_sem);
mm               1173 arch/s390/kvm/gaccess.c 	down_read(&sg->mm->mmap_sem);
mm               1202 arch/s390/kvm/gaccess.c 	up_read(&sg->mm->mmap_sem);
mm                759 arch/s390/kvm/kvm-s390.c 			down_write(&kvm->mm->mmap_sem);
mm                760 arch/s390/kvm/kvm-s390.c 			kvm->mm->context.allow_gmap_hpage_1m = 1;
mm                761 arch/s390/kvm/kvm-s390.c 			up_write(&kvm->mm->mmap_sem);
mm                825 arch/s390/kvm/kvm-s390.c 		else if (kvm->mm->context.allow_gmap_hpage_1m)
mm                846 arch/s390/kvm/kvm-s390.c 		s390_reset_cmma(kvm->arch.gmap->mm);
mm                875 arch/s390/kvm/kvm-s390.c 			struct gmap *new = gmap_create(current->mm, new_limit);
mm               1800 arch/s390/kvm/kvm-s390.c 	if (!mm_uses_skeys(current->mm))
mm               1811 arch/s390/kvm/kvm-s390.c 	down_read(&current->mm->mmap_sem);
mm               1820 arch/s390/kvm/kvm-s390.c 		r = get_guest_storage_key(current->mm, hva, &keys[i]);
mm               1825 arch/s390/kvm/kvm-s390.c 	up_read(&current->mm->mmap_sem);
mm               1869 arch/s390/kvm/kvm-s390.c 	down_read(&current->mm->mmap_sem);
mm               1885 arch/s390/kvm/kvm-s390.c 		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
mm               1887 arch/s390/kvm/kvm-s390.c 			r = fixup_user_fault(current, current->mm, hva,
mm               1896 arch/s390/kvm/kvm-s390.c 	up_read(&current->mm->mmap_sem);
mm               1960 arch/s390/kvm/kvm-s390.c 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
mm               2017 arch/s390/kvm/kvm-s390.c 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
mm               2068 arch/s390/kvm/kvm-s390.c 	if (!bufsize || !kvm->mm->context.uses_cmm) {
mm               2082 arch/s390/kvm/kvm-s390.c 	down_read(&kvm->mm->mmap_sem);
mm               2089 arch/s390/kvm/kvm-s390.c 	up_read(&kvm->mm->mmap_sem);
mm               2139 arch/s390/kvm/kvm-s390.c 	down_read(&kvm->mm->mmap_sem);
mm               2151 arch/s390/kvm/kvm-s390.c 		set_pgste_bits(kvm->mm, hva, mask, pgstev);
mm               2154 arch/s390/kvm/kvm-s390.c 	up_read(&kvm->mm->mmap_sem);
mm               2156 arch/s390/kvm/kvm-s390.c 	if (!kvm->mm->context.uses_cmm) {
mm               2157 arch/s390/kvm/kvm-s390.c 		down_write(&kvm->mm->mmap_sem);
mm               2158 arch/s390/kvm/kvm-s390.c 		kvm->mm->context.uses_cmm = 1;
mm               2159 arch/s390/kvm/kvm-s390.c 		up_write(&kvm->mm->mmap_sem);
mm               2501 arch/s390/kvm/kvm-s390.c 		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
mm               2578 arch/s390/kvm/kvm-s390.c 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
mm               3559 arch/s390/kvm/kvm-s390.c 		    (vcpu->kvm->mm->context.uses_cmm))
mm                273 arch/s390/kvm/priv.c 	down_read(&current->mm->mmap_sem);
mm                274 arch/s390/kvm/priv.c 	rc = get_guest_storage_key(current->mm, vmaddr, &key);
mm                277 arch/s390/kvm/priv.c 		rc = fixup_user_fault(current, current->mm, vmaddr,
mm                280 arch/s390/kvm/priv.c 			up_read(&current->mm->mmap_sem);
mm                284 arch/s390/kvm/priv.c 	up_read(&current->mm->mmap_sem);
mm                320 arch/s390/kvm/priv.c 	down_read(&current->mm->mmap_sem);
mm                321 arch/s390/kvm/priv.c 	rc = reset_guest_reference_bit(current->mm, vmaddr);
mm                323 arch/s390/kvm/priv.c 		rc = fixup_user_fault(current, current->mm, vmaddr,
mm                326 arch/s390/kvm/priv.c 			up_read(&current->mm->mmap_sem);
mm                330 arch/s390/kvm/priv.c 	up_read(&current->mm->mmap_sem);
mm                388 arch/s390/kvm/priv.c 		down_read(&current->mm->mmap_sem);
mm                389 arch/s390/kvm/priv.c 		rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
mm                394 arch/s390/kvm/priv.c 			rc = fixup_user_fault(current, current->mm, vmaddr,
mm                398 arch/s390/kvm/priv.c 		up_read(&current->mm->mmap_sem);
mm               1089 arch/s390/kvm/priv.c 			down_read(&current->mm->mmap_sem);
mm               1090 arch/s390/kvm/priv.c 			rc = cond_set_guest_storage_key(current->mm, vmaddr,
mm               1093 arch/s390/kvm/priv.c 				rc = fixup_user_fault(current, current->mm, vmaddr,
mm               1097 arch/s390/kvm/priv.c 			up_read(&current->mm->mmap_sem);
mm               1141 arch/s390/kvm/priv.c 	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
mm               1217 arch/s390/kvm/priv.c 		if (vcpu->kvm->mm->context.uses_cmm == 0) {
mm               1218 arch/s390/kvm/priv.c 			down_write(&vcpu->kvm->mm->mmap_sem);
mm               1219 arch/s390/kvm/priv.c 			vcpu->kvm->mm->context.uses_cmm = 1;
mm               1220 arch/s390/kvm/priv.c 			up_write(&vcpu->kvm->mm->mmap_sem);
mm               1237 arch/s390/kvm/priv.c 		down_read(&vcpu->kvm->mm->mmap_sem);
mm               1241 arch/s390/kvm/priv.c 		up_read(&vcpu->kvm->mm->mmap_sem);
mm               1249 arch/s390/kvm/priv.c 	down_read(&gmap->mm->mmap_sem);
mm               1252 arch/s390/kvm/priv.c 	up_read(&gmap->mm->mmap_sem);
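
Note: both kvm-s390.c and priv.c above flip mm->context.uses_cmm exactly once, taking mmap_sem for writing only for that transition (the ESSA interception at priv.c:1217 and the migration path at kvm-s390.c:2156). A sketch of the check-then-lock idiom, assuming nothing beyond the fields quoted above:

	/* Sketch: enable CMM handling once per mm; later calls are no-ops. */
	static void enable_cmm_once(struct mm_struct *mm)
	{
		if (mm->context.uses_cmm)	/* cheap unlocked fast path */
			return;
		down_write(&mm->mmap_sem);
		mm->context.uses_cmm = 1;	/* idempotent under the lock */
		up_write(&mm->mmap_sem);
	}
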
mm                389 arch/s390/mm/fault.c 	struct mm_struct *mm;
mm                407 arch/s390/mm/fault.c 	mm = tsk->mm;
mm                425 arch/s390/mm/fault.c 		if (faulthandler_disabled() || !mm)
mm                437 arch/s390/mm/fault.c 	down_read(&mm->mmap_sem);
mm                456 arch/s390/mm/fault.c 	vma = find_vma(mm, address);
mm                522 arch/s390/mm/fault.c 			down_read(&mm->mmap_sem);
mm                540 arch/s390/mm/fault.c 	up_read(&mm->mmap_sem);
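
Note: the fault.c lines above (389-540) are the canonical user fault sequence: give up early when faulthandler_disabled() or the task has no mm, then resolve the VMA under a read-held mmap_sem. A stripped-down sketch of that shape, using the standard core-MM API of this kernel generation; retry logic, stack expansion and the s390 specifics are omitted:

	/* Sketch: the core of a user page-fault handler. */
	static vm_fault_t fault_core(struct task_struct *tsk,
				     unsigned long address, unsigned int flags)
	{
		struct mm_struct *mm = tsk->mm;
		struct vm_area_struct *vma;
		vm_fault_t fault = VM_FAULT_SIGSEGV;

		if (faulthandler_disabled() || !mm)
			return fault;		/* must be handled as a kernel fault */

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, address);
		if (vma && address >= vma->vm_start)
			fault = handle_mm_fault(vma, address, flags);
		up_read(&mm->mmap_sem);
		return fault;
	}
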
mm                 97 arch/s390/mm/gmap.c struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
mm                105 arch/s390/mm/gmap.c 	gmap->mm = mm;
mm                106 arch/s390/mm/gmap.c 	spin_lock(&mm->context.lock);
mm                107 arch/s390/mm/gmap.c 	list_add_rcu(&gmap->list, &mm->context.gmap_list);
mm                108 arch/s390/mm/gmap.c 	if (list_is_singular(&mm->context.gmap_list))
mm                112 arch/s390/mm/gmap.c 	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
mm                113 arch/s390/mm/gmap.c 	spin_unlock(&mm->context.lock);
mm                254 arch/s390/mm/gmap.c 	spin_lock(&gmap->mm->context.lock);
mm                256 arch/s390/mm/gmap.c 	if (list_empty(&gmap->mm->context.gmap_list))
mm                258 arch/s390/mm/gmap.c 	else if (list_is_singular(&gmap->mm->context.gmap_list))
mm                259 arch/s390/mm/gmap.c 		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
mm                263 arch/s390/mm/gmap.c 	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
mm                264 arch/s390/mm/gmap.c 	spin_unlock(&gmap->mm->context.lock);
mm                408 arch/s390/mm/gmap.c 	down_write(&gmap->mm->mmap_sem);
mm                411 arch/s390/mm/gmap.c 	up_write(&gmap->mm->mmap_sem);
mm                441 arch/s390/mm/gmap.c 	down_write(&gmap->mm->mmap_sem);
mm                451 arch/s390/mm/gmap.c 	up_write(&gmap->mm->mmap_sem);
mm                498 arch/s390/mm/gmap.c 	down_read(&gmap->mm->mmap_sem);
mm                500 arch/s390/mm/gmap.c 	up_read(&gmap->mm->mmap_sem);
mm                511 arch/s390/mm/gmap.c void gmap_unlink(struct mm_struct *mm, unsigned long *table,
mm                518 arch/s390/mm/gmap.c 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
mm                542 arch/s390/mm/gmap.c 	struct mm_struct *mm;
mm                581 arch/s390/mm/gmap.c 	mm = gmap->mm;
mm                582 arch/s390/mm/gmap.c 	pgd = pgd_offset(mm, vmaddr);
mm                594 arch/s390/mm/gmap.c 	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
mm                600 arch/s390/mm/gmap.c 	ptl = pmd_lock(mm, pmd);
mm                643 arch/s390/mm/gmap.c 	down_read(&gmap->mm->mmap_sem);
mm                652 arch/s390/mm/gmap.c 	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
mm                666 arch/s390/mm/gmap.c 	up_read(&gmap->mm->mmap_sem);
mm                686 arch/s390/mm/gmap.c 		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
mm                688 arch/s390/mm/gmap.c 			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
mm                699 arch/s390/mm/gmap.c 	down_read(&gmap->mm->mmap_sem);
mm                710 arch/s390/mm/gmap.c 		vma = find_vma(gmap->mm, vmaddr);
mm                722 arch/s390/mm/gmap.c 	up_read(&gmap->mm->mmap_sem);
mm                859 arch/s390/mm/gmap.c 	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
mm                876 arch/s390/mm/gmap.c 	struct mm_struct *mm = gmap->mm;
mm                882 arch/s390/mm/gmap.c 	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
mm                919 arch/s390/mm/gmap.c 	if (!gmap->mm->context.allow_gmap_hpage_1m)
mm               1015 arch/s390/mm/gmap.c 	ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
mm               1022 arch/s390/mm/gmap.c 	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
mm               1109 arch/s390/mm/gmap.c 	down_read(&gmap->mm->mmap_sem);
mm               1111 arch/s390/mm/gmap.c 	up_read(&gmap->mm->mmap_sem);
mm               1234 arch/s390/mm/gmap.c 			rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
mm               1295 arch/s390/mm/gmap.c 	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
mm               1649 arch/s390/mm/gmap.c 	BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
mm               1663 arch/s390/mm/gmap.c 	new->mm = parent->mm;
mm               1699 arch/s390/mm/gmap.c 	down_read(&parent->mm->mmap_sem);
mm               1703 arch/s390/mm/gmap.c 	up_read(&parent->mm->mmap_sem);
mm               2036 arch/s390/mm/gmap.c 	page = page_table_alloc_pgste(sg->mm);
mm               2146 arch/s390/mm/gmap.c 			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
mm               2235 arch/s390/mm/gmap.c void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
mm               2245 arch/s390/mm/gmap.c 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
mm               2302 arch/s390/mm/gmap.c static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
mm               2310 arch/s390/mm/gmap.c 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
mm               2334 arch/s390/mm/gmap.c void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
mm               2336 arch/s390/mm/gmap.c 	gmap_pmdp_clear(mm, vmaddr, 0);
mm               2345 arch/s390/mm/gmap.c void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
mm               2347 arch/s390/mm/gmap.c 	gmap_pmdp_clear(mm, vmaddr, 1);
mm               2356 arch/s390/mm/gmap.c void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
mm               2363 arch/s390/mm/gmap.c 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
mm               2391 arch/s390/mm/gmap.c void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
mm               2398 arch/s390/mm/gmap.c 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
mm               2476 arch/s390/mm/gmap.c 			ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
mm               2479 arch/s390/mm/gmap.c 			if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
mm               2488 arch/s390/mm/gmap.c static inline void thp_split_mm(struct mm_struct *mm)
mm               2494 arch/s390/mm/gmap.c 	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
mm               2502 arch/s390/mm/gmap.c 	mm->def_flags |= VM_NOHUGEPAGE;
mm               2521 arch/s390/mm/gmap.c 		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
mm               2523 arch/s390/mm/gmap.c 			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
mm               2538 arch/s390/mm/gmap.c 	struct mm_struct *mm = current->mm;
mm               2541 arch/s390/mm/gmap.c 	if (mm_has_pgste(mm))
mm               2544 arch/s390/mm/gmap.c 	if (!mm_alloc_pgste(mm))
mm               2546 arch/s390/mm/gmap.c 	down_write(&mm->mmap_sem);
mm               2547 arch/s390/mm/gmap.c 	mm->context.has_pgste = 1;
mm               2549 arch/s390/mm/gmap.c 	thp_split_mm(mm);
mm               2550 arch/s390/mm/gmap.c 	walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
mm               2551 arch/s390/mm/gmap.c 	up_write(&mm->mmap_sem);
mm               2564 arch/s390/mm/gmap.c 	ptep_zap_key(walk->mm, addr, pte);
mm               2600 arch/s390/mm/gmap.c 	struct mm_struct *mm = current->mm;
mm               2604 arch/s390/mm/gmap.c 	down_write(&mm->mmap_sem);
mm               2605 arch/s390/mm/gmap.c 	if (mm_uses_skeys(mm))
mm               2608 arch/s390/mm/gmap.c 	mm->context.uses_skeys = 1;
mm               2609 arch/s390/mm/gmap.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm               2612 arch/s390/mm/gmap.c 			mm->context.uses_skeys = 0;
mm               2617 arch/s390/mm/gmap.c 	mm->def_flags &= ~VM_MERGEABLE;
mm               2619 arch/s390/mm/gmap.c 	walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
mm               2622 arch/s390/mm/gmap.c 	up_write(&mm->mmap_sem);
mm               2633 arch/s390/mm/gmap.c 	ptep_zap_unused(walk->mm, addr, pte, 1);
mm               2641 arch/s390/mm/gmap.c void s390_reset_cmma(struct mm_struct *mm)
mm               2643 arch/s390/mm/gmap.c 	down_write(&mm->mmap_sem);
mm               2644 arch/s390/mm/gmap.c 	walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
mm               2645 arch/s390/mm/gmap.c 	up_write(&mm->mmap_sem);
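
Note: nearly every gmap.c hit above is one of two idioms: take the host mmap_sem around an operation on gmap->mm, or fan a host PTE/PMD event out to all guest address spaces via an RCU walk of mm->context.gmap_list (ptep_notify() at line 2235, gmap_pmdp_clear() at line 2302, the IDTE variants after it). A sketch of the RCU fan-out; do_gmap_event() is a hypothetical stand-in for the per-gmap action:

	static void do_gmap_event(struct gmap *gmap, unsigned long vmaddr);

	/* Sketch: deliver one host event to every gmap attached to this mm. */
	static void notify_all_gmaps(struct mm_struct *mm, unsigned long vmaddr)
	{
		struct gmap *gmap;

		rcu_read_lock();
		list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list)
			do_gmap_event(gmap, vmaddr);
		rcu_read_unlock();
	}
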
mm                129 arch/s390/mm/hugetlbpage.c static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
mm                134 arch/s390/mm/hugetlbpage.c 	if (!mm_uses_skeys(mm) ||
mm                152 arch/s390/mm/hugetlbpage.c void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
mm                169 arch/s390/mm/hugetlbpage.c 	clear_huge_pte_skeys(mm, rste);
mm                178 arch/s390/mm/hugetlbpage.c pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
mm                186 arch/s390/mm/hugetlbpage.c 		pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
mm                188 arch/s390/mm/hugetlbpage.c 		pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
mm                192 arch/s390/mm/hugetlbpage.c pte_t *huge_pte_alloc(struct mm_struct *mm,
mm                200 arch/s390/mm/hugetlbpage.c 	pgdp = pgd_offset(mm, addr);
mm                201 arch/s390/mm/hugetlbpage.c 	p4dp = p4d_alloc(mm, pgdp, addr);
mm                203 arch/s390/mm/hugetlbpage.c 		pudp = pud_alloc(mm, p4dp, addr);
mm                208 arch/s390/mm/hugetlbpage.c 				pmdp = pmd_alloc(mm, pudp, addr);
mm                214 arch/s390/mm/hugetlbpage.c pte_t *huge_pte_offset(struct mm_struct *mm,
mm                222 arch/s390/mm/hugetlbpage.c 	pgdp = pgd_offset(mm, addr);
mm                248 arch/s390/mm/hugetlbpage.c follow_huge_pud(struct mm_struct *mm, unsigned long address,
mm                286 arch/s390/mm/hugetlbpage.c 	info.low_limit = current->mm->mmap_base;
mm                304 arch/s390/mm/hugetlbpage.c 	info.high_limit = current->mm->mmap_base;
mm                330 arch/s390/mm/hugetlbpage.c 	struct mm_struct *mm = current->mm;
mm                347 arch/s390/mm/hugetlbpage.c 		vma = find_vma(mm, addr);
mm                353 arch/s390/mm/hugetlbpage.c 	if (mm->get_unmapped_area == arch_get_unmapped_area)
mm                363 arch/s390/mm/hugetlbpage.c 	if (addr + len > current->mm->context.asce_limit &&
mm                365 arch/s390/mm/hugetlbpage.c 		rc = crst_table_upgrade(mm, addr + len);
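
Note: huge_pte_alloc() above (hugetlbpage.c lines 192-210) walks pgd -> p4d -> pud and stops early, because on s390 the "huge pte" is the pud entry itself for 2G pages and the pmd entry for 1M pages. A condensed sketch of that walk, assuming the usual PUD_SIZE selector:

	/* Sketch: allocate the table entry that will hold a huge mapping. */
	static pte_t *huge_alloc_sketch(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
	{
		pgd_t *pgdp = pgd_offset(mm, addr);
		p4d_t *p4dp = p4d_alloc(mm, pgdp, addr);
		pud_t *pudp;

		if (!p4dp)
			return NULL;
		pudp = pud_alloc(mm, p4dp, addr);
		if (!pudp)
			return NULL;
		if (sz == PUD_SIZE)			/* 2G page */
			return (pte_t *) pudp;
		return (pte_t *) pmd_alloc(mm, pudp, addr);	/* 1M page */
	}
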
mm                 79 arch/s390/mm/mmap.c 	struct mm_struct *mm = current->mm;
mm                 92 arch/s390/mm/mmap.c 		vma = find_vma(mm, addr);
mm                100 arch/s390/mm/mmap.c 	info.low_limit = mm->mmap_base;
mm                112 arch/s390/mm/mmap.c 	if (addr + len > current->mm->context.asce_limit &&
mm                114 arch/s390/mm/mmap.c 		rc = crst_table_upgrade(mm, addr + len);
mm                128 arch/s390/mm/mmap.c 	struct mm_struct *mm = current->mm;
mm                143 arch/s390/mm/mmap.c 		vma = find_vma(mm, addr);
mm                152 arch/s390/mm/mmap.c 	info.high_limit = mm->mmap_base;
mm                177 arch/s390/mm/mmap.c 	if (addr + len > current->mm->context.asce_limit &&
mm                179 arch/s390/mm/mmap.c 		rc = crst_table_upgrade(mm, addr + len);
mm                191 arch/s390/mm/mmap.c void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
mm                203 arch/s390/mm/mmap.c 		mm->mmap_base = mmap_base_legacy(random_factor);
mm                204 arch/s390/mm/mmap.c 		mm->get_unmapped_area = arch_get_unmapped_area;
mm                206 arch/s390/mm/mmap.c 		mm->mmap_base = mmap_base(random_factor, rlim_stack);
mm                207 arch/s390/mm/mmap.c 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm                 54 arch/s390/mm/pgalloc.c unsigned long *crst_table_alloc(struct mm_struct *mm)
mm                 64 arch/s390/mm/pgalloc.c void crst_table_free(struct mm_struct *mm, unsigned long *table)
mm                 71 arch/s390/mm/pgalloc.c 	struct mm_struct *mm = arg;
mm                 74 arch/s390/mm/pgalloc.c 	if (current->active_mm == mm) {
mm                 75 arch/s390/mm/pgalloc.c 		S390_lowcore.user_asce = mm->context.asce;
mm                 90 arch/s390/mm/pgalloc.c int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
mm                 96 arch/s390/mm/pgalloc.c 	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
mm                 99 arch/s390/mm/pgalloc.c 	while (mm->context.asce_limit < end) {
mm                100 arch/s390/mm/pgalloc.c 		table = crst_table_alloc(mm);
mm                105 arch/s390/mm/pgalloc.c 		spin_lock_bh(&mm->page_table_lock);
mm                106 arch/s390/mm/pgalloc.c 		pgd = (unsigned long *) mm->pgd;
mm                107 arch/s390/mm/pgalloc.c 		if (mm->context.asce_limit == _REGION2_SIZE) {
mm                109 arch/s390/mm/pgalloc.c 			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
mm                110 arch/s390/mm/pgalloc.c 			mm->pgd = (pgd_t *) table;
mm                111 arch/s390/mm/pgalloc.c 			mm->context.asce_limit = _REGION1_SIZE;
mm                112 arch/s390/mm/pgalloc.c 			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
mm                114 arch/s390/mm/pgalloc.c 			mm_inc_nr_puds(mm);
mm                117 arch/s390/mm/pgalloc.c 			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
mm                118 arch/s390/mm/pgalloc.c 			mm->pgd = (pgd_t *) table;
mm                119 arch/s390/mm/pgalloc.c 			mm->context.asce_limit = -PAGE_SIZE;
mm                120 arch/s390/mm/pgalloc.c 			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
mm                124 arch/s390/mm/pgalloc.c 		spin_unlock_bh(&mm->page_table_lock);
mm                127 arch/s390/mm/pgalloc.c 		on_each_cpu(__crst_table_upgrade, mm, 0);
mm                131 arch/s390/mm/pgalloc.c void crst_table_downgrade(struct mm_struct *mm)
mm                136 arch/s390/mm/pgalloc.c 	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);
mm                138 arch/s390/mm/pgalloc.c 	if (current->active_mm == mm) {
mm                140 arch/s390/mm/pgalloc.c 		__tlb_flush_mm(mm);
mm                143 arch/s390/mm/pgalloc.c 	pgd = mm->pgd;
mm                144 arch/s390/mm/pgalloc.c 	mm_dec_nr_pmds(mm);
mm                145 arch/s390/mm/pgalloc.c 	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
mm                146 arch/s390/mm/pgalloc.c 	mm->context.asce_limit = _REGION3_SIZE;
mm                147 arch/s390/mm/pgalloc.c 	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
mm                149 arch/s390/mm/pgalloc.c 	crst_table_free(mm, (unsigned long *) pgd);
mm                151 arch/s390/mm/pgalloc.c 	if (current->active_mm == mm)
mm                152 arch/s390/mm/pgalloc.c 		set_user_asce(mm);
mm                168 arch/s390/mm/pgalloc.c struct page *page_table_alloc_pgste(struct mm_struct *mm)
mm                192 arch/s390/mm/pgalloc.c unsigned long *page_table_alloc(struct mm_struct *mm)
mm                199 arch/s390/mm/pgalloc.c 	if (!mm_alloc_pgste(mm)) {
mm                201 arch/s390/mm/pgalloc.c 		spin_lock_bh(&mm->context.lock);
mm                202 arch/s390/mm/pgalloc.c 		if (!list_empty(&mm->context.pgtable_list)) {
mm                203 arch/s390/mm/pgalloc.c 			page = list_first_entry(&mm->context.pgtable_list,
mm                217 arch/s390/mm/pgalloc.c 		spin_unlock_bh(&mm->context.lock);
mm                232 arch/s390/mm/pgalloc.c 	if (mm_alloc_pgste(mm)) {
mm                241 arch/s390/mm/pgalloc.c 		spin_lock_bh(&mm->context.lock);
mm                242 arch/s390/mm/pgalloc.c 		list_add(&page->lru, &mm->context.pgtable_list);
mm                243 arch/s390/mm/pgalloc.c 		spin_unlock_bh(&mm->context.lock);
mm                248 arch/s390/mm/pgalloc.c void page_table_free(struct mm_struct *mm, unsigned long *table)
mm                254 arch/s390/mm/pgalloc.c 	if (!mm_alloc_pgste(mm)) {
mm                257 arch/s390/mm/pgalloc.c 		spin_lock_bh(&mm->context.lock);
mm                261 arch/s390/mm/pgalloc.c 			list_add(&page->lru, &mm->context.pgtable_list);
mm                264 arch/s390/mm/pgalloc.c 		spin_unlock_bh(&mm->context.lock);
mm                278 arch/s390/mm/pgalloc.c 	struct mm_struct *mm;
mm                282 arch/s390/mm/pgalloc.c 	mm = tlb->mm;
mm                284 arch/s390/mm/pgalloc.c 	if (mm_alloc_pgste(mm)) {
mm                285 arch/s390/mm/pgalloc.c 		gmap_unlink(mm, table, vmaddr);
mm                291 arch/s390/mm/pgalloc.c 	spin_lock_bh(&mm->context.lock);
mm                295 arch/s390/mm/pgalloc.c 		list_add_tail(&page->lru, &mm->context.pgtable_list);
mm                298 arch/s390/mm/pgalloc.c 	spin_unlock_bh(&mm->context.lock);
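
Note: crst_table_upgrade() above (pgalloc.c lines 90-127) adds translation levels one at a time until the requested address fits: the old top-level table is hooked in below a freshly allocated one, asce_limit is raised, and the new ASCE is pushed to every CPU. A sketch of one REGION2 -> REGION1 step, using only symbols quoted above (table initialization and the ASCE recompute at lines 112-114 are omitted):

	/* Sketch: grow from 4 to 5 translation levels; the final
	 * _REGION1 -> -PAGE_SIZE step at lines 117-120 is analogous. */
	static int upgrade_one_level(struct mm_struct *mm)
	{
		unsigned long *table = crst_table_alloc(mm);

		if (!table)
			return -ENOMEM;
		spin_lock_bh(&mm->page_table_lock);
		p4d_populate(mm, (p4d_t *) table, (pud_t *) mm->pgd);
		mm->pgd = (pgd_t *) table;
		mm->context.asce_limit = _REGION1_SIZE;
		mm_inc_nr_puds(mm);
		spin_unlock_bh(&mm->page_table_lock);
		on_each_cpu(__crst_table_upgrade, mm, 0);	/* reload user ASCE */
		return 0;
	}
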
mm                 29 arch/s390/mm/pgtable.c static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
mm                 36 arch/s390/mm/pgtable.c 		asce = READ_ONCE(mm->context.gmap_asce);
mm                 40 arch/s390/mm/pgtable.c 			asce = asce ? : mm->context.asce;
mm                 49 arch/s390/mm/pgtable.c static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
mm                 56 arch/s390/mm/pgtable.c 		asce = READ_ONCE(mm->context.gmap_asce);
mm                 60 arch/s390/mm/pgtable.c 			asce = asce ? : mm->context.asce;
mm                 69 arch/s390/mm/pgtable.c static inline pte_t ptep_flush_direct(struct mm_struct *mm,
mm                 78 arch/s390/mm/pgtable.c 	atomic_inc(&mm->context.flush_count);
mm                 80 arch/s390/mm/pgtable.c 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
mm                 81 arch/s390/mm/pgtable.c 		ptep_ipte_local(mm, addr, ptep, nodat);
mm                 83 arch/s390/mm/pgtable.c 		ptep_ipte_global(mm, addr, ptep, nodat);
mm                 84 arch/s390/mm/pgtable.c 	atomic_dec(&mm->context.flush_count);
mm                 88 arch/s390/mm/pgtable.c static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
mm                 97 arch/s390/mm/pgtable.c 	atomic_inc(&mm->context.flush_count);
mm                 98 arch/s390/mm/pgtable.c 	if (cpumask_equal(&mm->context.cpu_attach_mask,
mm                101 arch/s390/mm/pgtable.c 		mm->context.flush_mm = 1;
mm                103 arch/s390/mm/pgtable.c 		ptep_ipte_global(mm, addr, ptep, nodat);
mm                104 arch/s390/mm/pgtable.c 	atomic_dec(&mm->context.flush_count);
mm                156 arch/s390/mm/pgtable.c 				       struct mm_struct *mm)
mm                161 arch/s390/mm/pgtable.c 	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
mm                177 arch/s390/mm/pgtable.c 				 struct mm_struct *mm)
mm                183 arch/s390/mm/pgtable.c 	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
mm                221 arch/s390/mm/pgtable.c static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
mm                231 arch/s390/mm/pgtable.c 		ptep_notify(mm, addr, ptep, bits);
mm                237 arch/s390/mm/pgtable.c static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
mm                242 arch/s390/mm/pgtable.c 	if (mm_has_pgste(mm)) {
mm                244 arch/s390/mm/pgtable.c 		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
mm                249 arch/s390/mm/pgtable.c static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
mm                253 arch/s390/mm/pgtable.c 	if (mm_has_pgste(mm)) {
mm                255 arch/s390/mm/pgtable.c 			pgste_set_key(ptep, pgste, new, mm);
mm                257 arch/s390/mm/pgtable.c 			pgste = pgste_update_all(old, pgste, mm);
mm                270 arch/s390/mm/pgtable.c pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
mm                278 arch/s390/mm/pgtable.c 	pgste = ptep_xchg_start(mm, addr, ptep);
mm                280 arch/s390/mm/pgtable.c 	old = ptep_flush_direct(mm, addr, ptep, nodat);
mm                281 arch/s390/mm/pgtable.c 	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
mm                287 arch/s390/mm/pgtable.c pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
mm                295 arch/s390/mm/pgtable.c 	pgste = ptep_xchg_start(mm, addr, ptep);
mm                297 arch/s390/mm/pgtable.c 	old = ptep_flush_lazy(mm, addr, ptep, nodat);
mm                298 arch/s390/mm/pgtable.c 	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
mm                310 arch/s390/mm/pgtable.c 	struct mm_struct *mm = vma->vm_mm;
mm                313 arch/s390/mm/pgtable.c 	pgste = ptep_xchg_start(mm, addr, ptep);
mm                315 arch/s390/mm/pgtable.c 	old = ptep_flush_lazy(mm, addr, ptep, nodat);
mm                316 arch/s390/mm/pgtable.c 	if (mm_has_pgste(mm)) {
mm                317 arch/s390/mm/pgtable.c 		pgste = pgste_update_all(old, pgste, mm);
mm                327 arch/s390/mm/pgtable.c 	struct mm_struct *mm = vma->vm_mm;
mm                331 arch/s390/mm/pgtable.c 	if (mm_has_pgste(mm)) {
mm                333 arch/s390/mm/pgtable.c 		pgste_set_key(ptep, pgste, pte, mm);
mm                342 arch/s390/mm/pgtable.c static inline void pmdp_idte_local(struct mm_struct *mm,
mm                347 arch/s390/mm/pgtable.c 			    mm->context.asce, IDTE_LOCAL);
mm                350 arch/s390/mm/pgtable.c 	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
mm                351 arch/s390/mm/pgtable.c 		gmap_pmdp_idte_local(mm, addr);
mm                354 arch/s390/mm/pgtable.c static inline void pmdp_idte_global(struct mm_struct *mm,
mm                359 arch/s390/mm/pgtable.c 			    mm->context.asce, IDTE_GLOBAL);
mm                360 arch/s390/mm/pgtable.c 		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
mm                361 arch/s390/mm/pgtable.c 			gmap_pmdp_idte_global(mm, addr);
mm                364 arch/s390/mm/pgtable.c 		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
mm                365 arch/s390/mm/pgtable.c 			gmap_pmdp_idte_global(mm, addr);
mm                368 arch/s390/mm/pgtable.c 		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
mm                369 arch/s390/mm/pgtable.c 			gmap_pmdp_csp(mm, addr);
mm                373 arch/s390/mm/pgtable.c static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
mm                381 arch/s390/mm/pgtable.c 	atomic_inc(&mm->context.flush_count);
mm                383 arch/s390/mm/pgtable.c 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
mm                384 arch/s390/mm/pgtable.c 		pmdp_idte_local(mm, addr, pmdp);
mm                386 arch/s390/mm/pgtable.c 		pmdp_idte_global(mm, addr, pmdp);
mm                387 arch/s390/mm/pgtable.c 	atomic_dec(&mm->context.flush_count);
mm                391 arch/s390/mm/pgtable.c static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
mm                399 arch/s390/mm/pgtable.c 	atomic_inc(&mm->context.flush_count);
mm                400 arch/s390/mm/pgtable.c 	if (cpumask_equal(&mm->context.cpu_attach_mask,
mm                403 arch/s390/mm/pgtable.c 		mm->context.flush_mm = 1;
mm                404 arch/s390/mm/pgtable.c 		if (mm_has_pgste(mm))
mm                405 arch/s390/mm/pgtable.c 			gmap_pmdp_invalidate(mm, addr);
mm                407 arch/s390/mm/pgtable.c 		pmdp_idte_global(mm, addr, pmdp);
mm                409 arch/s390/mm/pgtable.c 	atomic_dec(&mm->context.flush_count);
mm                414 arch/s390/mm/pgtable.c static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
mm                421 arch/s390/mm/pgtable.c 	pgd = pgd_offset(mm, addr);
mm                422 arch/s390/mm/pgtable.c 	p4d = p4d_alloc(mm, pgd, addr);
mm                425 arch/s390/mm/pgtable.c 	pud = pud_alloc(mm, p4d, addr);
mm                428 arch/s390/mm/pgtable.c 	pmd = pmd_alloc(mm, pud, addr);
mm                433 arch/s390/mm/pgtable.c pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
mm                439 arch/s390/mm/pgtable.c 	old = pmdp_flush_direct(mm, addr, pmdp);
mm                446 arch/s390/mm/pgtable.c pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
mm                452 arch/s390/mm/pgtable.c 	old = pmdp_flush_lazy(mm, addr, pmdp);
mm                459 arch/s390/mm/pgtable.c static inline void pudp_idte_local(struct mm_struct *mm,
mm                464 arch/s390/mm/pgtable.c 			    mm->context.asce, IDTE_LOCAL);
mm                469 arch/s390/mm/pgtable.c static inline void pudp_idte_global(struct mm_struct *mm,
mm                474 arch/s390/mm/pgtable.c 			    mm->context.asce, IDTE_GLOBAL);
mm                485 arch/s390/mm/pgtable.c static inline pud_t pudp_flush_direct(struct mm_struct *mm,
mm                493 arch/s390/mm/pgtable.c 	atomic_inc(&mm->context.flush_count);
mm                495 arch/s390/mm/pgtable.c 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
mm                496 arch/s390/mm/pgtable.c 		pudp_idte_local(mm, addr, pudp);
mm                498 arch/s390/mm/pgtable.c 		pudp_idte_global(mm, addr, pudp);
mm                499 arch/s390/mm/pgtable.c 	atomic_dec(&mm->context.flush_count);
mm                503 arch/s390/mm/pgtable.c pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
mm                509 arch/s390/mm/pgtable.c 	old = pudp_flush_direct(mm, addr, pudp);
mm                517 arch/s390/mm/pgtable.c void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm                522 arch/s390/mm/pgtable.c 	assert_spin_locked(pmd_lockptr(mm, pmdp));
mm                525 arch/s390/mm/pgtable.c 	if (!pmd_huge_pte(mm, pmdp))
mm                528 arch/s390/mm/pgtable.c 		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
mm                529 arch/s390/mm/pgtable.c 	pmd_huge_pte(mm, pmdp) = pgtable;
mm                532 arch/s390/mm/pgtable.c pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
mm                538 arch/s390/mm/pgtable.c 	assert_spin_locked(pmd_lockptr(mm, pmdp));
mm                541 arch/s390/mm/pgtable.c 	pgtable = pmd_huge_pte(mm, pmdp);
mm                544 arch/s390/mm/pgtable.c 		pmd_huge_pte(mm, pmdp) = NULL;
mm                546 arch/s390/mm/pgtable.c 		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
mm                558 arch/s390/mm/pgtable.c void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                567 arch/s390/mm/pgtable.c 	pgste_set_key(ptep, pgste, entry, mm);
mm                573 arch/s390/mm/pgtable.c void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                595 arch/s390/mm/pgtable.c int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
mm                615 arch/s390/mm/pgtable.c 		ptep_flush_direct(mm, addr, ptep, nodat);
mm                616 arch/s390/mm/pgtable.c 		pgste = pgste_update_all(entry, pgste, mm);
mm                620 arch/s390/mm/pgtable.c 		ptep_flush_direct(mm, addr, ptep, nodat);
mm                630 arch/s390/mm/pgtable.c int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
mm                657 arch/s390/mm/pgtable.c void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
mm                665 arch/s390/mm/pgtable.c 	ptep_flush_direct(mm, saddr, ptep, nodat);
mm                671 arch/s390/mm/pgtable.c static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
mm                674 arch/s390/mm/pgtable.c 		dec_mm_counter(mm, MM_SWAPENTS);
mm                678 arch/s390/mm/pgtable.c 		dec_mm_counter(mm, mm_counter(page));
mm                683 arch/s390/mm/pgtable.c void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
mm                698 arch/s390/mm/pgtable.c 		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
mm                699 arch/s390/mm/pgtable.c 		pte_clear(mm, addr, ptep);
mm                707 arch/s390/mm/pgtable.c void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                727 arch/s390/mm/pgtable.c bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
mm                740 arch/s390/mm/pgtable.c 		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
mm                742 arch/s390/mm/pgtable.c 		ptep_ipte_global(mm, addr, ptep, nodat);
mm                754 arch/s390/mm/pgtable.c int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
mm                763 arch/s390/mm/pgtable.c 	pmdp = pmd_alloc_map(mm, addr);
mm                767 arch/s390/mm/pgtable.c 	ptl = pmd_lock(mm, pmdp);
mm                786 arch/s390/mm/pgtable.c 	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
mm                826 arch/s390/mm/pgtable.c int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
mm                835 arch/s390/mm/pgtable.c 		rc = get_guest_storage_key(current->mm, addr, &tmp);
mm                847 arch/s390/mm/pgtable.c 	rc = set_guest_storage_key(current->mm, addr, key, nq);
mm                857 arch/s390/mm/pgtable.c int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
mm                866 arch/s390/mm/pgtable.c 	pmdp = pmd_alloc_map(mm, addr);
mm                870 arch/s390/mm/pgtable.c 	ptl = pmd_lock(mm, pmdp);
mm                885 arch/s390/mm/pgtable.c 	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
mm                911 arch/s390/mm/pgtable.c int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
mm                920 arch/s390/mm/pgtable.c 	pmdp = pmd_alloc_map(mm, addr);
mm                924 arch/s390/mm/pgtable.c 	ptl = pmd_lock(mm, pmdp);
mm                941 arch/s390/mm/pgtable.c 	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
mm                970 arch/s390/mm/pgtable.c int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
mm                982 arch/s390/mm/pgtable.c 	ptep = get_locked_pte(mm, hva, &ptl);
mm               1071 arch/s390/mm/pgtable.c int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
mm               1078 arch/s390/mm/pgtable.c 	ptep = get_locked_pte(mm, hva, &ptl);
mm               1100 arch/s390/mm/pgtable.c int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
mm               1105 arch/s390/mm/pgtable.c 	ptep = get_locked_pte(mm, hva, &ptl);
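
Note: the pgtable.c hits fall into two families: the guest storage-key/PGSTE accessors that back the KVM code earlier in this list, and the ptep_/pmdp_ xchg helpers, which come in a "direct" flavor (flush now, locally when only this CPU ever attached the mm) and a "lazy" flavor (just set flush_mm and defer). A sketch of the direct-flush decision at lines 69-84 and 373-387, assuming the s390 MACHINE_HAS_TLB_LC facility test from the surrounding source; the already-invalid fast path is omitted:

	/* Sketch: invalidate one pte, using a CPU-local IPTE when safe. */
	static pte_t flush_direct_sketch(struct mm_struct *mm, unsigned long addr,
					 pte_t *ptep, int nodat)
	{
		pte_t old = *ptep;

		atomic_inc(&mm->context.flush_count);
		if (MACHINE_HAS_TLB_LC &&
		    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
			ptep_ipte_local(mm, addr, ptep, nodat);
		else
			ptep_ipte_global(mm, addr, ptep, nodat);
		atomic_dec(&mm->context.flush_count);
		return old;
	}
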
mm                128 arch/s390/pci/pci_mmio.c 	down_read(&current->mm->mmap_sem);
mm                130 arch/s390/pci/pci_mmio.c 	vma = find_vma(current->mm, user_addr);
mm                138 arch/s390/pci/pci_mmio.c 	up_read(&current->mm->mmap_sem);
mm                 40 arch/sh/include/asm/cacheflush.h extern void flush_cache_mm(struct mm_struct *mm);
mm                 41 arch/sh/include/asm/cacheflush.h extern void flush_cache_dup_mm(struct mm_struct *mm);
mm                200 arch/sh/include/asm/elf.h #define VDSO_BASE		((unsigned long)current->mm->context.vdso)
mm                  8 arch/sh/include/asm/hugetlb.h static inline int is_hugepage_only_range(struct mm_struct *mm,
mm                 41 arch/sh/include/asm/mmu_context.h #define cpu_context(cpu, mm)	((mm)->context.id[cpu])
mm                 43 arch/sh/include/asm/mmu_context.h #define cpu_asid(cpu, mm)	\
mm                 44 arch/sh/include/asm/mmu_context.h 	(cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
mm                 60 arch/sh/include/asm/mmu_context.h static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
mm                 65 arch/sh/include/asm/mmu_context.h 	if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
mm                 93 arch/sh/include/asm/mmu_context.h 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
mm                101 arch/sh/include/asm/mmu_context.h 				   struct mm_struct *mm)
mm                106 arch/sh/include/asm/mmu_context.h 		cpu_context(i, mm) = NO_CONTEXT;
mm                115 arch/sh/include/asm/mmu_context.h static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
mm                117 arch/sh/include/asm/mmu_context.h 	get_mmu_context(mm, cpu);
mm                118 arch/sh/include/asm/mmu_context.h 	set_asid(cpu_asid(cpu, mm));
mm                137 arch/sh/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)		do { } while (0)
mm                138 arch/sh/include/asm/mmu_context.h #define enter_lazy_tlb(mm,tsk)		do { } while (0)
mm                144 arch/sh/include/asm/mmu_context.h #define cpu_asid(cpu, mm)		({ (void)cpu; NO_CONTEXT; })
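
Note: the sh mmu_context.h lines above implement versioned ASIDs: the per-cpu asid_cache counter's low bits are the hardware ASID and its high bits a generation number, so the version comparison at line 65 tells whether an mm's context survived the last ASID roll-over. A sketch of the allocator, assuming MMU_CONTEXT_FIRST_VERSION and local_flush_tlb_all(), which the excerpt does not show:

	/* Sketch: (re)validate this mm's ASID on the current CPU. */
	static void get_context_sketch(struct mm_struct *mm, unsigned int cpu)
	{
		unsigned long asid = asid_cache(cpu);

		if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
			return;			/* same generation, still valid */

		if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
			/* ASIDs exhausted: flush, then start a new generation */
			local_flush_tlb_all();
			if (!asid)
				asid = MMU_CONTEXT_FIRST_VERSION;
		}
		cpu_context(cpu, mm) = asid_cache(cpu) = asid;
	}
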
mm                  9 arch/sh/include/asm/mmu_context_32.h static inline void destroy_context(struct mm_struct *mm)
mm                 21 arch/sh/include/asm/mmu_context_64.h static inline void destroy_context(struct mm_struct *mm)
mm                 24 arch/sh/include/asm/mmu_context_64.h 	flush_tlb_mm(mm);
mm                  9 arch/sh/include/asm/pgalloc.h extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
mm                 12 arch/sh/include/asm/pgalloc.h extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
mm                 13 arch/sh/include/asm/pgalloc.h extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
mm                 14 arch/sh/include/asm/pgalloc.h extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
mm                 17 arch/sh/include/asm/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
mm                 23 arch/sh/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
mm                310 arch/sh/include/asm/pgtable_32.h #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
mm                326 arch/sh/include/asm/pgtable_32.h #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
mm                409 arch/sh/include/asm/pgtable_32.h #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
mm                 41 arch/sh/include/asm/pgtable_64.h #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
mm                 50 arch/sh/include/asm/pgtable_64.h #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
mm                227 arch/sh/include/asm/pgtable_64.h #define pte_clear(mm,addr,xp)	(set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
mm                 15 arch/sh/include/asm/tlbflush.h extern void local_flush_tlb_mm(struct mm_struct *mm);
mm                 30 arch/sh/include/asm/tlbflush.h extern void flush_tlb_mm(struct mm_struct *mm);
mm                 40 arch/sh/include/asm/tlbflush.h #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
mm                405 arch/sh/kernel/ptrace_32.c 			tmp = child->mm->start_code;
mm                407 arch/sh/kernel/ptrace_32.c 			tmp = child->mm->start_data;
mm                409 arch/sh/kernel/ptrace_32.c 			tmp = child->mm->end_code;
mm                411 arch/sh/kernel/ptrace_32.c 			tmp = child->mm->end_code - child->mm->start_code;
mm                289 arch/sh/kernel/signal_32.c 	} else if (likely(current->mm->context.vdso)) {
mm                359 arch/sh/kernel/signal_32.c 	} else if (likely(current->mm->context.vdso)) {
mm                176 arch/sh/kernel/smp.c 	struct mm_struct *mm = &init_mm;
mm                179 arch/sh/kernel/smp.c 	mmgrab(mm);
mm                180 arch/sh/kernel/smp.c 	mmget(mm);
mm                181 arch/sh/kernel/smp.c 	current->active_mm = mm;
mm                183 arch/sh/kernel/smp.c 	enter_lazy_tlb(mm, current);
mm                343 arch/sh/kernel/smp.c static void flush_tlb_mm_ipi(void *mm)
mm                345 arch/sh/kernel/smp.c 	local_flush_tlb_mm((struct mm_struct *)mm);
mm                360 arch/sh/kernel/smp.c void flush_tlb_mm(struct mm_struct *mm)
mm                364 arch/sh/kernel/smp.c 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
mm                365 arch/sh/kernel/smp.c 		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
mm                370 arch/sh/kernel/smp.c 				cpu_context(i, mm) = 0;
mm                372 arch/sh/kernel/smp.c 	local_flush_tlb_mm(mm);
mm                393 arch/sh/kernel/smp.c 	struct mm_struct *mm = vma->vm_mm;
mm                396 arch/sh/kernel/smp.c 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
mm                407 arch/sh/kernel/smp.c 				cpu_context(i, mm) = 0;
mm                440 arch/sh/kernel/smp.c 	    (current->mm != vma->vm_mm)) {
mm                 72 arch/sh/kernel/sys_sh.c 	down_read(&current->mm->mmap_sem);
mm                 73 arch/sh/kernel/sys_sh.c 	vma = find_vma (current->mm, addr);
mm                 75 arch/sh/kernel/sys_sh.c 		up_read(&current->mm->mmap_sem);
mm                 94 arch/sh/kernel/sys_sh.c 	up_read(&current->mm->mmap_sem);
mm                 60 arch/sh/kernel/vsyscall/vsyscall.c 	struct mm_struct *mm = current->mm;
mm                 64 arch/sh/kernel/vsyscall/vsyscall.c 	if (down_write_killable(&mm->mmap_sem))
mm                 73 arch/sh/kernel/vsyscall/vsyscall.c 	ret = install_special_mapping(mm, addr, PAGE_SIZE,
mm                 80 arch/sh/kernel/vsyscall/vsyscall.c 	current->mm->context.vdso = (void *)addr;
mm                 83 arch/sh/kernel/vsyscall/vsyscall.c 	up_write(&mm->mmap_sem);
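
Note: the vsyscall.c lines above show the standard vDSO recipe: take mmap_sem for writing (killably), find a free slot, map the prebuilt page with install_special_mapping(), and stash the base in mm->context.vdso for the signal code (signal_32.c above) to find. A sketch under those assumptions; syscall_pages stands in for the prebuilt page array:

	/* Sketch: map one vDSO page into the current process. */
	static int map_vdso_sketch(struct page **syscall_pages)
	{
		struct mm_struct *mm = current->mm;
		unsigned long addr;
		int ret;

		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto out;
		}
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ | VM_EXEC, syscall_pages);
		if (!ret)
			mm->context.vdso = (void *)addr;
	out:
		up_write(&mm->mmap_sem);
		return ret;
	}
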
mm                 41 arch/sh/mm/asids-debugfs.c 		if (p->mm)
mm                 43 arch/sh/mm/asids-debugfs.c 				   cpu_asid(smp_processor_id(), p->mm));
mm                190 arch/sh/mm/cache-sh4.c 	struct mm_struct *mm = arg;
mm                192 arch/sh/mm/cache-sh4.c 	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
mm                132 arch/sh/mm/cache-sh5.c static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
mm                151 arch/sh/mm/cache-sh5.c 	if (!mm)
mm                164 arch/sh/mm/cache-sh5.c 		mm_asid = cpu_asid(smp_processor_id(), mm);
mm                179 arch/sh/mm/cache-sh5.c 			vma = find_vma(mm, aligned_start);
mm                382 arch/sh/mm/cache-sh5.c static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
mm                393 arch/sh/mm/cache-sh5.c 	if (!mm)
mm                396 arch/sh/mm/cache-sh5.c 	pgd = pgd_offset(mm, addr);
mm                408 arch/sh/mm/cache-sh5.c 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
mm                467 arch/sh/mm/cache-sh5.c static void sh64_dcache_purge_user_range(struct mm_struct *mm,
mm                478 arch/sh/mm/cache-sh5.c 		sh64_dcache_purge_user_pages(mm, start, end);
mm                177 arch/sh/mm/cache.c void flush_cache_mm(struct mm_struct *mm)
mm                182 arch/sh/mm/cache.c 	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
mm                185 arch/sh/mm/cache.c void flush_cache_dup_mm(struct mm_struct *mm)
mm                190 arch/sh/mm/cache.c 	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
mm                 37 arch/sh/mm/fault.c static void show_pte(struct mm_struct *mm, unsigned long addr)
mm                 41 arch/sh/mm/fault.c 	if (mm) {
mm                 42 arch/sh/mm/fault.c 		pgd = mm->pgd;
mm                258 arch/sh/mm/fault.c 	struct mm_struct *mm = current->mm;
mm                264 arch/sh/mm/fault.c 	up_read(&mm->mmap_sem);
mm                286 arch/sh/mm/fault.c 	struct mm_struct *mm = tsk->mm;
mm                288 arch/sh/mm/fault.c 	up_read(&mm->mmap_sem);
mm                307 arch/sh/mm/fault.c 			up_read(&current->mm->mmap_sem);
mm                319 arch/sh/mm/fault.c 			up_read(&current->mm->mmap_sem);
mm                323 arch/sh/mm/fault.c 		up_read(&current->mm->mmap_sem);
mm                380 arch/sh/mm/fault.c 	struct mm_struct *mm;
mm                386 arch/sh/mm/fault.c 	mm = tsk->mm;
mm                421 arch/sh/mm/fault.c 	if (unlikely(faulthandler_disabled() || !mm)) {
mm                427 arch/sh/mm/fault.c 	down_read(&mm->mmap_sem);
mm                429 arch/sh/mm/fault.c 	vma = find_vma(mm, address);
mm                496 arch/sh/mm/fault.c 	up_read(&mm->mmap_sem);
mm                 25 arch/sh/mm/hugetlbpage.c pte_t *huge_pte_alloc(struct mm_struct *mm,
mm                 33 arch/sh/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
mm                 35 arch/sh/mm/hugetlbpage.c 		pud = pud_alloc(mm, pgd, addr);
mm                 37 arch/sh/mm/hugetlbpage.c 			pmd = pmd_alloc(mm, pud, addr);
mm                 39 arch/sh/mm/hugetlbpage.c 				pte = pte_alloc_map(mm, pmd, addr);
mm                 46 arch/sh/mm/hugetlbpage.c pte_t *huge_pte_offset(struct mm_struct *mm,
mm                 54 arch/sh/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
mm                 37 arch/sh/mm/mmap.c 	struct mm_struct *mm = current->mm;
mm                 65 arch/sh/mm/mmap.c 		vma = find_vma(mm, addr);
mm                 86 arch/sh/mm/mmap.c 	struct mm_struct *mm = current->mm;
mm                115 arch/sh/mm/mmap.c 		vma = find_vma(mm, addr);
mm                124 arch/sh/mm/mmap.c 	info.high_limit = mm->mmap_base;
mm                 43 arch/sh/mm/nommu.c void local_flush_tlb_mm(struct mm_struct *mm)
mm                 33 arch/sh/mm/pgtable.c pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 38 arch/sh/mm/pgtable.c void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 44 arch/sh/mm/pgtable.c void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
mm                 49 arch/sh/mm/pgtable.c pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
mm                 54 arch/sh/mm/pgtable.c void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                 39 arch/sh/mm/tlbex_32.c 		if (unlikely(address >= TASK_SIZE || !current->mm))
mm                 42 arch/sh/mm/tlbex_32.c 		pgd = pgd_offset(current->mm, address);
mm                 55 arch/sh/mm/tlbex_64.c 		if (unlikely(address >= TASK_SIZE || !current->mm))
mm                 58 arch/sh/mm/tlbex_64.c 		pgd = pgd_offset(current->mm, address);
mm                 28 arch/sh/mm/tlbflush_32.c 		if (vma->vm_mm != current->mm) {
mm                 42 arch/sh/mm/tlbflush_32.c 	struct mm_struct *mm = vma->vm_mm;
mm                 45 arch/sh/mm/tlbflush_32.c 	if (cpu_context(cpu, mm) != NO_CONTEXT) {
mm                 52 arch/sh/mm/tlbflush_32.c 			cpu_context(cpu, mm) = NO_CONTEXT;
mm                 53 arch/sh/mm/tlbflush_32.c 			if (mm == current->mm)
mm                 54 arch/sh/mm/tlbflush_32.c 				activate_context(mm, cpu);
mm                 59 arch/sh/mm/tlbflush_32.c 			asid = cpu_asid(cpu, mm);
mm                 63 arch/sh/mm/tlbflush_32.c 			if (mm != current->mm) {
mm                106 arch/sh/mm/tlbflush_32.c void local_flush_tlb_mm(struct mm_struct *mm)
mm                112 arch/sh/mm/tlbflush_32.c 	if (cpu_context(cpu, mm) != NO_CONTEXT) {
mm                116 arch/sh/mm/tlbflush_32.c 		cpu_context(cpu, mm) = NO_CONTEXT;
mm                117 arch/sh/mm/tlbflush_32.c 		if (mm == current->mm)
mm                118 arch/sh/mm/tlbflush_32.c 			activate_context(mm, cpu);
mm                 86 arch/sh/mm/tlbflush_64.c 	struct mm_struct *mm;
mm                 88 arch/sh/mm/tlbflush_64.c 	mm = vma->vm_mm;
mm                 89 arch/sh/mm/tlbflush_64.c 	if (cpu_context(cpu, mm) == NO_CONTEXT)
mm                 97 arch/sh/mm/tlbflush_64.c 	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
mm                128 arch/sh/mm/tlbflush_64.c void local_flush_tlb_mm(struct mm_struct *mm)
mm                133 arch/sh/mm/tlbflush_64.c 	if (cpu_context(cpu, mm) == NO_CONTEXT)
mm                138 arch/sh/mm/tlbflush_64.c 	cpu_context(cpu, mm) = NO_CONTEXT;
mm                139 arch/sh/mm/tlbflush_64.c 	if (mm == current->mm)
mm                140 arch/sh/mm/tlbflush_64.c 		activate_context(mm, cpu);
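
Note: both tlbflush_32.c and tlbflush_64.c above avoid walking TLB entries for a full-mm flush: they retire the mm's ASID instead, and if the mm is the current one they immediately activate a fresh context. The whole flush collapses to the shape below, built only from calls quoted above:

	/* Sketch: flush all of one mm's TLB entries on this CPU by ASID retire. */
	void local_flush_tlb_mm_sketch(struct mm_struct *mm)
	{
		unsigned int cpu = smp_processor_id();

		if (cpu_context(cpu, mm) == NO_CONTEXT)
			return;			/* never ran here, nothing cached */

		cpu_context(cpu, mm) = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm, cpu);	/* grab a fresh ASID */
	}
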
mm                  9 arch/sparc/include/asm/cacheflush_32.h #define flush_cache_mm(mm) \
mm                 10 arch/sparc/include/asm/cacheflush_32.h 	sparc32_cachetlb_ops->cache_mm(mm)
mm                 11 arch/sparc/include/asm/cacheflush_32.h #define flush_cache_dup_mm(mm) \
mm                 12 arch/sparc/include/asm/cacheflush_32.h 	sparc32_cachetlb_ops->cache_mm(mm)
mm                 35 arch/sparc/include/asm/cacheflush_32.h #define flush_sig_insns(mm,insn_addr) \
mm                 36 arch/sparc/include/asm/cacheflush_32.h 	sparc32_cachetlb_ops->sig_insns(mm, insn_addr)
mm                 22 arch/sparc/include/asm/cacheflush_64.h 	do { if ((__mm) == current->mm) flushw_user(); } while(0)
mm                 23 arch/sparc/include/asm/cacheflush_64.h #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
mm                 41 arch/sparc/include/asm/cacheflush_64.h void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
mm                 44 arch/sparc/include/asm/cacheflush_64.h #define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
mm                222 arch/sparc/include/asm/elf_64.h 			    (unsigned long)current->mm->context.vdso);	\
mm                 16 arch/sparc/include/asm/hugetlb.h void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
mm                 20 arch/sparc/include/asm/hugetlb.h pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
mm                 23 arch/sparc/include/asm/hugetlb.h static inline int is_hugepage_only_range(struct mm_struct *mm,
mm                 36 arch/sparc/include/asm/hugetlb.h static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
mm                 40 arch/sparc/include/asm/hugetlb.h 	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
mm                 16 arch/sparc/include/asm/mman.h 	struct mm_struct *mm = arg;
mm                 22 arch/sparc/include/asm/mman.h 	if (current->mm == mm) {
mm                 36 arch/sparc/include/asm/mman.h 		if (!current->mm->context.adi) {
mm                 39 arch/sparc/include/asm/mman.h 			current->mm->context.adi = true;
mm                 40 arch/sparc/include/asm/mman.h 			on_each_cpu_mask(mm_cpumask(current->mm),
mm                 41 arch/sparc/include/asm/mman.h 					 ipi_set_tstate_mcde, current->mm, 0);
mm                 67 arch/sparc/include/asm/mman.h 			vma = find_vma(current->mm, addr);
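
Note: the sparc mman.h lines above are the ADI (Application Data Integrity) enable path: the per-mm flag is set once, then every CPU currently running that mm gets an IPI so it can set TSTATE.MCDE for its running thread. Condensed from the quoted lines:

	/* Sketch: the first PROT_ADI mapping enables ADI for the whole mm. */
	static void enable_adi_sketch(void)
	{
		if (current->mm->context.adi)
			return;
		current->mm->context.adi = true;
		on_each_cpu_mask(mm_cpumask(current->mm),
				 ipi_set_tstate_mcde, current->mm, 0);
	}
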
mm                  9 arch/sparc/include/asm/mmu_context_32.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                 16 arch/sparc/include/asm/mmu_context_32.h int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
mm                 23 arch/sparc/include/asm/mmu_context_32.h void destroy_context(struct mm_struct *mm);
mm                 26 arch/sparc/include/asm/mmu_context_32.h void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
mm                 29 arch/sparc/include/asm/mmu_context_32.h #define deactivate_mm(tsk,mm)	do { } while (0)
mm                 32 arch/sparc/include/asm/mmu_context_32.h #define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL)
mm                 19 arch/sparc/include/asm/mmu_context_64.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                 28 arch/sparc/include/asm/mmu_context_64.h void get_new_mmu_context(struct mm_struct *mm);
mm                 29 arch/sparc/include/asm/mmu_context_64.h int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
mm                 30 arch/sparc/include/asm/mmu_context_64.h void destroy_context(struct mm_struct *mm);
mm                 38 arch/sparc/include/asm/mmu_context_64.h static inline void tsb_context_switch_ctx(struct mm_struct *mm,
mm                 41 arch/sparc/include/asm/mmu_context_64.h 	__tsb_context_switch(__pa(mm->pgd),
mm                 42 arch/sparc/include/asm/mmu_context_64.h 			     &mm->context.tsb_block[MM_TSB_BASE],
mm                 44 arch/sparc/include/asm/mmu_context_64.h 			     (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
mm                 45 arch/sparc/include/asm/mmu_context_64.h 			      &mm->context.tsb_block[MM_TSB_HUGE] :
mm                 50 arch/sparc/include/asm/mmu_context_64.h 			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
mm                 56 arch/sparc/include/asm/mmu_context_64.h void tsb_grow(struct mm_struct *mm,
mm                 60 arch/sparc/include/asm/mmu_context_64.h void smp_tsb_sync(struct mm_struct *mm);
mm                 81 arch/sparc/include/asm/mmu_context_64.h static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
mm                 86 arch/sparc/include/asm/mmu_context_64.h 	per_cpu(per_cpu_secondary_mm, cpu) = mm;
mm                 87 arch/sparc/include/asm/mmu_context_64.h 	if (unlikely(mm == &init_mm))
mm                 90 arch/sparc/include/asm/mmu_context_64.h 	spin_lock_irqsave(&mm->context.lock, flags);
mm                 91 arch/sparc/include/asm/mmu_context_64.h 	ctx_valid = CTX_VALID(mm->context);
mm                 93 arch/sparc/include/asm/mmu_context_64.h 		get_new_mmu_context(mm);
mm                125 arch/sparc/include/asm/mmu_context_64.h 	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
mm                131 arch/sparc/include/asm/mmu_context_64.h 	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
mm                132 arch/sparc/include/asm/mmu_context_64.h 		cpumask_set_cpu(cpu, mm_cpumask(mm));
mm                133 arch/sparc/include/asm/mmu_context_64.h 		__flush_tlb_mm(CTX_HWBITS(mm->context),
mm                136 arch/sparc/include/asm/mmu_context_64.h 	spin_unlock_irqrestore(&mm->context.lock, flags);
mm                139 arch/sparc/include/asm/mmu_context_64.h #define deactivate_mm(tsk,mm)	do { } while (0)
mm                140 arch/sparc/include/asm/mmu_context_64.h #define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
mm                181 arch/sparc/include/asm/mmu_context_64.h 		if (current && current->mm && current->mm->context.adi) {
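
Note: switch_mm() above (mmu_context_64.h lines 81-136) is the sparc64 counterpart of the sh ASID scheme: allocate a context number on first use, switch the TSB, and flush the secondary context register the first time this CPU attaches the mm. Condensed, using only symbols quoted above plus SECONDARY_CONTEXT from tlb_64.h below:

	/* Sketch: the context/TSB half of sparc64 switch_mm(). */
	static void switch_mm_sketch(struct mm_struct *mm, int cpu)
	{
		unsigned long flags;

		spin_lock_irqsave(&mm->context.lock, flags);
		if (!CTX_VALID(mm->context))
			get_new_mmu_context(mm);
		tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
		if (!cpumask_test_cpu(cpu, mm_cpumask(mm))) {
			cpumask_set_cpu(cpu, mm_cpumask(mm));
			__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
		}
		spin_unlock_irqrestore(&mm->context.lock, flags);
	}
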
mm                 26 arch/sparc/include/asm/pgalloc_32.h #define pgd_free(mm, pgd)	free_pgd_fast(pgd)
mm                 27 arch/sparc/include/asm/pgalloc_32.h #define pgd_alloc(mm)	get_pgd_fast()
mm                 38 arch/sparc/include/asm/pgalloc_32.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
mm                 50 arch/sparc/include/asm/pgalloc_32.h #define pmd_free(mm, pmd)		free_pmd_fast(pmd)
mm                 51 arch/sparc/include/asm/pgalloc_32.h #define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
mm                 53 arch/sparc/include/asm/pgalloc_32.h void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
mm                 59 arch/sparc/include/asm/pgalloc_32.h pgtable_t pte_alloc_one(struct mm_struct *mm);
mm                 61 arch/sparc/include/asm/pgalloc_32.h static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
mm                 72 arch/sparc/include/asm/pgalloc_32.h #define pte_free_kernel(mm, pte)	free_pte_fast(pte)
mm                 74 arch/sparc/include/asm/pgalloc_32.h void pte_free(struct mm_struct * mm, pgtable_t pte);
mm                 75 arch/sparc/include/asm/pgalloc_32.h #define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
mm                 26 arch/sparc/include/asm/pgalloc_64.h static inline pgd_t *pgd_alloc(struct mm_struct *mm)
mm                 31 arch/sparc/include/asm/pgalloc_64.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 43 arch/sparc/include/asm/pgalloc_64.h static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                 48 arch/sparc/include/asm/pgalloc_64.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
mm                 53 arch/sparc/include/asm/pgalloc_64.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                 58 arch/sparc/include/asm/pgalloc_64.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                 63 arch/sparc/include/asm/pgalloc_64.h pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
mm                 64 arch/sparc/include/asm/pgalloc_64.h pgtable_t pte_alloc_one(struct mm_struct *mm);
mm                 65 arch/sparc/include/asm/pgalloc_64.h void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
mm                 66 arch/sparc/include/asm/pgalloc_64.h void pte_free(struct mm_struct *mm, pgtable_t ptepage);
mm                121 arch/sparc/include/asm/pgtable_32.h #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
mm                160 arch/sparc/include/asm/pgtable_32.h static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                316 arch/sparc/include/asm/pgtable_32.h #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
mm                676 arch/sparc/include/asm/pgtable_64.h static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
mm                818 arch/sparc/include/asm/pgtable_64.h void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm                821 arch/sparc/include/asm/pgtable_64.h static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm                828 arch/sparc/include/asm/pgtable_64.h static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
mm                892 arch/sparc/include/asm/pgtable_64.h #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
mm                919 arch/sparc/include/asm/pgtable_64.h void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
mm                923 arch/sparc/include/asm/pgtable_64.h static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
mm                933 arch/sparc/include/asm/pgtable_64.h 	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
mm                934 arch/sparc/include/asm/pgtable_64.h 		tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
mm                938 arch/sparc/include/asm/pgtable_64.h static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
mm                943 arch/sparc/include/asm/pgtable_64.h 	set_pmd_at(mm, addr, pmdp, __pmd(0UL));
mm                947 arch/sparc/include/asm/pgtable_64.h static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                953 arch/sparc/include/asm/pgtable_64.h 	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
mm                956 arch/sparc/include/asm/pgtable_64.h #define set_pte_at(mm,addr,ptep,pte)	\
mm                957 arch/sparc/include/asm/pgtable_64.h 	__set_pte_at((mm), (addr), (ptep), (pte), 0)
mm                959 arch/sparc/include/asm/pgtable_64.h #define pte_clear(mm,addr,ptep)		\
mm                960 arch/sparc/include/asm/pgtable_64.h 	set_pte_at((mm), (addr), (ptep), __pte(0UL))
mm                963 arch/sparc/include/asm/pgtable_64.h #define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
mm                964 arch/sparc/include/asm/pgtable_64.h 	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))
mm                976 arch/sparc/include/asm/pgtable_64.h 			flush_dcache_page_all(current->mm,		\
mm               1002 arch/sparc/include/asm/pgtable_64.h void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm               1006 arch/sparc/include/asm/pgtable_64.h pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
mm               1034 arch/sparc/include/asm/pgtable_64.h void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
mm               1037 arch/sparc/include/asm/pgtable_64.h int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
mm               1041 arch/sparc/include/asm/pgtable_64.h static inline void arch_do_swap_page(struct mm_struct *mm,
mm               1054 arch/sparc/include/asm/pgtable_64.h 		adi_restore_tags(mm, vma, addr, pte);
mm               1058 arch/sparc/include/asm/pgtable_64.h static inline int arch_unmap_one(struct mm_struct *mm,
mm               1063 arch/sparc/include/asm/pgtable_64.h 		return adi_save_tags(mm, vma, addr, oldpte);
mm                 17 arch/sparc/include/asm/tlb_64.h void smp_flush_tlb_mm(struct mm_struct *mm);
mm                 18 arch/sparc/include/asm/tlb_64.h #define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
mm                 20 arch/sparc/include/asm/tlb_64.h #define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
mm                  9 arch/sparc/include/asm/tlbflush_32.h #define flush_tlb_mm(mm) \
mm                 10 arch/sparc/include/asm/tlbflush_32.h 	sparc32_cachetlb_ops->tlb_mm(mm)
mm                 13 arch/sparc/include/asm/tlbflush_64.h 	struct mm_struct *mm;
mm                 21 arch/sparc/include/asm/tlbflush_64.h void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
mm                 26 arch/sparc/include/asm/tlbflush_64.h static inline void flush_tlb_mm(struct mm_struct *mm)
mm                 56 arch/sparc/include/asm/tlbflush_64.h static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
mm                 58 arch/sparc/include/asm/tlbflush_64.h 	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
mm                 64 arch/sparc/include/asm/tlbflush_64.h void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
mm                 66 arch/sparc/include/asm/tlbflush_64.h #define global_flush_tlb_page(mm, vaddr) \
mm                 67 arch/sparc/include/asm/tlbflush_64.h 	smp_flush_tlb_page(mm, vaddr)
mm                 40 arch/sparc/include/asm/uprobes.h extern int  arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
mm                124 arch/sparc/kernel/adi_64.c tag_storage_desc_t *find_tag_store(struct mm_struct *mm,
mm                135 arch/sparc/kernel/adi_64.c 	if (mm->context.tag_store) {
mm                136 arch/sparc/kernel/adi_64.c 		tag_desc = mm->context.tag_store;
mm                137 arch/sparc/kernel/adi_64.c 		spin_lock_irqsave(&mm->context.tag_lock, flags);
mm                144 arch/sparc/kernel/adi_64.c 		spin_unlock_irqrestore(&mm->context.tag_lock, flags);
mm                156 arch/sparc/kernel/adi_64.c tag_storage_desc_t *alloc_tag_store(struct mm_struct *mm,
mm                174 arch/sparc/kernel/adi_64.c 	spin_lock_irqsave(&mm->context.tag_lock, flags);
mm                175 arch/sparc/kernel/adi_64.c 	if (mm->context.tag_store) {
mm                176 arch/sparc/kernel/adi_64.c 		tag_desc = mm->context.tag_store;
mm                205 arch/sparc/kernel/adi_64.c 		mm->context.tag_store = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
mm                206 arch/sparc/kernel/adi_64.c 		if (mm->context.tag_store == NULL) {
mm                210 arch/sparc/kernel/adi_64.c 		tag_desc = mm->context.tag_store;
mm                213 arch/sparc/kernel/adi_64.c 		open_desc = mm->context.tag_store;
mm                295 arch/sparc/kernel/adi_64.c 	spin_unlock_irqrestore(&mm->context.tag_lock, flags);
mm                299 arch/sparc/kernel/adi_64.c void del_tag_store(tag_storage_desc_t *tag_desc, struct mm_struct *mm)
mm                304 arch/sparc/kernel/adi_64.c 	spin_lock_irqsave(&mm->context.tag_lock, flags);
mm                312 arch/sparc/kernel/adi_64.c 		if (tag_desc != mm->context.tag_store) {
mm                317 arch/sparc/kernel/adi_64.c 	spin_unlock_irqrestore(&mm->context.tag_lock, flags);
mm                327 arch/sparc/kernel/adi_64.c void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
mm                338 arch/sparc/kernel/adi_64.c 	tag_desc = find_tag_store(mm, vma, addr);
mm                363 arch/sparc/kernel/adi_64.c 	del_tag_store(tag_desc, mm);
mm                370 arch/sparc/kernel/adi_64.c int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
mm                377 arch/sparc/kernel/adi_64.c 	tag_desc = alloc_tag_store(mm, vma, addr);
mm               1861 arch/sparc/kernel/perf_event.c 	if (!current->mm)
mm                426 arch/sparc/kernel/process_64.c 	struct mm_struct *mm;
mm                428 arch/sparc/kernel/process_64.c 	mm = t->task->mm;
mm                429 arch/sparc/kernel/process_64.c 	if (mm)
mm                430 arch/sparc/kernel/process_64.c 		tsb_context_switch(mm);
mm                318 arch/sparc/kernel/signal32.c 	pgdp = pgd_offset(current->mm, address);
mm                317 arch/sparc/kernel/signal_32.c 		flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
mm                414 arch/sparc/kernel/signal_32.c 		flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
mm                893 arch/sparc/kernel/smp_64.c 	struct mm_struct *mm = info;
mm                901 arch/sparc/kernel/smp_64.c 	if (tp->pgd_paddr == __pa(mm->pgd))
mm                902 arch/sparc/kernel/smp_64.c 		tsb_context_switch(mm);
mm                905 arch/sparc/kernel/smp_64.c void smp_tsb_sync(struct mm_struct *mm)
mm                907 arch/sparc/kernel/smp_64.c 	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
mm                980 arch/sparc/kernel/smp_64.c void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
mm               1080 arch/sparc/kernel/smp_64.c void smp_flush_tlb_mm(struct mm_struct *mm)
mm               1082 arch/sparc/kernel/smp_64.c 	u32 ctx = CTX_HWBITS(mm->context);
mm               1085 arch/sparc/kernel/smp_64.c 	if (atomic_read(&mm->mm_users) == 1) {
mm               1086 arch/sparc/kernel/smp_64.c 		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
mm               1092 arch/sparc/kernel/smp_64.c 			      mm_cpumask(mm));
mm               1113 arch/sparc/kernel/smp_64.c void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
mm               1115 arch/sparc/kernel/smp_64.c 	u32 ctx = CTX_HWBITS(mm->context);
mm               1123 arch/sparc/kernel/smp_64.c 	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
mm               1124 arch/sparc/kernel/smp_64.c 		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
mm               1126 arch/sparc/kernel/smp_64.c 		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
mm               1134 arch/sparc/kernel/smp_64.c void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
mm               1136 arch/sparc/kernel/smp_64.c 	unsigned long context = CTX_HWBITS(mm->context);
mm               1139 arch/sparc/kernel/smp_64.c 	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
mm               1140 arch/sparc/kernel/smp_64.c 		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
mm               1144 arch/sparc/kernel/smp_64.c 				      mm_cpumask(mm));
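
smp_flush_tlb_pending() and smp_flush_tlb_page() above share one optimization: if the mm is current on this CPU and has a single user, trim mm_cpumask() to the local CPU and skip the cross-call entirely. A hedged user-space sketch of that fast path; the mask, the mm, and the cross-call are all mocks.

/* Mock of the single-user fast path above (assumption: a plain int
 * and a bitmask stand in for atomic_t mm_users and the cpumask). */
#include <stdio.h>

struct mock_mm {
        int mm_users;           /* stand-in for atomic_t mm_users */
        unsigned long cpumask;  /* bit n set == mm ran on CPU n */
};

static void flush_tlb_mm_mock(struct mock_mm *mm, int this_cpu, int is_current)
{
        if (is_current && mm->mm_users == 1) {
                mm->cpumask = 1UL << this_cpu;  /* nobody else holds it */
                printf("local flush only, mask=%#lx\n", mm->cpumask);
        } else {
                printf("cross-call CPUs in mask %#lx\n", mm->cpumask);
        }
        /* the local __flush_tlb_* would run here in either case */
}

int main(void)
{
        struct mock_mm mm = { .mm_users = 1, .cpumask = 0xfUL };

        flush_tlb_mm_mock(&mm, 0, 1);   /* trims mask, no IPIs */
        mm.mm_users = 2;
        flush_tlb_mm_mock(&mm, 0, 1);   /* must cross-call */
        return 0;
}
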
mm                 92 arch/sparc/kernel/sys_sparc_64.c 	struct mm_struct *mm = current->mm;
mm                123 arch/sparc/kernel/sys_sparc_64.c 		vma = find_vma(mm, addr);
mm                153 arch/sparc/kernel/sys_sparc_64.c 	struct mm_struct *mm = current->mm;
mm                186 arch/sparc/kernel/sys_sparc_64.c 		vma = find_vma(mm, addr);
mm                195 arch/sparc/kernel/sys_sparc_64.c 	info.high_limit = mm->mmap_base;
mm                224 arch/sparc/kernel/sys_sparc_64.c 	get_area = current->mm->get_unmapped_area;
mm                280 arch/sparc/kernel/sys_sparc_64.c void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
mm                294 arch/sparc/kernel/sys_sparc_64.c 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
mm                295 arch/sparc/kernel/sys_sparc_64.c 		mm->get_unmapped_area = arch_get_unmapped_area;
mm                305 arch/sparc/kernel/sys_sparc_64.c 		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
mm                306 arch/sparc/kernel/sys_sparc_64.c 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
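
arch_pick_mmap_layout() above chooses between the two classic bases: legacy puts mmap_base just above TASK_UNMAPPED_BASE and allocates upward, top-down puts it just under the stack gap and allocates downward. A user-space rerun of only that arithmetic; all constants are illustrative, not the sparc64 values.

/* Illustrative rerun of the two mmap_base computations above
 * (assumption: made-up task size, gap, and random factor). */
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long task_size = 1UL << 43;            /* illustrative */
        unsigned long task_unmapped_base = 1UL << 32;   /* illustrative */
        unsigned long gap = 8UL << 20;                  /* stack gap */
        unsigned long random_factor = 0x3ff000UL;       /* illustrative */

        unsigned long legacy  = task_unmapped_base + random_factor;
        unsigned long topdown = PAGE_ALIGN(task_size - gap - random_factor);

        printf("legacy  mmap_base = %#lx (grows up)\n", legacy);
        printf("topdown mmap_base = %#lx (grows down)\n", topdown);
        return 0;
}
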
mm                227 arch/sparc/kernel/unaligned_32.c 			(current->mm ? current->mm->context :
mm                230 arch/sparc/kernel/unaligned_32.c 			(current->mm ? (unsigned long) current->mm->pgd :
mm                279 arch/sparc/kernel/unaligned_64.c 			(current->mm ? CTX_HWBITS(current->mm->context) :
mm                282 arch/sparc/kernel/unaligned_64.c 			(current->mm ? (unsigned long) current->mm->pgd :
mm                 77 arch/sparc/kernel/uprobes.c 			     struct mm_struct *mm, unsigned long addr)
mm                 51 arch/sparc/mm/fault_32.c 		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
mm                 53 arch/sparc/mm/fault_32.c 		(tsk->mm ? (unsigned long) tsk->mm->pgd :
mm                165 arch/sparc/mm/fault_32.c 	struct mm_struct *mm = tsk->mm;
mm                193 arch/sparc/mm/fault_32.c 	if (pagefault_disabled() || !mm)
mm                199 arch/sparc/mm/fault_32.c 	down_read(&mm->mmap_sem);
mm                204 arch/sparc/mm/fault_32.c 	vma = find_vma(mm, address);
mm                276 arch/sparc/mm/fault_32.c 	up_read(&mm->mmap_sem);
mm                284 arch/sparc/mm/fault_32.c 	up_read(&mm->mmap_sem);
mm                333 arch/sparc/mm/fault_32.c 	up_read(&mm->mmap_sem);
mm                341 arch/sparc/mm/fault_32.c 	up_read(&mm->mmap_sem);
mm                382 arch/sparc/mm/fault_32.c 	struct mm_struct *mm = tsk->mm;
mm                388 arch/sparc/mm/fault_32.c 	down_read(&mm->mmap_sem);
mm                389 arch/sparc/mm/fault_32.c 	vma = find_vma(mm, address);
mm                413 arch/sparc/mm/fault_32.c 	up_read(&mm->mmap_sem);
mm                416 arch/sparc/mm/fault_32.c 	up_read(&mm->mmap_sem);
mm                421 arch/sparc/mm/fault_32.c 	up_read(&mm->mmap_sem);
mm                 53 arch/sparc/mm/fault_64.c 	       (tsk->mm ?
mm                 54 arch/sparc/mm/fault_64.c 		CTX_HWBITS(tsk->mm->context) :
mm                 57 arch/sparc/mm/fault_64.c 	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
mm                 82 arch/sparc/mm/fault_64.c 	pgd_t *pgdp = pgd_offset(current->mm, tpc);
mm                264 arch/sparc/mm/fault_64.c 	struct mm_struct *mm = current->mm;
mm                313 arch/sparc/mm/fault_64.c 	if (faulthandler_disabled() || !mm)
mm                318 arch/sparc/mm/fault_64.c 	if (!down_read_trylock(&mm->mmap_sem)) {
mm                326 arch/sparc/mm/fault_64.c 		down_read(&mm->mmap_sem);
mm                332 arch/sparc/mm/fault_64.c 	vma = find_vma(mm, address);
mm                459 arch/sparc/mm/fault_64.c 	up_read(&mm->mmap_sem);
mm                461 arch/sparc/mm/fault_64.c 	mm_rss = get_mm_rss(mm);
mm                463 arch/sparc/mm/fault_64.c 	mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
mm                466 arch/sparc/mm/fault_64.c 		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
mm                467 arch/sparc/mm/fault_64.c 		tsb_grow(mm, MM_TSB_BASE, mm_rss);
mm                469 arch/sparc/mm/fault_64.c 	mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
mm                472 arch/sparc/mm/fault_64.c 		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
mm                473 arch/sparc/mm/fault_64.c 		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
mm                474 arch/sparc/mm/fault_64.c 			tsb_grow(mm, MM_TSB_HUGE, mm_rss);
mm                490 arch/sparc/mm/fault_64.c 	up_read(&mm->mmap_sem);
mm                502 arch/sparc/mm/fault_64.c 	up_read(&mm->mmap_sem);
mm                515 arch/sparc/mm/fault_64.c 	up_read(&mm->mmap_sem);
mm                 65 arch/sparc/mm/hugetlbpage.c 	struct mm_struct *mm = current->mm;
mm                 75 arch/sparc/mm/hugetlbpage.c 	info.high_limit = mm->mmap_base;
mm                102 arch/sparc/mm/hugetlbpage.c 	struct mm_struct *mm = current->mm;
mm                122 arch/sparc/mm/hugetlbpage.c 		vma = find_vma(mm, addr);
mm                127 arch/sparc/mm/hugetlbpage.c 	if (mm->get_unmapped_area == arch_get_unmapped_area)
mm                276 arch/sparc/mm/hugetlbpage.c pte_t *huge_pte_alloc(struct mm_struct *mm,
mm                283 arch/sparc/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
mm                284 arch/sparc/mm/hugetlbpage.c 	pud = pud_alloc(mm, pgd, addr);
mm                289 arch/sparc/mm/hugetlbpage.c 	pmd = pmd_alloc(mm, pud, addr);
mm                294 arch/sparc/mm/hugetlbpage.c 	return pte_alloc_map(mm, pmd, addr);
mm                297 arch/sparc/mm/hugetlbpage.c pte_t *huge_pte_offset(struct mm_struct *mm,
mm                304 arch/sparc/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
mm                320 arch/sparc/mm/hugetlbpage.c void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
mm                340 arch/sparc/mm/hugetlbpage.c 		mm->context.hugetlb_pte_count += nptes;
mm                349 arch/sparc/mm/hugetlbpage.c 	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
mm                352 arch/sparc/mm/hugetlbpage.c 		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
mm                356 arch/sparc/mm/hugetlbpage.c pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
mm                378 arch/sparc/mm/hugetlbpage.c 		mm->context.hugetlb_pte_count -= nptes;
mm                384 arch/sparc/mm/hugetlbpage.c 	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
mm                387 arch/sparc/mm/hugetlbpage.c 		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
mm                412 arch/sparc/mm/hugetlbpage.c 	mm_dec_nr_ptes(tlb->mm);
mm                449 arch/sparc/mm/hugetlbpage.c 	mm_dec_nr_pmds(tlb->mm);
mm                487 arch/sparc/mm/hugetlbpage.c 	mm_dec_nr_puds(tlb->mm);
mm                513 arch/sparc/mm/hugetlbpage.c 	pgd = pgd_offset(tlb->mm, addr);
mm                311 arch/sparc/mm/init_64.c static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
mm                315 arch/sparc/mm/init_64.c 	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
mm                322 arch/sparc/mm/init_64.c 		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
mm                418 arch/sparc/mm/init_64.c 	struct mm_struct *mm;
mm                430 arch/sparc/mm/init_64.c 	mm = vma->vm_mm;
mm                433 arch/sparc/mm/init_64.c 	if (!pte_accessible(mm, pte))
mm                436 arch/sparc/mm/init_64.c 	spin_lock_irqsave(&mm->context.lock, flags);
mm                440 arch/sparc/mm/init_64.c 	if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
mm                462 arch/sparc/mm/init_64.c 			__update_mmu_tsb_insert(mm, MM_TSB_HUGE,
mm                469 arch/sparc/mm/init_64.c 		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
mm                472 arch/sparc/mm/init_64.c 	spin_unlock_irqrestore(&mm->context.lock, flags);
mm                783 arch/sparc/mm/init_64.c 	struct mm_struct *mm;
mm                812 arch/sparc/mm/init_64.c 		mm = per_cpu(per_cpu_secondary_mm, cpu);
mm                814 arch/sparc/mm/init_64.c 		if (unlikely(!mm || mm == &init_mm))
mm                817 arch/sparc/mm/init_64.c 		old_ctx = mm->context.sparc64_ctx_val;
mm                821 arch/sparc/mm/init_64.c 			mm->context.sparc64_ctx_val = new_ctx;
mm                836 arch/sparc/mm/init_64.c void get_new_mmu_context(struct mm_struct *mm)
mm                844 arch/sparc/mm/init_64.c 	if (unlikely(CTX_VALID(mm->context)))
mm                846 arch/sparc/mm/init_64.c 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
mm                856 arch/sparc/mm/init_64.c 	if (mm->context.sparc64_ctx_val)
mm                857 arch/sparc/mm/init_64.c 		cpumask_clear(mm_cpumask(mm));
mm                861 arch/sparc/mm/init_64.c 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
mm               2890 arch/sparc/mm/init_64.c pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
mm               2901 arch/sparc/mm/init_64.c pgtable_t pte_alloc_one(struct mm_struct *mm)
mm               2913 arch/sparc/mm/init_64.c void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
mm               2926 arch/sparc/mm/init_64.c void pte_free(struct mm_struct *mm, pgtable_t pte)
mm               2944 arch/sparc/mm/init_64.c 	struct mm_struct *mm;
mm               2959 arch/sparc/mm/init_64.c 	mm = vma->vm_mm;
mm               2961 arch/sparc/mm/init_64.c 	spin_lock_irqsave(&mm->context.lock, flags);
mm               2963 arch/sparc/mm/init_64.c 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
mm               2964 arch/sparc/mm/init_64.c 		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
mm               2967 arch/sparc/mm/init_64.c 	spin_unlock_irqrestore(&mm->context.lock, flags);
mm               2974 arch/sparc/mm/init_64.c 	struct mm_struct *mm = __data;
mm               2976 arch/sparc/mm/init_64.c 	if (mm == current->mm)
mm               2977 arch/sparc/mm/init_64.c 		load_secondary_context(mm);
mm               2982 arch/sparc/mm/init_64.c 	struct mm_struct *mm = current->mm;
mm               2985 arch/sparc/mm/init_64.c 	if (faulthandler_disabled() || !mm) {
mm               2998 arch/sparc/mm/init_64.c 	tp = &mm->context.tsb_block[MM_TSB_HUGE];
mm               3000 arch/sparc/mm/init_64.c 		tsb_grow(mm, MM_TSB_HUGE, 0);
mm               3002 arch/sparc/mm/init_64.c 	tsb_context_switch(mm);
mm               3003 arch/sparc/mm/init_64.c 	smp_tsb_sync(mm);
mm               3013 arch/sparc/mm/init_64.c 		ctx = mm->context.sparc64_ctx_val;
mm               3018 arch/sparc/mm/init_64.c 		if (ctx != mm->context.sparc64_ctx_val) {
mm               3025 arch/sparc/mm/init_64.c 			do_flush_tlb_mm(mm);
mm               3030 arch/sparc/mm/init_64.c 			mm->context.sparc64_ctx_val = ctx;
mm               3036 arch/sparc/mm/init_64.c 			on_each_cpu(context_reload, mm, 0);
mm                277 arch/sparc/mm/leon_mm.c static void leon_flush_cache_mm(struct mm_struct *mm)
mm                294 arch/sparc/mm/leon_mm.c static void leon_flush_tlb_mm(struct mm_struct *mm)
mm                317 arch/sparc/mm/leon_mm.c static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
mm                 72 arch/sparc/mm/srmmu.c #define FLUSH_BEGIN(mm)
mm                 75 arch/sparc/mm/srmmu.c #define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
mm                149 arch/sparc/mm/srmmu.c void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
mm                373 arch/sparc/mm/srmmu.c pgtable_t pte_alloc_one(struct mm_struct *mm)
mm                378 arch/sparc/mm/srmmu.c 	if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0)
mm                388 arch/sparc/mm/srmmu.c void pte_free(struct mm_struct *mm, pgtable_t pte)
mm                435 arch/sparc/mm/srmmu.c static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
mm                443 arch/sparc/mm/srmmu.c 		mm->context = ctxp->ctx_number;
mm                444 arch/sparc/mm/srmmu.c 		ctxp->ctx_mm = mm;
mm                457 arch/sparc/mm/srmmu.c 	ctxp->ctx_mm = mm;
mm                458 arch/sparc/mm/srmmu.c 	mm->context = ctxp->ctx_number;
mm                493 arch/sparc/mm/srmmu.c void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
mm                498 arch/sparc/mm/srmmu.c 	if (mm->context == NO_CONTEXT) {
mm                500 arch/sparc/mm/srmmu.c 		alloc_context(old_mm, mm);
mm                502 arch/sparc/mm/srmmu.c 		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
mm                511 arch/sparc/mm/srmmu.c 	srmmu_set_context(mm->context);
mm                577 arch/sparc/mm/srmmu.c extern void tsunami_flush_cache_mm(struct mm_struct *mm);
mm                582 arch/sparc/mm/srmmu.c extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
mm                584 arch/sparc/mm/srmmu.c extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
mm                591 arch/sparc/mm/srmmu.c extern void swift_flush_cache_mm(struct mm_struct *mm);
mm                597 arch/sparc/mm/srmmu.c extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
mm                599 arch/sparc/mm/srmmu.c extern void swift_flush_tlb_mm(struct mm_struct *mm);
mm                643 arch/sparc/mm/srmmu.c extern void viking_flush_cache_mm(struct mm_struct *mm);
mm                649 arch/sparc/mm/srmmu.c extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
mm                653 arch/sparc/mm/srmmu.c extern void viking_flush_tlb_mm(struct mm_struct *mm);
mm                659 arch/sparc/mm/srmmu.c extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
mm                667 arch/sparc/mm/srmmu.c extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
mm                672 arch/sparc/mm/srmmu.c extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
mm                674 arch/sparc/mm/srmmu.c extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
mm               1016 arch/sparc/mm/srmmu.c int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm               1018 arch/sparc/mm/srmmu.c 	mm->context = NO_CONTEXT;
mm               1022 arch/sparc/mm/srmmu.c void destroy_context(struct mm_struct *mm)
mm               1026 arch/sparc/mm/srmmu.c 	if (mm->context != NO_CONTEXT) {
mm               1027 arch/sparc/mm/srmmu.c 		flush_cache_mm(mm);
mm               1028 arch/sparc/mm/srmmu.c 		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
mm               1029 arch/sparc/mm/srmmu.c 		flush_tlb_mm(mm);
mm               1031 arch/sparc/mm/srmmu.c 		free_context(mm->context);
mm               1033 arch/sparc/mm/srmmu.c 		mm->context = NO_CONTEXT;
mm               1255 arch/sparc/mm/srmmu.c static void turbosparc_flush_cache_mm(struct mm_struct *mm)
mm               1257 arch/sparc/mm/srmmu.c 	FLUSH_BEGIN(mm)
mm               1293 arch/sparc/mm/srmmu.c static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
mm               1307 arch/sparc/mm/srmmu.c static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
mm               1309 arch/sparc/mm/srmmu.c 	FLUSH_BEGIN(mm)
mm               1673 arch/sparc/mm/srmmu.c static void smp_flush_cache_mm(struct mm_struct *mm)
mm               1675 arch/sparc/mm/srmmu.c 	if (mm->context != NO_CONTEXT) {
mm               1677 arch/sparc/mm/srmmu.c 		cpumask_copy(&cpu_mask, mm_cpumask(mm));
mm               1680 arch/sparc/mm/srmmu.c 			xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
mm               1681 arch/sparc/mm/srmmu.c 		local_ops->cache_mm(mm);
mm               1685 arch/sparc/mm/srmmu.c static void smp_flush_tlb_mm(struct mm_struct *mm)
mm               1687 arch/sparc/mm/srmmu.c 	if (mm->context != NO_CONTEXT) {
mm               1689 arch/sparc/mm/srmmu.c 		cpumask_copy(&cpu_mask, mm_cpumask(mm));
mm               1692 arch/sparc/mm/srmmu.c 			xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
mm               1693 arch/sparc/mm/srmmu.c 			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
mm               1694 arch/sparc/mm/srmmu.c 				cpumask_copy(mm_cpumask(mm),
mm               1697 arch/sparc/mm/srmmu.c 		local_ops->tlb_mm(mm);
mm               1705 arch/sparc/mm/srmmu.c 	struct mm_struct *mm = vma->vm_mm;
mm               1707 arch/sparc/mm/srmmu.c 	if (mm->context != NO_CONTEXT) {
mm               1709 arch/sparc/mm/srmmu.c 		cpumask_copy(&cpu_mask, mm_cpumask(mm));
mm               1722 arch/sparc/mm/srmmu.c 	struct mm_struct *mm = vma->vm_mm;
mm               1724 arch/sparc/mm/srmmu.c 	if (mm->context != NO_CONTEXT) {
mm               1726 arch/sparc/mm/srmmu.c 		cpumask_copy(&cpu_mask, mm_cpumask(mm));
mm               1737 arch/sparc/mm/srmmu.c 	struct mm_struct *mm = vma->vm_mm;
mm               1739 arch/sparc/mm/srmmu.c 	if (mm->context != NO_CONTEXT) {
mm               1741 arch/sparc/mm/srmmu.c 		cpumask_copy(&cpu_mask, mm_cpumask(mm));
mm               1752 arch/sparc/mm/srmmu.c 	struct mm_struct *mm = vma->vm_mm;
mm               1754 arch/sparc/mm/srmmu.c 	if (mm->context != NO_CONTEXT) {
mm               1756 arch/sparc/mm/srmmu.c 		cpumask_copy(&cpu_mask, mm_cpumask(mm));
mm               1779 arch/sparc/mm/srmmu.c static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
mm               1782 arch/sparc/mm/srmmu.c 	cpumask_copy(&cpu_mask, mm_cpumask(mm));
mm               1786 arch/sparc/mm/srmmu.c 		    (unsigned long) mm, insn_addr);
mm               1787 arch/sparc/mm/srmmu.c 	local_ops->sig_insns(mm, insn_addr);
mm                 27 arch/sparc/mm/tlb.c 	struct mm_struct *mm = tb->mm;
mm                 34 arch/sparc/mm/tlb.c 	if (CTX_VALID(mm->context)) {
mm                 36 arch/sparc/mm/tlb.c 			global_flush_tlb_page(mm, tb->vaddrs[0]);
mm                 39 arch/sparc/mm/tlb.c 			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
mm                 42 arch/sparc/mm/tlb.c 			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
mm                 70 arch/sparc/mm/tlb.c static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
mm                 82 arch/sparc/mm/tlb.c 	if (unlikely(nr != 0 && mm != tb->mm)) {
mm                 88 arch/sparc/mm/tlb.c 		flush_tsb_user_page(mm, vaddr, hugepage_shift);
mm                 89 arch/sparc/mm/tlb.c 		global_flush_tlb_page(mm, vaddr);
mm                 94 arch/sparc/mm/tlb.c 		tb->mm = mm;
mm                113 arch/sparc/mm/tlb.c void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
mm                137 arch/sparc/mm/tlb.c 			flush_dcache_page_all(mm, page);
mm                142 arch/sparc/mm/tlb.c 		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
mm                146 arch/sparc/mm/tlb.c static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
mm                158 arch/sparc/mm/tlb.c 			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
mm                167 arch/sparc/mm/tlb.c static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
mm                170 arch/sparc/mm/tlb.c 	if (mm == &init_mm)
mm                184 arch/sparc/mm/tlb.c 				mm->context.hugetlb_pte_count++;
mm                186 arch/sparc/mm/tlb.c 				mm->context.thp_pte_count++;
mm                189 arch/sparc/mm/tlb.c 				mm->context.hugetlb_pte_count--;
mm                191 arch/sparc/mm/tlb.c 				mm->context.thp_pte_count--;
mm                210 arch/sparc/mm/tlb.c 			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
mm                211 arch/sparc/mm/tlb.c 			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
mm                214 arch/sparc/mm/tlb.c 			tlb_batch_pmd_scan(mm, addr, orig);
mm                219 arch/sparc/mm/tlb.c void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm                225 arch/sparc/mm/tlb.c 	__set_pmd_acct(mm, addr, orig, pmd);
mm                265 arch/sparc/mm/tlb.c void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm                270 arch/sparc/mm/tlb.c 	assert_spin_locked(&mm->page_table_lock);
mm                273 arch/sparc/mm/tlb.c 	if (!pmd_huge_pte(mm, pmdp))
mm                276 arch/sparc/mm/tlb.c 		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
mm                277 arch/sparc/mm/tlb.c 	pmd_huge_pte(mm, pmdp) = pgtable;
mm                280 arch/sparc/mm/tlb.c pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
mm                285 arch/sparc/mm/tlb.c 	assert_spin_locked(&mm->page_table_lock);
mm                288 arch/sparc/mm/tlb.c 	pgtable = pmd_huge_pte(mm, pmdp);
mm                291 arch/sparc/mm/tlb.c 		pmd_huge_pte(mm, pmdp) = NULL;
mm                293 arch/sparc/mm/tlb.c 		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
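
The tlb.c hits above center on tlb_batch_add_one(): addresses for one mm accumulate in a batch until it fills or a different mm arrives, then the whole batch is flushed at once. A self-contained sketch of that pattern; the batch size and the flush itself are stand-ins.

/* Sketch of the deferred-flush batching above (assumption: an int
 * stands in for struct mm_struct *, and the flush is a print). */
#include <stdio.h>

#define TLB_BATCH_NR    8       /* illustrative batch size */

struct tlb_batch {
        int mm;                 /* owner of the pending addresses */
        unsigned long vaddrs[TLB_BATCH_NR];
        unsigned long tlb_nr;
};

static void flush_tlb_pending(struct tlb_batch *tb)
{
        if (tb->tlb_nr)
                printf("flush %lu vaddrs for mm %d\n", tb->tlb_nr, tb->mm);
        tb->tlb_nr = 0;
}

static void tlb_batch_add_one(struct tlb_batch *tb, int mm, unsigned long vaddr)
{
        if (tb->tlb_nr && tb->mm != mm) /* new mm: drain the old batch */
                flush_tlb_pending(tb);
        if (tb->tlb_nr == 0)
                tb->mm = mm;
        tb->vaddrs[tb->tlb_nr++] = vaddr;
        if (tb->tlb_nr == TLB_BATCH_NR) /* full: drain */
                flush_tlb_pending(tb);
}

int main(void)
{
        struct tlb_batch tb = { 0 };

        for (unsigned long a = 0; a < 10; a++)
                tlb_batch_add_one(&tb, 1, 0x1000 + a * 0x1000);
        tlb_batch_add_one(&tb, 2, 0x20000);     /* mm switch drains */
        flush_tlb_pending(&tb);                 /* final drain */
        return 0;
}
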
mm                121 arch/sparc/mm/tsb.c 	struct mm_struct *mm = tb->mm;
mm                124 arch/sparc/mm/tsb.c 	spin_lock_irqsave(&mm->context.lock, flags);
mm                127 arch/sparc/mm/tsb.c 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
mm                128 arch/sparc/mm/tsb.c 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
mm                140 arch/sparc/mm/tsb.c 	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
mm                141 arch/sparc/mm/tsb.c 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
mm                142 arch/sparc/mm/tsb.c 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
mm                149 arch/sparc/mm/tsb.c 	spin_unlock_irqrestore(&mm->context.lock, flags);
mm                152 arch/sparc/mm/tsb.c void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
mm                157 arch/sparc/mm/tsb.c 	spin_lock_irqsave(&mm->context.lock, flags);
mm                160 arch/sparc/mm/tsb.c 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
mm                161 arch/sparc/mm/tsb.c 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
mm                174 arch/sparc/mm/tsb.c 	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
mm                175 arch/sparc/mm/tsb.c 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
mm                176 arch/sparc/mm/tsb.c 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
mm                183 arch/sparc/mm/tsb.c 	spin_unlock_irqrestore(&mm->context.lock, flags);
mm                194 arch/sparc/mm/tsb.c static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
mm                199 arch/sparc/mm/tsb.c 	mm->context.tsb_block[tsb_idx].tsb_nentries =
mm                216 arch/sparc/mm/tsb.c 	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
mm                277 arch/sparc/mm/tsb.c 		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
mm                278 arch/sparc/mm/tsb.c 		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
mm                279 arch/sparc/mm/tsb.c 		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
mm                285 arch/sparc/mm/tsb.c 		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
mm                286 arch/sparc/mm/tsb.c 		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
mm                287 arch/sparc/mm/tsb.c 		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
mm                292 arch/sparc/mm/tsb.c 		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];
mm                396 arch/sparc/mm/tsb.c void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
mm                432 arch/sparc/mm/tsb.c 		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
mm                443 arch/sparc/mm/tsb.c 		if (mm->context.tsb_block[tsb_index].tsb != NULL)
mm                444 arch/sparc/mm/tsb.c 			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
mm                473 arch/sparc/mm/tsb.c 	spin_lock_irqsave(&mm->context.lock, flags);
mm                475 arch/sparc/mm/tsb.c 	old_tsb = mm->context.tsb_block[tsb_index].tsb;
mm                477 arch/sparc/mm/tsb.c 		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
mm                478 arch/sparc/mm/tsb.c 	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
mm                487 arch/sparc/mm/tsb.c 		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
mm                488 arch/sparc/mm/tsb.c 		spin_unlock_irqrestore(&mm->context.lock, flags);
mm                494 arch/sparc/mm/tsb.c 	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;
mm                514 arch/sparc/mm/tsb.c 	mm->context.tsb_block[tsb_index].tsb = new_tsb;
mm                515 arch/sparc/mm/tsb.c 	setup_tsb_params(mm, tsb_index, new_size);
mm                517 arch/sparc/mm/tsb.c 	spin_unlock_irqrestore(&mm->context.lock, flags);
mm                524 arch/sparc/mm/tsb.c 		tsb_context_switch(mm);
mm                528 arch/sparc/mm/tsb.c 		smp_tsb_sync(mm);
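
tsb_grow() above resizes under mm->context.lock and re-checks the RSS threshold after taking it, so a racing grower backs off. A sketch of the sizing idea, smallest power-of-two entry count covering the RSS; the bounds and the threshold handling here are assumptions, not the kernel's exact sizing.

/* Sketch of grow-on-RSS sizing (assumption: illustrative min/max
 * entry counts; the locked swap of the table itself is elided). */
#include <stdio.h>

#define MIN_ENTRIES     512UL
#define MAX_ENTRIES     (1UL << 23)     /* illustrative cap */

static unsigned long tsb_entries_for(unsigned long rss)
{
        unsigned long nentries = MIN_ENTRIES;

        while (nentries < rss && nentries < MAX_ENTRIES)
                nentries <<= 1;         /* double until RSS fits */
        return nentries;
}

int main(void)
{
        unsigned long rss_limit = 0;    /* mimics tsb_rss_limit */
        unsigned long rss;

        for (rss = 100; rss < 2000000; rss *= 40) {
                if (rss < rss_limit)    /* the locked re-check's idea */
                        continue;
                rss_limit = tsb_entries_for(rss);
                printf("rss=%7lu -> %7lu entries\n", rss, rss_limit);
        }
        return 0;
}
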
mm                536 arch/sparc/mm/tsb.c int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm                538 arch/sparc/mm/tsb.c 	unsigned long mm_rss = get_mm_rss(mm);
mm                545 arch/sparc/mm/tsb.c 	spin_lock_init(&mm->context.lock);
mm                547 arch/sparc/mm/tsb.c 	mm->context.sparc64_ctx_val = 0UL;
mm                549 arch/sparc/mm/tsb.c 	mm->context.tag_store = NULL;
mm                550 arch/sparc/mm/tsb.c 	spin_lock_init(&mm->context.tag_lock);
mm                557 arch/sparc/mm/tsb.c 	saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
mm                558 arch/sparc/mm/tsb.c 	saved_thp_pte_count = mm->context.thp_pte_count;
mm                559 arch/sparc/mm/tsb.c 	mm->context.hugetlb_pte_count = 0;
mm                560 arch/sparc/mm/tsb.c 	mm->context.thp_pte_count = 0;
mm                570 arch/sparc/mm/tsb.c 		mm->context.tsb_block[i].tsb = NULL;
mm                575 arch/sparc/mm/tsb.c 	tsb_grow(mm, MM_TSB_BASE, mm_rss);
mm                579 arch/sparc/mm/tsb.c 		tsb_grow(mm, MM_TSB_HUGE,
mm                584 arch/sparc/mm/tsb.c 	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
mm                602 arch/sparc/mm/tsb.c void destroy_context(struct mm_struct *mm)
mm                607 arch/sparc/mm/tsb.c 		tsb_destroy_one(&mm->context.tsb_block[i]);
mm                611 arch/sparc/mm/tsb.c 	if (CTX_VALID(mm->context)) {
mm                612 arch/sparc/mm/tsb.c 		unsigned long nr = CTX_NRBITS(mm->context);
mm                619 arch/sparc/mm/tsb.c 	if (mm->context.tag_store) {
mm                624 arch/sparc/mm/tsb.c 		tag_desc = mm->context.tag_store;
mm                632 arch/sparc/mm/tsb.c 		kfree(mm->context.tag_store);
mm                633 arch/sparc/mm/tsb.c 		mm->context.tag_store = NULL;
mm                 37 arch/sparc/power/hibernate.c 	struct mm_struct *mm = current->active_mm;
mm                 39 arch/sparc/power/hibernate.c 	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
mm                364 arch/sparc/vdso/vma.c 	struct mm_struct *mm = current->mm;
mm                369 arch/sparc/vdso/vma.c 	down_write(&mm->mmap_sem);
mm                393 arch/sparc/vdso/vma.c 	current->mm->context.vdso = (void __user *)text_start;
mm                398 arch/sparc/vdso/vma.c 	vma = _install_special_mapping(mm,
mm                410 arch/sparc/vdso/vma.c 	vma = _install_special_mapping(mm,
mm                418 arch/sparc/vdso/vma.c 		do_munmap(mm, text_start, image->size, NULL);
mm                423 arch/sparc/vdso/vma.c 		current->mm->context.vdso = NULL;
mm                425 arch/sparc/vdso/vma.c 	up_write(&mm->mmap_sem);
mm                 22 arch/um/include/asm/mmu.h extern void free_ldt(struct mm_context *mm);
mm                 14 arch/um/include/asm/mmu_context.h extern void uml_setup_stubs(struct mm_struct *mm);
mm                 18 arch/um/include/asm/mmu_context.h static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
mm                 20 arch/um/include/asm/mmu_context.h 	uml_setup_stubs(mm);
mm                 23 arch/um/include/asm/mmu_context.h extern void arch_exit_mmap(struct mm_struct *mm);
mm                 24 arch/um/include/asm/mmu_context.h static inline void arch_unmap(struct mm_struct *mm,
mm                 28 arch/um/include/asm/mmu_context.h static inline void arch_bprm_mm_init(struct mm_struct *mm,
mm                 44 arch/um/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
mm                 73 arch/um/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, 
mm                 78 arch/um/include/asm/mmu_context.h extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
mm                 80 arch/um/include/asm/mmu_context.h extern void destroy_context(struct mm_struct *mm);
mm                 15 arch/um/include/asm/pgalloc.h #define pmd_populate_kernel(mm, pmd, pte) \
mm                 18 arch/um/include/asm/pgalloc.h #define pmd_populate(mm, pmd, pte) 				\
mm                 28 arch/um/include/asm/pgalloc.h extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
mm                 38 arch/um/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                 60 arch/um/include/asm/pgtable-3level.h #define pud_populate(mm, pud, pmd) \
mm                 83 arch/um/include/asm/pgtable-3level.h extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
mm                 95 arch/um/include/asm/pgtable.h #define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
mm                265 arch/um/include/asm/pgtable.h static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                313 arch/um/include/asm/pgtable.h #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
mm                347 arch/um/include/asm/pgtable.h extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
mm                 23 arch/um/include/asm/tlbflush.h extern void flush_tlb_mm(struct mm_struct *mm);
mm                 29 arch/um/kernel/exec.c 	ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data);
mm                 30 arch/um/kernel/exec.c 	ret = ret || unmap(&current->mm->context.id, STUB_END,
mm                 40 arch/um/kernel/exec.c 	__switch_mm(&current->mm->context.id);
mm                194 arch/um/kernel/mem.c pgd_t *pgd_alloc(struct mm_struct *mm)
mm                207 arch/um/kernel/mem.c void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                213 arch/um/kernel/mem.c pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
mm                 31 arch/um/kernel/reboot.c 		pid = t->mm->context.id.u.pid;
mm                 18 arch/um/kernel/skas/mmu.c static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
mm                 26 arch/um/kernel/skas/mmu.c 	pgd = pgd_offset(mm, proc);
mm                 27 arch/um/kernel/skas/mmu.c 	pud = pud_alloc(mm, pgd, proc);
mm                 31 arch/um/kernel/skas/mmu.c 	pmd = pmd_alloc(mm, pud, proc);
mm                 35 arch/um/kernel/skas/mmu.c 	pte = pte_alloc_map(mm, pmd, proc);
mm                 44 arch/um/kernel/skas/mmu.c 	pmd_free(mm, pmd);
mm                 46 arch/um/kernel/skas/mmu.c 	pud_free(mm, pud);
mm                 51 arch/um/kernel/skas/mmu.c int init_new_context(struct task_struct *task, struct mm_struct *mm)
mm                 54 arch/um/kernel/skas/mmu.c 	struct mm_context *to_mm = &mm->context;
mm                 63 arch/um/kernel/skas/mmu.c 	if (current->mm != NULL && current->mm != &init_mm)
mm                 64 arch/um/kernel/skas/mmu.c 		from_mm = &current->mm->context;
mm                 94 arch/um/kernel/skas/mmu.c void uml_setup_stubs(struct mm_struct *mm)
mm                 98 arch/um/kernel/skas/mmu.c 	ret = init_stub_pte(mm, STUB_CODE,
mm                103 arch/um/kernel/skas/mmu.c 	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
mm                107 arch/um/kernel/skas/mmu.c 	mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
mm                108 arch/um/kernel/skas/mmu.c 	mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);
mm                111 arch/um/kernel/skas/mmu.c 	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
mm                114 arch/um/kernel/skas/mmu.c 				      mm->context.stub_pages);
mm                125 arch/um/kernel/skas/mmu.c void arch_exit_mmap(struct mm_struct *mm)
mm                129 arch/um/kernel/skas/mmu.c 	pte = virt_to_pte(mm, STUB_CODE);
mm                131 arch/um/kernel/skas/mmu.c 		pte_clear(mm, STUB_CODE, pte);
mm                133 arch/um/kernel/skas/mmu.c 	pte = virt_to_pte(mm, STUB_DATA);
mm                137 arch/um/kernel/skas/mmu.c 	pte_clear(mm, STUB_DATA, pte);
mm                140 arch/um/kernel/skas/mmu.c void destroy_context(struct mm_struct *mm)
mm                142 arch/um/kernel/skas/mmu.c 	struct mm_context *mmu = &mm->context;
mm                 51 arch/um/kernel/skas/process.c 	if (current->mm == NULL)
mm                 54 arch/um/kernel/skas/process.c 	return current->mm->context.id.stack;
mm                 17 arch/um/kernel/skas/uaccess.c pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
mm                 23 arch/um/kernel/skas/uaccess.c 	if (mm == NULL)
mm                 26 arch/um/kernel/skas/uaccess.c 	pgd = pgd_offset(mm, addr);
mm                 43 arch/um/kernel/skas/uaccess.c 	pte_t *pte = virt_to_pte(current->mm, virt);
mm                 51 arch/um/kernel/skas/uaccess.c 		pte = virt_to_pte(current->mm, virt);
mm                127 arch/um/kernel/time.c 	if (get_current()->mm != NULL)
mm                130 arch/um/kernel/time.c 		os_alarm_process(get_current()->mm->context.id.u.pid);
mm                 42 arch/um/kernel/tlb.c 	struct mm_struct *mm;
mm                 47 arch/um/kernel/tlb.c #define INIT_HVC(mm, force, userspace) \
mm                 50 arch/um/kernel/tlb.c 	   .mm		= mm, \
mm                 74 arch/um/kernel/tlb.c 				ret = map(&hvc->mm->context.id, op->u.mmap.addr,
mm                 85 arch/um/kernel/tlb.c 				ret = unmap(&hvc->mm->context.id,
mm                 97 arch/um/kernel/tlb.c 				ret = protect(&hvc->mm->context.id,
mm                302 arch/um/kernel/tlb.c void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
mm                310 arch/um/kernel/tlb.c 	hvc = INIT_HVC(mm, force, userspace);
mm                311 arch/um/kernel/tlb.c 	pgd = pgd_offset(mm, addr);
mm                331 arch/um/kernel/tlb.c 		up_write(&current->mm->mmap_sem);
mm                339 arch/um/kernel/tlb.c 	struct mm_struct *mm;
mm                348 arch/um/kernel/tlb.c 	mm = &init_mm;
mm                349 arch/um/kernel/tlb.c 	hvc = INIT_HVC(mm, force, userspace);
mm                351 arch/um/kernel/tlb.c 		pgd = pgd_offset(mm, addr);
mm                430 arch/um/kernel/tlb.c 	struct mm_struct *mm = vma->vm_mm;
mm                436 arch/um/kernel/tlb.c 	pgd = pgd_offset(mm, address);
mm                460 arch/um/kernel/tlb.c 	mm_id = &mm->context.id;
mm                493 arch/um/kernel/tlb.c pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
mm                495 arch/um/kernel/tlb.c 	return pgd_offset(mm, address);
mm                515 arch/um/kernel/tlb.c 	pgd_t *pgd = pgd_offset(task->mm, addr);
mm                528 arch/um/kernel/tlb.c 	if (atomic_read(&current->mm->mm_users) == 0)
mm                531 arch/um/kernel/tlb.c 	flush_tlb_mm(current->mm);
mm                549 arch/um/kernel/tlb.c static void fix_range(struct mm_struct *mm, unsigned long start_addr,
mm                556 arch/um/kernel/tlb.c 	if (atomic_read(&mm->mm_users) == 0)
mm                559 arch/um/kernel/tlb.c 	fix_range_common(mm, start_addr, end_addr, force);
mm                571 arch/um/kernel/tlb.c void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
mm                574 arch/um/kernel/tlb.c 	fix_range(mm, start, end, 0);
mm                577 arch/um/kernel/tlb.c void flush_tlb_mm(struct mm_struct *mm)
mm                579 arch/um/kernel/tlb.c 	struct vm_area_struct *vma = mm->mmap;
mm                582 arch/um/kernel/tlb.c 		fix_range(mm, vma->vm_start, vma->vm_end, 0);
mm                589 arch/um/kernel/tlb.c 	struct mm_struct *mm = current->mm;
mm                590 arch/um/kernel/tlb.c 	struct vm_area_struct *vma = mm->mmap;
mm                593 arch/um/kernel/tlb.c 		fix_range(mm, vma->vm_start, vma->vm_end, 1);
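
flush_tlb_mm() and force_flush_all() above both walk the mm's vma chain and fix up each mapped range via fix_range(). A mock of that walk; the list node is a stand-in for vm_area_struct and the fix-up is a print.

/* Mock of the vma walk above (assumption: hand-built two-node list
 * in place of the kernel's vm_next chain). */
#include <stdio.h>
#include <stddef.h>

struct mock_vma {
        unsigned long vm_start, vm_end;
        struct mock_vma *vm_next;
};

static void fix_range(unsigned long start, unsigned long end, int force)
{
        printf("fix [%#lx, %#lx) force=%d\n", start, end, force);
}

int main(void)
{
        struct mock_vma text = { 0x400000, 0x401000, NULL };
        struct mock_vma heap = { 0x600000, 0x620000, NULL };

        text.vm_next = &heap;
        for (struct mock_vma *vma = &text; vma != NULL; vma = vma->vm_next)
                fix_range(vma->vm_start, vma->vm_end, 0);
        return 0;
}
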
mm                 28 arch/um/kernel/trap.c 	struct mm_struct *mm = current->mm;
mm                 49 arch/um/kernel/trap.c 	down_read(&mm->mmap_sem);
mm                 50 arch/um/kernel/trap.c 	vma = find_vma(mm, address);
mm                106 arch/um/kernel/trap.c 		pgd = pgd_offset(mm, address);
mm                125 arch/um/kernel/trap.c 	up_read(&mm->mmap_sem);
mm                134 arch/um/kernel/trap.c 	up_read(&mm->mmap_sem);
mm                225 arch/um/kernel/trap.c 	else if (current->mm == NULL) {
mm                127 arch/unicore32/include/asm/cacheflush.h extern void flush_cache_mm(struct mm_struct *mm);
mm                133 arch/unicore32/include/asm/cacheflush.h #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
mm                 18 arch/unicore32/include/asm/cpu-single.h #define cpu_switch_mm(pgd, mm) cpu_do_switch_mm(virt_to_phys(pgd), mm)
mm                 35 arch/unicore32/include/asm/cpu-single.h extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
mm                 83 arch/unicore32/include/asm/elf.h extern unsigned long arch_randomize_brk(struct mm_struct *mm);
mm                 21 arch/unicore32/include/asm/mmu_context.h #define init_new_context(tsk, mm)	0
mm                 23 arch/unicore32/include/asm/mmu_context.h #define destroy_context(mm)		do { } while (0)
mm                 35 arch/unicore32/include/asm/mmu_context.h enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                 55 arch/unicore32/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)	do { } while (0)
mm                 65 arch/unicore32/include/asm/mmu_context.h #define arch_exit_mmap(mm) \
mm                 67 arch/unicore32/include/asm/mmu_context.h 	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
mm                 73 arch/unicore32/include/asm/mmu_context.h 			mm->mmap = NULL; \
mm                 74 arch/unicore32/include/asm/mmu_context.h 		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
mm                 75 arch/unicore32/include/asm/mmu_context.h 		vmacache_invalidate(mm); \
mm                 76 arch/unicore32/include/asm/mmu_context.h 		mm->map_count--; \
mm                 82 arch/unicore32/include/asm/mmu_context.h 				struct mm_struct *mm)
mm                 87 arch/unicore32/include/asm/mmu_context.h static inline void arch_unmap(struct mm_struct *mm,
mm                 92 arch/unicore32/include/asm/mmu_context.h static inline void arch_bprm_mm_init(struct mm_struct *mm,
mm                 24 arch/unicore32/include/asm/pgalloc.h extern pgd_t *get_pgd_slow(struct mm_struct *mm);
mm                 25 arch/unicore32/include/asm/pgalloc.h extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
mm                 27 arch/unicore32/include/asm/pgalloc.h #define pgd_alloc(mm)			get_pgd_slow(mm)
mm                 28 arch/unicore32/include/asm/pgalloc.h #define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)
mm                 34 arch/unicore32/include/asm/pgalloc.h pte_alloc_one_kernel(struct mm_struct *mm)
mm                 36 arch/unicore32/include/asm/pgalloc.h 	pte_t *pte = __pte_alloc_one_kernel(mm);
mm                 45 arch/unicore32/include/asm/pgalloc.h pte_alloc_one(struct mm_struct *mm)
mm                 49 arch/unicore32/include/asm/pgalloc.h 	pte = __pte_alloc_one(mm, GFP_PGTABLE_USER);
mm                 68 arch/unicore32/include/asm/pgalloc.h pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
mm                 80 arch/unicore32/include/asm/pgalloc.h pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
mm                155 arch/unicore32/include/asm/pgtable.h #define pte_clear(mm, addr, ptep)	set_pte(ptep, __pte(0))
mm                166 arch/unicore32/include/asm/pgtable.h #define set_pte_at(mm, addr, ptep, pteval)	\
mm                231 arch/unicore32/include/asm/pgtable.h #define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))
mm                 72 arch/unicore32/include/asm/tlbflush.h static inline void local_flush_tlb_mm(struct mm_struct *mm)
mm                 76 arch/unicore32/include/asm/tlbflush.h 	if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
mm                295 arch/unicore32/kernel/process.c unsigned long arch_randomize_brk(struct mm_struct *mm)
mm                297 arch/unicore32/kernel/process.c 	return randomize_page(mm->brk, 0x02000000);
mm                308 arch/unicore32/kernel/process.c 	struct mm_struct *mm = current->mm;
mm                309 arch/unicore32/kernel/process.c 	return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
mm                 38 arch/unicore32/mm/fault.c void show_pte(struct mm_struct *mm, unsigned long addr)
mm                 42 arch/unicore32/mm/fault.c 	if (!mm)
mm                 43 arch/unicore32/mm/fault.c 		mm = &init_mm;
mm                 45 arch/unicore32/mm/fault.c 	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
mm                 46 arch/unicore32/mm/fault.c 	pgd = pgd_offset(mm, addr);
mm                 88 arch/unicore32/mm/fault.c static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
mm                106 arch/unicore32/mm/fault.c 	show_pte(mm, addr);
mm                130 arch/unicore32/mm/fault.c 	struct mm_struct *mm = tsk->active_mm;
mm                139 arch/unicore32/mm/fault.c 		__do_kernel_fault(mm, addr, fsr, regs);
mm                162 arch/unicore32/mm/fault.c static vm_fault_t __do_pf(struct mm_struct *mm, unsigned long addr,
mm                168 arch/unicore32/mm/fault.c 	vma = find_vma(mm, addr);
mm                202 arch/unicore32/mm/fault.c 	struct mm_struct *mm;
mm                208 arch/unicore32/mm/fault.c 	mm = tsk->mm;
mm                214 arch/unicore32/mm/fault.c 	if (faulthandler_disabled() || !mm)
mm                227 arch/unicore32/mm/fault.c 	if (!down_read_trylock(&mm->mmap_sem)) {
mm                232 arch/unicore32/mm/fault.c 		down_read(&mm->mmap_sem);
mm                247 arch/unicore32/mm/fault.c 	fault = __do_pf(mm, addr, fsr, flags, tsk);
mm                269 arch/unicore32/mm/fault.c 	up_read(&mm->mmap_sem);
mm                315 arch/unicore32/mm/fault.c 	__do_kernel_fault(mm, addr, fsr, regs);
mm                 16 arch/unicore32/mm/flush.c void flush_cache_mm(struct mm_struct *mm)
mm                 24 arch/unicore32/mm/pgd.c pgd_t *get_pgd_slow(struct mm_struct *mm)
mm                 50 arch/unicore32/mm/pgd.c 		new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0);
mm                 54 arch/unicore32/mm/pgd.c 		new_pte = pte_alloc_map(mm, new_pmd, 0);
mm                 68 arch/unicore32/mm/pgd.c 	pmd_free(mm, new_pmd);
mm                 69 arch/unicore32/mm/pgd.c 	mm_dec_nr_pmds(mm);
mm                 76 arch/unicore32/mm/pgd.c void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
mm                 96 arch/unicore32/mm/pgd.c 	pte_free(mm, pte);
mm                 97 arch/unicore32/mm/pgd.c 	mm_dec_nr_ptes(mm);
mm                 98 arch/unicore32/mm/pgd.c 	pmd_free(mm, pmd);
mm                 99 arch/unicore32/mm/pgd.c 	mm_dec_nr_pmds(mm);
mm                367 arch/x86/entry/common.c 	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
mm                 63 arch/x86/entry/vdso/vma.c 			(unsigned long)current->mm->context.vdso;
mm                 76 arch/x86/entry/vdso/vma.c 	const struct vdso_image *image = current->mm->context.vdso_image;
mm                 82 arch/x86/entry/vdso/vma.c 	current->mm->context.vdso = (void __user *)new_vma->vm_start;
mm                148 arch/x86/entry/vdso/vma.c 	struct mm_struct *mm = current->mm;
mm                153 arch/x86/entry/vdso/vma.c 	if (down_write_killable(&mm->mmap_sem))
mm                168 arch/x86/entry/vdso/vma.c 	vma = _install_special_mapping(mm,
mm                180 arch/x86/entry/vdso/vma.c 	vma = _install_special_mapping(mm,
mm                189 arch/x86/entry/vdso/vma.c 		do_munmap(mm, text_start, image->size, NULL);
mm                191 arch/x86/entry/vdso/vma.c 		current->mm->context.vdso = (void __user *)text_start;
mm                192 arch/x86/entry/vdso/vma.c 		current->mm->context.vdso_image = image;
mm                196 arch/x86/entry/vdso/vma.c 	up_write(&mm->mmap_sem);
mm                247 arch/x86/entry/vdso/vma.c 	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
mm                255 arch/x86/entry/vdso/vma.c 	struct mm_struct *mm = current->mm;
mm                258 arch/x86/entry/vdso/vma.c 	down_write(&mm->mmap_sem);
mm                266 arch/x86/entry/vdso/vma.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm                269 arch/x86/entry/vdso/vma.c 			up_write(&mm->mmap_sem);
mm                273 arch/x86/entry/vdso/vma.c 	up_write(&mm->mmap_sem);
mm                316 arch/x86/entry/vsyscall/vsyscall_64.c struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
mm                319 arch/x86/entry/vsyscall/vsyscall_64.c 	if (!mm || mm->context.ia32_compat)
mm                327 arch/x86/entry/vsyscall/vsyscall_64.c int in_gate_area(struct mm_struct *mm, unsigned long addr)
mm                329 arch/x86/entry/vsyscall/vsyscall_64.c 	struct vm_area_struct *vma = get_gate_vma(mm);
mm               2125 arch/x86/events/core.c static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
mm               2140 arch/x86/events/core.c 	lockdep_assert_held_write(&mm->mmap_sem);
mm               2142 arch/x86/events/core.c 	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
mm               2143 arch/x86/events/core.c 		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
mm               2146 arch/x86/events/core.c static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
mm               2152 arch/x86/events/core.c 	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
mm               2153 arch/x86/events/core.c 		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
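
The x86_pmu_event_mapped()/x86_pmu_event_unmapped() pair above acts only on transition edges of a per-mm counter: atomic_inc_return() == 1 on the first mapping, atomic_dec_and_test() on the last unmap. A sketch of that edge-triggered pattern with C11 atomics standing in for the kernel's atomic_t.

/* Sketch of the transition-edge refcount above (assumption: C11
 * atomics and prints replace atomic_t and the CPU-mask IPI). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int rdpmc_allowed;        /* per-mm in the real code */

static void event_mapped(void)
{
        if (atomic_fetch_add(&rdpmc_allowed, 1) + 1 == 1)
                printf("first mapping: enable RDPMC on the mm's CPUs\n");
}

static void event_unmapped(void)
{
        if (atomic_fetch_sub(&rdpmc_allowed, 1) - 1 == 0)
                printf("last mapping gone: disable RDPMC\n");
}

int main(void)
{
        event_mapped();         /* enables */
        event_mapped();         /* no-op */
        event_unmapped();       /* no-op */
        event_unmapped();       /* disables */
        return 0;
}
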
mm                860 arch/x86/events/intel/lbr.c 		if (!current->mm)
mm                 84 arch/x86/hyperv/mmu.c 	if (info->mm) {
mm                 89 arch/x86/hyperv/mmu.c 		flush->address_space = virt_to_phys(info->mm->pgd);
mm                180 arch/x86/hyperv/mmu.c 	if (info->mm) {
mm                185 arch/x86/hyperv/mmu.c 		flush->address_space = virt_to_phys(info->mm->pgd);
mm                 77 arch/x86/ia32/ia32_aout.c 	current->mm->arg_start = (unsigned long) p;
mm                 87 arch/x86/ia32/ia32_aout.c 	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
mm                 97 arch/x86/ia32/ia32_aout.c 	current->mm->env_end = (unsigned long) p;
mm                148 arch/x86/ia32/ia32_aout.c 	current->mm->end_code = ex.a_text +
mm                149 arch/x86/ia32/ia32_aout.c 		(current->mm->start_code = N_TXTADDR(ex));
mm                150 arch/x86/ia32/ia32_aout.c 	current->mm->end_data = ex.a_data +
mm                151 arch/x86/ia32/ia32_aout.c 		(current->mm->start_data = N_DATADDR(ex));
mm                152 arch/x86/ia32/ia32_aout.c 	current->mm->brk = ex.a_bss +
mm                153 arch/x86/ia32/ia32_aout.c 		(current->mm->start_brk = N_BSSADDR(ex));
mm                225 arch/x86/ia32/ia32_aout.c 	error = set_brk(current->mm->start_brk, current->mm->brk);
mm                231 arch/x86/ia32/ia32_aout.c 	current->mm->start_stack =
mm                239 arch/x86/ia32/ia32_aout.c 	(regs)->sp = current->mm->start_stack;
mm                292 arch/x86/ia32/ia32_signal.c 		if (current->mm->context.vdso)
mm                293 arch/x86/ia32/ia32_signal.c 			restorer = current->mm->context.vdso +
mm                372 arch/x86/ia32/ia32_signal.c 			restorer = current->mm->context.vdso +
mm                140 arch/x86/include/asm/efi.h extern void efi_switch_mm(struct mm_struct *mm);
mm                332 arch/x86/include/asm/elf.h 			    (unsigned long __force)current->mm->context.vdso); \
mm                340 arch/x86/include/asm/elf.h 			    (unsigned long __force)current->mm->context.vdso); \
mm                355 arch/x86/include/asm/elf.h #define VDSO_CURRENT_BASE	((unsigned long)current->mm->context.vdso)
mm                358 arch/x86/include/asm/elf.h 	((unsigned long)current->mm->context.vdso +			\
mm                610 arch/x86/include/asm/fpu/internal.h 	if (current->mm) {
mm                 10 arch/x86/include/asm/hugetlb.h static inline int is_hugepage_only_range(struct mm_struct *mm,
mm                 67 arch/x86/include/asm/ia32.h extern void ia32_pick_mmap_layout(struct mm_struct *mm);
mm                128 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm0
mm                131 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm1
mm                134 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm2
mm                137 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm3
mm                140 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm4
mm                143 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm5
mm                146 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm6
mm                149 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm7
mm                152 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm8
mm                155 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm9
mm                158 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm10
mm                161 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm11
mm                164 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm12
mm                167 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm13
mm                170 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm14
mm                173 arch/x86/include/asm/inst.h 	.ifc \xmm,%xmm15
mm                228 arch/x86/include/asm/inst.h 	XMM_NUM extrd_opd2 \xmm
mm                253 arch/x86/include/asm/kvm_emulate.h 		unsigned mm;
mm                 59 arch/x86/include/asm/mmu.h #define INIT_MM_CONTEXT(mm)						\
mm                 62 arch/x86/include/asm/mmu.h 		.lock = __MUTEX_INITIALIZER(mm.context.lock),		\
mm                 31 arch/x86/include/asm/mmu_context.h static inline void load_mm_cr4_irqsoff(struct mm_struct *mm)
mm                 34 arch/x86/include/asm/mmu_context.h 	    atomic_read(&mm->context.perf_rdpmc_allowed))
mm                 40 arch/x86/include/asm/mmu_context.h static inline void load_mm_cr4_irqsoff(struct mm_struct *mm) {}
mm                 81 arch/x86/include/asm/mmu_context.h static inline void init_new_context_ldt(struct mm_struct *mm)
mm                 83 arch/x86/include/asm/mmu_context.h 	mm->context.ldt = NULL;
mm                 84 arch/x86/include/asm/mmu_context.h 	init_rwsem(&mm->context.ldt_usr_sem);
mm                 86 arch/x86/include/asm/mmu_context.h int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
mm                 87 arch/x86/include/asm/mmu_context.h void destroy_context_ldt(struct mm_struct *mm);
mm                 88 arch/x86/include/asm/mmu_context.h void ldt_arch_exit_mmap(struct mm_struct *mm);
mm                 90 arch/x86/include/asm/mmu_context.h static inline void init_new_context_ldt(struct mm_struct *mm) { }
mm                 92 arch/x86/include/asm/mmu_context.h 				  struct mm_struct *mm)
mm                 96 arch/x86/include/asm/mmu_context.h static inline void destroy_context_ldt(struct mm_struct *mm) { }
mm                 97 arch/x86/include/asm/mmu_context.h static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
mm                100 arch/x86/include/asm/mmu_context.h static inline void load_mm_ldt(struct mm_struct *mm)
mm                106 arch/x86/include/asm/mmu_context.h 	ldt = READ_ONCE(mm->context.ldt);
mm                180 arch/x86/include/asm/mmu_context.h void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
mm                187 arch/x86/include/asm/mmu_context.h 				   struct mm_struct *mm)
mm                189 arch/x86/include/asm/mmu_context.h 	mutex_init(&mm->context.lock);
mm                191 arch/x86/include/asm/mmu_context.h 	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
mm                192 arch/x86/include/asm/mmu_context.h 	atomic64_set(&mm->context.tlb_gen, 0);
mm                197 arch/x86/include/asm/mmu_context.h 		mm->context.pkey_allocation_map = 0x1;
mm                199 arch/x86/include/asm/mmu_context.h 		mm->context.execute_only_pkey = -1;
mm                202 arch/x86/include/asm/mmu_context.h 	init_new_context_ldt(mm);
mm                205 arch/x86/include/asm/mmu_context.h static inline void destroy_context(struct mm_struct *mm)
mm                207 arch/x86/include/asm/mmu_context.h 	destroy_context_ldt(mm);
mm                224 arch/x86/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)			\
mm                229 arch/x86/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)			\
mm                237 arch/x86/include/asm/mmu_context.h 				  struct mm_struct *mm)
mm                244 arch/x86/include/asm/mmu_context.h 	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
mm                245 arch/x86/include/asm/mmu_context.h 	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
mm                249 arch/x86/include/asm/mmu_context.h static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
mm                251 arch/x86/include/asm/mmu_context.h 	arch_dup_pkeys(oldmm, mm);
mm                252 arch/x86/include/asm/mmu_context.h 	paravirt_arch_dup_mmap(oldmm, mm);
mm                253 arch/x86/include/asm/mmu_context.h 	return ldt_dup_context(oldmm, mm);
mm                256 arch/x86/include/asm/mmu_context.h static inline void arch_exit_mmap(struct mm_struct *mm)
mm                258 arch/x86/include/asm/mmu_context.h 	paravirt_arch_exit_mmap(mm);
mm                259 arch/x86/include/asm/mmu_context.h 	ldt_arch_exit_mmap(mm);
mm                263 arch/x86/include/asm/mmu_context.h static inline bool is_64bit_mm(struct mm_struct *mm)
mm                266 arch/x86/include/asm/mmu_context.h 		!(mm->context.ia32_compat == TIF_IA32);
mm                269 arch/x86/include/asm/mmu_context.h static inline bool is_64bit_mm(struct mm_struct *mm)
mm                275 arch/x86/include/asm/mmu_context.h static inline void arch_bprm_mm_init(struct mm_struct *mm,
mm                278 arch/x86/include/asm/mmu_context.h 	mpx_mm_init(mm);
mm                281 arch/x86/include/asm/mmu_context.h static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
mm                302 arch/x86/include/asm/mmu_context.h 		mpx_notify_unmap(mm, start, end);
mm                316 arch/x86/include/asm/mmu_context.h 	if (!current->mm)
mm                323 arch/x86/include/asm/mmu_context.h 	if (current->mm != vma->vm_mm)
mm                361 arch/x86/include/asm/mmu_context.h 	struct mm_struct *mm;
mm                377 arch/x86/include/asm/mmu_context.h static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
mm                382 arch/x86/include/asm/mmu_context.h 	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
mm                383 arch/x86/include/asm/mmu_context.h 	switch_mm_irqs_off(NULL, mm, current);
mm                405 arch/x86/include/asm/mmu_context.h 	switch_mm_irqs_off(NULL, prev_state.mm, current);
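The asm/mmu_context.h entries above (temp_mm_state_t, use_temporary_mm(), and the switch back to prev_state.mm) implement a stash/switch/restore pattern: remember the loaded mm, switch to a throwaway one, do the work, then put the original back. A toy user-space analogue of just that control flow, with made-up names and none of the kernel's IRQ-off discipline:

#include <stdio.h>

/* Stand-ins for mm_struct and cpu_tlbstate.loaded_mm. */
struct ctx { const char *name; };
static struct ctx *loaded_ctx;

struct saved { struct ctx *prev; };

static struct saved use_temporary(struct ctx *tmp)
{
	struct saved s = { .prev = loaded_ctx };
	loaded_ctx = tmp;	/* kernel: switch_mm_irqs_off(NULL, mm, current) */
	return s;
}

static void unuse_temporary(struct saved s)
{
	loaded_ctx = s.prev;	/* restore the interrupted context */
}

int main(void)
{
	struct ctx user = { "user" }, poking = { "poking" };
	struct saved s;

	loaded_ctx = &user;
	s = use_temporary(&poking);
	printf("now in %s\n", loaded_ctx->name);
	unuse_temporary(s);
	printf("back in %s\n", loaded_ctx->name);
	return 0;
}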
mm                 71 arch/x86/include/asm/mpx.h static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
mm                 73 arch/x86/include/asm/mpx.h 	return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR);
mm                 76 arch/x86/include/asm/mpx.h static inline void mpx_mm_init(struct mm_struct *mm)
mm                 82 arch/x86/include/asm/mpx.h 	mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
mm                 85 arch/x86/include/asm/mpx.h extern void mpx_notify_unmap(struct mm_struct *mm, unsigned long start, unsigned long end);
mm                 97 arch/x86/include/asm/mpx.h static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
mm                101 arch/x86/include/asm/mpx.h static inline void mpx_mm_init(struct mm_struct *mm)
mm                104 arch/x86/include/asm/mpx.h static inline void mpx_notify_unmap(struct mm_struct *mm,
mm                 76 arch/x86/include/asm/paravirt.h static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
mm                 78 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.exit_mmap, mm);
mm                309 arch/x86/include/asm/paravirt.h 					  struct mm_struct *mm)
mm                311 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
mm                314 arch/x86/include/asm/paravirt.h static inline int paravirt_pgd_alloc(struct mm_struct *mm)
mm                316 arch/x86/include/asm/paravirt.h 	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
mm                319 arch/x86/include/asm/paravirt.h static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                321 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
mm                324 arch/x86/include/asm/paravirt.h static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
mm                326 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
mm                333 arch/x86/include/asm/paravirt.h static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
mm                335 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
mm                343 arch/x86/include/asm/paravirt.h static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
mm                345 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
mm                352 arch/x86/include/asm/paravirt.h static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
mm                354 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
mm                443 arch/x86/include/asm/paravirt.h static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                448 arch/x86/include/asm/paravirt.h 		pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
mm                450 arch/x86/include/asm/paravirt.h 		PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
mm                575 arch/x86/include/asm/paravirt.h static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
mm                578 arch/x86/include/asm/paravirt.h 	PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
mm                591 arch/x86/include/asm/paravirt.h static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
mm                594 arch/x86/include/asm/paravirt.h 	set_pte_at(mm, addr, ptep, __pte(0));
mm                937 arch/x86/include/asm/paravirt.h 					  struct mm_struct *mm)
mm                943 arch/x86/include/asm/paravirt.h static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
mm                215 arch/x86/include/asm/paravirt_types.h 	void (*exit_mmap)(struct mm_struct *mm);
mm                228 arch/x86/include/asm/paravirt_types.h 			 struct mm_struct *mm);
mm                231 arch/x86/include/asm/paravirt_types.h 	int  (*pgd_alloc)(struct mm_struct *mm);
mm                232 arch/x86/include/asm/paravirt_types.h 	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
mm                238 arch/x86/include/asm/paravirt_types.h 	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
mm                239 arch/x86/include/asm/paravirt_types.h 	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
mm                240 arch/x86/include/asm/paravirt_types.h 	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
mm                241 arch/x86/include/asm/paravirt_types.h 	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
mm                249 arch/x86/include/asm/paravirt_types.h 	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
mm                267 arch/x86/include/asm/paravirt_types.h 	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
mm                 12 arch/x86/include/asm/pgalloc.h static inline int  __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
mm                 17 arch/x86/include/asm/pgalloc.h #define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
mm                 18 arch/x86/include/asm/pgalloc.h static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
mm                 19 arch/x86/include/asm/pgalloc.h static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)	{}
mm                 20 arch/x86/include/asm/pgalloc.h static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)	{}
mm                 23 arch/x86/include/asm/pgalloc.h static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)	{}
mm                 24 arch/x86/include/asm/pgalloc.h static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)	{}
mm                 51 arch/x86/include/asm/pgalloc.h extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
mm                 63 arch/x86/include/asm/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm,
mm                 66 arch/x86/include/asm/pgalloc.h 	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
mm                 70 arch/x86/include/asm/pgalloc.h static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
mm                 73 arch/x86/include/asm/pgalloc.h 	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
mm                 77 arch/x86/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
mm                 82 arch/x86/include/asm/pgalloc.h 	paravirt_alloc_pte(mm, pfn);
mm                 89 arch/x86/include/asm/pgalloc.h static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                 94 arch/x86/include/asm/pgalloc.h 	if (mm == &init_mm)
mm                106 arch/x86/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                122 arch/x86/include/asm/pgalloc.h extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
mm                124 arch/x86/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
mm                126 arch/x86/include/asm/pgalloc.h 	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
mm                130 arch/x86/include/asm/pgalloc.h static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
mm                132 arch/x86/include/asm/pgalloc.h 	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
mm                138 arch/x86/include/asm/pgalloc.h static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
mm                140 arch/x86/include/asm/pgalloc.h 	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
mm                144 arch/x86/include/asm/pgalloc.h static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
mm                146 arch/x86/include/asm/pgalloc.h 	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
mm                150 arch/x86/include/asm/pgalloc.h static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                154 arch/x86/include/asm/pgalloc.h 	if (mm == &init_mm)
mm                159 arch/x86/include/asm/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
mm                174 arch/x86/include/asm/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
mm                178 arch/x86/include/asm/pgalloc.h 	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
mm                182 arch/x86/include/asm/pgalloc.h static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
mm                186 arch/x86/include/asm/pgalloc.h 	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
mm                190 arch/x86/include/asm/pgalloc.h static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
mm                194 arch/x86/include/asm/pgalloc.h 	if (mm == &init_mm)
mm                199 arch/x86/include/asm/pgalloc.h static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
mm                 43 arch/x86/include/asm/pgtable-2level.h static inline void native_pte_clear(struct mm_struct *mm,
mm                114 arch/x86/include/asm/pgtable-3level.h static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
mm                 64 arch/x86/include/asm/pgtable.h #define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
mm                 92 arch/x86/include/asm/pgtable.h #define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
mm                747 arch/x86/include/asm/pgtable.h static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
mm                753 arch/x86/include/asm/pgtable.h 			mm_tlb_flush_pending(mm))
mm               1019 arch/x86/include/asm/pgtable.h #define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
mm               1083 arch/x86/include/asm/pgtable.h static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
mm               1089 arch/x86/include/asm/pgtable.h static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm               1095 arch/x86/include/asm/pgtable.h static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
mm               1124 arch/x86/include/asm/pgtable.h static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
mm               1132 arch/x86/include/asm/pgtable.h static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
mm               1144 arch/x86/include/asm/pgtable.h 		pte = ptep_get_and_clear(mm, addr, ptep);
mm               1150 arch/x86/include/asm/pgtable.h static inline void ptep_set_wrprotect(struct mm_struct *mm,
mm               1186 arch/x86/include/asm/pgtable.h static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
mm               1193 arch/x86/include/asm/pgtable.h static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
mm               1200 arch/x86/include/asm/pgtable.h static inline void pmdp_set_wrprotect(struct mm_struct *mm,
mm                 64 arch/x86/include/asm/pgtable_64.h static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
mm                 21 arch/x86/include/asm/pkeys.h extern int __execute_only_pkey(struct mm_struct *mm);
mm                 22 arch/x86/include/asm/pkeys.h static inline int execute_only_pkey(struct mm_struct *mm)
mm                 27 arch/x86/include/asm/pkeys.h 	return __execute_only_pkey(mm);
mm                 46 arch/x86/include/asm/pkeys.h #define mm_pkey_allocation_map(mm)	(mm->context.pkey_allocation_map)
mm                 47 arch/x86/include/asm/pkeys.h #define mm_set_pkey_allocated(mm, pkey) do {		\
mm                 48 arch/x86/include/asm/pkeys.h 	mm_pkey_allocation_map(mm) |= (1U << pkey);	\
mm                 50 arch/x86/include/asm/pkeys.h #define mm_set_pkey_free(mm, pkey) do {			\
mm                 51 arch/x86/include/asm/pkeys.h 	mm_pkey_allocation_map(mm) &= ~(1U << pkey);	\
mm                 55 arch/x86/include/asm/pkeys.h bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
mm                 71 arch/x86/include/asm/pkeys.h 	if (pkey == mm->context.execute_only_pkey)
mm                 74 arch/x86/include/asm/pkeys.h 	return mm_pkey_allocation_map(mm) & (1U << pkey);
mm                 81 arch/x86/include/asm/pkeys.h int mm_pkey_alloc(struct mm_struct *mm)
mm                 97 arch/x86/include/asm/pkeys.h 	if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
mm                100 arch/x86/include/asm/pkeys.h 	ret = ffz(mm_pkey_allocation_map(mm));
mm                102 arch/x86/include/asm/pkeys.h 	mm_set_pkey_allocated(mm, ret);
mm                108 arch/x86/include/asm/pkeys.h int mm_pkey_free(struct mm_struct *mm, int pkey)
mm                110 arch/x86/include/asm/pkeys.h 	if (!mm_pkey_is_allocated(mm, pkey))
mm                113 arch/x86/include/asm/pkeys.h 	mm_set_pkey_free(mm, pkey);
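The asm/pkeys.h entries above expose the per-mm key accounting behind the protection-keys syscalls: mm_pkey_alloc() takes the first zero bit of pkey_allocation_map (ffz), mm_pkey_free() clears it again, and the execute-only key reports as allocated so userspace cannot grab it. A minimal user-space counterpart, assuming glibc 2.27+ and pkeys-capable hardware (pkey_alloc() fails with ENOSYS or EINVAL otherwise):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* Lands in mm_pkey_alloc() above: first free bit in the map. */
	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
	if (pkey < 0) {
		perror("pkey_alloc");
		return 1;
	}

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Tag the page; writes fault until the access rights are relaxed. */
	if (pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey)) {
		perror("pkey_mprotect");
		return 1;
	}
	pkey_set(pkey, 0);	/* re-enable write for this thread */
	strcpy(p, "ok");
	printf("wrote \"%s\" under pkey %d\n", p, pkey);

	pkey_free(pkey);	/* mm_pkey_free(): clear the bit */
	return 0;
}

Because mm_pkey_alloc() uses ffz() over the bitmap, keys are handed out lowest-free-first per process.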
mm                 24 arch/x86/include/asm/tlb.h 	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
mm                255 arch/x86/include/asm/tlbflush.h 	struct mm_struct *current_mm = current->mm;
mm                558 arch/x86/include/asm/tlbflush.h 	struct mm_struct	*mm;
mm                568 arch/x86/include/asm/tlbflush.h #define flush_tlb_mm(mm)						\
mm                569 arch/x86/include/asm/tlbflush.h 		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
mm                578 arch/x86/include/asm/tlbflush.h extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
mm                591 arch/x86/include/asm/tlbflush.h static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
mm                599 arch/x86/include/asm/tlbflush.h 	return atomic64_inc_return(&mm->context.tlb_gen);
mm                603 arch/x86/include/asm/tlbflush.h 					struct mm_struct *mm)
mm                605 arch/x86/include/asm/tlbflush.h 	inc_mm_tlb_gen(mm);
mm                606 arch/x86/include/asm/tlbflush.h 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
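tlbflush.h above pairs a per-mm generation counter (inc_mm_tlb_gen() bumps context.tlb_gen) with per-CPU state, so any number of pending invalidations collapse into one flush per CPU. A toy model of just the generation idea; the names here are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct toy_mm { atomic_uint_fast64_t tlb_gen; };
struct toy_cpu { uint64_t seen_gen; };

/* Mirrors inc_mm_tlb_gen(): bump before telling CPUs to flush. */
static uint64_t inc_gen(struct toy_mm *mm)
{
	return atomic_fetch_add(&mm->tlb_gen, 1) + 1;
}

static void maybe_flush(struct toy_cpu *cpu, struct toy_mm *mm)
{
	uint64_t now = atomic_load(&mm->tlb_gen);

	if (cpu->seen_gen < now) {
		printf("flush: gen %llu -> %llu\n",
		       (unsigned long long)cpu->seen_gen,
		       (unsigned long long)now);
		cpu->seen_gen = now;	/* one flush covers every pending gen */
	}
}

int main(void)
{
	struct toy_mm mm = { 0 };
	struct toy_cpu cpu = { 0 };

	inc_gen(&mm);		/* e.g. two unmaps in a row... */
	inc_gen(&mm);
	maybe_flush(&cpu, &mm);	/* ...coalesce into a single flush */
	maybe_flush(&cpu, &mm);	/* already current: nothing to do */
	return 0;
}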
mm                 17 arch/x86/include/asm/trace/hyperv.h 		    __field(struct mm_struct *, mm)
mm                 22 arch/x86/include/asm/trace/hyperv.h 			   __entry->mm = info->mm;
mm                 27 arch/x86/include/asm/trace/hyperv.h 		      __entry->ncpus, __entry->mm,
mm               1922 arch/x86/kernel/cpu/common.c 	BUG_ON(me->mm);
mm               1974 arch/x86/kernel/cpu/common.c 	BUG_ON(curr->mm);
mm                 54 arch/x86/kernel/ldt.c 	struct mm_struct *mm = __mm;
mm                 56 arch/x86/kernel/ldt.c 	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
mm                 59 arch/x86/kernel/ldt.c 	load_mm_ldt(mm);
mm                105 arch/x86/kernel/ldt.c static void do_sanity_check(struct mm_struct *mm,
mm                109 arch/x86/kernel/ldt.c 	if (mm->context.ldt) {
mm                150 arch/x86/kernel/ldt.c static void map_ldt_struct_to_user(struct mm_struct *mm)
mm                152 arch/x86/kernel/ldt.c 	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
mm                159 arch/x86/kernel/ldt.c 	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
mm                163 arch/x86/kernel/ldt.c static void sanity_check_ldt_mapping(struct mm_struct *mm)
mm                165 arch/x86/kernel/ldt.c 	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
mm                175 arch/x86/kernel/ldt.c 	do_sanity_check(mm, had_kernel, had_user);
mm                180 arch/x86/kernel/ldt.c static void map_ldt_struct_to_user(struct mm_struct *mm)
mm                182 arch/x86/kernel/ldt.c 	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
mm                184 arch/x86/kernel/ldt.c 	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
mm                188 arch/x86/kernel/ldt.c static void sanity_check_ldt_mapping(struct mm_struct *mm)
mm                190 arch/x86/kernel/ldt.c 	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
mm                194 arch/x86/kernel/ldt.c 	do_sanity_check(mm, had_kernel, had_user);
mm                204 arch/x86/kernel/ldt.c map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
mm                221 arch/x86/kernel/ldt.c 	sanity_check_ldt_mapping(mm);
mm                242 arch/x86/kernel/ldt.c 		ptep = get_locked_pte(mm, va, &ptl);
mm                254 arch/x86/kernel/ldt.c 		set_pte_at(mm, va, ptep, pte);
mm                259 arch/x86/kernel/ldt.c 	map_ldt_struct_to_user(mm);
mm                265 arch/x86/kernel/ldt.c static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
mm                285 arch/x86/kernel/ldt.c 		ptep = get_locked_pte(mm, va, &ptl);
mm                286 arch/x86/kernel/ldt.c 		pte_clear(mm, va, ptep);
mm                291 arch/x86/kernel/ldt.c 	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
mm                297 arch/x86/kernel/ldt.c map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
mm                302 arch/x86/kernel/ldt.c static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
mm                307 arch/x86/kernel/ldt.c static void free_ldt_pgtables(struct mm_struct *mm)
mm                317 arch/x86/kernel/ldt.c 	tlb_gather_mmu(&tlb, mm, start, end);
mm                329 arch/x86/kernel/ldt.c static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
mm                331 arch/x86/kernel/ldt.c 	mutex_lock(&mm->context.lock);
mm                334 arch/x86/kernel/ldt.c 	smp_store_release(&mm->context.ldt, ldt);
mm                337 arch/x86/kernel/ldt.c 	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
mm                339 arch/x86/kernel/ldt.c 	mutex_unlock(&mm->context.lock);
mm                359 arch/x86/kernel/ldt.c int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
mm                381 arch/x86/kernel/ldt.c 	retval = map_ldt_struct(mm, new_ldt, 0);
mm                383 arch/x86/kernel/ldt.c 		free_ldt_pgtables(mm);
mm                387 arch/x86/kernel/ldt.c 	mm->context.ldt = new_ldt;
mm                399 arch/x86/kernel/ldt.c void destroy_context_ldt(struct mm_struct *mm)
mm                401 arch/x86/kernel/ldt.c 	free_ldt_struct(mm->context.ldt);
mm                402 arch/x86/kernel/ldt.c 	mm->context.ldt = NULL;
mm                405 arch/x86/kernel/ldt.c void ldt_arch_exit_mmap(struct mm_struct *mm)
mm                407 arch/x86/kernel/ldt.c 	free_ldt_pgtables(mm);
mm                412 arch/x86/kernel/ldt.c 	struct mm_struct *mm = current->mm;
mm                416 arch/x86/kernel/ldt.c 	down_read(&mm->context.ldt_usr_sem);
mm                418 arch/x86/kernel/ldt.c 	if (!mm->context.ldt) {
mm                426 arch/x86/kernel/ldt.c 	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
mm                430 arch/x86/kernel/ldt.c 	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
mm                445 arch/x86/kernel/ldt.c 	up_read(&mm->context.ldt_usr_sem);
mm                466 arch/x86/kernel/ldt.c 	struct mm_struct *mm = current->mm;
mm                505 arch/x86/kernel/ldt.c 	if (down_write_killable(&mm->context.ldt_usr_sem))
mm                508 arch/x86/kernel/ldt.c 	old_ldt       = mm->context.ldt;
mm                529 arch/x86/kernel/ldt.c 	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
mm                537 arch/x86/kernel/ldt.c 			free_ldt_pgtables(mm);
mm                542 arch/x86/kernel/ldt.c 	install_ldt(mm, new_ldt);
mm                543 arch/x86/kernel/ldt.c 	unmap_ldt_struct(mm, old_ldt);
mm                548 arch/x86/kernel/ldt.c 	up_write(&mm->context.ldt_usr_sem);
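The arch/x86/kernel/ldt.c entries above are the guts of modify_ldt(2): write_ldt() builds a new ldt_struct, map_ldt_struct() mirrors it into the usermode page tables for PTI, and install_ldt() publishes it and IPIs the mm's CPUs. A hedged x86-Linux sketch of the userspace side, assuming a kernel built with CONFIG_MODIFY_LDT_SYSCALL; function code 0x11 is "write with struct user_desc":

#define _GNU_SOURCE
#include <asm/ldt.h>		/* struct user_desc, LDT_ENTRY_SIZE */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc d;
	unsigned char buf[LDT_ENTRY_SIZE * 4];
	long n;

	memset(&d, 0, sizeof(d));
	d.entry_number   = 0;
	d.base_addr      = 0;
	d.limit          = 0xfffff;
	d.seg_32bit      = 1;
	d.limit_in_pages = 1;	/* a flat 32-bit data segment */

	/* Ends up in write_ldt() and install_ldt() above. */
	if (syscall(SYS_modify_ldt, 0x11, &d, sizeof(d)) != 0) {
		perror("modify_ldt(write)");
		return 1;
	}

	/* Function 0 is read_ldt(): copy raw descriptor bytes back. */
	n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
	printf("read_ldt returned %ld bytes\n", n);
	return 0;
}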
mm                791 arch/x86/kernel/process.c unsigned long arch_randomize_brk(struct mm_struct *mm)
mm                793 arch/x86/kernel/process.c 	return randomize_page(mm->brk, 0x02000000);
mm                111 arch/x86/kernel/process_32.c 	BUG_ON(dead_task->mm);
mm                146 arch/x86/kernel/process_64.c 	WARN_ON(dead_task->mm);
mm                317 arch/x86/kernel/process_64.c 		mutex_lock(&task->mm->context.lock);
mm                318 arch/x86/kernel/process_64.c 		ldt = task->mm->context.ldt;
mm                323 arch/x86/kernel/process_64.c 		mutex_unlock(&task->mm->context.lock);
mm                633 arch/x86/kernel/process_64.c 	if (current->mm)
mm                634 arch/x86/kernel/process_64.c 		current->mm->context.ia32_compat = 0;
mm                648 arch/x86/kernel/process_64.c 	if (current->mm)
mm                649 arch/x86/kernel/process_64.c 		current->mm->context.ia32_compat = TIF_X32;
mm                669 arch/x86/kernel/process_64.c 	if (current->mm)
mm                670 arch/x86/kernel/process_64.c 		current->mm->context.ia32_compat = TIF_IA32;
mm                337 arch/x86/kernel/signal.c 	if (current->mm->context.vdso)
mm                338 arch/x86/kernel/signal.c 		restorer = current->mm->context.vdso +
mm                402 arch/x86/kernel/signal.c 		restorer = current->mm->context.vdso +
mm                 36 arch/x86/kernel/step.c 		mutex_lock(&child->mm->context.lock);
mm                 37 arch/x86/kernel/step.c 		if (unlikely(!child->mm->context.ldt ||
mm                 38 arch/x86/kernel/step.c 			     seg >= child->mm->context.ldt->nr_entries))
mm                 41 arch/x86/kernel/step.c 			desc = &child->mm->context.ldt->entries[seg];
mm                 49 arch/x86/kernel/step.c 		mutex_unlock(&child->mm->context.lock);
mm                135 arch/x86/kernel/sys_x86_64.c 	struct mm_struct *mm = current->mm;
mm                154 arch/x86/kernel/sys_x86_64.c 		vma = find_vma(mm, addr);
mm                179 arch/x86/kernel/sys_x86_64.c 	struct mm_struct *mm = current->mm;
mm                205 arch/x86/kernel/sys_x86_64.c 		vma = find_vma(mm, addr);
mm                850 arch/x86/kernel/uprobes.c int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
mm                856 arch/x86/kernel/uprobes.c 	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
mm                164 arch/x86/kernel/vm86_32.c static void mark_screen_rdonly(struct mm_struct *mm)
mm                175 arch/x86/kernel/vm86_32.c 	down_write(&mm->mmap_sem);
mm                176 arch/x86/kernel/vm86_32.c 	pgd = pgd_offset(mm, 0xA0000);
mm                188 arch/x86/kernel/vm86_32.c 		vma = find_vma(mm, 0xA0000);
mm                193 arch/x86/kernel/vm86_32.c 	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
mm                201 arch/x86/kernel/vm86_32.c 	up_write(&mm->mmap_sem);
mm                202 arch/x86/kernel/vm86_32.c 	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
mm                381 arch/x86/kernel/vm86_32.c 		mark_screen_rdonly(tsk->mm);
mm               1244 arch/x86/kvm/emulate.c 		op->addr.mm = reg;
mm               1294 arch/x86/kvm/emulate.c 			op->addr.mm = ctxt->modrm_rm & 7;
mm               1868 arch/x86/kvm/emulate.c 		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
mm               5522 arch/x86/kvm/emulate.c 		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
mm                153 arch/x86/kvm/paging_tmpl.h 		down_read(&current->mm->mmap_sem);
mm                154 arch/x86/kvm/paging_tmpl.h 		vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
mm                156 arch/x86/kvm/paging_tmpl.h 			up_read(&current->mm->mmap_sem);
mm                163 arch/x86/kvm/paging_tmpl.h 			up_read(&current->mm->mmap_sem);
mm                168 arch/x86/kvm/paging_tmpl.h 		up_read(&current->mm->mmap_sem);
mm               1149 arch/x86/kvm/vmx/vmx.c 	if (likely(is_64bit_mm(current->mm))) {
mm               9693 arch/x86/kvm/x86.c 	if (current->mm == kvm->mm) {
mm                 30 arch/x86/math-emu/fpu_system.h 	mutex_lock(&current->mm->context.lock);
mm                 31 arch/x86/math-emu/fpu_system.h 	if (current->mm->context.ldt && seg < current->mm->context.ldt->nr_entries)
mm                 32 arch/x86/math-emu/fpu_system.h 		ret = current->mm->context.ldt->entries[seg];
mm                 33 arch/x86/math-emu/fpu_system.h 	mutex_unlock(&current->mm->context.lock);
mm                 18 arch/x86/mm/debug_pagetables.c 	if (current->mm->pgd) {
mm                 19 arch/x86/mm/debug_pagetables.c 		down_read(&current->mm->mmap_sem);
mm                 20 arch/x86/mm/debug_pagetables.c 		ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false);
mm                 21 arch/x86/mm/debug_pagetables.c 		up_read(&current->mm->mmap_sem);
mm                 31 arch/x86/mm/debug_pagetables.c 	if (current->mm->pgd) {
mm                 32 arch/x86/mm/debug_pagetables.c 		down_read(&current->mm->mmap_sem);
mm                 33 arch/x86/mm/debug_pagetables.c 		ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true);
mm                 34 arch/x86/mm/debug_pagetables.c 		up_read(&current->mm->mmap_sem);
mm                948 arch/x86/mm/fault.c 	struct mm_struct *mm = current->mm;
mm                953 arch/x86/mm/fault.c 	up_read(&mm->mmap_sem);
mm               1310 arch/x86/mm/fault.c 	struct mm_struct *mm;
mm               1315 arch/x86/mm/fault.c 	mm = tsk->mm;
mm               1347 arch/x86/mm/fault.c 	if (unlikely(faulthandler_disabled() || !mm)) {
mm               1404 arch/x86/mm/fault.c 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
mm               1414 arch/x86/mm/fault.c 		down_read(&mm->mmap_sem);
mm               1424 arch/x86/mm/fault.c 	vma = find_vma(mm, address);
mm               1489 arch/x86/mm/fault.c 	up_read(&mm->mmap_sem);
mm               1519 arch/x86/mm/fault.c 	prefetchw(&current->mm->mmap_sem);
mm                 26 arch/x86/mm/hugetlbpage.c follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
mm                 34 arch/x86/mm/hugetlbpage.c 	vma = find_vma(mm, addr);
mm                 38 arch/x86/mm/hugetlbpage.c 	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
mm                148 arch/x86/mm/hugetlbpage.c 	struct mm_struct *mm = current->mm;
mm                173 arch/x86/mm/hugetlbpage.c 		vma = find_vma(mm, addr);
mm                179 arch/x86/mm/hugetlbpage.c 	if (mm->get_unmapped_area == arch_get_unmapped_area)
mm                 63 arch/x86/mm/init_64.c static inline void fname##_init(struct mm_struct *mm,		\
mm                 67 arch/x86/mm/init_64.c 		fname##_safe(mm, arg1, arg2);			\
mm                 69 arch/x86/mm/init_64.c 		fname(mm, arg1, arg2);				\
mm                127 arch/x86/mm/mmap.c void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
mm                130 arch/x86/mm/mmap.c 		mm->get_unmapped_area = arch_get_unmapped_area;
mm                132 arch/x86/mm/mmap.c 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm                134 arch/x86/mm/mmap.c 	arch_pick_mmap_base(&mm->mmap_base, &mm->mmap_legacy_base,
mm                145 arch/x86/mm/mmap.c 	arch_pick_mmap_base(&mm->mmap_compat_base, &mm->mmap_compat_legacy_base,
mm                153 arch/x86/mm/mmap.c 	struct mm_struct *mm = current->mm;
mm                157 arch/x86/mm/mmap.c 		return is_legacy ? mm->mmap_compat_legacy_base
mm                158 arch/x86/mm/mmap.c 				 : mm->mmap_compat_base;
mm                161 arch/x86/mm/mmap.c 	return is_legacy ? mm->mmap_legacy_base : mm->mmap_base;
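mmap.c above is where the process-wide layout gets picked: arch_pick_mmap_layout() points mm->get_unmapped_area at either the bottom-up legacy allocator or the default top-down one, and records both bases (plus compat variants for 32-bit syscalls on 64-bit kernels). A quick way to observe the default top-down behavior; under the legacy layout (vm.legacy_va_layout, or an ADDR_COMPAT_LAYOUT personality) the addresses would grow upward instead:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	for (int i = 0; i < 3; i++) {
		void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		/* Successive mappings typically fall at decreasing
		 * addresses, working down from the randomized mmap_base. */
		printf("mapping %d at %p\n", i, p);
	}
	return 0;
}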
mm                 26 arch/x86/mm/mpx.c static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
mm                 28 arch/x86/mm/mpx.c 	if (is_64bit_mm(mm))
mm                 34 arch/x86/mm/mpx.c static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
mm                 36 arch/x86/mm/mpx.c 	if (is_64bit_mm(mm))
mm                 48 arch/x86/mm/mpx.c 	struct mm_struct *mm = current->mm;
mm                 52 arch/x86/mm/mpx.c 	if (len != mpx_bt_size_bytes(mm))
mm                 55 arch/x86/mm/mpx.c 	down_write(&mm->mmap_sem);
mm                 58 arch/x86/mm/mpx.c 	up_write(&mm->mmap_sem);
mm                215 arch/x86/mm/mpx.c 	struct mm_struct *mm = current->mm;
mm                230 arch/x86/mm/mpx.c 	down_write(&mm->mmap_sem);
mm                233 arch/x86/mm/mpx.c 	if (find_vma(mm, DEFAULT_MAP_WINDOW)) {
mm                240 arch/x86/mm/mpx.c 	mm->context.bd_addr = bd_base;
mm                241 arch/x86/mm/mpx.c 	if (mm->context.bd_addr == MPX_INVALID_BOUNDS_DIR)
mm                244 arch/x86/mm/mpx.c 	up_write(&mm->mmap_sem);
mm                250 arch/x86/mm/mpx.c 	struct mm_struct *mm = current->mm;
mm                255 arch/x86/mm/mpx.c 	down_write(&mm->mmap_sem);
mm                256 arch/x86/mm/mpx.c 	mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
mm                257 arch/x86/mm/mpx.c 	up_write(&mm->mmap_sem);
mm                261 arch/x86/mm/mpx.c static int mpx_cmpxchg_bd_entry(struct mm_struct *mm,
mm                274 arch/x86/mm/mpx.c 	if (is_64bit_mm(mm)) {
mm                295 arch/x86/mm/mpx.c static int allocate_bt(struct mm_struct *mm, long __user *bd_entry)
mm                307 arch/x86/mm/mpx.c 	bt_addr = mpx_mmap(mpx_bt_size_bytes(mm));
mm                326 arch/x86/mm/mpx.c 	ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,	bd_entry,
mm                358 arch/x86/mm/mpx.c 	vm_munmap(bt_addr, mpx_bt_size_bytes(mm));
mm                377 arch/x86/mm/mpx.c 	struct mm_struct *mm = current->mm;
mm                396 arch/x86/mm/mpx.c 	    (bd_entry >= bd_base + mpx_bd_size_bytes(mm)))
mm                399 arch/x86/mm/mpx.c 	return allocate_bt(mm, (long __user *)bd_entry);
mm                408 arch/x86/mm/mpx.c 	if (!kernel_managing_mpx_tables(current->mm))
mm                439 arch/x86/mm/mpx.c static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
mm                454 arch/x86/mm/mpx.c 	if (is_64bit_mm(mm))
mm                467 arch/x86/mm/mpx.c static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
mm                473 arch/x86/mm/mpx.c 	if (is_64bit_mm(mm))
mm                489 arch/x86/mm/mpx.c static int get_bt_addr(struct mm_struct *mm,
mm                505 arch/x86/mm/mpx.c 		ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
mm                520 arch/x86/mm/mpx.c 	bt_addr = mpx_bd_entry_to_bt_addr(mm, bd_entry);
mm                544 arch/x86/mm/mpx.c static inline int bt_entry_size_bytes(struct mm_struct *mm)
mm                546 arch/x86/mm/mpx.c 	if (is_64bit_mm(mm))
mm                557 arch/x86/mm/mpx.c static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
mm                563 arch/x86/mm/mpx.c 	if (is_64bit_mm(mm)) {
mm                587 arch/x86/mm/mpx.c 	offset *= bt_entry_size_bytes(mm);
mm                598 arch/x86/mm/mpx.c static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
mm                607 arch/x86/mm/mpx.c 	if (!is_64bit_mm(mm))
mm                623 arch/x86/mm/mpx.c static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
mm                638 arch/x86/mm/mpx.c 	start = bt_addr + mpx_get_bt_entry_offset_bytes(mm, start_mapping);
mm                639 arch/x86/mm/mpx.c 	end   = bt_addr + mpx_get_bt_entry_offset_bytes(mm, end_mapping - 1);
mm                645 arch/x86/mm/mpx.c 	end += bt_entry_size_bytes(mm);
mm                652 arch/x86/mm/mpx.c 	vma = find_vma(mm, start);
mm                683 arch/x86/mm/mpx.c static unsigned long mpx_get_bd_entry_offset(struct mm_struct *mm,
mm                697 arch/x86/mm/mpx.c 	if (is_64bit_mm(mm)) {
mm                703 arch/x86/mm/mpx.c 		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
mm                709 arch/x86/mm/mpx.c 		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
mm                720 arch/x86/mm/mpx.c static int unmap_entire_bt(struct mm_struct *mm,
mm                732 arch/x86/mm/mpx.c 		ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
mm                771 arch/x86/mm/mpx.c 	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm), NULL);
mm                774 arch/x86/mm/mpx.c static int try_unmap_single_bt(struct mm_struct *mm,
mm                783 arch/x86/mm/mpx.c 	unsigned long bta_start_vaddr = start & ~(bd_entry_virt_space(mm)-1);
mm                784 arch/x86/mm/mpx.c 	unsigned long bta_end_vaddr = bta_start_vaddr + bd_entry_virt_space(mm);
mm                794 arch/x86/mm/mpx.c 	next = find_vma_prev(mm, start, &prev);
mm                814 arch/x86/mm/mpx.c 	next = find_vma_prev(mm, start, &prev);
mm                825 arch/x86/mm/mpx.c 	bde_vaddr = mm->context.bd_addr + mpx_get_bd_entry_offset(mm, start);
mm                826 arch/x86/mm/mpx.c 	ret = get_bt_addr(mm, bde_vaddr, &bt_addr);
mm                844 arch/x86/mm/mpx.c 		return unmap_entire_bt(mm, bde_vaddr, bt_addr);
mm                845 arch/x86/mm/mpx.c 	return zap_bt_entries_mapping(mm, bt_addr, start, end);
mm                848 arch/x86/mm/mpx.c static int mpx_unmap_tables(struct mm_struct *mm,
mm                858 arch/x86/mm/mpx.c 						       bd_entry_virt_space(mm));
mm                867 arch/x86/mm/mpx.c 		ret = try_unmap_single_bt(mm, one_unmap_start, one_unmap_end);
mm                884 arch/x86/mm/mpx.c void mpx_notify_unmap(struct mm_struct *mm, unsigned long start,
mm                894 arch/x86/mm/mpx.c 	if (!kernel_managing_mpx_tables(current->mm))
mm                906 arch/x86/mm/mpx.c 	vma = find_vma(mm, start);
mm                913 arch/x86/mm/mpx.c 	ret = mpx_unmap_tables(mm, start, end);
mm                922 arch/x86/mm/mpx.c 	if (!kernel_managing_mpx_tables(current->mm))
mm                 24 arch/x86/mm/pgtable.c pgtable_t pte_alloc_one(struct mm_struct *mm)
mm                 26 arch/x86/mm/pgtable.c 	return __pte_alloc_one(mm, __userpte_alloc_gfp);
mm                106 arch/x86/mm/pgtable.c static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
mm                108 arch/x86/mm/pgtable.c 	virt_to_page(pgd)->pt_mm = mm;
mm                116 arch/x86/mm/pgtable.c static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
mm                131 arch/x86/mm/pgtable.c 		pgd_set_mm(pgd, mm);
mm                181 arch/x86/mm/pgtable.c void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
mm                183 arch/x86/mm/pgtable.c 	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
mm                195 arch/x86/mm/pgtable.c 	flush_tlb_mm(mm);
mm                206 arch/x86/mm/pgtable.c static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
mm                214 arch/x86/mm/pgtable.c 			mm_dec_nr_pmds(mm);
mm                218 arch/x86/mm/pgtable.c static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
mm                224 arch/x86/mm/pgtable.c 	if (mm == &init_mm)
mm                237 arch/x86/mm/pgtable.c 			mm_inc_nr_pmds(mm);
mm                242 arch/x86/mm/pgtable.c 		free_pmds(mm, pmds, count);
mm                255 arch/x86/mm/pgtable.c static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
mm                265 arch/x86/mm/pgtable.c 		pmd_free(mm, pmd);
mm                266 arch/x86/mm/pgtable.c 		mm_dec_nr_pmds(mm);
mm                270 arch/x86/mm/pgtable.c static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
mm                275 arch/x86/mm/pgtable.c 		mop_up_one_pmd(mm, &pgdp[i]);
mm                285 arch/x86/mm/pgtable.c 		mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
mm                289 arch/x86/mm/pgtable.c static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
mm                308 arch/x86/mm/pgtable.c 		pud_populate(mm, pud, pmd);
mm                313 arch/x86/mm/pgtable.c static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
mm                334 arch/x86/mm/pgtable.c 		pud_populate(mm, u_pud, pmd);
mm                339 arch/x86/mm/pgtable.c static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
mm                417 arch/x86/mm/pgtable.c pgd_t *pgd_alloc(struct mm_struct *mm)
mm                428 arch/x86/mm/pgtable.c 	mm->pgd = pgd;
mm                430 arch/x86/mm/pgtable.c 	if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
mm                433 arch/x86/mm/pgtable.c 	if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
mm                436 arch/x86/mm/pgtable.c 	if (paravirt_pgd_alloc(mm) != 0)
mm                446 arch/x86/mm/pgtable.c 	pgd_ctor(mm, pgd);
mm                447 arch/x86/mm/pgtable.c 	pgd_prepopulate_pmd(mm, pgd, pmds);
mm                448 arch/x86/mm/pgtable.c 	pgd_prepopulate_user_pmd(mm, pgd, u_pmds);
mm                455 arch/x86/mm/pgtable.c 	free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
mm                457 arch/x86/mm/pgtable.c 	free_pmds(mm, pmds, PREALLOCATED_PMDS);
mm                464 arch/x86/mm/pgtable.c void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                466 arch/x86/mm/pgtable.c 	pgd_mop_up_pmds(mm, pgd);
mm                468 arch/x86/mm/pgtable.c 	paravirt_pgd_free(mm, pgd);
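pgtable.c above shows pgd_alloc() as a staged allocation: the pgd page first, then the preallocated kernel and user PMDs, then paravirt setup, with each failure unwinding everything allocated before it (and pgd_free() doing the full teardown). The same control-flow skeleton, stripped of the page-table specifics:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *pgd, *pmds, *u_pmds;

	pgd = malloc(64);	/* allocate the pgd page */
	if (!pgd)
		goto out;
	pmds = malloc(64);	/* preallocate_pmds(mm, pmds, ...) */
	if (!pmds)
		goto out_free_pgd;
	u_pmds = malloc(64);	/* preallocate_pmds(mm, u_pmds, ...) */
	if (!u_pmds)
		goto out_free_pmds;

	puts("all levels allocated");
	free(u_pmds);
	free(pmds);
	free(pgd);
	return 0;

out_free_pmds:
	free(pmds);
out_free_pgd:
	free(pgd);
out:
	return 1;
}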
mm                 15 arch/x86/mm/pkeys.c int __execute_only_pkey(struct mm_struct *mm)
mm                 18 arch/x86/mm/pkeys.c 	int execute_only_pkey = mm->context.execute_only_pkey;
mm                 24 arch/x86/mm/pkeys.c 		execute_only_pkey = mm_pkey_alloc(mm);
mm                 53 arch/x86/mm/pkeys.c 		mm_set_pkey_free(mm, execute_only_pkey);
mm                 59 arch/x86/mm/pkeys.c 		mm->context.execute_only_pkey = execute_only_pkey;
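mm/pkeys.c above is the execute-only support: the first PROT_EXEC-only mapping in a process makes the kernel allocate a key, deny data access to it in PKRU, and cache it in mm->context.execute_only_pkey for reuse. On pkeys-capable hardware this is triggered by plain mmap()/mprotect(), with no pkey syscalls needed:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* PROT_EXEC without PROT_READ: on PKU hardware the kernel
	 * routes this through __execute_only_pkey() above, and the
	 * VMA shows a ProtectionKey: line in /proc/self/smaps. */
	void *p = mmap(NULL, 4096, PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("exec-only mapping at %p\n", p);
	return 0;
}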
mm                164 arch/x86/mm/tlb.c static void sync_current_stack_to_mm(struct mm_struct *mm)
mm                167 arch/x86/mm/tlb.c 	pgd_t *pgd = pgd_offset(mm, sp);
mm                197 arch/x86/mm/tlb.c 	return (unsigned long)next->mm | ibpb;
mm                202 arch/x86/mm/tlb.c 	if (!next || !next->mm)
mm                268 arch/x86/mm/tlb.c 		if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
mm                270 arch/x86/mm/tlb.c 			this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
mm                461 arch/x86/mm/tlb.c void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                485 arch/x86/mm/tlb.c 	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
mm                490 arch/x86/mm/tlb.c 	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));
mm                501 arch/x86/mm/tlb.c 	write_cr3(build_cr3(mm->pgd, 0));
mm                507 arch/x86/mm/tlb.c 	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
mm                651 arch/x86/mm/tlb.c 	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
mm                732 arch/x86/mm/tlb.c static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
mm                750 arch/x86/mm/tlb.c 	info->mm		= mm;
mm                767 arch/x86/mm/tlb.c void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
mm                785 arch/x86/mm/tlb.c 	new_tlb_gen = inc_mm_tlb_gen(mm);
mm                787 arch/x86/mm/tlb.c 	info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
mm                790 arch/x86/mm/tlb.c 	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
mm                797 arch/x86/mm/tlb.c 	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
mm                798 arch/x86/mm/tlb.c 		flush_tlb_others(mm_cpumask(mm), info);
mm                853 arch/x86/mm/tlb.c 	.mm = NULL,
mm                540 arch/x86/platform/efi/efi_64.c static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
mm                628 arch/x86/platform/efi/efi_64.c void efi_switch_mm(struct mm_struct *mm)
mm                631 arch/x86/platform/efi/efi_64.c 	current->active_mm = mm;
mm                632 arch/x86/platform/efi/efi_64.c 	switch_mm(efi_scratch.prev_mm, mm, NULL);
mm                 58 arch/x86/um/ldt.c 	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
mm                123 arch/x86/um/ldt.c 	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
mm                124 arch/x86/um/ldt.c 	struct mm_id * mm_idp = &current->mm->context.id;
mm                361 arch/x86/um/ldt.c void free_ldt(struct mm_context *mm)
mm                365 arch/x86/um/ldt.c 	if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
mm                366 arch/x86/um/ldt.c 		i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
mm                368 arch/x86/um/ldt.c 			free_page((long) mm->arch.ldt.u.pages[i]);
mm                370 arch/x86/um/ldt.c 	mm->arch.ldt.entry_count = 0;
mm                 26 arch/x86/um/mem_32.c struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
mm                 42 arch/x86/um/mem_32.c int in_gate_area(struct mm_struct *mm, unsigned long addr)
mm                 44 arch/x86/um/mem_32.c 	struct vm_area_struct *vma = get_gate_vma(mm);
mm                 20 arch/x86/um/syscalls_64.c 	int pid = task->mm->context.id.u.pid;
mm                 85 arch/x86/um/syscalls_64.c 	if ((to->thread.arch.fs == 0) || (to->mm == NULL))
mm                197 arch/x86/um/tls_32.c 	if (likely(to->mm))
mm                 56 arch/x86/um/vdso/vma.c 	struct mm_struct *mm = current->mm;
mm                 61 arch/x86/um/vdso/vma.c 	if (down_write_killable(&mm->mmap_sem))
mm                 64 arch/x86/um/vdso/vma.c 	err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
mm                 69 arch/x86/um/vdso/vma.c 	up_write(&mm->mmap_sem);
mm                 47 arch/x86/xen/mmu_hvm.c static void xen_hvm_exit_mmap(struct mm_struct *mm)
mm                 53 arch/x86/xen/mmu_hvm.c 	a.gpa = __pa(mm->pgd);
mm                302 arch/x86/xen/mmu_pv.c static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
mm                305 arch/x86/xen/mmu_pv.c 	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
mm                449 arch/x86/xen/mmu_pv.c static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                451 arch/x86/xen/mmu_pv.c 	trace_xen_mmu_pte_clear(mm, addr, ptep);
mm                576 arch/x86/xen/mmu_pv.c static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
mm                577 arch/x86/xen/mmu_pv.c 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
mm                585 arch/x86/xen/mmu_pv.c 			flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
mm                590 arch/x86/xen/mmu_pv.c static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
mm                591 arch/x86/xen/mmu_pv.c 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
mm                605 arch/x86/xen/mmu_pv.c 			flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
mm                606 arch/x86/xen/mmu_pv.c 		flush |= xen_pmd_walk(mm, pmd, func,
mm                612 arch/x86/xen/mmu_pv.c static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
mm                613 arch/x86/xen/mmu_pv.c 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
mm                625 arch/x86/xen/mmu_pv.c 		flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
mm                626 arch/x86/xen/mmu_pv.c 	flush |= xen_pud_walk(mm, pud, func, last, limit);
mm                645 arch/x86/xen/mmu_pv.c static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
mm                646 arch/x86/xen/mmu_pv.c 			  int (*func)(struct mm_struct *mm, struct page *,
mm                677 arch/x86/xen/mmu_pv.c 		flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
mm                682 arch/x86/xen/mmu_pv.c 	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
mm                687 arch/x86/xen/mmu_pv.c static int xen_pgd_walk(struct mm_struct *mm,
mm                688 arch/x86/xen/mmu_pv.c 			int (*func)(struct mm_struct *mm, struct page *,
mm                692 arch/x86/xen/mmu_pv.c 	return __xen_pgd_walk(mm, mm->pgd, func, limit);
mm                697 arch/x86/xen/mmu_pv.c static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
mm                703 arch/x86/xen/mmu_pv.c 	spin_lock_nest_lock(ptl, &mm->page_table_lock);
mm                725 arch/x86/xen/mmu_pv.c static int xen_pin_page(struct mm_struct *mm, struct page *page,
mm                767 arch/x86/xen/mmu_pv.c 			ptl = xen_pte_lock(page, mm);
mm                788 arch/x86/xen/mmu_pv.c static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
mm                790 arch/x86/xen/mmu_pv.c 	trace_xen_mmu_pgd_pin(mm, pgd);
mm                794 arch/x86/xen/mmu_pv.c 	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
mm                810 arch/x86/xen/mmu_pv.c 			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
mm                818 arch/x86/xen/mmu_pv.c 	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
mm                826 arch/x86/xen/mmu_pv.c static void xen_pgd_pin(struct mm_struct *mm)
mm                828 arch/x86/xen/mmu_pv.c 	__xen_pgd_pin(mm, mm->pgd);
mm                857 arch/x86/xen/mmu_pv.c static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
mm                879 arch/x86/xen/mmu_pv.c static int xen_unpin_page(struct mm_struct *mm, struct page *page,
mm                898 arch/x86/xen/mmu_pv.c 			ptl = xen_pte_lock(page, mm);
mm                920 arch/x86/xen/mmu_pv.c static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
mm                922 arch/x86/xen/mmu_pv.c 	trace_xen_mmu_pgd_unpin(mm, pgd);
mm                935 arch/x86/xen/mmu_pv.c 			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
mm                942 arch/x86/xen/mmu_pv.c 	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
mm                946 arch/x86/xen/mmu_pv.c 	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
mm                951 arch/x86/xen/mmu_pv.c static void xen_pgd_unpin(struct mm_struct *mm)
mm                953 arch/x86/xen/mmu_pv.c 	__xen_pgd_unpin(mm, mm->pgd);
mm                984 arch/x86/xen/mmu_pv.c static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
mm                986 arch/x86/xen/mmu_pv.c 	spin_lock(&mm->page_table_lock);
mm                987 arch/x86/xen/mmu_pv.c 	xen_pgd_pin(mm);
mm                988 arch/x86/xen/mmu_pv.c 	spin_unlock(&mm->page_table_lock);
mm                993 arch/x86/xen/mmu_pv.c 	struct mm_struct *mm = info;
mm                995 arch/x86/xen/mmu_pv.c 	if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
mm               1002 arch/x86/xen/mmu_pv.c 	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
mm               1011 arch/x86/xen/mmu_pv.c static void xen_drop_mm_ref(struct mm_struct *mm)
mm               1016 arch/x86/xen/mmu_pv.c 	drop_mm_ref_this_cpu(mm);
mm               1021 arch/x86/xen/mmu_pv.c 			if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
mm               1023 arch/x86/xen/mmu_pv.c 			smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
mm               1037 arch/x86/xen/mmu_pv.c 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
mm               1041 arch/x86/xen/mmu_pv.c 	smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
mm               1045 arch/x86/xen/mmu_pv.c static void xen_drop_mm_ref(struct mm_struct *mm)
mm               1047 arch/x86/xen/mmu_pv.c 	drop_mm_ref_this_cpu(mm);
mm               1065 arch/x86/xen/mmu_pv.c static void xen_exit_mmap(struct mm_struct *mm)
mm               1068 arch/x86/xen/mmu_pv.c 	xen_drop_mm_ref(mm);
mm               1071 arch/x86/xen/mmu_pv.c 	spin_lock(&mm->page_table_lock);
mm               1074 arch/x86/xen/mmu_pv.c 	if (xen_page_pinned(mm->pgd))
mm               1075 arch/x86/xen/mmu_pv.c 		xen_pgd_unpin(mm);
mm               1077 arch/x86/xen/mmu_pv.c 	spin_unlock(&mm->page_table_lock);
mm               1359 arch/x86/xen/mmu_pv.c 	trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
mm               1483 arch/x86/xen/mmu_pv.c static int xen_pgd_alloc(struct mm_struct *mm)
mm               1485 arch/x86/xen/mmu_pv.c 	pgd_t *pgd = mm->pgd;
mm               1516 arch/x86/xen/mmu_pv.c static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm               1576 arch/x86/xen/mmu_pv.c static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
mm               1586 arch/x86/xen/mmu_pv.c static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
mm               1632 arch/x86/xen/mmu_pv.c static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
mm               1635 arch/x86/xen/mmu_pv.c 	bool pinned = xen_page_pinned(mm->pgd);
mm               1637 arch/x86/xen/mmu_pv.c 	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
mm               1662 arch/x86/xen/mmu_pv.c static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
mm               1664 arch/x86/xen/mmu_pv.c 	xen_alloc_ptpage(mm, pfn, PT_PTE);
mm               1667 arch/x86/xen/mmu_pv.c static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
mm               1669 arch/x86/xen/mmu_pv.c 	xen_alloc_ptpage(mm, pfn, PT_PMD);
mm               1706 arch/x86/xen/mmu_pv.c static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
mm               1708 arch/x86/xen/mmu_pv.c 	xen_alloc_ptpage(mm, pfn, PT_PUD);
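The mmu_pv.c walkers above (__xen_pgd_walk() and friends) share one shape: recurse down the levels, apply a callback such as xen_pin_page() or xen_unpin_page() to every page-table page, and OR the callbacks' "needs flush" results together. That skeleton in isolation, with toy types standing in for the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum toy_level { TOY_PTE, TOY_PMD };
typedef bool (*pt_cb)(void *page, enum toy_level lvl);

/* Visit each present entry of one table, accumulating flush flags. */
static bool walk_pmd(void **entries, size_t n, pt_cb cb)
{
	bool flush = false;

	for (size_t i = 0; i < n; i++)
		if (entries[i])
			flush |= cb(entries[i], TOY_PTE);
	return flush;
}

static bool pin_page(void *page, enum toy_level lvl)
{
	printf("pin %p at level %d\n", page, lvl);
	return true;	/* pretend pinning this page needs a flush */
}

int main(void)
{
	int a, b;
	void *entries[] = { &a, NULL, &b };
	bool flush;

	flush = walk_pmd(entries, 3, pin_page);
	flush |= pin_page(entries, TOY_PMD);	/* then the table itself */
	printf("flush needed: %d\n", flush);
	return 0;
}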
mm                116 arch/xtensa/include/asm/cacheflush.h #define flush_cache_mm(mm)		flush_cache_all()
mm                117 arch/xtensa/include/asm/cacheflush.h #define flush_cache_dup_mm(mm)		flush_cache_mm(mm)
mm                133 arch/xtensa/include/asm/cacheflush.h #define flush_cache_mm(mm)				do { } while (0)
mm                134 arch/xtensa/include/asm/cacheflush.h #define flush_cache_dup_mm(mm)				do { } while (0)
mm                 70 arch/xtensa/include/asm/mmu_context.h static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
mm                 82 arch/xtensa/include/asm/mmu_context.h 	mm->context.asid[cpu] = asid;
mm                 83 arch/xtensa/include/asm/mmu_context.h 	mm->context.cpu = cpu;
mm                 86 arch/xtensa/include/asm/mmu_context.h static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
mm                 92 arch/xtensa/include/asm/mmu_context.h 	if (mm) {
mm                 93 arch/xtensa/include/asm/mmu_context.h 		unsigned long asid = mm->context.asid[cpu];
mm                 97 arch/xtensa/include/asm/mmu_context.h 			get_new_mmu_context(mm, cpu);
mm                101 arch/xtensa/include/asm/mmu_context.h static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
mm                103 arch/xtensa/include/asm/mmu_context.h 	get_mmu_context(mm, cpu);
mm                104 arch/xtensa/include/asm/mmu_context.h 	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
mm                115 arch/xtensa/include/asm/mmu_context.h 		struct mm_struct *mm)
mm                119 arch/xtensa/include/asm/mmu_context.h 		mm->context.asid[cpu] = NO_CONTEXT;
mm                121 arch/xtensa/include/asm/mmu_context.h 	mm->context.cpu = -1;
mm                140 arch/xtensa/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)	do { } while (0)
mm                146 arch/xtensa/include/asm/mmu_context.h static inline void destroy_context(struct mm_struct *mm)
mm                152 arch/xtensa/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
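The xtensa mmu_context.h entries above manage per-CPU ASIDs: an mm remembers its asid[cpu], activate_context() reuses it while it is still valid, and dropping it to NO_CONTEXT is how local_flush_tlb_mm() (further down, in mm/tlb.c) invalidates everything the mm had in the TLB. A toy of that recycle-by-forgetting scheme, ignoring generations and SMP:

#include <stdio.h>

#define NO_CONTEXT 0

static unsigned int asid_cache = NO_CONTEXT;

struct toy_mm { unsigned int asid; };

static void activate(struct toy_mm *mm)
{
	if (mm->asid == NO_CONTEXT)
		mm->asid = ++asid_cache;	/* get_new_mmu_context() */
	printf("running with asid %u\n", mm->asid);
}

static void flush_mm(struct toy_mm *mm)
{
	mm->asid = NO_CONTEXT;	/* old TLB entries become unreachable */
}

int main(void)
{
	struct toy_mm mm = { NO_CONTEXT };

	activate(&mm);	/* allocates asid 1 */
	flush_mm(&mm);	/* local_flush_tlb_mm() analogue */
	activate(&mm);	/* fresh asid 2: stale entries never match */
	return 0;
}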
mm                 10 arch/xtensa/include/asm/nommu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
mm                 14 arch/xtensa/include/asm/nommu_context.h static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
mm                 19 arch/xtensa/include/asm/nommu_context.h static inline void destroy_context(struct mm_struct *mm)
mm                 32 arch/xtensa/include/asm/nommu_context.h static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm)
mm                 19 arch/xtensa/include/asm/pgalloc.h #define pmd_populate_kernel(mm, pmdp, ptep)				     \
mm                 21 arch/xtensa/include/asm/pgalloc.h #define pmd_populate(mm, pmdp, page)					     \
mm                 26 arch/xtensa/include/asm/pgalloc.h pgd_alloc(struct mm_struct *mm)
mm                 31 arch/xtensa/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
mm                 36 arch/xtensa/include/asm/pgalloc.h static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
mm                 49 arch/xtensa/include/asm/pgalloc.h static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
mm                 54 arch/xtensa/include/asm/pgalloc.h 	pte = pte_alloc_one_kernel(mm);
mm                 65 arch/xtensa/include/asm/pgalloc.h static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
mm                 70 arch/xtensa/include/asm/pgalloc.h static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
mm                259 arch/xtensa/include/asm/pgtable.h #define pte_clear(mm,addr,ptep)						\
mm                322 arch/xtensa/include/asm/pgtable.h set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
mm                352 arch/xtensa/include/asm/pgtable.h ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                355 arch/xtensa/include/asm/pgtable.h 	pte_clear(mm, addr, ptep);
mm                360 arch/xtensa/include/asm/pgtable.h ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
mm                370 arch/xtensa/include/asm/pgtable.h #define pgd_offset(mm,address)	((mm)->pgd + pgd_index(address))
mm                418 arch/xtensa/include/asm/pgtable.h #define _PGD_OFFSET(mm,adr,tmp)		l32i	mm, mm, MM_PGD;		\
mm                420 arch/xtensa/include/asm/pgtable.h 					addx4	mm, tmp, mm
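
The C macro at pgtable.h:370 and the assembler _PGD_OFFSET macro below it compute the same thing: index mm->pgd by the top bits of the virtual address (the assembler's addx4 is the times-4 scaling that C pointer arithmetic performs implicitly). A self-contained model of that arithmetic; PGDIR_SHIFT, PTRS_PER_PGD and the toy types here are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define PGDIR_SHIFT  22                     /* assume 4 MiB per PGD slot */
    #define PTRS_PER_PGD 1024

    typedef uint32_t pgd_t;
    struct toy_mm { pgd_t *pgd; };

    #define pgd_index(addr)      (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
    #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))

    int main(void)
    {
        static pgd_t table[PTRS_PER_PGD];
        struct toy_mm mm = { table };

        /* 0x12345678 >> 22 == 72: this address selects PGD slot 72 */
        printf("slot %td\n", pgd_offset(&mm, 0x12345678u) - mm.pgd);
        return 0;
    }
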
mm                 19 arch/xtensa/include/asm/tlb.h #define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, pte)
mm                 34 arch/xtensa/include/asm/tlbflush.h void local_flush_tlb_mm(struct mm_struct *mm);
mm                 53 arch/xtensa/include/asm/tlbflush.h #define flush_tlb_mm(mm)		   local_flush_tlb_mm(mm)
mm                 74 arch/xtensa/kernel/asm-offsets.c 	DEFINE(TASK_MM, offsetof (struct task_struct, mm));
mm                124 arch/xtensa/kernel/smp.c 	struct mm_struct *mm = &init_mm;
mm                146 arch/xtensa/kernel/smp.c 	mmget(mm);
mm                147 arch/xtensa/kernel/smp.c 	mmgrab(mm);
mm                148 arch/xtensa/kernel/smp.c 	current->active_mm = mm;
mm                149 arch/xtensa/kernel/smp.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
mm                150 arch/xtensa/kernel/smp.c 	enter_lazy_tlb(mm, current);
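
The smp.c lines above take two different references on init_mm before the secondary CPU adopts it: mmget() pins the address space (mm_users), while mmgrab() pins only the mm_struct's lifetime (mm_count), which is the reference active_mm needs. A toy model of the two counters; the real kernel uses atomics and the teardown work is far more involved:

    #include <stdio.h>

    struct toy_mm {
        int mm_users;  /* real users: address space torn down when it hits 0 */
        int mm_count;  /* lifetime of the struct itself */
    };

    static void mmget(struct toy_mm *mm)  { mm->mm_users++; }
    static void mmgrab(struct toy_mm *mm) { mm->mm_count++; }

    static void mmput(struct toy_mm *mm)
    {
        if (--mm->mm_users == 0)
            printf("VMAs and page tables torn down\n");
    }

    static void mmdrop(struct toy_mm *mm)
    {
        if (--mm->mm_count == 0)
            printf("struct mm freed\n");
    }

    int main(void)
    {
        struct toy_mm mm = { .mm_users = 1, .mm_count = 1 };

        mmget(&mm);                /* user reference, as at smp.c:146 */
        mmgrab(&mm);               /* lazy-TLB reference, as at smp.c:147 */
        mmput(&mm);  mmput(&mm);   /* last user: address space dies... */
        mmdrop(&mm); mmdrop(&mm);  /* ...struct outlives it until mm_count==0 */
        return 0;
    }
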
mm                486 arch/xtensa/kernel/smp.c void flush_tlb_mm(struct mm_struct *mm)
mm                488 arch/xtensa/kernel/smp.c 	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
mm                 86 arch/xtensa/kernel/syscall.c 	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
mm                 39 arch/xtensa/mm/fault.c 	struct mm_struct *mm = current->mm;
mm                 59 arch/xtensa/mm/fault.c 	if (faulthandler_disabled() || !mm) {
mm                 77 arch/xtensa/mm/fault.c 	down_read(&mm->mmap_sem);
mm                 78 arch/xtensa/mm/fault.c 	vma = find_vma(mm, address);
mm                143 arch/xtensa/mm/fault.c 	up_read(&mm->mmap_sem);
mm                156 arch/xtensa/mm/fault.c 	up_read(&mm->mmap_sem);
mm                171 arch/xtensa/mm/fault.c 	up_read(&mm->mmap_sem);
mm                179 arch/xtensa/mm/fault.c 	up_read(&mm->mmap_sem);
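
The fault.c hits show the canonical fault-path shape: bail out when the handler cannot sleep or there is no mm, take mmap_sem for read, look up the VMA, and release the lock on every exit path (hence the four up_read() hits). A condensed userspace restatement of that control flow; retry, stack expansion and signal handling are deliberately omitted (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct vma { unsigned long start, end; struct vma *next; };
    struct toy_mm { pthread_rwlock_t mmap_sem; struct vma *vmas; };

    /* First VMA whose end lies above addr, like the kernel's find_vma(); the
     * caller must still check addr >= start to rule out a hole below it. */
    static struct vma *find_vma(struct toy_mm *mm, unsigned long addr)
    {
        for (struct vma *v = mm->vmas; v; v = v->next)
            if (addr < v->end)
                return v;
        return NULL;
    }

    static int handle_fault(struct toy_mm *mm, unsigned long address)
    {
        int ret = -1;

        pthread_rwlock_rdlock(&mm->mmap_sem);      /* down_read() */
        struct vma *vma = find_vma(mm, address);
        if (vma && address >= vma->start)
            ret = 0;                               /* would service the fault */
        pthread_rwlock_unlock(&mm->mmap_sem);      /* up_read() on every path */
        return ret;
    }

    int main(void)
    {
        struct vma v = { 0x1000, 0x2000, NULL };
        struct toy_mm mm = { PTHREAD_RWLOCK_INITIALIZER, &v };

        printf("%d %d\n", handle_fault(&mm, 0x1800), handle_fault(&mm, 0x3000));
        return 0;
    }
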
mm                 63 arch/xtensa/mm/tlb.c void local_flush_tlb_mm(struct mm_struct *mm)
mm                 67 arch/xtensa/mm/tlb.c 	if (mm == current->active_mm) {
mm                 70 arch/xtensa/mm/tlb.c 		mm->context.asid[cpu] = NO_CONTEXT;
mm                 71 arch/xtensa/mm/tlb.c 		activate_context(mm, cpu);
mm                 74 arch/xtensa/mm/tlb.c 		mm->context.asid[cpu] = NO_CONTEXT;
mm                 75 arch/xtensa/mm/tlb.c 		mm->context.cpu = -1;
mm                 92 arch/xtensa/mm/tlb.c 	struct mm_struct *mm = vma->vm_mm;
mm                 95 arch/xtensa/mm/tlb.c 	if (mm->context.asid[cpu] == NO_CONTEXT)
mm                 99 arch/xtensa/mm/tlb.c 		 (unsigned long)mm->context.asid[cpu], start, end);
mm                105 arch/xtensa/mm/tlb.c 		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
mm                121 arch/xtensa/mm/tlb.c 		local_flush_tlb_mm(mm);
mm                129 arch/xtensa/mm/tlb.c 	struct mm_struct* mm = vma->vm_mm;
mm                133 arch/xtensa/mm/tlb.c 	if (mm->context.asid[cpu] == NO_CONTEXT)
mm                139 arch/xtensa/mm/tlb.c 	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
mm                170 arch/xtensa/mm/tlb.c 	struct mm_struct *mm = task->mm;
mm                175 arch/xtensa/mm/tlb.c 	if (!mm)
mm                176 arch/xtensa/mm/tlb.c 		mm = task->active_mm;
mm                177 arch/xtensa/mm/tlb.c 	pgd = pgd_offset(mm, vaddr);
mm               1250 block/bio.c    		if (!current->mm)
mm                188 drivers/android/binder_alloc.c 	struct mm_struct *mm = NULL;
mm                212 drivers/android/binder_alloc.c 		mm = alloc->vma_vm_mm;
mm                214 drivers/android/binder_alloc.c 	if (mm) {
mm                215 drivers/android/binder_alloc.c 		down_read(&mm->mmap_sem);
mm                273 drivers/android/binder_alloc.c 	if (mm) {
mm                274 drivers/android/binder_alloc.c 		up_read(&mm->mmap_sem);
mm                275 drivers/android/binder_alloc.c 		mmput(mm);
mm                306 drivers/android/binder_alloc.c 	if (mm) {
mm                307 drivers/android/binder_alloc.c 		up_read(&mm->mmap_sem);
mm                308 drivers/android/binder_alloc.c 		mmput(mm);
mm                914 drivers/android/binder_alloc.c 	struct mm_struct *mm = NULL;
mm                933 drivers/android/binder_alloc.c 	mm = alloc->vma_vm_mm;
mm                934 drivers/android/binder_alloc.c 	if (!mmget_not_zero(mm))
mm                936 drivers/android/binder_alloc.c 	if (!down_read_trylock(&mm->mmap_sem))
mm                950 drivers/android/binder_alloc.c 	up_read(&mm->mmap_sem);
mm                951 drivers/android/binder_alloc.c 	mmput(mm);
mm                965 drivers/android/binder_alloc.c 	mmput_async(mm);
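
The second binder_alloc group is the reclaim-safe variant of the same pinning: a shrinker can race with process exit and must never block, so it takes the reference with mmget_not_zero() (fail rather than resurrect a dying mm), takes the lock with down_read_trylock() (fail rather than sleep), and drops the pin with mmput_async() so heavy teardown never runs in reclaim context. A sketch of that shape with toy stand-ins (build with -pthread; the kernel versions are more subtle):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_mm { atomic_int mm_users; pthread_rwlock_t mmap_sem; };

    /* Take a user reference only if one still exists. */
    static bool mmget_not_zero(struct toy_mm *mm)
    {
        int users = atomic_load(&mm->mm_users);

        while (users != 0)
            if (atomic_compare_exchange_weak(&mm->mm_users, &users, users + 1))
                return true;
        return false;
    }

    static void mmput(struct toy_mm *mm) { atomic_fetch_sub(&mm->mm_users, 1); }

    static bool try_reclaim_one(struct toy_mm *mm)
    {
        if (!mmget_not_zero(mm))
            return false;                       /* process already exiting */
        if (pthread_rwlock_tryrdlock(&mm->mmap_sem) != 0) {
            mmput(mm);                          /* contended: just back off */
            return false;
        }
        /* ...unmap and free one cached page here... */
        pthread_rwlock_unlock(&mm->mmap_sem);
        mmput(mm);                              /* kernel: mmput_async() */
        return true;
    }

    int main(void)
    {
        struct toy_mm mm = { 1, PTHREAD_RWLOCK_INITIALIZER };

        printf("reclaimed: %d\n", try_reclaim_one(&mm));
        return 0;
    }
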
mm                752 drivers/char/mem.c 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
mm               2151 drivers/crypto/ccp/ccp-ops.c 	if (!ecc->u.mm.operand_1 ||
mm               2152 drivers/crypto/ccp/ccp-ops.c 	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
mm               2156 drivers/crypto/ccp/ccp-ops.c 		if (!ecc->u.mm.operand_2 ||
mm               2157 drivers/crypto/ccp/ccp-ops.c 		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
mm               2160 drivers/crypto/ccp/ccp-ops.c 	if (!ecc->u.mm.result ||
mm               2161 drivers/crypto/ccp/ccp-ops.c 	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
mm               2190 drivers/crypto/ccp/ccp-ops.c 	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
mm               2191 drivers/crypto/ccp/ccp-ops.c 				      ecc->u.mm.operand_1_len);
mm               2198 drivers/crypto/ccp/ccp-ops.c 		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
mm               2199 drivers/crypto/ccp/ccp-ops.c 					      ecc->u.mm.operand_2_len);
mm               2238 drivers/crypto/ccp/ccp-ops.c 	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
mm                353 drivers/dax/device.c 	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
mm                360 drivers/dax/device.c 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
mm                 34 drivers/firmware/efi/arm-runtime.c 	.mm		= &efi_mm,
mm                132 drivers/firmware/efi/memattr.c int __init efi_memattr_apply_permissions(struct mm_struct *mm,
mm                176 drivers/firmware/efi/memattr.c 			ret = fn(mm, &md);
mm                 50 drivers/fpga/dfl-afu-dma-region.c 	ret = account_locked_vm(current->mm, npages, true);
mm                 79 drivers/fpga/dfl-afu-dma-region.c 	account_locked_vm(current->mm, npages, false);
mm                 99 drivers/fpga/dfl-afu-dma-region.c 	account_locked_vm(current->mm, npages, false);
mm               1120 drivers/gpu/drm/amd/amdgpu/amdgpu.h 	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
mm               1123 drivers/gpu/drm/amd/amdgpu/amdgpu.h 	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
mm                688 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
mm                707 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
mm                 73 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h 	struct mm_struct *mm;
mm                 96 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h 						       struct mm_struct *mm);
mm                 97 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
mm                133 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
mm                186 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h 			if ((mmptr) == current->mm) {			\
mm                188 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h 			} else if (current->mm == NULL) {		\
mm                252 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h int kgd2kfd_quiesce_mm(struct mm_struct *mm);
mm                253 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h int kgd2kfd_resume_mm(struct mm_struct *mm);
mm                254 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
mm                131 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c 			     uint32_t __user *wptr, struct mm_struct *mm)
mm                175 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c 	if (read_user_wptr(mm, wptr64, data64)) {
mm                 63 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c 						       struct mm_struct *mm)
mm                 72 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c 	mmgrab(mm);
mm                 73 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c 	fence->mm = mm;
mm                125 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c 	if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f))
mm                149 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c 	mmdrop(fence->mm);
mm                160 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
mm                166 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c 	else if (fence->mm == mm)
mm                 67 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 			struct mm_struct *mm);
mm                 72 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 			     uint32_t __user *wptr, struct mm_struct *mm);
mm                360 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 			struct mm_struct *mm)
mm                487 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 			     uint32_t __user *wptr, struct mm_struct *mm)
mm                533 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 	if (read_user_wptr(mm, wptr64, data64)) {
mm                104 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c 			struct mm_struct *mm);
mm                109 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c 			     uint32_t __user *wptr, struct mm_struct *mm);
mm                331 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c 			struct mm_struct *mm)
mm                361 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c 	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
mm                411 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c 			     uint32_t __user *wptr, struct mm_struct *mm)
mm                451 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c 	if (read_user_wptr(mm, wptr, data))
mm                 61 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c 			struct mm_struct *mm);
mm                 66 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c 			     uint32_t __user *wptr, struct mm_struct *mm);
mm                287 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c 			struct mm_struct *mm)
mm                346 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c 	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
mm                396 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c 			     uint32_t __user *wptr, struct mm_struct *mm)
mm                435 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c 	if (read_user_wptr(mm, wptr, data))
mm                262 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 			struct mm_struct *mm)
mm                387 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 			     uint32_t __user *wptr, struct mm_struct *mm)
mm                432 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 	if (read_user_wptr(mm, wptr64, data64)) {
mm                 35 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h 			struct mm_struct *mm);
mm                484 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
mm                859 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 						   current->mm);
mm               1198 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 		ret = init_user_pages(*mem, current->mm, user_addr);
mm               1330 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 		down_write(&current->mm->mmap_sem);
mm               1332 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 		up_write(&current->mm->mmap_sem);
mm               1658 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 				struct mm_struct *mm)
mm               1668 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 		r = kgd2kfd_quiesce_mm(mm);
mm               1685 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 				     struct mm_struct *mm)
mm               1883 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	struct mm_struct *mm;
mm               1894 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	mm = get_task_mm(usertask);
mm               1895 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	if (!mm) {
mm               1902 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	if (update_invalid_user_pages(process_info, mm))
mm               1924 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	if (kgd2kfd_resume_mm(mm)) {
mm               1933 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	mmput(mm);
mm               2066 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 				process_info->eviction_fence->mm);
mm                109 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 			if (usermm != current->mm) {
mm                542 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		if (usermm && usermm != current->mm)
mm                129 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 	struct mm_struct *mm;
mm                132 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
mm                133 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 	if (mm && mm != current->mm)
mm                 28 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	struct drm_mm mm;
mm                101 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	drm_mm_init(&mgr->mm, start, size);
mm                133 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	drm_mm_takedown(&mgr->mm);
mm                198 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
mm                319 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	drm_mm_for_each_node(mm_node, &mgr->mm) {
mm                344 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	drm_mm_print(&mgr->mm, printer);
mm                281 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 				amdgpu_amdkfd_evict_userptr(mem, amn->mm);
mm                294 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c #define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
mm                318 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	struct mm_struct *mm = current->mm;
mm                320 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	unsigned long key = AMDGPU_MN_KEY(mm, type);
mm                324 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	if (down_write_killable(&mm->mmap_sem)) {
mm                330 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 		if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
mm                340 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	amn->mm = mm;
mm                346 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	r = hmm_mirror_register(&amn->mirror, mm);
mm                350 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
mm                353 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	up_write(&mm->mmap_sem);
mm                359 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	up_write(&mm->mmap_sem);
mm                 55 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h 	struct mm_struct	*mm;
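
amdgpu_mn keeps one notifier object per (process, notifier type) pair, and AMDGPU_MN_KEY at amdgpu_mn.c:294 folds the pair into a single hash key by adding the small type enum to the mm pointer; that cannot collide because distinct mm_structs are allocated far more than a few bytes apart. An illustration of the keying trick with the hash table reduced to a linear scan (type names invented):

    #include <stdio.h>

    enum mn_type { MN_TYPE_GFX, MN_TYPE_HSA };

    #define MN_KEY(mm, type) ((unsigned long)(mm) + (type))

    struct toy_mn { const void *mm; enum mn_type type; };

    static struct toy_mn *find_mn(struct toy_mn *table, int n,
                                  const void *mm, enum mn_type type)
    {
        unsigned long key = MN_KEY(mm, type);

        for (int i = 0; i < n; i++)
            if (MN_KEY(table[i].mm, table[i].type) == key)
                return &table[i];
        return NULL;
    }

    int main(void)
    {
        long procA, procB;  /* stand-ins for two processes' mm_structs */
        struct toy_mn table[] = {
            { &procA, MN_TYPE_GFX }, { &procA, MN_TYPE_HSA },
            { &procB, MN_TYPE_GFX },
        };

        printf("hit: %p\n", (void *)find_mn(table, 3, &procA, MN_TYPE_HSA));
        return 0;
    }
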
mm                754 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct drm_mm_node *mm;
mm                757 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	mm = amdgpu_find_mm_node(&bo->mem, &offset);
mm                758 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
mm                792 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct mm_struct *mm;
mm                805 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	mm = mirror->hmm->mmu_notifier.mm;
mm                806 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
mm                839 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	down_read(&mm->mmap_sem);
mm                840 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	vma = find_vma(mm, start);
mm                852 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	up_read(&mm->mmap_sem);
mm                869 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	mmput(mm);
mm                874 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	up_read(&mm->mmap_sem);
mm                881 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	mmput(mm);
mm               1365 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	return gtt->usertask->mm;
mm               1499 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			if (amdkfd_fence_check_mm(f, current->mm))
mm               3122 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		if (current->group_leader->mm == current->mm) {
mm                 28 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	struct drm_mm mm;
mm                132 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	drm_mm_init(&mgr->mm, 0, p_size);
mm                175 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	drm_mm_takedown(&mgr->mm);
mm                274 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	struct drm_mm *mm = &mgr->mm;
mm                326 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
mm                345 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 		r = drm_mm_insert_node_in_range(mm, &nodes[i],
mm                455 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	drm_mm_print(&mgr->mm, printer);
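
Both amdgpu_gtt_mgr.c and amdgpu_vram_mgr.c drive the drm_mm range allocator whose internals appear at the bottom of this listing: drm_mm_init() declares the managed range, drm_mm_insert_node_in_range() carves allocations out of it, and drm_mm_takedown() insists the range is empty again. A kernel-context sketch of that life cycle; this is not standalone code, and the sizes and insert mode are arbitrary:

    #include <drm/drm_mm.h>

    static struct drm_mm mgr_mm;
    static struct drm_mm_node node;

    static int toy_mgr_demo(void)
    {
        int r;

        drm_mm_init(&mgr_mm, 0, 1 << 20);   /* manage [0, 1M) units */

        /* best-fit placement of 16 units, alignment 1, anywhere in range */
        r = drm_mm_insert_node_in_range(&mgr_mm, &node, 16, 1, 0,
                                        0, 1 << 20, DRM_MM_INSERT_BEST);
        if (!r)
            drm_mm_remove_node(&node);      /* give the range back */

        drm_mm_takedown(&mgr_mm);           /* warns if nodes remain */
        return r;
    }
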
mm                 31 drivers/gpu/drm/amd/amdgpu/soc15_common.h 	WREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg,	\
mm                 32 drivers/gpu/drm/amd/amdgpu/soc15_common.h 	(RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg)	\
mm                123 drivers/gpu/drm/amd/amdgpu/soc15_common.h     WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
mm                124 drivers/gpu/drm/amd/amdgpu/soc15_common.h     (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
mm                878 drivers/gpu/drm/amd/amdkfd/kfd_device.c int kgd2kfd_quiesce_mm(struct mm_struct *mm)
mm                887 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	p = kfd_lookup_process_by_mm(mm);
mm                897 drivers/gpu/drm/amd/amdkfd/kfd_device.c int kgd2kfd_resume_mm(struct mm_struct *mm)
mm                906 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	p = kfd_lookup_process_by_mm(mm);
mm                924 drivers/gpu/drm/amd/amdkfd/kfd_device.c int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
mm                937 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	p = kfd_lookup_process_by_mm(mm);
mm                335 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 		if (WARN(q->process->mm != current->mm,
mm                340 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 					q->queue, &q->properties, current->mm);
mm                555 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 		if (WARN(q->process->mm != current->mm,
mm                561 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 						   &q->properties, current->mm);
mm                651 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	struct mm_struct *mm = NULL;
mm                688 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	mm = get_task_mm(pdd->process->lead_thread);
mm                689 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	if (!mm) {
mm                706 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				       q->queue, &q->properties, mm);
mm                716 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	if (mm)
mm                717 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 		mmput(mm);
mm                889 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct mm_struct *mm;
mm                897 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mm = get_task_mm(p->lead_thread);
mm                898 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (!mm) {
mm                905 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	down_read(&mm->mmap_sem);
mm                906 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	vma = find_vma(mm, address);
mm                928 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	up_read(&mm->mmap_sem);
mm                929 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mmput(mm);
mm                 89 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
mm                 96 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
mm                104 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c 	amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
mm                 72 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h 	void	(*init_mqd)(struct mqd_manager *mm, void **mqd,
mm                 76 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h 	int	(*load_mqd)(struct mqd_manager *mm, void *mqd,
mm                 81 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h 	void	(*update_mqd)(struct mqd_manager *mm, void *mqd,
mm                 84 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h 	int	(*destroy_mqd)(struct mqd_manager *mm, void *mqd,
mm                 89 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h 	void	(*free_mqd)(struct mqd_manager *mm, void *mqd,
mm                 92 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h 	bool	(*is_occupied)(struct mqd_manager *mm, void *mqd,
mm                 96 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h 	int	(*get_wave_state)(struct mqd_manager *mm, void *mqd,
mm                115 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
mm                118 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
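
Note that in the kfd_mqd_manager entries here and below the search matched a different identifier: mm is a struct mqd_manager *, not an mm_struct. The manager is a classic C ops table, a struct of function pointers that all take the manager itself as their first argument, so each ASIC file (cik, vi, v9, v10) fills the same slots with its own implementations. A minimal model of the pattern (names illustrative):

    #include <stdio.h>

    struct mqd_manager {
        /* per-ASIC implementations are plugged into these slots */
        void (*init_mqd)(struct mqd_manager *mm, void **mqd);
        void (*update_mqd)(struct mqd_manager *mm, void *mqd);
        const char *asic;
    };

    static void cik_update_mqd(struct mqd_manager *mm, void *mqd)
    {
        (void)mqd;
        printf("%s: update\n", mm->asic);
    }

    static void cik_init_mqd(struct mqd_manager *mm, void **mqd)
    {
        static int dummy;

        *mqd = &dummy;
        printf("%s: init\n", mm->asic);
        mm->update_mqd(mm, *mqd);   /* same shape as mm->update_mqd(mm, m, q) */
    }

    int main(void)
    {
        struct mqd_manager mgr = { cik_init_mqd, cik_update_mqd, "cik" };
        void *mqd;

        mgr.init_mqd(&mgr, &mqd);
        return 0;
    }
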
mm                 44 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static void update_cu_mask(struct mqd_manager *mm, void *mqd,
mm                 53 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	mqd_symmetrically_map_cu_mask(mm,
mm                 87 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static void init_mqd(struct mqd_manager *mm, void **mqd,
mm                138 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	mm->update_mqd(mm, m, q);
mm                141 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
mm                155 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	mm->update_mqd(mm, m, q);
mm                158 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static void free_mqd(struct mqd_manager *mm, void *mqd,
mm                161 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
mm                165 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
mm                173 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
mm                178 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
mm                182 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
mm                187 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static void __update_mqd(struct mqd_manager *mm, void *mqd,
mm                217 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	update_cu_mask(mm, mqd, q);
mm                223 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static void update_mqd(struct mqd_manager *mm, void *mqd,
mm                226 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	__update_mqd(mm, mqd, q, 1);
mm                229 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static void update_mqd_hawaii(struct mqd_manager *mm, void *mqd,
mm                232 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	__update_mqd(mm, mqd, q, 0);
mm                235 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
mm                262 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static int destroy_mqd(struct mqd_manager *mm, void *mqd,
mm                267 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout,
mm                275 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
mm                280 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
mm                283 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static bool is_occupied(struct mqd_manager *mm, void *mqd,
mm                288 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
mm                293 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
mm                297 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
mm                306 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
mm                310 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
mm                313 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
mm                 44 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static void update_cu_mask(struct mqd_manager *mm, void *mqd,
mm                 53 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	mqd_symmetrically_map_cu_mask(mm,
mm                103 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static void init_mqd(struct mqd_manager *mm, void **mqd,
mm                142 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	if (mm->dev->cwsr_enabled) {
mm                158 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	mm->update_mqd(mm, m, q);
mm                161 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static int load_mqd(struct mqd_manager *mm, void *mqd,
mm                169 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	r = mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
mm                175 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static void update_mqd(struct mqd_manager *mm, void *mqd,
mm                229 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	if (mm->dev->cwsr_enabled)
mm                232 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	update_cu_mask(mm, mqd, q);
mm                240 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static int destroy_mqd(struct mqd_manager *mm, void *mqd,
mm                245 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	return mm->dev->kfd2kgd->hqd_destroy
mm                246 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 		(mm->dev->kgd, mqd, type, timeout,
mm                250 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static void free_mqd(struct mqd_manager *mm, void *mqd,
mm                253 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	struct kfd_dev *kfd = mm->dev;
mm                259 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 		kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
mm                263 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static bool is_occupied(struct mqd_manager *mm, void *mqd,
mm                267 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	return mm->dev->kfd2kgd->hqd_is_occupied(
mm                268 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 		mm->dev->kgd, queue_address,
mm                272 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static int get_wave_state(struct mqd_manager *mm, void *mqd,
mm                295 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
mm                301 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
mm                309 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
mm                314 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	update_mqd(mm, mqd, q);
mm                321 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
mm                335 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	mm->update_mqd(mm, m, q);
mm                338 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
mm                342 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
mm                349 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
mm                383 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
mm                388 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
mm                391 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
mm                395 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
mm                 45 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static void update_cu_mask(struct mqd_manager *mm, void *mqd,
mm                 54 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	mqd_symmetrically_map_cu_mask(mm,
mm                118 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static void init_mqd(struct mqd_manager *mm, void **mqd,
mm                163 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
mm                179 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	mm->update_mqd(mm, m, q);
mm                182 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static int load_mqd(struct mqd_manager *mm, void *mqd,
mm                189 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
mm                194 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static void update_mqd(struct mqd_manager *mm, void *mqd,
mm                249 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
mm                252 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	update_cu_mask(mm, mqd, q);
mm                259 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static int destroy_mqd(struct mqd_manager *mm, void *mqd,
mm                264 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	return mm->dev->kfd2kgd->hqd_destroy
mm                265 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 		(mm->dev->kgd, mqd, type, timeout,
mm                269 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static void free_mqd(struct mqd_manager *mm, void *mqd,
mm                272 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	struct kfd_dev *kfd = mm->dev;
mm                278 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 		kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
mm                282 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static bool is_occupied(struct mqd_manager *mm, void *mqd,
mm                286 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	return mm->dev->kfd2kgd->hqd_is_occupied(
mm                287 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 		mm->dev->kgd, queue_address,
mm                291 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static int get_wave_state(struct mqd_manager *mm, void *mqd,
mm                313 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
mm                319 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
mm                327 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
mm                332 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	update_mqd(mm, mqd, q);
mm                339 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
mm                353 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	mm->update_mqd(mm, m, q);
mm                356 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
mm                360 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
mm                367 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
mm                397 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
mm                402 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
mm                405 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
mm                409 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
mm                 47 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static void update_cu_mask(struct mqd_manager *mm, void *mqd,
mm                 56 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	mqd_symmetrically_map_cu_mask(mm,
mm                 90 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static void init_mqd(struct mqd_manager *mm, void **mqd,
mm                137 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
mm                153 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	mm->update_mqd(mm, m, q);
mm                156 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static int load_mqd(struct mqd_manager *mm, void *mqd,
mm                164 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
mm                169 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static void __update_mqd(struct mqd_manager *mm, void *mqd,
mm                228 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
mm                233 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	update_cu_mask(mm, mqd, q);
mm                240 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static void update_mqd(struct mqd_manager *mm, void *mqd,
mm                243 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	__update_mqd(mm, mqd, q, MTYPE_CC, 1);
mm                246 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static void update_mqd_tonga(struct mqd_manager *mm, void *mqd,
mm                249 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	__update_mqd(mm, mqd, q, MTYPE_UC, 0);
mm                252 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static int destroy_mqd(struct mqd_manager *mm, void *mqd,
mm                257 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	return mm->dev->kfd2kgd->hqd_destroy
mm                258 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 		(mm->dev->kgd, mqd, type, timeout,
mm                262 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static void free_mqd(struct mqd_manager *mm, void *mqd,
mm                265 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
mm                268 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static bool is_occupied(struct mqd_manager *mm, void *mqd,
mm                272 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	return mm->dev->kfd2kgd->hqd_is_occupied(
mm                273 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 		mm->dev->kgd, queue_address,
mm                277 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static int get_wave_state(struct mqd_manager *mm, void *mqd,
mm                299 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
mm                304 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
mm                312 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
mm                316 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	__update_mqd(mm, mqd, q, MTYPE_UC, 0);
mm                322 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
mm                336 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	mm->update_mqd(mm, m, q);
mm                339 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
mm                343 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
mm                348 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
mm                379 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
mm                384 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
mm                387 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
mm                391 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
mm                671 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 	void *mm;
mm                765 drivers/gpu/drm/amd/amdkfd/kfd_priv.h struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
mm                273 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	if (!thread->mm)
mm                277 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	if (thread->group_leader->mm != thread->mm)
mm                336 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	if (!thread->mm)
mm                340 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	if (thread->group_leader->mm != thread->mm)
mm                350 drivers/gpu/drm/amd/amdkfd/kfd_process.c static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
mm                355 drivers/gpu/drm/amd/amdkfd/kfd_process.c 					kfd_processes, (uintptr_t)mm)
mm                356 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		if (process->mm == mm)
mm                368 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	p = find_process_by_mm(thread->mm);
mm                495 drivers/gpu/drm/amd/amdkfd/kfd_process.c 					struct mm_struct *mm)
mm                505 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	if (WARN_ON(p->mm != mm))
mm                539 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	p->mm = NULL;
mm                633 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	process->mm = thread->mm;
mm                660 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	err = mmu_notifier_register(&process->mmu_notifier, process->mm);
mm                666 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			(uintptr_t)process->mm);
mm                926 drivers/gpu/drm/amd/amdkfd/kfd_process.c struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
mm                932 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	p = find_process_by_mm(mm);
mm                 36 drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c 	.reg_name = mm ## reg_name
mm                 40 drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c 	.reg_name = mm ## block ## id ## _ ## reg_name
mm                 38 drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c 	.reg_name = mm ## reg_name
mm                 42 drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c 	.reg_name = mm ## block ## id ## _ ## reg_name
mm                 44 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c 	CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
mm                 45 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c 					mm ## block ## _ ## inst ## _ ## reg_name
mm                 63 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c 	(MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
mm                 52 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c 		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
mm                 53 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c 					mm ## reg_name
mm                 53 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 	(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
mm                 36 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c 	(MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
mm                135 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	.reg_name = mm ## reg_name
mm                139 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	.reg_name = mm ## block ## id ## _ ## reg_name
mm                392 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c #define REG(reg) mm ## reg
mm                451 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	.reg_name[id] = mm ## block ## id ## _ ## reg_name
mm                146 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	.reg_name = mm ## reg_name
mm                150 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	.reg_name = mm ## block ## id ## _ ## reg_name
mm                433 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c #define REG(reg) mm ## reg
mm                493 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	.reg_name[id] = mm ## block ## id ## _ ## reg_name
mm                145 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	.reg_name = mm ## reg_name
mm                149 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	.reg_name = mm ## block ## id ## _ ## reg_name
mm                410 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c #define REG(reg) mm ## reg
mm                471 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	.reg_name[id] = mm ## block ## id ## _ ## reg_name
mm                138 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
mm                139 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 					mm ## reg_name
mm                142 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                143 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 					mm ## block ## id ## _ ## reg_name
mm                153 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) +  \
mm                154 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 					mm ## reg_name
mm                704 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                705 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 					mm ## block ## id ## _ ## reg_name
mm                152 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	.reg_name = mm ## reg_name
mm                156 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	.reg_name = mm ## block ## id ## _ ## reg_name
mm                424 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c #define REG(reg) mm ## reg
mm                563 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	.reg_name[id] = mm ## block ## id ## _ ## reg_name
mm                 37 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h 		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
mm                 38 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h 					mm ## reg_name
mm                 41 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 42 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h 					mm ## block ## id ## _ ## reg_name
mm                 46 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 47 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h 					mm ## block ## id ## _ ## reg_name
mm                170 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
mm                171 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 					mm ## reg_name
mm                174 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                175 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 					mm ## block ## id ## _ ## reg_name
mm                179 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                180 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 					mm ## block ## id ## _ ## reg_name
mm                190 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) +  \
mm                191 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 					mm ## reg_name
mm                201 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) +  \
mm                202 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 					mm ## reg_name
mm                 38 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h 		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
mm                 39 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h 					mm ## reg_name
mm                 42 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 43 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h 					mm ## block ## id ## _ ## reg_name
mm                 46 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h 	.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm                 47 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h 					mm ## reg_name
mm                 50 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 51 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h 					mm ## block ## id ## _ ## reg_name
mm                 40 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h 		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
mm                 41 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h 					mm ## reg_name
mm                 44 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 45 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h 					mm ## block ## id ## _ ## reg_name
mm                 48 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h 	.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm                 49 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h 					mm ## reg_name
mm                 52 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 53 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h 					mm ## block ## id ## _ ## reg_name
mm                424 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
mm                425 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 					mm ## reg_name
mm                428 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                429 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 					mm ## block ## id ## _ ## reg_name
mm                432 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                433 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 					mm ## block ## id ## _ ## reg_name
mm                436 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                437 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 					mm ## block ## id ## _ ## reg_name
mm                440 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                441 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 					mm ## block ## id ## _ ## reg_name
mm                451 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
mm                452 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 					mm ## reg_name
mm                 38 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 39 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h 					mm ## block ## id ## _ ## reg_name
mm                287 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
mm                288 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 					mm ## reg_name
mm                291 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                292 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 					mm ## block ## id ## _ ## reg_name
mm                295 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                296 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 					mm ## block ## id ## _ ## reg_name
mm                299 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                300 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 					mm ## block ## id ## _ ## reg_name
mm                303 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                304 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 					mm ## block ## id ## _ ## reg_name
mm                314 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
mm                315 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 					mm ## reg_name
mm                166 drivers/gpu/drm/amd/display/dc/dm_services.h 		generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] +  mm##reg_name + inst_offset, \
mm                170 drivers/gpu/drm/amd/display/dc/dm_services.h 		generic_reg_set_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
mm                 46 drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c 		mm ## reg_name
mm                 49 drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c 	mm ## block ## id ## _ ## reg_name
mm                 61 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c 		BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
mm                 64 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 65 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c 				mm ## block ## id ## _ ## reg_name
mm                 52 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c 		BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
mm                 55 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 56 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c 				mm ## block ## id ## _ ## reg_name
mm                 42 drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c 		mm ## reg_name
mm                 58 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c 		BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
mm                 61 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 62 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c 				mm ## block ## id ## _ ## reg_name
mm                 52 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c 		BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
mm                 55 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 56 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c 				mm ## block ## id ## _ ## reg_name
mm                 61 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c 		BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
mm                 67 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 68 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c 				mm ## block ## id ## _ ## reg_name
mm                 57 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c 		BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
mm                 59 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c 		BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
mm                 65 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                 66 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c 				mm ## block ## id ## _ ## reg_name
mm                 57 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c 		BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
mm                 84 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h 	.reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
mm                 85 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h 					mm ## block ## _ ## inst ## _ ## reg_name
mm                101 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                102 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c 			mm ## block ## id ## _ ## reg_name
mm                182 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                183 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c 			mm ## block ## id ## _ ## reg_name
mm                184 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                185 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 			mm ## block ## id ## _ ## reg_name
mm                180 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm                181 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 			mm ## block ## id ## _ ## reg_name
mm                132 drivers/gpu/drm/amd/include/cgs_common.h 	cgs_write_register(device, mm##reg, (cgs_read_register(device, mm##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
mm                257 drivers/gpu/drm/amd/include/kgd_kfd_interface.h 			struct mm_struct *mm);
mm                260 drivers/gpu/drm/amd/include/kgd_kfd_interface.h 			     uint32_t __user *wptr, struct mm_struct *mm);
mm                139 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h 	PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
mm                150 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h 	cgs_write_register(device, mm##reg, PHM_SET_FIELD(	\
mm                151 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h 				cgs_read_register(device, mm##reg), reg, field, fieldval))
mm                164 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h        phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
mm                176 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h 				mm##port##_INDEX, index, value, mask)
mm                190 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h 		mm##port##_INDEX_11, index, value, mask)
mm                204 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h 		mm##port##_INDEX_11, index, value, mask)
mm                221 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h 				mm##reg, value, mask)
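
The `mm ## ...` pastes in the AMD entries above all follow one generated-header convention: every register's dword offset is exported as an `mm`-prefixed constant, and on DCN parts a companion `_BASE_IDX` constant selects an entry in a per-IP base-address table. The macros take a bare register name and let the preprocessor glue the prefix back on. A minimal sketch of the idiom, with hypothetical constants standing in for the real asic_reg headers:

    /* Hypothetical offsets in the style of the generated AMD headers. */
    #define mmFOO_CONTROL                 0x1234
    #define mmFOO_CONTROL_BASE_IDX        2

    /* Assumed per-IP base-address table, resolved by BASE() above. */
    static const unsigned int ip_base[] = { 0x0000, 0x00c0, 0x34c0 };
    #define BASE(seg)       (ip_base[seg])

    /* Same shape as the SR()-style macros in the listing. */
    #define REG(reg_name)   (BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name)

    /* REG(FOO_CONTROL) expands to (ip_base[2] + 0x1234). */

The smu_helper.h and cgs_common.h entries build the same paste into read-modify-write field updates, combining the `mm`-prefixed offset with per-field mask and shift macros.
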
mm                118 drivers/gpu/drm/drm_mm.c static void show_leaks(struct drm_mm *mm)
mm                129 drivers/gpu/drm/drm_mm.c 	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
mm                149 drivers/gpu/drm/drm_mm.c static void show_leaks(struct drm_mm *mm) { }
mm                160 drivers/gpu/drm/drm_mm.c __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
mm                162 drivers/gpu/drm/drm_mm.c 	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
mm                163 drivers/gpu/drm/drm_mm.c 					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
mm                170 drivers/gpu/drm/drm_mm.c 	struct drm_mm *mm = hole_node->mm;
mm                193 drivers/gpu/drm/drm_mm.c 		link = &mm->interval_tree.rb_root.rb_node;
mm                211 drivers/gpu/drm/drm_mm.c 	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
mm                260 drivers/gpu/drm/drm_mm.c 	struct drm_mm *mm = node->mm;
mm                266 drivers/gpu/drm/drm_mm.c 	insert_hole_size(&mm->holes_size, node);
mm                267 drivers/gpu/drm/drm_mm.c 	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
mm                269 drivers/gpu/drm/drm_mm.c 	list_add(&node->hole_stack, &mm->hole_stack);
mm                277 drivers/gpu/drm/drm_mm.c 	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
mm                278 drivers/gpu/drm/drm_mm.c 	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
mm                299 drivers/gpu/drm/drm_mm.c static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
mm                301 drivers/gpu/drm/drm_mm.c 	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
mm                319 drivers/gpu/drm/drm_mm.c static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
mm                321 drivers/gpu/drm/drm_mm.c 	struct rb_node *rb = mm->holes_addr.rb_node;
mm                342 drivers/gpu/drm/drm_mm.c first_hole(struct drm_mm *mm,
mm                349 drivers/gpu/drm/drm_mm.c 		return best_hole(mm, size);
mm                352 drivers/gpu/drm/drm_mm.c 		return find_hole(mm, start);
mm                355 drivers/gpu/drm/drm_mm.c 		return find_hole(mm, end);
mm                358 drivers/gpu/drm/drm_mm.c 		return list_first_entry_or_null(&mm->hole_stack,
mm                365 drivers/gpu/drm/drm_mm.c next_hole(struct drm_mm *mm,
mm                382 drivers/gpu/drm/drm_mm.c 		return &node->hole_stack == &mm->hole_stack ? NULL : node;
mm                400 drivers/gpu/drm/drm_mm.c int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
mm                412 drivers/gpu/drm/drm_mm.c 	hole = find_hole(mm, node->start);
mm                419 drivers/gpu/drm/drm_mm.c 	if (mm->color_adjust)
mm                420 drivers/gpu/drm/drm_mm.c 		mm->color_adjust(hole, node->color, &adj_start, &adj_end);
mm                425 drivers/gpu/drm/drm_mm.c 	node->mm = mm;
mm                464 drivers/gpu/drm/drm_mm.c int drm_mm_insert_node_in_range(struct drm_mm * const mm,
mm                480 drivers/gpu/drm/drm_mm.c 	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
mm                490 drivers/gpu/drm/drm_mm.c 	for (hole = first_hole(mm, range_start, range_end, size, mode);
mm                492 drivers/gpu/drm/drm_mm.c 	     hole = once ? NULL : next_hole(mm, hole, mode)) {
mm                506 drivers/gpu/drm/drm_mm.c 		if (mm->color_adjust)
mm                507 drivers/gpu/drm/drm_mm.c 			mm->color_adjust(hole, color, &col_start, &col_end);
mm                540 drivers/gpu/drm/drm_mm.c 		node->mm = mm;
mm                574 drivers/gpu/drm/drm_mm.c 	struct drm_mm *mm = node->mm;
mm                585 drivers/gpu/drm/drm_mm.c 	drm_mm_interval_tree_remove(node, &mm->interval_tree);
mm                606 drivers/gpu/drm/drm_mm.c 	struct drm_mm *mm = old->mm;
mm                613 drivers/gpu/drm/drm_mm.c 	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
mm                619 drivers/gpu/drm/drm_mm.c 				       &mm->holes_size);
mm                622 drivers/gpu/drm/drm_mm.c 				&mm->holes_addr);
mm                681 drivers/gpu/drm/drm_mm.c 				 struct drm_mm *mm,
mm                691 drivers/gpu/drm/drm_mm.c 	DRM_MM_BUG_ON(mm->scan_active);
mm                693 drivers/gpu/drm/drm_mm.c 	scan->mm = mm;
mm                727 drivers/gpu/drm/drm_mm.c 	struct drm_mm *mm = scan->mm;
mm                733 drivers/gpu/drm/drm_mm.c 	DRM_MM_BUG_ON(node->mm != mm);
mm                737 drivers/gpu/drm/drm_mm.c 	mm->scan_active++;
mm                753 drivers/gpu/drm/drm_mm.c 	if (mm->color_adjust)
mm                754 drivers/gpu/drm/drm_mm.c 		mm->color_adjust(hole, scan->color, &col_start, &col_end);
mm                820 drivers/gpu/drm/drm_mm.c 	DRM_MM_BUG_ON(node->mm != scan->mm);
mm                824 drivers/gpu/drm/drm_mm.c 	DRM_MM_BUG_ON(!node->mm->scan_active);
mm                825 drivers/gpu/drm/drm_mm.c 	node->mm->scan_active--;
mm                858 drivers/gpu/drm/drm_mm.c 	struct drm_mm *mm = scan->mm;
mm                862 drivers/gpu/drm/drm_mm.c 	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));
mm                864 drivers/gpu/drm/drm_mm.c 	if (!mm->color_adjust)
mm                872 drivers/gpu/drm/drm_mm.c 	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
mm                882 drivers/gpu/drm/drm_mm.c 	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
mm                883 drivers/gpu/drm/drm_mm.c 	if (unlikely(&hole->hole_stack == &mm->hole_stack))
mm                889 drivers/gpu/drm/drm_mm.c 	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
mm                907 drivers/gpu/drm/drm_mm.c void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
mm                911 drivers/gpu/drm/drm_mm.c 	mm->color_adjust = NULL;
mm                913 drivers/gpu/drm/drm_mm.c 	INIT_LIST_HEAD(&mm->hole_stack);
mm                914 drivers/gpu/drm/drm_mm.c 	mm->interval_tree = RB_ROOT_CACHED;
mm                915 drivers/gpu/drm/drm_mm.c 	mm->holes_size = RB_ROOT_CACHED;
mm                916 drivers/gpu/drm/drm_mm.c 	mm->holes_addr = RB_ROOT;
mm                919 drivers/gpu/drm/drm_mm.c 	INIT_LIST_HEAD(&mm->head_node.node_list);
mm                920 drivers/gpu/drm/drm_mm.c 	mm->head_node.allocated = false;
mm                921 drivers/gpu/drm/drm_mm.c 	mm->head_node.mm = mm;
mm                922 drivers/gpu/drm/drm_mm.c 	mm->head_node.start = start + size;
mm                923 drivers/gpu/drm/drm_mm.c 	mm->head_node.size = -size;
mm                924 drivers/gpu/drm/drm_mm.c 	add_hole(&mm->head_node);
mm                926 drivers/gpu/drm/drm_mm.c 	mm->scan_active = 0;
mm                937 drivers/gpu/drm/drm_mm.c void drm_mm_takedown(struct drm_mm *mm)
mm                939 drivers/gpu/drm/drm_mm.c 	if (WARN(!drm_mm_clean(mm),
mm                941 drivers/gpu/drm/drm_mm.c 		show_leaks(mm);
mm                963 drivers/gpu/drm/drm_mm.c void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
mm                968 drivers/gpu/drm/drm_mm.c 	total_free += drm_mm_dump_hole(p, &mm->head_node);
mm                970 drivers/gpu/drm/drm_mm.c 	drm_mm_for_each_node(entry, mm) {
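
The drm_mm.c entries above are the DRM range allocator itself: a `struct drm_mm` tracks the holes in a caller-defined [start, start + size) range, backed by the interval tree and the size/address hole rbtrees initialised in drm_mm_init(). A minimal usage sketch against the API as listed (error handling trimmed):

    struct drm_mm mm;
    struct drm_mm_node node = {};
    int err;

    drm_mm_init(&mm, 0, SZ_16M);            /* manage [0, 16 MiB) */

    /* Search the whole range for a 4 KiB hole; the insert mode picks
     * the heuristic (best fit, lowest address, ...). */
    err = drm_mm_insert_node_in_range(&mm, &node, SZ_4K, 0, 0,
                                      0, U64_MAX, DRM_MM_INSERT_BEST);
    if (!err) {
            /* node.start/node.size describe the allocation */
            drm_mm_remove_node(&node);
    }

    drm_mm_takedown(&mm);                   /* WARNs and dumps leaked nodes */

drm_mm_reserve_node() is the other entry point: the caller fills in node->start, node->size, and node->color itself, and the allocator only checks that the exact range is free.
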
mm                151 drivers/gpu/drm/etnaviv/etnaviv_drv.c 	drm_mm_print(&mmu_context->mm, &p);
mm                664 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	might_lock_read(&current->mm->mmap_sem);
mm                666 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	if (userptr->mm != current->mm)
mm                737 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	etnaviv_obj->userptr.mm = current->mm;
mm                 19 drivers/gpu/drm/etnaviv/etnaviv_gem.h 	struct mm_struct *mm;
mm                 37 drivers/gpu/drm/etnaviv/etnaviv_iommu.c 	drm_mm_takedown(&context->mm);
mm                161 drivers/gpu/drm/etnaviv/etnaviv_iommu.c 	drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
mm                 52 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c 	drm_mm_takedown(&context->mm);
mm                297 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c 	drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);
mm                154 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		ret = drm_mm_insert_node_in_range(&context->mm, node,
mm                160 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
mm                166 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 			if (!free->vram_node.mm)
mm                226 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
mm                292 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 	if (mapping->vram_node.mm == &context->mm)
mm                 77 drivers/gpu/drm/etnaviv/etnaviv_mmu.h 	struct drm_mm mm;
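
The etnaviv lines show both halves of a typical drm_mm consumer: a plain insert first and, when the address space is full, the eviction scan set up at etnaviv_mmu.c line 160. A rough sketch of the scan protocol those calls imply; the wrapper struct, the extern placeholders, and the list names stand in for the driver's own bookkeeping (etnaviv links scanned mappings through a scan_node list head):

    struct mapping {
            struct drm_mm_node vram_node;
            struct list_head mmu_node;      /* on the driver's mapping list */
            struct list_head scan_node;     /* on the temporary scan list */
    };

    /* Placeholders: the drm_mm being carved up, the request size,
     * and the list of live mappings. */
    extern struct drm_mm *mm;
    extern u64 size;
    extern struct list_head mappings;

    struct drm_mm_scan scan;
    struct mapping *m, *n;
    LIST_HEAD(scanned);
    bool found = false;

    drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_LOW);

    /* Feed allocated nodes until a big-enough hole would open up. */
    list_for_each_entry(m, &mappings, mmu_node) {
            list_add(&m->scan_node, &scanned);
            if (drm_mm_scan_add_block(&scan, &m->vram_node)) {
                    found = true;
                    break;
            }
    }

    /* Every added block must be removed from the scan before the mm
     * may be modified again; a true return marks a block inside the
     * chosen hole, i.e. one that really has to be evicted. */
    list_for_each_entry_safe(m, n, &scanned, scan_node)
            if (!drm_mm_scan_remove_block(&scan, &m->vram_node) || !found)
                    list_del_init(&m->scan_node);

    /* What remains on the list gets unmapped, its vram_node freed with
     * drm_mm_remove_node(), and the original insert is then retried. */
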
mm               10513 drivers/gpu/drm/i915/display/intel_display.c 		base = sg_dma_address(obj->mm.pages->sgl);
mm               15564 drivers/gpu/drm/i915/display/intel_display.c 	if (obj->userptr.mm) {
mm                549 drivers/gpu/drm/i915/display/intel_fbc.c 	if (drm_mm_initialized(&dev_priv->mm.stolen))
mm               1323 drivers/gpu/drm/i915/display/intel_overlay.c 		overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
mm                 22 drivers/gpu/drm/i915/gem/i915_gem_clflush.c 	drm_clflush_sg(obj->mm.pages);
mm                117 drivers/gpu/drm/i915/gem/i915_gem_clflush.c 	} else if (obj->mm.pages) {
mm                 39 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
mm                 43 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 	src = obj->mm.pages->sgl;
mm                 45 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 	for (i = 0; i < obj->mm.pages->nents; i++) {
mm                 92 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		obj->mm.dirty = true;
mm                153 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		obj->mm.dirty = true;
mm                501 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
mm                503 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		if (obj->mm.madv == I915_MADV_WILLNEED)
mm                504 drivers/gpu/drm/i915/gem/i915_gem_domain.c 			list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
mm                506 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
mm                798 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	obj->mm.dirty = true;
mm                964 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
mm                977 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			i915_vma_unpin((struct i915_vma *)cache->node.mm);
mm               1005 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		cache->node.mm = (void *)obj;
mm               1051 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				(&ggtt->vm.mm, &cache->node,
mm               1059 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			cache->node.mm = (void *)vma;
mm               1725 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	flush_workqueue(eb->i915->mm.userptr_wq);
mm               2066 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	spin_lock(&file_priv->mm.lock);
mm               2067 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	list_add_tail(&rq->client_link, &file_priv->mm.request_list);
mm               2068 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	spin_unlock(&file_priv->mm.lock);
mm                125 drivers/gpu/drm/i915/gem/i915_gem_internal.c 	obj->mm.madv = I915_MADV_DONTNEED;
mm                145 drivers/gpu/drm/i915/gem/i915_gem_internal.c 	obj->mm.dirty = false;
mm                146 drivers/gpu/drm/i915/gem/i915_gem_internal.c 	obj->mm.madv = I915_MADV_WILLNEED;
mm                 88 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		struct mm_struct *mm = current->mm;
mm                 91 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		if (down_write_killable(&mm->mmap_sem)) {
mm                 95 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		vma = find_vma(mm, addr);
mm                101 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		up_write(&mm->mmap_sem);
mm                322 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		obj->mm.dirty = true;
mm                 52 drivers/gpu/drm/i915/gem/i915_gem_object.c 	mutex_init(&obj->mm.lock);
mm                 57 drivers/gpu/drm/i915/gem/i915_gem_object.c 	INIT_LIST_HEAD(&obj->mm.link);
mm                 65 drivers/gpu/drm/i915/gem/i915_gem_object.c 	obj->mm.madv = I915_MADV_WILLNEED;
mm                 66 drivers/gpu/drm/i915/gem/i915_gem_object.c 	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mm                 67 drivers/gpu/drm/i915/gem/i915_gem_object.c 	mutex_init(&obj->mm.get_page.lock);
mm                146 drivers/gpu/drm/i915/gem/i915_gem_object.c 	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
mm                147 drivers/gpu/drm/i915/gem/i915_gem_object.c 	atomic_dec(&i915->mm.free_count);
mm                178 drivers/gpu/drm/i915/gem/i915_gem_object.c 		atomic_set(&obj->mm.pages_pin_count, 0);
mm                199 drivers/gpu/drm/i915/gem/i915_gem_object.c 	struct llist_node *freed = llist_del_all(&i915->mm.free_list);
mm                208 drivers/gpu/drm/i915/gem/i915_gem_object.c 		container_of(work, struct drm_i915_private, mm.free_work);
mm                226 drivers/gpu/drm/i915/gem/i915_gem_object.c 	atomic_inc(&i915->mm.free_count);
mm                247 drivers/gpu/drm/i915/gem/i915_gem_object.c 	if (llist_add(&obj->freed, &i915->mm.free_list))
mm                248 drivers/gpu/drm/i915/gem/i915_gem_object.c 		queue_work(i915->wq, &i915->mm.free_work);
mm                303 drivers/gpu/drm/i915/gem/i915_gem_object.c 	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
mm                245 drivers/gpu/drm/i915/gem/i915_gem_object.h 	might_lock(&obj->mm.lock);
mm                247 drivers/gpu/drm/i915/gem/i915_gem_object.h 	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
mm                256 drivers/gpu/drm/i915/gem/i915_gem_object.h 	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
mm                264 drivers/gpu/drm/i915/gem/i915_gem_object.h 	atomic_inc(&obj->mm.pages_pin_count);
mm                270 drivers/gpu/drm/i915/gem/i915_gem_object.h 	return atomic_read(&obj->mm.pages_pin_count);
mm                279 drivers/gpu/drm/i915/gem/i915_gem_object.h 	atomic_dec(&obj->mm.pages_pin_count);
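
The i915_gem_object.h inlines above spell out the page-pinning contract behind most of the obj->mm usage in this listing: obj->mm.pages may be reaped by the shrinker unless obj->mm.pages_pin_count is elevated, so the fast path is an atomic_inc_not_zero() and only the first pin takes obj->mm.lock to populate the pages. A sketch of the calling pattern (i915-internal API, as it stands in this kernel version):

    struct scatterlist *sg;
    int err;

    err = i915_gem_object_pin_pages(obj);   /* may allocate on first pin */
    if (err)
            return err;

    sg = obj->mm.pages->sgl;    /* stable: cannot be reaped while pinned */
    /* ... access the backing store ... */

    i915_gem_object_unpin_pages(obj);       /* drops pages_pin_count */
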
mm                225 drivers/gpu/drm/i915/gem/i915_gem_object_types.h 	} mm;
mm                234 drivers/gpu/drm/i915/gem/i915_gem_object_types.h 			struct i915_mm_struct *mm;
mm                 19 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	lockdep_assert_held(&obj->mm.lock);
mm                 29 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	obj->mm.get_page.sg_pos = pages->sgl;
mm                 30 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	obj->mm.get_page.sg_idx = 0;
mm                 32 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	obj->mm.pages = pages;
mm                 36 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		GEM_BUG_ON(obj->mm.quirked);
mm                 38 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		obj->mm.quirked = true;
mm                 42 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	obj->mm.page_sizes.phys = sg_page_sizes;
mm                 52 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	obj->mm.page_sizes.sg = 0;
mm                 54 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		if (obj->mm.page_sizes.phys & ~0u << i)
mm                 55 drivers/gpu/drm/i915/gem/i915_gem_pages.c 			obj->mm.page_sizes.sg |= BIT(i);
mm                 57 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
mm                 63 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
mm                 65 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		i915->mm.shrink_count++;
mm                 66 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		i915->mm.shrink_memory += obj->base.size;
mm                 68 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		if (obj->mm.madv != I915_MADV_WILLNEED)
mm                 69 drivers/gpu/drm/i915/gem/i915_gem_pages.c 			list = &i915->mm.purge_list;
mm                 71 drivers/gpu/drm/i915/gem/i915_gem_pages.c 			list = &i915->mm.shrink_list;
mm                 72 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		list_add_tail(&obj->mm.link, list);
mm                 74 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
mm                 82 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
mm                104 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	err = mutex_lock_interruptible(&obj->mm.lock);
mm                117 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	atomic_inc(&obj->mm.pages_pin_count);
mm                120 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	mutex_unlock(&obj->mm.lock);
mm                135 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	lockdep_assert_held(&obj->mm.lock);
mm                148 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
mm                149 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
mm                158 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	pages = fetch_and_zero(&obj->mm.pages);
mm                164 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	if (obj->mm.mapping) {
mm                167 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		ptr = page_mask_bits(obj->mm.mapping);
mm                173 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		obj->mm.mapping = NULL;
mm                177 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
mm                194 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	mutex_lock_nested(&obj->mm.lock, subclass);
mm                195 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
mm                221 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	mutex_unlock(&obj->mm.lock);
mm                231 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	struct sg_table *sgt = obj->mm.pages;
mm                288 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	err = mutex_lock_interruptible(&obj->mm.lock);
mm                295 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
mm                305 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		atomic_inc(&obj->mm.pages_pin_count);
mm                310 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
mm                322 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		ptr = obj->mm.mapping = NULL;
mm                332 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		obj->mm.mapping = page_pack_bits(ptr, type);
mm                336 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	mutex_unlock(&obj->mm.lock);
mm                340 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	atomic_dec(&obj->mm.pages_pin_count);
mm                357 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	obj->mm.dirty = true;
mm                362 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
mm                378 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
mm                506 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	if (!obj->mm.dirty)
mm                102 drivers/gpu/drm/i915/gem/i915_gem_phys.c 	if (obj->mm.dirty) {
mm                121 drivers/gpu/drm/i915/gem/i915_gem_phys.c 			if (obj->mm.madv == I915_MADV_WILLNEED)
mm                127 drivers/gpu/drm/i915/gem/i915_gem_phys.c 		obj->mm.dirty = false;
mm                168 drivers/gpu/drm/i915/gem/i915_gem_phys.c 	mutex_lock(&obj->mm.lock);
mm                170 drivers/gpu/drm/i915/gem/i915_gem_phys.c 	if (obj->mm.madv != I915_MADV_WILLNEED) {
mm                175 drivers/gpu/drm/i915/gem/i915_gem_phys.c 	if (obj->mm.quirked) {
mm                180 drivers/gpu/drm/i915/gem/i915_gem_phys.c 	if (obj->mm.mapping) {
mm                198 drivers/gpu/drm/i915/gem/i915_gem_phys.c 	mutex_unlock(&obj->mm.lock);
mm                209 drivers/gpu/drm/i915/gem/i915_gem_phys.c 	mutex_unlock(&obj->mm.lock);
mm                176 drivers/gpu/drm/i915/gem/i915_gem_pm.c 					mm.link);
mm                183 drivers/gpu/drm/i915/gem/i915_gem_pm.c 		&i915->mm.shrink_list,
mm                184 drivers/gpu/drm/i915/gem/i915_gem_pm.c 		&i915->mm.purge_list,
mm                209 drivers/gpu/drm/i915/gem/i915_gem_pm.c 	spin_lock_irqsave(&i915->mm.obj_lock, flags);
mm                214 drivers/gpu/drm/i915/gem/i915_gem_pm.c 			list_move_tail(&obj->mm.link, &keep);
mm                220 drivers/gpu/drm/i915/gem/i915_gem_pm.c 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
mm                227 drivers/gpu/drm/i915/gem/i915_gem_pm.c 			spin_lock_irqsave(&i915->mm.obj_lock, flags);
mm                232 drivers/gpu/drm/i915/gem/i915_gem_pm.c 	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
mm                226 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	obj->mm.madv = __I915_MADV_PURGED;
mm                227 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	obj->mm.pages = ERR_PTR(-EFAULT);
mm                280 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
mm                282 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	if (obj->mm.madv == I915_MADV_DONTNEED)
mm                283 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		obj->mm.dirty = false;
mm                311 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		if (obj->mm.dirty)
mm                314 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		if (obj->mm.madv == I915_MADV_WILLNEED)
mm                322 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	obj->mm.dirty = false;
mm                352 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	if (obj->mm.madv != I915_MADV_WILLNEED)
mm                446 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	if (i915->mm.gemfs)
mm                447 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
mm                 72 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
mm                 88 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
mm                109 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	switch (obj->mm.madv) {
mm                155 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		{ &i915->mm.purge_list, ~0u },
mm                157 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 			&i915->mm.shrink_list,
mm                228 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
mm                232 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 						       mm.link))) {
mm                233 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 			list_move_tail(&obj->mm.link, &still_in_list);
mm                236 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 			    !is_vmalloc_addr(obj->mm.mapping))
mm                253 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
mm                257 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 				mutex_lock_nested(&obj->mm.lock,
mm                263 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 				mutex_unlock(&obj->mm.lock);
mm                269 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 			spin_lock_irqsave(&i915->mm.obj_lock, flags);
mm                272 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
mm                318 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
mm                322 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
mm                323 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	num_objects = READ_ONCE(i915->mm.shrink_count);
mm                335 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		i915->mm.shrinker.batch =
mm                336 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 			max((i915->mm.shrinker.batch + avg) >> 1,
mm                347 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
mm                385 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		container_of(nb, struct drm_i915_private, mm.oom_notifier);
mm                403 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	spin_lock_irqsave(&i915->mm.obj_lock, flags);
mm                404 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
mm                410 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
mm                425 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
mm                464 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
mm                465 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
mm                466 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
mm                467 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	i915->mm.shrinker.batch = 4096;
mm                468 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	WARN_ON(register_shrinker(&i915->mm.shrinker));
mm                470 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
mm                471 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));
mm                473 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
mm                474 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
mm                479 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
mm                480 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
mm                481 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	unregister_shrinker(&i915->mm.shrinker);
mm                532 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	if (!list_empty(&obj->mm.link)) { /* pinned by caller */
mm                536 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
mm                537 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		GEM_BUG_ON(list_empty(&obj->mm.link));
mm                539 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		list_del_init(&obj->mm.link);
mm                540 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		i915->mm.shrink_count--;
mm                541 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		i915->mm.shrink_memory -= obj->base.size;
mm                543 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
mm                551 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	GEM_BUG_ON(!list_empty(&obj->mm.link));
mm                557 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
mm                560 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		list_add_tail(&obj->mm.link, head);
mm                561 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		i915->mm.shrink_count++;
mm                562 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		i915->mm.shrink_memory += obj->base.size;
mm                564 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
mm                571 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 					  &obj_to_i915(obj)->mm.shrink_list);
mm                577 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 					  &obj_to_i915(obj)->mm.purge_list);
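
i915_gem_shrinker.c embeds a struct shrinker in dev_priv->mm and registers it with the core MM alongside OOM and vmap-purge notifiers. The generic shape of that registration, as the API stands in this kernel (register_shrinker() takes the embedded struct, and container_of() recovers the owner inside the callbacks):

    static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
    {
            /* container_of(s, ...) back to the owning device, as i915
             * does; return an estimate of freeable pages, 0 for none. */
            return 0;
    }

    static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
    {
            /* free up to sc->nr_to_scan objects' worth of pages and
             * return the count, or SHRINK_STOP to give up. */
            return SHRINK_STOP;
    }

    static struct shrinker sh = {
            .count_objects = my_count,
            .scan_objects  = my_scan,
            .seeks         = DEFAULT_SEEKS,
            .batch         = 4096,
    };

    WARN_ON(register_shrinker(&sh));
    /* ... */
    unregister_shrinker(&sh);
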
mm                 34 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
mm                 41 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	mutex_lock(&dev_priv->mm.stolen_lock);
mm                 42 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
mm                 45 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	mutex_unlock(&dev_priv->mm.stolen_lock);
mm                 61 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	mutex_lock(&dev_priv->mm.stolen_lock);
mm                 63 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	mutex_unlock(&dev_priv->mm.stolen_lock);
mm                155 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
mm                158 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	drm_mm_takedown(&dev_priv->mm.stolen);
mm                363 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	mutex_init(&dev_priv->mm.stolen_lock);
mm                468 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);
mm                584 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
mm                621 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
mm                641 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	mutex_lock(&dev_priv->mm.stolen_lock);
mm                642 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
mm                643 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	mutex_unlock(&dev_priv->mm.stolen_lock);
mm                687 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	vma->pages = obj->mm.pages;
mm                 48 drivers/gpu/drm/i915/gem/i915_gem_throttle.c 	spin_lock(&file_priv->mm.lock);
mm                 49 drivers/gpu/drm/i915/gem/i915_gem_throttle.c 	list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
mm                 62 drivers/gpu/drm/i915/gem/i915_gem_throttle.c 	spin_unlock(&file_priv->mm.lock);
mm                250 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	mutex_lock(&obj->mm.lock);
mm                252 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	    obj->mm.madv == I915_MADV_WILLNEED &&
mm                255 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 			GEM_BUG_ON(!obj->mm.quirked);
mm                257 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 			obj->mm.quirked = false;
mm                260 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 			GEM_BUG_ON(obj->mm.quirked);
mm                262 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 			obj->mm.quirked = true;
mm                265 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	mutex_unlock(&obj->mm.lock);
mm                343 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 			args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x;
mm                345 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 			args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y;
mm                418 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
mm                421 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
mm                 21 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	struct mm_struct *mm;
mm                 37 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	struct i915_mm_struct *mm;
mm                133 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 			unlock = &mn->mm->i915->drm.struct_mutex;
mm                184 drivers/gpu/drm/i915/gem/i915_gem_userptr.c i915_mmu_notifier_create(struct i915_mm_struct *mm)
mm                195 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mn->mm = mm;
mm                216 drivers/gpu/drm/i915/gem/i915_gem_userptr.c i915_mmu_notifier_find(struct i915_mm_struct *mm)
mm                221 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mn = mm->mn;
mm                225 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mn = i915_mmu_notifier_create(mm);
mm                229 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	down_write(&mm->mm->mmap_sem);
mm                230 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mutex_lock(&mm->i915->mm_lock);
mm                231 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	if (mm->mn == NULL && !err) {
mm                233 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		err = __mmu_notifier_register(&mn->mn, mm->mm);
mm                236 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 			mm->mn = fetch_and_zero(&mn);
mm                238 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	} else if (mm->mn) {
mm                245 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mutex_unlock(&mm->i915->mm_lock);
mm                246 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	up_write(&mm->mm->mmap_sem);
mm                251 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	return err ? ERR_PTR(err) : mm->mn;
mm                264 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	if (WARN_ON(obj->userptr.mm == NULL))
mm                267 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mn = i915_mmu_notifier_find(obj->userptr.mm);
mm                287 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		       struct mm_struct *mm)
mm                292 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mmu_notifier_unregister(&mn->mn, mm);
mm                323 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		       struct mm_struct *mm)
mm                332 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	struct i915_mm_struct *mm;
mm                335 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
mm                336 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		if (mm->mm == real)
mm                337 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 			return mm;
mm                346 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	struct i915_mm_struct *mm;
mm                360 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mm = __i915_mm_struct_find(dev_priv, current->mm);
mm                361 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	if (mm == NULL) {
mm                362 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
mm                363 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		if (mm == NULL) {
mm                368 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		kref_init(&mm->kref);
mm                369 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		mm->i915 = to_i915(obj->base.dev);
mm                371 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		mm->mm = current->mm;
mm                372 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		mmgrab(current->mm);
mm                374 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		mm->mn = NULL;
mm                378 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 			 &mm->node, (unsigned long)mm->mm);
mm                380 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		kref_get(&mm->kref);
mm                382 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	obj->userptr.mm = mm;
mm                391 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
mm                392 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	i915_mmu_notifier_free(mm->mn, mm->mm);
mm                393 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mmdrop(mm->mm);
mm                394 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	kfree(mm);
mm                400 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
mm                403 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	hash_del(&mm->node);
mm                404 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mutex_unlock(&mm->i915->mm_lock);
mm                406 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
mm                407 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	queue_work(mm->i915->mm.userptr_wq, &mm->work);
mm                413 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	if (obj->userptr.mm == NULL)
mm                416 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	kref_put_mutex(&obj->userptr.mm->kref,
mm                419 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	obj->userptr.mm = NULL;
mm                486 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		struct mm_struct *mm = obj->userptr.mm->mm;
mm                493 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		if (mmget_not_zero(mm)) {
mm                494 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 			down_read(&mm->mmap_sem);
mm                497 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 					(work->task, mm,
mm                507 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 			up_read(&mm->mmap_sem);
mm                508 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 			mmput(mm);
mm                512 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mutex_lock(&obj->mm.lock);
mm                529 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mutex_unlock(&obj->mm.lock);
mm                575 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);
mm                583 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	struct mm_struct *mm = obj->userptr.mm->mm;
mm                617 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	if (mm == current->mm) {
mm                680 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		obj->mm.dirty = false;
mm                683 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		if (obj->mm.dirty && trylock_page(page)) {
mm                709 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	obj->mm.dirty = false;
mm                858 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	dev_priv->mm.userptr_wq =
mm                862 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	if (!dev_priv->mm.userptr_wq)
mm                870 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	destroy_workqueue(dev_priv->mm.userptr_wq);
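
The userptr entries juggle the two kinds of mm_struct reference deliberately: mmgrab()/mmdrop() (file lines 372 and 393) keep only the struct alive so the hash-table entry stays valid, while the page-fetch worker upgrades with mmget_not_zero()/mmput() (file lines 493 and 508) so the address space itself cannot be torn down under get_user_pages. The split in isolation:

    struct mm_struct *mm = current->mm;

    mmgrab(mm);                     /* struct mm_struct may now outlive exit() */

    /* ... later, possibly from a workqueue ... */
    if (mmget_not_zero(mm)) {       /* address space still live? pin it */
            down_read(&mm->mmap_sem);
            /* get_user_pages_remote(...) against this mm */
            up_read(&mm->mmap_sem);
            mmput(mm);
    }

    mmdrop(mm);                     /* balances the mmgrab */
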
mm                 36 drivers/gpu/drm/i915/gem/i915_gemfs.c 	i915->mm.gemfs = gemfs;
mm                 43 drivers/gpu/drm/i915/gem/i915_gemfs.c 	kern_unmount(i915->mm.gemfs);
mm                 84 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c 	obj->mm.dirty = false;
mm                 59 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	unsigned int page_mask = obj->mm.page_mask;
mm                116 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	obj->mm.madv = I915_MADV_DONTNEED;
mm                118 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
mm                137 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	obj->mm.dirty = false;
mm                138 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	obj->mm.madv = I915_MADV_WILLNEED;
mm                175 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	obj->mm.page_mask = page_mask;
mm                230 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	obj->mm.madv = I915_MADV_DONTNEED;
mm                264 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	obj->mm.madv = I915_MADV_DONTNEED;
mm                283 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	obj->mm.dirty = false;
mm                284 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	obj->mm.madv = I915_MADV_WILLNEED;
mm                350 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
mm                352 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
mm                356 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
mm                358 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
mm                362 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (obj->mm.page_sizes.gtt) {
mm                364 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		       obj->mm.page_sizes.gtt);
mm                493 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		obj->mm.page_sizes.sg = page_size;
mm                814 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
mm               1008 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
mm               1011 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
mm               1056 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
mm               1157 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			obj->mm.page_sizes.sg = page_sizes;
mm               1215 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
mm               1245 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	return i915->mm.gemfs && has_transparent_hugepage();
mm               1284 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
mm               1450 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	struct vfsmount *gemfs = i915->mm.gemfs;
mm               1463 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915->mm.gemfs = NULL;
mm               1499 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915->mm.gemfs = gemfs;
mm               1541 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
mm               1573 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
mm                 68 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c 		err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
mm                 69 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c 						       &obj->mm.page_sizes,
mm               1152 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		__drm_mm_interval_first(&ctx->vm->mm,
mm                244 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			tile.swizzle = i915->mm.bit_6_swizzle_x;
mm                247 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			tile.swizzle = i915->mm.bit_6_swizzle_y;
mm                422 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
mm                434 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
mm                438 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		err = drm_mm_reserve_node(mm, &resv);
mm                 42 drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c 	if (!atomic_read(&obj->mm.pages_pin_count)) {
mm                130 drivers/gpu/drm/i915/gt/intel_context.c 	vma->obj->mm.dirty = true;
mm                522 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	return sg_page(obj->mm.pages->sgl);
mm                 24 drivers/gpu/drm/i915/gt/selftest_timeline.c 	return sg_page(obj->mm.pages->sgl);
mm                418 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 		.pages = obj->mm.pages,
mm                589 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	struct sg_table *pages = uc_fw->obj->mm.pages;
mm               1691 drivers/gpu/drm/i915/gvt/cmd_parser.c static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
mm               1699 drivers/gpu/drm/i915/gvt/cmd_parser.c 		gpa = intel_vgpu_gma_to_gpa(mm, gma);
mm               1742 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
mm               1763 drivers/gpu/drm/i915/gvt/cmd_parser.c 		if (copy_gma_to_hva(s->vgpu, mm,
mm               1825 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
mm               1881 drivers/gpu/drm/i915/gvt/cmd_parser.c 	ret = copy_gma_to_hva(s->vgpu, mm,
mm                549 drivers/gpu/drm/i915/gvt/gtt.c static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
mm                553 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
mm                555 drivers/gpu/drm/i915/gvt/gtt.c 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
mm                557 drivers/gpu/drm/i915/gvt/gtt.c 	entry->type = mm->ppgtt_mm.root_entry_type;
mm                558 drivers/gpu/drm/i915/gvt/gtt.c 	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
mm                559 drivers/gpu/drm/i915/gvt/gtt.c 			   mm->ppgtt_mm.shadow_pdps,
mm                560 drivers/gpu/drm/i915/gvt/gtt.c 			   entry, index, false, 0, mm->vgpu);
mm                564 drivers/gpu/drm/i915/gvt/gtt.c static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
mm                567 drivers/gpu/drm/i915/gvt/gtt.c 	_ppgtt_get_root_entry(mm, entry, index, true);
mm                570 drivers/gpu/drm/i915/gvt/gtt.c static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
mm                573 drivers/gpu/drm/i915/gvt/gtt.c 	_ppgtt_get_root_entry(mm, entry, index, false);
mm                576 drivers/gpu/drm/i915/gvt/gtt.c static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
mm                580 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
mm                582 drivers/gpu/drm/i915/gvt/gtt.c 	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
mm                583 drivers/gpu/drm/i915/gvt/gtt.c 			   mm->ppgtt_mm.shadow_pdps,
mm                584 drivers/gpu/drm/i915/gvt/gtt.c 			   entry, index, false, 0, mm->vgpu);
mm                587 drivers/gpu/drm/i915/gvt/gtt.c static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
mm                590 drivers/gpu/drm/i915/gvt/gtt.c 	_ppgtt_set_root_entry(mm, entry, index, true);
mm                593 drivers/gpu/drm/i915/gvt/gtt.c static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
mm                596 drivers/gpu/drm/i915/gvt/gtt.c 	_ppgtt_set_root_entry(mm, entry, index, false);
mm                599 drivers/gpu/drm/i915/gvt/gtt.c static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
mm                602 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
mm                604 drivers/gpu/drm/i915/gvt/gtt.c 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
mm                607 drivers/gpu/drm/i915/gvt/gtt.c 	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
mm                608 drivers/gpu/drm/i915/gvt/gtt.c 			   false, 0, mm->vgpu);
mm                611 drivers/gpu/drm/i915/gvt/gtt.c static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
mm                614 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
mm                616 drivers/gpu/drm/i915/gvt/gtt.c 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
mm                618 drivers/gpu/drm/i915/gvt/gtt.c 	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
mm                619 drivers/gpu/drm/i915/gvt/gtt.c 			   false, 0, mm->vgpu);
mm                622 drivers/gpu/drm/i915/gvt/gtt.c static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
mm                625 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
mm                627 drivers/gpu/drm/i915/gvt/gtt.c 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
mm                629 drivers/gpu/drm/i915/gvt/gtt.c 	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
mm                632 drivers/gpu/drm/i915/gvt/gtt.c static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
mm                635 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
mm                637 drivers/gpu/drm/i915/gvt/gtt.c 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
mm                639 drivers/gpu/drm/i915/gvt/gtt.c 	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
mm               1774 drivers/gpu/drm/i915/gvt/gtt.c static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
mm               1776 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu *vgpu = mm->vgpu;
mm               1783 drivers/gpu/drm/i915/gvt/gtt.c 	if (!mm->ppgtt_mm.shadowed)
mm               1786 drivers/gpu/drm/i915/gvt/gtt.c 	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
mm               1787 drivers/gpu/drm/i915/gvt/gtt.c 		ppgtt_get_shadow_root_entry(mm, &se, index);
mm               1794 drivers/gpu/drm/i915/gvt/gtt.c 		ppgtt_set_shadow_root_entry(mm, &se, index);
mm               1800 drivers/gpu/drm/i915/gvt/gtt.c 	mm->ppgtt_mm.shadowed = false;
mm               1804 drivers/gpu/drm/i915/gvt/gtt.c static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
mm               1806 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu *vgpu = mm->vgpu;
mm               1814 drivers/gpu/drm/i915/gvt/gtt.c 	if (mm->ppgtt_mm.shadowed)
mm               1817 drivers/gpu/drm/i915/gvt/gtt.c 	mm->ppgtt_mm.shadowed = true;
mm               1819 drivers/gpu/drm/i915/gvt/gtt.c 	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
mm               1820 drivers/gpu/drm/i915/gvt/gtt.c 		ppgtt_get_guest_root_entry(mm, &ge, index);
mm               1835 drivers/gpu/drm/i915/gvt/gtt.c 		ppgtt_set_shadow_root_entry(mm, &se, index);
mm               1843 drivers/gpu/drm/i915/gvt/gtt.c 	invalidate_ppgtt_mm(mm);
mm               1849 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu_mm *mm;
mm               1851 drivers/gpu/drm/i915/gvt/gtt.c 	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
mm               1852 drivers/gpu/drm/i915/gvt/gtt.c 	if (!mm)
mm               1855 drivers/gpu/drm/i915/gvt/gtt.c 	mm->vgpu = vgpu;
mm               1856 drivers/gpu/drm/i915/gvt/gtt.c 	kref_init(&mm->ref);
mm               1857 drivers/gpu/drm/i915/gvt/gtt.c 	atomic_set(&mm->pincount, 0);
mm               1859 drivers/gpu/drm/i915/gvt/gtt.c 	return mm;
mm               1862 drivers/gpu/drm/i915/gvt/gtt.c static void vgpu_free_mm(struct intel_vgpu_mm *mm)
mm               1864 drivers/gpu/drm/i915/gvt/gtt.c 	kfree(mm);
mm               1882 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu_mm *mm;
mm               1885 drivers/gpu/drm/i915/gvt/gtt.c 	mm = vgpu_alloc_mm(vgpu);
mm               1886 drivers/gpu/drm/i915/gvt/gtt.c 	if (!mm)
mm               1889 drivers/gpu/drm/i915/gvt/gtt.c 	mm->type = INTEL_GVT_MM_PPGTT;
mm               1893 drivers/gpu/drm/i915/gvt/gtt.c 	mm->ppgtt_mm.root_entry_type = root_entry_type;
mm               1895 drivers/gpu/drm/i915/gvt/gtt.c 	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
mm               1896 drivers/gpu/drm/i915/gvt/gtt.c 	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
mm               1899 drivers/gpu/drm/i915/gvt/gtt.c 		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
mm               1901 drivers/gpu/drm/i915/gvt/gtt.c 		memcpy(mm->ppgtt_mm.guest_pdps, pdps,
mm               1902 drivers/gpu/drm/i915/gvt/gtt.c 		       sizeof(mm->ppgtt_mm.guest_pdps));
mm               1904 drivers/gpu/drm/i915/gvt/gtt.c 	ret = shadow_ppgtt_mm(mm);
mm               1907 drivers/gpu/drm/i915/gvt/gtt.c 		vgpu_free_mm(mm);
mm               1911 drivers/gpu/drm/i915/gvt/gtt.c 	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
mm               1914 drivers/gpu/drm/i915/gvt/gtt.c 	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
mm               1917 drivers/gpu/drm/i915/gvt/gtt.c 	return mm;
mm               1922 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu_mm *mm;
mm               1925 drivers/gpu/drm/i915/gvt/gtt.c 	mm = vgpu_alloc_mm(vgpu);
mm               1926 drivers/gpu/drm/i915/gvt/gtt.c 	if (!mm)
mm               1929 drivers/gpu/drm/i915/gvt/gtt.c 	mm->type = INTEL_GVT_MM_GGTT;
mm               1932 drivers/gpu/drm/i915/gvt/gtt.c 	mm->ggtt_mm.virtual_ggtt =
mm               1935 drivers/gpu/drm/i915/gvt/gtt.c 	if (!mm->ggtt_mm.virtual_ggtt) {
mm               1936 drivers/gpu/drm/i915/gvt/gtt.c 		vgpu_free_mm(mm);
mm               1940 drivers/gpu/drm/i915/gvt/gtt.c 	return mm;
mm               1952 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
mm               1954 drivers/gpu/drm/i915/gvt/gtt.c 	if (GEM_WARN_ON(atomic_read(&mm->pincount)))
mm               1957 drivers/gpu/drm/i915/gvt/gtt.c 	if (mm->type == INTEL_GVT_MM_PPGTT) {
mm               1958 drivers/gpu/drm/i915/gvt/gtt.c 		list_del(&mm->ppgtt_mm.list);
mm               1960 drivers/gpu/drm/i915/gvt/gtt.c 		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
mm               1961 drivers/gpu/drm/i915/gvt/gtt.c 		list_del(&mm->ppgtt_mm.lru_list);
mm               1962 drivers/gpu/drm/i915/gvt/gtt.c 		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
mm               1964 drivers/gpu/drm/i915/gvt/gtt.c 		invalidate_ppgtt_mm(mm);
mm               1966 drivers/gpu/drm/i915/gvt/gtt.c 		vfree(mm->ggtt_mm.virtual_ggtt);
mm               1969 drivers/gpu/drm/i915/gvt/gtt.c 	vgpu_free_mm(mm);
mm               1978 drivers/gpu/drm/i915/gvt/gtt.c void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
mm               1980 drivers/gpu/drm/i915/gvt/gtt.c 	atomic_dec_if_positive(&mm->pincount);
mm               1994 drivers/gpu/drm/i915/gvt/gtt.c int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
mm               1998 drivers/gpu/drm/i915/gvt/gtt.c 	atomic_inc(&mm->pincount);
mm               2000 drivers/gpu/drm/i915/gvt/gtt.c 	if (mm->type == INTEL_GVT_MM_PPGTT) {
mm               2001 drivers/gpu/drm/i915/gvt/gtt.c 		ret = shadow_ppgtt_mm(mm);
mm               2005 drivers/gpu/drm/i915/gvt/gtt.c 		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
mm               2006 drivers/gpu/drm/i915/gvt/gtt.c 		list_move_tail(&mm->ppgtt_mm.lru_list,
mm               2007 drivers/gpu/drm/i915/gvt/gtt.c 			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
mm               2008 drivers/gpu/drm/i915/gvt/gtt.c 		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
mm               2016 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu_mm *mm;
mm               2022 drivers/gpu/drm/i915/gvt/gtt.c 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
mm               2024 drivers/gpu/drm/i915/gvt/gtt.c 		if (atomic_read(&mm->pincount))
mm               2027 drivers/gpu/drm/i915/gvt/gtt.c 		list_del_init(&mm->ppgtt_mm.lru_list);
mm               2029 drivers/gpu/drm/i915/gvt/gtt.c 		invalidate_ppgtt_mm(mm);
mm               2039 drivers/gpu/drm/i915/gvt/gtt.c static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
mm               2042 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu *vgpu = mm->vgpu;
mm               2068 drivers/gpu/drm/i915/gvt/gtt.c unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
mm               2070 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu *vgpu = mm->vgpu;
mm               2080 drivers/gpu/drm/i915/gvt/gtt.c 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
mm               2081 drivers/gpu/drm/i915/gvt/gtt.c 		   mm->type != INTEL_GVT_MM_PPGTT);
mm               2083 drivers/gpu/drm/i915/gvt/gtt.c 	if (mm->type == INTEL_GVT_MM_GGTT) {
mm               2087 drivers/gpu/drm/i915/gvt/gtt.c 		ggtt_get_guest_entry(mm, &e,
mm               2095 drivers/gpu/drm/i915/gvt/gtt.c 		switch (mm->ppgtt_mm.root_entry_type) {
mm               2097 drivers/gpu/drm/i915/gvt/gtt.c 			ppgtt_get_shadow_root_entry(mm, &e, 0);
mm               2106 drivers/gpu/drm/i915/gvt/gtt.c 			ppgtt_get_shadow_root_entry(mm, &e,
mm               2119 drivers/gpu/drm/i915/gvt/gtt.c 			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
mm               2133 drivers/gpu/drm/i915/gvt/gtt.c 				    mm->ppgtt_mm.root_entry_type, gma, gpa);
mm               2138 drivers/gpu/drm/i915/gvt/gtt.c 	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
mm               2483 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu_mm *mm;
mm               2486 drivers/gpu/drm/i915/gvt/gtt.c 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
mm               2487 drivers/gpu/drm/i915/gvt/gtt.c 		intel_vgpu_destroy_mm(mm);
mm               2598 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu_mm *mm;
mm               2602 drivers/gpu/drm/i915/gvt/gtt.c 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
mm               2604 drivers/gpu/drm/i915/gvt/gtt.c 		switch (mm->ppgtt_mm.root_entry_type) {
mm               2606 drivers/gpu/drm/i915/gvt/gtt.c 			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
mm               2607 drivers/gpu/drm/i915/gvt/gtt.c 				return mm;
mm               2610 drivers/gpu/drm/i915/gvt/gtt.c 			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
mm               2611 drivers/gpu/drm/i915/gvt/gtt.c 				    sizeof(mm->ppgtt_mm.guest_pdps)))
mm               2612 drivers/gpu/drm/i915/gvt/gtt.c 				return mm;
mm               2635 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu_mm *mm;
mm               2637 drivers/gpu/drm/i915/gvt/gtt.c 	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
mm               2638 drivers/gpu/drm/i915/gvt/gtt.c 	if (mm) {
mm               2639 drivers/gpu/drm/i915/gvt/gtt.c 		intel_vgpu_mm_get(mm);
mm               2641 drivers/gpu/drm/i915/gvt/gtt.c 		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
mm               2642 drivers/gpu/drm/i915/gvt/gtt.c 		if (IS_ERR(mm))
mm               2645 drivers/gpu/drm/i915/gvt/gtt.c 	return mm;
mm               2660 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu_mm *mm;
mm               2662 drivers/gpu/drm/i915/gvt/gtt.c 	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
mm               2663 drivers/gpu/drm/i915/gvt/gtt.c 	if (!mm) {
mm               2667 drivers/gpu/drm/i915/gvt/gtt.c 	intel_vgpu_mm_put(mm);
mm               2756 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_vgpu_mm *mm;
mm               2759 drivers/gpu/drm/i915/gvt/gtt.c 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
mm               2760 drivers/gpu/drm/i915/gvt/gtt.c 		if (mm->type == INTEL_GVT_MM_PPGTT) {
mm               2762 drivers/gpu/drm/i915/gvt/gtt.c 			list_del_init(&mm->ppgtt_mm.lru_list);
mm               2764 drivers/gpu/drm/i915/gvt/gtt.c 			if (mm->ppgtt_mm.shadowed)
mm               2765 drivers/gpu/drm/i915/gvt/gtt.c 				invalidate_ppgtt_mm(mm);
mm                 87 drivers/gpu/drm/i915/gvt/gtt.h 	int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
mm                 88 drivers/gpu/drm/i915/gvt/gtt.h 	void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
mm                174 drivers/gpu/drm/i915/gvt/gtt.h static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
mm                176 drivers/gpu/drm/i915/gvt/gtt.h 	kref_get(&mm->ref);
mm                181 drivers/gpu/drm/i915/gvt/gtt.h static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
mm                183 drivers/gpu/drm/i915/gvt/gtt.h 	kref_put(&mm->ref, _intel_vgpu_mm_release);
mm                186 drivers/gpu/drm/i915/gvt/gtt.h static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
mm                188 drivers/gpu/drm/i915/gvt/gtt.h 	intel_vgpu_mm_put(mm);
mm                260 drivers/gpu/drm/i915/gvt/gtt.h int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
mm                262 drivers/gpu/drm/i915/gvt/gtt.h void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
mm                264 drivers/gpu/drm/i915/gvt/gtt.h unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
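
Annotation: the gtt.h entries pin down the lifetime rules for struct intel_vgpu_mm: lookups take a reference with kref_get(), users drop it with kref_put(), and only the final put runs _intel_vgpu_mm_release(), which the gtt.c entries above show unlinking the mm from its lists before freeing it. A hedged sketch of the same kref pattern combined with the find-or-create shape of intel_vgpu_get_ppgtt_mm(); struct demo_mm and the demo_mm_find callback are illustrative stand-ins:

#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_mm {
	struct kref ref;
	/* ... page-table state ... */
};

static void demo_mm_release(struct kref *ref)
{
	struct demo_mm *mm = container_of(ref, struct demo_mm, ref);

	/* last reference gone: safe to tear down and free */
	kfree(mm);
}

static void demo_mm_put(struct demo_mm *mm)
{
	kref_put(&mm->ref, demo_mm_release);
}

/* find-or-create, mirroring intel_vgpu_get_ppgtt_mm()'s flow */
static struct demo_mm *demo_mm_get(struct demo_mm *(*demo_mm_find)(void))
{
	struct demo_mm *mm = demo_mm_find();

	if (mm) {
		kref_get(&mm->ref);	/* found: take a new reference */
		return mm;
	}

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return ERR_PTR(-ENOMEM);
	kref_init(&mm->ref);		/* created with refcount 1 */
	return mm;
}
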
mm               1212 drivers/gpu/drm/i915/gvt/handlers.c 	struct intel_vgpu_mm *mm;
mm               1222 drivers/gpu/drm/i915/gvt/handlers.c 		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
mm               1223 drivers/gpu/drm/i915/gvt/handlers.c 		return PTR_ERR_OR_ZERO(mm);
mm               1769 drivers/gpu/drm/i915/gvt/kvmgt.c 	if (!kvm || kvm->mm != current->mm) {
mm               1968 drivers/gpu/drm/i915/gvt/kvmgt.c 	bool kthread = current->mm == NULL;
mm               1977 drivers/gpu/drm/i915/gvt/kvmgt.c 		if (!mmget_not_zero(kvm->mm))
mm               1979 drivers/gpu/drm/i915/gvt/kvmgt.c 		use_mm(kvm->mm);
mm               1988 drivers/gpu/drm/i915/gvt/kvmgt.c 		unuse_mm(kvm->mm);
mm               1989 drivers/gpu/drm/i915/gvt/kvmgt.c 		mmput(kvm->mm);
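
Annotation: the kvmgt.c entries capture how guest memory is reached from a kernel thread: a kthread has no mm of its own (current->mm == NULL), so it pins the VM's address space with mmget_not_zero(), adopts it with use_mm(), and reverses both with unuse_mm()/mmput() when done. A sketch of that borrow pattern as kernels of this vintage spell it (later releases renamed the pair to kthread_use_mm()/kthread_unuse_mm()):

#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

/* Perform user-address accesses against @mm, even from a kthread. */
static int demo_access_with_mm(struct mm_struct *mm)
{
	bool kthread = current->mm == NULL;

	if (kthread) {
		if (!mmget_not_zero(mm))
			return -EFAULT;	/* owner is already exiting */
		use_mm(mm);		/* adopt the address space */
	}

	/* ... copy_from_user()/copy_to_user() are valid here ... */

	if (kthread) {
		unuse_mm(mm);
		mmput(mm);		/* balance mmget_not_zero() */
	}
	return 0;
}
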
mm                367 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu_mm *mm = workload->shadow_mm;
mm                371 drivers/gpu/drm/i915/gvt/scheduler.c 	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
mm                372 drivers/gpu/drm/i915/gvt/scheduler.c 		px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
mm                382 drivers/gpu/drm/i915/gvt/scheduler.c 			px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
mm               1432 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu_mm *mm;
mm               1451 drivers/gpu/drm/i915/gvt/scheduler.c 	mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
mm               1452 drivers/gpu/drm/i915/gvt/scheduler.c 	if (IS_ERR(mm))
mm               1453 drivers/gpu/drm/i915/gvt/scheduler.c 		return PTR_ERR(mm);
mm               1455 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->shadow_mm = mm;
mm                 74 drivers/gpu/drm/i915/i915_buddy.c static void mark_free(struct i915_buddy_mm *mm,
mm                 81 drivers/gpu/drm/i915/i915_buddy.c 		 &mm->free_list[i915_buddy_block_order(block)]);
mm                 92 drivers/gpu/drm/i915/i915_buddy.c int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
mm                108 drivers/gpu/drm/i915/i915_buddy.c 	mm->size = size;
mm                109 drivers/gpu/drm/i915/i915_buddy.c 	mm->chunk_size = chunk_size;
mm                110 drivers/gpu/drm/i915/i915_buddy.c 	mm->max_order = ilog2(size) - ilog2(chunk_size);
mm                112 drivers/gpu/drm/i915/i915_buddy.c 	GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
mm                114 drivers/gpu/drm/i915/i915_buddy.c 	mm->free_list = kmalloc_array(mm->max_order + 1,
mm                117 drivers/gpu/drm/i915/i915_buddy.c 	if (!mm->free_list)
mm                120 drivers/gpu/drm/i915/i915_buddy.c 	for (i = 0; i <= mm->max_order; ++i)
mm                121 drivers/gpu/drm/i915/i915_buddy.c 		INIT_LIST_HEAD(&mm->free_list[i]);
mm                123 drivers/gpu/drm/i915/i915_buddy.c 	mm->n_roots = hweight64(size);
mm                125 drivers/gpu/drm/i915/i915_buddy.c 	mm->roots = kmalloc_array(mm->n_roots,
mm                128 drivers/gpu/drm/i915/i915_buddy.c 	if (!mm->roots)
mm                150 drivers/gpu/drm/i915/i915_buddy.c 		mark_free(mm, root);
mm                152 drivers/gpu/drm/i915/i915_buddy.c 		GEM_BUG_ON(i > mm->max_order);
mm                153 drivers/gpu/drm/i915/i915_buddy.c 		GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);
mm                155 drivers/gpu/drm/i915/i915_buddy.c 		mm->roots[i] = root;
mm                166 drivers/gpu/drm/i915/i915_buddy.c 		i915_block_free(mm->roots[i]);
mm                167 drivers/gpu/drm/i915/i915_buddy.c 	kfree(mm->roots);
mm                169 drivers/gpu/drm/i915/i915_buddy.c 	kfree(mm->free_list);
mm                173 drivers/gpu/drm/i915/i915_buddy.c void i915_buddy_fini(struct i915_buddy_mm *mm)
mm                177 drivers/gpu/drm/i915/i915_buddy.c 	for (i = 0; i < mm->n_roots; ++i) {
mm                178 drivers/gpu/drm/i915/i915_buddy.c 		GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
mm                179 drivers/gpu/drm/i915/i915_buddy.c 		i915_block_free(mm->roots[i]);
mm                182 drivers/gpu/drm/i915/i915_buddy.c 	kfree(mm->roots);
mm                183 drivers/gpu/drm/i915/i915_buddy.c 	kfree(mm->free_list);
mm                186 drivers/gpu/drm/i915/i915_buddy.c static int split_block(struct i915_buddy_mm *mm,
mm                200 drivers/gpu/drm/i915/i915_buddy.c 					offset + (mm->chunk_size << block_order));
mm                206 drivers/gpu/drm/i915/i915_buddy.c 	mark_free(mm, block->left);
mm                207 drivers/gpu/drm/i915/i915_buddy.c 	mark_free(mm, block->right);
mm                229 drivers/gpu/drm/i915/i915_buddy.c static void __i915_buddy_free(struct i915_buddy_mm *mm,
mm                250 drivers/gpu/drm/i915/i915_buddy.c 	mark_free(mm, block);
mm                253 drivers/gpu/drm/i915/i915_buddy.c void i915_buddy_free(struct i915_buddy_mm *mm,
mm                257 drivers/gpu/drm/i915/i915_buddy.c 	__i915_buddy_free(mm, block);
mm                260 drivers/gpu/drm/i915/i915_buddy.c void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
mm                265 drivers/gpu/drm/i915/i915_buddy.c 		i915_buddy_free(mm, block);
mm                278 drivers/gpu/drm/i915/i915_buddy.c i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
mm                284 drivers/gpu/drm/i915/i915_buddy.c 	for (i = order; i <= mm->max_order; ++i) {
mm                285 drivers/gpu/drm/i915/i915_buddy.c 		block = list_first_entry_or_null(&mm->free_list[i],
mm                298 drivers/gpu/drm/i915/i915_buddy.c 		err = split_block(mm, block);
mm                312 drivers/gpu/drm/i915/i915_buddy.c 	__i915_buddy_free(mm, block);
mm                336 drivers/gpu/drm/i915/i915_buddy.c int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
mm                348 drivers/gpu/drm/i915/i915_buddy.c 	if (size < mm->chunk_size)
mm                351 drivers/gpu/drm/i915/i915_buddy.c 	if (!IS_ALIGNED(size | start, mm->chunk_size))
mm                354 drivers/gpu/drm/i915/i915_buddy.c 	if (range_overflows(start, size, mm->size))
mm                357 drivers/gpu/drm/i915/i915_buddy.c 	for (i = 0; i < mm->n_roots; ++i)
mm                358 drivers/gpu/drm/i915/i915_buddy.c 		list_add_tail(&mm->roots[i]->tmp_link, &dfs);
mm                375 drivers/gpu/drm/i915/i915_buddy.c 		block_end = block_start + i915_buddy_block_size(mm, block) - 1;
mm                397 drivers/gpu/drm/i915/i915_buddy.c 			err = split_block(mm, block);
mm                419 drivers/gpu/drm/i915/i915_buddy.c 		__i915_buddy_free(mm, block);
mm                422 drivers/gpu/drm/i915/i915_buddy.c 	i915_buddy_free_list(mm, &allocated);
mm                107 drivers/gpu/drm/i915/i915_buddy.h i915_buddy_block_size(struct i915_buddy_mm *mm,
mm                110 drivers/gpu/drm/i915/i915_buddy.h 	return mm->chunk_size << i915_buddy_block_order(block);
mm                113 drivers/gpu/drm/i915/i915_buddy.h int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size);
mm                115 drivers/gpu/drm/i915/i915_buddy.h void i915_buddy_fini(struct i915_buddy_mm *mm);
mm                118 drivers/gpu/drm/i915/i915_buddy.h i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);
mm                120 drivers/gpu/drm/i915/i915_buddy.h int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
mm                124 drivers/gpu/drm/i915/i915_buddy.h void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
mm                126 drivers/gpu/drm/i915/i915_buddy.h void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
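
Annotation: i915_buddy.h above fixes the allocator's public surface: i915_buddy_init() splits the managed range into power-of-two roots no smaller than chunk_size, i915_buddy_alloc() returns a single block of the requested order (or an ERR_PTR), and blocks go back through i915_buddy_free() or i915_buddy_free_list(). A sketch of the expected call sequence, following the selftests' convention of chaining blocks on their link member; the sizes are illustrative:

#include <linux/err.h>
#include <linux/list.h>
#include <linux/sizes.h>

#include "i915_buddy.h"

static int demo_buddy_use(void)
{
	struct i915_buddy_mm mm;
	struct i915_buddy_block *block;
	LIST_HEAD(blocks);
	int err;

	/* manage 64 MiB in 4 KiB minimum chunks */
	err = i915_buddy_init(&mm, SZ_64M, SZ_4K);
	if (err)
		return err;

	/* order 4 == 2^4 chunks == one contiguous 64 KiB block */
	block = i915_buddy_alloc(&mm, 4);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto out_fini;
	}
	list_add_tail(&block->link, &blocks);

	i915_buddy_free_list(&mm, &blocks);	/* return everything */
out_fini:
	i915_buddy_fini(&mm);	/* warns if any block is still allocated */
	return err;
}
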
mm               1510 drivers/gpu/drm/i915/i915_cmd_parser.c 		void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
mm                102 drivers/gpu/drm/i915/i915_debugfs.c 	return obj->mm.mapping ? 'M' : ' ';
mm                153 drivers/gpu/drm/i915/i915_debugfs.c 		   obj->mm.dirty ? " dirty" : "",
mm                154 drivers/gpu/drm/i915/i915_debugfs.c 		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
mm                360 drivers/gpu/drm/i915/i915_debugfs.c 		   i915->mm.shrink_count,
mm                361 drivers/gpu/drm/i915/i915_debugfs.c 		   atomic_read(&i915->mm.free_count),
mm                362 drivers/gpu/drm/i915/i915_debugfs.c 		   i915->mm.shrink_memory);
mm               1657 drivers/gpu/drm/i915/i915_debugfs.c 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
mm               1659 drivers/gpu/drm/i915/i915_debugfs.c 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
mm               2818 drivers/gpu/drm/i915/i915_debugfs.c 	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
mm               2819 drivers/gpu/drm/i915/i915_debugfs.c 	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
mm                193 drivers/gpu/drm/i915/i915_drv.h 	} mm;
mm               1448 drivers/gpu/drm/i915/i915_drv.h 	struct i915_gem_mm mm;
mm               2259 drivers/gpu/drm/i915/i915_drv.h 	while (atomic_read(&i915->mm.free_count)) {
mm               2260 drivers/gpu/drm/i915/i915_drv.h 		flush_work(&i915->mm.free_work);
mm               2400 drivers/gpu/drm/i915/i915_drv.h 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
mm                 69 drivers/gpu/drm/i915/i915_gem.c 	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
mm                139 drivers/gpu/drm/i915/i915_gem.c 	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
mm               1083 drivers/gpu/drm/i915/i915_gem.c 	err = mutex_lock_interruptible(&obj->mm.lock);
mm               1090 drivers/gpu/drm/i915/i915_gem.c 		if (obj->mm.madv == I915_MADV_WILLNEED) {
mm               1091 drivers/gpu/drm/i915/i915_gem.c 			GEM_BUG_ON(!obj->mm.quirked);
mm               1093 drivers/gpu/drm/i915/i915_gem.c 			obj->mm.quirked = false;
mm               1096 drivers/gpu/drm/i915/i915_gem.c 			GEM_BUG_ON(obj->mm.quirked);
mm               1098 drivers/gpu/drm/i915/i915_gem.c 			obj->mm.quirked = true;
mm               1102 drivers/gpu/drm/i915/i915_gem.c 	if (obj->mm.madv != __I915_MADV_PURGED)
mm               1103 drivers/gpu/drm/i915/i915_gem.c 		obj->mm.madv = args->madv;
mm               1111 drivers/gpu/drm/i915/i915_gem.c 			spin_lock_irqsave(&i915->mm.obj_lock, flags);
mm               1113 drivers/gpu/drm/i915/i915_gem.c 			if (obj->mm.madv != I915_MADV_WILLNEED)
mm               1114 drivers/gpu/drm/i915/i915_gem.c 				list = &i915->mm.purge_list;
mm               1116 drivers/gpu/drm/i915/i915_gem.c 				list = &i915->mm.shrink_list;
mm               1117 drivers/gpu/drm/i915/i915_gem.c 			list_move_tail(&obj->mm.link, list);
mm               1119 drivers/gpu/drm/i915/i915_gem.c 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
mm               1124 drivers/gpu/drm/i915/i915_gem.c 	if (obj->mm.madv == I915_MADV_DONTNEED &&
mm               1128 drivers/gpu/drm/i915/i915_gem.c 	args->retained = obj->mm.madv != __I915_MADV_PURGED;
mm               1129 drivers/gpu/drm/i915/i915_gem.c 	mutex_unlock(&obj->mm.lock);
mm               1653 drivers/gpu/drm/i915/i915_gem.c 	spin_lock_init(&i915->mm.obj_lock);
mm               1655 drivers/gpu/drm/i915/i915_gem.c 	init_llist_head(&i915->mm.free_list);
mm               1657 drivers/gpu/drm/i915/i915_gem.c 	INIT_LIST_HEAD(&i915->mm.purge_list);
mm               1658 drivers/gpu/drm/i915/i915_gem.c 	INIT_LIST_HEAD(&i915->mm.shrink_list);
mm               1682 drivers/gpu/drm/i915/i915_gem.c 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
mm               1683 drivers/gpu/drm/i915/i915_gem.c 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
mm               1684 drivers/gpu/drm/i915/i915_gem.c 	WARN_ON(dev_priv->mm.shrink_count);
mm               1724 drivers/gpu/drm/i915/i915_gem.c 	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
mm               1744 drivers/gpu/drm/i915/i915_gem.c 	spin_lock(&file_priv->mm.lock);
mm               1745 drivers/gpu/drm/i915/i915_gem.c 	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
mm               1747 drivers/gpu/drm/i915/i915_gem.c 	spin_unlock(&file_priv->mm.lock);
mm               1765 drivers/gpu/drm/i915/i915_gem.c 	spin_lock_init(&file_priv->mm.lock);
mm               1766 drivers/gpu/drm/i915/i915_gem.c 	INIT_LIST_HEAD(&file_priv->mm.request_list);
mm                126 drivers/gpu/drm/i915/i915_gem_evict.c 	drm_mm_scan_init_with_range(&scan, &vm->mm,
mm                286 drivers/gpu/drm/i915/i915_gem_evict.c 	check_color = vm->mm.color_adjust;
mm                297 drivers/gpu/drm/i915/i915_gem_evict.c 	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
mm                720 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	i915->mm.bit_6_swizzle_x = swizzle_x;
mm                721 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	i915->mm.bit_6_swizzle_y = swizzle_y;
mm                864 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	    i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
mm                178 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->pages = vma->obj->mm.pages;
mm                180 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->page_sizes = vma->obj->mm.page_sizes;
mm                189 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (vma->pages != vma->obj->mm.pages) {
mm                381 drivers/gpu/drm/i915/i915_gem_gtt.c 	page = stash_pop_page(&vm->i915->mm.wc_stash);
mm                409 drivers/gpu/drm/i915/i915_gem_gtt.c 			stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
mm                439 drivers/gpu/drm/i915/i915_gem_gtt.c 		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
mm                494 drivers/gpu/drm/i915/i915_gem_gtt.c 	drm_mm_takedown(&vm->mm);
mm                560 drivers/gpu/drm/i915/i915_gem_gtt.c 	drm_mm_init(&vm->mm, 0, vm->total);
mm                561 drivers/gpu/drm/i915/i915_gem_gtt.c 	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
mm               2160 drivers/gpu/drm/i915/i915_gem_gtt.c 		GEM_BUG_ON(obj->mm.pages == pages);
mm               2555 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->page_sizes = vma->obj->mm.page_sizes;
mm               2698 drivers/gpu/drm/i915/i915_gem_gtt.c 	ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
mm               2715 drivers/gpu/drm/i915/i915_gem_gtt.c 	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
mm               2769 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (drm_mm_initialized(&ggtt->vm.mm)) {
mm               2794 drivers/gpu/drm/i915/i915_gem_gtt.c 	pvec = &i915->mm.wc_stash.pvec;
mm               3217 drivers/gpu/drm/i915/i915_gem_gtt.c 		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
mm               3245 drivers/gpu/drm/i915/i915_gem_gtt.c 	stash_init(&dev_priv->mm.wc_stash);
mm               3578 drivers/gpu/drm/i915/i915_gem_gtt.c 		vma->pages = vma->obj->mm.pages;
mm               3649 drivers/gpu/drm/i915/i915_gem_gtt.c 	err = drm_mm_reserve_node(&vm->mm, node);
mm               3658 drivers/gpu/drm/i915/i915_gem_gtt.c 		err = drm_mm_reserve_node(&vm->mm, node);
mm               3764 drivers/gpu/drm/i915/i915_gem_gtt.c 	err = drm_mm_insert_node_in_range(&vm->mm, node,
mm               3771 drivers/gpu/drm/i915/i915_gem_gtt.c 		err = drm_mm_insert_node_in_range(&vm->mm, node,
mm               3820 drivers/gpu/drm/i915/i915_gem_gtt.c 	return drm_mm_insert_node_in_range(&vm->mm, node,
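
Annotation: the i915_gem_gtt.c entries around drm_mm_reserve_node() and drm_mm_insert_node_in_range() show a two-attempt shape: try the placement first, and if the address space is full, evict something and retry once. A condensed sketch of that retry loop; demo_evict() is a hypothetical stand-in for i915's eviction helpers, and @node must already carry the desired start and size:

#include <drm/drm_mm.h>

static int demo_reserve(struct drm_mm *mm, struct drm_mm_node *node,
			int (*demo_evict)(struct drm_mm *mm,
					  struct drm_mm_node *node))
{
	int err;

	err = drm_mm_reserve_node(mm, node);
	if (err != -ENOSPC)
		return err;		/* success, or a hard failure */

	err = demo_evict(mm, node);	/* make room at node->start */
	if (err)
		return err;

	return drm_mm_reserve_node(mm, node);
}
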
mm                292 drivers/gpu/drm/i915/i915_gem_gtt.h 	struct drm_mm mm;
mm               1352 drivers/gpu/drm/i915/i915_gpu_error.c 			.pages = obj->mm.pages,
mm                 33 drivers/gpu/drm/i915/i915_mm.c 	struct mm_struct *mm;
mm                 43 drivers/gpu/drm/i915/i915_mm.c 	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
mm                 70 drivers/gpu/drm/i915/i915_mm.c 	r.mm = vma->vm_mm;
mm                 75 drivers/gpu/drm/i915/i915_mm.c 	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
mm                176 drivers/gpu/drm/i915/i915_request.c 	spin_lock(&file_priv->mm.lock);
mm                181 drivers/gpu/drm/i915/i915_request.c 	spin_unlock(&file_priv->mm.lock);
mm                497 drivers/gpu/drm/i915/i915_vma.c 	if (vma->vm->mm.color_adjust == NULL)
mm                524 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
mm                927 drivers/gpu/drm/i915/i915_vma.c 	obj->mm.dirty = true;
mm                 13 drivers/gpu/drm/i915/selftests/i915_buddy.c static void __igt_dump_block(struct i915_buddy_mm *mm,
mm                 22 drivers/gpu/drm/i915/selftests/i915_buddy.c 	       i915_buddy_block_size(mm, block),
mm                 27 drivers/gpu/drm/i915/selftests/i915_buddy.c static void igt_dump_block(struct i915_buddy_mm *mm,
mm                 32 drivers/gpu/drm/i915/selftests/i915_buddy.c 	__igt_dump_block(mm, block, false);
mm                 36 drivers/gpu/drm/i915/selftests/i915_buddy.c 		__igt_dump_block(mm, buddy, true);
mm                 39 drivers/gpu/drm/i915/selftests/i915_buddy.c static int igt_check_block(struct i915_buddy_mm *mm,
mm                 57 drivers/gpu/drm/i915/selftests/i915_buddy.c 	block_size = i915_buddy_block_size(mm, block);
mm                 60 drivers/gpu/drm/i915/selftests/i915_buddy.c 	if (block_size < mm->chunk_size) {
mm                 70 drivers/gpu/drm/i915/selftests/i915_buddy.c 	if (!IS_ALIGNED(block_size, mm->chunk_size)) {
mm                 75 drivers/gpu/drm/i915/selftests/i915_buddy.c 	if (!IS_ALIGNED(offset, mm->chunk_size)) {
mm                 98 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (i915_buddy_block_size(mm, buddy) != block_size) {
mm                113 drivers/gpu/drm/i915/selftests/i915_buddy.c static int igt_check_blocks(struct i915_buddy_mm *mm,
mm                128 drivers/gpu/drm/i915/selftests/i915_buddy.c 		err = igt_check_block(mm, block);
mm                141 drivers/gpu/drm/i915/selftests/i915_buddy.c 			prev_block_size = i915_buddy_block_size(mm, prev);
mm                153 drivers/gpu/drm/i915/selftests/i915_buddy.c 		total += i915_buddy_block_size(mm, block);
mm                168 drivers/gpu/drm/i915/selftests/i915_buddy.c 		igt_dump_block(mm, prev);
mm                173 drivers/gpu/drm/i915/selftests/i915_buddy.c 		igt_dump_block(mm, block);
mm                179 drivers/gpu/drm/i915/selftests/i915_buddy.c static int igt_check_mm(struct i915_buddy_mm *mm)
mm                187 drivers/gpu/drm/i915/selftests/i915_buddy.c 	if (!mm->n_roots) {
mm                192 drivers/gpu/drm/i915/selftests/i915_buddy.c 	if (mm->n_roots != hweight64(mm->size)) {
mm                194 drivers/gpu/drm/i915/selftests/i915_buddy.c 		       mm->n_roots, hweight64(mm->size));
mm                202 drivers/gpu/drm/i915/selftests/i915_buddy.c 	for (i = 0; i < mm->n_roots; ++i) {
mm                206 drivers/gpu/drm/i915/selftests/i915_buddy.c 		root = mm->roots[i];
mm                213 drivers/gpu/drm/i915/selftests/i915_buddy.c 		err = igt_check_block(mm, root);
mm                223 drivers/gpu/drm/i915/selftests/i915_buddy.c 			if (order != mm->max_order) {
mm                235 drivers/gpu/drm/i915/selftests/i915_buddy.c 			prev_block_size = i915_buddy_block_size(mm, prev);
mm                244 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = list_first_entry_or_null(&mm->free_list[order],
mm                256 drivers/gpu/drm/i915/selftests/i915_buddy.c 		total += i915_buddy_block_size(mm, root);
mm                260 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (total != mm->size) {
mm                261 drivers/gpu/drm/i915/selftests/i915_buddy.c 			pr_err("expected mm size=%llx, found=%llx\n", mm->size,
mm                270 drivers/gpu/drm/i915/selftests/i915_buddy.c 		igt_dump_block(mm, prev);
mm                275 drivers/gpu/drm/i915/selftests/i915_buddy.c 		igt_dump_block(mm, root);
mm                300 drivers/gpu/drm/i915/selftests/i915_buddy.c 	struct i915_buddy_mm mm;
mm                310 drivers/gpu/drm/i915/selftests/i915_buddy.c 	err = i915_buddy_init(&mm, mm_size, chunk_size);
mm                316 drivers/gpu/drm/i915/selftests/i915_buddy.c 	for (max_order = mm.max_order; max_order >= 0; max_order--) {
mm                322 drivers/gpu/drm/i915/selftests/i915_buddy.c 		err = igt_check_mm(&mm);
mm                335 drivers/gpu/drm/i915/selftests/i915_buddy.c 			block = i915_buddy_alloc(&mm, order);
mm                362 drivers/gpu/drm/i915/selftests/i915_buddy.c 			total += i915_buddy_block_size(&mm, block);
mm                363 drivers/gpu/drm/i915/selftests/i915_buddy.c 		} while (total < mm.size);
mm                366 drivers/gpu/drm/i915/selftests/i915_buddy.c 			err = igt_check_blocks(&mm, &blocks, total, false);
mm                368 drivers/gpu/drm/i915/selftests/i915_buddy.c 		i915_buddy_free_list(&mm, &blocks);
mm                371 drivers/gpu/drm/i915/selftests/i915_buddy.c 			err = igt_check_mm(&mm);
mm                383 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_fini(&mm);
mm                392 drivers/gpu/drm/i915/selftests/i915_buddy.c 	struct i915_buddy_mm mm;
mm                403 drivers/gpu/drm/i915/selftests/i915_buddy.c 	err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
mm                408 drivers/gpu/drm/i915/selftests/i915_buddy.c 	GEM_BUG_ON(mm.max_order != max_order);
mm                411 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
mm                423 drivers/gpu/drm/i915/selftests/i915_buddy.c 	block = i915_buddy_alloc(&mm, 0);
mm                433 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
mm                445 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_free(&mm, block);
mm                451 drivers/gpu/drm/i915/selftests/i915_buddy.c 		i915_buddy_free(&mm, block);
mm                453 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
mm                460 drivers/gpu/drm/i915/selftests/i915_buddy.c 		i915_buddy_free(&mm, block);
mm                465 drivers/gpu/drm/i915/selftests/i915_buddy.c 	block = i915_buddy_alloc(&mm, max_order);
mm                472 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_free(&mm, block);
mm                475 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_free_list(&mm, &blocks);
mm                476 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_fini(&mm);
mm                484 drivers/gpu/drm/i915/selftests/i915_buddy.c 	struct i915_buddy_mm mm;
mm                494 drivers/gpu/drm/i915/selftests/i915_buddy.c 	err = i915_buddy_init(&mm,
mm                501 drivers/gpu/drm/i915/selftests/i915_buddy.c 	GEM_BUG_ON(mm.max_order != max_order);
mm                504 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
mm                516 drivers/gpu/drm/i915/selftests/i915_buddy.c 	block = i915_buddy_alloc(&mm, 0);
mm                525 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_free_list(&mm, &blocks);
mm                526 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_fini(&mm);
mm                534 drivers/gpu/drm/i915/selftests/i915_buddy.c 	struct i915_buddy_mm mm;
mm                547 drivers/gpu/drm/i915/selftests/i915_buddy.c 	err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
mm                552 drivers/gpu/drm/i915/selftests/i915_buddy.c 	GEM_BUG_ON(mm.max_order != max_order);
mm                559 drivers/gpu/drm/i915/selftests/i915_buddy.c 			i915_buddy_free(&mm, block);
mm                563 drivers/gpu/drm/i915/selftests/i915_buddy.c 			block = i915_buddy_alloc(&mm, order);
mm                574 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, 0);
mm                582 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, top);
mm                592 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_free_list(&mm, &holes);
mm                596 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
mm                608 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_free_list(&mm, &blocks);
mm                609 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_fini(&mm);
mm                615 drivers/gpu/drm/i915/selftests/i915_buddy.c 	struct i915_buddy_mm mm;
mm                628 drivers/gpu/drm/i915/selftests/i915_buddy.c 	err = i915_buddy_init(&mm, size, chunk_size);
mm                634 drivers/gpu/drm/i915/selftests/i915_buddy.c 	err = igt_check_mm(&mm);
mm                640 drivers/gpu/drm/i915/selftests/i915_buddy.c 	rem = mm.size;
mm                647 drivers/gpu/drm/i915/selftests/i915_buddy.c 		size = min(page_num * mm.chunk_size, rem);
mm                649 drivers/gpu/drm/i915/selftests/i915_buddy.c 		err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
mm                678 drivers/gpu/drm/i915/selftests/i915_buddy.c 			err = igt_check_blocks(&mm, &tmp, size, true);
mm                695 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_free_list(&mm, &blocks);
mm                698 drivers/gpu/drm/i915/selftests/i915_buddy.c 		err = igt_check_mm(&mm);
mm                704 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_fini(&mm);
mm                 41 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	GEM_BUG_ON(obj->mm.quirked);
mm                 42 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	obj->mm.quirked = true;
mm                 78 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 		GEM_BUG_ON(!obj->mm.quirked);
mm                114 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 		if (vma->obj->mm.quirked)
mm                125 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 		GEM_BUG_ON(!obj->mm.quirked);
mm                126 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 		obj->mm.quirked = false;
mm                282 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	ggtt->vm.mm.color_adjust = mock_color_adjust;
mm                343 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	ggtt->vm.mm.color_adjust = NULL;
mm                 91 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	obj->mm.madv = I915_MADV_DONTNEED;
mm                103 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	obj->mm.dirty = false;
mm                104 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	obj->mm.madv = I915_MADV_WILLNEED;
mm                292 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			mock_vma.pages = obj->mm.pages;
mm               1090 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
mm               1091 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
mm               1095 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (ggtt->vm.mm.color_adjust)
mm               1096 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			ggtt->vm.mm.color_adjust(node, 0,
mm               1164 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
mm               1237 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	vma->pages = obj->mm.pages;
mm                315 drivers/gpu/drm/i915/selftests/i915_vma.c 	GEM_BUG_ON(!drm_mm_clean(&ggtt->vm.mm));
mm                574 drivers/gpu/drm/i915/selftests/i915_vma.c 					if (vma->pages == obj->mm.pages) {
mm                674 drivers/gpu/drm/i915/selftests/i915_vma.c 		if (vma->pages == vma->obj->mm.pages) {
mm                686 drivers/gpu/drm/i915/selftests/i915_vma.c 		if (vma->pages != vma->obj->mm.pages) {
mm                131 drivers/gpu/drm/lima/lima_vm.c 	err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size);
mm                225 drivers/gpu/drm/lima/lima_vm.c 	drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);
mm                241 drivers/gpu/drm/lima/lima_vm.c 	drm_mm_takedown(&vm->mm);
mm                 33 drivers/gpu/drm/lima/lima_vm.h 	struct drm_mm mm;
mm                269 drivers/gpu/drm/msm/msm_drv.c 		drm_mm_takedown(&priv->vram.mm);
mm                358 drivers/gpu/drm/msm/msm_drv.c 		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
mm                210 drivers/gpu/drm/msm/msm_drv.h 		struct drm_mm mm;
mm                 91 drivers/gpu/drm/msm/msm_gem.c 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
mm                 22 drivers/gpu/drm/msm/msm_gem.h 	struct drm_mm mm;
mm                 17 drivers/gpu/drm/msm/msm_gem_vma.c 	drm_mm_takedown(&aspace->mm);
mm                114 drivers/gpu/drm/msm/msm_gem_vma.c 	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
mm                145 drivers/gpu/drm/msm/msm_gem_vma.c 	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
mm                168 drivers/gpu/drm/msm/msm_gem_vma.c 	drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
mm                 29 drivers/gpu/drm/nouveau/include/nvkm/core/mm.h nvkm_mm_initialised(struct nvkm_mm *mm)
mm                 31 drivers/gpu/drm/nouveau/include/nvkm/core/mm.h 	return mm->heap_nodes;
mm                 44 drivers/gpu/drm/nouveau/include/nvkm/core/mm.h nvkm_mm_heap_size(struct nvkm_mm *mm, u8 heap)
mm                 48 drivers/gpu/drm/nouveau/include/nvkm/core/mm.h 	list_for_each_entry(node, &mm->nodes, nl_entry) {
mm                 26 drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h 		struct nvkm_mm mm;
mm                 99 drivers/gpu/drm/nouveau/nouveau_svm.c 	struct mm_struct *mm;
mm                116 drivers/gpu/drm/nouveau/nouveau_svm.c 	struct mm_struct *mm;
mm                173 drivers/gpu/drm/nouveau/nouveau_svm.c 	mm = get_task_mm(current);
mm                174 drivers/gpu/drm/nouveau/nouveau_svm.c 	down_read(&mm->mmap_sem);
mm                177 drivers/gpu/drm/nouveau/nouveau_svm.c 		up_read(&mm->mmap_sem);
mm                185 drivers/gpu/drm/nouveau/nouveau_svm.c 		vma = find_vma_intersection(mm, addr, end);
mm                203 drivers/gpu/drm/nouveau/nouveau_svm.c 	up_read(&mm->mmap_sem);
mm                204 drivers/gpu/drm/nouveau/nouveau_svm.c 	mmput(mm);
mm                347 drivers/gpu/drm/nouveau/nouveau_svm.c 	svmm->mm = get_task_mm(current);
mm                348 drivers/gpu/drm/nouveau/nouveau_svm.c 	down_write(&svmm->mm->mmap_sem);
mm                350 drivers/gpu/drm/nouveau/nouveau_svm.c 	ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
mm                355 drivers/gpu/drm/nouveau/nouveau_svm.c 	up_write(&svmm->mm->mmap_sem);
mm                356 drivers/gpu/drm/nouveau/nouveau_svm.c 	mmput(svmm->mm);
mm                503 drivers/gpu/drm/nouveau/nouveau_svm.c 		up_read(&svmm->mm->mmap_sem);
mm                508 drivers/gpu/drm/nouveau/nouveau_svm.c 		up_read(&svmm->mm->mmap_sem);
mm                516 drivers/gpu/drm/nouveau/nouveau_svm.c 		up_read(&svmm->mm->mmap_sem);
mm                618 drivers/gpu/drm/nouveau/nouveau_svm.c 		down_read(&svmm->mm->mmap_sem);
mm                619 drivers/gpu/drm/nouveau/nouveau_svm.c 		vma = find_vma_intersection(svmm->mm, start, limit);
mm                622 drivers/gpu/drm/nouveau/nouveau_svm.c 			up_read(&svmm->mm->mmap_sem);
mm                632 drivers/gpu/drm/nouveau/nouveau_svm.c 			up_read(&svmm->mm->mmap_sem);
mm                713 drivers/gpu/drm/nouveau/nouveau_svm.c 			up_read(&svmm->mm->mmap_sem);
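
Annotation: nouveau_svm.c above is disciplined about the owner's mmap_sem: the mm is taken once with get_task_mm() at registration time, and every VMA lookup afterwards is bracketed by down_read()/up_read() so the tree cannot change underneath find_vma_intersection(). A sketch of that locking pattern (mmap_sem here; later kernels call it mmap_lock):

#include <linux/mm.h>
#include <linux/sched/mm.h>

/* Check that [addr, end) is fully covered by a single VMA. */
static int demo_check_range(unsigned long addr, unsigned long end)
{
	struct mm_struct *mm = get_task_mm(current);
	struct vm_area_struct *vma;
	int ret = 0;

	if (!mm)
		return -ESRCH;		/* no address space (kthread) */

	down_read(&mm->mmap_sem);	/* VMA tree stable while held */
	vma = find_vma_intersection(mm, addr, end);
	if (!vma || addr < vma->vm_start || end > vma->vm_end)
		ret = -ENOENT;		/* range not fully covered */
	up_read(&mm->mmap_sem);

	mmput(mm);			/* balance get_task_mm() */
	return ret;
}
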
mm                 26 drivers/gpu/drm/nouveau/nvkm/core/mm.c #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL :          \
mm                 30 drivers/gpu/drm/nouveau/nvkm/core/mm.c nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
mm                 36 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	list_for_each_entry(node, &mm->nodes, nl_entry) {
mm                 41 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	list_for_each_entry(node, &mm->free, fl_entry) {
mm                 48 drivers/gpu/drm/nouveau/nvkm/core/mm.c nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)
mm                 72 drivers/gpu/drm/nouveau/nvkm/core/mm.c 			list_for_each_entry(prev, &mm->free, fl_entry) {
mm                 86 drivers/gpu/drm/nouveau/nvkm/core/mm.c region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
mm                111 drivers/gpu/drm/nouveau/nvkm/core/mm.c nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
mm                121 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	list_for_each_entry(this, &mm->free, fl_entry) {
mm                131 drivers/gpu/drm/nouveau/nvkm/core/mm.c 			s = roundup(s, mm->block_size);
mm                135 drivers/gpu/drm/nouveau/nvkm/core/mm.c 			e = rounddown(e, mm->block_size);
mm                143 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		if (splitoff && !region_head(mm, this, splitoff))
mm                146 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		this = region_head(mm, this, min(size_max, e - s));
mm                161 drivers/gpu/drm/nouveau/nvkm/core/mm.c region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
mm                186 drivers/gpu/drm/nouveau/nvkm/core/mm.c nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
mm                194 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
mm                205 drivers/gpu/drm/nouveau/nvkm/core/mm.c 			s = roundup(s, mm->block_size);
mm                209 drivers/gpu/drm/nouveau/nvkm/core/mm.c 			e = rounddown(e, mm->block_size);
mm                222 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		if (c && !region_tail(mm, this, c))
mm                225 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		this = region_tail(mm, this, a);
mm                240 drivers/gpu/drm/nouveau/nvkm/core/mm.c nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
mm                245 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	if (nvkm_mm_initialised(mm)) {
mm                246 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
mm                255 drivers/gpu/drm/nouveau/nvkm/core/mm.c 			list_add_tail(&node->nl_entry, &mm->nodes);
mm                257 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		BUG_ON(block != mm->block_size);
mm                259 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		INIT_LIST_HEAD(&mm->nodes);
mm                260 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		INIT_LIST_HEAD(&mm->free);
mm                261 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		mm->block_size = block;
mm                262 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		mm->heap_nodes = 0;
mm                270 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		node->offset  = roundup(offset, mm->block_size);
mm                271 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		node->length  = rounddown(offset + length, mm->block_size);
mm                275 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	list_add_tail(&node->nl_entry, &mm->nodes);
mm                276 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	list_add_tail(&node->fl_entry, &mm->free);
mm                278 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	mm->heap_nodes++;
mm                283 drivers/gpu/drm/nouveau/nvkm/core/mm.c nvkm_mm_fini(struct nvkm_mm *mm)
mm                288 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	if (!nvkm_mm_initialised(mm))
mm                291 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	list_for_each_entry(node, &mm->nodes, nl_entry) {
mm                293 drivers/gpu/drm/nouveau/nvkm/core/mm.c 			if (++nodes > mm->heap_nodes) {
mm                294 drivers/gpu/drm/nouveau/nvkm/core/mm.c 				nvkm_mm_dump(mm, "mm not clean!");
mm                300 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) {
mm                305 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	mm->heap_nodes = 0;
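
Annotation: nvkm/core/mm.c is nouveau's own range allocator, separate from drm_mm: nvkm_mm_init() appends a block-aligned heap to the node list, nvkm_mm_head() and nvkm_mm_tail() carve an allocation from the bottom or top of the free space (the fb/ram.c entries above pick between them for bottom-up versus top-down VRAM placement), and nvkm_mm_free() merges the node back and clears the caller's pointer. A usage sketch; the heap/type values follow the convention visible in mm.c that heap 0 means "any heap" and a non-zero type tags the allocation:

#include <core/mm.h>	/* nouveau-internal header */

static int demo_vram_carve(struct nvkm_mm *mm)
{
	struct nvkm_mm_node *node;
	int ret;

	/* any heap, type 1, between 16 and 32 blocks, 1-block aligned */
	ret = nvkm_mm_head(mm, 0, 1, 32, 16, 1, &node);
	if (ret)
		return ret;

	/* ... node->offset and node->length describe the region ... */

	nvkm_mm_free(mm, &node);	/* also NULLs our pointer */
	return 0;
}
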
mm                152 drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c 		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
mm                179 drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c 		nvkm_mm_fini(&tdev->iommu.mm);
mm                108 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 	struct nvkm_mm *mm;
mm                120 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 	mm = &ram->vram;
mm                133 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 			ret = nvkm_mm_tail(mm, heap, type, max, min, align, &r);
mm                135 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 			ret = nvkm_mm_head(mm, heap, type, max, min, align, &r);
mm                105 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 	struct nvkm_mm *mm;
mm                345 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 	nvkm_mm_free(imem->mm, &r);
mm                465 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
mm                498 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 	nvkm_mm_free(imem->mm, &r);
mm                590 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 		imem->mm = &tdev->iommu.mm;
mm                318 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	struct nvkm_mm *mm = &device->fb->ram->vram;
mm                319 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
mm                320 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
mm                321 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
mm                116 drivers/gpu/drm/panfrost/panfrost_device.h 	struct drm_mm mm;
mm                496 drivers/gpu/drm/panfrost/panfrost_drv.c 	drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
mm                497 drivers/gpu/drm/panfrost/panfrost_drv.c 	panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust;
mm                512 drivers/gpu/drm/panfrost/panfrost_drv.c 	drm_mm_takedown(&panfrost_priv->mm);
mm                526 drivers/gpu/drm/panfrost/panfrost_drv.c 	drm_mm_takedown(&panfrost_priv->mm);
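
Annotation: panfrost_drv.c installs a color_adjust hook on its per-file drm_mm, which lets the manager shrink candidate holes so allocations of different "colors" never end up adjacent. The callback receives the node preceding the hole and may pull *start and *end inward. A sketch in the spirit of panfrost_drm_mm_color_adjust() and i915_gtt_color_adjust(); the one-unit guard and the walk to the next node via node_list are illustrative details, not a copy of either driver:

#include <drm/drm_mm.h>

static void demo_color_adjust(const struct drm_mm_node *node,
			      unsigned long color,
			      u64 *start, u64 *end)
{
	/* @node precedes the hole: pad away from a mismatched left
	 * neighbour, then peek at the right neighbour and do the same */
	if (node->color != color)
		*start += 1;

	node = list_next_entry(node, node_list);
	if (drm_mm_node_allocated(node) && node->color != color)
		*end -= 1;
}

/* installed once, next to drm_mm_init(), as panfrost does:
 *	mm.color_adjust = demo_color_adjust;
 */
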
mm                150 drivers/gpu/drm/panfrost/panfrost_gem.c 	ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
mm                428 drivers/gpu/drm/panfrost/panfrost_mmu.c 	drm_mm_for_each_node(node, &priv->mm) {
mm                370 drivers/gpu/drm/qxl/qxl_ttm.c 	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
mm                377 drivers/gpu/drm/qxl/qxl_ttm.c 	drm_mm_print(mm, &p);
mm                199 drivers/gpu/drm/radeon/radeon_cs.c 		down_read(&current->mm->mmap_sem);
mm                204 drivers/gpu/drm/radeon/radeon_cs.c 		up_read(&current->mm->mmap_sem);
mm                344 drivers/gpu/drm/radeon/radeon_gem.c 		down_read(&current->mm->mmap_sem);
mm                347 drivers/gpu/drm/radeon/radeon_gem.c 			up_read(&current->mm->mmap_sem);
mm                354 drivers/gpu/drm/radeon/radeon_gem.c 		up_read(&current->mm->mmap_sem);
mm                128 drivers/gpu/drm/radeon/radeon_mn.c static void radeon_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
mm                131 drivers/gpu/drm/radeon/radeon_mn.c 		.mm = mm,
mm                141 drivers/gpu/drm/radeon/radeon_mn.c static struct mmu_notifier *radeon_mn_alloc_notifier(struct mm_struct *mm)
mm                184 drivers/gpu/drm/radeon/radeon_mn.c 	mn = mmu_notifier_get(&radeon_mn_ops, current->mm);
mm                496 drivers/gpu/drm/radeon/radeon_ttm.c 	if (current->mm != gtt->usermm)
mm                748 drivers/gpu/drm/radeon/radeon_ttm.c 	gtt->usermm = current->mm;
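
Annotation: the radeon_ttm.c pair above is the userptr ownership check: the mm of the process that created the mapping is recorded when the pages are bound (gtt->usermm = current->mm) and compared against current->mm before any later populate, so one process cannot fault in another process's user pages. A sketch of that check with hypothetical names:

#include <linux/errno.h>
#include <linux/sched.h>

struct demo_userptr {
	struct mm_struct *usermm;	/* owner recorded at bind time */
	unsigned long addr;
};

static void demo_userptr_bind(struct demo_userptr *up, unsigned long addr)
{
	up->usermm = current->mm;	/* remember the creating process */
	up->addr = addr;
}

static int demo_userptr_populate(struct demo_userptr *up)
{
	if (current->mm != up->usermm)
		return -EPERM;		/* wrong address space */

	/* ... get_user_pages() against up->addr ... */
	return 0;
}
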
mm                 94 drivers/gpu/drm/rockchip/rockchip_drm_drv.c 	drm_mm_init(&private->mm, start, end - start + 1);
mm                107 drivers/gpu/drm/rockchip/rockchip_drm_drv.c 	drm_mm_takedown(&private->mm);
mm                 49 drivers/gpu/drm/rockchip/rockchip_drm_drv.h 	struct drm_mm mm;
mm                 26 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
mm                 36 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	rk_obj->dma_addr = rk_obj->mm.start;
mm                 53 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	drm_mm_remove_node(&rk_obj->mm);
mm                 68 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	drm_mm_remove_node(&rk_obj->mm);
mm                 22 drivers/gpu/drm/rockchip/rockchip_drm_gem.h 	struct drm_mm_node mm;
mm                 53 drivers/gpu/drm/selftests/test-drm_mm.c static bool assert_no_holes(const struct drm_mm *mm)
mm                 60 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
mm                 67 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node(hole, mm) {
mm                 77 drivers/gpu/drm/selftests/test-drm_mm.c static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
mm                 88 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
mm                106 drivers/gpu/drm/selftests/test-drm_mm.c static bool assert_continuous(const struct drm_mm *mm, u64 size)
mm                112 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!assert_no_holes(mm))
mm                117 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node(node, mm) {
mm                136 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
mm                168 drivers/gpu/drm/selftests/test-drm_mm.c static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
mm                173 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!drm_mm_node_allocated(node) || node->mm != mm) {
mm                199 drivers/gpu/drm/selftests/test-drm_mm.c #define show_mm(mm) do { \
mm                201 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_print((mm), &__p); } while (0)
mm                206 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm                211 drivers/gpu/drm/selftests/test-drm_mm.c 	memset(&mm, 0, sizeof(mm));
mm                212 drivers/gpu/drm/selftests/test-drm_mm.c 	if (drm_mm_initialized(&mm)) {
mm                217 drivers/gpu/drm/selftests/test-drm_mm.c 	memset(&mm, 0xff, sizeof(mm));
mm                218 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 0, size);
mm                219 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!drm_mm_initialized(&mm)) {
mm                224 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!drm_mm_clean(&mm)) {
mm                230 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!assert_one_hole(&mm, 0, size)) {
mm                238 drivers/gpu/drm/selftests/test-drm_mm.c 	ret = drm_mm_reserve_node(&mm, &tmp);
mm                245 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!assert_no_holes(&mm)) {
mm                252 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!assert_one_hole(&mm, 0, size)) {
mm                259 drivers/gpu/drm/selftests/test-drm_mm.c 		show_mm(&mm);
mm                260 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm                266 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm                274 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 0, 4096);
mm                279 drivers/gpu/drm/selftests/test-drm_mm.c 	ret = drm_mm_reserve_node(&mm, &nodes[0]);
mm                288 drivers/gpu/drm/selftests/test-drm_mm.c 	ret = drm_mm_reserve_node(&mm, &nodes[1]);
mm                295 drivers/gpu/drm/selftests/test-drm_mm.c 	show_mm(&mm);
mm                307 drivers/gpu/drm/selftests/test-drm_mm.c static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
mm                311 drivers/gpu/drm/selftests/test-drm_mm.c 	err = drm_mm_reserve_node(mm, node);
mm                326 drivers/gpu/drm/selftests/test-drm_mm.c static bool check_reserve_boundaries(struct drm_mm *mm,
mm                358 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!expect_reserve_fail(mm,
mm                374 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm                398 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 0, count * size);
mm                400 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!check_reserve_boundaries(&mm, count, size))
mm                407 drivers/gpu/drm/selftests/test-drm_mm.c 		err = drm_mm_reserve_node(&mm, &nodes[n]);
mm                421 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!expect_reserve_fail(&mm, &nodes[n]))
mm                426 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!assert_continuous(&mm, size))
mm                432 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!expect_reserve_fail(&mm,
mm                438 drivers/gpu/drm/selftests/test-drm_mm.c 		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
mm                447 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!assert_continuous(&mm, size))
mm                452 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
mm                456 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!expect_reserve_fail(&mm,
mm                472 drivers/gpu/drm/selftests/test-drm_mm.c 			err = drm_mm_reserve_node(&mm, node);
mm                483 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!assert_continuous(&mm, size))
mm                489 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, next, &mm)
mm                491 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm                525 drivers/gpu/drm/selftests/test-drm_mm.c static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
mm                531 drivers/gpu/drm/selftests/test-drm_mm.c 	err = drm_mm_insert_node_generic(mm, node,
mm                540 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!assert_node(node, mm, size, alignment, color)) {
mm                548 drivers/gpu/drm/selftests/test-drm_mm.c static bool expect_insert_fail(struct drm_mm *mm, u64 size)
mm                553 drivers/gpu/drm/selftests/test-drm_mm.c 	err = drm_mm_insert_node(mm, &tmp, size);
mm                572 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm                592 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 0, count * size);
mm                600 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!expect_insert(&mm, node, size, 0, n, mode)) {
mm                614 drivers/gpu/drm/selftests/test-drm_mm.c 				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
mm                630 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!assert_continuous(&mm, size))
mm                634 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!expect_insert_fail(&mm, size))
mm                642 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
mm                654 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!assert_continuous(&mm, size))
mm                667 drivers/gpu/drm/selftests/test-drm_mm.c 				if (!expect_insert(&mm, node, size, 0, n, mode)) {
mm                676 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!assert_continuous(&mm, size))
mm                679 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!expect_insert_fail(&mm, size))
mm                683 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_for_each_node_safe(node, next, &mm)
mm                685 drivers/gpu/drm/selftests/test-drm_mm.c 		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
mm                692 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, next, &mm)
mm                694 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm                762 drivers/gpu/drm/selftests/test-drm_mm.c static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
mm                769 drivers/gpu/drm/selftests/test-drm_mm.c 	err = drm_mm_insert_node_in_range(mm, node,
mm                780 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!assert_node(node, mm, size, alignment, color)) {
mm                788 drivers/gpu/drm/selftests/test-drm_mm.c static bool expect_insert_in_range_fail(struct drm_mm *mm,
mm                796 drivers/gpu/drm/selftests/test-drm_mm.c 	err = drm_mm_insert_node_in_range(mm, &tmp,
mm                815 drivers/gpu/drm/selftests/test-drm_mm.c static bool assert_contiguous_in_range(struct drm_mm *mm,
mm                823 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!expect_insert_in_range_fail(mm, size, start, end))
mm                827 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node(node, mm) {
mm                856 drivers/gpu/drm/selftests/test-drm_mm.c 		node = __drm_mm_interval_first(mm, 0, start - 1);
mm                865 drivers/gpu/drm/selftests/test-drm_mm.c 		node = __drm_mm_interval_first(mm, end, U64_MAX);
mm                879 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm                898 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 0, count * size);
mm                905 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!expect_insert_in_range(&mm, &nodes[n],
mm                916 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!assert_contiguous_in_range(&mm, size, start, end)) {
mm                927 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!expect_insert_in_range(&mm, &nodes[n],
mm                941 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!assert_contiguous_in_range(&mm, size, start, end)) {
mm                947 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_for_each_node_safe(node, next, &mm)
mm                949 drivers/gpu/drm/selftests/test-drm_mm.c 		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
mm                956 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, next, &mm)
mm                958 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm                966 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm                971 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, start, size);
mm                973 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!expect_insert_in_range_fail(&mm, 1, 0, start))
mm                976 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!expect_insert_in_range_fail(&mm, size,
mm                980 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!expect_insert_in_range_fail(&mm, size,
mm                984 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
mm                987 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
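
Annotation: the __igt_insert_range_invalid entries probe drm_mm_insert_node_in_range() at the manager's edges: a request below start, beyond end, or only partially overlapping the range must fail cleanly. A sketch of the ranged insert as the selftests exercise it; the sizes and the upper-half restriction are illustrative:

#include <drm/drm_mm.h>
#include <linux/sizes.h>

static int demo_ranged_insert(void)
{
	struct drm_mm mm;
	struct drm_mm_node node = {};
	int err;

	drm_mm_init(&mm, 0, SZ_1M);	/* manage [0, 1 MiB) */

	/* 4 KiB, 4 KiB-aligned, restricted to the upper half */
	err = drm_mm_insert_node_in_range(&mm, &node, SZ_4K, SZ_4K, 0,
					  SZ_512K, SZ_1M,
					  DRM_MM_INSERT_BEST);
	if (!err)
		drm_mm_remove_node(&node);

	drm_mm_takedown(&mm);		/* manager must be empty here */
	return err;
}
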
mm               1040 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm               1054 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 1, U64_MAX - 2);
mm               1062 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!expect_insert(&mm, &nodes[i],
mm               1073 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_for_each_node_safe(node, next, &mm)
mm               1075 drivers/gpu/drm/selftests/test-drm_mm.c 		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
mm               1082 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, next, &mm)
mm               1084 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm               1092 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm               1099 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 1, U64_MAX - 2);
mm               1112 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!expect_insert(&mm, node,
mm               1125 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, next, &mm) {
mm               1129 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm               1150 drivers/gpu/drm/selftests/test-drm_mm.c static void show_holes(const struct drm_mm *mm, int count)
mm               1155 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
mm               1234 drivers/gpu/drm/selftests/test-drm_mm.c static bool evict_nothing(struct drm_mm *mm,
mm               1244 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
mm               1264 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node(node, mm) {
mm               1278 drivers/gpu/drm/selftests/test-drm_mm.c 	return assert_continuous(mm, nodes[0].node.size);
mm               1281 drivers/gpu/drm/selftests/test-drm_mm.c static bool evict_everything(struct drm_mm *mm,
mm               1291 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
mm               1315 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!assert_one_hole(mm, 0, total_size))
mm               1319 drivers/gpu/drm/selftests/test-drm_mm.c 		err = drm_mm_reserve_node(mm, &e->node);
mm               1327 drivers/gpu/drm/selftests/test-drm_mm.c 	return assert_continuous(mm, nodes[0].node.size);
mm               1330 drivers/gpu/drm/selftests/test-drm_mm.c static int evict_something(struct drm_mm *mm,
mm               1345 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_scan_init_with_range(&scan, mm,
mm               1355 drivers/gpu/drm/selftests/test-drm_mm.c 	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
mm               1361 drivers/gpu/drm/selftests/test-drm_mm.c 		show_holes(mm, 3);
mm               1371 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!assert_node(&tmp, mm, size, alignment, 0) ||
mm               1385 drivers/gpu/drm/selftests/test-drm_mm.c 		err = drm_mm_reserve_node(mm, &e->node);
mm               1393 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!assert_continuous(mm, nodes[0].node.size)) {
mm               1406 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm               1429 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 0, size);
mm               1431 drivers/gpu/drm/selftests/test-drm_mm.c 		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
mm               1440 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!evict_nothing(&mm, size, nodes)) {
mm               1444 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!evict_everything(&mm, size, nodes)) {
mm               1452 drivers/gpu/drm/selftests/test-drm_mm.c 			err = evict_something(&mm, 0, U64_MAX,
mm               1466 drivers/gpu/drm/selftests/test-drm_mm.c 			err = evict_something(&mm, 0, U64_MAX,
mm               1484 drivers/gpu/drm/selftests/test-drm_mm.c 			err = evict_something(&mm, 0, U64_MAX,
mm               1501 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, next, &mm)
mm               1503 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm               1519 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm               1539 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 0, size);
mm               1541 drivers/gpu/drm/selftests/test-drm_mm.c 		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
mm               1552 drivers/gpu/drm/selftests/test-drm_mm.c 			err = evict_something(&mm, range_start, range_end,
mm               1565 drivers/gpu/drm/selftests/test-drm_mm.c 			err = evict_something(&mm, range_start, range_end,
mm               1582 drivers/gpu/drm/selftests/test-drm_mm.c 			err = evict_something(&mm, range_start, range_end,
mm               1598 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, next, &mm)
mm               1600 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm               1620 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm               1645 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_init(&mm, 0, size*count);
mm               1647 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!expect_insert(&mm, &nodes[n],
mm               1660 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
mm               1664 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!assert_continuous(&mm, size))
mm               1679 drivers/gpu/drm/selftests/test-drm_mm.c 				if (!expect_insert(&mm, node,
mm               1707 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_for_each_node_safe(node, next, &mm)
mm               1709 drivers/gpu/drm/selftests/test-drm_mm.c 		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
mm               1715 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, next, &mm)
mm               1717 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm               1734 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm               1758 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_init(&mm, 0, size*count);
mm               1760 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!expect_insert(&mm, &nodes[n],
mm               1767 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!assert_one_hole(&mm, size*(n + 1), size*count))
mm               1771 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!assert_continuous(&mm, size))
mm               1786 drivers/gpu/drm/selftests/test-drm_mm.c 				if (!expect_insert(&mm, node,
mm               1807 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_for_each_node_safe(node, next, &mm)
mm               1809 drivers/gpu/drm/selftests/test-drm_mm.c 		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
mm               1815 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, next, &mm)
mm               1817 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm               1829 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm               1833 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 0, 7);
mm               1838 drivers/gpu/drm/selftests/test-drm_mm.c 	err = drm_mm_reserve_node(&mm, &rsvd_lo);
mm               1847 drivers/gpu/drm/selftests/test-drm_mm.c 	err = drm_mm_reserve_node(&mm, &rsvd_hi);
mm               1860 drivers/gpu/drm/selftests/test-drm_mm.c 	err = drm_mm_insert_node_generic(&mm, &node,
mm               1870 drivers/gpu/drm/selftests/test-drm_mm.c 	err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
mm               1884 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm               1930 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm               1942 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 0, U64_MAX);
mm               1951 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!expect_insert(&mm, node,
mm               1960 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, nn, &mm) {
mm               1973 drivers/gpu/drm/selftests/test-drm_mm.c 	mm.color_adjust = separate_adjacent_colors;
mm               1986 drivers/gpu/drm/selftests/test-drm_mm.c 		err = drm_mm_reserve_node(&mm, node);
mm               2008 drivers/gpu/drm/selftests/test-drm_mm.c 			err = drm_mm_reserve_node(&mm, node);
mm               2019 drivers/gpu/drm/selftests/test-drm_mm.c 			err = drm_mm_reserve_node(&mm, node);
mm               2036 drivers/gpu/drm/selftests/test-drm_mm.c 			if (!expect_insert(&mm, node,
mm               2046 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_for_each_node_safe(node, nn, &mm) {
mm               2075 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, nn, &mm) {
mm               2079 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm               2083 drivers/gpu/drm/selftests/test-drm_mm.c static int evict_color(struct drm_mm *mm,
mm               2099 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_scan_init_with_range(&scan, mm,
mm               2109 drivers/gpu/drm/selftests/test-drm_mm.c 	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
mm               2115 drivers/gpu/drm/selftests/test-drm_mm.c 		show_holes(mm, 3);
mm               2128 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!assert_node(&tmp, mm, size, alignment, color)) {
mm               2140 drivers/gpu/drm/selftests/test-drm_mm.c 		err = drm_mm_reserve_node(mm, &e->node);
mm               2158 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm               2180 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 0, 2*total_size - 1);
mm               2181 drivers/gpu/drm/selftests/test-drm_mm.c 	mm.color_adjust = separate_adjacent_colors;
mm               2183 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!expect_insert(&mm, &nodes[n].node,
mm               2194 drivers/gpu/drm/selftests/test-drm_mm.c 			err = evict_color(&mm, 0, U64_MAX,
mm               2207 drivers/gpu/drm/selftests/test-drm_mm.c 			err = evict_color(&mm, 0, U64_MAX,
mm               2224 drivers/gpu/drm/selftests/test-drm_mm.c 			err = evict_color(&mm, 0, U64_MAX,
mm               2241 drivers/gpu/drm/selftests/test-drm_mm.c 		show_mm(&mm);
mm               2242 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, next, &mm)
mm               2244 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
mm               2261 drivers/gpu/drm/selftests/test-drm_mm.c 	struct drm_mm mm;
mm               2281 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_init(&mm, 0, 2*total_size - 1);
mm               2282 drivers/gpu/drm/selftests/test-drm_mm.c 	mm.color_adjust = separate_adjacent_colors;
mm               2284 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!expect_insert(&mm, &nodes[n].node,
mm               2295 drivers/gpu/drm/selftests/test-drm_mm.c 			err = evict_color(&mm, range_start, range_end,
mm               2308 drivers/gpu/drm/selftests/test-drm_mm.c 			err = evict_color(&mm, range_start, range_end,
mm               2325 drivers/gpu/drm/selftests/test-drm_mm.c 			err = evict_color(&mm, range_start, range_end,
mm               2342 drivers/gpu/drm/selftests/test-drm_mm.c 		show_mm(&mm);
mm               2343 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_for_each_node_safe(node, next, &mm)
mm               2345 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_mm_takedown(&mm);
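The evict_something()/evict_color() fragments above drive drm_mm's two-phase eviction scan: busy nodes are offered to a drm_mm_scan until a large-enough hole is found, nodes the scan does not need are rolled back, the survivors are evicted, and the insertion is retried. A hedged sketch of that protocol; struct busy_node and its list stand in for a driver's LRU bookkeeping:

```c
struct busy_node {
	struct drm_mm_node node;
	struct list_head link;
};

static int evict_then_insert(struct drm_mm *mm, struct busy_node *busy,
			     unsigned int count, struct drm_mm_node *out,
			     u64 size)
{
	struct drm_mm_scan scan;
	struct busy_node *b, *bn;
	LIST_HEAD(evict);
	unsigned int i;

	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_BEST);

	/* Offer busy nodes until the scan has found a big enough hole. */
	for (i = 0; i < count; i++) {
		list_add(&busy[i].link, &evict);
		if (drm_mm_scan_add_block(&scan, &busy[i].node))
			break;
	}

	/* Roll back: drop the nodes the scan decided it does not need. */
	list_for_each_entry_safe(b, bn, &evict, link)
		if (!drm_mm_scan_remove_block(&scan, &b->node))
			list_del(&b->link);

	/* Evict the survivors; the hole they leave fits the request. */
	list_for_each_entry(b, &evict, link)
		drm_mm_remove_node(&b->node);

	return drm_mm_insert_node_generic(mm, out, size, 0, 0,
					  DRM_MM_INSERT_EVICT);
}
```

Roll-back must visit nodes in the reverse order of the drm_mm_scan_add_block() calls; prepending with list_add() and then walking the list forward, as above and in the selftest, preserves that order.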
mm                163 drivers/gpu/drm/tegra/drm.c 		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
mm                206 drivers/gpu/drm/tegra/drm.c 		drm_mm_takedown(&tegra->mm);
mm                242 drivers/gpu/drm/tegra/drm.c 		drm_mm_takedown(&tegra->mm);
mm                994 drivers/gpu/drm/tegra/drm.c 		drm_mm_print(&tegra->mm, &p);
mm                 41 drivers/gpu/drm/tegra/drm.h 	struct drm_mm mm;
mm                119 drivers/gpu/drm/tegra/gem.c 	if (bo->mm)
mm                122 drivers/gpu/drm/tegra/gem.c 	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
mm                123 drivers/gpu/drm/tegra/gem.c 	if (!bo->mm)
mm                128 drivers/gpu/drm/tegra/gem.c 	err = drm_mm_insert_node_generic(&tegra->mm,
mm                129 drivers/gpu/drm/tegra/gem.c 					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
mm                136 drivers/gpu/drm/tegra/gem.c 	bo->paddr = bo->mm->start;
mm                151 drivers/gpu/drm/tegra/gem.c 	drm_mm_remove_node(bo->mm);
mm                154 drivers/gpu/drm/tegra/gem.c 	kfree(bo->mm);
mm                160 drivers/gpu/drm/tegra/gem.c 	if (!bo->mm)
mm                165 drivers/gpu/drm/tegra/gem.c 	drm_mm_remove_node(bo->mm);
mm                168 drivers/gpu/drm/tegra/gem.c 	kfree(bo->mm);
mm                 37 drivers/gpu/drm/tegra/gem.h 	struct drm_mm_node *mm;
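The tegra/gem.c lines above show the common pattern of carving a device address out of a drm_mm: kzalloc a drm_mm_node, insert it PAGE_SIZE-aligned, and treat node->start as the IOVA; teardown is drm_mm_remove_node() plus kfree(). A condensed sketch of the same pattern (the helper name is made up):

```c
/*
 * Allocate a PAGE_SIZE-aligned range from a drm_mm and hand back the
 * node; node->start is the device address. Mirrors the tegra pattern
 * above; "example_iova_alloc" is an illustrative name.
 */
static int example_iova_alloc(struct drm_mm *mm, size_t size,
			      struct drm_mm_node **out)
{
	struct drm_mm_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(mm, node, size, PAGE_SIZE, 0, 0);
	if (err) {
		kfree(node);
		return err;
	}

	*out = node;	/* free later with drm_mm_remove_node() + kfree() */
	return 0;
}
```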
mm                 47 drivers/gpu/drm/ttm/ttm_bo_manager.c 	struct drm_mm mm;
mm                 57 drivers/gpu/drm/ttm/ttm_bo_manager.c 	struct drm_mm *mm = &rman->mm;
mm                 76 drivers/gpu/drm/ttm/ttm_bo_manager.c 	ret = drm_mm_insert_node_in_range(mm, node,
mm                116 drivers/gpu/drm/ttm/ttm_bo_manager.c 	drm_mm_init(&rman->mm, 0, p_size);
mm                125 drivers/gpu/drm/ttm/ttm_bo_manager.c 	struct drm_mm *mm = &rman->mm;
mm                128 drivers/gpu/drm/ttm/ttm_bo_manager.c 	if (drm_mm_clean(mm)) {
mm                129 drivers/gpu/drm/ttm/ttm_bo_manager.c 		drm_mm_takedown(mm);
mm                145 drivers/gpu/drm/ttm/ttm_bo_manager.c 	drm_mm_print(&rman->mm, printer);
mm                107 drivers/gpu/drm/v3d/v3d_bo.c 	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
mm                 78 drivers/gpu/drm/v3d/v3d_drv.h 	struct drm_mm mm;
mm                848 drivers/gpu/drm/v3d/v3d_gem.c 	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);
mm                854 drivers/gpu/drm/v3d/v3d_gem.c 		drm_mm_takedown(&v3d->mm);
mm                866 drivers/gpu/drm/v3d/v3d_gem.c 		drm_mm_takedown(&v3d->mm);
mm                887 drivers/gpu/drm/v3d/v3d_gem.c 	drm_mm_takedown(&v3d->mm);
mm                 50 drivers/gpu/drm/vc4/vc4_crtc.c 	struct drm_mm_node mm;
mm                536 drivers/gpu/drm/vc4/vc4_crtc.c 			  vc4_state->mm.start);
mm                541 drivers/gpu/drm/vc4/vc4_crtc.c 			  vc4_state->mm.start);
mm                651 drivers/gpu/drm/vc4/vc4_crtc.c 	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
mm                693 drivers/gpu/drm/vc4/vc4_crtc.c 	u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
mm                723 drivers/gpu/drm/vc4/vc4_crtc.c 	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
mm                792 drivers/gpu/drm/vc4/vc4_crtc.c 	    (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)) ||
mm                997 drivers/gpu/drm/vc4/vc4_crtc.c 	if (vc4_state->mm.allocated) {
mm               1001 drivers/gpu/drm/vc4/vc4_crtc.c 		drm_mm_remove_node(&vc4_state->mm);
mm                117 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c 	struct drm_mm mm;
mm                772 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c 	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
mm                775 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c 		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
mm               1263 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c 	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
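The two back-to-back drm_mm_insert_node() calls in vmwgfx_cmdbuf.c above read like an allocate/reclaim/retry sequence: if the first insertion fails, the manager presumably frees completed command-buffer space and tries once more. A hedged sketch of that shape, with reclaim() as a placeholder for whatever the driver actually does between the two attempts:

```c
/*
 * Hedged sketch of an allocate/reclaim/retry helper. reclaim() is a
 * placeholder for the driver's space-recovery step between attempts.
 */
static int insert_with_retry(struct drm_mm *mm, struct drm_mm_node *node,
			     u64 pages, void (*reclaim)(void *), void *arg)
{
	int ret;

	ret = drm_mm_insert_node(mm, node, pages);
	if (ret) {
		reclaim(arg);		/* free finished buffers, then retry */
		ret = drm_mm_insert_node(mm, node, pages);
	}
	return ret;
}
```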
mm                202 drivers/infiniband/core/umem.c 	struct mm_struct *mm;
mm                241 drivers/infiniband/core/umem.c 	umem->owning_mm = mm = current->mm;
mm                242 drivers/infiniband/core/umem.c 	mmgrab(mm);
mm                258 drivers/infiniband/core/umem.c 	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
mm                260 drivers/infiniband/core/umem.c 		atomic64_sub(npages, &mm->pinned_vm);
mm                277 drivers/infiniband/core/umem.c 		down_read(&mm->mmap_sem);
mm                284 drivers/infiniband/core/umem.c 			up_read(&mm->mmap_sem);
mm                295 drivers/infiniband/core/umem.c 		up_read(&mm->mmap_sem);
mm                317 drivers/infiniband/core/umem.c 	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
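The umem.c lines above show the RLIMIT_MEMLOCK accounting that guards page pinning: mm->pinned_vm is charged up front with atomic64_add_return(), rolled back if the limit is exceeded, and decremented again on release. A sketch of the charge side of that protocol, following those lines:

```c
#include <linux/sched/signal.h>	/* rlimit() */

/*
 * Charge npages against mm->pinned_vm, enforcing RLIMIT_MEMLOCK as the
 * umem code above does; callers undo this with atomic64_sub() when the
 * pages are released.
 */
static int charge_pinned_pages(struct mm_struct *mm, unsigned long npages)
{
	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long new_pinned;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);	/* roll back */
		return -ENOMEM;
	}
	return 0;
}
```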
mm                 78 drivers/infiniband/core/umem_odp.c 				     struct mm_struct *mm)
mm                170 drivers/infiniband/core/umem_odp.c static struct mmu_notifier *ib_umem_alloc_notifier(struct mm_struct *mm)
mm                181 drivers/infiniband/core/umem_odp.c 	WARN_ON(mm != current->mm);
mm                317 drivers/infiniband/core/umem_odp.c 	umem->owning_mm = current->mm;
mm                390 drivers/infiniband/core/umem_odp.c 	struct mm_struct *mm;
mm                413 drivers/infiniband/core/umem_odp.c 	umem_odp->umem.owning_mm = mm = current->mm;
mm                420 drivers/infiniband/core/umem_odp.c 		down_read(&mm->mmap_sem);
mm                421 drivers/infiniband/core/umem_odp.c 		vma = find_vma(mm, ib_umem_start(umem_odp));
mm                423 drivers/infiniband/core/umem_odp.c 			up_read(&mm->mmap_sem);
mm                429 drivers/infiniband/core/umem_odp.c 		up_read(&mm->mmap_sem);
mm                977 drivers/infiniband/core/uverbs_main.c 		struct mm_struct *mm = NULL;
mm                986 drivers/infiniband/core/uverbs_main.c 			mm = priv->vma->vm_mm;
mm                987 drivers/infiniband/core/uverbs_main.c 			ret = mmget_not_zero(mm);
mm                990 drivers/infiniband/core/uverbs_main.c 				mm = NULL;
mm                996 drivers/infiniband/core/uverbs_main.c 		if (!mm)
mm               1005 drivers/infiniband/core/uverbs_main.c 		down_read(&mm->mmap_sem);
mm               1006 drivers/infiniband/core/uverbs_main.c 		if (!mmget_still_valid(mm))
mm               1013 drivers/infiniband/core/uverbs_main.c 			if (vma->vm_mm != mm)
mm               1022 drivers/infiniband/core/uverbs_main.c 		up_read(&mm->mmap_sem);
mm               1023 drivers/infiniband/core/uverbs_main.c 		mmput(mm);
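The uverbs_main.c fragment above is the canonical way to borrow another process's address space: take vma->vm_mm, promote it with mmget_not_zero() (failing gracefully if the process is exiting), work under mmap_sem, then mmput(). A compact sketch, with do_work() standing in for the per-VMA zapping the real code performs:

```c
#include <linux/sched/mm.h>	/* mmget_not_zero(), mmput() */

/*
 * Borrow a user address space safely: only proceed if the process
 * still has live users, and hold mmap_sem while walking its VMAs.
 */
static void with_user_mm(struct mm_struct *mm,
			 void (*do_work)(struct mm_struct *))
{
	if (!mmget_not_zero(mm))
		return;			/* owner is exiting; nothing to do */

	down_read(&mm->mmap_sem);
	do_work(mm);
	up_read(&mm->mmap_sem);

	mmput(mm);
}
```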
mm                 69 drivers/infiniband/hw/cxgb3/iwch_provider.c 	struct iwch_mm_entry *mm, *tmp;
mm                 72 drivers/infiniband/hw/cxgb3/iwch_provider.c 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
mm                 73 drivers/infiniband/hw/cxgb3/iwch_provider.c 		kfree(mm);
mm                162 drivers/infiniband/hw/cxgb3/iwch_provider.c 		struct iwch_mm_entry *mm;
mm                166 drivers/infiniband/hw/cxgb3/iwch_provider.c 		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
mm                167 drivers/infiniband/hw/cxgb3/iwch_provider.c 		if (!mm) {
mm                177 drivers/infiniband/hw/cxgb3/iwch_provider.c 		mm->key = uresp.key;
mm                178 drivers/infiniband/hw/cxgb3/iwch_provider.c 		mm->addr = virt_to_phys(chp->cq.queue);
mm                182 drivers/infiniband/hw/cxgb3/iwch_provider.c 			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
mm                186 drivers/infiniband/hw/cxgb3/iwch_provider.c 			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
mm                188 drivers/infiniband/hw/cxgb3/iwch_provider.c 			uresp.memsize = mm->len;
mm                193 drivers/infiniband/hw/cxgb3/iwch_provider.c 			kfree(mm);
mm                197 drivers/infiniband/hw/cxgb3/iwch_provider.c 		insert_mmap(ucontext, mm);
mm                243 drivers/infiniband/hw/cxgb3/iwch_provider.c 	struct iwch_mm_entry *mm;
mm                257 drivers/infiniband/hw/cxgb3/iwch_provider.c 	mm = remove_mmap(ucontext, key, len);
mm                258 drivers/infiniband/hw/cxgb3/iwch_provider.c 	if (!mm)
mm                260 drivers/infiniband/hw/cxgb3/iwch_provider.c 	addr = mm->addr;
mm                261 drivers/infiniband/hw/cxgb3/iwch_provider.c 	kfree(mm);
mm                210 drivers/infiniband/hw/cxgb3/iwch_provider.h 	struct iwch_mm_entry *mm;
mm                215 drivers/infiniband/hw/cxgb3/iwch_provider.h 		mm = list_entry(pos, struct iwch_mm_entry, entry);
mm                216 drivers/infiniband/hw/cxgb3/iwch_provider.h 		if (mm->key == key && mm->len == len) {
mm                217 drivers/infiniband/hw/cxgb3/iwch_provider.h 			list_del_init(&mm->entry);
mm                221 drivers/infiniband/hw/cxgb3/iwch_provider.h 				 (unsigned long long)mm->addr, mm->len);
mm                222 drivers/infiniband/hw/cxgb3/iwch_provider.h 			return mm;
mm                230 drivers/infiniband/hw/cxgb3/iwch_provider.h 			       struct iwch_mm_entry *mm)
mm                234 drivers/infiniband/hw/cxgb3/iwch_provider.h 		 __func__, mm->key, (unsigned long long)mm->addr, mm->len);
mm                235 drivers/infiniband/hw/cxgb3/iwch_provider.h 	list_add_tail(&mm->entry, &ucontext->mmaps);
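The iwch_provider lines above implement a small mmap-cookie registry: object setup queues a {key, physical address, length} entry, and the later mmap() call claims it by (key, len) and frees it after mapping. cxgb4, ocrdma, and qedr below repeat the same idea. A generic sketch of the lookup-and-remove half (the struct and function names here are illustrative):

```c
struct mmap_entry {
	struct list_head entry;
	u32 key;		/* offset cookie handed to userspace */
	u64 addr;		/* physical address to map */
	unsigned int len;	/* mapping length */
};

/* Claim the entry matching (key, len); caller kfree()s after mapping. */
static struct mmap_entry *take_mmap_entry(struct list_head *mmaps,
					  u32 key, unsigned int len)
{
	struct mmap_entry *mm, *tmp;

	list_for_each_entry_safe(mm, tmp, mmaps, entry) {
		if (mm->key == key && mm->len == len) {
			list_del_init(&mm->entry);
			return mm;
		}
	}
	return NULL;
}
```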
mm               1002 drivers/infiniband/hw/cxgb4/cq.c 	struct c4iw_mm_entry *mm, *mm2;
mm               1087 drivers/infiniband/hw/cxgb4/cq.c 		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
mm               1088 drivers/infiniband/hw/cxgb4/cq.c 		if (!mm)
mm               1117 drivers/infiniband/hw/cxgb4/cq.c 		mm->key = uresp.key;
mm               1118 drivers/infiniband/hw/cxgb4/cq.c 		mm->addr = virt_to_phys(chp->cq.queue);
mm               1119 drivers/infiniband/hw/cxgb4/cq.c 		mm->len = chp->cq.memsize;
mm               1120 drivers/infiniband/hw/cxgb4/cq.c 		insert_mmap(ucontext, mm);
mm               1135 drivers/infiniband/hw/cxgb4/cq.c 	kfree(mm);
mm                552 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 	struct c4iw_mm_entry *mm;
mm                557 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 		mm = list_entry(pos, struct c4iw_mm_entry, entry);
mm                558 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 		if (mm->key == key && mm->len == len) {
mm                559 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 			list_del_init(&mm->entry);
mm                562 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 				 (unsigned long long)mm->addr, mm->len);
mm                563 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 			return mm;
mm                571 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 			       struct c4iw_mm_entry *mm)
mm                575 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 		 mm->key, (unsigned long long)mm->addr, mm->len);
mm                576 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 	list_add_tail(&mm->entry, &ucontext->mmaps);
mm                 65 drivers/infiniband/hw/cxgb4/provider.c 	struct c4iw_mm_entry *mm, *tmp;
mm                 70 drivers/infiniband/hw/cxgb4/provider.c 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
mm                 71 drivers/infiniband/hw/cxgb4/provider.c 		kfree(mm);
mm                 83 drivers/infiniband/hw/cxgb4/provider.c 	struct c4iw_mm_entry *mm = NULL;
mm                 94 drivers/infiniband/hw/cxgb4/provider.c 		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
mm                 95 drivers/infiniband/hw/cxgb4/provider.c 		if (!mm) {
mm                112 drivers/infiniband/hw/cxgb4/provider.c 		mm->key = uresp.status_page_key;
mm                113 drivers/infiniband/hw/cxgb4/provider.c 		mm->addr = virt_to_phys(rhp->rdev.status_page);
mm                114 drivers/infiniband/hw/cxgb4/provider.c 		mm->len = PAGE_SIZE;
mm                115 drivers/infiniband/hw/cxgb4/provider.c 		insert_mmap(context, mm);
mm                119 drivers/infiniband/hw/cxgb4/provider.c 	kfree(mm);
mm                130 drivers/infiniband/hw/cxgb4/provider.c 	struct c4iw_mm_entry *mm;
mm                143 drivers/infiniband/hw/cxgb4/provider.c 	mm = remove_mmap(ucontext, key, len);
mm                144 drivers/infiniband/hw/cxgb4/provider.c 	if (!mm)
mm                146 drivers/infiniband/hw/cxgb4/provider.c 	addr = mm->addr;
mm                147 drivers/infiniband/hw/cxgb4/provider.c 	kfree(mm);
mm                209 drivers/infiniband/hw/hfi1/file_ops.c 	fd->mm = current->mm;
mm                210 drivers/infiniband/hw/hfi1/file_ops.c 	mmgrab(fd->mm);
mm                715 drivers/infiniband/hw/hfi1/file_ops.c 	mmdrop(fdata->mm);
mm               1461 drivers/infiniband/hw/hfi1/hfi.h 	struct mm_struct *mm;
mm               2016 drivers/infiniband/hw/hfi1/hfi.h bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
mm               2018 drivers/infiniband/hw/hfi1/hfi.h int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
mm               2020 drivers/infiniband/hw/hfi1/hfi.h void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
mm                 61 drivers/infiniband/hw/hfi1/mmu_rb.c 	struct mm_struct *mm;
mm                 95 drivers/infiniband/hw/hfi1/mmu_rb.c int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
mm                113 drivers/infiniband/hw/hfi1/mmu_rb.c 	handlr->mm = mm;
mm                119 drivers/infiniband/hw/hfi1/mmu_rb.c 	ret = mmu_notifier_register(&handlr->mn, handlr->mm);
mm                137 drivers/infiniband/hw/hfi1/mmu_rb.c 	mmu_notifier_unregister(&handler->mn, handler->mm);
mm                 74 drivers/infiniband/hw/hfi1/mmu_rb.h int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
mm                114 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_rb_ops,
mm                208 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	hfi1_release_user_pages(fd->mm, pages, npages, mapped);
mm                250 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
mm                255 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
mm                 71 drivers/infiniband/hw/hfi1/user_pages.c bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
mm                 94 drivers/infiniband/hw/hfi1/user_pages.c 	pinned = atomic64_read(&mm->pinned_vm);
mm                103 drivers/infiniband/hw/hfi1/user_pages.c int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
mm                113 drivers/infiniband/hw/hfi1/user_pages.c 	atomic64_add(ret, &mm->pinned_vm);
mm                118 drivers/infiniband/hw/hfi1/user_pages.c void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
mm                123 drivers/infiniband/hw/hfi1/user_pages.c 	if (mm) { /* during close after signal, mm can be NULL */
mm                124 drivers/infiniband/hw/hfi1/user_pages.c 		atomic64_sub(npages, &mm->pinned_vm);
mm                 85 drivers/infiniband/hw/hfi1/user_sdma.c static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
mm                191 drivers/infiniband/hw/hfi1/user_sdma.c 	pq->mm = fd->mm;
mm                233 drivers/infiniband/hw/hfi1/user_sdma.c 	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
mm                983 drivers/infiniband/hw/hfi1/user_sdma.c 	if (!hfi1_can_pin_pages(pq->dd, pq->mm,
mm                989 drivers/infiniband/hw/hfi1/user_sdma.c 	pinned = hfi1_acquire_user_pages(pq->mm,
mm                998 drivers/infiniband/hw/hfi1/user_sdma.c 		unpin_vector_pages(pq->mm, pages, node->npages, pinned);
mm               1011 drivers/infiniband/hw/hfi1/user_sdma.c 		unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
mm               1079 drivers/infiniband/hw/hfi1/user_sdma.c static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
mm               1082 drivers/infiniband/hw/hfi1/user_sdma.c 	hfi1_release_user_pages(mm, pages + start, npages, false);
mm                136 drivers/infiniband/hw/hfi1/user_sdma.h 	struct mm_struct *mm;
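The hfi1 mmu_rb lines above register an mmu_notifier on the owning mm so pinned ranges can be invalidated when the address space changes underneath the driver. A minimal sketch of that hookup; the invalidate callback body is a stub:

```c
#include <linux/mmu_notifier.h>

/* Stub invalidation hook: drop driver mappings covering [start, end). */
static void example_invalidate_range(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	/* tear down or revalidate any pinned ranges in the window */
}

static const struct mmu_notifier_ops example_mn_ops = {
	.invalidate_range = example_invalidate_range,
};

/* Attach the notifier to mm; pair with mmu_notifier_unregister(). */
static int example_mn_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	mn->ops = &example_mn_ops;
	return mmu_notifier_register(mn, mm);
}
```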
mm                383 drivers/infiniband/hw/mlx4/mr.c 		down_read(&current->mm->mmap_sem);
mm                389 drivers/infiniband/hw/mlx4/mr.c 		vma = find_vma(current->mm, untagged_start);
mm                398 drivers/infiniband/hw/mlx4/mr.c 		up_read(&current->mm->mmap_sem);
mm                202 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	struct ocrdma_mm *mm;
mm                204 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
mm                205 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	if (mm == NULL)
mm                207 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	mm->key.phy_addr = phy_addr;
mm                208 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	mm->key.len = len;
mm                209 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	INIT_LIST_HEAD(&mm->entry);
mm                212 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	list_add_tail(&mm->entry, &uctx->mm_head);
mm                220 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	struct ocrdma_mm *mm, *tmp;
mm                223 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
mm                224 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
mm                227 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		list_del(&mm->entry);
mm                228 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		kfree(mm);
mm                238 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	struct ocrdma_mm *mm;
mm                241 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	list_for_each_entry(mm, &uctx->mm_head, entry) {
mm                242 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
mm                528 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	struct ocrdma_mm *mm, *tmp;
mm                539 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
mm                540 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		list_del(&mm->entry);
mm                541 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		kfree(mm);
mm                263 drivers/infiniband/hw/qedr/verbs.c 	struct qedr_mm *mm;
mm                265 drivers/infiniband/hw/qedr/verbs.c 	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
mm                266 drivers/infiniband/hw/qedr/verbs.c 	if (!mm)
mm                269 drivers/infiniband/hw/qedr/verbs.c 	mm->key.phy_addr = phy_addr;
mm                277 drivers/infiniband/hw/qedr/verbs.c 	mm->key.len = roundup(len, PAGE_SIZE);
mm                278 drivers/infiniband/hw/qedr/verbs.c 	INIT_LIST_HEAD(&mm->entry);
mm                281 drivers/infiniband/hw/qedr/verbs.c 	list_add(&mm->entry, &uctx->mm_head);
mm                286 drivers/infiniband/hw/qedr/verbs.c 		 (unsigned long long)mm->key.phy_addr,
mm                287 drivers/infiniband/hw/qedr/verbs.c 		 (unsigned long)mm->key.len, uctx);
mm                296 drivers/infiniband/hw/qedr/verbs.c 	struct qedr_mm *mm;
mm                299 drivers/infiniband/hw/qedr/verbs.c 	list_for_each_entry(mm, &uctx->mm_head, entry) {
mm                300 drivers/infiniband/hw/qedr/verbs.c 		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
mm                309 drivers/infiniband/hw/qedr/verbs.c 		 mm->key.phy_addr, mm->key.len, uctx, found);
mm                372 drivers/infiniband/hw/qedr/verbs.c 	struct qedr_mm *mm, *tmp;
mm                378 drivers/infiniband/hw/qedr/verbs.c 	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
mm                381 drivers/infiniband/hw/qedr/verbs.c 			 mm->key.phy_addr, mm->key.len, uctx);
mm                382 drivers/infiniband/hw/qedr/verbs.c 		list_del(&mm->entry);
mm                383 drivers/infiniband/hw/qedr/verbs.c 		kfree(mm);
mm                102 drivers/infiniband/hw/qib/qib_user_pages.c 	locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);
mm                109 drivers/infiniband/hw/qib/qib_user_pages.c 	down_read(&current->mm->mmap_sem);
mm                116 drivers/infiniband/hw/qib/qib_user_pages.c 			up_read(&current->mm->mmap_sem);
mm                120 drivers/infiniband/hw/qib/qib_user_pages.c 	up_read(&current->mm->mmap_sem);
mm                126 drivers/infiniband/hw/qib/qib_user_pages.c 	atomic64_sub(num_pages, &current->mm->pinned_vm);
mm                135 drivers/infiniband/hw/qib/qib_user_pages.c 	if (current->mm)
mm                136 drivers/infiniband/hw/qib/qib_user_pages.c 		atomic64_sub(num_pages, &current->mm->pinned_vm);
mm                102 drivers/infiniband/hw/usnic/usnic_uiom.c 	struct mm_struct *mm;
mm                125 drivers/infiniband/hw/usnic/usnic_uiom.c 	uiomr->owning_mm = mm = current->mm;
mm                126 drivers/infiniband/hw/usnic/usnic_uiom.c 	down_read(&mm->mmap_sem);
mm                128 drivers/infiniband/hw/usnic/usnic_uiom.c 	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
mm                186 drivers/infiniband/hw/usnic/usnic_uiom.c 		atomic64_sub(npages, &current->mm->pinned_vm);
mm                190 drivers/infiniband/hw/usnic/usnic_uiom.c 	up_read(&mm->mmap_sem);
mm                391 drivers/infiniband/sw/siw/siw_mem.c 	mm_s = current->mm;
mm               1324 drivers/infiniband/sw/siw/siw_verbs.c 		if (num_pages > mem_limit - current->mm->locked_vm) {
mm               1327 drivers/infiniband/sw/siw/siw_verbs.c 				   current->mm->locked_vm);
mm                 41 drivers/iommu/amd_iommu_v2.c 	struct mm_struct *mm;			/* mm_struct for the faults */
mm                 72 drivers/iommu/amd_iommu_v2.c 	struct mm_struct *mm;
mm                337 drivers/iommu/amd_iommu_v2.c 		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
mm                362 drivers/iommu/amd_iommu_v2.c 				struct mm_struct *mm,
mm                378 drivers/iommu/amd_iommu_v2.c static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
mm                478 drivers/iommu/amd_iommu_v2.c 	struct mm_struct *mm;
mm                481 drivers/iommu/amd_iommu_v2.c 	mm = fault->state->mm;
mm                490 drivers/iommu/amd_iommu_v2.c 	down_read(&mm->mmap_sem);
mm                491 drivers/iommu/amd_iommu_v2.c 	vma = find_extend_vma(mm, address);
mm                502 drivers/iommu/amd_iommu_v2.c 	up_read(&mm->mmap_sem);
mm                606 drivers/iommu/amd_iommu_v2.c 	struct mm_struct *mm;
mm                635 drivers/iommu/amd_iommu_v2.c 	mm                        = get_task_mm(task);
mm                636 drivers/iommu/amd_iommu_v2.c 	pasid_state->mm           = mm;
mm                643 drivers/iommu/amd_iommu_v2.c 	if (pasid_state->mm == NULL)
mm                646 drivers/iommu/amd_iommu_v2.c 	mmu_notifier_register(&pasid_state->mn, mm);
mm                653 drivers/iommu/amd_iommu_v2.c 					__pa(pasid_state->mm->pgd));
mm                665 drivers/iommu/amd_iommu_v2.c 	mmput(mm);
mm                673 drivers/iommu/amd_iommu_v2.c 	mmu_notifier_unregister(&pasid_state->mn, mm);
mm                674 drivers/iommu/amd_iommu_v2.c 	mmput(mm);
mm                721 drivers/iommu/amd_iommu_v2.c 	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
mm                167 drivers/iommu/intel-svm.c 				   struct mm_struct *mm,
mm                176 drivers/iommu/intel-svm.c static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
mm                216 drivers/iommu/intel-svm.c 	struct mm_struct *mm = NULL;
mm                234 drivers/iommu/intel-svm.c 		mm = get_task_mm(current);
mm                235 drivers/iommu/intel-svm.c 		BUG_ON(!mm);
mm                243 drivers/iommu/intel-svm.c 			if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
mm                328 drivers/iommu/intel-svm.c 		svm->mm = mm;
mm                333 drivers/iommu/intel-svm.c 		if (mm) {
mm                334 drivers/iommu/intel-svm.c 			ret = mmu_notifier_register(&svm->notifier, mm);
mm                345 drivers/iommu/intel-svm.c 				mm ? mm->pgd : init_mm.pgd,
mm                347 drivers/iommu/intel-svm.c 				mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
mm                350 drivers/iommu/intel-svm.c 			if (mm)
mm                351 drivers/iommu/intel-svm.c 				mmu_notifier_unregister(&svm->notifier, mm);
mm                366 drivers/iommu/intel-svm.c 						mm ? mm->pgd : init_mm.pgd,
mm                368 drivers/iommu/intel-svm.c 						mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
mm                382 drivers/iommu/intel-svm.c 	if (mm)
mm                383 drivers/iommu/intel-svm.c 		mmput(mm);
mm                423 drivers/iommu/intel-svm.c 					if (svm->mm)
mm                424 drivers/iommu/intel-svm.c 						mmu_notifier_unregister(&svm->notifier, svm->mm);
mm                462 drivers/iommu/intel-svm.c 	if (!svm->mm)
mm                464 drivers/iommu/intel-svm.c 	else if (atomic_read(&svm->mm->mm_users) > 0)
mm                584 drivers/iommu/intel-svm.c 		if (!svm->mm)
mm                592 drivers/iommu/intel-svm.c 		if (!mmget_not_zero(svm->mm))
mm                595 drivers/iommu/intel-svm.c 		down_read(&svm->mm->mmap_sem);
mm                596 drivers/iommu/intel-svm.c 		vma = find_extend_vma(svm->mm, address);
mm                610 drivers/iommu/intel-svm.c 		up_read(&svm->mm->mmap_sem);
mm                611 drivers/iommu/intel-svm.c 		mmput(svm->mm);
mm               2462 drivers/iommu/iommu.c iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
mm               2487 drivers/iommu/iommu.c 	handle = ops->sva_bind(dev, mm, drvdata);
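iommu_sva_bind_device() above is the core entry point for shared virtual addressing: it binds a device to a process's mm (via the driver's sva_bind op) so the device can fault pages in directly. A short usage sketch, assuming dev already sits behind an SVA-capable IOMMU:

```c
#include <linux/iommu.h>

/* Bind the calling process's address space to a device, then unbind. */
static int example_sva_bind(struct device *dev)
{
	struct iommu_sva *handle;

	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... device can now translate and fault in current's mm ... */

	iommu_sva_unbind_device(handle);
	return 0;
}
```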
mm                258 drivers/media/dvb-frontends/drxd_hard.c 	u8 mm[6] = { reg & 0xff, (reg >> 16) & 0xff,
mm                263 drivers/media/dvb-frontends/drxd_hard.c 	if (i2c_write(state->i2c, adr, mm, 6) < 0)
mm                271 drivers/media/dvb-frontends/drxd_hard.c 	u8 mm[8] = { reg & 0xff, (reg >> 16) & 0xff,
mm                277 drivers/media/dvb-frontends/drxd_hard.c 	if (i2c_write(state->i2c, adr, mm, 8) < 0)
mm                286 drivers/media/dvb-frontends/drxd_hard.c 	u8 mm[CHUNK_SIZE + 4] = { reg & 0xff, (reg >> 16) & 0xff,
mm                292 drivers/media/dvb-frontends/drxd_hard.c 		mm[4 + i] = data[i];
mm                293 drivers/media/dvb-frontends/drxd_hard.c 	if (i2c_write(state->i2c, adr, mm, 4 + len) < 0) {
mm                365 drivers/media/dvb-frontends/drxk_hard.c 	u8 adr = state->demod_address, mm[6], len;
mm                370 drivers/media/dvb-frontends/drxk_hard.c 		mm[0] = (((reg << 1) & 0xFF) | 0x01);
mm                371 drivers/media/dvb-frontends/drxk_hard.c 		mm[1] = ((reg >> 16) & 0xFF);
mm                372 drivers/media/dvb-frontends/drxk_hard.c 		mm[2] = ((reg >> 24) & 0xFF) | flags;
mm                373 drivers/media/dvb-frontends/drxk_hard.c 		mm[3] = ((reg >> 7) & 0xFF);
mm                376 drivers/media/dvb-frontends/drxk_hard.c 		mm[0] = ((reg << 1) & 0xFF);
mm                377 drivers/media/dvb-frontends/drxk_hard.c 		mm[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
mm                380 drivers/media/dvb-frontends/drxk_hard.c 	mm[len] = data & 0xff;
mm                381 drivers/media/dvb-frontends/drxk_hard.c 	mm[len + 1] = (data >> 8) & 0xff;
mm                384 drivers/media/dvb-frontends/drxk_hard.c 	return i2c_write(state, adr, mm, len + 2);
mm                394 drivers/media/dvb-frontends/drxk_hard.c 	u8 adr = state->demod_address, mm[8], len;
mm                399 drivers/media/dvb-frontends/drxk_hard.c 		mm[0] = (((reg << 1) & 0xFF) | 0x01);
mm                400 drivers/media/dvb-frontends/drxk_hard.c 		mm[1] = ((reg >> 16) & 0xFF);
mm                401 drivers/media/dvb-frontends/drxk_hard.c 		mm[2] = ((reg >> 24) & 0xFF) | flags;
mm                402 drivers/media/dvb-frontends/drxk_hard.c 		mm[3] = ((reg >> 7) & 0xFF);
mm                405 drivers/media/dvb-frontends/drxk_hard.c 		mm[0] = ((reg << 1) & 0xFF);
mm                406 drivers/media/dvb-frontends/drxk_hard.c 		mm[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
mm                409 drivers/media/dvb-frontends/drxk_hard.c 	mm[len] = data & 0xff;
mm                410 drivers/media/dvb-frontends/drxk_hard.c 	mm[len + 1] = (data >> 8) & 0xff;
mm                411 drivers/media/dvb-frontends/drxk_hard.c 	mm[len + 2] = (data >> 16) & 0xff;
mm                412 drivers/media/dvb-frontends/drxk_hard.c 	mm[len + 3] = (data >> 24) & 0xff;
mm                415 drivers/media/dvb-frontends/drxk_hard.c 	return i2c_write(state, adr, mm, len + 4);
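The drxd/drxk fragments above all build an I2C message in a local mm[] buffer: the register address is serialised first (two bytes in short form, four bytes with flags in long form), then the payload follows little-endian. Reassembling the drxk 16-bit write path shown above into one helper, as a sketch:

```c
#include <linux/types.h>

/*
 * Pack a drxk-style 16-bit register write into mm[]; returns the
 * number of bytes to hand to the i2c write. Masks follow the
 * fragments above.
 */
static int pack_write16(u8 *mm, u32 reg, u16 data, u8 flags, bool long_form)
{
	u8 len;

	if (long_form) {
		mm[0] = ((reg << 1) & 0xff) | 0x01;
		mm[1] = (reg >> 16) & 0xff;
		mm[2] = ((reg >> 24) & 0xff) | flags;
		mm[3] = (reg >> 7) & 0xff;
		len = 4;
	} else {
		mm[0] = (reg << 1) & 0xff;
		mm[1] = ((reg >> 16) & 0x0f) | ((reg >> 18) & 0xf0);
		len = 2;
	}
	mm[len] = data & 0xff;		/* payload is little-endian */
	mm[len + 1] = (data >> 8) & 0xff;
	return len + 2;
}
```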
mm                537 drivers/media/v4l2-core/videobuf-core.c 		down_read(&current->mm->mmap_sem);
mm                624 drivers/media/v4l2-core/videobuf-core.c 		up_read(&current->mm->mmap_sem);
mm                161 drivers/media/v4l2-core/videobuf-dma-contig.c 	struct mm_struct *mm = current->mm;
mm                172 drivers/media/v4l2-core/videobuf-dma-contig.c 	down_read(&mm->mmap_sem);
mm                174 drivers/media/v4l2-core/videobuf-dma-contig.c 	vma = find_vma(mm, untagged_baddr);
mm                204 drivers/media/v4l2-core/videobuf-dma-contig.c 	up_read(&current->mm->mmap_sem);
mm                203 drivers/media/v4l2-core/videobuf-dma-sg.c 	down_read(&current->mm->mmap_sem);
mm                205 drivers/media/v4l2-core/videobuf-dma-sg.c 	up_read(&current->mm->mmap_sem);
mm                290 drivers/misc/cxl/api.c 		ctx->mm = get_task_mm(current);
mm                295 drivers/misc/cxl/api.c 		if (ctx->mm) {
mm                297 drivers/misc/cxl/api.c 			mmput(ctx->mm);
mm                299 drivers/misc/cxl/api.c 			mm_context_add_copro(ctx->mm);
mm                319 drivers/misc/cxl/api.c 			if (ctx->mm)
mm                320 drivers/misc/cxl/api.c 				mm_context_remove_copro(ctx->mm);
mm                 66 drivers/misc/cxl/base.c void cxl_slbia(struct mm_struct *mm)
mm                 75 drivers/misc/cxl/base.c 	    calls->cxl_slbia(mm);
mm                269 drivers/misc/cxl/context.c 	if (ctx->mm)
mm                270 drivers/misc/cxl/context.c 		mm_context_remove_copro(ctx->mm);
mm                271 drivers/misc/cxl/context.c 	ctx->mm = NULL;
mm                354 drivers/misc/cxl/context.c 	if (ctx->mm)
mm                355 drivers/misc/cxl/context.c 		atomic_inc(&ctx->mm->mm_count);
mm                360 drivers/misc/cxl/context.c 	if (ctx->mm)
mm                361 drivers/misc/cxl/context.c 		mmdrop(ctx->mm);
mm                617 drivers/misc/cxl/cxl.h 	struct mm_struct *mm;
mm                854 drivers/misc/cxl/cxl.h 	void (*cxl_slbia)(struct mm_struct *mm);
mm                970 drivers/misc/cxl/cxl.h int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar);
mm                173 drivers/misc/cxl/cxllib.c 	struct mm_struct *mm = NULL;
mm                185 drivers/misc/cxl/cxllib.c 		mm = get_task_mm(task);
mm                186 drivers/misc/cxl/cxllib.c 		if (mm == NULL)
mm                192 drivers/misc/cxl/cxllib.c 		attr->pid = mm->context.id;
mm                193 drivers/misc/cxl/cxllib.c 		mmput(mm);
mm                203 drivers/misc/cxl/cxllib.c static int get_vma_info(struct mm_struct *mm, u64 addr,
mm                210 drivers/misc/cxl/cxllib.c 	down_read(&mm->mmap_sem);
mm                212 drivers/misc/cxl/cxllib.c 	vma = find_vma(mm, addr);
mm                221 drivers/misc/cxl/cxllib.c 	up_read(&mm->mmap_sem);
mm                225 drivers/misc/cxl/cxllib.c int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
mm                231 drivers/misc/cxl/cxllib.c 	if (mm == NULL)
mm                240 drivers/misc/cxl/cxllib.c 	rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size);
mm                261 drivers/misc/cxl/cxllib.c 			rc = get_vma_info(mm, dar, &vma_start, &vma_end,
mm                267 drivers/misc/cxl/cxllib.c 		rc = cxl_handle_mm_fault(mm, flags, dar);
mm                 84 drivers/misc/cxl/fault.c static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
mm                 90 drivers/misc/cxl/fault.c 	if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
mm                113 drivers/misc/cxl/fault.c 				   struct mm_struct *mm, u64 ea)
mm                120 drivers/misc/cxl/fault.c 	if ((rc = cxl_fault_segment(ctx, mm, ea)))
mm                131 drivers/misc/cxl/fault.c int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
mm                144 drivers/misc/cxl/fault.c 	if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
mm                145 drivers/misc/cxl/fault.c 		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
mm                153 drivers/misc/cxl/fault.c 	if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
mm                167 drivers/misc/cxl/fault.c 		if (!mm && (get_region_id(dar) != USER_REGION_ID))
mm                174 drivers/misc/cxl/fault.c 		hash_page_mm(mm, dar, access, 0x300, inv_flags);
mm                181 drivers/misc/cxl/fault.c 				  struct mm_struct *mm,
mm                186 drivers/misc/cxl/fault.c 	if (cxl_handle_mm_fault(mm, dsisr, dar)) {
mm                200 drivers/misc/cxl/fault.c 	if (ctx->mm == NULL)
mm                203 drivers/misc/cxl/fault.c 	if (!atomic_inc_not_zero(&ctx->mm->mm_users))
mm                206 drivers/misc/cxl/fault.c 	return ctx->mm;
mm                234 drivers/misc/cxl/fault.c 	struct mm_struct *mm = NULL;
mm                260 drivers/misc/cxl/fault.c 		mm = get_mem_context(ctx);
mm                261 drivers/misc/cxl/fault.c 		if (mm == NULL) {
mm                273 drivers/misc/cxl/fault.c 		cxl_handle_segment_miss(ctx, mm, dar);
mm                275 drivers/misc/cxl/fault.c 		cxl_handle_page_fault(ctx, mm, dsisr, dar);
mm                279 drivers/misc/cxl/fault.c 	if (mm)
mm                280 drivers/misc/cxl/fault.c 		mmput(mm);
mm                285 drivers/misc/cxl/fault.c 	struct mm_struct *mm;
mm                287 drivers/misc/cxl/fault.c 	mm = get_mem_context(ctx);
mm                288 drivers/misc/cxl/fault.c 	if (mm == NULL) {
mm                294 drivers/misc/cxl/fault.c 	cxl_fault_segment(ctx, mm, ea);
mm                296 drivers/misc/cxl/fault.c 	mmput(mm);
mm                315 drivers/misc/cxl/fault.c 	struct mm_struct *mm;
mm                317 drivers/misc/cxl/fault.c 	mm = get_mem_context(ctx);
mm                318 drivers/misc/cxl/fault.c 	if (mm == NULL) {
mm                324 drivers/misc/cxl/fault.c 	down_read(&mm->mmap_sem);
mm                325 drivers/misc/cxl/fault.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm                328 drivers/misc/cxl/fault.c 			rc = copro_calculate_slb(mm, ea, &slb);
mm                339 drivers/misc/cxl/fault.c 	up_read(&mm->mmap_sem);
mm                341 drivers/misc/cxl/fault.c 	mmput(mm);
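get_mem_context() in cxl/fault.c above promotes the context's stashed mm pointer into a usable reference, failing if the owning process has already dropped its last user. The open-coded atomic_inc_not_zero(&mm->mm_users) is exactly what mmget_not_zero() wraps; a sketch:

```c
#include <linux/sched/mm.h>

/*
 * Return a referenced mm, or NULL if the context never had one or the
 * owning process is gone. The caller must mmput() when done, as the
 * fault paths above do.
 */
static struct mm_struct *grab_ctx_mm(struct mm_struct *mm)
{
	if (!mm)
		return NULL;
	if (!mmget_not_zero(mm))	/* == atomic_inc_not_zero(&mm->mm_users) */
		return NULL;
	return mm;
}
```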
mm                219 drivers/misc/cxl/file.c 	ctx->mm = get_task_mm(current);
mm                224 drivers/misc/cxl/file.c 	if (ctx->mm) {
mm                226 drivers/misc/cxl/file.c 		mmput(ctx->mm);
mm                228 drivers/misc/cxl/file.c 		mm_context_add_copro(ctx->mm);
mm                261 drivers/misc/cxl/file.c 		if (ctx->mm)
mm                262 drivers/misc/cxl/file.c 			mm_context_remove_copro(ctx->mm);
mm                 57 drivers/misc/cxl/main.c static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
mm                 61 drivers/misc/cxl/main.c 	if (ctx->mm != mm)
mm                 75 drivers/misc/cxl/main.c static inline void cxl_slbia_core(struct mm_struct *mm)
mm                 94 drivers/misc/cxl/main.c 				_cxl_slbia(ctx, mm);
mm                676 drivers/misc/cxl/native.c 		if (ctx->mm == NULL) {
mm                681 drivers/misc/cxl/native.c 		pid = ctx->mm->context.id;
mm                791 drivers/misc/fastrpc.c 			vma = find_vma(current->mm, ctx->args[i].ptr);
mm               1052 drivers/misc/habanalabs/habanalabs.h 	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
mm                185 drivers/misc/mic/scif/scif_dma.c 				      struct mm_struct *mm)
mm                229 drivers/misc/mic/scif/scif_dma.c 		mmu_notifier_unregister(&mmn->ep_mmu_notifier, mmn->mm);
mm                237 drivers/misc/mic/scif/scif_dma.c 				   struct mm_struct *mm, struct scif_endpt *ep)
mm                240 drivers/misc/mic/scif/scif_dma.c 	mmn->mm = mm;
mm                247 drivers/misc/mic/scif/scif_dma.c scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
mm                252 drivers/misc/mic/scif/scif_dma.c 		if (mmn->mm == mm)
mm                258 drivers/misc/mic/scif/scif_dma.c scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
mm                266 drivers/misc/mic/scif/scif_dma.c 	scif_init_mmu_notifier(mmn, current->mm, ep);
mm                267 drivers/misc/mic/scif/scif_dma.c 	if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) {
mm                303 drivers/misc/mic/scif/scif_dma.c scif_find_mmu_notifier(struct mm_struct *mm,
mm                310 drivers/misc/mic/scif/scif_dma.c scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
mm               1691 drivers/misc/mic/scif/scif_dma.c 		mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info);
mm               1693 drivers/misc/mic/scif/scif_dma.c 			mmn = scif_add_mmu_notifier(current->mm, ep);
mm                258 drivers/misc/mic/scif/scif_rma.c static inline void __scif_release_mm(struct mm_struct *mm)
mm                260 drivers/misc/mic/scif/scif_rma.c 	if (mm)
mm                261 drivers/misc/mic/scif/scif_rma.c 		mmput(mm);
mm                265 drivers/misc/mic/scif/scif_rma.c __scif_dec_pinned_vm_lock(struct mm_struct *mm,
mm                268 drivers/misc/mic/scif/scif_rma.c 	if (!mm || !nr_pages || !scif_ulimit_check)
mm                271 drivers/misc/mic/scif/scif_rma.c 	atomic64_sub(nr_pages, &mm->pinned_vm);
mm                275 drivers/misc/mic/scif/scif_rma.c static inline int __scif_check_inc_pinned_vm(struct mm_struct *mm,
mm                280 drivers/misc/mic/scif/scif_rma.c 	if (!mm || !nr_pages || !scif_ulimit_check)
mm                284 drivers/misc/mic/scif/scif_rma.c 	locked = atomic64_add_return(nr_pages, &mm->pinned_vm);
mm                287 drivers/misc/mic/scif/scif_rma.c 		atomic64_sub(nr_pages, &mm->pinned_vm);
mm                310 drivers/misc/mic/scif/scif_rma.c 	if (!window->temp && window->mm) {
mm                311 drivers/misc/mic/scif/scif_rma.c 		__scif_dec_pinned_vm_lock(window->mm, window->nr_pages);
mm                312 drivers/misc/mic/scif/scif_rma.c 		__scif_release_mm(window->mm);
mm                313 drivers/misc/mic/scif/scif_rma.c 		window->mm = NULL;
mm                721 drivers/misc/mic/scif/scif_rma.c 			if (!__scif_dec_pinned_vm_lock(window->mm,
mm                723 drivers/misc/mic/scif/scif_rma.c 				__scif_release_mm(window->mm);
mm                724 drivers/misc/mic/scif/scif_rma.c 				window->mm = NULL;
mm               1316 drivers/misc/mic/scif/scif_rma.c 	struct mm_struct *mm = NULL;
mm               1369 drivers/misc/mic/scif/scif_rma.c 		mm = current->mm;
mm               1371 drivers/misc/mic/scif/scif_rma.c 			err = __scif_check_inc_pinned_vm(mm, nr_pages);
mm               1386 drivers/misc/mic/scif/scif_rma.c 					__scif_dec_pinned_vm_lock(mm, nr_pages);
mm               1413 drivers/misc/mic/scif/scif_rma.c 		__scif_dec_pinned_vm_lock(mm, nr_pages);
mm               1576 drivers/misc/mic/scif/scif_rma.c 	struct mm_struct *mm = NULL;
mm               1642 drivers/misc/mic/scif/scif_rma.c 		mm = __scif_acquire_mm();
mm               1651 drivers/misc/mic/scif/scif_rma.c 		__scif_release_mm(mm);
mm               1657 drivers/misc/mic/scif/scif_rma.c 	window->mm = mm;
mm                271 drivers/misc/mic/scif/scif_rma.h 	struct mm_struct *mm;
mm                306 drivers/misc/mic/scif/scif_rma.h 	struct mm_struct *mm;
mm                 69 drivers/misc/ocxl/context.c int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
mm                 81 drivers/misc/ocxl/context.c 	if (mm)
mm                 82 drivers/misc/ocxl/context.c 		pidr = mm->context.id;
mm                 85 drivers/misc/ocxl/context.c 			      amr, mm, xsl_fault_error, ctx);
mm                 89 drivers/misc/ocxl/file.c 	rc = ocxl_context_attach(ctx, amr, current->mm);
mm                 37 drivers/misc/ocxl/link.c 	struct mm_struct *mm;
mm                143 drivers/misc/ocxl/link.c 	rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr,
mm                170 drivers/misc/ocxl/link.c 		hash_page_mm(fault->pe_data.mm, fault->dar, access, 0x300,
mm                176 drivers/misc/ocxl/link.c 	mmput(fault->pe_data.mm);
mm                228 drivers/misc/ocxl/link.c 	if (!pe_data->mm) {
mm                238 drivers/misc/ocxl/link.c 	WARN_ON(pe_data->mm->context.id != pid);
mm                240 drivers/misc/ocxl/link.c 	if (mmget_not_zero(pe_data->mm)) {
mm                496 drivers/misc/ocxl/link.c 		u64 amr, struct mm_struct *mm,
mm                525 drivers/misc/ocxl/link.c 	pe_data->mm = mm;
mm                542 drivers/misc/ocxl/link.c 	if (mm)
mm                543 drivers/misc/ocxl/link.c 		mm_context_add_copro(mm);
mm                566 drivers/misc/ocxl/link.c 	if (mm)
mm                567 drivers/misc/ocxl/link.c 		mmgrab(mm);
mm                673 drivers/misc/ocxl/link.c 		if (pe_data->mm) {
mm                674 drivers/misc/ocxl/link.c 			mm_context_remove_copro(pe_data->mm);
mm                675 drivers/misc/ocxl/link.c 			mmdrop(pe_data->mm);
mm                 52 drivers/misc/sgi-gru/grufault.c 	vma = find_vma(current->mm, vaddr);
mm                 68 drivers/misc/sgi-gru/grufault.c 	struct mm_struct *mm = current->mm;
mm                 72 drivers/misc/sgi-gru/grufault.c 	down_read(&mm->mmap_sem);
mm                 79 drivers/misc/sgi-gru/grufault.c 		up_read(&mm->mmap_sem);
mm                 85 drivers/misc/sgi-gru/grufault.c 	struct mm_struct *mm = current->mm;
mm                 89 drivers/misc/sgi-gru/grufault.c 	down_write(&mm->mmap_sem);
mm                 98 drivers/misc/sgi-gru/grufault.c 	downgrade_write(&mm->mmap_sem);
mm                102 drivers/misc/sgi-gru/grufault.c 	up_write(&mm->mmap_sem);
mm                112 drivers/misc/sgi-gru/grufault.c 	up_read(&current->mm->mmap_sem);
mm                255 drivers/misc/sgi-gru/grufault.c 	struct mm_struct *mm = gts->ts_mm;
mm                260 drivers/misc/sgi-gru/grufault.c 	vma = find_vma(mm, vaddr);
mm                138 drivers/misc/sgi-gru/grufile.c 	down_write(&current->mm->mmap_sem);
mm                149 drivers/misc/sgi-gru/grufile.c 	up_write(&current->mm->mmap_sem);
mm                340 drivers/misc/sgi-gru/grumain.c 		gts->ts_mm = current->mm;
mm                238 drivers/misc/sgi-gru/grutlbpurge.c static struct mmu_notifier *gru_alloc_notifier(struct mm_struct *mm)
mm                269 drivers/misc/sgi-gru/grutlbpurge.c 	mn = mmu_notifier_get_locked(&gru_mmuops, current->mm);
mm                192 drivers/mtd/nand/raw/atmel/pmecc.c static int atmel_pmecc_build_gf_tables(int mm, unsigned int poly,
mm                197 drivers/mtd/nand/raw/atmel/pmecc.c 	unsigned int nn = BIT(mm) - 1;
mm                200 drivers/mtd/nand/raw/atmel/pmecc.c 	if (k != (1u << mm))
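atmel_pmecc_build_gf_tables() above fills the log/antilog tables for GF(2^mm) used by BCH decoding: walk the powers of alpha, reducing by the primitive polynomial whenever the degree overflows. A standalone sketch of that textbook construction (it assumes poly carries its degree-mm term, and it is not a copy of the driver):

```c
/*
 * Fill GF(2^mm) antilog (alpha_to) and log (index_of) tables from a
 * primitive polynomial poly (which must include its degree-mm term).
 */
static int build_gf_tables(int mm, unsigned int poly,
			   int *index_of, int *alpha_to)
{
	const unsigned int k = 1u << mm;	/* field size 2^mm */
	const unsigned int nn = k - 1;		/* order of alpha */
	unsigned int i, x = 1;

	for (i = 0; i < nn; i++) {
		if (i && x == 1)
			return -EINVAL;		/* poly is not primitive */
		alpha_to[i] = x;		/* alpha_to[i] = alpha^i */
		index_of[x] = i;		/* index_of[alpha^i] = i */
		x <<= 1;			/* multiply by alpha */
		if (x & k)
			x ^= poly;		/* reduce mod poly */
	}
	index_of[0] = -1;			/* log(0) is undefined */
	return 0;
}
```

For mm = 4 and poly = 0x13 (x^4 + x + 1), this yields alpha_to[4] = 3, i.e. alpha^4 = alpha + 1, as expected.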
mm                123 drivers/net/arcnet/com20020-pci.c 	struct com20020_pci_channel_map *mm;
mm                140 drivers/net/arcnet/com20020-pci.c 	mm = &ci->misc_map;
mm                144 drivers/net/arcnet/com20020-pci.c 	if (mm->size) {
mm                145 drivers/net/arcnet/com20020-pci.c 		ioaddr = pci_resource_start(pdev, mm->bar) + mm->offset;
mm                146 drivers/net/arcnet/com20020-pci.c 		r = devm_request_region(&pdev->dev, ioaddr, mm->size,
mm                150 drivers/net/arcnet/com20020-pci.c 			       ioaddr, ioaddr + mm->size - 1);
mm               5124 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2);
mm                200 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 	u16 mm;
mm               24601 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 		wlc_phy_ipa_set_bbmult_nphy(pi, (state->mm >> 8) & 0xff,
mm               24602 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 					    (state->mm & 0xff));
mm               24642 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 		wlc_phy_ipa_set_bbmult_nphy(pi, (state->mm >> 8) & 0xff,
mm               24643 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 					    (state->mm & 0xff));
mm               25206 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 	phy_b2.mm = wlc_phy_ipa_get_bbmult_nphy(pi);
mm               1372 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	wtbl_ht->mm = sta->ht_cap.ampdu_density;
mm                262 drivers/net/wireless/mediatek/mt76/mt7615/mcu.h 	u8 mm;
mm                 91 drivers/oprofile/buffer_sync.c 	struct mm_struct *mm = current->mm;
mm                 94 drivers/oprofile/buffer_sync.c 	down_read(&mm->mmap_sem);
mm                 96 drivers/oprofile/buffer_sync.c 	mpnt = find_vma(mm, addr);
mm                 98 drivers/oprofile/buffer_sync.c 		up_read(&mm->mmap_sem);
mm                106 drivers/oprofile/buffer_sync.c 	up_read(&mm->mmap_sem);
mm                227 drivers/oprofile/buffer_sync.c static unsigned long get_exec_dcookie(struct mm_struct *mm)
mm                232 drivers/oprofile/buffer_sync.c 	if (!mm)
mm                235 drivers/oprofile/buffer_sync.c 	exe_file = get_mm_exe_file(mm);
mm                254 drivers/oprofile/buffer_sync.c lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
mm                259 drivers/oprofile/buffer_sync.c 	down_read(&mm->mmap_sem);
mm                260 drivers/oprofile/buffer_sync.c 	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
mm                279 drivers/oprofile/buffer_sync.c 	up_read(&mm->mmap_sem);
mm                331 drivers/oprofile/buffer_sync.c static void add_data(struct op_entry *entry, struct mm_struct *mm)
mm                344 drivers/oprofile/buffer_sync.c 	if (mm) {
mm                345 drivers/oprofile/buffer_sync.c 		cookie = lookup_dcookie(mm, pc, &offset);
mm                381 drivers/oprofile/buffer_sync.c add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
mm                393 drivers/oprofile/buffer_sync.c 	if (!mm) {
mm                398 drivers/oprofile/buffer_sync.c 	cookie = lookup_dcookie(mm, s->eip, &offset);
mm                416 drivers/oprofile/buffer_sync.c static void release_mm(struct mm_struct *mm)
mm                418 drivers/oprofile/buffer_sync.c 	if (!mm)
mm                420 drivers/oprofile/buffer_sync.c 	mmput(mm);
mm                495 drivers/oprofile/buffer_sync.c 	struct mm_struct *mm = NULL;
mm                537 drivers/oprofile/buffer_sync.c 				oldmm = mm;
mm                539 drivers/oprofile/buffer_sync.c 				mm = get_task_mm(new);
mm                540 drivers/oprofile/buffer_sync.c 				if (mm != oldmm)
mm                541 drivers/oprofile/buffer_sync.c 					cookie = get_exec_dcookie(mm);
mm                545 drivers/oprofile/buffer_sync.c 				add_data(&entry, mm);
mm                553 drivers/oprofile/buffer_sync.c 		if (add_sample(mm, sample, in_kernel))
mm                562 drivers/oprofile/buffer_sync.c 	release_mm(mm);
mm                482 drivers/pcmcia/rsrc_nonstatic.c 	struct resource_map *m, mm;
mm                498 drivers/pcmcia/rsrc_nonstatic.c 	for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) {
mm                499 drivers/pcmcia/rsrc_nonstatic.c 		mm = *m;
mm                501 drivers/pcmcia/rsrc_nonstatic.c 		if (mm.base >= 0x100000)
mm                503 drivers/pcmcia/rsrc_nonstatic.c 		if ((mm.base | mm.num) & 0xffff) {
mm                504 drivers/pcmcia/rsrc_nonstatic.c 			ok += do_mem_probe(s, mm.base, mm.num, readable,
mm                511 drivers/pcmcia/rsrc_nonstatic.c 			if ((b >= mm.base) && (b+0x10000 <= mm.base+mm.num)) {
mm                538 drivers/pcmcia/rsrc_nonstatic.c 	struct resource_map *m, mm;
mm                542 drivers/pcmcia/rsrc_nonstatic.c 	for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) {
mm                543 drivers/pcmcia/rsrc_nonstatic.c 		mm = *m;
mm                544 drivers/pcmcia/rsrc_nonstatic.c 		ok += do_mem_probe(s, mm.base, mm.num, readable, checksum);
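
The rsrc_nonstatic.c loops above use a copy-before-probe idiom: do_mem_probe() can insert or delete resource_map entries, so the node under inspection is snapshotted into the on-stack `mm` and iteration resumes from the saved next pointer. Condensed, with the driver's own types assumed:

	struct resource_map *m, mm;

	for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) {
		mm = *m;	/* snapshot: *m may be freed by the probe */
		ok += do_mem_probe(s, mm.base, mm.num, readable, checksum);
	}
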
mm                335 drivers/scsi/cxlflash/ocxl_hw.c 	struct mm_struct *mm;
mm                358 drivers/scsi/cxlflash/ocxl_hw.c 		mm = NULL;
mm                360 drivers/scsi/cxlflash/ocxl_hw.c 		pid = current->mm->context.id;
mm                361 drivers/scsi/cxlflash/ocxl_hw.c 		mm = current->mm;
mm                364 drivers/scsi/cxlflash/ocxl_hw.c 	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
mm                365 drivers/staging/android/ashmem.c 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
mm                 79 drivers/staging/kpc2000/kpc_dma/fileops.c 	down_read(&current->mm->mmap_sem);      /*  get memory map semaphore */
mm                 81 drivers/staging/kpc2000/kpc_dma/fileops.c 	up_read(&current->mm->mmap_sem);        /*  release the semaphore */
mm                554 drivers/tee/optee/call.c 	struct mm_struct *mm = current->mm;
mm                564 drivers/tee/optee/call.c 	down_read(&mm->mmap_sem);
mm                565 drivers/tee/optee/call.c 	rc = __check_mem_type(find_vma(mm, start),
mm                567 drivers/tee/optee/call.c 	up_read(&mm->mmap_sem);
mm                221 drivers/usb/gadget/function/f_fs.c 	struct mm_struct *mm;
mm                830 drivers/usb/gadget/function/f_fs.c 		use_mm(io_data->mm);
mm                832 drivers/usb/gadget/function/f_fs.c 		unuse_mm(io_data->mm);
mm               1203 drivers/usb/gadget/function/f_fs.c 	p->mm = current->mm;
mm               1249 drivers/usb/gadget/function/f_fs.c 	p->mm = current->mm;
mm                431 drivers/usb/gadget/legacy/inode.c 	struct mm_struct	*mm;
mm                461 drivers/usb/gadget/legacy/inode.c 	struct mm_struct *mm = priv->mm;
mm                465 drivers/usb/gadget/legacy/inode.c 	use_mm(mm);
mm                467 drivers/usb/gadget/legacy/inode.c 	unuse_mm(mm);
mm                536 drivers/usb/gadget/legacy/inode.c 	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
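
Both gadget files above capture current->mm at submission time and later borrow it from kernel-thread context with use_mm()/unuse_mm(), so user-buffer copies resolve against the submitter's address space. A minimal sketch of such a worker; struct kiocb_priv and its field names are illustrative stand-ins for the driver's own:

	#include <linux/mmu_context.h>	/* use_mm()/unuse_mm() in this era */
	#include <linux/workqueue.h>

	struct kiocb_priv {		/* illustrative, cf. inode.c above */
		struct work_struct work;
		struct mm_struct *mm;	/* saved current->mm at submit time */
	};

	static void ep_user_copy_worker(struct work_struct *work)
	{
		struct kiocb_priv *priv = container_of(work, struct kiocb_priv,
						       work);
		struct mm_struct *mm = priv->mm;

		use_mm(mm);	/* adopt the submitter's address space */
		/* ... copy completed I/O data into the user's buffers ... */
		unuse_mm(mm);	/* restore the kernel thread's lazy mm */
	}
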
mm                 36 drivers/vfio/pci/vfio_pci_nvlink2.c 	struct mm_struct *mm;
mm                 99 drivers/vfio/pci/vfio_pci_nvlink2.c 	if (data->mm) {
mm                101 drivers/vfio/pci/vfio_pci_nvlink2.c 			ret = mm_iommu_put(data->mm, data->mem);
mm                105 drivers/vfio/pci/vfio_pci_nvlink2.c 		mmdrop(data->mm);
mm                162 drivers/vfio/pci/vfio_pci_nvlink2.c 	data->mm = current->mm;
mm                164 drivers/vfio/pci/vfio_pci_nvlink2.c 	atomic_inc(&data->mm->mm_count);
mm                165 drivers/vfio/pci/vfio_pci_nvlink2.c 	ret = (int) mm_iommu_newdev(data->mm, data->useraddr,
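
The nvlink2 code above open-codes atomic_inc(&mm->mm_count), which is exactly what the mmgrab() helper used elsewhere in this index wraps. The distinction worth keeping in mind: mm_count pins only the mm_struct itself, while mm_users keeps the whole address space usable. The pairing, sketched:

	/* Pin the creating task's mm for the lifetime of the region.
	 * mmgrab() takes an mm_count reference (struct only); use
	 * mmget()/mmput() when the page tables must stay alive too.
	 */
	data->mm = current->mm;
	mmgrab(data->mm);	/* same as atomic_inc(&mm->mm_count) */

	/* ... later, on teardown ... */
	mmdrop(data->mm);	/* frees the mm_struct on last reference */
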
mm                 67 drivers/vfio/vfio_iommu_spapr_tce.c 	struct mm_struct *mm;
mm                 75 drivers/vfio/vfio_iommu_spapr_tce.c 	if (container->mm) {
mm                 76 drivers/vfio/vfio_iommu_spapr_tce.c 		if (container->mm == current->mm)
mm                 80 drivers/vfio/vfio_iommu_spapr_tce.c 	BUG_ON(!current->mm);
mm                 81 drivers/vfio/vfio_iommu_spapr_tce.c 	container->mm = current->mm;
mm                 82 drivers/vfio/vfio_iommu_spapr_tce.c 	atomic_inc(&container->mm->mm_count);
mm                 92 drivers/vfio/vfio_iommu_spapr_tce.c 	ret = mm_iommu_put(container->mm, tcemem->mem);
mm                113 drivers/vfio/vfio_iommu_spapr_tce.c 	mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
mm                129 drivers/vfio/vfio_iommu_spapr_tce.c 	mm_iommu_put(container->mm, mem);
mm                146 drivers/vfio/vfio_iommu_spapr_tce.c 	mem = mm_iommu_get(container->mm, vaddr, entries);
mm                155 drivers/vfio/vfio_iommu_spapr_tce.c 		ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
mm                174 drivers/vfio/vfio_iommu_spapr_tce.c 	mm_iommu_put(container->mm, mem);
mm                178 drivers/vfio/vfio_iommu_spapr_tce.c static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
mm                184 drivers/vfio/vfio_iommu_spapr_tce.c 	if (mm_iommu_is_devmem(mm, hpa, it_page_shift, &size))
mm                292 drivers/vfio/vfio_iommu_spapr_tce.c 	ret = account_locked_vm(container->mm, locked, true);
mm                310 drivers/vfio/vfio_iommu_spapr_tce.c 	BUG_ON(!container->mm);
mm                311 drivers/vfio/vfio_iommu_spapr_tce.c 	account_locked_vm(container->mm, container->locked_pages, false);
mm                373 drivers/vfio/vfio_iommu_spapr_tce.c 	if (container->mm)
mm                374 drivers/vfio/vfio_iommu_spapr_tce.c 		mmdrop(container->mm);
mm                396 drivers/vfio/vfio_iommu_spapr_tce.c 	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
mm                463 drivers/vfio/vfio_iommu_spapr_tce.c 		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
mm                515 drivers/vfio/vfio_iommu_spapr_tce.c 		if (!tce_page_is_contained(container->mm, hpa,
mm                523 drivers/vfio/vfio_iommu_spapr_tce.c 		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
mm                565 drivers/vfio/vfio_iommu_spapr_tce.c 		if (!tce_page_is_contained(container->mm, hpa,
mm                579 drivers/vfio/vfio_iommu_spapr_tce.c 		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
mm                621 drivers/vfio/vfio_iommu_spapr_tce.c 	ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
mm                640 drivers/vfio/vfio_iommu_spapr_tce.c 	account_locked_vm(container->mm, pages, false);
mm                803 drivers/vfio/vfio_iommu_spapr_tce.c 	if (container->mm && container->mm != current->mm)
mm               1002 drivers/vfio/vfio_iommu_spapr_tce.c 		if (!container->mm)
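
The spapr TCE container above latches its owning address space on first use: container->mm is recorded once from current->mm, pinned with an mm_count reference, and later entry points insist the caller still matches (see the check at line 803 of the file). A sketch of that latch, with the error value an assumption:

	static long tce_iommu_mm_set(struct tce_container *container)
	{
		if (container->mm)	/* already latched: owner only */
			return container->mm == current->mm ? 0 : -EPERM;

		BUG_ON(!current->mm);
		container->mm = current->mm;
		atomic_inc(&container->mm->mm_count);	/* paired with mmdrop() */
		return 0;
	}
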
mm                270 drivers/vfio/vfio_iommu_type1.c 	struct mm_struct *mm;
mm                276 drivers/vfio/vfio_iommu_type1.c 	mm = async ? get_task_mm(dma->task) : dma->task->mm;
mm                277 drivers/vfio/vfio_iommu_type1.c 	if (!mm)
mm                280 drivers/vfio/vfio_iommu_type1.c 	ret = down_write_killable(&mm->mmap_sem);
mm                282 drivers/vfio/vfio_iommu_type1.c 		ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
mm                284 drivers/vfio/vfio_iommu_type1.c 		up_write(&mm->mmap_sem);
mm                288 drivers/vfio/vfio_iommu_type1.c 		mmput(mm);
mm                338 drivers/vfio/vfio_iommu_type1.c static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
mm                350 drivers/vfio/vfio_iommu_type1.c 	down_read(&mm->mmap_sem);
mm                351 drivers/vfio/vfio_iommu_type1.c 	if (mm == current->mm) {
mm                355 drivers/vfio/vfio_iommu_type1.c 		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
mm                369 drivers/vfio/vfio_iommu_type1.c 	up_read(&mm->mmap_sem);
mm                376 drivers/vfio/vfio_iommu_type1.c 	down_read(&mm->mmap_sem);
mm                380 drivers/vfio/vfio_iommu_type1.c 	vma = find_vma_intersection(mm, vaddr, vaddr + 1);
mm                388 drivers/vfio/vfio_iommu_type1.c 	up_read(&mm->mmap_sem);
mm                407 drivers/vfio/vfio_iommu_type1.c 	if (!current->mm)
mm                410 drivers/vfio/vfio_iommu_type1.c 	ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
mm                422 drivers/vfio/vfio_iommu_type1.c 		if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) {
mm                437 drivers/vfio/vfio_iommu_type1.c 		ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
mm                449 drivers/vfio/vfio_iommu_type1.c 			    current->mm->locked_vm + lock_acct + 1 > limit) {
mm                500 drivers/vfio/vfio_iommu_type1.c 	struct mm_struct *mm;
mm                503 drivers/vfio/vfio_iommu_type1.c 	mm = get_task_mm(dma->task);
mm                504 drivers/vfio/vfio_iommu_type1.c 	if (!mm)
mm                507 drivers/vfio/vfio_iommu_type1.c 	ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
mm                520 drivers/vfio/vfio_iommu_type1.c 	mmput(mm);
mm                944 drivers/vfio/vfio_iommu_type1.c 		if (dma->task->mm != current->mm)
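
vfio_iommu_type1.c above keeps two pinning paths: the fast one is valid only when the caller is the task that owns the DMA mapping (current->mm), while the slow one takes a proper reference on a possibly foreign mm before calling the driver's vaddr_get_pfn() helper. The slow-path bracket, with the errno illustrative:

	struct mm_struct *mm = get_task_mm(dma->task);	/* mm_users ref */

	if (!mm)
		return -ENODEV;		/* owning task already exited */

	ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
	/* ... account the locked page against dma->task ... */
	mmput(mm);			/* may be the final reference */
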
mm                339 drivers/vhost/vhost.c 	use_mm(dev->mm);
mm                365 drivers/vhost/vhost.c 	unuse_mm(dev->mm);
mm                468 drivers/vhost/vhost.c 	dev->mm = NULL;
mm                499 drivers/vhost/vhost.c 	return dev->mm == current->mm ? 0 : -EPERM;
mm                531 drivers/vhost/vhost.c 	return dev->mm;
mm                548 drivers/vhost/vhost.c 	dev->mm = get_task_mm(current);
mm                571 drivers/vhost/vhost.c 	if (dev->mm)
mm                572 drivers/vhost/vhost.c 		mmput(dev->mm);
mm                573 drivers/vhost/vhost.c 	dev->mm = NULL;
mm                686 drivers/vhost/vhost.c 	if (dev->mm)
mm                687 drivers/vhost/vhost.c 		mmput(dev->mm);
mm                688 drivers/vhost/vhost.c 	dev->mm = NULL;
mm                160 drivers/vhost/vhost.h 	struct mm_struct *mm;
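
The vhost lines above implement a small ownership model: the first VHOST_SET_OWNER caller's mm is recorded in dev->mm, the worker thread adopts it with use_mm(), and every later ioctl is gated on the caller still owning the device. Condensed, with the -EBUSY path assumed:

	static long vhost_dev_set_owner_sketch(struct vhost_dev *dev)
	{
		if (dev->mm)			/* already owned */
			return -EBUSY;
		dev->mm = get_task_mm(current);	/* mm_users ref, mmput later */
		return 0;
	}

	static int vhost_dev_check_owner_sketch(struct vhost_dev *dev)
	{
		return dev->mm == current->mm ? 0 : -EPERM;
	}
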
mm                250 drivers/video/fbdev/pm2fb.c static void pm2_mnp(u32 clk, unsigned char *mm, unsigned char *nn,
mm                260 drivers/video/fbdev/pm2fb.c 	*mm = *nn = *pp = 0;
mm                269 drivers/video/fbdev/pm2fb.c 						*mm = m;
mm                279 drivers/video/fbdev/pm2fb.c static void pm2v_mnp(u32 clk, unsigned char *mm, unsigned char *nn,
mm                288 drivers/video/fbdev/pm2fb.c 	*mm = *nn = *pp = 0;
mm                295 drivers/video/fbdev/pm2fb.c 					*mm = m;
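
pm2_mnp() and pm2v_mnp() above search multiplier/divider/post-divider triples for the PLL setting closest to a requested pixel clock, writing the winners through *mm, *nn, *pp. A generic form of that search; the loop bounds, reference clock and frequency formula here are illustrative, not the Permedia register semantics:

	#define REF_CLOCK_KHZ	14318	/* illustrative reference clock */

	static void mnp_search(u32 clk, unsigned char *mm, unsigned char *nn,
			       unsigned char *pp)
	{
		u32 best_err = ~0U;
		int m, n, p;

		*mm = *nn = *pp = 0;	/* same "no result" default as above */
		for (n = 2; n < 14; n++)
			for (m = 2; m < 255; m++)
				for (p = 0; p < 4; p++) {
					u32 f = (REF_CLOCK_KHZ * m / n) >> p;
					u32 err = f > clk ? f - clk : clk - f;

					if (err < best_err) {
						best_err = err;
						*mm = m;	/* best so far */
						*nn = n;
						*pp = p;
					}
				}
	}
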
mm                 31 drivers/xen/gntdev-common.h 	struct mm_struct *mm;
mm                294 drivers/xen/gntdev.c 	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
mm                550 drivers/xen/gntdev.c 		       struct mm_struct *mm)
mm                608 drivers/xen/gntdev.c 		priv->mm = get_task_mm(current);
mm                609 drivers/xen/gntdev.c 		if (!priv->mm) {
mm                614 drivers/xen/gntdev.c 		ret = mmu_notifier_register(&priv->mn, priv->mm);
mm                615 drivers/xen/gntdev.c 		mmput(priv->mm);
mm                655 drivers/xen/gntdev.c 		mmu_notifier_unregister(&priv->mn, priv->mm);
mm                739 drivers/xen/gntdev.c 	down_read(&current->mm->mmap_sem);
mm                740 drivers/xen/gntdev.c 	vma = find_vma(current->mm, op.vaddr);
mm                753 drivers/xen/gntdev.c 	up_read(&current->mm->mmap_sem);
mm               1090 drivers/xen/gntdev.c 	if (use_ptemod && priv->mm != vma->vm_mm) {
mm                257 drivers/xen/privcmd.c 	struct mm_struct *mm = current->mm;
mm                281 drivers/xen/privcmd.c 	down_write(&mm->mmap_sem);
mm                288 drivers/xen/privcmd.c 		vma = find_vma(mm, msg->va);
mm                306 drivers/xen/privcmd.c 	up_write(&mm->mmap_sem);
mm                451 drivers/xen/privcmd.c 	struct mm_struct *mm = current->mm;
mm                502 drivers/xen/privcmd.c 	down_write(&mm->mmap_sem);
mm                504 drivers/xen/privcmd.c 	vma = find_vma(mm, m.addr);
mm                558 drivers/xen/privcmd.c 	up_write(&mm->mmap_sem);
mm                579 drivers/xen/privcmd.c 	up_write(&mm->mmap_sem);
mm                730 drivers/xen/privcmd.c 	struct mm_struct *mm = current->mm;
mm                744 drivers/xen/privcmd.c 	down_write(&mm->mmap_sem);
mm                746 drivers/xen/privcmd.c 	vma = find_vma(mm, kdata.addr);
mm                823 drivers/xen/privcmd.c 	up_write(&mm->mmap_sem);
mm                267 drivers/xen/xlate_mmu.c 	struct mm_struct *mm;
mm                279 drivers/xen/xlate_mmu.c 	set_pte_at(r->mm, addr, ptep, pte);
mm                289 drivers/xen/xlate_mmu.c 		.mm = vma->vm_mm,
mm                331 fs/aio.c       	struct mm_struct *mm = vma->vm_mm;
mm                335 fs/aio.c       	spin_lock(&mm->ioctx_lock);
mm                337 fs/aio.c       	table = rcu_dereference(mm->ioctx_table);
mm                352 fs/aio.c       	spin_unlock(&mm->ioctx_lock);
mm                464 fs/aio.c       	struct mm_struct *mm = current->mm;
mm                523 fs/aio.c       	if (down_write_killable(&mm->mmap_sem)) {
mm                532 fs/aio.c       	up_write(&mm->mmap_sem);
mm                634 fs/aio.c       static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
mm                640 fs/aio.c       	spin_lock(&mm->ioctx_lock);
mm                641 fs/aio.c       	table = rcu_dereference_raw(mm->ioctx_table);
mm                649 fs/aio.c       					spin_unlock(&mm->ioctx_lock);
mm                662 fs/aio.c       		spin_unlock(&mm->ioctx_lock);
mm                671 fs/aio.c       		spin_lock(&mm->ioctx_lock);
mm                672 fs/aio.c       		old = rcu_dereference_raw(mm->ioctx_table);
mm                675 fs/aio.c       			rcu_assign_pointer(mm->ioctx_table, table);
mm                680 fs/aio.c       			rcu_assign_pointer(mm->ioctx_table, table);
mm                704 fs/aio.c       	struct mm_struct *mm = current->mm;
mm                784 fs/aio.c       	err = ioctx_add_table(ctx, mm);
mm                792 fs/aio.c       		 ctx, ctx->user_id, mm, ctx->nr_events);
mm                817 fs/aio.c       static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
mm                822 fs/aio.c       	spin_lock(&mm->ioctx_lock);
mm                824 fs/aio.c       		spin_unlock(&mm->ioctx_lock);
mm                828 fs/aio.c       	table = rcu_dereference_raw(mm->ioctx_table);
mm                831 fs/aio.c       	spin_unlock(&mm->ioctx_lock);
mm                861 fs/aio.c       void exit_aio(struct mm_struct *mm)
mm                863 fs/aio.c       	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
mm                891 fs/aio.c       		kill_ioctx(mm, ctx, &wait);
mm                899 fs/aio.c       	RCU_INIT_POINTER(mm->ioctx_table, NULL);
mm               1051 fs/aio.c       	struct mm_struct *mm = current->mm;
mm               1060 fs/aio.c       	table = rcu_dereference(mm->ioctx_table);
mm               1335 fs/aio.c       			kill_ioctx(current->mm, ioctx, NULL);
mm               1367 fs/aio.c       			kill_ioctx(current->mm, ioctx, NULL);
mm               1396 fs/aio.c       		ret = kill_ioctx(current->mm, ioctx, &wait);
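
fs/aio.c above keeps the per-mm kioctx table RCU-protected: lookups run under rcu_read_lock() with rcu_dereference(), while table growth serializes on mm->ioctx_lock and publishes with rcu_assign_pointer(). The reader side, sketched with the id/nr field names as they appear in fs/aio.c:

	struct kioctx_table *table;
	struct kioctx *ctx = NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	if (table && id < table->nr)
		ctx = rcu_dereference(table->table[id]);
	/* ... take a reference on ctx before leaving the section ... */
	rcu_read_unlock();
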
mm                 90 fs/binfmt_aout.c 	current->mm->arg_start = (unsigned long) p;
mm                 99 fs/binfmt_aout.c 	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
mm                108 fs/binfmt_aout.c 	current->mm->env_end = (unsigned long) p;
mm                166 fs/binfmt_aout.c 	current->mm->end_code = ex.a_text +
mm                167 fs/binfmt_aout.c 		(current->mm->start_code = N_TXTADDR(ex));
mm                168 fs/binfmt_aout.c 	current->mm->end_data = ex.a_data +
mm                169 fs/binfmt_aout.c 		(current->mm->start_data = N_DATADDR(ex));
mm                170 fs/binfmt_aout.c 	current->mm->brk = ex.a_bss +
mm                171 fs/binfmt_aout.c 		(current->mm->start_brk = N_BSSADDR(ex));
mm                242 fs/binfmt_aout.c 	retval = set_brk(current->mm->start_brk, current->mm->brk);
mm                246 fs/binfmt_aout.c 	current->mm->start_stack =
mm                252 fs/binfmt_aout.c 	start_thread(regs, ex.a_entry, current->mm->start_stack);
mm                117 fs/binfmt_elf.c 	current->mm->start_brk = current->mm->brk = end;
mm                229 fs/binfmt_elf.c 	elf_info = (elf_addr_t *)current->mm->saved_auxv;
mm                279 fs/binfmt_elf.c 	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
mm                302 fs/binfmt_elf.c 	vma = find_extend_vma(current->mm, bprm->p);
mm                311 fs/binfmt_elf.c 	p = current->mm->arg_end = current->mm->arg_start;
mm                323 fs/binfmt_elf.c 	current->mm->arg_end = p;
mm                326 fs/binfmt_elf.c 	current->mm->env_end = current->mm->env_start = p;
mm                338 fs/binfmt_elf.c 	current->mm->env_end = p;
mm               1103 fs/binfmt_elf.c 	current->mm->end_code = end_code;
mm               1104 fs/binfmt_elf.c 	current->mm->start_code = start_code;
mm               1105 fs/binfmt_elf.c 	current->mm->start_data = start_data;
mm               1106 fs/binfmt_elf.c 	current->mm->end_data = end_data;
mm               1107 fs/binfmt_elf.c 	current->mm->start_stack = bprm->p;
mm               1119 fs/binfmt_elf.c 			current->mm->brk = current->mm->start_brk =
mm               1122 fs/binfmt_elf.c 		current->mm->brk = current->mm->start_brk =
mm               1123 fs/binfmt_elf.c 			arch_randomize_brk(current->mm);
mm               1507 fs/binfmt_elf.c 		       struct mm_struct *mm)
mm               1515 fs/binfmt_elf.c 	len = mm->arg_end - mm->arg_start;
mm               1519 fs/binfmt_elf.c 		           (const char __user *)mm->arg_start, len))
mm               1549 fs/binfmt_elf.c static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
mm               1551 fs/binfmt_elf.c 	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
mm               1590 fs/binfmt_elf.c 	count = current->mm->map_count;
mm               1608 fs/binfmt_elf.c 	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
mm               1645 fs/binfmt_elf.c 	n = current->mm->map_count - count;
mm               1809 fs/binfmt_elf.c 	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
mm               1840 fs/binfmt_elf.c 	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
mm               1846 fs/binfmt_elf.c 	fill_auxv_note(&info->auxv, current->mm);
mm               2019 fs/binfmt_elf.c 	for (ct = current->mm->core_state->dumper.next;
mm               2050 fs/binfmt_elf.c 	fill_psinfo(info->psinfo, current->group_leader, current->mm);
mm               2055 fs/binfmt_elf.c 	fill_auxv_note(info->notes + 3, current->mm);
mm               2138 fs/binfmt_elf.c 	struct vm_area_struct *ret = tsk->mm->mmap;
mm               2220 fs/binfmt_elf.c 	segs = current->mm->map_count;
mm               2223 fs/binfmt_elf.c 	gate_vma = get_gate_vma(current->mm);
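
fill_auxv_note() above, and the /proc/<pid>/auxv reader later in this index, both rely on the same invariant: mm->saved_auxv is a flat array of (a_type, a_val) pairs terminated by an AT_NULL entry. The sizing walk, in miniature:

	elf_addr_t *auxv = (elf_addr_t *)mm->saved_auxv;
	unsigned int i = 0;

	do {
		i += 2;				/* one (a_type, a_val) pair */
	} while (auxv[i - 2] != AT_NULL);	/* count includes terminator */
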
mm                359 fs/binfmt_elf_fdpic.c 	current->mm->start_code = 0;
mm                360 fs/binfmt_elf_fdpic.c 	current->mm->end_code = 0;
mm                361 fs/binfmt_elf_fdpic.c 	current->mm->start_stack = 0;
mm                362 fs/binfmt_elf_fdpic.c 	current->mm->start_data = 0;
mm                363 fs/binfmt_elf_fdpic.c 	current->mm->end_data = 0;
mm                364 fs/binfmt_elf_fdpic.c 	current->mm->context.exec_fdpic_loadmap = 0;
mm                365 fs/binfmt_elf_fdpic.c 	current->mm->context.interp_fdpic_loadmap = 0;
mm                370 fs/binfmt_elf_fdpic.c 				  &current->mm->start_stack,
mm                371 fs/binfmt_elf_fdpic.c 				  &current->mm->start_brk);
mm                373 fs/binfmt_elf_fdpic.c 	retval = setup_arg_pages(bprm, current->mm->start_stack,
mm                385 fs/binfmt_elf_fdpic.c 	retval = elf_fdpic_map_file(&exec_params, bprm->file, current->mm,
mm                392 fs/binfmt_elf_fdpic.c 					    current->mm, "interpreter");
mm                404 fs/binfmt_elf_fdpic.c 	if (!current->mm->start_brk)
mm                405 fs/binfmt_elf_fdpic.c 		current->mm->start_brk = current->mm->end_data;
mm                407 fs/binfmt_elf_fdpic.c 	current->mm->brk = current->mm->start_brk =
mm                408 fs/binfmt_elf_fdpic.c 		PAGE_ALIGN(current->mm->start_brk);
mm                421 fs/binfmt_elf_fdpic.c 	current->mm->start_brk = vm_mmap(NULL, 0, stack_size, stack_prot,
mm                426 fs/binfmt_elf_fdpic.c 	if (IS_ERR_VALUE(current->mm->start_brk)) {
mm                427 fs/binfmt_elf_fdpic.c 		retval = current->mm->start_brk;
mm                428 fs/binfmt_elf_fdpic.c 		current->mm->start_brk = 0;
mm                432 fs/binfmt_elf_fdpic.c 	current->mm->brk = current->mm->start_brk;
mm                433 fs/binfmt_elf_fdpic.c 	current->mm->context.end_brk = current->mm->start_brk;
mm                434 fs/binfmt_elf_fdpic.c 	current->mm->start_stack = current->mm->start_brk + stack_size;
mm                438 fs/binfmt_elf_fdpic.c 	if (create_elf_fdpic_tables(bprm, current->mm,
mm                442 fs/binfmt_elf_fdpic.c 	kdebug("- start_code  %lx", current->mm->start_code);
mm                443 fs/binfmt_elf_fdpic.c 	kdebug("- end_code    %lx", current->mm->end_code);
mm                444 fs/binfmt_elf_fdpic.c 	kdebug("- start_data  %lx", current->mm->start_data);
mm                445 fs/binfmt_elf_fdpic.c 	kdebug("- end_data    %lx", current->mm->end_data);
mm                446 fs/binfmt_elf_fdpic.c 	kdebug("- start_brk   %lx", current->mm->start_brk);
mm                447 fs/binfmt_elf_fdpic.c 	kdebug("- brk         %lx", current->mm->brk);
mm                448 fs/binfmt_elf_fdpic.c 	kdebug("- start_stack %lx", current->mm->start_stack);
mm                465 fs/binfmt_elf_fdpic.c 	start_thread(regs, entryaddr, current->mm->start_stack);
mm                498 fs/binfmt_elf_fdpic.c 				   struct mm_struct *mm,
mm                519 fs/binfmt_elf_fdpic.c 	sp = mm->start_stack;
mm                570 fs/binfmt_elf_fdpic.c 	current->mm->context.exec_fdpic_loadmap = (unsigned long) sp;
mm                583 fs/binfmt_elf_fdpic.c 		current->mm->context.interp_fdpic_loadmap = (unsigned long) sp;
mm                684 fs/binfmt_elf_fdpic.c 	current->mm->arg_start = bprm->p;
mm                686 fs/binfmt_elf_fdpic.c 	current->mm->arg_start = current->mm->start_stack -
mm                690 fs/binfmt_elf_fdpic.c 	p = (char __user *) current->mm->arg_start;
mm                699 fs/binfmt_elf_fdpic.c 	current->mm->arg_end = (unsigned long) p;
mm                702 fs/binfmt_elf_fdpic.c 	current->mm->env_start = (unsigned long) p;
mm                711 fs/binfmt_elf_fdpic.c 	current->mm->env_end = (unsigned long) p;
mm                713 fs/binfmt_elf_fdpic.c 	mm->start_stack = (unsigned long) sp;
mm                730 fs/binfmt_elf_fdpic.c 			      struct mm_struct *mm,
mm                771 fs/binfmt_elf_fdpic.c 		ret = elf_fdpic_map_file_constdisp_on_uclinux(params, file, mm);
mm                777 fs/binfmt_elf_fdpic.c 		ret = elf_fdpic_map_file_by_direct_mmap(params, file, mm);
mm                918 fs/binfmt_elf_fdpic.c 	struct mm_struct *mm)
mm                980 fs/binfmt_elf_fdpic.c 		if (mm) {
mm                982 fs/binfmt_elf_fdpic.c 				if (!mm->start_code) {
mm                983 fs/binfmt_elf_fdpic.c 					mm->start_code = seg->addr;
mm                984 fs/binfmt_elf_fdpic.c 					mm->end_code = seg->addr +
mm                987 fs/binfmt_elf_fdpic.c 			} else if (!mm->start_data) {
mm                988 fs/binfmt_elf_fdpic.c 				mm->start_data = seg->addr;
mm                989 fs/binfmt_elf_fdpic.c 				mm->end_data = seg->addr + phdr->p_memsz;
mm               1006 fs/binfmt_elf_fdpic.c 					     struct mm_struct *mm)
mm               1157 fs/binfmt_elf_fdpic.c 		if (mm) {
mm               1159 fs/binfmt_elf_fdpic.c 				if (!mm->start_code) {
mm               1160 fs/binfmt_elf_fdpic.c 					mm->start_code = maddr;
mm               1161 fs/binfmt_elf_fdpic.c 					mm->end_code = maddr + phdr->p_memsz;
mm               1163 fs/binfmt_elf_fdpic.c 			} else if (!mm->start_data) {
mm               1164 fs/binfmt_elf_fdpic.c 				mm->start_data = maddr;
mm               1165 fs/binfmt_elf_fdpic.c 				mm->end_data = maddr + phdr->p_memsz;
mm               1374 fs/binfmt_elf_fdpic.c 	prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
mm               1375 fs/binfmt_elf_fdpic.c 	prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
mm               1379 fs/binfmt_elf_fdpic.c 		       struct mm_struct *mm)
mm               1387 fs/binfmt_elf_fdpic.c 	len = mm->arg_end - mm->arg_start;
mm               1391 fs/binfmt_elf_fdpic.c 		           (const char __user *) mm->arg_start, len))
mm               1497 fs/binfmt_elf_fdpic.c 	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
mm               1535 fs/binfmt_elf_fdpic.c 	for (vma = current->mm->mmap; vma; vma = vma->vm_next)
mm               1612 fs/binfmt_elf_fdpic.c 	for (ct = current->mm->core_state->dumper.next;
mm               1635 fs/binfmt_elf_fdpic.c 	segs = current->mm->map_count;
mm               1656 fs/binfmt_elf_fdpic.c 	fill_psinfo(psinfo, current->group_leader, current->mm);
mm               1661 fs/binfmt_elf_fdpic.c 	auxv = (elf_addr_t *) current->mm->saved_auxv;
mm               1727 fs/binfmt_elf_fdpic.c 	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
mm                130 fs/binfmt_flat.c 	sp = (unsigned long __user *)current->mm->start_stack;
mm                138 fs/binfmt_flat.c 	current->mm->start_stack = (unsigned long)sp & -FLAT_STACK_ALIGN;
mm                139 fs/binfmt_flat.c 	sp = (unsigned long __user *)current->mm->start_stack;
mm                150 fs/binfmt_flat.c 	current->mm->arg_start = (unsigned long)p;
mm                159 fs/binfmt_flat.c 	current->mm->arg_end = (unsigned long)p;
mm                161 fs/binfmt_flat.c 	current->mm->env_start = (unsigned long) p;
mm                170 fs/binfmt_flat.c 	current->mm->env_end = (unsigned long)p;
mm                393 fs/binfmt_flat.c 	ptr = (unsigned long __user *)(current->mm->start_code + r.reloc.offset);
mm                395 fs/binfmt_flat.c 	ptr = (unsigned long __user *)(current->mm->start_data + r.reloc.offset);
mm                405 fs/binfmt_flat.c 		val += current->mm->start_code;
mm                408 fs/binfmt_flat.c 		val += current->mm->start_data;
mm                411 fs/binfmt_flat.c 		val += current->mm->end_data;
mm                719 fs/binfmt_flat.c 		current->mm->start_code = start_code;
mm                720 fs/binfmt_flat.c 		current->mm->end_code = end_code;
mm                721 fs/binfmt_flat.c 		current->mm->start_data = datapos;
mm                722 fs/binfmt_flat.c 		current->mm->end_data = datapos + data_len;
mm                730 fs/binfmt_flat.c 		current->mm->start_brk = datapos + data_len + bss_len;
mm                731 fs/binfmt_flat.c 		current->mm->brk = (current->mm->start_brk + 3) & ~3;
mm                733 fs/binfmt_flat.c 		current->mm->context.end_brk = memp + memp_size - stack_len;
mm                976 fs/binfmt_flat.c 	current->mm->start_stack =
mm                977 fs/binfmt_flat.c 		((current->mm->context.end_brk + stack_len + 3) & ~3) - 4;
mm                978 fs/binfmt_flat.c 	pr_debug("sp=%lx\n", current->mm->start_stack);
mm                981 fs/binfmt_flat.c 	res = transfer_args_to_stack(bprm, &current->mm->start_stack);
mm                983 fs/binfmt_flat.c 		res = create_flat_tables(bprm, current->mm->start_stack);
mm                999 fs/binfmt_flat.c 			current->mm->start_stack -= sizeof(unsigned long);
mm               1000 fs/binfmt_flat.c 			sp = (unsigned long __user *)current->mm->start_stack;
mm               1013 fs/binfmt_flat.c 		 regs, start_addr, current->mm->start_stack);
mm               1014 fs/binfmt_flat.c 	start_thread(regs, start_addr, current->mm->start_stack);
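
The binfmt_flat relocation lines above (old_reloc and friends) adjust each recorded value by the runtime base of the segment it refers to. A sketch of that dispatch; the FLAT_RELOC_TYPE_* names follow the bFLT format headers and are assumptions here:

	switch (reloc_type) {		/* which segment is this relative to? */
	case FLAT_RELOC_TYPE_TEXT:
		val += current->mm->start_code;
		break;
	case FLAT_RELOC_TYPE_DATA:
		val += current->mm->start_data;
		break;
	case FLAT_RELOC_TYPE_BSS:
		val += current->mm->end_data;	/* bss starts where data ends */
		break;
	}
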
mm                162 fs/coredump.c  	exe_file = get_mm_exe_file(current->mm);
mm                352 fs/coredump.c  		if (t != current && t->mm) {
mm                362 fs/coredump.c  static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
mm                371 fs/coredump.c  		mm->core_state = core_state;
mm                381 fs/coredump.c  	if (atomic_read(&mm->mm_users) == nr + 1)
mm                421 fs/coredump.c  			if (unlikely(!p->mm))
mm                423 fs/coredump.c  			if (unlikely(p->mm == mm)) {
mm                441 fs/coredump.c  	struct mm_struct *mm = tsk->mm;
mm                448 fs/coredump.c  	if (down_write_killable(&mm->mmap_sem))
mm                451 fs/coredump.c  	if (!mm->core_state)
mm                452 fs/coredump.c  		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
mm                453 fs/coredump.c  	up_write(&mm->mmap_sem);
mm                476 fs/coredump.c  static void coredump_finish(struct mm_struct *mm, bool core_dumped)
mm                488 fs/coredump.c  	next = mm->core_state->dumper.next;
mm                501 fs/coredump.c  	mm->core_state = NULL;
mm                571 fs/coredump.c  	struct mm_struct *mm = current->mm;
mm                593 fs/coredump.c  		.mm_flags = mm->flags,
mm                598 fs/coredump.c  	binfmt = mm->binfmt;
mm                814 fs/coredump.c  	coredump_finish(mm, core_dumped);
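
coredump_wait() above serializes competing dumpers through mm->core_state: the field is tested and set under a write-held mmap_sem, so exactly one thread wins, and zap_threads() then counts how many users of the mm must check in before the dump proceeds. The core of it:

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;		/* interrupted before committing */
	if (!mm->core_state)		/* first dumper for this mm wins */
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);
	/* ... then wait for core_waiters threads to reach the barrier ... */
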
mm                185 fs/exec.c      	struct mm_struct *mm = current->mm;
mm                188 fs/exec.c      	if (!mm || !diff)
mm                192 fs/exec.c      	add_mm_counter(mm, MM_ANONPAGES, diff);
mm                217 fs/exec.c      	ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
mm                247 fs/exec.c      	struct mm_struct *mm = bprm->mm;
mm                249 fs/exec.c      	bprm->vma = vma = vm_area_alloc(mm);
mm                254 fs/exec.c      	if (down_write_killable(&mm->mmap_sem)) {
mm                271 fs/exec.c      	err = insert_vm_struct(mm, vma);
mm                275 fs/exec.c      	mm->stack_vm = mm->total_vm = 1;
mm                276 fs/exec.c      	arch_bprm_mm_init(mm, vma);
mm                277 fs/exec.c      	up_write(&mm->mmap_sem);
mm                281 fs/exec.c      	up_write(&mm->mmap_sem);
mm                362 fs/exec.c      	struct mm_struct *mm = NULL;
mm                364 fs/exec.c      	bprm->mm = mm = mm_alloc();
mm                366 fs/exec.c      	if (!mm)
mm                381 fs/exec.c      	if (mm) {
mm                382 fs/exec.c      		bprm->mm = NULL;
mm                383 fs/exec.c      		mmdrop(mm);
mm                628 fs/exec.c      	struct mm_struct *mm = vma->vm_mm;
mm                642 fs/exec.c      	if (vma != find_vma(mm, new_start))
mm                660 fs/exec.c      	tlb_gather_mmu(&tlb, mm, old_start, old_end);
mm                697 fs/exec.c      	struct mm_struct *mm = current->mm;
mm                722 fs/exec.c      	mm->arg_start = bprm->p - stack_shift;
mm                735 fs/exec.c      	mm->arg_start = bprm->p;
mm                742 fs/exec.c      	if (down_write_killable(&mm->mmap_sem))
mm                756 fs/exec.c      	vm_flags |= mm->def_flags;
mm                793 fs/exec.c      	current->mm->start_stack = bprm->p;
mm                799 fs/exec.c      	up_write(&mm->mmap_sem);
mm               1010 fs/exec.c      static int exec_mmap(struct mm_struct *mm)
mm               1017 fs/exec.c      	old_mm = current->mm;
mm               1036 fs/exec.c      	membarrier_exec_mmap(mm);
mm               1037 fs/exec.c      	tsk->mm = mm;
mm               1038 fs/exec.c      	tsk->active_mm = mm;
mm               1039 fs/exec.c      	activate_mm(active_mm, mm);
mm               1040 fs/exec.c      	tsk->mm->vmacache_seqnum = 0;
mm               1275 fs/exec.c      	set_mm_exe_file(bprm->mm, bprm->file);
mm               1283 fs/exec.c      	retval = exec_mmap(bprm->mm);
mm               1293 fs/exec.c      	bprm->mm = NULL;
mm               1323 fs/exec.c      		user_ns = old = bprm->mm->user_ns;
mm               1329 fs/exec.c      			bprm->mm->user_ns = get_user_ns(user_ns);
mm               1360 fs/exec.c      	arch_pick_mmap_layout(current->mm, &bprm->rlim_stack);
mm               1372 fs/exec.c      		set_dumpable(current->mm, suid_dumpable);
mm               1374 fs/exec.c      		set_dumpable(current->mm, SUID_DUMP_USER);
mm               1384 fs/exec.c      	current->mm->task_size = TASK_SIZE;
mm               1467 fs/exec.c      	if (get_dumpable(current->mm) != SUID_DUMP_USER)
mm               1666 fs/exec.c      		if (retval < 0 && !bprm->mm) {
mm               1841 fs/exec.c      	if (bprm->mm) {
mm               1843 fs/exec.c      		mmput(bprm->mm);
mm               1934 fs/exec.c      	struct mm_struct *mm = current->mm;
mm               1936 fs/exec.c      	if (mm->binfmt)
mm               1937 fs/exec.c      		module_put(mm->binfmt->module);
mm               1939 fs/exec.c      	mm->binfmt = new;
mm               1948 fs/exec.c      void set_dumpable(struct mm_struct *mm, int value)
mm               1953 fs/exec.c      	set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
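
set_dumpable() above flips both MMF_DUMPABLE bits in one step: set_mask_bits() is an atomic read-modify-write over the mask, so a concurrent get_dumpable() never observes a mixture of old and new state. Sketched, with the range check assumed:

	static void set_dumpable_sketch(struct mm_struct *mm, int value)
	{
		if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
			return;		/* reject out-of-range states */

		/* atomically replace the MMF_DUMPABLE_MASK bits */
		set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
	}
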
mm                202 fs/hugetlbfs/inode.c 	struct mm_struct *mm = current->mm;
mm                220 fs/hugetlbfs/inode.c 		vma = find_vma(mm, addr);
mm                427 fs/hugetlbfs/inode.c 	vma_init(&pseudo_vma, current->mm);
mm                578 fs/hugetlbfs/inode.c 	struct mm_struct *mm = current->mm;
mm                616 fs/hugetlbfs/inode.c 	vma_init(&pseudo_vma, mm);
mm               3241 fs/io_uring.c  	mmgrab(current->mm);
mm               3242 fs/io_uring.c  	ctx->sqo_mm = current->mm;
mm               3527 fs/io_uring.c  		down_read(&current->mm->mmap_sem);
mm               3545 fs/io_uring.c  		up_read(&current->mm->mmap_sem);
mm                824 fs/notify/fanotify/fanotify_user.c 	group->memcg = get_mem_cgroup_from_mm(current->mm);
mm                645 fs/notify/inotify/inotify_user.c 	group->memcg = get_mem_cgroup_from_mm(current->mm);
mm                389 fs/proc/array.c static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)
mm                391 fs/proc/array.c 	seq_put_decimal_ull(m, "CoreDumping:\t", !!mm->core_state);
mm                395 fs/proc/array.c static inline void task_thp_status(struct seq_file *m, struct mm_struct *mm)
mm                400 fs/proc/array.c 		thp_enabled = !test_bit(MMF_DISABLE_THP, &mm->flags);
mm                407 fs/proc/array.c 	struct mm_struct *mm = get_task_mm(task);
mm                415 fs/proc/array.c 	if (mm) {
mm                416 fs/proc/array.c 		task_mem(m, mm);
mm                417 fs/proc/array.c 		task_core_dumping(m, mm);
mm                418 fs/proc/array.c 		task_thp_status(m, mm);
mm                419 fs/proc/array.c 		mmput(mm);
mm                441 fs/proc/array.c 	struct mm_struct *mm;
mm                453 fs/proc/array.c 	mm = get_task_mm(task);
mm                454 fs/proc/array.c 	if (mm) {
mm                455 fs/proc/array.c 		vsize = task_vsize(mm);
mm                563 fs/proc/array.c 	seq_put_decimal_ull(m, " ", mm ? get_mm_rss(mm) : 0);
mm                565 fs/proc/array.c 	seq_put_decimal_ull(m, " ", mm ? (permitted ? mm->start_code : 1) : 0);
mm                566 fs/proc/array.c 	seq_put_decimal_ull(m, " ", mm ? (permitted ? mm->end_code : 1) : 0);
mm                567 fs/proc/array.c 	seq_put_decimal_ull(m, " ", (permitted && mm) ? mm->start_stack : 0);
mm                601 fs/proc/array.c 	if (mm && permitted) {
mm                602 fs/proc/array.c 		seq_put_decimal_ull(m, " ", mm->start_data);
mm                603 fs/proc/array.c 		seq_put_decimal_ull(m, " ", mm->end_data);
mm                604 fs/proc/array.c 		seq_put_decimal_ull(m, " ", mm->start_brk);
mm                605 fs/proc/array.c 		seq_put_decimal_ull(m, " ", mm->arg_start);
mm                606 fs/proc/array.c 		seq_put_decimal_ull(m, " ", mm->arg_end);
mm                607 fs/proc/array.c 		seq_put_decimal_ull(m, " ", mm->env_start);
mm                608 fs/proc/array.c 		seq_put_decimal_ull(m, " ", mm->env_end);
mm                618 fs/proc/array.c 	if (mm)
mm                619 fs/proc/array.c 		mmput(mm);
mm                639 fs/proc/array.c 	struct mm_struct *mm = get_task_mm(task);
mm                641 fs/proc/array.c 	if (mm) {
mm                642 fs/proc/array.c 		size = task_statm(mm, &shared, &text, &data, &resident);
mm                643 fs/proc/array.c 		mmput(mm);
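
Every /proc reader in array.c above brackets its access the same way, because the target task can exit (and drop its mm) at any moment: get_task_mm() returns either a referenced mm or NULL, and the closing mmput() may be the final release. The bracket, condensed:

	struct mm_struct *mm = get_task_mm(task);	/* NULL if task exited */

	if (mm) {
		size = task_statm(mm, &shared, &text, &data, &resident);
		mmput(mm);		/* may free the address space */
	}
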
mm                216 fs/proc/base.c static ssize_t get_mm_proctitle(struct mm_struct *mm, char __user *buf,
mm                231 fs/proc/base.c 	got = access_remote_vm(mm, arg_start, page, PAGE_SIZE, FOLL_ANON);
mm                253 fs/proc/base.c static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
mm                261 fs/proc/base.c 	if (!mm->env_end)
mm                264 fs/proc/base.c 	spin_lock(&mm->arg_lock);
mm                265 fs/proc/base.c 	arg_start = mm->arg_start;
mm                266 fs/proc/base.c 	arg_end = mm->arg_end;
mm                267 fs/proc/base.c 	env_start = mm->env_start;
mm                268 fs/proc/base.c 	env_end = mm->env_end;
mm                269 fs/proc/base.c 	spin_unlock(&mm->arg_lock);
mm                299 fs/proc/base.c 	if (access_remote_vm(mm, arg_end-1, &c, 1, FOLL_ANON) == 1 && c)
mm                300 fs/proc/base.c 		return get_mm_proctitle(mm, buf, count, pos, arg_start);
mm                321 fs/proc/base.c 		got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
mm                343 fs/proc/base.c 	struct mm_struct *mm;
mm                346 fs/proc/base.c 	mm = get_task_mm(tsk);
mm                347 fs/proc/base.c 	if (!mm)
mm                350 fs/proc/base.c 	ret = get_mm_cmdline(mm, buf, count, pos);
mm                351 fs/proc/base.c 	mmput(mm);
mm                778 fs/proc/base.c 	struct mm_struct *mm = ERR_PTR(-ESRCH);
mm                781 fs/proc/base.c 		mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
mm                784 fs/proc/base.c 		if (!IS_ERR_OR_NULL(mm)) {
mm                786 fs/proc/base.c 			mmgrab(mm);
mm                788 fs/proc/base.c 			mmput(mm);
mm                792 fs/proc/base.c 	return mm;
mm                797 fs/proc/base.c 	struct mm_struct *mm = proc_mem_open(inode, mode);
mm                799 fs/proc/base.c 	if (IS_ERR(mm))
mm                800 fs/proc/base.c 		return PTR_ERR(mm);
mm                802 fs/proc/base.c 	file->private_data = mm;
mm                819 fs/proc/base.c 	struct mm_struct *mm = file->private_data;
mm                825 fs/proc/base.c 	if (!mm)
mm                833 fs/proc/base.c 	if (!mmget_not_zero(mm))
mm                846 fs/proc/base.c 		this_len = access_remote_vm(mm, addr, page, this_len, flags);
mm                865 fs/proc/base.c 	mmput(mm);
mm                901 fs/proc/base.c 	struct mm_struct *mm = file->private_data;
mm                902 fs/proc/base.c 	if (mm)
mm                903 fs/proc/base.c 		mmdrop(mm);
mm                926 fs/proc/base.c 	struct mm_struct *mm = file->private_data;
mm                930 fs/proc/base.c 	if (!mm || !mm->env_end)
mm                938 fs/proc/base.c 	if (!mmget_not_zero(mm))
mm                941 fs/proc/base.c 	spin_lock(&mm->arg_lock);
mm                942 fs/proc/base.c 	env_start = mm->env_start;
mm                943 fs/proc/base.c 	env_end = mm->env_end;
mm                944 fs/proc/base.c 	spin_unlock(&mm->arg_lock);
mm                958 fs/proc/base.c 		retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
mm                976 fs/proc/base.c 	mmput(mm);
mm                998 fs/proc/base.c 	struct mm_struct *mm = file->private_data;
mm               1001 fs/proc/base.c 	if (!mm)
mm               1005 fs/proc/base.c 	} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
mm               1006 fs/proc/base.c 	return simple_read_from_buffer(buf, count, ppos, mm->saved_auxv,
mm               1007 fs/proc/base.c 				       nwords * sizeof(mm->saved_auxv[0]));
mm               1040 fs/proc/base.c 	struct mm_struct *mm = NULL;
mm               1079 fs/proc/base.c 			if (atomic_read(&p->mm->mm_users) > 1) {
mm               1080 fs/proc/base.c 				mm = p->mm;
mm               1081 fs/proc/base.c 				mmgrab(mm);
mm               1092 fs/proc/base.c 	if (mm) {
mm               1105 fs/proc/base.c 			if (!p->vfork_done && process_shares_mm(p, mm)) {
mm               1113 fs/proc/base.c 		mmdrop(mm);
mm               1720 fs/proc/base.c 		struct mm_struct *mm;
mm               1722 fs/proc/base.c 		mm = task->mm;
mm               1724 fs/proc/base.c 		if (mm) {
mm               1725 fs/proc/base.c 			if (get_dumpable(mm) != SUID_DUMP_USER) {
mm               1726 fs/proc/base.c 				struct user_namespace *user_ns = mm->user_ns;
mm               1964 fs/proc/base.c 	struct mm_struct *mm = NULL;
mm               1977 fs/proc/base.c 	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
mm               1978 fs/proc/base.c 	if (IS_ERR_OR_NULL(mm))
mm               1982 fs/proc/base.c 		status = down_read_killable(&mm->mmap_sem);
mm               1984 fs/proc/base.c 			exact_vma_exists = !!find_exact_vma(mm, vm_start,
mm               1986 fs/proc/base.c 			up_read(&mm->mmap_sem);
mm               1990 fs/proc/base.c 	mmput(mm);
mm               2016 fs/proc/base.c 	struct mm_struct *mm;
mm               2024 fs/proc/base.c 	mm = get_task_mm(task);
mm               2026 fs/proc/base.c 	if (!mm)
mm               2033 fs/proc/base.c 	rc = down_read_killable(&mm->mmap_sem);
mm               2038 fs/proc/base.c 	vma = find_exact_vma(mm, vm_start, vm_end);
mm               2044 fs/proc/base.c 	up_read(&mm->mmap_sem);
mm               2047 fs/proc/base.c 	mmput(mm);
mm               2114 fs/proc/base.c 	struct mm_struct *mm;
mm               2129 fs/proc/base.c 	mm = get_task_mm(task);
mm               2130 fs/proc/base.c 	if (!mm)
mm               2134 fs/proc/base.c 	if (down_read_killable(&mm->mmap_sem))
mm               2138 fs/proc/base.c 	vma = find_exact_vma(mm, vm_start, vm_end);
mm               2147 fs/proc/base.c 	up_read(&mm->mmap_sem);
mm               2149 fs/proc/base.c 	mmput(mm);
mm               2167 fs/proc/base.c 	struct mm_struct *mm;
mm               2188 fs/proc/base.c 	mm = get_task_mm(task);
mm               2189 fs/proc/base.c 	if (!mm)
mm               2192 fs/proc/base.c 	ret = down_read_killable(&mm->mmap_sem);
mm               2194 fs/proc/base.c 		mmput(mm);
mm               2210 fs/proc/base.c 	for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
mm               2219 fs/proc/base.c 			up_read(&mm->mmap_sem);
mm               2220 fs/proc/base.c 			mmput(mm);
mm               2228 fs/proc/base.c 	up_read(&mm->mmap_sem);
mm               2229 fs/proc/base.c 	mmput(mm);
mm               2694 fs/proc/base.c 	struct mm_struct *mm;
mm               2703 fs/proc/base.c 	mm = get_task_mm(task);
mm               2704 fs/proc/base.c 	if (mm) {
mm               2706 fs/proc/base.c 			       ((mm->flags & MMF_DUMP_FILTER_MASK) >>
mm               2708 fs/proc/base.c 		mmput(mm);
mm               2723 fs/proc/base.c 	struct mm_struct *mm;
mm               2738 fs/proc/base.c 	mm = get_task_mm(task);
mm               2739 fs/proc/base.c 	if (!mm)
mm               2745 fs/proc/base.c 			set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
mm               2747 fs/proc/base.c 			clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
mm               2750 fs/proc/base.c 	mmput(mm);
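
fs/proc/base.c above splits the mm lifetime across the file operations: open() stashes an mm_count reference (proc_mem_open() ends in mmgrab()), each read()/write() temporarily upgrades it with mmget_not_zero() so the page tables stay usable, and release() drops the count with mmdrop(). The read-side bracket, sketched:

	static ssize_t mem_read_sketch(struct file *file, char __user *buf,
				       size_t count, loff_t *ppos)
	{
		struct mm_struct *mm = file->private_data;	/* from open() */
		ssize_t copied = 0;

		if (!mm || !mmget_not_zero(mm))
			return 0;	/* process is gone: behave like EOF */

		/* ... access_remote_vm(mm, addr, page, len, flags) loop ... */

		mmput(mm);		/* drop the temporary mm_users ref */
		return copied;
	}
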
mm                320 fs/proc/inode.c 			get_area = current->mm->get_unmapped_area;
mm                279 fs/proc/internal.h 	struct mm_struct *mm;
mm                 30 fs/proc/task_mmu.c void task_mem(struct seq_file *m, struct mm_struct *mm)
mm                 35 fs/proc/task_mmu.c 	anon = get_mm_counter(mm, MM_ANONPAGES);
mm                 36 fs/proc/task_mmu.c 	file = get_mm_counter(mm, MM_FILEPAGES);
mm                 37 fs/proc/task_mmu.c 	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
mm                 46 fs/proc/task_mmu.c 	hiwater_vm = total_vm = mm->total_vm;
mm                 47 fs/proc/task_mmu.c 	if (hiwater_vm < mm->hiwater_vm)
mm                 48 fs/proc/task_mmu.c 		hiwater_vm = mm->hiwater_vm;
mm                 50 fs/proc/task_mmu.c 	if (hiwater_rss < mm->hiwater_rss)
mm                 51 fs/proc/task_mmu.c 		hiwater_rss = mm->hiwater_rss;
mm                 54 fs/proc/task_mmu.c 	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
mm                 55 fs/proc/task_mmu.c 	text = min(text, mm->exec_vm << PAGE_SHIFT);
mm                 56 fs/proc/task_mmu.c 	lib = (mm->exec_vm << PAGE_SHIFT) - text;
mm                 58 fs/proc/task_mmu.c 	swap = get_mm_counter(mm, MM_SWAPENTS);
mm                 61 fs/proc/task_mmu.c 	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
mm                 62 fs/proc/task_mmu.c 	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
mm                 68 fs/proc/task_mmu.c 	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
mm                 69 fs/proc/task_mmu.c 	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
mm                 75 fs/proc/task_mmu.c 		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
mm                 78 fs/proc/task_mmu.c 	hugetlb_report_usage(m, mm);
mm                 82 fs/proc/task_mmu.c unsigned long task_vsize(struct mm_struct *mm)
mm                 84 fs/proc/task_mmu.c 	return PAGE_SIZE * mm->total_vm;
mm                 87 fs/proc/task_mmu.c unsigned long task_statm(struct mm_struct *mm,
mm                 91 fs/proc/task_mmu.c 	*shared = get_mm_counter(mm, MM_FILEPAGES) +
mm                 92 fs/proc/task_mmu.c 			get_mm_counter(mm, MM_SHMEMPAGES);
mm                 93 fs/proc/task_mmu.c 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
mm                 95 fs/proc/task_mmu.c 	*data = mm->data_vm + mm->stack_vm;
mm                 96 fs/proc/task_mmu.c 	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
mm                 97 fs/proc/task_mmu.c 	return mm->total_vm;
mm                128 fs/proc/task_mmu.c 	struct mm_struct *mm = priv->mm;
mm                131 fs/proc/task_mmu.c 	up_read(&mm->mmap_sem);
mm                132 fs/proc/task_mmu.c 	mmput(mm);
mm                153 fs/proc/task_mmu.c 	struct mm_struct *mm;
mm                165 fs/proc/task_mmu.c 	mm = priv->mm;
mm                166 fs/proc/task_mmu.c 	if (!mm || !mmget_not_zero(mm))
mm                169 fs/proc/task_mmu.c 	if (down_read_killable(&mm->mmap_sem)) {
mm                170 fs/proc/task_mmu.c 		mmput(mm);
mm                175 fs/proc/task_mmu.c 	priv->tail_vma = get_gate_vma(mm);
mm                178 fs/proc/task_mmu.c 		vma = find_vma(mm, last_addr - 1);
mm                186 fs/proc/task_mmu.c 	if (pos < mm->map_count) {
mm                187 fs/proc/task_mmu.c 		for (vma = mm->mmap; pos; pos--) {
mm                195 fs/proc/task_mmu.c 	if (pos == mm->map_count && priv->tail_vma)
mm                235 fs/proc/task_mmu.c 	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
mm                236 fs/proc/task_mmu.c 	if (IS_ERR(priv->mm)) {
mm                237 fs/proc/task_mmu.c 		int err = PTR_ERR(priv->mm);
mm                251 fs/proc/task_mmu.c 	if (priv->mm)
mm                252 fs/proc/task_mmu.c 		mmdrop(priv->mm);
mm                302 fs/proc/task_mmu.c 	struct mm_struct *mm = vma->vm_mm;
mm                340 fs/proc/task_mmu.c 		if (!mm) {
mm                345 fs/proc/task_mmu.c 		if (vma->vm_start <= mm->brk &&
mm                346 fs/proc/task_mmu.c 		    vma->vm_end >= mm->start_brk) {
mm                859 fs/proc/task_mmu.c 	struct mm_struct *mm;
mm                868 fs/proc/task_mmu.c 	mm = priv->mm;
mm                869 fs/proc/task_mmu.c 	if (!mm || !mmget_not_zero(mm)) {
mm                876 fs/proc/task_mmu.c 	ret = down_read_killable(&mm->mmap_sem);
mm                882 fs/proc/task_mmu.c 	for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
mm                887 fs/proc/task_mmu.c 	show_vma_header_prefix(m, priv->mm->mmap->vm_start,
mm                895 fs/proc/task_mmu.c 	up_read(&mm->mmap_sem);
mm                898 fs/proc/task_mmu.c 	mmput(mm);
mm                933 fs/proc/task_mmu.c 	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
mm                934 fs/proc/task_mmu.c 	if (IS_ERR(priv->mm)) {
mm                935 fs/proc/task_mmu.c 		ret = PTR_ERR(priv->mm);
mm                953 fs/proc/task_mmu.c 	if (priv->mm)
mm                954 fs/proc/task_mmu.c 		mmdrop(priv->mm);
mm               1139 fs/proc/task_mmu.c 	struct mm_struct *mm;
mm               1161 fs/proc/task_mmu.c 	mm = get_task_mm(task);
mm               1162 fs/proc/task_mmu.c 	if (mm) {
mm               1169 fs/proc/task_mmu.c 			if (down_write_killable(&mm->mmap_sem)) {
mm               1178 fs/proc/task_mmu.c 			reset_mm_hiwater_rss(mm);
mm               1179 fs/proc/task_mmu.c 			up_write(&mm->mmap_sem);
mm               1183 fs/proc/task_mmu.c 		if (down_read_killable(&mm->mmap_sem)) {
mm               1187 fs/proc/task_mmu.c 		tlb_gather_mmu(&tlb, mm, 0, -1);
mm               1189 fs/proc/task_mmu.c 			for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm               1192 fs/proc/task_mmu.c 				up_read(&mm->mmap_sem);
mm               1193 fs/proc/task_mmu.c 				if (down_write_killable(&mm->mmap_sem)) {
mm               1202 fs/proc/task_mmu.c 				if (!mmget_still_valid(mm)) {
mm               1212 fs/proc/task_mmu.c 					up_write(&mm->mmap_sem);
mm               1215 fs/proc/task_mmu.c 				for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm               1219 fs/proc/task_mmu.c 				downgrade_write(&mm->mmap_sem);
mm               1224 fs/proc/task_mmu.c 						0, NULL, mm, 0, -1UL);
mm               1227 fs/proc/task_mmu.c 		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
mm               1232 fs/proc/task_mmu.c 		up_read(&mm->mmap_sem);
mm               1234 fs/proc/task_mmu.c 		mmput(mm);
mm               1292 fs/proc/task_mmu.c 		struct vm_area_struct *vma = find_vma(walk->mm, addr);
mm               1439 fs/proc/task_mmu.c 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
mm               1539 fs/proc/task_mmu.c 	struct mm_struct *mm = file->private_data;
mm               1547 fs/proc/task_mmu.c 	if (!mm || !mmget_not_zero(mm))
mm               1571 fs/proc/task_mmu.c 	end_vaddr = mm->task_size;
mm               1574 fs/proc/task_mmu.c 	if (svpfn > mm->task_size >> PAGE_SHIFT)
mm               1593 fs/proc/task_mmu.c 		ret = down_read_killable(&mm->mmap_sem);
mm               1596 fs/proc/task_mmu.c 		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
mm               1597 fs/proc/task_mmu.c 		up_read(&mm->mmap_sem);
mm               1616 fs/proc/task_mmu.c 	mmput(mm);
mm               1623 fs/proc/task_mmu.c 	struct mm_struct *mm;
mm               1625 fs/proc/task_mmu.c 	mm = proc_mem_open(inode, PTRACE_MODE_READ);
mm               1626 fs/proc/task_mmu.c 	if (IS_ERR(mm))
mm               1627 fs/proc/task_mmu.c 		return PTR_ERR(mm);
mm               1628 fs/proc/task_mmu.c 	file->private_data = mm;
mm               1634 fs/proc/task_mmu.c 	struct mm_struct *mm = file->private_data;
mm               1636 fs/proc/task_mmu.c 	if (mm)
mm               1637 fs/proc/task_mmu.c 		mmdrop(mm);
mm               1768 fs/proc/task_mmu.c 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
mm               1823 fs/proc/task_mmu.c 	struct mm_struct *mm = vma->vm_mm;
mm               1828 fs/proc/task_mmu.c 	if (!mm)
mm               1847 fs/proc/task_mmu.c 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
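
show_map_vma() above decides the [heap] annotation by range overlap rather than equality, since the brk area can sit inside a larger anonymous mapping: a VMA is the heap if it intersects [start_brk, brk]. In miniature:

	if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk)
		name = "[heap]";	/* shown in /proc/<pid>/maps and smaps */
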
mm                 21 fs/proc/task_nommu.c void task_mem(struct seq_file *m, struct mm_struct *mm)
mm                 28 fs/proc/task_nommu.c 	down_read(&mm->mmap_sem);
mm                 29 fs/proc/task_nommu.c 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
mm                 42 fs/proc/task_nommu.c 		if (atomic_read(&mm->mm_count) > 1 ||
mm                 52 fs/proc/task_nommu.c 	if (atomic_read(&mm->mm_count) > 1)
mm                 53 fs/proc/task_nommu.c 		sbytes += kobjsize(mm);
mm                 55 fs/proc/task_nommu.c 		bytes += kobjsize(mm);
mm                 80 fs/proc/task_nommu.c 	up_read(&mm->mmap_sem);
mm                 83 fs/proc/task_nommu.c unsigned long task_vsize(struct mm_struct *mm)
mm                 89 fs/proc/task_nommu.c 	down_read(&mm->mmap_sem);
mm                 90 fs/proc/task_nommu.c 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
mm                 94 fs/proc/task_nommu.c 	up_read(&mm->mmap_sem);
mm                 98 fs/proc/task_nommu.c unsigned long task_statm(struct mm_struct *mm,
mm                105 fs/proc/task_nommu.c 	unsigned long size = kobjsize(mm);
mm                107 fs/proc/task_nommu.c 	down_read(&mm->mmap_sem);
mm                108 fs/proc/task_nommu.c 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
mm                118 fs/proc/task_nommu.c 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
mm                120 fs/proc/task_nommu.c 	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
mm                122 fs/proc/task_nommu.c 	up_read(&mm->mmap_sem);
mm                131 fs/proc/task_nommu.c 	struct mm_struct *mm = vma->vm_mm;
mm                138 fs/proc/task_nommu.c 	return vma->vm_start <= mm->start_stack &&
mm                139 fs/proc/task_nommu.c 		vma->vm_end >= mm->start_stack;
mm                147 fs/proc/task_nommu.c 	struct mm_struct *mm = vma->vm_mm;
mm                179 fs/proc/task_nommu.c 	} else if (mm && is_stack(vma)) {
mm                201 fs/proc/task_nommu.c 	struct mm_struct *mm;
mm                210 fs/proc/task_nommu.c 	mm = priv->mm;
mm                211 fs/proc/task_nommu.c 	if (!mm || !mmget_not_zero(mm))
mm                214 fs/proc/task_nommu.c 	if (down_read_killable(&mm->mmap_sem)) {
mm                215 fs/proc/task_nommu.c 		mmput(mm);
mm                220 fs/proc/task_nommu.c 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
mm                224 fs/proc/task_nommu.c 	up_read(&mm->mmap_sem);
mm                225 fs/proc/task_nommu.c 	mmput(mm);
mm                234 fs/proc/task_nommu.c 		up_read(&priv->mm->mmap_sem);
mm                235 fs/proc/task_nommu.c 		mmput(priv->mm);
mm                268 fs/proc/task_nommu.c 	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
mm                269 fs/proc/task_nommu.c 	if (IS_ERR(priv->mm)) {
mm                270 fs/proc/task_nommu.c 		int err = PTR_ERR(priv->mm);
mm                285 fs/proc/task_nommu.c 	if (priv->mm)
mm                286 fs/proc/task_nommu.c 		mmdrop(priv->mm);
mm                 38 fs/ramfs/file-mmu.c 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
mm                 78 fs/userfaultfd.c 	struct mm_struct *mm;
mm                175 fs/userfaultfd.c 		mmdrop(ctx->mm);
mm                233 fs/userfaultfd.c 	struct mm_struct *mm = ctx->mm;
mm                237 fs/userfaultfd.c 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
mm                239 fs/userfaultfd.c 	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
mm                281 fs/userfaultfd.c 	struct mm_struct *mm = ctx->mm;
mm                289 fs/userfaultfd.c 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
mm                291 fs/userfaultfd.c 	pgd = pgd_offset(mm, address);
mm                354 fs/userfaultfd.c 	struct mm_struct *mm = vmf->vma->vm_mm;
mm                379 fs/userfaultfd.c 	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
mm                385 fs/userfaultfd.c 	BUG_ON(ctx->mm != mm);
mm                492 fs/userfaultfd.c 	up_read(&mm->mmap_sem);
mm                546 fs/userfaultfd.c 			down_read(&mm->mmap_sem);
mm                638 fs/userfaultfd.c 		struct mm_struct *mm = release_new_ctx->mm;
mm                641 fs/userfaultfd.c 		down_write(&mm->mmap_sem);
mm                643 fs/userfaultfd.c 		VM_WARN_ON(!mmget_still_valid(mm));
mm                644 fs/userfaultfd.c 		for (vma = mm->mmap; vma; vma = vma->vm_next)
mm                649 fs/userfaultfd.c 		up_write(&mm->mmap_sem);
mm                706 fs/userfaultfd.c 		ctx->mm = vma->vm_mm;
mm                707 fs/userfaultfd.c 		mmgrab(ctx->mm);
mm                793 fs/userfaultfd.c 	struct mm_struct *mm = vma->vm_mm;
mm                803 fs/userfaultfd.c 	up_read(&mm->mmap_sem);
mm                856 fs/userfaultfd.c void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
mm                878 fs/userfaultfd.c 	struct mm_struct *mm = ctx->mm;
mm                887 fs/userfaultfd.c 	if (!mmget_not_zero(mm))
mm                898 fs/userfaultfd.c 	down_write(&mm->mmap_sem);
mm                899 fs/userfaultfd.c 	still_valid = mmget_still_valid(mm);
mm                901 fs/userfaultfd.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm                911 fs/userfaultfd.c 			prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
mm                924 fs/userfaultfd.c 	up_write(&mm->mmap_sem);
mm                925 fs/userfaultfd.c 	mmput(mm);
mm               1274 fs/userfaultfd.c static __always_inline int validate_range(struct mm_struct *mm,
mm               1277 fs/userfaultfd.c 	__u64 task_size = mm->task_size;
mm               1305 fs/userfaultfd.c 	struct mm_struct *mm = ctx->mm;
mm               1341 fs/userfaultfd.c 	ret = validate_range(mm, &uffdio_register.range.start,
mm               1350 fs/userfaultfd.c 	if (!mmget_not_zero(mm))
mm               1353 fs/userfaultfd.c 	down_write(&mm->mmap_sem);
mm               1354 fs/userfaultfd.c 	if (!mmget_still_valid(mm))
mm               1356 fs/userfaultfd.c 	vma = find_vma_prev(mm, start, &prev);
mm               1464 fs/userfaultfd.c 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
mm               1473 fs/userfaultfd.c 			ret = split_vma(mm, vma, start, 1);
mm               1478 fs/userfaultfd.c 			ret = split_vma(mm, vma, end, 0);
mm               1497 fs/userfaultfd.c 	up_write(&mm->mmap_sem);
mm               1498 fs/userfaultfd.c 	mmput(mm);
mm               1517 fs/userfaultfd.c 	struct mm_struct *mm = ctx->mm;
mm               1530 fs/userfaultfd.c 	ret = validate_range(mm, &uffdio_unregister.start,
mm               1539 fs/userfaultfd.c 	if (!mmget_not_zero(mm))
mm               1542 fs/userfaultfd.c 	down_write(&mm->mmap_sem);
mm               1543 fs/userfaultfd.c 	if (!mmget_still_valid(mm))
mm               1545 fs/userfaultfd.c 	vma = find_vma_prev(mm, start, &prev);
mm               1626 fs/userfaultfd.c 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
mm               1635 fs/userfaultfd.c 			ret = split_vma(mm, vma, start, 1);
mm               1640 fs/userfaultfd.c 			ret = split_vma(mm, vma, end, 0);
mm               1659 fs/userfaultfd.c 	up_write(&mm->mmap_sem);
mm               1660 fs/userfaultfd.c 	mmput(mm);
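The two ioctl paths indexed above (UFFDIO_REGISTER at fs/userfaultfd.c:1305 and UFFDIO_UNREGISTER at :1517) share one locking recipe. A minimal sketch of that recipe, assuming a hypothetical uffd_update_range() helper and the pre-5.8 mmap_sem naming used throughout this listing:

        static int uffd_update_range(struct mm_struct *mm,
                                     unsigned long start, unsigned long end)
        {
                struct vm_area_struct *vma, *prev;
                int ret = -ESRCH;

                if (!mmget_not_zero(mm))        /* address space already gone */
                        return ret;

                down_write(&mm->mmap_sem);
                if (!mmget_still_valid(mm))     /* racing with a core dump */
                        goto out;

                ret = 0;
                for (vma = find_vma_prev(mm, start, &prev);
                     vma && vma->vm_start < end;
                     prev = vma, vma = vma->vm_next) {
                        /* split_vma()/vma_merge() and flag updates go here */
                }
        out:
                up_write(&mm->mmap_sem);
                mmput(mm);
                return ret;
        }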
mm               1681 fs/userfaultfd.c 	ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len);
mm               1721 fs/userfaultfd.c 	ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len);
mm               1734 fs/userfaultfd.c 	if (mmget_not_zero(ctx->mm)) {
mm               1735 fs/userfaultfd.c 		ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
mm               1737 fs/userfaultfd.c 		mmput(ctx->mm);
mm               1777 fs/userfaultfd.c 	ret = validate_range(ctx->mm, &uffdio_zeropage.range.start,
mm               1785 fs/userfaultfd.c 	if (mmget_not_zero(ctx->mm)) {
mm               1786 fs/userfaultfd.c 		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
mm               1789 fs/userfaultfd.c 		mmput(ctx->mm);
mm               1953 fs/userfaultfd.c 	BUG_ON(!current->mm);
mm               1972 fs/userfaultfd.c 	ctx->mm = current->mm;
mm               1974 fs/userfaultfd.c 	mmgrab(ctx->mm);
mm               1979 fs/userfaultfd.c 		mmdrop(ctx->mm);
mm                 15 include/asm-generic/4level-fixup.h #define pmd_alloc(mm, pud, address) \
mm                 16 include/asm-generic/4level-fixup.h 	((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
mm                 26 include/asm-generic/4level-fixup.h #define pud_populate(mm, pud, pmd)	pgd_populate(mm, pud, pmd)
mm                 32 include/asm-generic/4level-fixup.h #define pud_free(mm, x)			do { } while (0)
mm                 16 include/asm-generic/5level-fixup.h #define pud_alloc(mm, p4d, address) \
mm                 17 include/asm-generic/5level-fixup.h 	((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \
mm                 20 include/asm-generic/5level-fixup.h #define p4d_alloc(mm, pgd, address)	(pgd)
mm                 43 include/asm-generic/5level-fixup.h #define p4d_populate(mm, p4d, pud)	pgd_populate(mm, p4d, pud)
mm                 44 include/asm-generic/5level-fixup.h #define p4d_populate_safe(mm, p4d, pud)	pgd_populate(mm, p4d, pud)
mm                 53 include/asm-generic/5level-fixup.h #define p4d_free(mm, x)			do { } while (0)
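The fixup headers above exist so a page-table walker can be written once against five levels. A sketch under that assumption (hypothetical helper; each *_alloc() degenerates to a cast on configurations where the level is folded):

        static pmd_t *walk_alloc_pmd(struct mm_struct *mm, unsigned long addr)
        {
                pgd_t *pgd = pgd_offset(mm, addr);
                p4d_t *p4d = p4d_alloc(mm, pgd, addr); /* just (pgd) when folded */
                pud_t *pud;

                if (!p4d)
                        return NULL;
                pud = pud_alloc(mm, p4d, addr);        /* may call __pud_alloc() */
                if (!pud)
                        return NULL;
                return pmd_alloc(mm, pud, addr);       /* may call __pmd_alloc() */
        }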
mm                 21 include/asm-generic/cacheflush.h static inline void flush_cache_mm(struct mm_struct *mm)
mm                 27 include/asm-generic/cacheflush.h static inline void flush_cache_dup_mm(struct mm_struct *mm)
mm                 36 include/asm-generic/hugetlb.h static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
mm                 39 include/asm-generic/hugetlb.h 	pte_clear(mm, addr, ptep);
mm                 53 include/asm-generic/hugetlb.h static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
mm                 56 include/asm-generic/hugetlb.h 	set_pte_at(mm, addr, ptep, pte);
mm                 61 include/asm-generic/hugetlb.h static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
mm                 64 include/asm-generic/hugetlb.h 	return ptep_get_and_clear(mm, addr, ptep);
mm                106 include/asm-generic/hugetlb.h static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
mm                109 include/asm-generic/hugetlb.h 	ptep_set_wrprotect(mm, addr, ptep);
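Every generic huge_* helper above is a thin forwarder onto the ordinary pte_* primitive, and architectures opt out one helper at a time. A sketch, assuming the usual __HAVE_ARCH_HUGE_* guard convention, of what an override in an arch's asm/hugetlb.h might look like:

        #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
        static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                                    unsigned long addr, pte_t *ptep)
        {
                /* arch-specific clear plus any extra TLB bookkeeping */
                return ptep_get_and_clear(mm, addr, ptep);
        }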
mm                 11 include/asm-generic/mm_hooks.h 				struct mm_struct *mm)
mm                 16 include/asm-generic/mm_hooks.h static inline void arch_exit_mmap(struct mm_struct *mm)
mm                 20 include/asm-generic/mm_hooks.h static inline void arch_unmap(struct mm_struct *mm,
mm                 25 include/asm-generic/mm_hooks.h static inline void arch_bprm_mm_init(struct mm_struct *mm,
mm                 15 include/asm-generic/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm,
mm                 21 include/asm-generic/mmu_context.h 			struct mm_struct *mm)
mm                 26 include/asm-generic/mmu_context.h static inline void destroy_context(struct mm_struct *mm)
mm                 31 include/asm-generic/mmu_context.h 			struct mm_struct *mm)
mm                 19 include/asm-generic/pgalloc.h static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
mm                 31 include/asm-generic/pgalloc.h static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
mm                 33 include/asm-generic/pgalloc.h 	return __pte_alloc_one_kernel(mm);
mm                 42 include/asm-generic/pgalloc.h static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
mm                 59 include/asm-generic/pgalloc.h static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
mm                 83 include/asm-generic/pgalloc.h static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
mm                 85 include/asm-generic/pgalloc.h 	return __pte_alloc_one(mm, GFP_PGTABLE_USER);
mm                 99 include/asm-generic/pgalloc.h static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
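pte_alloc_one()/pte_free() above are meant to be used in a populate-or-back-off race, much like mm/memory.c's __pte_alloc(). A minimal sketch (hypothetical function name):

        static int pte_install(struct mm_struct *mm, pmd_t *pmd)
        {
                pgtable_t new = pte_alloc_one(mm);
                spinlock_t *ptl;

                if (!new)
                        return -ENOMEM;

                ptl = pmd_lock(mm, pmd);
                if (likely(pmd_none(*pmd))) {   /* we won the race */
                        mm_inc_nr_ptes(mm);
                        pmd_populate(mm, pmd, new);
                        new = NULL;
                }
                spin_unlock(ptl);
                if (new)
                        pte_free(mm, new);      /* somebody beat us to it */
                return 0;
        }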
mm                 33 include/asm-generic/pgtable-nop4d-hack.h #define pgd_populate(mm, pgd, pud)		do { } while (0)
mm                 34 include/asm-generic/pgtable-nop4d-hack.h #define pgd_populate_safe(mm, pgd, pud)		do { } while (0)
mm                 56 include/asm-generic/pgtable-nop4d-hack.h #define pud_alloc_one(mm, address)		NULL
mm                 57 include/asm-generic/pgtable-nop4d-hack.h #define pud_free(mm, x)				do { } while (0)
mm                 28 include/asm-generic/pgtable-nop4d.h #define pgd_populate(mm, pgd, p4d)		do { } while (0)
mm                 29 include/asm-generic/pgtable-nop4d.h #define pgd_populate_safe(mm, pgd, p4d)		do { } while (0)
mm                 51 include/asm-generic/pgtable-nop4d.h #define p4d_alloc_one(mm, address)		NULL
mm                 52 include/asm-generic/pgtable-nop4d.h #define p4d_free(mm, x)				do { } while (0)
mm                 36 include/asm-generic/pgtable-nopmd.h #define pud_populate(mm, pmd, pte)		do { } while (0)
mm                 59 include/asm-generic/pgtable-nopmd.h #define pmd_alloc_one(mm, address)		NULL
mm                 60 include/asm-generic/pgtable-nopmd.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
mm                 37 include/asm-generic/pgtable-nopud.h #define p4d_populate(mm, p4d, pud)		do { } while (0)
mm                 38 include/asm-generic/pgtable-nopud.h #define p4d_populate_safe(mm, p4d, pud)		do { } while (0)
mm                 60 include/asm-generic/pgtable-nopud.h #define pud_alloc_one(mm, address)		NULL
mm                 61 include/asm-generic/pgtable-nopud.h #define pud_free(mm, x)				do { } while (0)
mm                125 include/asm-generic/pgtable.h static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
mm                130 include/asm-generic/pgtable.h 	pte_clear(mm, address, ptep);
mm                137 include/asm-generic/pgtable.h static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
mm                147 include/asm-generic/pgtable.h static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
mm                161 include/asm-generic/pgtable.h static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
mm                165 include/asm-generic/pgtable.h 	return pmdp_huge_get_and_clear(mm, address, pmdp);
mm                170 include/asm-generic/pgtable.h static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
mm                174 include/asm-generic/pgtable.h 	return pudp_huge_get_and_clear(mm, address, pudp);
mm                180 include/asm-generic/pgtable.h static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
mm                185 include/asm-generic/pgtable.h 	pte = ptep_get_and_clear(mm, address, ptep);
mm                196 include/asm-generic/pgtable.h static inline void pte_clear_not_present_full(struct mm_struct *mm,
mm                201 include/asm-generic/pgtable.h 	pte_clear(mm, address, ptep);
mm                222 include/asm-generic/pgtable.h static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
mm                225 include/asm-generic/pgtable.h 	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
mm                255 include/asm-generic/pgtable.h static inline void pmdp_set_wrprotect(struct mm_struct *mm,
mm                259 include/asm-generic/pgtable.h 	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
mm                262 include/asm-generic/pgtable.h static inline void pmdp_set_wrprotect(struct mm_struct *mm,
mm                271 include/asm-generic/pgtable.h static inline void pudp_set_wrprotect(struct mm_struct *mm,
mm                276 include/asm-generic/pgtable.h 	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
mm                279 include/asm-generic/pgtable.h static inline void pudp_set_wrprotect(struct mm_struct *mm,
mm                304 include/asm-generic/pgtable.h extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm                309 include/asm-generic/pgtable.h extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
mm                450 include/asm-generic/pgtable.h static inline void arch_do_swap_page(struct mm_struct *mm,
mm                468 include/asm-generic/pgtable.h static inline int arch_unmap_one(struct mm_struct *mm,
mm                478 include/asm-generic/pgtable.h #define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
mm                486 include/asm-generic/pgtable.h # define pte_accessible(mm, pte)	((void)(pte), 1)
mm               1179 include/asm-generic/pgtable.h #define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
mm               1183 include/asm-generic/pgtable.h #define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
mm               1187 include/asm-generic/pgtable.h #define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
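The generic ptep_set_wrprotect() above is an unguarded read-modify-write, so it is only safe with the PTE lock held. A sketch of a conforming caller (hypothetical helper):

        static void wrprotect_one(struct mm_struct *mm, pmd_t *pmd,
                                  unsigned long addr)
        {
                spinlock_t *ptl;
                pte_t *ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);

                if (pte_present(*ptep))
                        ptep_set_wrprotect(mm, addr, ptep);
                pte_unmap_unlock(ptep, ptl);
        }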
mm                236 include/asm-generic/tlb.h 	struct mm_struct	*mm;
mm                289 include/asm-generic/tlb.h 	struct mm_struct *mm, unsigned long start, unsigned long end);
mm                339 include/asm-generic/tlb.h 		flush_tlb_mm(tlb->mm);
mm                364 include/asm-generic/tlb.h 		flush_tlb_mm(tlb->mm);
mm                367 include/asm-generic/tlb.h 			.vm_mm = tlb->mm,
mm                409 include/asm-generic/tlb.h 	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
mm                 15 include/asm-generic/tlbflush.h static inline void flush_tlb_mm(struct mm_struct *mm)
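The mmu_gather entries above (struct field at tlb.h:236, flush_tlb_mm() fallbacks at :339/:364) implement batched unmapping: queue pages while PTEs are cleared, flush once, then free. The intended lifecycle, sketched as a fragment that assumes mm, vma, start and end are in scope:

        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, start, end);   /* begin a batch for [start, end) */
        unmap_vmas(&tlb, vma, start, end);      /* queue pages, defer the flush   */
        tlb_finish_mmu(&tlb, start, end);       /* flush TLB, then free the pages */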
mm                163 include/drm/drm_mm.h 	struct drm_mm *mm;
mm                225 include/drm/drm_mm.h 	struct drm_mm *mm;
mm                272 include/drm/drm_mm.h static inline bool drm_mm_initialized(const struct drm_mm *mm)
mm                274 include/drm/drm_mm.h 	return mm->hole_stack.next;
mm                349 include/drm/drm_mm.h #define drm_mm_nodes(mm) (&(mm)->head_node.node_list)
mm                359 include/drm/drm_mm.h #define drm_mm_for_each_node(entry, mm) \
mm                360 include/drm/drm_mm.h 	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)
mm                371 include/drm/drm_mm.h #define drm_mm_for_each_node_safe(entry, next, mm) \
mm                372 include/drm/drm_mm.h 	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
mm                390 include/drm/drm_mm.h #define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
mm                391 include/drm/drm_mm.h 	for (pos = list_first_entry(&(mm)->hole_stack, \
mm                393 include/drm/drm_mm.h 	     &pos->hole_stack != &(mm)->hole_stack ? \
mm                402 include/drm/drm_mm.h int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
mm                403 include/drm/drm_mm.h int drm_mm_insert_node_in_range(struct drm_mm *mm,
mm                430 include/drm/drm_mm.h drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
mm                435 include/drm/drm_mm.h 	return drm_mm_insert_node_in_range(mm, node,
mm                454 include/drm/drm_mm.h static inline int drm_mm_insert_node(struct drm_mm *mm,
mm                458 include/drm/drm_mm.h 	return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
mm                463 include/drm/drm_mm.h void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
mm                464 include/drm/drm_mm.h void drm_mm_takedown(struct drm_mm *mm);
mm                474 include/drm/drm_mm.h static inline bool drm_mm_clean(const struct drm_mm *mm)
mm                476 include/drm/drm_mm.h 	return list_empty(drm_mm_nodes(mm));
mm                480 include/drm/drm_mm.h __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);
mm                505 include/drm/drm_mm.h 				 struct drm_mm *mm,
mm                530 include/drm/drm_mm.h 				    struct drm_mm *mm,
mm                536 include/drm/drm_mm.h 	drm_mm_scan_init_with_range(scan, mm,
mm                547 include/drm/drm_mm.h void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);
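Tying the drm_mm declarations above together, a sketch of the basic range-allocator lifecycle (sizes are arbitrary; drm_mm_remove_node() is declared elsewhere in the same header):

        struct drm_mm mm;
        struct drm_mm_node node = {}, *entry;

        drm_mm_init(&mm, 0, 1024);                      /* manage [0, 1024) */
        if (drm_mm_insert_node(&mm, &node, 64) == 0) {
                drm_mm_for_each_node(entry, &mm)
                        ;                               /* nodes in address order */
                drm_mm_remove_node(&node);
        }
        drm_mm_takedown(&mm);                           /* all nodes must be gone */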
mm                 15 include/linux/aio.h extern void exit_aio(struct mm_struct *mm);
mm                 18 include/linux/aio.h static inline void exit_aio(struct mm_struct *mm) { }
mm                 25 include/linux/binfmts.h 	struct mm_struct *mm;
mm                582 include/linux/ccp.h 		struct ccp_ecc_modular_math mm;
mm               1110 include/linux/efi.h extern int efi_memattr_apply_permissions(struct mm_struct *mm,
mm                 13 include/linux/elf-randomize.h #  define arch_randomize_brk(mm)	(mm->brk)
mm                 17 include/linux/elf-randomize.h extern unsigned long arch_randomize_brk(struct mm_struct *mm);
mm                 40 include/linux/futex.h 			struct mm_struct *mm;
mm                372 include/linux/hmm.h int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
mm                265 include/linux/huge_mm.h struct page *mm_get_huge_zero_page(struct mm_struct *mm);
mm                266 include/linux/huge_mm.h void mm_put_huge_zero_page(struct mm_struct *mm);
mm                387 include/linux/huge_mm.h static inline void mm_put_huge_zero_page(struct mm_struct *mm)
mm                 90 include/linux/hugetlb.h vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
mm                111 include/linux/hugetlb.h pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
mm                118 include/linux/hugetlb.h pte_t *huge_pte_alloc(struct mm_struct *mm,
mm                120 include/linux/hugetlb.h pte_t *huge_pte_offset(struct mm_struct *mm,
mm                122 include/linux/hugetlb.h int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
mm                125 include/linux/hugetlb.h struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
mm                130 include/linux/hugetlb.h struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
mm                132 include/linux/hugetlb.h struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
mm                134 include/linux/hugetlb.h struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
mm                155 include/linux/hugetlb.h static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
mm                168 include/linux/hugetlb.h #define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
mm                178 include/linux/hugetlb.h #define follow_huge_pmd(mm, addr, pmd, flags)	NULL
mm                179 include/linux/hugetlb.h #define follow_huge_pud(mm, addr, pud, flags)	NULL
mm                180 include/linux/hugetlb.h #define follow_huge_pgd(mm, addr, pgd, flags)	NULL
mm                184 include/linux/hugetlb.h #define is_hugepage_only_range(mm, addr, len)	0
mm                188 include/linux/hugetlb.h #define huge_pte_offset(mm, address, sz)	0
mm                216 include/linux/hugetlb.h static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
mm                537 include/linux/hugetlb.h 					   struct mm_struct *mm, pte_t *pte)
mm                540 include/linux/hugetlb.h 		return pmd_lockptr(mm, (pmd_t *) pte);
mm                542 include/linux/hugetlb.h 	return &mm->page_table_lock;
mm                554 include/linux/hugetlb.h void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
mm                556 include/linux/hugetlb.h static inline void hugetlb_count_add(long l, struct mm_struct *mm)
mm                558 include/linux/hugetlb.h 	atomic_long_add(l, &mm->hugetlb_usage);
mm                561 include/linux/hugetlb.h static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
mm                563 include/linux/hugetlb.h 	atomic_long_sub(l, &mm->hugetlb_usage);
mm                567 include/linux/hugetlb.h static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
mm                570 include/linux/hugetlb.h 	set_huge_pte_at(mm, addr, ptep, pte);
mm                723 include/linux/hugetlb.h 					   struct mm_struct *mm, pte_t *pte)
mm                725 include/linux/hugetlb.h 	return &mm->page_table_lock;
mm                732 include/linux/hugetlb.h static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
mm                736 include/linux/hugetlb.h static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
mm                743 include/linux/hugetlb.h 					struct mm_struct *mm, pte_t *pte)
mm                747 include/linux/hugetlb.h 	ptl = huge_pte_lockptr(h, mm, pte);
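huge_pte_lock() above hides the split-PMD-lock versus page_table_lock choice, so callers stay page-size-agnostic. A sketch of such a caller (hypothetical helper):

        static void huge_wrprotect_one(struct hstate *h, struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
        {
                spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
                pte_t entry = huge_ptep_get(ptep);

                if (!huge_pte_none(entry))
                        set_huge_pte_at(mm, addr, ptep, huge_pte_wrprotect(entry));
                spin_unlock(ptl);
        }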
mm                682 include/linux/intel-iommu.h 	struct mm_struct *mm;
mm                301 include/linux/iommu.h 	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
mm                603 include/linux/iommu.h 					struct mm_struct *mm,
mm                988 include/linux/iommu.h iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
mm                 14 include/linux/khugepaged.h extern int __khugepaged_enter(struct mm_struct *mm);
mm                 15 include/linux/khugepaged.h extern void __khugepaged_exit(struct mm_struct *mm);
mm                 19 include/linux/khugepaged.h extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
mm                 21 include/linux/khugepaged.h static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
mm                 41 include/linux/khugepaged.h static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
mm                 44 include/linux/khugepaged.h 		return __khugepaged_enter(mm);
mm                 48 include/linux/khugepaged.h static inline void khugepaged_exit(struct mm_struct *mm)
mm                 50 include/linux/khugepaged.h 	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
mm                 51 include/linux/khugepaged.h 		__khugepaged_exit(mm);
mm                 67 include/linux/khugepaged.h static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
mm                 71 include/linux/khugepaged.h static inline void khugepaged_exit(struct mm_struct *mm)
mm                 84 include/linux/khugepaged.h static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
mm                 24 include/linux/ksm.h int __ksm_enter(struct mm_struct *mm);
mm                 25 include/linux/ksm.h void __ksm_exit(struct mm_struct *mm);
mm                 27 include/linux/ksm.h static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
mm                 30 include/linux/ksm.h 		return __ksm_enter(mm);
mm                 34 include/linux/ksm.h static inline void ksm_exit(struct mm_struct *mm)
mm                 36 include/linux/ksm.h 	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
mm                 37 include/linux/ksm.h 		__ksm_exit(mm);
mm                 61 include/linux/ksm.h static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
mm                 66 include/linux/ksm.h static inline void ksm_exit(struct mm_struct *mm)
mm                206 include/linux/kvm_host.h 	struct mm_struct *mm;
mm                446 include/linux/kvm_host.h 	struct mm_struct *mm; /* userspace tied to this vm */
mm               1485 include/linux/lsm_hooks.h 	int (*vm_enough_memory)(struct mm_struct *mm, long pages);
mm                375 include/linux/memcontrol.h int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
mm                378 include/linux/memcontrol.h int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
mm                433 include/linux/memcontrol.h struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
mm                507 include/linux/memcontrol.h static inline bool mm_match_cgroup(struct mm_struct *mm,
mm                514 include/linux/memcontrol.h 	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
mm                771 include/linux/memcontrol.h static inline void count_memcg_event_mm(struct mm_struct *mm,
mm                780 include/linux/memcontrol.h 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
mm                804 include/linux/memcontrol.h static inline void memcg_memory_event_mm(struct mm_struct *mm,
mm                813 include/linux/memcontrol.h 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
mm                845 include/linux/memcontrol.h static inline void memcg_memory_event_mm(struct mm_struct *mm,
mm                862 include/linux/memcontrol.h static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
mm                872 include/linux/memcontrol.h 					      struct mm_struct *mm,
mm                917 include/linux/memcontrol.h static inline bool mm_match_cgroup(struct mm_struct *mm,
mm                923 include/linux/memcontrol.h static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
mm               1168 include/linux/memcontrol.h void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
mm                147 include/linux/mempolicy.h extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
mm                165 include/linux/mempolicy.h int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
mm                267 include/linux/mempolicy.h static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
mm                285 include/linux/mempolicy.h static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
mm                141 include/linux/migrate.h extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
mm                147 include/linux/migrate.h static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
mm               9634 include/linux/mlx5/mlx5_ifc.h 	u8         mm[0x2];
mm                 14 include/linux/mm-arch-hooks.h static inline void arch_remap(struct mm_struct *mm,
mm                529 include/linux/mm.h static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
mm                534 include/linux/mm.h 	vma->vm_mm = mm;
mm                562 include/linux/mm.h #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
mm               1469 include/linux/mm.h int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
mm               1490 include/linux/mm.h extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
mm               1506 include/linux/mm.h 		struct mm_struct *mm, unsigned long address,
mm               1527 include/linux/mm.h extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
mm               1529 include/linux/mm.h extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
mm               1532 include/linux/mm.h long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
mm               1547 include/linux/mm.h int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
mm               1548 include/linux/mm.h int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
mm               1643 include/linux/mm.h static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
mm               1645 include/linux/mm.h 	long val = atomic_long_read(&mm->rss_stat.count[member]);
mm               1658 include/linux/mm.h static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
mm               1660 include/linux/mm.h 	atomic_long_add(value, &mm->rss_stat.count[member]);
mm               1663 include/linux/mm.h static inline void inc_mm_counter(struct mm_struct *mm, int member)
mm               1665 include/linux/mm.h 	atomic_long_inc(&mm->rss_stat.count[member]);
mm               1668 include/linux/mm.h static inline void dec_mm_counter(struct mm_struct *mm, int member)
mm               1670 include/linux/mm.h 	atomic_long_dec(&mm->rss_stat.count[member]);
mm               1688 include/linux/mm.h static inline unsigned long get_mm_rss(struct mm_struct *mm)
mm               1690 include/linux/mm.h 	return get_mm_counter(mm, MM_FILEPAGES) +
mm               1691 include/linux/mm.h 		get_mm_counter(mm, MM_ANONPAGES) +
mm               1692 include/linux/mm.h 		get_mm_counter(mm, MM_SHMEMPAGES);
mm               1695 include/linux/mm.h static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
mm               1697 include/linux/mm.h 	return max(mm->hiwater_rss, get_mm_rss(mm));
mm               1700 include/linux/mm.h static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
mm               1702 include/linux/mm.h 	return max(mm->hiwater_vm, mm->total_vm);
mm               1705 include/linux/mm.h static inline void update_hiwater_rss(struct mm_struct *mm)
mm               1707 include/linux/mm.h 	unsigned long _rss = get_mm_rss(mm);
mm               1709 include/linux/mm.h 	if ((mm)->hiwater_rss < _rss)
mm               1710 include/linux/mm.h 		(mm)->hiwater_rss = _rss;
mm               1713 include/linux/mm.h static inline void update_hiwater_vm(struct mm_struct *mm)
mm               1715 include/linux/mm.h 	if (mm->hiwater_vm < mm->total_vm)
mm               1716 include/linux/mm.h 		mm->hiwater_vm = mm->total_vm;
mm               1719 include/linux/mm.h static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
mm               1721 include/linux/mm.h 	mm->hiwater_rss = get_mm_rss(mm);
mm               1725 include/linux/mm.h 					 struct mm_struct *mm)
mm               1727 include/linux/mm.h 	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
mm               1734 include/linux/mm.h void sync_mm_rss(struct mm_struct *mm);
mm               1736 include/linux/mm.h static inline void sync_mm_rss(struct mm_struct *mm)
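The counters above make RSS a derived quantity: three per-class atomics plus a monotonic high-water mark. A sketch of a hypothetical reporter in the style of fs/proc's task_mem():

        static void report_rss(struct seq_file *m, struct mm_struct *mm)
        {
                unsigned long cur = get_mm_rss(mm);          /* file+anon+shmem */
                unsigned long peak = get_mm_hiwater_rss(mm); /* never below cur */

                seq_printf(m, "VmRSS:\t%8lu kB\nVmHWM:\t%8lu kB\n",
                           cur << (PAGE_SHIFT - 10), peak << (PAGE_SHIFT - 10));
        }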
mm               1750 include/linux/mm.h extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
mm               1752 include/linux/mm.h static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
mm               1756 include/linux/mm.h 	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
mm               1761 include/linux/mm.h static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
mm               1767 include/linux/mm.h int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
mm               1771 include/linux/mm.h static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
mm               1776 include/linux/mm.h static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
mm               1777 include/linux/mm.h static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
mm               1780 include/linux/mm.h int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
mm               1782 include/linux/mm.h static inline void mm_inc_nr_puds(struct mm_struct *mm)
mm               1784 include/linux/mm.h 	if (mm_pud_folded(mm))
mm               1786 include/linux/mm.h 	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
mm               1789 include/linux/mm.h static inline void mm_dec_nr_puds(struct mm_struct *mm)
mm               1791 include/linux/mm.h 	if (mm_pud_folded(mm))
mm               1793 include/linux/mm.h 	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
mm               1798 include/linux/mm.h static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
mm               1804 include/linux/mm.h static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
mm               1805 include/linux/mm.h static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
mm               1808 include/linux/mm.h int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
mm               1810 include/linux/mm.h static inline void mm_inc_nr_pmds(struct mm_struct *mm)
mm               1812 include/linux/mm.h 	if (mm_pmd_folded(mm))
mm               1814 include/linux/mm.h 	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
mm               1817 include/linux/mm.h static inline void mm_dec_nr_pmds(struct mm_struct *mm)
mm               1819 include/linux/mm.h 	if (mm_pmd_folded(mm))
mm               1821 include/linux/mm.h 	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
mm               1826 include/linux/mm.h static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
mm               1828 include/linux/mm.h 	atomic_long_set(&mm->pgtables_bytes, 0);
mm               1831 include/linux/mm.h static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
mm               1833 include/linux/mm.h 	return atomic_long_read(&mm->pgtables_bytes);
mm               1836 include/linux/mm.h static inline void mm_inc_nr_ptes(struct mm_struct *mm)
mm               1838 include/linux/mm.h 	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
mm               1841 include/linux/mm.h static inline void mm_dec_nr_ptes(struct mm_struct *mm)
mm               1843 include/linux/mm.h 	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
mm               1847 include/linux/mm.h static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
mm               1848 include/linux/mm.h static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
mm               1853 include/linux/mm.h static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
mm               1854 include/linux/mm.h static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
mm               1857 include/linux/mm.h int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
mm               1867 include/linux/mm.h static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
mm               1870 include/linux/mm.h 	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
mm               1874 include/linux/mm.h static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
mm               1877 include/linux/mm.h 	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
mm               1882 include/linux/mm.h static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
mm               1884 include/linux/mm.h 	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
mm               1919 include/linux/mm.h static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
mm               1944 include/linux/mm.h static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
mm               1946 include/linux/mm.h 	return &mm->page_table_lock;
mm               1975 include/linux/mm.h #define pte_offset_map_lock(mm, pmd, address, ptlp)	\
mm               1977 include/linux/mm.h 	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
mm               1989 include/linux/mm.h #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
mm               1991 include/linux/mm.h #define pte_alloc_map(mm, pmd, address)			\
mm               1992 include/linux/mm.h 	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
mm               1994 include/linux/mm.h #define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
mm               1995 include/linux/mm.h 	(pte_alloc(mm, pmd) ?			\
mm               1996 include/linux/mm.h 		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
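pte_alloc_map_lock() above is the one-call way to reach a PTE with its table allocated, mapped, and locked. A sketch (hypothetical helper):

        static int install_pte(struct mm_struct *mm, pmd_t *pmd,
                               unsigned long addr, pte_t pteval)
        {
                spinlock_t *ptl;
                pte_t *pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);

                if (!pte)
                        return -ENOMEM;
                if (pte_none(*pte))
                        set_pte_at(mm, addr, pte, pteval);
                pte_unmap_unlock(pte, ptl);
                return 0;
        }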
mm               2010 include/linux/mm.h static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
mm               2031 include/linux/mm.h #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
mm               2035 include/linux/mm.h static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
mm               2037 include/linux/mm.h 	return &mm->page_table_lock;
mm               2043 include/linux/mm.h #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
mm               2047 include/linux/mm.h static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
mm               2049 include/linux/mm.h 	spinlock_t *ptl = pmd_lockptr(mm, pmd);
mm               2060 include/linux/mm.h static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
mm               2062 include/linux/mm.h 	return &mm->page_table_lock;
mm               2065 include/linux/mm.h static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
mm               2067 include/linux/mm.h 	spinlock_t *ptl = pud_lockptr(mm, pud);
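pmd_lock()/pud_lock() above bundle the lockptr lookup with spin_lock(). A sketch of a caller; note that any answer computed under the lock is advisory once it is dropped:

        static bool pmd_is_huge(struct mm_struct *mm, pmd_t *pmd)
        {
                spinlock_t *ptl = pmd_lock(mm, pmd);
                bool huge = pmd_trans_huge(*pmd);

                spin_unlock(ptl);
                return huge;    /* may be stale by the time the caller looks */
        }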
mm               2276 include/linux/mm.h extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
mm               2317 include/linux/mm.h extern int mm_take_all_locks(struct mm_struct *mm);
mm               2318 include/linux/mm.h extern void mm_drop_all_locks(struct mm_struct *mm);
mm               2320 include/linux/mm.h extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
mm               2321 include/linux/mm.h extern struct file *get_mm_exe_file(struct mm_struct *mm);
mm               2329 include/linux/mm.h extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
mm               2334 include/linux/mm.h extern int install_special_mapping(struct mm_struct *mm,
mm               2463 include/linux/mm.h extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
mm               2464 include/linux/mm.h extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
mm               2469 include/linux/mm.h static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
mm               2471 include/linux/mm.h 	struct vm_area_struct * vma = find_vma(mm,start_addr);
mm               2508 include/linux/mm.h static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
mm               2511 include/linux/mm.h 	struct vm_area_struct *vma = find_vma(mm, vm_start);
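The VMA lookup helpers above all assume mmap_sem is held. A sketch of a full-coverage check built on find_vma_intersection() (hypothetical helper):

        static bool range_is_mapped(struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
        {
                struct vm_area_struct *vma;
                bool ret;

                down_read(&mm->mmap_sem);
                vma = find_vma_intersection(mm, start, end);
                ret = vma && vma->vm_start <= start && end <= vma->vm_end;
                up_read(&mm->mmap_sem);
                return ret;     /* true only if one VMA covers the whole range */
        }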
mm               2644 include/linux/mm.h extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
mm               2731 include/linux/mm.h extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
mm               2733 include/linux/mm.h extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
mm               2735 include/linux/mm.h static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
mm               2740 include/linux/mm.h static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
mm               2746 include/linux/mm.h extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
mm                539 include/linux/mm_types.h static inline void mm_init_cpumask(struct mm_struct *mm)
mm                541 include/linux/mm_types.h 	unsigned long cpu_bitmap = (unsigned long)mm;
mm                548 include/linux/mm_types.h static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
mm                550 include/linux/mm_types.h 	return (struct cpumask *)&mm->cpu_bitmap;
mm                554 include/linux/mm_types.h extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
mm                559 include/linux/mm_types.h static inline void init_tlb_flush_pending(struct mm_struct *mm)
mm                561 include/linux/mm_types.h 	atomic_set(&mm->tlb_flush_pending, 0);
mm                564 include/linux/mm_types.h static inline void inc_tlb_flush_pending(struct mm_struct *mm)
mm                566 include/linux/mm_types.h 	atomic_inc(&mm->tlb_flush_pending);
mm                605 include/linux/mm_types.h static inline void dec_tlb_flush_pending(struct mm_struct *mm)
mm                615 include/linux/mm_types.h 	atomic_dec(&mm->tlb_flush_pending);
mm                618 include/linux/mm_types.h static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
mm                628 include/linux/mm_types.h 	return atomic_read(&mm->tlb_flush_pending);
mm                631 include/linux/mm_types.h static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
mm                640 include/linux/mm_types.h 	return atomic_read(&mm->tlb_flush_pending) > 1;
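inc/dec_tlb_flush_pending() above bracket PTE changes so that lock-free readers (see mm_tlb_flush_pending()) can tell a flush is in flight. The intended shape, sketched (hypothetical helper):

        static void change_protection_sketch(struct vm_area_struct *vma,
                                             unsigned long start, unsigned long end)
        {
                struct mm_struct *mm = vma->vm_mm;

                inc_tlb_flush_pending(mm);
                /* ... modify PTEs under their page-table locks ... */
                flush_tlb_range(vma, start, end);
                dec_tlb_flush_pending(mm);      /* only after the flush */
        }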
mm                 15 include/linux/mmdebug.h void dump_mm(const struct mm_struct *mm);
mm                 33 include/linux/mmdebug.h #define VM_BUG_ON_MM(cond, mm)						\
mm                 36 include/linux/mmdebug.h 			dump_mm(mm);					\
mm                 48 include/linux/mmdebug.h #define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
mm                  9 include/linux/mmu_context.h void use_mm(struct mm_struct *mm);
mm                 10 include/linux/mmu_context.h void unuse_mm(struct mm_struct *mm);
mm                 66 include/linux/mmu_notifier.h 	struct mm_struct *mm;
mm                 98 include/linux/mmu_notifier.h 			struct mm_struct *mm);
mm                110 include/linux/mmu_notifier.h 				 struct mm_struct *mm,
mm                120 include/linux/mmu_notifier.h 			   struct mm_struct *mm,
mm                131 include/linux/mmu_notifier.h 			  struct mm_struct *mm,
mm                139 include/linux/mmu_notifier.h 			   struct mm_struct *mm,
mm                216 include/linux/mmu_notifier.h 	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
mm                229 include/linux/mmu_notifier.h 	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
mm                247 include/linux/mmu_notifier.h 	struct mm_struct *mm;
mm                252 include/linux/mmu_notifier.h static inline int mm_has_notifiers(struct mm_struct *mm)
mm                254 include/linux/mmu_notifier.h 	return unlikely(mm->mmu_notifier_mm);
mm                258 include/linux/mmu_notifier.h 					     struct mm_struct *mm);
mm                260 include/linux/mmu_notifier.h mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
mm                264 include/linux/mmu_notifier.h 	down_write(&mm->mmap_sem);
mm                265 include/linux/mmu_notifier.h 	ret = mmu_notifier_get_locked(ops, mm);
mm                266 include/linux/mmu_notifier.h 	up_write(&mm->mmap_sem);
mm                273 include/linux/mmu_notifier.h 				 struct mm_struct *mm);
mm                275 include/linux/mmu_notifier.h 				   struct mm_struct *mm);
mm                277 include/linux/mmu_notifier.h 				    struct mm_struct *mm);
mm                278 include/linux/mmu_notifier.h extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
mm                279 include/linux/mmu_notifier.h extern void __mmu_notifier_release(struct mm_struct *mm);
mm                280 include/linux/mmu_notifier.h extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
mm                283 include/linux/mmu_notifier.h extern int __mmu_notifier_clear_young(struct mm_struct *mm,
mm                286 include/linux/mmu_notifier.h extern int __mmu_notifier_test_young(struct mm_struct *mm,
mm                288 include/linux/mmu_notifier.h extern void __mmu_notifier_change_pte(struct mm_struct *mm,
mm                293 include/linux/mmu_notifier.h extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
mm                304 include/linux/mmu_notifier.h static inline void mmu_notifier_release(struct mm_struct *mm)
mm                306 include/linux/mmu_notifier.h 	if (mm_has_notifiers(mm))
mm                307 include/linux/mmu_notifier.h 		__mmu_notifier_release(mm);
mm                310 include/linux/mmu_notifier.h static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
mm                314 include/linux/mmu_notifier.h 	if (mm_has_notifiers(mm))
mm                315 include/linux/mmu_notifier.h 		return __mmu_notifier_clear_flush_young(mm, start, end);
mm                319 include/linux/mmu_notifier.h static inline int mmu_notifier_clear_young(struct mm_struct *mm,
mm                323 include/linux/mmu_notifier.h 	if (mm_has_notifiers(mm))
mm                324 include/linux/mmu_notifier.h 		return __mmu_notifier_clear_young(mm, start, end);
mm                328 include/linux/mmu_notifier.h static inline int mmu_notifier_test_young(struct mm_struct *mm,
mm                331 include/linux/mmu_notifier.h 	if (mm_has_notifiers(mm))
mm                332 include/linux/mmu_notifier.h 		return __mmu_notifier_test_young(mm, address);
mm                336 include/linux/mmu_notifier.h static inline void mmu_notifier_change_pte(struct mm_struct *mm,
mm                339 include/linux/mmu_notifier.h 	if (mm_has_notifiers(mm))
mm                340 include/linux/mmu_notifier.h 		__mmu_notifier_change_pte(mm, address, pte);
mm                349 include/linux/mmu_notifier.h 	if (mm_has_notifiers(range->mm)) {
mm                362 include/linux/mmu_notifier.h 	if (mm_has_notifiers(range->mm)) {
mm                376 include/linux/mmu_notifier.h 	if (mm_has_notifiers(range->mm))
mm                383 include/linux/mmu_notifier.h 	if (mm_has_notifiers(range->mm))
mm                387 include/linux/mmu_notifier.h static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
mm                390 include/linux/mmu_notifier.h 	if (mm_has_notifiers(mm))
mm                391 include/linux/mmu_notifier.h 		__mmu_notifier_invalidate_range(mm, start, end);
mm                394 include/linux/mmu_notifier.h static inline void mmu_notifier_mm_init(struct mm_struct *mm)
mm                396 include/linux/mmu_notifier.h 	mm->mmu_notifier_mm = NULL;
mm                399 include/linux/mmu_notifier.h static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
mm                401 include/linux/mmu_notifier.h 	if (mm_has_notifiers(mm))
mm                402 include/linux/mmu_notifier.h 		__mmu_notifier_mm_destroy(mm);
mm                410 include/linux/mmu_notifier.h 					   struct mm_struct *mm,
mm                416 include/linux/mmu_notifier.h 	range->mm = mm;
mm                544 include/linux/mmu_notifier.h #define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)  \
mm                553 include/linux/mmu_notifier.h static inline int mm_has_notifiers(struct mm_struct *mm)
mm                558 include/linux/mmu_notifier.h static inline void mmu_notifier_release(struct mm_struct *mm)
mm                562 include/linux/mmu_notifier.h static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
mm                569 include/linux/mmu_notifier.h static inline int mmu_notifier_test_young(struct mm_struct *mm,
mm                575 include/linux/mmu_notifier.h static inline void mmu_notifier_change_pte(struct mm_struct *mm,
mm                601 include/linux/mmu_notifier.h static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
mm                606 include/linux/mmu_notifier.h static inline void mmu_notifier_mm_init(struct mm_struct *mm)
mm                610 include/linux/mmu_notifier.h static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
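The notifier hooks above follow one bracketing pattern: describe the range, fire _start before touching PTEs, fire _end after, so secondary MMUs (KVM, IOMMU drivers, ...) can invalidate their copies. Sketched with a hypothetical zap helper:

        static void zap_with_notify(struct vm_area_struct *vma,
                                    unsigned long start, unsigned long end)
        {
                struct mmu_notifier_range range;

                mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
                                        vma->vm_mm, start, end);
                mmu_notifier_invalidate_range_start(&range);
                /* ... clear the PTEs ... */
                mmu_notifier_invalidate_range_end(&range);
        }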
mm                 83 include/linux/oom.h static inline bool mm_is_oom_victim(struct mm_struct *mm)
mm                 85 include/linux/oom.h 	return test_bit(MMF_OOM_VICTIM, &mm->flags);
mm                101 include/linux/oom.h static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
mm                103 include/linux/oom.h 	if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
mm                108 include/linux/oom.h bool __oom_reap_task_mm(struct mm_struct *mm);
mm                460 include/linux/pagemap.h extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
mm                503 include/linux/pagemap.h static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
mm                507 include/linux/pagemap.h 	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
mm                 55 include/linux/pagewalk.h 	struct mm_struct *mm;
mm                 60 include/linux/pagewalk.h int walk_page_range(struct mm_struct *mm, unsigned long start,
mm                307 include/linux/perf_event.h 	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
mm                308 include/linux/perf_event.h 	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
mm                 11 include/linux/pkeys.h #define execute_only_pkey(mm) (0)
mm                 21 include/linux/pkeys.h static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
mm                 26 include/linux/pkeys.h static inline int mm_pkey_alloc(struct mm_struct *mm)
mm                 31 include/linux/pkeys.h static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
mm                 35 include/linux/rslib.h 	int		mm;
mm                126 include/linux/rslib.h 		x = (x >> rs->mm) + (x & rs->nn);
mm                727 include/linux/sched.h 	struct mm_struct		*mm;
mm                 17 include/linux/sched/coredump.h extern void set_dumpable(struct mm_struct *mm, int value);
mm                 29 include/linux/sched/coredump.h static inline int get_dumpable(struct mm_struct *mm)
mm                 31 include/linux/sched/coredump.h 	return __get_dumpable(mm->flags);
mm                 34 include/linux/sched/mm.h static inline void mmgrab(struct mm_struct *mm)
mm                 36 include/linux/sched/mm.h 	atomic_inc(&mm->mm_count);
mm                 39 include/linux/sched/mm.h extern void __mmdrop(struct mm_struct *mm);
mm                 41 include/linux/sched/mm.h static inline void mmdrop(struct mm_struct *mm)
mm                 48 include/linux/sched/mm.h 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
mm                 49 include/linux/sched/mm.h 		__mmdrop(mm);
mm                 72 include/linux/sched/mm.h static inline bool mmget_still_valid(struct mm_struct *mm)
mm                 74 include/linux/sched/mm.h 	return likely(!mm->core_state);
mm                 93 include/linux/sched/mm.h static inline void mmget(struct mm_struct *mm)
mm                 95 include/linux/sched/mm.h 	atomic_inc(&mm->mm_users);
mm                 98 include/linux/sched/mm.h static inline bool mmget_not_zero(struct mm_struct *mm)
mm                100 include/linux/sched/mm.h 	return atomic_inc_not_zero(&mm->mm_users);
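mmgrab()/mmdrop() above pin only the struct mm_struct (mm_count), while mmget()/mmput() pin the whole address space (mm_users). A sketch of the usual consumer-side discipline:

        static void inspect_task_mm(struct task_struct *tsk)
        {
                struct mm_struct *mm = get_task_mm(tsk);  /* takes an mm_users ref */

                if (!mm)
                        return;         /* kernel thread, or task already exited */
                /* mm->mmap may be walked here (under mmap_sem) */
                mmput(mm);              /* last ref tears the address space down */
        }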
mm                126 include/linux/sched/mm.h extern void mm_update_next_owner(struct mm_struct *mm);
mm                128 include/linux/sched/mm.h static inline void mm_update_next_owner(struct mm_struct *mm)
mm                134 include/linux/sched/mm.h extern void arch_pick_mmap_layout(struct mm_struct *mm,
mm                144 include/linux/sched/mm.h static inline void arch_pick_mmap_layout(struct mm_struct *mm,
mm                168 include/linux/sched/mm.h 	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
mm                365 include/linux/sched/mm.h static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
mm                367 include/linux/sched/mm.h 	if (current->mm != mm)
mm                369 include/linux/sched/mm.h 	if (likely(!(atomic_read(&mm->membarrier_state) &
mm                375 include/linux/sched/mm.h extern void membarrier_exec_mmap(struct mm_struct *mm);
mm                385 include/linux/sched/mm.h static inline void membarrier_exec_mmap(struct mm_struct *mm)
mm                388 include/linux/sched/mm.h static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
mm                159 include/linux/security.h extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
mm                277 include/linux/security.h int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
mm                566 include/linux/security.h static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
mm                568 include/linux/security.h 	return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
mm                204 include/linux/swapops.h extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
mm                206 include/linux/swapops.h extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
mm                209 include/linux/swapops.h 		struct mm_struct *mm, pte_t *pte);
mm                229 include/linux/swapops.h static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
mm                231 include/linux/swapops.h static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
mm                234 include/linux/swapops.h 		struct mm_struct *mm, pte_t *pte) { }
mm                251 include/linux/swapops.h extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
mm                 43 include/linux/uprobes.h 				struct mm_struct *mm);
mm                106 include/linux/uprobes.h extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
mm                107 include/linux/uprobes.h extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
mm                112 include/linux/uprobes.h extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
mm                129 include/linux/uprobes.h extern void uprobe_clear_state(struct mm_struct *mm);
mm                130 include/linux/uprobes.h extern int  arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
mm                200 include/linux/uprobes.h static inline void uprobe_clear_state(struct mm_struct *mm)
mm                 76 include/linux/userfaultfd_k.h extern void userfaultfd_unmap_complete(struct mm_struct *mm,
mm                140 include/linux/userfaultfd_k.h static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
mm                 14 include/linux/vmacache.h extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
mm                 18 include/linux/vmacache.h extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
mm                 23 include/linux/vmacache.h static inline void vmacache_invalidate(struct mm_struct *mm)
mm                 25 include/linux/vmacache.h 	mm->vmacache_seqnum++;
mm                 60 include/linux/zpool.h 			enum zpool_mapmode mm);
mm                104 include/linux/zpool.h 				enum zpool_mapmode mm);
mm                 53 include/linux/zsmalloc.h 			enum zs_mapmode mm);
mm                 37 include/misc/cxl-base.h void cxl_slbia(struct mm_struct *mm);
mm                 44 include/misc/cxl-base.h static inline void cxl_slbia(struct mm_struct *mm) {}
mm                126 include/misc/cxllib.h int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags);
mm                157 include/misc/ocxl.h 				struct mm_struct *mm);
mm                470 include/misc/ocxl.h 		u64 amr, struct mm_struct *mm,
mm                 51 include/trace/events/huge_memory.h 	TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
mm                 54 include/trace/events/huge_memory.h 	TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
mm                 57 include/trace/events/huge_memory.h 		__field(struct mm_struct *, mm)
mm                 67 include/trace/events/huge_memory.h 		__entry->mm = mm;
mm                 77 include/trace/events/huge_memory.h 		__entry->mm,
mm                 88 include/trace/events/huge_memory.h 	TP_PROTO(struct mm_struct *mm, int isolated, int status),
mm                 90 include/trace/events/huge_memory.h 	TP_ARGS(mm, isolated, status),
mm                 93 include/trace/events/huge_memory.h 		__field(struct mm_struct *, mm)
mm                 99 include/trace/events/huge_memory.h 		__entry->mm = mm;
mm                105 include/trace/events/huge_memory.h 		__entry->mm,
mm                143 include/trace/events/huge_memory.h 	TP_PROTO(struct mm_struct *mm, int swapped_in, int referenced, int ret),
mm                145 include/trace/events/huge_memory.h 	TP_ARGS(mm, swapped_in, referenced, ret),
mm                148 include/trace/events/huge_memory.h 		__field(struct mm_struct *, mm)
mm                155 include/trace/events/huge_memory.h 		__entry->mm = mm;
mm                162 include/trace/events/huge_memory.h 		__entry->mm,
mm                157 include/trace/events/xen.h 	    TP_PROTO(struct mm_struct *mm, unsigned long addr,
mm                159 include/trace/events/xen.h 	    TP_ARGS(mm, addr, ptep, pteval),
mm                161 include/trace/events/xen.h 		    __field(struct mm_struct *, mm)
mm                166 include/trace/events/xen.h 	    TP_fast_assign(__entry->mm = mm;
mm                171 include/trace/events/xen.h 		      __entry->mm, __entry->addr, __entry->ptep,
mm                197 include/trace/events/xen.h 	    TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
mm                198 include/trace/events/xen.h 	    TP_ARGS(mm, addr, ptep),
mm                200 include/trace/events/xen.h 		    __field(struct mm_struct *, mm)
mm                204 include/trace/events/xen.h 	    TP_fast_assign(__entry->mm = mm;
mm                208 include/trace/events/xen.h 		      __entry->mm, __entry->addr, __entry->ptep)
mm                279 include/trace/events/xen.h 	    TP_PROTO(struct mm_struct *mm, unsigned long addr,
mm                281 include/trace/events/xen.h 	    TP_ARGS(mm, addr, ptep, pteval),
mm                283 include/trace/events/xen.h 		    __field(struct mm_struct *, mm)
mm                288 include/trace/events/xen.h 	    TP_fast_assign(__entry->mm = mm;
mm                293 include/trace/events/xen.h 		      __entry->mm, __entry->addr, __entry->ptep,
mm                299 include/trace/events/xen.h 		     TP_PROTO(struct mm_struct *mm, unsigned long addr,	\
mm                301 include/trace/events/xen.h 		     TP_ARGS(mm, addr, ptep, pteval))
mm                307 include/trace/events/xen.h 	    TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
mm                308 include/trace/events/xen.h 	    TP_ARGS(mm, pfn, level, pinned),
mm                310 include/trace/events/xen.h 		    __field(struct mm_struct *, mm)
mm                315 include/trace/events/xen.h 	    TP_fast_assign(__entry->mm = mm;
mm                320 include/trace/events/xen.h 		      __entry->mm, __entry->pfn, __entry->level,
mm                341 include/trace/events/xen.h 	    TP_PROTO(struct mm_struct *mm, pgd_t *pgd),
mm                342 include/trace/events/xen.h 	    TP_ARGS(mm, pgd),
mm                344 include/trace/events/xen.h 		    __field(struct mm_struct *, mm)
mm                347 include/trace/events/xen.h 	    TP_fast_assign(__entry->mm = mm;
mm                349 include/trace/events/xen.h 	    TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd)
mm                353 include/trace/events/xen.h 		TP_PROTO(struct mm_struct *mm, pgd_t *pgd),	\
mm                354 include/trace/events/xen.h 		     TP_ARGS(mm, pgd))
mm                370 include/trace/events/xen.h 	    TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
mm                372 include/trace/events/xen.h 	    TP_ARGS(cpus, mm, addr, end),
mm                375 include/trace/events/xen.h 		    __field(struct mm_struct *, mm)
mm                380 include/trace/events/xen.h 			   __entry->mm = mm;
mm                384 include/trace/events/xen.h 		      __entry->ncpus, __entry->mm, __entry->addr, __entry->end)
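
The xen and huge_memory events above all follow the same TRACE_EVENT shape: the raw mm pointer is copied into the ring buffer in TP_fast_assign() and printed with %p. A minimal sketch of that pattern (the event name mm_sample and the addr field are hypothetical, not from the listing):

	TRACE_EVENT(mm_sample,
		TP_PROTO(struct mm_struct *mm, unsigned long addr),
		TP_ARGS(mm, addr),
		TP_STRUCT__entry(
			__field(struct mm_struct *, mm)
			__field(unsigned long, addr)
		),
		TP_fast_assign(
			__entry->mm = mm;	/* store the pointer itself; no reference taken */
			__entry->addr = addr;
		),
		TP_printk("mm %p addr %lx", __entry->mm, __entry->addr)
	);
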
mm                 76 init/init_task.c 	.mm		= NULL,
mm               1547 ipc/shm.c      	if (down_write_killable(&current->mm->mmap_sem)) {
mm               1557 ipc/shm.c      		if (find_vma_intersection(current->mm, addr, addr + size))
mm               1567 ipc/shm.c      	up_write(&current->mm->mmap_sem);
mm               1628 ipc/shm.c      	struct mm_struct *mm = current->mm;
mm               1641 ipc/shm.c      	if (down_write_killable(&mm->mmap_sem))
mm               1665 ipc/shm.c      	vma = find_vma(mm, addr);
mm               1687 ipc/shm.c      			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
mm               1714 ipc/shm.c      			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
mm               1723 ipc/shm.c      		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
mm               1729 ipc/shm.c      	up_write(&mm->mmap_sem);
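
The ipc/shm.c hits above are the shmdt()-style walk: take mmap_sem for writing (killably), unmap every VMA backing the segment, then drop the lock. A hedged sketch of that pattern (detach_range is a hypothetical name; the helpers are the ones shown above):

	static int detach_range(struct mm_struct *mm, unsigned long addr, size_t size)
	{
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		vma = find_vma(mm, addr);
		while (vma && vma->vm_start < addr + size) {
			struct vm_area_struct *next = vma->vm_next;	/* do_munmap() frees vma */

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
			vma = next;
		}
		up_write(&mm->mmap_sem);
		return 0;
	}
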
mm                539 kernel/acct.c  	if (group_dead && current->mm) {
mm                542 kernel/acct.c  		down_read(&current->mm->mmap_sem);
mm                543 kernel/acct.c  		vma = current->mm->mmap;
mm                548 kernel/acct.c  		up_read(&current->mm->mmap_sem);
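
kernel/acct.c shows the read-side convention: any traversal of the mm->mmap VMA list holds mmap_sem shared. Minimal sketch (count_vmas is a hypothetical helper):

	static unsigned long count_vmas(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;
		unsigned long n = 0;

		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			n++;
		up_read(&mm->mmap_sem);
		return n;
	}
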
mm               2091 kernel/audit.c 			  struct mm_struct *mm)
mm               2095 kernel/audit.c 	if (!mm)
mm               2098 kernel/audit.c 	exe_file = get_mm_exe_file(mm);
mm               2157 kernel/audit.c 	audit_log_d_path_exe(ab, current->mm);
mm                244 kernel/audit.h 				 struct mm_struct *mm);
mm               1002 kernel/auditsc.c 	const char __user *p = (const char __user *)current->mm->arg_start;
mm               2567 kernel/auditsc.c 	audit_log_d_path_exe(ab, current->mm);
mm                307 kernel/bpf/stackmap.c 	if (!user || !current || !current->mm || irq_work_busy ||
mm                308 kernel/bpf/stackmap.c 	    down_read_trylock(&current->mm->mmap_sem) == 0) {
mm                319 kernel/bpf/stackmap.c 		vma = find_vma(current->mm, ips[i]);
mm                333 kernel/bpf/stackmap.c 		up_read(&current->mm->mmap_sem);
mm                335 kernel/bpf/stackmap.c 		work->sem = &current->mm->mmap_sem;
mm                342 kernel/bpf/stackmap.c 		rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
mm               1569 kernel/cgroup/cpuset.c 	struct mm_struct	*mm;
mm               1580 kernel/cgroup/cpuset.c 	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
mm               1581 kernel/cgroup/cpuset.c 	mmput(mwork->mm);
mm               1585 kernel/cgroup/cpuset.c static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
mm               1592 kernel/cgroup/cpuset.c 		mwork->mm = mm;
mm               1598 kernel/cgroup/cpuset.c 		mmput(mm);
mm               1667 kernel/cgroup/cpuset.c 		struct mm_struct *mm;
mm               1672 kernel/cgroup/cpuset.c 		mm = get_task_mm(task);
mm               1673 kernel/cgroup/cpuset.c 		if (!mm)
mm               1678 kernel/cgroup/cpuset.c 		mpol_rebind_mm(mm, &cs->mems_allowed);
mm               1680 kernel/cgroup/cpuset.c 			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
mm               1682 kernel/cgroup/cpuset.c 			mmput(mm);
mm               2202 kernel/cgroup/cpuset.c 		struct mm_struct *mm = get_task_mm(leader);
mm               2204 kernel/cgroup/cpuset.c 		if (mm) {
mm               2205 kernel/cgroup/cpuset.c 			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
mm               2216 kernel/cgroup/cpuset.c 				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
mm               2219 kernel/cgroup/cpuset.c 				mmput(mm);
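
The cpuset hits above are the canonical pin/operate/release sequence: get_task_mm() takes an mm_users reference (and returns NULL for kernel threads), and every path must end in mmput(). Sketch under those assumptions (rebind_task_mm is a hypothetical wrapper):

	static void rebind_task_mm(struct task_struct *task, nodemask_t *newmems)
	{
		struct mm_struct *mm = get_task_mm(task);

		if (!mm)
			return;		/* kernel thread, or the task already exited */
		mpol_rebind_mm(mm, newmems);
		mmput(mm);
	}
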
mm                 65 kernel/context_tracking.c 	WARN_ON_ONCE(!current->mm);
mm                837 kernel/cpu.c   		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
mm                459 kernel/cred.c  		if (task->mm)
mm                460 kernel/cred.c  			set_dumpable(task->mm, suid_dumpable);
mm                289 kernel/debug/debug_core.c 	if (current->mm) {
mm                646 kernel/debug/kdb/kdb_support.c 	} else if (!p->mm && state == 'S') {
mm                206 kernel/events/callchain.c 			if  (current->mm)
mm               6545 kernel/events/core.c 		if (current->mm != NULL) {
mm               9215 kernel/events/core.c 				   struct mm_struct *mm,
mm               9220 kernel/events/core.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm               9238 kernel/events/core.c 	struct mm_struct *mm = NULL;
mm               9250 kernel/events/core.c 		mm = get_task_mm(event->ctx->task);
mm               9251 kernel/events/core.c 		if (!mm)
mm               9254 kernel/events/core.c 		down_read(&mm->mmap_sem);
mm               9267 kernel/events/core.c 			perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
mm               9280 kernel/events/core.c 		up_read(&mm->mmap_sem);
mm               9282 kernel/events/core.c 		mmput(mm);
mm                 83 kernel/events/uprobes.c 	struct mm_struct *mm;
mm                157 kernel/events/uprobes.c 	struct mm_struct *mm = vma->vm_mm;
mm                167 kernel/events/uprobes.c 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
mm                196 kernel/events/uprobes.c 		dec_mm_counter(mm, MM_ANONPAGES);
mm                199 kernel/events/uprobes.c 		dec_mm_counter(mm, mm_counter_file(old_page));
mm                200 kernel/events/uprobes.c 		inc_mm_counter(mm, MM_ANONPAGES);
mm                206 kernel/events/uprobes.c 		set_pte_at_notify(mm, addr, pvmw.pte,
mm                293 kernel/events/uprobes.c delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
mm                298 kernel/events/uprobes.c 		if (du->uprobe == uprobe && du->mm == mm)
mm                303 kernel/events/uprobes.c static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
mm                307 kernel/events/uprobes.c 	if (delayed_uprobe_check(uprobe, mm))
mm                315 kernel/events/uprobes.c 	du->mm = mm;
mm                328 kernel/events/uprobes.c static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
mm                333 kernel/events/uprobes.c 	if (!uprobe && !mm)
mm                341 kernel/events/uprobes.c 		if (mm && du->mm != mm)
mm                362 kernel/events/uprobes.c find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
mm                366 kernel/events/uprobes.c 	for (tmp = mm->mmap; tmp; tmp = tmp->vm_next)
mm                374 kernel/events/uprobes.c __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
mm                385 kernel/events/uprobes.c 	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
mm                414 kernel/events/uprobes.c 				struct mm_struct *mm, short d)
mm                420 kernel/events/uprobes.c 		(unsigned long long) uprobe->ref_ctr_offset, mm);
mm                423 kernel/events/uprobes.c static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
mm                430 kernel/events/uprobes.c 	rc_vma = find_ref_ctr_vma(uprobe, mm);
mm                434 kernel/events/uprobes.c 		ret = __update_ref_ctr(mm, rc_vaddr, d);
mm                436 kernel/events/uprobes.c 			update_ref_ctr_warn(uprobe, mm, d);
mm                444 kernel/events/uprobes.c 		ret = delayed_uprobe_add(uprobe, mm);
mm                446 kernel/events/uprobes.c 		delayed_uprobe_remove(uprobe, mm);
mm                469 kernel/events/uprobes.c int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
mm                486 kernel/events/uprobes.c 	ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags,
mm                503 kernel/events/uprobes.c 		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
mm                562 kernel/events/uprobes.c 		update_ref_ctr(uprobe, mm, -1);
mm                566 kernel/events/uprobes.c 		collapse_pte_mapped_thp(mm, vaddr);
mm                580 kernel/events/uprobes.c int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
mm                582 kernel/events/uprobes.c 	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
mm                595 kernel/events/uprobes.c set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
mm                597 kernel/events/uprobes.c 	return uprobe_write_opcode(auprobe, mm, vaddr,
mm                846 kernel/events/uprobes.c 				struct mm_struct *mm, unsigned long vaddr)
mm                866 kernel/events/uprobes.c 	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
mm                880 kernel/events/uprobes.c 				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
mm                882 kernel/events/uprobes.c 	return !uc->filter || uc->filter(uc, ctx, mm);
mm                886 kernel/events/uprobes.c 			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
mm                893 kernel/events/uprobes.c 		ret = consumer_filter(uc, ctx, mm);
mm                903 kernel/events/uprobes.c install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
mm                909 kernel/events/uprobes.c 	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
mm                917 kernel/events/uprobes.c 	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
mm                919 kernel/events/uprobes.c 		set_bit(MMF_HAS_UPROBES, &mm->flags);
mm                921 kernel/events/uprobes.c 	ret = set_swbp(&uprobe->arch, mm, vaddr);
mm                923 kernel/events/uprobes.c 		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
mm                925 kernel/events/uprobes.c 		clear_bit(MMF_HAS_UPROBES, &mm->flags);
mm                931 kernel/events/uprobes.c remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
mm                933 kernel/events/uprobes.c 	set_bit(MMF_RECALC_UPROBES, &mm->flags);
mm                934 kernel/events/uprobes.c 	return set_orig_insn(&uprobe->arch, mm, vaddr);
mm                960 kernel/events/uprobes.c 	struct mm_struct *mm;
mm               1010 kernel/events/uprobes.c 		info->mm = vma->vm_mm;
mm               1020 kernel/events/uprobes.c 		mmput(curr->mm);
mm               1057 kernel/events/uprobes.c 		struct mm_struct *mm = info->mm;
mm               1063 kernel/events/uprobes.c 		down_write(&mm->mmap_sem);
mm               1064 kernel/events/uprobes.c 		vma = find_vma(mm, info->vaddr);
mm               1076 kernel/events/uprobes.c 					UPROBE_FILTER_REGISTER, mm))
mm               1077 kernel/events/uprobes.c 				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
mm               1078 kernel/events/uprobes.c 		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
mm               1080 kernel/events/uprobes.c 					UPROBE_FILTER_UNREGISTER, mm))
mm               1081 kernel/events/uprobes.c 				err |= remove_breakpoint(uprobe, mm, info->vaddr);
mm               1085 kernel/events/uprobes.c 		up_write(&mm->mmap_sem);
mm               1087 kernel/events/uprobes.c 		mmput(mm);
mm               1244 kernel/events/uprobes.c static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
mm               1249 kernel/events/uprobes.c 	down_read(&mm->mmap_sem);
mm               1250 kernel/events/uprobes.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm               1264 kernel/events/uprobes.c 		err |= remove_breakpoint(uprobe, mm, vaddr);
mm               1266 kernel/events/uprobes.c 	up_read(&mm->mmap_sem);
mm               1345 kernel/events/uprobes.c 		if (du->mm != vma->vm_mm ||
mm               1448 kernel/events/uprobes.c static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
mm               1453 kernel/events/uprobes.c 	if (down_write_killable(&mm->mmap_sem))
mm               1456 kernel/events/uprobes.c 	if (mm->uprobes_state.xol_area) {
mm               1471 kernel/events/uprobes.c 	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
mm               1481 kernel/events/uprobes.c 	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
mm               1483 kernel/events/uprobes.c 	up_write(&mm->mmap_sem);
mm               1490 kernel/events/uprobes.c 	struct mm_struct *mm = current->mm;
mm               1518 kernel/events/uprobes.c 	if (!xol_add_vma(mm, area))
mm               1538 kernel/events/uprobes.c 	struct mm_struct *mm = current->mm;
mm               1541 kernel/events/uprobes.c 	if (!mm->uprobes_state.xol_area)
mm               1545 kernel/events/uprobes.c 	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
mm               1552 kernel/events/uprobes.c void uprobe_clear_state(struct mm_struct *mm)
mm               1554 kernel/events/uprobes.c 	struct xol_area *area = mm->uprobes_state.xol_area;
mm               1557 kernel/events/uprobes.c 	delayed_uprobe_remove(NULL, mm);
mm               1647 kernel/events/uprobes.c 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
mm               1654 kernel/events/uprobes.c 	area = tsk->mm->uprobes_state.xol_area;
mm               1808 kernel/events/uprobes.c 	struct mm_struct *mm = current->mm;
mm               1816 kernel/events/uprobes.c 	if (mm == t->mm && !(flags & CLONE_VFORK))
mm               1823 kernel/events/uprobes.c 	area = mm->uprobes_state.xol_area;
mm               1827 kernel/events/uprobes.c 	if (mm == t->mm)
mm               1847 kernel/events/uprobes.c 	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
mm               1996 kernel/events/uprobes.c static void mmf_recalc_uprobes(struct mm_struct *mm)
mm               2000 kernel/events/uprobes.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm               2013 kernel/events/uprobes.c 	clear_bit(MMF_HAS_UPROBES, &mm->flags);
mm               2016 kernel/events/uprobes.c static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
mm               2038 kernel/events/uprobes.c 	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
mm               2052 kernel/events/uprobes.c 	struct mm_struct *mm = current->mm;
mm               2056 kernel/events/uprobes.c 	down_read(&mm->mmap_sem);
mm               2057 kernel/events/uprobes.c 	vma = find_vma(mm, bp_vaddr);
mm               2067 kernel/events/uprobes.c 			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
mm               2072 kernel/events/uprobes.c 	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
mm               2073 kernel/events/uprobes.c 		mmf_recalc_uprobes(mm);
mm               2074 kernel/events/uprobes.c 	up_read(&mm->mmap_sem);
mm               2106 kernel/events/uprobes.c 		unapply_uprobe(uprobe, current->mm);
mm               2325 kernel/events/uprobes.c 	if (!current->mm)
mm               2328 kernel/events/uprobes.c 	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
mm               2344 kernel/events/uprobes.c 	if (!current->mm || !utask || !utask->active_uprobe)
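
Within the uprobes listing, the "^^^" markers flag a publish/consume pairing: xol_add_vma() publishes the per-mm xol_area with smp_store_release() and lockless readers use READ_ONCE(), so a reader observes either NULL or a fully initialised area. Sketch of the two sides:

	/* writer, with mmap_sem held for writing */
	smp_store_release(&mm->uprobes_state.xol_area, area);

	/* reader, no lock required */
	area = READ_ONCE(mm->uprobes_state.xol_area);
	if (area)
		vaddr = area->vaddr;	/* initialisation is ordered before the publish */
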
mm                347 kernel/exit.c  void mm_update_next_owner(struct mm_struct *mm)
mm                356 kernel/exit.c  	if (mm->owner != p)
mm                363 kernel/exit.c  	if (atomic_read(&mm->mm_users) <= 1) {
mm                364 kernel/exit.c  		WRITE_ONCE(mm->owner, NULL);
mm                373 kernel/exit.c  		if (c->mm == mm)
mm                381 kernel/exit.c  		if (c->mm == mm)
mm                392 kernel/exit.c  			if (c->mm == mm)
mm                394 kernel/exit.c  			if (c->mm)
mm                404 kernel/exit.c  	WRITE_ONCE(mm->owner, NULL);
mm                420 kernel/exit.c  	if (c->mm != mm) {
mm                425 kernel/exit.c  	WRITE_ONCE(mm->owner, c);
mm                437 kernel/exit.c  	struct mm_struct *mm = current->mm;
mm                440 kernel/exit.c  	exit_mm_release(current, mm);
mm                441 kernel/exit.c  	if (!mm)
mm                443 kernel/exit.c  	sync_mm_rss(mm);
mm                451 kernel/exit.c  	down_read(&mm->mmap_sem);
mm                452 kernel/exit.c  	core_state = mm->core_state;
mm                456 kernel/exit.c  		up_read(&mm->mmap_sem);
mm                474 kernel/exit.c  		down_read(&mm->mmap_sem);
mm                476 kernel/exit.c  	mmgrab(mm);
mm                477 kernel/exit.c  	BUG_ON(mm != current->active_mm);
mm                480 kernel/exit.c  	current->mm = NULL;
mm                481 kernel/exit.c  	up_read(&mm->mmap_sem);
mm                482 kernel/exit.c  	enter_lazy_tlb(mm, current);
mm                484 kernel/exit.c  	mm_update_next_owner(mm);
mm                485 kernel/exit.c  	mmput(mm);
mm                760 kernel/exit.c  	if (tsk->mm)
mm                761 kernel/exit.c  		sync_mm_rss(tsk->mm);
mm                777 kernel/exit.c  		if (tsk->mm)
mm                778 kernel/exit.c  			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
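
exit_mm() above is the clearest illustration of the two mm refcounts: mm_users (mmget()/mmput()) keeps the address space contents alive, while mm_count (mmgrab()/mmdrop()) keeps only the struct itself alive for lazy-TLB use. A simplified sketch of the hand-off, following the lines above:

	down_read(&mm->mmap_sem);		/* held across the core_state sync */
	mmgrab(mm);				/* mm_count: struct survives as active_mm */
	BUG_ON(mm != current->active_mm);
	task_lock(current);
	current->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(current);
	mm_update_next_owner(mm);
	mmput(mm);				/* mm_users: address space may now be torn down */
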
mm                344 kernel/fork.c  struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
mm                350 kernel/fork.c  		vma_init(vma, mm);
mm                478 kernel/fork.c  static __latent_entropy int dup_mmap(struct mm_struct *mm,
mm                493 kernel/fork.c  	uprobe_dup_mmap(oldmm, mm);
mm                497 kernel/fork.c  	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
mm                500 kernel/fork.c  	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
mm                502 kernel/fork.c  	mm->total_vm = oldmm->total_vm;
mm                503 kernel/fork.c  	mm->data_vm = oldmm->data_vm;
mm                504 kernel/fork.c  	mm->exec_vm = oldmm->exec_vm;
mm                505 kernel/fork.c  	mm->stack_vm = oldmm->stack_vm;
mm                507 kernel/fork.c  	rb_link = &mm->mm_rb.rb_node;
mm                509 kernel/fork.c  	pprev = &mm->mmap;
mm                510 kernel/fork.c  	retval = ksm_fork(mm, oldmm);
mm                513 kernel/fork.c  	retval = khugepaged_fork(mm, oldmm);
mm                522 kernel/fork.c  			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
mm                547 kernel/fork.c  		tmp->vm_mm = mm;
mm                595 kernel/fork.c  		__vma_link_rb(mm, tmp, rb_link, rb_parent);
mm                599 kernel/fork.c  		mm->map_count++;
mm                601 kernel/fork.c  			retval = copy_page_range(mm, oldmm, mpnt);
mm                610 kernel/fork.c  	retval = arch_dup_mmap(oldmm, mm);
mm                612 kernel/fork.c  	up_write(&mm->mmap_sem);
mm                629 kernel/fork.c  static inline int mm_alloc_pgd(struct mm_struct *mm)
mm                631 kernel/fork.c  	mm->pgd = pgd_alloc(mm);
mm                632 kernel/fork.c  	if (unlikely(!mm->pgd))
mm                637 kernel/fork.c  static inline void mm_free_pgd(struct mm_struct *mm)
mm                639 kernel/fork.c  	pgd_free(mm, mm->pgd);
mm                642 kernel/fork.c  static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
mm                645 kernel/fork.c  	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
mm                649 kernel/fork.c  #define mm_alloc_pgd(mm)	(0)
mm                650 kernel/fork.c  #define mm_free_pgd(mm)
mm                653 kernel/fork.c  static void check_mm(struct mm_struct *mm)
mm                661 kernel/fork.c  		long x = atomic_long_read(&mm->rss_stat.count[i]);
mm                665 kernel/fork.c  				 mm, resident_page_types[i], x);
mm                668 kernel/fork.c  	if (mm_pgtables_bytes(mm))
mm                670 kernel/fork.c  				mm_pgtables_bytes(mm));
mm                673 kernel/fork.c  	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
mm                678 kernel/fork.c  #define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
mm                685 kernel/fork.c  void __mmdrop(struct mm_struct *mm)
mm                687 kernel/fork.c  	BUG_ON(mm == &init_mm);
mm                688 kernel/fork.c  	WARN_ON_ONCE(mm == current->mm);
mm                689 kernel/fork.c  	WARN_ON_ONCE(mm == current->active_mm);
mm                690 kernel/fork.c  	mm_free_pgd(mm);
mm                691 kernel/fork.c  	destroy_context(mm);
mm                692 kernel/fork.c  	mmu_notifier_mm_destroy(mm);
mm                693 kernel/fork.c  	check_mm(mm);
mm                694 kernel/fork.c  	put_user_ns(mm->user_ns);
mm                695 kernel/fork.c  	free_mm(mm);
mm                701 kernel/fork.c  	struct mm_struct *mm;
mm                703 kernel/fork.c  	mm = container_of(work, struct mm_struct, async_put_work);
mm                704 kernel/fork.c  	__mmdrop(mm);
mm                707 kernel/fork.c  static void mmdrop_async(struct mm_struct *mm)
mm                709 kernel/fork.c  	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
mm                710 kernel/fork.c  		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
mm                711 kernel/fork.c  		schedule_work(&mm->async_put_work);
mm                972 kernel/fork.c  static void mm_init_aio(struct mm_struct *mm)
mm                975 kernel/fork.c  	spin_lock_init(&mm->ioctx_lock);
mm                976 kernel/fork.c  	mm->ioctx_table = NULL;
mm                980 kernel/fork.c  static __always_inline void mm_clear_owner(struct mm_struct *mm,
mm                984 kernel/fork.c  	if (mm->owner == p)
mm                985 kernel/fork.c  		WRITE_ONCE(mm->owner, NULL);
mm                989 kernel/fork.c  static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
mm                992 kernel/fork.c  	mm->owner = p;
mm                996 kernel/fork.c  static void mm_init_uprobes_state(struct mm_struct *mm)
mm                999 kernel/fork.c  	mm->uprobes_state.xol_area = NULL;
mm               1003 kernel/fork.c  static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm               1006 kernel/fork.c  	mm->mmap = NULL;
mm               1007 kernel/fork.c  	mm->mm_rb = RB_ROOT;
mm               1008 kernel/fork.c  	mm->vmacache_seqnum = 0;
mm               1009 kernel/fork.c  	atomic_set(&mm->mm_users, 1);
mm               1010 kernel/fork.c  	atomic_set(&mm->mm_count, 1);
mm               1011 kernel/fork.c  	init_rwsem(&mm->mmap_sem);
mm               1012 kernel/fork.c  	INIT_LIST_HEAD(&mm->mmlist);
mm               1013 kernel/fork.c  	mm->core_state = NULL;
mm               1014 kernel/fork.c  	mm_pgtables_bytes_init(mm);
mm               1015 kernel/fork.c  	mm->map_count = 0;
mm               1016 kernel/fork.c  	mm->locked_vm = 0;
mm               1017 kernel/fork.c  	atomic64_set(&mm->pinned_vm, 0);
mm               1018 kernel/fork.c  	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
mm               1019 kernel/fork.c  	spin_lock_init(&mm->page_table_lock);
mm               1020 kernel/fork.c  	spin_lock_init(&mm->arg_lock);
mm               1021 kernel/fork.c  	mm_init_cpumask(mm);
mm               1022 kernel/fork.c  	mm_init_aio(mm);
mm               1023 kernel/fork.c  	mm_init_owner(mm, p);
mm               1024 kernel/fork.c  	RCU_INIT_POINTER(mm->exe_file, NULL);
mm               1025 kernel/fork.c  	mmu_notifier_mm_init(mm);
mm               1026 kernel/fork.c  	init_tlb_flush_pending(mm);
mm               1028 kernel/fork.c  	mm->pmd_huge_pte = NULL;
mm               1030 kernel/fork.c  	mm_init_uprobes_state(mm);
mm               1032 kernel/fork.c  	if (current->mm) {
mm               1033 kernel/fork.c  		mm->flags = current->mm->flags & MMF_INIT_MASK;
mm               1034 kernel/fork.c  		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
mm               1036 kernel/fork.c  		mm->flags = default_dump_filter;
mm               1037 kernel/fork.c  		mm->def_flags = 0;
mm               1040 kernel/fork.c  	if (mm_alloc_pgd(mm))
mm               1043 kernel/fork.c  	if (init_new_context(p, mm))
mm               1046 kernel/fork.c  	mm->user_ns = get_user_ns(user_ns);
mm               1047 kernel/fork.c  	return mm;
mm               1050 kernel/fork.c  	mm_free_pgd(mm);
mm               1052 kernel/fork.c  	free_mm(mm);
mm               1061 kernel/fork.c  	struct mm_struct *mm;
mm               1063 kernel/fork.c  	mm = allocate_mm();
mm               1064 kernel/fork.c  	if (!mm)
mm               1067 kernel/fork.c  	memset(mm, 0, sizeof(*mm));
mm               1068 kernel/fork.c  	return mm_init(mm, current, current_user_ns());
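
Caller-side sketch for mm_alloc(): it returns an mm with mm_users and mm_count both initialised to 1 (see mm_init() above), so a single mmput() is the matching release:

	struct mm_struct *mm = mm_alloc();

	if (!mm)
		return -ENOMEM;
	/* ... populate and use the fresh address space ... */
	mmput(mm);	/* drops mm_users; __mmput() then mmdrop() free everything */
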
mm               1071 kernel/fork.c  static inline void __mmput(struct mm_struct *mm)
mm               1073 kernel/fork.c  	VM_BUG_ON(atomic_read(&mm->mm_users));
mm               1075 kernel/fork.c  	uprobe_clear_state(mm);
mm               1076 kernel/fork.c  	exit_aio(mm);
mm               1077 kernel/fork.c  	ksm_exit(mm);
mm               1078 kernel/fork.c  	khugepaged_exit(mm); /* must run before exit_mmap */
mm               1079 kernel/fork.c  	exit_mmap(mm);
mm               1080 kernel/fork.c  	mm_put_huge_zero_page(mm);
mm               1081 kernel/fork.c  	set_mm_exe_file(mm, NULL);
mm               1082 kernel/fork.c  	if (!list_empty(&mm->mmlist)) {
mm               1084 kernel/fork.c  		list_del(&mm->mmlist);
mm               1087 kernel/fork.c  	if (mm->binfmt)
mm               1088 kernel/fork.c  		module_put(mm->binfmt->module);
mm               1089 kernel/fork.c  	mmdrop(mm);
mm               1095 kernel/fork.c  void mmput(struct mm_struct *mm)
mm               1099 kernel/fork.c  	if (atomic_dec_and_test(&mm->mm_users))
mm               1100 kernel/fork.c  		__mmput(mm);
mm               1107 kernel/fork.c  	struct mm_struct *mm = container_of(work, struct mm_struct,
mm               1110 kernel/fork.c  	__mmput(mm);
mm               1113 kernel/fork.c  void mmput_async(struct mm_struct *mm)
mm               1115 kernel/fork.c  	if (atomic_dec_and_test(&mm->mm_users)) {
mm               1116 kernel/fork.c  		INIT_WORK(&mm->async_put_work, mmput_async_fn);
mm               1117 kernel/fork.c  		schedule_work(&mm->async_put_work);
mm               1133 kernel/fork.c  void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
mm               1142 kernel/fork.c  	old_exe_file = rcu_dereference_raw(mm->exe_file);
mm               1146 kernel/fork.c  	rcu_assign_pointer(mm->exe_file, new_exe_file);
mm               1157 kernel/fork.c  struct file *get_mm_exe_file(struct mm_struct *mm)
mm               1162 kernel/fork.c  	exe_file = rcu_dereference(mm->exe_file);
mm               1180 kernel/fork.c  	struct mm_struct *mm;
mm               1183 kernel/fork.c  	mm = task->mm;
mm               1184 kernel/fork.c  	if (mm) {
mm               1186 kernel/fork.c  			exe_file = get_mm_exe_file(mm);
mm               1204 kernel/fork.c  	struct mm_struct *mm;
mm               1207 kernel/fork.c  	mm = task->mm;
mm               1208 kernel/fork.c  	if (mm) {
mm               1210 kernel/fork.c  			mm = NULL;
mm               1212 kernel/fork.c  			mmget(mm);
mm               1215 kernel/fork.c  	return mm;
mm               1221 kernel/fork.c  	struct mm_struct *mm;
mm               1228 kernel/fork.c  	mm = get_task_mm(task);
mm               1229 kernel/fork.c  	if (mm && mm != current->mm &&
mm               1231 kernel/fork.c  		mmput(mm);
mm               1232 kernel/fork.c  		mm = ERR_PTR(-EACCES);
mm               1236 kernel/fork.c  	return mm;
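
mm_access() usage sketch, assuming the contract visible above: NULL means the task has no mm, ERR_PTR(-EACCES) means the ptrace-style check failed, anything else is a pinned mm the caller must mmput(). The caller below is hypothetical:

	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);

	if (IS_ERR_OR_NULL(mm))
		return mm ? PTR_ERR(mm) : -ESRCH;
	/* ... read from the target address space ... */
	mmput(mm);
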
mm               1286 kernel/fork.c  static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
mm               1291 kernel/fork.c  	deactivate_mm(tsk, mm);
mm               1300 kernel/fork.c  		    atomic_read(&mm->mm_users) > 1) {
mm               1320 kernel/fork.c  void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
mm               1323 kernel/fork.c  	mm_release(tsk, mm);
mm               1326 kernel/fork.c  void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
mm               1329 kernel/fork.c  	mm_release(tsk, mm);
mm               1345 kernel/fork.c  	struct mm_struct *mm;
mm               1348 kernel/fork.c  	mm = allocate_mm();
mm               1349 kernel/fork.c  	if (!mm)
mm               1352 kernel/fork.c  	memcpy(mm, oldmm, sizeof(*mm));
mm               1354 kernel/fork.c  	if (!mm_init(mm, tsk, mm->user_ns))
mm               1357 kernel/fork.c  	err = dup_mmap(mm, oldmm);
mm               1361 kernel/fork.c  	mm->hiwater_rss = get_mm_rss(mm);
mm               1362 kernel/fork.c  	mm->hiwater_vm = mm->total_vm;
mm               1364 kernel/fork.c  	if (mm->binfmt && !try_module_get(mm->binfmt->module))
mm               1367 kernel/fork.c  	return mm;
mm               1371 kernel/fork.c  	mm->binfmt = NULL;
mm               1372 kernel/fork.c  	mm_init_owner(mm, NULL);
mm               1373 kernel/fork.c  	mmput(mm);
mm               1381 kernel/fork.c  	struct mm_struct *mm, *oldmm;
mm               1391 kernel/fork.c  	tsk->mm = NULL;
mm               1399 kernel/fork.c  	oldmm = current->mm;
mm               1408 kernel/fork.c  		mm = oldmm;
mm               1413 kernel/fork.c  	mm = dup_mm(tsk, current->mm);
mm               1414 kernel/fork.c  	if (!mm)
mm               1418 kernel/fork.c  	tsk->mm = mm;
mm               1419 kernel/fork.c  	tsk->active_mm = mm;
mm               2248 kernel/fork.c  	if (p->mm) {
mm               2249 kernel/fork.c  		mm_clear_owner(p->mm, p);
mm               2250 kernel/fork.c  		mmput(p->mm);
mm                336 kernel/futex.c 	mmgrab(key->private.mm);
mm                468 kernel/futex.c 		mmdrop(key->private.mm);
mm                575 kernel/futex.c 	struct mm_struct *mm = current->mm;
mm                602 kernel/futex.c 		key->private.mm = mm;
mm                704 kernel/futex.c 		key->private.mm = mm;
mm                770 kernel/futex.c 	struct mm_struct *mm = current->mm;
mm                773 kernel/futex.c 	down_read(&mm->mmap_sem);
mm                774 kernel/futex.c 	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
mm                776 kernel/futex.c 	up_read(&mm->mmap_sem);
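
The futex lines above show the fault-and-retry pattern: when an atomic user access faults, take mmap_sem shared, fault the page in writably with fixup_user_fault(), drop the lock, and retry the futex operation:

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);
	/* on success, the caller loops back and retries the atomic op */
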
mm                200 kernel/kcmp.c  		ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
mm                 93 kernel/latencytop.c 	if (!tsk->mm)
mm                 45 kernel/ptrace.c 	struct mm_struct *mm;
mm                 48 kernel/ptrace.c 	mm = get_task_mm(tsk);
mm                 49 kernel/ptrace.c 	if (!mm)
mm                 54 kernel/ptrace.c 	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
mm                 55 kernel/ptrace.c 	     !ptracer_capable(tsk, mm->user_ns))) {
mm                 56 kernel/ptrace.c 		mmput(mm);
mm                 60 kernel/ptrace.c 	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
mm                 61 kernel/ptrace.c 	mmput(mm);
mm                284 kernel/ptrace.c 	struct mm_struct *mm;
mm                345 kernel/ptrace.c 	mm = task->mm;
mm                346 kernel/ptrace.c 	if (mm &&
mm                347 kernel/ptrace.c 	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
mm                348 kernel/ptrace.c 	     !ptrace_has_cap(cred, mm->user_ns, mode)))
mm               1156 kernel/ptrace.c 		struct mm_struct *mm = get_task_mm(child);
mm               1160 kernel/ptrace.c 		if (!mm)
mm               1165 kernel/ptrace.c 			tmp = mm->context.exec_fdpic_loadmap;
mm               1168 kernel/ptrace.c 			tmp = mm->context.interp_fdpic_loadmap;
mm               1173 kernel/ptrace.c 		mmput(mm);
mm               2086 kernel/sched/core.c 		if (p->mm && printk_ratelimit()) {
mm               3183 kernel/sched/core.c 	struct mm_struct *mm = rq->prev_mm;
mm               3236 kernel/sched/core.c 	if (mm) {
mm               3237 kernel/sched/core.c 		membarrier_mm_sync_core_before_usermode(mm);
mm               3238 kernel/sched/core.c 		mmdrop(mm);
mm               3348 kernel/sched/core.c 	if (!next->mm) {                                // to kernel
mm               3352 kernel/sched/core.c 		if (prev->mm)                           // from user
mm               3357 kernel/sched/core.c 		membarrier_switch_mm(rq, prev->active_mm, next->mm);
mm               3366 kernel/sched/core.c 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
mm               3368 kernel/sched/core.c 		if (!prev->mm) {                        // from kernel
mm               4785 kernel/sched/core.c 	if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
mm               4786 kernel/sched/core.c 	    (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
mm               6177 kernel/sched/core.c 	struct mm_struct *mm = current->active_mm;
mm               6181 kernel/sched/core.c 	if (mm != &init_mm) {
mm               6182 kernel/sched/core.c 		switch_mm(mm, &init_mm, current);
mm               6186 kernel/sched/core.c 	mmdrop(mm);
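
The sched/core lines above encode the lazy-TLB rule: a kernel thread (next->mm == NULL) borrows prev->active_mm and pins it with mmgrab(); a user task switches with switch_mm_irqs_off(); the borrowed reference is dropped later in finish_task_switch() via mmdrop(). Condensed sketch of the branch:

	if (!next->mm) {				/* to kernel thread */
		next->active_mm = prev->active_mm;
		if (prev->mm)				/* from user task */
			mmgrab(prev->active_mm);
	} else {					/* to user task */
		switch_mm_irqs_off(prev->active_mm, next->mm, next);
	}
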
mm                836 kernel/sched/debug.c 	if (p->mm)
mm                837 kernel/sched/debug.c 		P(mm->numa_scan_seq);
mm               1117 kernel/sched/fair.c 	rss = get_mm_rss(p->mm);
mm               1953 kernel/sched/fair.c 		p->mm->numa_next_scan = jiffies +
mm               2138 kernel/sched/fair.c 	seq = READ_ONCE(p->mm->numa_scan_seq);
mm               2307 kernel/sched/fair.c 	if (tsk->mm == current->mm)
mm               2407 kernel/sched/fair.c 	if (!p->mm)
mm               2476 kernel/sched/fair.c 	WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
mm               2477 kernel/sched/fair.c 	p->mm->numa_scan_offset = 0;
mm               2488 kernel/sched/fair.c 	struct mm_struct *mm = p->mm;
mm               2509 kernel/sched/fair.c 	if (!mm->numa_next_scan) {
mm               2510 kernel/sched/fair.c 		mm->numa_next_scan = now +
mm               2517 kernel/sched/fair.c 	migrate = mm->numa_next_scan;
mm               2527 kernel/sched/fair.c 	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
mm               2536 kernel/sched/fair.c 	start = mm->numa_scan_offset;
mm               2544 kernel/sched/fair.c 	if (!down_read_trylock(&mm->mmap_sem))
mm               2546 kernel/sched/fair.c 	vma = find_vma(mm, start);
mm               2550 kernel/sched/fair.c 		vma = mm->mmap;
mm               2609 kernel/sched/fair.c 		mm->numa_scan_offset = start;
mm               2612 kernel/sched/fair.c 	up_read(&mm->mmap_sem);
mm               2629 kernel/sched/fair.c 	struct mm_struct *mm = p->mm;
mm               2631 kernel/sched/fair.c 	if (mm) {
mm               2632 kernel/sched/fair.c 		mm_users = atomic_read(&mm->mm_users);
mm               2634 kernel/sched/fair.c 			mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
mm               2635 kernel/sched/fair.c 			mm->numa_scan_seq = 0;
mm               2639 kernel/sched/fair.c 	p->numa_scan_seq		= mm ? mm->numa_scan_seq : 0;
mm               2660 kernel/sched/fair.c 	if (mm) {
mm               2698 kernel/sched/fair.c 		if (!time_before(jiffies, curr->mm->numa_next_scan))
mm               2711 kernel/sched/fair.c 	if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
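
task_numa_work() above serialises concurrent scanners without blocking: exactly one thread wins the cmpxchg() on mm->numa_next_scan per scan window, and the VMA walk only ever trylocks mmap_sem since it runs from task_work. Sketch:

	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
		return;			/* another thread claimed this window */
	if (!down_read_trylock(&mm->mmap_sem))
		return;			/* do not stall; try again next tick */
	/* ... scan VMAs starting at mm->numa_scan_offset ... */
	up_read(&mm->mmap_sem);
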
mm                 35 kernel/sched/membarrier.c 	struct mm_struct *mm = (struct mm_struct *) info;
mm                 37 kernel/sched/membarrier.c 	if (current->mm != mm)
mm                 40 kernel/sched/membarrier.c 		       atomic_read(&mm->membarrier_state));
mm                 50 kernel/sched/membarrier.c void membarrier_exec_mmap(struct mm_struct *mm)
mm                 58 kernel/sched/membarrier.c 	atomic_set(&mm->membarrier_state, 0);
mm                136 kernel/sched/membarrier.c 	struct mm_struct *mm = current->mm;
mm                141 kernel/sched/membarrier.c 		if (!(atomic_read(&mm->membarrier_state) &
mm                145 kernel/sched/membarrier.c 		if (!(atomic_read(&mm->membarrier_state) &
mm                150 kernel/sched/membarrier.c 	if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)
mm                178 kernel/sched/membarrier.c 		if (p && p->mm == mm)
mm                200 kernel/sched/membarrier.c static int sync_runqueues_membarrier_state(struct mm_struct *mm)
mm                202 kernel/sched/membarrier.c 	int membarrier_state = atomic_read(&mm->membarrier_state);
mm                206 kernel/sched/membarrier.c 	if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) {
mm                244 kernel/sched/membarrier.c 		if (p && p->mm == mm)
mm                250 kernel/sched/membarrier.c 	smp_call_function_many(tmpmask, ipi_sync_rq_state, mm, 1);
mm                262 kernel/sched/membarrier.c 	struct mm_struct *mm = p->mm;
mm                265 kernel/sched/membarrier.c 	if (atomic_read(&mm->membarrier_state) &
mm                268 kernel/sched/membarrier.c 	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
mm                269 kernel/sched/membarrier.c 	ret = sync_runqueues_membarrier_state(mm);
mm                273 kernel/sched/membarrier.c 		  &mm->membarrier_state);
mm                281 kernel/sched/membarrier.c 	struct mm_struct *mm = p->mm;
mm                298 kernel/sched/membarrier.c 	if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state)
mm                302 kernel/sched/membarrier.c 	atomic_or(set_state, &mm->membarrier_state);
mm                303 kernel/sched/membarrier.c 	ret = sync_runqueues_membarrier_state(mm);
mm                306 kernel/sched/membarrier.c 	atomic_or(ready_state, &mm->membarrier_state);
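
The membarrier registration path above orders its two atomic_or() calls deliberately: the READY bit is set only after sync_runqueues_membarrier_state() has propagated the state, so a concurrent membarrier() call can never observe READY before the runqueues agree. Condensed:

	atomic_or(set_state, &mm->membarrier_state);
	ret = sync_runqueues_membarrier_state(mm);
	if (ret)
		return ret;
	atomic_or(ready_state, &mm->membarrier_state);	/* publish last */
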
mm               2096 kernel/signal.c 	if (unlikely(current->mm->core_state) &&
mm               2097 kernel/signal.c 	    unlikely(current->mm == current->parent->mm))
mm               1770 kernel/sys.c   		struct mm_struct *mm = get_task_mm(p);
mm               1772 kernel/sys.c   		if (mm) {
mm               1773 kernel/sys.c   			setmax_mm_hiwater_rss(&maxrss, mm);
mm               1774 kernel/sys.c   			mmput(mm);
mm               1812 kernel/sys.c   static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
mm               1841 kernel/sys.c   	exe_file = get_mm_exe_file(mm);
mm               1846 kernel/sys.c   		down_read(&mm->mmap_sem);
mm               1847 kernel/sys.c   		for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm               1855 kernel/sys.c   		up_read(&mm->mmap_sem);
mm               1862 kernel/sys.c   	old_exe = xchg(&mm->exe_file, exe.file);
mm               1869 kernel/sys.c   	up_read(&mm->mmap_sem);
mm               1953 kernel/sys.c   	struct mm_struct *mm = current->mm;
mm               1956 kernel/sys.c   	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
mm               1978 kernel/sys.c   				prctl_map.auxv_size > sizeof(mm->saved_auxv))
mm               2001 kernel/sys.c   		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
mm               2010 kernel/sys.c   	down_read(&mm->mmap_sem);
mm               2024 kernel/sys.c   	spin_lock(&mm->arg_lock);
mm               2025 kernel/sys.c   	mm->start_code	= prctl_map.start_code;
mm               2026 kernel/sys.c   	mm->end_code	= prctl_map.end_code;
mm               2027 kernel/sys.c   	mm->start_data	= prctl_map.start_data;
mm               2028 kernel/sys.c   	mm->end_data	= prctl_map.end_data;
mm               2029 kernel/sys.c   	mm->start_brk	= prctl_map.start_brk;
mm               2030 kernel/sys.c   	mm->brk		= prctl_map.brk;
mm               2031 kernel/sys.c   	mm->start_stack	= prctl_map.start_stack;
mm               2032 kernel/sys.c   	mm->arg_start	= prctl_map.arg_start;
mm               2033 kernel/sys.c   	mm->arg_end	= prctl_map.arg_end;
mm               2034 kernel/sys.c   	mm->env_start	= prctl_map.env_start;
mm               2035 kernel/sys.c   	mm->env_end	= prctl_map.env_end;
mm               2036 kernel/sys.c   	spin_unlock(&mm->arg_lock);
mm               2047 kernel/sys.c   		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
mm               2049 kernel/sys.c   	up_read(&mm->mmap_sem);
mm               2054 kernel/sys.c   static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
mm               2075 kernel/sys.c   	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
mm               2078 kernel/sys.c   	memcpy(mm->saved_auxv, user_auxv, len);
mm               2087 kernel/sys.c   	struct mm_struct *mm = current->mm;
mm               2110 kernel/sys.c   		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
mm               2113 kernel/sys.c   		return prctl_set_auxv(mm, addr, arg4);
mm               2125 kernel/sys.c   	down_read(&mm->mmap_sem);
mm               2126 kernel/sys.c   	vma = find_vma(mm, addr);
mm               2128 kernel/sys.c   	spin_lock(&mm->arg_lock);
mm               2129 kernel/sys.c   	prctl_map.start_code	= mm->start_code;
mm               2130 kernel/sys.c   	prctl_map.end_code	= mm->end_code;
mm               2131 kernel/sys.c   	prctl_map.start_data	= mm->start_data;
mm               2132 kernel/sys.c   	prctl_map.end_data	= mm->end_data;
mm               2133 kernel/sys.c   	prctl_map.start_brk	= mm->start_brk;
mm               2134 kernel/sys.c   	prctl_map.brk		= mm->brk;
mm               2135 kernel/sys.c   	prctl_map.start_stack	= mm->start_stack;
mm               2136 kernel/sys.c   	prctl_map.arg_start	= mm->arg_start;
mm               2137 kernel/sys.c   	prctl_map.arg_end	= mm->arg_end;
mm               2138 kernel/sys.c   	prctl_map.env_start	= mm->env_start;
mm               2139 kernel/sys.c   	prctl_map.env_end	= mm->env_end;
mm               2202 kernel/sys.c   	mm->start_code	= prctl_map.start_code;
mm               2203 kernel/sys.c   	mm->end_code	= prctl_map.end_code;
mm               2204 kernel/sys.c   	mm->start_data	= prctl_map.start_data;
mm               2205 kernel/sys.c   	mm->end_data	= prctl_map.end_data;
mm               2206 kernel/sys.c   	mm->start_brk	= prctl_map.start_brk;
mm               2207 kernel/sys.c   	mm->brk		= prctl_map.brk;
mm               2208 kernel/sys.c   	mm->start_stack	= prctl_map.start_stack;
mm               2209 kernel/sys.c   	mm->arg_start	= prctl_map.arg_start;
mm               2210 kernel/sys.c   	mm->arg_end	= prctl_map.arg_end;
mm               2211 kernel/sys.c   	mm->env_start	= prctl_map.env_start;
mm               2212 kernel/sys.c   	mm->env_end	= prctl_map.env_end;
mm               2216 kernel/sys.c   	spin_unlock(&mm->arg_lock);
mm               2217 kernel/sys.c   	up_read(&mm->mmap_sem);
mm               2286 kernel/sys.c   		error = get_dumpable(me->mm);
mm               2293 kernel/sys.c   		set_dumpable(me->mm, arg2);
mm               2435 kernel/sys.c   		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
mm               2440 kernel/sys.c   		if (down_write_killable(&me->mm->mmap_sem))
mm               2443 kernel/sys.c   			set_bit(MMF_DISABLE_THP, &me->mm->flags);
mm               2445 kernel/sys.c   			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
mm               2446 kernel/sys.c   		up_write(&me->mm->mmap_sem);
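
The prctl_set_mm() lines above use two locks with distinct jobs: mmap_sem held shared keeps the VMA layout stable while the new values are validated, and mm->arg_lock makes the multi-field update atomic for readers such as /proc. Condensed sketch:

	down_read(&mm->mmap_sem);
	/* ... validate prctl_map against the existing VMAs ... */
	spin_lock(&mm->arg_lock);
	mm->arg_start = prctl_map.arg_start;
	mm->arg_end   = prctl_map.arg_end;
	/* ... the remaining start/end fields are updated the same way ... */
	spin_unlock(&mm->arg_lock);
	up_read(&mm->mmap_sem);
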
mm               1192 kernel/trace/ring_buffer.c 	bool user_thread = current->mm != NULL;
mm                368 kernel/trace/trace_output.c static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
mm                378 kernel/trace/trace_output.c 	if (mm) {
mm                381 kernel/trace/trace_output.c 		down_read(&mm->mmap_sem);
mm                382 kernel/trace/trace_output.c 		vma = find_vma(mm, ip);
mm                393 kernel/trace/trace_output.c 		up_read(&mm->mmap_sem);
mm               1089 kernel/trace/trace_output.c 	struct mm_struct *mm = NULL;
mm               1105 kernel/trace/trace_output.c 			mm = get_task_mm(task);
mm               1116 kernel/trace/trace_output.c 		seq_print_user_ip(s, mm, ip, flags);
mm               1120 kernel/trace/trace_output.c 	if (mm)
mm               1121 kernel/trace/trace_output.c 		mmput(mm);
mm               1037 kernel/trace/trace_uprobe.c 				struct mm_struct *mm);
mm               1186 kernel/trace/trace_uprobe.c __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
mm               1194 kernel/trace/trace_uprobe.c 		if (event->hw.target->mm == mm)
mm               1205 kernel/trace/trace_uprobe.c 	return __uprobe_perf_filter(filter, event->hw.target->mm);
mm               1309 kernel/trace/trace_uprobe.c 				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
mm               1319 kernel/trace/trace_uprobe.c 	ret = __uprobe_perf_filter(filter, mm);
mm               1382 kernel/trace/trace_uprobe.c 	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
mm                 85 kernel/tsacct.c 	struct mm_struct *mm;
mm                 92 kernel/tsacct.c 	mm = get_task_mm(p);
mm                 93 kernel/tsacct.c 	if (mm) {
mm                 95 kernel/tsacct.c 		stats->hiwater_rss   = get_mm_hiwater_rss(mm) * PAGE_SIZE / KB;
mm                 96 kernel/tsacct.c 		stats->hiwater_vm    = get_mm_hiwater_vm(mm)  * PAGE_SIZE / KB;
mm                 97 kernel/tsacct.c 		mmput(mm);
mm                121 kernel/tsacct.c 	if (!likely(tsk->mm))
mm                136 kernel/tsacct.c 	tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm) >> 10;
mm                137 kernel/tsacct.c 	tsk->acct_vm_mem1 += delta * tsk->mm->total_vm >> 10;
mm                 18 lib/is_single_threaded.c 	struct mm_struct *mm = task->mm;
mm                 25 lib/is_single_threaded.c 	if (atomic_read(&mm->mm_users) == 1)
mm                 37 lib/is_single_threaded.c 			if (unlikely(t->mm == mm))
mm                 39 lib/is_single_threaded.c 			if (likely(t->mm))
mm                 82 lib/reed_solomon/reed_solomon.c 	rs->mm = symsize;
mm                247 lib/reed_solomon/reed_solomon.c 		if (symsize != cd->mm)
mm                 33 lib/zlib_inflate/inffast.c 	union uu  mm;
mm                 36 lib/zlib_inflate/inffast.c 	mm.b[0] = b[0];
mm                 37 lib/zlib_inflate/inffast.c 	mm.b[1] = b[1];
mm                 38 lib/zlib_inflate/inffast.c 	return mm.us;
mm                291 lib/zlib_inflate/inffast.c 				union uu mm;
mm                293 lib/zlib_inflate/inffast.c 				mm.us = pat16;
mm                294 lib/zlib_inflate/inffast.c 				mm.b[0] = mm.b[1];
mm                295 lib/zlib_inflate/inffast.c 				pat16 = mm.us;
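
Note: the lib/reed_solomon and lib/zlib_inflate hits above are name collisions, not struct mm_struct users; rs->mm is the Reed-Solomon symbol size in bits, and union uu mm is a local scratch variable for byte-wise 16-bit loads and stores.
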
mm                136 mm/debug.c     void dump_mm(const struct mm_struct *mm)
mm                166 mm/debug.c     		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
mm                168 mm/debug.c     		mm->get_unmapped_area,
mm                170 mm/debug.c     		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
mm                171 mm/debug.c     		mm->pgd, atomic_read(&mm->mm_users),
mm                172 mm/debug.c     		atomic_read(&mm->mm_count),
mm                173 mm/debug.c     		mm_pgtables_bytes(mm),
mm                174 mm/debug.c     		mm->map_count,
mm                175 mm/debug.c     		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
mm                176 mm/debug.c     		(u64)atomic64_read(&mm->pinned_vm),
mm                177 mm/debug.c     		mm->data_vm, mm->exec_vm, mm->stack_vm,
mm                178 mm/debug.c     		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
mm                179 mm/debug.c     		mm->start_brk, mm->brk, mm->start_stack,
mm                180 mm/debug.c     		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
mm                181 mm/debug.c     		mm->binfmt, mm->flags, mm->core_state,
mm                183 mm/debug.c     		mm->ioctx_table,
mm                186 mm/debug.c     		mm->owner,
mm                188 mm/debug.c     		mm->exe_file,
mm                190 mm/debug.c     		mm->mmu_notifier_mm,
mm                193 mm/debug.c     		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
mm                195 mm/debug.c     		atomic_read(&mm->tlb_flush_pending),
mm                196 mm/debug.c     		mm->def_flags, &mm->def_flags
mm                866 mm/filemap.c   		error = mem_cgroup_try_charge(page, current->mm,
mm               1408 mm/filemap.c   int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
mm               1419 mm/filemap.c   		up_read(&mm->mmap_sem);
mm               1431 mm/filemap.c   				up_read(&mm->mmap_sem);
mm                 37 mm/frame_vector.c 	struct mm_struct *mm = current->mm;
mm                 51 mm/frame_vector.c 	down_read(&mm->mmap_sem);
mm                 53 mm/frame_vector.c 	vma = find_vma_intersection(mm, start, start + 1);
mm                101 mm/frame_vector.c 		vma = find_vma_intersection(mm, start, start + 1);
mm                105 mm/frame_vector.c 		up_read(&mm->mmap_sem);
mm                401 mm/frontswap.c 		if (security_vm_enough_memory_mm(current->mm, pages)) {
mm                186 mm/gup.c       	struct mm_struct *mm = vma->vm_mm;
mm                195 mm/gup.c       	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
mm                212 mm/gup.c       		migration_entry_wait(mm, pmd, address);
mm                325 mm/gup.c       	struct mm_struct *mm = vma->vm_mm;
mm                336 mm/gup.c       		page = follow_huge_pmd(mm, address, pmd, flags);
mm                356 mm/gup.c       			pmd_migration_entry_wait(mm, pmd);
mm                367 mm/gup.c       		ptl = pmd_lock(mm, pmd);
mm                380 mm/gup.c       	ptl = pmd_lock(mm, pmd);
mm                389 mm/gup.c       		pmd_migration_entry_wait(mm, pmd);
mm                420 mm/gup.c       			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
mm                440 mm/gup.c       	struct mm_struct *mm = vma->vm_mm;
mm                446 mm/gup.c       		page = follow_huge_pud(mm, address, pud, flags);
mm                460 mm/gup.c       		ptl = pud_lock(mm, pud);
mm                523 mm/gup.c       	struct mm_struct *mm = vma->vm_mm;
mm                528 mm/gup.c       	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
mm                534 mm/gup.c       	pgd = pgd_offset(mm, address);
mm                540 mm/gup.c       		page = follow_huge_pgd(mm, address, pgd, flags);
mm                569 mm/gup.c       static int get_gate_page(struct mm_struct *mm, unsigned long address,
mm                586 mm/gup.c       		pgd = pgd_offset_gate(mm, address);
mm                602 mm/gup.c       	*vma = get_gate_vma(mm);
mm                788 mm/gup.c       static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
mm                819 mm/gup.c       			vma = find_extend_vma(mm, start);
mm                820 mm/gup.c       			if (!vma && in_gate_area(mm, start)) {
mm                821 mm/gup.c       				ret = get_gate_page(mm, start & PAGE_MASK,
mm                837 mm/gup.c       				i = follow_hugetlb_page(mm, vma, pages, vmas,
mm                963 mm/gup.c       int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
mm                976 mm/gup.c       	vma = find_extend_vma(mm, address);
mm                994 mm/gup.c       		down_read(&mm->mmap_sem);
mm               1014 mm/gup.c       						struct mm_struct *mm,
mm               1038 mm/gup.c       		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
mm               1080 mm/gup.c       		down_read(&mm->mmap_sem);
mm               1081 mm/gup.c       		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
mm               1102 mm/gup.c       		up_read(&mm->mmap_sem);
mm               1164 mm/gup.c       long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
mm               1178 mm/gup.c       	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
mm               1206 mm/gup.c       	struct mm_struct *mm = vma->vm_mm;
mm               1214 mm/gup.c       	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
mm               1238 mm/gup.c       	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
mm               1251 mm/gup.c       	struct mm_struct *mm = current->mm;
mm               1266 mm/gup.c       			down_read(&mm->mmap_sem);
mm               1267 mm/gup.c       			vma = find_vma(mm, nstart);
mm               1298 mm/gup.c       		up_read(&mm->mmap_sem);
mm               1322 mm/gup.c       	if (__get_user_pages(current, current->mm, addr, 1,
mm               1332 mm/gup.c       		struct mm_struct *mm, unsigned long start,
mm               1350 mm/gup.c       		vma = find_vma(mm, start);
mm               1449 mm/gup.c       					struct mm_struct *mm,
mm               1522 mm/gup.c       		nr_pages = __get_user_pages_locked(tsk, mm, start, nr_pages,
mm               1536 mm/gup.c       					struct mm_struct *mm,
mm               1552 mm/gup.c       				  struct mm_struct *mm,
mm               1577 mm/gup.c       	rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages,
mm               1592 mm/gup.c       		rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
mm               1603 mm/gup.c       						  struct mm_struct *mm,
mm               1610 mm/gup.c       	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
mm               1626 mm/gup.c       	return __gup_longterm_locked(current, current->mm, start, nr_pages,
mm               1665 mm/gup.c       	return __get_user_pages_locked(current, current->mm, start, nr_pages,
mm               1689 mm/gup.c       	struct mm_struct *mm = current->mm;
mm               1702 mm/gup.c       	down_read(&mm->mmap_sem);
mm               1703 mm/gup.c       	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
mm               1706 mm/gup.c       		up_read(&mm->mmap_sem);
mm               2289 mm/gup.c       	pgdp = pgd_offset(current->mm, addr);
mm               2394 mm/gup.c       		down_read(&current->mm->mmap_sem);
mm               2395 mm/gup.c       		ret = __gup_longterm_locked(current, current->mm,
mm               2398 mm/gup.c       		up_read(&current->mm->mmap_sem);
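
The mm/gup.c paths above share one caller contract: hold mmap_sem across the get_user_pages*() call and drop every returned page reference afterwards. A minimal remote-read sketch under that assumption:

	down_read(&mm->mmap_sem);
	ret = get_user_pages_remote(tsk, mm, vaddr, 1, FOLL_FORCE,
				    &page, NULL, NULL);
	up_read(&mm->mmap_sem);
	if (ret == 1) {
		/* ... kmap_atomic() the page and copy out ... */
		put_page(page);
	}
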
mm                 29 mm/hmm.c       static struct mmu_notifier *hmm_alloc_notifier(struct mm_struct *mm)
mm                 55 mm/hmm.c       static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
mm                175 mm/hmm.c       int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
mm                179 mm/hmm.c       	lockdep_assert_held_write(&mm->mmap_sem);
mm                182 mm/hmm.c       	if (!mm || !mirror || !mirror->ops)
mm                185 mm/hmm.c       	mn = mmu_notifier_get_locked(&hmm_mmu_notifier_ops, mm);
mm                511 mm/hmm.c       				migration_entry_wait(walk->mm, pmdp, addr);
mm                582 mm/hmm.c       			pmd_migration_entry_wait(walk->mm, pmdp);
mm                740 mm/hmm.c       	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
mm                805 mm/hmm.c       	if (!mmget_not_zero(hmm->mmu_notifier.mm))
mm                843 mm/hmm.c       	mmput(hmm->mmu_notifier.mm);
mm                899 mm/hmm.c       	lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);
mm                906 mm/hmm.c       		vma = find_vma(hmm->mmu_notifier.mm, start);
mm                116 mm/huge_memory.c struct page *mm_get_huge_zero_page(struct mm_struct *mm)
mm                118 mm/huge_memory.c 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
mm                124 mm/huge_memory.c 	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
mm                130 mm/huge_memory.c void mm_put_huge_zero_page(struct mm_struct *mm)
mm                132 mm/huge_memory.c 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
mm                537 mm/huge_memory.c 	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
mm                571 mm/huge_memory.c 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
mm                691 mm/huge_memory.c static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
mm                701 mm/huge_memory.c 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
mm                702 mm/huge_memory.c 	set_pmd_at(mm, haddr, pmd, entry);
mm                703 mm/huge_memory.c 	mm_inc_nr_ptes(mm);
mm                773 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
mm                777 mm/huge_memory.c 	ptl = pmd_lock(mm, pmd);
mm                802 mm/huge_memory.c 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
mm                803 mm/huge_memory.c 		mm_inc_nr_ptes(mm);
mm                807 mm/huge_memory.c 	set_pmd_at(mm, addr, pmd, entry);
mm                813 mm/huge_memory.c 		pte_free(mm, pgtable);
mm                861 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
mm                865 mm/huge_memory.c 	ptl = pud_lock(mm, pud);
mm                887 mm/huge_memory.c 	set_pud_at(mm, addr, pud, entry);
mm                939 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
mm                942 mm/huge_memory.c 	assert_spin_locked(pmd_lockptr(mm, pmd));
mm               1085 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
mm               1088 mm/huge_memory.c 	assert_spin_locked(pud_lockptr(mm, pud));
mm               1470 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
mm               1473 mm/huge_memory.c 	assert_spin_locked(pmd_lockptr(mm, pmd));
mm               1700 mm/huge_memory.c 	struct mm_struct *mm = tlb->mm;
mm               1752 mm/huge_memory.c 		set_pmd_at(mm, addr, pmd, orig_pmd);
mm               1764 mm/huge_memory.c static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
mm               1768 mm/huge_memory.c 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
mm               1769 mm/huge_memory.c 	pte_free(mm, pgtable);
mm               1770 mm/huge_memory.c 	mm_dec_nr_ptes(mm);
mm               1790 mm/huge_memory.c 	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
mm               1795 mm/huge_memory.c 			zap_deposited_table(tlb->mm, pmd);
mm               1800 mm/huge_memory.c 		zap_deposited_table(tlb->mm, pmd);
mm               1823 mm/huge_memory.c 			zap_deposited_table(tlb->mm, pmd);
mm               1824 mm/huge_memory.c 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
mm               1827 mm/huge_memory.c 				zap_deposited_table(tlb->mm, pmd);
mm               1828 mm/huge_memory.c 			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
mm               1870 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
mm               1893 mm/huge_memory.c 		new_ptl = pmd_lockptr(mm, new_pmd);
mm               1896 mm/huge_memory.c 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
mm               1903 mm/huge_memory.c 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
mm               1904 mm/huge_memory.c 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
mm               1907 mm/huge_memory.c 		set_pmd_at(mm, new_addr, new_pmd, pmd);
mm               1927 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
mm               1955 mm/huge_memory.c 			set_pmd_at(mm, addr, pmd, newpmd);
mm               1999 mm/huge_memory.c 	set_pmd_at(mm, addr, pmd, entry);
mm               2055 mm/huge_memory.c 	pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
mm               2108 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
mm               2123 mm/huge_memory.c 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
mm               2124 mm/huge_memory.c 	pmd_populate(mm, &_pmd, pgtable);
mm               2132 mm/huge_memory.c 		set_pte_at(mm, haddr, pte, entry);
mm               2136 mm/huge_memory.c 	pmd_populate(mm, pmd, pgtable);
mm               2142 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
mm               2165 mm/huge_memory.c 			zap_deposited_table(mm, pmd);
mm               2175 mm/huge_memory.c 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
mm               2236 mm/huge_memory.c 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
mm               2237 mm/huge_memory.c 	pmd_populate(mm, &_pmd, pgtable);
mm               2264 mm/huge_memory.c 		set_pte_at(mm, addr, pte, entry);
mm               2289 mm/huge_memory.c 	pmd_populate(mm, pmd, pgtable);
mm               3024 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
mm               3041 mm/huge_memory.c 	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
mm               3049 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
mm               3071 mm/huge_memory.c 	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
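
Several mm/huge_memory.c entries above pair pgtable_trans_huge_deposit() with pgtable_trans_huge_withdraw(): when a huge PMD is installed, a preallocated page table is parked on the mapping so that a later split or zap can never fail on allocation. A minimal sketch of that park/reclaim shape as a LIFO list, under the simplifying assumption of one list per mapping; deposit() and withdraw() are illustrative names.

    #include <stdlib.h>
    #include <stdio.h>

    /* Model: each huge mapping parks a preallocated "page table"
     * so a later split can never fail on allocation. */
    struct pgtable { struct pgtable *next; };
    struct huge_mapping { struct pgtable *deposited; };

    static void deposit(struct huge_mapping *m, struct pgtable *pt)
    {
            pt->next = m->deposited;        /* LIFO push */
            m->deposited = pt;
    }

    static struct pgtable *withdraw(struct huge_mapping *m)
    {
            struct pgtable *pt = m->deposited;

            if (pt)
                    m->deposited = pt->next;        /* LIFO pop */
            return pt;
    }

    int main(void)
    {
            struct huge_mapping m = { 0 };

            deposit(&m, malloc(sizeof(struct pgtable)));    /* at map time */
            free(withdraw(&m));                             /* at split/zap time */
            printf("empty: %s\n", m.deposited ? "no" : "yes");
            return 0;
    }
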
mm               3221 mm/hugetlb.c   void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
mm               3224 mm/hugetlb.c   		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
mm               3523 mm/hugetlb.c   	struct mm_struct *mm = vma->vm_mm;
mm               3547 mm/hugetlb.c   	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
mm               3553 mm/hugetlb.c   		ptep = huge_pte_offset(mm, address, sz);
mm               3557 mm/hugetlb.c   		ptl = huge_pte_lock(h, mm, ptep);
mm               3558 mm/hugetlb.c   		if (huge_pmd_unshare(mm, &address, ptep)) {
mm               3578 mm/hugetlb.c   			huge_pte_clear(mm, address, ptep, sz);
mm               3602 mm/hugetlb.c   		pte = huge_ptep_get_and_clear(mm, address, ptep);
mm               3607 mm/hugetlb.c   		hugetlb_count_sub(pages_per_huge_page(h), mm);
mm               3644 mm/hugetlb.c   	struct mm_struct *mm;
mm               3658 mm/hugetlb.c   	mm = vma->vm_mm;
mm               3660 mm/hugetlb.c   	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
mm               3671 mm/hugetlb.c   static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
mm               3727 mm/hugetlb.c   static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
mm               3784 mm/hugetlb.c   			unmap_ref_private(mm, vma, old_page, haddr);
mm               3787 mm/hugetlb.c   			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
mm               3815 mm/hugetlb.c   	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
mm               3824 mm/hugetlb.c   	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
mm               3830 mm/hugetlb.c   		mmu_notifier_invalidate_range(mm, range.start, range.end);
mm               3831 mm/hugetlb.c   		set_huge_pte_at(mm, haddr, ptep,
mm               3907 mm/hugetlb.c   static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
mm               3988 mm/hugetlb.c   			ptl = huge_pte_lock(h, mm, ptep);
mm               4046 mm/hugetlb.c   	ptl = huge_pte_lock(h, mm, ptep);
mm               4062 mm/hugetlb.c   	set_huge_pte_at(mm, haddr, ptep, new_pte);
mm               4064 mm/hugetlb.c   	hugetlb_count_add(pages_per_huge_page(h), mm);
mm               4067 mm/hugetlb.c   		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
mm               4119 mm/hugetlb.c   vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
mm               4134 mm/hugetlb.c   	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
mm               4138 mm/hugetlb.c   			migration_entry_wait_huge(vma, mm, ptep);
mm               4144 mm/hugetlb.c   		ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
mm               4162 mm/hugetlb.c   		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
mm               4199 mm/hugetlb.c   	ptl = huge_pte_lock(h, mm, ptep);
mm               4221 mm/hugetlb.c   			ret = hugetlb_cow(mm, vma, address, ptep,
mm               4387 mm/hugetlb.c   long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
mm               4420 mm/hugetlb.c   		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
mm               4423 mm/hugetlb.c   			ptl = huge_pte_lock(h, mm, pte);
mm               4471 mm/hugetlb.c   			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
mm               4556 mm/hugetlb.c   	struct mm_struct *mm = vma->vm_mm;
mm               4571 mm/hugetlb.c   				0, vma, mm, start, end);
mm               4581 mm/hugetlb.c   		ptep = huge_pte_offset(mm, address, huge_page_size(h));
mm               4584 mm/hugetlb.c   		ptl = huge_pte_lock(h, mm, ptep);
mm               4585 mm/hugetlb.c   		if (huge_pmd_unshare(mm, &address, ptep)) {
mm               4604 mm/hugetlb.c   				set_huge_swap_pte_at(mm, address, ptep,
mm               4879 mm/hugetlb.c   pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
mm               4881 mm/hugetlb.c   	struct vm_area_struct *vma = find_vma(mm, addr);
mm               4892 mm/hugetlb.c   		return (pte_t *)pmd_alloc(mm, pud, addr);
mm               4913 mm/hugetlb.c   	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
mm               4915 mm/hugetlb.c   		pud_populate(mm, pud,
mm               4917 mm/hugetlb.c   		mm_inc_nr_pmds(mm);
mm               4923 mm/hugetlb.c   	pte = (pte_t *)pmd_alloc(mm, pud, addr);
mm               4940 mm/hugetlb.c   int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
mm               4942 mm/hugetlb.c   	pgd_t *pgd = pgd_offset(mm, *addr);
mm               4952 mm/hugetlb.c   	mm_dec_nr_pmds(mm);
mm               4958 mm/hugetlb.c   pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
mm               4963 mm/hugetlb.c   int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
mm               4976 mm/hugetlb.c   pte_t *huge_pte_alloc(struct mm_struct *mm,
mm               4984 mm/hugetlb.c   	pgd = pgd_offset(mm, addr);
mm               4985 mm/hugetlb.c   	p4d = p4d_alloc(mm, pgd, addr);
mm               4988 mm/hugetlb.c   	pud = pud_alloc(mm, p4d, addr);
mm               4995 mm/hugetlb.c   				pte = huge_pmd_share(mm, addr, pud);
mm               4997 mm/hugetlb.c   				pte = (pte_t *)pmd_alloc(mm, pud, addr);
mm               5014 mm/hugetlb.c   pte_t *huge_pte_offset(struct mm_struct *mm,
mm               5022 mm/hugetlb.c   	pgd = pgd_offset(mm, addr);
mm               5055 mm/hugetlb.c   follow_huge_addr(struct mm_struct *mm, unsigned long address,
mm               5070 mm/hugetlb.c   follow_huge_pmd(struct mm_struct *mm, unsigned long address,
mm               5077 mm/hugetlb.c   	ptl = pmd_lockptr(mm, pmd);
mm               5093 mm/hugetlb.c   			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
mm               5107 mm/hugetlb.c   follow_huge_pud(struct mm_struct *mm, unsigned long address,
mm               5117 mm/hugetlb.c   follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
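
huge_pte_alloc() in the mm/hugetlb.c entries descends pgd_offset() -> p4d_alloc() -> pud_alloc() -> pmd_alloc(), allocating any missing intermediate table on the way down. A standalone sketch of that allocate-as-you-descend radix walk, assuming x86-64-like geometry (four levels, nine index bits each, 4 KiB leaves); unlike the kernel, it always walks to the lowest level rather than stopping early for huge page sizes.

    #include <stdlib.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LEVELS  4       /* pgd, p4d, pud, pmd, as in the entries above */
    #define FANOUT  512     /* 9 address bits per level */

    struct node { void *slot[FANOUT]; };

    /* One level of the walk: return the child for this index,
     * allocating it if absent -- the shape of p4d/pud/pmd_alloc(). */
    static struct node *walk_alloc(struct node *parent, unsigned idx)
    {
            if (!parent->slot[idx])
                    parent->slot[idx] = calloc(1, sizeof(struct node));
            return parent->slot[idx];
    }

    static struct node *table_alloc(struct node *root, uint64_t addr)
    {
            struct node *n = root;

            for (int lvl = LEVELS - 1; lvl >= 0 && n; lvl--)
                    n = walk_alloc(n, (addr >> (12 + 9 * lvl)) & (FANOUT - 1));
            return n;       /* lowest-level table, or NULL on allocation failure */
    }

    int main(void)
    {
            struct node root = { 0 };

            printf("leaf at %p\n", (void *)table_alloc(&root, 0x7f00deadb000ULL));
            return 0;
    }
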
mm                 94 mm/internal.h  extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
mm                292 mm/internal.h  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
mm                546 mm/internal.h  void flush_tlb_batched_pending(struct mm_struct *mm);
mm                554 mm/internal.h  static inline void flush_tlb_batched_pending(struct mm_struct *mm)
mm                 91 mm/khugepaged.c 	struct mm_struct *mm;
mm                384 mm/khugepaged.c static struct mm_slot *get_mm_slot(struct mm_struct *mm)
mm                388 mm/khugepaged.c 	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
mm                389 mm/khugepaged.c 		if (mm == mm_slot->mm)
mm                395 mm/khugepaged.c static void insert_to_mm_slots_hash(struct mm_struct *mm,
mm                398 mm/khugepaged.c 	mm_slot->mm = mm;
mm                399 mm/khugepaged.c 	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
mm                402 mm/khugepaged.c static inline int khugepaged_test_exit(struct mm_struct *mm)
mm                404 mm/khugepaged.c 	return atomic_read(&mm->mm_users) == 0;
mm                431 mm/khugepaged.c int __khugepaged_enter(struct mm_struct *mm)
mm                441 mm/khugepaged.c 	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
mm                442 mm/khugepaged.c 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
mm                448 mm/khugepaged.c 	insert_to_mm_slots_hash(mm, mm_slot);
mm                457 mm/khugepaged.c 	mmgrab(mm);
mm                484 mm/khugepaged.c void __khugepaged_exit(struct mm_struct *mm)
mm                490 mm/khugepaged.c 	mm_slot = get_mm_slot(mm);
mm                499 mm/khugepaged.c 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
mm                501 mm/khugepaged.c 		mmdrop(mm);
mm                511 mm/khugepaged.c 		down_write(&mm->mmap_sem);
mm                512 mm/khugepaged.c 		up_write(&mm->mmap_sem);
mm                860 mm/khugepaged.c static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
mm                866 mm/khugepaged.c 	if (unlikely(khugepaged_test_exit(mm)))
mm                869 mm/khugepaged.c 	*vmap = vma = find_vma(mm, address);
mm                890 mm/khugepaged.c static bool __collapse_huge_page_swapin(struct mm_struct *mm,
mm                907 mm/khugepaged.c 		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
mm                921 mm/khugepaged.c 			down_read(&mm->mmap_sem);
mm                922 mm/khugepaged.c 			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
mm                924 mm/khugepaged.c 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
mm                928 mm/khugepaged.c 			if (mm_find_pmd(mm, address) != pmd) {
mm                929 mm/khugepaged.c 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
mm                934 mm/khugepaged.c 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
mm                942 mm/khugepaged.c 	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
mm                946 mm/khugepaged.c static void collapse_huge_page(struct mm_struct *mm,
mm                973 mm/khugepaged.c 	up_read(&mm->mmap_sem);
mm                980 mm/khugepaged.c 	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
mm                985 mm/khugepaged.c 	down_read(&mm->mmap_sem);
mm                986 mm/khugepaged.c 	result = hugepage_vma_revalidate(mm, address, &vma);
mm                989 mm/khugepaged.c 		up_read(&mm->mmap_sem);
mm                993 mm/khugepaged.c 	pmd = mm_find_pmd(mm, address);
mm                997 mm/khugepaged.c 		up_read(&mm->mmap_sem);
mm               1006 mm/khugepaged.c 	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
mm               1008 mm/khugepaged.c 		up_read(&mm->mmap_sem);
mm               1012 mm/khugepaged.c 	up_read(&mm->mmap_sem);
mm               1018 mm/khugepaged.c 	down_write(&mm->mmap_sem);
mm               1020 mm/khugepaged.c 	if (!mmget_still_valid(mm))
mm               1022 mm/khugepaged.c 	result = hugepage_vma_revalidate(mm, address, &vma);
mm               1026 mm/khugepaged.c 	if (mm_find_pmd(mm, address) != pmd)
mm               1031 mm/khugepaged.c 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
mm               1036 mm/khugepaged.c 	pte_ptl = pte_lockptr(mm, pmd);
mm               1038 mm/khugepaged.c 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
mm               1062 mm/khugepaged.c 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
mm               1096 mm/khugepaged.c 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
mm               1097 mm/khugepaged.c 	set_pmd_at(mm, address, pmd, _pmd);
mm               1106 mm/khugepaged.c 	up_write(&mm->mmap_sem);
mm               1108 mm/khugepaged.c 	trace_mm_collapse_huge_page(mm, isolated, result);
mm               1115 mm/khugepaged.c static int khugepaged_scan_pmd(struct mm_struct *mm,
mm               1131 mm/khugepaged.c 	pmd = mm_find_pmd(mm, address);
mm               1138 mm/khugepaged.c 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
mm               1232 mm/khugepaged.c 		collapse_huge_page(mm, address, hpage, node, referenced);
mm               1235 mm/khugepaged.c 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
mm               1242 mm/khugepaged.c 	struct mm_struct *mm = mm_slot->mm;
mm               1246 mm/khugepaged.c 	if (khugepaged_test_exit(mm)) {
mm               1259 mm/khugepaged.c 		mmdrop(mm);
mm               1268 mm/khugepaged.c static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
mm               1276 mm/khugepaged.c 	mm_slot = get_mm_slot(mm);
mm               1290 mm/khugepaged.c void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
mm               1293 mm/khugepaged.c 	struct vm_area_struct *vma = find_vma(mm, haddr);
mm               1314 mm/khugepaged.c 	pmd = mm_find_pmd(mm, haddr);
mm               1318 mm/khugepaged.c 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
mm               1386 mm/khugepaged.c 	mm_dec_nr_ptes(mm);
mm               1387 mm/khugepaged.c 	pte_free(mm, pmd_pgtable(_pmd));
mm               1396 mm/khugepaged.c 	struct mm_struct *mm = mm_slot->mm;
mm               1402 mm/khugepaged.c 	if (!down_write_trylock(&mm->mmap_sem))
mm               1405 mm/khugepaged.c 	if (unlikely(khugepaged_test_exit(mm)))
mm               1409 mm/khugepaged.c 		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
mm               1413 mm/khugepaged.c 	up_write(&mm->mmap_sem);
mm               1492 mm/khugepaged.c static void collapse_file(struct mm_struct *mm,
mm               1518 mm/khugepaged.c 	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
mm               1815 mm/khugepaged.c static void khugepaged_scan_file(struct mm_struct *mm,
mm               1884 mm/khugepaged.c 			collapse_file(mm, file, start, hpage, node);
mm               1891 mm/khugepaged.c static void khugepaged_scan_file(struct mm_struct *mm,
mm               1909 mm/khugepaged.c 	struct mm_struct *mm;
mm               1927 mm/khugepaged.c 	mm = mm_slot->mm;
mm               1933 mm/khugepaged.c 	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
mm               1935 mm/khugepaged.c 	if (likely(!khugepaged_test_exit(mm)))
mm               1936 mm/khugepaged.c 		vma = find_vma(mm, khugepaged_scan.address);
mm               1943 mm/khugepaged.c 		if (unlikely(khugepaged_test_exit(mm))) {
mm               1965 mm/khugepaged.c 			if (unlikely(khugepaged_test_exit(mm)))
mm               1980 mm/khugepaged.c 				up_read(&mm->mmap_sem);
mm               1982 mm/khugepaged.c 				khugepaged_scan_file(mm, file, pgoff, hpage);
mm               1985 mm/khugepaged.c 				ret = khugepaged_scan_pmd(mm, vma,
mm               2000 mm/khugepaged.c 	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
mm               2009 mm/khugepaged.c 	if (khugepaged_test_exit(mm) || !vma) {
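
get_mm_slot() and insert_to_mm_slots_hash() above keep per-mm scan state in a hash table keyed by the mm_struct pointer itself. A standalone model of that pattern with chained buckets; hash_ptr(), get_slot() and insert_slot() are illustrative stand-ins for the hashtable.h helpers the kernel uses.

    #include <stdlib.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HASH_BITS 10
    #define BUCKETS   (1u << HASH_BITS)

    struct slot { void *key; struct slot *next; };
    static struct slot *table[BUCKETS];

    /* Multiplicative pointer hash; any decent mixer works here. */
    static unsigned hash_ptr(const void *p)
    {
            return (unsigned)(((uint64_t)(uintptr_t)p * 0x9E3779B97F4A7C15ULL)
                              >> (64 - HASH_BITS));
    }

    static struct slot *get_slot(void *key)         /* mirrors get_mm_slot() */
    {
            for (struct slot *s = table[hash_ptr(key)]; s; s = s->next)
                    if (s->key == key)
                            return s;
            return NULL;
    }

    static void insert_slot(void *key)      /* mirrors insert_to_mm_slots_hash() */
    {
            struct slot *s = calloc(1, sizeof(*s));
            unsigned b = hash_ptr(key);

            if (!s)
                    return;
            s->key = key;
            s->next = table[b];             /* chain onto the bucket head */
            table[b] = s;
    }

    int main(void)
    {
            int dummy;                      /* stands in for a struct mm_struct */

            insert_slot(&dummy);
            printf("found: %s\n", get_slot(&dummy) ? "yes" : "no");
            return 0;
    }
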
mm               1215 mm/kmemleak.c  	if (current->mm)
mm                124 mm/ksm.c       	struct mm_struct *mm;
mm                203 mm/ksm.c       	struct mm_struct *mm;
mm                391 mm/ksm.c       	rmap_item->mm = NULL;	/* debug safety */
mm                424 mm/ksm.c       static struct mm_slot *get_mm_slot(struct mm_struct *mm)
mm                428 mm/ksm.c       	hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
mm                429 mm/ksm.c       		if (slot->mm == mm)
mm                435 mm/ksm.c       static void insert_to_mm_slots_hash(struct mm_struct *mm,
mm                438 mm/ksm.c       	mm_slot->mm = mm;
mm                439 mm/ksm.c       	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
mm                450 mm/ksm.c       static inline bool ksm_test_exit(struct mm_struct *mm)
mm                452 mm/ksm.c       	return atomic_read(&mm->mm_users) == 0;
mm                519 mm/ksm.c       static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
mm                523 mm/ksm.c       	if (ksm_test_exit(mm))
mm                525 mm/ksm.c       	vma = find_vma(mm, addr);
mm                535 mm/ksm.c       	struct mm_struct *mm = rmap_item->mm;
mm                545 mm/ksm.c       	down_read(&mm->mmap_sem);
mm                546 mm/ksm.c       	vma = find_mergeable_vma(mm, addr);
mm                549 mm/ksm.c       	up_read(&mm->mmap_sem);
mm                554 mm/ksm.c       	struct mm_struct *mm = rmap_item->mm;
mm                559 mm/ksm.c       	down_read(&mm->mmap_sem);
mm                560 mm/ksm.c       	vma = find_mergeable_vma(mm, addr);
mm                575 mm/ksm.c       	up_read(&mm->mmap_sem);
mm                967 mm/ksm.c       	struct mm_struct *mm;
mm                978 mm/ksm.c       		mm = mm_slot->mm;
mm                979 mm/ksm.c       		down_read(&mm->mmap_sem);
mm                980 mm/ksm.c       		for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm                981 mm/ksm.c       			if (ksm_test_exit(mm))
mm                992 mm/ksm.c       		up_read(&mm->mmap_sem);
mm                997 mm/ksm.c       		if (ksm_test_exit(mm)) {
mm               1003 mm/ksm.c       			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
mm               1004 mm/ksm.c       			mmdrop(mm);
mm               1015 mm/ksm.c       	up_read(&mm->mmap_sem);
mm               1035 mm/ksm.c       	struct mm_struct *mm = vma->vm_mm;
mm               1050 mm/ksm.c       	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
mm               1062 mm/ksm.c       						mm_tlb_flush_pending(mm)) {
mm               1087 mm/ksm.c       			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
mm               1097 mm/ksm.c       		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
mm               1122 mm/ksm.c       	struct mm_struct *mm = vma->vm_mm;
mm               1135 mm/ksm.c       	pmd = mm_find_pmd(mm, addr);
mm               1139 mm/ksm.c       	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
mm               1143 mm/ksm.c       	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
mm               1166 mm/ksm.c       		dec_mm_counter(mm, MM_ANONPAGES);
mm               1177 mm/ksm.c       	set_pte_at_notify(mm, addr, ptep, newpte);
mm               1279 mm/ksm.c       	struct mm_struct *mm = rmap_item->mm;
mm               1283 mm/ksm.c       	down_read(&mm->mmap_sem);
mm               1284 mm/ksm.c       	vma = find_mergeable_vma(mm, rmap_item->address);
mm               1299 mm/ksm.c       	up_read(&mm->mmap_sem);
mm               2037 mm/ksm.c       	struct mm_struct *mm = rmap_item->mm;
mm               2113 mm/ksm.c       		down_read(&mm->mmap_sem);
mm               2114 mm/ksm.c       		vma = find_mergeable_vma(mm, rmap_item->address);
mm               2125 mm/ksm.c       		up_read(&mm->mmap_sem);
mm               2216 mm/ksm.c       		rmap_item->mm = mm_slot->mm;
mm               2226 mm/ksm.c       	struct mm_struct *mm;
mm               2287 mm/ksm.c       	mm = slot->mm;
mm               2288 mm/ksm.c       	down_read(&mm->mmap_sem);
mm               2289 mm/ksm.c       	if (ksm_test_exit(mm))
mm               2292 mm/ksm.c       		vma = find_vma(mm, ksm_scan.address);
mm               2303 mm/ksm.c       			if (ksm_test_exit(mm))
mm               2322 mm/ksm.c       				up_read(&mm->mmap_sem);
mm               2331 mm/ksm.c       	if (ksm_test_exit(mm)) {
mm               2359 mm/ksm.c       		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
mm               2360 mm/ksm.c       		up_read(&mm->mmap_sem);
mm               2361 mm/ksm.c       		mmdrop(mm);
mm               2363 mm/ksm.c       		up_read(&mm->mmap_sem);
mm               2439 mm/ksm.c       	struct mm_struct *mm = vma->vm_mm;
mm               2464 mm/ksm.c       		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
mm               2465 mm/ksm.c       			err = __ksm_enter(mm);
mm               2490 mm/ksm.c       int __ksm_enter(struct mm_struct *mm)
mm               2503 mm/ksm.c       	insert_to_mm_slots_hash(mm, mm_slot);
mm               2520 mm/ksm.c       	set_bit(MMF_VM_MERGEABLE, &mm->flags);
mm               2521 mm/ksm.c       	mmgrab(mm);
mm               2529 mm/ksm.c       void __ksm_exit(struct mm_struct *mm)
mm               2544 mm/ksm.c       	mm_slot = get_mm_slot(mm);
mm               2559 mm/ksm.c       		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
mm               2560 mm/ksm.c       		mmdrop(mm);
mm               2562 mm/ksm.c       		down_write(&mm->mmap_sem);
mm               2563 mm/ksm.c       		up_write(&mm->mmap_sem);
mm               2641 mm/ksm.c       			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
mm                 69 mm/madvise.c   	struct mm_struct *mm = vma->vm_mm;
mm                135 mm/madvise.c   	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
mm                146 mm/madvise.c   		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
mm                150 mm/madvise.c   		error = __split_vma(mm, vma, start, 1);
mm                156 mm/madvise.c   		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
mm                160 mm/madvise.c   		error = __split_vma(mm, vma, end, 0);
mm                291 mm/madvise.c   	up_read(&current->mm->mmap_sem);
mm                296 mm/madvise.c   	down_read(&current->mm->mmap_sem);
mm                307 mm/madvise.c   	struct mm_struct *mm = tlb->mm;
mm                361 mm/madvise.c   			set_pmd_at(mm, addr, pmd, orig_pmd);
mm                389 mm/madvise.c   	flush_tlb_batched_pending(mm);
mm                420 mm/madvise.c   				pte_offset_map_lock(mm, pmd, addr, &ptl);
mm                425 mm/madvise.c   			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
mm                438 mm/madvise.c   			ptent = ptep_get_and_clear_full(mm, addr, pte,
mm                441 mm/madvise.c   			set_pte_at(mm, addr, pte, ptent);
mm                495 mm/madvise.c   	struct mm_struct *mm = vma->vm_mm;
mm                503 mm/madvise.c   	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
mm                544 mm/madvise.c   	struct mm_struct *mm = vma->vm_mm;
mm                555 mm/madvise.c   	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
mm                567 mm/madvise.c   	struct mm_struct *mm = tlb->mm;
mm                584 mm/madvise.c   	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
mm                585 mm/madvise.c   	flush_tlb_batched_pending(mm);
mm                605 mm/madvise.c   			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
mm                630 mm/madvise.c   				pte_offset_map_lock(mm, pmd, addr, &ptl);
mm                635 mm/madvise.c   			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
mm                671 mm/madvise.c   			ptent = ptep_get_and_clear_full(mm, addr, pte,
mm                676 mm/madvise.c   			set_pte_at(mm, addr, pte, ptent);
mm                683 mm/madvise.c   		if (current->mm == mm)
mm                684 mm/madvise.c   			sync_mm_rss(mm);
mm                686 mm/madvise.c   		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
mm                702 mm/madvise.c   	struct mm_struct *mm = vma->vm_mm;
mm                716 mm/madvise.c   	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
mm                720 mm/madvise.c   	tlb_gather_mmu(&tlb, mm, range.start, range.end);
mm                721 mm/madvise.c   	update_hiwater_rss(mm);
mm                772 mm/madvise.c   		down_read(&current->mm->mmap_sem);
mm                773 mm/madvise.c   		vma = find_vma(current->mm, start);
mm                854 mm/madvise.c   		up_read(&current->mm->mmap_sem);
mm                860 mm/madvise.c   	down_read(&current->mm->mmap_sem);
mm               1091 mm/madvise.c   		if (down_write_killable(&current->mm->mmap_sem))
mm               1094 mm/madvise.c   		down_read(&current->mm->mmap_sem);
mm               1102 mm/madvise.c   	vma = find_vma_prev(current->mm, start, &prev);
mm               1139 mm/madvise.c   			vma = find_vma(current->mm, start);
mm               1144 mm/madvise.c   		up_write(&current->mm->mmap_sem);
mm               1146 mm/madvise.c   		up_read(&current->mm->mmap_sem);
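
The mm/madvise.c entries above call __split_vma() at the range start and end before applying new flags, so the change only ever covers whole VMAs; the mm/mempolicy.c and mm/mlock.c entries below follow the same split-then-update shape. A minimal sketch of splitting a range node at an interior boundary; struct area and split_at() are illustrative.

    #include <stdlib.h>
    #include <stdio.h>

    struct area { unsigned long start, end; struct area *next; };

    /* Before changing flags on [start, end), split any area that
     * straddles a boundary so the change applies to whole areas only. */
    static void split_at(struct area *a, unsigned long addr)
    {
            struct area *tail;

            if (addr <= a->start || addr >= a->end)
                    return;                 /* boundary already aligned */
            tail = malloc(sizeof(*tail));
            if (!tail)
                    return;
            tail->start = addr;
            tail->end = a->end;
            tail->next = a->next;
            a->end = addr;
            a->next = tail;
    }

    int main(void)
    {
            struct area a = { 0x1000, 0x9000, NULL };

            split_at(&a, 0x3000);           /* split at the range start */
            split_at(a.next, 0x6000);       /* split at the range end */
            for (struct area *p = &a; p; p = p->next)
                    printf("[%#lx, %#lx)\n", p->start, p->end);
            return 0;
    }
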
mm                190 mm/memcontrol.c 	struct mm_struct  *mm;
mm                955 mm/memcontrol.c struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
mm                969 mm/memcontrol.c 		if (unlikely(!mm))
mm                972 mm/memcontrol.c 			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
mm               1018 mm/memcontrol.c 	return get_mem_cgroup_from_mm(current->mm);
mm               2509 mm/memcontrol.c 	memcg = get_mem_cgroup_from_mm(current->mm);
mm               2910 mm/memcontrol.c 	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
mm               5714 mm/memcontrol.c static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
mm               5718 mm/memcontrol.c 	down_read(&mm->mmap_sem);
mm               5719 mm/memcontrol.c 	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
mm               5720 mm/memcontrol.c 	up_read(&mm->mmap_sem);
mm               5728 mm/memcontrol.c static int mem_cgroup_precharge_mc(struct mm_struct *mm)
mm               5730 mm/memcontrol.c 	unsigned long precharge = mem_cgroup_count_precharge(mm);
mm               5783 mm/memcontrol.c 	struct mm_struct *mm = mc.mm;
mm               5794 mm/memcontrol.c 	mc.mm = NULL;
mm               5797 mm/memcontrol.c 	mmput(mm);
mm               5806 mm/memcontrol.c 	struct mm_struct *mm;
mm               5842 mm/memcontrol.c 	mm = get_task_mm(p);
mm               5843 mm/memcontrol.c 	if (!mm)
mm               5846 mm/memcontrol.c 	if (mm->owner == p) {
mm               5854 mm/memcontrol.c 		mc.mm = mm;
mm               5861 mm/memcontrol.c 		ret = mem_cgroup_precharge_mc(mm);
mm               5865 mm/memcontrol.c 		mmput(mm);
mm               6003 mm/memcontrol.c 	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
mm               6019 mm/memcontrol.c 	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
mm               6022 mm/memcontrol.c 	up_read(&mc.mm->mmap_sem);
mm               6513 mm/memcontrol.c int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
mm               6549 mm/memcontrol.c 		memcg = get_mem_cgroup_from_mm(mm);
mm               6559 mm/memcontrol.c int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
mm               6566 mm/memcontrol.c 	ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
mm                218 mm/memory-failure.c 	if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
mm                423 mm/memory-failure.c 	if (!tsk->mm)
mm                463 mm/memory-failure.c 			if (vma->vm_mm == t->mm)
mm                498 mm/memory-failure.c 			if (vma->vm_mm == t->mm)
mm                146 mm/memory.c    void sync_mm_rss(struct mm_struct *mm)
mm                152 mm/memory.c    			add_mm_counter(mm, i, current->rss_stat.count[i]);
mm                159 mm/memory.c    static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
mm                163 mm/memory.c    	if (likely(task->mm == mm))
mm                166 mm/memory.c    		add_mm_counter(mm, member, val);
mm                168 mm/memory.c    #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
mm                169 mm/memory.c    #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
mm                178 mm/memory.c    		sync_mm_rss(task->mm);
mm                182 mm/memory.c    #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
mm                183 mm/memory.c    #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
mm                201 mm/memory.c    	mm_dec_nr_ptes(tlb->mm);
mm                235 mm/memory.c    	mm_dec_nr_pmds(tlb->mm);
mm                269 mm/memory.c    	mm_dec_nr_puds(tlb->mm);
mm                361 mm/memory.c    	pgd = pgd_offset(tlb->mm, addr);
mm                405 mm/memory.c    int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
mm                408 mm/memory.c    	pgtable_t new = pte_alloc_one(mm);
mm                427 mm/memory.c    	ptl = pmd_lock(mm, pmd);
mm                429 mm/memory.c    		mm_inc_nr_ptes(mm);
mm                430 mm/memory.c    		pmd_populate(mm, pmd, new);
mm                435 mm/memory.c    		pte_free(mm, new);
mm                463 mm/memory.c    static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
mm                467 mm/memory.c    	if (current->mm == mm)
mm                468 mm/memory.c    		sync_mm_rss(mm);
mm                471 mm/memory.c    			add_mm_counter(mm, i, rss[i]);
mm               1009 mm/memory.c    	struct mm_struct *mm = tlb->mm;
mm               1020 mm/memory.c    	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
mm               1022 mm/memory.c    	flush_tlb_batched_pending(mm);
mm               1046 mm/memory.c    			ptent = ptep_get_and_clear_full(mm, addr, pte,
mm               1088 mm/memory.c    			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
mm               1109 mm/memory.c    		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
mm               1112 mm/memory.c    	add_mm_rss_vec(mm, rss);
mm               1187 mm/memory.c    				VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
mm               1392 mm/memory.c    pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
mm               1400 mm/memory.c    	pgd = pgd_offset(mm, addr);
mm               1401 mm/memory.c    	p4d = p4d_alloc(mm, pgd, addr);
mm               1404 mm/memory.c    	pud = pud_alloc(mm, p4d, addr);
mm               1407 mm/memory.c    	pmd = pmd_alloc(mm, pud, addr);
mm               1412 mm/memory.c    	return pte_alloc_map_lock(mm, pmd, addr, ptl);
mm               1425 mm/memory.c    	struct mm_struct *mm = vma->vm_mm;
mm               1435 mm/memory.c    	pte = get_locked_pte(mm, addr, &ptl);
mm               1444 mm/memory.c    	inc_mm_counter_fast(mm, mm_counter_file(page));
mm               1446 mm/memory.c    	set_pte_at(mm, addr, pte, mk_pte(page, prot));
mm               1584 mm/memory.c    	struct mm_struct *mm = vma->vm_mm;
mm               1588 mm/memory.c    	pte = get_locked_pte(mm, addr, &ptl);
mm               1626 mm/memory.c    	set_pte_at(mm, addr, pte, entry);
mm               1791 mm/memory.c    static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
mm               1799 mm/memory.c    	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
mm               1809 mm/memory.c    		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
mm               1817 mm/memory.c    static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
mm               1826 mm/memory.c    	pmd = pmd_alloc(mm, pud, addr);
mm               1832 mm/memory.c    		err = remap_pte_range(mm, pmd, addr, next,
mm               1840 mm/memory.c    static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
mm               1849 mm/memory.c    	pud = pud_alloc(mm, p4d, addr);
mm               1854 mm/memory.c    		err = remap_pmd_range(mm, pud, addr, next,
mm               1862 mm/memory.c    static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
mm               1871 mm/memory.c    	p4d = p4d_alloc(mm, pgd, addr);
mm               1876 mm/memory.c    		err = remap_pud_range(mm, p4d, addr, next,
mm               1902 mm/memory.c    	struct mm_struct *mm = vma->vm_mm;
mm               1938 mm/memory.c    	pgd = pgd_offset(mm, addr);
mm               1942 mm/memory.c    		err = remap_p4d_range(mm, pgd, addr, next,
mm               2004 mm/memory.c    static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
mm               2012 mm/memory.c    	pte = (mm == &init_mm) ?
mm               2014 mm/memory.c    		pte_alloc_map_lock(mm, pmd, addr, &ptl);
mm               2030 mm/memory.c    	if (mm != &init_mm)
mm               2035 mm/memory.c    static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
mm               2045 mm/memory.c    	pmd = pmd_alloc(mm, pud, addr);
mm               2050 mm/memory.c    		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
mm               2057 mm/memory.c    static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
mm               2065 mm/memory.c    	pud = pud_alloc(mm, p4d, addr);
mm               2070 mm/memory.c    		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
mm               2077 mm/memory.c    static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
mm               2085 mm/memory.c    	p4d = p4d_alloc(mm, pgd, addr);
mm               2090 mm/memory.c    		err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
mm               2101 mm/memory.c    int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
mm               2112 mm/memory.c    	pgd = pgd_offset(mm, addr);
mm               2115 mm/memory.c    		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
mm               2132 mm/memory.c    static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
mm               2138 mm/memory.c    		spinlock_t *ptl = pte_lockptr(mm, pmd);
mm               2324 mm/memory.c    	struct mm_struct *mm = vma->vm_mm;
mm               2348 mm/memory.c    	if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
mm               2353 mm/memory.c    	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
mm               2361 mm/memory.c    	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
mm               2365 mm/memory.c    				dec_mm_counter_fast(mm,
mm               2367 mm/memory.c    				inc_mm_counter_fast(mm, MM_ANONPAGES);
mm               2370 mm/memory.c    			inc_mm_counter_fast(mm, MM_ANONPAGES);
mm               2390 mm/memory.c    		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
mm               3920 mm/memory.c    	struct mm_struct *mm = vma->vm_mm;
mm               3925 mm/memory.c    	pgd = pgd_offset(mm, address);
mm               3926 mm/memory.c    	p4d = p4d_alloc(mm, pgd, address);
mm               3930 mm/memory.c    	vmf.pud = pud_alloc(mm, p4d, address);
mm               3956 mm/memory.c    	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
mm               3971 mm/memory.c    				pmd_migration_entry_wait(mm, vmf.pmd);
mm               4049 mm/memory.c    int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
mm               4051 mm/memory.c    	p4d_t *new = p4d_alloc_one(mm, address);
mm               4057 mm/memory.c    	spin_lock(&mm->page_table_lock);
mm               4059 mm/memory.c    		p4d_free(mm, new);
mm               4061 mm/memory.c    		pgd_populate(mm, pgd, new);
mm               4062 mm/memory.c    	spin_unlock(&mm->page_table_lock);
mm               4072 mm/memory.c    int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
mm               4074 mm/memory.c    	pud_t *new = pud_alloc_one(mm, address);
mm               4080 mm/memory.c    	spin_lock(&mm->page_table_lock);
mm               4083 mm/memory.c    		mm_inc_nr_puds(mm);
mm               4084 mm/memory.c    		p4d_populate(mm, p4d, new);
mm               4086 mm/memory.c    		pud_free(mm, new);
mm               4089 mm/memory.c    		mm_inc_nr_puds(mm);
mm               4090 mm/memory.c    		pgd_populate(mm, p4d, new);
mm               4092 mm/memory.c    		pud_free(mm, new);
mm               4094 mm/memory.c    	spin_unlock(&mm->page_table_lock);
mm               4104 mm/memory.c    int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
mm               4107 mm/memory.c    	pmd_t *new = pmd_alloc_one(mm, address);
mm               4113 mm/memory.c    	ptl = pud_lock(mm, pud);
mm               4116 mm/memory.c    		mm_inc_nr_pmds(mm);
mm               4117 mm/memory.c    		pud_populate(mm, pud, new);
mm               4119 mm/memory.c    		pmd_free(mm, new);
mm               4122 mm/memory.c    		mm_inc_nr_pmds(mm);
mm               4123 mm/memory.c    		pgd_populate(mm, pud, new);
mm               4125 mm/memory.c    		pmd_free(mm, new);
mm               4132 mm/memory.c    static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
mm               4142 mm/memory.c    	pgd = pgd_offset(mm, address);
mm               4163 mm/memory.c    						NULL, mm, address & PMD_MASK,
mm               4167 mm/memory.c    		*ptlp = pmd_lock(mm, pmd);
mm               4181 mm/memory.c    		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
mm               4186 mm/memory.c    	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
mm               4199 mm/memory.c    static inline int follow_pte(struct mm_struct *mm, unsigned long address,
mm               4206 mm/memory.c    			   !(res = __follow_pte_pmd(mm, address, NULL,
mm               4211 mm/memory.c    int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
mm               4219 mm/memory.c    			   !(res = __follow_pte_pmd(mm, address, range,
mm               4313 mm/memory.c    int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
mm               4320 mm/memory.c    	if (down_read_killable(&mm->mmap_sem))
mm               4329 mm/memory.c    		ret = get_user_pages_remote(tsk, mm, addr, 1,
mm               4339 mm/memory.c    			vma = find_vma(mm, addr);
mm               4371 mm/memory.c    	up_read(&mm->mmap_sem);
mm               4388 mm/memory.c    int access_remote_vm(struct mm_struct *mm, unsigned long addr,
mm               4391 mm/memory.c    	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
mm               4402 mm/memory.c    	struct mm_struct *mm;
mm               4405 mm/memory.c    	mm = get_task_mm(tsk);
mm               4406 mm/memory.c    	if (!mm)
mm               4409 mm/memory.c    	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
mm               4411 mm/memory.c    	mmput(mm);
mm               4422 mm/memory.c    	struct mm_struct *mm = current->mm;
mm               4428 mm/memory.c    	if (!down_read_trylock(&mm->mmap_sem))
mm               4431 mm/memory.c    	vma = find_vma(mm, ip);
mm               4447 mm/memory.c    	up_read(&mm->mmap_sem);
mm               4465 mm/memory.c    	if (current->mm)
mm               4466 mm/memory.c    		might_lock_read(&current->mm->mmap_sem);
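
sync_mm_rss() and add_mm_counter_fast() in the mm/memory.c entries batch RSS updates in a per-task cache and fold them into the shared mm counters only periodically, trading momentary inaccuracy for fewer contended updates. A standalone sketch of that batching with a thread-local delta and an atomic shared counter; SYNC_THRESHOLD is an illustrative stand-in for the kernel's sync threshold.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long shared_rss;                  /* the per-mm counter */
    static _Thread_local long local_delta;          /* the per-task cache */

    #define SYNC_THRESHOLD 64

    static void rss_add(long pages)
    {
            local_delta += pages;
            if (local_delta >= SYNC_THRESHOLD || local_delta <= -SYNC_THRESHOLD) {
                    atomic_fetch_add(&shared_rss, local_delta);
                    local_delta = 0;
            }
    }

    static void rss_sync(void)      /* the sync_mm_rss() moment */
    {
            if (local_delta) {
                    atomic_fetch_add(&shared_rss, local_delta);
                    local_delta = 0;
            }
    }

    int main(void)
    {
            for (int i = 0; i < 100; i++)
                    rss_add(1);
            rss_sync();
            printf("rss = %ld pages\n", atomic_load(&shared_rss));
            return 0;
    }
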
mm                378 mm/mempolicy.c void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
mm                382 mm/mempolicy.c 	down_write(&mm->mmap_sem);
mm                383 mm/mempolicy.c 	for (vma = mm->mmap; vma; vma = vma->vm_next)
mm                385 mm/mempolicy.c 	up_write(&mm->mmap_sem);
mm                513 mm/mempolicy.c 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
mm                565 mm/mempolicy.c 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
mm                680 mm/mempolicy.c queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
mm                691 mm/mempolicy.c 	return walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
mm                731 mm/mempolicy.c static int mbind_range(struct mm_struct *mm, unsigned long start,
mm                742 mm/mempolicy.c 	vma = find_vma(mm, start);
mm                760 mm/mempolicy.c 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
mm                854 mm/mempolicy.c static int lookup_node(struct mm_struct *mm, unsigned long addr)
mm                866 mm/mempolicy.c 		up_read(&mm->mmap_sem);
mm                875 mm/mempolicy.c 	struct mm_struct *mm = current->mm;
mm                899 mm/mempolicy.c 		down_read(&mm->mmap_sem);
mm                900 mm/mempolicy.c 		vma = find_vma_intersection(mm, addr, addr+1);
mm                902 mm/mempolicy.c 			up_read(&mm->mmap_sem);
mm                926 mm/mempolicy.c 			err = lookup_node(mm, addr);
mm                961 mm/mempolicy.c 		up_read(&mm->mmap_sem);
mm               1024 mm/mempolicy.c static int migrate_to_node(struct mm_struct *mm, int source, int dest,
mm               1040 mm/mempolicy.c 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
mm               1059 mm/mempolicy.c int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
mm               1070 mm/mempolicy.c 	down_read(&mm->mmap_sem);
mm               1145 mm/mempolicy.c 		err = migrate_to_node(mm, source, dest, flags);
mm               1151 mm/mempolicy.c 	up_read(&mm->mmap_sem);
mm               1170 mm/mempolicy.c 	vma = find_vma(current->mm, start);
mm               1205 mm/mempolicy.c int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
mm               1221 mm/mempolicy.c 	struct mm_struct *mm = current->mm;
mm               1274 mm/mempolicy.c 			down_write(&mm->mmap_sem);
mm               1279 mm/mempolicy.c 				up_write(&mm->mmap_sem);
mm               1287 mm/mempolicy.c 	ret = queue_pages_range(mm, start, end, nmask,
mm               1295 mm/mempolicy.c 	err = mbind_range(mm, start, end, new);
mm               1316 mm/mempolicy.c 	up_write(&mm->mmap_sem);
mm               1463 mm/mempolicy.c 	struct mm_struct *mm = NULL;
mm               1524 mm/mempolicy.c 	mm = get_task_mm(task);
mm               1527 mm/mempolicy.c 	if (!mm) {
mm               1532 mm/mempolicy.c 	err = do_migrate_pages(mm, old, new,
mm               1535 mm/mempolicy.c 	mmput(mm);
mm                307 mm/migrate.c   void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
mm                339 mm/migrate.c   void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
mm                342 mm/migrate.c   	spinlock_t *ptl = pte_lockptr(mm, pmd);
mm                344 mm/migrate.c   	__migration_entry_wait(mm, ptep, ptl);
mm                348 mm/migrate.c   		struct mm_struct *mm, pte_t *pte)
mm                350 mm/migrate.c   	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
mm                351 mm/migrate.c   	__migration_entry_wait(mm, pte, ptl);
mm                355 mm/migrate.c   void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
mm                360 mm/migrate.c   	ptl = pmd_lock(mm, pmd);
mm               1501 mm/migrate.c   static int do_move_pages_to_node(struct mm_struct *mm,
mm               1525 mm/migrate.c   static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
mm               1533 mm/migrate.c   	down_read(&mm->mmap_sem);
mm               1535 mm/migrate.c   	vma = find_vma(mm, addr);
mm               1586 mm/migrate.c   	up_read(&mm->mmap_sem);
mm               1594 mm/migrate.c   static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
mm               1633 mm/migrate.c   			err = do_move_pages_to_node(mm, &pagelist, current_node);
mm               1658 mm/migrate.c   		err = add_page_for_migration(mm, addr, current_node,
mm               1676 mm/migrate.c   		err = do_move_pages_to_node(mm, &pagelist, current_node);
mm               1694 mm/migrate.c   	err1 = do_move_pages_to_node(mm, &pagelist, current_node);
mm               1713 mm/migrate.c   static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
mm               1718 mm/migrate.c   	down_read(&mm->mmap_sem);
mm               1726 mm/migrate.c   		vma = find_vma(mm, addr);
mm               1745 mm/migrate.c   	up_read(&mm->mmap_sem);
mm               1752 mm/migrate.c   static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
mm               1770 mm/migrate.c   		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
mm               1792 mm/migrate.c   	struct mm_struct *mm;
mm               1828 mm/migrate.c   	mm = get_task_mm(task);
mm               1831 mm/migrate.c   	if (!mm)
mm               1835 mm/migrate.c   		err = do_pages_move(mm, task_nodes, nr_pages, pages,
mm               1838 mm/migrate.c   		err = do_pages_stat(mm, nr_pages, pages, status);
mm               1840 mm/migrate.c   	mmput(mm);
mm               2024 mm/migrate.c   int migrate_misplaced_transhuge_page(struct mm_struct *mm,
mm               2064 mm/migrate.c   	ptl = pmd_lock(mm, pmd);
mm               2109 mm/migrate.c   	set_pmd_at(mm, start, pmd, entry);
mm               2138 mm/migrate.c   	ptl = pmd_lock(mm, pmd);
mm               2141 mm/migrate.c   		set_pmd_at(mm, start, pmd, entry);
mm               2195 mm/migrate.c   	struct mm_struct *mm = vma->vm_mm;
mm               2207 mm/migrate.c   		ptl = pmd_lock(mm, pmdp);
mm               2243 mm/migrate.c   	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
mm               2316 mm/migrate.c   			ptep_get_and_clear(mm, addr, ptep);
mm               2324 mm/migrate.c   			set_pte_at(mm, addr, ptep, swp_pte);
mm               2710 mm/migrate.c   	struct mm_struct *mm = vma->vm_mm;
mm               2725 mm/migrate.c   	pgdp = pgd_offset(mm, addr);
mm               2726 mm/migrate.c   	p4dp = p4d_alloc(mm, pgdp, addr);
mm               2729 mm/migrate.c   	pudp = pud_alloc(mm, p4dp, addr);
mm               2732 mm/migrate.c   	pmdp = pmd_alloc(mm, pudp, addr);
mm               2749 mm/migrate.c   	if (pte_alloc(mm, pmdp))
mm               2781 mm/migrate.c   	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
mm               2808 mm/migrate.c   	inc_mm_counter(mm, MM_ANONPAGES);
mm               2818 mm/migrate.c   		set_pte_at_notify(mm, addr, ptep, entry);
mm               2822 mm/migrate.c   		set_pte_at(mm, addr, ptep, entry);
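
do_pages_move() in the mm/migrate.c entries batches user requests per destination node, flushing the accumulated pagelist through do_move_pages_to_node() whenever the target node changes and once more at the end. A standalone sketch of that flush-on-key-change loop; flush() and the target array are illustrative.

    #include <stdio.h>

    static void flush(int node, int count)
    {
            if (count)
                    printf("migrate %d pages -> node %d\n", count, node);
    }

    int main(void)
    {
            int target[] = { 0, 0, 1, 1, 1, 0 };    /* per-page destination nodes */
            int current_node = -1, count = 0;

            for (int i = 0; i < 6; i++) {
                    if (target[i] != current_node) {
                            flush(current_node, count);     /* batch ends here */
                            current_node = target[i];
                            count = 0;
                    }
                    count++;
            }
            flush(current_node, count);             /* trailing batch */
            return 0;
    }
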
mm                143 mm/mincore.c   	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
mm                213 mm/mincore.c   	vma = find_vma(current->mm, addr);
mm                286 mm/mincore.c   		down_read(&current->mm->mmap_sem);
mm                288 mm/mincore.c   		up_read(&current->mm->mmap_sem);
mm                522 mm/mlock.c     	struct mm_struct *mm = vma->vm_mm;
mm                530 mm/mlock.c     	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
mm                536 mm/mlock.c     	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
mm                545 mm/mlock.c     		ret = split_vma(mm, vma, start, 1);
mm                551 mm/mlock.c     		ret = split_vma(mm, vma, end, 0);
mm                565 mm/mlock.c     	mm->locked_vm += nr_pages;
mm                597 mm/mlock.c     	vma = find_vma(current->mm, start);
mm                639 mm/mlock.c     static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
mm                645 mm/mlock.c     	if (mm == NULL)
mm                646 mm/mlock.c     		mm = current->mm;
mm                648 mm/mlock.c     	vma = find_vma(mm, start);
mm                650 mm/mlock.c     		vma = mm->mmap;
mm                689 mm/mlock.c     	if (down_write_killable(&current->mm->mmap_sem))
mm                692 mm/mlock.c     	locked += current->mm->locked_vm;
mm                700 mm/mlock.c     		locked -= count_mm_mlocked_page_nr(current->mm,
mm                708 mm/mlock.c     	up_write(&current->mm->mmap_sem);
mm                745 mm/mlock.c     	if (down_write_killable(&current->mm->mmap_sem))
mm                748 mm/mlock.c     	up_write(&current->mm->mmap_sem);
mm                768 mm/mlock.c     	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
mm                770 mm/mlock.c     		current->mm->def_flags |= VM_LOCKED;
mm                773 mm/mlock.c     			current->mm->def_flags |= VM_LOCKONFAULT;
mm                785 mm/mlock.c     	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
mm                814 mm/mlock.c     	if (down_write_killable(&current->mm->mmap_sem))
mm                818 mm/mlock.c     	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
mm                821 mm/mlock.c     	up_write(&current->mm->mmap_sem);
mm                832 mm/mlock.c     	if (down_write_killable(&current->mm->mmap_sem))
mm                835 mm/mlock.c     	up_write(&current->mm->mmap_sem);
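
The do_mlock() entries above charge a request against the memlock limit, but first subtract, via count_mm_mlocked_page_nr(), pages in the range that are already locked, so re-locking is not double-counted. A small sketch of that accounting; may_lock() and its parameters are illustrative, and the capable()-override path is omitted.

    #include <stdbool.h>
    #include <stdio.h>

    /* locked = new request + pages locked elsewhere, minus the overlap
     * between the request and what is already locked in that range. */
    static bool may_lock(unsigned long request, unsigned long already_locked,
                         unsigned long overlap, unsigned long limit)
    {
            unsigned long locked = request + already_locked - overlap;

            return locked <= limit;
    }

    int main(void)
    {
            /* A 16-page request with 8 pages locked overall, 4 of them
             * inside the new range, against a 20-page limit. */
            printf("allowed: %s\n", may_lock(16, 8, 4, 20) ? "yes" : "no");
            return 0;
    }
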
mm                 76 mm/mmap.c      static void unmap_region(struct mm_struct *mm,
mm                191 mm/mmap.c      	struct mm_struct *mm = current->mm;
mm                198 mm/mmap.c      	if (down_write_killable(&mm->mmap_sem))
mm                201 mm/mmap.c      	origbrk = mm->brk;
mm                210 mm/mmap.c      		min_brk = mm->start_brk;
mm                212 mm/mmap.c      		min_brk = mm->end_data;
mm                214 mm/mmap.c      	min_brk = mm->start_brk;
mm                225 mm/mmap.c      	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
mm                226 mm/mmap.c      			      mm->end_data, mm->start_data))
mm                230 mm/mmap.c      	oldbrk = PAGE_ALIGN(mm->brk);
mm                232 mm/mmap.c      		mm->brk = brk;
mm                240 mm/mmap.c      	if (brk <= mm->brk) {
mm                248 mm/mmap.c      		mm->brk = brk;
mm                249 mm/mmap.c      		ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
mm                251 mm/mmap.c      			mm->brk = origbrk;
mm                260 mm/mmap.c      	next = find_vma(mm, oldbrk);
mm                267 mm/mmap.c      	mm->brk = brk;
mm                270 mm/mmap.c      	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
mm                272 mm/mmap.c      		up_read(&mm->mmap_sem);
mm                274 mm/mmap.c      		up_write(&mm->mmap_sem);
mm                275 mm/mmap.c      	userfaultfd_unmap_complete(mm, &uf);
mm                282 mm/mmap.c      	up_write(&mm->mmap_sem);
mm                326 mm/mmap.c      static int browse_rb(struct mm_struct *mm)
mm                328 mm/mmap.c      	struct rb_root *root = &mm->mm_rb;
mm                351 mm/mmap.c      		spin_lock(&mm->page_table_lock);
mm                358 mm/mmap.c      		spin_unlock(&mm->page_table_lock);
mm                387 mm/mmap.c      static void validate_mm(struct mm_struct *mm)
mm                392 mm/mmap.c      	struct vm_area_struct *vma = mm->mmap;
mm                409 mm/mmap.c      	if (i != mm->map_count) {
mm                410 mm/mmap.c      		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
mm                413 mm/mmap.c      	if (highest_address != mm->highest_vm_end) {
mm                415 mm/mmap.c      			  mm->highest_vm_end, highest_address);
mm                418 mm/mmap.c      	i = browse_rb(mm);
mm                419 mm/mmap.c      	if (i != mm->map_count) {
mm                421 mm/mmap.c      			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
mm                424 mm/mmap.c      	VM_BUG_ON_MM(bug, mm);
mm                428 mm/mmap.c      #define validate_mm(mm) do { } while (0)
mm                526 mm/mmap.c      static int find_vma_links(struct mm_struct *mm, unsigned long addr,
mm                532 mm/mmap.c      	__rb_link = &mm->mm_rb.rb_node;
mm                560 mm/mmap.c      static unsigned long count_vma_pages_range(struct mm_struct *mm,
mm                567 mm/mmap.c      	vma = find_vma_intersection(mm, addr, end);
mm                588 mm/mmap.c      void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
mm                595 mm/mmap.c      		mm->highest_vm_end = vm_end_gap(vma);
mm                609 mm/mmap.c      	vma_rb_insert(vma, &mm->mm_rb);
mm                632 mm/mmap.c      __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
mm                636 mm/mmap.c      	__vma_link_list(mm, vma, prev, rb_parent);
mm                637 mm/mmap.c      	__vma_link_rb(mm, vma, rb_link, rb_parent);
mm                640 mm/mmap.c      static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
mm                651 mm/mmap.c      	__vma_link(mm, vma, prev, rb_link, rb_parent);
mm                657 mm/mmap.c      	mm->map_count++;
mm                658 mm/mmap.c      	validate_mm(mm);
mm                665 mm/mmap.c      static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
mm                670 mm/mmap.c      	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
mm                673 mm/mmap.c      	__vma_link(mm, vma, prev, rb_link, rb_parent);
mm                674 mm/mmap.c      	mm->map_count++;
mm                677 mm/mmap.c      static __always_inline void __vma_unlink_common(struct mm_struct *mm,
mm                685 mm/mmap.c      	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
mm                694 mm/mmap.c      			mm->mmap = next;
mm                700 mm/mmap.c      	vmacache_invalidate(mm);
mm                703 mm/mmap.c      static inline void __vma_unlink_prev(struct mm_struct *mm,
mm                707 mm/mmap.c      	__vma_unlink_common(mm, vma, prev, true, vma);
mm                721 mm/mmap.c      	struct mm_struct *mm = vma->vm_mm;
mm                884 mm/mmap.c      			__vma_unlink_prev(mm, next, vma);
mm                895 mm/mmap.c      			__vma_unlink_common(mm, next, NULL, false, vma);
mm                904 mm/mmap.c      		__insert_vm_struct(mm, insert);
mm                910 mm/mmap.c      				mm->highest_vm_end = vm_end_gap(vma);
mm                939 mm/mmap.c      		mm->map_count--;
mm                995 mm/mmap.c      			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
mm               1001 mm/mmap.c      	validate_mm(mm);
mm               1135 mm/mmap.c      struct vm_area_struct *vma_merge(struct mm_struct *mm,
mm               1156 mm/mmap.c      		next = mm->mmap;
mm               1336 mm/mmap.c      static inline int mlock_future_check(struct mm_struct *mm,
mm               1345 mm/mmap.c      		locked += mm->locked_vm;
mm               1395 mm/mmap.c      	struct mm_struct *mm = current->mm;
mm               1430 mm/mmap.c      	if (mm->map_count > sysctl_max_map_count)
mm               1441 mm/mmap.c      		struct vm_area_struct *vma = find_vma(mm, addr);
mm               1448 mm/mmap.c      		pkey = execute_only_pkey(mm);
mm               1458 mm/mmap.c      			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
mm               1464 mm/mmap.c      	if (mlock_future_check(mm, vm_flags, len))
mm               1715 mm/mmap.c      	struct mm_struct *mm = current->mm;
mm               1722 mm/mmap.c      	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
mm               1729 mm/mmap.c      		nr_pages = count_vma_pages_range(mm, addr, addr + len);
mm               1731 mm/mmap.c      		if (!may_expand_vm(mm, vm_flags,
mm               1737 mm/mmap.c      	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
mm               1739 mm/mmap.c      		if (do_munmap(mm, addr, len, uf))
mm               1748 mm/mmap.c      		if (security_vm_enough_memory_mm(mm, charged))
mm               1756 mm/mmap.c      	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
mm               1766 mm/mmap.c      	vma = vm_area_alloc(mm);
mm               1819 mm/mmap.c      	vma_link(mm, vma, prev, rb_link, rb_parent);
mm               1831 mm/mmap.c      	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
mm               1835 mm/mmap.c      					vma == get_gate_vma(current->mm))
mm               1838 mm/mmap.c      			mm->locked_vm += (len >> PAGE_SHIFT);
mm               1862 mm/mmap.c      	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
mm               1887 mm/mmap.c      	struct mm_struct *mm = current->mm;
mm               1906 mm/mmap.c      	if (RB_EMPTY_ROOT(&mm->mm_rb))
mm               1908 mm/mmap.c      	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
mm               1962 mm/mmap.c      	gap_start = mm->highest_vm_end;
mm               1982 mm/mmap.c      	struct mm_struct *mm = current->mm;
mm               2005 mm/mmap.c      	gap_start = mm->highest_vm_end;
mm               2010 mm/mmap.c      	if (RB_EMPTY_ROOT(&mm->mm_rb))
mm               2012 mm/mmap.c      	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
mm               2104 mm/mmap.c      	struct mm_struct *mm = current->mm;
mm               2117 mm/mmap.c      		vma = find_vma_prev(mm, addr, &prev);
mm               2126 mm/mmap.c      	info.low_limit = mm->mmap_base;
mm               2144 mm/mmap.c      	struct mm_struct *mm = current->mm;
mm               2158 mm/mmap.c      		vma = find_vma_prev(mm, addr, &prev);
mm               2168 mm/mmap.c      	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
mm               2205 mm/mmap.c      	get_area = current->mm->get_unmapped_area;
mm               2235 mm/mmap.c      struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
mm               2241 mm/mmap.c      	vma = vmacache_find(mm, addr);
mm               2245 mm/mmap.c      	rb_node = mm->mm_rb.rb_node;
mm               2272 mm/mmap.c      find_vma_prev(struct mm_struct *mm, unsigned long addr,
mm               2277 mm/mmap.c      	vma = find_vma(mm, addr);
mm               2281 mm/mmap.c      		struct rb_node *rb_node = rb_last(&mm->mm_rb);
mm               2296 mm/mmap.c      	struct mm_struct *mm = vma->vm_mm;
mm               2300 mm/mmap.c      	if (!may_expand_vm(mm, vma->vm_flags, grow))
mm               2311 mm/mmap.c      		locked = mm->locked_vm + grow;
mm               2328 mm/mmap.c      	if (security_vm_enough_memory_mm(mm, grow))
mm               2341 mm/mmap.c      	struct mm_struct *mm = vma->vm_mm;
mm               2403 mm/mmap.c      				spin_lock(&mm->page_table_lock);
mm               2405 mm/mmap.c      					mm->locked_vm += grow;
mm               2406 mm/mmap.c      				vm_stat_account(mm, vma->vm_flags, grow);
mm               2413 mm/mmap.c      					mm->highest_vm_end = vm_end_gap(vma);
mm               2414 mm/mmap.c      				spin_unlock(&mm->page_table_lock);
mm               2422 mm/mmap.c      	validate_mm(mm);
mm               2433 mm/mmap.c      	struct mm_struct *mm = vma->vm_mm;
mm               2483 mm/mmap.c      				spin_lock(&mm->page_table_lock);
mm               2485 mm/mmap.c      					mm->locked_vm += grow;
mm               2486 mm/mmap.c      				vm_stat_account(mm, vma->vm_flags, grow);
mm               2492 mm/mmap.c      				spin_unlock(&mm->page_table_lock);
mm               2500 mm/mmap.c      	validate_mm(mm);
mm               2527 mm/mmap.c      find_extend_vma(struct mm_struct *mm, unsigned long addr)
mm               2532 mm/mmap.c      	vma = find_vma_prev(mm, addr, &prev);
mm               2536 mm/mmap.c      	if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
mm               2549 mm/mmap.c      find_extend_vma(struct mm_struct *mm, unsigned long addr)
mm               2555 mm/mmap.c      	vma = find_vma(mm, addr);
mm               2563 mm/mmap.c      	if (!mmget_still_valid(mm))
mm               2582 mm/mmap.c      static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
mm               2587 mm/mmap.c      	update_hiwater_vm(mm);
mm               2593 mm/mmap.c      		vm_stat_account(mm, vma->vm_flags, -nrpages);
mm               2597 mm/mmap.c      	validate_mm(mm);
mm               2605 mm/mmap.c      static void unmap_region(struct mm_struct *mm,
mm               2609 mm/mmap.c      	struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
mm               2613 mm/mmap.c      	tlb_gather_mmu(&tlb, mm, start, end);
mm               2614 mm/mmap.c      	update_hiwater_rss(mm);
mm               2626 mm/mmap.c      detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
mm               2632 mm/mmap.c      	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
mm               2635 mm/mmap.c      		vma_rb_erase(vma, &mm->mm_rb);
mm               2636 mm/mmap.c      		mm->map_count--;
mm               2645 mm/mmap.c      		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
mm               2649 mm/mmap.c      	vmacache_invalidate(mm);
mm               2656 mm/mmap.c      int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
mm               2720 mm/mmap.c      int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
mm               2723 mm/mmap.c      	if (mm->map_count >= sysctl_max_map_count)
mm               2726 mm/mmap.c      	return __split_vma(mm, vma, addr, new_below);
mm               2734 mm/mmap.c      int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
mm               2753 mm/mmap.c      	arch_unmap(mm, start, end);
mm               2756 mm/mmap.c      	vma = find_vma(mm, start);
mm               2781 mm/mmap.c      		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
mm               2784 mm/mmap.c      		error = __split_vma(mm, vma, start, 0);
mm               2791 mm/mmap.c      	last = find_vma(mm, end);
mm               2793 mm/mmap.c      		int error = __split_vma(mm, last, end, 1);
mm               2797 mm/mmap.c      	vma = prev ? prev->vm_next : mm->mmap;
mm               2817 mm/mmap.c      	if (mm->locked_vm) {
mm               2821 mm/mmap.c      				mm->locked_vm -= vma_pages(tmp);
mm               2830 mm/mmap.c      	detach_vmas_to_be_unmapped(mm, vma, prev, end);
mm               2833 mm/mmap.c      		downgrade_write(&mm->mmap_sem);
mm               2835 mm/mmap.c      	unmap_region(mm, vma, prev, start, end);
mm               2838 mm/mmap.c      	remove_vma_list(mm, vma);
mm               2843 mm/mmap.c      int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
mm               2846 mm/mmap.c      	return __do_munmap(mm, start, len, uf, false);
mm               2852 mm/mmap.c      	struct mm_struct *mm = current->mm;
mm               2855 mm/mmap.c      	if (down_write_killable(&mm->mmap_sem))
mm               2858 mm/mmap.c      	ret = __do_munmap(mm, start, len, &uf, downgrade);
mm               2865 mm/mmap.c      		up_read(&mm->mmap_sem);
mm               2868 mm/mmap.c      		up_write(&mm->mmap_sem);
mm               2870 mm/mmap.c      	userfaultfd_unmap_complete(mm, &uf);
mm               2895 mm/mmap.c      	struct mm_struct *mm = current->mm;
mm               2916 mm/mmap.c      	if (down_write_killable(&mm->mmap_sem))
mm               2919 mm/mmap.c      	vma = find_vma(mm, start);
mm               2979 mm/mmap.c      	up_write(&mm->mmap_sem);
mm               2994 mm/mmap.c      	struct mm_struct *mm = current->mm;
mm               3003 mm/mmap.c      	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
mm               3009 mm/mmap.c      	error = mlock_future_check(mm, mm->def_flags, len);
mm               3016 mm/mmap.c      	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
mm               3018 mm/mmap.c      		if (do_munmap(mm, addr, len, uf))
mm               3023 mm/mmap.c      	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
mm               3026 mm/mmap.c      	if (mm->map_count > sysctl_max_map_count)
mm               3029 mm/mmap.c      	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
mm               3033 mm/mmap.c      	vma = vma_merge(mm, prev, addr, addr + len, flags,
mm               3041 mm/mmap.c      	vma = vm_area_alloc(mm);
mm               3053 mm/mmap.c      	vma_link(mm, vma, prev, rb_link, rb_parent);
mm               3056 mm/mmap.c      	mm->total_vm += len >> PAGE_SHIFT;
mm               3057 mm/mmap.c      	mm->data_vm += len >> PAGE_SHIFT;
mm               3059 mm/mmap.c      		mm->locked_vm += (len >> PAGE_SHIFT);
mm               3066 mm/mmap.c      	struct mm_struct *mm = current->mm;
mm               3078 mm/mmap.c      	if (down_write_killable(&mm->mmap_sem))
mm               3082 mm/mmap.c      	populate = ((mm->def_flags & VM_LOCKED) != 0);
mm               3083 mm/mmap.c      	up_write(&mm->mmap_sem);
mm               3084 mm/mmap.c      	userfaultfd_unmap_complete(mm, &uf);
mm               3098 mm/mmap.c      void exit_mmap(struct mm_struct *mm)
mm               3105 mm/mmap.c      	mmu_notifier_release(mm);
mm               3107 mm/mmap.c      	if (unlikely(mm_is_oom_victim(mm))) {
mm               3124 mm/mmap.c      		(void)__oom_reap_task_mm(mm);
mm               3126 mm/mmap.c      		set_bit(MMF_OOM_SKIP, &mm->flags);
mm               3127 mm/mmap.c      		down_write(&mm->mmap_sem);
mm               3128 mm/mmap.c      		up_write(&mm->mmap_sem);
mm               3131 mm/mmap.c      	if (mm->locked_vm) {
mm               3132 mm/mmap.c      		vma = mm->mmap;
mm               3140 mm/mmap.c      	arch_exit_mmap(mm);
mm               3142 mm/mmap.c      	vma = mm->mmap;
mm               3147 mm/mmap.c      	flush_cache_mm(mm);
mm               3148 mm/mmap.c      	tlb_gather_mmu(&tlb, mm, 0, -1);
mm               3171 mm/mmap.c      int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
mm               3176 mm/mmap.c      	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
mm               3180 mm/mmap.c      	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
mm               3200 mm/mmap.c      	vma_link(mm, vma, prev, rb_link, rb_parent);
mm               3214 mm/mmap.c      	struct mm_struct *mm = vma->vm_mm;
mm               3228 mm/mmap.c      	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
mm               3230 mm/mmap.c      	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
mm               3270 mm/mmap.c      		vma_link(mm, new_vma, prev, rb_link, rb_parent);
mm               3287 mm/mmap.c      bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
mm               3289 mm/mmap.c      	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
mm               3293 mm/mmap.c      	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
mm               3296 mm/mmap.c      		    mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
mm               3301 mm/mmap.c      			     (mm->data_vm + npages) << PAGE_SHIFT,
mm               3312 mm/mmap.c      void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
mm               3314 mm/mmap.c      	mm->total_vm += npages;
mm               3317 mm/mmap.c      		mm->exec_vm += npages;
mm               3319 mm/mmap.c      		mm->stack_vm += npages;
mm               3321 mm/mmap.c      		mm->data_vm += npages;
mm               3342 mm/mmap.c      	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
mm               3394 mm/mmap.c      	struct mm_struct *mm,
mm               3402 mm/mmap.c      	vma = vm_area_alloc(mm);
mm               3409 mm/mmap.c      	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
mm               3415 mm/mmap.c      	ret = insert_vm_struct(mm, vma);
mm               3419 mm/mmap.c      	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
mm               3448 mm/mmap.c      	struct mm_struct *mm,
mm               3452 mm/mmap.c      	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
mm               3456 mm/mmap.c      int install_special_mapping(struct mm_struct *mm,
mm               3461 mm/mmap.c      		mm, addr, len, vm_flags, (void *)pages,
mm               3469 mm/mmap.c      static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
mm               3476 mm/mmap.c      		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
mm               3492 mm/mmap.c      static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
mm               3506 mm/mmap.c      		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
mm               3547 mm/mmap.c      int mm_take_all_locks(struct mm_struct *mm)
mm               3552 mm/mmap.c      	BUG_ON(down_read_trylock(&mm->mmap_sem));
mm               3556 mm/mmap.c      	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm               3561 mm/mmap.c      			vm_lock_mapping(mm, vma->vm_file->f_mapping);
mm               3564 mm/mmap.c      	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm               3569 mm/mmap.c      			vm_lock_mapping(mm, vma->vm_file->f_mapping);
mm               3572 mm/mmap.c      	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm               3577 mm/mmap.c      				vm_lock_anon_vma(mm, avc->anon_vma);
mm               3583 mm/mmap.c      	mm_drop_all_locks(mm);
mm               3627 mm/mmap.c      void mm_drop_all_locks(struct mm_struct *mm)
mm               3632 mm/mmap.c      	BUG_ON(down_read_trylock(&mm->mmap_sem));
mm               3635 mm/mmap.c      	for (vma = mm->mmap; vma; vma = vma->vm_next) {
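
The mm/mmap.c entries above index the core VMA lookup and mapping paths of this (pre-5.8, mmap_sem-era) tree. As a hedged sketch, a typical reader of the VMA tree takes mmap_sem shared before calling find_vma(), then checks vm_start, since find_vma() returns the first VMA ending above the address:

#include <linux/mm.h>

/* Sketch only: report whether addr falls inside a mapping of mm.
 * Assumes the caller already holds a reference on mm (e.g. get_task_mm()). */
static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped = false;

	down_read(&mm->mmap_sem);		/* find_vma() needs mmap_sem held */
	vma = find_vma(mm, addr);		/* first VMA with vm_end > addr */
	if (vma && vma->vm_start <= addr)	/* may start above addr: a hole */
		mapped = true;
	up_read(&mm->mmap_sem);
	return mapped;
}
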
mm                 22 mm/mmu_context.c void use_mm(struct mm_struct *mm)
mm                 29 mm/mmu_context.c 	if (active_mm != mm) {
mm                 30 mm/mmu_context.c 		mmgrab(mm);
mm                 31 mm/mmu_context.c 		tsk->active_mm = mm;
mm                 33 mm/mmu_context.c 	tsk->mm = mm;
mm                 34 mm/mmu_context.c 	switch_mm(active_mm, mm, tsk);
mm                 40 mm/mmu_context.c 	if (active_mm != mm)
mm                 53 mm/mmu_context.c void unuse_mm(struct mm_struct *mm)
mm                 58 mm/mmu_context.c 	sync_mm_rss(mm);
mm                 59 mm/mmu_context.c 	tsk->mm = NULL;
mm                 61 mm/mmu_context.c 	enter_lazy_tlb(mm, tsk);
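
use_mm() and unuse_mm(), indexed above, let a kernel thread temporarily adopt a user mm. A minimal sketch of the usual pairing, assuming the caller holds an mm_users reference (mmget()/get_task_mm()) and runs from kthread context:

#include <linux/mmu_context.h>
#include <linux/uaccess.h>

/* Sketch only: read user memory of mm from a kthread. */
static int kthread_peek_user(struct mm_struct *mm, const void __user *uaddr,
			     void *buf, size_t len)
{
	int ret;

	use_mm(mm);				/* switch this kthread onto mm */
	ret = copy_from_user(buf, uaddr, len) ? -EFAULT : 0;
	unuse_mm(mm);				/* back to lazy-TLB, mm detached */
	return ret;
}
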
mm                206 mm/mmu_gather.c void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
mm                209 mm/mmu_gather.c 	tlb->mm = mm;
mm                231 mm/mmu_gather.c 	inc_tlb_flush_pending(tlb->mm);
mm                258 mm/mmu_gather.c 	if (mm_tlb_flush_nested(tlb->mm)) {
mm                277 mm/mmu_gather.c 	dec_tlb_flush_pending(tlb->mm);
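
The mm/mmu_gather.c entries track the batched TLB teardown that unmap_region() above drives. A hedged sketch of the bracketing, with signatures as they appear in this tree (tlb_gather_mmu() takes the range, and inc/dec_tlb_flush_pending() are handled inside):

#include <linux/mm.h>
#include <asm/tlb.h>

/* Sketch only: tear down [start, end) of mm with one batched flush. */
static void teardown_range(struct mm_struct *mm, struct vm_area_struct *vma,
			   unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* marks a flush as pending */
	unmap_vmas(&tlb, vma, start, end);	/* queue pages, defer the flush */
	tlb_finish_mmu(&tlb, start, end);	/* flush TLB, free queued pages */
}
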
mm                 42 mm/mmu_notifier.c void __mmu_notifier_release(struct mm_struct *mm)
mm                 52 mm/mmu_notifier.c 	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
mm                 60 mm/mmu_notifier.c 			mn->ops->release(mn, mm);
mm                 62 mm/mmu_notifier.c 	spin_lock(&mm->mmu_notifier_mm->lock);
mm                 63 mm/mmu_notifier.c 	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
mm                 64 mm/mmu_notifier.c 		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
mm                 75 mm/mmu_notifier.c 	spin_unlock(&mm->mmu_notifier_mm->lock);
mm                 95 mm/mmu_notifier.c int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
mm                103 mm/mmu_notifier.c 	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
mm                105 mm/mmu_notifier.c 			young |= mn->ops->clear_flush_young(mn, mm, start, end);
mm                112 mm/mmu_notifier.c int __mmu_notifier_clear_young(struct mm_struct *mm,
mm                120 mm/mmu_notifier.c 	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
mm                122 mm/mmu_notifier.c 			young |= mn->ops->clear_young(mn, mm, start, end);
mm                129 mm/mmu_notifier.c int __mmu_notifier_test_young(struct mm_struct *mm,
mm                136 mm/mmu_notifier.c 	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
mm                138 mm/mmu_notifier.c 			young = mn->ops->test_young(mn, mm, address);
mm                148 mm/mmu_notifier.c void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
mm                155 mm/mmu_notifier.c 	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
mm                157 mm/mmu_notifier.c 			mn->ops->change_pte(mn, mm, address, pte);
mm                169 mm/mmu_notifier.c 	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
mm                201 mm/mmu_notifier.c 	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
mm                216 mm/mmu_notifier.c 			mn->ops->invalidate_range(mn, range->mm,
mm                231 mm/mmu_notifier.c void __mmu_notifier_invalidate_range(struct mm_struct *mm,
mm                238 mm/mmu_notifier.c 	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
mm                240 mm/mmu_notifier.c 			mn->ops->invalidate_range(mn, mm, start, end);
mm                249 mm/mmu_notifier.c int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
mm                254 mm/mmu_notifier.c 	lockdep_assert_held_write(&mm->mmap_sem);
mm                255 mm/mmu_notifier.c 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
mm                264 mm/mmu_notifier.c 	mn->mm = mm;
mm                267 mm/mmu_notifier.c 	if (!mm->mmu_notifier_mm) {
mm                282 mm/mmu_notifier.c 	ret = mm_take_all_locks(mm);
mm                287 mm/mmu_notifier.c 	mmgrab(mm);
mm                298 mm/mmu_notifier.c 		mm->mmu_notifier_mm = mmu_notifier_mm;
mm                300 mm/mmu_notifier.c 	spin_lock(&mm->mmu_notifier_mm->lock);
mm                301 mm/mmu_notifier.c 	hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
mm                302 mm/mmu_notifier.c 	spin_unlock(&mm->mmu_notifier_mm->lock);
mm                304 mm/mmu_notifier.c 	mm_drop_all_locks(mm);
mm                305 mm/mmu_notifier.c 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
mm                333 mm/mmu_notifier.c int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
mm                337 mm/mmu_notifier.c 	down_write(&mm->mmap_sem);
mm                338 mm/mmu_notifier.c 	ret = __mmu_notifier_register(mn, mm);
mm                339 mm/mmu_notifier.c 	up_write(&mm->mmap_sem);
mm                345 mm/mmu_notifier.c find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
mm                349 mm/mmu_notifier.c 	spin_lock(&mm->mmu_notifier_mm->lock);
mm                350 mm/mmu_notifier.c 	hlist_for_each_entry_rcu (mn, &mm->mmu_notifier_mm->list, hlist) {
mm                358 mm/mmu_notifier.c 		spin_unlock(&mm->mmu_notifier_mm->lock);
mm                361 mm/mmu_notifier.c 	spin_unlock(&mm->mmu_notifier_mm->lock);
mm                383 mm/mmu_notifier.c 					     struct mm_struct *mm)
mm                388 mm/mmu_notifier.c 	lockdep_assert_held_write(&mm->mmap_sem);
mm                390 mm/mmu_notifier.c 	if (mm->mmu_notifier_mm) {
mm                391 mm/mmu_notifier.c 		mn = find_get_mmu_notifier(mm, ops);
mm                396 mm/mmu_notifier.c 	mn = ops->alloc_notifier(mm);
mm                400 mm/mmu_notifier.c 	ret = __mmu_notifier_register(mn, mm);
mm                411 mm/mmu_notifier.c void __mmu_notifier_mm_destroy(struct mm_struct *mm)
mm                413 mm/mmu_notifier.c 	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
mm                414 mm/mmu_notifier.c 	kfree(mm->mmu_notifier_mm);
mm                415 mm/mmu_notifier.c 	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
mm                428 mm/mmu_notifier.c void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
mm                430 mm/mmu_notifier.c 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
mm                445 mm/mmu_notifier.c 			mn->ops->release(mn, mm);
mm                448 mm/mmu_notifier.c 		spin_lock(&mm->mmu_notifier_mm->lock);
mm                454 mm/mmu_notifier.c 		spin_unlock(&mm->mmu_notifier_mm->lock);
mm                463 mm/mmu_notifier.c 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
mm                465 mm/mmu_notifier.c 	mmdrop(mm);
mm                472 mm/mmu_notifier.c 	struct mm_struct *mm = mn->mm;
mm                476 mm/mmu_notifier.c 	mmdrop(mm);
mm                503 mm/mmu_notifier.c 	struct mm_struct *mm = mn->mm;
mm                505 mm/mmu_notifier.c 	spin_lock(&mm->mmu_notifier_mm->lock);
mm                509 mm/mmu_notifier.c 	spin_unlock(&mm->mmu_notifier_mm->lock);
mm                515 mm/mmu_notifier.c 	spin_unlock(&mm->mmu_notifier_mm->lock);
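
mmu_notifier_register(), indexed above, takes mmap_sem for writing itself and requires mm_users to still be elevated. A minimal hedged sketch of a notifier that only wants the release() callback (my_release and watch_mm are illustrative names):

#include <linux/mmu_notifier.h>

/* Sketch only: invoked once when mm's address space is torn down. */
static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* stop all use of mm's pages here */
}

static const struct mmu_notifier_ops my_ops = {
	.release = my_release,
};

/* mn must outlive the mm or be removed via mmu_notifier_unregister(). */
static int watch_mm(struct mmu_notifier *mn, struct mm_struct *mm)
{
	mn->ops = &my_ops;
	return mmu_notifier_register(mn, mm);	/* locks mmap_sem internally */
}
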
mm                305 mm/mprotect.c  	struct mm_struct *mm = vma->vm_mm;
mm                312 mm/mprotect.c  	pgd = pgd_offset(mm, addr);
mm                314 mm/mprotect.c  	inc_tlb_flush_pending(mm);
mm                326 mm/mprotect.c  	dec_tlb_flush_pending(mm);
mm                376 mm/mprotect.c  	struct mm_struct *mm = vma->vm_mm;
mm                399 mm/mprotect.c  		error = walk_page_range(current->mm, start, end,
mm                413 mm/mprotect.c  		if (!may_expand_vm(mm, newflags, nrpages) &&
mm                414 mm/mprotect.c  				may_expand_vm(mm, oldflags, nrpages))
mm                419 mm/mprotect.c  			if (security_vm_enough_memory_mm(mm, charged))
mm                429 mm/mprotect.c  	*pprev = vma_merge(mm, *pprev, start, end, newflags,
mm                441 mm/mprotect.c  		error = split_vma(mm, vma, start, 1);
mm                447 mm/mprotect.c  		error = split_vma(mm, vma, end, 0);
mm                473 mm/mprotect.c  	vm_stat_account(mm, oldflags, -nrpages);
mm                474 mm/mprotect.c  	vm_stat_account(mm, newflags, nrpages);
mm                515 mm/mprotect.c  	if (down_write_killable(&current->mm->mmap_sem))
mm                523 mm/mprotect.c  	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
mm                526 mm/mprotect.c  	vma = find_vma(current->mm, start);
mm                605 mm/mprotect.c  	up_write(&current->mm->mmap_sem);
mm                635 mm/mprotect.c  	down_write(&current->mm->mmap_sem);
mm                636 mm/mprotect.c  	pkey = mm_pkey_alloc(current->mm);
mm                644 mm/mprotect.c  		mm_pkey_free(current->mm, pkey);
mm                649 mm/mprotect.c  	up_write(&current->mm->mmap_sem);
mm                657 mm/mprotect.c  	down_write(&current->mm->mmap_sem);
mm                658 mm/mprotect.c  	ret = mm_pkey_free(current->mm, pkey);
mm                659 mm/mprotect.c  	up_write(&current->mm->mmap_sem);
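
mprotect_fixup(), per the split_vma() calls indexed above, clips the target VMA so that [start, end) sits in a VMA of its own before changing flags. A hedged sketch of that clipping step (caller holds mmap_sem for writing):

#include <linux/mm.h>

/* Sketch only: after this, vma covers exactly [start, end). */
static int clip_vma(struct mm_struct *mm, struct vm_area_struct *vma,
		    unsigned long start, unsigned long end)
{
	int error;

	if (start > vma->vm_start) {
		error = split_vma(mm, vma, start, 1);	/* new VMA below start */
		if (error)
			return error;
	}
	if (end < vma->vm_end) {
		error = split_vma(mm, vma, end, 0);	/* new VMA above end */
		if (error)
			return error;
	}
	return 0;
}
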
mm                 33 mm/mremap.c    static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
mm                 40 mm/mremap.c    	pgd = pgd_offset(mm, addr);
mm                 59 mm/mremap.c    static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
mm                 67 mm/mremap.c    	pgd = pgd_offset(mm, addr);
mm                 68 mm/mremap.c    	p4d = p4d_alloc(mm, pgd, addr);
mm                 71 mm/mremap.c    	pud = pud_alloc(mm, p4d, addr);
mm                 75 mm/mremap.c    	pmd = pmd_alloc(mm, pud, addr);
mm                120 mm/mremap.c    	struct mm_struct *mm = vma->vm_mm;
mm                151 mm/mremap.c    	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
mm                153 mm/mremap.c    	new_ptl = pte_lockptr(mm, new_pmd);
mm                164 mm/mremap.c    		pte = ptep_get_and_clear(mm, old_addr, old_pte);
mm                180 mm/mremap.c    		set_pte_at(mm, new_addr, new_pte, pte);
mm                200 mm/mremap.c    	struct mm_struct *mm = vma->vm_mm;
mm                219 mm/mremap.c    	new_ptl = pmd_lockptr(mm, new_pmd);
mm                230 mm/mremap.c    	set_pmd_at(mm, new_addr, new_pmd, pmd);
mm                324 mm/mremap.c    	struct mm_struct *mm = vma->vm_mm;
mm                339 mm/mremap.c    	if (mm->map_count >= sysctl_max_map_count - 3)
mm                382 mm/mremap.c    		arch_remap(mm, old_addr, old_addr + old_len,
mm                404 mm/mremap.c    	hiwater_vm = mm->hiwater_vm;
mm                405 mm/mremap.c    	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
mm                411 mm/mremap.c    	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
mm                416 mm/mremap.c    	mm->hiwater_vm = hiwater_vm;
mm                426 mm/mremap.c    		mm->locked_vm += new_len >> PAGE_SHIFT;
mm                436 mm/mremap.c    	struct mm_struct *mm = current->mm;
mm                437 mm/mremap.c    	struct vm_area_struct *vma = find_vma(mm, addr);
mm                477 mm/mremap.c    		locked = mm->locked_vm << PAGE_SHIFT;
mm                484 mm/mremap.c    	if (!may_expand_vm(mm, vma->vm_flags,
mm                490 mm/mremap.c    		if (security_vm_enough_memory_mm(mm, charged))
mm                504 mm/mremap.c    	struct mm_struct *mm = current->mm;
mm                534 mm/mremap.c    	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
mm                537 mm/mremap.c    	ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
mm                542 mm/mremap.c    		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
mm                599 mm/mremap.c    	struct mm_struct *mm = current->mm;
mm                641 mm/mremap.c    	if (down_write_killable(&current->mm->mmap_sem))
mm                659 mm/mremap.c    		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
mm                693 mm/mremap.c    			vm_stat_account(mm, vma->vm_flags, pages);
mm                695 mm/mremap.c    				mm->locked_vm += pages;
mm                732 mm/mremap.c    		up_read(&current->mm->mmap_sem);
mm                734 mm/mremap.c    		up_write(&current->mm->mmap_sem);
mm                737 mm/mremap.c    	userfaultfd_unmap_complete(mm, &uf_unmap_early);
mm                739 mm/mremap.c    	userfaultfd_unmap_complete(mm, &uf_unmap);
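
alloc_new_pmd() above (and mm_alloc_pmd() in mm/userfaultfd.c further down) walk the four-level chain, allocating intermediate tables on demand. The pattern, as a sketch:

#include <linux/mm.h>

/* Sketch only: return the PMD for addr, allocating levels as needed. */
static pmd_t *pmd_alloc_for(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* top level always exists */
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_alloc(mm, pgd, addr);		/* each step may allocate ... */
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	return pmd_alloc(mm, pud, addr);	/* ... and returns NULL on OOM */
}
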
mm                 35 mm/msync.c     	struct mm_struct *mm = current->mm;
mm                 60 mm/msync.c     	down_read(&mm->mmap_sem);
mm                 61 mm/msync.c     	vma = find_vma(mm, start);
mm                 91 mm/msync.c     			up_read(&mm->mmap_sem);
mm                 96 mm/msync.c     			down_read(&mm->mmap_sem);
mm                 97 mm/msync.c     			vma = find_vma(mm, start);
mm                107 mm/msync.c     	up_read(&mm->mmap_sem);
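
The msync entries show the lock-drop-revalidate pattern: mmap_sem is released around the blocking fsync, after which the cached VMA pointer is stale and must be looked up again. Sketched:

#include <linux/mm.h>

/* Sketch only: drop mmap_sem around sleepable work, then re-lookup. */
static struct vm_area_struct *do_blocking_and_revalidate(struct mm_struct *mm,
							 unsigned long start)
{
	up_read(&mm->mmap_sem);
	/* ... blocking work, e.g. vfs_fsync_range() ... */
	down_read(&mm->mmap_sem);
	return find_vma(mm, start);	/* the old vma pointer is now stale */
}
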
mm                102 mm/nommu.c     		vma = find_vma(current->mm, (unsigned long)objp);
mm                166 mm/nommu.c     		down_write(&current->mm->mmap_sem);
mm                167 mm/nommu.c     		vma = find_vma(current->mm, (unsigned long)ret);
mm                170 mm/nommu.c     		up_write(&current->mm->mmap_sem);
mm                416 mm/nommu.c     	struct mm_struct *mm = current->mm;
mm                418 mm/nommu.c     	if (brk < mm->start_brk || brk > mm->context.end_brk)
mm                419 mm/nommu.c     		return mm->brk;
mm                421 mm/nommu.c     	if (mm->brk == brk)
mm                422 mm/nommu.c     		return mm->brk;
mm                427 mm/nommu.c     	if (brk <= mm->brk) {
mm                428 mm/nommu.c     		mm->brk = brk;
mm                435 mm/nommu.c     	flush_icache_range(mm->brk, brk);
mm                436 mm/nommu.c     	return mm->brk = brk;
mm                586 mm/nommu.c     static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
mm                594 mm/nommu.c     	mm->map_count++;
mm                595 mm/nommu.c     	vma->vm_mm = mm;
mm                610 mm/nommu.c     	p = &mm->mm_rb.rb_node;
mm                637 mm/nommu.c     	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
mm                644 mm/nommu.c     	__vma_link_list(mm, vma, prev, parent);
mm                654 mm/nommu.c     	struct mm_struct *mm = vma->vm_mm;
mm                657 mm/nommu.c     	mm->map_count--;
mm                661 mm/nommu.c     			vmacache_invalidate(mm);
mm                678 mm/nommu.c     	rb_erase(&vma->vm_rb, &mm->mm_rb);
mm                683 mm/nommu.c     		mm->mmap = vma->vm_next;
mm                692 mm/nommu.c     static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
mm                706 mm/nommu.c     struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
mm                711 mm/nommu.c     	vma = vmacache_find(mm, addr);
mm                717 mm/nommu.c     	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm                734 mm/nommu.c     struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
mm                736 mm/nommu.c     	return find_vma(mm, addr);
mm                752 mm/nommu.c     static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
mm                760 mm/nommu.c     	vma = vmacache_find_exact(mm, addr, end);
mm                766 mm/nommu.c     	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm               1136 mm/nommu.c     	vma = vm_area_alloc(current->mm);
mm               1277 mm/nommu.c     	current->mm->total_vm += len >> PAGE_SHIFT;
mm               1280 mm/nommu.c     	add_vma_to_mm(current->mm, vma);
mm               1383 mm/nommu.c     int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
mm               1395 mm/nommu.c     	if (mm->map_count >= sysctl_max_map_count)
mm               1437 mm/nommu.c     	add_vma_to_mm(mm, vma);
mm               1438 mm/nommu.c     	add_vma_to_mm(mm, new);
mm               1446 mm/nommu.c     static int shrink_vma(struct mm_struct *mm,
mm               1459 mm/nommu.c     	add_vma_to_mm(mm, vma);
mm               1485 mm/nommu.c     int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
mm               1498 mm/nommu.c     	vma = find_vma(mm, start);
mm               1531 mm/nommu.c     			ret = split_vma(mm, vma, start, 1);
mm               1535 mm/nommu.c     		return shrink_vma(mm, vma, start, end);
mm               1540 mm/nommu.c     	delete_vma(mm, vma);
mm               1547 mm/nommu.c     	struct mm_struct *mm = current->mm;
mm               1550 mm/nommu.c     	down_write(&mm->mmap_sem);
mm               1551 mm/nommu.c     	ret = do_munmap(mm, addr, len, NULL);
mm               1552 mm/nommu.c     	up_write(&mm->mmap_sem);
mm               1565 mm/nommu.c     void exit_mmap(struct mm_struct *mm)
mm               1569 mm/nommu.c     	if (!mm)
mm               1572 mm/nommu.c     	mm->total_vm = 0;
mm               1574 mm/nommu.c     	while ((vma = mm->mmap)) {
mm               1575 mm/nommu.c     		mm->mmap = vma->vm_next;
mm               1577 mm/nommu.c     		delete_vma(mm, vma);
mm               1615 mm/nommu.c     	vma = find_vma_exact(current->mm, addr, old_len);
mm               1639 mm/nommu.c     	down_write(&current->mm->mmap_sem);
mm               1641 mm/nommu.c     	up_write(&current->mm->mmap_sem);
mm               1707 mm/nommu.c     int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
mm               1713 mm/nommu.c     	if (down_read_killable(&mm->mmap_sem))
mm               1717 mm/nommu.c     	vma = find_vma(mm, addr);
mm               1736 mm/nommu.c     	up_read(&mm->mmap_sem);
mm               1751 mm/nommu.c     int access_remote_vm(struct mm_struct *mm, unsigned long addr,
mm               1754 mm/nommu.c     	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
mm               1764 mm/nommu.c     	struct mm_struct *mm;
mm               1769 mm/nommu.c     	mm = get_task_mm(tsk);
mm               1770 mm/nommu.c     	if (!mm)
mm               1773 mm/nommu.c     	len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
mm               1775 mm/nommu.c     	mmput(mm);
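
__access_remote_vm(), indexed above, is wrapped the same way on both nommu and MMU builds: pin the target mm, access it, drop the pin. A hedged sketch of a caller (peek_task is an illustrative name):

#include <linux/mm.h>
#include <linux/sched/mm.h>

/* Sketch only: read len bytes at addr in tsk's address space. */
static int peek_task(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len)
{
	struct mm_struct *mm = get_task_mm(tsk);	/* NULL for kthreads */
	int copied;

	if (!mm)
		return -ESRCH;
	copied = access_remote_vm(mm, addr, buf, len, FOLL_FORCE);
	mmput(mm);				/* release the mm_users pin */
	return copied;				/* bytes copied, may be short */
}
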
mm                140 mm/oom_kill.c  		if (likely(t->mm))
mm                217 mm/oom_kill.c  			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
mm                227 mm/oom_kill.c  	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
mm                228 mm/oom_kill.c  		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
mm                404 mm/oom_kill.c  		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
mm                405 mm/oom_kill.c  		mm_pgtables_bytes(task->mm),
mm                406 mm/oom_kill.c  		get_mm_counter(task->mm, MM_SWAPENTS),
mm                490 mm/oom_kill.c  bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
mm                495 mm/oom_kill.c  		struct mm_struct *t_mm = READ_ONCE(t->mm);
mm                497 mm/oom_kill.c  			return t_mm == mm;
mm                512 mm/oom_kill.c  bool __oom_reap_task_mm(struct mm_struct *mm)
mm                523 mm/oom_kill.c  	set_bit(MMF_UNSTABLE, &mm->flags);
mm                525 mm/oom_kill.c  	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
mm                544 mm/oom_kill.c  						vma, mm, vma->vm_start,
mm                546 mm/oom_kill.c  			tlb_gather_mmu(&tlb, mm, range.start, range.end);
mm                567 mm/oom_kill.c  static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
mm                571 mm/oom_kill.c  	if (!down_read_trylock(&mm->mmap_sem)) {
mm                582 mm/oom_kill.c  	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
mm                590 mm/oom_kill.c  	ret = __oom_reap_task_mm(mm);
mm                596 mm/oom_kill.c  			K(get_mm_counter(mm, MM_ANONPAGES)),
mm                597 mm/oom_kill.c  			K(get_mm_counter(mm, MM_FILEPAGES)),
mm                598 mm/oom_kill.c  			K(get_mm_counter(mm, MM_SHMEMPAGES)));
mm                602 mm/oom_kill.c  	up_read(&mm->mmap_sem);
mm                611 mm/oom_kill.c  	struct mm_struct *mm = tsk->signal->oom_mm;
mm                614 mm/oom_kill.c  	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
mm                618 mm/oom_kill.c  	    test_bit(MMF_OOM_SKIP, &mm->flags))
mm                632 mm/oom_kill.c  	set_bit(MMF_OOM_SKIP, &mm->flags);
mm                698 mm/oom_kill.c  	struct mm_struct *mm = tsk->mm;
mm                706 mm/oom_kill.c  	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
mm                708 mm/oom_kill.c  		set_bit(MMF_OOM_VICTIM, &mm->flags);
mm                811 mm/oom_kill.c  	struct mm_struct *mm = task->mm;
mm                820 mm/oom_kill.c  	if (!mm)
mm                830 mm/oom_kill.c  	if (test_bit(MMF_OOM_SKIP, &mm->flags))
mm                833 mm/oom_kill.c  	if (atomic_read(&mm->mm_users) <= 1)
mm                843 mm/oom_kill.c  		if (!process_shares_mm(p, mm))
mm                859 mm/oom_kill.c  	struct mm_struct *mm;
mm                873 mm/oom_kill.c  	mm = victim->mm;
mm                874 mm/oom_kill.c  	mmgrab(mm);
mm                878 mm/oom_kill.c  	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);
mm                888 mm/oom_kill.c  		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
mm                889 mm/oom_kill.c  		K(get_mm_counter(mm, MM_ANONPAGES)),
mm                890 mm/oom_kill.c  		K(get_mm_counter(mm, MM_FILEPAGES)),
mm                891 mm/oom_kill.c  		K(get_mm_counter(mm, MM_SHMEMPAGES)),
mm                893 mm/oom_kill.c  		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
mm                907 mm/oom_kill.c  		if (!process_shares_mm(p, mm))
mm                913 mm/oom_kill.c  			set_bit(MMF_OOM_SKIP, &mm->flags);
mm                932 mm/oom_kill.c  	mmdrop(mm);
mm               1088 mm/oom_kill.c  	    current->mm && !oom_unkillable_task(current) &&
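
oom_kill.c above pairs mmgrab() with mmdrop(): unlike mmget()/mmput(), this pins only the mm_struct itself (mm_count), not its pages, so flags and counters stay readable even after the victim runs exit_mmap(). Sketch of the idiom:

#include <linux/sched/mm.h>

/* Sketch only: keep the struct (not the address space) alive. */
static void inspect_mm_after_exit(struct mm_struct *mm)
{
	mmgrab(mm);			/* mm_count pin: struct won't be freed */
	/* mm->flags, RSS counters etc. remain valid here */
	mmdrop(mm);			/* freed when mm_count reaches zero */
}
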
mm                140 mm/page_vma_mapped.c 	struct mm_struct *mm = pvmw->vma->vm_mm;
mm                156 mm/page_vma_mapped.c 		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
mm                160 mm/page_vma_mapped.c 		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
mm                167 mm/page_vma_mapped.c 	pgd = pgd_offset(mm, pvmw->address);
mm                184 mm/page_vma_mapped.c 		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
mm                242 mm/page_vma_mapped.c 			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
mm                155 mm/pagewalk.c  	pgd = pgd_offset(walk->mm, addr);
mm                196 mm/pagewalk.c  		pte = huge_pte_offset(walk->mm, addr & hmask, sz);
mm                301 mm/pagewalk.c  int walk_page_range(struct mm_struct *mm, unsigned long start,
mm                310 mm/pagewalk.c  		.mm		= mm,
mm                317 mm/pagewalk.c  	if (!walk.mm)
mm                320 mm/pagewalk.c  	lockdep_assert_held(&walk.mm->mmap_sem);
mm                322 mm/pagewalk.c  	vma = find_vma(walk.mm, start);
mm                361 mm/pagewalk.c  		.mm		= vma->vm_mm,
mm                367 mm/pagewalk.c  	if (!walk.mm)
mm                370 mm/pagewalk.c  	lockdep_assert_held(&walk.mm->mmap_sem);
mm                 84 mm/pgtable-generic.c 	struct mm_struct *mm = (vma)->vm_mm;
mm                 86 mm/pgtable-generic.c 	pte = ptep_get_and_clear(mm, address, ptep);
mm                 87 mm/pgtable-generic.c 	if (pte_accessible(mm, pte))
mm                152 mm/pgtable-generic.c void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
mm                155 mm/pgtable-generic.c 	assert_spin_locked(pmd_lockptr(mm, pmdp));
mm                158 mm/pgtable-generic.c 	if (!pmd_huge_pte(mm, pmdp))
mm                161 mm/pgtable-generic.c 		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
mm                162 mm/pgtable-generic.c 	pmd_huge_pte(mm, pmdp) = pgtable;
mm                168 mm/pgtable-generic.c pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
mm                172 mm/pgtable-generic.c 	assert_spin_locked(pmd_lockptr(mm, pmdp));
mm                175 mm/pgtable-generic.c 	pgtable = pmd_huge_pte(mm, pmdp);
mm                176 mm/pgtable-generic.c 	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
mm                178 mm/pgtable-generic.c 	if (pmd_huge_pte(mm, pmdp))
mm                 78 mm/process_vm_access.c 				    struct mm_struct *mm,
mm                108 mm/process_vm_access.c 		down_read(&mm->mmap_sem);
mm                109 mm/process_vm_access.c 		pages = get_user_pages_remote(task, mm, pa, pages, flags,
mm                112 mm/process_vm_access.c 			up_read(&mm->mmap_sem);
mm                159 mm/process_vm_access.c 	struct mm_struct *mm;
mm                203 mm/process_vm_access.c 	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
mm                204 mm/process_vm_access.c 	if (!mm || IS_ERR(mm)) {
mm                205 mm/process_vm_access.c 		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
mm                218 mm/process_vm_access.c 			iter, process_pages, mm, task, vm_write);
mm                229 mm/process_vm_access.c 	mmput(mm);
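
process_vm_rw above calls get_user_pages_remote() under the target's mmap_sem. A hedged sketch against this tree's eight-argument signature; pin_remote_page is an illustrative name:

#include <linux/mm.h>

/* Sketch only: pin one page of a remote, already-pinned mm. */
static long pin_remote_page(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long addr, struct page **page)
{
	long ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages_remote(tsk, mm, addr, 1, 0 /* read-only */,
				    page, NULL, NULL);
	up_read(&mm->mmap_sem);
	return ret;		/* 1 on success; caller must put_page() */
}
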
mm                178 mm/rmap.c      	struct mm_struct *mm = vma->vm_mm;
mm                199 mm/rmap.c      	spin_lock(&mm->page_table_lock);
mm                208 mm/rmap.c      	spin_unlock(&mm->page_table_lock);
mm                603 mm/rmap.c      static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
mm                607 mm/rmap.c      	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
mm                615 mm/rmap.c      	mm->tlb_flush_batched = true;
mm                630 mm/rmap.c      static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
mm                638 mm/rmap.c      	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
mm                660 mm/rmap.c      void flush_tlb_batched_pending(struct mm_struct *mm)
mm                662 mm/rmap.c      	if (mm->tlb_flush_batched) {
mm                663 mm/rmap.c      		flush_tlb_mm(mm);
mm                670 mm/rmap.c      		mm->tlb_flush_batched = false;
mm                674 mm/rmap.c      static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
mm                678 mm/rmap.c      static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
mm                711 mm/rmap.c      pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
mm                719 mm/rmap.c      	pgd = pgd_offset(mm, address);
mm               1344 mm/rmap.c      	struct mm_struct *mm = vma->vm_mm;
mm               1431 mm/rmap.c      			if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
mm               1441 mm/rmap.c      				mmu_notifier_invalidate_range(mm, range.start,
mm               1464 mm/rmap.c      			pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);
mm               1475 mm/rmap.c      			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
mm               1502 mm/rmap.c      		if (should_defer_flush(mm, flags)) {
mm               1511 mm/rmap.c      			pteval = ptep_get_and_clear(mm, address, pvmw.pte);
mm               1513 mm/rmap.c      			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
mm               1523 mm/rmap.c      		update_hiwater_rss(mm);
mm               1528 mm/rmap.c      				hugetlb_count_sub(compound_nr(page), mm);
mm               1529 mm/rmap.c      				set_huge_swap_pte_at(mm, address,
mm               1533 mm/rmap.c      				dec_mm_counter(mm, mm_counter(page));
mm               1534 mm/rmap.c      				set_pte_at(mm, address, pvmw.pte, pteval);
mm               1548 mm/rmap.c      			dec_mm_counter(mm, mm_counter(page));
mm               1550 mm/rmap.c      			mmu_notifier_invalidate_range(mm, address,
mm               1557 mm/rmap.c      			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
mm               1558 mm/rmap.c      				set_pte_at(mm, address, pvmw.pte, pteval);
mm               1574 mm/rmap.c      			set_pte_at(mm, address, pvmw.pte, swp_pte);
mm               1590 mm/rmap.c      				mmu_notifier_invalidate_range(mm, address,
mm               1600 mm/rmap.c      					mmu_notifier_invalidate_range(mm,
mm               1602 mm/rmap.c      					dec_mm_counter(mm, MM_ANONPAGES);
mm               1610 mm/rmap.c      				set_pte_at(mm, address, pvmw.pte, pteval);
mm               1618 mm/rmap.c      				set_pte_at(mm, address, pvmw.pte, pteval);
mm               1623 mm/rmap.c      			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
mm               1624 mm/rmap.c      				set_pte_at(mm, address, pvmw.pte, pteval);
mm               1629 mm/rmap.c      			if (list_empty(&mm->mmlist)) {
mm               1631 mm/rmap.c      				if (list_empty(&mm->mmlist))
mm               1632 mm/rmap.c      					list_add(&mm->mmlist, &init_mm.mmlist);
mm               1635 mm/rmap.c      			dec_mm_counter(mm, MM_ANONPAGES);
mm               1636 mm/rmap.c      			inc_mm_counter(mm, MM_SWAPENTS);
mm               1640 mm/rmap.c      			set_pte_at(mm, address, pvmw.pte, swp_pte);
mm               1642 mm/rmap.c      			mmu_notifier_invalidate_range(mm, address,
mm               1655 mm/rmap.c      			dec_mm_counter(mm, mm_counter_file(page));
mm                172 mm/shmem.c     		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
mm                186 mm/shmem.c     			return security_vm_enough_memory_mm(current->mm,
mm                205 mm/shmem.c     	return security_vm_enough_memory_mm(current->mm,
mm               1633 mm/shmem.c     	struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
mm               1768 mm/shmem.c     	charge_mm = vma ? vma->vm_mm : current->mm;
mm               2088 mm/shmem.c     	get_area = current->mm->get_unmapped_area;
mm               4075 mm/shmem.c     	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
mm               2067 mm/swapfile.c  static int unuse_mm(struct mm_struct *mm, unsigned int type,
mm               2073 mm/swapfile.c  	down_read(&mm->mmap_sem);
mm               2074 mm/swapfile.c  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
mm               2083 mm/swapfile.c  	up_read(&mm->mmap_sem);
mm               2127 mm/swapfile.c  	struct mm_struct *mm;
mm               2155 mm/swapfile.c  		mm = list_entry(p, struct mm_struct, mmlist);
mm               2156 mm/swapfile.c  		if (!mmget_not_zero(mm))
mm               2160 mm/swapfile.c  		prev_mm = mm;
mm               2161 mm/swapfile.c  		retval = unuse_mm(mm, type, frontswap, &pages_to_unuse);
mm               2531 mm/swapfile.c  	BUG_ON(!current->mm);
mm               2557 mm/swapfile.c  	if (!security_vm_enough_memory_mm(current->mm, p->pages))
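
security_vm_enough_memory_mm(), recurring above, is the overcommit gate: it charges the pages into the committed-memory accounting and returns nonzero when the commit would exceed policy. Callers that fail later must undo the charge. A sketch:

#include <linux/security.h>
#include <linux/mman.h>

/* Sketch only: charge npages; undo with vm_unacct_memory() on error. */
static int charge_mapping(struct mm_struct *mm, long npages)
{
	if (security_vm_enough_memory_mm(mm, npages))
		return -ENOMEM;
	/* ... on any later failure path: vm_unacct_memory(npages); ... */
	return 0;
}
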
mm                147 mm/userfaultfd.c static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
mm                153 mm/userfaultfd.c 	pgd = pgd_offset(mm, address);
mm                154 mm/userfaultfd.c 	p4d = p4d_alloc(mm, pgd, address);
mm                157 mm/userfaultfd.c 	pud = pud_alloc(mm, p4d, address);
mm                165 mm/userfaultfd.c 	return pmd_alloc(mm, pud, address);
mm                273 mm/util.c      void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
mm                283 mm/util.c      		mm->mmap = vma;
mm                324 mm/util.c      unsigned long arch_randomize_brk(struct mm_struct *mm)
mm                328 mm/util.c      		return randomize_page(mm->brk, SZ_32M);
mm                330 mm/util.c      	return randomize_page(mm->brk, SZ_1G);
mm                386 mm/util.c      void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
mm                394 mm/util.c      		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
mm                395 mm/util.c      		mm->get_unmapped_area = arch_get_unmapped_area;
mm                397 mm/util.c      		mm->mmap_base = mmap_base(random_factor, rlim_stack);
mm                398 mm/util.c      		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm                402 mm/util.c      void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
mm                404 mm/util.c      	mm->mmap_base = TASK_UNMAPPED_BASE;
mm                405 mm/util.c      	mm->get_unmapped_area = arch_get_unmapped_area;
mm                424 mm/util.c      int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
mm                430 mm/util.c      	lockdep_assert_held_write(&mm->mmap_sem);
mm                432 mm/util.c      	locked_vm = mm->locked_vm;
mm                440 mm/util.c      			mm->locked_vm = locked_vm + pages;
mm                443 mm/util.c      		mm->locked_vm = locked_vm - pages;
mm                467 mm/util.c      int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
mm                471 mm/util.c      	if (pages == 0 || !mm)
mm                474 mm/util.c      	down_write(&mm->mmap_sem);
mm                475 mm/util.c      	ret = __account_locked_vm(mm, pages, inc, current,
mm                477 mm/util.c      	up_write(&mm->mmap_sem);
mm                488 mm/util.c      	struct mm_struct *mm = current->mm;
mm                494 mm/util.c      		if (down_write_killable(&mm->mmap_sem))
mm                498 mm/util.c      		up_write(&mm->mmap_sem);
mm                499 mm/util.c      		userfaultfd_unmap_complete(mm, &uf);
mm                805 mm/util.c      int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
mm                837 mm/util.c      	if (mm) {
mm                840 mm/util.c      		allowed -= min_t(long, mm->total_vm / 32, reserve);
mm                865 mm/util.c      	struct mm_struct *mm = get_task_mm(task);
mm                867 mm/util.c      	if (!mm)
mm                869 mm/util.c      	if (!mm->arg_end)
mm                872 mm/util.c      	spin_lock(&mm->arg_lock);
mm                873 mm/util.c      	arg_start = mm->arg_start;
mm                874 mm/util.c      	arg_end = mm->arg_end;
mm                875 mm/util.c      	env_start = mm->env_start;
mm                876 mm/util.c      	env_end = mm->env_end;
mm                877 mm/util.c      	spin_unlock(&mm->arg_lock);
mm                905 mm/util.c      	mmput(mm);
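
account_locked_vm(), indexed above, wraps the RLIMIT_MEMLOCK bookkeeping (it takes mmap_sem for writing itself; the __-prefixed variant expects it already held). The typical pairing, sketched:

#include <linux/mm.h>

/* Sketch only: charge/uncharge pinned pages against RLIMIT_MEMLOCK. */
static int pin_charge(struct mm_struct *mm, unsigned long npages)
{
	return account_locked_vm(mm, npages, true);	/* -ENOMEM over limit */
}

static void pin_uncharge(struct mm_struct *mm, unsigned long npages)
{
	account_locked_vm(mm, npages, false);
}
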
mm                 31 mm/vmacache.c  static inline bool vmacache_valid_mm(struct mm_struct *mm)
mm                 33 mm/vmacache.c  	return current->mm == mm && !(current->flags & PF_KTHREAD);
mm                 42 mm/vmacache.c  static bool vmacache_valid(struct mm_struct *mm)
mm                 46 mm/vmacache.c  	if (!vmacache_valid_mm(mm))
mm                 50 mm/vmacache.c  	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
mm                 55 mm/vmacache.c  		curr->vmacache.seqnum = mm->vmacache_seqnum;
mm                 62 mm/vmacache.c  struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
mm                 69 mm/vmacache.c  	if (!vmacache_valid(mm))
mm                 77 mm/vmacache.c  			if (WARN_ON_ONCE(vma->vm_mm != mm))
mm                 93 mm/vmacache.c  struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
mm                102 mm/vmacache.c  	if (!vmacache_valid(mm))
mm               1531 mm/z3fold.c    			enum zpool_mapmode mm)
mm                190 mm/zbud.c      			enum zpool_mapmode mm)
mm                416 mm/zsmalloc.c  			enum zpool_mapmode mm)
mm                420 mm/zsmalloc.c  	switch (mm) {
mm               1292 mm/zsmalloc.c  			enum zs_mapmode mm)
mm               1328 mm/zsmalloc.c  	area->vm_mm = mm;
mm               1766 net/ipv4/tcp.c 	down_read(&current->mm->mmap_sem);
mm               1768 net/ipv4/tcp.c 	vma = find_vma(current->mm, address);
mm               1770 net/ipv4/tcp.c 		up_read(&current->mm->mmap_sem);
mm               1829 net/ipv4/tcp.c 	up_read(&current->mm->mmap_sem);
mm                293 net/xdp/xdp_umem.c 	down_read(&current->mm->mmap_sem);
mm                296 net/xdp/xdp_umem.c 	up_read(&current->mm->mmap_sem);
mm                 44 samples/kprobes/kretprobe_example.c 	if (!current->mm)
mm               1301 security/commoncap.c int cap_vm_enough_memory(struct mm_struct *mm, long pages)
mm               1650 security/keys/keyctl.c 	if (parent->pid <= 1 || !parent->mm)
mm                752 security/security.c int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
mm                766 security/security.c 		rc = hp->hook.vm_enough_memory(mm, pages);
mm                772 security/security.c 	return __vm_enough_memory(mm, pages, cap_sys_admin);
mm               2247 security/selinux/hooks.c static int selinux_vm_enough_memory(struct mm_struct *mm, long pages)
mm                917 security/tomoyo/domain.c 	if (get_user_pages_remote(current, bprm->mm, pos, 1,
mm                948 security/tomoyo/util.c 	struct mm_struct *mm = current->mm;
mm                950 security/tomoyo/util.c 	if (!mm)
mm                952 security/tomoyo/util.c 	exe_file = get_mm_exe_file(mm);
mm                320 sound/sparc/dbri.c 	struct cs4215 mm;	/* mmcodec special info */
mm               1351 sound/sparc/dbri.c static int cs4215_init_data(struct cs4215 *mm)
mm               1360 sound/sparc/dbri.c 	mm->data[0] = CS4215_LO(0x20) | CS4215_HE | CS4215_LE;
mm               1361 sound/sparc/dbri.c 	mm->data[1] = CS4215_RO(0x20) | CS4215_SE;
mm               1362 sound/sparc/dbri.c 	mm->data[2] = CS4215_LG(0x8) | CS4215_IS | CS4215_PIO0 | CS4215_PIO1;
mm               1363 sound/sparc/dbri.c 	mm->data[3] = CS4215_RG(0x8) | CS4215_MA(0xf);
mm               1372 sound/sparc/dbri.c 	mm->ctrl[0] = CS4215_RSRVD_1 | CS4215_MLB;
mm               1373 sound/sparc/dbri.c 	mm->ctrl[1] = CS4215_DFR_ULAW | CS4215_FREQ[0].csval;
mm               1374 sound/sparc/dbri.c 	mm->ctrl[2] = CS4215_XCLK | CS4215_BSEL_128 | CS4215_FREQ[0].xtal;
mm               1375 sound/sparc/dbri.c 	mm->ctrl[3] = 0;
mm               1377 sound/sparc/dbri.c 	mm->status = 0;
mm               1378 sound/sparc/dbri.c 	mm->version = 0xff;
mm               1379 sound/sparc/dbri.c 	mm->precision = 8;	/* For ULAW */
mm               1380 sound/sparc/dbri.c 	mm->channels = 1;
mm               1388 sound/sparc/dbri.c 		dbri->mm.data[0] |= 63;
mm               1389 sound/sparc/dbri.c 		dbri->mm.data[1] |= 63;
mm               1390 sound/sparc/dbri.c 		dbri->mm.data[2] &= ~15;
mm               1391 sound/sparc/dbri.c 		dbri->mm.data[3] &= ~15;
mm               1398 sound/sparc/dbri.c 		dbri->mm.data[0] &= ~0x3f;	/* Reset the volume bits */
mm               1399 sound/sparc/dbri.c 		dbri->mm.data[1] &= ~0x3f;
mm               1400 sound/sparc/dbri.c 		dbri->mm.data[0] |= (DBRI_MAX_VOLUME - left_gain);
mm               1401 sound/sparc/dbri.c 		dbri->mm.data[1] |= (DBRI_MAX_VOLUME - right_gain);
mm               1407 sound/sparc/dbri.c 		dbri->mm.data[2] |= CS4215_LG(left_gain);
mm               1408 sound/sparc/dbri.c 		dbri->mm.data[3] |= CS4215_RG(right_gain);
mm               1411 sound/sparc/dbri.c 	xmit_fixed(dbri, 20, *(int *)dbri->mm.data);
mm               1424 sound/sparc/dbri.c 		dbri->mm.channels, dbri->mm.precision);
mm               1453 sound/sparc/dbri.c 		    (dbri->mm.onboard ? D_PIO0 : D_PIO2), dbri->regs + REG2);
mm               1463 sound/sparc/dbri.c 	data_width = dbri->mm.channels * dbri->mm.precision;
mm               1465 sound/sparc/dbri.c 	link_time_slot(dbri, 4, 16, 16, data_width, dbri->mm.offset);
mm               1466 sound/sparc/dbri.c 	link_time_slot(dbri, 20, 4, 16, 32, dbri->mm.offset + 32);
mm               1467 sound/sparc/dbri.c 	link_time_slot(dbri, 6, 16, 16, data_width, dbri->mm.offset);
mm               1468 sound/sparc/dbri.c 	link_time_slot(dbri, 21, 6, 16, 16, dbri->mm.offset + 40);
mm               1500 sound/sparc/dbri.c 	val = D_ENPIO | D_PIO1 | (dbri->mm.onboard ? D_PIO0 : D_PIO2);
mm               1537 sound/sparc/dbri.c 	link_time_slot(dbri, 17, 16, 16, 32, dbri->mm.offset);
mm               1538 sound/sparc/dbri.c 	link_time_slot(dbri, 18, 16, 16, 8, dbri->mm.offset);
mm               1539 sound/sparc/dbri.c 	link_time_slot(dbri, 19, 18, 16, 8, dbri->mm.offset + 48);
mm               1543 sound/sparc/dbri.c 	dbri->mm.ctrl[0] &= ~CS4215_CLB;
mm               1544 sound/sparc/dbri.c 	xmit_fixed(dbri, 17, *(int *)dbri->mm.ctrl);
mm               1552 sound/sparc/dbri.c 	for (i = 10; ((dbri->mm.status & 0xe4) != 0x20); --i)
mm               1557 sound/sparc/dbri.c 			dbri->mm.status);
mm               1569 sound/sparc/dbri.c 	dbri->mm.ctrl[0] |= CS4215_CLB;
mm               1570 sound/sparc/dbri.c 	xmit_fixed(dbri, 17, *(int *)dbri->mm.ctrl);
mm               1604 sound/sparc/dbri.c 		dbri->mm.ctrl[1] = CS4215_DFR_ULAW;
mm               1605 sound/sparc/dbri.c 		dbri->mm.precision = 8;
mm               1608 sound/sparc/dbri.c 		dbri->mm.ctrl[1] = CS4215_DFR_ALAW;
mm               1609 sound/sparc/dbri.c 		dbri->mm.precision = 8;
mm               1612 sound/sparc/dbri.c 		dbri->mm.ctrl[1] = CS4215_DFR_LINEAR8;
mm               1613 sound/sparc/dbri.c 		dbri->mm.precision = 8;
mm               1616 sound/sparc/dbri.c 		dbri->mm.ctrl[1] = CS4215_DFR_LINEAR16;
mm               1617 sound/sparc/dbri.c 		dbri->mm.precision = 16;
mm               1625 sound/sparc/dbri.c 	dbri->mm.ctrl[1] |= CS4215_FREQ[freq_idx].csval;
mm               1626 sound/sparc/dbri.c 	dbri->mm.ctrl[2] = CS4215_XCLK |
mm               1629 sound/sparc/dbri.c 	dbri->mm.channels = channels;
mm               1631 sound/sparc/dbri.c 		dbri->mm.ctrl[1] |= CS4215_DFR_STEREO;
mm               1651 sound/sparc/dbri.c 		dbri->mm.onboard = 1;
mm               1655 sound/sparc/dbri.c 		dbri->mm.onboard = 0;
mm               1670 sound/sparc/dbri.c 	cs4215_init_data(&dbri->mm);
mm               1673 sound/sparc/dbri.c 	recv_fixed(dbri, 18, &dbri->mm.status);
mm               1674 sound/sparc/dbri.c 	recv_fixed(dbri, 19, &dbri->mm.version);
mm               1676 sound/sparc/dbri.c 	dbri->mm.offset = dbri->mm.onboard ? 0 : 8;
mm               1677 sound/sparc/dbri.c 	if (cs4215_setctrl(dbri) == -1 || dbri->mm.version == 0xff) {
mm               1679 sound/sparc/dbri.c 			dbri->mm.offset);
mm               1682 sound/sparc/dbri.c 	dprintk(D_MM, "Found CS4215 at offset %d\n", dbri->mm.offset);
mm               2354 sound/sparc/dbri.c 		    (dbri->mm.data[elem] >> shift) & mask;
mm               2357 sound/sparc/dbri.c 		    (dbri->mm.ctrl[elem - 4] >> shift) & mask;
mm               2385 sound/sparc/dbri.c 		dbri->mm.data[elem] = (dbri->mm.data[elem] &
mm               2387 sound/sparc/dbri.c 		changed = (val != dbri->mm.data[elem]);
mm               2389 sound/sparc/dbri.c 		dbri->mm.ctrl[elem - 4] = (dbri->mm.ctrl[elem - 4] &
mm               2391 sound/sparc/dbri.c 		changed = (val != dbri->mm.ctrl[elem - 4]);
mm               2397 sound/sparc/dbri.c 		dbri->mm.data[elem & 3]);
mm               2656 sound/sparc/dbri.c 	       dbri->irq, op->dev.of_node->name[9], dbri->mm.version);
mm                768 tools/perf/arch/arm/util/cs-etm.c 				int idx, struct auxtrace_mmap *mm,
mm                794 tools/perf/arch/arm/util/cs-etm.c 	if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
mm                800 tools/perf/arch/arm/util/cs-etm.c 		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);
mm                810 tools/perf/arch/arm/util/cs-etm.c 	if (*head >= mm->len) {
mm                811 tools/perf/arch/arm/util/cs-etm.c 		*old = *head - mm->len;
mm                813 tools/perf/arch/arm/util/cs-etm.c 		*head += mm->len;
mm                814 tools/perf/arch/arm/util/cs-etm.c 		*old = *head - mm->len;
mm                360 tools/perf/arch/x86/util/intel-bts.c 				   struct auxtrace_mmap *mm, unsigned char *data,
mm                378 tools/perf/arch/x86/util/intel-bts.c 	if (!wrapped && intel_bts_first_wrap((u64 *)data, mm->len)) {
mm                391 tools/perf/arch/x86/util/intel-bts.c 		*head += mm->len;
mm                393 tools/perf/arch/x86/util/intel-bts.c 		if (mm->mask)
mm                394 tools/perf/arch/x86/util/intel-bts.c 			*old &= mm->mask;
mm                396 tools/perf/arch/x86/util/intel-bts.c 			*old %= mm->len;
mm                398 tools/perf/arch/x86/util/intel-bts.c 			*head += mm->len;
mm                992 tools/perf/arch/x86/util/intel-pt.c 			     struct auxtrace_mmap *mm, unsigned char *data,
mm                999 tools/perf/arch/x86/util/intel-pt.c 				       ptr->snapshot_ref_buf_size, mm->len,
mm               1002 tools/perf/arch/x86/util/intel-pt.c 	intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
mm               1026 tools/perf/arch/x86/util/intel-pt.c 				  struct auxtrace_mmap *mm, unsigned char *data,
mm               1037 tools/perf/arch/x86/util/intel-pt.c 	err = intel_pt_snapshot_init(ptr, mm->len);
mm               1049 tools/perf/arch/x86/util/intel-pt.c 			err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
mm               1053 tools/perf/arch/x86/util/intel-pt.c 		wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
mm               1056 tools/perf/arch/x86/util/intel-pt.c 		if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
mm               1070 tools/perf/arch/x86/util/intel-pt.c 		*head += mm->len;
mm               1072 tools/perf/arch/x86/util/intel-pt.c 		if (mm->mask)
mm               1073 tools/perf/arch/x86/util/intel-pt.c 			*old &= mm->mask;
mm               1075 tools/perf/arch/x86/util/intel-pt.c 			*old %= mm->len;
mm               1077 tools/perf/arch/x86/util/intel-pt.c 			*head += mm->len;
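
intel-bts.c and intel-pt.c share the normalisation step visible at lines 393-398 and 1072-1077: offsets into a power-of-2 AUX buffer are reduced with the cheap mask (mm->mask), any other size falls back to a modulo on mm->len. The reduction pulled out as a helper; the surrounding head adjustment (lines 391/398 and 1070/1077) differs per PMU and is left to the callers:

	#include <stdint.h>

	/* Reduce a free-running byte counter into [0, len): mask when the
	 * buffer is a power of 2 (mask == len - 1), else modulo. */
	static uint64_t wrap_offset(uint64_t off, uint64_t len, uint64_t mask)
	{
		return mask ? (off & mask) : (off % len);
	}
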
mm                 66 tools/perf/util/auxtrace.c int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
mm                 72 tools/perf/util/auxtrace.c 	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
mm                 74 tools/perf/util/auxtrace.c 	mm->userpg = userpg;
mm                 75 tools/perf/util/auxtrace.c 	mm->mask = mp->mask;
mm                 76 tools/perf/util/auxtrace.c 	mm->len = mp->len;
mm                 77 tools/perf/util/auxtrace.c 	mm->prev = 0;
mm                 78 tools/perf/util/auxtrace.c 	mm->idx = mp->idx;
mm                 79 tools/perf/util/auxtrace.c 	mm->tid = mp->tid;
mm                 80 tools/perf/util/auxtrace.c 	mm->cpu = mp->cpu;
mm                 83 tools/perf/util/auxtrace.c 		mm->base = NULL;
mm                 95 tools/perf/util/auxtrace.c 	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
mm                 96 tools/perf/util/auxtrace.c 	if (mm->base == MAP_FAILED) {
mm                 98 tools/perf/util/auxtrace.c 		mm->base = NULL;
mm                105 tools/perf/util/auxtrace.c void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
mm                107 tools/perf/util/auxtrace.c 	if (mm->base) {
mm                108 tools/perf/util/auxtrace.c 		munmap(mm->base, mm->len);
mm                109 tools/perf/util/auxtrace.c 		mm->base = NULL;
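
auxtrace_mmap__mmap() and auxtrace_mmap__munmap() (util/auxtrace.c lines 66-109) follow the usual mmap lifecycle: map MAP_SHARED, fold MAP_FAILED into a NULL base so callers have a single "no mapping" state, and null the pointer again after munmap. The same shape as a userspace sketch; struct aux_area is a stand-in for struct auxtrace_mmap, not a perf type:

	#include <stddef.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/types.h>

	struct aux_area {
		void	*base;
		size_t	len;
	};

	static int aux_area_map(struct aux_area *a, int fd, size_t len,
				off_t off)
	{
		a->len = len;
		a->base = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, off);
		if (a->base == MAP_FAILED) {
			perror("mmap");
			a->base = NULL;	/* callers test base, never MAP_FAILED */
			return -1;
		}
		return 0;
	}

	static void aux_area_unmap(struct aux_area *a)
	{
		if (a->base) {
			munmap(a->base, a->len);
			a->base = NULL;
		}
	}
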
mm                553 tools/perf/util/auxtrace.c 				   struct auxtrace_mmap *mm,
mm                557 tools/perf/util/auxtrace.c 		return itr->find_snapshot(itr, idx, mm, data, head, old);
mm               1236 tools/perf/util/auxtrace.c 	struct auxtrace_mmap *mm = &map->auxtrace_mmap;
mm               1237 tools/perf/util/auxtrace.c 	u64 head, old = mm->prev, offset, ref;
mm               1238 tools/perf/util/auxtrace.c 	unsigned char *data = mm->base;
mm               1244 tools/perf/util/auxtrace.c 		head = auxtrace_mmap__read_snapshot_head(mm);
mm               1245 tools/perf/util/auxtrace.c 		if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
mm               1249 tools/perf/util/auxtrace.c 		head = auxtrace_mmap__read_head(mm);
mm               1256 tools/perf/util/auxtrace.c 		  mm->idx, old, head, head - old);
mm               1258 tools/perf/util/auxtrace.c 	if (mm->mask) {
mm               1259 tools/perf/util/auxtrace.c 		head_off = head & mm->mask;
mm               1260 tools/perf/util/auxtrace.c 		old_off = old & mm->mask;
mm               1262 tools/perf/util/auxtrace.c 		head_off = head % mm->len;
mm               1263 tools/perf/util/auxtrace.c 		old_off = old % mm->len;
mm               1269 tools/perf/util/auxtrace.c 		size = mm->len - (old_off - head_off);
mm               1276 tools/perf/util/auxtrace.c 	if (head > old || size <= head || mm->mask) {
mm               1284 tools/perf/util/auxtrace.c 		u64 rem = (0ULL - mm->len) % mm->len;
mm               1291 tools/perf/util/auxtrace.c 		data1 = &data[mm->len - len1];
mm               1319 tools/perf/util/auxtrace.c 	ev.auxtrace.idx = mm->idx;
mm               1320 tools/perf/util/auxtrace.c 	ev.auxtrace.tid = mm->tid;
mm               1321 tools/perf/util/auxtrace.c 	ev.auxtrace.cpu = mm->cpu;
mm               1326 tools/perf/util/auxtrace.c 	mm->prev = head;
mm               1329 tools/perf/util/auxtrace.c 		auxtrace_mmap__write_tail(mm, head);
mm               1333 tools/perf/util/auxtrace.c 			err = itr->read_finish(itr, mm->idx);
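
__auxtrace_mmap__read() (lines 1236-1333) turns the two free-running counters into an in-buffer range: head and old are reduced to offsets, and the readable span wraps past the end of the buffer whenever old's offset is at or ahead of head's. The offset and size computation in isolation; coinciding offsets yield a full buffer, matching the else branch at line 1269:

	#include <stdint.h>

	/* Bytes readable between the old and head counters of a len-byte
	 * ring; mask is len - 1 for power-of-2 buffers, else 0. */
	static uint64_t ring_bytes_ready(uint64_t head, uint64_t old,
					 uint64_t len, uint64_t mask)
	{
		uint64_t head_off = mask ? (head & mask) : (head % len);
		uint64_t old_off  = mask ? (old  & mask) : (old  % len);

		if (head_off > old_off)
			return head_off - old_off;
		return len - (old_off - head_off);	/* wrapped span */
	}
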
mm                331 tools/perf/util/auxtrace.h 			     struct auxtrace_mmap *mm, unsigned char *data,
mm                393 tools/perf/util/auxtrace.h static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
mm                395 tools/perf/util/auxtrace.h 	struct perf_event_mmap_page *pc = mm->userpg;
mm                403 tools/perf/util/auxtrace.h static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
mm                405 tools/perf/util/auxtrace.h 	struct perf_event_mmap_page *pc = mm->userpg;
mm                417 tools/perf/util/auxtrace.h static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
mm                419 tools/perf/util/auxtrace.h 	struct perf_event_mmap_page *pc = mm->userpg;
mm                435 tools/perf/util/auxtrace.h int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
mm                438 tools/perf/util/auxtrace.h void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
mm                513 tools/perf/util/auxtrace.h 				   struct auxtrace_mmap *mm,
mm                708 tools/perf/util/auxtrace.h int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
mm                711 tools/perf/util/auxtrace.h void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
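
The auxtrace.h inlines at lines 393-419 are the consumer half of the aux_head/aux_tail ring protocol: the head load must be ordered before the data reads that follow it, and the tail store after the reads it retires, so the kernel never reuses bytes still being copied out. The same contract rendered with C11 atomics instead of the kernel's READ_ONCE/smp_rmb/smp_mb; struct aux_page stands in for perf_event_mmap_page:

	#include <stdatomic.h>
	#include <stdint.h>

	struct aux_page {
		_Atomic uint64_t aux_head;
		_Atomic uint64_t aux_tail;
	};

	/* Acquire: data reads after this cannot be hoisted above it. */
	static uint64_t aux_read_head(struct aux_page *pc)
	{
		return atomic_load_explicit(&pc->aux_head,
					    memory_order_acquire);
	}

	/* Release: data reads before this cannot sink below it. */
	static void aux_write_tail(struct aux_page *pc, uint64_t tail)
	{
		atomic_store_explicit(&pc->aux_tail, tail,
				      memory_order_release);
	}
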
mm                138 tools/perf/util/mmap.c int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
mm                146 tools/perf/util/mmap.c void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
mm                 52 tools/perf/util/mmap.h static inline u64 perf_mmap__read_head(struct mmap *mm)
mm                 54 tools/perf/util/mmap.h 	return ring_buffer_read_head(mm->core.base);
mm                944 virt/kvm/arm/mmu.c 		struct vm_area_struct *vma = find_vma(current->mm, hva);
mm                978 virt/kvm/arm/mmu.c 	down_read(&current->mm->mmap_sem);
mm                986 virt/kvm/arm/mmu.c 	up_read(&current->mm->mmap_sem);
mm               1696 virt/kvm/arm/mmu.c 	down_read(&current->mm->mmap_sem);
mm               1697 virt/kvm/arm/mmu.c 	vma = find_vma_intersection(current->mm, hva, hva + 1);
mm               1700 virt/kvm/arm/mmu.c 		up_read(&current->mm->mmap_sem);
mm               1722 virt/kvm/arm/mmu.c 	up_read(&current->mm->mmap_sem);
mm               2298 virt/kvm/arm/mmu.c 	down_read(&current->mm->mmap_sem);
mm               2312 virt/kvm/arm/mmu.c 		struct vm_area_struct *vma = find_vma(current->mm, hva);
mm               2366 virt/kvm/arm/mmu.c 	up_read(&current->mm->mmap_sem);
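
The virt/kvm/arm/mmu.c lines above all follow one locking rule: any find_vma()-style walk of a task's address space runs under the mm's mmap_sem read lock, and the returned vma is not used after the unlock. A kernel-flavoured sketch of that pattern (vma_page_size is a hypothetical helper, not KVM code):

	#include <linux/hugetlb.h>
	#include <linux/mm.h>

	static unsigned long vma_page_size(struct mm_struct *mm,
					   unsigned long hva)
	{
		struct vm_area_struct *vma;
		unsigned long size = 0;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, hva);
		if (vma && vma->vm_start <= hva)	/* hva really mapped */
			size = vma_kernel_pagesize(vma);
		up_read(&mm->mmap_sem);

		return size;	/* the vma pointer must not escape the lock */
	}
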
mm                 64 virt/kvm/async_pf.c 	struct mm_struct *mm = apf->mm;
mm                 77 virt/kvm/async_pf.c 	down_read(&mm->mmap_sem);
mm                 78 virt/kvm/async_pf.c 	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
mm                 81 virt/kvm/async_pf.c 		up_read(&mm->mmap_sem);
mm                100 virt/kvm/async_pf.c 	mmput(mm);
mm                127 virt/kvm/async_pf.c 			mmput(work->mm);
mm                191 virt/kvm/async_pf.c 	work->mm = current->mm;
mm                192 virt/kvm/async_pf.c 	mmget(work->mm);
mm                210 virt/kvm/async_pf.c 	mmput(work->mm);
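
virt/kvm/async_pf.c shows the reference rule for deferred user-memory work: the work item takes mmget() on current->mm when queued (lines 191-192) and mmput() when the work runs or is cancelled (lines 100, 127, 210), so the page tables stay usable until the worker is done. Minimal sketch; struct my_work and its helpers are hypothetical:

	#include <linux/sched/mm.h>

	struct my_work {
		struct mm_struct *mm;
	};

	static void my_work_queue(struct my_work *w)
	{
		w->mm = current->mm;
		mmget(w->mm);		/* raise mm_users: page tables stay live */
	}

	static void my_work_done(struct my_work *w)
	{
		mmput(w->mm);		/* may free the address space here */
	}
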
mm                384 virt/kvm/kvm_main.c 					      struct mm_struct *mm,
mm                396 virt/kvm/kvm_main.c 					struct mm_struct *mm,
mm                465 virt/kvm/kvm_main.c 					      struct mm_struct *mm,
mm                486 virt/kvm/kvm_main.c 					struct mm_struct *mm,
mm                516 virt/kvm/kvm_main.c 				       struct mm_struct *mm,
mm                532 virt/kvm/kvm_main.c 				     struct mm_struct *mm)
mm                556 virt/kvm/kvm_main.c 	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
mm                695 virt/kvm/kvm_main.c 	mmgrab(current->mm);
mm                696 virt/kvm/kvm_main.c 	kvm->mm = current->mm;
mm                759 virt/kvm/kvm_main.c 		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
mm                776 virt/kvm/kvm_main.c 	mmdrop(current->mm);
mm                798 virt/kvm/kvm_main.c 	struct mm_struct *mm = kvm->mm;
mm                818 virt/kvm/kvm_main.c 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
mm                831 virt/kvm/kvm_main.c 	mmdrop(mm);
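
Note the contrast with async_pf above: kvm_main.c pins current->mm with mmgrab()/mmdrop() (lines 695, 776, 831), which holds only mm_count so the mm_struct itself outlives the VM, while mmget()/mmput() additionally holds mm_users and keeps the mappings usable. Side by side, as a sketch:

	#include <linux/sched/mm.h>

	static void pin_struct_only(struct mm_struct *mm)
	{
		mmgrab(mm);	/* mm_count: struct stays allocated   */
		/* safe: long-lived pointer such as kvm->mm          */
		mmdrop(mm);
	}

	static void pin_address_space(struct mm_struct *mm)
	{
		mmget(mm);	/* mm_users: mappings stay usable too */
		/* safe: get_user_pages_remote() and friends         */
		mmput(mm);
	}
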
mm               1414 virt/kvm/kvm_main.c 	down_read(&current->mm->mmap_sem);
mm               1415 virt/kvm/kvm_main.c 	vma = find_vma(current->mm, addr);
mm               1422 virt/kvm/kvm_main.c 	up_read(&current->mm->mmap_sem);
mm               1609 virt/kvm/kvm_main.c 		r = fixup_user_fault(current, current->mm, addr,
mm               1677 virt/kvm/kvm_main.c 	down_read(&current->mm->mmap_sem);
mm               1685 virt/kvm/kvm_main.c 	vma = find_vma_intersection(current->mm, addr, addr + 1);
mm               1701 virt/kvm/kvm_main.c 	up_read(&current->mm->mmap_sem);
mm               2882 virt/kvm/kvm_main.c 	if (vcpu->kvm->mm != current->mm)
mm               3089 virt/kvm/kvm_main.c 	if (vcpu->kvm->mm != current->mm)
mm               3154 virt/kvm/kvm_main.c 	if (dev->kvm->mm != current->mm)
mm               3355 virt/kvm/kvm_main.c 	if (kvm->mm != current->mm)
mm               3553 virt/kvm/kvm_main.c 	if (kvm->mm != current->mm)
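
The recurring kvm_main.c guard in the last lines rejects any VM, vcpu or device ioctl issued from an address space other than the one the VM was created in, for example through an fd passed to another process; in the source the rejection is -EIO. As a one-line helper (hypothetical name, same test as the listing):

	#include <linux/errno.h>
	#include <linux/kvm_host.h>

	static int kvm_ioctl_same_mm_check(struct kvm *kvm)
	{
		if (kvm->mm != current->mm)	/* fd crossed processes */
			return -EIO;
		return 0;
	}
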