Searched refs:mm (Results 1 - 200 of 2856) sorted by relevance


/linux-4.4.14/arch/arm/xen/
Makefile
1 obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
/linux-4.4.14/mm/
mmu_context.c
6 #include <linux/mm.h>
16 * mm context.
20 void use_mm(struct mm_struct *mm) use_mm() argument
27 if (active_mm != mm) { use_mm()
28 atomic_inc(&mm->mm_count); use_mm()
29 tsk->active_mm = mm; use_mm()
31 tsk->mm = mm; use_mm()
32 switch_mm(active_mm, mm, tsk); use_mm()
38 if (active_mm != mm) use_mm()
46 * specified mm context which was earlier taken on
51 void unuse_mm(struct mm_struct *mm) unuse_mm() argument
56 sync_mm_rss(mm); unuse_mm()
57 tsk->mm = NULL; unuse_mm()
58 /* active_mm is still 'mm' */ unuse_mm()
59 enter_lazy_tlb(mm, tsk); unuse_mm()
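The mm/mmu_context.c hits above are use_mm()/unuse_mm(), the pair a kernel thread uses to temporarily adopt a user address space. The sketch below is not taken from these results: the worker name, the pr_warn message and the way 'mm' was pinned are assumptions, shown only to illustrate the calling pattern.

    #include <linux/kernel.h>
    #include <linux/mmu_context.h>
    #include <linux/sched.h>
    #include <linux/uaccess.h>

    /* Hypothetical worker. It must run in a kernel thread that has no mm of
     * its own, and 'mm' must already be pinned (e.g. via get_task_mm() or an
     * mm_users reference taken when the work was queued). */
    static void example_adopt_mm(struct mm_struct *mm, int __user *ubuf)
    {
            int value = 42;

            use_mm(mm);                     /* tsk->mm = mm, switch_mm() */
            if (copy_to_user(ubuf, &value, sizeof(value)))
                    pr_warn("example_adopt_mm: copy_to_user failed\n");
            unuse_mm(mm);                   /* tsk->mm = NULL, lazy TLB */
    }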
debug.c
2 * mm/debug.c
4 * mm/ specific debug routines.
9 #include <linux/mm.h>
157 "next %p prev %p mm %p\n" dump_vma()
169 void dump_mm(const struct mm_struct *mm) dump_mm() argument
171 pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n" dump_mm()
201 mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, dump_mm()
203 mm->get_unmapped_area, dump_mm()
205 mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, dump_mm()
206 mm->pgd, atomic_read(&mm->mm_users), dump_mm()
207 atomic_read(&mm->mm_count), dump_mm()
208 atomic_long_read((atomic_long_t *)&mm->nr_ptes), dump_mm()
209 mm_nr_pmds((struct mm_struct *)mm), dump_mm()
210 mm->map_count, dump_mm()
211 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, dump_mm()
212 mm->pinned_vm, mm->shared_vm, mm->exec_vm, mm->stack_vm, dump_mm()
213 mm->start_code, mm->end_code, mm->start_data, mm->end_data, dump_mm()
214 mm->start_brk, mm->brk, mm->start_stack, dump_mm()
215 mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, dump_mm()
216 mm->binfmt, mm->flags, mm->core_state, dump_mm()
218 mm->ioctx_table, dump_mm()
221 mm->owner, dump_mm()
223 mm->exe_file, dump_mm()
225 mm->mmu_notifier_mm, dump_mm()
228 mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, dump_mm()
231 mm->tlb_flush_pending, dump_mm()
236 dump_flags(mm->def_flags, vmaflags_names, dump_mm()
vmacache.c
5 #include <linux/mm.h>
9 * Flush vma caches for threads that share a given mm.
16 void vmacache_flush_all(struct mm_struct *mm) vmacache_flush_all() argument
25 * since the mm's seqnum was increased and don't have vmacache_flush_all()
29 if (atomic_read(&mm->mm_users) == 1) vmacache_flush_all()
36 * mm seqnum is already set and curr's will for_each_process_thread()
40 if (mm == p->mm) for_each_process_thread()
47 * This task may be accessing a foreign mm via (for example)
49 * task's vmacache pertains to a different mm (ie, its own). There is
52 * Also handle the case where a kernel thread has adopted this mm via use_mm().
53 * That kernel thread's vmacache is not applicable to this mm.
55 static inline bool vmacache_valid_mm(struct mm_struct *mm) vmacache_valid_mm() argument
57 return current->mm == mm && !(current->flags & PF_KTHREAD); vmacache_valid_mm()
66 static bool vmacache_valid(struct mm_struct *mm) vmacache_valid() argument
70 if (!vmacache_valid_mm(mm)) vmacache_valid()
74 if (mm->vmacache_seqnum != curr->vmacache_seqnum) { vmacache_valid()
79 curr->vmacache_seqnum = mm->vmacache_seqnum; vmacache_valid()
86 struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr) vmacache_find() argument
90 if (!vmacache_valid(mm)) vmacache_find()
100 if (WARN_ON_ONCE(vma->vm_mm != mm)) vmacache_find()
112 struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, vmacache_find_exact() argument
118 if (!vmacache_valid(mm)) vmacache_find_exact()
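vmacache.c above implements the small per-task cache of recently used VMAs that is checked before the mm rbtree. As a rough, trimmed sketch of the caller side (modelled on find_vma() in this tree, with the rbtree walk elided and replaced by a stub), the lookup/refill pattern is:

    #include <linux/mm.h>
    #include <linux/vmacache.h>

    struct vm_area_struct *sketch_find_vma(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;

            vma = vmacache_find(mm, addr);          /* fast path: per-task cache */
            if (likely(vma))
                    return vma;

            vma = NULL;                             /* ... walk mm->mm_rb here ... */
            if (vma)
                    vmacache_update(addr, vma);     /* remember the result */
            return vma;
    }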
mmu_notifier.c
2 * linux/mm/mmu_notifier.c
15 #include <linux/mm.h>
46 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
48 * in parallel despite there being no task using this mm any more,
56 void __mmu_notifier_release(struct mm_struct *mm) __mmu_notifier_release() argument
66 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) __mmu_notifier_release()
71 * sptes before all the pages in the mm are freed. __mmu_notifier_release()
74 mn->ops->release(mn, mm); __mmu_notifier_release()
76 spin_lock(&mm->mmu_notifier_mm->lock); __mmu_notifier_release()
77 while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { __mmu_notifier_release()
78 mn = hlist_entry(mm->mmu_notifier_mm->list.first, __mmu_notifier_release()
89 spin_unlock(&mm->mmu_notifier_mm->lock); __mmu_notifier_release()
94 * exit_mmap (which would proceed with freeing all pages in the mm) __mmu_notifier_release()
109 int __mmu_notifier_clear_flush_young(struct mm_struct *mm, __mmu_notifier_clear_flush_young() argument
117 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { __mmu_notifier_clear_flush_young()
119 young |= mn->ops->clear_flush_young(mn, mm, start, end); __mmu_notifier_clear_flush_young()
126 int __mmu_notifier_clear_young(struct mm_struct *mm, __mmu_notifier_clear_young() argument
134 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { __mmu_notifier_clear_young()
136 young |= mn->ops->clear_young(mn, mm, start, end); __mmu_notifier_clear_young()
143 int __mmu_notifier_test_young(struct mm_struct *mm, __mmu_notifier_test_young() argument
150 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { __mmu_notifier_test_young()
152 young = mn->ops->test_young(mn, mm, address); __mmu_notifier_test_young()
162 void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, __mmu_notifier_change_pte() argument
169 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { __mmu_notifier_change_pte()
171 mn->ops->change_pte(mn, mm, address, pte); __mmu_notifier_change_pte()
176 void __mmu_notifier_invalidate_page(struct mm_struct *mm, __mmu_notifier_invalidate_page() argument
183 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { __mmu_notifier_invalidate_page()
185 mn->ops->invalidate_page(mn, mm, address); __mmu_notifier_invalidate_page()
190 void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, __mmu_notifier_invalidate_range_start() argument
197 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { __mmu_notifier_invalidate_range_start()
199 mn->ops->invalidate_range_start(mn, mm, start, end); __mmu_notifier_invalidate_range_start()
205 void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, __mmu_notifier_invalidate_range_end() argument
212 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { __mmu_notifier_invalidate_range_end()
222 mn->ops->invalidate_range(mn, mm, start, end); __mmu_notifier_invalidate_range_end()
224 mn->ops->invalidate_range_end(mn, mm, start, end); __mmu_notifier_invalidate_range_end()
230 void __mmu_notifier_invalidate_range(struct mm_struct *mm, __mmu_notifier_invalidate_range() argument
237 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { __mmu_notifier_invalidate_range()
239 mn->ops->invalidate_range(mn, mm, start, end); __mmu_notifier_invalidate_range()
246 struct mm_struct *mm, do_mmu_notifier_register()
252 BUG_ON(atomic_read(&mm->mm_users) <= 0); do_mmu_notifier_register()
266 down_write(&mm->mmap_sem); do_mmu_notifier_register()
267 ret = mm_take_all_locks(mm); do_mmu_notifier_register()
271 if (!mm_has_notifiers(mm)) { do_mmu_notifier_register()
275 mm->mmu_notifier_mm = mmu_notifier_mm; do_mmu_notifier_register()
278 atomic_inc(&mm->mm_count); do_mmu_notifier_register()
284 * current->mm or explicitly with get_task_mm() or similar). do_mmu_notifier_register()
288 spin_lock(&mm->mmu_notifier_mm->lock); do_mmu_notifier_register()
289 hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list); do_mmu_notifier_register()
290 spin_unlock(&mm->mmu_notifier_mm->lock); do_mmu_notifier_register()
292 mm_drop_all_locks(mm); do_mmu_notifier_register()
295 up_write(&mm->mmap_sem); do_mmu_notifier_register()
298 BUG_ON(atomic_read(&mm->mm_users) <= 0); do_mmu_notifier_register()
306 * so mm has to be current->mm or the mm should be pinned safely such
307 * as with get_task_mm(). If the mm is not current->mm, the mm_users
315 int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm) mmu_notifier_register() argument
317 return do_mmu_notifier_register(mn, mm, 1); mmu_notifier_register()
325 int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm) __mmu_notifier_register() argument
327 return do_mmu_notifier_register(mn, mm, 0); __mmu_notifier_register()
332 void __mmu_notifier_mm_destroy(struct mm_struct *mm) __mmu_notifier_mm_destroy() argument
334 BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list)); __mmu_notifier_mm_destroy()
335 kfree(mm->mmu_notifier_mm); __mmu_notifier_mm_destroy()
336 mm->mmu_notifier_mm = LIST_POISON1; /* debug */ __mmu_notifier_mm_destroy()
340 * This releases the mm_count pin automatically and frees the mm
349 void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) mmu_notifier_unregister() argument
351 BUG_ON(atomic_read(&mm->mm_count) <= 0); mmu_notifier_unregister()
366 mn->ops->release(mn, mm); mmu_notifier_unregister()
369 spin_lock(&mm->mmu_notifier_mm->lock); mmu_notifier_unregister()
375 spin_unlock(&mm->mmu_notifier_mm->lock); mmu_notifier_unregister()
384 BUG_ON(atomic_read(&mm->mm_count) <= 0); mmu_notifier_unregister()
386 mmdrop(mm); mmu_notifier_unregister()
394 struct mm_struct *mm) mmu_notifier_unregister_no_release()
396 spin_lock(&mm->mmu_notifier_mm->lock); mmu_notifier_unregister_no_release()
402 spin_unlock(&mm->mmu_notifier_mm->lock); mmu_notifier_unregister_no_release()
404 BUG_ON(atomic_read(&mm->mm_count) <= 0); mmu_notifier_unregister_no_release()
405 mmdrop(mm); mmu_notifier_unregister_no_release()
245 do_mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm, int take_mmap_sem) do_mmu_notifier_register() argument
393 mmu_notifier_unregister_no_release(struct mmu_notifier *mn, struct mm_struct *mm) mmu_notifier_unregister_no_release() argument
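mmu_notifier.c above implements registration and the fan-out to registered callbacks. A minimal, hypothetical consumer (the ops table, callback and function names are illustrative, not taken from the results) fills in an mmu_notifier_ops table and registers against a pinned mm:

    #include <linux/mmu_notifier.h>

    static void demo_invalidate_range_start(struct mmu_notifier *mn,
                                            struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long end)
    {
            /* tear down secondary-MMU (device/guest) mappings in [start, end) */
    }

    static const struct mmu_notifier_ops demo_mn_ops = {
            .invalidate_range_start = demo_invalidate_range_start,
    };

    static struct mmu_notifier demo_mn = {
            .ops = &demo_mn_ops,
    };

    /* 'mm' must be current->mm or pinned with get_task_mm(), as the comments
     * around do_mmu_notifier_register() above require. */
    static int demo_attach_notifier(struct mm_struct *mm)
    {
            return mmu_notifier_register(&demo_mn, mm);
    }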
msync.c
2 * linux/mm/msync.c
11 #include <linux/mm.h>
34 struct mm_struct *mm = current->mm; SYSCALL_DEFINE3() local
57 down_read(&mm->mmap_sem); SYSCALL_DEFINE3()
58 vma = find_vma(mm, start); SYSCALL_DEFINE3()
88 up_read(&mm->mmap_sem); SYSCALL_DEFINE3()
93 down_read(&mm->mmap_sem); SYSCALL_DEFINE3()
94 vma = find_vma(mm, start); SYSCALL_DEFINE3()
104 up_read(&mm->mmap_sem); SYSCALL_DEFINE3()
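msync.c above is the kernel side of the msync(2) system call. For orientation, a small user-space caller that reaches this path looks like the sketch below (the file name and lengths are made up, and error handling is omitted):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("data.bin", O_RDWR);      /* hypothetical file */
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

            memcpy(p, "hello", 5);
            msync(p, 4096, MS_SYNC);   /* enters SYSCALL_DEFINE3(msync, ...) above */
            munmap(p, 4096);
            close(fd);
            return 0;
    }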
mremap.c
2 * mm/mremap.c
10 #include <linux/mm.h>
25 #include <linux/mm-arch-hooks.h>
32 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) get_old_pmd() argument
38 pgd = pgd_offset(mm, addr); get_old_pmd()
53 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, alloc_new_pmd() argument
60 pgd = pgd_offset(mm, addr); alloc_new_pmd()
61 pud = pud_alloc(mm, pgd, addr); alloc_new_pmd()
65 pmd = pmd_alloc(mm, pud, addr); alloc_new_pmd()
96 struct mm_struct *mm = vma->vm_mm; move_ptes() local
133 old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); move_ptes()
135 new_ptl = pte_lockptr(mm, new_pmd); move_ptes()
144 pte = ptep_get_and_clear(mm, old_addr, old_pte); move_ptes()
147 set_pte_at(mm, new_addr, new_pte, pte); move_ptes()
240 struct mm_struct *mm = vma->vm_mm; move_vma() local
255 if (mm->map_count >= sysctl_max_map_count - 3) move_vma()
297 arch_remap(mm, old_addr, old_addr + old_len, move_vma()
319 hiwater_vm = mm->hiwater_vm; move_vma()
320 vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); move_vma()
322 if (do_munmap(mm, old_addr, old_len) < 0) { move_vma()
327 mm->hiwater_vm = hiwater_vm; move_vma()
337 mm->locked_vm += new_len >> PAGE_SHIFT; move_vma()
347 struct mm_struct *mm = current->mm; vma_to_resize() local
348 struct vm_area_struct *vma = find_vma(mm, addr); vma_to_resize()
375 locked = mm->locked_vm << PAGE_SHIFT; vma_to_resize()
382 if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) vma_to_resize()
387 if (security_vm_enough_memory_mm(mm, charged)) vma_to_resize()
398 struct mm_struct *mm = current->mm; mremap_to() local
414 ret = do_munmap(mm, new_addr, new_len); mremap_to()
419 ret = do_munmap(mm, addr+new_len, old_len - new_len); mremap_to()
475 struct mm_struct *mm = current->mm; SYSCALL_DEFINE5() local
501 down_write(&current->mm->mmap_sem); SYSCALL_DEFINE5()
515 ret = do_munmap(mm, addr+new_len, old_len - new_len); SYSCALL_DEFINE5()
544 vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages); SYSCALL_DEFINE5()
546 mm->locked_vm += pages; SYSCALL_DEFINE5()
581 up_write(&current->mm->mmap_sem); SYSCALL_DEFINE5()
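mremap.c above is the implementation behind mremap(2): alloc_new_pmd()/move_ptes() move the page tables and move_vma() fixes up the VMA and accounting. A minimal user-space caller that exercises the relocating path (sizes are arbitrary):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t old_len = 4096, new_len = 8192;
            void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            /* MREMAP_MAYMOVE lets the kernel relocate the mapping, which is what
             * drives the move_vma()/move_ptes() path listed above. */
            void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
            if (q == MAP_FAILED)
                    perror("mremap");
            return 0;
    }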
pgtable-generic.c
2 * mm/pgtable-generic.c
76 struct mm_struct *mm = (vma)->vm_mm; ptep_clear_flush() local
78 pte = ptep_get_and_clear(mm, address, ptep); ptep_clear_flush()
79 if (pte_accessible(mm, pte)) ptep_clear_flush()
155 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_trans_huge_deposit() argument
158 assert_spin_locked(pmd_lockptr(mm, pmdp)); pgtable_trans_huge_deposit()
161 if (!pmd_huge_pte(mm, pmdp)) pgtable_trans_huge_deposit()
164 list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru); pgtable_trans_huge_deposit()
165 pmd_huge_pte(mm, pmdp) = pgtable; pgtable_trans_huge_deposit()
171 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) pgtable_trans_huge_withdraw() argument
175 assert_spin_locked(pmd_lockptr(mm, pmdp)); pgtable_trans_huge_withdraw()
178 pgtable = pmd_huge_pte(mm, pmdp); pgtable_trans_huge_withdraw()
180 pmd_huge_pte(mm, pmdp) = NULL; pgtable_trans_huge_withdraw()
182 pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next, pgtable_trans_huge_withdraw()
/linux-4.4.14/include/linux/
mmu_context.h
6 void use_mm(struct mm_struct *mm);
7 void unuse_mm(struct mm_struct *mm);
vmacache.h
5 #include <linux/mm.h>
18 extern void vmacache_flush_all(struct mm_struct *mm);
20 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
24 extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
29 static inline void vmacache_invalidate(struct mm_struct *mm) vmacache_invalidate() argument
31 mm->vmacache_seqnum++; vmacache_invalidate()
34 if (unlikely(mm->vmacache_seqnum == 0)) vmacache_invalidate()
35 vmacache_flush_all(mm); vmacache_invalidate()
khugepaged.h
7 extern int __khugepaged_enter(struct mm_struct *mm);
8 extern void __khugepaged_exit(struct mm_struct *mm);
26 static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) khugepaged_fork() argument
29 return __khugepaged_enter(mm); khugepaged_fork()
33 static inline void khugepaged_exit(struct mm_struct *mm) khugepaged_exit() argument
35 if (test_bit(MMF_VM_HUGEPAGE, &mm->flags)) khugepaged_exit()
36 __khugepaged_exit(mm); khugepaged_exit()
51 static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) khugepaged_fork() argument
55 static inline void khugepaged_exit(struct mm_struct *mm) khugepaged_exit() argument
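khugepaged.h above wires khugepaged into fork and exit through the MMF_VM_HUGEPAGE mm flag. When transparent huge pages are in madvise mode, one common way an mm ends up with that flag is madvise(MADV_HUGEPAGE) from user space; an illustrative caller (sizes arbitrary, error handling omitted):

    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4 << 20;   /* 4 MiB, arbitrary */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            /* Request transparent huge pages for this range; on success the mm
             * is registered with khugepaged (see __khugepaged_enter above). */
            madvise(p, len, MADV_HUGEPAGE);
            return 0;
    }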
elf-randomize.h
12 # define arch_randomize_brk(mm) (mm->brk)
16 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
mm-arch-hooks.h
2 * Generic mm no-op hooks.
14 #include <asm/mm-arch-hooks.h>
17 static inline void arch_remap(struct mm_struct *mm, arch_remap() argument
mmu_notifier.h
16 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
21 /* all mmu notifiers registered in this mm are queued in this list */
29 * Called either by mmu_notifier_unregister or when the mm is
32 * methods (the ones invoked outside the mm context) and it
37 * tsk->mm == mm exits.
44 * last thread of this mm quits, you've also to be sure that
52 struct mm_struct *mm);
64 struct mm_struct *mm,
74 struct mm_struct *mm,
85 struct mm_struct *mm,
93 struct mm_struct *mm,
105 struct mm_struct *mm,
152 struct mm_struct *mm,
155 struct mm_struct *mm,
178 void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
198 static inline int mm_has_notifiers(struct mm_struct *mm) mm_has_notifiers() argument
200 return unlikely(mm->mmu_notifier_mm); mm_has_notifiers()
204 struct mm_struct *mm);
206 struct mm_struct *mm);
208 struct mm_struct *mm);
210 struct mm_struct *mm);
211 extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
212 extern void __mmu_notifier_release(struct mm_struct *mm);
213 extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
216 extern int __mmu_notifier_clear_young(struct mm_struct *mm,
219 extern int __mmu_notifier_test_young(struct mm_struct *mm,
221 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
223 extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
225 extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
227 extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
229 extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
232 static inline void mmu_notifier_release(struct mm_struct *mm) mmu_notifier_release() argument
234 if (mm_has_notifiers(mm)) mmu_notifier_release()
235 __mmu_notifier_release(mm); mmu_notifier_release()
238 static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, mmu_notifier_clear_flush_young() argument
242 if (mm_has_notifiers(mm)) mmu_notifier_clear_flush_young()
243 return __mmu_notifier_clear_flush_young(mm, start, end); mmu_notifier_clear_flush_young()
247 static inline int mmu_notifier_clear_young(struct mm_struct *mm, mmu_notifier_clear_young() argument
251 if (mm_has_notifiers(mm)) mmu_notifier_clear_young()
252 return __mmu_notifier_clear_young(mm, start, end); mmu_notifier_clear_young()
256 static inline int mmu_notifier_test_young(struct mm_struct *mm, mmu_notifier_test_young() argument
259 if (mm_has_notifiers(mm)) mmu_notifier_test_young()
260 return __mmu_notifier_test_young(mm, address); mmu_notifier_test_young()
264 static inline void mmu_notifier_change_pte(struct mm_struct *mm, mmu_notifier_change_pte() argument
267 if (mm_has_notifiers(mm)) mmu_notifier_change_pte()
268 __mmu_notifier_change_pte(mm, address, pte); mmu_notifier_change_pte()
271 static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, mmu_notifier_invalidate_page() argument
274 if (mm_has_notifiers(mm)) mmu_notifier_invalidate_page()
275 __mmu_notifier_invalidate_page(mm, address); mmu_notifier_invalidate_page()
278 static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, mmu_notifier_invalidate_range_start() argument
281 if (mm_has_notifiers(mm)) mmu_notifier_invalidate_range_start()
282 __mmu_notifier_invalidate_range_start(mm, start, end); mmu_notifier_invalidate_range_start()
285 static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, mmu_notifier_invalidate_range_end() argument
288 if (mm_has_notifiers(mm)) mmu_notifier_invalidate_range_end()
289 __mmu_notifier_invalidate_range_end(mm, start, end); mmu_notifier_invalidate_range_end()
292 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, mmu_notifier_invalidate_range() argument
295 if (mm_has_notifiers(mm)) mmu_notifier_invalidate_range()
296 __mmu_notifier_invalidate_range(mm, start, end); mmu_notifier_invalidate_range()
299 static inline void mmu_notifier_mm_init(struct mm_struct *mm) mmu_notifier_mm_init() argument
301 mm->mmu_notifier_mm = NULL; mmu_notifier_mm_init()
304 static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) mmu_notifier_mm_destroy() argument
306 if (mm_has_notifiers(mm)) mmu_notifier_mm_destroy()
307 __mmu_notifier_mm_destroy(mm); mmu_notifier_mm_destroy()
422 static inline void mmu_notifier_release(struct mm_struct *mm) mmu_notifier_release() argument
426 static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, mmu_notifier_clear_flush_young() argument
433 static inline int mmu_notifier_test_young(struct mm_struct *mm, mmu_notifier_test_young() argument
439 static inline void mmu_notifier_change_pte(struct mm_struct *mm, mmu_notifier_change_pte() argument
444 static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, mmu_notifier_invalidate_page() argument
449 static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, mmu_notifier_invalidate_range_start() argument
454 static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, mmu_notifier_invalidate_range_end() argument
459 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, mmu_notifier_invalidate_range() argument
464 static inline void mmu_notifier_mm_init(struct mm_struct *mm) mmu_notifier_mm_init() argument
468 static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) mmu_notifier_mm_destroy() argument
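The inline wrappers in mmu_notifier.h all follow one shape: check mm_has_notifiers() and only then call the out-of-line __mmu_notifier_*() helper, so the common no-notifier case stays cheap. Core mm code brackets page-table changes with the start/end pair; a compressed sketch of that calling pattern (the function and the elided body are placeholders, not code from the listing):

    #include <linux/mmu_notifier.h>

    /* Placeholder for a core-mm unmap path; real callers are functions such as
     * the zap/unmap helpers in mm/memory.c. */
    static void sketch_unmap_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
    {
            mmu_notifier_invalidate_range_start(mm, start, end);
            /* ... clear the PTEs and flush the primary TLB for [start, end) ... */
            mmu_notifier_invalidate_range_end(mm, start, end);
    }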
hugetlb_inline.h
6 #include <linux/mm.h>
ksm.h
11 #include <linux/mm.h>
22 int __ksm_enter(struct mm_struct *mm);
23 void __ksm_exit(struct mm_struct *mm);
25 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) ksm_fork() argument
28 return __ksm_enter(mm); ksm_fork()
32 static inline void ksm_exit(struct mm_struct *mm) ksm_exit() argument
34 if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) ksm_exit()
35 __ksm_exit(mm); ksm_exit()
69 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) ksm_fork() argument
74 static inline void ksm_exit(struct mm_struct *mm) ksm_exit() argument
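ksm.h above mirrors the khugepaged hooks: ksm_fork()/ksm_exit() key off MMF_VM_MERGEABLE, which __ksm_enter() sets once a range has been opted into KSM. The user-space trigger is madvise(MADV_MERGEABLE); an illustrative caller (size arbitrary, error handling omitted):

    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 1 << 20;   /* 1 MiB, arbitrary */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            /* Opt this range into KSM merging; __ksm_enter() (listed above) then
             * sets MMF_VM_MERGEABLE on the mm. */
            madvise(p, len, MADV_MERGEABLE);
            return 0;
    }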
/linux-4.4.14/arch/s390/include/asm/
mmu_context.h
16 struct mm_struct *mm) init_new_context()
18 spin_lock_init(&mm->context.list_lock); init_new_context()
19 INIT_LIST_HEAD(&mm->context.pgtable_list); init_new_context()
20 INIT_LIST_HEAD(&mm->context.gmap_list); init_new_context()
21 cpumask_clear(&mm->context.cpu_attach_mask); init_new_context()
22 atomic_set(&mm->context.attach_count, 0); init_new_context()
23 mm->context.flush_mm = 0; init_new_context()
25 mm->context.alloc_pgste = page_table_allocate_pgste; init_new_context()
26 mm->context.has_pgste = 0; init_new_context()
27 mm->context.use_skey = 0; init_new_context()
29 switch (mm->context.asce_limit) { init_new_context()
33 * mm->pgd init_new_context()
37 mm->context.asce_limit = STACK_TOP_MAX; init_new_context()
38 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | init_new_context()
42 /* forked 4-level task, set new asce with new mm->pgd */ init_new_context()
43 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | init_new_context()
47 /* forked 2-level compat task, set new asce with new mm->pgd */ init_new_context()
48 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | init_new_context()
50 /* pgd_alloc() did not increase mm->nr_pmds */ init_new_context()
51 mm_inc_nr_pmds(mm); init_new_context()
53 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); init_new_context()
57 #define destroy_context(mm) do { } while (0)
59 static inline void set_user_asce(struct mm_struct *mm) set_user_asce() argument
61 S390_lowcore.user_asce = mm->context.asce; set_user_asce()
108 struct mm_struct *mm = tsk->mm; finish_arch_post_lock_switch() local
111 if (mm) { finish_arch_post_lock_switch()
113 while (atomic_read(&mm->context.attach_count) >> 16) finish_arch_post_lock_switch()
116 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); finish_arch_post_lock_switch()
117 if (mm->context.flush_mm) finish_arch_post_lock_switch()
118 __tlb_flush_mm(mm); finish_arch_post_lock_switch()
124 #define enter_lazy_tlb(mm,tsk) do { } while (0)
125 #define deactivate_mm(tsk,mm) do { } while (0)
136 struct mm_struct *mm) arch_dup_mmap()
140 static inline void arch_exit_mmap(struct mm_struct *mm) arch_exit_mmap() argument
144 static inline void arch_unmap(struct mm_struct *mm, arch_unmap() argument
150 static inline void arch_bprm_mm_init(struct mm_struct *mm, arch_bprm_mm_init() argument
15 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
135 arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) arch_dup_mmap() argument
pgalloc.h
16 #include <linux/mm.h>
26 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
28 unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
50 static inline unsigned long pgd_entry_type(struct mm_struct *mm) pgd_entry_type() argument
52 if (mm->context.asce_limit <= (1UL << 31)) pgd_entry_type()
54 if (mm->context.asce_limit <= (1UL << 42)) pgd_entry_type()
62 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) pud_alloc_one() argument
64 unsigned long *table = crst_table_alloc(mm); pud_alloc_one()
69 #define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
71 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) pmd_alloc_one() argument
73 unsigned long *table = crst_table_alloc(mm); pmd_alloc_one()
79 crst_table_free(mm, table); pmd_alloc_one()
85 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
88 crst_table_free(mm, (unsigned long *) pmd); pmd_free()
91 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) pgd_populate() argument
96 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
101 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
103 unsigned long *table = crst_table_alloc(mm); pgd_alloc()
107 if (mm->context.asce_limit == (1UL << 31)) { pgd_alloc()
110 crst_table_free(mm, table); pgd_alloc()
117 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
119 if (mm->context.asce_limit == (1UL << 31)) pgd_free()
121 crst_table_free(mm, (unsigned long *) pgd); pgd_free()
124 static inline void pmd_populate(struct mm_struct *mm, pmd_populate() argument
130 #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
138 #define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
139 #define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
141 #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
142 #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
tlbflush.h
4 #include <linux/mm.h>
22 /* Global TLB flush for the mm */ __tlb_flush_idte()
33 /* Local TLB flush for the mm */ __tlb_flush_idte_local()
61 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
64 static inline void __tlb_flush_full(struct mm_struct *mm) __tlb_flush_full() argument
67 atomic_add(0x10000, &mm->context.attach_count); __tlb_flush_full()
68 if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { __tlb_flush_full()
76 cpumask_copy(mm_cpumask(mm), __tlb_flush_full()
77 &mm->context.cpu_attach_mask); __tlb_flush_full()
79 atomic_sub(0x10000, &mm->context.attach_count); __tlb_flush_full()
86 static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce) __tlb_flush_asce() argument
91 active = (mm == current->active_mm) ? 1 : 0; __tlb_flush_asce()
92 count = atomic_add_return(0x10000, &mm->context.attach_count); __tlb_flush_asce()
94 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { __tlb_flush_asce()
103 cpumask_copy(mm_cpumask(mm), __tlb_flush_asce()
104 &mm->context.cpu_attach_mask); __tlb_flush_asce()
106 atomic_sub(0x10000, &mm->context.attach_count); __tlb_flush_asce()
119 #define __tlb_flush_full(mm) __tlb_flush_local()
124 static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce) __tlb_flush_asce() argument
141 static inline void __tlb_flush_mm(struct mm_struct * mm) __tlb_flush_mm() argument
144 * If the machine has IDTE we prefer to do a per mm flush __tlb_flush_mm()
145 * on all cpus instead of doing a local flush if the mm __tlb_flush_mm()
148 if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) __tlb_flush_mm()
149 __tlb_flush_asce(mm, mm->context.asce); __tlb_flush_mm()
151 __tlb_flush_full(mm); __tlb_flush_mm()
154 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm) __tlb_flush_mm_lazy() argument
156 if (mm->context.flush_mm) { __tlb_flush_mm_lazy()
157 __tlb_flush_mm(mm); __tlb_flush_mm_lazy()
158 mm->context.flush_mm = 0; __tlb_flush_mm_lazy()
164 * flush_tlb() - flushes the current mm struct TLBs
166 * flush_tlb_mm(mm) - flushes the specified mm context TLB's
176 * ptep_get_and_clear do not flush the TLBs directly if the mm has
184 static inline void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument
186 __tlb_flush_mm_lazy(mm); flush_tlb_mm()
hugetlb.h
15 #define is_hugepage_only_range(mm, addr, len) 0
19 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
22 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
41 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, huge_pte_clear() argument
65 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument
68 pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep); huge_ptep_set_wrprotect()
69 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte)); huge_ptep_set_wrprotect()
/linux-4.4.14/arch/arm/include/asm/
mmu_context.h
24 void __check_vmalloc_seq(struct mm_struct *mm);
28 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
29 #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
32 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
35 static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, a15_erratum_get_cpumask() argument
45 static inline void check_and_switch_context(struct mm_struct *mm, check_and_switch_context() argument
48 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) check_and_switch_context()
49 __check_vmalloc_seq(mm); check_and_switch_context()
55 * running with the old mm. Since we only support UP systems check_and_switch_context()
56 * on non-ASID CPUs, the old mm will remain valid until the check_and_switch_context()
59 mm->context.switch_pending = 1; check_and_switch_context()
61 cpu_switch_mm(mm->pgd, mm); check_and_switch_context()
68 struct mm_struct *mm = current->mm; finish_arch_post_lock_switch() local
70 if (mm && mm->context.switch_pending) { finish_arch_post_lock_switch()
75 * switch to this mm was already done. finish_arch_post_lock_switch()
78 if (mm->context.switch_pending) { finish_arch_post_lock_switch()
79 mm->context.switch_pending = 0; finish_arch_post_lock_switch()
80 cpu_switch_mm(mm->pgd, mm); finish_arch_post_lock_switch()
88 #define init_new_context(tsk,mm) 0
92 #define destroy_context(mm) do { } while(0)
98 * mm: describes the currently active mm context
102 * tsk->mm will be NULL
105 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
110 * This is the actual mm switch as far as the scheduler
112 * calling the CPU specific function when the mm hasn't
140 #define deactivate_mm(tsk,mm) do { } while (0)
mmu.h
22 #define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
24 #define ASID(mm) (0)
pgalloc.h
30 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) pmd_alloc_one() argument
35 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
41 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
51 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
52 #define pmd_free(mm, pmd) do { } while (0)
53 #define pud_populate(mm,pmd,pte) BUG()
57 extern pgd_t *pgd_alloc(struct mm_struct *mm);
58 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
84 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) pte_alloc_one_kernel() argument
96 pte_alloc_one(struct mm_struct *mm, unsigned long addr) pte_alloc_one() argument
119 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
125 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
144 * of the mm address space.
149 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) pmd_populate_kernel() argument
158 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) pmd_populate() argument
vdso.h
12 void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
20 static inline void arm_install_vdso(struct mm_struct *mm, unsigned long addr) arm_install_vdso() argument
/linux-4.4.14/arch/um/kernel/skas/
mmu.c
7 #include <linux/mm.h>
17 static int init_stub_pte(struct mm_struct *mm, unsigned long proc, init_stub_pte() argument
25 pgd = pgd_offset(mm, proc); init_stub_pte()
26 pud = pud_alloc(mm, pgd, proc); init_stub_pte()
30 pmd = pmd_alloc(mm, pud, proc); init_stub_pte()
34 pte = pte_alloc_map(mm, NULL, pmd, proc); init_stub_pte()
43 pmd_free(mm, pmd); init_stub_pte()
45 pud_free(mm, pud); init_stub_pte()
50 int init_new_context(struct task_struct *task, struct mm_struct *mm) init_new_context() argument
53 struct mm_context *to_mm = &mm->context; init_new_context()
62 if (current->mm != NULL && current->mm != &init_mm) init_new_context()
63 from_mm = &current->mm->context; init_new_context()
93 void uml_setup_stubs(struct mm_struct *mm) uml_setup_stubs() argument
97 ret = init_stub_pte(mm, STUB_CODE, uml_setup_stubs()
102 ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack); uml_setup_stubs()
106 mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start); uml_setup_stubs()
107 mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack); uml_setup_stubs()
110 err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START, uml_setup_stubs()
113 mm->context.stub_pages); uml_setup_stubs()
124 void arch_exit_mmap(struct mm_struct *mm) arch_exit_mmap() argument
128 pte = virt_to_pte(mm, STUB_CODE); arch_exit_mmap()
130 pte_clear(mm, STUB_CODE, pte); arch_exit_mmap()
132 pte = virt_to_pte(mm, STUB_DATA); arch_exit_mmap()
136 pte_clear(mm, STUB_DATA, pte); arch_exit_mmap()
139 void destroy_context(struct mm_struct *mm) destroy_context() argument
141 struct mm_context *mmu = &mm->context; destroy_context()
/linux-4.4.14/arch/x86/mm/kmemcheck/
pte.h
4 #include <linux/mm.h>
pte.c
1 #include <linux/mm.h>
/linux-4.4.14/arch/blackfin/mm/
Makefile
2 # arch/blackfin/mm/Makefile
/linux-4.4.14/arch/c6x/include/asm/
tlb.h
4 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
/linux-4.4.14/arch/parisc/mm/
Makefile
2 # Makefile for arch/parisc/mm
hugetlbpage.c
10 #include <linux/mm.h>
46 pte_t *huge_pte_alloc(struct mm_struct *mm, huge_pte_alloc() argument
61 pgd = pgd_offset(mm, addr); huge_pte_alloc()
62 pud = pud_alloc(mm, pgd, addr); huge_pte_alloc()
64 pmd = pmd_alloc(mm, pud, addr); huge_pte_alloc()
66 pte = pte_alloc_map(mm, NULL, pmd, addr); huge_pte_alloc()
71 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) huge_pte_offset() argument
80 pgd = pgd_offset(mm, addr); huge_pte_offset()
96 static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr) purge_tlb_entries_huge() argument
108 purge_tlb_entries(mm, addr); purge_tlb_entries_huge()
114 static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, __set_huge_pte_at() argument
131 purge_tlb_entries_huge(mm, addr_start); __set_huge_pte_at()
134 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument
140 __set_huge_pte_at(mm, addr, ptep, entry); set_huge_pte_at()
145 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, huge_ptep_get_and_clear() argument
153 __set_huge_pte_at(mm, addr, ptep, __pte(0)); huge_ptep_get_and_clear()
160 void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument
168 __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); huge_ptep_set_wrprotect()
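hugetlbpage.c above is the parisc backend (huge_pte_alloc(), set_huge_pte_at(), huge_ptep_get_and_clear()) used by the generic hugetlb code. One portable user-space way to reach this kind of code is an explicit huge-page mapping; the sketch below assumes hugetlb pages have already been reserved and uses an arbitrary 2 MiB size:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2 * 1024 * 1024;   /* one huge page; size is arch-dependent */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap(MAP_HUGETLB)");    /* needs reserved huge pages */
                    return 1;
            }
            /* Faulting the page goes through huge_pte_alloc()/set_huge_pte_at(). */
            *(volatile char *)p = 1;
            return 0;
    }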
/linux-4.4.14/arch/arm64/xen/
Makefile
1 xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
/linux-4.4.14/arch/m32r/include/asm/
mmu_context.h
26 #define mm_context(mm) mm->context
30 #define mm_context(mm) mm->context[smp_processor_id()]
37 #define enter_lazy_tlb(mm, tsk) do { } while (0)
39 static inline void get_new_mmu_context(struct mm_struct *mm) get_new_mmu_context() argument
52 mm_context(mm) = mc; get_new_mmu_context()
58 static inline void get_mmu_context(struct mm_struct *mm) get_mmu_context() argument
60 if (mm) { get_mmu_context()
65 if ((mm_context(mm) ^ mc) & MMU_CONTEXT_VERSION_MASK) get_mmu_context()
66 get_new_mmu_context(mm); get_mmu_context()
75 struct mm_struct *mm) init_new_context()
78 mm->context = NO_CONTEXT; init_new_context()
84 mm->context[i] = NO_CONTEXT; init_new_context()
94 #define destroy_context(mm) do { } while (0)
112 * After we have set current->mm to a new value, this activates
113 * the context for the new mm so we see the new mappings.
115 static inline void activate_context(struct mm_struct *mm) activate_context() argument
117 get_mmu_context(mm); activate_context()
118 set_asid(mm_context(mm) & MMU_CONTEXT_ASID_MASK); activate_context()
143 #define deactivate_mm(tsk, mm) do { } while (0)
149 #define get_mmu_context(mm) do { } while (0)
150 #define init_new_context(tsk,mm) (0)
151 #define destroy_context(mm) do { } while (0)
154 #define activate_context(mm) do { } while (0)
156 #define deactivate_mm(mm,tsk) do { } while (0)
158 #define enter_lazy_tlb(mm,tsk) do { } while (0)
74 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
pgalloc.h
4 #include <linux/mm.h>
8 #define pmd_populate_kernel(mm, pmd, pte) \
11 static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
21 static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
28 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
33 static __inline__ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
41 static __inline__ pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
55 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
60 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
66 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
74 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
75 #define pmd_free(mm, x) do { } while (0)
77 #define pgd_populate(mm, pmd, pte) BUG()
tlb.h
13 * .. because we flush the whole mm when it
16 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
tlbflush.h
9 * - flush_tlb() flushes the current mm struct TLBs
11 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
26 #define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
33 #define flush_tlb_mm(mm) do { } while (0)
45 #define flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
cacheflush.h
4 #include <linux/mm.h>
11 #define flush_cache_mm(mm) do { } while (0)
12 #define flush_cache_dup_mm(mm) do { } while (0)
33 #define flush_cache_mm(mm) do { } while (0)
34 #define flush_cache_dup_mm(mm) do { } while (0)
47 #define flush_cache_mm(mm) do { } while (0)
48 #define flush_cache_dup_mm(mm) do { } while (0)
/linux-4.4.14/arch/powerpc/mm/
mmu_context_hash64.c
18 #include <linux/mm.h>
62 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
74 if (slice_mm_new_context(mm)) init_new_context()
75 slice_set_user_psize(mm, mmu_virtual_psize); init_new_context()
76 subpage_prot_init_new_context(mm); init_new_context()
77 mm->context.id = index; init_new_context()
79 mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL); init_new_context()
80 if (!mm->context.cop_lockp) { init_new_context()
82 subpage_prot_free(mm); init_new_context()
83 mm->context.id = MMU_NO_CONTEXT; init_new_context()
86 spin_lock_init(mm->context.cop_lockp); init_new_context()
90 mm->context.pte_frag = NULL; init_new_context()
93 mm_iommu_init(&mm->context); init_new_context()
107 static void destroy_pagetable_page(struct mm_struct *mm) destroy_pagetable_page() argument
113 pte_frag = mm->context.pte_frag; destroy_pagetable_page()
129 static inline void destroy_pagetable_page(struct mm_struct *mm) destroy_pagetable_page() argument
136 void destroy_context(struct mm_struct *mm) destroy_context() argument
139 mm_iommu_cleanup(&mm->context); destroy_context()
143 drop_cop(mm->context.acop, mm); destroy_context()
144 kfree(mm->context.cop_lockp); destroy_context()
145 mm->context.cop_lockp = NULL; destroy_context()
148 destroy_pagetable_page(mm); destroy_context()
149 __destroy_context(mm->context.id); destroy_context()
150 subpage_prot_free(mm); destroy_context()
151 mm->context.id = MMU_NO_CONTEXT; destroy_context()
icswx_pid.c
17 #include <linux/mm.h>
58 int get_cop_pid(struct mm_struct *mm) get_cop_pid() argument
62 if (mm->context.cop_pid == COP_PID_NONE) { get_cop_pid()
66 mm->context.cop_pid = pid; get_cop_pid()
68 return mm->context.cop_pid; get_cop_pid()
71 int disable_cop_pid(struct mm_struct *mm) disable_cop_pid() argument
75 if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { disable_cop_pid()
76 free_pid = mm->context.cop_pid; disable_cop_pid()
77 mm->context.cop_pid = COP_PID_NONE; disable_cop_pid()
subpage-prot.c
14 #include <linux/mm.h>
26 void subpage_prot_free(struct mm_struct *mm) subpage_prot_free() argument
28 struct subpage_prot_table *spt = &mm->context.spt; subpage_prot_free()
53 void subpage_prot_init_new_context(struct mm_struct *mm) subpage_prot_init_new_context() argument
55 struct subpage_prot_table *spt = &mm->context.spt; subpage_prot_init_new_context()
60 static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, hpte_flush_range() argument
69 pgd = pgd_offset(mm, addr); hpte_flush_range()
78 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); hpte_flush_range()
81 pte_update(mm, addr, pte, 0, 0, 0); hpte_flush_range()
95 struct mm_struct *mm = current->mm; subpage_prot_clear() local
96 struct subpage_prot_table *spt = &mm->context.spt; subpage_prot_clear()
102 down_write(&mm->mmap_sem); subpage_prot_clear()
128 hpte_flush_range(mm, addr, nw); subpage_prot_clear()
130 up_write(&mm->mmap_sem); subpage_prot_clear()
142 static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, subpage_mark_vma_nohuge() argument
147 .mm = mm, subpage_mark_vma_nohuge()
155 vma = find_vma(mm, addr); subpage_mark_vma_nohuge()
171 static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, subpage_mark_vma_nohuge() argument
190 struct mm_struct *mm = current->mm; sys_subpage_prot() local
191 struct subpage_prot_table *spt = &mm->context.spt; sys_subpage_prot()
203 if (is_hugepage_only_range(mm, addr, len)) sys_subpage_prot()
215 down_write(&mm->mmap_sem); sys_subpage_prot()
216 subpage_mark_vma_nohuge(mm, addr, len); sys_subpage_prot()
242 demote_segment_4k(mm, addr); sys_subpage_prot()
250 up_write(&mm->mmap_sem); sys_subpage_prot()
255 down_write(&mm->mmap_sem); sys_subpage_prot()
258 hpte_flush_range(mm, addr, nw); sys_subpage_prot()
264 up_write(&mm->mmap_sem); sys_subpage_prot()
slice.c
28 #include <linux/mm.h>
100 static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, slice_area_is_free() argument
105 if ((mm->task_size - len) < addr) slice_area_is_free()
107 vma = find_vma(mm, addr); slice_area_is_free()
111 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) slice_low_has_vma() argument
113 return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, slice_low_has_vma()
117 static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) slice_high_has_vma() argument
128 return !slice_area_is_free(mm, start, end - start); slice_high_has_vma()
131 static struct slice_mask slice_mask_for_free(struct mm_struct *mm) slice_mask_for_free() argument
137 if (!slice_low_has_vma(mm, i)) slice_mask_for_free()
140 if (mm->task_size <= SLICE_LOW_TOP) slice_mask_for_free()
144 if (!slice_high_has_vma(mm, i)) slice_mask_for_free()
150 static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize) slice_mask_for_size() argument
158 lpsizes = mm->context.low_slices_psize; slice_mask_for_size()
163 hpsizes = mm->context.high_slices_psize; slice_mask_for_size()
182 struct mm_struct *mm = parm; slice_flush_segments() local
185 if (mm != current->active_mm) slice_flush_segments()
196 static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) slice_convert() argument
204 slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize); slice_convert()
212 lpsizes = mm->context.low_slices_psize; slice_convert()
219 mm->context.low_slices_psize = lpsizes; slice_convert()
221 hpsizes = mm->context.high_slices_psize; slice_convert()
232 mm->context.low_slices_psize, slice_convert()
233 mm->context.high_slices_psize); slice_convert()
237 copro_flush_all_slbs(mm); slice_convert()
265 static unsigned long slice_find_area_bottomup(struct mm_struct *mm, slice_find_area_bottomup() argument
308 static unsigned long slice_find_area_topdown(struct mm_struct *mm, slice_find_area_topdown() argument
322 addr = mm->mmap_base; slice_find_area_topdown()
354 return slice_find_area_bottomup(mm, len, available, psize); slice_find_area_topdown()
358 static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, slice_find_area() argument
363 return slice_find_area_topdown(mm, len, mask, psize); slice_find_area()
365 return slice_find_area_bottomup(mm, len, mask, psize); slice_find_area()
394 struct mm_struct *mm = current->mm; slice_get_unmapped_area() local
398 BUG_ON(mm->task_size == 0); slice_get_unmapped_area()
400 slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize); slice_get_unmapped_area()
404 if (len > mm->task_size) slice_get_unmapped_area()
410 if (fixed && addr > (mm->task_size - len)) slice_get_unmapped_area()
418 if (addr > mm->task_size - len || slice_get_unmapped_area()
419 !slice_area_is_free(mm, addr, len)) slice_get_unmapped_area()
426 good_mask = slice_mask_for_size(mm, psize); slice_get_unmapped_area()
451 compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K); slice_get_unmapped_area()
474 newaddr = slice_find_area(mm, len, good_mask, psize, topdown); slice_get_unmapped_area()
487 potential_mask = slice_mask_for_free(mm); slice_get_unmapped_area()
506 addr = slice_find_area(mm, len, good_mask, psize, topdown); slice_get_unmapped_area()
516 addr = slice_find_area(mm, len, potential_mask, psize, topdown); slice_get_unmapped_area()
522 addr = slice_find_area(mm, len, potential_mask, psize, slice_get_unmapped_area()
538 slice_convert(mm, mask, psize); slice_get_unmapped_area()
540 on_each_cpu(slice_flush_segments, mm, 1); slice_get_unmapped_area()
554 current->mm->context.user_psize, 0); arch_get_unmapped_area()
564 current->mm->context.user_psize, 1); arch_get_unmapped_area_topdown()
567 unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) get_slice_psize() argument
574 lpsizes = mm->context.low_slices_psize; get_slice_psize()
578 hpsizes = mm->context.high_slices_psize; get_slice_psize()
593 * N.B. This may be called before mm->context.id has been set.
599 void slice_set_user_psize(struct mm_struct *mm, unsigned int psize) slice_set_user_psize() argument
607 slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize); slice_set_user_psize()
611 old_psize = mm->context.user_psize; slice_set_user_psize()
616 mm->context.user_psize = psize; slice_set_user_psize()
619 lpsizes = mm->context.low_slices_psize; slice_set_user_psize()
625 mm->context.low_slices_psize = lpsizes; slice_set_user_psize()
627 hpsizes = mm->context.high_slices_psize; slice_set_user_psize()
641 mm->context.low_slices_psize, slice_set_user_psize()
642 mm->context.high_slices_psize); slice_set_user_psize()
648 void slice_set_range_psize(struct mm_struct *mm, unsigned long start, slice_set_range_psize() argument
653 slice_convert(mm, mask, psize); slice_set_range_psize()
676 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, is_hugepage_only_range() argument
680 unsigned int psize = mm->context.user_psize; is_hugepage_only_range()
683 available = slice_mask_for_size(mm, psize); is_hugepage_only_range()
688 compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K); is_hugepage_only_range()
694 slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n", is_hugepage_only_range()
695 mm, addr, len); is_hugepage_only_range()
tlb_hash32.c
8 * Derived from arch/ppc/mm/init.c:
15 * Derived from "arch/i386/mm/init.c"
26 #include <linux/mm.h>
40 void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr) flush_hash_entry() argument
46 flush_hash_pages(mm->context.id, addr, ptephys, 1); flush_hash_entry()
80 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
90 static void flush_range(struct mm_struct *mm, unsigned long start, flush_range() argument
96 unsigned int ctx = mm->context.id; flush_range()
106 pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start); flush_range()
132 * Flush all the (user) entries for the address space described by mm.
134 void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument
144 * It is safe to go down the mm's list of vmas when called flush_tlb_mm()
149 for (mp = mm->mmap; mp != NULL; mp = mp->vm_next) flush_tlb_mm()
156 struct mm_struct *mm; flush_tlb_page() local
163 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; flush_tlb_page()
164 pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr); flush_tlb_page()
166 flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); flush_tlb_page()
copro_fault.c
2 * CoProcessor (SPU/AFU) mm fault handler
24 #include <linux/mm.h>
36 int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, copro_handle_mm_fault() argument
43 if (mm == NULL) copro_handle_mm_fault()
46 if (mm->pgd == NULL) copro_handle_mm_fault()
49 down_read(&mm->mmap_sem); copro_handle_mm_fault()
51 vma = find_vma(mm, ea); copro_handle_mm_fault()
78 *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0); copro_handle_mm_fault()
96 up_read(&mm->mmap_sem); copro_handle_mm_fault()
101 int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) copro_calculate_slb() argument
109 psize = get_slice_psize(mm, ea); copro_calculate_slb()
111 vsid = get_vsid(mm->context.id, ea, ssize); copro_calculate_slb()
148 void copro_flush_all_slbs(struct mm_struct *mm) copro_flush_all_slbs() argument
151 spu_flush_all_slbs(mm); copro_flush_all_slbs()
153 cxl_slbia(mm); copro_flush_all_slbs()
mmu_context_nohash.c
9 * Derived from previous arch/powerpc/mm/mmu_context.c
25 * also clear mm->cpu_vm_mask bits when processes are migrated
44 #include <linux/mm.h>
87 struct mm_struct *mm; steal_context_smp() local
94 /* Pick up the victim mm */ steal_context_smp()
95 mm = context_mm[id]; steal_context_smp()
100 if (mm->context.active) { steal_context_smp()
106 pr_hardcont(" | steal %d from 0x%p", id, mm); steal_context_smp()
108 /* Mark this mm as having no context anymore */ steal_context_smp()
109 mm->context.id = MMU_NO_CONTEXT; steal_context_smp()
111 /* Mark it stale on all CPUs that used this mm. For threaded steal_context_smp()
116 for_each_cpu(cpu, mm_cpumask(mm)) { for_each_cpu()
141 struct mm_struct *mm; steal_all_contexts() local
146 /* Pick up the victim mm */ steal_all_contexts()
147 mm = context_mm[id]; steal_all_contexts()
149 pr_hardcont(" | steal %d from 0x%p", id, mm); steal_all_contexts()
151 /* Mark this mm as having no context anymore */ steal_all_contexts()
152 mm->context.id = MMU_NO_CONTEXT; steal_all_contexts()
157 mm->context.active = 0; steal_all_contexts()
178 struct mm_struct *mm; steal_context_up() local
181 /* Pick up the victim mm */ steal_context_up()
182 mm = context_mm[id]; steal_context_up()
184 pr_hardcont(" | steal %d from 0x%p", id, mm); steal_context_up()
187 local_flush_tlb_mm(mm); steal_context_up()
189 /* Mark this mm as having no context anymore */ steal_context_up()
190 mm->context.id = MMU_NO_CONTEXT; steal_context_up()
237 pr_hard("[%d] activating context for mm @%p, active=%d, id=%d", switch_mmu_context()
257 pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n", switch_mmu_context()
329 int init_new_context(struct task_struct *t, struct mm_struct *mm) init_new_context() argument
331 pr_hard("initing context for mm @%p\n", mm); init_new_context()
333 mm->context.id = MMU_NO_CONTEXT; init_new_context()
334 mm->context.active = 0; init_new_context()
337 if (slice_mm_new_context(mm)) init_new_context()
338 slice_set_user_psize(mm, mmu_virtual_psize); init_new_context()
347 void destroy_context(struct mm_struct *mm) destroy_context() argument
352 if (mm->context.id == MMU_NO_CONTEXT) destroy_context()
355 WARN_ON(mm->context.active != 0); destroy_context()
358 id = mm->context.id; destroy_context()
361 mm->context.id = MMU_NO_CONTEXT; destroy_context()
363 mm->context.active = 0; destroy_context()
mmu_context_hash32.c
8 * Derived from arch/ppc/mm/init.c:
15 * Derived from "arch/i386/mm/init.c"
25 #include <linux/mm.h>
53 * function is changed then arch/ppc/mm/hashtable.S will have to be
82 int init_new_context(struct task_struct *t, struct mm_struct *mm) init_new_context() argument
84 mm->context.id = __init_new_context(); init_new_context()
101 void destroy_context(struct mm_struct *mm) destroy_context() argument
104 if (mm->context.id != NO_CONTEXT) { destroy_context()
105 __destroy_context(mm->context.id); destroy_context()
106 mm->context.id = NO_CONTEXT; destroy_context()
icswx.c
17 #include <linux/mm.h>
79 * @mm: The mm the coprocessor to associate with. Most likely current mm.
85 int use_cop(unsigned long acop, struct mm_struct *mm) use_cop() argument
92 if (!mm || !acop) use_cop()
96 spin_lock(&mm->page_table_lock); use_cop()
97 spin_lock(mm->context.cop_lockp); use_cop()
99 ret = get_cop_pid(mm); use_cop()
104 mm->context.acop |= acop; use_cop()
106 sync_cop(mm); use_cop()
113 if (atomic_read(&mm->mm_users) > 1) use_cop()
114 smp_call_function(sync_cop, mm, 1); use_cop()
117 spin_unlock(mm->context.cop_lockp); use_cop()
118 spin_unlock(&mm->page_table_lock); use_cop()
127 * @mm: The mm the coprocessor associated with.
129 void drop_cop(unsigned long acop, struct mm_struct *mm) drop_cop() argument
136 if (WARN_ON_ONCE(!mm)) drop_cop()
140 spin_lock(&mm->page_table_lock); drop_cop()
141 spin_lock(mm->context.cop_lockp); drop_cop()
143 mm->context.acop &= ~acop; drop_cop()
145 free_pid = disable_cop_pid(mm); drop_cop()
146 sync_cop(mm); drop_cop()
153 if (atomic_read(&mm->mm_users) > 1) drop_cop()
154 smp_call_function(sync_cop, mm, 1); drop_cop()
159 spin_unlock(mm->context.cop_lockp); drop_cop()
160 spin_unlock(&mm->page_table_lock); drop_cop()
235 * the threads (see smp_call_function(sync_cop, mm, 1)), but acop_handle_fault()
tlb_hash64.c
5 * Derived from arch/ppc64/mm/init.c:
12 * Derived from "arch/i386/mm/init.c"
25 #include <linux/mm.h>
43 void hpte_need_flush(struct mm_struct *mm, unsigned long addr, hpte_need_flush() argument
65 psize = get_slice_psize(mm, addr); hpte_need_flush()
70 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ hpte_need_flush()
73 psize = pte_pagesize_index(mm, addr, pte); hpte_need_flush()
85 vsid = get_vsid(mm->context.id, addr, ssize); hpte_need_flush()
97 * in that case, might be worth testing the mm cpu mask though hpte_need_flush()
116 if (i != 0 && (mm != batch->mm || batch->psize != psize || hpte_need_flush()
122 batch->mm = mm; hpte_need_flush()
148 if (cpumask_equal(mm_cpumask(batch->mm), tmp)) __flush_tlb_pending()
177 * @mm : mm_struct of the target address space (generally init_mm)
190 void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, __flush_hash_table_range() argument
200 BUG_ON(!mm->pgd); __flush_hash_table_range()
212 pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, &is_thp, __flush_hash_table_range()
224 hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte); __flush_hash_table_range()
226 hpte_need_flush(mm, start, ptep, pte, hugepage_shift); __flush_hash_table_range()
232 void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr) flush_tlb_pmd_range() argument
252 hpte_need_flush(mm, addr, pte, pteval, 0); flush_tlb_pmd_range()
/linux-4.4.14/arch/m68k/include/asm/
mmu_context.h
6 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
30 static inline void get_mmu_context(struct mm_struct *mm) get_mmu_context() argument
34 if (mm->context != NO_CONTEXT) get_mmu_context()
47 mm->context = ctx; get_mmu_context()
48 context_mm[ctx] = mm; get_mmu_context()
54 #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
59 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
61 if (mm->context != NO_CONTEXT) { destroy_context()
62 clear_bit(mm->context, context_map); destroy_context()
63 mm->context = NO_CONTEXT; destroy_context()
76 get_mmu_context(tsk->mm); switch_mm()
77 set_context(tsk->mm->context, next->pgd); switch_mm()
81 * After we have set current->mm to a new value, this activates
82 * the context for the new mm so we see the new mappings.
85 struct mm_struct *mm) activate_mm()
87 get_mmu_context(mm); activate_mm()
88 set_context(mm->context, mm->pgd); activate_mm()
91 #define deactivate_mm(tsk, mm) do { } while (0)
99 struct mm_struct *mm; load_ksp_mmu() local
116 mm = &init_mm; load_ksp_mmu()
118 pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm); load_ksp_mmu()
119 mm = task->mm; load_ksp_mmu()
122 if (!mm) load_ksp_mmu()
125 pgd = pgd_offset(mm, mmuar); load_ksp_mmu()
139 asid = mm->context & 0xff; load_ksp_mmu()
155 pr_info("ksp load failed: mm=0x%p ksp=0x08%lx\n", mm, mmuar); load_ksp_mmu()
164 extern unsigned long get_free_context(struct mm_struct *mm);
169 struct mm_struct *mm) init_new_context()
171 mm->context = SUN3_INVALID_CONTEXT; init_new_context()
177 static inline void get_mmu_context(struct mm_struct *mm) get_mmu_context() argument
179 if (mm->context == SUN3_INVALID_CONTEXT) get_mmu_context()
180 mm->context = get_free_context(mm); get_mmu_context()
184 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
186 if (mm->context != SUN3_INVALID_CONTEXT) destroy_context()
187 clear_context(mm->context); destroy_context()
190 static inline void activate_context(struct mm_struct *mm) activate_context() argument
192 get_mmu_context(mm); activate_context()
193 sun3_put_context(mm->context); activate_context()
199 activate_context(tsk->mm); switch_mm()
202 #define deactivate_mm(tsk, mm) do { } while (0)
217 struct mm_struct *mm) init_new_context()
219 mm->context = virt_to_phys(mm->pgd); init_new_context()
223 #define destroy_context(mm) do { } while(0)
225 static inline void switch_mm_0230(struct mm_struct *mm) switch_mm_0230() argument
228 0x80000000 | _PAGE_TABLE, mm->context switch_mm_0230()
258 static inline void switch_mm_0460(struct mm_struct *mm) switch_mm_0460() argument
266 asm volatile ("movec %0,%%urp" : : "r" (mm->context)); switch_mm_0460()
292 #define deactivate_mm(tsk,mm) do { } while (0)
309 static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
319 #define destroy_context(mm) do { } while (0)
320 #define deactivate_mm(tsk,mm) do { } while (0)
84 activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) activate_mm() argument
168 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
216 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
H A Dmotorola_pgalloc.h10 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) pte_alloc_one_kernel() argument
24 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
30 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) pte_alloc_one() argument
51 static inline void pte_free(struct mm_struct *mm, pgtable_t page) pte_free() argument
69 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_alloc_one() argument
74 static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
86 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
88 pmd_free(mm, (pmd_t *)pgd); pgd_free()
91 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
97 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument
102 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page) pmd_populate() argument
108 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) pgd_populate() argument
H A Dsun3_pgalloc.h17 #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
20 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
25 static inline void pte_free(struct mm_struct *mm, pgtable_t page) pte_free() argument
37 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
49 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
66 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument
71 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page) pmd_populate() argument
81 #define pmd_free(mm, x) do { } while (0)
84 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
89 static inline pgd_t * pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
99 #define pgd_populate(mm, pmd, pte) BUG()
H A Dmcf_pgalloc.h7 extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
14 extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
31 #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
32 #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
34 #define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
36 #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
39 #define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
51 static inline struct page *pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
76 extern inline void pte_free(struct mm_struct *mm, struct page *page) pte_free() argument
85 #define pmd_free(mm, pmd) BUG()
87 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
92 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
104 #define pgd_populate(mm, pmd, pte) BUG()
H A Dtlb.h13 * .. because we flush the whole mm when it
16 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
/linux-4.4.14/arch/sparc/mm/
H A Dtlb.c1 /* arch/sparc64/mm/tlb.c
8 #include <linux/mm.h>
26 struct mm_struct *mm = tb->mm; flush_tlb_pending() local
33 if (CTX_VALID(mm->context)) { flush_tlb_pending()
35 global_flush_tlb_page(mm, tb->vaddrs[0]); flush_tlb_pending()
38 smp_flush_tlb_pending(tb->mm, tb->tlb_nr, flush_tlb_pending()
41 __flush_tlb_pending(CTX_HWBITS(tb->mm->context), flush_tlb_pending()
69 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, tlb_batch_add_one() argument
81 if (unlikely(nr != 0 && mm != tb->mm)) { tlb_batch_add_one()
87 flush_tsb_user_page(mm, vaddr, huge); tlb_batch_add_one()
88 global_flush_tlb_page(mm, vaddr); tlb_batch_add_one()
93 tb->mm = mm; tlb_batch_add_one()
112 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, tlb_batch_add() argument
137 flush_dcache_page_all(mm, page); tlb_batch_add()
142 tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge); tlb_batch_add()
146 static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, tlb_batch_pmd_scan() argument
158 tlb_batch_add_one(mm, vaddr, exec, false); tlb_batch_pmd_scan()
166 void set_pmd_at(struct mm_struct *mm, unsigned long addr, set_pmd_at() argument
173 if (mm == &init_mm) set_pmd_at()
178 mm->context.huge_pte_count++; set_pmd_at()
180 mm->context.huge_pte_count--; set_pmd_at()
198 tlb_batch_add_one(mm, addr, exec, true); set_pmd_at()
199 tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec, set_pmd_at()
202 tlb_batch_pmd_scan(mm, addr, orig); set_pmd_at()
218 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_trans_huge_deposit() argument
223 assert_spin_locked(&mm->page_table_lock); pgtable_trans_huge_deposit()
226 if (!pmd_huge_pte(mm, pmdp)) pgtable_trans_huge_deposit()
229 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); pgtable_trans_huge_deposit()
230 pmd_huge_pte(mm, pmdp) = pgtable; pgtable_trans_huge_deposit()
233 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) pgtable_trans_huge_withdraw() argument
238 assert_spin_locked(&mm->page_table_lock); pgtable_trans_huge_withdraw()
241 pgtable = pmd_huge_pte(mm, pmdp); pgtable_trans_huge_withdraw()
244 pmd_huge_pte(mm, pmdp) = NULL; pgtable_trans_huge_withdraw()
246 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; pgtable_trans_huge_withdraw()
H A Dhugetlbpage.c8 #include <linux/mm.h>
60 struct mm_struct *mm = current->mm; hugetlb_get_unmapped_area_topdown() local
70 info.high_limit = mm->mmap_base; hugetlb_get_unmapped_area_topdown()
96 struct mm_struct *mm = current->mm; hugetlb_get_unmapped_area() local
116 vma = find_vma(mm, addr); hugetlb_get_unmapped_area()
121 if (mm->get_unmapped_area == arch_get_unmapped_area) hugetlb_get_unmapped_area()
129 pte_t *huge_pte_alloc(struct mm_struct *mm, huge_pte_alloc() argument
144 pgd = pgd_offset(mm, addr); huge_pte_alloc()
145 pud = pud_alloc(mm, pgd, addr); huge_pte_alloc()
147 pmd = pmd_alloc(mm, pud, addr); huge_pte_alloc()
149 pte = pte_alloc_map(mm, NULL, pmd, addr); huge_pte_alloc()
154 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) huge_pte_offset() argument
163 pgd = pgd_offset(mm, addr); huge_pte_offset()
175 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument
183 mm->context.huge_pte_count++; set_huge_pte_at()
200 maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0); set_huge_pte_at()
203 maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0); set_huge_pte_at()
206 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, huge_ptep_get_and_clear() argument
215 mm->context.huge_pte_count--; huge_ptep_get_and_clear()
228 maybe_tlb_batch_add(mm, addr, ptep, entry, 0); huge_ptep_get_and_clear()
231 maybe_tlb_batch_add(mm, addr, ptep, entry, 0); huge_ptep_get_and_clear()
H A Dtsb.c1 /* arch/sparc64/mm/tsb.c
74 struct mm_struct *mm = tb->mm; flush_tsb_user() local
77 spin_lock_irqsave(&mm->context.lock, flags); flush_tsb_user()
80 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; flush_tsb_user()
81 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; flush_tsb_user()
87 if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) { flush_tsb_user()
88 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; flush_tsb_user()
89 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; flush_tsb_user()
95 spin_unlock_irqrestore(&mm->context.lock, flags); flush_tsb_user()
98 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge) flush_tsb_user_page() argument
102 spin_lock_irqsave(&mm->context.lock, flags); flush_tsb_user_page()
105 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; flush_tsb_user_page()
106 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; flush_tsb_user_page()
112 if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) { flush_tsb_user_page()
113 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; flush_tsb_user_page()
114 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; flush_tsb_user_page()
120 spin_unlock_irqrestore(&mm->context.lock, flags); flush_tsb_user_page()
131 static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes) setup_tsb_params() argument
136 mm->context.tsb_block[tsb_idx].tsb_nentries = setup_tsb_params()
153 tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb); setup_tsb_params()
214 mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg; setup_tsb_params()
215 mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0; setup_tsb_params()
216 mm->context.tsb_block[tsb_idx].tsb_map_pte = 0; setup_tsb_params()
222 mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg; setup_tsb_params()
223 mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base; setup_tsb_params()
224 mm->context.tsb_block[tsb_idx].tsb_map_pte = tte; setup_tsb_params()
229 struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx]; setup_tsb_params()
333 void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss) tsb_grow() argument
369 if (mm->context.tsb_block[tsb_index].tsb == NULL && tsb_grow()
380 if (mm->context.tsb_block[tsb_index].tsb != NULL) tsb_grow()
381 mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL; tsb_grow()
392 * We have to hold mm->context.lock while committing to the tsb_grow()
410 spin_lock_irqsave(&mm->context.lock, flags); tsb_grow()
412 old_tsb = mm->context.tsb_block[tsb_index].tsb; tsb_grow()
414 (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL); tsb_grow()
415 old_size = (mm->context.tsb_block[tsb_index].tsb_nentries * tsb_grow()
424 (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) { tsb_grow()
425 spin_unlock_irqrestore(&mm->context.lock, flags); tsb_grow()
431 mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit; tsb_grow()
448 mm->context.tsb_block[tsb_index].tsb = new_tsb; tsb_grow()
449 setup_tsb_params(mm, tsb_index, new_size); tsb_grow()
451 spin_unlock_irqrestore(&mm->context.lock, flags); tsb_grow()
458 tsb_context_switch(mm); tsb_grow()
462 smp_tsb_sync(mm); tsb_grow()
470 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
477 spin_lock_init(&mm->context.lock); init_new_context()
479 mm->context.sparc64_ctx_val = 0UL; init_new_context()
486 huge_pte_count = mm->context.huge_pte_count; init_new_context()
487 mm->context.huge_pte_count = 0; init_new_context()
495 mm->context.tsb_block[i].tsb = NULL; init_new_context()
500 tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm)); init_new_context()
504 tsb_grow(mm, MM_TSB_HUGE, huge_pte_count); init_new_context()
507 if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb)) init_new_context()
525 void destroy_context(struct mm_struct *mm) destroy_context() argument
530 tsb_destroy_one(&mm->context.tsb_block[i]); destroy_context()
534 if (CTX_VALID(mm->context)) { destroy_context()
535 unsigned long nr = CTX_NRBITS(mm->context); destroy_context()
H A Dfault_32.c19 #include <linux/mm.h>
49 printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n", unhandled_fault()
50 (tsk->mm ? tsk->mm->context : tsk->active_mm->context)); unhandled_fault()
51 printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n", unhandled_fault()
52 (tsk->mm ? (unsigned long) tsk->mm->pgd : unhandled_fault()
172 struct mm_struct *mm = tsk->mm; do_sparc_fault() local
199 if (pagefault_disabled() || !mm) do_sparc_fault()
205 down_read(&mm->mmap_sem); do_sparc_fault()
210 vma = find_vma(mm, address); do_sparc_fault()
244 fault = handle_mm_fault(mm, vma, address, flags); do_sparc_fault()
273 /* No need to up_read(&mm->mmap_sem) as we would do_sparc_fault()
275 * in mm/filemap.c. do_sparc_fault()
282 up_read(&mm->mmap_sem); do_sparc_fault()
290 up_read(&mm->mmap_sem); do_sparc_fault()
339 up_read(&mm->mmap_sem); do_sparc_fault()
347 up_read(&mm->mmap_sem); do_sparc_fault()
388 struct mm_struct *mm = tsk->mm; force_user_fault() local
394 down_read(&mm->mmap_sem); force_user_fault()
395 vma = find_vma(mm, address); force_user_fault()
414 switch (handle_mm_fault(mm, vma, address, flags)) { force_user_fault()
419 up_read(&mm->mmap_sem); force_user_fault()
422 up_read(&mm->mmap_sem); force_user_fault()
427 up_read(&mm->mmap_sem); force_user_fault()
/linux-4.4.14/arch/sh/mm/
H A Dhugetlbpage.c2 * arch/sh/mm/hugetlbpage.c
13 #include <linux/mm.h>
24 pte_t *huge_pte_alloc(struct mm_struct *mm, huge_pte_alloc() argument
32 pgd = pgd_offset(mm, addr); huge_pte_alloc()
34 pud = pud_alloc(mm, pgd, addr); huge_pte_alloc()
36 pmd = pmd_alloc(mm, pud, addr); huge_pte_alloc()
38 pte = pte_alloc_map(mm, NULL, pmd, addr); huge_pte_alloc()
45 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) huge_pte_offset() argument
52 pgd = pgd_offset(mm, addr); huge_pte_offset()
H A Dextable_32.c2 * linux/arch/sh/mm/extable.c
4 * linux/arch/i386/mm/extable.c
H A Dtlbflush_32.c11 #include <linux/mm.h>
28 if (vma->vm_mm != current->mm) { local_flush_tlb_page()
42 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range() local
45 if (cpu_context(cpu, mm) != NO_CONTEXT) { local_flush_tlb_range()
52 cpu_context(cpu, mm) = NO_CONTEXT; local_flush_tlb_range()
53 if (mm == current->mm) local_flush_tlb_range()
54 activate_context(mm, cpu); local_flush_tlb_range()
59 asid = cpu_asid(cpu, mm); local_flush_tlb_range()
63 if (mm != current->mm) { local_flush_tlb_range()
106 void local_flush_tlb_mm(struct mm_struct *mm) local_flush_tlb_mm() argument
112 if (cpu_context(cpu, mm) != NO_CONTEXT) { local_flush_tlb_mm()
116 cpu_context(cpu, mm) = NO_CONTEXT; local_flush_tlb_mm()
117 if (mm == current->mm) local_flush_tlb_mm()
118 activate_context(mm, cpu); local_flush_tlb_mm()
H A Dpgtable.c1 #include <linux/mm.h>
32 pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
37 void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
43 void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
48 pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_alloc_one() argument
53 void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
H A Dtlbflush_64.c2 * arch/sh/mm/tlb-flush_64.c
21 #include <linux/mm.h>
86 struct mm_struct *mm; local_flush_tlb_range() local
88 mm = vma->vm_mm; local_flush_tlb_range()
89 if (cpu_context(cpu, mm) == NO_CONTEXT) local_flush_tlb_range()
97 match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID; local_flush_tlb_range()
128 void local_flush_tlb_mm(struct mm_struct *mm) local_flush_tlb_mm() argument
133 if (cpu_context(cpu, mm) == NO_CONTEXT) local_flush_tlb_mm()
138 cpu_context(cpu, mm) = NO_CONTEXT; local_flush_tlb_mm()
139 if (mm == current->mm) local_flush_tlb_mm()
140 activate_context(mm, cpu); local_flush_tlb_mm()
/linux-4.4.14/arch/cris/include/asm/
H A Dpgalloc.h5 #include <linux/mm.h>
7 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
8 #define pmd_populate(mm, pmd, pte) pmd_set(pmd, page_address(pte))
15 static inline pgd_t *pgd_alloc (struct mm_struct *mm) pgd_alloc() argument
20 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
25 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) pte_alloc_one_kernel() argument
31 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) pte_alloc_one() argument
44 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
49 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
H A Dmmu_context.h6 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
7 extern void get_mmu_context(struct mm_struct *mm);
8 extern void destroy_context(struct mm_struct *mm);
12 #define deactivate_mm(tsk,mm) do { } while (0)
27 /* defined in arch/cris/mm/fault.c */
30 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
H A Dtlbflush.h4 #include <linux/mm.h>
10 * TLB flushing (implemented in arch/cris/mm/tlb.c):
12 * - flush_tlb() flushes the current mm struct TLBs
14 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
16 * - flush_tlb_range(mm, start, end) flushes a range of pages
21 extern void __flush_tlb_mm(struct mm_struct *mm);
36 flush_tlb_mm(current->mm); flush_tlb()
/linux-4.4.14/arch/ia64/include/asm/
H A Dpgalloc.h18 #include <linux/mm.h>
25 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
30 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
37 pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) pgd_populate() argument
42 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) pud_alloc_one() argument
47 static inline void pud_free(struct mm_struct *mm, pud_t *pud) pud_free() argument
51 #define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
55 pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) pud_populate() argument
60 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) pmd_alloc_one() argument
65 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
70 #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
73 pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte) pmd_populate() argument
80 pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte) pmd_populate_kernel() argument
85 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr) pte_alloc_one() argument
101 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
107 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
113 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
123 #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
H A Dtlbflush.h10 #include <linux/mm.h>
30 extern void smp_flush_tlb_mm (struct mm_struct *mm);
39 local_finish_flush_tlb_mm (struct mm_struct *mm) local_finish_flush_tlb_mm() argument
41 if (mm == current->active_mm) local_finish_flush_tlb_mm()
42 activate_context(mm); local_finish_flush_tlb_mm()
51 flush_tlb_mm (struct mm_struct *mm) flush_tlb_mm() argument
53 if (!mm) flush_tlb_mm()
56 set_bit(mm->context, ia64_ctx.flushmap); flush_tlb_mm()
57 mm->context = 0; flush_tlb_mm()
59 if (atomic_read(&mm->mm_users) == 0) flush_tlb_mm()
63 smp_flush_tlb_mm(mm); flush_tlb_mm()
65 local_finish_flush_tlb_mm(mm); flush_tlb_mm()
H A Dhugetlb.h15 static inline int is_hugepage_only_range(struct mm_struct *mm, is_hugepage_only_range() argument
23 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument
26 set_pte_at(mm, addr, ptep, pte); set_huge_pte_at()
29 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, huge_ptep_get_and_clear() argument
32 return ptep_get_and_clear(mm, addr, ptep); huge_ptep_get_and_clear()
50 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument
53 ptep_set_wrprotect(mm, addr, ptep); huge_ptep_set_wrprotect()
H A Dmmu_context.h48 extern void wrap_mmu_context (struct mm_struct *mm);
51 enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
78 get_mmu_context (struct mm_struct *mm) get_mmu_context() argument
81 nv_mm_context_t context = mm->context; get_mmu_context()
88 context = mm->context; get_mmu_context()
90 cpumask_clear(mm_cpumask(mm)); get_mmu_context()
97 wrap_mmu_context(mm); get_mmu_context()
99 mm->context = context = ia64_ctx.next++; get_mmu_context()
118 init_new_context (struct task_struct *p, struct mm_struct *mm) init_new_context() argument
120 mm->context = 0; init_new_context()
125 destroy_context (struct mm_struct *mm) destroy_context() argument
163 activate_context (struct mm_struct *mm) activate_context() argument
168 context = get_mmu_context(mm); activate_context()
169 if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) activate_context()
170 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); activate_context()
176 } while (unlikely(context != mm->context)); activate_context()
179 #define deactivate_mm(tsk,mm) do { } while (0)
/linux-4.4.14/arch/sparc/include/asm/
H A Dmmu_context_64.h12 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
20 void get_new_mmu_context(struct mm_struct *mm);
27 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
28 void destroy_context(struct mm_struct *mm);
35 static inline void tsb_context_switch(struct mm_struct *mm) tsb_context_switch() argument
37 __tsb_context_switch(__pa(mm->pgd), tsb_context_switch()
38 &mm->context.tsb_block[0], tsb_context_switch()
40 (mm->context.tsb_block[1].tsb ? tsb_context_switch()
41 &mm->context.tsb_block[1] : tsb_context_switch()
46 , __pa(&mm->context.tsb_descr[0])); tsb_context_switch()
49 void tsb_grow(struct mm_struct *mm,
53 void smp_tsb_sync(struct mm_struct *mm);
74 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) switch_mm() argument
79 if (unlikely(mm == &init_mm)) switch_mm()
82 spin_lock_irqsave(&mm->context.lock, flags); switch_mm()
83 ctx_valid = CTX_VALID(mm->context); switch_mm()
85 get_new_mmu_context(mm); switch_mm()
117 load_secondary_context(mm); switch_mm()
118 tsb_context_switch(mm); switch_mm()
125 if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) { switch_mm()
126 cpumask_set_cpu(cpu, mm_cpumask(mm)); switch_mm()
127 __flush_tlb_mm(CTX_HWBITS(mm->context), switch_mm()
130 spin_unlock_irqrestore(&mm->context.lock, flags); switch_mm()
133 #define deactivate_mm(tsk,mm) do { } while (0)
136 static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) activate_mm() argument
141 spin_lock_irqsave(&mm->context.lock, flags); activate_mm()
142 if (!CTX_VALID(mm->context)) activate_mm()
143 get_new_mmu_context(mm); activate_mm()
145 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))) activate_mm()
146 cpumask_set_cpu(cpu, mm_cpumask(mm)); activate_mm()
148 load_secondary_context(mm); activate_mm()
149 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); activate_mm()
150 tsb_context_switch(mm); activate_mm()
151 spin_unlock_irqrestore(&mm->context.lock, flags); activate_mm()
H A Dmmu_32.h7 /* mm/srmmu.c */
H A Dtlb_64.h16 void smp_flush_tlb_mm(struct mm_struct *mm);
17 #define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
19 #define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
H A Dpgalloc_32.h27 #define pgd_free(mm, pgd) free_pgd_fast(pgd)
28 #define pgd_alloc(mm) get_pgd_fast()
39 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, pmd_alloc_one() argument
51 #define pmd_free(mm, pmd) free_pmd_fast(pmd)
52 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
54 void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
60 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
62 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
74 #define pte_free_kernel(mm, pte) free_pte_fast(pte)
76 void pte_free(struct mm_struct * mm, pgtable_t pte);
77 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
H A Dmmu_context_32.h8 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
15 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
22 void destroy_context(struct mm_struct *mm);
25 void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
28 #define deactivate_mm(tsk,mm) do { } while (0)
31 #define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL)
H A Dpgalloc_64.h6 #include <linux/mm.h>
25 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
30 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
42 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) pud_alloc_one() argument
48 static inline void pud_free(struct mm_struct *mm, pud_t *pud) pud_free() argument
53 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) pmd_alloc_one() argument
59 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
64 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
66 pgtable_t pte_alloc_one(struct mm_struct *mm,
68 void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
69 void pte_free(struct mm_struct *mm, pgtable_t ptepage);
H A Dtlbflush_32.h8 #define flush_tlb_mm(mm) \
9 sparc32_cachetlb_ops->tlb_mm(mm)
H A Dtlbflush_64.h12 struct mm_struct *mm; member in struct:tlb_batch
20 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
24 static inline void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument
54 static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) global_flush_tlb_page() argument
56 __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); global_flush_tlb_page()
62 void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
64 #define global_flush_tlb_page(mm, vaddr) \
65 smp_flush_tlb_page(mm, vaddr)
H A Dcacheflush_32.h8 #define flush_cache_mm(mm) \
9 sparc32_cachetlb_ops->cache_mm(mm)
10 #define flush_cache_dup_mm(mm) \
11 sparc32_cachetlb_ops->cache_mm(mm)
34 #define flush_sig_insns(mm,insn_addr) \
35 sparc32_cachetlb_ops->sig_insns(mm, insn_addr)
/linux-4.4.14/arch/unicore32/mm/
H A Dpgd.c2 * linux/arch/unicore32/mm/pgd.c
12 #include <linux/mm.h>
20 #include "mm.h"
27 pgd_t *get_pgd_slow(struct mm_struct *mm) get_pgd_slow() argument
53 new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0); get_pgd_slow()
57 new_pte = pte_alloc_map(mm, NULL, new_pmd, 0); get_pgd_slow()
71 pmd_free(mm, new_pmd); get_pgd_slow()
72 mm_dec_nr_pmds(mm); get_pgd_slow()
79 void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd) free_pgd_slow() argument
99 pte_free(mm, pte); free_pgd_slow()
100 atomic_long_dec(&mm->nr_ptes); free_pgd_slow()
101 pmd_free(mm, pmd); free_pgd_slow()
102 mm_dec_nr_pmds(mm); free_pgd_slow()
H A Dproc-syms.c2 * linux/arch/unicore32/mm/proc-syms.c
13 #include <linux/mm.h>
/linux-4.4.14/arch/alpha/include/asm/
H A Dpgalloc.h4 #include <linux/mm.h>
14 pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) pmd_populate() argument
21 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument
27 pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) pgd_populate() argument
32 extern pgd_t *pgd_alloc(struct mm_struct *mm);
35 pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
41 pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_alloc_one() argument
48 pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
54 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) pte_alloc_one_kernel() argument
61 pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
67 pte_alloc_one(struct mm_struct *mm, unsigned long address) pte_alloc_one() argument
69 pte_t *pte = pte_alloc_one_kernel(mm, address); pte_alloc_one()
83 pte_free(struct mm_struct *mm, pgtable_t page) pte_free() argument
H A Dtlb.h8 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
12 #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
13 #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
H A Dtlbflush.h4 #include <linux/mm.h>
21 ev4_flush_tlb_current(struct mm_struct *mm) ev4_flush_tlb_current() argument
23 __load_new_mm_context(mm); ev4_flush_tlb_current()
28 ev5_flush_tlb_current(struct mm_struct *mm) ev5_flush_tlb_current() argument
30 __load_new_mm_context(mm); ev5_flush_tlb_current()
38 ev4_flush_tlb_current_page(struct mm_struct * mm, ev4_flush_tlb_current_page() argument
44 __load_new_mm_context(mm); ev4_flush_tlb_current_page()
51 ev5_flush_tlb_current_page(struct mm_struct * mm, ev5_flush_tlb_current_page() argument
56 __load_new_mm_context(mm); ev5_flush_tlb_current_page()
89 flush_tlb_other(struct mm_struct *mm) flush_tlb_other() argument
91 unsigned long *mmc = &mm->context[smp_processor_id()]; flush_tlb_other()
107 flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument
109 if (mm == current->active_mm) flush_tlb_mm()
110 flush_tlb_current(mm); flush_tlb_mm()
112 flush_tlb_other(mm); flush_tlb_mm()
119 struct mm_struct *mm = vma->vm_mm; flush_tlb_page() local
121 if (mm == current->active_mm) flush_tlb_page()
122 flush_tlb_current_page(mm, vma, addr); flush_tlb_page()
124 flush_tlb_other(mm); flush_tlb_page()
H A Dcacheflush.h4 #include <linux/mm.h>
8 #define flush_cache_mm(mm) do { } while (0)
9 #define flush_cache_dup_mm(mm) do { } while (0)
42 that icache entries are tagged with the ASN and load a new mm context. */
54 struct mm_struct *mm = vma->vm_mm; flush_icache_user_range() local
55 if (current->active_mm == mm) flush_icache_user_range()
56 __load_new_mm_context(mm); flush_icache_user_range()
58 mm->context[smp_processor_id()] = 0; flush_icache_user_range()
/linux-4.4.14/arch/sparc/power/
H A Dhibernate.c7 #include <linux/mm.h>
36 struct mm_struct *mm = current->active_mm; restore_processor_state() local
38 load_secondary_context(mm); restore_processor_state()
39 tsb_context_switch(mm); restore_processor_state()
/linux-4.4.14/arch/xtensa/include/asm/
H A Dnommu_context.h5 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
9 static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm) init_new_context() argument
14 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
27 static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm) deactivate_mm() argument
H A Dmmu_context.h68 static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu) get_new_mmu_context() argument
80 mm->context.asid[cpu] = asid; get_new_mmu_context()
81 mm->context.cpu = cpu; get_new_mmu_context()
84 static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) get_mmu_context() argument
90 if (mm) { get_mmu_context()
91 unsigned long asid = mm->context.asid[cpu]; get_mmu_context()
95 get_new_mmu_context(mm, cpu); get_mmu_context()
99 static inline void activate_context(struct mm_struct *mm, unsigned int cpu) activate_context() argument
101 get_mmu_context(mm, cpu); activate_context()
102 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); activate_context()
113 struct mm_struct *mm) init_new_context()
117 mm->context.asid[cpu] = NO_CONTEXT; for_each_possible_cpu()
119 mm->context.cpu = -1;
138 #define deactivate_mm(tsk, mm) do { } while (0)
144 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
150 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
112 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
H A Dpgalloc.h24 #define pmd_populate_kernel(mm, pmdp, ptep) \
26 #define pmd_populate(mm, pmdp, page) \
31 pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
36 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
41 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
55 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
61 pte = pte_alloc_one_kernel(mm, addr); pte_alloc_one()
72 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
77 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
/linux-4.4.14/include/asm-generic/
H A Dmm_hooks.h10 struct mm_struct *mm) arch_dup_mmap()
14 static inline void arch_exit_mmap(struct mm_struct *mm) arch_exit_mmap() argument
18 static inline void arch_unmap(struct mm_struct *mm, arch_unmap() argument
24 static inline void arch_bprm_mm_init(struct mm_struct *mm, arch_bprm_mm_init() argument
9 arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) arch_dup_mmap() argument
H A Dmm-arch-hooks.h2 * Architecture specific mm hooks
10 * the architecture which doesn't need specific mm hooks.
12 * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h
H A D4level-fixup.h14 #define pmd_alloc(mm, pud, address) \
15 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
18 #define pud_alloc(mm, pgd, address) (pgd)
26 #define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd)
32 #define pud_free(mm, x) do { } while (0)
H A Dmmu_context.h14 static inline void enter_lazy_tlb(struct mm_struct *mm, enter_lazy_tlb() argument
20 struct mm_struct *mm) init_new_context()
25 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
30 struct mm_struct *mm) deactivate_mm()
19 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
29 deactivate_mm(struct task_struct *task, struct mm_struct *mm) deactivate_mm() argument
/linux-4.4.14/arch/cris/mm/
H A Dtlb.c2 * linux/arch/cris/mm/tlb.c
16 /* The TLB can host up to 64 different mm contexts at the same time.
19 * of which mm we have assigned to which page_id, so that we know when
36 alloc_context(struct mm_struct *mm) alloc_context() argument
40 D(printk("tlb: alloc context %d (%p)\n", map_replace_ptr, mm)); alloc_context()
42 /* did we replace an mm ? */ alloc_context()
47 /* throw out any TLB entries belonging to the mm we replace alloc_context()
57 mm->context.page_id = map_replace_ptr; alloc_context()
58 page_id_map[map_replace_ptr] = mm; alloc_context()
67 * if needed, get a new MMU context for the mm. otherwise nothing is done.
71 get_mmu_context(struct mm_struct *mm) get_mmu_context() argument
73 if(mm->context.page_id == NO_CONTEXT) get_mmu_context()
74 alloc_context(mm); get_mmu_context()
78 * destroying the mm itself. this is only called when the last user of the mm
86 destroy_context(struct mm_struct *mm) destroy_context() argument
88 if(mm->context.page_id != NO_CONTEXT) { destroy_context()
89 D(printk("destroy_context %d (%p)\n", mm->context.page_id, mm)); destroy_context()
90 flush_tlb_mm(mm); /* TODO this might be redundant ? */ destroy_context()
91 page_id_map[mm->context.page_id] = NULL; destroy_context()
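The comment at the top of this file and alloc_context() above describe a fixed pool of hardware contexts (64 page_ids) handed out round-robin: when a slot is reused, the TLB entries of the evicted mm are flushed and its page_id is taken away. A self-contained userspace model of that replacement scheme, assuming the 64-entry pool from the comment (the struct and the flush are stand-ins):

    #include <stdio.h>

    #define NR_CONTEXTS 64
    #define NO_CONTEXT  (-1)

    struct mm { int page_id; const char *name; };

    static struct mm *page_id_map[NR_CONTEXTS]; /* which mm owns each page_id */
    static int map_replace_ptr;                 /* round-robin victim pointer */

    static void flush_tlb_for(struct mm *victim)
    {
            printf("flush TLB entries tagged with page_id %d (%s)\n",
                   victim->page_id, victim->name);
    }

    static void alloc_context(struct mm *mm)
    {
            struct mm *old = page_id_map[map_replace_ptr];

            if (old) {                          /* evict the slot's previous owner */
                    flush_tlb_for(old);
                    old->page_id = NO_CONTEXT;
            }
            mm->page_id = map_replace_ptr;
            page_id_map[map_replace_ptr] = mm;
            map_replace_ptr = (map_replace_ptr + 1) % NR_CONTEXTS;
    }

    static void get_mmu_context(struct mm *mm)
    {
            if (mm->page_id == NO_CONTEXT)      /* allocate on first use or after eviction */
                    alloc_context(mm);
    }

    int main(void)
    {
            struct mm a = { NO_CONTEXT, "a" }, b = { NO_CONTEXT, "b" };

            get_mmu_context(&a);
            get_mmu_context(&b);
            printf("a=%d b=%d\n", a.page_id, b.page_id);
            return 0;
    }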
/linux-4.4.14/arch/arm/mm/
H A Dpgd.c2 * linux/arch/arm/mm/pgd.c
10 #include <linux/mm.h>
20 #include "mm.h"
33 pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
59 new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR), pgd_alloc()
64 new_pmd = pmd_alloc(mm, new_pud, 0); pgd_alloc()
75 new_pud = pud_alloc(mm, new_pgd, 0); pgd_alloc()
79 new_pmd = pmd_alloc(mm, new_pud, 0); pgd_alloc()
83 new_pte = pte_alloc_map(mm, NULL, new_pmd, 0); pgd_alloc()
109 pmd_free(mm, new_pmd); pgd_alloc()
110 mm_dec_nr_pmds(mm); pgd_alloc()
112 pud_free(mm, new_pud); pgd_alloc()
119 void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) pgd_free() argument
143 pte_free(mm, pte); pgd_free()
144 atomic_long_dec(&mm->nr_ptes); pgd_free()
147 pmd_free(mm, pmd); pgd_free()
148 mm_dec_nr_pmds(mm); pgd_free()
151 pud_free(mm, pud); pgd_free()
167 pmd_free(mm, pmd); pgd_free()
168 mm_dec_nr_pmds(mm); pgd_free()
170 pud_free(mm, pud); pgd_free()
H A Dmmap.c2 * linux/arch/arm/mm/mmap.c
5 #include <linux/mm.h>
58 struct mm_struct *mm = current->mm; arch_get_unmapped_area() local
90 vma = find_vma(mm, addr); arch_get_unmapped_area()
98 info.low_limit = mm->mmap_base; arch_get_unmapped_area()
111 struct mm_struct *mm = current->mm; arch_get_unmapped_area_topdown() local
141 vma = find_vma(mm, addr); arch_get_unmapped_area_topdown()
150 info.high_limit = mm->mmap_base; arch_get_unmapped_area_topdown()
164 info.low_limit = mm->mmap_base; arch_get_unmapped_area_topdown()
182 void arch_pick_mmap_layout(struct mm_struct *mm) arch_pick_mmap_layout() argument
190 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; arch_pick_mmap_layout()
191 mm->get_unmapped_area = arch_get_unmapped_area; arch_pick_mmap_layout()
193 mm->mmap_base = mmap_base(random_factor); arch_pick_mmap_layout()
194 mm->get_unmapped_area = arch_get_unmapped_area_topdown; arch_pick_mmap_layout()
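arch_get_unmapped_area() and arch_get_unmapped_area_topdown() above differ mainly in the direction of the gap search: the bottom-up variant starts at info.low_limit = mm->mmap_base, the top-down variant ends at info.high_limit = mm->mmap_base, and arch_pick_mmap_layout() chooses between them and randomizes the base. The kernel's real search walks the VMA tree; the following is only a small, self-contained model of the two directions over a sorted list of in-use regions (the limits and sizes are made up):

    #include <stdio.h>

    struct region { unsigned long start, end; };    /* half-open [start, end) */

    /* Bottom-up: lowest gap of `len` at or above low_limit. */
    static unsigned long search_up(const struct region *used, int n,
                                   unsigned long low_limit, unsigned long len)
    {
            unsigned long addr = low_limit;

            for (int i = 0; i < n; i++) {
                    if (addr + len <= used[i].start)
                            return addr;
                    if (used[i].end > addr)
                            addr = used[i].end;
            }
            return addr;
    }

    /* Top-down: highest gap of `len` ending at or below high_limit. */
    static unsigned long search_down(const struct region *used, int n,
                                     unsigned long high_limit, unsigned long len)
    {
            unsigned long addr = high_limit;

            for (int i = n - 1; i >= 0; i--) {
                    if (used[i].end + len <= addr)
                            return addr - len;
                    if (used[i].start < addr)
                            addr = used[i].start;
            }
            return addr - len;
    }

    int main(void)
    {
            struct region used[] = { { 0x10000, 0x20000 }, { 0x30000, 0x40000 } };

            printf("up:   %#lx\n", search_up(used, 2, 0x10000, 0x8000));
            printf("down: %#lx\n", search_down(used, 2, 0x50000, 0x8000));
            return 0;
    }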
/linux-4.4.14/arch/um/include/asm/
H A Dtlbflush.h9 #include <linux/mm.h>
14 * - flush_tlb() flushes the current mm struct TLBs
16 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
23 extern void flush_tlb_mm(struct mm_struct *mm);
H A Dmmu_context.h12 extern void uml_setup_stubs(struct mm_struct *mm);
16 static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) arch_dup_mmap() argument
18 uml_setup_stubs(mm); arch_dup_mmap()
20 extern void arch_exit_mmap(struct mm_struct *mm); arch_unmap()
21 static inline void arch_unmap(struct mm_struct *mm, arch_unmap() argument
26 static inline void arch_bprm_mm_init(struct mm_struct *mm, arch_bprm_mm_init() argument
34 #define deactivate_mm(tsk,mm) do { } while (0)
42 * when the new ->mm is used for the first time. activate_mm()
63 static inline void enter_lazy_tlb(struct mm_struct *mm, enter_lazy_tlb() argument
68 extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
70 extern void destroy_context(struct mm_struct *mm);
H A Dpgalloc.h11 #include <linux/mm.h>
13 #define pmd_populate_kernel(mm, pmd, pte) \
16 #define pmd_populate(mm, pmd, pte) \
26 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
31 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
36 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
50 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
H A Dtlb.h12 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
14 /* struct mmu_gather is an opaque type used by the mm code for passing around
18 struct mm_struct *mm; member in struct:mmu_gather
22 unsigned int fullmm; /* non-zero means full mm flush */
48 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) tlb_gather_mmu() argument
50 tlb->mm = mm; tlb_gather_mmu()
58 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
64 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end); tlb_flush_mmu_tlbonly()
132 #define tlb_migrate_finish(mm) do {} while (0)
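tlb_gather_mmu() above only records the mm and the address range on the mmu_gather; the unmapping work happens between the gather and the final flush. A hedged, schematic sketch of the usual bracket, assuming the standard tlb_finish_mmu(tlb, start, end) counterpart (it is not among the hits above, and the middle step is a placeholder comment, not real code):

    static void unmap_range_example(struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
    {
            struct mmu_gather tlb;

            tlb_gather_mmu(&tlb, mm, start, end);
            /* ... tear down the range, queueing pages/page tables on &tlb ... */
            tlb_finish_mmu(&tlb, start, end);
    }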
/linux-4.4.14/arch/unicore32/include/asm/
H A Dmmu_context.h17 #include <linux/mm.h>
24 #define init_new_context(tsk, mm) 0
26 #define destroy_context(mm) do { } while (0)
31 * mm: describes the currently active mm context
35 * tsk->mm will be NULL
38 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
43 * This is the actual mm switch as far as the scheduler
45 * calling the CPU specific function when the mm hasn't
58 #define deactivate_mm(tsk, mm) do { } while (0)
66 * (the macro is used as remove_vma() is static to mm/mmap.c)
68 #define arch_exit_mmap(mm) \
70 struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
76 mm->mmap = NULL; \
77 rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
78 vmacache_invalidate(mm); \
79 mm->map_count--; \
85 struct mm_struct *mm) arch_dup_mmap()
89 static inline void arch_unmap(struct mm_struct *mm, arch_unmap() argument
95 static inline void arch_bprm_mm_init(struct mm_struct *mm, arch_bprm_mm_init() argument
84 arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) arch_dup_mmap() argument
H A Dpgalloc.h25 extern pgd_t *get_pgd_slow(struct mm_struct *mm);
26 extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
28 #define pgd_alloc(mm) get_pgd_slow(mm)
29 #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
37 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) pte_alloc_one_kernel() argument
49 pte_alloc_one(struct mm_struct *mm, unsigned long addr) pte_alloc_one() argument
70 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
76 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
90 * of the mm address space.
93 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) pmd_populate_kernel() argument
105 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) pmd_populate() argument
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/core/
H A Dmm.c24 #include <core/mm.h>
26 #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
30 nvkm_mm_dump(struct nvkm_mm *mm, const char *header) nvkm_mm_dump() argument
36 list_for_each_entry(node, &mm->nodes, nl_entry) { nvkm_mm_dump()
41 list_for_each_entry(node, &mm->free, fl_entry) { nvkm_mm_dump()
48 nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis) nvkm_mm_free() argument
72 list_for_each_entry(prev, &mm->free, fl_entry) { nvkm_mm_free()
86 region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size) region_head() argument
111 nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, nvkm_mm_head() argument
121 list_for_each_entry(this, &mm->free, fl_entry) { nvkm_mm_head()
131 s = roundup(s, mm->block_size); nvkm_mm_head()
135 e = rounddown(e, mm->block_size); nvkm_mm_head()
143 if (splitoff && !region_head(mm, this, splitoff)) nvkm_mm_head()
146 this = region_head(mm, this, min(size_max, e - s)); nvkm_mm_head()
160 region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size) region_tail() argument
185 nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, nvkm_mm_tail() argument
193 list_for_each_entry_reverse(this, &mm->free, fl_entry) { nvkm_mm_tail()
204 s = roundup(s, mm->block_size); nvkm_mm_tail()
208 e = rounddown(e, mm->block_size); nvkm_mm_tail()
221 if (c && !region_tail(mm, this, c)) nvkm_mm_tail()
224 this = region_tail(mm, this, a); nvkm_mm_tail()
238 nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block) nvkm_mm_init() argument
243 if (nvkm_mm_initialised(mm)) { nvkm_mm_init()
244 prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry); nvkm_mm_init()
253 list_add_tail(&node->nl_entry, &mm->nodes); nvkm_mm_init()
255 BUG_ON(block != mm->block_size); nvkm_mm_init()
257 INIT_LIST_HEAD(&mm->nodes); nvkm_mm_init()
258 INIT_LIST_HEAD(&mm->free); nvkm_mm_init()
259 mm->block_size = block; nvkm_mm_init()
260 mm->heap_nodes = 0; nvkm_mm_init()
268 node->offset = roundup(offset, mm->block_size); nvkm_mm_init()
269 node->length = rounddown(offset + length, mm->block_size); nvkm_mm_init()
273 list_add_tail(&node->nl_entry, &mm->nodes); nvkm_mm_init()
274 list_add_tail(&node->fl_entry, &mm->free); nvkm_mm_init()
275 node->heap = ++mm->heap_nodes; nvkm_mm_init()
280 nvkm_mm_fini(struct nvkm_mm *mm) nvkm_mm_fini() argument
285 if (!nvkm_mm_initialised(mm)) nvkm_mm_fini()
288 list_for_each_entry(node, &mm->nodes, nl_entry) { nvkm_mm_fini()
290 if (++nodes > mm->heap_nodes) { nvkm_mm_fini()
291 nvkm_mm_dump(mm, "mm not clean!"); nvkm_mm_fini()
297 list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) { nvkm_mm_fini()
302 mm->heap_nodes = 0; nvkm_mm_fini()
/linux-4.4.14/fs/proc/
H A Dtask_nommu.c2 #include <linux/mm.h>
18 void task_mem(struct seq_file *m, struct mm_struct *mm) task_mem() argument
25 down_read(&mm->mmap_sem); task_mem()
26 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { task_mem()
39 if (atomic_read(&mm->mm_count) > 1 || task_mem()
49 if (atomic_read(&mm->mm_count) > 1) task_mem()
50 sbytes += kobjsize(mm); task_mem()
52 bytes += kobjsize(mm); task_mem()
77 up_read(&mm->mmap_sem); task_mem()
80 unsigned long task_vsize(struct mm_struct *mm) task_vsize() argument
86 down_read(&mm->mmap_sem); task_vsize()
87 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { task_vsize()
91 up_read(&mm->mmap_sem); task_vsize()
95 unsigned long task_statm(struct mm_struct *mm, task_statm() argument
102 unsigned long size = kobjsize(mm); task_statm()
104 down_read(&mm->mmap_sem); task_statm()
105 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { task_statm()
115 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) task_statm()
117 *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK)) task_statm()
119 up_read(&mm->mmap_sem); task_statm()
151 struct mm_struct *mm = vma->vm_mm; nommu_vma_show() local
184 } else if (mm) { nommu_vma_show()
193 if (!is_pid || (vma->vm_start <= mm->start_stack && nommu_vma_show()
194 vma->vm_end >= mm->start_stack)) nommu_vma_show()
229 struct mm_struct *mm; m_start() local
233 /* pin the task and mm whilst we play with them */ m_start()
238 mm = priv->mm; m_start()
239 if (!mm || !atomic_inc_not_zero(&mm->mm_users)) m_start()
242 down_read(&mm->mmap_sem); m_start()
244 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) m_start()
248 up_read(&mm->mmap_sem); m_start()
249 mmput(mm); m_start()
258 up_read(&priv->mm->mmap_sem); m_stop()
259 mmput(priv->mm); m_stop()
299 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); maps_open()
300 if (IS_ERR(priv->mm)) { maps_open()
301 int err = PTR_ERR(priv->mm); maps_open()
316 if (priv->mm) map_release()
317 mmdrop(priv->mm); map_release()
H A Dtask_mmu.c1 #include <linux/mm.h>
23 void task_mem(struct seq_file *m, struct mm_struct *mm) task_mem() argument
29 * Note: to minimize their overhead, mm maintains hiwater_vm and task_mem()
35 hiwater_vm = total_vm = mm->total_vm; task_mem()
36 if (hiwater_vm < mm->hiwater_vm) task_mem()
37 hiwater_vm = mm->hiwater_vm; task_mem()
38 hiwater_rss = total_rss = get_mm_rss(mm); task_mem()
39 if (hiwater_rss < mm->hiwater_rss) task_mem()
40 hiwater_rss = mm->hiwater_rss; task_mem()
42 data = mm->total_vm - mm->shared_vm - mm->stack_vm; task_mem()
43 text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; task_mem()
44 lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; task_mem()
45 swap = get_mm_counter(mm, MM_SWAPENTS); task_mem()
46 ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes); task_mem()
47 pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm); task_mem()
64 mm->locked_vm << (PAGE_SHIFT-10), task_mem()
65 mm->pinned_vm << (PAGE_SHIFT-10), task_mem()
69 mm->stack_vm << (PAGE_SHIFT-10), text, lib, task_mem()
73 hugetlb_report_usage(m, mm); task_mem()
76 unsigned long task_vsize(struct mm_struct *mm) task_vsize() argument
78 return PAGE_SIZE * mm->total_vm; task_vsize()
81 unsigned long task_statm(struct mm_struct *mm, task_statm() argument
85 *shared = get_mm_counter(mm, MM_FILEPAGES); task_statm()
86 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) task_statm()
88 *data = mm->total_vm - mm->shared_vm; task_statm()
89 *resident = *shared + get_mm_counter(mm, MM_ANONPAGES); task_statm()
90 return mm->total_vm; task_statm()
121 struct mm_struct *mm = priv->mm; vma_stop() local
124 up_read(&mm->mmap_sem); vma_stop()
125 mmput(mm); vma_stop()
146 struct mm_struct *mm; m_start() local
158 mm = priv->mm; m_start()
159 if (!mm || !atomic_inc_not_zero(&mm->mm_users)) m_start()
162 down_read(&mm->mmap_sem); m_start()
164 priv->tail_vma = get_gate_vma(mm); m_start()
167 vma = find_vma(mm, last_addr); m_start()
173 if (pos < mm->map_count) { m_start()
174 for (vma = mm->mmap; pos; pos--) { m_start()
182 if (pos == mm->map_count && priv->tail_vma) m_start()
222 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); proc_maps_open()
223 if (IS_ERR(priv->mm)) { proc_maps_open()
224 int err = PTR_ERR(priv->mm); proc_maps_open()
238 if (priv->mm) proc_map_release()
239 mmdrop(priv->mm); proc_map_release()
273 struct mm_struct *mm = vma->vm_mm; show_map_vma() local
329 if (!mm) { show_map_vma()
334 if (vma->vm_start <= mm->brk && show_map_vma()
335 vma->vm_end >= mm->start_brk) { show_map_vma()
346 if (!is_pid || (vma->vm_start <= mm->start_stack && show_map_vma()
347 vma->vm_end >= mm->start_stack)) { show_map_vma()
612 [ilog2(VM_MIXEDMAP)] = "mm", show_smap_vma_flags()
669 .mm = vma->vm_mm, show_smap()
914 struct mm_struct *mm; clear_refs_write() local
935 mm = get_task_mm(task); clear_refs_write()
936 if (mm) { clear_refs_write()
943 .mm = mm, clear_refs_write()
950 * resident set size to this mm's current rss value. clear_refs_write()
952 down_write(&mm->mmap_sem); clear_refs_write()
953 reset_mm_hiwater_rss(mm); clear_refs_write()
954 up_write(&mm->mmap_sem); clear_refs_write()
958 down_read(&mm->mmap_sem); clear_refs_write()
960 for (vma = mm->mmap; vma; vma = vma->vm_next) { clear_refs_write()
963 up_read(&mm->mmap_sem); clear_refs_write()
964 down_write(&mm->mmap_sem); clear_refs_write()
965 for (vma = mm->mmap; vma; vma = vma->vm_next) { clear_refs_write()
969 downgrade_write(&mm->mmap_sem); clear_refs_write()
972 mmu_notifier_invalidate_range_start(mm, 0, -1); clear_refs_write()
976 mmu_notifier_invalidate_range_end(mm, 0, -1); clear_refs_write()
977 flush_tlb_mm(mm); clear_refs_write()
978 up_read(&mm->mmap_sem); clear_refs_write()
980 mmput(mm); clear_refs_write()
1038 struct vm_area_struct *vma = find_vma(walk->mm, addr); pagemap_pte_hole()
1161 orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); pagemap_pmd_range()
1253 struct mm_struct *mm = file->private_data; pagemap_read() local
1262 if (!mm || !atomic_inc_not_zero(&mm->mm_users)) pagemap_read()
1288 pagemap_walk.mm = mm; pagemap_read()
1294 end_vaddr = mm->task_size; pagemap_read()
1297 if (svpfn > mm->task_size >> PAGE_SHIFT) pagemap_read()
1316 down_read(&mm->mmap_sem); pagemap_read()
1318 up_read(&mm->mmap_sem); pagemap_read()
1337 mmput(mm); pagemap_read()
1344 struct mm_struct *mm; pagemap_open() local
1346 mm = proc_mem_open(inode, PTRACE_MODE_READ); pagemap_open()
1347 if (IS_ERR(mm)) pagemap_open()
1348 return PTR_ERR(mm); pagemap_open()
1349 file->private_data = mm; pagemap_open()
1355 struct mm_struct *mm = file->private_data; pagemap_release() local
1357 if (mm) pagemap_release()
1358 mmdrop(mm); pagemap_release()
1488 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); gather_pte_stats()
1537 struct mm_struct *mm = vma->vm_mm; show_numa_map() local
1542 .mm = mm, show_numa_map()
1548 if (!mm) show_numa_map()
1567 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { show_numa_map()
1576 if (!is_pid || (vma->vm_start <= mm->start_stack && show_numa_map()
1577 vma->vm_end >= mm->start_stack)) show_numa_map()
/linux-4.4.14/arch/s390/mm/
H A Dpgtable.c10 #include <linux/mm.h>
27 unsigned long *crst_table_alloc(struct mm_struct *mm) crst_table_alloc() argument
36 void crst_table_free(struct mm_struct *mm, unsigned long *table) crst_table_free() argument
43 struct mm_struct *mm = arg; __crst_table_upgrade() local
45 if (current->active_mm == mm) { __crst_table_upgrade()
47 set_user_asce(mm); __crst_table_upgrade()
52 int crst_table_upgrade(struct mm_struct *mm) crst_table_upgrade() argument
57 BUG_ON(mm->context.asce_limit != (1UL << 42)); crst_table_upgrade()
59 table = crst_table_alloc(mm); crst_table_upgrade()
63 spin_lock_bh(&mm->page_table_lock); crst_table_upgrade()
64 pgd = (unsigned long *) mm->pgd; crst_table_upgrade()
66 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); crst_table_upgrade()
67 mm->pgd = (pgd_t *) table; crst_table_upgrade()
68 mm->context.asce_limit = 1UL << 53; crst_table_upgrade()
69 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | crst_table_upgrade()
71 mm->task_size = mm->context.asce_limit; crst_table_upgrade()
72 spin_unlock_bh(&mm->page_table_lock); crst_table_upgrade()
74 on_each_cpu(__crst_table_upgrade, mm, 0); crst_table_upgrade()
78 void crst_table_downgrade(struct mm_struct *mm) crst_table_downgrade() argument
83 BUG_ON(mm->context.asce_limit != (1UL << 42)); crst_table_downgrade()
85 if (current->active_mm == mm) { crst_table_downgrade()
87 __tlb_flush_mm(mm); crst_table_downgrade()
90 pgd = mm->pgd; crst_table_downgrade()
91 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); crst_table_downgrade()
92 mm->context.asce_limit = 1UL << 31; crst_table_downgrade()
93 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | crst_table_downgrade()
95 mm->task_size = mm->context.asce_limit; crst_table_downgrade()
96 crst_table_free(mm, (unsigned long *) pgd); crst_table_downgrade()
98 if (current->active_mm == mm) crst_table_downgrade()
99 set_user_asce(mm); crst_table_downgrade()
106 * @mm: pointer to the parent mm_struct
111 struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit) gmap_alloc() argument
142 gmap->mm = mm; gmap_alloc()
154 down_write(&mm->mmap_sem); gmap_alloc()
155 list_add(&gmap->list, &mm->context.gmap_list); gmap_alloc()
156 up_write(&mm->mmap_sem); gmap_alloc()
169 __tlb_flush_asce(gmap->mm, gmap->asce); gmap_flush_tlb()
208 __tlb_flush_asce(gmap->mm, gmap->asce); gmap_free()
217 down_write(&gmap->mm->mmap_sem); gmap_free()
219 up_write(&gmap->mm->mmap_sem); gmap_free()
259 spin_lock(&gmap->mm->page_table_lock); gmap_alloc_table()
267 spin_unlock(&gmap->mm->page_table_lock); gmap_alloc_table()
348 down_write(&gmap->mm->mmap_sem); gmap_unmap_segment()
351 up_write(&gmap->mm->mmap_sem); gmap_unmap_segment()
380 down_write(&gmap->mm->mmap_sem); gmap_map_segment()
390 up_write(&gmap->mm->mmap_sem); gmap_map_segment()
408 * The mmap_sem of the mm that belongs to the address space must be held
434 down_read(&gmap->mm->mmap_sem); gmap_translate()
436 up_read(&gmap->mm->mmap_sem); gmap_translate()
447 static void gmap_unlink(struct mm_struct *mm, unsigned long *table, gmap_unlink() argument
453 list_for_each_entry(gmap, &mm->context.gmap_list, list) { gmap_unlink()
468 * The mmap_sem of the mm that belongs to the address space must be held
473 struct mm_struct *mm; __gmap_link() local
508 /* Walk the parent mm page table */ __gmap_link()
509 mm = gmap->mm; __gmap_link()
510 pgd = pgd_offset(mm, vmaddr); __gmap_link()
523 ptl = pmd_lock(mm, pmd); __gmap_link()
553 down_read(&gmap->mm->mmap_sem); gmap_fault()
559 if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) { gmap_fault()
565 up_read(&gmap->mm->mmap_sem); gmap_fault()
570 static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm) gmap_zap_swap_entry() argument
573 dec_mm_counter(mm, MM_SWAPENTS); gmap_zap_swap_entry()
578 dec_mm_counter(mm, MM_ANONPAGES); gmap_zap_swap_entry()
580 dec_mm_counter(mm, MM_FILEPAGES); gmap_zap_swap_entry()
602 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); __gmap_zap()
614 gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm); __gmap_zap()
615 pte_clear(gmap->mm, vmaddr, ptep); __gmap_zap()
628 down_read(&gmap->mm->mmap_sem); gmap_discard()
638 /* Find vma in the parent mm */ gmap_discard()
639 vma = find_vma(gmap->mm, vmaddr); gmap_discard()
643 up_read(&gmap->mm->mmap_sem); gmap_discard()
695 down_read(&gmap->mm->mmap_sem); gmap_ipte_notify()
704 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) { gmap_ipte_notify()
712 ptep = get_locked_pte(gmap->mm, addr, &ptl); gmap_ipte_notify()
725 up_read(&gmap->mm->mmap_sem); gmap_ipte_notify()
732 * @mm: pointer to the process mm_struct
739 void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte) gmap_do_ipte_notify() argument
749 list_for_each_entry(gmap, &mm->context.gmap_list, list) { gmap_do_ipte_notify()
762 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, set_guest_storage_key() argument
769 down_read(&mm->mmap_sem); set_guest_storage_key()
771 ptep = get_locked_pte(mm, addr, &ptl); set_guest_storage_key()
773 up_read(&mm->mmap_sem); set_guest_storage_key()
779 if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) { set_guest_storage_key()
780 up_read(&mm->mmap_sem); set_guest_storage_key()
810 up_read(&mm->mmap_sem); set_guest_storage_key()
815 unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr) get_guest_storage_key() argument
823 down_read(&mm->mmap_sem); get_guest_storage_key()
824 ptep = get_locked_pte(mm, addr, &ptl); get_guest_storage_key()
826 up_read(&mm->mmap_sem); get_guest_storage_key()
849 up_read(&mm->mmap_sem); get_guest_storage_key()
890 static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table, gmap_unlink() argument
911 unsigned long *page_table_alloc(struct mm_struct *mm) page_table_alloc() argument
918 if (!mm_alloc_pgste(mm)) { page_table_alloc()
920 spin_lock_bh(&mm->context.list_lock); page_table_alloc()
921 if (!list_empty(&mm->context.pgtable_list)) { page_table_alloc()
922 page = list_first_entry(&mm->context.pgtable_list, page_table_alloc()
935 spin_unlock_bh(&mm->context.list_lock); page_table_alloc()
949 if (mm_alloc_pgste(mm)) { page_table_alloc()
958 spin_lock_bh(&mm->context.list_lock); page_table_alloc()
959 list_add(&page->lru, &mm->context.pgtable_list); page_table_alloc()
960 spin_unlock_bh(&mm->context.list_lock); page_table_alloc()
965 void page_table_free(struct mm_struct *mm, unsigned long *table) page_table_free() argument
971 if (!mm_alloc_pgste(mm)) { page_table_free()
974 spin_lock_bh(&mm->context.list_lock); page_table_free()
977 list_add(&page->lru, &mm->context.pgtable_list); page_table_free()
980 spin_unlock_bh(&mm->context.list_lock); page_table_free()
993 struct mm_struct *mm; page_table_free_rcu() local
997 mm = tlb->mm; page_table_free_rcu()
999 if (mm_alloc_pgste(mm)) { page_table_free_rcu()
1000 gmap_unlink(mm, table, vmaddr); page_table_free_rcu()
1006 spin_lock_bh(&mm->context.list_lock); page_table_free_rcu()
1009 list_add_tail(&page->lru, &mm->context.pgtable_list); page_table_free_rcu()
1012 spin_unlock_bh(&mm->context.list_lock); page_table_free_rcu()
1085 tlb->mm->context.flush_mm = 1; tlb_remove_table()
1090 __tlb_flush_mm_lazy(tlb->mm); tlb_remove_table()
1110 static inline void thp_split_mm(struct mm_struct *mm) thp_split_mm() argument
1114 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) { thp_split_mm()
1119 mm->def_flags |= VM_NOHUGEPAGE; thp_split_mm()
1122 static inline void thp_split_mm(struct mm_struct *mm) thp_split_mm() argument
1132 struct mm_struct *mm = current->mm; s390_enable_sie() local
1135 if (mm_has_pgste(mm)) s390_enable_sie()
1138 if (!mm_alloc_pgste(mm)) s390_enable_sie()
1140 down_write(&mm->mmap_sem); s390_enable_sie()
1141 mm->context.has_pgste = 1; s390_enable_sie()
1143 thp_split_mm(mm); s390_enable_sie()
1144 up_write(&mm->mmap_sem); s390_enable_sie()
1166 ptep_flush_direct(walk->mm, addr, pte); __s390_enable_skey()
1182 struct mm_struct *mm = current->mm; s390_enable_skey() local
1186 down_write(&mm->mmap_sem); s390_enable_skey()
1187 if (mm_use_skey(mm)) s390_enable_skey()
1190 mm->context.use_skey = 1; s390_enable_skey()
1191 for (vma = mm->mmap; vma; vma = vma->vm_next) { s390_enable_skey()
1194 mm->context.use_skey = 0; s390_enable_skey()
1199 mm->def_flags &= ~VM_MERGEABLE; s390_enable_skey()
1201 walk.mm = mm; s390_enable_skey()
1205 up_write(&mm->mmap_sem); s390_enable_skey()
1224 void s390_reset_cmma(struct mm_struct *mm) s390_reset_cmma() argument
1228 down_write(&mm->mmap_sem); s390_reset_cmma()
1229 walk.mm = mm; s390_reset_cmma()
1231 up_write(&mm->mmap_sem); s390_reset_cmma()
1244 pte = get_locked_pte(gmap->mm, address, &ptl); gmap_test_and_clear_dirty()
1248 if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte)) gmap_test_and_clear_dirty()
1298 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_trans_huge_deposit() argument
1303 assert_spin_locked(pmd_lockptr(mm, pmdp)); pgtable_trans_huge_deposit()
1306 if (!pmd_huge_pte(mm, pmdp)) pgtable_trans_huge_deposit()
1309 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); pgtable_trans_huge_deposit()
1310 pmd_huge_pte(mm, pmdp) = pgtable; pgtable_trans_huge_deposit()
1313 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) pgtable_trans_huge_withdraw() argument
1319 assert_spin_locked(pmd_lockptr(mm, pmdp)); pgtable_trans_huge_withdraw()
1322 pgtable = pmd_huge_pte(mm, pmdp); pgtable_trans_huge_withdraw()
1325 pmd_huge_pte(mm, pmdp) = NULL; pgtable_trans_huge_withdraw()
1327 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; pgtable_trans_huge_withdraw()
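The gmap entries above form a small API for backing a KVM guest address space with a host mm. Below is a minimal lifecycle sketch, not taken from the tree: only gmap_alloc(), gmap_map_segment(), gmap_unmap_segment() and gmap_free() come from the listing; the table limit, addresses and lengths are illustrative assumptions.

/* Hedged sketch: allocate a guest mapping on top of current->mm, back
 * guest real address 0 with the host range [hva, hva + 1 MB), undo it,
 * and free the gmap.  hva is assumed to be 1 MB segment aligned.
 */
static int example_gmap_lifecycle(unsigned long hva)
{
        struct gmap *gmap;
        int rc;

        gmap = gmap_alloc(current->mm, (1UL << 42) - 1);  /* limit value is an assumption */
        if (!gmap)
                return -ENOMEM;

        rc = gmap_map_segment(gmap, hva, 0, 1UL << 20);   /* host range -> guest address 0 */
        if (!rc)
                gmap_unmap_segment(gmap, 0, 1UL << 20);

        gmap_free(gmap);
        return rc;
}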
H A Dhugetlbpage.c8 #include <linux/mm.h>
88 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument
104 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, huge_ptep_get_and_clear() argument
110 pmdp_flush_direct(mm, addr, pmdp); huge_ptep_get_and_clear()
115 pte_t *huge_pte_alloc(struct mm_struct *mm, huge_pte_alloc() argument
122 pgdp = pgd_offset(mm, addr); huge_pte_alloc()
123 pudp = pud_alloc(mm, pgdp, addr); huge_pte_alloc()
125 pmdp = pmd_alloc(mm, pudp, addr); huge_pte_alloc()
129 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) huge_pte_offset() argument
135 pgdp = pgd_offset(mm, addr); huge_pte_offset()
H A Dmmap.c26 #include <linux/mm.h>
86 struct mm_struct *mm = current->mm; arch_get_unmapped_area() local
98 vma = find_vma(mm, addr); arch_get_unmapped_area()
106 info.low_limit = mm->mmap_base; arch_get_unmapped_area()
122 struct mm_struct *mm = current->mm; arch_get_unmapped_area_topdown() local
136 vma = find_vma(mm, addr); arch_get_unmapped_area_topdown()
145 info.high_limit = mm->mmap_base; arch_get_unmapped_area_topdown()
177 return crst_table_upgrade(current->mm); s390_mmap_check()
185 struct mm_struct *mm = current->mm; s390_get_unmapped_area() local
194 rc = crst_table_upgrade(mm); s390_get_unmapped_area()
207 struct mm_struct *mm = current->mm; s390_get_unmapped_area_topdown() local
216 rc = crst_table_upgrade(mm); s390_get_unmapped_area_topdown()
228 void arch_pick_mmap_layout(struct mm_struct *mm) arch_pick_mmap_layout() argument
240 mm->mmap_base = mmap_base_legacy(random_factor); arch_pick_mmap_layout()
241 mm->get_unmapped_area = s390_get_unmapped_area; arch_pick_mmap_layout()
243 mm->mmap_base = mmap_base(random_factor); arch_pick_mmap_layout()
244 mm->get_unmapped_area = s390_get_unmapped_area_topdown; arch_pick_mmap_layout()
/linux-4.4.14/arch/mn10300/include/asm/
H A Dmmu_context.h37 #define enter_lazy_tlb(mm, tsk) do {} while (0)
39 static inline void cpu_ran_vm(int cpu, struct mm_struct *mm) cpu_ran_vm() argument
42 cpumask_set_cpu(cpu, mm_cpumask(mm)); cpu_ran_vm()
46 static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm) cpu_maybe_ran_vm() argument
49 return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm)); cpu_maybe_ran_vm()
57 #define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
61 * @mm: The userspace VM context being set up
63 static inline unsigned long allocate_mmu_context(struct mm_struct *mm) allocate_mmu_context() argument
78 mm_context(mm) = mc; allocate_mmu_context()
85 static inline unsigned long get_mmu_context(struct mm_struct *mm) get_mmu_context() argument
89 if (mm) { get_mmu_context()
91 mc = mm_context(mm); get_mmu_context()
95 mc = allocate_mmu_context(mm); get_mmu_context()
104 struct mm_struct *mm) init_new_context()
109 mm->context.tlbpid[i] = MMU_NO_CONTEXT; init_new_context()
114 * after we have set current->mm to a new value, this activates the context for
115 * the new mm so we see the new mappings.
117 static inline void activate_context(struct mm_struct *mm) activate_context() argument
119 PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK; activate_context()
123 #define init_new_context(tsk, mm) (0)
124 #define activate_context(mm) local_flush_tlb()
129 * destroy_context - Destroy mm context information
130 * @mm: The MM being destroyed.
135 #define destroy_context(mm) do {} while (0)
158 #define deactivate_mm(tsk, mm) do {} while (0)
103 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
H A Dtlbflush.h14 #include <linux/mm.h>
56 * @mm: The MM to flush for
60 void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr) local_flush_tlb_page() argument
70 cnx = mm->context.tlbpid[smp_processor_id()]; local_flush_tlb_page()
90 * - flush_tlb() flushes the current mm struct TLBs
92 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
94 * - flush_tlb_range(mm, start, end) flushes a range of pages
95 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
123 static inline void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument
149 static inline void flush_tlb_pgtables(struct mm_struct *mm, flush_tlb_pgtables() argument
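The comment block above documents the per-mm TLB flush primitives. As a quick illustration (a sketch, not code from the tree), a page table rewrite on this interface is always followed by one of the listed flushes so stale translations cannot be reused:

/* Hedged sketch: drop every cached translation for an mm after its page
 * tables have been rewritten.  Only flush_tlb_mm() is taken from the
 * listing; the helper name is made up.
 */
static void example_retire_mappings(struct mm_struct *mm)
{
        /* ... clear or rewrite the PTEs belonging to mm here ... */
        flush_tlb_mm(mm);       /* then invalidate its TLB entries */
}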
/linux-4.4.14/arch/sh/include/asm/
H A Dpgalloc.h10 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
13 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
14 extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
15 extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
18 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument
24 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
34 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
40 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
57 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
62 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
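The pgalloc hooks above (pte_alloc_one(), pmd_populate(), pte_free()) are driven by the generic mm layer when a page table level is missing. A minimal sketch of that pattern, loosely modelled on __pte_alloc(); locking and the recheck under the page table lock are omitted:

/* Hedged sketch: allocate a PTE page and publish it in an empty PMD
 * entry, releasing it again if another thread won the race.
 */
static int example_fill_pmd(struct mm_struct *mm, pmd_t *pmd,
                            unsigned long address)
{
        pgtable_t new = pte_alloc_one(mm, address);

        if (!new)
                return -ENOMEM;
        if (pmd_none(*pmd))
                pmd_populate(mm, pmd, new);     /* wire up the new PTE page */
        else
                pte_free(mm, new);              /* lost the race, drop it */
        return 0;
}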
H A Dhugetlb.h9 static inline int is_hugepage_only_range(struct mm_struct *mm, is_hugepage_only_range() argument
37 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument
40 set_pte_at(mm, addr, ptep, pte); set_huge_pte_at()
43 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, huge_ptep_get_and_clear() argument
46 return ptep_get_and_clear(mm, addr, ptep); huge_ptep_get_and_clear()
64 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument
67 ptep_set_wrprotect(mm, addr, ptep); huge_ptep_set_wrprotect()
H A Dmmu_context.h38 #define cpu_context(cpu, mm) ((mm)->context.id[cpu])
40 #define cpu_asid(cpu, mm) \
41 (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
57 static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) get_mmu_context() argument
62 if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0) get_mmu_context()
90 cpu_context(cpu, mm) = asid_cache(cpu) = asid; get_mmu_context()
98 struct mm_struct *mm) init_new_context()
103 cpu_context(i, mm) = NO_CONTEXT; init_new_context()
109 * After we have set current->mm to a new value, this activates
110 * the context for the new mm so we see the new mappings.
112 static inline void activate_context(struct mm_struct *mm, unsigned int cpu) activate_context() argument
114 get_mmu_context(mm, cpu); activate_context()
115 set_asid(cpu_asid(cpu, mm)); activate_context()
134 #define deactivate_mm(tsk,mm) do { } while (0)
135 #define enter_lazy_tlb(mm,tsk) do { } while (0)
141 #define cpu_asid(cpu, mm) ({ (void)cpu; NO_CONTEXT; })
97 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
H A Dtlbflush.h8 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
14 extern void local_flush_tlb_mm(struct mm_struct *mm);
29 extern void flush_tlb_mm(struct mm_struct *mm);
39 #define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
/linux-4.4.14/arch/parisc/include/asm/
H A Dmmu_context.h4 #include <linux/mm.h>
11 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
22 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
24 BUG_ON(atomic_read(&mm->mm_users) != 1); init_new_context()
26 mm->context = alloc_sid(); init_new_context()
31 destroy_context(struct mm_struct *mm) destroy_context() argument
33 free_sid(mm->context); destroy_context()
34 mm->context = 0; destroy_context()
61 #define deactivate_mm(tsk,mm) do { } while (0)
67 * for a new mm created in the exec path. There's also activate_mm()
H A Dtlb.h6 flush_tlb_mm((tlb)->mm);\
24 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
25 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
H A Dpgalloc.h5 #include <linux/mm.h>
21 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
46 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
58 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) pgd_populate() argument
64 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_alloc_one() argument
73 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
80 * done by generic mm code. pmd_free()
82 mm_inc_nr_pmds(mm); pmd_free()
97 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
98 #define pmd_free(mm, x) do { } while (0)
99 #define pgd_populate(mm, pmd, pte) BUG()
104 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument
120 #define pmd_populate(mm, pmd, pte_page) \
121 pmd_populate_kernel(mm, pmd, page_address(pte_page))
125 pte_alloc_one(struct mm_struct *mm, unsigned long address) pte_alloc_one() argument
138 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) pte_alloc_one_kernel() argument
144 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
149 static inline void pte_free(struct mm_struct *mm, struct page *pte) pte_free() argument
152 pte_free_kernel(mm, page_address(pte)); pte_free()
H A Dtraps.h11 /* mm/fault.c */
/linux-4.4.14/arch/powerpc/include/asm/
H A Dpgalloc-32.h11 extern pgd_t *pgd_alloc(struct mm_struct *mm);
12 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
18 /* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */
19 #define pmd_free(mm, x) do { } while (0)
21 /* #define pgd_populate(mm, pmd, pte) BUG() */
24 #define pmd_populate_kernel(mm, pmd, pte) \
26 #define pmd_populate(mm, pmd, pte) \
30 #define pmd_populate_kernel(mm, pmd, pte) \
32 #define pmd_populate(mm, pmd, pte) \
37 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
38 extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
40 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
45 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) pte_free() argument
H A Dcopro.h18 int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
21 int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
25 void copro_flush_all_slbs(struct mm_struct *mm);
27 static inline void copro_flush_all_slbs(struct mm_struct *mm) {} argument
H A Dmm-arch-hooks.h2 * Architecture specific mm hooks
15 static inline void arch_remap(struct mm_struct *mm, arch_remap() argument
23 if (old_start == mm->context.vdso_base) arch_remap()
24 mm->context.vdso_base = new_start; arch_remap()
H A Dtlbflush.h7 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
9 * - local_flush_tlb_mm(mm, full) flushes the specified mm context on
41 extern void local_flush_tlb_mm(struct mm_struct *mm);
44 extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
48 extern void flush_tlb_mm(struct mm_struct *mm);
50 extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
53 #define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
55 #define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i)
64 extern void flush_tlb_mm(struct mm_struct *mm);
75 static inline void local_flush_tlb_mm(struct mm_struct *mm) local_flush_tlb_mm() argument
77 flush_tlb_mm(mm); local_flush_tlb_mm()
96 struct mm_struct *mm; member in struct:ppc64_tlb_batch
134 static inline void local_flush_tlb_mm(struct mm_struct *mm) local_flush_tlb_mm() argument
138 static inline void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument
168 extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
170 extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
H A Dmmu_context.h6 #include <linux/mm.h>
16 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
17 extern void destroy_context(struct mm_struct *mm);
38 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
52 extern int use_cop(unsigned long acop, struct mm_struct *mm); mmu_context_init()
53 extern void drop_cop(unsigned long acop, struct mm_struct *mm); mmu_context_init()
104 #define deactivate_mm(tsk,mm) do { } while (0)
107 * After we have set current->mm to a new value, this activates
108 * the context for the new mm so we see the new mappings.
120 static inline void enter_lazy_tlb(struct mm_struct *mm, enter_lazy_tlb() argument
130 struct mm_struct *mm) arch_dup_mmap()
134 static inline void arch_exit_mmap(struct mm_struct *mm) arch_exit_mmap() argument
138 static inline void arch_unmap(struct mm_struct *mm, arch_unmap() argument
142 if (start <= mm->context.vdso_base && mm->context.vdso_base < end) arch_unmap()
143 mm->context.vdso_base = 0; arch_unmap()
146 static inline void arch_bprm_mm_init(struct mm_struct *mm, arch_bprm_mm_init() argument
129 arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) arch_dup_mmap() argument
H A Dpgalloc-64.h44 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
49 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
58 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) pud_alloc_one() argument
64 static inline void pud_free(struct mm_struct *mm, pud_t *pud) pud_free() argument
69 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
74 #define pmd_populate(mm, pmd, pte_page) \
75 pmd_populate_kernel(mm, pmd, page_address(pte_page))
76 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
79 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
85 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
91 pte = pte_alloc_one_kernel(mm, address); pte_alloc_one()
102 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
107 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) pte_free() argument
175 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
177 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument
183 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
194 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
197 return (pte_t *)page_table_alloc(mm, address, 1); pte_alloc_one_kernel()
200 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
203 return (pgtable_t)page_table_alloc(mm, address, 0); pte_alloc_one()
206 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
208 page_table_free(mm, (unsigned long *)pte, 1); pte_free_kernel()
211 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) pte_free() argument
213 page_table_free(mm, (unsigned long *)ptepage, 0); pte_free()
224 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) pmd_alloc_one() argument
230 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
H A Dpage_64.h124 extern unsigned int get_slice_psize(struct mm_struct *mm,
127 extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
128 extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
131 #define slice_mm_new_context(mm) ((mm)->context.id == MMU_NO_CONTEXT)
137 #define get_slice_psize(mm, addr) ((mm)->context.user_psize)
138 #define slice_set_user_psize(mm, psize) \
140 (mm)->context.user_psize = (psize); \
141 (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
145 #define get_slice_psize(mm, addr) MMU_PAGE_64K
147 #define get_slice_psize(mm, addr) MMU_PAGE_4K
149 #define slice_set_user_psize(mm, psize) do { BUG(); } while(0)
152 #define slice_set_range_psize(mm, start, len, psize) \
153 slice_set_user_psize((mm), (psize))
154 #define slice_mm_new_context(mm) 1
/linux-4.4.14/arch/x86/include/asm/
H A Dpgalloc.h5 #include <linux/mm.h> /* for struct page */
8 static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; } __paravirt_pgd_alloc() argument
13 #define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm) paravirt_pgd_free()
14 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {} paravirt_alloc_pte() argument
15 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {} paravirt_alloc_pmd() argument
16 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {} paravirt_alloc_pmd_clone() argument
19 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {} paravirt_release_pte() argument
34 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); paravirt_release_pud()
42 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
48 static inline void pte_free(struct mm_struct *mm, struct page *pte) pte_free() argument
62 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_populate_kernel() argument
65 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); pmd_populate_kernel()
69 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
74 paravirt_alloc_pte(mm, pfn); pmd_populate()
81 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) pmd_alloc_one() argument
94 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
110 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
112 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
114 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); pud_populate()
120 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) pgd_populate() argument
122 paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); pgd_populate()
126 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) pud_alloc_one() argument
131 static inline void pud_free(struct mm_struct *mm, pud_t *pud) pud_free() argument
H A Dtlb.h11 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
13 flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
H A Dhugetlb.h9 static inline int is_hugepage_only_range(struct mm_struct *mm, is_hugepage_only_range() argument
38 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument
41 set_pte_at(mm, addr, ptep, pte); set_huge_pte_at()
44 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, huge_ptep_get_and_clear() argument
47 return ptep_get_and_clear(mm, addr, ptep); huge_ptep_get_and_clear()
66 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument
69 ptep_set_wrprotect(mm, addr, ptep); huge_ptep_set_wrprotect()
H A Dmmu_context.h24 static inline void load_mm_cr4(struct mm_struct *mm) load_mm_cr4() argument
27 atomic_read(&mm->context.perf_rdpmc_allowed)) load_mm_cr4()
33 static inline void load_mm_cr4(struct mm_struct *mm) {} load_mm_cr4() argument
55 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
56 void destroy_context(struct mm_struct *mm);
59 struct mm_struct *mm) init_new_context()
63 static inline void destroy_context(struct mm_struct *mm) {} destroy_context() argument
66 static inline void load_mm_ldt(struct mm_struct *mm) load_mm_ldt() argument
72 ldt = lockless_dereference(mm->context.ldt); load_mm_ldt()
75 * Any change to mm->context.ldt is followed by an IPI to all load_mm_ldt()
76 * CPUs with the mm active. The LDT will not be freed until load_mm_ldt()
99 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
151 /* Stop flush ipis for the previous mm */ switch_mm()
154 /* Load per-mm CR4 state */ switch_mm()
166 * never set context.ldt to NULL while the mm still switch_mm()
212 #define deactivate_mm(tsk, mm) \
217 #define deactivate_mm(tsk, mm) \
225 struct mm_struct *mm) arch_dup_mmap()
227 paravirt_arch_dup_mmap(oldmm, mm); arch_dup_mmap()
230 static inline void arch_exit_mmap(struct mm_struct *mm) arch_exit_mmap() argument
232 paravirt_arch_exit_mmap(mm); arch_exit_mmap()
236 static inline bool is_64bit_mm(struct mm_struct *mm) is_64bit_mm() argument
239 !(mm->context.ia32_compat == TIF_IA32); is_64bit_mm()
242 static inline bool is_64bit_mm(struct mm_struct *mm) is_64bit_mm() argument
248 static inline void arch_bprm_mm_init(struct mm_struct *mm, arch_bprm_mm_init() argument
251 mpx_mm_init(mm); arch_bprm_mm_init()
254 static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma, arch_unmap() argument
275 mpx_notify_unmap(mm, vma, start, end); arch_unmap()
58 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
224 arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) arch_dup_mmap() argument
H A Dmpx.h60 static inline int kernel_managing_mpx_tables(struct mm_struct *mm) kernel_managing_mpx_tables() argument
62 return (mm->bd_addr != MPX_INVALID_BOUNDS_DIR); kernel_managing_mpx_tables()
64 static inline void mpx_mm_init(struct mm_struct *mm) mpx_mm_init() argument
70 mm->bd_addr = MPX_INVALID_BOUNDS_DIR; mpx_mm_init()
72 void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
83 static inline int kernel_managing_mpx_tables(struct mm_struct *mm) kernel_managing_mpx_tables() argument
87 static inline void mpx_mm_init(struct mm_struct *mm) mpx_mm_init() argument
90 static inline void mpx_notify_unmap(struct mm_struct *mm, mpx_notify_unmap() argument
/linux-4.4.14/arch/x86/um/vdso/
H A Dvma.c11 #include <linux/mm.h>
59 struct mm_struct *mm = current->mm; arch_setup_additional_pages() local
64 down_write(&mm->mmap_sem); arch_setup_additional_pages()
66 err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE, arch_setup_additional_pages()
71 up_write(&mm->mmap_sem); arch_setup_additional_pages()
/linux-4.4.14/arch/tile/mm/
H A Dmmap.c17 #include <linux/mm.h>
32 static inline unsigned long mmap_base(struct mm_struct *mm) mmap_base() argument
52 void arch_pick_mmap_layout(struct mm_struct *mm) arch_pick_mmap_layout() argument
81 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; arch_pick_mmap_layout()
82 mm->get_unmapped_area = arch_get_unmapped_area; arch_pick_mmap_layout()
84 mm->mmap_base = mmap_base(mm); arch_pick_mmap_layout()
85 mm->get_unmapped_area = arch_get_unmapped_area_topdown; arch_pick_mmap_layout()
89 unsigned long arch_randomize_brk(struct mm_struct *mm) arch_randomize_brk() argument
91 unsigned long range_end = mm->brk + 0x02000000; arch_randomize_brk()
92 return randomize_range(mm->brk, range_end, 0) ? : mm->brk; arch_randomize_brk()
H A Delf.c15 #include <linux/mm.h>
41 static int notify_exec(struct mm_struct *mm) notify_exec() argument
55 exe_file = get_mm_exe_file(mm); notify_exec()
63 down_read(&mm->mmap_sem); notify_exec()
64 for (vma = current->mm->mmap; ; vma = vma->vm_next) { notify_exec()
66 up_read(&mm->mmap_sem); notify_exec()
94 up_read(&mm->mmap_sem); notify_exec()
120 struct mm_struct *mm = current->mm; arch_setup_additional_pages() local
128 if (!notify_exec(mm)) arch_setup_additional_pages()
131 down_write(&mm->mmap_sem); arch_setup_additional_pages()
152 up_write(&mm->mmap_sem); arch_setup_additional_pages()
/linux-4.4.14/arch/x86/um/
H A Dmem_64.c1 #include <linux/mm.h>
H A Dmem_32.c9 #include <linux/mm.h>
29 struct vm_area_struct *get_gate_vma(struct mm_struct *mm) get_gate_vma() argument
45 int in_gate_area(struct mm_struct *mm, unsigned long addr) in_gate_area() argument
47 struct vm_area_struct *vma = get_gate_vma(mm); in_gate_area()
/linux-4.4.14/arch/score/include/asm/
H A Dpgalloc.h4 #include <linux/mm.h>
6 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument
12 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
20 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
35 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
40 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
51 static inline struct page *pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
67 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
72 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
H A Dmmu_context.h42 static inline void enter_lazy_tlb(struct mm_struct *mm, enter_lazy_tlb() argument
47 get_new_mmu_context(struct mm_struct *mm) get_new_mmu_context() argument
57 mm->context = asid; get_new_mmu_context()
66 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
68 mm->context = 0; init_new_context()
90 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
94 deactivate_mm(struct task_struct *task, struct mm_struct *mm) deactivate_mm() argument
98 * After we have set current->mm to a new value, this activates
99 * the context for the new mm so we see the new mappings.
/linux-4.4.14/arch/metag/include/asm/
H A Dpgalloc.h5 #include <linux/mm.h>
7 #define pmd_populate_kernel(mm, pmd, pte) \
10 #define pmd_populate(mm, pmd, pte) \
29 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
37 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
42 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
50 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
64 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
69 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
H A Dtlbflush.h12 * - flush_tlb() flushes the current mm struct TLBs
14 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
16 * - flush_tlb_range(mm, start, end) flushes a range of pages
18 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
46 static inline void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument
48 if (mm == current->active_mm) flush_tlb_mm()
64 static inline void flush_tlb_pgtables(struct mm_struct *mm, flush_tlb_pgtables() argument
67 flush_tlb_mm(mm); flush_tlb_pgtables()
H A Dhugetlb.h8 static inline int is_hugepage_only_range(struct mm_struct *mm, is_hugepage_only_range() argument
25 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument
28 set_pte_at(mm, addr, ptep, pte); set_huge_pte_at()
31 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, huge_ptep_get_and_clear() argument
34 return ptep_get_and_clear(mm, addr, ptep); huge_ptep_get_and_clear()
52 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument
55 ptep_set_wrprotect(mm, addr, ptep); huge_ptep_set_wrprotect()
H A Dmmu_context.h13 static inline void enter_lazy_tlb(struct mm_struct *mm, enter_lazy_tlb() argument
19 struct mm_struct *mm) init_new_context()
26 mm->context.pgd_base = (unsigned long) mm->pgd; init_new_context()
29 INIT_LIST_HEAD(&mm->context.tcm); init_new_context()
39 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
43 list_for_each_entry_safe(pos, n, &mm->context.tcm, list) { list_for_each_entry_safe()
50 #define destroy_context(mm) do { } while (0)
111 #define deactivate_mm(tsk, mm) do { } while (0)
18 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
/linux-4.4.14/arch/nios2/include/asm/
H A Dpgalloc.h13 #include <linux/mm.h>
15 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument
21 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
33 extern pgd_t *pgd_alloc(struct mm_struct *mm);
35 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
40 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
51 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
67 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
72 static inline void pte_free(struct mm_struct *mm, struct page *pte) pte_free() argument
H A Dmmu_context.h27 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
38 struct mm_struct *mm) init_new_context()
40 mm->context = 0; init_new_context()
48 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
56 struct mm_struct *mm) deactivate_mm()
61 * After we have set current->mm to a new value, this activates
62 * the context for the new mm so we see the new mappings.
37 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
55 deactivate_mm(struct task_struct *tsk, struct mm_struct *mm) deactivate_mm() argument
/linux-4.4.14/arch/avr32/include/asm/
H A Dpgalloc.h11 #include <linux/mm.h>
19 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_populate_kernel() argument
25 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
44 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
49 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
54 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
60 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
79 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
84 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
H A Dtlbflush.h16 * - flush_tlb() flushes the current mm struct TLBs
18 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
25 extern void flush_tlb_mm(struct mm_struct *mm);
H A Dmmu_context.h41 get_mmu_context(struct mm_struct *mm) get_mmu_context() argument
45 if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0) get_mmu_context()
64 mm->context = mc; get_mmu_context()
72 struct mm_struct *mm) init_new_context()
74 mm->context = NO_CONTEXT; init_new_context()
82 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
102 static inline void activate_context(struct mm_struct *mm) activate_context() argument
104 get_mmu_context(mm); activate_context()
105 set_asid(mm->context & MMU_CONTEXT_ASID_MASK); activate_context()
120 #define deactivate_mm(tsk,mm) do { } while(0)
125 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
71 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
/linux-4.4.14/drivers/gpu/drm/amd/amdkfd/
H A Dkfd_mqd_manager_cik.c37 static int init_mqd(struct mqd_manager *mm, void **mqd, init_mqd() argument
45 BUG_ON(!mm || !q || !mqd); init_mqd()
49 retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), init_mqd()
104 retval = mm->update_mqd(mm, m, q); init_mqd()
109 static int init_mqd_sdma(struct mqd_manager *mm, void **mqd, init_mqd_sdma() argument
116 BUG_ON(!mm || !mqd || !mqd_mem_obj); init_mqd_sdma()
118 retval = kfd_gtt_sa_allocate(mm->dev, init_mqd_sdma()
133 retval = mm->update_mqd(mm, m, q); init_mqd_sdma()
138 static void uninit_mqd(struct mqd_manager *mm, void *mqd, uninit_mqd() argument
141 BUG_ON(!mm || !mqd); uninit_mqd()
142 kfd_gtt_sa_free(mm->dev, mqd_mem_obj); uninit_mqd()
145 static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd, uninit_mqd_sdma() argument
148 BUG_ON(!mm || !mqd); uninit_mqd_sdma()
149 kfd_gtt_sa_free(mm->dev, mqd_mem_obj); uninit_mqd_sdma()
152 static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, load_mqd() argument
155 return mm->dev->kfd2kgd->hqd_load load_mqd()
156 (mm->dev->kgd, mqd, pipe_id, queue_id, wptr); load_mqd()
159 static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, load_mqd_sdma() argument
163 return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd); load_mqd_sdma()
166 static int update_mqd(struct mqd_manager *mm, void *mqd, update_mqd() argument
171 BUG_ON(!mm || !q || !mqd); update_mqd()
210 static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, update_mqd_sdma() argument
215 BUG_ON(!mm || !mqd || !q); update_mqd_sdma()
250 static int destroy_mqd(struct mqd_manager *mm, void *mqd, destroy_mqd() argument
255 return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout, destroy_mqd()
263 static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, destroy_mqd_sdma() argument
268 return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); destroy_mqd_sdma()
271 static bool is_occupied(struct mqd_manager *mm, void *mqd, is_occupied() argument
276 return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address, is_occupied()
281 static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, is_occupied_sdma() argument
285 return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); is_occupied_sdma()
294 static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, init_mqd_hiq() argument
302 BUG_ON(!mm || !q || !mqd || !mqd_mem_obj); init_mqd_hiq()
306 retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), init_mqd_hiq()
350 retval = mm->update_mqd(mm, m, q); init_mqd_hiq()
355 static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, update_mqd_hiq() argument
360 BUG_ON(!mm || !q || !mqd); update_mqd_hiq()
H A Dkfd_mqd_manager_vi.c39 static int init_mqd(struct mqd_manager *mm, void **mqd, init_mqd() argument
47 retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct vi_mqd), init_mqd()
88 retval = mm->update_mqd(mm, m, q); init_mqd()
93 static int load_mqd(struct mqd_manager *mm, void *mqd, load_mqd() argument
97 return mm->dev->kfd2kgd->hqd_load load_mqd()
98 (mm->dev->kgd, mqd, pipe_id, queue_id, wptr); load_mqd()
101 static int __update_mqd(struct mqd_manager *mm, void *mqd, __update_mqd() argument
107 BUG_ON(!mm || !q || !mqd); __update_mqd()
170 static int update_mqd(struct mqd_manager *mm, void *mqd, update_mqd() argument
173 return __update_mqd(mm, mqd, q, MTYPE_CC, 1); update_mqd()
176 static int destroy_mqd(struct mqd_manager *mm, void *mqd, destroy_mqd() argument
181 return mm->dev->kfd2kgd->hqd_destroy destroy_mqd()
182 (mm->dev->kgd, type, timeout, destroy_mqd()
186 static void uninit_mqd(struct mqd_manager *mm, void *mqd, uninit_mqd() argument
189 BUG_ON(!mm || !mqd); uninit_mqd()
190 kfd_gtt_sa_free(mm->dev, mqd_mem_obj); uninit_mqd()
193 static bool is_occupied(struct mqd_manager *mm, void *mqd, is_occupied() argument
197 return mm->dev->kfd2kgd->hqd_is_occupied( is_occupied()
198 mm->dev->kgd, queue_address, is_occupied()
202 static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, init_mqd_hiq() argument
207 int retval = init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q); init_mqd_hiq()
220 static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, update_mqd_hiq() argument
224 int retval = __update_mqd(mm, mqd, q, MTYPE_UC, 0); update_mqd_hiq()
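Both amdkfd files above supply per-ASIC implementations of the mqd_manager operations (init_mqd, load_mqd, update_mqd, destroy_mqd, uninit_mqd, is_occupied). A hedged sketch of how a caller drives them through the function pointers; only the hook names and argument order come from the listing, the helper and its parameters are illustrative.

/* Hedged sketch: create an MQD for a queue and hand it to the hardware,
 * tearing the MQD down again if the load fails.
 */
static int example_start_queue(struct mqd_manager *mqd_mgr,
                               struct queue_properties *props,
                               uint32_t pipe_id, uint32_t queue_id,
                               uint32_t __user *wptr)
{
        struct kfd_mem_obj *mqd_mem_obj;
        uint64_t gart_addr;
        void *mqd;
        int r;

        r = mqd_mgr->init_mqd(mqd_mgr, &mqd, &mqd_mem_obj, &gart_addr, props);
        if (r)
                return r;
        r = mqd_mgr->load_mqd(mqd_mgr, mqd, pipe_id, queue_id, wptr);
        if (r)
                mqd_mgr->uninit_mqd(mqd_mgr, mqd, mqd_mem_obj);
        return r;
}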
/linux-4.4.14/arch/arm64/include/asm/
H A Dpgalloc.h34 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) pmd_alloc_one() argument
39 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
45 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
54 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) pud_alloc_one() argument
59 static inline void pud_free(struct mm_struct *mm, pud_t *pud) pud_free() argument
65 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) pgd_populate() argument
72 extern pgd_t *pgd_alloc(struct mm_struct *mm);
73 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
76 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) pte_alloc_one_kernel() argument
82 pte_alloc_one(struct mm_struct *mm, unsigned long addr) pte_alloc_one() argument
99 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
105 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
119 * of the mm address space.
122 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) pmd_populate_kernel() argument
131 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) pmd_populate() argument
H A Dhugetlb.h29 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument
32 set_pte_at(mm, addr, ptep, pte); set_huge_pte_at()
41 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument
44 ptep_set_wrprotect(mm, addr, ptep); huge_ptep_set_wrprotect()
47 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, huge_ptep_get_and_clear() argument
50 return ptep_get_and_clear(mm, addr, ptep); huge_ptep_get_and_clear()
68 static inline int is_hugepage_only_range(struct mm_struct *mm, is_hugepage_only_range() argument
/linux-4.4.14/arch/openrisc/include/asm/
H A Dpgalloc.h24 #include <linux/mm.h>
30 #define pmd_populate_kernel(mm, pmd, pte) \
33 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
44 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
60 * current_pgd (from mm->pgd) to load kernel pages so we need it
63 extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
69 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
74 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
76 static inline struct page *pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
91 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
96 static inline void pte_free(struct mm_struct *mm, struct page *pte) pte_free() argument
H A Dtlbflush.h22 #include <linux/mm.h>
30 * - flush_tlb() flushes the current mm struct TLBs
32 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
34 * - flush_tlb_range(mm, start, end) flushes a range of pages
38 void flush_tlb_mm(struct mm_struct *mm);
46 flush_tlb_mm(current->mm); flush_tlb()
/linux-4.4.14/arch/tile/include/asm/
H A Dpgalloc.h19 #include <linux/mm.h>
50 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_populate_kernel() argument
57 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
68 extern pgd_t *pgd_alloc(struct mm_struct *mm);
69 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
71 extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
73 extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
75 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
78 return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER); pte_alloc_one()
81 static inline void pte_free(struct mm_struct *mm, struct page *pte) pte_free() argument
83 pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER); pte_free()
89 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) pte_alloc_one_kernel() argument
91 return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address))); pte_alloc_one_kernel()
94 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
97 pte_free(mm, virt_to_page(pte)); pte_free_kernel()
125 #define pud_populate(mm, pud, pmd) \
126 pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
144 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_alloc_one() argument
146 struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER); pmd_alloc_one()
150 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp) pmd_free() argument
152 pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER); pmd_free()
/linux-4.4.14/lib/
H A Dis_single_threaded.c16 * Returns true if the task does not share ->mm with another thread/process.
21 struct mm_struct *mm = task->mm; current_is_single_threaded() local
28 if (atomic_read(&mm->mm_users) == 1) current_is_single_threaded()
40 if (unlikely(t->mm == mm)) for_each_thread()
42 if (likely(t->mm)) for_each_thread()
45 * t->mm == NULL. Make sure next_thread/next_task for_each_thread()
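current_is_single_threaded() answers exactly the question in the comment above: whether any other task shares current->mm. A trivial hedged usage sketch; the helper name and the fast-path framing are illustrative, not from the tree:

static bool example_can_skip_cross_thread_sync(void)
{
        /* no other thread shares current->mm, so a process-wide change
         * can be applied without coordinating with sibling threads */
        return current_is_single_threaded();
}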
/linux-4.4.14/arch/microblaze/mm/
H A Dmmu_context.c6 * Derived from arch/ppc/mm/4xx_mmu.c:
9 * Derived from arch/ppc/mm/init.c:
17 * Derived from "arch/i386/mm/init.c"
27 #include <linux/mm.h>
61 struct mm_struct *mm; steal_context() local
67 mm = context_mm[next_mmu_context]; steal_context()
68 flush_tlb_mm(mm); steal_context()
69 destroy_context(mm); steal_context()
/linux-4.4.14/arch/blackfin/include/asm/
H A Dtlb.h15 * .. because we flush the whole mm when it
18 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
H A Dmmu_context.h60 activate_l1stack(struct mm_struct *mm, unsigned long sp_base) activate_l1stack() argument
64 mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base; activate_l1stack()
69 #define deactivate_mm(tsk,mm) do { } while (0)
120 static inline void protect_page(struct mm_struct *mm, unsigned long addr, protect_page() argument
123 unsigned long *mask = mm->context.page_rwx_mask; protect_page()
151 static inline void update_protections(struct mm_struct *mm) update_protections() argument
154 if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) { update_protections()
156 set_mask_dcplbs(mm->context.page_rwx_mask, cpu); update_protections()
167 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
173 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
177 mm->context.page_rwx_mask = (unsigned long *)p; init_new_context()
178 memset(mm->context.page_rwx_mask, 0, init_new_context()
184 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
192 if (current_l1_stack_save == mm->context.l1_stack_save) destroy_context()
194 if (mm->context.l1_stack_save) destroy_context()
198 while ((tmp = mm->context.sram_list)) { destroy_context()
199 mm->context.sram_list = tmp->next; destroy_context()
204 if (current_rwx_mask[cpu] == mm->context.page_rwx_mask) destroy_context()
206 free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order); destroy_context()
/linux-4.4.14/drivers/gpu/drm/
H A Ddrm_mm.c93 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
98 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
112 struct drm_mm *mm = hole_node->mm; drm_mm_insert_helper() local
120 if (mm->color_adjust) drm_mm_insert_helper()
121 mm->color_adjust(hole_node, color, &adj_start, &adj_end); drm_mm_insert_helper()
149 node->mm = mm; drm_mm_insert_helper()
160 list_add(&node->hole_stack, &mm->hole_stack); drm_mm_insert_helper()
167 * @mm: drm_mm allocator to insert @node into
179 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) drm_mm_reserve_node() argument
189 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { drm_mm_for_each_hole()
193 node->mm = mm; drm_mm_for_each_hole()
206 list_add(&node->hole_stack, &mm->hole_stack); drm_mm_for_each_hole()
219 * @mm: drm_mm to allocate from
232 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, drm_mm_insert_node_generic() argument
240 hole_node = drm_mm_search_free_generic(mm, size, alignment, drm_mm_insert_node_generic()
257 struct drm_mm *mm = hole_node->mm; drm_mm_insert_helper_range() local
270 if (mm->color_adjust) drm_mm_insert_helper_range()
271 mm->color_adjust(hole_node, color, &adj_start, &adj_end); drm_mm_insert_helper_range()
296 node->mm = mm; drm_mm_insert_helper_range()
310 list_add(&node->hole_stack, &mm->hole_stack); drm_mm_insert_helper_range()
317 * @mm: drm_mm to allocate from
332 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, drm_mm_insert_node_in_range_generic() argument
341 hole_node = drm_mm_search_free_in_range_generic(mm, drm_mm_insert_node_in_range_generic()
364 struct drm_mm *mm = node->mm; drm_mm_remove_node() local
387 list_add(&prev_node->hole_stack, &mm->hole_stack); drm_mm_remove_node()
389 list_move(&prev_node->hole_stack, &mm->hole_stack); drm_mm_remove_node()
413 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, drm_mm_search_free_generic() argument
425 BUG_ON(mm->scanned_blocks); drm_mm_search_free_generic()
430 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, __drm_mm_for_each_hole()
434 if (mm->color_adjust) { __drm_mm_for_each_hole()
435 mm->color_adjust(entry, color, &adj_start, &adj_end); __drm_mm_for_each_hole()
455 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, drm_mm_search_free_in_range_generic() argument
469 BUG_ON(mm->scanned_blocks); drm_mm_search_free_in_range_generic()
474 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, __drm_mm_for_each_hole()
483 if (mm->color_adjust) { __drm_mm_for_each_hole()
484 mm->color_adjust(entry, color, &adj_start, &adj_end); __drm_mm_for_each_hole()
518 new->mm = old->mm; drm_mm_replace_node()
558 * @mm: drm_mm to scan
571 void drm_mm_init_scan(struct drm_mm *mm, drm_mm_init_scan() argument
576 mm->scan_color = color; drm_mm_init_scan()
577 mm->scan_alignment = alignment; drm_mm_init_scan()
578 mm->scan_size = size; drm_mm_init_scan()
579 mm->scanned_blocks = 0; drm_mm_init_scan()
580 mm->scan_hit_start = 0; drm_mm_init_scan()
581 mm->scan_hit_end = 0; drm_mm_init_scan()
582 mm->scan_check_range = 0; drm_mm_init_scan()
583 mm->prev_scanned_node = NULL; drm_mm_init_scan()
589 * @mm: drm_mm to scan
604 void drm_mm_init_scan_with_range(struct drm_mm *mm, drm_mm_init_scan_with_range() argument
611 mm->scan_color = color; drm_mm_init_scan_with_range()
612 mm->scan_alignment = alignment; drm_mm_init_scan_with_range()
613 mm->scan_size = size; drm_mm_init_scan_with_range()
614 mm->scanned_blocks = 0; drm_mm_init_scan_with_range()
615 mm->scan_hit_start = 0; drm_mm_init_scan_with_range()
616 mm->scan_hit_end = 0; drm_mm_init_scan_with_range()
617 mm->scan_start = start; drm_mm_init_scan_with_range()
618 mm->scan_end = end; drm_mm_init_scan_with_range()
619 mm->scan_check_range = 1; drm_mm_init_scan_with_range()
620 mm->prev_scanned_node = NULL; drm_mm_init_scan_with_range()
636 struct drm_mm *mm = node->mm; drm_mm_scan_add_block() local
641 mm->scanned_blocks++; drm_mm_scan_add_block()
653 node->node_list.next = &mm->prev_scanned_node->node_list; drm_mm_scan_add_block()
654 mm->prev_scanned_node = node; drm_mm_scan_add_block()
659 if (mm->scan_check_range) { drm_mm_scan_add_block()
660 if (adj_start < mm->scan_start) drm_mm_scan_add_block()
661 adj_start = mm->scan_start; drm_mm_scan_add_block()
662 if (adj_end > mm->scan_end) drm_mm_scan_add_block()
663 adj_end = mm->scan_end; drm_mm_scan_add_block()
666 if (mm->color_adjust) drm_mm_scan_add_block()
667 mm->color_adjust(prev_node, mm->scan_color, drm_mm_scan_add_block()
671 mm->scan_size, mm->scan_alignment)) { drm_mm_scan_add_block()
672 mm->scan_hit_start = hole_start; drm_mm_scan_add_block()
673 mm->scan_hit_end = hole_end; drm_mm_scan_add_block()
699 struct drm_mm *mm = node->mm; drm_mm_scan_remove_block() local
702 mm->scanned_blocks--; drm_mm_scan_remove_block()
713 return (drm_mm_hole_node_end(node) > mm->scan_hit_start && drm_mm_scan_remove_block()
714 node->start < mm->scan_hit_end); drm_mm_scan_remove_block()
720 * @mm: drm_mm allocator to check
726 bool drm_mm_clean(struct drm_mm * mm) drm_mm_clean() argument
728 struct list_head *head = &mm->head_node.node_list; drm_mm_clean()
735 * drm_mm_init - initialize a drm-mm allocator
736 * @mm: the drm_mm structure to initialize
737 * @start: start of the range managed by @mm
738 * @size: size of the range managed by @mm drm_mm_clean()
740 * Note that @mm must be cleared to 0 before calling this function.
742 void drm_mm_init(struct drm_mm * mm, u64 start, u64 size) drm_mm_init() argument
744 INIT_LIST_HEAD(&mm->hole_stack); drm_mm_init()
745 mm->scanned_blocks = 0; drm_mm_init()
748 INIT_LIST_HEAD(&mm->head_node.node_list); drm_mm_init()
749 INIT_LIST_HEAD(&mm->head_node.hole_stack); drm_mm_init()
750 mm->head_node.hole_follows = 1; drm_mm_init()
751 mm->head_node.scanned_block = 0; drm_mm_init()
752 mm->head_node.scanned_prev_free = 0; drm_mm_init()
753 mm->head_node.scanned_next_free = 0; drm_mm_init()
754 mm->head_node.mm = mm; drm_mm_init()
755 mm->head_node.start = start + size; drm_mm_init()
756 mm->head_node.size = start - mm->head_node.start; drm_mm_init()
757 list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack); drm_mm_init()
759 mm->color_adjust = NULL; drm_mm_init()
765 * @mm: drm_mm allocator to clean up
770 void drm_mm_takedown(struct drm_mm * mm) drm_mm_takedown() argument
772 WARN(!list_empty(&mm->head_node.node_list), drm_mm_takedown()
796 * @mm: drm_mm allocator to dump
799 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) drm_mm_debug_table() argument
804 total_free += drm_mm_debug_hole(&mm->head_node, prefix); drm_mm_debug_table()
806 drm_mm_for_each_node(entry, mm) { drm_mm_for_each_node()
839 * @mm: drm_mm allocator to dump
841 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) drm_mm_dump_table() argument
846 total_free += drm_mm_dump_hole(m, &mm->head_node); drm_mm_dump_table()
848 drm_mm_for_each_node(entry, mm) { drm_mm_for_each_node()
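drm_mm is the range allocator GPU drivers use to manage address space. A self-contained lifecycle sketch using only calls whose signatures appear in this listing; the managed range and the node placement are made-up values:

/* Hedged sketch: initialise an allocator over [0, 1 MiB), reserve a
 * pre-placed 8 KiB node, release it and tear the allocator down.
 */
static int example_use_range_allocator(void)
{
        struct drm_mm mm = {};          /* drm_mm_init() requires a zeroed struct */
        struct drm_mm_node node = {};
        int ret;

        drm_mm_init(&mm, 0, 1 << 20);

        node.start = 4096;
        node.size = 8192;
        ret = drm_mm_reserve_node(&mm, &node);  /* carve the node out of the hole */
        if (!ret)
                drm_mm_remove_node(&node);

        WARN_ON(!drm_mm_clean(&mm));    /* nothing should still be allocated */
        drm_mm_takedown(&mm);
        return ret;
}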
/linux-4.4.14/arch/mips/include/asm/
H A Dmmu_context.h85 #define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
86 #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
89 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
102 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) get_new_mmu_context() argument
119 cpu_context(cpu, mm) = asid_cache(cpu) = asid; get_new_mmu_context()
127 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
132 cpu_context(i, mm) = 0; init_new_context()
134 atomic_set(&mm->context.fp_mode_switching, 0); init_new_context()
168 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
172 #define deactivate_mm(tsk, mm) do { } while (0)
175 * After we have set current->mm to a new value, this activates
176 * the context for the new mm so we see the new mappings.
202 * If mm is currently active_mm, we can't really drop it. Instead,
206 drop_mmu_context(struct mm_struct *mm, unsigned cpu) drop_mmu_context() argument
213 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { drop_mmu_context()
214 get_new_mmu_context(mm, cpu); drop_mmu_context()
215 write_c0_entryhi(cpu_asid(cpu, mm)); drop_mmu_context()
218 cpu_context(cpu, mm) = 0; drop_mmu_context()
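drop_mmu_context() above retires an mm's ASID instead of walking TLB entries; if the mm is currently active it is immediately given a fresh ASID. A hedged sketch modelled on the MIPS local flush path (not a verbatim copy):

static void example_flush_whole_mm(struct mm_struct *mm)
{
        preempt_disable();
        if (cpu_context(smp_processor_id(), mm) != 0)   /* has this mm ever run here? */
                drop_mmu_context(mm, smp_processor_id());
        preempt_enable();
}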
H A Dpgalloc.h13 #include <linux/mm.h>
16 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument
22 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
36 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
47 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument
62 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument
67 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument
77 static inline struct page *pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
93 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
98 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
112 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_alloc_one() argument
122 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
127 #define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
H A Dtlbflush.h4 #include <linux/mm.h>
10 * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
16 extern void local_flush_tlb_mm(struct mm_struct *mm);
38 #define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
H A Dhugetlb.h16 static inline int is_hugepage_only_range(struct mm_struct *mm, is_hugepage_only_range() argument
50 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument
53 set_pte_at(mm, addr, ptep, pte); set_huge_pte_at()
56 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, huge_ptep_get_and_clear() argument
63 set_pte_at(mm, addr, ptep, clear); huge_ptep_get_and_clear()
84 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument
87 ptep_set_wrprotect(mm, addr, ptep); huge_ptep_set_wrprotect()
/linux-4.4.14/arch/metag/mm/
H A Dhugetlbpage.c2 * arch/metag/mm/hugetlbpage.c
15 #include <linux/mm.h>
33 struct mm_struct *mm = current->mm; prepare_hugepage_range() local
44 vma = find_vma(mm, ALIGN_HUGEPT(addr)); prepare_hugepage_range()
48 vma = find_vma(mm, addr); prepare_hugepage_range()
59 pte_t *huge_pte_alloc(struct mm_struct *mm, huge_pte_alloc() argument
67 pgd = pgd_offset(mm, addr); huge_pte_alloc()
70 pte = pte_alloc_map(mm, NULL, pmd, addr); huge_pte_alloc()
77 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) huge_pte_offset() argument
84 pgd = pgd_offset(mm, addr); huge_pte_offset()
102 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, follow_huge_pmd() argument
125 struct mm_struct *mm = current->mm; hugetlb_get_unmapped_area_existing() local
130 if (mm->context.part_huge) { hugetlb_get_unmapped_area_existing()
131 start_addr = mm->context.part_huge; hugetlb_get_unmapped_area_existing()
140 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { hugetlb_get_unmapped_area_existing()
160 mm->context.part_huge = end; hugetlb_get_unmapped_area_existing()
161 else if (addr == mm->context.part_huge) hugetlb_get_unmapped_area_existing()
162 mm->context.part_huge = 0; hugetlb_get_unmapped_area_existing()
H A Dfault.c8 #include <linux/mm.h>
52 struct mm_struct *mm; do_page_fault() local
106 mm = tsk->mm; do_page_fault()
108 if (faulthandler_disabled() || !mm) do_page_fault()
114 down_read(&mm->mmap_sem); do_page_fault()
116 vma = find_vma_prev(mm, address, &prev_vma); do_page_fault()
136 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault()
160 * No need to up_read(&mm->mmap_sem) as we would do_page_fault()
162 * in mm/filemap.c. do_page_fault()
169 up_read(&mm->mmap_sem); do_page_fault()
178 up_read(&mm->mmap_sem); do_page_fault()
206 up_read(&mm->mmap_sem); do_page_fault()
230 up_read(&mm->mmap_sem); do_page_fault()
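The metag handler above follows the generic fault-handling pattern; here is a minimal sketch of that pattern against the 4.4-era handle_mm_fault() signature, with retry/OOM handling omitted and example_handle_fault() being an illustrative name.

    #include <linux/mm.h>
    #include <linux/errno.h>

    static int example_handle_fault(struct mm_struct *mm, unsigned long address,
                                    unsigned int flags)
    {
            struct vm_area_struct *vma;
            int fault;

            down_read(&mm->mmap_sem);
            vma = find_vma(mm, address);
            if (!vma || vma->vm_start > address) {
                    up_read(&mm->mmap_sem);
                    return -EFAULT;
            }
            fault = handle_mm_fault(mm, vma, address, flags);
            up_read(&mm->mmap_sem);

            return (fault & VM_FAULT_ERROR) ? -EFAULT : 0;
    }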
/linux-4.4.14/drivers/gpio/
H A Dgpio-mpc8xxx.c53 to_mpc8xxx_gpio_chip(struct of_mm_gpio_chip *mm) to_mpc8xxx_gpio_chip() argument
55 return container_of(mm, struct mpc8xxx_gpio_chip, mm_gc); to_mpc8xxx_gpio_chip()
58 static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm) mpc8xxx_gpio_save_regs() argument
60 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gpio_save_regs()
62 mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT); mpc8xxx_gpio_save_regs()
73 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8572_gpio_get() local
74 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8572_gpio_get()
77 out_mask = in_be32(mm->regs + GPIO_DIR); mpc8572_gpio_get()
79 val = in_be32(mm->regs + GPIO_DAT) & ~out_mask; mpc8572_gpio_get()
87 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8xxx_gpio_get() local
89 return in_be32(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio); mpc8xxx_gpio_get()
94 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8xxx_gpio_set() local
95 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gpio_set()
105 out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); mpc8xxx_gpio_set()
113 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8xxx_gpio_set_multiple() local
114 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gpio_set_multiple()
131 out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); mpc8xxx_gpio_set_multiple()
138 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8xxx_gpio_dir_in() local
139 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gpio_dir_in()
144 clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); mpc8xxx_gpio_dir_in()
153 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8xxx_gpio_dir_out() local
154 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gpio_dir_out()
161 setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); mpc8xxx_gpio_dir_out()
188 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8xxx_gpio_to_irq() local
189 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gpio_to_irq()
201 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; mpc8xxx_gpio_irq_cascade() local
204 mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR); mpc8xxx_gpio_irq_cascade()
215 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; mpc8xxx_irq_unmask() local
220 setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); mpc8xxx_irq_unmask()
228 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; mpc8xxx_irq_mask() local
233 clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); mpc8xxx_irq_mask()
241 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; mpc8xxx_irq_ack() local
243 out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); mpc8xxx_irq_ack()
249 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; mpc8xxx_irq_set_type() local
255 setbits32(mm->regs + GPIO_ICR, mpc8xxx_irq_set_type()
262 clrbits32(mm->regs + GPIO_ICR, mpc8xxx_irq_set_type()
277 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; mpc512x_irq_set_type() local
284 reg = mm->regs + GPIO_ICR; mpc512x_irq_set_type()
287 reg = mm->regs + GPIO_ICR2; mpc512x_irq_set_type()
/linux-4.4.14/arch/x86/mm/
H A Dtlb.c3 #include <linux/mm.h>
39 * instead update mm->cpu_vm_mask.
64 * 1a) thread switch to a different mm
69 * Now cpu0 accepts tlb flushes for the new mm.
74 * Stop ipi delivery for the old mm. This is not synchronized with
76 * mm, and in the worst case we perform a superfluous tlb flush.
77 * 1b) thread switch without mm change
87 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
98 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
99 * 2) Leave the mm if we are in the lazy tlb mode.
132 struct mm_struct *mm, unsigned long start, native_flush_tlb_others()
139 info.flush_mm = mm; native_flush_tlb_others()
154 cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu); native_flush_tlb_others()
165 struct mm_struct *mm = current->mm; flush_tlb_current_task() local
175 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_current_task()
176 flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); flush_tlb_current_task() local
192 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, flush_tlb_mm_range() argument
200 if (current->active_mm != mm) { flush_tlb_mm_range()
207 if (!current->mm) { flush_tlb_mm_range()
240 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_mm_range()
241 flush_tlb_others(mm_cpumask(mm), mm, start, end); flush_tlb_mm_range() local
247 struct mm_struct *mm = vma->vm_mm; flush_tlb_page() local
251 if (current->active_mm == mm) { flush_tlb_page()
252 if (current->mm) { flush_tlb_page()
266 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_page()
267 flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); flush_tlb_page() local
131 native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long start, unsigned long end) native_flush_tlb_others() argument
/linux-4.4.14/arch/microblaze/include/asm/
H A Dmmu_context_mm.h36 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
79 * Get a new mmu context for the address space described by `mm'.
81 static inline void get_mmu_context(struct mm_struct *mm) get_mmu_context() argument
85 if (mm->context != NO_CONTEXT) get_mmu_context()
96 mm->context = ctx; get_mmu_context()
97 context_mm[ctx] = mm; get_mmu_context()
103 # define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
108 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
110 if (mm->context != NO_CONTEXT) { destroy_context()
111 clear_bit(mm->context, context_map); destroy_context()
112 mm->context = NO_CONTEXT; destroy_context()
126 * After we have set current->mm to a new value, this activates
127 * the context for the new mm so we see the new mappings.
130 struct mm_struct *mm) activate_mm()
132 current->thread.pgdir = mm->pgd; activate_mm()
133 get_mmu_context(mm); activate_mm()
134 set_context(mm->context, mm->pgd); activate_mm()
129 activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) activate_mm() argument
H A Dpgalloc.h99 #define pgd_free(mm, pgd) free_pgd_fast(pgd)
100 #define pgd_alloc(mm) get_pgd_fast()
108 #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
109 #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
111 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
113 static inline struct page *pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument
135 static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, pte_alloc_one_fast() argument
156 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
166 static inline void pte_free(struct mm_struct *mm, struct page *ptepage) pte_free() argument
172 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
174 #define pmd_populate(mm, pmd, pte) \
177 #define pmd_populate_kernel(mm, pmd, pte) \
184 #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
185 #define pmd_free(mm, x) do { } while (0)
186 #define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
187 #define pgd_populate(mm, pmd, pte) BUG()
/linux-4.4.14/arch/frv/mm/
H A Dmmu-context.c13 #include <linux/mm.h>
29 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
31 memset(&mm->context, 0, sizeof(mm->context)); init_new_context()
32 INIT_LIST_HEAD(&mm->context.id_link); init_new_context()
33 mm->context.itlb_cached_pge = 0xffffffffUL; init_new_context()
34 mm->context.dtlb_cached_pge = 0xffffffffUL; init_new_context()
130 void destroy_context(struct mm_struct *mm) destroy_context() argument
132 mm_context_t *ctx = &mm->context; destroy_context()
154 char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer) proc_pid_status_frv_cxnr() argument
157 buffer += sprintf(buffer, "CXNR: %u\n", mm->context.id); proc_pid_status_frv_cxnr()
171 struct mm_struct *mm = NULL; cxn_pin_by_pid() local
189 if (tsk->mm) { cxn_pin_by_pid()
190 mm = tsk->mm; cxn_pin_by_pid()
191 atomic_inc(&mm->mm_users); cxn_pin_by_pid()
203 cxn_pinned = get_cxn(&mm->context); cxn_pin_by_pid()
206 mmput(mm); cxn_pin_by_pid()
/linux-4.4.14/drivers/misc/cxl/
H A Dfault.c13 #include <linux/mm.h>
87 static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm, cxl_fault_segment() argument
93 if (!(rc = copro_calculate_slb(mm, ea, &slb))) { cxl_fault_segment()
116 struct mm_struct *mm, u64 ea) cxl_handle_segment_miss()
123 if ((rc = cxl_fault_segment(ctx, mm, ea))) cxl_handle_segment_miss()
135 struct mm_struct *mm, u64 dsisr, u64 dar) cxl_handle_page_fault()
143 if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) { cxl_handle_page_fault()
162 hash_page_mm(mm, dar, access, 0x300, inv_flags); cxl_handle_page_fault()
176 struct mm_struct *mm = NULL; cxl_handle_fault() local
204 if (!(mm = get_task_mm(task))) { cxl_handle_fault()
205 pr_devel("cxl_handle_fault unable to get mm %i\n", cxl_handle_fault()
213 cxl_handle_segment_miss(ctx, mm, dar); cxl_handle_fault()
215 cxl_handle_page_fault(ctx, mm, dsisr, dar); cxl_handle_fault()
219 if (mm) cxl_handle_fault()
220 mmput(mm); cxl_handle_fault()
230 struct mm_struct *mm; cxl_prefault_one() local
237 if (!(mm = get_task_mm(task))) { cxl_prefault_one()
238 pr_devel("cxl_prefault_one unable to get mm %i\n", cxl_prefault_one()
244 rc = cxl_fault_segment(ctx, mm, ea); cxl_prefault_one()
246 mmput(mm); cxl_prefault_one()
267 struct mm_struct *mm; cxl_prefault_vma() local
274 if (!(mm = get_task_mm(task))) { cxl_prefault_vma()
275 pr_devel("cxl_prefault_vm unable to get mm %i\n", cxl_prefault_vma()
280 down_read(&mm->mmap_sem); cxl_prefault_vma()
281 for (vma = mm->mmap; vma; vma = vma->vm_next) { cxl_prefault_vma()
284 rc = copro_calculate_slb(mm, ea, &slb); cxl_prefault_vma()
295 up_read(&mm->mmap_sem); cxl_prefault_vma()
297 mmput(mm); cxl_prefault_vma()
115 cxl_handle_segment_miss(struct cxl_context *ctx, struct mm_struct *mm, u64 ea) cxl_handle_segment_miss() argument
134 cxl_handle_page_fault(struct cxl_context *ctx, struct mm_struct *mm, u64 dsisr, u64 dar) cxl_handle_page_fault() argument
/linux-4.4.14/arch/sh/kernel/vsyscall/
H A Dvsyscall.c13 #include <linux/mm.h>
63 struct mm_struct *mm = current->mm; arch_setup_additional_pages() local
67 down_write(&mm->mmap_sem); arch_setup_additional_pages()
74 ret = install_special_mapping(mm, addr, PAGE_SIZE, arch_setup_additional_pages()
81 current->mm->context.vdso = (void *)addr; arch_setup_additional_pages()
84 up_write(&mm->mmap_sem); arch_setup_additional_pages()
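A minimal sketch of the vDSO-style setup above: reserve an address under mmap_sem held for write, then back it with install_special_mapping(). "my_pages" is an assumed, already-populated page array; error handling is reduced to the essentials.

    #include <linux/mm.h>
    #include <linux/err.h>
    #include <linux/sched.h>

    static int example_map_one_page(struct page **my_pages)
    {
            struct mm_struct *mm = current->mm;
            unsigned long addr;
            int ret;

            down_write(&mm->mmap_sem);

            addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
            if (IS_ERR_VALUE(addr)) {
                    ret = addr;
                    goto out;
            }

            ret = install_special_mapping(mm, addr, PAGE_SIZE,
                                          VM_READ | VM_EXEC |
                                          VM_MAYREAD | VM_MAYEXEC,
                                          my_pages);
    out:
            up_write(&mm->mmap_sem);
            return ret;
    }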
/linux-4.4.14/arch/mips/mm/
H A Dhugetlbpage.c15 #include <linux/mm.h>
24 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, huge_pte_alloc() argument
31 pgd = pgd_offset(mm, addr); huge_pte_alloc()
32 pud = pud_alloc(mm, pgd, addr); huge_pte_alloc()
34 pte = (pte_t *)pmd_alloc(mm, pud, addr); huge_pte_alloc()
39 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) huge_pte_offset() argument
45 pgd = pgd_offset(mm, addr); huge_pte_offset()
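For comparison with huge_pte_offset() above, a sketch of the full 4.4-era pgd/pud/pmd/pte walk for a normal page; no locking and no huge-page handling, and example_lookup_pte() is an illustrative name.

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    static pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd;
            pud_t *pud;
            pmd_t *pmd;

            pgd = pgd_offset(mm, addr);
            if (pgd_none(*pgd))
                    return NULL;

            pud = pud_offset(pgd, addr);
            if (pud_none(*pud))
                    return NULL;

            pmd = pmd_offset(pud, addr);
            if (pmd_none(*pmd))
                    return NULL;

            /* caller must pte_unmap() the returned pointer when done */
            return pte_offset_map(pmd, addr);
    }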
H A Dmmap.c11 #include <linux/mm.h>
58 struct mm_struct *mm = current->mm; arch_get_unmapped_area_common() local
93 vma = find_vma(mm, addr); arch_get_unmapped_area_common()
106 info.high_limit = mm->mmap_base; arch_get_unmapped_area_common()
121 info.low_limit = mm->mmap_base; arch_get_unmapped_area_common()
159 void arch_pick_mmap_layout(struct mm_struct *mm) arch_pick_mmap_layout() argument
167 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; arch_pick_mmap_layout()
168 mm->get_unmapped_area = arch_get_unmapped_area; arch_pick_mmap_layout()
170 mm->mmap_base = mmap_base(random_factor); arch_pick_mmap_layout()
171 mm->get_unmapped_area = arch_get_unmapped_area_topdown; arch_pick_mmap_layout()
189 unsigned long arch_randomize_brk(struct mm_struct *mm) arch_randomize_brk() argument
191 unsigned long base = mm->brk; arch_randomize_brk()
196 if (ret < mm->brk) arch_randomize_brk()
197 return mm->brk; arch_randomize_brk()
/linux-4.4.14/arch/mn10300/mm/
H A Dtlb-smp.c52 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
91 * @mm: The VM context to flush from (if va!=FLUSH_ALL).
94 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, flush_tlb_others() argument
104 BUG_ON(!mm); flush_tlb_others()
119 flush_mm = mm; flush_tlb_others()
141 * @mm: The VM context to invalidate.
143 void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument
148 cpumask_copy(&cpu_mask, mm_cpumask(mm)); flush_tlb_mm()
153 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); flush_tlb_mm()
163 struct mm_struct *mm = current->mm; flush_tlb_current_task() local
167 cpumask_copy(&cpu_mask, mm_cpumask(mm)); flush_tlb_current_task()
172 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); flush_tlb_current_task()
184 struct mm_struct *mm = vma->vm_mm; flush_tlb_page() local
188 cpumask_copy(&cpu_mask, mm_cpumask(mm)); flush_tlb_page()
191 local_flush_tlb_page(mm, va); flush_tlb_page()
193 flush_tlb_others(cpu_mask, mm, va); flush_tlb_page()
/linux-4.4.14/include/trace/events/
H A Dxen.h168 TP_PROTO(struct mm_struct *mm, unsigned long addr,
170 TP_ARGS(mm, addr, ptep, pteval),
172 __field(struct mm_struct *, mm)
177 TP_fast_assign(__entry->mm = mm;
181 TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
182 __entry->mm, __entry->addr, __entry->ptep,
188 TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
189 TP_ARGS(mm, addr, ptep),
191 __field(struct mm_struct *, mm)
195 TP_fast_assign(__entry->mm = mm;
198 TP_printk("mm %p addr %lx ptep %p",
199 __entry->mm, __entry->addr, __entry->ptep)
300 TP_PROTO(struct mm_struct *mm, unsigned long addr,
302 TP_ARGS(mm, addr, ptep, pteval),
304 __field(struct mm_struct *, mm)
309 TP_fast_assign(__entry->mm = mm;
313 TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
314 __entry->mm, __entry->addr, __entry->ptep,
320 TP_PROTO(struct mm_struct *mm, unsigned long addr, \
322 TP_ARGS(mm, addr, ptep, pteval))
328 TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
329 TP_ARGS(mm, pfn, level, pinned),
331 __field(struct mm_struct *, mm)
336 TP_fast_assign(__entry->mm = mm;
340 TP_printk("mm %p pfn %lx level %d %spinned",
341 __entry->mm, __entry->pfn, __entry->level,
362 TP_PROTO(struct mm_struct *mm, pgd_t *pgd),
363 TP_ARGS(mm, pgd),
365 __field(struct mm_struct *, mm)
368 TP_fast_assign(__entry->mm = mm;
370 TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd)
374 TP_PROTO(struct mm_struct *mm, pgd_t *pgd), \
375 TP_ARGS(mm, pgd))
407 TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
409 TP_ARGS(cpus, mm, addr, end),
412 __field(struct mm_struct *, mm)
417 __entry->mm = mm;
420 TP_printk("ncpus %d mm %p addr %lx, end %lx",
421 __entry->ncpus, __entry->mm, __entry->addr, __entry->end)
/linux-4.4.14/kernel/
H A Dfork.c11 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
31 #include <linux/mm.h>
212 /* SLAB cache for mm_struct structures (tsk->mm) */
397 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) dup_mmap() argument
407 uprobe_dup_mmap(oldmm, mm); dup_mmap()
411 down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); dup_mmap()
414 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); dup_mmap()
416 mm->total_vm = oldmm->total_vm; dup_mmap()
417 mm->shared_vm = oldmm->shared_vm; dup_mmap()
418 mm->exec_vm = oldmm->exec_vm; dup_mmap()
419 mm->stack_vm = oldmm->stack_vm; dup_mmap()
421 rb_link = &mm->mm_rb.rb_node; dup_mmap()
423 pprev = &mm->mmap; dup_mmap()
424 retval = ksm_fork(mm, oldmm); dup_mmap()
427 retval = khugepaged_fork(mm, oldmm); dup_mmap()
436 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file, dup_mmap()
456 tmp->vm_mm = mm; dup_mmap()
498 __vma_link_rb(mm, tmp, rb_link, rb_parent); dup_mmap()
502 mm->map_count++; dup_mmap()
503 retval = copy_page_range(mm, oldmm, mpnt); dup_mmap()
511 /* a new mm has just been created */ dup_mmap()
512 arch_dup_mmap(oldmm, mm); dup_mmap()
515 up_write(&mm->mmap_sem); dup_mmap()
530 static inline int mm_alloc_pgd(struct mm_struct *mm) mm_alloc_pgd() argument
532 mm->pgd = pgd_alloc(mm); mm_alloc_pgd()
533 if (unlikely(!mm->pgd)) mm_alloc_pgd()
538 static inline void mm_free_pgd(struct mm_struct *mm) mm_free_pgd() argument
540 pgd_free(mm, mm->pgd); mm_free_pgd()
543 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) dup_mmap() argument
546 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); dup_mmap()
550 #define mm_alloc_pgd(mm) (0)
551 #define mm_free_pgd(mm)
557 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
573 static void mm_init_aio(struct mm_struct *mm) mm_init_aio() argument
576 spin_lock_init(&mm->ioctx_lock); mm_init_aio()
577 mm->ioctx_table = NULL; mm_init_aio()
581 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) mm_init_owner() argument
584 mm->owner = p; mm_init_owner()
588 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) mm_init() argument
590 mm->mmap = NULL; mm_init()
591 mm->mm_rb = RB_ROOT; mm_init()
592 mm->vmacache_seqnum = 0; mm_init()
593 atomic_set(&mm->mm_users, 1); mm_init()
594 atomic_set(&mm->mm_count, 1); mm_init()
595 init_rwsem(&mm->mmap_sem); mm_init()
596 INIT_LIST_HEAD(&mm->mmlist); mm_init()
597 mm->core_state = NULL; mm_init()
598 atomic_long_set(&mm->nr_ptes, 0); mm_init()
599 mm_nr_pmds_init(mm); mm_init()
600 mm->map_count = 0; mm_init()
601 mm->locked_vm = 0; mm_init()
602 mm->pinned_vm = 0; mm_init()
603 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); mm_init()
604 spin_lock_init(&mm->page_table_lock); mm_init()
605 mm_init_cpumask(mm); mm_init()
606 mm_init_aio(mm); mm_init()
607 mm_init_owner(mm, p); mm_init()
608 mmu_notifier_mm_init(mm); mm_init()
609 clear_tlb_flush_pending(mm); mm_init()
611 mm->pmd_huge_pte = NULL; mm_init()
614 if (current->mm) { mm_init()
615 mm->flags = current->mm->flags & MMF_INIT_MASK; mm_init()
616 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; mm_init()
618 mm->flags = default_dump_filter; mm_init()
619 mm->def_flags = 0; mm_init()
622 if (mm_alloc_pgd(mm)) mm_init()
625 if (init_new_context(p, mm)) mm_init()
628 return mm; mm_init()
631 mm_free_pgd(mm); mm_init()
633 free_mm(mm); mm_init()
637 static void check_mm(struct mm_struct *mm) check_mm() argument
642 long x = atomic_long_read(&mm->rss_stat.count[i]); check_mm()
646 "mm:%p idx:%d val:%ld\n", mm, i, x); check_mm()
649 if (atomic_long_read(&mm->nr_ptes)) check_mm()
650 pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n", check_mm()
651 atomic_long_read(&mm->nr_ptes)); check_mm()
652 if (mm_nr_pmds(mm)) check_mm()
653 pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n", check_mm()
654 mm_nr_pmds(mm)); check_mm()
657 VM_BUG_ON_MM(mm->pmd_huge_pte, mm); check_mm()
666 struct mm_struct *mm; mm_alloc() local
668 mm = allocate_mm(); mm_alloc()
669 if (!mm) mm_alloc()
672 memset(mm, 0, sizeof(*mm)); mm_alloc()
673 return mm_init(mm, current); mm_alloc()
677 * Called when the last reference to the mm
679 * mmput. Free the page directory and the mm.
681 void __mmdrop(struct mm_struct *mm) __mmdrop() argument
683 BUG_ON(mm == &init_mm); __mmdrop()
684 mm_free_pgd(mm); __mmdrop()
685 destroy_context(mm); __mmdrop()
686 mmu_notifier_mm_destroy(mm); __mmdrop()
687 check_mm(mm); __mmdrop()
688 free_mm(mm); __mmdrop()
693 * Decrement the use count and release all resources for an mm.
695 void mmput(struct mm_struct *mm) mmput() argument
699 if (atomic_dec_and_test(&mm->mm_users)) { mmput()
700 uprobe_clear_state(mm); mmput()
701 exit_aio(mm); mmput()
702 ksm_exit(mm); mmput()
703 khugepaged_exit(mm); /* must run before exit_mmap */ mmput()
704 exit_mmap(mm); mmput()
705 set_mm_exe_file(mm, NULL); mmput()
706 if (!list_empty(&mm->mmlist)) { mmput()
708 list_del(&mm->mmlist); mmput()
711 if (mm->binfmt) mmput()
712 module_put(mm->binfmt->module); mmput()
713 mmdrop(mm); mmput()
719 * set_mm_exe_file - change a reference to the mm's executable file
721 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
726 * mm->exe_file, but does so without using set_mm_exe_file() in order
729 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) set_mm_exe_file() argument
736 * this mm -- see comment above for justification. set_mm_exe_file()
738 old_exe_file = rcu_dereference_raw(mm->exe_file); set_mm_exe_file()
742 rcu_assign_pointer(mm->exe_file, new_exe_file); set_mm_exe_file()
748 * get_mm_exe_file - acquire a reference to the mm's executable file
750 * Returns %NULL if mm has no associated executable file.
753 struct file *get_mm_exe_file(struct mm_struct *mm) get_mm_exe_file() argument
758 exe_file = rcu_dereference(mm->exe_file); get_mm_exe_file()
767 * get_task_mm - acquire a reference to the task's mm
769 * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
770 * this kernel workthread has transiently adopted a user mm with use_mm,
772 * bumping up the use count. User must release the mm via mmput()
777 struct mm_struct *mm; get_task_mm() local
780 mm = task->mm; get_task_mm()
781 if (mm) { get_task_mm()
783 mm = NULL; get_task_mm()
785 atomic_inc(&mm->mm_users); get_task_mm()
788 return mm; get_task_mm()
794 struct mm_struct *mm; mm_access() local
801 mm = get_task_mm(task); mm_access()
802 if (mm && mm != current->mm && mm_access()
804 mmput(mm); mm_access()
805 mm = ERR_PTR(-EACCES); mm_access()
809 return mm; mm_access()
857 void mm_release(struct task_struct *tsk, struct mm_struct *mm) mm_release() argument
859 /* Get rid of any futexes when releasing the mm */ mm_release()
878 deactivate_mm(tsk, mm); mm_release()
884 * trouble, say, a killed vfork parent shouldn't touch this mm. mm_release()
889 atomic_read(&mm->mm_users) > 1) { mm_release()
902 * All done, finally we can wake up parent and return this mm to him. mm_release()
910 * Allocate a new mm structure and copy contents from the
911 * mm structure of the passed in task structure.
915 struct mm_struct *mm, *oldmm = current->mm; dup_mm() local
918 mm = allocate_mm(); dup_mm()
919 if (!mm) dup_mm()
922 memcpy(mm, oldmm, sizeof(*mm)); dup_mm()
924 if (!mm_init(mm, tsk)) dup_mm()
927 err = dup_mmap(mm, oldmm); dup_mm()
931 mm->hiwater_rss = get_mm_rss(mm); dup_mm()
932 mm->hiwater_vm = mm->total_vm; dup_mm()
934 if (mm->binfmt && !try_module_get(mm->binfmt->module)) dup_mm()
937 return mm; dup_mm()
941 mm->binfmt = NULL; dup_mm()
942 mmput(mm); dup_mm()
950 struct mm_struct *mm, *oldmm; copy_mm() local
959 tsk->mm = NULL; copy_mm()
967 oldmm = current->mm; copy_mm()
976 mm = oldmm; copy_mm()
981 mm = dup_mm(tsk); copy_mm()
982 if (!mm) copy_mm()
986 tsk->mm = mm; copy_mm()
987 tsk->active_mm = mm; copy_mm()
1632 if (p->mm) copy_process()
1633 mmput(p->mm); copy_process()
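The fork.c excerpt above distinguishes mm_users (dropped with mmput()) from mm_count (dropped with mmdrop(), which ends up in __mmdrop() once the count hits zero). A small sketch of when each reference is appropriate; the function name and the "struct_only" flag are illustrative, and it assumes the caller already holds a reference keeping the mm alive.

    #include <linux/sched.h>

    static void example_pin_mm(struct mm_struct *mm, bool struct_only)
    {
            if (struct_only) {
                    atomic_inc(&mm->mm_count);      /* lazy-TLB style: struct only */
                    /* safe to keep the pointer, not to touch user mappings */
                    mmdrop(mm);
            } else {
                    atomic_inc(&mm->mm_users);      /* full address-space reference */
                    /* may walk VMAs / fault pages under mmap_sem */
                    mmput(mm);
            }
    }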
H A Delfcore.c3 #include <linux/mm.h>
H A Dtsacct.c24 #include <linux/mm.h>
94 struct mm_struct *mm; xacct_add_tsk() local
99 mm = get_task_mm(p); xacct_add_tsk()
100 if (mm) { xacct_add_tsk()
102 stats->hiwater_rss = get_mm_hiwater_rss(mm) * PAGE_SIZE / KB; xacct_add_tsk()
103 stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB; xacct_add_tsk()
104 mmput(mm); xacct_add_tsk()
126 if (likely(tsk->mm)) { __acct_update_integrals()
142 tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); __acct_update_integrals()
143 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; __acct_update_integrals()
150 * acct_update_integrals - update mm integral fields in task_struct
162 * acct_account_cputime - update mm integral after cputime update
171 * acct_clear_integrals - clear the mm integral fields in task_struct
/linux-4.4.14/arch/h8300/mm/
H A Dmemory.c2 * linux/arch/h8300/mm/memory.c
8 * linux/arch/m68knommu/mm/memory.c
15 * linux/arch/m68k/mm/memory.c
20 #include <linux/mm.h>
/linux-4.4.14/arch/m32r/mm/
H A Dextable.c2 * linux/arch/m32r/mm/extable.c
/linux-4.4.14/arch/arm64/kernel/
H A Dvdso.c27 #include <linux/mm.h>
89 struct mm_struct *mm = current->mm; aarch32_setup_vectors_page() local
98 down_write(&mm->mmap_sem); aarch32_setup_vectors_page()
99 current->mm->context.vdso = (void *)addr; aarch32_setup_vectors_page()
102 ret = _install_special_mapping(mm, addr, PAGE_SIZE, aarch32_setup_vectors_page()
106 up_write(&mm->mmap_sem); aarch32_setup_vectors_page()
158 struct mm_struct *mm = current->mm; arch_setup_additional_pages() local
166 down_write(&mm->mmap_sem); arch_setup_additional_pages()
172 ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, arch_setup_additional_pages()
179 mm->context.vdso = (void *)vdso_base; arch_setup_additional_pages()
180 ret = _install_special_mapping(mm, vdso_base, vdso_text_len, arch_setup_additional_pages()
188 up_write(&mm->mmap_sem); arch_setup_additional_pages()
192 mm->context.vdso = NULL; arch_setup_additional_pages()
193 up_write(&mm->mmap_sem); arch_setup_additional_pages()
/linux-4.4.14/arch/arm64/mm/
H A Dextable.c2 * Based on arch/arm/mm/extable.c
/linux-4.4.14/arch/arm/mach-gemini/
H A DMakefile7 obj-y := irq.o mm.o time.o devices.o gpio.o idle.o reset.o
/linux-4.4.14/arch/x86/xen/
H A Dmmu.h18 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
19 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
/linux-4.4.14/include/drm/
H A Ddrm_mm.h73 struct drm_mm *mm; member in struct:drm_mm_node
114 * @mm: drm_mm to check
120 * True if the @mm is initialized.
122 static inline bool drm_mm_initialized(struct drm_mm *mm) drm_mm_initialized() argument
124 return mm->hole_stack.next; drm_mm_initialized()
174 * @mm: drm_mm allocator to walk
179 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
180 &(mm)->head_node.node_list, \
186 * @mm: drm_mm allocator to walk
202 #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
203 for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
204 &entry->hole_stack != &(mm)->hole_stack ? \
210 #define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
211 for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
212 &entry->hole_stack != &(mm)->hole_stack ? \
221 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
223 int drm_mm_insert_node_generic(struct drm_mm *mm,
232 * @mm: drm_mm to allocate from
246 static inline int drm_mm_insert_node(struct drm_mm *mm, drm_mm_insert_node() argument
252 return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags, drm_mm_insert_node()
256 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
267 * @mm: drm_mm to allocate from
283 static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, drm_mm_insert_node_in_range() argument
291 return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, drm_mm_insert_node_in_range()
298 void drm_mm_init(struct drm_mm *mm,
301 void drm_mm_takedown(struct drm_mm *mm);
302 bool drm_mm_clean(struct drm_mm *mm);
304 void drm_mm_init_scan(struct drm_mm *mm,
308 void drm_mm_init_scan_with_range(struct drm_mm *mm,
317 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
319 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
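Putting the declarations above together, a sketch of carving a block out of an already-initialized allocator, walking all allocated nodes, then freeing the block again; "mgr" and "node" are caller-provided and the names are illustrative.

    #include <drm/drm_mm.h>
    #include <linux/printk.h>

    static int example_alloc_and_list(struct drm_mm *mgr, struct drm_mm_node *node)
    {
            struct drm_mm_node *entry;
            int ret;

            /* 4 KiB block, no alignment constraint, default search order */
            ret = drm_mm_insert_node(mgr, node, 4096, 0, DRM_MM_SEARCH_DEFAULT);
            if (ret)
                    return ret;

            drm_mm_for_each_node(entry, mgr)
                    pr_info("node at %llu, size %llu\n", entry->start, entry->size);

            drm_mm_remove_node(node);
            return 0;
    }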
/linux-4.4.14/arch/frv/include/asm/
H A Dtlb.h20 * .. because we flush the whole mm when it fills up
22 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
H A Dmmu_context.h20 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument
25 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
27 extern void destroy_context(struct mm_struct *mm);
30 #define init_new_context(tsk, mm) ({ 0; })
32 #define destroy_context(mm) do {} while(0)
46 #define deactivate_mm(tsk, mm) \
H A Dpgalloc.h23 #define pmd_populate_kernel(mm, pmd, pte) __set_pmd(pmd, __pa(pte) | _PAGE_TABLE)
35 extern void pgd_free(struct mm_struct *mm, pgd_t *);
41 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument
46 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
63 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); })
64 #define pmd_free(mm, x) do { } while (0)
/linux-4.4.14/arch/hexagon/include/asm/
H A Dmmu_context.h29 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
38 static inline void enter_lazy_tlb(struct mm_struct *mm, enter_lazy_tlb() argument
47 struct mm_struct *mm) deactivate_mm()
54 * @mm: pointer to a new mm struct
57 struct mm_struct *mm) init_new_context()
59 /* mm->context is set up by pgd_alloc */ init_new_context()
64 * Switch active mm context
46 deactivate_mm(struct task_struct *tsk, struct mm_struct *mm) deactivate_mm() argument
56 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
/linux-4.4.14/arch/m68k/mm/
H A Dfault.c2 * linux/arch/m68k/mm/fault.c
8 #include <linux/mm.h>
72 struct mm_struct *mm = current->mm; do_page_fault() local
78 regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL); do_page_fault()
84 if (faulthandler_disabled() || !mm) do_page_fault()
90 down_read(&mm->mmap_sem); do_page_fault()
92 vma = find_vma(mm, address); do_page_fault()
139 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault()
172 * No need to up_read(&mm->mmap_sem) as we would do_page_fault()
174 * in mm/filemap.c. do_page_fault()
181 up_read(&mm->mmap_sem); do_page_fault()
189 up_read(&mm->mmap_sem); do_page_fault()
218 up_read(&mm->mmap_sem); do_page_fault()
H A Dmcfmmu.c2 * Based upon linux/arch/m68k/mm/sun3mmu.c
3 * Based upon linux/arch/ppc/mm/mmu_context.c
5 * Implementations of mm routines specific to the Coldfire MMU.
12 #include <linux/mm.h>
76 current->mm = NULL; paging_init()
87 struct mm_struct *mm; cf_tlb_miss() local
98 mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm; cf_tlb_miss()
99 if (!mm) { cf_tlb_miss()
104 pgd = pgd_offset(mm, mmuar); cf_tlb_miss()
132 asid = mm->context & 0xff; cf_tlb_miss()
184 struct mm_struct *mm; steal_context() local
191 mm = context_mm[next_mmu_context]; steal_context()
192 flush_tlb_mm(mm); steal_context()
193 destroy_context(mm); steal_context()
/linux-4.4.14/arch/x86/kernel/
H A Dldt.c13 #include <linux/mm.h>
106 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
112 mutex_init(&mm->context.lock); init_new_context()
113 old_mm = current->mm; init_new_context()
115 mm->context.ldt = NULL; init_new_context()
121 mm->context.ldt = NULL; init_new_context()
135 mm->context.ldt = new_ldt; init_new_context()
147 void destroy_context(struct mm_struct *mm) destroy_context() argument
149 free_ldt_struct(mm->context.ldt); destroy_context()
150 mm->context.ldt = NULL; destroy_context()
157 struct mm_struct *mm = current->mm; read_ldt() local
159 mutex_lock(&mm->context.lock); read_ldt()
161 if (!mm->context.ldt) { read_ldt()
169 size = mm->context.ldt->size * LDT_ENTRY_SIZE; read_ldt()
173 if (copy_to_user(ptr, mm->context.ldt->entries, size)) { read_ldt()
188 mutex_unlock(&mm->context.lock); read_ldt()
209 struct mm_struct *mm = current->mm; write_ldt() local
248 mutex_lock(&mm->context.lock); write_ldt()
250 old_ldt = mm->context.ldt; write_ldt()
264 install_ldt(mm, new_ldt); write_ldt()
269 mutex_unlock(&mm->context.lock); write_ldt()
/linux-4.4.14/arch/xtensa/mm/
H A Dtlb.c2 * arch/xtensa/mm/tlb.c
17 #include <linux/mm.h>
57 /* If mm is current, we simply assign the current task a new ASID, thus,
58 * invalidating all previous tlb entries. If mm is someone else's user mapping,
63 void local_flush_tlb_mm(struct mm_struct *mm) local_flush_tlb_mm() argument
67 if (mm == current->active_mm) { local_flush_tlb_mm()
70 mm->context.asid[cpu] = NO_CONTEXT; local_flush_tlb_mm()
71 activate_context(mm, cpu); local_flush_tlb_mm()
74 mm->context.asid[cpu] = NO_CONTEXT; local_flush_tlb_mm()
75 mm->context.cpu = -1; local_flush_tlb_mm()
92 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range() local
95 if (mm->context.asid[cpu] == NO_CONTEXT) local_flush_tlb_range()
100 (unsigned long)mm->context.asid[cpu], start, end); local_flush_tlb_range()
107 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); local_flush_tlb_range()
123 local_flush_tlb_mm(mm); local_flush_tlb_range()
131 struct mm_struct* mm = vma->vm_mm; local_flush_tlb_page() local
135 if (mm->context.asid[cpu] == NO_CONTEXT) local_flush_tlb_page()
141 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); local_flush_tlb_page()
172 struct mm_struct *mm = task->mm; get_pte_for_vaddr() local
177 if (!mm) get_pte_for_vaddr()
178 mm = task->active_mm; get_pte_for_vaddr()
179 pgd = pgd_offset(mm, vaddr); get_pte_for_vaddr()
/linux-4.4.14/arch/sh/kernel/
H A Dsmp.c19 #include <linux/mm.h>
180 struct mm_struct *mm = &init_mm; start_secondary() local
183 atomic_inc(&mm->mm_count); start_secondary()
184 atomic_inc(&mm->mm_users); start_secondary()
185 current->active_mm = mm; start_secondary()
186 enter_lazy_tlb(mm, current); start_secondary()
341 static void flush_tlb_mm_ipi(void *mm) flush_tlb_mm_ipi() argument
343 local_flush_tlb_mm((struct mm_struct *)mm); flush_tlb_mm_ipi()
351 * at switch_mm time, should the mm ever be used on other cpus. For
354 * mm might be active on another cpu (eg debuggers doing the flushes on
358 void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument
362 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { flush_tlb_mm()
363 smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); flush_tlb_mm()
368 cpu_context(i, mm) = 0; flush_tlb_mm()
370 local_flush_tlb_mm(mm); flush_tlb_mm()
391 struct mm_struct *mm = vma->vm_mm; flush_tlb_range() local
394 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { flush_tlb_range()
405 cpu_context(i, mm) = 0; flush_tlb_range()
438 (current->mm != vma->vm_mm)) { flush_tlb_page()
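flush_tlb_mm() above broadcasts the flush with smp_call_function(); a stripped-down sketch of that IPI pattern follows, where example_local_flush() stands in for the arch-specific local_flush_tlb_mm() and both names are illustrative.

    #include <linux/smp.h>
    #include <linux/preempt.h>
    #include <linux/mm_types.h>

    static void example_local_flush(void *info)
    {
            struct mm_struct *mm = info;

            /* arch-specific local TLB invalidation for "mm" would go here */
            (void)mm;
    }

    static void example_flush_everywhere(struct mm_struct *mm)
    {
            preempt_disable();
            smp_call_function(example_local_flush, mm, 1);  /* wait == 1 */
            example_local_flush(mm);                        /* then flush locally */
            preempt_enable();
    }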
/linux-4.4.14/arch/tile/kernel/
H A Dtlb.c34 void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument
38 for_each_cpu(cpu, mm_cpumask(mm)) { for_each_cpu()
44 flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
50 flush_tlb_mm(current->mm); flush_tlb_current_task()
53 void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm, flush_tlb_page_mm() argument
58 flush_remote(0, cache, mm_cpumask(mm), flush_tlb_page_mm()
59 va, size, size, mm_cpumask(mm), NULL, 0); flush_tlb_page_mm()
72 struct mm_struct *mm = vma->vm_mm; flush_tlb_range() local
74 flush_remote(0, cache, mm_cpumask(mm), start, end - start, size, flush_tlb_range()
75 mm_cpumask(mm), NULL, 0); flush_tlb_range()
/linux-4.4.14/arch/cris/arch-v10/mm/
H A Dtlb.c2 * linux/arch/cris/arch-v10/mm/tlb.c
19 /* The TLB can host up to 64 different mm contexts at the same time.
22 * of which mm's we have assigned which page_id's, so that we know when
61 /* invalidate the selected mm context only */
64 flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument
67 int page_id = mm->context.page_id; flush_tlb_mm()
70 D(printk("tlb: flush mm context %d (%p)\n", page_id, mm)); flush_tlb_mm()
101 struct mm_struct *mm = vma->vm_mm; flush_tlb_page() local
102 int page_id = mm->context.page_id; flush_tlb_page()
106 D(printk("tlb: flush page %p in context %d (%p)\n", addr, page_id, mm)); flush_tlb_page()
113 /* invalidate those TLB entries that match both the mm context flush_tlb_page()
143 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
145 mm->context.page_id = NO_CONTEXT; init_new_context()
/linux-4.4.14/arch/hexagon/kernel/
H A Dvdso.c22 #include <linux/mm.h>
66 struct mm_struct *mm = current->mm; arch_setup_additional_pages() local
68 down_write(&mm->mmap_sem); arch_setup_additional_pages()
80 ret = install_special_mapping(mm, vdso_base, PAGE_SIZE, arch_setup_additional_pages()
88 mm->context.vdso = (void *)vdso_base; arch_setup_additional_pages()
91 up_write(&mm->mmap_sem); arch_setup_additional_pages()
/linux-4.4.14/arch/alpha/kernel/
H A Dsmp.c18 #include <linux/mm.h>
146 /* All kernel threads share the same mm context. */ smp_callin()
642 struct mm_struct *mm = (struct mm_struct *) x; ipi_flush_tlb_mm() local
643 if (mm == current->active_mm && !asn_locked()) ipi_flush_tlb_mm()
644 flush_tlb_current(mm); ipi_flush_tlb_mm()
646 flush_tlb_other(mm); ipi_flush_tlb_mm()
650 flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument
654 if (mm == current->active_mm) { flush_tlb_mm()
655 flush_tlb_current(mm); flush_tlb_mm()
656 if (atomic_read(&mm->mm_users) <= 1) { flush_tlb_mm()
661 if (mm->context[cpu]) flush_tlb_mm()
662 mm->context[cpu] = 0; flush_tlb_mm()
669 if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) { flush_tlb_mm()
679 struct mm_struct *mm; member in struct:flush_tlb_page_struct
687 struct mm_struct * mm = data->mm; ipi_flush_tlb_page() local
689 if (mm == current->active_mm && !asn_locked()) ipi_flush_tlb_page()
690 flush_tlb_current_page(mm, data->vma, data->addr); ipi_flush_tlb_page()
692 flush_tlb_other(mm); ipi_flush_tlb_page()
699 struct mm_struct *mm = vma->vm_mm; flush_tlb_page() local
703 if (mm == current->active_mm) { flush_tlb_page()
704 flush_tlb_current_page(mm, vma, addr); flush_tlb_page()
705 if (atomic_read(&mm->mm_users) <= 1) { flush_tlb_page()
710 if (mm->context[cpu]) flush_tlb_page()
711 mm->context[cpu] = 0; flush_tlb_page()
719 data.mm = mm; flush_tlb_page()
741 struct mm_struct *mm = (struct mm_struct *) x; ipi_flush_icache_page() local
742 if (mm == current->active_mm && !asn_locked()) ipi_flush_icache_page()
743 __load_new_mm_context(mm); ipi_flush_icache_page()
745 flush_tlb_other(mm); ipi_flush_icache_page()
752 struct mm_struct *mm = vma->vm_mm; flush_icache_user_range() local
759 if (mm == current->active_mm) { flush_icache_user_range()
760 __load_new_mm_context(mm); flush_icache_user_range()
761 if (atomic_read(&mm->mm_users) <= 1) { flush_icache_user_range()
766 if (mm->context[cpu]) flush_icache_user_range()
767 mm->context[cpu] = 0; flush_icache_user_range()
774 if (smp_call_function(ipi_flush_icache_page, mm, 1)) { flush_icache_user_range()
/linux-4.4.14/drivers/gpu/drm/radeon/
H A Dradeon_mn.c42 struct mm_struct *mm; member in struct:radeon_mn
90 mmu_notifier_unregister(&rmn->mn, rmn->mm); radeon_mn_destroy()
95 * radeon_mn_release - callback to notify about mm destruction
98 * @mn: the mm this callback is about
103 struct mm_struct *mm) radeon_mn_release()
111 * radeon_mn_invalidate_range_start - callback to notify about mm change
114 * @mn: the mm this callback is about
122 struct mm_struct *mm, radeon_mn_invalidate_range_start()
181 * Creates a notifier context for current->mm.
185 struct mm_struct *mm = current->mm; radeon_mn_get() local
189 down_write(&mm->mmap_sem); radeon_mn_get()
192 hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm) radeon_mn_get()
193 if (rmn->mm == mm) radeon_mn_get()
203 rmn->mm = mm; radeon_mn_get()
208 r = __mmu_notifier_register(&rmn->mn, mm); radeon_mn_get()
212 hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm); radeon_mn_get()
216 up_write(&mm->mmap_sem); radeon_mn_get()
222 up_write(&mm->mmap_sem); radeon_mn_get()
102 radeon_mn_release(struct mmu_notifier *mn, struct mm_struct *mm) radeon_mn_release() argument
121 radeon_mn_invalidate_range_start(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) radeon_mn_invalidate_range_start() argument
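radeon_mn_get() above registers a notifier against current->mm; a minimal sketch of that registration pattern follows. The ops, callback, and function names are illustrative; the one hard rule shown is that __mmu_notifier_register() expects mmap_sem held for write.

    #include <linux/mmu_notifier.h>
    #include <linux/sched.h>

    static void example_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
    {
            /* the address space is going away: drop anything that uses it */
    }

    static const struct mmu_notifier_ops example_mn_ops = {
            .release = example_mn_release,
    };

    static struct mmu_notifier example_mn = {
            .ops = &example_mn_ops,
    };

    static int example_register_notifier(void)
    {
            struct mm_struct *mm = current->mm;
            int r;

            down_write(&mm->mmap_sem);
            r = __mmu_notifier_register(&example_mn, mm);
            up_write(&mm->mmap_sem);

            return r;
    }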
/linux-4.4.14/arch/arc/include/asm/
H A Dmmu_context.h9 * -Refactored get_new_mmu_context( ) to only handle live-mm.
10 * retiring-mm handled in other hooks
39 * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits
50 #define asid_mm(mm, cpu) mm->context.asid[cpu]
51 #define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
60 static inline void get_new_mmu_context(struct mm_struct *mm) get_new_mmu_context() argument
69 * This is done by ensuring that the generation bits in both mm->ASID get_new_mmu_context()
77 if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK)) get_new_mmu_context()
95 asid_mm(mm, cpu) = asid_cpu(cpu); get_new_mmu_context()
98 write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE); get_new_mmu_context()
108 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
113 asid_mm(mm, i) = MM_CTXT_NO_ASID; init_new_context()
118 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument
124 asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID; destroy_context()
150 /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */ switch_mm()
160 * vs. in switch_mm(). Here it always returns a new ASID, because mm has
167 * for retiring-mm. However destroy_context( ) still needs to do that because
173 #define deactivate_mm(tsk, mm) do { } while (0)
175 #define enter_lazy_tlb(mm, tsk)
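The ARC comments above describe allocating ASIDs with a generation ("cycle") counter kept in the upper bits of the same word. Below is a stand-alone sketch of that idea only; every name and constant is invented for illustration, the cache would be per-CPU in real code, and the TLB flush is left as a comment.

    #define EX_ASID_BITS    8
    #define EX_ASID_MASK    ((1UL << EX_ASID_BITS) - 1)
    #define EX_CYCLE_MASK   (~EX_ASID_MASK)
    #define EX_NO_ASID      0UL

    static unsigned long ex_asid_cache = EX_ASID_MASK;   /* per-CPU in real code */

    /* returns the hardware ASID to program for this mm's software ASID slot */
    static unsigned long ex_get_asid(unsigned long *mm_asid)
    {
            /* same generation as the CPU counter: the cached ASID is still valid */
            if (*mm_asid != EX_NO_ASID &&
                !((*mm_asid ^ ex_asid_cache) & EX_CYCLE_MASK))
                    return *mm_asid & EX_ASID_MASK;

            /* all hardware ASIDs of this generation used up: flush, roll over */
            if ((ex_asid_cache & EX_ASID_MASK) == EX_ASID_MASK) {
                    /* local_flush_tlb_all() would go here */
            }

            *mm_asid = ++ex_asid_cache;
            return *mm_asid & EX_ASID_MASK;
    }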
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
H A Damdgpu_mn.c42 struct mm_struct *mm; member in struct:amdgpu_mn
90 mmu_notifier_unregister(&rmn->mn, rmn->mm); amdgpu_mn_destroy()
95 * amdgpu_mn_release - callback to notify about mm destruction
98 * @mn: the mm this callback is about
103 struct mm_struct *mm) amdgpu_mn_release()
111 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
114 * @mn: the mm this callback is about
122 struct mm_struct *mm, amdgpu_mn_invalidate_range_start()
182 * Creates a notifier context for current->mm.
186 struct mm_struct *mm = current->mm; amdgpu_mn_get() local
190 down_write(&mm->mmap_sem); amdgpu_mn_get()
193 hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm) amdgpu_mn_get()
194 if (rmn->mm == mm) amdgpu_mn_get()
204 rmn->mm = mm; amdgpu_mn_get()
209 r = __mmu_notifier_register(&rmn->mn, mm); amdgpu_mn_get()
213 hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm); amdgpu_mn_get()
217 up_write(&mm->mmap_sem); amdgpu_mn_get()
223 up_write(&mm->mmap_sem); amdgpu_mn_get()
102 amdgpu_mn_release(struct mmu_notifier *mn, struct mm_struct *mm) amdgpu_mn_release() argument
121 amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) amdgpu_mn_invalidate_range_start() argument
/linux-4.4.14/arch/score/mm/
H A Dtlb-score.c2 * arch/score/mm/tlb-score.c
60 * If mm is currently active_mm, we can't really drop it. Instead,
64 drop_mmu_context(struct mm_struct *mm) drop_mmu_context() argument
69 get_new_mmu_context(mm); drop_mmu_context()
70 pevn_set(mm->context & ASID_MASK); drop_mmu_context()
74 void local_flush_tlb_mm(struct mm_struct *mm) local_flush_tlb_mm() argument
76 if (mm->context != 0) local_flush_tlb_mm()
77 drop_mmu_context(mm); local_flush_tlb_mm()
83 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range() local
84 unsigned long vma_mm_context = mm->context; local_flush_tlb_range()
85 if (mm->context != 0) { local_flush_tlb_range()
115 get_new_mmu_context(mm); local_flush_tlb_range()
116 if (mm == current->active_mm) local_flush_tlb_range()
