/linux-4.1.27/arch/arm/xen/ |
Makefile | 1: obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
|
/linux-4.1.27/arch/parisc/mm/ |
Makefile | 2: # Makefile for arch/parisc/mm
|
/linux-4.1.27/mm/ |
mmu_context.c | use_mm() / unuse_mm(): attach a kernel thread to a borrowed mm context and detach it again. use_mm() bumps mm_count when the active_mm changes, sets tsk->active_mm and tsk->mm and calls switch_mm(); unuse_mm() calls sync_mm_rss(), clears tsk->mm (active_mm still points at the borrowed mm) and drops into lazy-TLB mode via enter_lazy_tlb().
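For orientation, a minimal sketch (not from the kernel tree) of how a kernel thread of this era borrows a user mm with this pair; the job structure, names and buffer size are invented, while use_mm()/unuse_mm(), mmput() and copy_from_user() are the real interfaces:

#include <linux/mm.h>
#include <linux/mmu_context.h>   /* use_mm(), unuse_mm() */
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

struct my_copy_job {                 /* hypothetical work item */
	struct mm_struct *mm;        /* pinned by the submitter, e.g. via get_task_mm() */
	void __user *ubuf;           /* user address valid in that mm */
	char kbuf[64];
};

static int my_copy_worker(void *arg) /* runs in a kernel thread */
{
	struct my_copy_job *job = arg;

	use_mm(job->mm);             /* adopt the user's address space */
	if (copy_from_user(job->kbuf, job->ubuf, sizeof(job->kbuf)))
		pr_warn("my_copy_worker: copy_from_user failed\n");
	unuse_mm(job->mm);           /* detach; active_mm keeps the mm until the next switch */

	mmput(job->mm);              /* drop the reference taken by the submitter */
	return 0;
}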
|
debug.c | mm/-specific debug routines: dump_vma() and dump_mm(). dump_mm() pr_emerg()s the main mm_struct fields (mmap, vmacache_seqnum, task_size, get_unmapped_area, mmap_base, pgd, mm_users, mm_count, nr_ptes, nr_pmds, map_count, the RSS/VM counters, code/data/brk/stack/arg/env ranges, binfmt, flags, core_state, ioctx_table, owner, exe_file, mmu_notifier_mm, NUMA scan state and tlb_flush_pending) and prints def_flags through dump_flags().
|
mmu_notifier.c | Core MMU-notifier implementation. __mmu_notifier_release() tears down all registered notifiers when the mm goes away (safe because mm_users > 0 during mmu_notifier_register and exit_mmap); __mmu_notifier_clear_flush_young(), __mmu_notifier_test_young(), __mmu_notifier_change_pte(), __mmu_notifier_invalidate_page(), __mmu_notifier_invalidate_range_start()/_end() and __mmu_notifier_invalidate_range() walk mm->mmu_notifier_mm->list under RCU and invoke the corresponding ops callbacks. do_mmu_notifier_register(), used by mmu_notifier_register() and __mmu_notifier_register(), allocates mm->mmu_notifier_mm under mm_take_all_locks(), pins the mm with atomic_inc(&mm->mm_count) and links the notifier into the list (the mm must be current->mm or pinned, e.g. with get_task_mm()). mmu_notifier_unregister() and mmu_notifier_unregister_no_release() unlink the notifier and drop the pin with mmdrop(); __mmu_notifier_mm_destroy() frees mm->mmu_notifier_mm when the mm is destroyed.
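As a reading aid, a minimal notifier user for this API generation might look like the sketch below; everything named my_* is invented, only two callbacks are implemented, and the ops signatures and mmu_notifier_register()/unregister() match the prototypes in the listing:

#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void my_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* e.g. shoot down secondary (device/guest) mappings of [start, end) */
	pr_debug("invalidate %#lx-%#lx on mm %p\n", start, end, mm);
}

static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* last chance to drop secondary mappings before the mm is freed */
}

static const struct mmu_notifier_ops my_ops = {
	.release                = my_release,
	.invalidate_range_start = my_invalidate_range_start,
};

static struct mmu_notifier my_notifier = { .ops = &my_ops };

static int my_attach_to_current_mm(void)
{
	/* caller must hold a valid current->mm with mm_users > 0 */
	return mmu_notifier_register(&my_notifier, current->mm);
}

static void my_detach_from_mm(struct mm_struct *mm)
{
	mmu_notifier_unregister(&my_notifier, mm);
}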
|
vmacache.c | Per-thread VMA cache helpers. vmacache_flush_all() invalidates the cache of every thread sharing the mm (skipped when mm_users == 1); vmacache_valid_mm() rejects lookups when current is on a foreign mm or is a PF_KTHREAD that adopted the mm via use_mm(); vmacache_valid() resynchronizes current's vmacache_seqnum with the mm's; vmacache_find() and vmacache_find_exact() look up a cached vm_area_struct for an address, with a WARN_ON_ONCE if a cached VMA belongs to a different mm.
|
msync.c | The msync(2) syscall (SYSCALL_DEFINE3): walks the VMAs covering the requested range with find_vma() under down_read(&mm->mmap_sem), dropping and re-taking mmap_sem around the writeback of each mapped file.
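From userspace this path is reached through msync(2); a small illustrative program (standard POSIX calls, error handling abbreviated):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("data.bin", O_RDWR | O_CREAT, 0600);
	if (fd < 0 || ftruncate(fd, 4096) != 0) { perror("open/ftruncate"); return 1; }

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	memcpy(p, "hello", 5);              /* dirty the shared file mapping */
	if (msync(p, 4096, MS_SYNC) != 0)   /* synchronous writeback of the page */
		perror("msync");

	munmap(p, 4096);
	close(fd);
	return 0;
}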
|
pgtable-generic.c | Generic page-table helpers: ptep_clear_flush() (ptep_get_and_clear() plus a TLB flush when the old PTE was accessible) and the generic pgtable_trans_huge_deposit()/pgtable_trans_huge_withdraw() pair, which park and retrieve a preallocated PTE page on pmd_huge_pte(mm, pmdp) under pmd_lockptr(mm, pmdp).
|
mremap.c | The mremap(2) implementation: get_old_pmd()/alloc_new_pmd() locate and allocate page-table levels for the old and new addresses; move_ptes() transfers PTEs with ptep_get_and_clear()/set_pte_at() under both PTE locks; move_vma() moves the VMA (respecting sysctl_max_map_count, vm_stat_account(), hiwater_vm and locked_vm accounting, and do_munmap() of the old range); vma_to_resize() validates and charges an expansion; mremap_to() and SYSCALL_DEFINE5 implement the fixed-address and in-place grow/shrink paths under down_write(&mm->mmap_sem).
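The userspace entry point is mremap(2); an illustrative program exercising the grow/move path (glibc needs _GNU_SOURCE for MREMAP_MAYMOVE):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 8 * 4096;

	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }
	strcpy(p, "payload");

	/* MREMAP_MAYMOVE allows the kernel to relocate the mapping
	 * (the move_vma()/move_ptes() path in the listing above). */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) { perror("mremap"); return 1; }

	printf("grown mapping at %p still holds \"%s\"\n", (void *)q, q);
	munmap(q, new_len);
	return 0;
}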
|
/linux-4.1.27/include/linux/ |
mmu_context.h | Declarations: void use_mm(struct mm_struct *mm); void unuse_mm(struct mm_struct *mm);
|
vmacache.h | Declarations for vmacache_flush_all(), vmacache_find() and vmacache_find_exact(), plus the inline vmacache_invalidate(): it increments mm->vmacache_seqnum and, if the sequence number wraps to 0, calls vmacache_flush_all() so that stale per-thread entries cannot appear valid again.
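The invalidation scheme is a sequence-number handshake; the standalone sketch below (plain C, not kernel code, all names invented) shows the same idea: cached entries are honoured only while the reader's snapshot of the sequence number matches the owner's, and bumping the number invalidates every cache at once:

#include <stdbool.h>
#include <stddef.h>

#define CACHE_SLOTS 4

struct space  { unsigned int seqnum; };          /* the shared object (the "mm") */
struct reader {                                   /* one per thread */
	unsigned int seqnum;                      /* snapshot taken at last refill */
	void *slot[CACHE_SLOTS];                  /* cached translations */
};

static void space_invalidate(struct space *s)     /* like vmacache_invalidate() */
{
	s->seqnum++;                              /* every snapshot becomes stale */
	/* on wrap-around a real implementation must also wipe every
	 * reader explicitly, which is what vmacache_flush_all() does */
}

static bool reader_cache_valid(struct reader *r, struct space *s)
{
	if (r->seqnum != s->seqnum) {             /* stale: drop everything */
		for (size_t i = 0; i < CACHE_SLOTS; i++)
			r->slot[i] = NULL;
		r->seqnum = s->seqnum;
		return false;
	}
	return true;
}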
|
khugepaged.h | __khugepaged_enter()/__khugepaged_exit() declarations and the inline fork/exit hooks: khugepaged_fork() enters the new mm into khugepaged when the parent mm was registered, and khugepaged_exit() calls __khugepaged_exit() when MMF_VM_HUGEPAGE is set in mm->flags; both collapse to no-ops when THP is configured out.
|
elf-randomize.h | arch_randomize_brk(): either the identity macro #define arch_randomize_brk(mm) (mm->brk) (no brk randomization) or an extern unsigned long arch_randomize_brk(struct mm_struct *mm) provided by the architecture.
|
mmu_notifier.h | The mmu_notifier_ops callback table (release, clear_flush_young, test_young, change_pte, invalidate_page, invalidate_range_start/end, invalidate_range) with its locking rules, mm_has_notifiers(), the registration and teardown prototypes, and the inline wrappers (mmu_notifier_release(), mmu_notifier_clear_flush_young(), mmu_notifier_test_young(), mmu_notifier_change_pte(), mmu_notifier_invalidate_page(), mmu_notifier_invalidate_range_start()/_end(), mmu_notifier_invalidate_range(), mmu_notifier_mm_init(), mmu_notifier_mm_destroy()) that forward to the __mmu_notifier_*() implementations only when notifiers are registered; with CONFIG_MMU_NOTIFIER disabled they are all empty stubs.
|
hugetlb_inline.h | 6: #include <linux/mm.h>
|
ksm.h | __ksm_enter()/__ksm_exit() declarations and the inline hooks ksm_fork() (enter the child mm into KSM when the parent's mm has MMF_VM_MERGEABLE set) and ksm_exit() (call __ksm_exit() when MMF_VM_MERGEABLE is set); both are no-ops without CONFIG_KSM.
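Userspace opts memory into KSM with madvise(MADV_MERGEABLE), which is what ultimately sets MMF_VM_MERGEABLE via __ksm_enter(); a small illustrative program:

#define _DEFAULT_SOURCE   /* MADV_MERGEABLE */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	memset(p, 0x5a, len);                    /* many identical pages */
	if (madvise(p, len, MADV_MERGEABLE))     /* hand them to ksmd for deduplication */
		perror("madvise(MADV_MERGEABLE)");

	/* pages are merged lazily by the ksmd kernel thread, if KSM is enabled */
	munmap(p, len);
	return 0;
}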
|
/linux-4.1.27/arch/s390/include/asm/ |
pgalloc.h | s390 page-table allocation: set_guest_storage_key()/get_guest_storage_key(), pgd_entry_type() (picks the region/segment table type from mm->context.asce_limit), pud_alloc_one()/pud_free() and pmd_alloc_one()/pmd_free() built on crst_table_alloc()/crst_table_free(), pgd_populate()/pud_populate(), pgd_alloc()/pgd_free() (which special-case the 2 GB asce_limit), pmd_populate()/pmd_populate_kernel(), and the pte_alloc_one*()/pte_free*() wrappers around page_table_alloc()/page_table_free().
|
mmu_context.h | s390 context management: init_new_context() initializes context.list_lock, pgtable_list, gmap_list, cpu_attach_mask, attach_count, flush_mm, the pgste/skey flags and the ASCE bits and limit (growing nr_pmds for the 2 GB case) and calls crst_table_init(); set_user_asce() loads S390_lowcore.user_asce from the ASCE bits and mm->pgd; finish_arch_post_lock_switch() waits for pending attaches, sets this CPU in mm_cpumask() and flushes the TLB if context.flush_mm is set; enter_lazy_tlb(), deactivate_mm(), destroy_context(), arch_dup_mmap(), arch_exit_mmap(), arch_unmap() and arch_bprm_mm_init() are (mostly) empty.
|
tlbflush.h | s390 TLB flushing: __tlb_flush_full() and __tlb_flush_asce() flush a specific mm on all CPUs (or locally when only the current CPU has it attached), bracketing the flush with attach_count adjustments and narrowing mm_cpumask(); __tlb_flush_mm() prefers a per-mm IDTE/ASCE flush when the machine supports it and no gmap is attached, otherwise falls back to __tlb_flush_full(); __tlb_flush_mm_lazy() flushes only when context.flush_mm is set, and flush_tlb_mm() is that lazy variant.
|
hugetlb.h | s390 hugetlb helpers: is_hugepage_only_range() is 0, set_huge_pte_at() and huge_ptep_get_and_clear() prototypes, hugetlb_prefault_arch_hook() and huge_pte_clear() stubs, and huge_ptep_set_wrprotect(), which clears the PTE with huge_ptep_get_and_clear() and reinstalls it write-protected via set_huge_pte_at().
|
/linux-4.1.27/arch/arm/include/asm/ |
mmu_context.h | ARM context switching: with CPU_HAS_ASID, check_and_switch_context() plus the per-mm ASID in mm->context.id (init_new_context() zeroes it) and the a15_erratum_get_cpumask() helper; without ASIDs, check_and_switch_context() revalidates the vmalloc seqcount via __check_vmalloc_seq() and either defers the switch by setting mm->context.switch_pending (when interrupts are off) or calls cpu_switch_mm(mm->pgd, mm) directly, with finish_arch_post_lock_switch() completing a pending switch; enter_lazy_tlb() and deactivate_mm() are no-ops.
|
mmu.h | ASID(mm): the low bits of mm->context.id.counter when the MMU uses ASIDs, otherwise 0.
|
pgalloc.h | ARM page-table allocation: real pmd_alloc_one()/pmd_free()/pud_populate() on LPAE, BUG() stubs otherwise; pgd_alloc()/pgd_free() prototypes; pte_alloc_one_kernel()/pte_alloc_one() and pte_free_kernel()/pte_free(); and pmd_populate_kernel()/pmd_populate(), which install a PTE table into a pmd entry (the comment notes the kernel/user split of the mm address space).
|
vdso.h | arm_install_vdso(struct mm_struct *mm, unsigned long addr): real declaration when the VDSO is enabled, empty inline stub otherwise.
|
/linux-4.1.27/arch/um/kernel/skas/ |
mmu.c | UML (SKAS) address-space setup: init_stub_pte() walks and allocates pgd/pud/pmd/pte for the stub pages; init_new_context() creates the new mm_context, inheriting from current->mm's context unless that is init_mm; uml_setup_stubs() maps the syscall stub code and stack pages with install_special_mapping(); arch_exit_mmap() clears the stub PTEs with pte_clear(); destroy_context() tears down the mm_context.
|
/linux-4.1.27/arch/x86/mm/kmemcheck/ |
pte.h | 4: #include <linux/mm.h>
|
pte.c | 1: #include <linux/mm.h>
|
/linux-4.1.27/arch/arm64/xen/ |
Makefile | 1: xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
|
/linux-4.1.27/arch/blackfin/mm/ |
Makefile | 2: # arch/blackfin/mm/Makefile
|
/linux-4.1.27/arch/c6x/include/asm/ |
tlb.h | 4: #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
|
/linux-4.1.27/arch/sh/mm/ |
hugetlbpage.c | SH hugetlb page-table handling: huge_pte_alloc() allocates the pgd/pud/pmd/pte chain for a huge mapping, huge_pte_offset() walks it read-only, and huge_pmd_unshare() is a stub returning 0.
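For readers unfamiliar with the allocation chain, here is a minimal sketch of the generic top-down walk that huge_pte_alloc() performs; walk_alloc_pte is a made-up name, while pgd_offset()/pud_alloc()/pmd_alloc()/pte_alloc_map() are the 4.1-era helpers visible in the listing:

#include <linux/mm.h>
#include <asm/pgtable.h>

static pte_t *walk_alloc_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);          /* top level always exists */
	pud = pud_alloc(mm, pgd, addr);      /* allocate a PUD table if missing */
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);      /* allocate a PMD table if missing */
	if (!pmd)
		return NULL;
	/* returns the (mapped) PTE slot for addr, or NULL on allocation failure */
	return pte_alloc_map(mm, NULL, pmd, addr);
}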
|
extable_32.c | File header comment referencing linux/arch/sh/mm/extable.c and linux/arch/i386/mm/extable.c.
|
tlbflush_32.c | SH-3/4 local TLB flushing: local_flush_tlb_page() handles VMAs belonging to an mm other than current->mm; local_flush_tlb_range() drops the whole context and reactivates it when the range is large, otherwise flushes the affected entries under the mm's ASID; local_flush_tlb_mm() invalidates the mm's context and reactivates it if the mm is current->mm.
|
pgtable.c | SH page-table allocation helpers: pgd_alloc(), pgd_free(), pud_populate(), pmd_alloc_one() and pmd_free().
|
tlbflush_64.c | SH-5 local TLB flushing: local_flush_tlb_range() skips mms with no context and matches entries by ASID/PTEH; local_flush_tlb_mm() invalidates the mm's context and reactivates it when the mm is current->mm.
|
mmap.c | SH arch_get_unmapped_area() and arch_get_unmapped_area_topdown(): pick an unmapped region for current->mm using find_vma(), the top-down variant bounded by mm->mmap_base.
|
/linux-4.1.27/arch/m32r/include/asm/ |
mmu_context.h | m32r context management: mm_context(mm) is mm->context (a per-CPU array on SMP); get_new_mmu_context()/get_mmu_context() assign a fresh ASID when the context version is stale; init_new_context() sets NO_CONTEXT; activate_context() loads the ASID via set_asid() after current->mm changes; destroy_context(), deactivate_mm() and enter_lazy_tlb() are no-ops, and the no-MMU build stubs everything out.
|
pgalloc.h | m32r page-table allocation: pmd_populate_kernel()/pmd_populate(), pgd_alloc()/pgd_free(), pte_alloc_one_kernel()/pte_alloc_one(), pte_free_kernel()/pte_free() and __pte_free_tlb(); pmd_alloc_one() and pgd_populate() are BUG() stubs and pmd_free() is a no-op (two-level page tables).
|
tlb.h | tlb_flush(tlb) flushes the whole mm: #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
|
tlbflush.h | m32r TLB-flush interface: flush_tlb() and flush_tlb_mm() map onto the local_*, no-op, or smp_* variants depending on CONFIG_MMU and CONFIG_SMP.
|
cacheflush.h | flush_cache_mm() and flush_cache_dup_mm() are no-ops in every m32r configuration.
|
/linux-4.1.27/arch/m68k/include/asm/ |
mmu_context.h | m68k context management for the different MMU families: one variant allocates context numbers from a bitmap (get_mmu_context(), init_new_context(), destroy_context(), switch_mm(), activate_mm()) and provides load_ksp_mmu(), which walks the page tables to preload the kernel-stack mapping (ColdFire); the Sun-3 variant uses get_free_context()/clear_context()/sun3_put_context(); the classic 680x0 variant stores virt_to_phys(mm->pgd) in mm->context and switches root pointers via switch_mm_0230()/switch_mm_0460(); the no-MMU variant stubs everything out.
|
motorola_pgalloc.h | Motorola-MMU page-table allocation: pte_alloc_one_kernel()/pte_free_kernel(), pte_alloc_one()/pte_free(), pmd_alloc_one()/pmd_free(), pgd_alloc()/pgd_free() (built on the pmd allocator), and pmd_populate_kernel()/pmd_populate()/pgd_populate().
|
sun3_pgalloc.h | Sun-3 page-table allocation: pte_free_kernel()/pte_free(), pte_alloc_one_kernel()/pte_alloc_one(), pmd_populate_kernel()/pmd_populate(), pgd_alloc()/pgd_free(); pmd_alloc_one() and pgd_populate() are BUG() stubs and pmd_free() is a no-op.
|
mcf_pgalloc.h | ColdFire page-table allocation: pte_free_kernel()/pte_alloc_one_kernel(), pte_alloc_one()/pte_free(), pgd_alloc()/pgd_free(), pmd_populate()/pmd_populate_kernel(); pmd_alloc_one(), pmd_free() and pgd_populate() are BUG() stubs.
|
tlb.h | tlb_flush(tlb) flushes the whole mm: #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
|
/linux-4.1.27/arch/sparc/mm/ |
tlb.c | sparc64 TLB batching: flush_tlb_pending() flushes the per-CPU batch (single page, SMP pending list, or local __flush_tlb_pending()); tlb_batch_add_one()/tlb_batch_add() queue user virtual addresses, flushing immediately when the batch switches mm or fills up and flushing the D-cache for aliasing pages; set_pmd_at() maintains context.huge_pte_count and flushes the two halves of a huge PMD or scans it with tlb_batch_pmd_scan(); pgtable_trans_huge_deposit()/pgtable_trans_huge_withdraw() keep the deposited PTE page list under mm->page_table_lock.
|
hugetlbpage.c | sparc64 hugetlb support: hugetlb_get_unmapped_area() and its top-down variant pick an address for current->mm; huge_pte_alloc()/huge_pte_offset() allocate and walk the page-table chain; set_huge_pte_at()/huge_ptep_get_and_clear() maintain context.huge_pte_count and install or clear the PTEs; huge_pmd_unshare() is a stub.
|
tsb.c | sparc64 TSB (Translation Storage Buffer) management: flush_tsb_user()/flush_tsb_user_page() flush entries from the base and huge-page TSBs under context.lock; setup_tsb_params() computes tsb_reg_val, the mapping address and PTE (or the hypervisor TSB descriptor) for a given TSB size; tsb_grow() allocates a larger TSB when the RSS crosses the current rss_limit, commits it under context.lock and propagates it with tsb_context_switch()/smp_tsb_sync(); init_new_context() resets the context, preallocates the base TSB (and the huge-page TSB when huge PTEs exist) and fails if the base TSB cannot be allocated; destroy_context() frees the TSBs and releases the context number.
|
fault_32.c | sparc32 page-fault handling: unhandled_fault() dumps the faulting task's mm/active_mm context and pgd; do_sparc_fault() bails to the no-context path when in_atomic() or without an mm, otherwise looks up the VMA under down_read(&mm->mmap_sem) and calls handle_mm_fault(), retrying or falling through to the OOM/SIGBUS/SIGSEGV paths; force_user_fault() is the simplified variant used by the register-window fault handlers.
|
/linux-4.1.27/arch/cris/include/asm/ |
pgalloc.h | CRIS page-table allocation: pmd_populate_kernel()/pmd_populate() via pmd_set(), pgd_alloc()/pgd_free(), pte_alloc_one_kernel()/pte_alloc_one() and pte_free_kernel()/pte_free().
|
mmu_context.h | CRIS context-management declarations: init_new_context(), get_mmu_context() and destroy_context() externs, a deactivate_mm() no-op, a reference to a definition in arch/cris/mm/fault.c, and an empty enter_lazy_tlb().
|
tlbflush.h | CRIS TLB-flush interface (implemented in arch/cris/mm/tlb.c): __flush_tlb_mm() plus the usual flush_tlb()/flush_tlb_mm()/flush_tlb_range() entry points; flush_tlb() flushes current->mm.
|
/linux-4.1.27/arch/ia64/include/asm/ |
H A D | pgalloc.h | 18 #include <linux/mm.h> 25 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument 30 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument 37 pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) pgd_populate() argument 42 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) pud_alloc_one() argument 47 static inline void pud_free(struct mm_struct *mm, pud_t *pud) pud_free() argument 51 #define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud) 55 pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) pud_populate() argument 60 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) pmd_alloc_one() argument 65 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument 70 #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd) 73 pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte) pmd_populate() argument 80 pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte) pmd_populate_kernel() argument 85 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr) pte_alloc_one() argument 101 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument 107 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument 113 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 123 #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
|
H A D | tlbflush.h | 10 #include <linux/mm.h> 30 extern void smp_flush_tlb_mm (struct mm_struct *mm); 39 local_finish_flush_tlb_mm (struct mm_struct *mm) local_finish_flush_tlb_mm() argument 41 if (mm == current->active_mm) local_finish_flush_tlb_mm() 42 activate_context(mm); local_finish_flush_tlb_mm() 51 flush_tlb_mm (struct mm_struct *mm) flush_tlb_mm() argument 53 if (!mm) flush_tlb_mm() 56 set_bit(mm->context, ia64_ctx.flushmap); flush_tlb_mm() 57 mm->context = 0; flush_tlb_mm() 59 if (atomic_read(&mm->mm_users) == 0) flush_tlb_mm() 63 smp_flush_tlb_mm(mm); flush_tlb_mm() 65 local_finish_flush_tlb_mm(mm); flush_tlb_mm()
|
H A D | hugetlb.h | 15 static inline int is_hugepage_only_range(struct mm_struct *mm, is_hugepage_only_range() argument 23 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) hugetlb_prefault_arch_hook() argument 27 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument 30 set_pte_at(mm, addr, ptep, pte); set_huge_pte_at() 33 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, huge_ptep_get_and_clear() argument 36 return ptep_get_and_clear(mm, addr, ptep); huge_ptep_get_and_clear() 54 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument 57 ptep_set_wrprotect(mm, addr, ptep); huge_ptep_set_wrprotect()
|
H A D | mmu_context.h | 48 extern void wrap_mmu_context (struct mm_struct *mm); 51 enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 78 get_mmu_context (struct mm_struct *mm) get_mmu_context() argument 81 nv_mm_context_t context = mm->context; get_mmu_context() 88 context = mm->context; get_mmu_context() 90 cpumask_clear(mm_cpumask(mm)); get_mmu_context() 97 wrap_mmu_context(mm); get_mmu_context() 99 mm->context = context = ia64_ctx.next++; get_mmu_context() 118 init_new_context (struct task_struct *p, struct mm_struct *mm) init_new_context() argument 120 mm->context = 0; init_new_context() 125 destroy_context (struct mm_struct *mm) destroy_context() argument 163 activate_context (struct mm_struct *mm) activate_context() argument 168 context = get_mmu_context(mm); activate_context() 169 if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) activate_context() 170 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); activate_context() 176 } while (unlikely(context != mm->context)); activate_context() 179 #define deactivate_mm(tsk,mm) do { } while (0)
|
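get_mmu_context() above assigns a context number to an mm lazily, handing out ia64_ctx.next and calling wrap_mmu_context() when the number space is exhausted. The stand-alone toy below models only that allocation policy; every name in it is invented for illustration and it compiles as ordinary C:

#include <stdio.h>

/* Toy model of lazy context-number allocation: 0 means "no context yet";
 * on first use an mm receives the next free number, and a wrapped counter
 * starts a new generation (real code also flushes TLBs at that point). */
#define NO_CTX 0UL

static unsigned long next_ctx = 1;

struct toy_mm { unsigned long ctx; };

static unsigned long toy_get_context(struct toy_mm *mm)
{
        if (mm->ctx == NO_CTX) {
                if (next_ctx == 0)      /* counter wrapped: new generation */
                        next_ctx = 1;
                mm->ctx = next_ctx++;
        }
        return mm->ctx;
}

int main(void)
{
        struct toy_mm a = { NO_CTX }, b = { NO_CTX };

        printf("a first -> %lu\n", toy_get_context(&a));   /* allocates 1 */
        printf("b first -> %lu\n", toy_get_context(&b));   /* allocates 2 */
        printf("a again -> %lu\n", toy_get_context(&a));   /* still 1 */
        return 0;
}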
/linux-4.1.27/arch/powerpc/mm/ |
H A D | mmu_context_hash64.c | 18 #include <linux/mm.h> 62 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument 74 if (slice_mm_new_context(mm)) init_new_context() 75 slice_set_user_psize(mm, mmu_virtual_psize); init_new_context() 76 subpage_prot_init_new_context(mm); init_new_context() 77 mm->context.id = index; init_new_context() 79 mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL); init_new_context() 80 if (!mm->context.cop_lockp) { init_new_context() 82 subpage_prot_free(mm); init_new_context() 83 mm->context.id = MMU_NO_CONTEXT; init_new_context() 86 spin_lock_init(mm->context.cop_lockp); init_new_context() 90 mm->context.pte_frag = NULL; init_new_context() 104 static void destroy_pagetable_page(struct mm_struct *mm) destroy_pagetable_page() argument 110 pte_frag = mm->context.pte_frag; destroy_pagetable_page() 126 static inline void destroy_pagetable_page(struct mm_struct *mm) destroy_pagetable_page() argument 133 void destroy_context(struct mm_struct *mm) destroy_context() argument 137 drop_cop(mm->context.acop, mm); destroy_context() 138 kfree(mm->context.cop_lockp); destroy_context() 139 mm->context.cop_lockp = NULL; destroy_context() 142 destroy_pagetable_page(mm); destroy_context() 143 __destroy_context(mm->context.id); destroy_context() 144 subpage_prot_free(mm); destroy_context() 145 mm->context.id = MMU_NO_CONTEXT; destroy_context()
|
H A D | icswx_pid.c | 17 #include <linux/mm.h> 58 int get_cop_pid(struct mm_struct *mm) get_cop_pid() argument 62 if (mm->context.cop_pid == COP_PID_NONE) { get_cop_pid() 66 mm->context.cop_pid = pid; get_cop_pid() 68 return mm->context.cop_pid; get_cop_pid() 71 int disable_cop_pid(struct mm_struct *mm) disable_cop_pid() argument 75 if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { disable_cop_pid() 76 free_pid = mm->context.cop_pid; disable_cop_pid() 77 mm->context.cop_pid = COP_PID_NONE; disable_cop_pid()
|
H A D | copro_fault.c | 2 * CoProcessor (SPU/AFU) mm fault handler 24 #include <linux/mm.h> 36 int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, copro_handle_mm_fault() argument 43 if (mm == NULL) copro_handle_mm_fault() 46 if (mm->pgd == NULL) copro_handle_mm_fault() 49 down_read(&mm->mmap_sem); copro_handle_mm_fault() 51 vma = find_vma(mm, ea); copro_handle_mm_fault() 78 *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0); copro_handle_mm_fault() 96 up_read(&mm->mmap_sem); copro_handle_mm_fault() 101 int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) copro_calculate_slb() argument 109 psize = get_slice_psize(mm, ea); copro_calculate_slb() 111 vsid = get_vsid(mm->context.id, ea, ssize); copro_calculate_slb() 145 void copro_flush_all_slbs(struct mm_struct *mm) copro_flush_all_slbs() argument 148 spu_flush_all_slbs(mm); copro_flush_all_slbs() 150 cxl_slbia(mm); copro_flush_all_slbs()
|
H A D | subpage-prot.c | 14 #include <linux/mm.h> 26 void subpage_prot_free(struct mm_struct *mm) subpage_prot_free() argument 28 struct subpage_prot_table *spt = &mm->context.spt; subpage_prot_free() 53 void subpage_prot_init_new_context(struct mm_struct *mm) subpage_prot_init_new_context() argument 55 struct subpage_prot_table *spt = &mm->context.spt; subpage_prot_init_new_context() 60 static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, hpte_flush_range() argument 69 pgd = pgd_offset(mm, addr); hpte_flush_range() 78 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); hpte_flush_range() 81 pte_update(mm, addr, pte, 0, 0, 0); hpte_flush_range() 95 struct mm_struct *mm = current->mm; subpage_prot_clear() local 96 struct subpage_prot_table *spt = &mm->context.spt; subpage_prot_clear() 102 down_write(&mm->mmap_sem); subpage_prot_clear() 128 hpte_flush_range(mm, addr, nw); subpage_prot_clear() 130 up_write(&mm->mmap_sem); subpage_prot_clear() 142 static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, subpage_mark_vma_nohuge() argument 147 .mm = mm, subpage_mark_vma_nohuge() 155 vma = find_vma(mm, addr); subpage_mark_vma_nohuge() 171 static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, subpage_mark_vma_nohuge() argument 190 struct mm_struct *mm = current->mm; sys_subpage_prot() local 191 struct subpage_prot_table *spt = &mm->context.spt; sys_subpage_prot() 203 if (is_hugepage_only_range(mm, addr, len)) sys_subpage_prot() 215 down_write(&mm->mmap_sem); sys_subpage_prot() 216 subpage_mark_vma_nohuge(mm, addr, len); sys_subpage_prot() 242 demote_segment_4k(mm, addr); sys_subpage_prot() 250 up_write(&mm->mmap_sem); sys_subpage_prot() 255 down_write(&mm->mmap_sem); sys_subpage_prot() 258 hpte_flush_range(mm, addr, nw); sys_subpage_prot() 264 up_write(&mm->mmap_sem); sys_subpage_prot()
|
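hpte_flush_range() above descends the page tables level by level before operating on individual PTEs. The same descent, written out as a stand-alone helper, looks like the sketch below; find_pte_of() is a hypothetical name, while the accessors are the generic kernel ones used in the file above:

/* Sketch of the pgd -> pud -> pmd -> pte descent used by
 * hpte_flush_range() above.  find_pte_of() is hypothetical; the
 * accessors are the standard kernel ones. */
static pte_t *find_pte_of(struct mm_struct *mm, unsigned long addr,
                          spinlock_t **ptlp)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;
        /* returns the mapped PTE with its page-table lock held in *ptlp */
        return pte_offset_map_lock(mm, pmd, addr, ptlp);
}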
H A D | slice.c | 28 #include <linux/mm.h> 100 static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, slice_area_is_free() argument 105 if ((mm->task_size - len) < addr) slice_area_is_free() 107 vma = find_vma(mm, addr); slice_area_is_free() 111 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) slice_low_has_vma() argument 113 return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, slice_low_has_vma() 117 static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) slice_high_has_vma() argument 128 return !slice_area_is_free(mm, start, end - start); slice_high_has_vma() 131 static struct slice_mask slice_mask_for_free(struct mm_struct *mm) slice_mask_for_free() argument 137 if (!slice_low_has_vma(mm, i)) slice_mask_for_free() 140 if (mm->task_size <= SLICE_LOW_TOP) slice_mask_for_free() 144 if (!slice_high_has_vma(mm, i)) slice_mask_for_free() 150 static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize) slice_mask_for_size() argument 158 lpsizes = mm->context.low_slices_psize; slice_mask_for_size() 163 hpsizes = mm->context.high_slices_psize; slice_mask_for_size() 182 struct mm_struct *mm = parm; slice_flush_segments() local 185 if (mm != current->active_mm) slice_flush_segments() 196 static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) slice_convert() argument 204 slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize); slice_convert() 212 lpsizes = mm->context.low_slices_psize; slice_convert() 219 mm->context.low_slices_psize = lpsizes; slice_convert() 221 hpsizes = mm->context.high_slices_psize; slice_convert() 232 mm->context.low_slices_psize, slice_convert() 233 mm->context.high_slices_psize); slice_convert() 237 copro_flush_all_slbs(mm); slice_convert() 265 static unsigned long slice_find_area_bottomup(struct mm_struct *mm, slice_find_area_bottomup() argument 308 static unsigned long slice_find_area_topdown(struct mm_struct *mm, slice_find_area_topdown() argument 322 addr = mm->mmap_base; slice_find_area_topdown() 354 return slice_find_area_bottomup(mm, len, available, psize); slice_find_area_topdown() 358 static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, slice_find_area() argument 363 return slice_find_area_topdown(mm, len, mask, psize); slice_find_area() 365 return slice_find_area_bottomup(mm, len, mask, psize); slice_find_area() 394 struct mm_struct *mm = current->mm; slice_get_unmapped_area() local 398 BUG_ON(mm->task_size == 0); slice_get_unmapped_area() 400 slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize); slice_get_unmapped_area() 404 if (len > mm->task_size) slice_get_unmapped_area() 410 if (fixed && addr > (mm->task_size - len)) slice_get_unmapped_area() 418 if (addr > mm->task_size - len || slice_get_unmapped_area() 419 !slice_area_is_free(mm, addr, len)) slice_get_unmapped_area() 426 good_mask = slice_mask_for_size(mm, psize); slice_get_unmapped_area() 451 compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K); slice_get_unmapped_area() 474 newaddr = slice_find_area(mm, len, good_mask, psize, topdown); slice_get_unmapped_area() 487 potential_mask = slice_mask_for_free(mm); slice_get_unmapped_area() 506 addr = slice_find_area(mm, len, good_mask, psize, topdown); slice_get_unmapped_area() 516 addr = slice_find_area(mm, len, potential_mask, psize, topdown); slice_get_unmapped_area() 522 addr = slice_find_area(mm, len, potential_mask, psize, slice_get_unmapped_area() 538 slice_convert(mm, mask, psize); slice_get_unmapped_area() 540 
on_each_cpu(slice_flush_segments, mm, 1); slice_get_unmapped_area() 554 current->mm->context.user_psize, 0); arch_get_unmapped_area() 564 current->mm->context.user_psize, 1); arch_get_unmapped_area_topdown() 567 unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) get_slice_psize() argument 574 lpsizes = mm->context.low_slices_psize; get_slice_psize() 578 hpsizes = mm->context.high_slices_psize; get_slice_psize() 593 * N.B. This may be called before mm->context.id has been set. 599 void slice_set_user_psize(struct mm_struct *mm, unsigned int psize) slice_set_user_psize() argument 607 slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize); slice_set_user_psize() 611 old_psize = mm->context.user_psize; slice_set_user_psize() 616 mm->context.user_psize = psize; slice_set_user_psize() 619 lpsizes = mm->context.low_slices_psize; slice_set_user_psize() 625 mm->context.low_slices_psize = lpsizes; slice_set_user_psize() 627 hpsizes = mm->context.high_slices_psize; slice_set_user_psize() 641 mm->context.low_slices_psize, slice_set_user_psize() 642 mm->context.high_slices_psize); slice_set_user_psize() 648 void slice_set_range_psize(struct mm_struct *mm, unsigned long start, slice_set_range_psize() argument 653 slice_convert(mm, mask, psize); slice_set_range_psize() 676 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, is_hugepage_only_range() argument 680 unsigned int psize = mm->context.user_psize; is_hugepage_only_range() 683 available = slice_mask_for_size(mm, psize); is_hugepage_only_range() 688 compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K); is_hugepage_only_range() 694 slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n", is_hugepage_only_range() 695 mm, addr, len); is_hugepage_only_range()
|
H A D | tlb_hash32.c | 8 * Derived from arch/ppc/mm/init.c: 15 * Derived from "arch/i386/mm/init.c" 26 #include <linux/mm.h> 40 void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr) flush_hash_entry() argument 46 flush_hash_pages(mm->context.id, addr, ptephys, 1); flush_hash_entry() 80 * - flush_tlb_mm(mm) flushes the specified mm context TLB's 90 static void flush_range(struct mm_struct *mm, unsigned long start, flush_range() argument 96 unsigned int ctx = mm->context.id; flush_range() 106 pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start); flush_range() 132 * Flush all the (user) entries for the address space described by mm. 134 void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 144 * It is safe to go down the mm's list of vmas when called flush_tlb_mm() 149 for (mp = mm->mmap; mp != NULL; mp = mp->vm_next) flush_tlb_mm() 156 struct mm_struct *mm; flush_tlb_page() local 163 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; flush_tlb_page() 164 pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr); flush_tlb_page() 166 flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); flush_tlb_page()
|
H A D | mmu_context_nohash.c | 9 * Derived from previous arch/powerpc/mm/mmu_context.c 25 * also clear mm->cpu_vm_mask bits when processes are migrated 44 #include <linux/mm.h> 87 struct mm_struct *mm; steal_context_smp() local 94 /* Pick up the victim mm */ steal_context_smp() 95 mm = context_mm[id]; steal_context_smp() 100 if (mm->context.active) { steal_context_smp() 106 pr_hardcont(" | steal %d from 0x%p", id, mm); steal_context_smp() 108 /* Mark this mm has having no context anymore */ steal_context_smp() 109 mm->context.id = MMU_NO_CONTEXT; steal_context_smp() 111 /* Mark it stale on all CPUs that used this mm. For threaded steal_context_smp() 116 for_each_cpu(cpu, mm_cpumask(mm)) { for_each_cpu() 141 struct mm_struct *mm; steal_all_contexts() local 146 /* Pick up the victim mm */ steal_all_contexts() 147 mm = context_mm[id]; steal_all_contexts() 149 pr_hardcont(" | steal %d from 0x%p", id, mm); steal_all_contexts() 151 /* Mark this mm as having no context anymore */ steal_all_contexts() 152 mm->context.id = MMU_NO_CONTEXT; steal_all_contexts() 157 mm->context.active = 0; steal_all_contexts() 178 struct mm_struct *mm; steal_context_up() local 181 /* Pick up the victim mm */ steal_context_up() 182 mm = context_mm[id]; steal_context_up() 184 pr_hardcont(" | steal %d from 0x%p", id, mm); steal_context_up() 187 local_flush_tlb_mm(mm); steal_context_up() 189 /* Mark this mm has having no context anymore */ steal_context_up() 190 mm->context.id = MMU_NO_CONTEXT; steal_context_up() 237 pr_hard("[%d] activating context for mm @%p, active=%d, id=%d", switch_mmu_context() 257 pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n", switch_mmu_context() 329 int init_new_context(struct task_struct *t, struct mm_struct *mm) init_new_context() argument 331 pr_hard("initing context for mm @%p\n", mm); init_new_context() 333 mm->context.id = MMU_NO_CONTEXT; init_new_context() 334 mm->context.active = 0; init_new_context() 337 if (slice_mm_new_context(mm)) init_new_context() 338 slice_set_user_psize(mm, mmu_virtual_psize); init_new_context() 347 void destroy_context(struct mm_struct *mm) destroy_context() argument 352 if (mm->context.id == MMU_NO_CONTEXT) destroy_context() 355 WARN_ON(mm->context.active != 0); destroy_context() 358 id = mm->context.id; destroy_context() 361 mm->context.id = MMU_NO_CONTEXT; destroy_context() 363 mm->context.active = 0; destroy_context()
|
H A D | mmu_context_hash32.c | 8 * Derived from arch/ppc/mm/init.c: 15 * Derived from "arch/i386/mm/init.c" 25 #include <linux/mm.h> 53 * function is changed then arch/ppc/mm/hashtable.S will have to be 82 int init_new_context(struct task_struct *t, struct mm_struct *mm) init_new_context() argument 84 mm->context.id = __init_new_context(); init_new_context() 101 void destroy_context(struct mm_struct *mm) destroy_context() argument 104 if (mm->context.id != NO_CONTEXT) { destroy_context() 105 __destroy_context(mm->context.id); destroy_context() 106 mm->context.id = NO_CONTEXT; destroy_context()
|
H A D | icswx.c | 17 #include <linux/mm.h> 79 * @mm: The mm the coprocessor to associate with. Most likely current mm. 85 int use_cop(unsigned long acop, struct mm_struct *mm) use_cop() argument 92 if (!mm || !acop) use_cop() 96 spin_lock(&mm->page_table_lock); use_cop() 97 spin_lock(mm->context.cop_lockp); use_cop() 99 ret = get_cop_pid(mm); use_cop() 104 mm->context.acop |= acop; use_cop() 106 sync_cop(mm); use_cop() 113 if (atomic_read(&mm->mm_users) > 1) use_cop() 114 smp_call_function(sync_cop, mm, 1); use_cop() 117 spin_unlock(mm->context.cop_lockp); use_cop() 118 spin_unlock(&mm->page_table_lock); use_cop() 127 * @mm: The mm the coprocessor associated with. 129 void drop_cop(unsigned long acop, struct mm_struct *mm) drop_cop() argument 136 if (WARN_ON_ONCE(!mm)) drop_cop() 140 spin_lock(&mm->page_table_lock); drop_cop() 141 spin_lock(mm->context.cop_lockp); drop_cop() 143 mm->context.acop &= ~acop; drop_cop() 145 free_pid = disable_cop_pid(mm); drop_cop() 146 sync_cop(mm); drop_cop() 153 if (atomic_read(&mm->mm_users) > 1) drop_cop() 154 smp_call_function(sync_cop, mm, 1); drop_cop() 159 spin_unlock(mm->context.cop_lockp); drop_cop() 160 spin_unlock(&mm->page_table_lock); drop_cop() 235 * the threads (see smp_call_function(sync_cop, mm, 1)), but acop_handle_fault()
|
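use_cop() and drop_cop() above attach and detach a coprocessor type from an mm under page_table_lock plus the per-context cop lock, broadcasting sync_cop() when the mm has other users. A hedged sketch of the expected pairing from a driver's point of view (the ACOP bit value and the open/release hooks are assumptions; the two calls and their (acop, mm) arguments follow the file above):

/* Sketch: balanced use_cop()/drop_cop() around a coprocessor session.
 * MY_ACOP_BIT and the open/release hooks are assumptions. */
#define MY_ACOP_BIT     (1UL << 16)     /* hypothetical coprocessor type bit */

static int my_cop_open(void)
{
        /* grant the calling task's mm access to the coprocessor type */
        return use_cop(MY_ACOP_BIT, current->mm);
}

static void my_cop_release(void)
{
        /* must balance the earlier use_cop() on the same mm */
        drop_cop(MY_ACOP_BIT, current->mm);
}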
H A D | tlb_hash64.c | 5 * Derived from arch/ppc64/mm/init.c: 12 * Derived from "arch/i386/mm/init.c" 25 #include <linux/mm.h> 43 void hpte_need_flush(struct mm_struct *mm, unsigned long addr, hpte_need_flush() argument 65 psize = get_slice_psize(mm, addr); hpte_need_flush() 70 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ hpte_need_flush() 73 psize = pte_pagesize_index(mm, addr, pte); hpte_need_flush() 85 vsid = get_vsid(mm->context.id, addr, ssize); hpte_need_flush() 97 * in that case, might be worth testing the mm cpu mask though hpte_need_flush() 116 if (i != 0 && (mm != batch->mm || batch->psize != psize || hpte_need_flush() 122 batch->mm = mm; hpte_need_flush() 148 if (cpumask_equal(mm_cpumask(batch->mm), tmp)) __flush_tlb_pending() 177 * @mm : mm_struct of the target address space (generally init_mm) 190 void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, __flush_hash_table_range() argument 199 BUG_ON(!mm->pgd); __flush_hash_table_range() 211 pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, __flush_hash_table_range() 223 hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte); __flush_hash_table_range() 225 hpte_need_flush(mm, start, ptep, pte, 0); __flush_hash_table_range() 231 void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr) flush_tlb_pmd_range() argument 251 hpte_need_flush(mm, addr, pte, pteval, 0); flush_tlb_pmd_range()
|
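hpte_need_flush() above does not invalidate hash entries one at a time; it queues them in a per-CPU batch which __flush_tlb_pending() drains when the batch fills or when the owning mm or page size changes. The stand-alone toy below models only that drain-on-boundary-or-overflow policy; all names are invented:

#include <stdio.h>

/* Toy model of deferred invalidation batching: addresses accumulate
 * until the batch is full or the owning key changes, then the whole
 * batch is flushed at once.  Purely illustrative. */
#define BATCH_MAX 8

struct toy_batch {
        int owner;                      /* stands in for (mm, psize) */
        int n;
        unsigned long addr[BATCH_MAX];
};

static void toy_flush(struct toy_batch *b)
{
        if (b->n)
                printf("flush %d entries for owner %d\n", b->n, b->owner);
        b->n = 0;
}

static void toy_need_flush(struct toy_batch *b, int owner, unsigned long addr)
{
        if (b->n && b->owner != owner)  /* key changed: drain first */
                toy_flush(b);
        b->owner = owner;
        b->addr[b->n++] = addr;
        if (b->n == BATCH_MAX)          /* batch full: drain now */
                toy_flush(b);
}

int main(void)
{
        struct toy_batch b = { 0, 0, { 0 } };
        unsigned long a;

        for (a = 0; a < 10; a++)
                toy_need_flush(&b, a < 6 ? 1 : 2, a << 12);
        toy_flush(&b);                  /* final drain, as at lazy-MMU exit */
        return 0;
}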
/linux-4.1.27/arch/sparc/include/asm/ |
H A D | mmu_context_64.h | 12 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 20 void get_new_mmu_context(struct mm_struct *mm); 27 int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 28 void destroy_context(struct mm_struct *mm); 35 static inline void tsb_context_switch(struct mm_struct *mm) tsb_context_switch() argument 37 __tsb_context_switch(__pa(mm->pgd), tsb_context_switch() 38 &mm->context.tsb_block[0], tsb_context_switch() 40 (mm->context.tsb_block[1].tsb ? tsb_context_switch() 41 &mm->context.tsb_block[1] : tsb_context_switch() 46 , __pa(&mm->context.tsb_descr[0])); tsb_context_switch() 49 void tsb_grow(struct mm_struct *mm, 53 void smp_tsb_sync(struct mm_struct *mm); 74 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) switch_mm() argument 79 if (unlikely(mm == &init_mm)) switch_mm() 82 spin_lock_irqsave(&mm->context.lock, flags); switch_mm() 83 ctx_valid = CTX_VALID(mm->context); switch_mm() 85 get_new_mmu_context(mm); switch_mm() 117 load_secondary_context(mm); switch_mm() 118 tsb_context_switch(mm); switch_mm() 125 if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) { switch_mm() 126 cpumask_set_cpu(cpu, mm_cpumask(mm)); switch_mm() 127 __flush_tlb_mm(CTX_HWBITS(mm->context), switch_mm() 130 spin_unlock_irqrestore(&mm->context.lock, flags); switch_mm() 133 #define deactivate_mm(tsk,mm) do { } while (0) 136 static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) activate_mm() argument 141 spin_lock_irqsave(&mm->context.lock, flags); activate_mm() 142 if (!CTX_VALID(mm->context)) activate_mm() 143 get_new_mmu_context(mm); activate_mm() 145 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))) activate_mm() 146 cpumask_set_cpu(cpu, mm_cpumask(mm)); activate_mm() 148 load_secondary_context(mm); activate_mm() 149 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); activate_mm() 150 tsb_context_switch(mm); activate_mm() 151 spin_unlock_irqrestore(&mm->context.lock, flags); activate_mm()
|
H A D | mmu_32.h | 7 /* mm/srmmu.c */
|
H A D | tlb_64.h | 16 void smp_flush_tlb_mm(struct mm_struct *mm); 17 #define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm) 19 #define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
|
H A D | pgalloc_32.h | 27 #define pgd_free(mm, pgd) free_pgd_fast(pgd) 28 #define pgd_alloc(mm) get_pgd_fast() 39 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, pmd_alloc_one() argument 51 #define pmd_free(mm, pmd) free_pmd_fast(pmd) 52 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) 54 void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep); 60 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address); 62 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument 74 #define pte_free_kernel(mm, pte) free_pte_fast(pte) 76 void pte_free(struct mm_struct * mm, pgtable_t pte); 77 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
|
H A D | mmu_context_32.h | 8 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 15 int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 22 void destroy_context(struct mm_struct *mm); 25 void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, 28 #define deactivate_mm(tsk,mm) do { } while (0) 31 #define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL)
|
H A D | pgalloc_64.h | 6 #include <linux/mm.h> 25 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument 30 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument 42 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) pud_alloc_one() argument 48 static inline void pud_free(struct mm_struct *mm, pud_t *pud) pud_free() argument 53 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) pmd_alloc_one() argument 59 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument 64 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 66 pgtable_t pte_alloc_one(struct mm_struct *mm, 68 void pte_free_kernel(struct mm_struct *mm, pte_t *pte); 69 void pte_free(struct mm_struct *mm, pgtable_t ptepage);
|
H A D | tlbflush_32.h | 8 #define flush_tlb_mm(mm) \ 9 sparc32_cachetlb_ops->tlb_mm(mm)
|
H A D | tlbflush_64.h | 11 struct mm_struct *mm; member in struct:tlb_batch 19 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr); 23 static inline void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 53 static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) global_flush_tlb_page() argument 55 __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); global_flush_tlb_page() 61 void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); 63 #define global_flush_tlb_page(mm, vaddr) \ 64 smp_flush_tlb_page(mm, vaddr)
|
H A D | cacheflush_32.h | 8 #define flush_cache_mm(mm) \ 9 sparc32_cachetlb_ops->cache_mm(mm) 10 #define flush_cache_dup_mm(mm) \ 11 sparc32_cachetlb_ops->cache_mm(mm) 34 #define flush_sig_insns(mm,insn_addr) \ 35 sparc32_cachetlb_ops->sig_insns(mm, insn_addr)
|
H A D | hugetlb.h | 8 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 11 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, 14 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) hugetlb_prefault_arch_hook() argument 18 static inline int is_hugepage_only_range(struct mm_struct *mm, is_hugepage_only_range() argument 61 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument 65 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); huge_ptep_set_wrprotect()
|
/linux-4.1.27/arch/unicore32/mm/ |
H A D | pgd.c | 2 * linux/arch/unicore32/mm/pgd.c 12 #include <linux/mm.h> 20 #include "mm.h" 27 pgd_t *get_pgd_slow(struct mm_struct *mm) get_pgd_slow() argument 53 new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0); get_pgd_slow() 57 new_pte = pte_alloc_map(mm, NULL, new_pmd, 0); get_pgd_slow() 71 pmd_free(mm, new_pmd); get_pgd_slow() 72 mm_dec_nr_pmds(mm); get_pgd_slow() 79 void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd) free_pgd_slow() argument 99 pte_free(mm, pte); free_pgd_slow() 100 atomic_long_dec(&mm->nr_ptes); free_pgd_slow() 101 pmd_free(mm, pmd); free_pgd_slow() 102 mm_dec_nr_pmds(mm); free_pgd_slow()
|
H A D | proc-syms.c | 2 * linux/arch/unicore32/mm/proc-syms.c 13 #include <linux/mm.h>
|
/linux-4.1.27/arch/alpha/include/asm/ |
H A D | pgalloc.h | 4 #include <linux/mm.h> 14 pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) pmd_populate() argument 21 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument 27 pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) pgd_populate() argument 32 extern pgd_t *pgd_alloc(struct mm_struct *mm); 35 pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument 41 pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_alloc_one() argument 48 pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument 54 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) pte_alloc_one_kernel() argument 61 pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 67 pte_alloc_one(struct mm_struct *mm, unsigned long address) pte_alloc_one() argument 69 pte_t *pte = pte_alloc_one_kernel(mm, address); pte_alloc_one() 83 pte_free(struct mm_struct *mm, pgtable_t page) pte_free() argument
|
H A D | tlb.h | 8 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) 12 #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) 13 #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
|
H A D | tlbflush.h | 4 #include <linux/mm.h> 21 ev4_flush_tlb_current(struct mm_struct *mm) ev4_flush_tlb_current() argument 23 __load_new_mm_context(mm); ev4_flush_tlb_current() 28 ev5_flush_tlb_current(struct mm_struct *mm) ev5_flush_tlb_current() argument 30 __load_new_mm_context(mm); ev5_flush_tlb_current() 38 ev4_flush_tlb_current_page(struct mm_struct * mm, ev4_flush_tlb_current_page() argument 44 __load_new_mm_context(mm); ev4_flush_tlb_current_page() 51 ev5_flush_tlb_current_page(struct mm_struct * mm, ev5_flush_tlb_current_page() argument 56 __load_new_mm_context(mm); ev5_flush_tlb_current_page() 89 flush_tlb_other(struct mm_struct *mm) flush_tlb_other() argument 91 unsigned long *mmc = &mm->context[smp_processor_id()]; flush_tlb_other() 107 flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 109 if (mm == current->active_mm) flush_tlb_mm() 110 flush_tlb_current(mm); flush_tlb_mm() 112 flush_tlb_other(mm); flush_tlb_mm() 119 struct mm_struct *mm = vma->vm_mm; flush_tlb_page() local 121 if (mm == current->active_mm) flush_tlb_page() 122 flush_tlb_current_page(mm, vma, addr); flush_tlb_page() 124 flush_tlb_other(mm); flush_tlb_page()
|
H A D | cacheflush.h | 4 #include <linux/mm.h> 8 #define flush_cache_mm(mm) do { } while (0) 9 #define flush_cache_dup_mm(mm) do { } while (0) 42 that icache entries are tagged with the ASN and load a new mm context. */ 54 struct mm_struct *mm = vma->vm_mm; flush_icache_user_range() local 55 if (current->active_mm == mm) flush_icache_user_range() 56 __load_new_mm_context(mm); flush_icache_user_range() 58 mm->context[smp_processor_id()] = 0; flush_icache_user_range()
|
/linux-4.1.27/arch/xtensa/include/asm/ |
H A D | nommu_context.h | 5 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 9 static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm) init_new_context() argument 14 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument 27 static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm) deactivate_mm() argument
|
H A D | mmu_context.h | 68 static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu) get_new_mmu_context() argument 80 mm->context.asid[cpu] = asid; get_new_mmu_context() 81 mm->context.cpu = cpu; get_new_mmu_context() 84 static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) get_mmu_context() argument 90 if (mm) { get_mmu_context() 91 unsigned long asid = mm->context.asid[cpu]; get_mmu_context() 95 get_new_mmu_context(mm, cpu); get_mmu_context() 99 static inline void activate_context(struct mm_struct *mm, unsigned int cpu) activate_context() argument 101 get_mmu_context(mm, cpu); activate_context() 102 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); activate_context() 113 struct mm_struct *mm) init_new_context() 117 mm->context.asid[cpu] = NO_CONTEXT; for_each_possible_cpu() 119 mm->context.cpu = -1; 138 #define deactivate_mm(tsk, mm) do { } while (0) 144 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument 150 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 112 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
|
H A D | pgalloc.h | 24 #define pmd_populate_kernel(mm, pmdp, ptep) \ 26 #define pmd_populate(mm, pmdp, page) \ 31 pgd_alloc(struct mm_struct *mm) pgd_alloc() argument 36 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument 41 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument 55 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument 61 pte = pte_alloc_one_kernel(mm, addr); pte_alloc_one() 72 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 77 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
|
/linux-4.1.27/arch/sparc/power/ |
H A D | hibernate.c | 7 #include <linux/mm.h> 36 struct mm_struct *mm = current->active_mm; restore_processor_state() local 38 load_secondary_context(mm); restore_processor_state() 39 tsb_context_switch(mm); restore_processor_state()
|
/linux-4.1.27/include/asm-generic/ |
H A D | mm_hooks.h | 10 struct mm_struct *mm) arch_dup_mmap() 14 static inline void arch_exit_mmap(struct mm_struct *mm) arch_exit_mmap() argument 18 static inline void arch_unmap(struct mm_struct *mm, arch_unmap() argument 24 static inline void arch_bprm_mm_init(struct mm_struct *mm, arch_bprm_mm_init() argument 9 arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) arch_dup_mmap() argument
|
H A D | 4level-fixup.h | 14 #define pmd_alloc(mm, pud, address) \ 15 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \ 18 #define pud_alloc(mm, pgd, address) (pgd) 26 #define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd) 32 #define pud_free(mm, x) do { } while (0)
|
H A D | mmu_context.h | 14 static inline void enter_lazy_tlb(struct mm_struct *mm, enter_lazy_tlb() argument 20 struct mm_struct *mm) init_new_context() 25 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument 30 struct mm_struct *mm) deactivate_mm() 19 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument 29 deactivate_mm(struct task_struct *task, struct mm_struct *mm) deactivate_mm() argument
|
/linux-4.1.27/arch/cris/mm/ |
H A D | tlb.c | 2 * linux/arch/cris/mm/tlb.c 16 /* The TLB can host up to 64 different mm contexts at the same time. 19 * of which mm we have assigned to which page_id, so that we know when 36 alloc_context(struct mm_struct *mm) alloc_context() argument 40 D(printk("tlb: alloc context %d (%p)\n", map_replace_ptr, mm)); alloc_context() 42 /* did we replace an mm ? */ alloc_context() 47 /* throw out any TLB entries belonging to the mm we replace alloc_context() 57 mm->context.page_id = map_replace_ptr; alloc_context() 58 page_id_map[map_replace_ptr] = mm; alloc_context() 67 * if needed, get a new MMU context for the mm. otherwise nothing is done. 71 get_mmu_context(struct mm_struct *mm) get_mmu_context() argument 73 if(mm->context.page_id == NO_CONTEXT) get_mmu_context() 74 alloc_context(mm); get_mmu_context() 78 * destroying the mm itself. this is only called when the last user of the mm 86 destroy_context(struct mm_struct *mm) destroy_context() argument 88 if(mm->context.page_id != NO_CONTEXT) { destroy_context() 89 D(printk("destroy_context %d (%p)\n", mm->context.page_id, mm)); destroy_context() 90 flush_tlb_mm(mm); /* TODO this might be redundant ? */ destroy_context() 91 page_id_map[mm->context.page_id] = NULL; destroy_context()
|
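alloc_context() above keeps a 64-entry map from hardware page_id to mm and recycles entries round-robin, evicting whichever mm currently owns the reused slot. A self-contained toy version of that eviction policy (the 64-entry limit comes from the file; everything else is invented for illustration):

#include <stdio.h>
#include <stddef.h>

/* Toy model of cris context recycling: NUM_IDS hardware contexts are
 * handed out round-robin; the previous owner of a reused slot simply
 * loses its context and must re-allocate on next use. */
#define NUM_IDS 64
#define NO_CTX  (-1)

struct toy_mm { int page_id; };

static struct toy_mm *id_map[NUM_IDS];
static int replace_ptr;

static void toy_alloc_context(struct toy_mm *mm)
{
        struct toy_mm *old = id_map[replace_ptr];

        if (old)                        /* evict the previous owner of this slot */
                old->page_id = NO_CTX;  /* real code also flushes its TLB entries */

        mm->page_id = replace_ptr;
        id_map[replace_ptr] = mm;

        if (++replace_ptr == NUM_IDS)   /* wrap the round-robin pointer */
                replace_ptr = 0;
}

static int toy_get_context(struct toy_mm *mm)
{
        if (mm->page_id == NO_CTX)
                toy_alloc_context(mm);
        return mm->page_id;
}

int main(void)
{
        struct toy_mm m = { NO_CTX };

        printf("first id: %d\n", toy_get_context(&m));
        return 0;
}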
/linux-4.1.27/arch/arm/mm/ |
H A D | pgd.c | 2 * linux/arch/arm/mm/pgd.c 10 #include <linux/mm.h> 20 #include "mm.h" 33 pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument 59 new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR), pgd_alloc() 64 new_pmd = pmd_alloc(mm, new_pud, 0); pgd_alloc() 75 new_pud = pud_alloc(mm, new_pgd, 0); pgd_alloc() 79 new_pmd = pmd_alloc(mm, new_pud, 0); pgd_alloc() 83 new_pte = pte_alloc_map(mm, NULL, new_pmd, 0); pgd_alloc() 99 pmd_free(mm, new_pmd); pgd_alloc() 100 mm_dec_nr_pmds(mm); pgd_alloc() 102 pud_free(mm, new_pud); pgd_alloc() 109 void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) pgd_free() argument 133 pte_free(mm, pte); pgd_free() 134 atomic_long_dec(&mm->nr_ptes); pgd_free() 137 pmd_free(mm, pmd); pgd_free() 138 mm_dec_nr_pmds(mm); pgd_free() 141 pud_free(mm, pud); pgd_free() 157 pmd_free(mm, pmd); pgd_free() 158 mm_dec_nr_pmds(mm); pgd_free() 160 pud_free(mm, pud); pgd_free()
|
H A D | mmap.c | 2 * linux/arch/arm/mm/mmap.c 5 #include <linux/mm.h> 58 struct mm_struct *mm = current->mm; arch_get_unmapped_area() local 90 vma = find_vma(mm, addr); arch_get_unmapped_area() 98 info.low_limit = mm->mmap_base; arch_get_unmapped_area() 111 struct mm_struct *mm = current->mm; arch_get_unmapped_area_topdown() local 141 vma = find_vma(mm, addr); arch_get_unmapped_area_topdown() 150 info.high_limit = mm->mmap_base; arch_get_unmapped_area_topdown() 164 info.low_limit = mm->mmap_base; arch_get_unmapped_area_topdown() 182 void arch_pick_mmap_layout(struct mm_struct *mm) arch_pick_mmap_layout() argument 190 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; arch_pick_mmap_layout() 191 mm->get_unmapped_area = arch_get_unmapped_area; arch_pick_mmap_layout() 193 mm->mmap_base = mmap_base(random_factor); arch_pick_mmap_layout() 194 mm->get_unmapped_area = arch_get_unmapped_area_topdown; arch_pick_mmap_layout()
|
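Both arch_get_unmapped_area() variants above fill in a struct vm_unmapped_area_info and let the generic vm_unmapped_area() walker do the search. A hedged sketch of the bottom-up case (the limits and alignment shown are placeholders, not values from mmap.c; the struct fields and VM_UNMAPPED_AREA_TOPDOWN flag are the generic 4.1-era API):

/* Sketch: asking the generic allocator for a free range, as the two
 * arch_get_unmapped_area() variants above do.  The concrete limits and
 * the zero alignment mask are placeholders. */
static unsigned long find_free_range(struct mm_struct *mm, unsigned long len)
{
        struct vm_unmapped_area_info info;

        info.flags = 0;                 /* 0 = bottom-up; or VM_UNMAPPED_AREA_TOPDOWN */
        info.length = len;
        info.low_limit = mm->mmap_base; /* start searching at the mmap base */
        info.high_limit = TASK_SIZE;
        info.align_mask = 0;            /* no extra alignment constraint */
        info.align_offset = 0;
        /* on failure the return value encodes a negative errno */
        return vm_unmapped_area(&info);
}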
/linux-4.1.27/arch/arm64/mm/ |
H A D | context.c | 2 * Based on arch/arm/mm/context.c 22 #include <linux/mm.h> 41 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) __init_new_context() argument 43 mm->context.id = 0; __init_new_context() 44 raw_spin_lock_init(&mm->context.id_lock); __init_new_context() 58 static void set_mm_context(struct mm_struct *mm, unsigned int asid) set_mm_context() argument 64 * mm->context.id could be set from different CPUs during the set_mm_context() 66 * mm->context.id_lock has to be IRQ-safe. set_mm_context() 68 raw_spin_lock_irqsave(&mm->context.id_lock, flags); set_mm_context() 69 if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) { set_mm_context() 72 * mm_cpumask(mm). set_mm_context() 74 mm->context.id = asid; set_mm_context() 75 cpumask_clear(mm_cpumask(mm)); set_mm_context() 77 raw_spin_unlock_irqrestore(&mm->context.id_lock, flags); set_mm_context() 80 * Set the mm_cpumask(mm) bit for the current CPU. set_mm_context() 82 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); set_mm_context() 93 struct mm_struct *mm = current->active_mm; reset_context() local 100 if (mm == &init_mm) reset_context() 107 set_mm_context(mm, asid); reset_context() 110 cpu_switch_mm(mm->pgd, mm); reset_context() 115 static inline void set_mm_context(struct mm_struct *mm, unsigned int asid) set_mm_context() argument 117 mm->context.id = asid; set_mm_context() 118 cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); set_mm_context() 123 void __new_context(struct mm_struct *mm) __new_context() argument 134 if (!unlikely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) { __new_context() 135 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); __new_context() 141 * At this point, it is guaranteed that the current mm (with an old __new_context() 165 set_mm_context(mm, asid); __new_context()
|
H A D | extable.c | 2 * Based on arch/arm/mm/extable.c
|
/linux-4.1.27/arch/um/include/asm/ |
H A D | tlbflush.h | 9 #include <linux/mm.h> 14 * - flush_tlb() flushes the current mm struct TLBs 16 * - flush_tlb_mm(mm) flushes the specified mm context TLB's 23 extern void flush_tlb_mm(struct mm_struct *mm);
|
H A D | mmu_context.h | 12 extern void uml_setup_stubs(struct mm_struct *mm); 16 static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) arch_dup_mmap() argument 18 uml_setup_stubs(mm); arch_dup_mmap() 20 extern void arch_exit_mmap(struct mm_struct *mm); arch_unmap() 21 static inline void arch_unmap(struct mm_struct *mm, arch_unmap() argument 26 static inline void arch_bprm_mm_init(struct mm_struct *mm, arch_bprm_mm_init() argument 34 #define deactivate_mm(tsk,mm) do { } while (0) 42 * when the new ->mm is used for the first time. activate_mm() 63 static inline void enter_lazy_tlb(struct mm_struct *mm, enter_lazy_tlb() argument 68 extern int init_new_context(struct task_struct *task, struct mm_struct *mm); 70 extern void destroy_context(struct mm_struct *mm);
|
H A D | pgalloc.h | 11 #include <linux/mm.h> 13 #define pmd_populate_kernel(mm, pmd, pte) \ 16 #define pmd_populate(mm, pmd, pte) \ 26 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); 31 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 36 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument 50 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
|
H A D | tlb.h | 12 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) 14 /* struct mmu_gather is an opaque type used by the mm code for passing around 18 struct mm_struct *mm; member in struct:mmu_gather 22 unsigned int fullmm; /* non-zero means full mm flush */ 48 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) tlb_gather_mmu() argument 50 tlb->mm = mm; tlb_gather_mmu() 58 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, 64 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end); tlb_flush_mmu_tlbonly() 132 #define tlb_migrate_finish(mm) do {} while (0)
|
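tlb.h above defines the mmu_gather structure that the core mm code threads through unmap paths: gather first, flush once at the end. A hedged sketch of the caller-side pattern (the helper name and the elided teardown step are assumptions; the gather/finish calls match the 4.1-era signatures visible above):

/* Sketch of the mmu_gather lifecycle used by unmap paths.  The helper
 * name and the "zap the range" middle step are placeholders. */
static void zap_example(struct mm_struct *mm,
                        unsigned long start, unsigned long end)
{
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, start, end);   /* begin batching */

        /* ... page-table teardown goes here; freed pages and cleared
         * PTEs are only queued, not flushed, while gathering ... */

        tlb_finish_mmu(&tlb, start, end);       /* one TLB flush for the lot */
}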
/linux-4.1.27/arch/unicore32/include/asm/ |
H A D | mmu_context.h | 17 #include <linux/mm.h> 24 #define init_new_context(tsk, mm) 0 26 #define destroy_context(mm) do { } while (0) 31 * mm: describes the currently active mm context 35 * tsk->mm will be NULL 38 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 43 * This is the actual mm switch as far as the scheduler 45 * calling the CPU specific function when the mm hasn't 58 #define deactivate_mm(tsk, mm) do { } while (0) 66 * (the macro is used as remove_vma() is static to mm/mmap.c) 68 #define arch_exit_mmap(mm) \ 70 struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \ 76 mm->mmap = NULL; \ 77 rb_erase(&high_vma->vm_rb, &mm->mm_rb); \ 78 vmacache_invalidate(mm); \ 79 mm->map_count--; \ 85 struct mm_struct *mm) arch_dup_mmap() 89 static inline void arch_unmap(struct mm_struct *mm, arch_unmap() argument 95 static inline void arch_bprm_mm_init(struct mm_struct *mm, arch_bprm_mm_init() argument 84 arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) arch_dup_mmap() argument
|
H A D | pgalloc.h | 25 extern pgd_t *get_pgd_slow(struct mm_struct *mm); 26 extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); 28 #define pgd_alloc(mm) get_pgd_slow(mm) 29 #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) 37 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) pte_alloc_one_kernel() argument 49 pte_alloc_one(struct mm_struct *mm, unsigned long addr) pte_alloc_one() argument 70 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 76 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument 90 * of the mm address space. 93 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) pmd_populate_kernel() argument 105 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) pmd_populate() argument
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/core/ |
H A D | mm.c | 24 #include <core/mm.h> 26 #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \ 30 nvkm_mm_dump(struct nvkm_mm *mm, const char *header) nvkm_mm_dump() argument 36 list_for_each_entry(node, &mm->nodes, nl_entry) { nvkm_mm_dump() 41 list_for_each_entry(node, &mm->free, fl_entry) { nvkm_mm_dump() 48 nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis) nvkm_mm_free() argument 72 list_for_each_entry(prev, &mm->free, fl_entry) { nvkm_mm_free() 86 region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size) region_head() argument 111 nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, nvkm_mm_head() argument 121 list_for_each_entry(this, &mm->free, fl_entry) { nvkm_mm_head() 131 s = roundup(s, mm->block_size); nvkm_mm_head() 135 e = rounddown(e, mm->block_size); nvkm_mm_head() 143 if (splitoff && !region_head(mm, this, splitoff)) nvkm_mm_head() 146 this = region_head(mm, this, min(size_max, e - s)); nvkm_mm_head() 160 region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size) region_tail() argument 185 nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, nvkm_mm_tail() argument 193 list_for_each_entry_reverse(this, &mm->free, fl_entry) { nvkm_mm_tail() 204 s = roundup(s, mm->block_size); nvkm_mm_tail() 208 e = rounddown(e, mm->block_size); nvkm_mm_tail() 221 if (c && !region_tail(mm, this, c)) nvkm_mm_tail() 224 this = region_tail(mm, this, a); nvkm_mm_tail() 238 nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block) nvkm_mm_init() argument 243 if (nvkm_mm_initialised(mm)) { nvkm_mm_init() 244 prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry); nvkm_mm_init() 253 list_add_tail(&node->nl_entry, &mm->nodes); nvkm_mm_init() 255 BUG_ON(block != mm->block_size); nvkm_mm_init() 257 INIT_LIST_HEAD(&mm->nodes); nvkm_mm_init() 258 INIT_LIST_HEAD(&mm->free); nvkm_mm_init() 259 mm->block_size = block; nvkm_mm_init() 260 mm->heap_nodes = 0; nvkm_mm_init() 268 node->offset = roundup(offset, mm->block_size); nvkm_mm_init() 269 node->length = rounddown(offset + length, mm->block_size); nvkm_mm_init() 273 list_add_tail(&node->nl_entry, &mm->nodes); nvkm_mm_init() 274 list_add_tail(&node->fl_entry, &mm->free); nvkm_mm_init() 275 node->heap = ++mm->heap_nodes; nvkm_mm_init() 280 nvkm_mm_fini(struct nvkm_mm *mm) nvkm_mm_fini() argument 285 if (!nvkm_mm_initialised(mm)) nvkm_mm_fini() 288 list_for_each_entry(node, &mm->nodes, nl_entry) { nvkm_mm_fini() 290 if (++nodes > mm->heap_nodes) { nvkm_mm_fini() 291 nvkm_mm_dump(mm, "mm not clean!"); nvkm_mm_fini() 297 list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) { nvkm_mm_fini() 302 mm->heap_nodes = 0; nvkm_mm_fini()
|
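nvkm/core/mm.c above is a small block allocator: nvkm_mm_init() seeds a span, nvkm_mm_head()/nvkm_mm_tail() carve nodes from the front or back of free regions, and nvkm_mm_free() coalesces them back. A hedged usage sketch follows; the span geometry, the type tag of 1 and the trailing (align, &node) arguments of nvkm_mm_head() are assumptions based on typical callers, while heap number 1 is the first region created by nvkm_mm_init() (heaps count from 1 in this file):

/* Hedged sketch of an nvkm_mm allocator round-trip; values are
 * illustrative, and the nvkm_mm_head() trailing arguments are assumed. */
static int nvkm_mm_example(void)
{
        struct nvkm_mm mm;
        struct nvkm_mm_node *node = NULL;
        int ret;

        /* one span of 0x100000 units, managed in 0x1000-unit blocks
         * (the unit itself is whatever the user of the allocator picks) */
        ret = nvkm_mm_init(&mm, 0, 0x100000, 0x1000);
        if (ret)
                return ret;

        /* carve an 0x8000-unit node from the front of heap 1 */
        ret = nvkm_mm_head(&mm, 1, 1, 0x8000, 0x8000, 1, &node);
        if (ret == 0)
                nvkm_mm_free(&mm, &node);       /* release it, coalescing free space */

        nvkm_mm_fini(&mm);
        return ret;
}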
/linux-4.1.27/fs/proc/ |
H A D | task_nommu.c | 2 #include <linux/mm.h> 18 void task_mem(struct seq_file *m, struct mm_struct *mm) task_mem() argument 25 down_read(&mm->mmap_sem); task_mem() 26 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { task_mem() 39 if (atomic_read(&mm->mm_count) > 1 || task_mem() 49 if (atomic_read(&mm->mm_count) > 1) task_mem() 50 sbytes += kobjsize(mm); task_mem() 52 bytes += kobjsize(mm); task_mem() 77 up_read(&mm->mmap_sem); task_mem() 80 unsigned long task_vsize(struct mm_struct *mm) task_vsize() argument 86 down_read(&mm->mmap_sem); task_vsize() 87 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { task_vsize() 91 up_read(&mm->mmap_sem); task_vsize() 95 unsigned long task_statm(struct mm_struct *mm, task_statm() argument 102 unsigned long size = kobjsize(mm); task_statm() 104 down_read(&mm->mmap_sem); task_statm() 105 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { task_statm() 115 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) task_statm() 117 *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK)) task_statm() 119 up_read(&mm->mmap_sem); task_statm() 151 struct mm_struct *mm = vma->vm_mm; nommu_vma_show() local 184 } else if (mm) { nommu_vma_show() 193 if (!is_pid || (vma->vm_start <= mm->start_stack && nommu_vma_show() 194 vma->vm_end >= mm->start_stack)) nommu_vma_show() 229 struct mm_struct *mm; m_start() local 233 /* pin the task and mm whilst we play with them */ m_start() 238 mm = priv->mm; m_start() 239 if (!mm || !atomic_inc_not_zero(&mm->mm_users)) m_start() 242 down_read(&mm->mmap_sem); m_start() 244 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) m_start() 248 up_read(&mm->mmap_sem); m_start() 249 mmput(mm); m_start() 258 up_read(&priv->mm->mmap_sem); m_stop() 259 mmput(priv->mm); m_stop() 299 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); maps_open() 300 if (IS_ERR(priv->mm)) { maps_open() 301 int err = PTR_ERR(priv->mm); maps_open() 316 if (priv->mm) map_release() 317 mmdrop(priv->mm); map_release()
|
/linux-4.1.27/arch/s390/mm/ |
H A D | pgtable.c | 10 #include <linux/mm.h> 34 unsigned long *crst_table_alloc(struct mm_struct *mm) crst_table_alloc() argument 43 void crst_table_free(struct mm_struct *mm, unsigned long *table) crst_table_free() argument 50 struct mm_struct *mm = arg; __crst_table_upgrade() local 52 if (current->active_mm == mm) { __crst_table_upgrade() 54 set_user_asce(mm); __crst_table_upgrade() 59 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) crst_table_upgrade() argument 68 table = crst_table_alloc(mm); crst_table_upgrade() 71 spin_lock_bh(&mm->page_table_lock); crst_table_upgrade() 72 if (mm->context.asce_limit < limit) { crst_table_upgrade() 73 pgd = (unsigned long *) mm->pgd; crst_table_upgrade() 74 if (mm->context.asce_limit <= (1UL << 31)) { crst_table_upgrade() 76 mm->context.asce_limit = 1UL << 42; crst_table_upgrade() 77 mm->context.asce_bits = _ASCE_TABLE_LENGTH | crst_table_upgrade() 82 mm->context.asce_limit = 1UL << 53; crst_table_upgrade() 83 mm->context.asce_bits = _ASCE_TABLE_LENGTH | crst_table_upgrade() 88 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); crst_table_upgrade() 89 mm->pgd = (pgd_t *) table; crst_table_upgrade() 90 mm->task_size = mm->context.asce_limit; crst_table_upgrade() 94 spin_unlock_bh(&mm->page_table_lock); crst_table_upgrade() 96 crst_table_free(mm, table); crst_table_upgrade() 97 if (mm->context.asce_limit < limit) crst_table_upgrade() 100 on_each_cpu(__crst_table_upgrade, mm, 0); crst_table_upgrade() 104 void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) crst_table_downgrade() argument 108 if (current->active_mm == mm) { crst_table_downgrade() 110 __tlb_flush_mm(mm); crst_table_downgrade() 112 while (mm->context.asce_limit > limit) { crst_table_downgrade() 113 pgd = mm->pgd; crst_table_downgrade() 116 mm->context.asce_limit = 1UL << 42; crst_table_downgrade() 117 mm->context.asce_bits = _ASCE_TABLE_LENGTH | crst_table_downgrade() 122 mm->context.asce_limit = 1UL << 31; crst_table_downgrade() 123 mm->context.asce_bits = _ASCE_TABLE_LENGTH | crst_table_downgrade() 130 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); crst_table_downgrade() 131 mm->task_size = mm->context.asce_limit; crst_table_downgrade() 132 crst_table_free(mm, (unsigned long *) pgd); crst_table_downgrade() 134 if (current->active_mm == mm) crst_table_downgrade() 135 set_user_asce(mm); crst_table_downgrade() 142 * @mm: pointer to the parent mm_struct 147 struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit) gmap_alloc() argument 178 gmap->mm = mm; gmap_alloc() 190 down_write(&mm->mmap_sem); gmap_alloc() 191 list_add(&gmap->list, &mm->context.gmap_list); gmap_alloc() 192 up_write(&mm->mmap_sem); gmap_alloc() 205 __tlb_flush_asce(gmap->mm, gmap->asce); gmap_flush_tlb() 244 __tlb_flush_asce(gmap->mm, gmap->asce); gmap_free() 253 down_write(&gmap->mm->mmap_sem); gmap_free() 255 up_write(&gmap->mm->mmap_sem); gmap_free() 295 spin_lock(&gmap->mm->page_table_lock); gmap_alloc_table() 303 spin_unlock(&gmap->mm->page_table_lock); gmap_alloc_table() 384 down_write(&gmap->mm->mmap_sem); gmap_unmap_segment() 387 up_write(&gmap->mm->mmap_sem); gmap_unmap_segment() 416 down_write(&gmap->mm->mmap_sem); gmap_map_segment() 426 up_write(&gmap->mm->mmap_sem); gmap_map_segment() 444 * The mmap_sem of the mm that belongs to the address space must be held 470 down_read(&gmap->mm->mmap_sem); gmap_translate() 472 up_read(&gmap->mm->mmap_sem); gmap_translate() 483 static void gmap_unlink(struct mm_struct *mm, unsigned long *table, gmap_unlink() argument 
489 list_for_each_entry(gmap, &mm->context.gmap_list, list) { gmap_unlink() 504 * The mmap_sem of the mm that belongs to the address space must be held 509 struct mm_struct *mm; __gmap_link() local 544 /* Walk the parent mm page table */ __gmap_link() 545 mm = gmap->mm; __gmap_link() 546 pgd = pgd_offset(mm, vmaddr); __gmap_link() 559 ptl = pmd_lock(mm, pmd); __gmap_link() 589 down_read(&gmap->mm->mmap_sem); gmap_fault() 595 if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) { gmap_fault() 601 up_read(&gmap->mm->mmap_sem); gmap_fault() 606 static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm) gmap_zap_swap_entry() argument 609 dec_mm_counter(mm, MM_SWAPENTS); gmap_zap_swap_entry() 614 dec_mm_counter(mm, MM_ANONPAGES); gmap_zap_swap_entry() 616 dec_mm_counter(mm, MM_FILEPAGES); gmap_zap_swap_entry() 638 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); __gmap_zap() 650 gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm); __gmap_zap() 651 pte_clear(gmap->mm, vmaddr, ptep); __gmap_zap() 664 down_read(&gmap->mm->mmap_sem); gmap_discard() 674 /* Find vma in the parent mm */ gmap_discard() 675 vma = find_vma(gmap->mm, vmaddr); gmap_discard() 679 up_read(&gmap->mm->mmap_sem); gmap_discard() 731 down_read(&gmap->mm->mmap_sem); gmap_ipte_notify() 740 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) { gmap_ipte_notify() 748 ptep = get_locked_pte(gmap->mm, addr, &ptl); gmap_ipte_notify() 761 up_read(&gmap->mm->mmap_sem); gmap_ipte_notify() 768 * @mm: pointer to the process mm_struct 775 void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte) gmap_do_ipte_notify() argument 785 list_for_each_entry(gmap, &mm->context.gmap_list, list) { gmap_do_ipte_notify() 803 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) page_table_alloc_pgste() argument 832 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, set_guest_storage_key() argument 839 down_read(&mm->mmap_sem); set_guest_storage_key() 841 ptep = get_locked_pte(mm, addr, &ptl); set_guest_storage_key() 843 up_read(&mm->mmap_sem); set_guest_storage_key() 849 if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) { set_guest_storage_key() 850 up_read(&mm->mmap_sem); set_guest_storage_key() 880 up_read(&mm->mmap_sem); set_guest_storage_key() 885 unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr) get_guest_storage_key() argument 893 down_read(&mm->mmap_sem); get_guest_storage_key() 894 ptep = get_locked_pte(mm, addr, &ptl); get_guest_storage_key() 896 up_read(&mm->mmap_sem); get_guest_storage_key() 919 up_read(&mm->mmap_sem); get_guest_storage_key() 965 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) page_table_alloc_pgste() argument 974 static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table, gmap_unlink() argument 995 unsigned long *page_table_alloc(struct mm_struct *mm) page_table_alloc() argument 1001 if (mm_alloc_pgste(mm)) page_table_alloc() 1002 return page_table_alloc_pgste(mm); page_table_alloc() 1004 spin_lock_bh(&mm->context.list_lock); page_table_alloc() 1006 if (!list_empty(&mm->context.pgtable_list)) { page_table_alloc() 1007 page = list_first_entry(&mm->context.pgtable_list, page_table_alloc() 1014 spin_unlock_bh(&mm->context.list_lock); page_table_alloc() 1025 spin_lock_bh(&mm->context.list_lock); page_table_alloc() 1026 list_add(&page->lru, &mm->context.pgtable_list); page_table_alloc() 1034 spin_unlock_bh(&mm->context.list_lock); page_table_alloc() 1038 void 
page_table_free(struct mm_struct *mm, unsigned long *table) page_table_free() argument 1048 spin_lock_bh(&mm->context.list_lock); page_table_free() 1053 list_add(&page->lru, &mm->context.pgtable_list); page_table_free() 1054 spin_unlock_bh(&mm->context.list_lock); page_table_free() 1080 struct mm_struct *mm; page_table_free_rcu() local 1084 mm = tlb->mm; page_table_free_rcu() 1087 gmap_unlink(mm, table, vmaddr); page_table_free_rcu() 1093 spin_lock_bh(&mm->context.list_lock); page_table_free_rcu() 1098 list_add_tail(&page->lru, &mm->context.pgtable_list); page_table_free_rcu() 1099 spin_unlock_bh(&mm->context.list_lock); page_table_free_rcu() 1161 tlb->mm->context.flush_mm = 1; tlb_remove_table() 1166 __tlb_flush_mm_lazy(tlb->mm); tlb_remove_table() 1186 static inline void thp_split_mm(struct mm_struct *mm) thp_split_mm() argument 1190 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) { thp_split_mm() 1195 mm->def_flags |= VM_NOHUGEPAGE; thp_split_mm() 1198 static inline void thp_split_mm(struct mm_struct *mm) thp_split_mm() argument 1208 struct mm_struct *mm = current->mm; s390_enable_sie() local 1211 if (mm_has_pgste(mm)) s390_enable_sie() 1214 if (!mm_alloc_pgste(mm)) s390_enable_sie() 1216 down_write(&mm->mmap_sem); s390_enable_sie() 1217 mm->context.has_pgste = 1; s390_enable_sie() 1219 thp_split_mm(mm); s390_enable_sie() 1220 up_write(&mm->mmap_sem); s390_enable_sie() 1242 ptep_flush_direct(walk->mm, addr, pte); __s390_enable_skey() 1258 struct mm_struct *mm = current->mm; s390_enable_skey() local 1262 down_write(&mm->mmap_sem); s390_enable_skey() 1263 if (mm_use_skey(mm)) s390_enable_skey() 1266 mm->context.use_skey = 1; s390_enable_skey() 1267 for (vma = mm->mmap; vma; vma = vma->vm_next) { s390_enable_skey() 1270 mm->context.use_skey = 0; s390_enable_skey() 1275 mm->def_flags &= ~VM_MERGEABLE; s390_enable_skey() 1277 walk.mm = mm; s390_enable_skey() 1281 up_write(&mm->mmap_sem); s390_enable_skey() 1300 void s390_reset_cmma(struct mm_struct *mm) s390_reset_cmma() argument 1304 down_write(&mm->mmap_sem); s390_reset_cmma() 1305 walk.mm = mm; s390_reset_cmma() 1307 up_write(&mm->mmap_sem); s390_reset_cmma() 1320 pte = get_locked_pte(gmap->mm, address, &ptl); gmap_test_and_clear_dirty() 1324 if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte)) gmap_test_and_clear_dirty() 1374 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_trans_huge_deposit() argument 1379 assert_spin_locked(pmd_lockptr(mm, pmdp)); pgtable_trans_huge_deposit() 1382 if (!pmd_huge_pte(mm, pmdp)) pgtable_trans_huge_deposit() 1385 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); pgtable_trans_huge_deposit() 1386 pmd_huge_pte(mm, pmdp) = pgtable; pgtable_trans_huge_deposit() 1389 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) pgtable_trans_huge_withdraw() argument 1395 assert_spin_locked(pmd_lockptr(mm, pmdp)); pgtable_trans_huge_withdraw() 1398 pgtable = pmd_huge_pte(mm, pmdp); pgtable_trans_huge_withdraw() 1401 pmd_huge_pte(mm, pmdp) = NULL; pgtable_trans_huge_withdraw() 1403 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; pgtable_trans_huge_withdraw()
|
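The s390 pgtable.c entry above ends with pgtable_trans_huge_deposit()/pgtable_trans_huge_withdraw(), which stash a preallocated page table on a per-pmd list and pop it back when the huge mapping is later split. As a hedged, user-space illustration of that deposit/withdraw idea only (this is not the s390 code; pgtable_stub, depot_push and depot_pop are invented names), a LIFO depot over a singly linked list can be sketched as:

    #include <stdio.h>

    /* Hypothetical stand-in for a preallocated page table. */
    struct pgtable_stub {
            struct pgtable_stub *next;
            int id;
    };

    /* Deposit: push a preallocated table onto the per-owner depot (LIFO). */
    static void depot_push(struct pgtable_stub **depot, struct pgtable_stub *pt)
    {
            pt->next = *depot;
            *depot = pt;
    }

    /* Withdraw: pop the most recently deposited table, or NULL if empty. */
    static struct pgtable_stub *depot_pop(struct pgtable_stub **depot)
    {
            struct pgtable_stub *pt = *depot;
            if (pt)
                    *depot = pt->next;
            return pt;
    }

    int main(void)
    {
            struct pgtable_stub *depot = NULL;
            struct pgtable_stub a = { .id = 1 }, b = { .id = 2 };

            depot_push(&depot, &a);
            depot_push(&depot, &b);

            for (struct pgtable_stub *pt; (pt = depot_pop(&depot)) != NULL; )
                    printf("withdrew table %d\n", pt->id);
            return 0;
    }

The point of the pattern is that the withdraw path can never fail: the table was allocated up front, at a time when allocation was still allowed to sleep.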
H A D | mmap.c | 26 #include <linux/mm.h> 92 struct mm_struct *mm = current->mm; arch_get_unmapped_area() local 105 vma = find_vma(mm, addr); arch_get_unmapped_area() 117 info.low_limit = mm->mmap_base; arch_get_unmapped_area() 130 struct mm_struct *mm = current->mm; arch_get_unmapped_area_topdown() local 145 vma = find_vma(mm, addr); arch_get_unmapped_area_topdown() 158 info.high_limit = mm->mmap_base; arch_get_unmapped_area_topdown() 187 return crst_table_upgrade(current->mm, 1UL << 53); s390_mmap_check() 195 struct mm_struct *mm = current->mm; s390_get_unmapped_area() local 204 rc = crst_table_upgrade(mm, 1UL << 53); s390_get_unmapped_area() 217 struct mm_struct *mm = current->mm; s390_get_unmapped_area_topdown() local 226 rc = crst_table_upgrade(mm, 1UL << 53); s390_get_unmapped_area_topdown() 238 void arch_pick_mmap_layout(struct mm_struct *mm) arch_pick_mmap_layout() argument 250 mm->mmap_base = mmap_base_legacy(random_factor); arch_pick_mmap_layout() 251 mm->get_unmapped_area = s390_get_unmapped_area; arch_pick_mmap_layout() 253 mm->mmap_base = mmap_base(random_factor); arch_pick_mmap_layout() 254 mm->get_unmapped_area = s390_get_unmapped_area_topdown; arch_pick_mmap_layout()
|
/linux-4.1.27/arch/mn10300/include/asm/ |
H A D | mmu_context.h | 37 #define enter_lazy_tlb(mm, tsk) do {} while (0) 39 static inline void cpu_ran_vm(int cpu, struct mm_struct *mm) cpu_ran_vm() argument 42 cpumask_set_cpu(cpu, mm_cpumask(mm)); cpu_ran_vm() 46 static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm) cpu_maybe_ran_vm() argument 49 return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm)); cpu_maybe_ran_vm() 57 #define mm_context(mm) (mm->context.tlbpid[smp_processor_id()]) 61 * @mm: The userspace VM context being set up 63 static inline unsigned long allocate_mmu_context(struct mm_struct *mm) allocate_mmu_context() argument 78 mm_context(mm) = mc; allocate_mmu_context() 85 static inline unsigned long get_mmu_context(struct mm_struct *mm) get_mmu_context() argument 89 if (mm) { get_mmu_context() 91 mc = mm_context(mm); get_mmu_context() 95 mc = allocate_mmu_context(mm); get_mmu_context() 104 struct mm_struct *mm) init_new_context() 109 mm->context.tlbpid[i] = MMU_NO_CONTEXT; init_new_context() 114 * after we have set current->mm to a new value, this activates the context for 115 * the new mm so we see the new mappings. 117 static inline void activate_context(struct mm_struct *mm) activate_context() argument 119 PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK; activate_context() 123 #define init_new_context(tsk, mm) (0) 124 #define activate_context(mm) local_flush_tlb() 129 * destroy_context - Destroy mm context information 130 * @mm: The MM being destroyed. 135 #define destroy_context(mm) do {} while (0) 158 #define deactivate_mm(tsk, mm) do {} while (0) 103 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
|
H A D | tlbflush.h | 14 #include <linux/mm.h> 56 * @mm: The MM to flush for 60 void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr) local_flush_tlb_page() argument 70 cnx = mm->context.tlbpid[smp_processor_id()]; local_flush_tlb_page() 90 * - flush_tlb() flushes the current mm struct TLBs 92 * - flush_tlb_mm(mm) flushes the specified mm context TLB's 94 * - flush_tlb_range(mm, start, end) flushes a range of pages 95 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 123 static inline void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 149 static inline void flush_tlb_pgtables(struct mm_struct *mm, flush_tlb_pgtables() argument
|
/linux-4.1.27/arch/arm64/include/asm/ |
H A D | mmu_context.h | 35 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); 36 void __new_context(struct mm_struct *mm); 110 static inline void switch_new_context(struct mm_struct *mm) switch_new_context() argument 114 __new_context(mm); switch_new_context() 117 cpu_switch_mm(mm->pgd, mm); switch_new_context() 121 static inline void check_and_switch_context(struct mm_struct *mm, check_and_switch_context() argument 130 if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) check_and_switch_context() 136 cpu_switch_mm(mm->pgd, mm); check_and_switch_context() 149 switch_new_context(mm); check_and_switch_context() 152 #define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0) 153 #define destroy_context(mm) do { } while(0) 160 struct mm_struct *mm = current->mm; finish_arch_post_lock_switch() local 163 __new_context(mm); finish_arch_post_lock_switch() 166 cpu_switch_mm(mm->pgd, mm); finish_arch_post_lock_switch() 174 * mm: describes the currently active mm context 178 * tsk->mm will be NULL 181 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 186 * This is the actual mm switch as far as the scheduler 188 * calling the CPU specific function when the mm hasn't 210 #define deactivate_mm(tsk,mm) do { } while (0)
|
H A D | pgalloc.h | 33 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) pmd_alloc_one() argument 38 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument 44 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument 53 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) pud_alloc_one() argument 58 static inline void pud_free(struct mm_struct *mm, pud_t *pud) pud_free() argument 64 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) pgd_populate() argument 71 extern pgd_t *pgd_alloc(struct mm_struct *mm); 72 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); 75 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) pte_alloc_one_kernel() argument 81 pte_alloc_one(struct mm_struct *mm, unsigned long addr) pte_alloc_one() argument 98 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 104 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument 118 * of the mm address space. 121 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) pmd_populate_kernel() argument 130 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) pmd_populate() argument
|
/linux-4.1.27/arch/sh/include/asm/ |
H A D | pgalloc.h | 10 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); 13 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); 14 extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address); 15 extern void pmd_free(struct mm_struct *mm, pmd_t *pmd); 18 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument 24 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument 34 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument 40 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument 57 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 62 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
|
H A D | hugetlb.h | 9 static inline int is_hugepage_only_range(struct mm_struct *mm, is_hugepage_only_range() argument 29 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) { hugetlb_prefault_arch_hook() argument 40 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument 43 set_pte_at(mm, addr, ptep, pte); set_huge_pte_at() 46 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, huge_ptep_get_and_clear() argument 49 return ptep_get_and_clear(mm, addr, ptep); huge_ptep_get_and_clear() 67 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument 70 ptep_set_wrprotect(mm, addr, ptep); huge_ptep_set_wrprotect()
|
H A D | mmu_context.h | 38 #define cpu_context(cpu, mm) ((mm)->context.id[cpu]) 40 #define cpu_asid(cpu, mm) \ 41 (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK) 57 static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) get_mmu_context() argument 62 if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0) get_mmu_context() 90 cpu_context(cpu, mm) = asid_cache(cpu) = asid; get_mmu_context() 98 struct mm_struct *mm) init_new_context() 103 cpu_context(i, mm) = NO_CONTEXT; init_new_context() 109 * After we have set current->mm to a new value, this activates 110 * the context for the new mm so we see the new mappings. 112 static inline void activate_context(struct mm_struct *mm, unsigned int cpu) activate_context() argument 114 get_mmu_context(mm, cpu); activate_context() 115 set_asid(cpu_asid(cpu, mm)); activate_context() 134 #define deactivate_mm(tsk,mm) do { } while (0) 135 #define enter_lazy_tlb(mm,tsk) do { } while (0) 141 #define cpu_asid(cpu, mm) ({ (void)cpu; NO_CONTEXT; }) 97 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
|
H A D | tlbflush.h | 8 * - flush_tlb_mm(mm) flushes the specified mm context TLB's 14 extern void local_flush_tlb_mm(struct mm_struct *mm); 29 extern void flush_tlb_mm(struct mm_struct *mm); 39 #define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
|
H A D | tlb.h | 22 struct mm_struct *mm; member in struct:mmu_gather 39 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) tlb_gather_mmu() argument 41 tlb->mm = mm; tlb_gather_mmu() 53 flush_tlb_mm(tlb->mm); tlb_finish_mmu() 112 #define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) 113 #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) 114 #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) 116 #define tlb_migrate_finish(mm) do { } while (0)
|
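The sh tlb.h entry above shows the mmu_gather pattern: tlb_gather_mmu() records which mm is being torn down and tlb_finish_mmu() issues one flush_tlb_mm() at the end, rather than flushing after every page. A minimal user-space sketch of that gather-then-flush idea, with invented names (gather_begin/gather_remove/gather_finish) and an assumed 4 KiB page size, might look like:

    #include <stdio.h>

    /* Illustrative gather state: track the range touched, flush once at the end. */
    struct gather {
            unsigned long start, end;
            int need_flush;
    };

    static void gather_begin(struct gather *g)
    {
            g->start = ~0UL;
            g->end = 0;
            g->need_flush = 0;
    }

    /* Called for every "page" removed; only bookkeeping, no flush yet. */
    static void gather_remove(struct gather *g, unsigned long addr)
    {
            if (addr < g->start)
                    g->start = addr;
            if (addr + 4096 > g->end)
                    g->end = addr + 4096;
            g->need_flush = 1;
    }

    /* One flush covering everything that was gathered. */
    static void gather_finish(struct gather *g)
    {
            if (g->need_flush)
                    printf("flush [%#lx, %#lx)\n", g->start, g->end);
    }

    int main(void)
    {
            struct gather g;

            gather_begin(&g);
            gather_remove(&g, 0x1000);
            gather_remove(&g, 0x5000);
            gather_finish(&g);      /* single flush instead of one per page */
            return 0;
    }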
/linux-4.1.27/arch/parisc/include/asm/ |
H A D | mmu_context.h | 4 #include <linux/mm.h> 11 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 22 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument 24 BUG_ON(atomic_read(&mm->mm_users) != 1); init_new_context() 26 mm->context = alloc_sid(); init_new_context() 31 destroy_context(struct mm_struct *mm) destroy_context() argument 33 free_sid(mm->context); destroy_context() 34 mm->context = 0; destroy_context() 61 #define deactivate_mm(tsk,mm) do { } while (0) 67 * for a new mm created in the exec path. There's also activate_mm()
|
H A D | tlb.h | 6 flush_tlb_mm((tlb)->mm);\ 24 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) 25 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
|
H A D | pgalloc.h | 5 #include <linux/mm.h> 21 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument 46 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument 58 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) pgd_populate() argument 64 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_alloc_one() argument 73 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument 80 * done by generic mm code. pmd_free() 82 mm_inc_nr_pmds(mm); pmd_free() 97 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) 98 #define pmd_free(mm, x) do { } while (0) 99 #define pgd_populate(mm, pmd, pte) BUG() 104 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument 120 #define pmd_populate(mm, pmd, pte_page) \ 121 pmd_populate_kernel(mm, pmd, page_address(pte_page)) 125 pte_alloc_one(struct mm_struct *mm, unsigned long address) pte_alloc_one() argument 138 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) pte_alloc_one_kernel() argument 144 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 149 static inline void pte_free(struct mm_struct *mm, struct page *pte) pte_free() argument 152 pte_free_kernel(mm, page_address(pte)); pte_free()
|
H A D | traps.h | 11 /* mm/fault.c */
|
H A D | tlbflush.h | 6 #include <linux/mm.h> 50 static inline void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 52 BUG_ON(mm == &init_mm); /* Should never happen */ flush_tlb_mm() 68 if (mm) { flush_tlb_mm() 69 if (mm->context != 0) flush_tlb_mm() 70 free_sid(mm->context); flush_tlb_mm() 71 mm->context = alloc_sid(); flush_tlb_mm() 72 if (mm == current->active_mm) flush_tlb_mm() 73 load_context(mm->context); flush_tlb_mm()
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | pgalloc-32.h | 11 extern pgd_t *pgd_alloc(struct mm_struct *mm); 12 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); 18 /* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */ 19 #define pmd_free(mm, x) do { } while (0) 21 /* #define pgd_populate(mm, pmd, pte) BUG() */ 24 #define pmd_populate_kernel(mm, pmd, pte) \ 26 #define pmd_populate(mm, pmd, pte) \ 30 #define pmd_populate_kernel(mm, pmd, pte) \ 32 #define pmd_populate(mm, pmd, pte) \ 37 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); 38 extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); 40 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 45 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) pte_free() argument
|
H A D | copro.h | 18 int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, 21 int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb); 25 void copro_flush_all_slbs(struct mm_struct *mm); 27 static inline void copro_flush_all_slbs(struct mm_struct *mm) {} argument
|
H A D | tlbflush.h | 7 * - flush_tlb_mm(mm) flushes the specified mm context TLB's 9 * - local_flush_tlb_mm(mm, full) flushes the specified mm context on 41 extern void local_flush_tlb_mm(struct mm_struct *mm); 44 extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, 48 extern void flush_tlb_mm(struct mm_struct *mm); 50 extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, 53 #define flush_tlb_mm(mm) local_flush_tlb_mm(mm) 55 #define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i) 64 extern void flush_tlb_mm(struct mm_struct *mm); 75 static inline void local_flush_tlb_mm(struct mm_struct *mm) local_flush_tlb_mm() argument 77 flush_tlb_mm(mm); local_flush_tlb_mm() 96 struct mm_struct *mm; member in struct:ppc64_tlb_batch 134 static inline void local_flush_tlb_mm(struct mm_struct *mm) local_flush_tlb_mm() argument 138 static inline void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 168 extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, 170 extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
|
H A D | pgalloc-64.h | 44 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument 49 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument 58 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) pud_alloc_one() argument 64 static inline void pud_free(struct mm_struct *mm, pud_t *pud) pud_free() argument 69 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument 74 #define pmd_populate(mm, pmd, pte_page) \ 75 pmd_populate_kernel(mm, pmd, page_address(pte_page)) 76 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte)) 79 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument 85 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument 91 pte = pte_alloc_one_kernel(mm, address); pte_alloc_one() 102 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 107 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) pte_free() argument 175 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd) 177 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument 183 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument 194 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument 197 return (pte_t *)page_table_alloc(mm, address, 1); pte_alloc_one_kernel() 200 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument 203 return (pgtable_t)page_table_alloc(mm, address, 0); pte_alloc_one() 206 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 208 page_table_free(mm, (unsigned long *)pte, 1); pte_free_kernel() 211 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) pte_free() argument 213 page_table_free(mm, (unsigned long *)ptepage, 0); pte_free() 224 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) pmd_alloc_one() argument 230 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
|
H A D | page_64.h | 124 extern unsigned int get_slice_psize(struct mm_struct *mm, 127 extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); 128 extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start, 131 #define slice_mm_new_context(mm) ((mm)->context.id == MMU_NO_CONTEXT) 137 #define get_slice_psize(mm, addr) ((mm)->context.user_psize) 138 #define slice_set_user_psize(mm, psize) \ 140 (mm)->context.user_psize = (psize); \ 141 (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ 145 #define get_slice_psize(mm, addr) MMU_PAGE_64K 147 #define get_slice_psize(mm, addr) MMU_PAGE_4K 149 #define slice_set_user_psize(mm, psize) do { BUG(); } while(0) 152 #define slice_set_range_psize(mm, start, len, psize) \ 153 slice_set_user_psize((mm), (psize)) 154 #define slice_mm_new_context(mm) 1
|
H A D | mmu_context.h | 6 #include <linux/mm.h> 17 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 18 extern void destroy_context(struct mm_struct *mm); 21 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm); 35 extern int use_cop(unsigned long acop, struct mm_struct *mm); mmu_context_init() 36 extern void drop_cop(unsigned long acop, struct mm_struct *mm); mmu_context_init() 87 #define deactivate_mm(tsk,mm) do { } while (0) 90 * After we have set current->mm to a new value, this activates 91 * the context for the new mm so we see the new mappings. 103 static inline void enter_lazy_tlb(struct mm_struct *mm, enter_lazy_tlb() argument
|
/linux-4.1.27/arch/x86/include/asm/ |
H A D | pgalloc.h | 5 #include <linux/mm.h> /* for struct page */ 8 static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; } __paravirt_pgd_alloc() argument 13 #define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm) paravirt_pgd_free() 14 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {} paravirt_alloc_pte() argument 15 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {} paravirt_alloc_pmd() argument 16 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {} paravirt_alloc_pmd_clone() argument 19 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {} paravirt_release_pte() argument 34 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); paravirt_release_pud() 42 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 48 static inline void pte_free(struct mm_struct *mm, struct page *pte) pte_free() argument 62 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_populate_kernel() argument 65 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); pmd_populate_kernel() 69 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument 74 paravirt_alloc_pte(mm, pfn); pmd_populate() 81 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) pmd_alloc_one() argument 94 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument 110 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); 112 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument 114 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); pud_populate() 120 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) pgd_populate() argument 122 paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); pgd_populate() 126 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) pud_alloc_one() argument 131 static inline void pud_free(struct mm_struct *mm, pud_t *pud) pud_free() argument
|
H A D | tlb.h | 11 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ 13 flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
|
H A D | hugetlb.h | 9 static inline int is_hugepage_only_range(struct mm_struct *mm, is_hugepage_only_range() argument 30 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) { hugetlb_prefault_arch_hook() argument 41 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument 44 set_pte_at(mm, addr, ptep, pte); set_huge_pte_at() 47 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, huge_ptep_get_and_clear() argument 50 return ptep_get_and_clear(mm, addr, ptep); huge_ptep_get_and_clear() 69 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument 72 ptep_set_wrprotect(mm, addr, ptep); huge_ptep_set_wrprotect()
|
H A D | mpx.h | 65 static inline int kernel_managing_mpx_tables(struct mm_struct *mm) kernel_managing_mpx_tables() argument 67 return (mm->bd_addr != MPX_INVALID_BOUNDS_DIR); kernel_managing_mpx_tables() 69 static inline void mpx_mm_init(struct mm_struct *mm) mpx_mm_init() argument 75 mm->bd_addr = MPX_INVALID_BOUNDS_DIR; mpx_mm_init() 77 void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma, 89 static inline int kernel_managing_mpx_tables(struct mm_struct *mm) kernel_managing_mpx_tables() argument 93 static inline void mpx_mm_init(struct mm_struct *mm) mpx_mm_init() argument 96 static inline void mpx_notify_unmap(struct mm_struct *mm, mpx_notify_unmap() argument
|
H A D | mmu_context.h | 24 static inline void load_mm_cr4(struct mm_struct *mm) load_mm_cr4() argument 27 atomic_read(&mm->context.perf_rdpmc_allowed)) load_mm_cr4() 33 static inline void load_mm_cr4(struct mm_struct *mm) {} load_mm_cr4() argument 51 static inline void load_mm_ldt(struct mm_struct *mm) load_mm_ldt() argument 56 ldt = lockless_dereference(mm->context.ldt); load_mm_ldt() 59 * Any change to mm->context.ldt is followed by an IPI to all load_mm_ldt() 60 * CPUs with the mm active. The LDT will not be freed until load_mm_ldt() 83 int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 84 void destroy_context(struct mm_struct *mm); 87 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 139 /* Stop flush ipis for the previous mm */ switch_mm() 142 /* Load per-mm CR4 state */ switch_mm() 153 * never set context.ldt to NULL while the mm still switch_mm() 198 #define deactivate_mm(tsk, mm) \ 203 #define deactivate_mm(tsk, mm) \ 211 struct mm_struct *mm) arch_dup_mmap() 213 paravirt_arch_dup_mmap(oldmm, mm); arch_dup_mmap() 216 static inline void arch_exit_mmap(struct mm_struct *mm) arch_exit_mmap() argument 218 paravirt_arch_exit_mmap(mm); arch_exit_mmap() 221 static inline void arch_bprm_mm_init(struct mm_struct *mm, arch_bprm_mm_init() argument 224 mpx_mm_init(mm); arch_bprm_mm_init() 227 static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma, arch_unmap() argument 248 mpx_notify_unmap(mm, vma, start, end); arch_unmap() 210 arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) arch_dup_mmap() argument
|
/linux-4.1.27/arch/x86/um/vdso/ |
H A D | vma.c | 11 #include <linux/mm.h> 58 struct mm_struct *mm = current->mm; arch_setup_additional_pages() local 63 down_write(&mm->mmap_sem); arch_setup_additional_pages() 65 err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE, arch_setup_additional_pages() 70 up_write(&mm->mmap_sem); arch_setup_additional_pages()
|
/linux-4.1.27/arch/tile/mm/ |
H A D | mmap.c | 17 #include <linux/mm.h> 32 static inline unsigned long mmap_base(struct mm_struct *mm) mmap_base() argument 52 void arch_pick_mmap_layout(struct mm_struct *mm) arch_pick_mmap_layout() argument 81 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; arch_pick_mmap_layout() 82 mm->get_unmapped_area = arch_get_unmapped_area; arch_pick_mmap_layout() 84 mm->mmap_base = mmap_base(mm); arch_pick_mmap_layout() 85 mm->get_unmapped_area = arch_get_unmapped_area_topdown; arch_pick_mmap_layout() 89 unsigned long arch_randomize_brk(struct mm_struct *mm) arch_randomize_brk() argument 91 unsigned long range_end = mm->brk + 0x02000000; arch_randomize_brk() 92 return randomize_range(mm->brk, range_end, 0) ? : mm->brk; arch_randomize_brk()
|
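The tile mmap.c entry above picks a randomized mmap base and a randomized brk (arch_randomize_brk() via randomize_range()). A rough, stand-alone sketch of page-aligned randomization within a fixed window, assuming rand() as a stand-in for the kernel's entropy source and using made-up constants, could be:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define PAGE_SIZE 4096UL

    /* Pick a page-aligned address in [base, base + window); illustration only,
     * loosely mirroring the randomize_range() use shown above. */
    static unsigned long randomize_base(unsigned long base, unsigned long window)
    {
            unsigned long pages = window / PAGE_SIZE;

            return base + ((unsigned long)rand() % pages) * PAGE_SIZE;
    }

    int main(void)
    {
            unsigned long brk = 0x10000000UL;

            srand((unsigned)time(NULL));
            printf("randomized brk: %#lx\n", randomize_base(brk, 0x02000000UL));
            return 0;
    }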
H A D | elf.c | 15 #include <linux/mm.h> 41 static int notify_exec(struct mm_struct *mm) notify_exec() argument 55 exe_file = get_mm_exe_file(mm); notify_exec() 63 down_read(&mm->mmap_sem); notify_exec() 64 for (vma = current->mm->mmap; ; vma = vma->vm_next) { notify_exec() 66 up_read(&mm->mmap_sem); notify_exec() 94 up_read(&mm->mmap_sem); notify_exec() 120 struct mm_struct *mm = current->mm; arch_setup_additional_pages() local 128 if (!notify_exec(mm)) arch_setup_additional_pages() 131 down_write(&mm->mmap_sem); arch_setup_additional_pages() 152 up_write(&mm->mmap_sem); arch_setup_additional_pages()
|
/linux-4.1.27/arch/x86/um/ |
H A D | mem_64.c | 1 #include <linux/mm.h>
|
H A D | mem_32.c | 9 #include <linux/mm.h> 30 struct vm_area_struct *get_gate_vma(struct mm_struct *mm) get_gate_vma() argument 46 int in_gate_area(struct mm_struct *mm, unsigned long addr) in_gate_area() argument 48 struct vm_area_struct *vma = get_gate_vma(mm); in_gate_area()
|
/linux-4.1.27/arch/metag/include/asm/ |
H A D | tlbflush.h | 12 * - flush_tlb() flushes the current mm struct TLBs 14 * - flush_tlb_mm(mm) flushes the specified mm context TLB's 16 * - flush_tlb_range(mm, start, end) flushes a range of pages 18 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 46 static inline void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 48 if (mm == current->active_mm) flush_tlb_mm() 64 static inline void flush_tlb_pgtables(struct mm_struct *mm, flush_tlb_pgtables() argument 67 flush_tlb_mm(mm); flush_tlb_pgtables()
|
H A D | pgalloc.h | 5 #include <linux/mm.h> 7 #define pmd_populate_kernel(mm, pmd, pte) \ 10 #define pmd_populate(mm, pmd, pte) \ 29 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument 37 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument 42 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument 50 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument 64 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 69 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
|
H A D | hugetlb.h | 8 static inline int is_hugepage_only_range(struct mm_struct *mm, is_hugepage_only_range() argument 17 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) hugetlb_prefault_arch_hook() argument 29 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument 32 set_pte_at(mm, addr, ptep, pte); set_huge_pte_at() 35 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, huge_ptep_get_and_clear() argument 38 return ptep_get_and_clear(mm, addr, ptep); huge_ptep_get_and_clear() 56 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument 59 ptep_set_wrprotect(mm, addr, ptep); huge_ptep_set_wrprotect()
|
H A D | mmu_context.h | 13 static inline void enter_lazy_tlb(struct mm_struct *mm, enter_lazy_tlb() argument 19 struct mm_struct *mm) init_new_context() 26 mm->context.pgd_base = (unsigned long) mm->pgd; init_new_context() 29 INIT_LIST_HEAD(&mm->context.tcm); init_new_context() 39 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument 43 list_for_each_entry_safe(pos, n, &mm->context.tcm, list) { list_for_each_entry_safe() 50 #define destroy_context(mm) do { } while (0) 111 #define deactivate_mm(tsk, mm) do { } while (0) 18 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
|
/linux-4.1.27/arch/score/include/asm/ |
H A D | pgalloc.h | 4 #include <linux/mm.h> 6 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument 12 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument 20 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument 35 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument 40 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument 51 static inline struct page *pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument 67 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 72 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
|
H A D | mmu_context.h | 42 static inline void enter_lazy_tlb(struct mm_struct *mm, enter_lazy_tlb() argument 47 get_new_mmu_context(struct mm_struct *mm) get_new_mmu_context() argument 57 mm->context = asid; get_new_mmu_context() 66 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument 68 mm->context = 0; init_new_context() 90 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument 94 deactivate_mm(struct task_struct *task, struct mm_struct *mm) deactivate_mm() argument 98 * After we have set current->mm to a new value, this activates 99 * the context for the new mm so we see the new mappings.
|
/linux-4.1.27/arch/nios2/include/asm/ |
H A D | pgalloc.h | 13 #include <linux/mm.h> 15 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument 21 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument 33 extern pgd_t *pgd_alloc(struct mm_struct *mm); 35 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument 40 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument 51 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument 67 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 72 static inline void pte_free(struct mm_struct *mm, struct page *pte) pte_free() argument
|
H A D | mmu_context.h | 27 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 38 struct mm_struct *mm) init_new_context() 40 mm->context = 0; init_new_context() 48 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument 56 struct mm_struct *mm) deactivate_mm() 61 * After we have set current->mm to a new value, this activates 62 * the context for the new mm so we see the new mappings. 37 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument 55 deactivate_mm(struct task_struct *tsk, struct mm_struct *mm) deactivate_mm() argument
|
/linux-4.1.27/arch/avr32/include/asm/ |
H A D | pgalloc.h | 11 #include <linux/mm.h> 19 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_populate_kernel() argument 25 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument 44 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument 49 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument 54 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument 60 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument 79 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 84 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument
|
H A D | tlbflush.h | 16 * - flush_tlb() flushes the current mm struct TLBs 18 * - flush_tlb_mm(mm) flushes the specified mm context TLBs 25 extern void flush_tlb_mm(struct mm_struct *mm);
|
H A D | mmu_context.h | 41 get_mmu_context(struct mm_struct *mm) get_mmu_context() argument 45 if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0) get_mmu_context() 64 mm->context = mc; get_mmu_context() 72 struct mm_struct *mm) init_new_context() 74 mm->context = NO_CONTEXT; init_new_context() 82 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument 102 static inline void activate_context(struct mm_struct *mm) activate_context() argument 104 get_mmu_context(mm); activate_context() 105 set_asid(mm->context & MMU_CONTEXT_ASID_MASK); activate_context() 120 #define deactivate_mm(tsk,mm) do { } while(0) 125 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 71 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
|
/linux-4.1.27/kernel/ |
H A D | fork.c | 11 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()' 31 #include <linux/mm.h> 212 /* SLAB cache for mm_struct structures (tsk->mm) */ 390 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) dup_mmap() argument 400 uprobe_dup_mmap(oldmm, mm); dup_mmap() 404 down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); dup_mmap() 407 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); dup_mmap() 409 mm->total_vm = oldmm->total_vm; dup_mmap() 410 mm->shared_vm = oldmm->shared_vm; dup_mmap() 411 mm->exec_vm = oldmm->exec_vm; dup_mmap() 412 mm->stack_vm = oldmm->stack_vm; dup_mmap() 414 rb_link = &mm->mm_rb.rb_node; dup_mmap() 416 pprev = &mm->mmap; dup_mmap() 417 retval = ksm_fork(mm, oldmm); dup_mmap() 420 retval = khugepaged_fork(mm, oldmm); dup_mmap() 429 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file, dup_mmap() 449 tmp->vm_mm = mm; dup_mmap() 489 __vma_link_rb(mm, tmp, rb_link, rb_parent); dup_mmap() 493 mm->map_count++; dup_mmap() 494 retval = copy_page_range(mm, oldmm, mpnt); dup_mmap() 502 /* a new mm has just been created */ dup_mmap() 503 arch_dup_mmap(oldmm, mm); dup_mmap() 506 up_write(&mm->mmap_sem); dup_mmap() 521 static inline int mm_alloc_pgd(struct mm_struct *mm) mm_alloc_pgd() argument 523 mm->pgd = pgd_alloc(mm); mm_alloc_pgd() 524 if (unlikely(!mm->pgd)) mm_alloc_pgd() 529 static inline void mm_free_pgd(struct mm_struct *mm) mm_free_pgd() argument 531 pgd_free(mm, mm->pgd); mm_free_pgd() 534 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) dup_mmap() argument 537 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); dup_mmap() 541 #define mm_alloc_pgd(mm) (0) 542 #define mm_free_pgd(mm) 548 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) 564 static void mm_init_aio(struct mm_struct *mm) mm_init_aio() argument 567 spin_lock_init(&mm->ioctx_lock); mm_init_aio() 568 mm->ioctx_table = NULL; mm_init_aio() 572 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) mm_init_owner() argument 575 mm->owner = p; mm_init_owner() 579 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) mm_init() argument 581 mm->mmap = NULL; mm_init() 582 mm->mm_rb = RB_ROOT; mm_init() 583 mm->vmacache_seqnum = 0; mm_init() 584 atomic_set(&mm->mm_users, 1); mm_init() 585 atomic_set(&mm->mm_count, 1); mm_init() 586 init_rwsem(&mm->mmap_sem); mm_init() 587 INIT_LIST_HEAD(&mm->mmlist); mm_init() 588 mm->core_state = NULL; mm_init() 589 atomic_long_set(&mm->nr_ptes, 0); mm_init() 590 mm_nr_pmds_init(mm); mm_init() 591 mm->map_count = 0; mm_init() 592 mm->locked_vm = 0; mm_init() 593 mm->pinned_vm = 0; mm_init() 594 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); mm_init() 595 spin_lock_init(&mm->page_table_lock); mm_init() 596 mm_init_cpumask(mm); mm_init() 597 mm_init_aio(mm); mm_init() 598 mm_init_owner(mm, p); mm_init() 599 mmu_notifier_mm_init(mm); mm_init() 600 clear_tlb_flush_pending(mm); mm_init() 602 mm->pmd_huge_pte = NULL; mm_init() 605 if (current->mm) { mm_init() 606 mm->flags = current->mm->flags & MMF_INIT_MASK; mm_init() 607 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; mm_init() 609 mm->flags = default_dump_filter; mm_init() 610 mm->def_flags = 0; mm_init() 613 if (mm_alloc_pgd(mm)) mm_init() 616 if (init_new_context(p, mm)) mm_init() 619 return mm; mm_init() 622 mm_free_pgd(mm); mm_init() 624 free_mm(mm); mm_init() 628 static void check_mm(struct mm_struct *mm) check_mm() argument 633 long x = atomic_long_read(&mm->rss_stat.count[i]); check_mm() 637 "mm:%p 
idx:%d val:%ld\n", mm, i, x); check_mm() 640 if (atomic_long_read(&mm->nr_ptes)) check_mm() 641 pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n", check_mm() 642 atomic_long_read(&mm->nr_ptes)); check_mm() 643 if (mm_nr_pmds(mm)) check_mm() 644 pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n", check_mm() 645 mm_nr_pmds(mm)); check_mm() 648 VM_BUG_ON_MM(mm->pmd_huge_pte, mm); check_mm() 657 struct mm_struct *mm; mm_alloc() local 659 mm = allocate_mm(); mm_alloc() 660 if (!mm) mm_alloc() 663 memset(mm, 0, sizeof(*mm)); mm_alloc() 664 return mm_init(mm, current); mm_alloc() 668 * Called when the last reference to the mm 670 * mmput. Free the page directory and the mm. 672 void __mmdrop(struct mm_struct *mm) __mmdrop() argument 674 BUG_ON(mm == &init_mm); __mmdrop() 675 mm_free_pgd(mm); __mmdrop() 676 destroy_context(mm); __mmdrop() 677 mmu_notifier_mm_destroy(mm); __mmdrop() 678 check_mm(mm); __mmdrop() 679 free_mm(mm); __mmdrop() 684 * Decrement the use count and release all resources for an mm. 686 void mmput(struct mm_struct *mm) mmput() argument 690 if (atomic_dec_and_test(&mm->mm_users)) { mmput() 691 uprobe_clear_state(mm); mmput() 692 exit_aio(mm); mmput() 693 ksm_exit(mm); mmput() 694 khugepaged_exit(mm); /* must run before exit_mmap */ mmput() 695 exit_mmap(mm); mmput() 696 set_mm_exe_file(mm, NULL); mmput() 697 if (!list_empty(&mm->mmlist)) { mmput() 699 list_del(&mm->mmlist); mmput() 702 if (mm->binfmt) mmput() 703 module_put(mm->binfmt->module); mmput() 704 mmdrop(mm); mmput() 710 * set_mm_exe_file - change a reference to the mm's executable file 712 * This changes mm's executable file (shown as symlink /proc/[pid]/exe). 717 * mm->exe_file, but does so without using set_mm_exe_file() in order 720 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) set_mm_exe_file() argument 727 * this mm -- see comment above for justification. set_mm_exe_file() 729 old_exe_file = rcu_dereference_raw(mm->exe_file); set_mm_exe_file() 733 rcu_assign_pointer(mm->exe_file, new_exe_file); set_mm_exe_file() 739 * get_mm_exe_file - acquire a reference to the mm's executable file 741 * Returns %NULL if mm has no associated executable file. 744 struct file *get_mm_exe_file(struct mm_struct *mm) get_mm_exe_file() argument 749 exe_file = rcu_dereference(mm->exe_file); get_mm_exe_file() 758 * get_task_mm - acquire a reference to the task's mm 760 * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning 761 * this kernel workthread has transiently adopted a user mm with use_mm, 763 * bumping up the use count. User must release the mm via mmput() 768 struct mm_struct *mm; get_task_mm() local 771 mm = task->mm; get_task_mm() 772 if (mm) { get_task_mm() 774 mm = NULL; get_task_mm() 776 atomic_inc(&mm->mm_users); get_task_mm() 779 return mm; get_task_mm() 785 struct mm_struct *mm; mm_access() local 792 mm = get_task_mm(task); mm_access() 793 if (mm && mm != current->mm && mm_access() 795 mmput(mm); mm_access() 796 mm = ERR_PTR(-EACCES); mm_access() 800 return mm; mm_access() 848 void mm_release(struct task_struct *tsk, struct mm_struct *mm) mm_release() argument 850 /* Get rid of any futexes when releasing the mm */ mm_release() 869 deactivate_mm(tsk, mm); mm_release() 875 * trouble, say, a killed vfork parent shouldn't touch this mm. mm_release() 880 atomic_read(&mm->mm_users) > 1) { mm_release() 893 * All done, finally we can wake up parent and return this mm to him. 
mm_release() 901 * Allocate a new mm structure and copy contents from the 902 * mm structure of the passed in task structure. 906 struct mm_struct *mm, *oldmm = current->mm; dup_mm() local 909 mm = allocate_mm(); dup_mm() 910 if (!mm) dup_mm() 913 memcpy(mm, oldmm, sizeof(*mm)); dup_mm() 915 if (!mm_init(mm, tsk)) dup_mm() 918 err = dup_mmap(mm, oldmm); dup_mm() 922 mm->hiwater_rss = get_mm_rss(mm); dup_mm() 923 mm->hiwater_vm = mm->total_vm; dup_mm() 925 if (mm->binfmt && !try_module_get(mm->binfmt->module)) dup_mm() 928 return mm; dup_mm() 932 mm->binfmt = NULL; dup_mm() 933 mmput(mm); dup_mm() 941 struct mm_struct *mm, *oldmm; copy_mm() local 950 tsk->mm = NULL; copy_mm() 958 oldmm = current->mm; copy_mm() 967 mm = oldmm; copy_mm() 972 mm = dup_mm(tsk); copy_mm() 973 if (!mm) copy_mm() 977 tsk->mm = mm; copy_mm() 978 tsk->active_mm = mm; copy_mm() 1615 if (p->mm) copy_process() 1616 mmput(p->mm); copy_process()
|
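The kernel/fork.c entry above relies on two counters: mm_users pins the address space (dropped with mmput(), which tears the mappings down at zero), while mm_count pins the mm_struct descriptor itself (dropped with mmdrop(), which frees it at zero), so a lazy-TLB or kernel-thread reference can outlive the last user. A small user-space sketch of that two-level refcount, with invented names (obj_put/obj_drop) and no atomics or locking, might read:

    #include <stdio.h>
    #include <stdlib.h>

    /* Two-level refcount, loosely modelled on mm_users/mm_count:
     * "users" pins the payload, "count" pins the descriptor itself. */
    struct obj {
            int users;      /* like mm_users */
            int count;      /* like mm_count */
    };

    static struct obj *obj_alloc(void)
    {
            struct obj *o = calloc(1, sizeof(*o));

            o->users = 1;
            o->count = 1;
            return o;
    }

    static void obj_drop(struct obj *o)     /* analogue of mmdrop() */
    {
            if (--o->count == 0) {
                    printf("descriptor freed\n");
                    free(o);
            }
    }

    static void obj_put(struct obj *o)      /* analogue of mmput() */
    {
            if (--o->users == 0) {
                    printf("payload torn down\n");  /* address space teardown would run here */
                    obj_drop(o);                    /* drop the reference the users held */
            }
    }

    int main(void)
    {
            struct obj *o = obj_alloc();

            o->count++;     /* a lazy-TLB style reference to the descriptor */
            obj_put(o);     /* last user: payload goes, descriptor survives */
            obj_drop(o);    /* last descriptor reference: it is freed */
            return 0;
    }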
H A D | elfcore.c | 3 #include <linux/mm.h>
|
H A D | tsacct.c | 24 #include <linux/mm.h> 94 struct mm_struct *mm; xacct_add_tsk() local 99 mm = get_task_mm(p); xacct_add_tsk() 100 if (mm) { xacct_add_tsk() 102 stats->hiwater_rss = get_mm_hiwater_rss(mm) * PAGE_SIZE / KB; xacct_add_tsk() 103 stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB; xacct_add_tsk() 104 mmput(mm); xacct_add_tsk() 126 if (likely(tsk->mm)) { __acct_update_integrals() 142 tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); __acct_update_integrals() 143 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; __acct_update_integrals() 150 * acct_update_integrals - update mm integral fields in task_struct 162 * acct_account_cputime - update mm integral after cputime update 171 * acct_clear_integrals - clear the mm integral fields in task_struct
|
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/ |
H A D | kfd_mqd_manager_cik.c | 36 static int init_mqd(struct mqd_manager *mm, void **mqd, init_mqd() argument 44 BUG_ON(!mm || !q || !mqd); init_mqd() 48 retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), init_mqd() 103 retval = mm->update_mqd(mm, m, q); init_mqd() 108 static int init_mqd_sdma(struct mqd_manager *mm, void **mqd, init_mqd_sdma() argument 115 BUG_ON(!mm || !mqd || !mqd_mem_obj); init_mqd_sdma() 117 retval = kfd_gtt_sa_allocate(mm->dev, init_mqd_sdma() 132 retval = mm->update_mqd(mm, m, q); init_mqd_sdma() 137 static void uninit_mqd(struct mqd_manager *mm, void *mqd, uninit_mqd() argument 140 BUG_ON(!mm || !mqd); uninit_mqd() 141 kfd_gtt_sa_free(mm->dev, mqd_mem_obj); uninit_mqd() 144 static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd, uninit_mqd_sdma() argument 147 BUG_ON(!mm || !mqd); uninit_mqd_sdma() 148 kfd_gtt_sa_free(mm->dev, mqd_mem_obj); uninit_mqd_sdma() 151 static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, load_mqd() argument 154 return mm->dev->kfd2kgd->hqd_load load_mqd() 155 (mm->dev->kgd, mqd, pipe_id, queue_id, wptr); load_mqd() 158 static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, load_mqd_sdma() argument 162 return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd); load_mqd_sdma() 165 static int update_mqd(struct mqd_manager *mm, void *mqd, update_mqd() argument 170 BUG_ON(!mm || !q || !mqd); update_mqd() 209 static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, update_mqd_sdma() argument 214 BUG_ON(!mm || !mqd || !q); update_mqd_sdma() 244 static int destroy_mqd(struct mqd_manager *mm, void *mqd, destroy_mqd() argument 249 return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout, destroy_mqd() 257 static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, destroy_mqd_sdma() argument 262 return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); destroy_mqd_sdma() 265 static bool is_occupied(struct mqd_manager *mm, void *mqd, is_occupied() argument 270 return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address, is_occupied() 275 static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, is_occupied_sdma() argument 279 return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); is_occupied_sdma() 288 static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, init_mqd_hiq() argument 296 BUG_ON(!mm || !q || !mqd || !mqd_mem_obj); init_mqd_hiq() 300 retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), init_mqd_hiq() 344 retval = mm->update_mqd(mm, m, q); init_mqd_hiq() 349 static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, update_mqd_hiq() argument 354 BUG_ON(!mm || !q || !mqd); update_mqd_hiq()
|
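The kfd_mqd_manager_cik.c entry above is organized around a table of callbacks (init_mqd, update_mqd, load_mqd, destroy_mqd, ...), with init_mqd() finishing by calling mm->update_mqd(). A stripped-down sketch of that ops-table pattern, using hypothetical names and none of the real KFD types, could be:

    #include <stdio.h>

    /* Illustrative ops table; struct and function names are invented. */
    struct manager {
            int (*init)(struct manager *m, int *obj);
            int (*update)(struct manager *m, int *obj);
    };

    static int generic_update(struct manager *m, int *obj)
    {
            (void)m;
            *obj += 1;
            return 0;
    }

    /* init delegates to the update hook, as init_mqd() does with mm->update_mqd(). */
    static int generic_init(struct manager *m, int *obj)
    {
            *obj = 0;
            return m->update(m, obj);
    }

    int main(void)
    {
            struct manager m = { .init = generic_init, .update = generic_update };
            int obj;

            m.init(&m, &obj);
            printf("obj after init: %d\n", obj);
            return 0;
    }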
/linux-4.1.27/arch/openrisc/include/asm/ |
H A D | pgalloc.h | 24 #include <linux/mm.h> 30 #define pmd_populate_kernel(mm, pmd, pte) \ 33 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument 44 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument 60 * current_pgd (from mm->pgd) to load kernel pages so we need it 63 extern inline pgd_t *pgd_alloc(struct mm_struct *mm) 69 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument 74 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address); 76 static inline struct page *pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument 91 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 96 static inline void pte_free(struct mm_struct *mm, struct page *pte) pte_free() argument
|
H A D | tlbflush.h | 22 #include <linux/mm.h> 30 * - flush_tlb() flushes the current mm struct TLBs 32 * - flush_tlb_mm(mm) flushes the specified mm context TLB's 34 * - flush_tlb_range(mm, start, end) flushes a range of pages 38 void flush_tlb_mm(struct mm_struct *mm); 46 flush_tlb_mm(current->mm); flush_tlb()
|
/linux-4.1.27/arch/tile/include/asm/ |
H A D | pgalloc.h | 19 #include <linux/mm.h> 50 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_populate_kernel() argument 57 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument 68 extern pgd_t *pgd_alloc(struct mm_struct *mm); 69 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); 71 extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address, 73 extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order); 75 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument 78 return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER); pte_alloc_one() 81 static inline void pte_free(struct mm_struct *mm, struct page *pte) pte_free() argument 83 pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER); pte_free() 89 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) pte_alloc_one_kernel() argument 91 return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address))); pte_alloc_one_kernel() 94 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 97 pte_free(mm, virt_to_page(pte)); pte_free_kernel() 125 #define pud_populate(mm, pud, pmd) \ 126 pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd)) 144 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_alloc_one() argument 146 struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER); pmd_alloc_one() 150 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp) pmd_free() argument 152 pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER); pmd_free()
|
/linux-4.1.27/arch/microblaze/mm/ |
H A D | mmu_context.c | 6 * Derived from arch/ppc/mm/4xx_mmu.c: 9 * Derived from arch/ppc/mm/init.c: 17 * Derived from "arch/i386/mm/init.c" 27 #include <linux/mm.h> 61 struct mm_struct *mm; steal_context() local 67 mm = context_mm[next_mmu_context]; steal_context() 68 flush_tlb_mm(mm); steal_context() 69 destroy_context(mm); steal_context()
|
/linux-4.1.27/arch/mips/mm/ |
H A D | hugetlbpage.c | 15 #include <linux/mm.h> 24 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, huge_pte_alloc() argument 31 pgd = pgd_offset(mm, addr); huge_pte_alloc() 32 pud = pud_alloc(mm, pgd, addr); huge_pte_alloc() 34 pte = (pte_t *)pmd_alloc(mm, pud, addr); huge_pte_alloc() 39 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) huge_pte_offset() argument 45 pgd = pgd_offset(mm, addr); huge_pte_offset() 54 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) huge_pmd_unshare() argument
|
H A D | mmap.c | 11 #include <linux/mm.h> 58 struct mm_struct *mm = current->mm; arch_get_unmapped_area_common() local 93 vma = find_vma(mm, addr); arch_get_unmapped_area_common() 106 info.high_limit = mm->mmap_base; arch_get_unmapped_area_common() 121 info.low_limit = mm->mmap_base; arch_get_unmapped_area_common() 159 void arch_pick_mmap_layout(struct mm_struct *mm) arch_pick_mmap_layout() argument 167 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; arch_pick_mmap_layout() 168 mm->get_unmapped_area = arch_get_unmapped_area; arch_pick_mmap_layout() 170 mm->mmap_base = mmap_base(random_factor); arch_pick_mmap_layout() 171 mm->get_unmapped_area = arch_get_unmapped_area_topdown; arch_pick_mmap_layout() 189 unsigned long arch_randomize_brk(struct mm_struct *mm) arch_randomize_brk() argument 191 unsigned long base = mm->brk; arch_randomize_brk() 196 if (ret < mm->brk) arch_randomize_brk() 197 return mm->brk; arch_randomize_brk()
|
/linux-4.1.27/arch/blackfin/include/asm/ |
H A D | tlb.h | 15 * .. because we flush the whole mm when it 18 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
|
H A D | mmu_context.h | 60 activate_l1stack(struct mm_struct *mm, unsigned long sp_base) activate_l1stack() argument 64 mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base; activate_l1stack() 69 #define deactivate_mm(tsk,mm) do { } while (0) 120 static inline void protect_page(struct mm_struct *mm, unsigned long addr, protect_page() argument 123 unsigned long *mask = mm->context.page_rwx_mask; protect_page() 151 static inline void update_protections(struct mm_struct *mm) update_protections() argument 154 if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) { update_protections() 156 set_mask_dcplbs(mm->context.page_rwx_mask, cpu); update_protections() 167 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 173 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument 177 mm->context.page_rwx_mask = (unsigned long *)p; init_new_context() 178 memset(mm->context.page_rwx_mask, 0, init_new_context() 184 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument 192 if (current_l1_stack_save == mm->context.l1_stack_save) destroy_context() 194 if (mm->context.l1_stack_save) destroy_context() 198 while ((tmp = mm->context.sram_list)) { destroy_context() 199 mm->context.sram_list = tmp->next; destroy_context() 204 if (current_rwx_mask[cpu] == mm->context.page_rwx_mask) destroy_context() 206 free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order); destroy_context()
|
/linux-4.1.27/lib/ |
H A D | is_single_threaded.c | 16 * Returns true if the task does not share ->mm with another thread/process. 21 struct mm_struct *mm = task->mm; current_is_single_threaded() local 28 if (atomic_read(&mm->mm_users) == 1) current_is_single_threaded() 41 if (unlikely(t->mm == mm)) for_each_process() 43 if (likely(t->mm)) for_each_process() 46 * t->mm == NULL. Make sure next_thread/next_task for_each_process()
|
/linux-4.1.27/arch/mips/include/asm/ |
H A D | mmu_context.h | 85 #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) 86 #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) 89 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 102 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) get_new_mmu_context() argument 119 cpu_context(cpu, mm) = asid_cache(cpu) = asid; get_new_mmu_context() 127 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument 132 cpu_context(i, mm) = 0; init_new_context() 134 atomic_set(&mm->context.fp_mode_switching, 0); init_new_context() 168 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument 172 #define deactivate_mm(tsk, mm) do { } while (0) 175 * After we have set current->mm to a new value, this activates 176 * the context for the new mm so we see the new mappings. 202 * If mm is currently active_mm, we can't really drop it. Instead, 206 drop_mmu_context(struct mm_struct *mm, unsigned cpu) drop_mmu_context() argument 213 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { drop_mmu_context() 214 get_new_mmu_context(mm, cpu); drop_mmu_context() 215 write_c0_entryhi(cpu_asid(cpu, mm)); drop_mmu_context() 218 cpu_context(cpu, mm) = 0; drop_mmu_context()
|
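The mips mmu_context.h entry above allocates ASIDs from a per-cpu counter: when the ASID field wraps, the TLB is flushed and the generation (version) bits advance, so stale per-mm contexts can be detected by a cheap comparison rather than a walk. A user-space simulation of that rollover scheme, with an assumed 8-bit ASID field and invented names (the real widths and masks are per-architecture), might look like:

    #include <stdio.h>

    #define ASID_BITS    8
    #define ASID_MASK    ((1UL << ASID_BITS) - 1)
    #define ASID_VERSION (1UL << ASID_BITS)     /* generation advances on rollover */

    static unsigned long asid_cache = ASID_VERSION;

    /* Hand out the next context value; flush and bump the generation on wrap. */
    static unsigned long new_context(void)
    {
            unsigned long asid = asid_cache + 1;

            if (!(asid & ASID_MASK)) {          /* ASID field wrapped around */
                    printf("local TLB flush, new generation\n");
                    if (!asid)                  /* counter itself overflowed */
                            asid = ASID_VERSION;
            }
            return asid_cache = asid;
    }

    int main(void)
    {
            for (int i = 0; i < 300; i++) {
                    unsigned long ctx = new_context();

                    if (i % 100 == 0)
                            printf("ctx %#lx (asid %#lx)\n", ctx, ctx & ASID_MASK);
            }
            return 0;
    }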
H A D | pgalloc.h | 13 #include <linux/mm.h> 16 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument 22 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument 36 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument 47 static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_alloc() argument 62 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_free() argument 67 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pte_alloc_one_kernel() argument 77 static inline struct page *pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument 93 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 98 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument 112 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_alloc_one() argument 122 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument 127 #define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
|
H A D | hugetlb.h | 16 static inline int is_hugepage_only_range(struct mm_struct *mm, is_hugepage_only_range() argument 41 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) hugetlb_prefault_arch_hook() argument 54 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_huge_pte_at() argument 57 set_pte_at(mm, addr, ptep, pte); set_huge_pte_at() 60 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, huge_ptep_get_and_clear() argument 67 set_pte_at(mm, addr, ptep, clear); huge_ptep_get_and_clear() 88 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, huge_ptep_set_wrprotect() argument 91 ptep_set_wrprotect(mm, addr, ptep); huge_ptep_set_wrprotect()
|
H A D | tlbflush.h | 4 #include <linux/mm.h> 10 * - flush_tlb_mm(mm) flushes the specified mm context TLB entries 16 extern void local_flush_tlb_mm(struct mm_struct *mm); 38 #define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
|
/linux-4.1.27/arch/arc/include/asm/ |
H A D | tlbflush.h | 12 #include <linux/mm.h> 15 void local_flush_tlb_mm(struct mm_struct *mm); 26 #define flush_tlb_mm(mm) local_flush_tlb_mm(mm) 33 extern void flush_tlb_mm(struct mm_struct *mm);
|
H A D | mmu_context.h | 9 * -Refactored get_new_mmu_context( ) to only handle live-mm. 10 * retiring-mm handled in other hooks 39 * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits 50 #define asid_mm(mm, cpu) mm->context.asid[cpu] 51 #define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK) 60 static inline void get_new_mmu_context(struct mm_struct *mm) get_new_mmu_context() argument 69 * This is done by ensuring that the generation bits in both mm->ASID get_new_mmu_context() 77 if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK)) get_new_mmu_context() 95 asid_mm(mm, cpu) = asid_cpu(cpu); get_new_mmu_context() 98 write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE); get_new_mmu_context() 108 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument 113 asid_mm(mm, i) = MM_CTXT_NO_ASID; init_new_context() 118 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument 124 asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID; destroy_context() 150 /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */ switch_mm() 160 * vs. in switch_mm(). Here it always returns a new ASID, because mm has 167 * for retiring-mm. However destroy_context( ) still needs to do that because 173 #define deactivate_mm(tsk, mm) do { } while (0) 175 #define enter_lazy_tlb(mm, tsk)
|
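The ARC mmu_context.h above packs an 8-bit hardware PID and a software generation ("cycle") counter into one 32-bit ASID word, and allocates a new ASID when the generations of the mm and the CPU diverge. The following is only an illustrative model of that staleness test, using made-up names rather than the ARC macros.

#define EX_HW_ASID_MASK   0x000000ffUL   /* bits programmed into the MMU PID register */
#define EX_CYCLE_MASK     0xffffff00UL   /* software generation counter */

/* An mm needs a fresh ASID once its generation no longer matches the CPU's. */
static inline int ex_asid_is_stale(unsigned long mm_asid, unsigned long cpu_asid)
{
        return (mm_asid ^ cpu_asid) & EX_CYCLE_MASK;
}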
/linux-4.1.27/drivers/gpu/drm/ |
H A D | drm_mm.c | 93 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, 98 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, 112 struct drm_mm *mm = hole_node->mm; drm_mm_insert_helper() local 120 if (mm->color_adjust) drm_mm_insert_helper() 121 mm->color_adjust(hole_node, color, &adj_start, &adj_end); drm_mm_insert_helper() 149 node->mm = mm; drm_mm_insert_helper() 160 list_add(&node->hole_stack, &mm->hole_stack); drm_mm_insert_helper() 167 * @mm: drm_mm allocator to insert @node into 179 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) drm_mm_reserve_node() argument 189 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { drm_mm_for_each_hole() 193 node->mm = mm; drm_mm_for_each_hole() 206 list_add(&node->hole_stack, &mm->hole_stack); drm_mm_for_each_hole() 219 * @mm: drm_mm to allocate from 232 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, drm_mm_insert_node_generic() argument 240 hole_node = drm_mm_search_free_generic(mm, size, alignment, drm_mm_insert_node_generic() 257 struct drm_mm *mm = hole_node->mm; drm_mm_insert_helper_range() local 273 if (mm->color_adjust) drm_mm_insert_helper_range() 274 mm->color_adjust(hole_node, color, &adj_start, &adj_end); drm_mm_insert_helper_range() 296 node->mm = mm; drm_mm_insert_helper_range() 310 list_add(&node->hole_stack, &mm->hole_stack); drm_mm_insert_helper_range() 317 * @mm: drm_mm to allocate from 332 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, drm_mm_insert_node_in_range_generic() argument 341 hole_node = drm_mm_search_free_in_range_generic(mm, drm_mm_insert_node_in_range_generic() 364 struct drm_mm *mm = node->mm; drm_mm_remove_node() local 387 list_add(&prev_node->hole_stack, &mm->hole_stack); drm_mm_remove_node() 389 list_move(&prev_node->hole_stack, &mm->hole_stack); drm_mm_remove_node() 413 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, drm_mm_search_free_generic() argument 425 BUG_ON(mm->scanned_blocks); drm_mm_search_free_generic() 430 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, __drm_mm_for_each_hole() 434 if (mm->color_adjust) { __drm_mm_for_each_hole() 435 mm->color_adjust(entry, color, &adj_start, &adj_end); __drm_mm_for_each_hole() 455 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, drm_mm_search_free_in_range_generic() argument 469 BUG_ON(mm->scanned_blocks); drm_mm_search_free_in_range_generic() 474 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, __drm_mm_for_each_hole() 483 if (mm->color_adjust) { __drm_mm_for_each_hole() 484 mm->color_adjust(entry, color, &adj_start, &adj_end); __drm_mm_for_each_hole() 518 new->mm = old->mm; drm_mm_replace_node() 558 * @mm: drm_mm to scan 571 void drm_mm_init_scan(struct drm_mm *mm, drm_mm_init_scan() argument 576 mm->scan_color = color; drm_mm_init_scan() 577 mm->scan_alignment = alignment; drm_mm_init_scan() 578 mm->scan_size = size; drm_mm_init_scan() 579 mm->scanned_blocks = 0; drm_mm_init_scan() 580 mm->scan_hit_start = 0; drm_mm_init_scan() 581 mm->scan_hit_end = 0; drm_mm_init_scan() 582 mm->scan_check_range = 0; drm_mm_init_scan() 583 mm->prev_scanned_node = NULL; drm_mm_init_scan() 589 * @mm: drm_mm to scan 604 void drm_mm_init_scan_with_range(struct drm_mm *mm, drm_mm_init_scan_with_range() argument 611 mm->scan_color = color; drm_mm_init_scan_with_range() 612 mm->scan_alignment = alignment; drm_mm_init_scan_with_range() 613 mm->scan_size 
= size; drm_mm_init_scan_with_range() 614 mm->scanned_blocks = 0; drm_mm_init_scan_with_range() 615 mm->scan_hit_start = 0; drm_mm_init_scan_with_range() 616 mm->scan_hit_end = 0; drm_mm_init_scan_with_range() 617 mm->scan_start = start; drm_mm_init_scan_with_range() 618 mm->scan_end = end; drm_mm_init_scan_with_range() 619 mm->scan_check_range = 1; drm_mm_init_scan_with_range() 620 mm->prev_scanned_node = NULL; drm_mm_init_scan_with_range() 636 struct drm_mm *mm = node->mm; drm_mm_scan_add_block() local 641 mm->scanned_blocks++; drm_mm_scan_add_block() 653 node->node_list.next = &mm->prev_scanned_node->node_list; drm_mm_scan_add_block() 654 mm->prev_scanned_node = node; drm_mm_scan_add_block() 659 if (mm->scan_check_range) { drm_mm_scan_add_block() 660 if (adj_start < mm->scan_start) drm_mm_scan_add_block() 661 adj_start = mm->scan_start; drm_mm_scan_add_block() 662 if (adj_end > mm->scan_end) drm_mm_scan_add_block() 663 adj_end = mm->scan_end; drm_mm_scan_add_block() 666 if (mm->color_adjust) drm_mm_scan_add_block() 667 mm->color_adjust(prev_node, mm->scan_color, drm_mm_scan_add_block() 671 mm->scan_size, mm->scan_alignment)) { drm_mm_scan_add_block() 672 mm->scan_hit_start = hole_start; drm_mm_scan_add_block() 673 mm->scan_hit_end = hole_end; drm_mm_scan_add_block() 699 struct drm_mm *mm = node->mm; drm_mm_scan_remove_block() local 702 mm->scanned_blocks--; drm_mm_scan_remove_block() 713 return (drm_mm_hole_node_end(node) > mm->scan_hit_start && drm_mm_scan_remove_block() 714 node->start < mm->scan_hit_end); drm_mm_scan_remove_block() 720 * @mm: drm_mm allocator to check 726 bool drm_mm_clean(struct drm_mm * mm) drm_mm_clean() argument 728 struct list_head *head = &mm->head_node.node_list; drm_mm_clean() 735 * drm_mm_init - initialize a drm-mm allocator 736 * @mm: the drm_mm structure to initialize 737 * @start: start of the range managed by @mm 738 * @size: end of the range managed by @mm 740 * Note that @mm must be cleared to 0 before calling this function. 742 void drm_mm_init(struct drm_mm * mm, u64 start, u64 size) drm_mm_init() argument 744 INIT_LIST_HEAD(&mm->hole_stack); drm_mm_init() 745 mm->scanned_blocks = 0; drm_mm_init() 748 INIT_LIST_HEAD(&mm->head_node.node_list); drm_mm_init() 749 INIT_LIST_HEAD(&mm->head_node.hole_stack); drm_mm_init() 750 mm->head_node.hole_follows = 1; drm_mm_init() 751 mm->head_node.scanned_block = 0; drm_mm_init() 752 mm->head_node.scanned_prev_free = 0; drm_mm_init() 753 mm->head_node.scanned_next_free = 0; drm_mm_init() 754 mm->head_node.mm = mm; drm_mm_init() 755 mm->head_node.start = start + size; drm_mm_init() 756 mm->head_node.size = start - mm->head_node.start; drm_mm_init() 757 list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack); drm_mm_init() 759 mm->color_adjust = NULL; drm_mm_init() 765 * @mm: drm_mm allocator to clean up 770 void drm_mm_takedown(struct drm_mm * mm) drm_mm_takedown() argument 772 WARN(!list_empty(&mm->head_node.node_list), drm_mm_takedown() 796 * @mm: drm_mm allocator to dump 799 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) drm_mm_debug_table() argument 804 total_free += drm_mm_debug_hole(&mm->head_node, prefix); drm_mm_debug_table() 806 drm_mm_for_each_node(entry, mm) { drm_mm_for_each_node() 839 * @mm: drm_mm allocator to dump 841 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) drm_mm_dump_table() argument 846 total_free += drm_mm_dump_hole(m, &mm->head_node); drm_mm_dump_table() 848 drm_mm_for_each_node(entry, mm) { drm_mm_for_each_node()
|
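drm_mm.c above implements the range allocator that drivers drive through drm_mm_init(), drm_mm_insert_node() and friends. A minimal lifecycle sketch, assuming a 1 MiB range managed from offset 0 and a single 4 KiB allocation; the names are hypothetical and error handling is trimmed.

#include <drm/drm_mm.h>

static struct drm_mm range_mgr;         /* static, so already zeroed as drm_mm_init() requires */
static struct drm_mm_node block;

static int example_carve_out(void)
{
        int ret;

        drm_mm_init(&range_mgr, 0, 1024 * 1024);

        ret = drm_mm_insert_node(&range_mgr, &block, 4096, 0,
                                 DRM_MM_SEARCH_DEFAULT);
        if (ret)
                return ret;             /* typically -ENOSPC when no hole fits */

        /* block.start now holds the offset chosen inside the managed range */

        drm_mm_remove_node(&block);
        drm_mm_takedown(&range_mgr);    /* warns if nodes are still allocated */
        return 0;
}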
/linux-4.1.27/drivers/gpio/ |
H A D | gpio-mpc8xxx.c | 53 to_mpc8xxx_gpio_chip(struct of_mm_gpio_chip *mm) to_mpc8xxx_gpio_chip() argument 55 return container_of(mm, struct mpc8xxx_gpio_chip, mm_gc); to_mpc8xxx_gpio_chip() 58 static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm) mpc8xxx_gpio_save_regs() argument 60 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gpio_save_regs() 62 mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT); mpc8xxx_gpio_save_regs() 73 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8572_gpio_get() local 74 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8572_gpio_get() 77 out_mask = in_be32(mm->regs + GPIO_DIR); mpc8572_gpio_get() 79 val = in_be32(mm->regs + GPIO_DAT) & ~out_mask; mpc8572_gpio_get() 87 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8xxx_gpio_get() local 89 return in_be32(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio); mpc8xxx_gpio_get() 94 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8xxx_gpio_set() local 95 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gpio_set() 105 out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); mpc8xxx_gpio_set() 113 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8xxx_gpio_set_multiple() local 114 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gpio_set_multiple() 131 out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); mpc8xxx_gpio_set_multiple() 138 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8xxx_gpio_dir_in() local 139 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gpio_dir_in() 144 clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); mpc8xxx_gpio_dir_in() 153 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8xxx_gpio_dir_out() local 154 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gpio_dir_out() 161 setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); mpc8xxx_gpio_dir_out() 179 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); mpc8xxx_gpio_to_irq() local 180 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gpio_to_irq() 192 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; mpc8xxx_gpio_irq_cascade() local 195 mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR); mpc8xxx_gpio_irq_cascade() 206 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; mpc8xxx_irq_unmask() local 211 setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); mpc8xxx_irq_unmask() 219 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; mpc8xxx_irq_mask() local 224 clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); mpc8xxx_irq_mask() 232 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; mpc8xxx_irq_ack() local 234 out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); mpc8xxx_irq_ack() 240 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; mpc8xxx_irq_set_type() local 246 setbits32(mm->regs + GPIO_ICR, mpc8xxx_irq_set_type() 253 clrbits32(mm->regs + GPIO_ICR, mpc8xxx_irq_set_type() 268 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; mpc512x_irq_set_type() local 275 reg = mm->regs + GPIO_ICR; mpc512x_irq_set_type() 278 reg = mm->regs + GPIO_ICR2; mpc512x_irq_set_type()
|
/linux-4.1.27/arch/x86/mm/ |
H A D | tlb.c | 3 #include <linux/mm.h> 39 * instead update mm->cpu_vm_mask. 64 * 1a) thread switch to a different mm 69 * Now cpu0 accepts tlb flushes for the new mm. 74 * Stop ipi delivery for the old mm. This is not synchronized with 76 * mm, and in the worst case we perform a superfluous tlb flush. 77 * 1b) thread switch without mm change 87 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. 98 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. 99 * 2) Leave the mm if we are in the lazy tlb mode. 134 struct mm_struct *mm, unsigned long start, native_flush_tlb_others() 138 info.flush_mm = mm; native_flush_tlb_others() 147 cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu); native_flush_tlb_others() 158 struct mm_struct *mm = current->mm; flush_tlb_current_task() local 168 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_current_task() 169 flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); flush_tlb_current_task() local 185 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, flush_tlb_mm_range() argument 193 if (current->active_mm != mm) { flush_tlb_mm_range() 200 if (!current->mm) { flush_tlb_mm_range() 233 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_mm_range() 234 flush_tlb_others(mm_cpumask(mm), mm, start, end); flush_tlb_mm_range() local 240 struct mm_struct *mm = vma->vm_mm; flush_tlb_page() local 244 if (current->active_mm == mm) { flush_tlb_page() 245 if (current->mm) { flush_tlb_page() 259 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_page() 260 flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); flush_tlb_page() local 133 native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long start, unsigned long end) native_flush_tlb_others() argument
|
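The x86 tlb.c above describes the local-flush-plus-IPI scheme; flush_tlb_mm_range() is the entry point that drives it. A hedged sketch of a caller, assuming the PTEs of [start, end) were just rewritten; the helper is hypothetical and x86-specific.

#include <linux/mm.h>
#include <asm/tlbflush.h>

static void example_sync_range(struct mm_struct *mm,
                               unsigned long start, unsigned long end)
{
        /* Flushes the local CPU and IPIs every other CPU in mm_cpumask(mm);
         * VM_NONE means there are no special VMA flags (e.g. VM_HUGETLB) to consider. */
        flush_tlb_mm_range(mm, start, end, VM_NONE);
}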
/linux-4.1.27/arch/microblaze/include/asm/ |
H A D | mmu_context_mm.h | 36 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 79 * Get a new mmu context for the address space described by `mm'. 81 static inline void get_mmu_context(struct mm_struct *mm) get_mmu_context() argument 85 if (mm->context != NO_CONTEXT) get_mmu_context() 96 mm->context = ctx; get_mmu_context() 97 context_mm[ctx] = mm; get_mmu_context() 103 # define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) 108 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument 110 if (mm->context != NO_CONTEXT) { destroy_context() 111 clear_bit(mm->context, context_map); destroy_context() 112 mm->context = NO_CONTEXT; destroy_context() 126 * After we have set current->mm to a new value, this activates 127 * the context for the new mm so we see the new mappings. 130 struct mm_struct *mm) activate_mm() 132 current->thread.pgdir = mm->pgd; activate_mm() 133 get_mmu_context(mm); activate_mm() 134 set_context(mm->context, mm->pgd); activate_mm() 129 activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) activate_mm() argument
|
H A D | pgalloc.h | 99 #define pgd_free(mm, pgd) free_pgd_fast(pgd) 100 #define pgd_alloc(mm) get_pgd_fast() 108 #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) 109 #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) 111 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); 113 static inline struct page *pte_alloc_one(struct mm_struct *mm, pte_alloc_one() argument 135 static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, pte_alloc_one_fast() argument 156 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 166 static inline void pte_free(struct mm_struct *mm, struct page *ptepage) pte_free() argument 172 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte)) 174 #define pmd_populate(mm, pmd, pte) \ 177 #define pmd_populate_kernel(mm, pmd, pte) \ 184 #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) 185 #define pmd_free(mm, x) do { } while (0) 186 #define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x) 187 #define pgd_populate(mm, pmd, pte) BUG()
|
/linux-4.1.27/arch/frv/mm/ |
H A D | mmu-context.c | 13 #include <linux/mm.h> 29 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument 31 memset(&mm->context, 0, sizeof(mm->context)); init_new_context() 32 INIT_LIST_HEAD(&mm->context.id_link); init_new_context() 33 mm->context.itlb_cached_pge = 0xffffffffUL; init_new_context() 34 mm->context.dtlb_cached_pge = 0xffffffffUL; init_new_context() 130 void destroy_context(struct mm_struct *mm) destroy_context() argument 132 mm_context_t *ctx = &mm->context; destroy_context() 154 char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer) proc_pid_status_frv_cxnr() argument 157 buffer += sprintf(buffer, "CXNR: %u\n", mm->context.id); proc_pid_status_frv_cxnr() 171 struct mm_struct *mm = NULL; cxn_pin_by_pid() local 189 if (tsk->mm) { cxn_pin_by_pid() 190 mm = tsk->mm; cxn_pin_by_pid() 191 atomic_inc(&mm->mm_users); cxn_pin_by_pid() 203 cxn_pinned = get_cxn(&mm->context); cxn_pin_by_pid() 206 mmput(mm); cxn_pin_by_pid()
|
/linux-4.1.27/arch/sh/kernel/vsyscall/ |
H A D | vsyscall.c | 13 #include <linux/mm.h> 63 struct mm_struct *mm = current->mm; arch_setup_additional_pages() local 67 down_write(&mm->mmap_sem); arch_setup_additional_pages() 74 ret = install_special_mapping(mm, addr, PAGE_SIZE, arch_setup_additional_pages() 81 current->mm->context.vdso = (void *)addr; arch_setup_additional_pages() 84 up_write(&mm->mmap_sem); arch_setup_additional_pages()
|
/linux-4.1.27/drivers/misc/cxl/ |
H A D | fault.c | 13 #include <linux/mm.h> 87 static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm, cxl_fault_segment() argument 93 if (!(rc = copro_calculate_slb(mm, ea, &slb))) { cxl_fault_segment() 116 struct mm_struct *mm, u64 ea) cxl_handle_segment_miss() 123 if ((rc = cxl_fault_segment(ctx, mm, ea))) cxl_handle_segment_miss() 135 struct mm_struct *mm, u64 dsisr, u64 dar) cxl_handle_page_fault() 143 if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) { cxl_handle_page_fault() 162 hash_page_mm(mm, dar, access, 0x300, inv_flags); cxl_handle_page_fault() 176 struct mm_struct *mm; cxl_handle_fault() local 203 if (!(mm = get_task_mm(task))) { cxl_handle_fault() 204 pr_devel("cxl_handle_fault unable to get mm %i\n", cxl_handle_fault() 211 cxl_handle_segment_miss(ctx, mm, dar); cxl_handle_fault() 213 cxl_handle_page_fault(ctx, mm, dsisr, dar); cxl_handle_fault() 217 mmput(mm); cxl_handle_fault() 226 struct mm_struct *mm; cxl_prefault_one() local 233 if (!(mm = get_task_mm(task))) { cxl_prefault_one() 234 pr_devel("cxl_prefault_one unable to get mm %i\n", cxl_prefault_one() 240 rc = cxl_fault_segment(ctx, mm, ea); cxl_prefault_one() 242 mmput(mm); cxl_prefault_one() 263 struct mm_struct *mm; cxl_prefault_vma() local 270 if (!(mm = get_task_mm(task))) { cxl_prefault_vma() 271 pr_devel("cxl_prefault_vm unable to get mm %i\n", cxl_prefault_vma() 276 down_read(&mm->mmap_sem); cxl_prefault_vma() 277 for (vma = mm->mmap; vma; vma = vma->vm_next) { cxl_prefault_vma() 280 rc = copro_calculate_slb(mm, ea, &slb); cxl_prefault_vma() 291 up_read(&mm->mmap_sem); cxl_prefault_vma() 293 mmput(mm); cxl_prefault_vma() 115 cxl_handle_segment_miss(struct cxl_context *ctx, struct mm_struct *mm, u64 ea) cxl_handle_segment_miss() argument 134 cxl_handle_page_fault(struct cxl_context *ctx, struct mm_struct *mm, u64 dsisr, u64 dar) cxl_handle_page_fault() argument
|
/linux-4.1.27/arch/mn10300/mm/ |
H A D | tlb-smp.c | 52 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, 91 * @mm: The VM context to flush from (if va!=FLUSH_ALL). 94 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, flush_tlb_others() argument 104 BUG_ON(!mm); flush_tlb_others() 119 flush_mm = mm; flush_tlb_others() 141 * @mm: The VM context to invalidate. 143 void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 148 cpumask_copy(&cpu_mask, mm_cpumask(mm)); flush_tlb_mm() 153 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); flush_tlb_mm() 163 struct mm_struct *mm = current->mm; flush_tlb_current_task() local 167 cpumask_copy(&cpu_mask, mm_cpumask(mm)); flush_tlb_current_task() 172 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); flush_tlb_current_task() 184 struct mm_struct *mm = vma->vm_mm; flush_tlb_page() local 188 cpumask_copy(&cpu_mask, mm_cpumask(mm)); flush_tlb_page() 191 local_flush_tlb_page(mm, va); flush_tlb_page() 193 flush_tlb_others(cpu_mask, mm, va); flush_tlb_page()
|
/linux-4.1.27/include/trace/events/ |
H A D | xen.h | 168 TP_PROTO(struct mm_struct *mm, unsigned long addr, 170 TP_ARGS(mm, addr, ptep, pteval), 172 __field(struct mm_struct *, mm) 177 TP_fast_assign(__entry->mm = mm; 181 TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)", 182 __entry->mm, __entry->addr, __entry->ptep, 188 TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep), 189 TP_ARGS(mm, addr, ptep), 191 __field(struct mm_struct *, mm) 195 TP_fast_assign(__entry->mm = mm; 198 TP_printk("mm %p addr %lx ptep %p", 199 __entry->mm, __entry->addr, __entry->ptep) 300 TP_PROTO(struct mm_struct *mm, unsigned long addr, 302 TP_ARGS(mm, addr, ptep, pteval), 304 __field(struct mm_struct *, mm) 309 TP_fast_assign(__entry->mm = mm; 313 TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)", 314 __entry->mm, __entry->addr, __entry->ptep, 320 TP_PROTO(struct mm_struct *mm, unsigned long addr, \ 322 TP_ARGS(mm, addr, ptep, pteval)) 328 TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned), 329 TP_ARGS(mm, pfn, level, pinned), 331 __field(struct mm_struct *, mm) 336 TP_fast_assign(__entry->mm = mm; 340 TP_printk("mm %p pfn %lx level %d %spinned", 341 __entry->mm, __entry->pfn, __entry->level, 362 TP_PROTO(struct mm_struct *mm, pgd_t *pgd), 363 TP_ARGS(mm, pgd), 365 __field(struct mm_struct *, mm) 368 TP_fast_assign(__entry->mm = mm; 370 TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd) 374 TP_PROTO(struct mm_struct *mm, pgd_t *pgd), \ 375 TP_ARGS(mm, pgd)) 407 TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm, 409 TP_ARGS(cpus, mm, addr, end), 412 __field(struct mm_struct *, mm) 417 __entry->mm = mm; 420 TP_printk("ncpus %d mm %p addr %lx, end %lx", 421 __entry->ncpus, __entry->mm, __entry->addr, __entry->end)
|
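The Xen trace header above follows the standard TRACE_EVENT layout: prototype, stored fields, assignment, format string. Below is a hypothetical event in the same shape, with the TRACE_SYSTEM and <trace/define_trace.h> boilerplate omitted; it is not part of the header above.

TRACE_EVENT(example_mm_touch,
        TP_PROTO(struct mm_struct *mm, unsigned long addr),
        TP_ARGS(mm, addr),
        TP_STRUCT__entry(
                __field(struct mm_struct *, mm)
                __field(unsigned long, addr)
        ),
        TP_fast_assign(
                __entry->mm = mm;
                __entry->addr = addr;
        ),
        TP_printk("mm %p addr %lx", __entry->mm, __entry->addr)
);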
/linux-4.1.27/arch/arm64/kernel/ |
H A D | vdso.c | 27 #include <linux/mm.h> 89 struct mm_struct *mm = current->mm; aarch32_setup_vectors_page() local 98 down_write(&mm->mmap_sem); aarch32_setup_vectors_page() 99 current->mm->context.vdso = (void *)addr; aarch32_setup_vectors_page() 102 ret = _install_special_mapping(mm, addr, PAGE_SIZE, aarch32_setup_vectors_page() 106 up_write(&mm->mmap_sem); aarch32_setup_vectors_page() 158 struct mm_struct *mm = current->mm; arch_setup_additional_pages() local 166 down_write(&mm->mmap_sem); arch_setup_additional_pages() 172 ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, arch_setup_additional_pages() 179 mm->context.vdso = (void *)vdso_base; arch_setup_additional_pages() 180 ret = _install_special_mapping(mm, vdso_base, vdso_text_len, arch_setup_additional_pages() 188 up_write(&mm->mmap_sem); arch_setup_additional_pages() 192 mm->context.vdso = NULL; arch_setup_additional_pages() 193 up_write(&mm->mmap_sem); arch_setup_additional_pages()
|
/linux-4.1.27/arch/m32r/mm/ |
H A D | extable.c | 2 * linux/arch/m32r/mm/extable.c
|
/linux-4.1.27/arch/arm/mach-gemini/ |
H A D | Makefile | 7 obj-y := irq.o mm.o time.o devices.o gpio.o idle.o reset.o
|
/linux-4.1.27/arch/x86/xen/ |
H A D | mmu.h | 18 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 19 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
|
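The Xen mmu.h above declares the backends for the ptep_modify_prot_start()/ptep_modify_prot_commit() pair, which lets a hypervisor batch or trap PTE updates. A sketch of the generic transaction those hooks implement; the helper itself is hypothetical.

#include <linux/mm.h>
#include <asm/pgtable.h>

static void example_write_protect_pte(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        /* take the old PTE out of the table, derive the new value, install it */
        pte_t pte = ptep_modify_prot_start(mm, addr, ptep);

        pte = pte_wrprotect(pte);
        ptep_modify_prot_commit(mm, addr, ptep, pte);
}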
/linux-4.1.27/arch/mips/kernel/ |
H A D | vdso.c | 13 #include <linux/mm.h> 90 struct mm_struct *mm = current->mm; arch_setup_additional_pages() local 92 down_write(&mm->mmap_sem); arch_setup_additional_pages() 94 addr = vdso_addr(mm->start_stack); arch_setup_additional_pages() 102 ret = install_special_mapping(mm, addr, PAGE_SIZE, arch_setup_additional_pages() 110 mm->context.vdso = (void *)addr; arch_setup_additional_pages() 113 up_write(&mm->mmap_sem); arch_setup_additional_pages()
|
/linux-4.1.27/arch/frv/include/asm/ |
H A D | tlb.h | 20 * .. because we flush the whole mm when it fills up 22 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
|
H A D | mmu_context.h | 20 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) enter_lazy_tlb() argument 25 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 27 extern void destroy_context(struct mm_struct *mm); 30 #define init_new_context(tsk, mm) ({ 0; }) 32 #define destroy_context(mm) do {} while(0) 46 #define deactivate_mm(tsk, mm) \
|
H A D | pgalloc.h | 23 #define pmd_populate_kernel(mm, pmd, pte) __set_pmd(pmd, __pa(pte) | _PAGE_TABLE) 35 extern void pgd_free(struct mm_struct *mm, pgd_t *); 41 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) pte_free_kernel() argument 46 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) pte_free() argument 63 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); }) 64 #define pmd_free(mm, x) do { } while (0)
|
/linux-4.1.27/arch/hexagon/include/asm/ |
H A D | mmu_context.h | 29 static inline void destroy_context(struct mm_struct *mm) destroy_context() argument 38 static inline void enter_lazy_tlb(struct mm_struct *mm, enter_lazy_tlb() argument 47 struct mm_struct *mm) deactivate_mm() 54 * @mm: pointer to a new mm struct 57 struct mm_struct *mm) init_new_context() 59 /* mm->context is set up by pgd_alloc */ init_new_context() 64 * Switch active mm context 46 deactivate_mm(struct task_struct *tsk, struct mm_struct *mm) deactivate_mm() argument 56 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument
|
/linux-4.1.27/include/drm/ |
H A D | drm_mm.h | 73 struct drm_mm *mm; member in struct:drm_mm_node 114 * @mm: drm_mm to check 120 * True if the @mm is initialized. 122 static inline bool drm_mm_initialized(struct drm_mm *mm) drm_mm_initialized() argument 124 return mm->hole_stack.next; drm_mm_initialized() 174 * @mm: drm_mm allocator to walk 179 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ 180 &(mm)->head_node.node_list, \ 186 * @mm: drm_mm allocator to walk 202 #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ 203 for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ 204 &entry->hole_stack != &(mm)->hole_stack ? \ 210 #define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ 211 for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ 212 &entry->hole_stack != &(mm)->hole_stack ? \ 221 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); 223 int drm_mm_insert_node_generic(struct drm_mm *mm, 232 * @mm: drm_mm to allocate from 246 static inline int drm_mm_insert_node(struct drm_mm *mm, drm_mm_insert_node() argument 252 return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags, drm_mm_insert_node() 256 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, 267 * @mm: drm_mm to allocate from 283 static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, drm_mm_insert_node_in_range() argument 291 return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, drm_mm_insert_node_in_range() 298 void drm_mm_init(struct drm_mm *mm, 301 void drm_mm_takedown(struct drm_mm *mm); 302 bool drm_mm_clean(struct drm_mm *mm); 304 void drm_mm_init_scan(struct drm_mm *mm, 308 void drm_mm_init_scan_with_range(struct drm_mm *mm, 317 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix); 319 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
|
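drm_mm.h above also provides iteration macros over the allocator. A small sketch using drm_mm_for_each_node() to total the allocated space; the helper name is hypothetical.

#include <drm/drm_mm.h>

static u64 example_total_allocated(struct drm_mm *mm)
{
        struct drm_mm_node *node;
        u64 total = 0;

        drm_mm_for_each_node(node, mm)          /* skips the internal head node */
                total += node->size;
        return total;
}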
/linux-4.1.27/arch/m68k/mm/ |
H A D | fault.c | 2 * linux/arch/m68k/mm/fault.c 8 #include <linux/mm.h> 72 struct mm_struct *mm = current->mm; do_page_fault() local 78 regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL); do_page_fault() 84 if (in_atomic() || !mm) do_page_fault() 90 down_read(&mm->mmap_sem); do_page_fault() 92 vma = find_vma(mm, address); do_page_fault() 139 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault() 172 * No need to up_read(&mm->mmap_sem) as we would do_page_fault() 174 * in mm/filemap.c. do_page_fault() 181 up_read(&mm->mmap_sem); do_page_fault() 189 up_read(&mm->mmap_sem); do_page_fault() 218 up_read(&mm->mmap_sem); do_page_fault()
|
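The m68k fault.c above has the shape shared by most arch fault handlers in this tree: look up the VMA under mmap_sem and let handle_mm_fault() resolve the fault. A condensed sketch of just that core, using the 4.1-era handle_mm_fault() that still takes the mm; stack expansion, retry and OOM/signal paths are deliberately omitted.

#include <linux/mm.h>
#include <linux/sched.h>

static int example_resolve_fault(unsigned long address, unsigned int flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int fault = VM_FAULT_SIGSEGV;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (vma && vma->vm_start <= address)
                fault = handle_mm_fault(mm, vma, address, flags);
        up_read(&mm->mmap_sem);

        return fault;
}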
H A D | mcfmmu.c | 2 * Based upon linux/arch/m68k/mm/sun3mmu.c 3 * Based upon linux/arch/ppc/mm/mmu_context.c 5 * Implementations of mm routines specific to the Coldfire MMU. 12 #include <linux/mm.h> 76 current->mm = NULL; paging_init() 87 struct mm_struct *mm; cf_tlb_miss() local 98 mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm; cf_tlb_miss() 99 if (!mm) { cf_tlb_miss() 104 pgd = pgd_offset(mm, mmuar); cf_tlb_miss() 132 asid = mm->context & 0xff; cf_tlb_miss() 184 struct mm_struct *mm; steal_context() local 191 mm = context_mm[next_mmu_context]; steal_context() 192 flush_tlb_mm(mm); steal_context() 193 destroy_context(mm); steal_context()
|
/linux-4.1.27/arch/x86/kernel/ |
H A D | ldt.c | 13 #include <linux/mm.h> 106 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument 112 mutex_init(&mm->context.lock); init_new_context() 113 old_mm = current->mm; init_new_context() 115 mm->context.ldt = NULL; init_new_context() 121 mm->context.ldt = NULL; init_new_context() 135 mm->context.ldt = new_ldt; init_new_context() 147 void destroy_context(struct mm_struct *mm) destroy_context() argument 149 free_ldt_struct(mm->context.ldt); destroy_context() 150 mm->context.ldt = NULL; destroy_context() 157 struct mm_struct *mm = current->mm; read_ldt() local 159 mutex_lock(&mm->context.lock); read_ldt() 161 if (!mm->context.ldt) { read_ldt() 169 size = mm->context.ldt->size * LDT_ENTRY_SIZE; read_ldt() 173 if (copy_to_user(ptr, mm->context.ldt->entries, size)) { read_ldt() 188 mutex_unlock(&mm->context.lock); read_ldt() 209 struct mm_struct *mm = current->mm; write_ldt() local 248 mutex_lock(&mm->context.lock); write_ldt() 250 old_ldt = mm->context.ldt; write_ldt() 264 install_ldt(mm, new_ldt); write_ldt() 269 mutex_unlock(&mm->context.lock); write_ldt()
|
/linux-4.1.27/arch/xtensa/mm/ |
H A D | tlb.c | 2 * arch/xtensa/mm/tlb.c 17 #include <linux/mm.h> 57 /* If mm is current, we simply assign the current task a new ASID, thus, 58 * invalidating all previous tlb entries. If mm is someone else's user mapping, 63 void local_flush_tlb_mm(struct mm_struct *mm) local_flush_tlb_mm() argument 67 if (mm == current->active_mm) { local_flush_tlb_mm() 70 mm->context.asid[cpu] = NO_CONTEXT; local_flush_tlb_mm() 71 activate_context(mm, cpu); local_flush_tlb_mm() 74 mm->context.asid[cpu] = NO_CONTEXT; local_flush_tlb_mm() 75 mm->context.cpu = -1; local_flush_tlb_mm() 92 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range() local 95 if (mm->context.asid[cpu] == NO_CONTEXT) local_flush_tlb_range() 100 (unsigned long)mm->context.asid[cpu], start, end); local_flush_tlb_range() 107 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); local_flush_tlb_range() 123 local_flush_tlb_mm(mm); local_flush_tlb_range() 131 struct mm_struct* mm = vma->vm_mm; local_flush_tlb_page() local 135 if (mm->context.asid[cpu] == NO_CONTEXT) local_flush_tlb_page() 141 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); local_flush_tlb_page() 172 struct mm_struct *mm = task->mm; get_pte_for_vaddr() local 177 if (!mm) get_pte_for_vaddr() 178 mm = task->active_mm; get_pte_for_vaddr() 179 pgd = pgd_offset(mm, vaddr); get_pte_for_vaddr()
|
/linux-4.1.27/arch/sh/kernel/ |
H A D | smp.c | 19 #include <linux/mm.h> 180 struct mm_struct *mm = &init_mm; start_secondary() local 183 atomic_inc(&mm->mm_count); start_secondary() 184 atomic_inc(&mm->mm_users); start_secondary() 185 current->active_mm = mm; start_secondary() 186 enter_lazy_tlb(mm, current); start_secondary() 341 static void flush_tlb_mm_ipi(void *mm) flush_tlb_mm_ipi() argument 343 local_flush_tlb_mm((struct mm_struct *)mm); flush_tlb_mm_ipi() 351 * at switch_mm time, should the mm ever be used on other cpus. For 354 * mm might be active on another cpu (eg debuggers doing the flushes on 358 void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 362 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { flush_tlb_mm() 363 smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); flush_tlb_mm() 368 cpu_context(i, mm) = 0; flush_tlb_mm() 370 local_flush_tlb_mm(mm); flush_tlb_mm() 391 struct mm_struct *mm = vma->vm_mm; flush_tlb_range() local 394 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { flush_tlb_range() 405 cpu_context(i, mm) = 0; flush_tlb_range() 438 (current->mm != vma->vm_mm)) { flush_tlb_page()
|
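The SH smp.c above broadcasts flushes with smp_call_function() when the mm may be live on other CPUs. A generic sketch of that primitive with a hypothetical payload; it must not be called with interrupts disabled.

#include <linux/smp.h>

static void example_remote_work(void *info)
{
        /* runs on every other online CPU, in IPI context */
}

static void example_broadcast(void *arg)
{
        smp_call_function(example_remote_work, arg, 1 /* wait for completion */);
}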
/linux-4.1.27/arch/tile/kernel/ |
H A D | tlb.c | 34 void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 38 for_each_cpu(cpu, mm_cpumask(mm)) { for_each_cpu() 44 flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm), 50 flush_tlb_mm(current->mm); flush_tlb_current_task() 53 void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm, flush_tlb_page_mm() argument 58 flush_remote(0, cache, mm_cpumask(mm), flush_tlb_page_mm() 59 va, size, size, mm_cpumask(mm), NULL, 0); flush_tlb_page_mm() 72 struct mm_struct *mm = vma->vm_mm; flush_tlb_range() local 74 flush_remote(0, cache, mm_cpumask(mm), start, end - start, size, flush_tlb_range() 75 mm_cpumask(mm), NULL, 0); flush_tlb_range()
|
/linux-4.1.27/arch/metag/mm/ |
H A D | fault.c | 8 #include <linux/mm.h> 52 struct mm_struct *mm; do_page_fault() local 106 mm = tsk->mm; do_page_fault() 108 if (in_atomic() || !mm) do_page_fault() 114 down_read(&mm->mmap_sem); do_page_fault() 116 vma = find_vma_prev(mm, address, &prev_vma); do_page_fault() 136 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault() 160 * No need to up_read(&mm->mmap_sem) as we would do_page_fault() 162 * in mm/filemap.c. do_page_fault() 169 up_read(&mm->mmap_sem); do_page_fault() 178 up_read(&mm->mmap_sem); do_page_fault() 206 up_read(&mm->mmap_sem); do_page_fault() 230 up_read(&mm->mmap_sem); do_page_fault()
|
H A D | hugetlbpage.c | 2 * arch/metag/mm/hugetlbpage.c 15 #include <linux/mm.h> 33 struct mm_struct *mm = current->mm; prepare_hugepage_range() local 44 vma = find_vma(mm, ALIGN_HUGEPT(addr)); prepare_hugepage_range() 48 vma = find_vma(mm, addr); prepare_hugepage_range() 59 pte_t *huge_pte_alloc(struct mm_struct *mm, huge_pte_alloc() argument 67 pgd = pgd_offset(mm, addr); huge_pte_alloc() 70 pte = pte_alloc_map(mm, NULL, pmd, addr); huge_pte_alloc() 77 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) huge_pte_offset() argument 84 pgd = pgd_offset(mm, addr); huge_pte_offset() 92 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) huge_pmd_unshare() argument 107 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, follow_huge_pmd() argument 130 struct mm_struct *mm = current->mm; hugetlb_get_unmapped_area_existing() local 135 if (mm->context.part_huge) { hugetlb_get_unmapped_area_existing() 136 start_addr = mm->context.part_huge; hugetlb_get_unmapped_area_existing() 145 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { hugetlb_get_unmapped_area_existing() 165 mm->context.part_huge = end; hugetlb_get_unmapped_area_existing() 166 else if (addr == mm->context.part_huge) hugetlb_get_unmapped_area_existing() 167 mm->context.part_huge = 0; hugetlb_get_unmapped_area_existing()
|
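huge_pte_offset() in the metag file above walks pgd, pud and pmd by hand. A generic read-only sketch of the same descent for a normal (non-huge) mapping, checking each level before dereferencing; the helper is hypothetical.

#include <linux/mm.h>
#include <asm/pgtable.h>

static pte_t *example_walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return NULL;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || pud_bad(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;

        return pte_offset_map(pmd, addr);       /* caller pairs with pte_unmap() */
}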
/linux-4.1.27/arch/cris/arch-v10/mm/ |
H A D | tlb.c | 2 * linux/arch/cris/arch-v10/mm/tlb.c 19 /* The TLB can host up to 64 different mm contexts at the same time. 22 * of which mm's we have assigned which page_id's, so that we know when 61 /* invalidate the selected mm context only */ 64 flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 67 int page_id = mm->context.page_id; flush_tlb_mm() 70 D(printk("tlb: flush mm context %d (%p)\n", page_id, mm)); flush_tlb_mm() 101 struct mm_struct *mm = vma->vm_mm; flush_tlb_page() local 102 int page_id = mm->context.page_id; flush_tlb_page() 106 D(printk("tlb: flush page %p in context %d (%p)\n", addr, page_id, mm)); flush_tlb_page() 113 /* invalidate those TLB entries that match both the mm context flush_tlb_page() 143 init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context() argument 145 mm->context.page_id = NO_CONTEXT; init_new_context()
|
/linux-4.1.27/arch/hexagon/kernel/ |
H A D | vdso.c | 22 #include <linux/mm.h> 66 struct mm_struct *mm = current->mm; arch_setup_additional_pages() local 68 down_write(&mm->mmap_sem); arch_setup_additional_pages() 80 ret = install_special_mapping(mm, vdso_base, PAGE_SIZE, arch_setup_additional_pages() 88 mm->context.vdso = (void *)vdso_base; arch_setup_additional_pages() 91 up_write(&mm->mmap_sem); arch_setup_additional_pages()
|
/linux-4.1.27/arch/alpha/kernel/ |
H A D | smp.c | 18 #include <linux/mm.h> 146 /* All kernel threads share the same mm context. */ smp_callin() 642 struct mm_struct *mm = (struct mm_struct *) x; ipi_flush_tlb_mm() local 643 if (mm == current->active_mm && !asn_locked()) ipi_flush_tlb_mm() 644 flush_tlb_current(mm); ipi_flush_tlb_mm() 646 flush_tlb_other(mm); ipi_flush_tlb_mm() 650 flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 654 if (mm == current->active_mm) { flush_tlb_mm() 655 flush_tlb_current(mm); flush_tlb_mm() 656 if (atomic_read(&mm->mm_users) <= 1) { flush_tlb_mm() 661 if (mm->context[cpu]) flush_tlb_mm() 662 mm->context[cpu] = 0; flush_tlb_mm() 669 if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) { flush_tlb_mm() 679 struct mm_struct *mm; member in struct:flush_tlb_page_struct 687 struct mm_struct * mm = data->mm; ipi_flush_tlb_page() local 689 if (mm == current->active_mm && !asn_locked()) ipi_flush_tlb_page() 690 flush_tlb_current_page(mm, data->vma, data->addr); ipi_flush_tlb_page() 692 flush_tlb_other(mm); ipi_flush_tlb_page() 699 struct mm_struct *mm = vma->vm_mm; flush_tlb_page() local 703 if (mm == current->active_mm) { flush_tlb_page() 704 flush_tlb_current_page(mm, vma, addr); flush_tlb_page() 705 if (atomic_read(&mm->mm_users) <= 1) { flush_tlb_page() 710 if (mm->context[cpu]) flush_tlb_page() 711 mm->context[cpu] = 0; flush_tlb_page() 719 data.mm = mm; flush_tlb_page() 741 struct mm_struct *mm = (struct mm_struct *) x; ipi_flush_icache_page() local 742 if (mm == current->active_mm && !asn_locked()) ipi_flush_icache_page() 743 __load_new_mm_context(mm); ipi_flush_icache_page() 745 flush_tlb_other(mm); ipi_flush_icache_page() 752 struct mm_struct *mm = vma->vm_mm; flush_icache_user_range() local 759 if (mm == current->active_mm) { flush_icache_user_range() 760 __load_new_mm_context(mm); flush_icache_user_range() 761 if (atomic_read(&mm->mm_users) <= 1) { flush_icache_user_range() 766 if (mm->context[cpu]) flush_icache_user_range() 767 mm->context[cpu] = 0; flush_icache_user_range() 774 if (smp_call_function(ipi_flush_icache_page, mm, 1)) { flush_icache_user_range()
|
/linux-4.1.27/drivers/gpu/drm/radeon/ |
H A D | radeon_mn.c | 42 struct mm_struct *mm; member in struct:radeon_mn 90 mmu_notifier_unregister(&rmn->mn, rmn->mm); radeon_mn_destroy() 95 * radeon_mn_release - callback to notify about mm destruction 98 * @mn: the mm this callback is about 103 struct mm_struct *mm) radeon_mn_release() 111 * radeon_mn_invalidate_range_start - callback to notify about mm change 114 * @mn: the mm this callback is about 122 struct mm_struct *mm, radeon_mn_invalidate_range_start() 181 * Creates a notifier context for current->mm. 185 struct mm_struct *mm = current->mm; radeon_mn_get() local 189 down_write(&mm->mmap_sem); radeon_mn_get() 192 hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm) radeon_mn_get() 193 if (rmn->mm == mm) radeon_mn_get() 203 rmn->mm = mm; radeon_mn_get() 208 r = __mmu_notifier_register(&rmn->mn, mm); radeon_mn_get() 212 hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm); radeon_mn_get() 216 up_write(&mm->mmap_sem); radeon_mn_get() 222 up_write(&mm->mmap_sem); radeon_mn_get() 102 radeon_mn_release(struct mmu_notifier *mn, struct mm_struct *mm) radeon_mn_release() argument 121 radeon_mn_invalidate_range_start(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) radeon_mn_invalidate_range_start() argument
|
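radeon_mn.c above keeps one mmu_notifier per mm so the driver hears about address-space changes. A minimal sketch of the registration step that radeon_mn_get() builds on; the ops table and names here are hypothetical and mostly empty.

#include <linux/mmu_notifier.h>
#include <linux/sched.h>

static void example_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        /* the address space is being torn down; drop driver references here */
}

static const struct mmu_notifier_ops example_mn_ops = {
        .release = example_mn_release,
        /* .invalidate_range_start/_end would unpin pages the driver holds */
};

static struct mmu_notifier example_mn = { .ops = &example_mn_ops };

static int example_hook_current_mm(void)
{
        /* takes mmap_sem and pins mm->mm_count internally */
        return mmu_notifier_register(&example_mn, current->mm);
}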
/linux-4.1.27/drivers/gpu/drm/i915/ |
H A D | i915_gem_userptr.c | 36 struct mm_struct *mm; member in struct:i915_mm_struct 79 was_interruptible = dev_priv->mm.interruptible; cancel_userptr() 80 dev_priv->mm.interruptible = false; cancel_userptr() 88 dev_priv->mm.interruptible = was_interruptible; cancel_userptr() 100 struct mm_struct *mm, invalidate_range__linear() 133 struct mm_struct *mm, i915_gem_userptr_mn_invalidate_range_start() 148 it = invalidate_range__linear(mn, mm, start, end); i915_gem_userptr_mn_invalidate_range_start() 184 i915_mmu_notifier_create(struct mm_struct *mm) i915_mmu_notifier_create() argument 201 ret = __mmu_notifier_register(&mn->mn, mm); i915_mmu_notifier_create() 310 i915_mmu_notifier_find(struct i915_mm_struct *mm) i915_mmu_notifier_find() argument 312 struct i915_mmu_notifier *mn = mm->mn; i915_mmu_notifier_find() 314 mn = mm->mn; i915_mmu_notifier_find() 318 down_write(&mm->mm->mmap_sem); i915_mmu_notifier_find() 319 mutex_lock(&to_i915(mm->dev)->mm_lock); i915_mmu_notifier_find() 320 if ((mn = mm->mn) == NULL) { i915_mmu_notifier_find() 321 mn = i915_mmu_notifier_create(mm->mm); i915_mmu_notifier_find() 323 mm->mn = mn; i915_mmu_notifier_find() 325 mutex_unlock(&to_i915(mm->dev)->mm_lock); i915_mmu_notifier_find() 326 up_write(&mm->mm->mmap_sem); i915_mmu_notifier_find() 342 if (WARN_ON(obj->userptr.mm == NULL)) i915_gem_userptr_init__mmu_notifier() 345 mn = i915_mmu_notifier_find(obj->userptr.mm); i915_gem_userptr_init__mmu_notifier() 370 struct mm_struct *mm) i915_mmu_notifier_free() 375 mmu_notifier_unregister(&mn->mn, mm); i915_mmu_notifier_free() 401 struct mm_struct *mm) i915_mmu_notifier_free() 410 struct i915_mm_struct *mm; __i915_mm_struct_find() local 413 hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real) __i915_mm_struct_find() 414 if (mm->mm == real) __i915_mm_struct_find() 415 return mm; __i915_mm_struct_find() 424 struct i915_mm_struct *mm; i915_gem_userptr_init__mm_struct() local 433 * to defer releasing the mm reference until after we drop the i915_gem_userptr_init__mm_struct() 438 mm = __i915_mm_struct_find(dev_priv, current->mm); i915_gem_userptr_init__mm_struct() 439 if (mm == NULL) { i915_gem_userptr_init__mm_struct() 440 mm = kmalloc(sizeof(*mm), GFP_KERNEL); i915_gem_userptr_init__mm_struct() 441 if (mm == NULL) { i915_gem_userptr_init__mm_struct() 446 kref_init(&mm->kref); i915_gem_userptr_init__mm_struct() 447 mm->dev = obj->base.dev; i915_gem_userptr_init__mm_struct() 449 mm->mm = current->mm; i915_gem_userptr_init__mm_struct() 450 atomic_inc(¤t->mm->mm_count); i915_gem_userptr_init__mm_struct() 452 mm->mn = NULL; i915_gem_userptr_init__mm_struct() 456 &mm->node, (unsigned long)mm->mm); i915_gem_userptr_init__mm_struct() 458 kref_get(&mm->kref); i915_gem_userptr_init__mm_struct() 460 obj->userptr.mm = mm; i915_gem_userptr_init__mm_struct() 469 struct i915_mm_struct *mm = container_of(work, typeof(*mm), work); __i915_mm_struct_free__worker() local 470 i915_mmu_notifier_free(mm->mn, mm->mm); __i915_mm_struct_free__worker() 471 mmdrop(mm->mm); __i915_mm_struct_free__worker() 472 kfree(mm); __i915_mm_struct_free__worker() 478 struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref); __i915_mm_struct_free() local 481 hash_del(&mm->node); __i915_mm_struct_free() 482 mutex_unlock(&to_i915(mm->dev)->mm_lock); __i915_mm_struct_free() 484 INIT_WORK(&mm->work, __i915_mm_struct_free__worker); __i915_mm_struct_free() 485 schedule_work(&mm->work); __i915_mm_struct_free() 491 if (obj->userptr.mm == NULL) i915_gem_userptr_release__mm_struct() 494 
kref_put_mutex(&obj->userptr.mm->kref, i915_gem_userptr_release__mm_struct() 497 obj->userptr.mm = NULL; i915_gem_userptr_release__mm_struct() 563 struct mm_struct *mm = obj->userptr.mm->mm; __i915_gem_userptr_get_pages_worker() local 565 down_read(&mm->mmap_sem); __i915_gem_userptr_get_pages_worker() 567 ret = get_user_pages(work->task, mm, __i915_gem_userptr_get_pages_worker() 577 up_read(&mm->mmap_sem); __i915_gem_userptr_get_pages_worker() 586 list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list); __i915_gem_userptr_get_pages_worker() 629 if (obj->userptr.mm->mm == current->mm) { i915_gem_userptr_get_pages() 754 * Creates a new mm object that wraps some normal memory from the process 831 /* And keep a pointer to the current->mm for resolving the user pages i915_gem_userptr_ioctl() 99 invalidate_range__linear(struct i915_mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) invalidate_range__linear() argument 132 i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, struct mm_struct *mm, unsigned long start, unsigned long end) i915_gem_userptr_mn_invalidate_range_start() argument 369 i915_mmu_notifier_free(struct i915_mmu_notifier *mn, struct mm_struct *mm) i915_mmu_notifier_free() argument 400 i915_mmu_notifier_free(struct i915_mmu_notifier *mn, struct mm_struct *mm) i915_mmu_notifier_free() argument
|
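The i915 userptr worker above pins user pages with the 4.1-era get_user_pages(), which still takes the task and mm explicitly. A hedged sketch of pinning a single writable page from process context; the helper is hypothetical.

#include <linux/mm.h>
#include <linux/sched.h>

static struct page *example_pin_one_page(unsigned long uaddr)
{
        struct page *page;
        long pinned;

        down_read(&current->mm->mmap_sem);
        pinned = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
                                1, 1 /* write */, 0 /* force */, &page, NULL);
        up_read(&current->mm->mmap_sem);

        return pinned == 1 ? page : NULL;       /* release later with put_page() */
}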
/linux-4.1.27/arch/score/mm/ |
H A D | tlb-score.c | 2 * arch/score/mm/tlb-score.c 60 * If mm is currently active_mm, we can't really drop it. Instead, 64 drop_mmu_context(struct mm_struct *mm) drop_mmu_context() argument 69 get_new_mmu_context(mm); drop_mmu_context() 70 pevn_set(mm->context & ASID_MASK); drop_mmu_context() 74 void local_flush_tlb_mm(struct mm_struct *mm) local_flush_tlb_mm() argument 76 if (mm->context != 0) local_flush_tlb_mm() 77 drop_mmu_context(mm); local_flush_tlb_mm() 83 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range() local 84 unsigned long vma_mm_context = mm->context; local_flush_tlb_range() 85 if (mm->context != 0) { local_flush_tlb_range() 115 get_new_mmu_context(mm); local_flush_tlb_range() 116 if (mm == current->active_mm) local_flush_tlb_range()
|
/linux-4.1.27/arch/arm/kernel/ |
H A D | smp_tlb.c | 35 struct mm_struct *mm = (struct mm_struct *)arg; ipi_flush_tlb_mm() local 37 local_flush_tlb_mm(mm); ipi_flush_tlb_mm() 124 static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) broadcast_tlb_mm_a15_erratum() argument 133 a15_erratum_get_cpumask(this_cpu, mm, &mask); broadcast_tlb_mm_a15_erratum() 147 void flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm() argument 150 on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); flush_tlb_mm() local 152 __flush_tlb_mm(mm); flush_tlb_mm() 153 broadcast_tlb_mm_a15_erratum(mm); flush_tlb_mm()
|