
Searched refs:mm (Results 1 – 200 of 989) sorted by relevance


/linux-4.1.27/include/linux/
mmu_notifier.h
52 struct mm_struct *mm);
64 struct mm_struct *mm,
75 struct mm_struct *mm,
83 struct mm_struct *mm,
95 struct mm_struct *mm,
142 struct mm_struct *mm,
145 struct mm_struct *mm,
168 void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
188 static inline int mm_has_notifiers(struct mm_struct *mm) in mm_has_notifiers() argument
190 return unlikely(mm->mmu_notifier_mm); in mm_has_notifiers()
[all …]
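
The callbacks above are the 4.1 mmu_notifier API. A minimal sketch of how a secondary-MMU driver would hook in — the my_ctx struct, callback bodies, and helper name are hypothetical; the ops fields and mmu_notifier_register() call are the real 4.1 interface:

#include <linux/mmu_notifier.h>
#include <linux/sched.h>

/* Hypothetical driver context embedding the notifier. */
struct my_ctx {
	struct mmu_notifier mn;
};

static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* Address space is being torn down: drop cached translations. */
}

static void my_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	/* Mappings in [start, end) changed: shoot down secondary TLBs. */
}

static const struct mmu_notifier_ops my_ops = {
	.release          = my_release,
	.invalidate_range = my_invalidate_range,
};

static int my_ctx_register(struct my_ctx *ctx)
{
	ctx->mn.ops = &my_ops;
	/* Registers against current->mm; takes mmap_sem internally. */
	return mmu_notifier_register(&ctx->mn, current->mm);
}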
mm.h
1144 struct mm_struct *mm; member
1180 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1182 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1185 static inline int handle_mm_fault(struct mm_struct *mm, in handle_mm_fault() argument
1194 struct mm_struct *mm, unsigned long address, in fixup_user_fault() argument
1204 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1207 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1211 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1215 long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
1219 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
[all …]
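
mm.h above declares the 4.1-era eight-argument get_user_pages(). A sketch of pinning a single user page under mmap_sem — pin_one_page is a hypothetical helper:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Pin one user page for writing; release it with page_cache_release(). */
static struct page *pin_one_page(unsigned long uaddr)
{
	struct mm_struct *mm = current->mm;
	struct page *page;
	long ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, uaddr & PAGE_MASK, 1,
			     1 /* write */, 0 /* force */, &page, NULL);
	up_read(&mm->mmap_sem);

	return ret == 1 ? page : ERR_PTR(ret < 0 ? ret : -EFAULT);
}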
khugepaged.h
7 extern int __khugepaged_enter(struct mm_struct *mm);
8 extern void __khugepaged_exit(struct mm_struct *mm);
26 static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) in khugepaged_fork() argument
29 return __khugepaged_enter(mm); in khugepaged_fork()
33 static inline void khugepaged_exit(struct mm_struct *mm) in khugepaged_exit() argument
35 if (test_bit(MMF_VM_HUGEPAGE, &mm->flags)) in khugepaged_exit()
36 __khugepaged_exit(mm); in khugepaged_exit()
51 static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) in khugepaged_fork() argument
55 static inline void khugepaged_exit(struct mm_struct *mm) in khugepaged_exit() argument
mm_types.h
462 static inline void mm_init_cpumask(struct mm_struct *mm) in mm_init_cpumask() argument
465 mm->cpu_vm_mask_var = &mm->cpumask_allocation; in mm_init_cpumask()
467 cpumask_clear(mm->cpu_vm_mask_var); in mm_init_cpumask()
471 static inline cpumask_t *mm_cpumask(struct mm_struct *mm) in mm_cpumask() argument
473 return mm->cpu_vm_mask_var; in mm_cpumask()
483 static inline bool mm_tlb_flush_pending(struct mm_struct *mm) in mm_tlb_flush_pending() argument
486 return mm->tlb_flush_pending; in mm_tlb_flush_pending()
488 static inline void set_tlb_flush_pending(struct mm_struct *mm) in set_tlb_flush_pending() argument
490 mm->tlb_flush_pending = true; in set_tlb_flush_pending()
499 static inline void clear_tlb_flush_pending(struct mm_struct *mm) in clear_tlb_flush_pending() argument
[all …]
ksm.h
22 int __ksm_enter(struct mm_struct *mm);
23 void __ksm_exit(struct mm_struct *mm);
25 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) in ksm_fork() argument
28 return __ksm_enter(mm); in ksm_fork()
32 static inline void ksm_exit(struct mm_struct *mm) in ksm_exit() argument
34 if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) in ksm_exit()
35 __ksm_exit(mm); in ksm_exit()
69 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) in ksm_fork() argument
74 static inline void ksm_exit(struct mm_struct *mm) in ksm_exit() argument
vmacache.h
18 extern void vmacache_flush_all(struct mm_struct *mm);
20 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
24 extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
29 static inline void vmacache_invalidate(struct mm_struct *mm) in vmacache_invalidate() argument
31 mm->vmacache_seqnum++; in vmacache_invalidate()
34 if (unlikely(mm->vmacache_seqnum == 0)) in vmacache_invalidate()
35 vmacache_flush_all(mm); in vmacache_invalidate()
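
vmacache_find() above is consulted by find_vma() before the rbtree walk. One subtlety worth a sketch: find_vma() returns the first VMA whose vm_end lies above the address, so callers must still check vm_start — addr_is_mapped is a hypothetical helper:

#include <linux/mm.h>

/* mmap_sem must be held; find_vma() refills the vmacache on a miss. */
static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	/*
	 * find_vma() returns the first VMA ending above addr, which may
	 * still start above it, i.e. addr can sit in a hole below it.
	 */
	return vma && vma->vm_start <= addr;
}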
hugetlb.h
78 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
90 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
99 pte_t *huge_pte_alloc(struct mm_struct *mm,
101 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
102 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
103 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
105 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
107 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
126 #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) argument
135 #define follow_huge_pmd(mm, addr, pmd, flags) NULL argument
[all …]
/linux-4.1.27/arch/m68k/include/asm/
mmu_context.h
6 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
30 static inline void get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
34 if (mm->context != NO_CONTEXT) in get_mmu_context()
47 mm->context = ctx; in get_mmu_context()
48 context_mm[ctx] = mm; in get_mmu_context()
54 #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) argument
59 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
61 if (mm->context != NO_CONTEXT) { in destroy_context()
62 clear_bit(mm->context, context_map); in destroy_context()
63 mm->context = NO_CONTEXT; in destroy_context()
[all …]
mcf_pgalloc.h
7 extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
14 extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
31 #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) argument
32 #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) argument
34 #define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr) argument
36 #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \ argument
39 #define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte)) argument
51 static inline struct page *pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
76 extern inline void pte_free(struct mm_struct *mm, struct page *page) in pte_free() argument
85 #define pmd_free(mm, pmd) BUG() argument
[all …]
sun3_pgalloc.h
17 #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) argument
20 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
25 static inline void pte_free(struct mm_struct *mm, pgtable_t page) in pte_free() argument
37 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
49 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
66 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) in pmd_populate_kernel() argument
71 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page) in pmd_populate() argument
81 #define pmd_free(mm, x) do { } while (0) argument
84 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
89 static inline pgd_t * pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
[all …]
motorola_pgalloc.h
10 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) in pte_alloc_one_kernel() argument
24 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
30 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) in pte_alloc_one() argument
51 static inline void pte_free(struct mm_struct *mm, pgtable_t page) in pte_free() argument
69 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) in pmd_alloc_one() argument
74 static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd) in pmd_free() argument
86 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
88 pmd_free(mm, (pmd_t *)pgd); in pgd_free()
91 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
97 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) in pmd_populate_kernel() argument
[all …]
/linux-4.1.27/arch/sparc/include/asm/
mmu_context_64.h
12 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
20 void get_new_mmu_context(struct mm_struct *mm);
27 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
28 void destroy_context(struct mm_struct *mm);
35 static inline void tsb_context_switch(struct mm_struct *mm) in tsb_context_switch() argument
37 __tsb_context_switch(__pa(mm->pgd), in tsb_context_switch()
38 &mm->context.tsb_block[0], in tsb_context_switch()
40 (mm->context.tsb_block[1].tsb ? in tsb_context_switch()
41 &mm->context.tsb_block[1] : in tsb_context_switch()
46 , __pa(&mm->context.tsb_descr[0])); in tsb_context_switch()
[all …]
pgalloc_32.h
27 #define pgd_free(mm, pgd) free_pgd_fast(pgd) argument
28 #define pgd_alloc(mm) get_pgd_fast() argument
39 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, in pmd_alloc_one() argument
51 #define pmd_free(mm, pmd) free_pmd_fast(pmd) argument
52 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
54 void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
60 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
62 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
74 #define pte_free_kernel(mm, pte) free_pte_fast(pte) argument
76 void pte_free(struct mm_struct * mm, pgtable_t pte);
[all …]
mmu_context_32.h
8 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
15 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
22 void destroy_context(struct mm_struct *mm);
25 void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
28 #define deactivate_mm(tsk,mm) do { } while (0) argument
31 #define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL) argument
tlbflush_64.h
11 struct mm_struct *mm; member
19 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
23 static inline void flush_tlb_mm(struct mm_struct *mm) in flush_tlb_mm() argument
53 static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) in global_flush_tlb_page() argument
55 __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); in global_flush_tlb_page()
61 void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
63 #define global_flush_tlb_page(mm, vaddr) \ argument
64 smp_flush_tlb_page(mm, vaddr)
pgalloc_64.h
25 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
30 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
42 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) in pud_alloc_one() argument
48 static inline void pud_free(struct mm_struct *mm, pud_t *pud) in pud_free() argument
53 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) in pmd_alloc_one() argument
59 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) in pmd_free() argument
64 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
66 pgtable_t pte_alloc_one(struct mm_struct *mm,
68 void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
69 void pte_free(struct mm_struct *mm, pgtable_t ptepage);
/linux-4.1.27/mm/
mmu_notifier.c
56 void __mmu_notifier_release(struct mm_struct *mm) in __mmu_notifier_release() argument
66 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) in __mmu_notifier_release()
74 mn->ops->release(mn, mm); in __mmu_notifier_release()
76 spin_lock(&mm->mmu_notifier_mm->lock); in __mmu_notifier_release()
77 while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { in __mmu_notifier_release()
78 mn = hlist_entry(mm->mmu_notifier_mm->list.first, in __mmu_notifier_release()
89 spin_unlock(&mm->mmu_notifier_mm->lock); in __mmu_notifier_release()
109 int __mmu_notifier_clear_flush_young(struct mm_struct *mm, in __mmu_notifier_clear_flush_young() argument
117 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { in __mmu_notifier_clear_flush_young()
119 young |= mn->ops->clear_flush_young(mn, mm, start, end); in __mmu_notifier_clear_flush_young()
[all …]
debug.c
169 void dump_mm(const struct mm_struct *mm) in dump_mm() argument
201 mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, in dump_mm()
203 mm->get_unmapped_area, in dump_mm()
205 mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, in dump_mm()
206 mm->pgd, atomic_read(&mm->mm_users), in dump_mm()
207 atomic_read(&mm->mm_count), in dump_mm()
208 atomic_long_read((atomic_long_t *)&mm->nr_ptes), in dump_mm()
209 mm_nr_pmds((struct mm_struct *)mm), in dump_mm()
210 mm->map_count, in dump_mm()
211 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, in dump_mm()
[all …]
vmacache.c
16 void vmacache_flush_all(struct mm_struct *mm) in vmacache_flush_all() argument
29 if (atomic_read(&mm->mm_users) == 1) in vmacache_flush_all()
40 if (mm == p->mm) in vmacache_flush_all()
55 static bool vmacache_valid_mm(struct mm_struct *mm) in vmacache_valid_mm() argument
57 return current->mm == mm && !(current->flags & PF_KTHREAD); in vmacache_valid_mm()
66 static bool vmacache_valid(struct mm_struct *mm) in vmacache_valid() argument
70 if (!vmacache_valid_mm(mm)) in vmacache_valid()
74 if (mm->vmacache_seqnum != curr->vmacache_seqnum) { in vmacache_valid()
79 curr->vmacache_seqnum = mm->vmacache_seqnum; in vmacache_valid()
86 struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr) in vmacache_find() argument
[all …]
mmap.c
60 static void unmap_region(struct mm_struct *mm,
153 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) in __vm_enough_memory() argument
221 if (mm) { in __vm_enough_memory()
223 allowed -= min_t(long, mm->total_vm / 32, reserve); in __vm_enough_memory()
289 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1() local
293 down_write(&mm->mmap_sem); in SYSCALL_DEFINE1()
302 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
304 min_brk = mm->end_data; in SYSCALL_DEFINE1()
306 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
317 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, in SYSCALL_DEFINE1()
[all …]
mmu_context.c
20 void use_mm(struct mm_struct *mm) in use_mm() argument
27 if (active_mm != mm) { in use_mm()
28 atomic_inc(&mm->mm_count); in use_mm()
29 tsk->active_mm = mm; in use_mm()
31 tsk->mm = mm; in use_mm()
32 switch_mm(active_mm, mm, tsk); in use_mm()
38 if (active_mm != mm) in use_mm()
51 void unuse_mm(struct mm_struct *mm) in unuse_mm() argument
56 sync_mm_rss(mm); in unuse_mm()
57 tsk->mm = NULL; in unuse_mm()
[all …]
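
use_mm()/unuse_mm() above let a kernel thread temporarily adopt a user mm — the pattern aio and vhost rely on in 4.1. A hedged sketch; kthread_write_user is hypothetical, and the caller is assumed to already hold an mm reference (e.g. from get_task_mm()):

#include <linux/mmu_context.h>
#include <linux/uaccess.h>

/* Caller is assumed to hold a reference on mm (e.g. get_task_mm()). */
static int kthread_write_user(struct mm_struct *mm,
			      u32 __user *uaddr, u32 val)
{
	int ret;

	use_mm(mm);		/* make mm this kthread's address space */
	ret = copy_to_user(uaddr, &val, sizeof(val)) ? -EFAULT : 0;
	unuse_mm(mm);		/* detach again; mm reference still held */

	return ret;
}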
memory.c
136 void sync_mm_rss(struct mm_struct *mm) in sync_mm_rss() argument
142 add_mm_counter(mm, i, current->rss_stat.count[i]); in sync_mm_rss()
149 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val) in add_mm_counter_fast() argument
153 if (likely(task->mm == mm)) in add_mm_counter_fast()
156 add_mm_counter(mm, member, val); in add_mm_counter_fast()
158 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1) argument
159 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1) argument
168 sync_mm_rss(task->mm); in check_sync_rss_stat()
172 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member) argument
173 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member) argument
[all …]
huge_memory.c
86 struct mm_struct *mm; member
717 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, in __do_huge_pmd_anonymous_page() argument
728 if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) in __do_huge_pmd_anonymous_page()
731 pgtable = pte_alloc_one(mm, haddr); in __do_huge_pmd_anonymous_page()
745 ptl = pmd_lock(mm, pmd); in __do_huge_pmd_anonymous_page()
750 pte_free(mm, pgtable); in __do_huge_pmd_anonymous_page()
758 pgtable_trans_huge_deposit(mm, pmd, pgtable); in __do_huge_pmd_anonymous_page()
759 set_pmd_at(mm, haddr, pmd, entry); in __do_huge_pmd_anonymous_page()
760 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
761 atomic_long_inc(&mm->nr_ptes); in __do_huge_pmd_anonymous_page()
[all …]
mremap.c
31 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) in get_old_pmd() argument
37 pgd = pgd_offset(mm, addr); in get_old_pmd()
52 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument
59 pgd = pgd_offset(mm, addr); in alloc_new_pmd()
60 pud = pud_alloc(mm, pgd, addr); in alloc_new_pmd()
64 pmd = pmd_alloc(mm, pud, addr); in alloc_new_pmd()
95 struct mm_struct *mm = vma->vm_mm; in move_ptes() local
132 old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); in move_ptes()
134 new_ptl = pte_lockptr(mm, new_pmd); in move_ptes()
143 pte = ptep_get_and_clear(mm, old_addr, old_pte); in move_ptes()
[all …]
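
alloc_new_pmd() above shows the standard page-table allocation walk: each level's *_alloc() returns the existing entry or allocates a new table. A sketch of the same chain in isolation — walk_alloc_pmd is hypothetical, and 4.1 has no p4d level, so the walk is pgd -> pud -> pmd:

#include <linux/mm.h>

/* Returns an existing or freshly allocated PMD, or NULL on OOM. */
static pmd_t *walk_alloc_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* top level always exists */
	pud_t *pud = pud_alloc(mm, pgd, addr);

	return pud ? pmd_alloc(mm, pud, addr) : NULL;
}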
nommu.c
139 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
151 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, in __get_user_pages() argument
169 vma = find_vma(mm, start); in __get_user_pages()
201 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages() argument
213 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, in get_user_pages()
218 long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages_locked() argument
223 return get_user_pages(tsk, mm, start, nr_pages, write, force, in get_user_pages_locked()
228 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, in __get_user_pages_unlocked() argument
234 down_read(&mm->mmap_sem); in __get_user_pages_unlocked()
235 ret = get_user_pages(tsk, mm, start, nr_pages, write, force, in __get_user_pages_unlocked()
[all …]
ksm.c
106 struct mm_struct *mm; member
169 struct mm_struct *mm; member
295 rmap_item->mm = NULL; /* debug safety */ in free_rmap_item()
321 static struct mm_slot *get_mm_slot(struct mm_struct *mm) in get_mm_slot() argument
325 hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm) in get_mm_slot()
326 if (slot->mm == mm) in get_mm_slot()
332 static void insert_to_mm_slots_hash(struct mm_struct *mm, in insert_to_mm_slots_hash() argument
335 mm_slot->mm = mm; in insert_to_mm_slots_hash()
336 hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm); in insert_to_mm_slots_hash()
347 static inline bool ksm_test_exit(struct mm_struct *mm) in ksm_test_exit() argument
[all …]
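
get_mm_slot()/insert_to_mm_slots_hash() above use the mm pointer itself as the hash key. A generic sketch of that pattern with linux/hashtable.h — my_slot, my_slots_hash, and my_get_slot are hypothetical stand-ins for ksm's mm_slot machinery:

#include <linux/hashtable.h>
#include <linux/mm_types.h>

/* Hypothetical slot mirroring ksm's mm_slot: one entry per mm. */
struct my_slot {
	struct hlist_node link;
	struct mm_struct *mm;
};

static DEFINE_HASHTABLE(my_slots_hash, 10);

static struct my_slot *my_get_slot(struct mm_struct *mm)
{
	struct my_slot *slot;

	/* The mm pointer is the key; compare slot->mm to survive collisions. */
	hash_for_each_possible(my_slots_hash, slot, link, (unsigned long)mm)
		if (slot->mm == mm)
			return slot;
	return NULL;
}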
gup.c
38 struct mm_struct *mm = vma->vm_mm; in follow_page_pte() local
47 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); in follow_page_pte()
64 migration_entry_wait(mm, pmd, address); in follow_page_pte()
152 struct mm_struct *mm = vma->vm_mm; in follow_page_mask() local
156 page = follow_huge_addr(mm, address, flags & FOLL_WRITE); in follow_page_mask()
162 pgd = pgd_offset(mm, address); in follow_page_mask()
170 page = follow_huge_pud(mm, address, pud, flags); in follow_page_mask()
182 page = follow_huge_pmd(mm, address, pmd, flags); in follow_page_mask()
194 ptl = pmd_lock(mm, pmd); in follow_page_mask()
212 static int get_gate_page(struct mm_struct *mm, unsigned long address, in get_gate_page() argument
[all …]
util.c
179 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link_list() argument
189 mm->mmap = vma; in __vma_link_list()
233 void arch_pick_mmap_layout(struct mm_struct *mm) in arch_pick_mmap_layout() argument
235 mm->mmap_base = TASK_UNMAPPED_BASE; in arch_pick_mmap_layout()
236 mm->get_unmapped_area = arch_get_unmapped_area; in arch_pick_mmap_layout()
280 struct mm_struct *mm = current->mm; in get_user_pages_fast() local
281 return get_user_pages_unlocked(current, mm, start, nr_pages, in get_user_pages_fast()
291 struct mm_struct *mm = current->mm; in vm_mmap_pgoff() local
296 down_write(&mm->mmap_sem); in vm_mmap_pgoff()
299 up_write(&mm->mmap_sem); in vm_mmap_pgoff()
[all …]
pgtable-generic.c
113 struct mm_struct *mm = (vma)->vm_mm; in ptep_clear_flush() local
115 pte = ptep_get_and_clear(mm, address, ptep); in ptep_clear_flush()
116 if (pte_accessible(mm, pte)) in ptep_clear_flush()
152 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, in pgtable_trans_huge_deposit() argument
155 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_deposit()
158 if (!pmd_huge_pte(mm, pmdp)) in pgtable_trans_huge_deposit()
161 list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru); in pgtable_trans_huge_deposit()
162 pmd_huge_pte(mm, pmdp) = pgtable; in pgtable_trans_huge_deposit()
170 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) in pgtable_trans_huge_withdraw() argument
174 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_withdraw()
[all …]
mprotect.c
64 struct mm_struct *mm = vma->vm_mm; in change_pte_range() local
96 ptent = ptep_modify_prot_start(mm, addr, pte); in change_pte_range()
107 ptep_modify_prot_commit(mm, addr, pte, ptent); in change_pte_range()
122 set_pte_at(mm, addr, pte, newpte); in change_pte_range()
139 struct mm_struct *mm = vma->vm_mm; in change_pmd_range() local
156 mmu_notifier_invalidate_range_start(mm, mni_start, end); in change_pmd_range()
184 mmu_notifier_invalidate_range_end(mm, mni_start, end); in change_pmd_range()
215 struct mm_struct *mm = vma->vm_mm; in change_protection_range() local
222 pgd = pgd_offset(mm, addr); in change_protection_range()
224 set_tlb_flush_pending(mm); in change_protection_range()
[all …]
msync.c
34 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE3() local
57 down_read(&mm->mmap_sem); in SYSCALL_DEFINE3()
58 vma = find_vma(mm, start); in SYSCALL_DEFINE3()
88 up_read(&mm->mmap_sem); in SYSCALL_DEFINE3()
93 down_read(&mm->mmap_sem); in SYSCALL_DEFINE3()
94 vma = find_vma(mm, start); in SYSCALL_DEFINE3()
104 up_read(&mm->mmap_sem); in SYSCALL_DEFINE3()
oom_kill.c
109 if (likely(t->mm)) in find_lock_task_mm()
172 points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) + in oom_badness()
173 atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm); in oom_badness()
272 if (!task->mm) in oom_scan_process_thread()
372 task->tgid, task->mm->total_vm, get_mm_rss(task->mm), in dump_tasks()
373 atomic_long_read(&task->mm->nr_ptes), in dump_tasks()
374 mm_nr_pmds(task->mm), in dump_tasks()
375 get_mm_counter(task->mm, MM_SWAPENTS), in dump_tasks()
509 struct mm_struct *mm; in oom_kill_process() local
519 if (p->mm && task_will_free_mem(p)) { in oom_kill_process()
[all …]
/linux-4.1.27/drivers/gpu/drm/
drm_mm.c
93 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
98 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
112 struct drm_mm *mm = hole_node->mm; in drm_mm_insert_helper() local
120 if (mm->color_adjust) in drm_mm_insert_helper()
121 mm->color_adjust(hole_node, color, &adj_start, &adj_end); in drm_mm_insert_helper()
149 node->mm = mm; in drm_mm_insert_helper()
160 list_add(&node->hole_stack, &mm->hole_stack); in drm_mm_insert_helper()
179 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) in drm_mm_reserve_node() argument
189 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { in drm_mm_reserve_node()
193 node->mm = mm; in drm_mm_reserve_node()
[all …]
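
drm_mm_reserve_node() above claims a caller-chosen range rather than searching for a free hole. A sketch — the offset and length are arbitrary example values, and reserve_fixed_range is a hypothetical helper:

#include <drm/drm_mm.h>

/* Returns 0 on success, -ENOSPC if the requested range is not free. */
static int reserve_fixed_range(struct drm_mm *mm, struct drm_mm_node *node)
{
	node->start = 0x100000;		/* example offset into the mm */
	node->size  = 0x10000;		/* example length */
	node->color = 0;

	return drm_mm_reserve_node(mm, node);
}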
/linux-4.1.27/arch/s390/include/asm/
pgalloc.h
26 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
28 unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
50 static inline unsigned long pgd_entry_type(struct mm_struct *mm) in pgd_entry_type() argument
52 if (mm->context.asce_limit <= (1UL << 31)) in pgd_entry_type()
54 if (mm->context.asce_limit <= (1UL << 42)) in pgd_entry_type()
62 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) in pud_alloc_one() argument
64 unsigned long *table = crst_table_alloc(mm); in pud_alloc_one()
69 #define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud) argument
71 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) in pmd_alloc_one() argument
73 unsigned long *table = crst_table_alloc(mm); in pmd_alloc_one()
[all …]
mmu_context.h
16 struct mm_struct *mm) in init_new_context() argument
18 spin_lock_init(&mm->context.list_lock); in init_new_context()
19 INIT_LIST_HEAD(&mm->context.pgtable_list); in init_new_context()
20 INIT_LIST_HEAD(&mm->context.gmap_list); in init_new_context()
21 cpumask_clear(&mm->context.cpu_attach_mask); in init_new_context()
22 atomic_set(&mm->context.attach_count, 0); in init_new_context()
23 mm->context.flush_mm = 0; in init_new_context()
25 mm->context.alloc_pgste = page_table_allocate_pgste; in init_new_context()
26 mm->context.has_pgste = 0; in init_new_context()
27 mm->context.use_skey = 0; in init_new_context()
[all …]
tlbflush.h
64 static inline void __tlb_flush_full(struct mm_struct *mm) in __tlb_flush_full() argument
67 atomic_add(0x10000, &mm->context.attach_count); in __tlb_flush_full()
68 if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { in __tlb_flush_full()
76 cpumask_copy(mm_cpumask(mm), in __tlb_flush_full()
77 &mm->context.cpu_attach_mask); in __tlb_flush_full()
79 atomic_sub(0x10000, &mm->context.attach_count); in __tlb_flush_full()
86 static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce) in __tlb_flush_asce() argument
91 active = (mm == current->active_mm) ? 1 : 0; in __tlb_flush_asce()
92 count = atomic_add_return(0x10000, &mm->context.attach_count); in __tlb_flush_asce()
94 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { in __tlb_flush_asce()
[all …]
pgtable.h
386 static inline int mm_has_pgste(struct mm_struct *mm) in mm_has_pgste() argument
389 if (unlikely(mm->context.has_pgste)) in mm_has_pgste()
395 static inline int mm_alloc_pgste(struct mm_struct *mm) in mm_alloc_pgste() argument
398 if (unlikely(mm->context.alloc_pgste)) in mm_alloc_pgste()
409 static inline int mm_use_skey(struct mm_struct *mm) in mm_use_skey() argument
412 if (mm->context.use_skey) in mm_use_skey()
629 struct mm_struct *mm) in pgste_update_all() argument
634 if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID) in pgste_update_all()
650 struct mm_struct *mm) in pgste_set_key() argument
656 if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID) in pgste_set_key()
[all …]
tlb.h
32 struct mm_struct *mm; member
51 struct mm_struct *mm, in tlb_gather_mmu() argument
55 tlb->mm = mm; in tlb_gather_mmu()
64 __tlb_flush_mm_lazy(tlb->mm); in tlb_flush_mmu_tlbonly()
121 if (tlb->mm->context.asce_limit <= (1UL << 31)) in pmd_free_tlb()
137 if (tlb->mm->context.asce_limit <= (1UL << 42)) in pud_free_tlb()
146 #define tlb_migrate_finish(mm) do { } while (0) argument
hugetlb.h
15 #define is_hugepage_only_range(mm, addr, len) 0 argument
19 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
22 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
39 #define hugetlb_prefault_arch_hook(mm) do { } while (0) argument
45 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, in huge_pte_clear() argument
69 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, in huge_ptep_set_wrprotect() argument
72 pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep); in huge_ptep_set_wrprotect()
73 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte)); in huge_ptep_set_wrprotect()
/linux-4.1.27/arch/arm/include/asm/
mmu_context.h
24 void __check_vmalloc_seq(struct mm_struct *mm);
28 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
29 #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) argument
32 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
35 static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, in a15_erratum_get_cpumask() argument
45 static inline void check_and_switch_context(struct mm_struct *mm, in check_and_switch_context() argument
48 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) in check_and_switch_context()
49 __check_vmalloc_seq(mm); in check_and_switch_context()
59 mm->context.switch_pending = 1; in check_and_switch_context()
61 cpu_switch_mm(mm->pgd, mm); in check_and_switch_context()
[all …]
pgalloc.h
30 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) in pmd_alloc_one() argument
35 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) in pmd_free() argument
41 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) in pud_populate() argument
51 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); }) argument
52 #define pmd_free(mm, pmd) do { } while (0) argument
53 #define pud_populate(mm,pmd,pte) BUG() argument
57 extern pgd_t *pgd_alloc(struct mm_struct *mm);
58 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
84 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) in pte_alloc_one_kernel() argument
96 pte_alloc_one(struct mm_struct *mm, unsigned long addr) in pte_alloc_one() argument
[all …]
cacheflush.h
225 static inline void vivt_flush_cache_mm(struct mm_struct *mm) in vivt_flush_cache_mm() argument
227 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) in vivt_flush_cache_mm()
234 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_range() local
236 if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) in vivt_flush_cache_range()
244 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_page() local
246 if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { in vivt_flush_cache_page()
253 #define flush_cache_mm(mm) \ argument
254 vivt_flush_cache_mm(mm)
260 extern void flush_cache_mm(struct mm_struct *mm);
265 #define flush_cache_dup_mm(mm) flush_cache_mm(mm) argument
hugetlb-3level.h
40 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, in set_huge_pte_at() argument
43 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at()
52 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, in huge_ptep_set_wrprotect() argument
55 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect()
58 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, in huge_ptep_get_and_clear() argument
61 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear()
/linux-4.1.27/arch/s390/mm/
pgtable.c
34 unsigned long *crst_table_alloc(struct mm_struct *mm) in crst_table_alloc() argument
43 void crst_table_free(struct mm_struct *mm, unsigned long *table) in crst_table_free() argument
50 struct mm_struct *mm = arg; in __crst_table_upgrade() local
52 if (current->active_mm == mm) { in __crst_table_upgrade()
54 set_user_asce(mm); in __crst_table_upgrade()
59 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) in crst_table_upgrade() argument
68 table = crst_table_alloc(mm); in crst_table_upgrade()
71 spin_lock_bh(&mm->page_table_lock); in crst_table_upgrade()
72 if (mm->context.asce_limit < limit) { in crst_table_upgrade()
73 pgd = (unsigned long *) mm->pgd; in crst_table_upgrade()
[all …]
mmap.c
92 struct mm_struct *mm = current->mm; in arch_get_unmapped_area() local
105 vma = find_vma(mm, addr); in arch_get_unmapped_area()
117 info.low_limit = mm->mmap_base; in arch_get_unmapped_area()
130 struct mm_struct *mm = current->mm; in arch_get_unmapped_area_topdown() local
145 vma = find_vma(mm, addr); in arch_get_unmapped_area_topdown()
158 info.high_limit = mm->mmap_base; in arch_get_unmapped_area_topdown()
187 return crst_table_upgrade(current->mm, 1UL << 53); in s390_mmap_check()
195 struct mm_struct *mm = current->mm; in s390_get_unmapped_area() local
204 rc = crst_table_upgrade(mm, 1UL << 53); in s390_get_unmapped_area()
217 struct mm_struct *mm = current->mm; in s390_get_unmapped_area_topdown() local
[all …]
/linux-4.1.27/arch/m32r/include/asm/
mmu_context.h
26 #define mm_context(mm) mm->context argument
30 #define mm_context(mm) mm->context[smp_processor_id()] argument
37 #define enter_lazy_tlb(mm, tsk) do { } while (0) argument
39 static inline void get_new_mmu_context(struct mm_struct *mm) in get_new_mmu_context() argument
52 mm_context(mm) = mc; in get_new_mmu_context()
58 static inline void get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
60 if (mm) { in get_mmu_context()
65 if ((mm_context(mm) ^ mc) & MMU_CONTEXT_VERSION_MASK) in get_mmu_context()
66 get_new_mmu_context(mm); in get_mmu_context()
75 struct mm_struct *mm) in init_new_context() argument
[all …]
pgalloc.h
8 #define pmd_populate_kernel(mm, pmd, pte) \ argument
11 static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd, in pmd_populate() argument
21 static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
28 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
33 static __inline__ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
41 static __inline__ pgtable_t pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
55 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
60 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) in pte_free() argument
66 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
74 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) argument
[all …]
/linux-4.1.27/arch/powerpc/mm/
mmu_context_hash64.c
62 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
74 if (slice_mm_new_context(mm)) in init_new_context()
75 slice_set_user_psize(mm, mmu_virtual_psize); in init_new_context()
76 subpage_prot_init_new_context(mm); in init_new_context()
77 mm->context.id = index; in init_new_context()
79 mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL); in init_new_context()
80 if (!mm->context.cop_lockp) { in init_new_context()
82 subpage_prot_free(mm); in init_new_context()
83 mm->context.id = MMU_NO_CONTEXT; in init_new_context()
86 spin_lock_init(mm->context.cop_lockp); in init_new_context()
[all …]
slice.c
100 static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, in slice_area_is_free() argument
105 if ((mm->task_size - len) < addr) in slice_area_is_free()
107 vma = find_vma(mm, addr); in slice_area_is_free()
111 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) in slice_low_has_vma() argument
113 return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, in slice_low_has_vma()
117 static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) in slice_high_has_vma() argument
128 return !slice_area_is_free(mm, start, end - start); in slice_high_has_vma()
131 static struct slice_mask slice_mask_for_free(struct mm_struct *mm) in slice_mask_for_free() argument
137 if (!slice_low_has_vma(mm, i)) in slice_mask_for_free()
140 if (mm->task_size <= SLICE_LOW_TOP) in slice_mask_for_free()
[all …]
icswx.c
85 int use_cop(unsigned long acop, struct mm_struct *mm) in use_cop() argument
92 if (!mm || !acop) in use_cop()
96 spin_lock(&mm->page_table_lock); in use_cop()
97 spin_lock(mm->context.cop_lockp); in use_cop()
99 ret = get_cop_pid(mm); in use_cop()
104 mm->context.acop |= acop; in use_cop()
106 sync_cop(mm); in use_cop()
113 if (atomic_read(&mm->mm_users) > 1) in use_cop()
114 smp_call_function(sync_cop, mm, 1); in use_cop()
117 spin_unlock(mm->context.cop_lockp); in use_cop()
[all …]
subpage-prot.c
26 void subpage_prot_free(struct mm_struct *mm) in subpage_prot_free() argument
28 struct subpage_prot_table *spt = &mm->context.spt; in subpage_prot_free()
53 void subpage_prot_init_new_context(struct mm_struct *mm) in subpage_prot_init_new_context() argument
55 struct subpage_prot_table *spt = &mm->context.spt; in subpage_prot_init_new_context()
60 static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, in hpte_flush_range() argument
69 pgd = pgd_offset(mm, addr); in hpte_flush_range()
78 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in hpte_flush_range()
81 pte_update(mm, addr, pte, 0, 0, 0); in hpte_flush_range()
95 struct mm_struct *mm = current->mm; in subpage_prot_clear() local
96 struct subpage_prot_table *spt = &mm->context.spt; in subpage_prot_clear()
[all …]
mmu_context_nohash.c
87 struct mm_struct *mm; in steal_context_smp() local
95 mm = context_mm[id]; in steal_context_smp()
100 if (mm->context.active) { in steal_context_smp()
106 pr_hardcont(" | steal %d from 0x%p", id, mm); in steal_context_smp()
109 mm->context.id = MMU_NO_CONTEXT; in steal_context_smp()
116 for_each_cpu(cpu, mm_cpumask(mm)) { in steal_context_smp()
141 struct mm_struct *mm; in steal_all_contexts() local
147 mm = context_mm[id]; in steal_all_contexts()
149 pr_hardcont(" | steal %d from 0x%p", id, mm); in steal_all_contexts()
152 mm->context.id = MMU_NO_CONTEXT; in steal_all_contexts()
[all …]
tlb_hash64.c
43 void hpte_need_flush(struct mm_struct *mm, unsigned long addr, in hpte_need_flush() argument
65 psize = get_slice_psize(mm, addr); in hpte_need_flush()
70 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ in hpte_need_flush()
73 psize = pte_pagesize_index(mm, addr, pte); in hpte_need_flush()
85 vsid = get_vsid(mm->context.id, addr, ssize); in hpte_need_flush()
116 if (i != 0 && (mm != batch->mm || batch->psize != psize || in hpte_need_flush()
122 batch->mm = mm; in hpte_need_flush()
148 if (cpumask_equal(mm_cpumask(batch->mm), tmp)) in __flush_tlb_pending()
190 void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, in __flush_hash_table_range() argument
199 BUG_ON(!mm->pgd); in __flush_hash_table_range()
[all …]
copro_fault.c
36 int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, in copro_handle_mm_fault() argument
43 if (mm == NULL) in copro_handle_mm_fault()
46 if (mm->pgd == NULL) in copro_handle_mm_fault()
49 down_read(&mm->mmap_sem); in copro_handle_mm_fault()
51 vma = find_vma(mm, ea); in copro_handle_mm_fault()
78 *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0); in copro_handle_mm_fault()
96 up_read(&mm->mmap_sem); in copro_handle_mm_fault()
101 int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) in copro_calculate_slb() argument
109 psize = get_slice_psize(mm, ea); in copro_calculate_slb()
111 vsid = get_vsid(mm->context.id, ea, ssize); in copro_calculate_slb()
[all …]
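
copro_handle_mm_fault() above follows the same fault-service skeleton as the fault_32.c/fault_64.c results further down: look the VMA up under mmap_sem, validate the access, then let handle_mm_fault() populate the page tables. A condensed sketch against the 4.1 four-argument handle_mm_fault() from mm.h — service_fault is hypothetical and ignores stack growth and retry handling:

#include <linux/mm.h>
#include <linux/sched.h>

static int service_fault(struct mm_struct *mm, unsigned long address,
			 bool is_write)
{
	struct vm_area_struct *vma;
	int fault, ret = 0;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		ret = -EFAULT;		/* unmapped (stack growth ignored) */
		goto out;
	}
	if (is_write && !(vma->vm_flags & VM_WRITE)) {
		ret = -EACCES;		/* mapped but not writable */
		goto out;
	}
	fault = handle_mm_fault(mm, vma, address,
				is_write ? FAULT_FLAG_WRITE : 0);
	if (fault & VM_FAULT_ERROR)
		ret = -EFAULT;
out:
	up_read(&mm->mmap_sem);
	return ret;
}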
icswx_pid.c
58 int get_cop_pid(struct mm_struct *mm) in get_cop_pid() argument
62 if (mm->context.cop_pid == COP_PID_NONE) { in get_cop_pid()
66 mm->context.cop_pid = pid; in get_cop_pid()
68 return mm->context.cop_pid; in get_cop_pid()
71 int disable_cop_pid(struct mm_struct *mm) in disable_cop_pid() argument
75 if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { in disable_cop_pid()
76 free_pid = mm->context.cop_pid; in disable_cop_pid()
77 mm->context.cop_pid = COP_PID_NONE; in disable_cop_pid()
tlb_hash32.c
40 void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr) in flush_hash_entry() argument
46 flush_hash_pages(mm->context.id, addr, ptephys, 1); in flush_hash_entry()
90 static void flush_range(struct mm_struct *mm, unsigned long start, in flush_range() argument
96 unsigned int ctx = mm->context.id; in flush_range()
106 pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start); in flush_range()
134 void flush_tlb_mm(struct mm_struct *mm) in flush_tlb_mm() argument
149 for (mp = mm->mmap; mp != NULL; mp = mp->vm_next) in flush_tlb_mm()
156 struct mm_struct *mm; in flush_tlb_page() local
163 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; in flush_tlb_page()
164 pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr); in flush_tlb_page()
[all …]
pgtable_64.c
377 static pte_t *get_from_cache(struct mm_struct *mm) in get_from_cache() argument
381 spin_lock(&mm->page_table_lock); in get_from_cache()
382 ret = mm->context.pte_frag; in get_from_cache()
390 mm->context.pte_frag = pte_frag; in get_from_cache()
392 spin_unlock(&mm->page_table_lock); in get_from_cache()
396 static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel) in __alloc_for_cache() argument
409 spin_lock(&mm->page_table_lock); in __alloc_for_cache()
415 if (likely(!mm->context.pte_frag)) { in __alloc_for_cache()
417 mm->context.pte_frag = ret + PTE_FRAG_SIZE; in __alloc_for_cache()
419 spin_unlock(&mm->page_table_lock); in __alloc_for_cache()
[all …]
mmu_context_hash32.c
82 int init_new_context(struct task_struct *t, struct mm_struct *mm) in init_new_context() argument
84 mm->context.id = __init_new_context(); in init_new_context()
101 void destroy_context(struct mm_struct *mm) in destroy_context() argument
104 if (mm->context.id != NO_CONTEXT) { in destroy_context()
105 __destroy_context(mm->context.id); in destroy_context()
106 mm->context.id = NO_CONTEXT; in destroy_context()
tlb_nohash.c
179 void local_flush_tlb_mm(struct mm_struct *mm) in local_flush_tlb_mm() argument
184 pid = mm->context.id; in local_flush_tlb_mm()
191 void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, in __local_flush_tlb_page() argument
197 pid = mm ? mm->context.id : 0; in __local_flush_tlb_page()
217 static int mm_is_core_local(struct mm_struct *mm) in mm_is_core_local() argument
219 return cpumask_subset(mm_cpumask(mm), in mm_is_core_local()
261 void flush_tlb_mm(struct mm_struct *mm) in flush_tlb_mm() argument
266 pid = mm->context.id; in flush_tlb_mm()
269 if (!mm_is_core_local(mm)) { in flush_tlb_mm()
272 smp_call_function_many(mm_cpumask(mm), in flush_tlb_mm()
[all …]
/linux-4.1.27/arch/sparc/mm/
tlb.c
26 struct mm_struct *mm = tb->mm; in flush_tlb_pending() local
33 if (CTX_VALID(mm->context)) { in flush_tlb_pending()
35 global_flush_tlb_page(mm, tb->vaddrs[0]); in flush_tlb_pending()
38 smp_flush_tlb_pending(tb->mm, tb->tlb_nr, in flush_tlb_pending()
41 __flush_tlb_pending(CTX_HWBITS(tb->mm->context), in flush_tlb_pending()
69 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, in tlb_batch_add_one() argument
81 if (unlikely(nr != 0 && mm != tb->mm)) { in tlb_batch_add_one()
87 flush_tsb_user_page(mm, vaddr); in tlb_batch_add_one()
88 global_flush_tlb_page(mm, vaddr); in tlb_batch_add_one()
93 tb->mm = mm; in tlb_batch_add_one()
[all …]
tsb.c
74 struct mm_struct *mm = tb->mm; in flush_tsb_user() local
77 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user()
79 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; in flush_tsb_user()
80 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; in flush_tsb_user()
86 if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { in flush_tsb_user()
87 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; in flush_tsb_user()
88 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; in flush_tsb_user()
94 spin_unlock_irqrestore(&mm->context.lock, flags); in flush_tsb_user()
97 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) in flush_tsb_user_page() argument
101 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user_page()
[all …]
hugetlbpage.c
60 struct mm_struct *mm = current->mm; in hugetlb_get_unmapped_area_topdown() local
70 info.high_limit = mm->mmap_base; in hugetlb_get_unmapped_area_topdown()
96 struct mm_struct *mm = current->mm; in hugetlb_get_unmapped_area() local
116 vma = find_vma(mm, addr); in hugetlb_get_unmapped_area()
121 if (mm->get_unmapped_area == arch_get_unmapped_area) in hugetlb_get_unmapped_area()
129 pte_t *huge_pte_alloc(struct mm_struct *mm, in huge_pte_alloc() argument
144 pgd = pgd_offset(mm, addr); in huge_pte_alloc()
145 pud = pud_alloc(mm, pgd, addr); in huge_pte_alloc()
147 pmd = pmd_alloc(mm, pud, addr); in huge_pte_alloc()
149 pte = pte_alloc_map(mm, NULL, pmd, addr); in huge_pte_alloc()
[all …]
fault_32.c
50 (tsk->mm ? tsk->mm->context : tsk->active_mm->context)); in unhandled_fault()
52 (tsk->mm ? (unsigned long) tsk->mm->pgd : in unhandled_fault()
172 struct mm_struct *mm = tsk->mm; in do_sparc_fault() local
199 if (in_atomic() || !mm) in do_sparc_fault()
205 down_read(&mm->mmap_sem); in do_sparc_fault()
210 vma = find_vma(mm, address); in do_sparc_fault()
244 fault = handle_mm_fault(mm, vma, address, flags); in do_sparc_fault()
282 up_read(&mm->mmap_sem); in do_sparc_fault()
290 up_read(&mm->mmap_sem); in do_sparc_fault()
339 up_read(&mm->mmap_sem); in do_sparc_fault()
[all …]
srmmu.c
71 #define FLUSH_BEGIN(mm) argument
74 #define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) { argument
124 void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep) in pmd_populate() argument
342 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) in pte_alloc_one() argument
347 if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0) in pte_alloc_one()
357 void pte_free(struct mm_struct *mm, pgtable_t pte) in pte_free() argument
404 static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) in alloc_context() argument
412 mm->context = ctxp->ctx_number; in alloc_context()
413 ctxp->ctx_mm = mm; in alloc_context()
426 ctxp->ctx_mm = mm; in alloc_context()
[all …]
fault_64.c
65 (tsk->mm ? in unhandled_fault()
66 CTX_HWBITS(tsk->mm->context) : in unhandled_fault()
69 (tsk->mm ? (unsigned long) tsk->mm->pgd : in unhandled_fault()
94 pgd_t *pgdp = pgd_offset(current->mm, tpc); in get_user_insn()
285 struct mm_struct *mm = current->mm; in do_sparc64_fault() local
333 if (in_atomic() || !mm) in do_sparc64_fault()
338 if (!down_read_trylock(&mm->mmap_sem)) { in do_sparc64_fault()
346 down_read(&mm->mmap_sem); in do_sparc64_fault()
352 vma = find_vma(mm, address); in do_sparc64_fault()
441 fault = handle_mm_fault(mm, vma, address, flags); in do_sparc64_fault()
[all …]
/linux-4.1.27/arch/arm64/mm/
context.c
41 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) in __init_new_context() argument
43 mm->context.id = 0; in __init_new_context()
44 raw_spin_lock_init(&mm->context.id_lock); in __init_new_context()
58 static void set_mm_context(struct mm_struct *mm, unsigned int asid) in set_mm_context() argument
68 raw_spin_lock_irqsave(&mm->context.id_lock, flags); in set_mm_context()
69 if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) { in set_mm_context()
74 mm->context.id = asid; in set_mm_context()
75 cpumask_clear(mm_cpumask(mm)); in set_mm_context()
77 raw_spin_unlock_irqrestore(&mm->context.id_lock, flags); in set_mm_context()
82 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); in set_mm_context()
[all …]
fault.c
45 void show_pte(struct mm_struct *mm, unsigned long addr) in show_pte() argument
49 if (!mm) in show_pte()
50 mm = &init_mm; in show_pte()
52 pr_alert("pgd = %p\n", mm->pgd); in show_pte()
53 pgd = pgd_offset(mm, addr); in show_pte()
85 static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr, in __do_kernel_fault() argument
102 show_pte(mm, addr); in __do_kernel_fault()
123 show_pte(tsk->mm, addr); in __do_user_fault()
139 struct mm_struct *mm = tsk->active_mm; in do_bad_area() local
148 __do_kernel_fault(mm, addr, esr, regs); in do_bad_area()
[all …]
/linux-4.1.27/arch/um/kernel/skas/
mmu.c
17 static int init_stub_pte(struct mm_struct *mm, unsigned long proc, in init_stub_pte() argument
25 pgd = pgd_offset(mm, proc); in init_stub_pte()
26 pud = pud_alloc(mm, pgd, proc); in init_stub_pte()
30 pmd = pmd_alloc(mm, pud, proc); in init_stub_pte()
34 pte = pte_alloc_map(mm, NULL, pmd, proc); in init_stub_pte()
43 pmd_free(mm, pmd); in init_stub_pte()
45 pud_free(mm, pud); in init_stub_pte()
50 int init_new_context(struct task_struct *task, struct mm_struct *mm) in init_new_context() argument
53 struct mm_context *to_mm = &mm->context; in init_new_context()
62 if (current->mm != NULL && current->mm != &init_mm) in init_new_context()
[all …]
/linux-4.1.27/arch/mn10300/include/asm/
mmu_context.h
37 #define enter_lazy_tlb(mm, tsk) do {} while (0) argument
39 static inline void cpu_ran_vm(int cpu, struct mm_struct *mm) in cpu_ran_vm() argument
42 cpumask_set_cpu(cpu, mm_cpumask(mm)); in cpu_ran_vm()
46 static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm) in cpu_maybe_ran_vm() argument
49 return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm)); in cpu_maybe_ran_vm()
57 #define mm_context(mm) (mm->context.tlbpid[smp_processor_id()]) argument
63 static inline unsigned long allocate_mmu_context(struct mm_struct *mm) in allocate_mmu_context() argument
78 mm_context(mm) = mc; in allocate_mmu_context()
85 static inline unsigned long get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
89 if (mm) { in get_mmu_context()
[all …]
/linux-4.1.27/arch/arm64/include/asm/
mmu_context.h
35 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
36 void __new_context(struct mm_struct *mm);
110 static inline void switch_new_context(struct mm_struct *mm) in switch_new_context() argument
114 __new_context(mm); in switch_new_context()
117 cpu_switch_mm(mm->pgd, mm); in switch_new_context()
121 static inline void check_and_switch_context(struct mm_struct *mm, in check_and_switch_context() argument
130 if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) in check_and_switch_context()
136 cpu_switch_mm(mm->pgd, mm); in check_and_switch_context()
149 switch_new_context(mm); in check_and_switch_context()
152 #define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0) argument
[all …]
pgalloc.h
33 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) in pmd_alloc_one() argument
38 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) in pmd_free() argument
44 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) in pud_populate() argument
53 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) in pud_alloc_one() argument
58 static inline void pud_free(struct mm_struct *mm, pud_t *pud) in pud_free() argument
64 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) in pgd_populate() argument
71 extern pgd_t *pgd_alloc(struct mm_struct *mm);
72 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
75 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) in pte_alloc_one_kernel() argument
81 pte_alloc_one(struct mm_struct *mm, unsigned long addr) in pte_alloc_one() argument
[all …]
hugetlb.h
33 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, in set_huge_pte_at() argument
36 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at()
45 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, in huge_ptep_set_wrprotect() argument
48 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect()
51 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, in huge_ptep_get_and_clear() argument
54 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear()
72 static inline int is_hugepage_only_range(struct mm_struct *mm, in is_hugepage_only_range() argument
89 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) in hugetlb_prefault_arch_hook() argument
/linux-4.1.27/drivers/gpu/drm/i915/
i915_gem_userptr.c
36 struct mm_struct *mm; member
79 was_interruptible = dev_priv->mm.interruptible; in cancel_userptr()
80 dev_priv->mm.interruptible = false; in cancel_userptr()
88 dev_priv->mm.interruptible = was_interruptible; in cancel_userptr()
100 struct mm_struct *mm, in invalidate_range__linear() argument
133 struct mm_struct *mm, in i915_gem_userptr_mn_invalidate_range_start() argument
148 it = invalidate_range__linear(mn, mm, start, end); in i915_gem_userptr_mn_invalidate_range_start()
184 i915_mmu_notifier_create(struct mm_struct *mm) in i915_mmu_notifier_create() argument
201 ret = __mmu_notifier_register(&mn->mn, mm); in i915_mmu_notifier_create()
310 i915_mmu_notifier_find(struct i915_mm_struct *mm) in i915_mmu_notifier_find() argument
[all …]
i915_gem_shrinker.c
82 { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND }, in i915_gem_shrink()
83 { &dev_priv->mm.bound_list, I915_SHRINK_BOUND }, in i915_gem_shrink()
172 if (to_i915(dev)->mm.shrinker_no_lock_stealing) in i915_gem_shrinker_lock()
198 container_of(shrinker, struct drm_i915_private, mm.shrinker); in i915_gem_shrinker_count()
208 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) in i915_gem_shrinker_count()
212 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { in i915_gem_shrinker_count()
228 container_of(shrinker, struct drm_i915_private, mm.shrinker); in i915_gem_shrinker_scan()
256 container_of(nb, struct drm_i915_private, mm.oom_notifier); in i915_gem_shrinker_oom()
274 was_interruptible = dev_priv->mm.interruptible; in i915_gem_shrinker_oom()
275 dev_priv->mm.interruptible = false; in i915_gem_shrinker_oom()
[all …]
/linux-4.1.27/kernel/
fork.c
390 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) in dup_mmap() argument
400 uprobe_dup_mmap(oldmm, mm); in dup_mmap()
404 down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); in dup_mmap()
407 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); in dup_mmap()
409 mm->total_vm = oldmm->total_vm; in dup_mmap()
410 mm->shared_vm = oldmm->shared_vm; in dup_mmap()
411 mm->exec_vm = oldmm->exec_vm; in dup_mmap()
412 mm->stack_vm = oldmm->stack_vm; in dup_mmap()
414 rb_link = &mm->mm_rb.rb_node; in dup_mmap()
416 pprev = &mm->mmap; in dup_mmap()
[all …]
/linux-4.1.27/arch/alpha/include/asm/
tlbflush.h
21 ev4_flush_tlb_current(struct mm_struct *mm) in ev4_flush_tlb_current() argument
23 __load_new_mm_context(mm); in ev4_flush_tlb_current()
28 ev5_flush_tlb_current(struct mm_struct *mm) in ev5_flush_tlb_current() argument
30 __load_new_mm_context(mm); in ev5_flush_tlb_current()
38 ev4_flush_tlb_current_page(struct mm_struct * mm, in ev4_flush_tlb_current_page() argument
44 __load_new_mm_context(mm); in ev4_flush_tlb_current_page()
51 ev5_flush_tlb_current_page(struct mm_struct * mm, in ev5_flush_tlb_current_page() argument
56 __load_new_mm_context(mm); in ev5_flush_tlb_current_page()
89 flush_tlb_other(struct mm_struct *mm) in flush_tlb_other() argument
91 unsigned long *mmc = &mm->context[smp_processor_id()]; in flush_tlb_other()
[all …]
pgalloc.h
14 pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) in pmd_populate() argument
21 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) in pmd_populate_kernel() argument
27 pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) in pgd_populate() argument
32 extern pgd_t *pgd_alloc(struct mm_struct *mm);
35 pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
41 pmd_alloc_one(struct mm_struct *mm, unsigned long address) in pmd_alloc_one() argument
48 pmd_free(struct mm_struct *mm, pmd_t *pmd) in pmd_free() argument
54 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) in pte_alloc_one_kernel() argument
61 pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
67 pte_alloc_one(struct mm_struct *mm, unsigned long address) in pte_alloc_one() argument
[all …]
mmu_context.h
117 __get_new_mm_context(struct mm_struct *mm, long cpu) in __get_new_mm_context() argument
190 struct mm_struct * mm = current->active_mm; \
192 if (!mm->context[cpu]) \
193 __load_new_mm_context(mm); \
213 #define deactivate_mm(tsk,mm) do { } while (0) argument
229 init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
234 mm->context[i] = 0; in init_new_context()
237 = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; in init_new_context()
242 destroy_context(struct mm_struct *mm) in destroy_context() argument
248 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
[all …]
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/
kfd_mqd_manager_cik.c
36 static int init_mqd(struct mqd_manager *mm, void **mqd, in init_mqd() argument
44 BUG_ON(!mm || !q || !mqd); in init_mqd()
48 retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), in init_mqd()
103 retval = mm->update_mqd(mm, m, q); in init_mqd()
108 static int init_mqd_sdma(struct mqd_manager *mm, void **mqd, in init_mqd_sdma() argument
115 BUG_ON(!mm || !mqd || !mqd_mem_obj); in init_mqd_sdma()
117 retval = kfd_gtt_sa_allocate(mm->dev, in init_mqd_sdma()
132 retval = mm->update_mqd(mm, m, q); in init_mqd_sdma()
137 static void uninit_mqd(struct mqd_manager *mm, void *mqd, in uninit_mqd() argument
140 BUG_ON(!mm || !mqd); in uninit_mqd()
[all …]
kfd_process.c
83 if (thread->mm == NULL) in kfd_create_process()
87 if (thread->group_leader->mm != thread->mm) in kfd_create_process()
91 down_write(&thread->mm->mmap_sem); in kfd_create_process()
110 up_write(&thread->mm->mmap_sem); in kfd_create_process()
119 if (thread->mm == NULL) in kfd_get_process()
123 if (thread->group_leader->mm != thread->mm) in kfd_get_process()
131 static struct kfd_process *find_process_by_mm(const struct mm_struct *mm) in find_process_by_mm() argument
136 kfd_processes, (uintptr_t)mm) in find_process_by_mm()
137 if (process->mm == mm) in find_process_by_mm()
149 p = find_process_by_mm(thread->mm); in find_process()
[all …]
Dkfd_mqd_manager.h64 int (*init_mqd)(struct mqd_manager *mm, void **mqd,
68 int (*load_mqd)(struct mqd_manager *mm, void *mqd,
72 int (*update_mqd)(struct mqd_manager *mm, void *mqd,
75 int (*destroy_mqd)(struct mqd_manager *mm, void *mqd,
80 void (*uninit_mqd)(struct mqd_manager *mm, void *mqd,
83 bool (*is_occupied)(struct mqd_manager *mm, void *mqd,
/linux-4.1.27/arch/x86/include/asm/
Dpgalloc.h8 static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; } in __paravirt_pgd_alloc() argument
13 #define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm) argument
14 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {} in paravirt_pgd_free() argument
15 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pte() argument
16 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pmd() argument
19 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pud() argument
34 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
42 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
48 static inline void pte_free(struct mm_struct *mm, struct page *pte) in pte_free() argument
62 static inline void pmd_populate_kernel(struct mm_struct *mm, in pmd_populate_kernel() argument
[all …]
Dmmu_context.h24 static inline void load_mm_cr4(struct mm_struct *mm) in load_mm_cr4() argument
27 atomic_read(&mm->context.perf_rdpmc_allowed)) in load_mm_cr4()
33 static inline void load_mm_cr4(struct mm_struct *mm) {} in load_mm_cr4() argument
51 static inline void load_mm_ldt(struct mm_struct *mm) in load_mm_ldt() argument
56 ldt = lockless_dereference(mm->context.ldt); in load_mm_ldt()
83 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
84 void destroy_context(struct mm_struct *mm);
87 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
198 #define deactivate_mm(tsk, mm) \ argument
203 #define deactivate_mm(tsk, mm) \ argument
[all …]
Dmpx.h65 static inline int kernel_managing_mpx_tables(struct mm_struct *mm) in kernel_managing_mpx_tables() argument
67 return (mm->bd_addr != MPX_INVALID_BOUNDS_DIR); in kernel_managing_mpx_tables()
69 static inline void mpx_mm_init(struct mm_struct *mm) in mpx_mm_init() argument
75 mm->bd_addr = MPX_INVALID_BOUNDS_DIR; in mpx_mm_init()
77 void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
89 static inline int kernel_managing_mpx_tables(struct mm_struct *mm) in kernel_managing_mpx_tables() argument
93 static inline void mpx_mm_init(struct mm_struct *mm) in mpx_mm_init() argument
96 static inline void mpx_notify_unmap(struct mm_struct *mm, in mpx_notify_unmap() argument
Dtlbflush.h185 static inline void flush_tlb_mm(struct mm_struct *mm) in flush_tlb_mm() argument
187 if (mm == current->active_mm) in flush_tlb_mm()
205 static inline void flush_tlb_mm_range(struct mm_struct *mm, in flush_tlb_mm_range() argument
208 if (mm == current->active_mm) in flush_tlb_mm_range()
213 struct mm_struct *mm, in native_flush_tlb_others() argument
235 #define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL) argument
243 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
250 struct mm_struct *mm,
265 #define flush_tlb_others(mask, mm, start, end) \ argument
266 native_flush_tlb_others(mask, mm, start, end)
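
Callers normally reach the range variant above through flush_tlb_range(), which in this tree maps onto flush_tlb_mm_range() with the vma's flags. A hedged sketch of the usual call, assuming PTEs in [start, end) of vma were just changed (example_flush_range() is illustrative):

    static void example_flush_range(struct vm_area_struct *vma,
                                    unsigned long start, unsigned long end)
    {
            /* vm_flags lets the implementation skip e.g. VM_HUGETLB ranges */
            flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags);
    }
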
Dhugetlb.h9 static inline int is_hugepage_only_range(struct mm_struct *mm, in is_hugepage_only_range() argument
30 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) { in hugetlb_prefault_arch_hook() argument
41 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, in set_huge_pte_at() argument
44 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at()
47 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, in huge_ptep_get_and_clear() argument
50 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear()
69 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, in huge_ptep_set_wrprotect() argument
72 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect()
Dparavirt.h334 struct mm_struct *mm) in paravirt_arch_dup_mmap() argument
336 PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm); in paravirt_arch_dup_mmap()
339 static inline void paravirt_arch_exit_mmap(struct mm_struct *mm) in paravirt_arch_exit_mmap() argument
341 PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm); in paravirt_arch_exit_mmap()
358 struct mm_struct *mm, in flush_tlb_others() argument
362 PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end); in flush_tlb_others()
365 static inline int paravirt_pgd_alloc(struct mm_struct *mm) in paravirt_pgd_alloc() argument
367 return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm); in paravirt_pgd_alloc()
370 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) in paravirt_pgd_free() argument
372 PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd); in paravirt_pgd_free()
[all …]
Dpgtable.h40 #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) argument
41 #define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd) argument
61 #define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) argument
64 #define pte_update(mm, addr, ptep) do { } while (0) argument
65 #define pte_update_defer(mm, addr, ptep) do { } while (0) argument
66 #define pmd_update(mm, addr, ptep) do { } while (0) argument
67 #define pmd_update_defer(mm, addr, ptep) do { } while (0) argument
444 static inline bool pte_accessible(struct mm_struct *mm, pte_t a) in pte_accessible() argument
450 mm_tlb_flush_pending(mm)) in pte_accessible()
652 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) argument
[all …]
/linux-4.1.27/fs/proc/
Dtask_nommu.c18 void task_mem(struct seq_file *m, struct mm_struct *mm) in task_mem() argument
25 down_read(&mm->mmap_sem); in task_mem()
26 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { in task_mem()
39 if (atomic_read(&mm->mm_count) > 1 || in task_mem()
49 if (atomic_read(&mm->mm_count) > 1) in task_mem()
50 sbytes += kobjsize(mm); in task_mem()
52 bytes += kobjsize(mm); in task_mem()
77 up_read(&mm->mmap_sem); in task_mem()
80 unsigned long task_vsize(struct mm_struct *mm) in task_vsize() argument
86 down_read(&mm->mmap_sem); in task_vsize()
[all …]
Dtask_mmu.c22 void task_mem(struct seq_file *m, struct mm_struct *mm) in task_mem() argument
34 hiwater_vm = total_vm = mm->total_vm; in task_mem()
35 if (hiwater_vm < mm->hiwater_vm) in task_mem()
36 hiwater_vm = mm->hiwater_vm; in task_mem()
37 hiwater_rss = total_rss = get_mm_rss(mm); in task_mem()
38 if (hiwater_rss < mm->hiwater_rss) in task_mem()
39 hiwater_rss = mm->hiwater_rss; in task_mem()
41 data = mm->total_vm - mm->shared_vm - mm->stack_vm; in task_mem()
42 text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; in task_mem()
43 lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; in task_mem()
[all …]
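
Both task_mem() variants above rely on the same rule: hold mm->mmap_sem for reading across any walk of the VMA list. The rule in miniature (count_vmas() is a hypothetical helper, not a kernel function):

    static int count_vmas(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;
            int n = 0;

            down_read(&mm->mmap_sem);       /* pins the vma list and rb-tree */
            for (vma = mm->mmap; vma; vma = vma->vm_next)
                    n++;
            up_read(&mm->mmap_sem);
            return n;
    }
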
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/core/
Dmm.c26 #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
30 nvkm_mm_dump(struct nvkm_mm *mm, const char *header) in nvkm_mm_dump() argument
36 list_for_each_entry(node, &mm->nodes, nl_entry) { in nvkm_mm_dump()
41 list_for_each_entry(node, &mm->free, fl_entry) { in nvkm_mm_dump()
48 nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis) in nvkm_mm_free() argument
72 list_for_each_entry(prev, &mm->free, fl_entry) { in nvkm_mm_free()
86 region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size) in region_head() argument
111 nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, in nvkm_mm_head() argument
121 list_for_each_entry(this, &mm->free, fl_entry) { in nvkm_mm_head()
131 s = roundup(s, mm->block_size); in nvkm_mm_head()
[all …]
/linux-4.1.27/arch/powerpc/include/asm/
Dpgalloc-64.h44 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
49 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
58 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) in pud_alloc_one() argument
64 static inline void pud_free(struct mm_struct *mm, pud_t *pud) in pud_free() argument
69 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) in pud_populate() argument
74 #define pmd_populate(mm, pmd, pte_page) \ argument
75 pmd_populate_kernel(mm, pmd, page_address(pte_page))
76 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte)) argument
79 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
85 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
[all …]
Dpage_64.h124 extern unsigned int get_slice_psize(struct mm_struct *mm,
127 extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
128 extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
131 #define slice_mm_new_context(mm) ((mm)->context.id == MMU_NO_CONTEXT) argument
137 #define get_slice_psize(mm, addr) ((mm)->context.user_psize) argument
138 #define slice_set_user_psize(mm, psize) \ argument
140 (mm)->context.user_psize = (psize); \
141 (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
145 #define get_slice_psize(mm, addr) MMU_PAGE_64K argument
147 #define get_slice_psize(mm, addr) MMU_PAGE_4K argument
[all …]
Dtlbflush.h41 extern void local_flush_tlb_mm(struct mm_struct *mm);
44 extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
48 extern void flush_tlb_mm(struct mm_struct *mm);
50 extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
53 #define flush_tlb_mm(mm) local_flush_tlb_mm(mm) argument
55 #define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i) argument
64 extern void flush_tlb_mm(struct mm_struct *mm);
75 static inline void local_flush_tlb_mm(struct mm_struct *mm) in local_flush_tlb_mm() argument
77 flush_tlb_mm(mm); in local_flush_tlb_mm()
96 struct mm_struct *mm; member
[all …]
Dpgalloc-32.h11 extern pgd_t *pgd_alloc(struct mm_struct *mm);
12 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
19 #define pmd_free(mm, x) do { } while (0) argument
24 #define pmd_populate_kernel(mm, pmd, pte) \ argument
26 #define pmd_populate(mm, pmd, pte) \ argument
30 #define pmd_populate_kernel(mm, pmd, pte) \ argument
32 #define pmd_populate(mm, pmd, pte) \ argument
37 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
38 extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
40 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
[all …]
Dpgtable-ppc64.h142 #define pte_pagesize_index(mm, addr, pte) \ argument
148 psize = get_slice_psize(mm, addr); \
152 #define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K argument
201 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) argument
215 extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
219 static inline unsigned long pte_update(struct mm_struct *mm, in pte_update() argument
245 assert_pte_locked(mm, addr); in pte_update()
249 hpte_need_flush(mm, addr, ptep, old, huge); in pte_update()
255 static inline int __ptep_test_and_clear_young(struct mm_struct *mm, in __ptep_test_and_clear_young() argument
262 old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); in __ptep_test_and_clear_young()
[all …]
Dmmu_context.h17 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
18 extern void destroy_context(struct mm_struct *mm);
21 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
35 extern int use_cop(unsigned long acop, struct mm_struct *mm);
36 extern void drop_cop(unsigned long acop, struct mm_struct *mm);
87 #define deactivate_mm(tsk,mm) do { } while (0) argument
103 static inline void enter_lazy_tlb(struct mm_struct *mm, in enter_lazy_tlb() argument
/linux-4.1.27/include/drm/
Ddrm_mm.h73 struct drm_mm *mm; member
122 static inline bool drm_mm_initialized(struct drm_mm *mm) in drm_mm_initialized() argument
124 return mm->hole_stack.next; in drm_mm_initialized()
179 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ argument
180 &(mm)->head_node.node_list, \
202 #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ argument
203 for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
204 &entry->hole_stack != &(mm)->hole_stack ? \
210 #define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ argument
211 …for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm…
[all …]
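
The drm_mm iterators above assume an allocator already set up with drm_mm_init(); a sketch of walking the allocated nodes (drm_mm_used_size() is illustrative, and the accounting assumes a node->size field as in this tree):

    static u64 drm_mm_used_size(struct drm_mm *mm)
    {
            struct drm_mm_node *node;
            u64 total = 0;

            if (!drm_mm_initialized(mm))
                    return 0;
            drm_mm_for_each_node(node, mm)  /* every allocated node, in order */
                    total += node->size;
            return total;
    }
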
/linux-4.1.27/arch/microblaze/include/asm/
Dmmu_context_mm.h36 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
81 static inline void get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
85 if (mm->context != NO_CONTEXT) in get_mmu_context()
96 mm->context = ctx; in get_mmu_context()
97 context_mm[ctx] = mm; in get_mmu_context()
103 # define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) argument
108 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
110 if (mm->context != NO_CONTEXT) { in destroy_context()
111 clear_bit(mm->context, context_map); in destroy_context()
112 mm->context = NO_CONTEXT; in destroy_context()
[all …]
Dpgalloc.h99 #define pgd_free(mm, pgd) free_pgd_fast(pgd) argument
100 #define pgd_alloc(mm) get_pgd_fast() argument
108 #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) argument
109 #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) argument
111 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
113 static inline struct page *pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
135 static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, in pte_alloc_one_fast() argument
156 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
166 static inline void pte_free(struct mm_struct *mm, struct page *ptepage) in pte_free() argument
172 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
[all …]
/linux-4.1.27/arch/tile/include/asm/
Dpgalloc.h50 static inline void pmd_populate_kernel(struct mm_struct *mm, in pmd_populate_kernel() argument
57 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, in pmd_populate() argument
68 extern pgd_t *pgd_alloc(struct mm_struct *mm);
69 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
71 extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
73 extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
75 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
78 return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER); in pte_alloc_one()
81 static inline void pte_free(struct mm_struct *mm, struct page *pte) in pte_free() argument
83 pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER); in pte_free()
[all …]
/linux-4.1.27/arch/cris/mm/
Dtlb.c36 alloc_context(struct mm_struct *mm) in alloc_context() argument
40 D(printk("tlb: alloc context %d (%p)\n", map_replace_ptr, mm)); in alloc_context()
57 mm->context.page_id = map_replace_ptr; in alloc_context()
58 page_id_map[map_replace_ptr] = mm; in alloc_context()
71 get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
73 if(mm->context.page_id == NO_CONTEXT) in get_mmu_context()
74 alloc_context(mm); in get_mmu_context()
86 destroy_context(struct mm_struct *mm) in destroy_context() argument
88 if(mm->context.page_id != NO_CONTEXT) { in destroy_context()
89 D(printk("destroy_context %d (%p)\n", mm->context.page_id, mm)); in destroy_context()
[all …]
/linux-4.1.27/arch/xtensa/include/asm/
Dmmu_context.h68 static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu) in get_new_mmu_context() argument
80 mm->context.asid[cpu] = asid; in get_new_mmu_context()
81 mm->context.cpu = cpu; in get_new_mmu_context()
84 static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) in get_mmu_context() argument
90 if (mm) { in get_mmu_context()
91 unsigned long asid = mm->context.asid[cpu]; in get_mmu_context()
95 get_new_mmu_context(mm, cpu); in get_mmu_context()
99 static inline void activate_context(struct mm_struct *mm, unsigned int cpu) in activate_context() argument
101 get_mmu_context(mm, cpu); in activate_context()
102 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); in activate_context()
[all …]
Dpgalloc.h24 #define pmd_populate_kernel(mm, pmdp, ptep) \ argument
26 #define pmd_populate(mm, pmdp, page) \ argument
31 pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
36 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
41 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
55 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
61 pte = pte_alloc_one_kernel(mm, addr); in pte_alloc_one()
72 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
77 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) in pte_free() argument
/linux-4.1.27/arch/ia64/include/asm/
Dpgalloc.h25 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
30 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
37 pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) in pgd_populate() argument
42 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) in pud_alloc_one() argument
47 static inline void pud_free(struct mm_struct *mm, pud_t *pud) in pud_free() argument
51 #define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
55 pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) in pud_populate() argument
60 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) in pmd_alloc_one() argument
65 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) in pmd_free() argument
70 #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
[all …]
Dmmu_context.h48 extern void wrap_mmu_context (struct mm_struct *mm);
51 enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
78 get_mmu_context (struct mm_struct *mm) in get_mmu_context() argument
81 nv_mm_context_t context = mm->context; in get_mmu_context()
88 context = mm->context; in get_mmu_context()
90 cpumask_clear(mm_cpumask(mm)); in get_mmu_context()
97 wrap_mmu_context(mm); in get_mmu_context()
99 mm->context = context = ia64_ctx.next++; in get_mmu_context()
118 init_new_context (struct task_struct *p, struct mm_struct *mm) in init_new_context() argument
120 mm->context = 0; in init_new_context()
[all …]
Dtlbflush.h30 extern void smp_flush_tlb_mm (struct mm_struct *mm);
39 local_finish_flush_tlb_mm (struct mm_struct *mm) in local_finish_flush_tlb_mm() argument
41 if (mm == current->active_mm) in local_finish_flush_tlb_mm()
42 activate_context(mm); in local_finish_flush_tlb_mm()
51 flush_tlb_mm (struct mm_struct *mm) in flush_tlb_mm() argument
53 if (!mm) in flush_tlb_mm()
56 set_bit(mm->context, ia64_ctx.flushmap); in flush_tlb_mm()
57 mm->context = 0; in flush_tlb_mm()
59 if (atomic_read(&mm->mm_users) == 0) in flush_tlb_mm()
63 smp_flush_tlb_mm(mm); in flush_tlb_mm()
[all …]
Dhugetlb.h15 static inline int is_hugepage_only_range(struct mm_struct *mm, in is_hugepage_only_range() argument
23 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) in hugetlb_prefault_arch_hook() argument
27 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, in set_huge_pte_at() argument
30 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at()
33 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, in huge_ptep_get_and_clear() argument
36 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear()
54 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, in huge_ptep_set_wrprotect() argument
57 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect()
/linux-4.1.27/drivers/gpio/
Dgpio-mpc8xxx.c53 to_mpc8xxx_gpio_chip(struct of_mm_gpio_chip *mm) in to_mpc8xxx_gpio_chip() argument
55 return container_of(mm, struct mpc8xxx_gpio_chip, mm_gc); in to_mpc8xxx_gpio_chip()
58 static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm) in mpc8xxx_gpio_save_regs() argument
60 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); in mpc8xxx_gpio_save_regs()
62 mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT); in mpc8xxx_gpio_save_regs()
73 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); in mpc8572_gpio_get() local
74 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); in mpc8572_gpio_get()
77 out_mask = in_be32(mm->regs + GPIO_DIR); in mpc8572_gpio_get()
79 val = in_be32(mm->regs + GPIO_DAT) & ~out_mask; in mpc8572_gpio_get()
87 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); in mpc8xxx_gpio_get() local
[all …]
/linux-4.1.27/arch/unicore32/include/asm/
Dmmu_context.h24 #define init_new_context(tsk, mm) 0 argument
26 #define destroy_context(mm) do { } while (0) argument
38 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
58 #define deactivate_mm(tsk, mm) do { } while (0) argument
68 #define arch_exit_mmap(mm) \ argument
70 struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
76 mm->mmap = NULL; \
77 rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
78 vmacache_invalidate(mm); \
79 mm->map_count--; \
[all …]
Dpgalloc.h25 extern pgd_t *get_pgd_slow(struct mm_struct *mm);
26 extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
28 #define pgd_alloc(mm) get_pgd_slow(mm) argument
29 #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) argument
37 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) in pte_alloc_one_kernel() argument
49 pte_alloc_one(struct mm_struct *mm, unsigned long addr) in pte_alloc_one() argument
70 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
76 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) in pte_free() argument
93 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) in pmd_populate_kernel() argument
105 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) in pmd_populate() argument
/linux-4.1.27/arch/x86/mm/
Dmpx.c29 struct mm_struct *mm = current->mm; in mpx_mmap() local
37 down_write(&mm->mmap_sem); in mpx_mmap()
40 if (mm->map_count > sysctl_max_map_count) { in mpx_mmap()
55 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; in mpx_mmap()
64 vma = find_vma(mm, ret); in mpx_mmap()
71 up_write(&mm->mmap_sem); in mpx_mmap()
77 up_write(&mm->mmap_sem); in mpx_mmap()
366 struct mm_struct *mm = tsk->mm; in mpx_enable_management() local
380 down_write(&mm->mmap_sem); in mpx_enable_management()
381 mm->bd_addr = bd_base; in mpx_enable_management()
[all …]
Dtlb.c134 struct mm_struct *mm, unsigned long start, in native_flush_tlb_others() argument
138 info.flush_mm = mm; in native_flush_tlb_others()
147 cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu); in native_flush_tlb_others()
158 struct mm_struct *mm = current->mm; in flush_tlb_current_task() local
168 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) in flush_tlb_current_task()
169 flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); in flush_tlb_current_task()
185 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, in flush_tlb_mm_range() argument
193 if (current->active_mm != mm) { in flush_tlb_mm_range()
200 if (!current->mm) { in flush_tlb_mm_range()
233 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) in flush_tlb_mm_range()
[all …]
Dpgtable.c19 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) in pte_alloc_one_kernel() argument
24 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) in pte_alloc_one() argument
105 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) in pgd_set_mm() argument
107 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm)); in pgd_set_mm()
108 virt_to_page(pgd)->index = (pgoff_t)mm; in pgd_set_mm()
116 static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) in pgd_ctor() argument
131 pgd_set_mm(pgd, mm); in pgd_ctor()
171 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) in pud_populate() argument
173 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); in pud_populate()
185 flush_tlb_mm(mm); in pud_populate()
[all …]
Dmmap.c112 void arch_pick_mmap_layout(struct mm_struct *mm) in arch_pick_mmap_layout() argument
119 mm->mmap_legacy_base = mmap_legacy_base(random_factor); in arch_pick_mmap_layout()
122 mm->mmap_base = mm->mmap_legacy_base; in arch_pick_mmap_layout()
123 mm->get_unmapped_area = arch_get_unmapped_area; in arch_pick_mmap_layout()
125 mm->mmap_base = mmap_base(random_factor); in arch_pick_mmap_layout()
126 mm->get_unmapped_area = arch_get_unmapped_area_topdown; in arch_pick_mmap_layout()
/linux-4.1.27/arch/arc/include/asm/
Dmmu_context.h50 #define asid_mm(mm, cpu) mm->context.asid[cpu] argument
51 #define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK) argument
60 static inline void get_new_mmu_context(struct mm_struct *mm) in get_new_mmu_context() argument
77 if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK)) in get_new_mmu_context()
95 asid_mm(mm, cpu) = asid_cpu(cpu); in get_new_mmu_context()
98 write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE); in get_new_mmu_context()
108 init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
113 asid_mm(mm, i) = MM_CTXT_NO_ASID; in init_new_context()
118 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
124 asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID; in destroy_context()
[all …]
Dpgalloc.h39 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) in pmd_populate_kernel() argument
45 pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep) in pmd_populate() argument
55 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
74 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
93 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
105 pte_alloc_one(struct mm_struct *mm, unsigned long address) in pte_alloc_one() argument
123 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
128 static inline void pte_free(struct mm_struct *mm, pgtable_t ptep) in pte_free() argument
134 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
/linux-4.1.27/arch/arm/mm/
Dpgd.c33 pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
59 new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR), in pgd_alloc()
64 new_pmd = pmd_alloc(mm, new_pud, 0); in pgd_alloc()
75 new_pud = pud_alloc(mm, new_pgd, 0); in pgd_alloc()
79 new_pmd = pmd_alloc(mm, new_pud, 0); in pgd_alloc()
83 new_pte = pte_alloc_map(mm, NULL, new_pmd, 0); in pgd_alloc()
99 pmd_free(mm, new_pmd); in pgd_alloc()
100 mm_dec_nr_pmds(mm); in pgd_alloc()
102 pud_free(mm, new_pud); in pgd_alloc()
109 void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) in pgd_free() argument
[all …]
Dcontext.c54 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, in a15_erratum_get_cpumask() argument
62 context_id = mm->context.id.counter; in a15_erratum_get_cpumask()
177 static u64 new_context(struct mm_struct *mm, unsigned int cpu) in new_context() argument
180 u64 asid = atomic64_read(&mm->context.id); in new_context()
222 cpumask_clear(mm_cpumask(mm)); in new_context()
226 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) in check_and_switch_context() argument
232 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) in check_and_switch_context()
233 __check_vmalloc_seq(mm); in check_and_switch_context()
242 asid = atomic64_read(&mm->context.id); in check_and_switch_context()
249 asid = atomic64_read(&mm->context.id); in check_and_switch_context()
[all …]
Dmmap.c58 struct mm_struct *mm = current->mm; in arch_get_unmapped_area() local
90 vma = find_vma(mm, addr); in arch_get_unmapped_area()
98 info.low_limit = mm->mmap_base; in arch_get_unmapped_area()
111 struct mm_struct *mm = current->mm; in arch_get_unmapped_area_topdown() local
141 vma = find_vma(mm, addr); in arch_get_unmapped_area_topdown()
150 info.high_limit = mm->mmap_base; in arch_get_unmapped_area_topdown()
164 info.low_limit = mm->mmap_base; in arch_get_unmapped_area_topdown()
182 void arch_pick_mmap_layout(struct mm_struct *mm) in arch_pick_mmap_layout() argument
190 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; in arch_pick_mmap_layout()
191 mm->get_unmapped_area = arch_get_unmapped_area; in arch_pick_mmap_layout()
[all …]
Dfault.c59 void show_pte(struct mm_struct *mm, unsigned long addr) in show_pte() argument
63 if (!mm) in show_pte()
64 mm = &init_mm; in show_pte()
66 pr_alert("pgd = %p\n", mm->pgd); in show_pte()
67 pgd = pgd_offset(mm, addr); in show_pte()
124 void show_pte(struct mm_struct *mm, unsigned long addr) in show_pte() argument
132 __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, in __do_kernel_fault() argument
149 show_pte(mm, addr); in __do_kernel_fault()
171 show_pte(tsk->mm, addr); in __do_user_fault()
189 struct mm_struct *mm = tsk->active_mm; in do_bad_area() local
[all …]
/linux-4.1.27/arch/parisc/include/asm/
Dpgalloc.h21 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
46 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
58 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) in pgd_populate() argument
64 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) in pmd_alloc_one() argument
73 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) in pmd_free() argument
82 mm_inc_nr_pmds(mm); in pmd_free()
97 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) argument
98 #define pmd_free(mm, x) do { } while (0) argument
99 #define pgd_populate(mm, pmd, pte) BUG() argument
104 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) in pmd_populate_kernel() argument
[all …]
Dmmu_context.h11 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
22 init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
24 BUG_ON(atomic_read(&mm->mm_users) != 1); in init_new_context()
26 mm->context = alloc_sid(); in init_new_context()
31 destroy_context(struct mm_struct *mm) in destroy_context() argument
33 free_sid(mm->context); in destroy_context()
34 mm->context = 0; in destroy_context()
61 #define deactivate_mm(tsk,mm) do { } while (0) argument
Dtlbflush.h50 static inline void flush_tlb_mm(struct mm_struct *mm) in flush_tlb_mm() argument
52 BUG_ON(mm == &init_mm); /* Should never happen */ in flush_tlb_mm()
68 if (mm) { in flush_tlb_mm()
69 if (mm->context != 0) in flush_tlb_mm()
70 free_sid(mm->context); in flush_tlb_mm()
71 mm->context = alloc_sid(); in flush_tlb_mm()
72 if (mm == current->active_mm) in flush_tlb_mm()
73 load_context(mm->context); in flush_tlb_mm()
/linux-4.1.27/arch/sh/mm/
Dtlbflush_32.c28 if (vma->vm_mm != current->mm) { in local_flush_tlb_page()
42 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range() local
45 if (cpu_context(cpu, mm) != NO_CONTEXT) { in local_flush_tlb_range()
52 cpu_context(cpu, mm) = NO_CONTEXT; in local_flush_tlb_range()
53 if (mm == current->mm) in local_flush_tlb_range()
54 activate_context(mm, cpu); in local_flush_tlb_range()
59 asid = cpu_asid(cpu, mm); in local_flush_tlb_range()
63 if (mm != current->mm) { in local_flush_tlb_range()
106 void local_flush_tlb_mm(struct mm_struct *mm) in local_flush_tlb_mm() argument
112 if (cpu_context(cpu, mm) != NO_CONTEXT) { in local_flush_tlb_mm()
[all …]
Dtlbflush_64.c86 struct mm_struct *mm; in local_flush_tlb_range() local
88 mm = vma->vm_mm; in local_flush_tlb_range()
89 if (cpu_context(cpu, mm) == NO_CONTEXT) in local_flush_tlb_range()
97 match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID; in local_flush_tlb_range()
128 void local_flush_tlb_mm(struct mm_struct *mm) in local_flush_tlb_mm() argument
133 if (cpu_context(cpu, mm) == NO_CONTEXT) in local_flush_tlb_mm()
138 cpu_context(cpu, mm) = NO_CONTEXT; in local_flush_tlb_mm()
139 if (mm == current->mm) in local_flush_tlb_mm()
140 activate_context(mm, cpu); in local_flush_tlb_mm()
Dhugetlbpage.c24 pte_t *huge_pte_alloc(struct mm_struct *mm, in huge_pte_alloc() argument
32 pgd = pgd_offset(mm, addr); in huge_pte_alloc()
34 pud = pud_alloc(mm, pgd, addr); in huge_pte_alloc()
36 pmd = pmd_alloc(mm, pud, addr); in huge_pte_alloc()
38 pte = pte_alloc_map(mm, NULL, pmd, addr); in huge_pte_alloc()
45 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) in huge_pte_offset() argument
52 pgd = pgd_offset(mm, addr); in huge_pte_offset()
65 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument
Dfault.c57 static void show_pte(struct mm_struct *mm, unsigned long addr) in show_pte() argument
61 if (mm) { in show_pte()
62 pgd = mm->pgd; in show_pte()
280 struct mm_struct *mm = current->mm; in __bad_area() local
286 up_read(&mm->mmap_sem); in __bad_area()
308 struct mm_struct *mm = tsk->mm; in do_sigbus() local
310 up_read(&mm->mmap_sem); in do_sigbus()
329 up_read(&current->mm->mmap_sem); in mm_fault_error()
341 up_read(&current->mm->mmap_sem); in mm_fault_error()
345 up_read(&current->mm->mmap_sem); in mm_fault_error()
[all …]
/linux-4.1.27/arch/mips/include/asm/
Dmmu_context.h85 #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) argument
86 #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) argument
89 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
102 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) in get_new_mmu_context() argument
119 cpu_context(cpu, mm) = asid_cache(cpu) = asid; in get_new_mmu_context()
127 init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
132 cpu_context(i, mm) = 0; in init_new_context()
134 atomic_set(&mm->context.fp_mode_switching, 0); in init_new_context()
168 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
172 #define deactivate_mm(tsk, mm) do { } while (0) argument
[all …]
Dpgalloc.h16 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, in pmd_populate_kernel() argument
22 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, in pmd_populate() argument
36 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) in pud_populate() argument
47 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
62 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
67 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
77 static inline struct page *pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
93 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
98 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) in pte_free() argument
112 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) in pmd_alloc_one() argument
[all …]
Dhugetlb.h16 static inline int is_hugepage_only_range(struct mm_struct *mm, in is_hugepage_only_range() argument
41 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) in hugetlb_prefault_arch_hook() argument
54 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, in set_huge_pte_at() argument
57 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at()
60 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, in huge_ptep_get_and_clear() argument
67 set_pte_at(mm, addr, ptep, clear); in huge_ptep_get_and_clear()
88 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, in huge_ptep_set_wrprotect() argument
91 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect()
/linux-4.1.27/arch/frv/mm/
Dmmu-context.c29 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
31 memset(&mm->context, 0, sizeof(mm->context)); in init_new_context()
32 INIT_LIST_HEAD(&mm->context.id_link); in init_new_context()
33 mm->context.itlb_cached_pge = 0xffffffffUL; in init_new_context()
34 mm->context.dtlb_cached_pge = 0xffffffffUL; in init_new_context()
130 void destroy_context(struct mm_struct *mm) in destroy_context() argument
132 mm_context_t *ctx = &mm->context; in destroy_context()
154 char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer) in proc_pid_status_frv_cxnr() argument
157 buffer += sprintf(buffer, "CXNR: %u\n", mm->context.id); in proc_pid_status_frv_cxnr()
171 struct mm_struct *mm = NULL; in cxn_pin_by_pid() local
[all …]
Dfault.c35 struct mm_struct *mm; in do_page_fault() local
54 mm = current->mm; in do_page_fault()
81 if (in_atomic() || !mm) in do_page_fault()
87 down_read(&mm->mmap_sem); in do_page_fault()
89 vma = find_vma(mm, ear0); in do_page_fault()
167 fault = handle_mm_fault(mm, vma, ear0, flags); in do_page_fault()
182 up_read(&mm->mmap_sem); in do_page_fault()
190 up_read(&mm->mmap_sem); in do_page_fault()
232 pge = pgd_offset(current->mm, ear0); in do_page_fault()
262 up_read(&mm->mmap_sem); in do_page_fault()
[all …]
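
The frv handler above, like the m32r one further down, follows the canonical arch fault flow; condensed to its skeleton (sketch_do_page_fault() is illustrative, and the stack-growth and retry logic of the real handlers is omitted):

    static int sketch_do_page_fault(struct mm_struct *mm, unsigned long address,
                                    unsigned int flags)
    {
            struct vm_area_struct *vma;
            int fault;

            down_read(&mm->mmap_sem);
            vma = find_vma(mm, address);
            if (!vma || vma->vm_start > address) {  /* VM_GROWSDOWN check omitted */
                    up_read(&mm->mmap_sem);
                    return -EFAULT;
            }
            fault = handle_mm_fault(mm, vma, address, flags);
            up_read(&mm->mmap_sem);
            return (fault & VM_FAULT_ERROR) ? -EFAULT : 0;
    }
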
/linux-4.1.27/arch/sh/include/asm/
Dmmu_context.h38 #define cpu_context(cpu, mm) ((mm)->context.id[cpu]) argument
40 #define cpu_asid(cpu, mm) \ argument
41 (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
57 static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) in get_mmu_context() argument
62 if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0) in get_mmu_context()
90 cpu_context(cpu, mm) = asid_cache(cpu) = asid; in get_mmu_context()
98 struct mm_struct *mm) in init_new_context() argument
103 cpu_context(i, mm) = NO_CONTEXT; in init_new_context()
112 static inline void activate_context(struct mm_struct *mm, unsigned int cpu) in activate_context() argument
114 get_mmu_context(mm, cpu); in activate_context()
[all …]
Dpgalloc.h10 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
13 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
14 extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
15 extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
18 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, in pmd_populate_kernel() argument
24 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, in pmd_populate() argument
34 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
40 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
57 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
62 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) in pte_free() argument
Dhugetlb.h9 static inline int is_hugepage_only_range(struct mm_struct *mm, in is_hugepage_only_range() argument
29 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) { in hugetlb_prefault_arch_hook() argument
40 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, in set_huge_pte_at() argument
43 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at()
46 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, in huge_ptep_get_and_clear() argument
49 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear()
67 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, in huge_ptep_set_wrprotect() argument
70 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect()
Dtlb.h22 struct mm_struct *mm; member
39 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) in tlb_gather_mmu() argument
41 tlb->mm = mm; in tlb_gather_mmu()
53 flush_tlb_mm(tlb->mm); in tlb_finish_mmu()
112 #define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
113 #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
114 #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
116 #define tlb_migrate_finish(mm) do { } while (0) argument
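
sh's tlb.h implements the mmu_gather contract consumed by the generic unmap path; the caller-side sequence, reduced to a sketch (sketch_unmap() is illustrative; real callers drive the zap through unmap_vmas()):

    static void sketch_unmap(struct mm_struct *mm,
                             unsigned long start, unsigned long end)
    {
            struct mmu_gather tlb;

            tlb_gather_mmu(&tlb, mm, start, end);   /* begin batched teardown */
            /* ... zap page tables for [start, end), filling the gather ... */
            tlb_finish_mmu(&tlb, start, end);       /* flush TLB, free pages */
    }
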
/linux-4.1.27/arch/mn10300/mm/
Dtlb-smp.c52 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
94 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, in flush_tlb_others() argument
104 BUG_ON(!mm); in flush_tlb_others()
119 flush_mm = mm; in flush_tlb_others()
143 void flush_tlb_mm(struct mm_struct *mm) in flush_tlb_mm() argument
148 cpumask_copy(&cpu_mask, mm_cpumask(mm)); in flush_tlb_mm()
153 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); in flush_tlb_mm()
163 struct mm_struct *mm = current->mm; in flush_tlb_current_task() local
167 cpumask_copy(&cpu_mask, mm_cpumask(mm)); in flush_tlb_current_task()
172 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); in flush_tlb_current_task()
[all …]
/linux-4.1.27/drivers/infiniband/hw/ipath/
Dipath_user_pages.c74 ret = get_user_pages(current, current->mm, in __ipath_get_user_pages()
82 current->mm->pinned_vm += num_pages; in __ipath_get_user_pages()
166 down_write(&current->mm->mmap_sem); in ipath_get_user_pages()
170 up_write(&current->mm->mmap_sem); in ipath_get_user_pages()
177 down_write(&current->mm->mmap_sem); in ipath_release_user_pages()
181 current->mm->pinned_vm -= num_pages; in ipath_release_user_pages()
183 up_write(&current->mm->mmap_sem); in ipath_release_user_pages()
188 struct mm_struct *mm; member
197 down_write(&work->mm->mmap_sem); in user_pages_account()
198 work->mm->pinned_vm -= work->num_pages; in user_pages_account()
[all …]
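
The ipath hits above show the 4.1-era pinning convention: take mmap_sem for writing, pin with the eight-argument get_user_pages(), and charge mm->pinned_vm. The contract in isolation (sketch_pin() is illustrative; error unwinding is trimmed):

    static long sketch_pin(unsigned long start, unsigned long npages,
                           struct page **pages)
    {
            long got;

            down_write(&current->mm->mmap_sem);
            got = get_user_pages(current, current->mm, start, npages,
                                 1 /* write */, 0 /* force */, pages, NULL);
            if (got > 0)
                    current->mm->pinned_vm += got;  /* account pinned pages */
            up_write(&current->mm->mmap_sem);
            return got;
    }
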
/linux-4.1.27/arch/tile/mm/
Dmmap.c32 static inline unsigned long mmap_base(struct mm_struct *mm) in mmap_base() argument
52 void arch_pick_mmap_layout(struct mm_struct *mm) in arch_pick_mmap_layout() argument
81 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; in arch_pick_mmap_layout()
82 mm->get_unmapped_area = arch_get_unmapped_area; in arch_pick_mmap_layout()
84 mm->mmap_base = mmap_base(mm); in arch_pick_mmap_layout()
85 mm->get_unmapped_area = arch_get_unmapped_area_topdown; in arch_pick_mmap_layout()
89 unsigned long arch_randomize_brk(struct mm_struct *mm) in arch_randomize_brk() argument
91 unsigned long range_end = mm->brk + 0x02000000; in arch_randomize_brk()
92 return randomize_range(mm->brk, range_end, 0) ? : mm->brk; in arch_randomize_brk()
Delf.c41 static int notify_exec(struct mm_struct *mm) in notify_exec() argument
55 exe_file = get_mm_exe_file(mm); in notify_exec()
63 down_read(&mm->mmap_sem); in notify_exec()
64 for (vma = current->mm->mmap; ; vma = vma->vm_next) { in notify_exec()
66 up_read(&mm->mmap_sem); in notify_exec()
94 up_read(&mm->mmap_sem); in notify_exec()
120 struct mm_struct *mm = current->mm; in arch_setup_additional_pages() local
128 if (!notify_exec(mm)) in arch_setup_additional_pages()
131 down_write(&mm->mmap_sem); in arch_setup_additional_pages()
152 up_write(&mm->mmap_sem); in arch_setup_additional_pages()
/linux-4.1.27/arch/blackfin/include/asm/
Dmmu_context.h60 activate_l1stack(struct mm_struct *mm, unsigned long sp_base) in activate_l1stack() argument
64 mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base; in activate_l1stack()
69 #define deactivate_mm(tsk,mm) do { } while (0) argument
120 static inline void protect_page(struct mm_struct *mm, unsigned long addr, in protect_page() argument
123 unsigned long *mask = mm->context.page_rwx_mask; in protect_page()
151 static inline void update_protections(struct mm_struct *mm) in update_protections() argument
154 if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) { in update_protections()
156 set_mask_dcplbs(mm->context.page_rwx_mask, cpu); in update_protections()
167 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
173 init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
[all …]
/linux-4.1.27/arch/x86/kernel/
Dldt.c106 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
112 mutex_init(&mm->context.lock); in init_new_context()
113 old_mm = current->mm; in init_new_context()
115 mm->context.ldt = NULL; in init_new_context()
121 mm->context.ldt = NULL; in init_new_context()
135 mm->context.ldt = new_ldt; in init_new_context()
147 void destroy_context(struct mm_struct *mm) in destroy_context() argument
149 free_ldt_struct(mm->context.ldt); in destroy_context()
150 mm->context.ldt = NULL; in destroy_context()
157 struct mm_struct *mm = current->mm; in read_ldt() local
[all …]
/linux-4.1.27/drivers/oprofile/
Dbuffer_sync.c89 struct mm_struct *mm = current->mm; in munmap_notify() local
92 down_read(&mm->mmap_sem); in munmap_notify()
94 mpnt = find_vma(mm, addr); in munmap_notify()
96 up_read(&mm->mmap_sem); in munmap_notify()
104 up_read(&mm->mmap_sem); in munmap_notify()
225 static unsigned long get_exec_dcookie(struct mm_struct *mm) in get_exec_dcookie() argument
230 if (!mm) in get_exec_dcookie()
233 exe_file = get_mm_exe_file(mm); in get_exec_dcookie()
252 lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset) in lookup_dcookie() argument
257 down_read(&mm->mmap_sem); in lookup_dcookie()
[all …]
/linux-4.1.27/arch/metag/mm/
Dhugetlbpage.c33 struct mm_struct *mm = current->mm; in prepare_hugepage_range() local
44 vma = find_vma(mm, ALIGN_HUGEPT(addr)); in prepare_hugepage_range()
48 vma = find_vma(mm, addr); in prepare_hugepage_range()
59 pte_t *huge_pte_alloc(struct mm_struct *mm, in huge_pte_alloc() argument
67 pgd = pgd_offset(mm, addr); in huge_pte_alloc()
70 pte = pte_alloc_map(mm, NULL, pmd, addr); in huge_pte_alloc()
77 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) in huge_pte_offset() argument
84 pgd = pgd_offset(mm, addr); in huge_pte_offset()
92 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument
107 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, in follow_huge_pmd() argument
[all …]
/linux-4.1.27/arch/alpha/kernel/
Dsmp.c642 struct mm_struct *mm = (struct mm_struct *) x; in ipi_flush_tlb_mm() local
643 if (mm == current->active_mm && !asn_locked()) in ipi_flush_tlb_mm()
644 flush_tlb_current(mm); in ipi_flush_tlb_mm()
646 flush_tlb_other(mm); in ipi_flush_tlb_mm()
650 flush_tlb_mm(struct mm_struct *mm) in flush_tlb_mm() argument
654 if (mm == current->active_mm) { in flush_tlb_mm()
655 flush_tlb_current(mm); in flush_tlb_mm()
656 if (atomic_read(&mm->mm_users) <= 1) { in flush_tlb_mm()
661 if (mm->context[cpu]) in flush_tlb_mm()
662 mm->context[cpu] = 0; in flush_tlb_mm()
[all …]
/linux-4.1.27/arch/um/include/asm/
Dmmu_context.h12 extern void uml_setup_stubs(struct mm_struct *mm);
16 static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) in arch_dup_mmap() argument
18 uml_setup_stubs(mm); in arch_dup_mmap()
20 extern void arch_exit_mmap(struct mm_struct *mm);
21 static inline void arch_unmap(struct mm_struct *mm, in arch_unmap() argument
26 static inline void arch_bprm_mm_init(struct mm_struct *mm, in arch_bprm_mm_init() argument
34 #define deactivate_mm(tsk,mm) do { } while (0) argument
63 static inline void enter_lazy_tlb(struct mm_struct *mm, in enter_lazy_tlb() argument
68 extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
70 extern void destroy_context(struct mm_struct *mm);
/linux-4.1.27/arch/m32r/mm/
Dfault.c78 struct mm_struct *mm; in do_page_fault() local
111 mm = tsk->mm; in do_page_fault()
117 if (in_atomic() || !mm) in do_page_fault()
138 if (!down_read_trylock(&mm->mmap_sem)) { in do_page_fault()
142 down_read(&mm->mmap_sem); in do_page_fault()
145 vma = find_vma(mm, address); in do_page_fault()
199 fault = handle_mm_fault(mm, vma, addr, flags); in do_page_fault()
214 up_read(&mm->mmap_sem); in do_page_fault()
222 up_read(&mm->mmap_sem); in do_page_fault()
275 up_read(&mm->mmap_sem); in do_page_fault()
[all …]
/linux-4.1.27/arch/xtensa/mm/
Dtlb.c63 void local_flush_tlb_mm(struct mm_struct *mm) in local_flush_tlb_mm() argument
67 if (mm == current->active_mm) { in local_flush_tlb_mm()
70 mm->context.asid[cpu] = NO_CONTEXT; in local_flush_tlb_mm()
71 activate_context(mm, cpu); in local_flush_tlb_mm()
74 mm->context.asid[cpu] = NO_CONTEXT; in local_flush_tlb_mm()
75 mm->context.cpu = -1; in local_flush_tlb_mm()
92 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range() local
95 if (mm->context.asid[cpu] == NO_CONTEXT) in local_flush_tlb_range()
100 (unsigned long)mm->context.asid[cpu], start, end); in local_flush_tlb_range()
107 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); in local_flush_tlb_range()
[all …]
/linux-4.1.27/arch/avr32/include/asm/
Dmmu_context.h41 get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
45 if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0) in get_mmu_context()
64 mm->context = mc; in get_mmu_context()
72 struct mm_struct *mm) in init_new_context() argument
74 mm->context = NO_CONTEXT; in init_new_context()
82 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
102 static inline void activate_context(struct mm_struct *mm) in activate_context() argument
104 get_mmu_context(mm); in activate_context()
105 set_asid(mm->context & MMU_CONTEXT_ASID_MASK); in activate_context()
120 #define deactivate_mm(tsk,mm) do { } while(0) argument
[all …]
Dpgalloc.h19 static inline void pmd_populate_kernel(struct mm_struct *mm, in pmd_populate_kernel() argument
25 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, in pmd_populate() argument
44 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
49 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
54 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
60 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
79 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
84 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) in pte_free() argument
/linux-4.1.27/drivers/misc/cxl/
Dfault.c87 static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm, in cxl_fault_segment() argument
93 if (!(rc = copro_calculate_slb(mm, ea, &slb))) { in cxl_fault_segment()
116 struct mm_struct *mm, u64 ea) in cxl_handle_segment_miss() argument
123 if ((rc = cxl_fault_segment(ctx, mm, ea))) in cxl_handle_segment_miss()
135 struct mm_struct *mm, u64 dsisr, u64 dar) in cxl_handle_page_fault() argument
143 if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) { in cxl_handle_page_fault()
162 hash_page_mm(mm, dar, access, 0x300, inv_flags); in cxl_handle_page_fault()
176 struct mm_struct *mm; in cxl_handle_fault() local
203 if (!(mm = get_task_mm(task))) { in cxl_handle_fault()
211 cxl_handle_segment_miss(ctx, mm, dar); in cxl_handle_fault()
[all …]
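
cxl may only touch the faulting task's address space while holding a reference on it; the lifetime rule in miniature (sketch_with_task_mm() is illustrative and assumes the caller already pinned task):

    static int sketch_with_task_mm(struct task_struct *task)
    {
            struct mm_struct *mm = get_task_mm(task); /* NULL for kthreads */

            if (!mm)
                    return -EINVAL;
            /* ... resolve faults against mm, as cxl_handle_fault() does ... */
            mmput(mm);                                /* drop mm_users ref */
            return 0;
    }
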
/linux-4.1.27/arch/hexagon/include/asm/
Dpgalloc.h34 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
49 mm->context.generation = kmap_generation; in pgd_alloc()
52 mm->context.ptbase = __pa(pgd); in pgd_alloc()
57 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
62 static inline struct page *pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
78 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
85 static inline void pte_free(struct mm_struct *mm, struct page *pte) in pte_free() argument
91 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
96 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, in pmd_populate() argument
116 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, in pmd_populate_kernel() argument
[all …]
/linux-4.1.27/kernel/events/
Duprobes.c163 struct mm_struct *mm = vma->vm_mm; in __replace_page() local
179 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in __replace_page()
181 ptep = page_check_address(page, mm, addr, &ptl, 0); in __replace_page()
191 dec_mm_counter(mm, MM_FILEPAGES); in __replace_page()
192 inc_mm_counter(mm, MM_ANONPAGES); in __replace_page()
197 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); in __replace_page()
211 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in __replace_page()
300 int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, in uprobe_write_opcode() argument
309 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); in uprobe_write_opcode()
349 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) in set_swbp() argument
[all …]
/linux-4.1.27/include/trace/events/
Dxen.h168 TP_PROTO(struct mm_struct *mm, unsigned long addr,
170 TP_ARGS(mm, addr, ptep, pteval),
172 __field(struct mm_struct *, mm)
177 TP_fast_assign(__entry->mm = mm;
182 __entry->mm, __entry->addr, __entry->ptep,
188 TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
189 TP_ARGS(mm, addr, ptep),
191 __field(struct mm_struct *, mm)
195 TP_fast_assign(__entry->mm = mm;
199 __entry->mm, __entry->addr, __entry->ptep)
[all …]
/linux-4.1.27/arch/unicore32/mm/
Dpgd.c27 pgd_t *get_pgd_slow(struct mm_struct *mm) in get_pgd_slow() argument
53 new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0); in get_pgd_slow()
57 new_pte = pte_alloc_map(mm, NULL, new_pmd, 0); in get_pgd_slow()
71 pmd_free(mm, new_pmd); in get_pgd_slow()
72 mm_dec_nr_pmds(mm); in get_pgd_slow()
79 void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd) in free_pgd_slow() argument
99 pte_free(mm, pte); in free_pgd_slow()
100 atomic_long_dec(&mm->nr_ptes); in free_pgd_slow()
101 pmd_free(mm, pmd); in free_pgd_slow()
102 mm_dec_nr_pmds(mm); in free_pgd_slow()
Dfault.c41 void show_pte(struct mm_struct *mm, unsigned long addr) in show_pte() argument
45 if (!mm) in show_pte()
46 mm = &init_mm; in show_pte()
48 printk(KERN_ALERT "pgd = %p\n", mm->pgd); in show_pte()
49 pgd = pgd_offset(mm, addr); in show_pte()
91 static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr, in __do_kernel_fault() argument
109 show_pte(mm, addr); in __do_kernel_fault()
138 struct mm_struct *mm = tsk->active_mm; in do_bad_area() local
147 __do_kernel_fault(mm, addr, fsr, regs); in do_bad_area()
170 static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr, in __do_pf() argument
[all …]
/linux-4.1.27/arch/mips/mm/
Dmmap.c58 struct mm_struct *mm = current->mm; in arch_get_unmapped_area_common() local
93 vma = find_vma(mm, addr); in arch_get_unmapped_area_common()
106 info.high_limit = mm->mmap_base; in arch_get_unmapped_area_common()
121 info.low_limit = mm->mmap_base; in arch_get_unmapped_area_common()
159 void arch_pick_mmap_layout(struct mm_struct *mm) in arch_pick_mmap_layout() argument
167 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; in arch_pick_mmap_layout()
168 mm->get_unmapped_area = arch_get_unmapped_area; in arch_pick_mmap_layout()
170 mm->mmap_base = mmap_base(random_factor); in arch_pick_mmap_layout()
171 mm->get_unmapped_area = arch_get_unmapped_area_topdown; in arch_pick_mmap_layout()
189 unsigned long arch_randomize_brk(struct mm_struct *mm) in arch_randomize_brk() argument
[all …]
Dhugetlbpage.c24 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, in huge_pte_alloc() argument
31 pgd = pgd_offset(mm, addr); in huge_pte_alloc()
32 pud = pud_alloc(mm, pgd, addr); in huge_pte_alloc()
34 pte = (pte_t *)pmd_alloc(mm, pud, addr); in huge_pte_alloc()
39 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) in huge_pte_offset() argument
45 pgd = pgd_offset(mm, addr); in huge_pte_offset()
54 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument
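The huge_pte_alloc() hit above is the usual allocate-as-you-descend page-table walk. A stripped-down sketch, assuming the 4.1 in-tree pgtable API; on MIPS the huge PTE lives at the PMD level, hence the final cast:

#include <linux/mm.h>

static pte_t *huge_walk_sketch(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);      /* top level: never fails */
        pud_t *pud = pud_alloc(mm, pgd, addr);  /* may allocate a PUD page */
        pmd_t *pmd = NULL;

        if (pud)
                pmd = pmd_alloc(mm, pud, addr); /* may allocate a PMD page */

        return (pte_t *)pmd;                    /* NULL on allocation failure */
}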
/linux-4.1.27/arch/arm64/kernel/
Dvdso.c89 struct mm_struct *mm = current->mm; in aarch32_setup_vectors_page() local
98 down_write(&mm->mmap_sem); in aarch32_setup_vectors_page()
99 current->mm->context.vdso = (void *)addr; in aarch32_setup_vectors_page()
102 ret = _install_special_mapping(mm, addr, PAGE_SIZE, in aarch32_setup_vectors_page()
106 up_write(&mm->mmap_sem); in aarch32_setup_vectors_page()
158 struct mm_struct *mm = current->mm; in arch_setup_additional_pages() local
166 down_write(&mm->mmap_sem); in arch_setup_additional_pages()
172 ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, in arch_setup_additional_pages()
179 mm->context.vdso = (void *)vdso_base; in arch_setup_additional_pages()
180 ret = _install_special_mapping(mm, vdso_base, vdso_text_len, in arch_setup_additional_pages()
[all …]
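Both vdso paths above follow the same install pattern: take mmap_sem for write, call _install_special_mapping(), and record the base in mm->context. A hedged sketch with an invented descriptor; real callers supply a populated .pages array:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct page *sketch_pages[1];            /* placeholder page list */
static struct vm_special_mapping sketch_spec = {
        .name  = "[sketch]",                    /* hypothetical mapping name */
        .pages = sketch_pages,
};

static int map_special_page_sketch(unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        vma = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYEXEC,
                                       &sketch_spec);
        up_write(&mm->mmap_sem);

        return PTR_ERR_OR_ZERO(vma);            /* 0 on success, else -errno */
}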
/linux-4.1.27/include/asm-generic/
Dpgtable.h89 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, in ptep_get_and_clear() argument
94 pte_clear(mm, address, ptep); in ptep_get_and_clear()
101 static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, in pmdp_get_and_clear() argument
114 static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm, in pmdp_get_and_clear_full() argument
118 return pmdp_get_and_clear(mm, address, pmdp); in pmdp_get_and_clear_full()
124 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, in ptep_get_and_clear_full() argument
129 pte = ptep_get_and_clear(mm, address, ptep); in ptep_get_and_clear_full()
140 static inline void pte_clear_not_present_full(struct mm_struct *mm, in pte_clear_not_present_full() argument
145 pte_clear(mm, address, ptep); in pte_clear_not_present_full()
163 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) in ptep_set_wrprotect() argument
[all …]
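The asm-generic fallbacks above all reduce to "snapshot, then clear": ptep_get_and_clear() returns the old PTE so the caller can harvest dirty/young bits, and architectures with atomic xchg-style PTE ops override it. The semantics in isolation, as a sketch:

#include <linux/mm.h>

static inline pte_t ptep_get_and_clear_sketch(struct mm_struct *mm,
                                              unsigned long addr,
                                              pte_t *ptep)
{
        pte_t old = *ptep;      /* snapshot before clearing */

        pte_clear(mm, addr, ptep);
        return old;             /* caller checks pte_dirty()/pte_young() */
}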
/linux-4.1.27/arch/sh/kernel/
Dsmp.c180 struct mm_struct *mm = &init_mm; in start_secondary() local
183 atomic_inc(&mm->mm_count); in start_secondary()
184 atomic_inc(&mm->mm_users); in start_secondary()
185 current->active_mm = mm; in start_secondary()
186 enter_lazy_tlb(mm, current); in start_secondary()
341 static void flush_tlb_mm_ipi(void *mm) in flush_tlb_mm_ipi() argument
343 local_flush_tlb_mm((struct mm_struct *)mm); in flush_tlb_mm_ipi()
358 void flush_tlb_mm(struct mm_struct *mm) in flush_tlb_mm() argument
362 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { in flush_tlb_mm()
363 smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); in flush_tlb_mm()
[all …]
/linux-4.1.27/drivers/infiniband/core/
Dumem.c166 down_write(&current->mm->mmap_sem); in ib_umem_get()
168 locked = npages + current->mm->pinned_vm; in ib_umem_get()
191 ret = get_user_pages(current, current->mm, cur_base, in ib_umem_get()
234 current->mm->pinned_vm = locked; in ib_umem_get()
236 up_write(&current->mm->mmap_sem); in ib_umem_get()
249 down_write(&umem->mm->mmap_sem); in ib_umem_account()
250 umem->mm->pinned_vm -= umem->diff; in ib_umem_account()
251 up_write(&umem->mm->mmap_sem); in ib_umem_account()
252 mmput(umem->mm); in ib_umem_account()
263 struct mm_struct *mm; in ib_umem_release() local
[all …]
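ib_umem_get() above is the canonical pin-and-account pattern. A trimmed sketch, assuming the 4.1 eight-argument get_user_pages() shown in the hit; the RLIMIT_MEMLOCK check and multi-chunk loop of the real code are elided:

#include <linux/mm.h>
#include <linux/sched.h>

static long pin_user_range_sketch(unsigned long start, unsigned long npages,
                                  struct page **pages)
{
        long got;

        down_write(&current->mm->mmap_sem);
        got = get_user_pages(current, current->mm, start, npages,
                             1 /* write */, 0 /* !force */, pages, NULL);
        if (got > 0)
                current->mm->pinned_vm += got;  /* account the pinned pages */
        up_write(&current->mm->mmap_sem);

        return got;                             /* pages pinned, or -errno */
}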
/linux-4.1.27/arch/tile/kernel/
Dtlb.c34 void flush_tlb_mm(struct mm_struct *mm) in flush_tlb_mm() argument
38 for_each_cpu(cpu, mm_cpumask(mm)) { in flush_tlb_mm()
44 flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm), in flush_tlb_mm()
50 flush_tlb_mm(current->mm); in flush_tlb_current_task()
53 void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm, in flush_tlb_page_mm() argument
58 flush_remote(0, cache, mm_cpumask(mm), in flush_tlb_page_mm()
59 va, size, size, mm_cpumask(mm), NULL, 0); in flush_tlb_page_mm()
72 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range() local
74 flush_remote(0, cache, mm_cpumask(mm), start, end - start, size, in flush_tlb_range()
75 mm_cpumask(mm), NULL, 0); in flush_tlb_range()
/linux-4.1.27/Documentation/ABI/testing/
Dsysfs-kernel-mm-ksm1 What: /sys/kernel/mm/ksm
4 Contact: Linux memory management mailing list <linux-mm@kvack.org>
7 What: /sys/kernel/mm/ksm/full_scans
8 What: /sys/kernel/mm/ksm/pages_shared
9 What: /sys/kernel/mm/ksm/pages_sharing
10 What: /sys/kernel/mm/ksm/pages_to_scan
11 What: /sys/kernel/mm/ksm/pages_unshared
12 What: /sys/kernel/mm/ksm/pages_volatile
13 What: /sys/kernel/mm/ksm/run
14 What: /sys/kernel/mm/ksm/sleep_millisecs
[all …]
/linux-4.1.27/arch/openrisc/include/asm/
Dpgalloc.h30 #define pmd_populate_kernel(mm, pmd, pte) \ argument
33 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, in pmd_populate() argument
44 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
63 extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
69 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
74 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
76 static inline struct page *pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
91 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
96 static inline void pte_free(struct mm_struct *mm, struct page *pte) in pte_free() argument
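The pte_alloc_one() implementations here (and in the cris, metag, nios2 and score hits below) share one shape: allocate a zeroed page, then run the pgtable constructor so the split page-table lock is initialised. A generic sketch, assuming pgtable_t is struct page * as on these architectures:

#include <linux/mm.h>
#include <linux/gfp.h>

static pgtable_t pte_alloc_one_sketch(struct mm_struct *mm,
                                      unsigned long address)
{
        struct page *pte = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {  /* init split-ptlock state */
                __free_page(pte);
                return NULL;
        }
        return pte;
}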
/linux-4.1.27/arch/cris/include/asm/
Dpgalloc.h7 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte) argument
8 #define pmd_populate(mm, pmd, pte) pmd_set(pmd, page_address(pte)) argument
15 static inline pgd_t *pgd_alloc (struct mm_struct *mm) in pgd_alloc() argument
20 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
25 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) in pte_alloc_one_kernel() argument
31 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) in pte_alloc_one() argument
44 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
49 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) in pte_free() argument
/linux-4.1.27/fs/
Dbinfmt_elf_fdpic.c330 current->mm->start_code = 0; in load_elf_fdpic_binary()
331 current->mm->end_code = 0; in load_elf_fdpic_binary()
332 current->mm->start_stack = 0; in load_elf_fdpic_binary()
333 current->mm->start_data = 0; in load_elf_fdpic_binary()
334 current->mm->end_data = 0; in load_elf_fdpic_binary()
335 current->mm->context.exec_fdpic_loadmap = 0; in load_elf_fdpic_binary()
336 current->mm->context.interp_fdpic_loadmap = 0; in load_elf_fdpic_binary()
341 &current->mm->start_stack, in load_elf_fdpic_binary()
342 &current->mm->start_brk); in load_elf_fdpic_binary()
344 retval = setup_arg_pages(bprm, current->mm->start_stack, in load_elf_fdpic_binary()
[all …]
Dexec.c172 struct mm_struct *mm = current->mm; in acct_arg_size() local
175 if (!mm || !diff) in acct_arg_size()
179 add_mm_counter(mm, MM_ANONPAGES, diff); in acct_arg_size()
195 ret = get_user_pages(current, bprm->mm, pos, in get_arg_page()
253 struct mm_struct *mm = bprm->mm; in __bprm_mm_init() local
259 down_write(&mm->mmap_sem); in __bprm_mm_init()
260 vma->vm_mm = mm; in __bprm_mm_init()
275 err = insert_vm_struct(mm, vma); in __bprm_mm_init()
279 mm->stack_vm = mm->total_vm = 1; in __bprm_mm_init()
280 arch_bprm_mm_init(mm, vma); in __bprm_mm_init()
[all …]
/linux-4.1.27/arch/metag/include/asm/
Dhugetlb.h8 static inline int is_hugepage_only_range(struct mm_struct *mm, in is_hugepage_only_range() argument
17 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) in hugetlb_prefault_arch_hook() argument
29 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, in set_huge_pte_at() argument
32 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at()
35 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, in huge_ptep_get_and_clear() argument
38 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear()
56 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, in huge_ptep_set_wrprotect() argument
59 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect()
Dpgalloc.h7 #define pmd_populate_kernel(mm, pmd, pte) \ argument
10 #define pmd_populate(mm, pmd, pte) \ argument
29 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
37 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
42 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
50 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
64 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
69 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) in pte_free() argument
Dmmu_context.h13 static inline void enter_lazy_tlb(struct mm_struct *mm, in enter_lazy_tlb() argument
19 struct mm_struct *mm) in init_new_context() argument
26 mm->context.pgd_base = (unsigned long) mm->pgd; in init_new_context()
29 INIT_LIST_HEAD(&mm->context.tcm); in init_new_context()
39 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
43 list_for_each_entry_safe(pos, n, &mm->context.tcm, list) { in destroy_context()
50 #define destroy_context(mm) do { } while (0) argument
111 #define deactivate_mm(tsk, mm) do { } while (0) argument
/linux-4.1.27/arch/nios2/include/asm/
Dpgalloc.h15 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, in pmd_populate_kernel() argument
21 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, in pmd_populate() argument
33 extern pgd_t *pgd_alloc(struct mm_struct *mm);
35 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
40 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
51 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
67 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
72 static inline void pte_free(struct mm_struct *mm, struct page *pte) in pte_free() argument
/linux-4.1.27/drivers/infiniband/hw/qib/
Dqib_user_pages.c69 ret = get_user_pages(current, current->mm, in __qib_get_user_pages()
77 current->mm->pinned_vm += num_pages; in __qib_get_user_pages()
137 down_write(&current->mm->mmap_sem); in qib_get_user_pages()
141 up_write(&current->mm->mmap_sem); in qib_get_user_pages()
148 if (current->mm) /* during close after signal, mm can be NULL */ in qib_release_user_pages()
149 down_write(&current->mm->mmap_sem); in qib_release_user_pages()
153 if (current->mm) { in qib_release_user_pages()
154 current->mm->pinned_vm -= num_pages; in qib_release_user_pages()
155 up_write(&current->mm->mmap_sem); in qib_release_user_pages()
/linux-4.1.27/drivers/gpu/drm/ttm/
Dttm_bo_manager.c46 struct drm_mm mm; member
56 struct drm_mm *mm = &rman->mm; in ttm_bo_man_get_node() local
77 ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages, in ttm_bo_man_get_node()
117 drm_mm_init(&rman->mm, 0, p_size); in ttm_bo_man_init()
126 struct drm_mm *mm = &rman->mm; in ttm_bo_man_takedown() local
129 if (drm_mm_clean(mm)) { in ttm_bo_man_takedown()
130 drm_mm_takedown(mm); in ttm_bo_man_takedown()
146 drm_mm_debug_table(&rman->mm, prefix); in ttm_bo_man_debug()
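The TTM range manager above wraps a drm_mm allocator. Its lifecycle in a hedged sketch against the 4.1-era drm_mm API; the range-restricted drm_mm_insert_node_in_range_generic() call of the real code is simplified to plain drm_mm_insert_node():

#include <drm/drm_mm.h>

static int range_mgr_sketch(struct drm_mm *mm, struct drm_mm_node *node,
                            u64 p_size, u64 num_pages)
{
        int ret;

        drm_mm_init(mm, 0, p_size);             /* manage [0, p_size) */

        ret = drm_mm_insert_node(mm, node, num_pages, 0,
                                 DRM_MM_SEARCH_DEFAULT);
        if (ret)
                return ret;                     /* -ENOSPC when exhausted */

        drm_mm_remove_node(node);               /* release the allocation */

        if (drm_mm_clean(mm))                   /* no nodes left behind? */
                drm_mm_takedown(mm);

        return 0;
}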
/linux-4.1.27/arch/microblaze/mm/
Dfault.c90 struct mm_struct *mm = current->mm; in do_page_fault() local
110 if (unlikely(in_atomic() || !mm)) { in do_page_fault()
117 mm); in do_page_fault()
141 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { in do_page_fault()
146 down_read(&mm->mmap_sem); in do_page_fault()
149 vma = find_vma(mm, address); in do_page_fault()
219 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
253 up_read(&mm->mmap_sem); in do_page_fault()
264 up_read(&mm->mmap_sem); in do_page_fault()
288 up_read(&mm->mmap_sem); in do_page_fault()
[all …]
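This microblaze handler is the skeleton that repeats in the m68k, nios2, arc, openrisc, score, avr32 and hexagon hits further down: take mmap_sem for read, find the VMA, delegate to handle_mm_fault(). A compressed sketch with the retry, stack-growth and signal-delivery paths elided:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h>

static int fault_skeleton_sketch(unsigned long address, unsigned int flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int fault;

        if (in_atomic() || !mm)         /* cannot fault from atomic context */
                return -EFAULT;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma || vma->vm_start > address) {  /* VM_GROWSDOWN case elided */
                up_read(&mm->mmap_sem);
                return -EFAULT;
        }

        /* The core does the real work: allocate, COW, swap in, ... */
        fault = handle_mm_fault(mm, vma, address, flags);
        up_read(&mm->mmap_sem);

        return (fault & VM_FAULT_ERROR) ? -EFAULT : 0;
}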
/linux-4.1.27/arch/x86/xen/
Dmmu.c335 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, in xen_set_pte_at() argument
338 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval); in xen_set_pte_at()
342 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, in xen_ptep_modify_prot_start() argument
346 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep); in xen_ptep_modify_prot_start()
350 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, in xen_ptep_modify_prot_commit() argument
355 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte); in xen_ptep_modify_prot_commit()
484 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in xen_pte_clear() argument
486 trace_xen_mmu_pte_clear(mm, addr, ptep); in xen_pte_clear()
488 native_pte_clear(mm, addr, ptep); in xen_pte_clear()
609 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, in __xen_pgd_walk() argument
[all …]
/linux-4.1.27/arch/m68k/mm/
Dfault.c72 struct mm_struct *mm = current->mm; in do_page_fault() local
78 regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL); in do_page_fault()
84 if (in_atomic() || !mm) in do_page_fault()
90 down_read(&mm->mmap_sem); in do_page_fault()
92 vma = find_vma(mm, address); in do_page_fault()
139 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
181 up_read(&mm->mmap_sem); in do_page_fault()
189 up_read(&mm->mmap_sem); in do_page_fault()
218 up_read(&mm->mmap_sem); in do_page_fault()
Dmcfmmu.c76 current->mm = NULL; in paging_init()
87 struct mm_struct *mm; in cf_tlb_miss() local
98 mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm; in cf_tlb_miss()
99 if (!mm) { in cf_tlb_miss()
104 pgd = pgd_offset(mm, mmuar); in cf_tlb_miss()
132 asid = mm->context & 0xff; in cf_tlb_miss()
184 struct mm_struct *mm; in steal_context() local
191 mm = context_mm[next_mmu_context]; in steal_context()
192 flush_tlb_mm(mm); in steal_context()
193 destroy_context(mm); in steal_context()
/linux-4.1.27/drivers/gpu/drm/radeon/
Dradeon_mn.c42 struct mm_struct *mm; member
90 mmu_notifier_unregister(&rmn->mn, rmn->mm); in radeon_mn_destroy()
103 struct mm_struct *mm) in radeon_mn_release() argument
122 struct mm_struct *mm, in radeon_mn_invalidate_range_start() argument
185 struct mm_struct *mm = current->mm; in radeon_mn_get() local
189 down_write(&mm->mmap_sem); in radeon_mn_get()
192 hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm) in radeon_mn_get()
193 if (rmn->mm == mm) in radeon_mn_get()
203 rmn->mm = mm; in radeon_mn_get()
208 r = __mmu_notifier_register(&rmn->mn, mm); in radeon_mn_get()
[all …]
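radeon_mn_get() above keeps one notifier per mm, hashed by the mm pointer, and registers it with mmap_sem held for write, which __mmu_notifier_register() requires. The registration step alone, with an invented ops table:

#include <linux/mmu_notifier.h>
#include <linux/sched.h>

static const struct mmu_notifier_ops sketch_mn_ops;  /* callbacks elided */

static int register_mn_sketch(struct mmu_notifier *mn)
{
        struct mm_struct *mm = current->mm;
        int r;

        mn->ops = &sketch_mn_ops;

        down_write(&mm->mmap_sem);      /* __mmu_notifier_register needs it */
        r = __mmu_notifier_register(mn, mm);
        up_write(&mm->mmap_sem);

        return r;
}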
/linux-4.1.27/arch/score/include/asm/
Dpgalloc.h6 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, in pmd_populate_kernel() argument
12 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, in pmd_populate() argument
20 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
35 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
40 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, in pte_alloc_one_kernel() argument
51 static inline struct page *pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() argument
67 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
72 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) in pte_free() argument
Dmmu_context.h42 static inline void enter_lazy_tlb(struct mm_struct *mm, in enter_lazy_tlb() argument
47 get_new_mmu_context(struct mm_struct *mm) in get_new_mmu_context() argument
57 mm->context = asid; in get_new_mmu_context()
66 init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
68 mm->context = 0; in init_new_context()
90 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
94 deactivate_mm(struct task_struct *task, struct mm_struct *mm) in deactivate_mm() argument
/linux-4.1.27/lib/
Dis_single_threaded.c21 struct mm_struct *mm = task->mm; in current_is_single_threaded() local
28 if (atomic_read(&mm->mm_users) == 1) in current_is_single_threaded()
41 if (unlikely(t->mm == mm)) in current_is_single_threaded()
43 if (likely(t->mm)) in current_is_single_threaded()
/linux-4.1.27/arch/mips/kernel/
Dsmp.c289 static void flush_tlb_mm_ipi(void *mm) in flush_tlb_mm_ipi() argument
291 local_flush_tlb_mm((struct mm_struct *)mm); in flush_tlb_mm_ipi()
330 void flush_tlb_mm(struct mm_struct *mm) in flush_tlb_mm() argument
334 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { in flush_tlb_mm()
335 smp_on_other_tlbs(flush_tlb_mm_ipi, mm); in flush_tlb_mm()
340 if (cpu != smp_processor_id() && cpu_context(cpu, mm)) in flush_tlb_mm()
341 cpu_context(cpu, mm) = 0; in flush_tlb_mm()
344 local_flush_tlb_mm(mm); in flush_tlb_mm()
364 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range() local
367 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { in flush_tlb_range()
[all …]
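The MIPS flush_tlb_mm() above makes a classic shootdown decision: if any other task can be running this mm, IPI everyone; otherwise just invalidate the ASIDs other CPUs may still cache and flush locally. A sketch using the MIPS-specific helpers from the hit (cpu_context, local_flush_tlb_mm, flush_tlb_mm_ipi), so in-tree only:

#include <linux/smp.h>
#include <linux/sched.h>

static void flush_mm_sketch(struct mm_struct *mm)
{
        preempt_disable();

        if (atomic_read(&mm->mm_users) != 1 || current->mm != mm) {
                /* mm may be live elsewhere: broadcast the flush. */
                smp_call_function(flush_tlb_mm_ipi, mm, 1);
        } else {
                /* Only we run it: drop other CPUs' stale ASIDs. */
                int cpu;

                for_each_online_cpu(cpu)
                        if (cpu != smp_processor_id())
                                cpu_context(cpu, mm) = 0;
        }
        local_flush_tlb_mm(mm);         /* always flush our own TLB */

        preempt_enable();
}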
/linux-4.1.27/arch/arm/kernel/
Dprocess.c286 unsigned long arch_randomize_brk(struct mm_struct *mm) in arch_randomize_brk() argument
288 unsigned long range_end = mm->brk + 0x02000000; in arch_randomize_brk()
289 return randomize_range(mm->brk, range_end, 0) ? : mm->brk; in arch_randomize_brk()
312 struct vm_area_struct *get_gate_vma(struct mm_struct *mm) in get_gate_vma() argument
317 int in_gate_area(struct mm_struct *mm, unsigned long addr) in in_gate_area() argument
339 static unsigned long sigpage_addr(const struct mm_struct *mm, in sigpage_addr() argument
348 first = PAGE_ALIGN(mm->start_stack); in sigpage_addr()
379 struct mm_struct *mm = current->mm; in arch_setup_additional_pages() local
394 down_write(&mm->mmap_sem); in arch_setup_additional_pages()
395 hint = sigpage_addr(mm, npages); in arch_setup_additional_pages()
[all …]
/linux-4.1.27/arch/m32r/kernel/
Dsmp.c246 void smp_flush_tlb_mm(struct mm_struct *mm) in smp_flush_tlb_mm() argument
255 mmc = &mm->context[cpu_id]; in smp_flush_tlb_mm()
256 cpumask_copy(&cpu_mask, mm_cpumask(mm)); in smp_flush_tlb_mm()
262 if (mm == current->mm) in smp_flush_tlb_mm()
263 activate_context(mm); in smp_flush_tlb_mm()
265 cpumask_clear_cpu(cpu_id, mm_cpumask(mm)); in smp_flush_tlb_mm()
269 flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL); in smp_flush_tlb_mm()
317 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_page() local
325 mmc = &mm->context[cpu_id]; in smp_flush_tlb_page()
326 cpumask_copy(&cpu_mask, mm_cpumask(mm)); in smp_flush_tlb_page()
[all …]
/linux-4.1.27/arch/nios2/mm/
Dfault.c47 struct mm_struct *mm = tsk->mm; in do_page_fault() local
80 if (in_atomic() || !mm) in do_page_fault()
86 if (!down_read_trylock(&mm->mmap_sem)) { in do_page_fault()
90 down_read(&mm->mmap_sem); in do_page_fault()
93 vma = find_vma(mm, address); in do_page_fault()
134 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
175 up_read(&mm->mmap_sem); in do_page_fault()
183 up_read(&mm->mmap_sem); in do_page_fault()
221 up_read(&mm->mmap_sem); in do_page_fault()
228 up_read(&mm->mmap_sem); in do_page_fault()
/linux-4.1.27/arch/arc/mm/
Dfault.c60 struct mm_struct *mm = tsk->mm; in do_page_fault() local
89 if (in_atomic() || !mm) in do_page_fault()
95 down_read(&mm->mmap_sem); in do_page_fault()
96 vma = find_vma(mm, address); in do_page_fault()
133 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
138 up_read(&mm->mmap_sem); in do_page_fault()
166 up_read(&mm->mmap_sem); in do_page_fault()
185 up_read(&mm->mmap_sem); in do_page_fault()
214 up_read(&mm->mmap_sem); in do_page_fault()
224 up_read(&mm->mmap_sem); in do_page_fault()
/linux-4.1.27/arch/um/kernel/
Dtlb.c45 #define INIT_HVC(mm, force) \ argument
48 .id = &mm->context.id, \
263 void fix_range_common(struct mm_struct *mm, unsigned long start_addr, in fix_range_common() argument
271 hvc = INIT_HVC(mm, force); in fix_range_common()
272 pgd = pgd_offset(mm, addr); in fix_range_common()
292 up_write(&current->mm->mmap_sem); in fix_range_common()
300 struct mm_struct *mm; in flush_tlb_kernel_range_common() local
308 mm = &init_mm; in flush_tlb_kernel_range_common()
310 pgd = pgd_offset(mm, addr); in flush_tlb_kernel_range_common()
389 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page() local
[all …]
/linux-4.1.27/arch/frv/include/asm/
Dmmu_context.h20 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
25 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
27 extern void destroy_context(struct mm_struct *mm);
30 #define init_new_context(tsk, mm) ({ 0; }) argument
32 #define destroy_context(mm) do {} while(0) argument
46 #define deactivate_mm(tsk, mm) \ argument
Dpgalloc.h23 #define pmd_populate_kernel(mm, pmd, pte) __set_pmd(pmd, __pa(pte) | _PAGE_TABLE) argument
35 extern void pgd_free(struct mm_struct *mm, pgd_t *);
41 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
46 static inline void pte_free(struct mm_struct *mm, pgtable_t pte) in pte_free() argument
63 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); }) argument
64 #define pmd_free(mm, x) do { } while (0) argument
/linux-4.1.27/arch/hexagon/mm/
Dvm_tlb.c40 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range() local
42 if (mm->context.ptbase == current->active_mm->context.ptbase) in flush_tlb_range()
68 void flush_tlb_mm(struct mm_struct *mm) in flush_tlb_mm() argument
71 if (current->active_mm->context.ptbase == mm->context.ptbase) in flush_tlb_mm()
80 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page() local
82 if (mm->context.ptbase == current->active_mm->context.ptbase) in flush_tlb_page()
Dvm_fault.c51 struct mm_struct *mm = current->mm; in do_page_fault() local
62 if (unlikely(in_interrupt() || !mm)) in do_page_fault()
70 down_read(&mm->mmap_sem); in do_page_fault()
71 vma = find_vma(mm, address); in do_page_fault()
104 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
123 up_read(&mm->mmap_sem); in do_page_fault()
127 up_read(&mm->mmap_sem); in do_page_fault()
156 up_read(&mm->mmap_sem); in do_page_fault()
/linux-4.1.27/arch/avr32/mm/
Dfault.c59 struct mm_struct *mm; in do_page_fault() local
75 mm = tsk->mm; in do_page_fault()
84 if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM)) in do_page_fault()
92 down_read(&mm->mmap_sem); in do_page_fault()
94 vma = find_vma(mm, address); in do_page_fault()
137 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
170 up_read(&mm->mmap_sem); in do_page_fault()
178 up_read(&mm->mmap_sem); in do_page_fault()
234 up_read(&mm->mmap_sem); in do_page_fault()
241 up_read(&mm->mmap_sem); in do_page_fault()
Dtlb.c160 if (vma->vm_mm != current->mm) { in flush_tlb_page()
176 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range() local
178 if (mm->context != NO_CONTEXT) { in flush_tlb_range()
186 mm->context = NO_CONTEXT; in flush_tlb_range()
187 if (mm == current->mm) in flush_tlb_range()
188 activate_context(mm); in flush_tlb_range()
193 asid = mm->context & MMU_CONTEXT_ASID_MASK; in flush_tlb_range()
200 if (mm != current->mm) { in flush_tlb_range()
247 void flush_tlb_mm(struct mm_struct *mm) in flush_tlb_mm() argument
250 if (mm->context != NO_CONTEXT) { in flush_tlb_mm()
[all …]
/linux-4.1.27/arch/openrisc/mm/
Dfault.c53 struct mm_struct *mm; in do_page_fault() local
99 mm = tsk->mm; in do_page_fault()
107 if (in_interrupt() || !mm) in do_page_fault()
111 down_read(&mm->mmap_sem); in do_page_fault()
112 vma = find_vma(mm, address); in do_page_fault()
166 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
200 up_read(&mm->mmap_sem); in do_page_fault()
209 up_read(&mm->mmap_sem); in do_page_fault()
272 up_read(&mm->mmap_sem); in do_page_fault()
279 up_read(&mm->mmap_sem); in do_page_fault()
/linux-4.1.27/arch/score/mm/
Dtlb-score.c64 drop_mmu_context(struct mm_struct *mm) in drop_mmu_context() argument
69 get_new_mmu_context(mm); in drop_mmu_context()
70 pevn_set(mm->context & ASID_MASK); in drop_mmu_context()
74 void local_flush_tlb_mm(struct mm_struct *mm) in local_flush_tlb_mm() argument
76 if (mm->context != 0) in local_flush_tlb_mm()
77 drop_mmu_context(mm); in local_flush_tlb_mm()
83 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range() local
84 unsigned long vma_mm_context = mm->context; in local_flush_tlb_range()
85 if (mm->context != 0) { in local_flush_tlb_range()
115 get_new_mmu_context(mm); in local_flush_tlb_range()
[all …]
Dfault.c48 struct mm_struct *mm = tsk->mm; in do_page_fault() local
76 if (in_atomic() || !mm) in do_page_fault()
82 down_read(&mm->mmap_sem); in do_page_fault()
83 vma = find_vma(mm, address); in do_page_fault()
113 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
128 up_read(&mm->mmap_sem); in do_page_fault()
136 up_read(&mm->mmap_sem); in do_page_fault()
175 up_read(&mm->mmap_sem); in do_page_fault()
182 up_read(&mm->mmap_sem); in do_page_fault()
/linux-4.1.27/arch/ia64/mm/
Dhugetlbpage.c28 huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) in huge_pte_alloc() argument
36 pgd = pgd_offset(mm, taddr); in huge_pte_alloc()
37 pud = pud_alloc(mm, pgd, taddr); in huge_pte_alloc()
39 pmd = pmd_alloc(mm, pud, taddr); in huge_pte_alloc()
41 pte = pte_alloc_map(mm, NULL, pmd, taddr); in huge_pte_alloc()
47 huge_pte_offset (struct mm_struct *mm, unsigned long addr) in huge_pte_offset() argument
55 pgd = pgd_offset(mm, taddr); in huge_pte_offset()
68 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument
92 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write) in follow_huge_addr() argument
100 ptep = huge_pte_offset(mm, addr); in follow_huge_addr()
/linux-4.1.27/arch/ia64/sn/kernel/sn2/
Dsn2_smp.c122 void sn_tlb_migrate_finish(struct mm_struct *mm) in sn_tlb_migrate_finish() argument
125 if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1) in sn_tlb_migrate_finish()
126 flush_tlb_mm(mm); in sn_tlb_migrate_finish()
130 sn2_ipi_flush_all_tlb(struct mm_struct *mm) in sn2_ipi_flush_all_tlb() argument
135 smp_flush_tlb_cpumask(*mm_cpumask(mm)); in sn2_ipi_flush_all_tlb()
164 sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, in sn2_global_tlb_purge() argument
168 int mymm = (mm == current->active_mm && mm == current->mm); in sn2_global_tlb_purge()
177 sn2_ipi_flush_all_tlb(mm); in sn2_global_tlb_purge()
184 for_each_cpu(cpu, mm_cpumask(mm)) { in sn2_global_tlb_purge()
207 if (atomic_read(&mm->mm_users) == 1 && mymm) { in sn2_global_tlb_purge()
[all …]
