Lines matching refs:mm — cross-reference hits for the identifier mm in the kernel's mm/memory.c. Each entry gives the source line number, the matching code, the enclosing function, and whether mm is an argument or a local there.

137 void sync_mm_rss(struct mm_struct *mm)  in sync_mm_rss()  argument
143 add_mm_counter(mm, i, current->rss_stat.count[i]); in sync_mm_rss()
150 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val) in add_mm_counter_fast() argument
154 if (likely(task->mm == mm)) in add_mm_counter_fast()
157 add_mm_counter(mm, member, val); in add_mm_counter_fast()
159 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1) argument
160 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1) argument
169 sync_mm_rss(task->mm); in check_sync_rss_stat()
173 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member) argument
174 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member) argument
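
With SPLIT_RSS_COUNTING configured, the lines above batch RSS updates in a per-task cache (current->rss_stat) and fold them into the shared per-mm counters only occasionally. A minimal sketch of that fold, mirroring sync_mm_rss() at line 137 (illustrative, not a drop-in; current->rss_stat exists only when SPLIT_RSS_COUNTING is defined):

#include <linux/mm.h>
#include <linux/sched.h>

static void fold_task_rss(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (current->rss_stat.count[i]) {
			/* fold the task-local delta into the shared counter */
			add_mm_counter(mm, i, current->rss_stat.count[i]);
			current->rss_stat.count[i] = 0;
		}
	}
	current->rss_stat.events = 0;
}
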
217 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) in tlb_gather_mmu() argument
219 tlb->mm = mm; in tlb_gather_mmu()
243 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); in tlb_flush_mmu_tlbonly()
368 if (atomic_read(&tlb->mm->mm_users) < 2) { in tlb_remove_table()
398 atomic_long_dec(&tlb->mm->nr_ptes); in free_pte_range()
432 mm_dec_nr_pmds(tlb->mm); in free_pmd_range()
520 pgd = pgd_offset(tlb->mm, addr); in free_pgd_range()
564 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, in __pte_alloc() argument
568 pgtable_t new = pte_alloc_one(mm, address); in __pte_alloc()
588 ptl = pmd_lock(mm, pmd); in __pte_alloc()
591 atomic_long_inc(&mm->nr_ptes); in __pte_alloc()
592 pmd_populate(mm, pmd, new); in __pte_alloc()
598 pte_free(mm, new); in __pte_alloc()
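
__pte_alloc() (lines 564-598 above) follows the standard "allocate outside the lock, recheck under it" idiom: the new PTE page is allocated where sleeping is allowed, then installed only if no racing thread populated the pmd first. A condensed sketch of that idiom (error paths trimmed, assumes this era's pte_alloc_one() signature):

#include <linux/mm.h>
#include <asm/pgalloc.h>

static int pte_alloc_sketch(struct mm_struct *mm, pmd_t *pmd,
			    unsigned long address)
{
	spinlock_t *ptl;
	pgtable_t new = pte_alloc_one(mm, address);	/* may sleep */

	if (!new)
		return -ENOMEM;

	smp_wmb();	/* make page-table zeroing visible before the pmd is */

	ptl = pmd_lock(mm, pmd);
	if (likely(pmd_none(*pmd))) {		/* nobody beat us to it */
		atomic_long_inc(&mm->nr_ptes);
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(ptl);
	if (new)
		pte_free(mm, new);		/* lost the race: discard ours */
	return 0;
}
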
629 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss) in add_mm_rss_vec() argument
633 if (current->mm == mm) in add_mm_rss_vec()
634 sync_mm_rss(mm); in add_mm_rss_vec()
637 add_mm_counter(mm, i, rss[i]); in add_mm_rss_vec()
1118 struct mm_struct *mm = tlb->mm; in zap_pte_range() local
1128 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
1151 ptent = ptep_get_and_clear_full(mm, addr, pte, in zap_pte_range()
1197 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in zap_pte_range()
1200 add_mm_rss_vec(mm, rss); in zap_pte_range()
1239 if (!rwsem_is_locked(&tlb->mm->mmap_sem)) { in zap_pmd_range()
1377 struct mm_struct *mm = vma->vm_mm; in unmap_vmas() local
1379 mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); in unmap_vmas()
1382 mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); in unmap_vmas()
1397 struct mm_struct *mm = vma->vm_mm; in zap_page_range() local
1402 tlb_gather_mmu(&tlb, mm, start, end); in zap_page_range()
1403 update_hiwater_rss(mm); in zap_page_range()
1404 mmu_notifier_invalidate_range_start(mm, start, end); in zap_page_range()
1407 mmu_notifier_invalidate_range_end(mm, start, end); in zap_page_range()
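
zap_page_range() (lines 1397-1407) is the caller-facing teardown wrapper: it brackets the unmap with tlb_gather_mmu()/tlb_finish_mmu() and the mmu_notifier calls shown above. A hedged usage sketch in the style of madvise(MADV_DONTNEED), assuming mmap_sem is held and [start, end) lies inside the VMA:

#include <linux/mm.h>

/* Discard all PTEs in [start, end); touched pages fault back in
 * (zero-filled anon, or re-read file pages) on next access. */
static void discard_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start, NULL);
}
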
1423 struct mm_struct *mm = vma->vm_mm; in zap_page_range_single() local
1428 tlb_gather_mmu(&tlb, mm, address, end); in zap_page_range_single()
1429 update_hiwater_rss(mm); in zap_page_range_single()
1430 mmu_notifier_invalidate_range_start(mm, address, end); in zap_page_range_single()
1432 mmu_notifier_invalidate_range_end(mm, address, end); in zap_page_range_single()
1459 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, in __get_locked_pte() argument
1462 pgd_t * pgd = pgd_offset(mm, addr); in __get_locked_pte()
1463 pud_t * pud = pud_alloc(mm, pgd, addr); in __get_locked_pte()
1465 pmd_t * pmd = pmd_alloc(mm, pud, addr); in __get_locked_pte()
1468 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
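
__get_locked_pte() (lines 1459-1468) walks pgd -> pud -> pmd, allocating missing intermediate levels, and returns the PTE mapped and locked. A sketch of the usual calling pattern through the get_locked_pte() wrapper; probe_pte is a hypothetical helper:

#include <linux/mm.h>
#include <linux/printk.h>

static int probe_pte(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte = get_locked_pte(mm, addr, &ptl);

	if (!pte)
		return -ENOMEM;
	if (pte_present(*pte))
		pr_info("addr %#lx -> pfn %#lx\n", addr, pte_pfn(*pte));
	pte_unmap_unlock(pte, ptl);	/* always unmap and drop the lock */
	return 0;
}
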
1484 struct mm_struct *mm = vma->vm_mm; in insert_page() local
1494 pte = get_locked_pte(mm, addr, &ptl); in insert_page()
1503 inc_mm_counter_fast(mm, MM_FILEPAGES); in insert_page()
1505 set_pte_at(mm, addr, pte, mk_pte(page, prot)); in insert_page()
1562 struct mm_struct *mm = vma->vm_mm; in insert_pfn() local
1568 pte = get_locked_pte(mm, addr, &ptl); in insert_pfn()
1577 set_pte_at(mm, addr, pte, entry); in insert_pfn()
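
insert_pfn() (lines 1562-1577) backs vm_insert_pfn(), which drivers call from fault handlers of VM_PFNMAP mappings. A sketch of that pattern using this era's vm_fault layout (vmf->virtual_address was removed in later kernels); mydev_get_pfn() is a hypothetical offset-to-PFN helper:

#include <linux/mm.h>

extern unsigned long mydev_get_pfn(struct vm_area_struct *vma,
				   pgoff_t pgoff);	/* hypothetical */

static int mydev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->virtual_address;
	unsigned long pfn = mydev_get_pfn(vma, vmf->pgoff);
	int err = vm_insert_pfn(vma, addr, pfn);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)	/* -EBUSY: raced, PTE already there */
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;		/* PTE installed, no struct page */
}
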
1662 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, in remap_pte_range() argument
1669 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); in remap_pte_range()
1675 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); in remap_pte_range()
1683 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, in remap_pmd_range() argument
1691 pmd = pmd_alloc(mm, pud, addr); in remap_pmd_range()
1697 if (remap_pte_range(mm, pmd, addr, next, in remap_pmd_range()
1704 static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, in remap_pud_range() argument
1712 pud = pud_alloc(mm, pgd, addr); in remap_pud_range()
1717 if (remap_pmd_range(mm, pud, addr, next, in remap_pud_range()
1740 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range() local
1775 pgd = pgd_offset(mm, addr); in remap_pfn_range()
1779 err = remap_pud_range(mm, pgd, addr, next, in remap_pfn_range()
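
remap_pfn_range() (lines 1662-1779) descends through remap_pud/pmd/pte_range to fill a whole range with special PTEs. Its classic consumer is a driver mmap method; a sketch, where mydev_phys and MYDEV_BUF_SIZE are hypothetical device constants:

#include <linux/mm.h>

#define MYDEV_BUF_SIZE (1UL << 20)	/* hypothetical: 1 MiB buffer */
extern phys_addr_t mydev_phys;		/* hypothetical: physical base */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > MYDEV_BUF_SIZE)
		return -EINVAL;

	/* map the device buffer's pages straight into the user VMA */
	return remap_pfn_range(vma, vma->vm_start,
			       mydev_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
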
1839 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, in apply_to_pte_range() argument
1848 pte = (mm == &init_mm) ? in apply_to_pte_range()
1850 pte_alloc_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
1868 if (mm != &init_mm) in apply_to_pte_range()
1873 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, in apply_to_pmd_range() argument
1883 pmd = pmd_alloc(mm, pud, addr); in apply_to_pmd_range()
1888 err = apply_to_pte_range(mm, pmd, addr, next, fn, data); in apply_to_pmd_range()
1895 static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, in apply_to_pud_range() argument
1903 pud = pud_alloc(mm, pgd, addr); in apply_to_pud_range()
1908 err = apply_to_pmd_range(mm, pud, addr, next, fn, data); in apply_to_pud_range()
1919 int apply_to_page_range(struct mm_struct *mm, unsigned long addr, in apply_to_page_range() argument
1928 pgd = pgd_offset(mm, addr); in apply_to_page_range()
1931 err = apply_to_pud_range(mm, pgd, addr, next, fn, data); in apply_to_page_range()
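
apply_to_page_range() (lines 1839-1931) walks a range, allocating page-table levels as needed, and invokes a pte_fn_t callback on each PTE. A sketch that counts present PTEs in a kernel-space range; note the callback signature shown (with the pgtable_t token) matches this era and changed in later kernels:

#include <linux/mm.h>

static int count_present_cb(pte_t *pte, pgtable_t token,
			    unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;			/* non-zero would abort the walk */
}

static unsigned long count_present(unsigned long addr, unsigned long size)
{
	unsigned long count = 0;

	if (apply_to_page_range(&init_mm, addr, size, count_present_cb,
				&count))
		return 0;		/* walk failed (e.g. -ENOMEM) */
	return count;
}
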
1948 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, in pte_unmap_same() argument
1954 spinlock_t *ptl = pte_lockptr(mm, pmd); in pte_unmap_same()
2033 static inline int wp_page_reuse(struct mm_struct *mm, in wp_page_reuse() argument
2100 static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, in wp_page_copy() argument
2126 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) in wp_page_copy()
2131 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in wp_page_copy()
2136 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in wp_page_copy()
2140 dec_mm_counter_fast(mm, MM_FILEPAGES); in wp_page_copy()
2141 inc_mm_counter_fast(mm, MM_ANONPAGES); in wp_page_copy()
2144 inc_mm_counter_fast(mm, MM_ANONPAGES); in wp_page_copy()
2164 set_pte_at_notify(mm, address, page_table, entry); in wp_page_copy()
2203 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in wp_page_copy()
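
wp_page_copy() (lines 2100-2203) replaces a shared PTE with one pointing at a private copy, keeping secondary MMUs (KVM, IOMMUs) in sync via the mmu_notifier bracket visible above. A condensed sketch of that core; the real code also rechecks pte_same() under the PTE lock before committing:

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static void replace_pte_sketch(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd, pte_t entry)
{
	unsigned long start = address & PAGE_MASK;
	unsigned long end = start + PAGE_SIZE;
	spinlock_t *ptl;
	pte_t *page_table;

	/* the bracket opens before the PTE lock: range_start may sleep */
	mmu_notifier_invalidate_range_start(mm, start, end);

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	ptep_clear_flush_notify(vma, address, page_table);  /* old PTE gone */
	set_pte_at_notify(mm, address, page_table, entry);  /* new PTE live */
	update_mmu_cache(vma, address, page_table);
	pte_unmap_unlock(page_table, ptl);

	mmu_notifier_invalidate_range_end(mm, start, end);
}
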
2229 static int wp_pfn_shared(struct mm_struct *mm, in wp_pfn_shared() argument
2247 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in wp_pfn_shared()
2257 return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte, in wp_pfn_shared()
2261 static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, in wp_page_shared() argument
2292 page_table = pte_offset_map_lock(mm, pmd, address, in wp_page_shared()
2303 return wp_page_reuse(mm, vma, address, page_table, ptl, in wp_page_shared()
2325 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_wp_page() argument
2343 return wp_pfn_shared(mm, vma, address, page_table, ptl, in do_wp_page()
2347 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
2360 page_table = pte_offset_map_lock(mm, pmd, address, in do_wp_page()
2378 return wp_page_reuse(mm, vma, address, page_table, ptl, in do_wp_page()
2384 return wp_page_shared(mm, vma, address, page_table, pmd, in do_wp_page()
2394 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
2486 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_swap_page() argument
2499 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) in do_swap_page()
2505 migration_entry_wait(mm, pmd, address); in do_swap_page()
2524 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_swap_page()
2534 mem_cgroup_count_vm_event(mm, PGMAJFAULT); in do_swap_page()
2547 locked = lock_page_or_retry(page, mm, flags); in do_swap_page()
2571 if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) { in do_swap_page()
2579 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_swap_page()
2598 inc_mm_counter_fast(mm, MM_ANONPAGES); in do_swap_page()
2599 dec_mm_counter_fast(mm, MM_SWAPENTS); in do_swap_page()
2610 set_pte_at(mm, address, page_table, pte); in do_swap_page()
2638 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); in do_swap_page()
2703 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_anonymous_page() argument
2723 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { in do_anonymous_page()
2726 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_anonymous_page()
2745 if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) in do_anonymous_page()
2759 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_anonymous_page()
2772 inc_mm_counter_fast(mm, MM_ANONPAGES); in do_anonymous_page()
2777 set_pte_at(mm, address, page_table, entry); in do_anonymous_page()
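
do_anonymous_page() (lines 2703-2777) commits a freshly zeroed page by building the PTE, bumping MM_ANONPAGES, wiring up the anon rmap, and only then making the PTE visible. A sketch of that commit order (memcg charging omitted; inc_mm_counter_fast is the memory.c-local fast path from line 159, so this only compiles in that file):

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/swap.h>

static int anon_commit_sketch(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmd,
			      struct page *page)
{
	spinlock_t *ptl;
	pte_t *page_table;
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte_none(*page_table)) {	/* raced with a concurrent fault */
		pte_unmap_unlock(page_table, ptl);
		return -EAGAIN;
	}
	inc_mm_counter_fast(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, address);	/* rmap before PTE */
	lru_cache_add_active_or_unevictable(page, vma);
	set_pte_at(mm, address, page_table, entry);	/* page goes live */
	update_mmu_cache(vma, address, page_table);
	pte_unmap_unlock(page_table, ptl);
	return 0;
}
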
2979 static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_read_fault() argument
2994 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
3005 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
3019 static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_cow_fault() argument
3036 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) { in do_cow_fault()
3049 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_cow_fault()
3085 static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_shared_fault() argument
3114 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_shared_fault()
3154 static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_fault() argument
3166 return do_read_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3169 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3171 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); in do_fault()
3189 static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_numa_page() argument
3213 ptl = pte_lockptr(mm, pmd); in do_numa_page()
3225 set_pte_at(mm, addr, ptep, pte); in do_numa_page()
3275 static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in create_huge_pmd() argument
3279 return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags); in create_huge_pmd()
3285 static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in wp_huge_pmd() argument
3290 return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd); in wp_huge_pmd()
3312 static int handle_pte_fault(struct mm_struct *mm, in handle_pte_fault() argument
3332 return do_anonymous_page(mm, vma, address, in handle_pte_fault()
3335 return do_fault(mm, vma, address, pte, pmd, in handle_pte_fault()
3338 return do_swap_page(mm, vma, address, in handle_pte_fault()
3343 return do_numa_page(mm, vma, address, entry, pte, pmd); in handle_pte_fault()
3345 ptl = pte_lockptr(mm, pmd); in handle_pte_fault()
3351 return do_wp_page(mm, vma, address, in handle_pte_fault()
3379 static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, in __handle_mm_fault() argument
3388 return hugetlb_fault(mm, vma, address, flags); in __handle_mm_fault()
3390 pgd = pgd_offset(mm, address); in __handle_mm_fault()
3391 pud = pud_alloc(mm, pgd, address); in __handle_mm_fault()
3394 pmd = pmd_alloc(mm, pud, address); in __handle_mm_fault()
3398 int ret = create_huge_pmd(mm, vma, address, pmd, flags); in __handle_mm_fault()
3418 return do_huge_pmd_numa_page(mm, vma, address, in __handle_mm_fault()
3422 ret = wp_huge_pmd(mm, vma, address, pmd, in __handle_mm_fault()
3427 huge_pmd_set_accessed(mm, vma, address, pmd, in __handle_mm_fault()
3440 unlikely(__pte_alloc(mm, vma, pmd, address))) in __handle_mm_fault()
3463 return handle_pte_fault(mm, vma, address, pte, pmd, flags); in __handle_mm_fault()
3472 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, in handle_mm_fault() argument
3480 mem_cgroup_count_vm_event(mm, PGFAULT); in handle_mm_fault()
3492 ret = __handle_mm_fault(mm, vma, address, flags); in handle_mm_fault()
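
handle_mm_fault() (line 3472) is the entry point each architecture's fault handler calls once it has located the VMA. A sketch of the common caller pattern of this era, including the VM_FAULT_RETRY dance (modeled loosely on arch/x86/mm/fault.c; signal handling and stack expansion omitted):

#include <linux/mm.h>
#include <linux/sched.h>

static int fault_in_sketch(struct mm_struct *mm, unsigned long address,
			   bool write)
{
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;

	if (write)
		flags |= FAULT_FLAG_WRITE;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		up_read(&mm->mmap_sem);
		return -EFAULT;		/* no mapping: deliver SIGSEGV */
	}

	fault = handle_mm_fault(mm, vma, address, flags);
	if (fault & VM_FAULT_RETRY) {
		/* mmap_sem was already dropped for us; retry exactly once */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
	up_read(&mm->mmap_sem);
	return (fault & VM_FAULT_ERROR) ? -EFAULT : 0;
}
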
3515 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) in __pud_alloc() argument
3517 pud_t *new = pud_alloc_one(mm, address); in __pud_alloc()
3523 spin_lock(&mm->page_table_lock); in __pud_alloc()
3525 pud_free(mm, new); in __pud_alloc()
3527 pgd_populate(mm, pgd, new); in __pud_alloc()
3528 spin_unlock(&mm->page_table_lock); in __pud_alloc()
3538 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) in __pmd_alloc() argument
3540 pmd_t *new = pmd_alloc_one(mm, address); in __pmd_alloc()
3546 spin_lock(&mm->page_table_lock); in __pmd_alloc()
3549 mm_inc_nr_pmds(mm); in __pmd_alloc()
3550 pud_populate(mm, pud, new); in __pmd_alloc()
3552 pmd_free(mm, new); in __pmd_alloc()
3555 mm_inc_nr_pmds(mm); in __pmd_alloc()
3556 pgd_populate(mm, pud, new); in __pmd_alloc()
3558 pmd_free(mm, new); in __pmd_alloc()
3560 spin_unlock(&mm->page_table_lock); in __pmd_alloc()
3565 static int __follow_pte(struct mm_struct *mm, unsigned long address, in __follow_pte() argument
3573 pgd = pgd_offset(mm, address); in __follow_pte()
3590 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); in __follow_pte()
3603 static inline int follow_pte(struct mm_struct *mm, unsigned long address, in follow_pte() argument
3610 !(res = __follow_pte(mm, address, ptepp, ptlp))); in follow_pte()
3699 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, in __access_remote_vm() argument
3705 down_read(&mm->mmap_sem); in __access_remote_vm()
3712 ret = get_user_pages(tsk, mm, addr, 1, in __access_remote_vm()
3722 vma = find_vma(mm, addr); in __access_remote_vm()
3754 up_read(&mm->mmap_sem); in __access_remote_vm()
3769 int access_remote_vm(struct mm_struct *mm, unsigned long addr, in access_remote_vm() argument
3772 return __access_remote_vm(NULL, mm, addr, buf, len, write); in access_remote_vm()
3783 struct mm_struct *mm; in access_process_vm() local
3786 mm = get_task_mm(tsk); in access_process_vm()
3787 if (!mm) in access_process_vm()
3790 ret = __access_remote_vm(tsk, mm, addr, buf, len, write); in access_process_vm()
3791 mmput(mm); in access_process_vm()
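
__access_remote_vm() (lines 3699-3754) pins the target page with get_user_pages() and copies through a temporary kernel mapping; access_process_vm() is the task-facing wrapper that ptrace uses. A sketch of a PTRACE_PEEKDATA-style read using this era's five-argument access_process_vm():

#include <linux/mm.h>
#include <linux/sched.h>

static int peek_word(struct task_struct *child, unsigned long addr,
		     unsigned long *out)
{
	int copied = access_process_vm(child, addr, out, sizeof(*out), 0);

	return copied == sizeof(*out) ? 0 : -EIO;	/* partial read = fail */
}
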
3801 struct mm_struct *mm = current->mm; in print_vma_addr() local
3811 down_read(&mm->mmap_sem); in print_vma_addr()
3812 vma = find_vma(mm, ip); in print_vma_addr()
3828 up_read(&mm->mmap_sem); in print_vma_addr()
3846 if (current->mm) in __might_fault()
3847 might_lock_read(&current->mm->mmap_sem); in __might_fault()