Lines matching refs: mm (identifier cross-reference for mm/huge_memory.c). Each entry gives the source line number, the matching source line, and the enclosing function, with the role of mm (argument, local, or member) noted.

89 	struct mm_struct *mm;  member
714 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, in __do_huge_pmd_anonymous_page() argument
727 if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) { in __do_huge_pmd_anonymous_page()
733 pgtable = pte_alloc_one(mm, haddr); in __do_huge_pmd_anonymous_page()
748 ptl = pmd_lock(mm, pmd); in __do_huge_pmd_anonymous_page()
753 pte_free(mm, pgtable); in __do_huge_pmd_anonymous_page()
764 pte_free(mm, pgtable); in __do_huge_pmd_anonymous_page()
776 pgtable_trans_huge_deposit(mm, pmd, pgtable); in __do_huge_pmd_anonymous_page()
777 set_pmd_at(mm, haddr, pmd, entry); in __do_huge_pmd_anonymous_page()
778 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
779 atomic_long_inc(&mm->nr_ptes); in __do_huge_pmd_anonymous_page()
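
The cluster at 714-779 is the tail of the anonymous-THP fault path: charge the new page to the memcg, preallocate a pte table, then publish the huge pmd under the per-pmd lock. A minimal sketch of the publish step at 776-779, assuming a prepared page; mk_pmd()/pmd_mkhuge() stand in for the file's local mk_huge_pmd() helper, and the rmap, memcg-commit and dirty/write-bit handling are elided:

    #include <linux/mm.h>
    #include <linux/huge_mm.h>

    /*
     * Illustrative sketch only. The deposited pte table is consumed later
     * by a split, or freed by zap_huge_pmd(), which reverses the counters.
     */
    static void thp_publish_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
                                   unsigned long haddr, pmd_t *pmd,
                                   struct page *page, pgtable_t pgtable)
    {
            pmd_t entry = pmd_mkhuge(mk_pmd(page, vma->vm_page_prot));
            spinlock_t *ptl = pmd_lock(mm, pmd);          /* split pmd lock */

            pgtable_trans_huge_deposit(mm, pmd, pgtable); /* stash for a future split */
            set_pmd_at(mm, haddr, pmd, entry);
            add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
            atomic_long_inc(&mm->nr_ptes);                /* deposited table is accounted */
            spin_unlock(ptl);
    }
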
793 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, in set_huge_zero_page() argument
802 pgtable_trans_huge_deposit(mm, pmd, pgtable); in set_huge_zero_page()
803 set_pmd_at(mm, haddr, pmd, entry); in set_huge_zero_page()
804 atomic_long_inc(&mm->nr_ptes); in set_huge_zero_page()
808 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_anonymous_page() argument
822 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) && in do_huge_pmd_anonymous_page()
829 pgtable = pte_alloc_one(mm, haddr); in do_huge_pmd_anonymous_page()
834 pte_free(mm, pgtable); in do_huge_pmd_anonymous_page()
838 ptl = pmd_lock(mm, pmd); in do_huge_pmd_anonymous_page()
848 set_huge_zero_page(pgtable, mm, vma, in do_huge_pmd_anonymous_page()
857 pte_free(mm, pgtable); in do_huge_pmd_anonymous_page()
868 return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp, in do_huge_pmd_anonymous_page()
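
Lines 808-868 show both branches of do_huge_pmd_anonymous_page(): a read fault on an empty pmd can map the shared huge zero page (822-857) instead of allocating memory, while everything else falls through to __do_huge_pmd_anonymous_page(). A condensed sketch of that decision, using the file's set_huge_zero_page() and get_huge_zero_page() helpers; the sysfs zero-page toggle, the pmd_none() recheck under the lock, and the error unwinding are elided:

    /* Condensed from lines 822-868; not the full fault path. */
    static int thp_read_fault_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
                                     unsigned long address, pmd_t *pmd,
                                     unsigned int flags)
    {
            unsigned long haddr = address & HPAGE_PMD_MASK;
            pgtable_t pgtable;
            spinlock_t *ptl;

            if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
                    pgtable = pte_alloc_one(mm, haddr); /* a split still needs a table */
                    if (unlikely(!pgtable))
                            return VM_FAULT_OOM;
                    ptl = pmd_lock(mm, pmd);
                    set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
                                       get_huge_zero_page());
                    spin_unlock(ptl);
                    return 0;
            }
            /* Sketch only: the real code proceeds to allocate a THP here. */
            return VM_FAULT_FALLBACK;
    }
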
875 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pmd() local
879 ptl = pmd_lock(mm, pmd); in insert_pfn_pmd()
886 set_pmd_at(mm, addr, pmd, entry); in insert_pfn_pmd()
988 void huge_pmd_set_accessed(struct mm_struct *mm, in huge_pmd_set_accessed() argument
998 ptl = pmd_lock(mm, pmd); in huge_pmd_set_accessed()
1042 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, in do_huge_pmd_wp_page_fallback() argument
1070 mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL, in do_huge_pmd_wp_page_fallback()
1096 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page_fallback()
1098 ptl = pmd_lock(mm, pmd); in do_huge_pmd_wp_page_fallback()
1106 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in do_huge_pmd_wp_page_fallback()
1107 pmd_populate(mm, &_pmd, pgtable); in do_huge_pmd_wp_page_fallback()
1120 set_pte_at(mm, haddr, pte, entry); in do_huge_pmd_wp_page_fallback()
1126 pmd_populate(mm, pmd, pgtable); in do_huge_pmd_wp_page_fallback()
1130 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page_fallback()
1140 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page_fallback()
1151 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_wp_page() argument
1163 ptl = pmd_lockptr(mm, pmd); in do_huge_pmd_wp_page()
1198 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, in do_huge_pmd_wp_page()
1210 if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) { in do_huge_pmd_wp_page()
1232 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page()
1250 set_pmd_at(mm, haddr, pmd, entry); in do_huge_pmd_wp_page()
1253 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
1264 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page()
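
The write-protect fault hits (1151-1264) illustrate the mmu_notifier discipline that recurs through this file: every path replacing a huge pmd brackets the change with invalidate_range_start/end so secondary MMUs (KVM, IOMMU drivers) drop stale translations. A schematic of the replacement step around lines 1232-1264, with the new entry's construction, the page copy, and the old page's release elided:

    #include <linux/mmu_notifier.h>

    /* Schematic only: notify, swap the pmd under its lock, notify again. */
    static void thp_cow_replace_sketch(struct vm_area_struct *vma,
                                       unsigned long haddr, pmd_t *pmd,
                                       pmd_t new_entry)
    {
            struct mm_struct *mm = vma->vm_mm;
            unsigned long mmun_start = haddr;               /* notifier range */
            unsigned long mmun_end = haddr + HPAGE_PMD_SIZE;
            spinlock_t *ptl;

            mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
            ptl = pmd_lock(mm, pmd);
            pmdp_huge_clear_flush_notify(vma, haddr, pmd);  /* clear + TLB flush */
            set_pmd_at(mm, haddr, pmd, new_entry);
            spin_unlock(ptl);
            mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
    }
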
1277 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd() local
1280 assert_spin_locked(pmd_lockptr(mm, pmd)); in follow_trans_huge_pmd()
1328 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_numa_page() argument
1345 ptl = pmd_lock(mm, pmdp); in do_huge_pmd_numa_page()
1424 migrated = migrate_misplaced_transhuge_page(mm, vma, in do_huge_pmd_numa_page()
1440 set_pmd_at(mm, haddr, pmdp, pmd); in do_huge_pmd_numa_page()
1470 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, in zap_huge_pmd()
1478 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); in zap_huge_pmd()
1479 atomic_long_dec(&tlb->mm->nr_ptes); in zap_huge_pmd()
1486 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); in zap_huge_pmd()
1488 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); in zap_huge_pmd()
1489 atomic_long_dec(&tlb->mm->nr_ptes); in zap_huge_pmd()
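
zap_huge_pmd() (1470-1489) is the teardown mirror of the fault path, working through the mmu_gather's mm rather than a vma: clear the pmd, give back the deposited pte table, and reverse the nr_ptes and MM_ANONPAGES accounting. The huge-zero-page branch skips the anon counter because that page was never charged. A sketch of the bookkeeping, with rmap removal and the deferred page free through the gather elided:

    #include <asm/tlb.h>

    /* Mirrors lines 1470-1489: undo every counter the fault path bumped. */
    static void thp_zap_accounting_sketch(struct mmu_gather *tlb,
                                          unsigned long addr, pmd_t *pmd)
    {
            pmd_t orig = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
                                                      tlb->fullmm);

            tlb_remove_pmd_tlb_entry(tlb, pmd, addr);   /* queue the TLB flush */
            pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
            atomic_long_dec(&tlb->mm->nr_ptes);
            if (!is_huge_zero_pmd(orig))                /* zero page is never charged */
                    add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
    }
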
1505 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd() local
1528 new_ptl = pmd_lockptr(mm, new_pmd); in move_huge_pmd()
1531 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); in move_huge_pmd()
1536 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); in move_huge_pmd()
1537 pgtable_trans_huge_deposit(mm, new_pmd, pgtable); in move_huge_pmd()
1539 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); in move_huge_pmd()
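
move_huge_pmd() (1505-1539) is mremap()'s huge-pmd hook and shows the two-lock pattern: when source and destination pmds hash to different split locks, the second is taken nested under the first, and the deposited pte table migrates with the entry so a later split of the new location still finds one. A sketch of lines 1528-1539:

    /* Lock-ordering sketch; the "did anyone race us" checks are elided. */
    static void thp_move_sketch(struct mm_struct *mm,
                                unsigned long old_addr, pmd_t *old_pmd,
                                unsigned long new_addr, pmd_t *new_pmd)
    {
            spinlock_t *old_ptl, *new_ptl;
            pgtable_t pgtable;
            pmd_t pmd;

            old_ptl = pmd_lock(mm, old_pmd);
            new_ptl = pmd_lockptr(mm, new_pmd);
            if (new_ptl != old_ptl)                 /* both pmds may share a lock */
                    spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

            pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
            pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
            pgtable_trans_huge_deposit(mm, new_pmd, pgtable); /* move the deposit too */
            set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));

            if (new_ptl != old_ptl)
                    spin_unlock(new_ptl);
            spin_unlock(old_ptl);
    }
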
1557 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd() local
1577 entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd); in change_huge_pmd()
1582 set_pmd_at(mm, addr, pmd, entry); in change_huge_pmd()
1626 struct mm_struct *mm, in page_check_address_pmd() argument
1638 pgd = pgd_offset(mm, address); in page_check_address_pmd()
1646 *ptl = pmd_lock(mm, pmd); in page_check_address_pmd()
1675 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_splitting() local
1683 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in __split_huge_page_splitting()
1684 pmd = page_check_address_pmd(page, mm, address, in __split_huge_page_splitting()
1699 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in __split_huge_page_splitting()
1827 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_map() local
1834 pmd = page_check_address_pmd(page, mm, address, in __split_huge_page_map()
1837 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_page_map()
1838 pmd_populate(mm, &_pmd, pgtable); in __split_huge_page_map()
1859 set_pte_at(mm, haddr, pte, entry); in __split_huge_page_map()
1891 pmd_populate(mm, pmd, pgtable); in __split_huge_page_map()
2071 static struct mm_slot *get_mm_slot(struct mm_struct *mm) in get_mm_slot() argument
2075 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) in get_mm_slot()
2076 if (mm == mm_slot->mm) in get_mm_slot()
2082 static void insert_to_mm_slots_hash(struct mm_struct *mm, in insert_to_mm_slots_hash() argument
2085 mm_slot->mm = mm; in insert_to_mm_slots_hash()
2086 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); in insert_to_mm_slots_hash()
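
khugepaged tracks registered address spaces in mm_slots_hash, keyed by the mm_struct pointer itself (2071-2088). The pattern is small enough to show whole; this standalone sketch trims struct mm_slot (the "member" hit at line 89) to the fields used here and omits the scan-list linkage and locking:

    #include <linux/hashtable.h>

    #define MM_SLOTS_HASH_BITS 10
    static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

    struct mm_slot {
            struct hlist_node hash;     /* link in mm_slots_hash */
            struct mm_struct *mm;       /* the key: line 89's member */
    };

    /* As at lines 2071-2076: the pointer value is the hash key, and a
     * bucket walk plus pointer compare resolves collisions. */
    static struct mm_slot *get_mm_slot(struct mm_struct *mm)
    {
            struct mm_slot *mm_slot;

            hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
                    if (mm == mm_slot->mm)
                            return mm_slot;
            return NULL;
    }

    static void insert_to_mm_slots_hash(struct mm_struct *mm,
                                        struct mm_slot *mm_slot)
    {
            mm_slot->mm = mm;
            hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
    }
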
2089 static inline int khugepaged_test_exit(struct mm_struct *mm) in khugepaged_test_exit() argument
2091 return atomic_read(&mm->mm_users) == 0; in khugepaged_test_exit()
2094 int __khugepaged_enter(struct mm_struct *mm) in __khugepaged_enter() argument
2104 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); in __khugepaged_enter()
2105 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { in __khugepaged_enter()
2111 insert_to_mm_slots_hash(mm, mm_slot); in __khugepaged_enter()
2120 atomic_inc(&mm->mm_count); in __khugepaged_enter()
2147 void __khugepaged_exit(struct mm_struct *mm) in __khugepaged_exit() argument
2153 mm_slot = get_mm_slot(mm); in __khugepaged_exit()
2162 clear_bit(MMF_VM_HUGEPAGE, &mm->flags); in __khugepaged_exit()
2164 mmdrop(mm); in __khugepaged_exit()
2174 down_write(&mm->mmap_sem); in __khugepaged_exit()
2175 up_write(&mm->mmap_sem); in __khugepaged_exit()
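
The enter/exit pair (2089-2175) encodes khugepaged's lifetime rules. __khugepaged_enter() pins only mm_count (line 2120), keeping the mm_struct allocated without holding the address space alive, and khugepaged_test_exit() simply watches mm_users reach zero. __khugepaged_exit() then has two cases: normally it unhashes the slot and drops the pin with mmdrop(); but if the scanner holds the slot right now, it takes mmap_sem for write once, purely as a barrier, and lets khugepaged do the teardown itself. A schematic of that choice, with the spinlock protecting the decision elided; free_mm_slot() is the file's slot-teardown helper, not among the hits above:

    /* Schematic of lines 2147-2175; hash/list locking elided. */
    static void khugepaged_exit_sketch(struct mm_struct *mm,
                                       struct mm_slot *mm_slot,
                                       bool scanner_holds_slot)
    {
            if (mm_slot && !scanner_holds_slot) {
                    clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
                    free_mm_slot(mm_slot);
                    mmdrop(mm);                 /* drop the enter-time pin */
            } else if (mm_slot) {
                    /*
                     * khugepaged is scanning this mm. mm_users is already 0,
                     * so it will bail via khugepaged_test_exit(); the
                     * write-lock/unlock pair only waits for it to get out.
                     */
                    down_write(&mm->mmap_sem);
                    up_write(&mm->mmap_sem);
            }
    }
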
2413 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, in khugepaged_alloc_page() argument
2424 up_read(&mm->mmap_sem); in khugepaged_alloc_page()
2480 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, in khugepaged_alloc_page() argument
2483 up_read(&mm->mmap_sem); in khugepaged_alloc_page()
2503 static void collapse_huge_page(struct mm_struct *mm, in collapse_huge_page() argument
2528 new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node); in collapse_huge_page()
2532 if (unlikely(mem_cgroup_try_charge(new_page, mm, in collapse_huge_page()
2541 down_write(&mm->mmap_sem); in collapse_huge_page()
2542 if (unlikely(khugepaged_test_exit(mm))) in collapse_huge_page()
2545 vma = find_vma(mm, address); in collapse_huge_page()
2554 pmd = mm_find_pmd(mm, address); in collapse_huge_page()
2561 pte_ptl = pte_lockptr(mm, pmd); in collapse_huge_page()
2565 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in collapse_huge_page()
2566 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ in collapse_huge_page()
2575 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in collapse_huge_page()
2590 pmd_populate(mm, pmd, pmd_pgtable(_pmd)); in collapse_huge_page()
2622 pgtable_trans_huge_deposit(mm, pmd, pgtable); in collapse_huge_page()
2623 set_pmd_at(mm, address, pmd, _pmd); in collapse_huge_page()
2631 up_write(&mm->mmap_sem); in collapse_huge_page()
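
collapse_huge_page() (2503-2631) demonstrates the revalidation dance required of any path that drops mmap_sem: the huge page is allocated with the semaphore released (khugepaged_alloc_page() even does the up_read() itself, lines 2424/2483), so once mmap_sem is retaken for write, nothing learned earlier can be trusted. The mm may be exiting, the VMA may be gone or moved, the pmd may no longer be suitable. A compressed sketch of the recheck at 2541-2561:

    /* Revalidate mm, vma and pmd from scratch after re-acquiring mmap_sem.
     * The vma-suitability checks (hugepage flags, alignment) are elided. */
    static pmd_t *collapse_revalidate_sketch(struct mm_struct *mm,
                                             unsigned long address)
    {
            struct vm_area_struct *vma;
            pmd_t *pmd;

            down_write(&mm->mmap_sem);
            if (unlikely(khugepaged_test_exit(mm)))
                    goto fail;                  /* mm_users hit zero meanwhile */
            vma = find_vma(mm, address);
            if (!vma || address < vma->vm_start)
                    goto fail;                  /* vma vanished or moved */
            pmd = mm_find_pmd(mm, address);
            if (!pmd)
                    goto fail;
            return pmd;                         /* caller keeps mmap_sem held */
    fail:
            up_write(&mm->mmap_sem);
            return NULL;
    }
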
2639 static int khugepaged_scan_pmd(struct mm_struct *mm, in khugepaged_scan_pmd() argument
2655 pmd = mm_find_pmd(mm, address); in khugepaged_scan_pmd()
2660 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in khugepaged_scan_pmd()
2711 collapse_huge_page(mm, address, hpage, vma, node); in khugepaged_scan_pmd()
2719 struct mm_struct *mm = mm_slot->mm; in collect_mm_slot() local
2723 if (khugepaged_test_exit(mm)) { in collect_mm_slot()
2736 mmdrop(mm); in collect_mm_slot()
2746 struct mm_struct *mm; in khugepaged_scan_mm_slot() local
2763 mm = mm_slot->mm; in khugepaged_scan_mm_slot()
2764 down_read(&mm->mmap_sem); in khugepaged_scan_mm_slot()
2765 if (unlikely(khugepaged_test_exit(mm))) in khugepaged_scan_mm_slot()
2768 vma = find_vma(mm, khugepaged_scan.address); in khugepaged_scan_mm_slot()
2775 if (unlikely(khugepaged_test_exit(mm))) { in khugepaged_scan_mm_slot()
2797 if (unlikely(khugepaged_test_exit(mm))) in khugepaged_scan_mm_slot()
2803 ret = khugepaged_scan_pmd(mm, vma, in khugepaged_scan_mm_slot()
2817 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ in khugepaged_scan_mm_slot()
2826 if (khugepaged_test_exit(mm) || !vma) { in khugepaged_scan_mm_slot()
2934 struct mm_struct *mm = vma->vm_mm; in __split_huge_zero_page_pmd() local
2942 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_zero_page_pmd()
2943 pmd_populate(mm, &_pmd, pgtable); in __split_huge_zero_page_pmd()
2951 set_pte_at(mm, haddr, pte, entry); in __split_huge_zero_page_pmd()
2955 pmd_populate(mm, pmd, pgtable); in __split_huge_zero_page_pmd()
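
Both split paths (__split_huge_page_map at 1827-1891 and __split_huge_zero_page_pmd at 2934-2955) run the deposit machinery in reverse: withdraw the stashed pte table, populate a temporary pmd with it off to the side, fill in all HPAGE_PMD_NR ptes, then publish the table in place of the huge entry. A sketch of that loop, where make_small_pte() is a hypothetical stand-in for building each entry (tail pages and the zero page differ here):

    /* Split pattern from lines 2942-2955; caller holds the pmd lock and
     * has already cleared or downgraded the huge entry as needed. */
    static void thp_split_remap_sketch(struct mm_struct *mm,
                                       unsigned long haddr, pmd_t *pmd)
    {
            pgtable_t pgtable = pgtable_trans_huge_withdraw(mm, pmd);
            pmd_t _pmd;
            int i;

            pmd_populate(mm, &_pmd, pgtable);   /* temporary, not yet visible */
            for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                    pte_t *pte = pte_offset_map(&_pmd, haddr);
                    set_pte_at(mm, haddr, pte,
                               make_small_pte(haddr)); /* hypothetical helper */
                    pte_unmap(pte);
            }
            smp_wmb();                          /* ptes visible before the pmd */
            pmd_populate(mm, pmd, pgtable);     /* publish the small mapping */
    }
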
2964 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_pmd() local
2974 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in __split_huge_page_pmd()
2975 ptl = pmd_lock(mm, pmd); in __split_huge_page_pmd()
2991 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in __split_huge_page_pmd()
3008 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, in split_huge_page_pmd_mm() argument
3013 vma = find_vma(mm, address); in split_huge_page_pmd_mm()
3018 static void split_huge_page_address(struct mm_struct *mm, in split_huge_page_address() argument
3027 pgd = pgd_offset(mm, address); in split_huge_page_address()
3042 split_huge_page_pmd_mm(mm, address, pmd); in split_huge_page_address()
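
Finally, split_huge_page_address() (3018-3042) uses the classic top-down walk to locate the pmd covering an arbitrary address before handing it to split_huge_page_pmd_mm(). This tree predates the p4d level, so the walk is pgd, then pud, then pmd:

    #include <asm/pgtable.h>

    /* Walk as at lines 3027-3042: NULL if any level is not present. */
    static pmd_t *walk_to_pmd_sketch(struct mm_struct *mm, unsigned long address)
    {
            pgd_t *pgd = pgd_offset(mm, address);
            pud_t *pud;
            pmd_t *pmd;

            if (!pgd_present(*pgd))
                    return NULL;
            pud = pud_offset(pgd, address);
            if (!pud_present(*pud))
                    return NULL;
            pmd = pmd_offset(pud, address);
            if (!pmd_present(*pmd))
                    return NULL;
            return pmd;     /* split_huge_page_pmd_mm() takes it from here */
    }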