Lines matching refs:mm (references to "mm" in the s390 page-table and gmap code)
27 unsigned long *crst_table_alloc(struct mm_struct *mm) in crst_table_alloc() argument
36 void crst_table_free(struct mm_struct *mm, unsigned long *table) in crst_table_free() argument
43 struct mm_struct *mm = arg; in __crst_table_upgrade() local
45 if (current->active_mm == mm) { in __crst_table_upgrade()
47 set_user_asce(mm); in __crst_table_upgrade()
52 int crst_table_upgrade(struct mm_struct *mm) in crst_table_upgrade() argument
57 BUG_ON(mm->context.asce_limit != (1UL << 42)); in crst_table_upgrade()
59 table = crst_table_alloc(mm); in crst_table_upgrade()
63 spin_lock_bh(&mm->page_table_lock); in crst_table_upgrade()
64 pgd = (unsigned long *) mm->pgd; in crst_table_upgrade()
66 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); in crst_table_upgrade()
67 mm->pgd = (pgd_t *) table; in crst_table_upgrade()
68 mm->context.asce_limit = 1UL << 53; in crst_table_upgrade()
69 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | in crst_table_upgrade()
71 mm->task_size = mm->context.asce_limit; in crst_table_upgrade()
72 spin_unlock_bh(&mm->page_table_lock); in crst_table_upgrade()
74 on_each_cpu(__crst_table_upgrade, mm, 0); in crst_table_upgrade()
78 void crst_table_downgrade(struct mm_struct *mm) in crst_table_downgrade() argument
83 BUG_ON(mm->context.asce_limit != (1UL << 42)); in crst_table_downgrade()
85 if (current->active_mm == mm) { in crst_table_downgrade()
87 __tlb_flush_mm(mm); in crst_table_downgrade()
90 pgd = mm->pgd; in crst_table_downgrade()
91 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); in crst_table_downgrade()
92 mm->context.asce_limit = 1UL << 31; in crst_table_downgrade()
93 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | in crst_table_downgrade()
95 mm->task_size = mm->context.asce_limit; in crst_table_downgrade()
96 crst_table_free(mm, (unsigned long *) pgd); in crst_table_downgrade()
98 if (current->active_mm == mm) in crst_table_downgrade()
99 set_user_asce(mm); in crst_table_downgrade()
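
The lines above come from crst_table_alloc()/crst_table_free() and the asce_limit upgrade/downgrade paths: the upgrade installs a new top-level CRST table, rebuilds mm->context.asce, and reloads the user ASCE on every CPU; the downgrade does the reverse for 31-bit tasks. A minimal caller-side sketch, grounded only in what is listed (the single 42-bit to 53-bit step guarded by the BUG_ON); the helper name, the header location and the error code are assumptions of this sketch, not taken from the listing:

#include <linux/mm.h>
#include <asm/pgalloc.h>        /* assumed home of the crst_table_upgrade() prototype */

/* Hedged sketch: grow a 42-bit address space before mapping above its limit. */
static int example_maybe_upgrade(struct mm_struct *mm, unsigned long map_end)
{
        if (map_end <= mm->context.asce_limit)
                return 0;               /* still fits, nothing to do */
        /* The listed upgrade handles exactly one step and BUG()s on any other
         * starting limit, so only invoke it from the 42-bit layout. */
        if (mm->context.asce_limit != (1UL << 42))
                return -ENOMEM;         /* illustrative error code */
        return crst_table_upgrade(mm);  /* rebuilds mm->pgd and mm->context.asce */
}
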
111 struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit) in gmap_alloc() argument
142 gmap->mm = mm; in gmap_alloc()
154 down_write(&mm->mmap_sem); in gmap_alloc()
155 list_add(&gmap->list, &mm->context.gmap_list); in gmap_alloc()
156 up_write(&mm->mmap_sem); in gmap_alloc()
169 __tlb_flush_asce(gmap->mm, gmap->asce); in gmap_flush_tlb()
208 __tlb_flush_asce(gmap->mm, gmap->asce); in gmap_free()
217 down_write(&gmap->mm->mmap_sem); in gmap_free()
219 up_write(&gmap->mm->mmap_sem); in gmap_free()
259 spin_lock(&gmap->mm->page_table_lock); in gmap_alloc_table()
267 spin_unlock(&gmap->mm->page_table_lock); in gmap_alloc_table()
348 down_write(&gmap->mm->mmap_sem); in gmap_unmap_segment()
351 up_write(&gmap->mm->mmap_sem); in gmap_unmap_segment()
380 down_write(&gmap->mm->mmap_sem); in gmap_map_segment()
390 up_write(&gmap->mm->mmap_sem); in gmap_map_segment()
434 down_read(&gmap->mm->mmap_sem); in gmap_translate()
436 up_read(&gmap->mm->mmap_sem); in gmap_translate()
447 static void gmap_unlink(struct mm_struct *mm, unsigned long *table, in gmap_unlink() argument
453 list_for_each_entry(gmap, &mm->context.gmap_list, list) { in gmap_unlink()
473 struct mm_struct *mm; in __gmap_link() local
509 mm = gmap->mm; in __gmap_link()
510 pgd = pgd_offset(mm, vmaddr); in __gmap_link()
523 ptl = pmd_lock(mm, pmd); in __gmap_link()
553 down_read(&gmap->mm->mmap_sem); in gmap_fault()
559 if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) { in gmap_fault()
565 up_read(&gmap->mm->mmap_sem); in gmap_fault()
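
The gmap_* lines above belong to the guest address space ("gmap") layer that KVM on s390 places on top of a host mm: gmap_alloc() registers a new gmap on mm->context.gmap_list, gmap_map_segment()/gmap_unmap_segment() wire guest ranges to user addresses, and gmap_fault() resolves a guest access via fixup_user_fault(). A hedged lifecycle sketch using only functions named in this listing; the argument order of gmap_map_segment() (host source, guest target, length) and gmap_fault() (guest address, fault flags), the prototype header, and the constants are assumptions of this sketch:

#include <linux/mm.h>
#include <asm/pgtable.h>        /* assumed home of struct gmap and the gmap_* prototypes */

/* Hedged sketch: create a gmap, map one 1 MB guest segment, resolve a fault. */
static int example_gmap_lifecycle(struct mm_struct *mm)
{
        struct gmap *gmap;
        unsigned long guest_addr = 0x100000;    /* illustrative, 1 MB aligned */
        unsigned long host_addr  = 0x200000;    /* illustrative, 1 MB aligned */
        int rc;

        gmap = gmap_alloc(mm, (1UL << 44) - 1); /* limit, as in gmap_alloc(mm, limit) above */
        if (!gmap)
                return -ENOMEM;

        rc = gmap_map_segment(gmap, host_addr, guest_addr, 0x100000);
        if (rc)
                goto out_free;

        /* Walks the gmap tables and, via fixup_user_fault() as listed above,
         * makes the backing host page present and writable. */
        rc = gmap_fault(gmap, guest_addr, FAULT_FLAG_WRITE);
out_free:
        gmap_free(gmap);
        return rc;
}
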
570 static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm) in gmap_zap_swap_entry() argument
573 dec_mm_counter(mm, MM_SWAPENTS); in gmap_zap_swap_entry()
578 dec_mm_counter(mm, MM_ANONPAGES); in gmap_zap_swap_entry()
580 dec_mm_counter(mm, MM_FILEPAGES); in gmap_zap_swap_entry()
602 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); in __gmap_zap()
614 gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm); in __gmap_zap()
615 pte_clear(gmap->mm, vmaddr, ptep); in __gmap_zap()
628 down_read(&gmap->mm->mmap_sem); in gmap_discard()
639 vma = find_vma(gmap->mm, vmaddr); in gmap_discard()
643 up_read(&gmap->mm->mmap_sem); in gmap_discard()
695 down_read(&gmap->mm->mmap_sem); in gmap_ipte_notify()
704 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) { in gmap_ipte_notify()
712 ptep = get_locked_pte(gmap->mm, addr, &ptl); in gmap_ipte_notify()
725 up_read(&gmap->mm->mmap_sem); in gmap_ipte_notify()
739 void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte) in gmap_do_ipte_notify() argument
749 list_for_each_entry(gmap, &mm->context.gmap_list, list) { in gmap_do_ipte_notify()
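
In the group above, __gmap_zap() clears a single guest-backed PTE and fixes up the mm counters via gmap_zap_swap_entry(), gmap_discard() drops whole guest ranges under mmap_sem, and gmap_ipte_notify()/gmap_do_ipte_notify() implement invalidation notification for ranges a hypervisor wants to track. A small hedged sketch combining notification and discard; the exact prototypes of gmap_ipte_notify() and gmap_discard() are not shown in this listing and are assumptions here:

#include <linux/mm.h>
#include <asm/pgtable.h>        /* assumed home of the gmap_* prototypes */

/* Hedged sketch: ask to be notified before a guest page is invalidated,
 * then later drop that guest range entirely. */
static int example_track_then_drop(struct gmap *gmap, unsigned long gaddr)
{
        int rc;

        /* Marks the PTEs backing [gaddr, gaddr + PAGE_SIZE) so that a later
         * invalidation ends up in gmap_do_ipte_notify(), which walks
         * mm->context.gmap_list (listed above) and calls the notifier. */
        rc = gmap_ipte_notify(gmap, gaddr, PAGE_SIZE);
        if (rc)
                return rc;

        /* Unmaps the host pages backing the range; it takes mmap_sem for
         * reading itself, per the listed lines. */
        gmap_discard(gmap, gaddr, gaddr + PAGE_SIZE);
        return 0;
}
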
762 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, in set_guest_storage_key() argument
769 down_read(&mm->mmap_sem); in set_guest_storage_key()
771 ptep = get_locked_pte(mm, addr, &ptl); in set_guest_storage_key()
773 up_read(&mm->mmap_sem); in set_guest_storage_key()
779 if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) { in set_guest_storage_key()
780 up_read(&mm->mmap_sem); in set_guest_storage_key()
810 up_read(&mm->mmap_sem); in set_guest_storage_key()
815 unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr) in get_guest_storage_key() argument
823 down_read(&mm->mmap_sem); in get_guest_storage_key()
824 ptep = get_locked_pte(mm, addr, &ptl); in get_guest_storage_key()
826 up_read(&mm->mmap_sem); in get_guest_storage_key()
849 up_read(&mm->mmap_sem); in get_guest_storage_key()
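
set_guest_storage_key()/get_guest_storage_key() write and read the storage key for the page backing a user (guest) address, taking mmap_sem and the PTE lock as the listed lines show, and faulting the page in writable if needed. The tail of set_guest_storage_key()'s parameter list is not visible in this listing; the key value and the final no-quiesce flag used below are assumptions of this sketch:

#include <linux/mm.h>
#include <asm/pgtable.h>        /* assumed home of the storage-key prototypes */

/* Hedged sketch: write a storage key for one guest-backed page, read it back. */
static int example_storage_key_roundtrip(struct mm_struct *mm, unsigned long addr)
{
        unsigned long key;
        int rc;

        rc = set_guest_storage_key(mm, addr, 0x30 /* ACC=3, F/R/C clear */, false);
        if (rc)
                return rc;

        key = get_guest_storage_key(mm, addr);
        (void)key;      /* ACC/F plus whatever R/C state has accumulated since the set */
        return 0;
}
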
890 static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table, in gmap_unlink() argument
911 unsigned long *page_table_alloc(struct mm_struct *mm) in page_table_alloc() argument
918 if (!mm_alloc_pgste(mm)) { in page_table_alloc()
920 spin_lock_bh(&mm->context.list_lock); in page_table_alloc()
921 if (!list_empty(&mm->context.pgtable_list)) { in page_table_alloc()
922 page = list_first_entry(&mm->context.pgtable_list, in page_table_alloc()
935 spin_unlock_bh(&mm->context.list_lock); in page_table_alloc()
949 if (mm_alloc_pgste(mm)) { in page_table_alloc()
958 spin_lock_bh(&mm->context.list_lock); in page_table_alloc()
959 list_add(&page->lru, &mm->context.pgtable_list); in page_table_alloc()
960 spin_unlock_bh(&mm->context.list_lock); in page_table_alloc()
965 void page_table_free(struct mm_struct *mm, unsigned long *table) in page_table_free() argument
971 if (!mm_alloc_pgste(mm)) { in page_table_free()
974 spin_lock_bh(&mm->context.list_lock); in page_table_free()
977 list_add(&page->lru, &mm->context.pgtable_list); in page_table_free()
980 spin_unlock_bh(&mm->context.list_lock); in page_table_free()
993 struct mm_struct *mm; in page_table_free_rcu() local
997 mm = tlb->mm; in page_table_free_rcu()
999 if (mm_alloc_pgste(mm)) { in page_table_free_rcu()
1000 gmap_unlink(mm, table, vmaddr); in page_table_free_rcu()
1006 spin_lock_bh(&mm->context.list_lock); in page_table_free_rcu()
1009 list_add_tail(&page->lru, &mm->context.pgtable_list); in page_table_free_rcu()
1012 spin_unlock_bh(&mm->context.list_lock); in page_table_free_rcu()
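
page_table_alloc()/page_table_free()/page_table_free_rcu() manage s390's 2 KB page-table fragments: without PGSTEs two fragments share one 4 KB page via mm->context.pgtable_list, with PGSTEs (mm_alloc_pgste()) each table gets a full page, and the RCU variant also unlinks the table from any gmap. Normal code reaches these through the generic pte_alloc/pte_free helpers; the direct pairing below is only a hedged illustration:

#include <linux/mm.h>
#include <asm/pgalloc.h>        /* assumed home of the page_table_alloc/free prototypes */

/* Hedged sketch: allocate one PTE-table fragment and give it back. */
static int example_pte_table_cycle(struct mm_struct *mm)
{
        unsigned long *table;

        table = page_table_alloc(mm);   /* 2 KB fragment, or 4 KB with PGSTEs */
        if (!table)
                return -ENOMEM;

        /* ... this is where the table would be hooked into a PMD entry ... */

        page_table_free(mm, table);     /* returns the fragment to pgtable_list */
        return 0;
}
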
1085 tlb->mm->context.flush_mm = 1; in tlb_remove_table()
1090 __tlb_flush_mm_lazy(tlb->mm); in tlb_remove_table()
1110 static inline void thp_split_mm(struct mm_struct *mm) in thp_split_mm() argument
1114 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) { in thp_split_mm()
1119 mm->def_flags |= VM_NOHUGEPAGE; in thp_split_mm()
1122 static inline void thp_split_mm(struct mm_struct *mm) in thp_split_mm() argument
1132 struct mm_struct *mm = current->mm; in s390_enable_sie() local
1135 if (mm_has_pgste(mm)) in s390_enable_sie()
1138 if (!mm_alloc_pgste(mm)) in s390_enable_sie()
1140 down_write(&mm->mmap_sem); in s390_enable_sie()
1141 mm->context.has_pgste = 1; in s390_enable_sie()
1143 thp_split_mm(mm); in s390_enable_sie()
1144 up_write(&mm->mmap_sem); in s390_enable_sie()
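
s390_enable_sie() converts current->mm for SIE (guest) use: it returns immediately if PGSTEs are already present, fails unless the mm allocates full 4 KB page tables (mm_alloc_pgste()), and otherwise sets context.has_pgste and splits THP mappings via thp_split_mm() under mmap_sem, all as listed above. A hedged sketch of the hypervisor-side call site; the surrounding function and the void prototype are assumptions here:

#include <linux/mm.h>
#include <asm/pgtable.h>        /* assumed home of the s390_enable_sie() prototype */

/* Hedged sketch: prepare the current process' mm before creating a guest. */
static int example_prepare_for_guest(void)
{
        int rc;

        rc = s390_enable_sie();         /* returns 0 if PGSTEs are already enabled */
        if (rc)
                return rc;              /* the mm lacks PGSTE-capable page tables */

        /* gmap_alloc() and friends (listed earlier) can be used from here on. */
        return 0;
}
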
1166 ptep_flush_direct(walk->mm, addr, pte); in __s390_enable_skey()
1182 struct mm_struct *mm = current->mm; in s390_enable_skey() local
1186 down_write(&mm->mmap_sem); in s390_enable_skey()
1187 if (mm_use_skey(mm)) in s390_enable_skey()
1190 mm->context.use_skey = 1; in s390_enable_skey()
1191 for (vma = mm->mmap; vma; vma = vma->vm_next) { in s390_enable_skey()
1194 mm->context.use_skey = 0; in s390_enable_skey()
1199 mm->def_flags &= ~VM_MERGEABLE; in s390_enable_skey()
1201 walk.mm = mm; in s390_enable_skey()
1205 up_write(&mm->mmap_sem); in s390_enable_skey()
1224 void s390_reset_cmma(struct mm_struct *mm) in s390_reset_cmma() argument
1228 down_write(&mm->mmap_sem); in s390_reset_cmma()
1229 walk.mm = mm; in s390_reset_cmma()
1231 up_write(&mm->mmap_sem); in s390_reset_cmma()
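
s390_enable_skey() switches current->mm to real storage-key handling: it sets context.use_skey, clears VM_MERGEABLE from mm->def_flags, and walks every mapped page (ptep_flush_direct() in __s390_enable_skey(), listed above) under mmap_sem; s390_reset_cmma() walks the page tables to clear collaborative-memory (CMMA) state, e.g. on guest reset. A hedged sketch of how the two might be driven; the wrapper and the int-returning prototype of s390_enable_skey() are assumptions here:

#include <linux/mm.h>
#include <asm/pgtable.h>        /* assumed home of the skey/CMMA prototypes */

/* Hedged sketch: lazily enable storage keys, reset CMMA state later. */
static int example_keyops(struct mm_struct *mm)
{
        int rc;

        rc = s390_enable_skey();        /* acts on current->mm; no-op once use_skey is set */
        if (rc)
                return rc;

        /* On guest reset, forget the page-usage states the guest had set. */
        s390_reset_cmma(mm);
        return 0;
}
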
1244 pte = get_locked_pte(gmap->mm, address, &ptl); in gmap_test_and_clear_dirty()
1248 if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte)) in gmap_test_and_clear_dirty()
1298 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, in pgtable_trans_huge_deposit() argument
1303 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_deposit()
1306 if (!pmd_huge_pte(mm, pmdp)) in pgtable_trans_huge_deposit()
1309 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); in pgtable_trans_huge_deposit()
1310 pmd_huge_pte(mm, pmdp) = pgtable; in pgtable_trans_huge_deposit()
1313 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) in pgtable_trans_huge_withdraw() argument
1319 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_withdraw()
1322 pgtable = pmd_huge_pte(mm, pmdp); in pgtable_trans_huge_withdraw()
1325 pmd_huge_pte(mm, pmdp) = NULL; in pgtable_trans_huge_withdraw()
1327 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; in pgtable_trans_huge_withdraw()
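
pgtable_trans_huge_deposit()/pgtable_trans_huge_withdraw() are s390's implementation of the generic THP deposit/withdraw protocol: a preallocated PTE table is parked on a list threaded through pmd_huge_pte() while a huge PMD is installed, and taken back when the PMD is split or zapped. Both assert the PMD lock, as the listed lines show. The callers below are hedged illustrations of that protocol, not the actual mm/huge_memory.c code:

#include <linux/mm.h>

/* Hedged sketch: park a spare PTE table while a huge PMD is in place ... */
static void example_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable)
{
        spinlock_t *ptl = pmd_lock(mm, pmdp);   /* both helpers assert this lock */

        pgtable_trans_huge_deposit(mm, pmdp, pgtable);
        spin_unlock(ptl);
}

/* ... and take it back when the huge PMD goes away. */
static pgtable_t example_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        spinlock_t *ptl = pmd_lock(mm, pmdp);
        pgtable_t pgtable = pgtable_trans_huge_withdraw(mm, pmdp);

        spin_unlock(ptl);
        return pgtable;
}
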