Lines Matching refs:gmap
111 struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit) in gmap_alloc()
113 struct gmap *gmap; in gmap_alloc() local
135 gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL); in gmap_alloc()
136 if (!gmap) in gmap_alloc()
138 INIT_LIST_HEAD(&gmap->crst_list); in gmap_alloc()
139 INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL); in gmap_alloc()
140 INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC); in gmap_alloc()
141 spin_lock_init(&gmap->guest_table_lock); in gmap_alloc()
142 gmap->mm = mm; in gmap_alloc()
147 list_add(&page->lru, &gmap->crst_list); in gmap_alloc()
150 gmap->table = table; in gmap_alloc()
151 gmap->asce = atype | _ASCE_TABLE_LENGTH | in gmap_alloc()
153 gmap->asce_end = limit; in gmap_alloc()
155 list_add(&gmap->list, &mm->context.gmap_list); in gmap_alloc()
157 return gmap; in gmap_alloc()
160 kfree(gmap); in gmap_alloc()
166 static void gmap_flush_tlb(struct gmap *gmap) in gmap_flush_tlb() argument
169 __tlb_flush_asce(gmap->mm, gmap->asce); in gmap_flush_tlb()
202 void gmap_free(struct gmap *gmap) in gmap_free() argument
208 __tlb_flush_asce(gmap->mm, gmap->asce); in gmap_free()
213 list_for_each_entry_safe(page, next, &gmap->crst_list, lru) in gmap_free()
215 gmap_radix_tree_free(&gmap->guest_to_host); in gmap_free()
216 gmap_radix_tree_free(&gmap->host_to_guest); in gmap_free()
217 down_write(&gmap->mm->mmap_sem); in gmap_free()
218 list_del(&gmap->list); in gmap_free()
219 up_write(&gmap->mm->mmap_sem); in gmap_free()
220 kfree(gmap); in gmap_free()
228 void gmap_enable(struct gmap *gmap) in gmap_enable() argument
230 S390_lowcore.gmap = (unsigned long) gmap; in gmap_enable()
238 void gmap_disable(struct gmap *gmap) in gmap_disable() argument
240 S390_lowcore.gmap = 0UL; in gmap_disable()
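The entries above cover the lifetime of a guest address space: gmap_alloc() builds the struct, the guest_to_host/host_to_guest radix trees and the top-level crst table and derives the ASCE from the requested limit; gmap_flush_tlb() and gmap_free() flush that ASCE and release the crst pages and radix trees, unlinking the gmap from mm->context.gmap_list; gmap_enable() and gmap_disable() merely publish or clear the pointer in S390_lowcore.gmap for the running CPU. A minimal lifecycle sketch, not taken from the listing, assuming the gmap prototypes from the arch/s390 headers are in scope and that gmap_alloc() returns NULL on failure in this kernel version (the guest-execution step is only a placeholder):

/* Hypothetical lifecycle sketch; errno.h/mm_types.h and the gmap
 * declarations are assumed to be included. */
static int gmap_lifecycle_demo(struct mm_struct *mm, unsigned long guest_size)
{
	struct gmap *gmap;

	gmap = gmap_alloc(mm, guest_size - 1);	/* limit = highest guest address */
	if (!gmap)
		return -ENOMEM;

	gmap_enable(gmap);	/* make this the active gmap for the CPU */
	/* ... enter SIE / run guest code here ... */
	gmap_disable(gmap);	/* clear S390_lowcore.gmap again */

	gmap_free(gmap);	/* flush the ASCE and release all tables */
	return 0;
}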
247 static int gmap_alloc_table(struct gmap *gmap, unsigned long *table, in gmap_alloc_table() argument
259 spin_lock(&gmap->mm->page_table_lock); in gmap_alloc_table()
261 list_add(&page->lru, &gmap->crst_list); in gmap_alloc_table()
267 spin_unlock(&gmap->mm->page_table_lock); in gmap_alloc_table()
298 static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr) in __gmap_unlink_by_vmaddr() argument
303 spin_lock(&gmap->guest_table_lock); in __gmap_unlink_by_vmaddr()
304 entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT); in __gmap_unlink_by_vmaddr()
309 spin_unlock(&gmap->guest_table_lock); in __gmap_unlink_by_vmaddr()
320 static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr) in __gmap_unmap_by_gaddr() argument
324 vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host, in __gmap_unmap_by_gaddr()
326 return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0; in __gmap_unmap_by_gaddr()
337 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) in gmap_unmap_segment() argument
348 down_write(&gmap->mm->mmap_sem); in gmap_unmap_segment()
350 flush |= __gmap_unmap_by_gaddr(gmap, to + off); in gmap_unmap_segment()
351 up_write(&gmap->mm->mmap_sem); in gmap_unmap_segment()
353 gmap_flush_tlb(gmap); in gmap_unmap_segment()
367 int gmap_map_segment(struct gmap *gmap, unsigned long from, in gmap_map_segment() argument
376 from + len > TASK_MAX_SIZE || to + len > gmap->asce_end) in gmap_map_segment()
380 down_write(&gmap->mm->mmap_sem); in gmap_map_segment()
383 flush |= __gmap_unmap_by_gaddr(gmap, to + off); in gmap_map_segment()
385 if (radix_tree_insert(&gmap->guest_to_host, in gmap_map_segment()
390 up_write(&gmap->mm->mmap_sem); in gmap_map_segment()
392 gmap_flush_tlb(gmap); in gmap_map_segment()
395 gmap_unmap_segment(gmap, to, len); in gmap_map_segment()
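gmap_unmap_segment() and gmap_map_segment() maintain the guest_to_host radix tree at segment granularity: mapping first removes any previous entry for each segment, then inserts the new host address, and unwinds with gmap_unmap_segment() if radix_tree_insert() fails; both flush the guest ASCE when an existing mapping was changed. A hedged usage sketch, using only the functions listed above:

/* Hypothetical sketch: back 'len' bytes of guest memory at guest address
 * 'gaddr' with the host (userspace) mapping at 'host_addr'.  All three
 * values must be segment (1 MB) aligned; gmap_map_segment() rejects
 * unaligned or out-of-range requests with -EINVAL.  The reverse
 * operation is gmap_unmap_segment(gmap, gaddr, len). */
static int back_guest_ram(struct gmap *gmap, unsigned long host_addr,
			  unsigned long gaddr, unsigned long len)
{
	if ((host_addr | gaddr | len) & (PMD_SIZE - 1))
		return -EINVAL;
	return gmap_map_segment(gmap, host_addr, gaddr, len);
}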
411 unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr) in __gmap_translate() argument
416 radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT); in __gmap_translate()
430 unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr) in gmap_translate() argument
434 down_read(&gmap->mm->mmap_sem); in gmap_translate()
435 rc = __gmap_translate(gmap, gaddr); in gmap_translate()
436 up_read(&gmap->mm->mmap_sem); in gmap_translate()
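__gmap_translate() looks the guest address up in the guest_to_host radix tree and expects the caller to hold mmap_sem; gmap_translate() is the wrapper that takes mmap_sem for read around the lookup and returns the host (user space) address, or -EFAULT encoded as an unsigned long when the guest address has no mapping. A hedged sketch:

/* Hypothetical sketch: check whether a guest address currently has a
 * host mapping registered in the gmap. */
static bool guest_addr_is_mapped(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr = gmap_translate(gmap, gaddr);

	return !IS_ERR_VALUE(vmaddr);	/* -EFAULT means no mapping */
}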
450 struct gmap *gmap; in gmap_unlink() local
453 list_for_each_entry(gmap, &mm->context.gmap_list, list) { in gmap_unlink()
454 flush = __gmap_unlink_by_vmaddr(gmap, vmaddr); in gmap_unlink()
456 gmap_flush_tlb(gmap); in gmap_unlink()
471 int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) in __gmap_link() argument
482 table = gmap->table; in __gmap_link()
483 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) { in __gmap_link()
486 gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY, in __gmap_link()
491 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) { in __gmap_link()
494 gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY, in __gmap_link()
499 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) { in __gmap_link()
502 gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY, in __gmap_link()
509 mm = gmap->mm; in __gmap_link()
524 spin_lock(&gmap->guest_table_lock); in __gmap_link()
526 rc = radix_tree_insert(&gmap->host_to_guest, in __gmap_link()
532 spin_unlock(&gmap->guest_table_lock); in __gmap_link()
547 int gmap_fault(struct gmap *gmap, unsigned long gaddr, in gmap_fault() argument
553 down_read(&gmap->mm->mmap_sem); in gmap_fault()
554 vmaddr = __gmap_translate(gmap, gaddr); in gmap_fault()
559 if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) { in gmap_fault()
563 rc = __gmap_link(gmap, gaddr, vmaddr); in gmap_fault()
565 up_read(&gmap->mm->mmap_sem); in gmap_fault()
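gmap_fault() combines the earlier pieces: under mmap_sem held for read it resolves the guest address with __gmap_translate(), faults the host page in through fixup_user_fault() with the caller's fault flags, and finally wires the host page table into the guest ASCE with __gmap_link(). A hedged sketch of a caller resolving a guest fault before re-entering the guest:

/* Hypothetical sketch: resolve a guest page fault.  FAULT_FLAG_WRITE is
 * passed straight through to fixup_user_fault(), as the listing shows;
 * the return value is 0 on success or a negative errno. */
static int resolve_guest_fault(struct gmap *gmap, unsigned long gaddr,
			       bool is_write)
{
	return gmap_fault(gmap, gaddr, is_write ? FAULT_FLAG_WRITE : 0);
}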
588 void __gmap_zap(struct gmap *gmap, unsigned long gaddr) in __gmap_zap() argument
596 vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host, in __gmap_zap()
602 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); in __gmap_zap()
614 gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm); in __gmap_zap()
615 pte_clear(gmap->mm, vmaddr, ptep); in __gmap_zap()
623 void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to) in gmap_discard() argument
628 down_read(&gmap->mm->mmap_sem); in gmap_discard()
633 radix_tree_lookup(&gmap->guest_to_host, in gmap_discard()
639 vma = find_vma(gmap->mm, vmaddr); in gmap_discard()
643 up_read(&gmap->mm->mmap_sem); in gmap_discard()
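__gmap_zap() clears the host PTE behind a single guest page (handling swap entries on the way), while gmap_discard() walks a whole guest range segment by segment, translates each piece through guest_to_host, finds the backing VMA and drops the host pages behind it. A hedged sketch; the caller and its semantics are an assumption for illustration:

/* Hypothetical sketch: release the host memory backing a guest range,
 * e.g. when the guest signals it no longer needs those pages.  Both
 * arguments of gmap_discard() are guest addresses, with the end address
 * treated as exclusive. */
static void release_guest_range(struct gmap *gmap, unsigned long gaddr,
				unsigned long len)
{
	gmap_discard(gmap, gaddr, gaddr + len);
}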
685 int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len) in gmap_ipte_notify() argument
695 down_read(&gmap->mm->mmap_sem); in gmap_ipte_notify()
698 addr = __gmap_translate(gmap, gaddr); in gmap_ipte_notify()
704 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) { in gmap_ipte_notify()
708 rc = __gmap_link(gmap, gaddr, addr); in gmap_ipte_notify()
712 ptep = get_locked_pte(gmap->mm, addr, &ptl); in gmap_ipte_notify()
725 up_read(&gmap->mm->mmap_sem); in gmap_ipte_notify()
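gmap_ipte_notify() walks the requested guest range page by page: it translates each page, faults it in writable, links it with __gmap_link(), and then marks the PTE for invalidation notification under the page table lock, so that gmap_do_ipte_notify() (the next group of matches) can call every registered notifier with the guest address once the host invalidates the mapping. A hedged sketch that arms notification for one guest page:

/* Hypothetical sketch: request a notifier callback when the host
 * invalidates the mapping of a single guest page, e.g. a guest control
 * block the host keeps a shadow of.  gmap_ipte_notify() requires a
 * page-aligned address and length. */
static int watch_guest_page(struct gmap *gmap, unsigned long gaddr)
{
	return gmap_ipte_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE);
}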
744 struct gmap *gmap; in gmap_do_ipte_notify() local
749 list_for_each_entry(gmap, &mm->context.gmap_list, list) { in gmap_do_ipte_notify()
750 table = radix_tree_lookup(&gmap->host_to_guest, in gmap_do_ipte_notify()
756 nb->notifier_call(gmap, gaddr); in gmap_do_ipte_notify()
1238 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap) in gmap_test_and_clear_dirty() argument
1244 pte = get_locked_pte(gmap->mm, address, &ptl); in gmap_test_and_clear_dirty()
1248 if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte)) in gmap_test_and_clear_dirty()
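gmap_test_and_clear_dirty() takes a host (user space) address, locks the PTE and test-and-clears the per-page user dirty state, which is how dirty logging learns which guest pages changed since the last scan. A hedged sketch of a scan over one guest segment; the mark_dirty() callback is a made-up placeholder and the locking around the walk is simplified:

/* Hypothetical sketch: find the dirty pages in one 1 MB guest segment.
 * Only gmap_translate() and gmap_test_and_clear_dirty() come from the
 * listing above; mark_dirty() is an illustrative callback. */
static void scan_segment_dirty(struct gmap *gmap, unsigned long seg_gaddr,
			       void (*mark_dirty)(unsigned long gaddr))
{
	unsigned long gaddr, vmaddr;

	for (gaddr = seg_gaddr; gaddr < seg_gaddr + PMD_SIZE;
	     gaddr += PAGE_SIZE) {
		vmaddr = gmap_translate(gmap, gaddr);	/* host address or -EFAULT */
		if (IS_ERR_VALUE(vmaddr))
			continue;
		if (gmap_test_and_clear_dirty(vmaddr, gmap))
			mark_dirty(gaddr);
	}
}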