Lines matching refs:gmap
147 struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit) in gmap_alloc()
149 struct gmap *gmap; in gmap_alloc() local
171 gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL); in gmap_alloc()
172 if (!gmap) in gmap_alloc()
174 INIT_LIST_HEAD(&gmap->crst_list); in gmap_alloc()
175 INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL); in gmap_alloc()
176 INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC); in gmap_alloc()
177 spin_lock_init(&gmap->guest_table_lock); in gmap_alloc()
178 gmap->mm = mm; in gmap_alloc()
183 list_add(&page->lru, &gmap->crst_list); in gmap_alloc()
186 gmap->table = table; in gmap_alloc()
187 gmap->asce = atype | _ASCE_TABLE_LENGTH | in gmap_alloc()
189 gmap->asce_end = limit; in gmap_alloc()
191 list_add(&gmap->list, &mm->context.gmap_list); in gmap_alloc()
193 return gmap; in gmap_alloc()
196 kfree(gmap); in gmap_alloc()
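
Taken together, the gmap_alloc() matches show the constructor pattern: zero-allocate the struct, initialize the CRST page list, the forward (guest_to_host) and reverse (host_to_guest) radix trees and the guest_table_lock, attach the host mm, and publish the new gmap on mm->context.gmap_list. A minimal sketch of that shape follows; like all sketches below it assumes kernel build context (slab, radix-tree and s390 pgtable/gmap headers), and the ASCE-type selection, top-level CRST table allocation and error unwinding that the listing skips are only summarized in comments.

    /* Sketch reconstructed from the matches above, not the full function. */
    struct gmap *gmap_alloc_sketch(struct mm_struct *mm, unsigned long limit)
    {
        struct gmap *gmap;

        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
            return NULL;
        INIT_LIST_HEAD(&gmap->crst_list);
        INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
        INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
        spin_lock_init(&gmap->guest_table_lock);
        gmap->mm = mm;
        gmap->asce_end = limit;
        /* ... allocate the top-level CRST table, add its page to crst_list,
         *     derive gmap->table and gmap->asce from the chosen ASCE type ... */
        list_add(&gmap->list, &mm->context.gmap_list);
        return gmap;
    }
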
202 static void gmap_flush_tlb(struct gmap *gmap) in gmap_flush_tlb() argument
205 __tlb_flush_asce(gmap->mm, gmap->asce); in gmap_flush_tlb()
238 void gmap_free(struct gmap *gmap) in gmap_free() argument
244 __tlb_flush_asce(gmap->mm, gmap->asce); in gmap_free()
249 list_for_each_entry_safe(page, next, &gmap->crst_list, lru) in gmap_free()
251 gmap_radix_tree_free(&gmap->guest_to_host); in gmap_free()
252 gmap_radix_tree_free(&gmap->host_to_guest); in gmap_free()
253 down_write(&gmap->mm->mmap_sem); in gmap_free()
254 list_del(&gmap->list); in gmap_free()
255 up_write(&gmap->mm->mmap_sem); in gmap_free()
256 kfree(gmap); in gmap_free()
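
The gmap_flush_tlb() and gmap_free() matches outline teardown: flush the guest ASCE, return every CRST page collected on crst_list, empty both radix trees via the gmap_radix_tree_free() helper, unlink the gmap from the mm under the mmap_sem write lock, and free the structure. A sketch of that order; the order-2 (16 KB CRST table) page size and the machine-dependent flush variant are assumptions beyond what the matches show.

    void gmap_free_sketch(struct gmap *gmap)
    {
        struct page *page, *next;

        /* Flush TLB entries tagged with this guest ASCE (the real code may
         * pick a different flush when IDTE is not available). */
        __tlb_flush_asce(gmap->mm, gmap->asce);
        /* Return all region/segment (CRST) tables; order 2 is an assumption. */
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
            __free_pages(page, 2);
        gmap_radix_tree_free(&gmap->guest_to_host);
        gmap_radix_tree_free(&gmap->host_to_guest);
        /* Unpublish the gmap from the host mm. */
        down_write(&gmap->mm->mmap_sem);
        list_del(&gmap->list);
        up_write(&gmap->mm->mmap_sem);
        kfree(gmap);
    }
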
264 void gmap_enable(struct gmap *gmap) in gmap_enable() argument
266 S390_lowcore.gmap = (unsigned long) gmap; in gmap_enable()
274 void gmap_disable(struct gmap *gmap) in gmap_disable() argument
276 S390_lowcore.gmap = 0UL; in gmap_disable()
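
gmap_enable() and gmap_disable() only swap the per-CPU lowcore pointer that tells the fault path which gmap is currently active. A hypothetical caller pattern; the run_guest_once() name and the SIE step are illustrative, not from the source.

    static void run_guest_once(struct gmap *gmap)
    {
        gmap_enable(gmap);      /* S390_lowcore.gmap = (unsigned long) gmap */
        /* ... enter SIE / run the vcpu on this CPU (not shown) ... */
        gmap_disable(gmap);     /* S390_lowcore.gmap = 0UL */
    }
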
283 static int gmap_alloc_table(struct gmap *gmap, unsigned long *table, in gmap_alloc_table() argument
295 spin_lock(&gmap->mm->page_table_lock); in gmap_alloc_table()
297 list_add(&page->lru, &gmap->crst_list); in gmap_alloc_table()
303 spin_unlock(&gmap->mm->page_table_lock); in gmap_alloc_table()
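
gmap_alloc_table() is the on-demand allocator for lower-level CRST tables: the new page is linked into crst_list and hooked into the parent entry only while mm->page_table_lock is held, so concurrent walkers never see a half-built table. A stripped-down sketch of that pattern; the order-2 allocation, the _REGION_ENTRY_INVALID test and the lost-race cleanup are assumptions filled in around the matched lines.

    static int gmap_alloc_table_sketch(struct gmap *gmap, unsigned long *table,
                                       unsigned long init, unsigned long gaddr)
    {
        struct page *page;

        page = alloc_pages(GFP_KERNEL, 2);      /* one 16 KB CRST table */
        if (!page)
            return -ENOMEM;
        /* ... fill the new table with 'init' entries (not shown) ... */
        spin_lock(&gmap->mm->page_table_lock);
        if (*table & _REGION_ENTRY_INVALID) {
            list_add(&page->lru, &gmap->crst_list);
            /* ... point *table at the new table ... */
            page = NULL;
        }
        spin_unlock(&gmap->mm->page_table_lock);
        if (page)       /* lost the race: another walker installed a table */
            __free_pages(page, 2);
        return 0;
    }
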
334 static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr) in __gmap_unlink_by_vmaddr() argument
339 spin_lock(&gmap->guest_table_lock); in __gmap_unlink_by_vmaddr()
340 entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT); in __gmap_unlink_by_vmaddr()
345 spin_unlock(&gmap->guest_table_lock); in __gmap_unlink_by_vmaddr()
356 static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr) in __gmap_unmap_by_gaddr() argument
360 vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host, in __gmap_unmap_by_gaddr()
362 return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0; in __gmap_unmap_by_gaddr()
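
These two helpers keep the pair of radix trees symmetric: guest_to_host is keyed by the guest segment index (gaddr >> PMD_SHIFT) and holds the host address, host_to_guest is keyed by the host segment index and appears to hold a pointer back into the guest segment table, and only the reverse tree is protected by guest_table_lock. A combined sketch of the delete path, equivalent to __gmap_unmap_by_gaddr() falling through to __gmap_unlink_by_vmaddr():

    /* Sketch: dropping a guest mapping must remove both tree entries. */
    static int gmap_drop_mapping_sketch(struct gmap *gmap, unsigned long gaddr)
    {
        unsigned long vmaddr;
        void *entry;

        vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
                                                   gaddr >> PMD_SHIFT);
        if (!vmaddr)
            return 0;
        spin_lock(&gmap->guest_table_lock);
        entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
        /* ... the real helper also invalidates the entry it found ... */
        spin_unlock(&gmap->guest_table_lock);
        return entry != NULL;   /* non-zero: caller should flush the guest TLB */
    }
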
373 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) in gmap_unmap_segment() argument
384 down_write(&gmap->mm->mmap_sem); in gmap_unmap_segment()
386 flush |= __gmap_unmap_by_gaddr(gmap, to + off); in gmap_unmap_segment()
387 up_write(&gmap->mm->mmap_sem); in gmap_unmap_segment()
389 gmap_flush_tlb(gmap); in gmap_unmap_segment()
403 int gmap_map_segment(struct gmap *gmap, unsigned long from, in gmap_map_segment() argument
412 from + len > TASK_MAX_SIZE || to + len > gmap->asce_end) in gmap_map_segment()
416 down_write(&gmap->mm->mmap_sem); in gmap_map_segment()
419 flush |= __gmap_unmap_by_gaddr(gmap, to + off); in gmap_map_segment()
421 if (radix_tree_insert(&gmap->guest_to_host, in gmap_map_segment()
426 up_write(&gmap->mm->mmap_sem); in gmap_map_segment()
428 gmap_flush_tlb(gmap); in gmap_map_segment()
431 gmap_unmap_segment(gmap, to, len); in gmap_map_segment()
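
gmap_unmap_segment() and gmap_map_segment() are the exported, segment-granular (1 MB, PMD_SHIFT) interface: both take the mmap_sem write lock, walk the range in segment steps, update guest_to_host, and flush the guest TLB only if an existing entry was removed; gmap_map_segment() additionally range-checks against TASK_MAX_SIZE and gmap->asce_end and unwinds with gmap_unmap_segment() when an insert fails. A hypothetical caller; the names and the alignment requirement stated in the comment are assumptions beyond the matches.

    static int remap_guest_segment(struct gmap *gmap, unsigned long from,
                                   unsigned long to, unsigned long len)
    {
        int rc;

        /* 'from', 'to' and 'len' are assumed segment (1 MB) aligned. */
        rc = gmap_map_segment(gmap, from, to, len);
        if (rc)
            return rc;          /* e.g. -EINVAL or -ENOMEM */
        /* ... run the guest against the new mapping ... */
        return gmap_unmap_segment(gmap, to, len);
    }
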
447 unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr) in __gmap_translate() argument
452 radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT); in __gmap_translate()
466 unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr) in gmap_translate() argument
470 down_read(&gmap->mm->mmap_sem); in gmap_translate()
471 rc = __gmap_translate(gmap, gaddr); in gmap_translate()
472 up_read(&gmap->mm->mmap_sem); in gmap_translate()
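
__gmap_translate() is the lock-free lookup (mmap_sem must already be held): it reads guest_to_host at the guest segment index and presumably adds the offset within the segment, returning a negative error when nothing is mapped; gmap_translate() is the convenience wrapper that takes mmap_sem for reading around it. A hypothetical use of the wrapper; the -EFAULT error convention is an assumption.

    static int guest_addr_is_mapped(struct gmap *gmap, unsigned long gaddr)
    {
        unsigned long vmaddr = gmap_translate(gmap, gaddr);

        /* A negative value (assumed -EFAULT) means no host mapping. */
        return IS_ERR_VALUE(vmaddr) ? 0 : 1;
    }
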
486 struct gmap *gmap; in gmap_unlink() local
489 list_for_each_entry(gmap, &mm->context.gmap_list, list) { in gmap_unlink()
490 flush = __gmap_unlink_by_vmaddr(gmap, vmaddr); in gmap_unlink()
492 gmap_flush_tlb(gmap); in gmap_unlink()
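
gmap_unlink() is the host-side hook: when the host mm tears down a page table, every gmap attached to mm->context.gmap_list drops its reverse mapping for that host address, and the guest TLB is flushed for each gmap that actually had one. A sketch of the loop; the real function's parameter list is longer than shown here.

    static void gmap_unlink_sketch(struct mm_struct *mm, unsigned long vmaddr)
    {
        struct gmap *gmap;
        int flush;

        list_for_each_entry(gmap, &mm->context.gmap_list, list) {
            flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
            if (flush)
                gmap_flush_tlb(gmap);
        }
    }
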
507 int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) in __gmap_link() argument
518 table = gmap->table; in __gmap_link()
519 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) { in __gmap_link()
522 gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY, in __gmap_link()
527 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) { in __gmap_link()
530 gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY, in __gmap_link()
535 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) { in __gmap_link()
538 gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY, in __gmap_link()
545 mm = gmap->mm; in __gmap_link()
560 spin_lock(&gmap->guest_table_lock); in __gmap_link()
562 rc = radix_tree_insert(&gmap->host_to_guest, in __gmap_link()
568 spin_unlock(&gmap->guest_table_lock); in __gmap_link()
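
__gmap_link() connects a guest address to a host address: starting from gmap->table it walks down the region-1/2/3 levels dictated by the ASCE type, calling gmap_alloc_table() for any level that is still empty, then (in the elided tail) resolves the host page table for vmaddr in gmap->mm and records the reverse mapping in host_to_guest under guest_table_lock. A condensed sketch of one level of that walk plus the final insert; the index arithmetic, the _REGION_ENTRY_* masks and the segment-entry update are assumptions or comments.

    static int gmap_link_sketch(struct gmap *gmap, unsigned long gaddr,
                                unsigned long vmaddr)
    {
        unsigned long *table = gmap->table;
        int rc;

        /* Region-1 level; the region-2 and region-3 levels are analogous. */
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
            /* table += region-1 index of gaddr (not shown) */
            if ((*table & _REGION_ENTRY_INVALID) &&
                gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY, gaddr))
                return -ENOMEM;
            table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        /* ... descend to the segment table, walk the host page tables for
         *     vmaddr and obtain the host pmd (not shown) ... */
        spin_lock(&gmap->guest_table_lock);
        rc = radix_tree_insert(&gmap->host_to_guest,
                               vmaddr >> PMD_SHIFT, table);
        /* ... on success, point the guest segment entry at the host page
         *     table ... */
        spin_unlock(&gmap->guest_table_lock);
        return rc;
    }
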
583 int gmap_fault(struct gmap *gmap, unsigned long gaddr, in gmap_fault() argument
589 down_read(&gmap->mm->mmap_sem); in gmap_fault()
590 vmaddr = __gmap_translate(gmap, gaddr); in gmap_fault()
595 if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) { in gmap_fault()
599 rc = __gmap_link(gmap, gaddr, vmaddr); in gmap_fault()
601 up_read(&gmap->mm->mmap_sem); in gmap_fault()
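
gmap_fault() is the fault-in path: under the mmap_sem read lock it translates the guest address, lets fixup_user_fault() resolve the host page with the given fault flags, and then re-links guest and host via __gmap_link(). A hypothetical caller for a guest write fault:

    static int handle_guest_write_fault(struct gmap *gmap, unsigned long gaddr)
    {
        /* A non-zero return (assumed -EFAULT/-ENOMEM) means the guest
         * address could not be backed by a host page. */
        return gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
    }
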
624 void __gmap_zap(struct gmap *gmap, unsigned long gaddr) in __gmap_zap() argument
632 vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host, in __gmap_zap()
638 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); in __gmap_zap()
650 gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm); in __gmap_zap()
651 pte_clear(gmap->mm, vmaddr, ptep); in __gmap_zap()
659 void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to) in gmap_discard() argument
664 down_read(&gmap->mm->mmap_sem); in gmap_discard()
669 radix_tree_lookup(&gmap->guest_to_host, in gmap_discard()
675 vma = find_vma(gmap->mm, vmaddr); in gmap_discard()
679 up_read(&gmap->mm->mmap_sem); in gmap_discard()
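
__gmap_zap() drops a single guest page: translate the guest address through guest_to_host, take the locked host pte, release a swap entry if one backs the page, and clear the pte; gmap_discard() applies this per segment over a range under the mmap_sem read lock, locating the host vma for each segment. A sketch of the single-page zap; is_swap_pte() stands in for the real code's pgste usage-state checks, and the offset arithmetic is an assumption.

    static void gmap_zap_page_sketch(struct gmap *gmap, unsigned long gaddr)
    {
        unsigned long vmaddr;
        spinlock_t *ptl;
        pte_t *ptep, pte;

        vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
                                                   gaddr >> PMD_SHIFT);
        if (!vmaddr)
            return;
        vmaddr |= gaddr & ~PMD_MASK;    /* page offset within the segment */
        ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
        if (!ptep)
            return;
        pte = *ptep;
        if (is_swap_pte(pte)) {
            /* Release the swap slot that backs this guest page. */
            gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm);
            pte_clear(gmap->mm, vmaddr, ptep);
        }
        pte_unmap_unlock(ptep, ptl);
    }
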
721 int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len) in gmap_ipte_notify() argument
731 down_read(&gmap->mm->mmap_sem); in gmap_ipte_notify()
734 addr = __gmap_translate(gmap, gaddr); in gmap_ipte_notify()
740 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) { in gmap_ipte_notify()
744 rc = __gmap_link(gmap, gaddr, addr); in gmap_ipte_notify()
748 ptep = get_locked_pte(gmap->mm, addr, &ptl); in gmap_ipte_notify()
761 up_read(&gmap->mm->mmap_sem); in gmap_ipte_notify()
780 struct gmap *gmap; in gmap_do_ipte_notify() local
785 list_for_each_entry(gmap, &mm->context.gmap_list, list) { in gmap_do_ipte_notify()
786 table = radix_tree_lookup(&gmap->host_to_guest, in gmap_do_ipte_notify()
792 nb->notifier_call(gmap, gaddr); in gmap_do_ipte_notify()
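
gmap_ipte_notify() arms invalidation notification for a guest range: it translates and faults in each page for write access, links it, and sets a notification bit on the locked pte; when the host later invalidates such a pte, gmap_do_ipte_notify() walks every gmap on mm->context.gmap_list, maps the host segment back to a guest address through host_to_guest, and invokes each registered notifier. A hypothetical consumer; only the callback signature comes from the nb->notifier_call() invocation above, the registration side is not shown in the matches.

    /* Arm notification on one guest page, then rely on the callback. */
    static int watch_guest_page(struct gmap *gmap, unsigned long gaddr)
    {
        return gmap_ipte_notify(gmap, gaddr, PAGE_SIZE);
    }

    static void my_ipte_notifier(struct gmap *gmap, unsigned long gaddr)
    {
        /* Called once the host invalidates a pte that was armed above,
         * e.g. mark cached guest state for 'gaddr' as stale. */
    }
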
1314 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap) in gmap_test_and_clear_dirty() argument
1320 pte = get_locked_pte(gmap->mm, address, &ptl); in gmap_test_and_clear_dirty()
1324 if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte)) in gmap_test_and_clear_dirty()
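
gmap_test_and_clear_dirty() supports dirty tracking: for a host (userspace) address in gmap->mm it takes the locked pte and tests and clears the user-dirty state, returning true if the page was written since the last check. A hypothetical dirty-log pass over a host address range; the bitmap layout and page-size stride are illustrative, not from the source.

    static void collect_dirty_pages(struct gmap *gmap, unsigned long start,
                                    unsigned long end, unsigned long *bitmap)
    {
        unsigned long addr;

        for (addr = start; addr < end; addr += PAGE_SIZE)
            if (gmap_test_and_clear_dirty(addr, gmap))
                set_bit((addr - start) >> PAGE_SHIFT, bitmap);
    }
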