Lines matching refs:vb (struct vmap_block *) in mm/vmalloc.c

821 	struct vmap_block *vb;  in new_vmap_block()  local
829 vb = kmalloc_node(sizeof(struct vmap_block), in new_vmap_block()
831 if (unlikely(!vb)) in new_vmap_block()
838 kfree(vb); in new_vmap_block()
844 kfree(vb); in new_vmap_block()
850 spin_lock_init(&vb->lock); in new_vmap_block()
851 vb->va = va; in new_vmap_block()
854 vb->free = VMAP_BBMAP_BITS - (1UL << order); in new_vmap_block()
855 vb->dirty = 0; in new_vmap_block()
856 vb->dirty_min = VMAP_BBMAP_BITS; in new_vmap_block()
857 vb->dirty_max = 0; in new_vmap_block()
858 INIT_LIST_HEAD(&vb->free_list); in new_vmap_block()
862 err = radix_tree_insert(&vmap_block_tree, vb_idx, vb); in new_vmap_block()
869 list_add_tail_rcu(&vb->free_list, &vbq->free); in new_vmap_block()
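
new_vmap_block() above allocates the block descriptor (the two kfree(vb) lines are its error paths), initializes the counters, then publishes the block in vmap_block_tree and on the per-CPU free list. The creating caller takes the first (1UL << order) pages immediately, which is why free does not start at VMAP_BBMAP_BITS and why the dirty range starts out empty (dirty_min > dirty_max). A minimal userspace sketch of that accounting follows; the VMAP_BBMAP_BITS value, the struct layout, and init_block() are simplifications for illustration, not the kernel definitions:

#include <assert.h>
#include <stdio.h>

#define VMAP_BBMAP_BITS 1024UL	/* pages per block; illustrative value */

struct block {
	unsigned long free;			/* pages not yet handed out */
	unsigned long dirty;			/* pages freed, awaiting flush */
	unsigned long dirty_min, dirty_max;	/* dirty window, in pages */
};

/* Mirror new_vmap_block(): the creator immediately takes the first
 * (1 << order) pages, and the dirty range starts empty (min > max). */
static void init_block(struct block *b, unsigned int order)
{
	assert(VMAP_BBMAP_BITS > (1UL << order));
	b->free = VMAP_BBMAP_BITS - (1UL << order);
	b->dirty = 0;
	b->dirty_min = VMAP_BBMAP_BITS;	/* empty range: min > max */
	b->dirty_max = 0;
}

int main(void)
{
	struct block b;
	init_block(&b, 2);	/* creator takes 4 pages */
	printf("free=%lu dirty=%lu\n", b.free, b.dirty);
	return 0;
}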
876 static void free_vmap_block(struct vmap_block *vb) in free_vmap_block() argument
881 vb_idx = addr_to_vb_idx(vb->va->va_start); in free_vmap_block()
885 BUG_ON(tmp != vb); in free_vmap_block()
887 free_vmap_area_noflush(vb->va); in free_vmap_block()
888 kfree_rcu(vb, rcu_head); in free_vmap_block()
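
free_vmap_block() deletes the block from vmap_block_tree at the index derived from its start address, asserts the tree really did map that index to this block (BUG_ON(tmp != vb)), releases the underlying vmap area lazily, and defers the kfree() with kfree_rcu() so concurrent RCU walkers of the free lists never touch freed memory. A toy model of the delete-and-check invariant, with a plain array standing in for the radix tree (tree_delete() and NSLOTS are inventions for illustration):

#include <assert.h>
#include <stddef.h>

#define NSLOTS 64

static void *tree[NSLOTS];	/* toy stand-in for vmap_block_tree */

static void *tree_delete(unsigned long idx)
{
	void *old = tree[idx % NSLOTS];
	tree[idx % NSLOTS] = NULL;
	return old;	/* previous entry, like radix_tree_delete() */
}

struct block { unsigned long idx; };

static void free_block(struct block *b)
{
	void *tmp = tree_delete(b->idx);
	assert(tmp == b);	/* same invariant as BUG_ON(tmp != vb) */
	/* the kernel then defers the actual kfree via kfree_rcu() */
}

int main(void)
{
	struct block b = { .idx = 7 };
	tree[b.idx] = &b;
	free_block(&b);
	return 0;
}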
894 struct vmap_block *vb; in purge_fragmented_blocks() local
899 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in purge_fragmented_blocks()
901 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) in purge_fragmented_blocks()
904 spin_lock(&vb->lock); in purge_fragmented_blocks()
905 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { in purge_fragmented_blocks()
906 vb->free = 0; /* prevent further allocs after releasing lock */ in purge_fragmented_blocks()
907 vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ in purge_fragmented_blocks()
908 vb->dirty_min = 0; in purge_fragmented_blocks()
909 vb->dirty_max = VMAP_BBMAP_BITS; in purge_fragmented_blocks()
911 list_del_rcu(&vb->free_list); in purge_fragmented_blocks()
913 spin_unlock(&vb->lock); in purge_fragmented_blocks()
914 list_add_tail(&vb->purge, &purge); in purge_fragmented_blocks()
916 spin_unlock(&vb->lock); in purge_fragmented_blocks()
920 list_for_each_entry_safe(vb, n_vb, &purge, purge) { in purge_fragmented_blocks()
921 list_del(&vb->purge); in purge_fragmented_blocks()
922 free_vmap_block(vb); in purge_fragmented_blocks()
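
purge_fragmented_blocks() uses an unlocked-precheck / locked-recheck pattern: the test at line 901 filters candidates cheaply during the RCU walk, and the identical test is repeated under vb->lock at line 905 before the block is claimed. Setting free = 0 and dirty = VMAP_BBMAP_BITS fences off both further allocation and a second purge, and claimed blocks are collected on a local purge list so free_vmap_block() runs only after the RCU iteration ends. The purgeability test itself, as a small standalone function (illustrative VMAP_BBMAP_BITS):

#include <stdbool.h>
#include <stdio.h>

#define VMAP_BBMAP_BITS 1024UL	/* illustrative value */

/* A block is purgeable when every page is either still free or already
 * dirty (nothing is in use), but not when it is all dirty: an all-dirty
 * block is torn down by vb_free() instead. */
static bool purgeable(unsigned long free, unsigned long dirty)
{
	return free + dirty == VMAP_BBMAP_BITS && dirty != VMAP_BBMAP_BITS;
}

int main(void)
{
	printf("%d\n", purgeable(1000, 24));	/* 1: fragmented, purge */
	printf("%d\n", purgeable(0, 1024));	/* 0: all dirty, vb_free() path */
	printf("%d\n", purgeable(512, 0));	/* 0: pages still in use */
	return 0;
}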
937 struct vmap_block *vb; in vb_alloc() local
955 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in vb_alloc()
958 spin_lock(&vb->lock); in vb_alloc()
959 if (vb->free < (1UL << order)) { in vb_alloc()
960 spin_unlock(&vb->lock); in vb_alloc()
964 pages_off = VMAP_BBMAP_BITS - vb->free; in vb_alloc()
965 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
966 vb->free -= 1UL << order; in vb_alloc()
967 if (vb->free == 0) { in vb_alloc()
969 list_del_rcu(&vb->free_list); in vb_alloc()
973 spin_unlock(&vb->lock); in vb_alloc()
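
vb_alloc() is a bump allocator: pages are handed out strictly from the front of the block, so the next free page index is simply VMAP_BBMAP_BITS - vb->free and no per-page bitmap search is needed; a block whose free count drops to zero is unlinked from the per-CPU free list so later allocations skip it. A hedged userspace sketch of that arithmetic (the names, PAGE_SHIFT value, and zero-return convention are illustrative; the kernel instead keeps scanning other blocks and falls back to new_vmap_block()):

#include <stdio.h>

#define VMAP_BBMAP_BITS 1024UL
#define PAGE_SHIFT 12

struct block {
	unsigned long va_start;	/* block's base virtual address */
	unsigned long free;	/* pages not yet handed out */
};

/* Return the address of a (1 << order)-page chunk, or 0 if this block
 * cannot satisfy the request. */
static unsigned long block_alloc(struct block *b, unsigned int order)
{
	unsigned long pages_off;

	if (b->free < (1UL << order))
		return 0;

	pages_off = VMAP_BBMAP_BITS - b->free;	/* next unused page */
	b->free -= 1UL << order;
	/* at free == 0 the kernel also unlinks the block from the
	 * per-CPU free list */
	return b->va_start + (pages_off << PAGE_SHIFT);
}

int main(void)
{
	struct block b = { .va_start = 0x100000, .free = VMAP_BBMAP_BITS };
	printf("%#lx\n", block_alloc(&b, 2));	/* pages 0..3 */
	printf("%#lx\n", block_alloc(&b, 0));	/* page 4 */
	return 0;
}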
992 struct vmap_block *vb; in vb_free() local
1006 vb = radix_tree_lookup(&vmap_block_tree, vb_idx); in vb_free()
1008 BUG_ON(!vb); in vb_free()
1012 spin_lock(&vb->lock); in vb_free()
1015 vb->dirty_min = min(vb->dirty_min, offset); in vb_free()
1016 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); in vb_free()
1018 vb->dirty += 1UL << order; in vb_free()
1019 if (vb->dirty == VMAP_BBMAP_BITS) { in vb_free()
1020 BUG_ON(vb->free); in vb_free()
1021 spin_unlock(&vb->lock); in vb_free()
1022 free_vmap_block(vb); in vb_free()
1024 spin_unlock(&vb->lock); in vb_free()
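
vb_free() never returns pages to the block for reuse; it only widens the dirty window [dirty_min, dirty_max) that a later TLB flush must cover and bumps the dirty count. Because allocation is bump-only, a fully dirty block necessarily has free == 0 (the BUG_ON above), and at that point the whole block is torn down via free_vmap_block(). A simplified, lock-free model of that bookkeeping (types and helpers are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define VMAP_BBMAP_BITS 1024UL
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

struct block {
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max;
};

/* Record that (1 << order) pages at page index 'offset' were freed;
 * returns true when the block is fully dirty and can be torn down. */
static bool block_free(struct block *b, unsigned long offset, unsigned int order)
{
	b->dirty_min = min(b->dirty_min, offset);
	b->dirty_max = max(b->dirty_max, offset + (1UL << order));
	b->dirty += 1UL << order;
	if (b->dirty == VMAP_BBMAP_BITS) {
		assert(b->free == 0);	/* bump allocator: all handed out */
		return true;
	}
	return false;
}

int main(void)
{
	struct block b = { .free = 0, .dirty = 0,
			   .dirty_min = VMAP_BBMAP_BITS, .dirty_max = 0 };
	block_free(&b, 4, 2);		/* dirty window becomes [4, 8) */
	printf("[%lu, %lu)\n", b.dirty_min, b.dirty_max);
	return 0;
}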
1051 struct vmap_block *vb; in vm_unmap_aliases() local
1054 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in vm_unmap_aliases()
1055 spin_lock(&vb->lock); in vm_unmap_aliases()
1056 if (vb->dirty) { in vm_unmap_aliases()
1057 unsigned long va_start = vb->va->va_start; in vm_unmap_aliases()
1060 s = va_start + (vb->dirty_min << PAGE_SHIFT); in vm_unmap_aliases()
1061 e = va_start + (vb->dirty_max << PAGE_SHIFT); in vm_unmap_aliases()
1068 spin_unlock(&vb->lock); in vm_unmap_aliases()
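
vm_unmap_aliases() walks every CPU's free list under RCU and widens one global [start, end) range by each block's dirty window, converting page indices to addresses with << PAGE_SHIFT, so a single TLB flush can cover all lazily unmapped aliases. A compact model of the range merging, with an array standing in for the per-CPU lists (the kernel gates on vb->dirty; this model has no dirty counter, so it tests the window directly):

#include <limits.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct block {
	unsigned long va_start;
	unsigned long dirty_min, dirty_max;	/* page indices; min > max if empty */
};

int main(void)
{
	struct block blocks[] = {
		{ 0x100000, 4, 8 },	/* dirty pages [4, 8) */
		{ 0x200000, 0, 2 },	/* dirty pages [0, 2) */
	};
	unsigned long start = ULONG_MAX, end = 0;

	for (unsigned int i = 0; i < 2; i++) {
		struct block *b = &blocks[i];
		if (b->dirty_min < b->dirty_max) {
			unsigned long s = b->va_start + (b->dirty_min << PAGE_SHIFT);
			unsigned long e = b->va_start + (b->dirty_max << PAGE_SHIFT);
			if (s < start) start = s;
			if (e > end) end = e;
		}
	}
	printf("flush [%#lx, %#lx)\n", start, end);	/* one flush covers all */
	return 0;
}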