Lines Matching refs:blkif
32 struct xen_blkif *blkif; member
44 static void xen_blkif_free(struct xen_blkif *blkif);
58 struct xen_blkif *blkif; in xen_blkif_deferred_free() local
60 blkif = container_of(work, struct xen_blkif, free_work); in xen_blkif_deferred_free()
61 xen_blkif_free(blkif); in xen_blkif_deferred_free()
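A minimal user-space sketch of the deferred-free pattern behind xen_blkif_deferred_free() above: the work callback receives only a pointer to the embedded work item and recovers the enclosing object with container_of() before freeing it. struct demo_blkif, struct demo_work and demo_deferred_free() are illustrative names, not kernel symbols.

#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_work { void (*func)(struct demo_work *work); };

struct demo_blkif {
	int domid;
	struct demo_work free_work;	/* embedded, like blkif->free_work */
};

static void demo_deferred_free(struct demo_work *work)
{
	struct demo_blkif *blkif =
		container_of(work, struct demo_blkif, free_work);

	free(blkif);			/* stands in for xen_blkif_free(blkif) */
}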
64 static int blkback_name(struct xen_blkif *blkif, char *buf) in blkback_name() argument
67 struct xenbus_device *dev = blkif->be->dev; in blkback_name()
79 snprintf(buf, BLKBACK_NAME_LEN, "blkback.%d.%s", blkif->domid, devname); in blkback_name()
85 static void xen_update_blkif_status(struct xen_blkif *blkif) in xen_update_blkif_status() argument
91 if (!blkif->irq || !blkif->vbd.bdev) in xen_update_blkif_status()
95 if (blkif->be->dev->state == XenbusStateConnected) in xen_update_blkif_status()
99 connect(blkif->be); in xen_update_blkif_status()
100 if (blkif->be->dev->state != XenbusStateConnected) in xen_update_blkif_status()
103 err = blkback_name(blkif, name); in xen_update_blkif_status()
105 xenbus_dev_error(blkif->be->dev, err, "get blkback dev name"); in xen_update_blkif_status()
109 err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping); in xen_update_blkif_status()
111 xenbus_dev_error(blkif->be->dev, err, "block flush"); in xen_update_blkif_status()
114 invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping); in xen_update_blkif_status()
116 blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, "%s", name); in xen_update_blkif_status()
117 if (IS_ERR(blkif->xenblkd)) { in xen_update_blkif_status()
118 err = PTR_ERR(blkif->xenblkd); in xen_update_blkif_status()
119 blkif->xenblkd = NULL; in xen_update_blkif_status()
120 xenbus_dev_error(blkif->be->dev, err, "start xenblkd"); in xen_update_blkif_status()
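A sketch of the error-pointer idiom used for blkif->xenblkd above: kthread_run() returns either a valid task pointer or a small negative errno encoded in the pointer value, so the caller tests IS_ERR() rather than NULL and recovers the errno with PTR_ERR(). Simplified user-space definitions for illustration only; the kernel's versions live in <linux/err.h>.

#include <stdint.h>
#include <stdbool.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline bool IS_ERR(const void *ptr)
{
	/* the top MAX_ERRNO pointer values are reserved for encoded errors */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}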
127 struct xen_blkif *blkif; in xen_blkif_alloc() local
131 blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL); in xen_blkif_alloc()
132 if (!blkif) in xen_blkif_alloc()
135 blkif->domid = domid; in xen_blkif_alloc()
136 spin_lock_init(&blkif->blk_ring_lock); in xen_blkif_alloc()
137 atomic_set(&blkif->refcnt, 1); in xen_blkif_alloc()
138 init_waitqueue_head(&blkif->wq); in xen_blkif_alloc()
139 init_completion(&blkif->drain_complete); in xen_blkif_alloc()
140 atomic_set(&blkif->drain, 0); in xen_blkif_alloc()
141 blkif->st_print = jiffies; in xen_blkif_alloc()
142 blkif->persistent_gnts.rb_node = NULL; in xen_blkif_alloc()
143 spin_lock_init(&blkif->free_pages_lock); in xen_blkif_alloc()
144 INIT_LIST_HEAD(&blkif->free_pages); in xen_blkif_alloc()
145 INIT_LIST_HEAD(&blkif->persistent_purge_list); in xen_blkif_alloc()
146 blkif->free_pages_num = 0; in xen_blkif_alloc()
147 atomic_set(&blkif->persistent_gnt_in_use, 0); in xen_blkif_alloc()
148 atomic_set(&blkif->inflight, 0); in xen_blkif_alloc()
149 INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants); in xen_blkif_alloc()
151 INIT_LIST_HEAD(&blkif->pending_free); in xen_blkif_alloc()
152 INIT_WORK(&blkif->free_work, xen_blkif_deferred_free); in xen_blkif_alloc()
153 spin_lock_init(&blkif->pending_free_lock); in xen_blkif_alloc()
154 init_waitqueue_head(&blkif->pending_free_wq); in xen_blkif_alloc()
155 init_waitqueue_head(&blkif->shutdown_wq); in xen_blkif_alloc()
157 return blkif; in xen_blkif_alloc()
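A sketch of the initialisation pattern in xen_blkif_alloc(): a zeroed allocation, a reference count that starts at 1 on behalf of the creator, and list heads set up before the object is published. Plain C stand-ins (calloc and an int refcount) replace the slab cache and atomic_t used by the kernel; the names are illustrative.

#include <stdlib.h>

struct demo_list_head { struct demo_list_head *next, *prev; };

static void demo_init_list_head(struct demo_list_head *list)
{
	list->next = list->prev = list;
}

struct demo_blkif_obj {
	unsigned int domid;
	int refcnt;				/* atomic_t in the kernel */
	struct demo_list_head pending_free;
	struct demo_list_head free_pages;
};

static struct demo_blkif_obj *demo_blkif_alloc(unsigned int domid)
{
	struct demo_blkif_obj *blkif = calloc(1, sizeof(*blkif));

	if (!blkif)
		return NULL;			/* the kernel returns an ERR_PTR here */

	blkif->domid = domid;
	blkif->refcnt = 1;			/* creator holds the first reference */
	demo_init_list_head(&blkif->pending_free);
	demo_init_list_head(&blkif->free_pages);
	return blkif;
}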
160 static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref, in xen_blkif_map() argument
166 if (blkif->irq) in xen_blkif_map()
169 err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs, in xen_blkif_map()
170 &blkif->blk_ring); in xen_blkif_map()
174 switch (blkif->blk_protocol) { in xen_blkif_map()
178 sring = (struct blkif_sring *)blkif->blk_ring; in xen_blkif_map()
179 BACK_RING_INIT(&blkif->blk_rings.native, sring, in xen_blkif_map()
186 sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring; in xen_blkif_map()
187 BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, in xen_blkif_map()
194 sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring; in xen_blkif_map()
195 BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, in xen_blkif_map()
203 err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, in xen_blkif_map()
205 "blkif-backend", blkif); in xen_blkif_map()
207 xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring); in xen_blkif_map()
208 blkif->blk_rings.common.sring = NULL; in xen_blkif_map()
211 blkif->irq = err; in xen_blkif_map()
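A sketch of the connect/unwind sequence in xen_blkif_map(): map the frontend's grant references, initialise the back ring for the negotiated ABI, then bind the interdomain event channel; if the bind fails, the ring mapping is torn down again before returning. All demo_* helpers below are hypothetical stand-ins for xenbus_map_ring_valloc(), BACK_RING_INIT(), bind_interdomain_evtchn_to_irqhandler() and xenbus_unmap_ring_vfree().

struct demo_be { void *ring; int irq; };

static char demo_shared_page[4096];	/* stands in for the mapped grant page */

static int demo_map_ring(struct demo_be *be)
{
	be->ring = demo_shared_page;	/* xenbus_map_ring_valloc() */
	return 0;
}

static void demo_init_back_ring(struct demo_be *be)
{
	(void)be;			/* BACK_RING_INIT() for the negotiated ABI */
}

static int demo_bind_evtchn(struct demo_be *be, unsigned int evtchn)
{
	(void)be;
	return (int)evtchn;		/* bind_interdomain_evtchn_to_irqhandler() */
}

static void demo_unmap_ring(struct demo_be *be)
{
	be->ring = 0;			/* xenbus_unmap_ring_vfree() */
}

static int demo_blkif_map(struct demo_be *be, unsigned int evtchn)
{
	int err;

	if (be->irq)			/* already mapped: nothing to do */
		return 0;

	err = demo_map_ring(be);
	if (err < 0)
		return err;

	demo_init_back_ring(be);

	err = demo_bind_evtchn(be, evtchn);
	if (err < 0) {
		demo_unmap_ring(be);	/* unwind the mapping on failure */
		return err;
	}
	be->irq = err;			/* a successful bind yields the irq number */
	return 0;
}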
216 static int xen_blkif_disconnect(struct xen_blkif *blkif) in xen_blkif_disconnect() argument
221 if (blkif->xenblkd) { in xen_blkif_disconnect()
222 kthread_stop(blkif->xenblkd); in xen_blkif_disconnect()
223 wake_up(&blkif->shutdown_wq); in xen_blkif_disconnect()
224 blkif->xenblkd = NULL; in xen_blkif_disconnect()
231 if (atomic_read(&blkif->inflight) > 0) in xen_blkif_disconnect()
234 if (blkif->irq) { in xen_blkif_disconnect()
235 unbind_from_irqhandler(blkif->irq, blkif); in xen_blkif_disconnect()
236 blkif->irq = 0; in xen_blkif_disconnect()
239 if (blkif->blk_rings.common.sring) { in xen_blkif_disconnect()
240 xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring); in xen_blkif_disconnect()
241 blkif->blk_rings.common.sring = NULL; in xen_blkif_disconnect()
245 xen_blkbk_free_caches(blkif); in xen_blkif_disconnect()
248 list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { in xen_blkif_disconnect()
261 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); in xen_blkif_disconnect()
262 blkif->nr_ring_pages = 0; in xen_blkif_disconnect()
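A sketch of the teardown order in xen_blkif_disconnect(): stop the per-device kthread, report busy while requests are still in flight so the caller can handle the pending I/O, then unbind the event channel and unmap the ring. demo_* names are illustrative; -EBUSY mirrors what the kernel function signals for outstanding I/O.

#include <errno.h>

struct demo_conn {
	int has_thread;
	int inflight;
	int irq;
	void *ring;
};

static int demo_blkif_disconnect(struct demo_conn *c)
{
	if (c->has_thread)
		c->has_thread = 0;	/* kthread_stop() + wake_up(&shutdown_wq) */

	if (c->inflight > 0)
		return -EBUSY;		/* I/O still outstanding: not safe to tear down */

	if (c->irq) {
		/* unbind_from_irqhandler() */
		c->irq = 0;
	}
	if (c->ring) {
		/* xenbus_unmap_ring_vfree() */
		c->ring = 0;
	}
	return 0;
}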
267 static void xen_blkif_free(struct xen_blkif *blkif) in xen_blkif_free() argument
270 xen_blkif_disconnect(blkif); in xen_blkif_free()
271 xen_vbd_free(&blkif->vbd); in xen_blkif_free()
274 BUG_ON(blkif->persistent_gnt_c != 0); in xen_blkif_free()
275 BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0); in xen_blkif_free()
276 BUG_ON(blkif->free_pages_num != 0); in xen_blkif_free()
277 BUG_ON(!list_empty(&blkif->persistent_purge_list)); in xen_blkif_free()
278 BUG_ON(!list_empty(&blkif->free_pages)); in xen_blkif_free()
279 BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); in xen_blkif_free()
281 kmem_cache_free(xen_blkif_cachep, blkif); in xen_blkif_free()
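A sketch of the "assert everything was released" style of xen_blkif_free() above: after the final disconnect, leftover persistent grants, cached free pages or purge-list entries would indicate a leak, so the kernel BUG()s out before returning the object to its slab cache. assert() stands in for BUG_ON() here.

#include <assert.h>
#include <stdlib.h>

struct demo_free_blkif {
	int persistent_gnt_c;
	int free_pages_num;
	int purge_list_empty;
};

static void demo_blkif_free(struct demo_free_blkif *blkif)
{
	assert(blkif->persistent_gnt_c == 0);	/* BUG_ON(blkif->persistent_gnt_c != 0) */
	assert(blkif->free_pages_num == 0);	/* BUG_ON(blkif->free_pages_num != 0) */
	assert(blkif->purge_list_empty);	/* BUG_ON(!list_empty(&...persistent_purge_list)) */

	free(blkif);				/* kmem_cache_free(xen_blkif_cachep, blkif) */
}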
311 VBD_SHOW(oo_req, "%llu\n", be->blkif->st_oo_req);
312 VBD_SHOW(rd_req, "%llu\n", be->blkif->st_rd_req);
313 VBD_SHOW(wr_req, "%llu\n", be->blkif->st_wr_req);
314 VBD_SHOW(f_req, "%llu\n", be->blkif->st_f_req);
315 VBD_SHOW(ds_req, "%llu\n", be->blkif->st_ds_req);
316 VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect);
317 VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect);
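A sketch of the VBD_SHOW() idea behind the statistics lines above: one macro stamps out a small formatting function per counter, which sysfs later calls when the corresponding attribute is read. snprintf() into a caller-supplied buffer stands in for the sysfs show callback; all names are illustrative.

#include <stdio.h>

struct demo_vbd_stats {
	unsigned long long rd_req, wr_req, rd_sect, wr_sect;
};

#define DEMO_VBD_SHOW(name, format, field)				\
	static int demo_show_##name(const struct demo_vbd_stats *st,	\
				    char *buf, size_t len)		\
	{								\
		return snprintf(buf, len, format, st->field);		\
	}

DEMO_VBD_SHOW(rd_req,  "%llu\n", rd_req)
DEMO_VBD_SHOW(wr_req,  "%llu\n", wr_req)
DEMO_VBD_SHOW(rd_sect, "%llu\n", rd_sect)
DEMO_VBD_SHOW(wr_sect, "%llu\n", wr_sect)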
377 static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, in xen_vbd_create() argument
385 vbd = &blkif->vbd; in xen_vbd_create()
423 handle, blkif->domid); in xen_vbd_create()
443 if (be->blkif) { in xen_blkbk_remove()
444 xen_blkif_disconnect(be->blkif); in xen_blkbk_remove()
445 xen_blkif_put(be->blkif); in xen_blkbk_remove()
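A sketch of the drop-last-reference behaviour behind xen_blkif_put() as used in xen_blkbk_remove() above: when the count reaches zero the owner does not free inline but hands off to the free_work item, which later runs xen_blkif_deferred_free(). The demo below models that with a plain counter and a callback; the exact kernel macro body is an assumption here.

struct demo_ref_blkif {
	int refcnt;
	void (*deferred_free)(struct demo_ref_blkif *blkif);
};

static void demo_blkif_put(struct demo_ref_blkif *blkif)
{
	if (--blkif->refcnt == 0)		/* atomic_dec_and_test() in the kernel */
		blkif->deferred_free(blkif);	/* schedule_work(&blkif->free_work) */
}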
470 struct xen_blkif *blkif = be->blkif; in xen_blkbk_discard() local
473 struct block_device *bdev = be->blkif->vbd.bdev; in xen_blkbk_discard()
500 blkif->vbd.discard_secure); in xen_blkbk_discard()
548 be->blkif = xen_blkif_alloc(dev->otherend_id); in xen_blkbk_probe()
549 if (IS_ERR(be->blkif)) { in xen_blkbk_probe()
550 err = PTR_ERR(be->blkif); in xen_blkbk_probe()
551 be->blkif = NULL; in xen_blkbk_probe()
557 be->blkif->be = be; in xen_blkbk_probe()
646 err = xen_vbd_create(be->blkif, handle, major, minor, in backend_changed()
654 xen_vbd_free(&be->blkif->vbd); in backend_changed()
666 xen_update_blkif_status(be->blkif); in backend_changed()
704 err = xen_blkif_disconnect(be->blkif); in frontend_changed()
713 xen_update_blkif_status(be->blkif); in frontend_changed()
721 xen_blkif_disconnect(be->blkif); in frontend_changed()
763 xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support); in connect()
767 xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support); in connect()
782 (unsigned long long)vbd_sz(&be->blkif->vbd)); in connect()
791 be->blkif->vbd.type | in connect()
792 (be->blkif->vbd.readonly ? VDISK_READONLY : 0)); in connect()
800 bdev_logical_block_size(be->blkif->vbd.bdev)); in connect()
807 bdev_physical_block_size(be->blkif->vbd.bdev)); in connect()
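A sketch of what connect() publishes to xenstore for the frontend, based on the lines above: the disk size in sectors, the VDISK_* info flags (device type plus the read-only bit), and the logical and physical block sizes, alongside the cache-flush and barrier feature nodes. The node names follow the blkif protocol as read from this code; demo_xs_printf() is a hypothetical stand-in for xenbus_printf().

#include <stdio.h>

static int demo_xs_printf(const char *node, const char *fmt, ...)
{
	(void)node; (void)fmt;
	return 0;				/* would write node=value under the backend path */
}

static void demo_connect(unsigned long long sectors, unsigned int info,
			 unsigned int lbs, unsigned int pbs)
{
	demo_xs_printf("sectors", "%llu", sectors);		/* vbd_sz() */
	demo_xs_printf("info", "%u", info);			/* vbd.type | VDISK_READONLY? */
	demo_xs_printf("sector-size", "%u", lbs);		/* bdev_logical_block_size() */
	demo_xs_printf("physical-sector-size", "%u", pbs);	/* bdev_physical_block_size() */
}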
893 be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT; in connect_ring()
899 be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; in connect_ring()
901 be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; in connect_ring()
903 be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; in connect_ring()
914 be->blkif->vbd.feature_gnt_persistent = pers_grants; in connect_ring()
915 be->blkif->vbd.overflow_max_grants = 0; in connect_ring()
916 be->blkif->nr_ring_pages = nr_grefs; in connect_ring()
919 nr_grefs, evtchn, be->blkif->blk_protocol, protocol, in connect_ring()
926 list_add_tail(&req->free_list, &be->blkif->pending_free); in connect_ring()
941 err = xen_blkif_map(be->blkif, ring_ref, nr_grefs, evtchn); in connect_ring()
950 list_for_each_entry_safe(req, n, &be->blkif->pending_free, free_list) { in connect_ring()
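A sketch of the protocol negotiation in connect_ring(): the frontend writes a "protocol" node in xenstore and the backend maps it onto one of the ring ABIs it supports (native, x86_32 or x86_64), defaulting to the native layout when nothing is written and refusing to connect otherwise. The ABI strings below follow the Xen blkif convention and should be treated as assumptions; the enum and helper are illustrative.

#include <string.h>

enum demo_blk_protocol {
	DEMO_PROTOCOL_NATIVE,
	DEMO_PROTOCOL_X86_32,
	DEMO_PROTOCOL_X86_64,
};

static int demo_parse_protocol(const char *str, enum demo_blk_protocol *out)
{
	if (!str || !str[0])
		*out = DEMO_PROTOCOL_NATIVE;	/* default when the node is absent */
	else if (!strcmp(str, "x86_32-abi"))
		*out = DEMO_PROTOCOL_X86_32;
	else if (!strcmp(str, "x86_64-abi"))
		*out = DEMO_PROTOCOL_X86_64;
	else
		return -1;			/* unknown ABI: refuse to connect */
	return 0;
}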