Lines Matching refs: info
110 #define BLK_RING_SIZE(info) \ argument
111 __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
201 static int blkfront_setup_indirect(struct blkfront_info *info);
202 static int blkfront_gather_backend_features(struct blkfront_info *info);
204 static int get_id_from_freelist(struct blkfront_info *info) in get_id_from_freelist() argument
206 unsigned long free = info->shadow_free; in get_id_from_freelist()
207 BUG_ON(free >= BLK_RING_SIZE(info)); in get_id_from_freelist()
208 info->shadow_free = info->shadow[free].req.u.rw.id; in get_id_from_freelist()
209 info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */ in get_id_from_freelist()
213 static int add_id_to_freelist(struct blkfront_info *info, in add_id_to_freelist() argument
216 if (info->shadow[id].req.u.rw.id != id) in add_id_to_freelist()
218 if (info->shadow[id].request == NULL) in add_id_to_freelist()
220 info->shadow[id].req.u.rw.id = info->shadow_free; in add_id_to_freelist()
221 info->shadow[id].request = NULL; in add_id_to_freelist()
222 info->shadow_free = id; in add_id_to_freelist()
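The two helpers above, get_id_from_freelist() and add_id_to_freelist(), manage ring-slot ids as a free list threaded through the shadow array itself: each free slot's req.u.rw.id holds the index of the next free slot (seeded in talk_to_blkback() and blkif_recover() with shadow[i].req.u.rw.id = i + 1), so allocating and releasing an id is O(1) with no extra storage. Below is a minimal standalone sketch of the same pattern; struct slot, RING_SIZE and the function names are illustrative stand-ins, not the driver's real definitions.

/*
 * Sketch of the free-list pattern used by get_id_from_freelist() and
 * add_id_to_freelist(): free slot indices are chained through a field of
 * the slot itself, the way blkfront threads them through
 * shadow[].req.u.rw.id.  All names here are illustrative only.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define RING_SIZE 32

struct slot {
	unsigned long next_free;	/* plays the role of req.u.rw.id */
	void *request;			/* non-NULL while the slot is in flight */
};

static struct slot shadow[RING_SIZE];
static unsigned long shadow_free;	/* head of the free list */

static void freelist_init(void)
{
	unsigned long i;

	for (i = 0; i < RING_SIZE; i++)
		shadow[i].next_free = i + 1;	/* last entry points past the end */
	shadow_free = 0;
}

static unsigned long get_id(void)
{
	unsigned long free = shadow_free;

	assert(free < RING_SIZE);		/* caller must not over-commit the ring */
	shadow_free = shadow[free].next_free;
	shadow[free].next_free = 0x0fffffee;	/* poison, like the driver's debug value */
	return free;
}

static int add_id(unsigned long id)
{
	if (shadow[id].request == NULL)
		return -1;			/* slot was never (or is no longer) in use */
	shadow[id].next_free = shadow_free;
	shadow[id].request = NULL;
	shadow_free = id;
	return 0;
}

int main(void)
{
	int dummy_request;
	unsigned long id;

	freelist_init();
	id = get_id();
	shadow[id].request = &dummy_request;	/* stand-in for info->shadow[id].request = req */
	printf("allocated slot %lu\n", id);
	return add_id(id);
}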
226 static int fill_grant_buffer(struct blkfront_info *info, int num) in fill_grant_buffer() argument
237 if (info->feature_persistent) { in fill_grant_buffer()
247 list_add(&gnt_list_entry->node, &info->grants); in fill_grant_buffer()
255 &info->grants, node) { in fill_grant_buffer()
257 if (info->feature_persistent) in fill_grant_buffer()
266 static struct grant *get_free_grant(struct blkfront_info *info) in get_free_grant() argument
270 BUG_ON(list_empty(&info->grants)); in get_free_grant()
271 gnt_list_entry = list_first_entry(&info->grants, struct grant, in get_free_grant()
276 info->persistent_gnts_c--; in get_free_grant()
282 const struct blkfront_info *info) in grant_foreign_access() argument
285 info->xbdev->otherend_id, in grant_foreign_access()
292 struct blkfront_info *info) in get_grant() argument
294 struct grant *gnt_list_entry = get_free_grant(info); in get_grant()
302 if (info->feature_persistent) in get_grant()
303 grant_foreign_access(gnt_list_entry, info); in get_grant()
307 info->xbdev->otherend_id, in get_grant()
315 struct blkfront_info *info) in get_indirect_grant() argument
317 struct grant *gnt_list_entry = get_free_grant(info); in get_indirect_grant()
325 if (!info->feature_persistent) { in get_indirect_grant()
329 BUG_ON(list_empty(&info->indirect_pages)); in get_indirect_grant()
330 indirect_page = list_first_entry(&info->indirect_pages, in get_indirect_grant()
335 grant_foreign_access(gnt_list_entry, info); in get_indirect_grant()
406 struct blkfront_info *info = (struct blkfront_info *)arg; in blkif_restart_queue_callback() local
407 schedule_work(&info->work); in blkif_restart_queue_callback()
429 struct blkfront_info *info = bdev->bd_disk->private_data; in blkif_ioctl() local
432 dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n", in blkif_ioctl()
437 dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n"); in blkif_ioctl()
444 struct gendisk *gd = info->gd; in blkif_ioctl()
461 struct blkfront_info *info = req->rq_disk->private_data; in blkif_queue_discard_req() local
466 ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); in blkif_queue_discard_req()
467 id = get_id_from_freelist(info); in blkif_queue_discard_req()
468 info->shadow[id].request = req; in blkif_queue_discard_req()
474 if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard) in blkif_queue_discard_req()
479 info->ring.req_prod_pvt++; in blkif_queue_discard_req()
482 info->shadow[id].req = *ring_req; in blkif_queue_discard_req()
490 struct blkfront_info *info; member
510 struct blkfront_info *info = setup->info; in blkif_setup_rw_req_grant() local
511 struct blk_shadow *shadow = &info->shadow[setup->id]; in blkif_setup_rw_req_grant()
519 gnt_list_entry = get_indirect_grant(&setup->gref_head, info); in blkif_setup_rw_req_grant()
525 gnt_list_entry = get_grant(&setup->gref_head, gfn, info); in blkif_setup_rw_req_grant()
571 struct blkfront_info *info = req->rq_disk->private_data; in blkif_queue_rw_req() local
578 .info = info, in blkif_queue_rw_req()
579 .need_copy = rq_data_dir(req) && info->feature_persistent, in blkif_queue_rw_req()
600 if (info->persistent_gnts_c < max_grefs) { in blkif_queue_rw_req()
603 max_grefs - info->persistent_gnts_c, in blkif_queue_rw_req()
606 &info->callback, in blkif_queue_rw_req()
608 info, in blkif_queue_rw_req()
616 ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); in blkif_queue_rw_req()
617 id = get_id_from_freelist(info); in blkif_queue_rw_req()
618 info->shadow[id].request = req; in blkif_queue_rw_req()
620 BUG_ON(info->max_indirect_segments == 0 && in blkif_queue_rw_req()
622 BUG_ON(info->max_indirect_segments && in blkif_queue_rw_req()
623 GREFS(req->nr_phys_segments) > info->max_indirect_segments); in blkif_queue_rw_req()
625 num_sg = blk_rq_map_sg(req->q, req, info->shadow[id].sg); in blkif_queue_rw_req()
628 for_each_sg(info->shadow[id].sg, sg, num_sg, i) in blkif_queue_rw_req()
632 info->shadow[id].num_sg = num_sg; in blkif_queue_rw_req()
643 ring_req->u.indirect.handle = info->handle; in blkif_queue_rw_req()
647 ring_req->u.rw.handle = info->handle; in blkif_queue_rw_req()
658 switch (info->feature_flush & in blkif_queue_rw_req()
677 for_each_sg(info->shadow[id].sg, sg, num_sg, i) { in blkif_queue_rw_req()
697 info->ring.req_prod_pvt++; in blkif_queue_rw_req()
700 info->shadow[id].req = *ring_req; in blkif_queue_rw_req()
716 struct blkfront_info *info = req->rq_disk->private_data; in blkif_queue_request() local
718 if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) in blkif_queue_request()
727 static inline void flush_requests(struct blkfront_info *info) in flush_requests() argument
731 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); in flush_requests()
734 notify_remote_via_irq(info->irq); in flush_requests()
738 struct blkfront_info *info) in blkif_request_flush_invalid() argument
742 !(info->feature_flush & REQ_FLUSH)) || in blkif_request_flush_invalid()
744 !(info->feature_flush & REQ_FUA))); in blkif_request_flush_invalid()
750 struct blkfront_info *info = qd->rq->rq_disk->private_data; in blkif_queue_rq() local
753 spin_lock_irq(&info->io_lock); in blkif_queue_rq()
754 if (RING_FULL(&info->ring)) in blkif_queue_rq()
757 if (blkif_request_flush_invalid(qd->rq, info)) in blkif_queue_rq()
763 flush_requests(info); in blkif_queue_rq()
764 spin_unlock_irq(&info->io_lock); in blkif_queue_rq()
768 spin_unlock_irq(&info->io_lock); in blkif_queue_rq()
772 spin_unlock_irq(&info->io_lock); in blkif_queue_rq()
787 struct blkfront_info *info = gd->private_data; in xlvbd_init_blk_queue() local
789 memset(&info->tag_set, 0, sizeof(info->tag_set)); in xlvbd_init_blk_queue()
790 info->tag_set.ops = &blkfront_mq_ops; in xlvbd_init_blk_queue()
791 info->tag_set.nr_hw_queues = 1; in xlvbd_init_blk_queue()
792 info->tag_set.queue_depth = BLK_RING_SIZE(info); in xlvbd_init_blk_queue()
793 info->tag_set.numa_node = NUMA_NO_NODE; in xlvbd_init_blk_queue()
794 info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; in xlvbd_init_blk_queue()
795 info->tag_set.cmd_size = 0; in xlvbd_init_blk_queue()
796 info->tag_set.driver_data = info; in xlvbd_init_blk_queue()
798 if (blk_mq_alloc_tag_set(&info->tag_set)) in xlvbd_init_blk_queue()
800 rq = blk_mq_init_queue(&info->tag_set); in xlvbd_init_blk_queue()
802 blk_mq_free_tag_set(&info->tag_set); in xlvbd_init_blk_queue()
808 if (info->feature_discard) { in xlvbd_init_blk_queue()
811 rq->limits.discard_granularity = info->discard_granularity; in xlvbd_init_blk_queue()
812 rq->limits.discard_alignment = info->discard_alignment; in xlvbd_init_blk_queue()
813 if (info->feature_secdiscard) in xlvbd_init_blk_queue()
852 static void xlvbd_flush(struct blkfront_info *info) in xlvbd_flush() argument
854 blk_queue_flush(info->rq, info->feature_flush); in xlvbd_flush()
856 info->gd->disk_name, flush_info(info->feature_flush), in xlvbd_flush()
857 "persistent grants:", info->feature_persistent ? in xlvbd_flush()
859 info->max_indirect_segments ? "enabled;" : "disabled;"); in xlvbd_flush()
931 struct blkfront_info *info, in xlvbd_alloc_gendisk() argument
943 BUG_ON(info->gd != NULL); in xlvbd_alloc_gendisk()
944 BUG_ON(info->rq != NULL); in xlvbd_alloc_gendisk()
946 if ((info->vdevice>>EXT_SHIFT) > 1) { in xlvbd_alloc_gendisk()
948 …printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevic… in xlvbd_alloc_gendisk()
952 if (!VDEV_IS_EXTENDED(info->vdevice)) { in xlvbd_alloc_gendisk()
953 err = xen_translate_vdev(info->vdevice, &minor, &offset); in xlvbd_alloc_gendisk()
958 minor = BLKIF_MINOR_EXT(info->vdevice); in xlvbd_alloc_gendisk()
964 "from xvde on\n", info->vdevice); in xlvbd_alloc_gendisk()
968 info->vdevice, minor); in xlvbd_alloc_gendisk()
996 gd->private_data = info; in xlvbd_alloc_gendisk()
997 gd->driverfs_dev = &(info->xbdev->dev); in xlvbd_alloc_gendisk()
1001 info->max_indirect_segments ? : in xlvbd_alloc_gendisk()
1007 info->rq = gd->queue; in xlvbd_alloc_gendisk()
1008 info->gd = gd; in xlvbd_alloc_gendisk()
1010 xlvbd_flush(info); in xlvbd_alloc_gendisk()
1029 static void xlvbd_release_gendisk(struct blkfront_info *info) in xlvbd_release_gendisk() argument
1033 if (info->rq == NULL) in xlvbd_release_gendisk()
1037 blk_mq_stop_hw_queues(info->rq); in xlvbd_release_gendisk()
1040 gnttab_cancel_free_callback(&info->callback); in xlvbd_release_gendisk()
1043 flush_work(&info->work); in xlvbd_release_gendisk()
1045 del_gendisk(info->gd); in xlvbd_release_gendisk()
1047 minor = info->gd->first_minor; in xlvbd_release_gendisk()
1048 nr_minors = info->gd->minors; in xlvbd_release_gendisk()
1051 blk_cleanup_queue(info->rq); in xlvbd_release_gendisk()
1052 blk_mq_free_tag_set(&info->tag_set); in xlvbd_release_gendisk()
1053 info->rq = NULL; in xlvbd_release_gendisk()
1055 put_disk(info->gd); in xlvbd_release_gendisk()
1056 info->gd = NULL; in xlvbd_release_gendisk()
1060 static void kick_pending_request_queues(struct blkfront_info *info) in kick_pending_request_queues() argument
1062 if (!RING_FULL(&info->ring)) in kick_pending_request_queues()
1063 blk_mq_start_stopped_hw_queues(info->rq, true); in kick_pending_request_queues()
1068 struct blkfront_info *info = container_of(work, struct blkfront_info, work); in blkif_restart_queue() local
1070 spin_lock_irq(&info->io_lock); in blkif_restart_queue()
1071 if (info->connected == BLKIF_STATE_CONNECTED) in blkif_restart_queue()
1072 kick_pending_request_queues(info); in blkif_restart_queue()
1073 spin_unlock_irq(&info->io_lock); in blkif_restart_queue()
1076 static void blkif_free(struct blkfront_info *info, int suspend) in blkif_free() argument
1083 spin_lock_irq(&info->io_lock); in blkif_free()
1084 info->connected = suspend ? in blkif_free()
1087 if (info->rq) in blkif_free()
1088 blk_mq_stop_hw_queues(info->rq); in blkif_free()
1091 if (!list_empty(&info->grants)) { in blkif_free()
1093 &info->grants, node) { in blkif_free()
1098 info->persistent_gnts_c--; in blkif_free()
1100 if (info->feature_persistent) in blkif_free()
1105 BUG_ON(info->persistent_gnts_c != 0); in blkif_free()
1111 if (!list_empty(&info->indirect_pages)) { in blkif_free()
1114 BUG_ON(info->feature_persistent); in blkif_free()
1115 list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { in blkif_free()
1121 for (i = 0; i < BLK_RING_SIZE(info); i++) { in blkif_free()
1126 if (!info->shadow[i].request) in blkif_free()
1129 segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ? in blkif_free()
1130 info->shadow[i].req.u.indirect.nr_segments : in blkif_free()
1131 info->shadow[i].req.u.rw.nr_segments; in blkif_free()
1133 persistent_gnt = info->shadow[i].grants_used[j]; in blkif_free()
1135 if (info->feature_persistent) in blkif_free()
1140 if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT) in blkif_free()
1148 persistent_gnt = info->shadow[i].indirect_grants[j]; in blkif_free()
1155 kfree(info->shadow[i].grants_used); in blkif_free()
1156 info->shadow[i].grants_used = NULL; in blkif_free()
1157 kfree(info->shadow[i].indirect_grants); in blkif_free()
1158 info->shadow[i].indirect_grants = NULL; in blkif_free()
1159 kfree(info->shadow[i].sg); in blkif_free()
1160 info->shadow[i].sg = NULL; in blkif_free()
1164 gnttab_cancel_free_callback(&info->callback); in blkif_free()
1165 spin_unlock_irq(&info->io_lock); in blkif_free()
1168 flush_work(&info->work); in blkif_free()
1171 for (i = 0; i < info->nr_ring_pages; i++) { in blkif_free()
1172 if (info->ring_ref[i] != GRANT_INVALID_REF) { in blkif_free()
1173 gnttab_end_foreign_access(info->ring_ref[i], 0, 0); in blkif_free()
1174 info->ring_ref[i] = GRANT_INVALID_REF; in blkif_free()
1177 free_pages((unsigned long)info->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE)); in blkif_free()
1178 info->ring.sring = NULL; in blkif_free()
1180 if (info->irq) in blkif_free()
1181 unbind_from_irqhandler(info->irq, info); in blkif_free()
1182 info->evtchn = info->irq = 0; in blkif_free()
1196 struct copy_from_grant *info = data; in blkif_copy_from_grant() local
1199 const struct blk_shadow *s = info->s; in blkif_copy_from_grant()
1201 shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page); in blkif_copy_from_grant()
1203 memcpy(info->bvec_data + info->bvec_offset, in blkif_copy_from_grant()
1206 info->bvec_offset += len; in blkif_copy_from_grant()
1207 info->grant_idx++; in blkif_copy_from_grant()
1212 static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, in blkif_completion() argument
1227 if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { in blkif_completion()
1252 if (!info->feature_persistent) in blkif_completion()
1255 list_add(&s->grants_used[i]->node, &info->grants); in blkif_completion()
1256 info->persistent_gnts_c++; in blkif_completion()
1266 list_add_tail(&s->grants_used[i]->node, &info->grants); in blkif_completion()
1272 if (!info->feature_persistent) in blkif_completion()
1275 list_add(&s->indirect_grants[i]->node, &info->grants); in blkif_completion()
1276 info->persistent_gnts_c++; in blkif_completion()
1285 if (!info->feature_persistent) { in blkif_completion()
1287 list_add(&indirect_page->lru, &info->indirect_pages); in blkif_completion()
1290 list_add_tail(&s->indirect_grants[i]->node, &info->grants); in blkif_completion()
1302 struct blkfront_info *info = (struct blkfront_info *)dev_id; in blkif_interrupt() local
1305 spin_lock_irqsave(&info->io_lock, flags); in blkif_interrupt()
1307 if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { in blkif_interrupt()
1308 spin_unlock_irqrestore(&info->io_lock, flags); in blkif_interrupt()
1313 rp = info->ring.sring->rsp_prod; in blkif_interrupt()
1316 for (i = info->ring.rsp_cons; i != rp; i++) { in blkif_interrupt()
1319 bret = RING_GET_RESPONSE(&info->ring, i); in blkif_interrupt()
1326 if (id >= BLK_RING_SIZE(info)) { in blkif_interrupt()
1328 info->gd->disk_name, op_name(bret->operation), id); in blkif_interrupt()
1333 req = info->shadow[id].request; in blkif_interrupt()
1336 blkif_completion(&info->shadow[id], info, bret); in blkif_interrupt()
1338 if (add_id_to_freelist(info, id)) { in blkif_interrupt()
1340 info->gd->disk_name, op_name(bret->operation), id); in blkif_interrupt()
1348 struct request_queue *rq = info->rq; in blkif_interrupt()
1350 info->gd->disk_name, op_name(bret->operation)); in blkif_interrupt()
1352 info->feature_discard = 0; in blkif_interrupt()
1353 info->feature_secdiscard = 0; in blkif_interrupt()
1363 info->gd->disk_name, op_name(bret->operation)); in blkif_interrupt()
1367 info->shadow[id].req.u.rw.nr_segments == 0)) { in blkif_interrupt()
1369 info->gd->disk_name, op_name(bret->operation)); in blkif_interrupt()
1375 info->feature_flush = 0; in blkif_interrupt()
1376 xlvbd_flush(info); in blkif_interrupt()
1382 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " in blkif_interrupt()
1392 info->ring.rsp_cons = i; in blkif_interrupt()
1394 if (i != info->ring.req_prod_pvt) { in blkif_interrupt()
1396 RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); in blkif_interrupt()
1400 info->ring.sring->rsp_event = i + 1; in blkif_interrupt()
1402 kick_pending_request_queues(info); in blkif_interrupt()
1404 spin_unlock_irqrestore(&info->io_lock, flags); in blkif_interrupt()
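The blkif_interrupt() lines above follow the usual shared-ring completion shape: take the io_lock, snapshot the backend's rsp_prod, complete every response between rsp_cons and that snapshot (validating the id against BLK_RING_SIZE and the shadow entry), then either re-arm the event with rsp_event = i + 1 or loop again if more responses arrived meanwhile. Below is a single-threaded stand-in for that loop; the names are illustrative, and the real driver uses the Xen ring macros (RING_GET_RESPONSE, RING_FINAL_CHECK_FOR_RESPONSES), which also supply the locking and memory barriers.

/*
 * Single-threaded sketch of the response-consumption loop shape seen in
 * blkif_interrupt().  Illustrative names only; not the Xen ring layout.
 */
#include <stdio.h>

#define RING_SLOTS 32

struct response {
	unsigned long id;
	int status;
};

/* Fields that would normally live in the shared ring page. */
static struct response ring[RING_SLOTS];
static unsigned int rsp_prod;	/* written by the backend */
static unsigned int rsp_event;	/* backend notifies once rsp_prod passes this */
static unsigned int rsp_cons;	/* frontend's private consumer index */

static void complete_one(const struct response *rsp)
{
	printf("completed id %lu, status %d\n", rsp->id, rsp->status);
}

static void consume_responses(void)
{
	unsigned int rp, i;
	int more_to_do;

	do {
		rp = rsp_prod;		/* snapshot the producer index */
		/* (the real code issues a read barrier here, via the ring macros) */

		for (i = rsp_cons; i != rp; i++)
			complete_one(&ring[i % RING_SLOTS]);
		rsp_cons = i;

		/*
		 * Re-arm the event, then check once more so a response that
		 * was published while we were draining is not missed; this is
		 * what RING_FINAL_CHECK_FOR_RESPONSES does in the driver.
		 */
		rsp_event = i + 1;
		more_to_do = (rsp_prod != rsp_cons);
	} while (more_to_do);
}

int main(void)
{
	unsigned int n;

	for (n = 0; n < 3; n++)		/* pretend the backend queued three responses */
		ring[n] = (struct response){ .id = n, .status = 0 };
	rsp_prod = 3;			/* producer index is published last */

	consume_responses();
	return 0;
}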
1411 struct blkfront_info *info) in setup_blkring() argument
1415 unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE; in setup_blkring()
1418 for (i = 0; i < info->nr_ring_pages; i++) in setup_blkring()
1419 info->ring_ref[i] = GRANT_INVALID_REF; in setup_blkring()
1428 FRONT_RING_INIT(&info->ring, sring, ring_size); in setup_blkring()
1430 err = xenbus_grant_ring(dev, info->ring.sring, info->nr_ring_pages, gref); in setup_blkring()
1433 info->ring.sring = NULL; in setup_blkring()
1436 for (i = 0; i < info->nr_ring_pages; i++) in setup_blkring()
1437 info->ring_ref[i] = gref[i]; in setup_blkring()
1439 err = xenbus_alloc_evtchn(dev, &info->evtchn); in setup_blkring()
1443 err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0, in setup_blkring()
1444 "blkif", info); in setup_blkring()
1450 info->irq = err; in setup_blkring()
1454 blkif_free(info, 0); in setup_blkring()
1461 struct blkfront_info *info) in talk_to_blkback() argument
1469 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, in talk_to_blkback()
1472 info->nr_ring_pages = 1; in talk_to_blkback()
1475 info->nr_ring_pages = 1 << ring_page_order; in talk_to_blkback()
1479 err = setup_blkring(dev, info); in talk_to_blkback()
1490 if (info->nr_ring_pages == 1) { in talk_to_blkback()
1492 "ring-ref", "%u", info->ring_ref[0]); in talk_to_blkback()
1505 for (i = 0; i < info->nr_ring_pages; i++) { in talk_to_blkback()
1510 "%u", info->ring_ref[i]); in talk_to_blkback()
1518 "event-channel", "%u", info->evtchn); in talk_to_blkback()
1543 for (i = 0; i < BLK_RING_SIZE(info); i++) in talk_to_blkback()
1544 info->shadow[i].req.u.rw.id = i+1; in talk_to_blkback()
1545 info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; in talk_to_blkback()
1555 blkif_free(info, 0); in talk_to_blkback()
1570 struct blkfront_info *info; in blkfront_probe() local
1614 info = kzalloc(sizeof(*info), GFP_KERNEL); in blkfront_probe()
1615 if (!info) { in blkfront_probe()
1620 mutex_init(&info->mutex); in blkfront_probe()
1621 spin_lock_init(&info->io_lock); in blkfront_probe()
1622 info->xbdev = dev; in blkfront_probe()
1623 info->vdevice = vdevice; in blkfront_probe()
1624 INIT_LIST_HEAD(&info->grants); in blkfront_probe()
1625 INIT_LIST_HEAD(&info->indirect_pages); in blkfront_probe()
1626 info->persistent_gnts_c = 0; in blkfront_probe()
1627 info->connected = BLKIF_STATE_DISCONNECTED; in blkfront_probe()
1628 INIT_WORK(&info->work, blkif_restart_queue); in blkfront_probe()
1631 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); in blkfront_probe()
1632 dev_set_drvdata(&dev->dev, info); in blkfront_probe()
1650 static int blkif_recover(struct blkfront_info *info) in blkif_recover() argument
1664 copy = kmemdup(info->shadow, sizeof(info->shadow), in blkif_recover()
1670 memset(&info->shadow, 0, sizeof(info->shadow)); in blkif_recover()
1671 for (i = 0; i < BLK_RING_SIZE(info); i++) in blkif_recover()
1672 info->shadow[i].req.u.rw.id = i+1; in blkif_recover()
1673 info->shadow_free = info->ring.req_prod_pvt; in blkif_recover()
1674 info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; in blkif_recover()
1676 rc = blkfront_gather_backend_features(info); in blkif_recover()
1682 segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; in blkif_recover()
1683 blk_queue_max_segments(info->rq, segs); in blkif_recover()
1686 for (i = 0; i < BLK_RING_SIZE(info); i++) { in blkif_recover()
1712 xenbus_switch_state(info->xbdev, XenbusStateConnected); in blkif_recover()
1714 spin_lock_irq(&info->io_lock); in blkif_recover()
1717 info->connected = BLKIF_STATE_CONNECTED; in blkif_recover()
1720 kick_pending_request_queues(info); in blkif_recover()
1728 spin_unlock_irq(&info->io_lock); in blkif_recover()
1729 blk_mq_kick_requeue_list(info->rq); in blkif_recover()
1775 struct blkfront_info *info = dev_get_drvdata(&dev->dev); in blkfront_resume() local
1780 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); in blkfront_resume()
1782 err = talk_to_blkback(dev, info); in blkfront_resume()
1794 blkfront_closing(struct blkfront_info *info) in blkfront_closing() argument
1796 struct xenbus_device *xbdev = info->xbdev; in blkfront_closing()
1799 mutex_lock(&info->mutex); in blkfront_closing()
1802 mutex_unlock(&info->mutex); in blkfront_closing()
1806 if (info->gd) in blkfront_closing()
1807 bdev = bdget_disk(info->gd, 0); in blkfront_closing()
1809 mutex_unlock(&info->mutex); in blkfront_closing()
1823 xlvbd_release_gendisk(info); in blkfront_closing()
1831 static void blkfront_setup_discard(struct blkfront_info *info) in blkfront_setup_discard() argument
1838 info->feature_discard = 1; in blkfront_setup_discard()
1839 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, in blkfront_setup_discard()
1844 info->discard_granularity = discard_granularity; in blkfront_setup_discard()
1845 info->discard_alignment = discard_alignment; in blkfront_setup_discard()
1847 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, in blkfront_setup_discard()
1851 info->feature_secdiscard = !!discard_secure; in blkfront_setup_discard()
1854 static int blkfront_setup_indirect(struct blkfront_info *info) in blkfront_setup_indirect() argument
1859 if (info->max_indirect_segments == 0) in blkfront_setup_indirect()
1862 grants = info->max_indirect_segments; in blkfront_setup_indirect()
1865 err = fill_grant_buffer(info, in blkfront_setup_indirect()
1866 (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info)); in blkfront_setup_indirect()
1870 if (!info->feature_persistent && info->max_indirect_segments) { in blkfront_setup_indirect()
1876 int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info); in blkfront_setup_indirect()
1878 BUG_ON(!list_empty(&info->indirect_pages)); in blkfront_setup_indirect()
1883 list_add(&indirect_page->lru, &info->indirect_pages); in blkfront_setup_indirect()
1887 for (i = 0; i < BLK_RING_SIZE(info); i++) { in blkfront_setup_indirect()
1888 info->shadow[i].grants_used = kzalloc( in blkfront_setup_indirect()
1889 sizeof(info->shadow[i].grants_used[0]) * grants, in blkfront_setup_indirect()
1891 info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * psegs, GFP_NOIO); in blkfront_setup_indirect()
1892 if (info->max_indirect_segments) in blkfront_setup_indirect()
1893 info->shadow[i].indirect_grants = kzalloc( in blkfront_setup_indirect()
1894 sizeof(info->shadow[i].indirect_grants[0]) * in blkfront_setup_indirect()
1897 if ((info->shadow[i].grants_used == NULL) || in blkfront_setup_indirect()
1898 (info->shadow[i].sg == NULL) || in blkfront_setup_indirect()
1899 (info->max_indirect_segments && in blkfront_setup_indirect()
1900 (info->shadow[i].indirect_grants == NULL))) in blkfront_setup_indirect()
1902 sg_init_table(info->shadow[i].sg, psegs); in blkfront_setup_indirect()
1909 for (i = 0; i < BLK_RING_SIZE(info); i++) { in blkfront_setup_indirect()
1910 kfree(info->shadow[i].grants_used); in blkfront_setup_indirect()
1911 info->shadow[i].grants_used = NULL; in blkfront_setup_indirect()
1912 kfree(info->shadow[i].sg); in blkfront_setup_indirect()
1913 info->shadow[i].sg = NULL; in blkfront_setup_indirect()
1914 kfree(info->shadow[i].indirect_grants); in blkfront_setup_indirect()
1915 info->shadow[i].indirect_grants = NULL; in blkfront_setup_indirect()
1917 if (!list_empty(&info->indirect_pages)) { in blkfront_setup_indirect()
1919 list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { in blkfront_setup_indirect()
1930 static int blkfront_gather_backend_features(struct blkfront_info *info) in blkfront_gather_backend_features() argument
1936 info->feature_flush = 0; in blkfront_gather_backend_features()
1938 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, in blkfront_gather_backend_features()
1950 info->feature_flush = REQ_FLUSH | REQ_FUA; in blkfront_gather_backend_features()
1955 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, in blkfront_gather_backend_features()
1960 info->feature_flush = REQ_FLUSH; in blkfront_gather_backend_features()
1962 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, in blkfront_gather_backend_features()
1967 blkfront_setup_discard(info); in blkfront_gather_backend_features()
1969 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, in blkfront_gather_backend_features()
1973 info->feature_persistent = 0; in blkfront_gather_backend_features()
1975 info->feature_persistent = persistent; in blkfront_gather_backend_features()
1977 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, in blkfront_gather_backend_features()
1981 info->max_indirect_segments = 0; in blkfront_gather_backend_features()
1983 info->max_indirect_segments = min(indirect_segments, in blkfront_gather_backend_features()
1986 return blkfront_setup_indirect(info); in blkfront_gather_backend_features()
1993 static void blkfront_connect(struct blkfront_info *info) in blkfront_connect() argument
2001 switch (info->connected) { in blkfront_connect()
2007 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, in blkfront_connect()
2013 set_capacity(info->gd, sectors); in blkfront_connect()
2014 revalidate_disk(info->gd); in blkfront_connect()
2024 blkif_recover(info); in blkfront_connect()
2031 dev_dbg(&info->xbdev->dev, "%s:%s.\n", in blkfront_connect()
2032 __func__, info->xbdev->otherend); in blkfront_connect()
2034 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, in blkfront_connect()
2040 xenbus_dev_fatal(info->xbdev, err, in blkfront_connect()
2042 info->xbdev->otherend); in blkfront_connect()
2051 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, in blkfront_connect()
2056 err = blkfront_gather_backend_features(info); in blkfront_connect()
2058 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", in blkfront_connect()
2059 info->xbdev->otherend); in blkfront_connect()
2063 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size, in blkfront_connect()
2066 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", in blkfront_connect()
2067 info->xbdev->otherend); in blkfront_connect()
2071 xenbus_switch_state(info->xbdev, XenbusStateConnected); in blkfront_connect()
2074 spin_lock_irq(&info->io_lock); in blkfront_connect()
2075 info->connected = BLKIF_STATE_CONNECTED; in blkfront_connect()
2076 kick_pending_request_queues(info); in blkfront_connect()
2077 spin_unlock_irq(&info->io_lock); in blkfront_connect()
2079 add_disk(info->gd); in blkfront_connect()
2081 info->is_ready = 1; in blkfront_connect()
2090 struct blkfront_info *info = dev_get_drvdata(&dev->dev); in blkback_changed() local
2098 if (talk_to_blkback(dev, info)) { in blkback_changed()
2099 kfree(info); in blkback_changed()
2111 blkfront_connect(info); in blkback_changed()
2119 if (info) in blkback_changed()
2120 blkfront_closing(info); in blkback_changed()
2127 struct blkfront_info *info = dev_get_drvdata(&xbdev->dev); in blkfront_remove() local
2133 blkif_free(info, 0); in blkfront_remove()
2135 mutex_lock(&info->mutex); in blkfront_remove()
2137 disk = info->gd; in blkfront_remove()
2141 info->xbdev = NULL; in blkfront_remove()
2142 mutex_unlock(&info->mutex); in blkfront_remove()
2145 kfree(info); in blkfront_remove()
2156 info = disk->private_data; in blkfront_remove()
2162 if (info && !bdev->bd_openers) { in blkfront_remove()
2163 xlvbd_release_gendisk(info); in blkfront_remove()
2165 kfree(info); in blkfront_remove()
2176 struct blkfront_info *info = dev_get_drvdata(&dev->dev); in blkfront_is_ready() local
2178 return info->is_ready && info->xbdev; in blkfront_is_ready()
2184 struct blkfront_info *info; in blkif_open() local
2189 info = disk->private_data; in blkif_open()
2190 if (!info) { in blkif_open()
2196 mutex_lock(&info->mutex); in blkif_open()
2198 if (!info->gd) in blkif_open()
2202 mutex_unlock(&info->mutex); in blkif_open()
2211 struct blkfront_info *info = disk->private_data; in blkif_release() local
2231 mutex_lock(&info->mutex); in blkif_release()
2232 xbdev = info->xbdev; in blkif_release()
2237 xlvbd_release_gendisk(info); in blkif_release()
2238 xenbus_frontend_closed(info->xbdev); in blkif_release()
2241 mutex_unlock(&info->mutex); in blkif_release()
2246 xlvbd_release_gendisk(info); in blkif_release()
2248 kfree(info); in blkif_release()