/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct grant {
	grant_ref_t gref;
	unsigned long pfn;
	struct list_head node;
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	struct grant **grants_used;
	struct grant **indirect_grants;
	struct scatterlist *sg;
};

struct split_bio {
	struct bio *bio;
	atomic_t pending;
	int err;
};

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;

/*
 * Maximum number of segments in indirect requests. The actual value used
 * by the frontend driver is the minimum of this value and the value
 * provided by the backend driver.
 */
static unsigned int xen_blkif_max_segments = 32;
module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
MODULE_PARM_DESC(max, "Maximum number of segments in indirect requests (default is 32)");

#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	spinlock_t io_lock;
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	struct list_head grants;
	struct list_head indirect_pages;
	unsigned int persistent_gnts_c;
	unsigned long shadow_free;
	unsigned int feature_flush;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int feature_persistent:1;
	unsigned int max_indirect_segments;
	int is_ready;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */

#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE/sizeof(struct blkif_request_segment))
#define INDIRECT_GREFS(_segs) \
	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)

static int blkfront_setup_indirect(struct blkfront_info *info);

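/*
 * Pop the next free slot off the shadow freelist. The freelist is threaded
 * through the otherwise unused req.u.rw.id fields of the shadow entries.
 */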
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.u.rw.id;
	info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

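/*
 * Return a slot to the shadow freelist. Fails if the id does not match the
 * slot or the slot carries no request, which would indicate a double free.
 */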
static int add_id_to_freelist(struct blkfront_info *info,
			      unsigned long id)
{
	if (info->shadow[id].req.u.rw.id != id)
		return -EINVAL;
	if (info->shadow[id].request == NULL)
		return -EINVAL;
	info->shadow[id].req.u.rw.id = info->shadow_free;
	info->shadow[id].request = NULL;
	info->shadow_free = id;
	return 0;
}

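/*
 * Populate info->grants with 'num' grant tracking structures, each backed
 * by a freshly allocated page when persistent grants are in use. On
 * allocation failure everything added so far is freed again.
 */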
static int fill_grant_buffer(struct blkfront_info *info, int num)
{
	struct page *granted_page;
	struct grant *gnt_list_entry, *n;
	int i = 0;

	while (i < num) {
		gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
		if (!gnt_list_entry)
			goto out_of_memory;

		if (info->feature_persistent) {
			granted_page = alloc_page(GFP_NOIO);
			if (!granted_page) {
				kfree(gnt_list_entry);
				goto out_of_memory;
			}
			gnt_list_entry->pfn = page_to_pfn(granted_page);
		}

		gnt_list_entry->gref = GRANT_INVALID_REF;
		list_add(&gnt_list_entry->node, &info->grants);
		i++;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(gnt_list_entry, n,
				 &info->grants, node) {
		list_del(&gnt_list_entry->node);
		if (info->feature_persistent)
			__free_page(pfn_to_page(gnt_list_entry->pfn));
		kfree(gnt_list_entry);
		i--;
	}
	BUG_ON(i != 0);
	return -ENOMEM;
}

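/*
 * Take a grant from info->grants. A grant that still holds a valid gref is
 * a persistent grant and is reused as-is; otherwise a fresh grant reference
 * is claimed and the backend is granted access to the underlying page.
 */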
static struct grant *get_grant(grant_ref_t *gref_head,
			       unsigned long pfn,
			       struct blkfront_info *info)
{
	struct grant *gnt_list_entry;
	unsigned long buffer_mfn;

	BUG_ON(list_empty(&info->grants));
	gnt_list_entry = list_first_entry(&info->grants, struct grant,
					  node);
	list_del(&gnt_list_entry->node);

	if (gnt_list_entry->gref != GRANT_INVALID_REF) {
		info->persistent_gnts_c--;
		return gnt_list_entry;
	}

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (!info->feature_persistent) {
		BUG_ON(!pfn);
		gnt_list_entry->pfn = pfn;
	}
	buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
	gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
					info->xbdev->otherend_id,
					buffer_mfn, 0);
	return gnt_list_entry;
}

static const char *op_name(int op)
{
	static const char *const names[] = {
		[BLKIF_OP_READ] = "read",
		[BLKIF_OP_WRITE] = "write",
		[BLKIF_OP_WRITE_BARRIER] = "barrier",
		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
		[BLKIF_OP_DISCARD] = "discard" };

	if (op < 0 || op >= ARRAY_SIZE(names))
		return "unknown";

	if (!names[op])
		return "reserved";

	return names[op];
}

static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref, n;
	struct blkif_request_segment *segments = NULL;

	/*
	 * Used to store whether we are able to queue the request by just
	 * using existing persistent grants, or if we have to get new grants,
	 * as there are not sufficiently many free.
	 */
	bool new_persistent_gnts;
	grant_ref_t gref_head;
	struct grant *gnt_list_entry = NULL;
	struct scatterlist *sg;
	int nseg, max_grefs;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	max_grefs = req->nr_phys_segments;
	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
		/*
		 * If we are using indirect segments we need to account
		 * for the indirect grefs used in the request.
		 */
		max_grefs += INDIRECT_GREFS(req->nr_phys_segments);

	/* Check if we have enough grants to allocate a request */
	if (info->persistent_gnts_c < max_grefs) {
		new_persistent_gnts = 1;
		if (gnttab_alloc_grant_references(
		    max_grefs - info->persistent_gnts_c,
		    &gref_head) < 0) {
			gnttab_request_free_callback(
				&info->callback,
				blkif_restart_queue_callback,
				info,
				max_grefs);
			return 1;
		}
	} else
		new_persistent_gnts = 0;

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = req;

	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
		ring_req->operation = BLKIF_OP_DISCARD;
		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
		ring_req->u.discard.id = id;
		ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
		if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
			ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
		else
			ring_req->u.discard.flag = 0;
	} else {
		BUG_ON(info->max_indirect_segments == 0 &&
		       req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
		BUG_ON(info->max_indirect_segments &&
		       req->nr_phys_segments > info->max_indirect_segments);
		nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
		ring_req->u.rw.id = id;
		if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			/*
			 * The indirect operation can only be a BLKIF_OP_READ or
			 * BLKIF_OP_WRITE
			 */
			BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
			ring_req->operation = BLKIF_OP_INDIRECT;
			ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
				BLKIF_OP_WRITE : BLKIF_OP_READ;
			ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
			ring_req->u.indirect.handle = info->handle;
			ring_req->u.indirect.nr_segments = nseg;
		} else {
			ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
			ring_req->u.rw.handle = info->handle;
			ring_req->operation = rq_data_dir(req) ?
				BLKIF_OP_WRITE : BLKIF_OP_READ;
			if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
474 /*
475 * Ideally we can do an unordered flush-to-disk. In case the
476 * backend onlysupports barriers, use that. A barrier request
477 * a superset of FUA, so we can implement it the same
478 * way. (It's also a FLUSH+FUA, since it is
479 * guaranteed ordered WRT previous writes.)
480 */
481 switch (info->feature_flush &
482 ((REQ_FLUSH|REQ_FUA))) {
483 case REQ_FLUSH|REQ_FUA:
484 ring_req->operation =
485 BLKIF_OP_WRITE_BARRIER;
486 break;
487 case REQ_FLUSH:
488 ring_req->operation =
489 BLKIF_OP_FLUSH_DISKCACHE;
490 break;
491 default:
492 ring_req->operation = 0;
493 }
494 }
			ring_req->u.rw.nr_segments = nseg;
		}
		for_each_sg(info->shadow[id].sg, sg, nseg, i) {
			fsect = sg->offset >> 9;
			lsect = fsect + (sg->length >> 9) - 1;

			if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
			    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
				unsigned long uninitialized_var(pfn);

				if (segments)
					kunmap_atomic(segments);

				n = i / SEGS_PER_INDIRECT_FRAME;
				if (!info->feature_persistent) {
					struct page *indirect_page;

					/* Fetch a pre-allocated page to use for indirect grefs */
					BUG_ON(list_empty(&info->indirect_pages));
					indirect_page = list_first_entry(&info->indirect_pages,
									 struct page, lru);
					list_del(&indirect_page->lru);
					pfn = page_to_pfn(indirect_page);
				}
				gnt_list_entry = get_grant(&gref_head, pfn, info);
				info->shadow[id].indirect_grants[n] = gnt_list_entry;
				segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
				ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
			}

			gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
			ref = gnt_list_entry->gref;

			info->shadow[id].grants_used[i] = gnt_list_entry;

			if (rq_data_dir(req) && info->feature_persistent) {
				char *bvec_data;
				void *shared_data;

				BUG_ON(sg->offset + sg->length > PAGE_SIZE);

				shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
				bvec_data = kmap_atomic(sg_page(sg));

				/*
				 * This does not wipe data stored outside the
				 * range sg->offset..sg->offset+sg->length.
				 * Therefore, blkback *could* see data from
				 * previous requests. This is OK as long as
				 * persistent grants are shared with just one
				 * domain. It may need refactoring if this
				 * changes.
				 */
				memcpy(shared_data + sg->offset,
				       bvec_data + sg->offset,
				       sg->length);

				kunmap_atomic(bvec_data);
				kunmap_atomic(shared_data);
			}
			if (ring_req->operation != BLKIF_OP_INDIRECT) {
				ring_req->u.rw.seg[i] =
					(struct blkif_request_segment) {
						.gref       = ref,
						.first_sect = fsect,
						.last_sect  = lsect };
			} else {
				n = i % SEGS_PER_INDIRECT_FRAME;
				segments[n] =
					(struct blkif_request_segment) {
						.gref       = ref,
						.first_sect = fsect,
						.last_sect  = lsect };
			}
		}
		if (segments)
			kunmap_atomic(segments);
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	if (new_persistent_gnts)
		gnttab_free_grant_references(gref_head);

	return 0;
}

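/* Push queued requests to the shared ring and notify the backend if needed. */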
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

static inline bool blkif_request_flush_invalid(struct request *req,
					       struct blkfront_info *info)
{
	return ((req->cmd_type != REQ_TYPE_FS) ||
		((req->cmd_flags & REQ_FLUSH) &&
		 !(info->feature_flush & REQ_FLUSH)) ||
		((req->cmd_flags & REQ_FUA) &&
		 !(info->feature_flush & REQ_FUA)));
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (blkif_request_flush_invalid(req, info)) {
			__blk_end_request_all(req, -EOPNOTSUPP);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}

static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
				unsigned int physical_sector_size,
				unsigned int segments)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	rq = blk_init_queue(do_blkif_request, &info->io_lock);
	if (rq == NULL)
		return -1;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_physical_block_size(rq, physical_sector_size);
	blk_queue_max_hw_sectors(rq, (segments * PAGE_SIZE) / 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, segments);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}

static const char *flush_info(unsigned int feature_flush)
{
	switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
	case REQ_FLUSH|REQ_FUA:
		return "barrier: enabled;";
	case REQ_FLUSH:
		return "flush diskcache: enabled;";
	default:
		return "barrier or flush: disabled;";
	}
}

static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_flush(info->rq, info->feature_flush);
	pr_info("blkfront: %s: %s %s %s %s %s\n",
		info->gd->disk_name, flush_info(info->feature_flush),
		"persistent grants:", info->feature_persistent ?
		"enabled;" : "disabled;", "indirect descriptors:",
		info->max_indirect_segments ? "enabled;" : "disabled;");
}

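/*
 * Translate an emulated IDE/SCSI device number into the xvd minor and disk
 * name offset, so emulated and paravirtualised disks share one name space.
 */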
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;
	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);
	switch (major) {
	case XEN_IDE0_MAJOR:
		*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = ((*minor / 64) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_IDE1_MAJOR:
		*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK0_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK1_MAJOR:
	case XEN_SCSI_DISK2_MAJOR:
	case XEN_SCSI_DISK3_MAJOR:
	case XEN_SCSI_DISK4_MAJOR:
	case XEN_SCSI_DISK5_MAJOR:
	case XEN_SCSI_DISK6_MAJOR:
	case XEN_SCSI_DISK7_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK8_MAJOR:
	case XEN_SCSI_DISK9_MAJOR:
	case XEN_SCSI_DISK10_MAJOR:
	case XEN_SCSI_DISK11_MAJOR:
	case XEN_SCSI_DISK12_MAJOR:
	case XEN_SCSI_DISK13_MAJOR:
	case XEN_SCSI_DISK14_MAJOR:
	case XEN_SCSI_DISK15_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XENVBD_MAJOR:
		*offset = *minor / PARTS_PER_DISK;
		break;
	default:
		printk(KERN_WARNING "blkfront: your disk configuration is "
				"incorrect, please use an xvd device instead\n");
		return -ENODEV;
	}
	return 0;
}

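/* Encode a disk index as base-26 letters: 0 -> "a", 25 -> "z", 26 -> "aa". */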
static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size,
			       unsigned int physical_sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;
	char *ptr;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
					"emulated IDE disks,\n\t choose an xvd device name "
					"from xvde on\n", info->vdevice);
	}
	if (minor >> MINORBITS) {
		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
			info->vdevice, minor);
		return -ENODEV;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	strcpy(gd->disk_name, DEV_NAME);
	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
	if (nr_minors > 1)
		*ptr = 0;
	else
		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
			 "%d", minor & (nr_parts - 1));

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
				 info->max_indirect_segments ? :
				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

release:
	xlbd_release_minors(minor, nr_minors);
out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	unsigned long flags;

	if (info->rq == NULL)
		return;

	spin_lock_irqsave(&info->io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&info->io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&info->work);

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&info->io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);
}

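/*
 * Tear down the connection to the backend: stop the request queue, release
 * all grants and shadow state, free the shared ring and unbind the event
 * channel. With 'suspend' set, the state is marked suspended for recovery.
 */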
static void blkif_free(struct blkfront_info *info, int suspend)
{
	struct grant *persistent_gnt;
	struct grant *n;
	int i, j, segs;

	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&info->io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);

	/* Remove all persistent grants */
	if (!list_empty(&info->grants)) {
		list_for_each_entry_safe(persistent_gnt, n,
					 &info->grants, node) {
			list_del(&persistent_gnt->node);
			if (persistent_gnt->gref != GRANT_INVALID_REF) {
				gnttab_end_foreign_access(persistent_gnt->gref,
							  0, 0UL);
				info->persistent_gnts_c--;
			}
			if (info->feature_persistent)
				__free_page(pfn_to_page(persistent_gnt->pfn));
			kfree(persistent_gnt);
		}
	}
	BUG_ON(info->persistent_gnts_c != 0);

	/*
	 * Remove indirect pages; this only happens when using indirect
	 * descriptors but not persistent grants.
	 */
	if (!list_empty(&info->indirect_pages)) {
		struct page *indirect_page, *n;

		BUG_ON(info->feature_persistent);
		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}

	for (i = 0; i < BLK_RING_SIZE; i++) {
		/*
		 * Clear persistent grants present in requests already
		 * on the shared ring
		 */
		if (!info->shadow[i].request)
			goto free_shadow;

		segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
		       info->shadow[i].req.u.indirect.nr_segments :
		       info->shadow[i].req.u.rw.nr_segments;
		for (j = 0; j < segs; j++) {
			persistent_gnt = info->shadow[i].grants_used[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			if (info->feature_persistent)
				__free_page(pfn_to_page(persistent_gnt->pfn));
			kfree(persistent_gnt);
		}

		if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT)
			/*
			 * If this is not an indirect operation don't try to
			 * free indirect segments
			 */
			goto free_shadow;

		for (j = 0; j < INDIRECT_GREFS(segs); j++) {
			persistent_gnt = info->shadow[i].indirect_grants[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			__free_page(pfn_to_page(persistent_gnt->pfn));
			kfree(persistent_gnt);
		}

free_shadow:
		kfree(info->shadow[i].grants_used);
		info->shadow[i].grants_used = NULL;
		kfree(info->shadow[i].indirect_grants);
		info->shadow[i].indirect_grants = NULL;
		kfree(info->shadow[i].sg);
		info->shadow[i].sg = NULL;
	}

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&info->io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&info->work);

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}

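/*
 * Complete a finished request: for persistent-grant reads, copy the data
 * back from the shared pages into the bvecs, then recycle the grants the
 * request used (grants still mapped by the backend go to the head of the
 * list for reuse; unmapped ones are revoked and go to the tail).
 */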
static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
			     struct blkif_response *bret)
{
	int i = 0;
	struct scatterlist *sg;
	char *bvec_data;
	void *shared_data;
	int nseg;

	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;

	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
		/*
		 * Copy the data received from the backend into the bvec.
		 * Since bv_offset can be different from 0, and bv_len different
		 * from PAGE_SIZE, we have to keep track of the current offset,
		 * to be sure we are copying the data from the right shared page.
		 */
		for_each_sg(s->sg, sg, nseg, i) {
			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
			shared_data = kmap_atomic(
				pfn_to_page(s->grants_used[i]->pfn));
			bvec_data = kmap_atomic(sg_page(sg));
			memcpy(bvec_data + sg->offset,
			       shared_data + sg->offset,
			       sg->length);
			kunmap_atomic(bvec_data);
			kunmap_atomic(shared_data);
		}
	}
	/* Add the persistent grant into the list of free grants */
	for (i = 0; i < nseg; i++) {
		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
			/*
			 * If the grant is still mapped by the backend (the
			 * backend has chosen to make this grant persistent)
			 * we add it at the head of the list, so it will be
			 * reused first.
			 */
			if (!info->feature_persistent)
				pr_alert_ratelimited("backend has not unmapped grant: %u\n",
						     s->grants_used[i]->gref);
			list_add(&s->grants_used[i]->node, &info->grants);
			info->persistent_gnts_c++;
		} else {
			/*
			 * If the grant is not mapped by the backend we end the
			 * foreign access and add it to the tail of the list,
			 * so it will not be picked again unless we run out of
			 * persistent grants.
			 */
			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
			s->grants_used[i]->gref = GRANT_INVALID_REF;
			list_add_tail(&s->grants_used[i]->node, &info->grants);
		}
	}
	if (s->req.operation == BLKIF_OP_INDIRECT) {
		for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
				if (!info->feature_persistent)
					pr_alert_ratelimited("backend has not unmapped grant: %u\n",
							     s->indirect_grants[i]->gref);
				list_add(&s->indirect_grants[i]->node, &info->grants);
				info->persistent_gnts_c++;
			} else {
				struct page *indirect_page;

				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
				/*
				 * Add the used indirect page back to the list of
				 * available pages for indirect grefs.
				 */
				if (!info->feature_persistent) {
					indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
					list_add(&indirect_page->lru, &info->indirect_pages);
				}
				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
			}
		}
	}
}

static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int error;

	spin_lock_irqsave(&info->io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&info->io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id = bret->id;
		/*
		 * The backend has messed up and given us an id that we would
		 * never have given to it (we stamp it up to BLK_RING_SIZE -
		 * look in get_id_from_freelist).
		 */
		if (id >= BLK_RING_SIZE) {
			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			/* We can't safely get the 'struct request' as
			 * the id is busted. */
			continue;
		}
		req = info->shadow[id].request;

		if (bret->operation != BLKIF_OP_DISCARD)
			blkif_completion(&info->shadow[id], info, bret);

		if (add_id_to_freelist(info, id)) {
			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			continue;
		}

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_DISCARD:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				struct request_queue *rq = info->rq;
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				error = -EOPNOTSUPP;
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
			}
			__blk_end_request_all(req, error);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				error = -EOPNOTSUPP;
			}
			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
				     info->shadow[id].req.u.rw.nr_segments == 0)) {
				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				error = -EOPNOTSUPP;
			}
			if (unlikely(error)) {
				if (error == -EOPNOTSUPP)
					error = 0;
				info->feature_flush = 0;
				xlvbd_flush(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			__blk_end_request_all(req, error);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&info->io_lock, flags);

	return IRQ_HANDLED;
}

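/* Allocate and grant the shared ring page, and bind the event channel. */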
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	grant_ref_t gref;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, info->ring.sring, 1, &gref);
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = gref;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0,
					"blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "feature-persistent", "%u", 1);
	if (err)
		dev_warn(&dev->dev,
			 "writing persistent grants feature to xenbus");

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
destroy_blkring:
	blkif_free(info, 0);
out:
	return err;
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	if (xen_hvm_domain()) {
		char *type;
		int len;
		/* no unplug has been done: do not hook devices != xen vbds */
		if (xen_has_pv_and_legacy_disk_devices()) {
			int major;

			if (!VDEV_IS_EXTENDED(vdevice))
				major = BLKIF_MAJOR(vdevice);
			else
				major = XENVBD_MAJOR;

			if (major != XENVBD_MAJOR) {
				printk(KERN_INFO
				       "%s: HVM does not support vbd %d as xen block device\n",
				       __func__, vdevice);
				return -ENODEV;
			}
		}
		/* do not create a PV cdrom device if we are an HVM guest */
		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
		if (IS_ERR(type))
			return -ENODEV;
		if (strncmp(type, "cdrom", 5) == 0) {
			kfree(type);
			return -ENODEV;
		}
		kfree(type);
	}
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	mutex_init(&info->mutex);
	spin_lock_init(&info->io_lock);
	info->xbdev = dev;
	info->vdevice = vdevice;
	INIT_LIST_HEAD(&info->grants);
	INIT_LIST_HEAD(&info->indirect_pages);
	info->persistent_gnts_c = 0;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	err = talk_to_blkback(dev, info);
	if (err) {
		kfree(info);
		dev_set_drvdata(&dev->dev, NULL);
		return err;
	}

	return 0;
}

static void split_bio_end(struct bio *bio, int error)
{
	struct split_bio *split_bio = bio->bi_private;

	if (error)
		split_bio->err = error;

	if (atomic_dec_and_test(&split_bio->pending)) {
		split_bio->bio->bi_phys_segments = 0;
		bio_endio(split_bio->bio, split_bio->err);
		kfree(split_bio);
	}
	bio_put(bio);
}

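/*
 * Re-drive I/O after a resume: requeue whole flush/discard requests, and
 * re-submit the bios of in-flight data requests, splitting any bio that
 * now has more segments than the newly negotiated limit allows.
 */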
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct request *req, *n;
	struct blk_shadow *copy;
	int rc;
	struct bio *bio, *cloned_bio;
	struct bio_list bio_list, merge_bio;
	unsigned int segs, offset;
	int pending, size;
	struct split_bio *split_bio;
	struct list_head requests;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmemdup(info->shadow, sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;

	rc = blkfront_setup_indirect(info);
	if (rc) {
		kfree(copy);
		return rc;
	}

	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
	blk_queue_max_segments(info->rq, segs);
	bio_list_init(&bio_list);
	INIT_LIST_HEAD(&requests);
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (!copy[i].request)
			continue;

		/*
		 * Get the bios in the request so we can re-queue them.
		 */
		if (copy[i].request->cmd_flags &
		    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
			/*
			 * Flush operations don't contain bios, so
			 * we need to requeue the whole request
			 */
			list_add(&copy[i].request->queuelist, &requests);
			continue;
		}
		merge_bio.head = copy[i].request->bio;
		merge_bio.tail = copy[i].request->biotail;
		bio_list_merge(&bio_list, &merge_bio);
		copy[i].request->bio = NULL;
		blk_end_request_all(copy[i].request, 0);
	}

	kfree(copy);

	/*
	 * Empty the queue, this is important because we might have
	 * requests in the queue with more segments than what we
	 * can handle now.
	 */
	spin_lock_irq(&info->io_lock);
	while ((req = blk_fetch_request(info->rq)) != NULL) {
		if (req->cmd_flags &
		    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
			list_add(&req->queuelist, &requests);
			continue;
		}
		merge_bio.head = req->bio;
		merge_bio.tail = req->biotail;
		bio_list_merge(&bio_list, &merge_bio);
		req->bio = NULL;
		if (req->cmd_flags & (REQ_FLUSH | REQ_FUA))
			pr_alert("diskcache flush request found!\n");
		__blk_end_request_all(req, 0);
	}
	spin_unlock_irq(&info->io_lock);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&info->io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	list_for_each_entry_safe(req, n, &requests, queuelist) {
		/* Requeue pending requests (flush or discard) */
		list_del_init(&req->queuelist);
		BUG_ON(req->nr_phys_segments > segs);
		blk_requeue_request(info->rq, req);
	}
	spin_unlock_irq(&info->io_lock);

	while ((bio = bio_list_pop(&bio_list)) != NULL) {
		/* Traverse the list of pending bios and re-queue them */
		if (bio_segments(bio) > segs) {
			/*
			 * This bio has more segments than what we can
			 * handle, we have to split it.
			 */
			pending = (bio_segments(bio) + segs - 1) / segs;
			split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
			BUG_ON(split_bio == NULL);
			atomic_set(&split_bio->pending, pending);
			split_bio->bio = bio;
			for (i = 0; i < pending; i++) {
				offset = (i * segs * PAGE_SIZE) >> 9;
				size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
					   (unsigned int)bio_sectors(bio) - offset);
				cloned_bio = bio_clone(bio, GFP_NOIO);
				BUG_ON(cloned_bio == NULL);
				bio_trim(cloned_bio, offset, size);
				cloned_bio->bi_private = split_bio;
				cloned_bio->bi_end_io = split_bio_end;
				submit_bio(cloned_bio->bi_rw, cloned_bio);
			}
			/*
			 * Now we have to wait for all those smaller bios to
			 * end, so we can also end the "parent" bio.
			 */
			continue;
		}
		/* We don't need to split this bio */
		submit_bio(bio->bi_rw, bio);
	}

	return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);

	/*
	 * We have to wait for the backend to switch to
	 * connected state, since we want to read which
	 * features it supports.
	 */

	return err;
}

static void
blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (!bdev) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}

static void blkfront_setup_discard(struct blkfront_info *info)
{
	int err;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int discard_secure;

	info->feature_discard = 1;
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "discard-granularity", "%u", &discard_granularity,
			    "discard-alignment", "%u", &discard_alignment,
			    NULL);
	if (!err) {
		info->discard_granularity = discard_granularity;
		info->discard_alignment = discard_alignment;
	}
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "discard-secure", "%d", &discard_secure,
			    NULL);
	if (!err)
		info->feature_secdiscard = !!discard_secure;
}

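/*
 * Negotiate indirect descriptor support with the backend and allocate the
 * per-ring-slot grant, scatterlist and indirect-grant arrays (plus, without
 * persistent grants, the pages used for mapping indirect grefs).
 */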
static int blkfront_setup_indirect(struct blkfront_info *info)
{
	unsigned int indirect_segments, segs;
	int err, i;

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-max-indirect-segments", "%u", &indirect_segments,
			    NULL);
	if (err) {
		info->max_indirect_segments = 0;
		segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
	} else {
		info->max_indirect_segments = min(indirect_segments,
						  xen_blkif_max_segments);
		segs = info->max_indirect_segments;
	}

	err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE);
	if (err)
		goto out_of_memory;

	if (!info->feature_persistent && info->max_indirect_segments) {
		/*
		 * We are using indirect descriptors but not persistent
		 * grants, so we need to allocate a set of pages that can
		 * be used for mapping indirect grefs.
		 */
		int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;

		BUG_ON(!list_empty(&info->indirect_pages));
		for (i = 0; i < num; i++) {
			struct page *indirect_page = alloc_page(GFP_NOIO);
			if (!indirect_page)
				goto out_of_memory;
			list_add(&indirect_page->lru, &info->indirect_pages);
		}
	}

	for (i = 0; i < BLK_RING_SIZE; i++) {
		info->shadow[i].grants_used = kzalloc(
			sizeof(info->shadow[i].grants_used[0]) * segs,
			GFP_NOIO);
		info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO);
		if (info->max_indirect_segments)
			info->shadow[i].indirect_grants = kzalloc(
				sizeof(info->shadow[i].indirect_grants[0]) *
				INDIRECT_GREFS(segs),
				GFP_NOIO);
		if ((info->shadow[i].grants_used == NULL) ||
		    (info->shadow[i].sg == NULL) ||
		    (info->max_indirect_segments &&
		     (info->shadow[i].indirect_grants == NULL)))
			goto out_of_memory;
		sg_init_table(info->shadow[i].sg, segs);
	}

	return 0;

out_of_memory:
	for (i = 0; i < BLK_RING_SIZE; i++) {
		kfree(info->shadow[i].grants_used);
		info->shadow[i].grants_used = NULL;
		kfree(info->shadow[i].sg);
		info->shadow[i].sg = NULL;
		kfree(info->shadow[i].indirect_grants);
		info->shadow[i].indirect_grants = NULL;
	}
	if (!list_empty(&info->indirect_pages)) {
		struct page *indirect_page, *n;
		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}
	return -ENOMEM;
}
1762
1763 /*
1764 * Invoked when the backend is finally 'ready' (and has told produced
1765 * the details about the physical device - #sectors, size, etc).
1766 */
blkfront_connect(struct blkfront_info * info)1767 static void blkfront_connect(struct blkfront_info *info)
1768 {
1769 unsigned long long sectors;
1770 unsigned long sector_size;
1771 unsigned int physical_sector_size;
1772 unsigned int binfo;
1773 int err;
1774 int barrier, flush, discard, persistent;
1775
1776 switch (info->connected) {
1777 case BLKIF_STATE_CONNECTED:
1778 /*
1779 * Potentially, the back-end may be signalling
1780 * a capacity change; update the capacity.
1781 */
1782 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1783 "sectors", "%Lu", §ors);
1784 if (XENBUS_EXIST_ERR(err))
1785 return;
1786 printk(KERN_INFO "Setting capacity to %Lu\n",
1787 sectors);
1788 set_capacity(info->gd, sectors);
1789 revalidate_disk(info->gd);
1790
1791 return;
1792 case BLKIF_STATE_SUSPENDED:
1793 /*
1794 * If we are recovering from suspension, we need to wait
1795 * for the backend to announce it's features before
1796 * reconnecting, at least we need to know if the backend
1797 * supports indirect descriptors, and how many.
1798 */
1799 blkif_recover(info);
1800 return;
1801
1802 default:
1803 break;
1804 }
1805
1806 dev_dbg(&info->xbdev->dev, "%s:%s.\n",
1807 __func__, info->xbdev->otherend);
1808
1809 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1810 "sectors", "%llu", §ors,
1811 "info", "%u", &binfo,
1812 "sector-size", "%lu", §or_size,
1813 NULL);
1814 if (err) {
1815 xenbus_dev_fatal(info->xbdev, err,
1816 "reading backend fields at %s",
1817 info->xbdev->otherend);
1818 return;
1819 }
1820
1821 /*
1822 * physcial-sector-size is a newer field, so old backends may not
1823 * provide this. Assume physical sector size to be the same as
1824 * sector_size in that case.
1825 */
1826 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1827 "physical-sector-size", "%u", &physical_sector_size);
1828 if (err != 1)
1829 physical_sector_size = sector_size;
1830
	info->feature_flush = 0;

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%d", &barrier,
			    NULL);

	/*
	 * If "feature-barrier" is not defined, we are dealing with a
	 * very old backend which writes synchronously; there is nothing
	 * to do.
	 *
	 * If barriers are supported, map them onto flush requests.
	 */
	if (!err && barrier)
		info->feature_flush = REQ_FLUSH | REQ_FUA;
	/*
	 * If "feature-flush-cache" is also advertised, prefer it over
	 * barriers.
	 */
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-flush-cache", "%d", &flush,
			    NULL);

	if (!err && flush)
		info->feature_flush = REQ_FLUSH;
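	/*
	 * A flush-only backend offers no FUA primitive, so only REQ_FLUSH
	 * is advertised here; the block layer then emulates REQ_FUA with
	 * a post-write flush.
	 */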

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-discard", "%d", &discard,
			    NULL);

	if (!err && discard)
		blkfront_setup_discard(info);

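	/*
	 * With persistent grants the same pages stay granted to the
	 * backend across requests, avoiding a grant map/unmap cycle on
	 * every I/O.
	 */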
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-persistent", "%u", &persistent,
			    NULL);
	if (err)
		info->feature_persistent = 0;
	else
		info->feature_persistent = persistent;

	err = blkfront_setup_indirect(info);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
				 info->xbdev->otherend);
		return;
	}

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
				  physical_sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&info->io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}

/**
 * blkback_changed - callback received when the backend's state changes
 * @dev: the frontend xenbus device
 * @backend_state: the new state of the backend
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's Closing state -- fallthrough */
	case XenbusStateClosing:
		if (info)
			blkfront_closing(info);
		break;
	}
}

static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

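	/* Disconnect from the backend and free the ring and grant resources. */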
	blkif_free(info, 0);

	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */

	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}

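/*
 * Tells the xenbus core whether the device is fully set up, e.g. when
 * waiting for a boot disk to appear.
 */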
static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}

static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	mutex_lock(&blkfront_mutex);

	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_lock(&info->mutex);

	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;

	mutex_unlock(&info->mutex);

out:
	mutex_unlock(&blkfront_mutex);
	return err;
}

static void blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev;
	struct xenbus_device *xbdev;

	mutex_lock(&blkfront_mutex);

	bdev = bdget_disk(disk, 0);

	if (!bdev) {
		WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
		goto out_mutex;
	}
	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */

	mutex_lock(&info->mutex);
	xbdev = info->xbdev;

	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}

	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

out:
	bdput(bdev);
out_mutex:
	mutex_unlock(&blkfront_mutex);
}

static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
};

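/* Match xenbus devices of type "vbd"; the empty entry terminates the table. */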
static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver blkfront_driver = {
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
};

static int __init xlblk_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_disk_devices())
		return -ENODEV;

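	/*
	 * Reserve the block major up front; the individual disks are only
	 * added once each backend connects.
	 */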
	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	ret = xenbus_register_frontend(&blkfront_driver);
	if (ret) {
		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
		return ret;
	}

	return 0;
}
module_init(xlblk_init);

static void __exit xlblk_exit(void)
{
	xenbus_unregister_driver(&blkfront_driver);
	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
	kfree(minors);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");