drivers/block/xen-blkback/blkback.c


DEFINITIONS

This source file includes the following definitions:
  1. persistent_gnt_timeout
  2. get_free_page
  3. put_free_pages
  4. shrink_free_pagepool
  5. add_persistent_gnt
  6. get_persistent_gnt
  7. put_persistent_gnt
  8. free_persistent_gnts
  9. xen_blkbk_unmap_purged_grants
  10. purge_persistent_gnt
  11. alloc_req
  12. free_req
  13. xen_vbd_translate
  14. xen_vbd_resize
  15. blkif_notify_work
  16. xen_blkif_be_int
  17. print_stats
  18. xen_blkif_schedule
  19. xen_blkbk_free_caches
  20. xen_blkbk_unmap_prepare
  21. xen_blkbk_unmap_and_respond_callback
  22. xen_blkbk_unmap_and_respond
  23. xen_blkbk_unmap
  24. xen_blkbk_map
  25. xen_blkbk_map_seg
  26. xen_blkbk_parse_indirect
  27. dispatch_discard_io
  28. dispatch_other_io
  29. xen_blk_drain_io
  30. __end_block_io_op
  31. end_block_io_op
  32. __do_block_io_op
  33. do_block_io_op
  34. dispatch_rw_block_io
  35. make_response
  36. xen_blkif_init

   1 /******************************************************************************
   2  *
   3  * Back-end of the driver for virtual block devices. This portion of the
   4  * driver exports a 'unified' block-device interface that can be accessed
   5  * by any operating system that implements a compatible front end. A
   6  * reference front-end implementation can be found in:
   7  *  drivers/block/xen-blkfront.c
   8  *
   9  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
  10  * Copyright (c) 2005, Christopher Clark
  11  *
  12  * This program is free software; you can redistribute it and/or
  13  * modify it under the terms of the GNU General Public License version 2
  14  * as published by the Free Software Foundation; or, when distributed
  15  * separately from the Linux kernel or incorporated into other
  16  * software packages, subject to the following license:
  17  *
  18  * Permission is hereby granted, free of charge, to any person obtaining a copy
  19  * of this source file (the "Software"), to deal in the Software without
  20  * restriction, including without limitation the rights to use, copy, modify,
  21  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  22  * and to permit persons to whom the Software is furnished to do so, subject to
  23  * the following conditions:
  24  *
  25  * The above copyright notice and this permission notice shall be included in
  26  * all copies or substantial portions of the Software.
  27  *
  28  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  29  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  30  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  31  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  32  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  33  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  34  * IN THE SOFTWARE.
  35  */
  36 
  37 #define pr_fmt(fmt) "xen-blkback: " fmt
  38 
  39 #include <linux/spinlock.h>
  40 #include <linux/kthread.h>
  41 #include <linux/list.h>
  42 #include <linux/delay.h>
  43 #include <linux/freezer.h>
  44 #include <linux/bitmap.h>
  45 
  46 #include <xen/events.h>
  47 #include <xen/page.h>
  48 #include <xen/xen.h>
  49 #include <asm/xen/hypervisor.h>
  50 #include <asm/xen/hypercall.h>
  51 #include <xen/balloon.h>
  52 #include <xen/grant_table.h>
  53 #include "common.h"
  54 
  55 /*
  56  * Maximum number of unused free pages to keep in the internal buffer.
  57  * Setting this to a value too low will reduce memory used in each backend,
  58  * but can have a performance penalty.
  59  *
  60  * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can
  61  * be set to a lower value that might degrade performance on some intensive
  62  * IO workloads.
  63  */
  64 
  65 static int xen_blkif_max_buffer_pages = 1024;
  66 module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
  67 MODULE_PARM_DESC(max_buffer_pages,
  68 "Maximum number of free pages to keep in each block backend buffer");
  69 
  70 /*
  71  * Maximum number of grants to map persistently in blkback. For maximum
   72  * performance this should be the total number of grants that can be used
   73  * to fill the ring, but since this might become too high, especially with
  74  * the use of indirect descriptors, we set it to a value that provides good
  75  * performance without using too much memory.
  76  *
   77  * When the list of persistent grants is full we clean it up using an LRU
  78  * algorithm.
  79  */
  80 
  81 static int xen_blkif_max_pgrants = 1056;
  82 module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
  83 MODULE_PARM_DESC(max_persistent_grants,
  84                  "Maximum number of grants to map persistently");
  85 
  86 /*
  87  * How long a persistent grant is allowed to remain allocated without being in
  88  * use. The time is in seconds, 0 means indefinitely long.
  89  */
  90 
  91 static unsigned int xen_blkif_pgrant_timeout = 60;
  92 module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
  93                    uint, 0644);
  94 MODULE_PARM_DESC(persistent_grant_unused_seconds,
  95                  "Time in seconds an unused persistent grant is allowed to "
  96                  "remain allocated. Default is 60, 0 means unlimited.");
  97 
  98 /*
   99  * Maximum number of rings/queues blkback supports; allow as many queues as there
  100  * are CPUs if the user has not specified a value.
 101  */
 102 unsigned int xenblk_max_queues;
 103 module_param_named(max_queues, xenblk_max_queues, uint, 0644);
 104 MODULE_PARM_DESC(max_queues,
  105                  "Maximum number of hardware queues per virtual disk. " \
 106                  "By default it is the number of online CPUs.");
 107 
 108 /*
 109  * Maximum order of pages to be used for the shared ring between front and
 110  * backend, 4KB page granularity is used.
 111  */
 112 unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
 113 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
 114 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
 115 /*
 116  * The LRU mechanism to clean the lists of persistent grants needs to
 117  * be executed periodically. The time interval between consecutive executions
 118  * of the purge mechanism is set in ms.
 119  */
 120 #define LRU_INTERVAL 100
 121 
 122 /*
 123  * When the persistent grants list is full we will remove unused grants
  124  * from the list. LRU_PERCENT_CLEAN is the percentage of grants removed on
  125  * each LRU execution.
 126  */
 127 #define LRU_PERCENT_CLEAN 5
 128 
  129 /* Run-time switchable: /sys/module/xen_blkback/parameters/ */
 130 static unsigned int log_stats;
 131 module_param(log_stats, int, 0644);
 132 
 133 #define BLKBACK_INVALID_HANDLE (~0)
 134 
 135 /* Number of free pages to remove on each call to gnttab_free_pages */
 136 #define NUM_BATCH_FREE_PAGES 10
 137 
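      /*
       * True when a persistent grant has gone unused for longer than the
       * configured timeout; a timeout of 0 disables expiry.
       */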
 138 static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
 139 {
 140         return xen_blkif_pgrant_timeout &&
 141                (jiffies - persistent_gnt->last_used >=
 142                 HZ * xen_blkif_pgrant_timeout);
 143 }
 144 
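      /*
       * Hand out one page from the per-ring pool of free pages, falling back
       * to gnttab_alloc_pages() when the pool is empty.
       */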
 145 static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
 146 {
 147         unsigned long flags;
 148 
 149         spin_lock_irqsave(&ring->free_pages_lock, flags);
 150         if (list_empty(&ring->free_pages)) {
 151                 BUG_ON(ring->free_pages_num != 0);
 152                 spin_unlock_irqrestore(&ring->free_pages_lock, flags);
 153                 return gnttab_alloc_pages(1, page);
 154         }
 155         BUG_ON(ring->free_pages_num == 0);
 156         page[0] = list_first_entry(&ring->free_pages, struct page, lru);
 157         list_del(&page[0]->lru);
 158         ring->free_pages_num--;
 159         spin_unlock_irqrestore(&ring->free_pages_lock, flags);
 160 
 161         return 0;
 162 }
 163 
 164 static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
 165                                   int num)
 166 {
 167         unsigned long flags;
 168         int i;
 169 
 170         spin_lock_irqsave(&ring->free_pages_lock, flags);
 171         for (i = 0; i < num; i++)
 172                 list_add(&page[i]->lru, &ring->free_pages);
 173         ring->free_pages_num += num;
 174         spin_unlock_irqrestore(&ring->free_pages_lock, flags);
 175 }
 176 
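      /*
       * Trim the pool of free pages down to 'num' entries, returning the
       * excess to the grant table subsystem in batches of NUM_BATCH_FREE_PAGES.
       */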
 177 static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
 178 {
 179         /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
 180         struct page *page[NUM_BATCH_FREE_PAGES];
 181         unsigned int num_pages = 0;
 182         unsigned long flags;
 183 
 184         spin_lock_irqsave(&ring->free_pages_lock, flags);
 185         while (ring->free_pages_num > num) {
 186                 BUG_ON(list_empty(&ring->free_pages));
 187                 page[num_pages] = list_first_entry(&ring->free_pages,
 188                                                    struct page, lru);
 189                 list_del(&page[num_pages]->lru);
 190                 ring->free_pages_num--;
 191                 if (++num_pages == NUM_BATCH_FREE_PAGES) {
 192                         spin_unlock_irqrestore(&ring->free_pages_lock, flags);
 193                         gnttab_free_pages(num_pages, page);
 194                         spin_lock_irqsave(&ring->free_pages_lock, flags);
 195                         num_pages = 0;
 196                 }
 197         }
 198         spin_unlock_irqrestore(&ring->free_pages_lock, flags);
 199         if (num_pages != 0)
 200                 gnttab_free_pages(num_pages, page);
 201 }
 202 
 203 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
 204 
 205 static int do_block_io_op(struct xen_blkif_ring *ring);
 206 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 207                                 struct blkif_request *req,
 208                                 struct pending_req *pending_req);
 209 static void make_response(struct xen_blkif_ring *ring, u64 id,
 210                           unsigned short op, int st);
 211 
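      /*
       * Iterate over the red-black tree of grants in sorted order while
       * allowing the current node to be erased: the successor is fetched
       * before the loop body runs.
       */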
 212 #define foreach_grant_safe(pos, n, rbtree, node) \
 213         for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
 214              (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
 215              &(pos)->node != NULL; \
 216              (pos) = container_of(n, typeof(*(pos)), node), \
 217              (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
 218 
 219 
 220 /*
 221  * We don't need locking around the persistent grant helpers
  222  * because blkback uses a single thread for each backend, so we
  223  * can be sure that these functions will never be called concurrently.
  224  *
  225  * The only exception to that is put_persistent_gnt, which can be called
 226  * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 227  * bit operations to modify the flags of a persistent grant and to count
 228  * the number of used grants.
 229  */
 230 static int add_persistent_gnt(struct xen_blkif_ring *ring,
 231                                struct persistent_gnt *persistent_gnt)
 232 {
 233         struct rb_node **new = NULL, *parent = NULL;
 234         struct persistent_gnt *this;
 235         struct xen_blkif *blkif = ring->blkif;
 236 
 237         if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
 238                 if (!blkif->vbd.overflow_max_grants)
 239                         blkif->vbd.overflow_max_grants = 1;
 240                 return -EBUSY;
 241         }
 242         /* Figure out where to put new node */
 243         new = &ring->persistent_gnts.rb_node;
 244         while (*new) {
 245                 this = container_of(*new, struct persistent_gnt, node);
 246 
 247                 parent = *new;
 248                 if (persistent_gnt->gnt < this->gnt)
 249                         new = &((*new)->rb_left);
 250                 else if (persistent_gnt->gnt > this->gnt)
 251                         new = &((*new)->rb_right);
 252                 else {
 253                         pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
 254                         return -EINVAL;
 255                 }
 256         }
 257 
 258         persistent_gnt->active = true;
 259         /* Add new node and rebalance tree. */
 260         rb_link_node(&(persistent_gnt->node), parent, new);
 261         rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
 262         ring->persistent_gnt_c++;
 263         atomic_inc(&ring->persistent_gnt_in_use);
 264         return 0;
 265 }
 266 
 267 static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
 268                                                  grant_ref_t gref)
 269 {
 270         struct persistent_gnt *data;
 271         struct rb_node *node = NULL;
 272 
 273         node = ring->persistent_gnts.rb_node;
 274         while (node) {
 275                 data = container_of(node, struct persistent_gnt, node);
 276 
 277                 if (gref < data->gnt)
 278                         node = node->rb_left;
 279                 else if (gref > data->gnt)
 280                         node = node->rb_right;
 281                 else {
 282                         if (data->active) {
 283                                 pr_alert_ratelimited("requesting a grant already in use\n");
 284                                 return NULL;
 285                         }
 286                         data->active = true;
 287                         atomic_inc(&ring->persistent_gnt_in_use);
 288                         return data;
 289                 }
 290         }
 291         return NULL;
 292 }
 293 
 294 static void put_persistent_gnt(struct xen_blkif_ring *ring,
 295                                struct persistent_gnt *persistent_gnt)
 296 {
 297         if (!persistent_gnt->active)
 298                 pr_alert_ratelimited("freeing a grant already unused\n");
 299         persistent_gnt->last_used = jiffies;
 300         persistent_gnt->active = false;
 301         atomic_dec(&ring->persistent_gnt_in_use);
 302 }
 303 
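      /*
       * Unmap and free every grant left in the tree (used when tearing down a
       * ring), issuing the unmaps in batches of BLKIF_MAX_SEGMENTS_PER_REQUEST.
       */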
 304 static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
 305                                  unsigned int num)
 306 {
 307         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 308         struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 309         struct persistent_gnt *persistent_gnt;
 310         struct rb_node *n;
 311         int segs_to_unmap = 0;
 312         struct gntab_unmap_queue_data unmap_data;
 313 
 314         unmap_data.pages = pages;
 315         unmap_data.unmap_ops = unmap;
 316         unmap_data.kunmap_ops = NULL;
 317 
 318         foreach_grant_safe(persistent_gnt, n, root, node) {
 319                 BUG_ON(persistent_gnt->handle ==
 320                         BLKBACK_INVALID_HANDLE);
 321                 gnttab_set_unmap_op(&unmap[segs_to_unmap],
 322                         (unsigned long) pfn_to_kaddr(page_to_pfn(
 323                                 persistent_gnt->page)),
 324                         GNTMAP_host_map,
 325                         persistent_gnt->handle);
 326 
 327                 pages[segs_to_unmap] = persistent_gnt->page;
 328 
 329                 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
 330                         !rb_next(&persistent_gnt->node)) {
 331 
 332                         unmap_data.count = segs_to_unmap;
 333                         BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 334 
 335                         put_free_pages(ring, pages, segs_to_unmap);
 336                         segs_to_unmap = 0;
 337                 }
 338 
 339                 rb_erase(&persistent_gnt->node, root);
 340                 kfree(persistent_gnt);
 341                 num--;
 342         }
 343         BUG_ON(num != 0);
 344 }
 345 
 346 void xen_blkbk_unmap_purged_grants(struct work_struct *work)
 347 {
 348         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 349         struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 350         struct persistent_gnt *persistent_gnt;
 351         int segs_to_unmap = 0;
 352         struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
 353         struct gntab_unmap_queue_data unmap_data;
 354 
 355         unmap_data.pages = pages;
 356         unmap_data.unmap_ops = unmap;
 357         unmap_data.kunmap_ops = NULL;
 358 
 359         while(!list_empty(&ring->persistent_purge_list)) {
 360                 persistent_gnt = list_first_entry(&ring->persistent_purge_list,
 361                                                   struct persistent_gnt,
 362                                                   remove_node);
 363                 list_del(&persistent_gnt->remove_node);
 364 
 365                 gnttab_set_unmap_op(&unmap[segs_to_unmap],
 366                         vaddr(persistent_gnt->page),
 367                         GNTMAP_host_map,
 368                         persistent_gnt->handle);
 369 
 370                 pages[segs_to_unmap] = persistent_gnt->page;
 371 
 372                 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
 373                         unmap_data.count = segs_to_unmap;
 374                         BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 375                         put_free_pages(ring, pages, segs_to_unmap);
 376                         segs_to_unmap = 0;
 377                 }
 378                 kfree(persistent_gnt);
 379         }
 380         if (segs_to_unmap > 0) {
 381                 unmap_data.count = segs_to_unmap;
 382                 BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 383                 put_free_pages(ring, pages, segs_to_unmap);
 384         }
 385 }
 386 
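      /*
       * Move stale persistent grants from the tree onto persistent_purge_list;
       * the actual unmapping is deferred to xen_blkbk_unmap_purged_grants()
       * via persistent_purge_work. Grants currently in use are never purged.
       */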
 387 static void purge_persistent_gnt(struct xen_blkif_ring *ring)
 388 {
 389         struct persistent_gnt *persistent_gnt;
 390         struct rb_node *n;
 391         unsigned int num_clean, total;
 392         bool scan_used = false;
 393         struct rb_root *root;
 394 
 395         if (work_busy(&ring->persistent_purge_work)) {
 396                 pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
 397                 goto out;
 398         }
 399 
 400         if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
 401             (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
 402             !ring->blkif->vbd.overflow_max_grants)) {
 403                 num_clean = 0;
 404         } else {
 405                 num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
 406                 num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
 407                             num_clean;
 408                 num_clean = min(ring->persistent_gnt_c, num_clean);
 409                 pr_debug("Going to purge at least %u persistent grants\n",
 410                          num_clean);
 411         }
 412 
 413         /*
  414          * At this point, we can be sure that there will be no calls
  415          * to get_persistent_gnt (because we are executing this code from
  416          * xen_blkif_schedule); there can only be calls to put_persistent_gnt,
 417          * which means that the number of currently used grants will go down,
 418          * but never up, so we will always be able to remove the requested
 419          * number of grants.
 420          */
 421 
 422         total = 0;
 423 
 424         BUG_ON(!list_empty(&ring->persistent_purge_list));
 425         root = &ring->persistent_gnts;
 426 purge_list:
 427         foreach_grant_safe(persistent_gnt, n, root, node) {
 428                 BUG_ON(persistent_gnt->handle ==
 429                         BLKBACK_INVALID_HANDLE);
 430 
 431                 if (persistent_gnt->active)
 432                         continue;
 433                 if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
 434                         continue;
 435                 if (scan_used && total >= num_clean)
 436                         continue;
 437 
 438                 rb_erase(&persistent_gnt->node, root);
 439                 list_add(&persistent_gnt->remove_node,
 440                          &ring->persistent_purge_list);
 441                 total++;
 442         }
 443         /*
  444          * Check whether we also need to start cleaning
  445          * grants that were used since the last purge in order to
  446          * reach the requested number.
 447          */
 448         if (!scan_used && total < num_clean) {
 449                 pr_debug("Still missing %u purged frames\n", num_clean - total);
 450                 scan_used = true;
 451                 goto purge_list;
 452         }
 453 
 454         if (total) {
 455                 ring->persistent_gnt_c -= total;
 456                 ring->blkif->vbd.overflow_max_grants = 0;
 457 
 458                 /* We can defer this work */
 459                 schedule_work(&ring->persistent_purge_work);
 460                 pr_debug("Purged %u/%u\n", num_clean, total);
 461         }
 462 
 463 out:
 464         return;
 465 }
 466 
 467 /*
  468  * Retrieve a free pending_req structure from the 'pending_free' list to be used.
 469  */
 470 static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
 471 {
 472         struct pending_req *req = NULL;
 473         unsigned long flags;
 474 
 475         spin_lock_irqsave(&ring->pending_free_lock, flags);
 476         if (!list_empty(&ring->pending_free)) {
 477                 req = list_entry(ring->pending_free.next, struct pending_req,
 478                                  free_list);
 479                 list_del(&req->free_list);
 480         }
 481         spin_unlock_irqrestore(&ring->pending_free_lock, flags);
 482         return req;
 483 }
 484 
 485 /*
  486  * Return the 'pending_req' structure back to the free pool. We also
  487  * wake up the thread if it was waiting for a free request.
 488  */
 489 static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
 490 {
 491         unsigned long flags;
 492         int was_empty;
 493 
 494         spin_lock_irqsave(&ring->pending_free_lock, flags);
 495         was_empty = list_empty(&ring->pending_free);
 496         list_add(&req->free_list, &ring->pending_free);
 497         spin_unlock_irqrestore(&ring->pending_free_lock, flags);
 498         if (was_empty)
 499                 wake_up(&ring->pending_free_wq);
 500 }
 501 
 502 /*
 503  * Routines for managing virtual block devices (vbds).
 504  */
 505 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
 506                              int operation)
 507 {
 508         struct xen_vbd *vbd = &blkif->vbd;
 509         int rc = -EACCES;
 510 
 511         if ((operation != REQ_OP_READ) && vbd->readonly)
 512                 goto out;
 513 
 514         if (likely(req->nr_sects)) {
 515                 blkif_sector_t end = req->sector_number + req->nr_sects;
 516 
 517                 if (unlikely(end < req->sector_number))
 518                         goto out;
 519                 if (unlikely(end > vbd_sz(vbd)))
 520                         goto out;
 521         }
 522 
 523         req->dev  = vbd->pdevice;
 524         req->bdev = vbd->bdev;
 525         rc = 0;
 526 
 527  out:
 528         return rc;
 529 }
 530 
 531 static void xen_vbd_resize(struct xen_blkif *blkif)
 532 {
 533         struct xen_vbd *vbd = &blkif->vbd;
 534         struct xenbus_transaction xbt;
 535         int err;
 536         struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
 537         unsigned long long new_size = vbd_sz(vbd);
 538 
 539         pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
 540                 blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
 541         pr_info("VBD Resize: new size %llu\n", new_size);
 542         vbd->size = new_size;
 543 again:
 544         err = xenbus_transaction_start(&xbt);
 545         if (err) {
 546                 pr_warn("Error starting transaction\n");
 547                 return;
 548         }
 549         err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
 550                             (unsigned long long)vbd_sz(vbd));
 551         if (err) {
 552                 pr_warn("Error writing new size\n");
 553                 goto abort;
 554         }
 555         /*
 556          * Write the current state; we will use this to synchronize
 557          * the front-end. If the current state is "connected" the
 558          * front-end will get the new size information online.
 559          */
 560         err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
 561         if (err) {
 562                 pr_warn("Error writing the state\n");
 563                 goto abort;
 564         }
 565 
 566         err = xenbus_transaction_end(xbt, 0);
 567         if (err == -EAGAIN)
 568                 goto again;
 569         if (err)
 570                 pr_warn("Error ending transaction\n");
 571         return;
 572 abort:
 573         xenbus_transaction_end(xbt, 1);
 574 }
 575 
 576 /*
 577  * Notification from the guest OS.
 578  */
 579 static void blkif_notify_work(struct xen_blkif_ring *ring)
 580 {
 581         ring->waiting_reqs = 1;
 582         wake_up(&ring->wq);
 583 }
 584 
 585 irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
 586 {
 587         blkif_notify_work(dev_id);
 588         return IRQ_HANDLED;
 589 }
 590 
 591 /*
 592  * SCHEDULER FUNCTIONS
 593  */
 594 
 595 static void print_stats(struct xen_blkif_ring *ring)
 596 {
 597         pr_info("(%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
 598                  "  |  ds %4llu | pg: %4u/%4d\n",
 599                  current->comm, ring->st_oo_req,
 600                  ring->st_rd_req, ring->st_wr_req,
 601                  ring->st_f_req, ring->st_ds_req,
 602                  ring->persistent_gnt_c,
 603                  xen_blkif_max_pgrants);
 604         ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
 605         ring->st_rd_req = 0;
 606         ring->st_wr_req = 0;
 607         ring->st_oo_req = 0;
 608         ring->st_ds_req = 0;
 609 }
 610 
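      /*
       * Main per-ring service thread: wait for frontend notifications,
       * process ring requests, and periodically purge stale persistent grants
       * and trim the free page pool.
       */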
 611 int xen_blkif_schedule(void *arg)
 612 {
 613         struct xen_blkif_ring *ring = arg;
 614         struct xen_blkif *blkif = ring->blkif;
 615         struct xen_vbd *vbd = &blkif->vbd;
 616         unsigned long timeout;
 617         int ret;
 618 
 619         set_freezable();
 620         while (!kthread_should_stop()) {
 621                 if (try_to_freeze())
 622                         continue;
 623                 if (unlikely(vbd->size != vbd_sz(vbd)))
 624                         xen_vbd_resize(blkif);
 625 
 626                 timeout = msecs_to_jiffies(LRU_INTERVAL);
 627 
 628                 timeout = wait_event_interruptible_timeout(
 629                         ring->wq,
 630                         ring->waiting_reqs || kthread_should_stop(),
 631                         timeout);
 632                 if (timeout == 0)
 633                         goto purge_gnt_list;
 634                 timeout = wait_event_interruptible_timeout(
 635                         ring->pending_free_wq,
 636                         !list_empty(&ring->pending_free) ||
 637                         kthread_should_stop(),
 638                         timeout);
 639                 if (timeout == 0)
 640                         goto purge_gnt_list;
 641 
 642                 ring->waiting_reqs = 0;
 643                 smp_mb(); /* clear flag *before* checking for work */
 644 
 645                 ret = do_block_io_op(ring);
 646                 if (ret > 0)
 647                         ring->waiting_reqs = 1;
 648                 if (ret == -EACCES)
 649                         wait_event_interruptible(ring->shutdown_wq,
 650                                                  kthread_should_stop());
 651 
 652 purge_gnt_list:
 653                 if (blkif->vbd.feature_gnt_persistent &&
 654                     time_after(jiffies, ring->next_lru)) {
 655                         purge_persistent_gnt(ring);
 656                         ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
 657                 }
 658 
 659                 /* Shrink if we have more than xen_blkif_max_buffer_pages */
 660                 shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);
 661 
 662                 if (log_stats && time_after(jiffies, ring->st_print))
 663                         print_stats(ring);
 664         }
 665 
 666         /* Drain pending purge work */
 667         flush_work(&ring->persistent_purge_work);
 668 
 669         if (log_stats)
 670                 print_stats(ring);
 671 
 672         ring->xenblkd = NULL;
 673 
 674         return 0;
 675 }
 676 
 677 /*
 678  * Remove persistent grants and empty the pool of free pages
 679  */
 680 void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
 681 {
 682         /* Free all persistent grant pages */
 683         if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
 684                 free_persistent_gnts(ring, &ring->persistent_gnts,
 685                         ring->persistent_gnt_c);
 686 
 687         BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
 688         ring->persistent_gnt_c = 0;
 689 
 690         /* Since we are shutting down remove all pages from the buffer */
 691         shrink_free_pagepool(ring, 0 /* All */);
 692 }
 693 
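      /*
       * Build unmap operations for the non-persistent grants in 'pages';
       * persistent grants are simply released back to the tree. Returns the
       * number of unmap operations prepared.
       */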
 694 static unsigned int xen_blkbk_unmap_prepare(
 695         struct xen_blkif_ring *ring,
 696         struct grant_page **pages,
 697         unsigned int num,
 698         struct gnttab_unmap_grant_ref *unmap_ops,
 699         struct page **unmap_pages)
 700 {
 701         unsigned int i, invcount = 0;
 702 
 703         for (i = 0; i < num; i++) {
 704                 if (pages[i]->persistent_gnt != NULL) {
 705                         put_persistent_gnt(ring, pages[i]->persistent_gnt);
 706                         continue;
 707                 }
 708                 if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
 709                         continue;
 710                 unmap_pages[invcount] = pages[i]->page;
 711                 gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
 712                                     GNTMAP_host_map, pages[i]->handle);
 713                 pages[i]->handle = BLKBACK_INVALID_HANDLE;
 714                 invcount++;
 715         }
 716 
 717         return invcount;
 718 }
 719 
 720 static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
 721 {
 722         struct pending_req *pending_req = (struct pending_req *)(data->data);
 723         struct xen_blkif_ring *ring = pending_req->ring;
 724         struct xen_blkif *blkif = ring->blkif;
 725 
 726         /* BUG_ON used to reproduce existing behaviour,
 727            but is this the best way to deal with this? */
 728         BUG_ON(result);
 729 
 730         put_free_pages(ring, data->pages, data->count);
 731         make_response(ring, pending_req->id,
 732                       pending_req->operation, pending_req->status);
 733         free_req(ring, pending_req);
 734         /*
 735          * Make sure the request is freed before releasing blkif,
 736          * or there could be a race between free_req and the
 737          * cleanup done in xen_blkif_free during shutdown.
 738          *
 739          * NB: The fact that we might try to wake up pending_free_wq
 740          * before drain_complete (in case there's a drain going on)
  741          * is not a problem with our current implementation,
  742          * because we can be sure there's no thread waiting on
 743          * pending_free_wq if there's a drain going on, but it has
 744          * to be taken into account if the current model is changed.
 745          */
 746         if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
 747                 complete(&blkif->drain_complete);
 748         }
 749         xen_blkif_put(blkif);
 750 }
 751 
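      /*
       * Unmap the grants of a completed request asynchronously; the response
       * to the frontend is sent from the completion callback above.
       */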
 752 static void xen_blkbk_unmap_and_respond(struct pending_req *req)
 753 {
 754         struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
 755         struct xen_blkif_ring *ring = req->ring;
 756         struct grant_page **pages = req->segments;
 757         unsigned int invcount;
 758 
 759         invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
 760                                            req->unmap, req->unmap_pages);
 761 
 762         work->data = req;
 763         work->done = xen_blkbk_unmap_and_respond_callback;
 764         work->unmap_ops = req->unmap;
 765         work->kunmap_ops = NULL;
 766         work->pages = req->unmap_pages;
 767         work->count = invcount;
 768 
 769         gnttab_unmap_refs_async(&req->gnttab_unmap_data);
 770 }
 771 
 772 
 773 /*
 774  * Unmap the grant references.
 775  *
 776  * This could accumulate ops up to the batch size to reduce the number
 777  * of hypercalls, but since this is only used in error paths there's
 778  * no real need.
 779  */
 780 static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
 781                             struct grant_page *pages[],
 782                             int num)
 783 {
 784         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 785         struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 786         unsigned int invcount = 0;
 787         int ret;
 788 
 789         while (num) {
 790                 unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
 791 
 792                 invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
 793                                                    unmap, unmap_pages);
 794                 if (invcount) {
 795                         ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
 796                         BUG_ON(ret);
 797                         put_free_pages(ring, unmap_pages, invcount);
 798                 }
 799                 pages += batch;
 800                 num -= batch;
 801         }
 802 }
 803 
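      /*
       * Map the grant references in 'pages', reusing already-mapped persistent
       * grants where possible and promoting new mappings to persistent ones
       * while there is room. Map hypercalls are issued in batches of
       * BLKIF_MAX_SEGMENTS_PER_REQUEST.
       */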
 804 static int xen_blkbk_map(struct xen_blkif_ring *ring,
 805                          struct grant_page *pages[],
 806                          int num, bool ro)
 807 {
 808         struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 809         struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 810         struct persistent_gnt *persistent_gnt = NULL;
 811         phys_addr_t addr = 0;
 812         int i, seg_idx, new_map_idx;
 813         int segs_to_map = 0;
 814         int ret = 0;
 815         int last_map = 0, map_until = 0;
 816         int use_persistent_gnts;
 817         struct xen_blkif *blkif = ring->blkif;
 818 
 819         use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
 820 
 821         /*
  822          * Fill out preq.nr_sects with the proper number of sectors, and set
  823          * up map[..] with the PFN of the page in our domain along with the
  824          * corresponding grant reference for each page.
 825          */
 826 again:
 827         for (i = map_until; i < num; i++) {
 828                 uint32_t flags;
 829 
 830                 if (use_persistent_gnts) {
 831                         persistent_gnt = get_persistent_gnt(
 832                                 ring,
 833                                 pages[i]->gref);
 834                 }
 835 
 836                 if (persistent_gnt) {
 837                         /*
 838                          * We are using persistent grants and
 839                          * the grant is already mapped
 840                          */
 841                         pages[i]->page = persistent_gnt->page;
 842                         pages[i]->persistent_gnt = persistent_gnt;
 843                 } else {
 844                         if (get_free_page(ring, &pages[i]->page))
 845                                 goto out_of_memory;
 846                         addr = vaddr(pages[i]->page);
 847                         pages_to_gnt[segs_to_map] = pages[i]->page;
 848                         pages[i]->persistent_gnt = NULL;
 849                         flags = GNTMAP_host_map;
 850                         if (!use_persistent_gnts && ro)
 851                                 flags |= GNTMAP_readonly;
 852                         gnttab_set_map_op(&map[segs_to_map++], addr,
 853                                           flags, pages[i]->gref,
 854                                           blkif->domid);
 855                 }
 856                 map_until = i + 1;
 857                 if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
 858                         break;
 859         }
 860 
 861         if (segs_to_map) {
 862                 ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
 863                 BUG_ON(ret);
 864         }
 865 
 866         /*
 867          * Now swizzle the MFN in our domain with the MFN from the other domain
 868          * so that when we access vaddr(pending_req,i) it has the contents of
 869          * the page from the other domain.
 870          */
 871         for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
 872                 if (!pages[seg_idx]->persistent_gnt) {
 873                         /* This is a newly mapped grant */
 874                         BUG_ON(new_map_idx >= segs_to_map);
 875                         if (unlikely(map[new_map_idx].status != 0)) {
 876                                 pr_debug("invalid buffer -- could not remap it\n");
 877                                 put_free_pages(ring, &pages[seg_idx]->page, 1);
 878                                 pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
 879                                 ret |= 1;
 880                                 goto next;
 881                         }
 882                         pages[seg_idx]->handle = map[new_map_idx].handle;
 883                 } else {
 884                         continue;
 885                 }
 886                 if (use_persistent_gnts &&
 887                     ring->persistent_gnt_c < xen_blkif_max_pgrants) {
 888                         /*
 889                          * We are using persistent grants, the grant is
 890                          * not mapped but we might have room for it.
 891                          */
 892                         persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
 893                                                  GFP_KERNEL);
 894                         if (!persistent_gnt) {
 895                                 /*
 896                                  * If we don't have enough memory to
  897                                  * allocate the persistent_gnt struct,
  898                                  * map this grant non-persistently.
 899                                  */
 900                                 goto next;
 901                         }
 902                         persistent_gnt->gnt = map[new_map_idx].ref;
 903                         persistent_gnt->handle = map[new_map_idx].handle;
 904                         persistent_gnt->page = pages[seg_idx]->page;
 905                         if (add_persistent_gnt(ring,
 906                                                persistent_gnt)) {
 907                                 kfree(persistent_gnt);
 908                                 persistent_gnt = NULL;
 909                                 goto next;
 910                         }
 911                         pages[seg_idx]->persistent_gnt = persistent_gnt;
 912                         pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
 913                                  persistent_gnt->gnt, ring->persistent_gnt_c,
 914                                  xen_blkif_max_pgrants);
 915                         goto next;
 916                 }
 917                 if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
 918                         blkif->vbd.overflow_max_grants = 1;
 919                         pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
 920                                  blkif->domid, blkif->vbd.handle);
 921                 }
 922                 /*
 923                  * We could not map this grant persistently, so use it as
 924                  * a non-persistent grant.
 925                  */
 926 next:
 927                 new_map_idx++;
 928         }
 929         segs_to_map = 0;
 930         last_map = map_until;
 931         if (map_until != num)
 932                 goto again;
 933 
 934         return ret;
 935 
 936 out_of_memory:
 937         pr_alert("%s: out of memory\n", __func__);
 938         put_free_pages(ring, pages_to_gnt, segs_to_map);
 939         for (i = last_map; i < num; i++)
 940                 pages[i]->handle = BLKBACK_INVALID_HANDLE;
 941         return -ENOMEM;
 942 }
 943 
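      /*
       * Map the data segments of a request. Note that 'ro' is true for
       * anything other than BLKIF_OP_READ, i.e. whenever the backend only
       * needs to read from the granted pages.
       */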
 944 static int xen_blkbk_map_seg(struct pending_req *pending_req)
 945 {
 946         int rc;
 947 
 948         rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
 949                            pending_req->nr_segs,
 950                            (pending_req->operation != BLKIF_OP_READ));
 951 
 952         return rc;
 953 }
 954 
 955 static int xen_blkbk_parse_indirect(struct blkif_request *req,
 956                                     struct pending_req *pending_req,
 957                                     struct seg_buf seg[],
 958                                     struct phys_req *preq)
 959 {
 960         struct grant_page **pages = pending_req->indirect_pages;
 961         struct xen_blkif_ring *ring = pending_req->ring;
 962         int indirect_grefs, rc, n, nseg, i;
 963         struct blkif_request_segment *segments = NULL;
 964 
 965         nseg = pending_req->nr_segs;
 966         indirect_grefs = INDIRECT_PAGES(nseg);
 967         BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
 968 
 969         for (i = 0; i < indirect_grefs; i++)
 970                 pages[i]->gref = req->u.indirect.indirect_grefs[i];
 971 
 972         rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
 973         if (rc)
 974                 goto unmap;
 975 
 976         for (n = 0, i = 0; n < nseg; n++) {
 977                 uint8_t first_sect, last_sect;
 978 
 979                 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
 980                         /* Map indirect segments */
 981                         if (segments)
 982                                 kunmap_atomic(segments);
 983                         segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
 984                 }
 985                 i = n % SEGS_PER_INDIRECT_FRAME;
 986 
 987                 pending_req->segments[n]->gref = segments[i].gref;
 988 
 989                 first_sect = READ_ONCE(segments[i].first_sect);
 990                 last_sect = READ_ONCE(segments[i].last_sect);
 991                 if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
 992                         rc = -EINVAL;
 993                         goto unmap;
 994                 }
 995 
 996                 seg[n].nsec = last_sect - first_sect + 1;
 997                 seg[n].offset = first_sect << 9;
 998                 preq->nr_sects += seg[n].nsec;
 999         }
1000 
1001 unmap:
1002         if (segments)
1003                 kunmap_atomic(segments);
1004         xen_blkbk_unmap(ring, pages, indirect_grefs);
1005         return rc;
1006 }
1007 
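      /*
       * Handle a BLKIF_OP_DISCARD request: bounds-check it against the vbd and
       * forward it to blkdev_issue_discard(), optionally as a secure discard.
       */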
1008 static int dispatch_discard_io(struct xen_blkif_ring *ring,
1009                                 struct blkif_request *req)
1010 {
1011         int err = 0;
1012         int status = BLKIF_RSP_OKAY;
1013         struct xen_blkif *blkif = ring->blkif;
1014         struct block_device *bdev = blkif->vbd.bdev;
1015         unsigned long secure;
1016         struct phys_req preq;
1017 
1018         xen_blkif_get(blkif);
1019 
1020         preq.sector_number = req->u.discard.sector_number;
1021         preq.nr_sects      = req->u.discard.nr_sectors;
1022 
1023         err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
1024         if (err) {
1025                 pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
1026                         preq.sector_number,
1027                         preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
1028                 goto fail_response;
1029         }
1030         ring->st_ds_req++;
1031 
1032         secure = (blkif->vbd.discard_secure &&
1033                  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
1034                  BLKDEV_DISCARD_SECURE : 0;
1035 
1036         err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
1037                                    req->u.discard.nr_sectors,
1038                                    GFP_KERNEL, secure);
1039 fail_response:
1040         if (err == -EOPNOTSUPP) {
1041                 pr_debug("discard op failed, not supported\n");
1042                 status = BLKIF_RSP_EOPNOTSUPP;
1043         } else if (err)
1044                 status = BLKIF_RSP_ERROR;
1045 
1046         make_response(ring, req->u.discard.id, req->operation, status);
1047         xen_blkif_put(blkif);
1048         return err;
1049 }
1050 
1051 static int dispatch_other_io(struct xen_blkif_ring *ring,
1052                              struct blkif_request *req,
1053                              struct pending_req *pending_req)
1054 {
1055         free_req(ring, pending_req);
1056         make_response(ring, req->u.other.id, req->operation,
1057                       BLKIF_RSP_EOPNOTSUPP);
1058         return -EIO;
1059 }
1060 
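      /*
       * Wait until all requests in flight on this ring have completed; used to
       * order a WRITE_BARRIER behind outstanding I/O.
       */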
1061 static void xen_blk_drain_io(struct xen_blkif_ring *ring)
1062 {
1063         struct xen_blkif *blkif = ring->blkif;
1064 
1065         atomic_set(&blkif->drain, 1);
1066         do {
1067                 if (atomic_read(&ring->inflight) == 0)
1068                         break;
1069                 wait_for_completion_interruptible_timeout(
1070                                 &blkif->drain_complete, HZ);
1071 
1072                 if (!atomic_read(&blkif->drain))
1073                         break;
1074         } while (!kthread_should_stop());
1075         atomic_set(&blkif->drain, 0);
1076 }
1077 
1078 static void __end_block_io_op(struct pending_req *pending_req,
1079                 blk_status_t error)
1080 {
1081         /* An error fails the entire request. */
1082         if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
1083             error == BLK_STS_NOTSUPP) {
1084                 pr_debug("flush diskcache op failed, not supported\n");
1085                 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
1086                 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1087         } else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
1088                    error == BLK_STS_NOTSUPP) {
1089                 pr_debug("write barrier op failed, not supported\n");
1090                 xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
1091                 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1092         } else if (error) {
1093                 pr_debug("Buffer not up-to-date at end of operation,"
1094                          " error=%d\n", error);
1095                 pending_req->status = BLKIF_RSP_ERROR;
1096         }
1097 
1098         /*
1099          * If all of the bio's have completed it is time to unmap
1100          * the grant references associated with 'request' and provide
1101          * the proper response on the ring.
1102          */
1103         if (atomic_dec_and_test(&pending_req->pendcnt))
1104                 xen_blkbk_unmap_and_respond(pending_req);
1105 }
1106 
1107 /*
1108  * bio callback.
1109  */
1110 static void end_block_io_op(struct bio *bio)
1111 {
1112         __end_block_io_op(bio->bi_private, bio->bi_status);
1113         bio_put(bio);
1114 }
1115 
1116 
1117 
1118 /*
 1119  * Function to copy the 'struct blkif_request' from the ring buffer
 1120  * (which has the sectors we want, the number of them, grant references, etc.),
 1121  * and transmute it to the block API to hand it over to the proper block disk.
1122  */
1123 static int
1124 __do_block_io_op(struct xen_blkif_ring *ring)
1125 {
1126         union blkif_back_rings *blk_rings = &ring->blk_rings;
1127         struct blkif_request req;
1128         struct pending_req *pending_req;
1129         RING_IDX rc, rp;
1130         int more_to_do = 0;
1131 
1132         rc = blk_rings->common.req_cons;
1133         rp = blk_rings->common.sring->req_prod;
1134         rmb(); /* Ensure we see queued requests up to 'rp'. */
1135 
1136         if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1137                 rc = blk_rings->common.rsp_prod_pvt;
1138                 pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1139                         rp, rc, rp - rc, ring->blkif->vbd.pdevice);
1140                 return -EACCES;
1141         }
1142         while (rc != rp) {
1143 
1144                 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
1145                         break;
1146 
1147                 if (kthread_should_stop()) {
1148                         more_to_do = 1;
1149                         break;
1150                 }
1151 
1152                 pending_req = alloc_req(ring);
1153                 if (NULL == pending_req) {
1154                         ring->st_oo_req++;
1155                         more_to_do = 1;
1156                         break;
1157                 }
1158 
1159                 switch (ring->blkif->blk_protocol) {
1160                 case BLKIF_PROTOCOL_NATIVE:
1161                         memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1162                         break;
1163                 case BLKIF_PROTOCOL_X86_32:
1164                         blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1165                         break;
1166                 case BLKIF_PROTOCOL_X86_64:
1167                         blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
1168                         break;
1169                 default:
1170                         BUG();
1171                 }
1172                 blk_rings->common.req_cons = ++rc; /* before make_response() */
1173 
1174                 /* Apply all sanity checks to /private copy/ of request. */
1175                 barrier();
1176 
1177                 switch (req.operation) {
1178                 case BLKIF_OP_READ:
1179                 case BLKIF_OP_WRITE:
1180                 case BLKIF_OP_WRITE_BARRIER:
1181                 case BLKIF_OP_FLUSH_DISKCACHE:
1182                 case BLKIF_OP_INDIRECT:
1183                         if (dispatch_rw_block_io(ring, &req, pending_req))
1184                                 goto done;
1185                         break;
1186                 case BLKIF_OP_DISCARD:
1187                         free_req(ring, pending_req);
1188                         if (dispatch_discard_io(ring, &req))
1189                                 goto done;
1190                         break;
1191                 default:
1192                         if (dispatch_other_io(ring, &req, pending_req))
1193                                 goto done;
1194                         break;
1195                 }
1196 
1197                 /* Yield point for this unbounded loop. */
1198                 cond_resched();
1199         }
1200 done:
1201         return more_to_do;
1202 }
1203 
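      /*
       * Keep consuming ring requests until none are left, using
       * RING_FINAL_CHECK_FOR_REQUESTS to avoid missing requests that arrive
       * just as processing finishes.
       */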
1204 static int
1205 do_block_io_op(struct xen_blkif_ring *ring)
1206 {
1207         union blkif_back_rings *blk_rings = &ring->blk_rings;
1208         int more_to_do;
1209 
1210         do {
1211                 more_to_do = __do_block_io_op(ring);
1212                 if (more_to_do)
1213                         break;
1214 
1215                 RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
1216         } while (more_to_do);
1217 
1218         return more_to_do;
1219 }
1220 /*
1221  * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
 1222  * and a call to 'submit_bio' to pass it to the underlying storage.
1223  */
1224 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1225                                 struct blkif_request *req,
1226                                 struct pending_req *pending_req)
1227 {
1228         struct phys_req preq;
1229         struct seg_buf *seg = pending_req->seg;
1230         unsigned int nseg;
1231         struct bio *bio = NULL;
1232         struct bio **biolist = pending_req->biolist;
1233         int i, nbio = 0;
1234         int operation;
1235         int operation_flags = 0;
1236         struct blk_plug plug;
1237         bool drain = false;
1238         struct grant_page **pages = pending_req->segments;
1239         unsigned short req_operation;
1240 
1241         req_operation = req->operation == BLKIF_OP_INDIRECT ?
1242                         req->u.indirect.indirect_op : req->operation;
1243 
1244         if ((req->operation == BLKIF_OP_INDIRECT) &&
1245             (req_operation != BLKIF_OP_READ) &&
1246             (req_operation != BLKIF_OP_WRITE)) {
1247                 pr_debug("Invalid indirect operation (%u)\n", req_operation);
1248                 goto fail_response;
1249         }
1250 
1251         switch (req_operation) {
1252         case BLKIF_OP_READ:
1253                 ring->st_rd_req++;
1254                 operation = REQ_OP_READ;
1255                 break;
1256         case BLKIF_OP_WRITE:
1257                 ring->st_wr_req++;
1258                 operation = REQ_OP_WRITE;
1259                 operation_flags = REQ_SYNC | REQ_IDLE;
1260                 break;
1261         case BLKIF_OP_WRITE_BARRIER:
1262                 drain = true;
1263                 /* fall through */
1264         case BLKIF_OP_FLUSH_DISKCACHE:
1265                 ring->st_f_req++;
1266                 operation = REQ_OP_WRITE;
1267                 operation_flags = REQ_PREFLUSH;
1268                 break;
1269         default:
1270                 operation = 0; /* make gcc happy */
1271                 goto fail_response;
1272                 break;
1273         }
1274 
1275         /* Check that the number of segments is sane. */
1276         nseg = req->operation == BLKIF_OP_INDIRECT ?
1277                req->u.indirect.nr_segments : req->u.rw.nr_segments;
1278 
1279         if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
1280             unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1281                      (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1282             unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1283                      (nseg > MAX_INDIRECT_SEGMENTS))) {
1284                 pr_debug("Bad number of segments in request (%d)\n", nseg);
1285                 /* Haven't submitted any bio's yet. */
1286                 goto fail_response;
1287         }
1288 
1289         preq.nr_sects      = 0;
1290 
1291         pending_req->ring      = ring;
1292         pending_req->id        = req->u.rw.id;
1293         pending_req->operation = req_operation;
1294         pending_req->status    = BLKIF_RSP_OKAY;
1295         pending_req->nr_segs   = nseg;
1296 
1297         if (req->operation != BLKIF_OP_INDIRECT) {
1298                 preq.dev               = req->u.rw.handle;
1299                 preq.sector_number     = req->u.rw.sector_number;
1300                 for (i = 0; i < nseg; i++) {
1301                         pages[i]->gref = req->u.rw.seg[i].gref;
1302                         seg[i].nsec = req->u.rw.seg[i].last_sect -
1303                                 req->u.rw.seg[i].first_sect + 1;
1304                         seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1305                         if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
1306                             (req->u.rw.seg[i].last_sect <
1307                              req->u.rw.seg[i].first_sect))
1308                                 goto fail_response;
1309                         preq.nr_sects += seg[i].nsec;
1310                 }
1311         } else {
1312                 preq.dev               = req->u.indirect.handle;
1313                 preq.sector_number     = req->u.indirect.sector_number;
1314                 if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
1315                         goto fail_response;
1316         }
1317 
1318         if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
1319                 pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
1320                          operation == REQ_OP_READ ? "read" : "write",
1321                          preq.sector_number,
1322                          preq.sector_number + preq.nr_sects,
1323                          ring->blkif->vbd.pdevice);
1324                 goto fail_response;
1325         }
1326 
1327         /*
1328          * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
1329          * is set there.
1330          */
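             /*
              * Both the start sector and every segment length must be a
              * multiple of the device's logical block size expressed in
              * 512-byte sectors; the AND with (size - 1) is a power-of-two
              * modulo, e.g. a 4096-byte logical block gives a mask of 7.
              */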
1331         for (i = 0; i < nseg; i++) {
1332                 if (((int)preq.sector_number|(int)seg[i].nsec) &
1333                     ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
1334                         pr_debug("Misaligned I/O request from domain %d\n",
1335                                  ring->blkif->domid);
1336                         goto fail_response;
1337                 }
1338         }
1339 
1340         /* Wait for all outstanding I/Os to complete, and only then
1341          * issue the flush.
1342          */
1343         if (drain)
1344                 xen_blk_drain_io(pending_req->ring);
1345 
1346         /*
1347          * If the grant mapping below fails, any grants that were already
1348          * mapped have to be undone: gnttab_set_unmap_op() is prepared for
1349          * each of them and the unmap hypercall is issued - all of that is
1350          * done in xen_blkbk_unmap() on the fail_flush path.
1351          */
1352         if (xen_blkbk_map_seg(pending_req))
1353                 goto fail_flush;
1354 
1355         /*
1356          * The corresponding xen_blkif_put() is done in __end_block_io_op()
1357          * once every bio submitted for this request has completed.
1358          */
1359         xen_blkif_get(ring->blkif);
1360         atomic_inc(&ring->inflight);
1361 
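             /*
              * bio_add_page() returns 0 once the current bio is full, in which
              * case a fresh bio sized for the remaining segments is allocated;
              * contiguous segments are thus packed into as few bios as the
              * block layer allows.
              */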
1362         for (i = 0; i < nseg; i++) {
1363                 while ((bio == NULL) ||
1364                        (bio_add_page(bio,
1365                                      pages[i]->page,
1366                                      seg[i].nsec << 9,
1367                                      seg[i].offset) == 0)) {
1368 
1369                         int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1370                         bio = bio_alloc(GFP_KERNEL, nr_iovecs);
1371                         if (unlikely(bio == NULL))
1372                                 goto fail_put_bio;
1373 
1374                         biolist[nbio++] = bio;
1375                         bio_set_dev(bio, preq.bdev);
1376                         bio->bi_private = pending_req;
1377                         bio->bi_end_io  = end_block_io_op;
1378                         bio->bi_iter.bi_sector  = preq.sector_number;
1379                         bio_set_op_attrs(bio, operation, operation_flags);
1380                 }
1381 
1382                 preq.sector_number += seg[i].nsec;
1383         }
1384 
1385         /* Only hit for a flush (nseg == 0): submit an empty REQ_PREFLUSH bio. */
1386         if (!bio) {
1387                 BUG_ON(operation_flags != REQ_PREFLUSH);
1388 
1389                 bio = bio_alloc(GFP_KERNEL, 0);
1390                 if (unlikely(bio == NULL))
1391                         goto fail_put_bio;
1392 
1393                 biolist[nbio++] = bio;
1394                 bio_set_dev(bio, preq.bdev);
1395                 bio->bi_private = pending_req;
1396                 bio->bi_end_io  = end_block_io_op;
1397                 bio_set_op_attrs(bio, operation, operation_flags);
1398         }
1399 
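             /*
              * pendcnt is the number of bios that must complete (each calling
              * end_block_io_op()) before the response is pushed back to the
              * frontend; the plug lets the block layer batch the submissions.
              */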
1400         atomic_set(&pending_req->pendcnt, nbio);
1401         blk_start_plug(&plug);
1402 
1403         for (i = 0; i < nbio; i++)
1404                 submit_bio(biolist[i]);
1405 
1406         /* Let the I/Os go. */
1407         blk_finish_plug(&plug);
1408 
1409         if (operation == REQ_OP_READ)
1410                 ring->st_rd_sect += preq.nr_sects;
1411         else if (operation == REQ_OP_WRITE)
1412                 ring->st_wr_sect += preq.nr_sects;
1413 
1414         return 0;
1415 
1416  fail_flush:
1417         xen_blkbk_unmap(ring, pending_req->segments,
1418                         pending_req->nr_segs);
1419  fail_response:
1420         /* Haven't submitted any bios yet. */
1421         make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1422         free_req(ring, pending_req);
1423         msleep(1); /* back off a bit */
1424         return -EIO;
1425 
1426  fail_put_bio:
1427         for (i = 0; i < nbio; i++)
1428                 bio_put(biolist[i]);
1429         atomic_set(&pending_req->pendcnt, 1);
1430         __end_block_io_op(pending_req, BLK_STS_RESOURCE);
1431         msleep(1); /* back off a bit */
1432         return -EIO;
1433 }
1434 
1435 
1436 
1437 /*
1438  * Put a response on the ring on how the operation fared.
1439  */
1440 static void make_response(struct xen_blkif_ring *ring, u64 id,
1441                           unsigned short op, int st)
1442 {
1443         struct blkif_response *resp;
1444         unsigned long     flags;
1445         union blkif_back_rings *blk_rings;
1446         int notify;
1447 
1448         spin_lock_irqsave(&ring->blk_ring_lock, flags);
1449         blk_rings = &ring->blk_rings;
1450         /* Place on the response ring for the relevant domain. */
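             /*
              * Three layouts exist because a 32-bit frontend may be paired
              * with a 64-bit backend (or vice versa): struct padding differs
              * between the ABIs, so the response is written with whichever
              * protocol the frontend advertised via xenstore.
              */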
1451         switch (ring->blkif->blk_protocol) {
1452         case BLKIF_PROTOCOL_NATIVE:
1453                 resp = RING_GET_RESPONSE(&blk_rings->native,
1454                                          blk_rings->native.rsp_prod_pvt);
1455                 break;
1456         case BLKIF_PROTOCOL_X86_32:
1457                 resp = RING_GET_RESPONSE(&blk_rings->x86_32,
1458                                          blk_rings->x86_32.rsp_prod_pvt);
1459                 break;
1460         case BLKIF_PROTOCOL_X86_64:
1461                 resp = RING_GET_RESPONSE(&blk_rings->x86_64,
1462                                          blk_rings->x86_64.rsp_prod_pvt);
1463                 break;
1464         default:
1465                 BUG();
1466         }
1467 
1468         resp->id        = id;
1469         resp->operation = op;
1470         resp->status    = st;
1471 
1472         blk_rings->common.rsp_prod_pvt++;
1473         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1474         spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
1475         if (notify)
1476                 notify_remote_via_irq(ring->irq);
1477 }
1478 
1479 static int __init xen_blkif_init(void)
1480 {
1481         int rc = 0;
1482 
1483         if (!xen_domain())
1484                 return -ENODEV;
1485 
1486         if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
1487                 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
1488                         xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
1489                 xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
1490         }
1491 
1492         if (xenblk_max_queues == 0)
1493                 xenblk_max_queues = num_online_cpus();
1494 
1495         rc = xen_blkif_interface_init();
1496         if (rc)
1497                 goto failed_init;
1498 
1499         rc = xen_blkif_xenbus_init();
1500         if (rc)
1501                 goto failed_init;
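             /*
              * Note: if xen_blkif_xenbus_init() fails, nothing here undoes
              * xen_blkif_interface_init(); later kernels add an explicit
              * cleanup call on this error path.
              */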
1502 
1503  failed_init:
1504         return rc;
1505 }
1506 
1507 module_init(xen_blkif_init);
1508 
1509 MODULE_LICENSE("Dual BSD/GPL");
1510 MODULE_ALIAS("xen-backend:vbd");
