root/drivers/block/sunvdc.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. to_vdc_port
  2. vdc_version_supported
  3. vdc_tx_dring_avail
  4. vdc_getgeo
  5. vdc_ioctl
  6. vdc_blk_queue_start
  7. vdc_finish
  8. vdc_handshake_complete
  9. vdc_handle_unknown
  10. vdc_send_attr
  11. vdc_handle_attr
  12. vdc_end_special
  13. vdc_end_one
  14. vdc_ack
  15. vdc_nack
  16. vdc_event
  17. __vdc_tx_trigger
  18. __send_request
  19. vdc_queue_rq
  20. generic_request
  21. vdc_alloc_tx_ring
  22. vdc_free_tx_ring
  23. vdc_port_up
  24. vdc_port_down
  25. cleanup_queue
  26. init_queue
  27. probe_disk
  28. print_version
  29. vdc_device_probed
  30. vdc_port_mpgroup_check
  31. vdc_port_probe
  32. vdc_port_remove
  33. vdc_requeue_inflight
  34. vdc_queue_drain
  35. vdc_ldc_reset_timer_work
  36. vdc_ldc_reset_work
  37. vdc_ldc_reset
  38. vdc_init
  39. vdc_exit

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /* sunvdc.c: Sun LDOM Virtual Disk Client.
   3  *
   4  * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
   5  */
   6 
   7 #include <linux/module.h>
   8 #include <linux/kernel.h>
   9 #include <linux/types.h>
  10 #include <linux/blk-mq.h>
  11 #include <linux/hdreg.h>
  12 #include <linux/genhd.h>
  13 #include <linux/cdrom.h>
  14 #include <linux/slab.h>
  15 #include <linux/spinlock.h>
  16 #include <linux/completion.h>
  17 #include <linux/delay.h>
  18 #include <linux/init.h>
  19 #include <linux/list.h>
  20 #include <linux/scatterlist.h>
  21 
  22 #include <asm/vio.h>
  23 #include <asm/ldc.h>
  24 
  25 #define DRV_MODULE_NAME         "sunvdc"
  26 #define PFX DRV_MODULE_NAME     ": "
  27 #define DRV_MODULE_VERSION      "1.2"
  28 #define DRV_MODULE_RELDATE      "November 24, 2014"
  29 
  30 static char version[] =
  31         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  32 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
  33 MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
  34 MODULE_LICENSE("GPL");
  35 MODULE_VERSION(DRV_MODULE_VERSION);
  36 
  37 #define VDC_TX_RING_SIZE        512
  38 #define VDC_DEFAULT_BLK_SIZE    512
  39 
  40 #define MAX_XFER_BLKS           (128 * 1024)
  41 #define MAX_XFER_SIZE           (MAX_XFER_BLKS / VDC_DEFAULT_BLK_SIZE)
  42 #define MAX_RING_COOKIES        ((MAX_XFER_BLKS / PAGE_SIZE) + 2)
  43 
  44 #define WAITING_FOR_LINK_UP     0x01
  45 #define WAITING_FOR_TX_SPACE    0x02
  46 #define WAITING_FOR_GEN_CMD     0x04
  47 #define WAITING_FOR_ANY         -1
  48 
  49 #define VDC_MAX_RETRIES 10
  50 
  51 static struct workqueue_struct *sunvdc_wq;
  52 
/* One slot per TX ring descriptor: remembers which block-layer request
 * (if any) is in flight on that descriptor, so vdc_end_one() can
 * complete it when the server ACKs.  A NULL req marks a generic_request()
 * (non-blk) command.
 */
struct vdc_req_entry {
	struct request		*req;
};
  56 
/* Per-virtual-disk client state, one per vio device instance. */
struct vdc_port {
	struct vio_driver_state	vio;	/* must be first: to_vdc_port() uses container_of() */

	struct gendisk		*disk;	/* allocated in probe_disk(); NULL until handshake done */

	struct vdc_completion	*cmp;	/* NOTE(review): looks unused here; waiters go through
					 * vio.cmp (see vdc_finish/generic_request) — confirm */

	u64			req_id;	/* id stamped into each descriptor, bumped per send */
	u64			seq;
	struct vdc_req_entry	rq_arr[VDC_TX_RING_SIZE];	/* in-flight request per ring slot */

	unsigned long		ring_cookies;	/* LDC cookies per descriptor */

	u64			max_xfer_size;	/* in blocks; may be lowered by server's attr ACK */
	u32			vdisk_block_size;
	u32			drain;		/* non-zero: fail new requests (teardown in progress) */

	u64			ldc_timeout;
	struct delayed_work	ldc_reset_timer_work;
	struct work_struct	ldc_reset_work;

	/* The server fills these in for us in the disk attribute
	 * ACK packet.
	 */
	u64			operations;	/* bitmask of supported VD_OP_* codes */
	u32			vdisk_size;	/* NOTE(review): attr packet carries u64 vdisk_size;
						 * a u32 here truncates very large disks — confirm */
	u8			vdisk_type;
	u8			vdisk_mtype;
	u32			vdisk_phys_blksz;

	struct blk_mq_tag_set	tag_set;

	char			disk_name[32];
};
  91 
  92 static void vdc_ldc_reset(struct vdc_port *port);
  93 static void vdc_ldc_reset_work(struct work_struct *work);
  94 static void vdc_ldc_reset_timer_work(struct work_struct *work);
  95 
/* Recover the enclosing vdc_port from the embedded vio_driver_state
 * (which must be the first member of struct vdc_port).
 */
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vdc_port, vio);
}
 100 
/* Protocol versions offered during handshake negotiation.
 * Ordered from largest major to lowest, as the vio core tries them in order.
 */
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 2 },
	{ .major = 1, .minor = 1 },
	{ .major = 1, .minor = 0 },
};
 107 
 108 static inline int vdc_version_supported(struct vdc_port *port,
 109                                         u16 major, u16 minor)
 110 {
 111         return port->vio.ver.major == major && port->vio.ver.minor >= minor;
 112 }
 113 
 114 #define VDCBLK_NAME     "vdisk"
 115 static int vdc_major;
 116 #define PARTITION_SHIFT 3
 117 
/* Number of free descriptors remaining in the TX ring. */
static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}
 122 
 123 static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 124 {
 125         struct gendisk *disk = bdev->bd_disk;
 126         sector_t nsect = get_capacity(disk);
 127         sector_t cylinders = nsect;
 128 
 129         geo->heads = 0xff;
 130         geo->sectors = 0x3f;
 131         sector_div(cylinders, geo->heads * geo->sectors);
 132         geo->cylinders = cylinders;
 133         if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
 134                 geo->cylinders = 0xffff;
 135 
 136         return 0;
 137 }
 138 
/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
 * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
 * Needed to be able to install inside an ldom from an iso image.
 */
static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned command, unsigned long argument)
{
	int i;
	struct gendisk *disk;

	switch (command) {
	case CDROMMULTISESSION:
		pr_debug(PFX "Multisession CDs not supported\n");
		/* Report "no multisession info" by zero-filling the
		 * caller's struct cdrom_multisession, one byte at a time.
		 */
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY:
		disk = bdev->bd_disk;

		/* Succeed (capabilities == 0) only for CD/DVD media. */
		if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
			return 0;
		return -EINVAL;

	default:
		pr_debug(PFX "ioctl %08x not supported\n", command);
		return -EINVAL;
	}
}
 169 
/* Block device operations: only geometry and a couple of CD ioctls. */
static const struct block_device_operations vdc_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= vdc_getgeo,
	.ioctl		= vdc_ioctl,
};
 175 
/* Restart stopped blk-mq hardware queues once the TX ring has room again. */
static void vdc_blk_queue_start(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	/* restart blk queue when ring is half emptied. also called after
	 * handshake completes, so check for initial handshake before we've
	 * allocated a disk.
	 */
	if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
		blk_mq_start_stopped_hw_queues(port->disk->queue, true);
}
 187 
 188 static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
 189 {
 190         if (vio->cmp &&
 191             (waiting_for == -1 ||
 192              vio->cmp->waiting_for == waiting_for)) {
 193                 vio->cmp->err = err;
 194                 complete(&vio->cmp->com);
 195                 vio->cmp = NULL;
 196         }
 197 }
 198 
/* vio core callback: handshake finished.  Stop the pending reset timer,
 * release anyone waiting for link-up, and kick the blk-mq queues.
 */
static void vdc_handshake_complete(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);

	cancel_delayed_work(&port->ldc_reset_timer_work);
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
	vdc_blk_queue_start(port);
}
 207 
 208 static int vdc_handle_unknown(struct vdc_port *port, void *arg)
 209 {
 210         struct vio_msg_tag *pkt = arg;
 211 
 212         printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
 213                pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
 214         printk(KERN_ERR PFX "Resetting connection.\n");
 215 
 216         ldc_disconnect(port->vio.lp);
 217 
 218         return -ECONNRESET;
 219 }
 220 
/* vio core callback: send our disk attribute INFO packet during handshake.
 * The memset also zeroes struct padding, which matters because the whole
 * struct is transmitted to the peer LDOM.
 */
static int vdc_send_attr(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);

	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.vdisk_block_size = port->vdisk_block_size;
	pkt.max_xfer_size = port->max_xfer_size;

	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);

	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
}
 242 
/* vio core callback: process the server's disk attribute ACK/NACK.
 * On ACK, validate the reply and record the server-provided disk
 * parameters; any inconsistency forces a connection reset.
 */
static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info *pkt = arg;

	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
	       "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt->tag.stype, pkt->operations,
	       pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
	       pkt->xfer_mode, pkt->vdisk_block_size,
	       pkt->max_xfer_size);

	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
		/* Only whole-disk and slice types are understood. */
		switch (pkt->vdisk_type) {
		case VD_DISK_TYPE_DISK:
		case VD_DISK_TYPE_SLICE:
			break;

		default:
			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
			       vio->name, pkt->vdisk_type);
			return -ECONNRESET;
		}

		/* The server may only shrink, never grow, the block size
		 * we offered in vdc_send_attr().
		 */
		if (pkt->vdisk_block_size > port->vdisk_block_size) {
			printk(KERN_ERR PFX "%s: BLOCK size increased "
			       "%u --> %u\n",
			       vio->name,
			       port->vdisk_block_size, pkt->vdisk_block_size);
			return -ECONNRESET;
		}

		port->operations = pkt->operations;
		port->vdisk_type = pkt->vdisk_type;
		/* disk size/media type fields exist only from protocol 1.1 on */
		if (vdc_version_supported(port, 1, 1)) {
			port->vdisk_size = pkt->vdisk_size;
			port->vdisk_mtype = pkt->vdisk_mtype;
		}
		/* Honor the server's transfer-size limit if it is tighter. */
		if (pkt->max_xfer_size < port->max_xfer_size)
			port->max_xfer_size = pkt->max_xfer_size;
		port->vdisk_block_size = pkt->vdisk_block_size;

		/* physical block size is only reported from protocol 1.2 on */
		port->vdisk_phys_blksz = VDC_DEFAULT_BLK_SIZE;
		if (vdc_version_supported(port, 1, 2))
			port->vdisk_phys_blksz = pkt->phys_block_size;

		return 0;
	} else {
		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);

		return -ECONNRESET;
	}
}
 296 
/* Complete a generic_request() command (a descriptor with no blk request
 * attached).  The descriptor status is a positive errno from the server,
 * negated here for the waiter.
 */
static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}
 303 
/* Retire one TX ring descriptor that the server has marked DONE:
 * unmap its LDC cookies, free the slot, advance the consumer index,
 * and complete the associated blk request (or generic command).
 */
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
{
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];
	struct request *req;

	/* Ignore ACKs for descriptors the server hasn't finished. */
	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
		return;

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = vio_dring_next(dr, index);

	req = rqe->req;
	if (req == NULL) {
		/* No blk request: this was a generic_request() command. */
		vdc_end_special(port, desc);
		return;
	}

	rqe->req = NULL;

	blk_mq_end_request(req, desc->status ? BLK_STS_IOERR : 0);

	/* A slot just freed up; restart the queue if it was stopped. */
	vdc_blk_queue_start(port);
}
 330 
 331 static int vdc_ack(struct vdc_port *port, void *msgbuf)
 332 {
 333         struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 334         struct vio_dring_data *pkt = msgbuf;
 335 
 336         if (unlikely(pkt->dring_ident != dr->ident ||
 337                      pkt->start_idx != pkt->end_idx ||
 338                      pkt->start_idx >= VDC_TX_RING_SIZE))
 339                 return 0;
 340 
 341         vdc_end_one(port, dr, pkt->start_idx);
 342 
 343         return 0;
 344 }
 345 
/* Handle a DATA/NACK message from the server.  Currently a stub that
 * swallows the NACK; the affected request is never retried or failed here.
 */
static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
	/* XXX Implement me XXX */
	return 0;
}
 351 
/* LDC event callback (runs with vio->lock held, interrupts disabled).
 * Dispatches link state changes, then drains all pending messages from
 * the channel, routing DATA packets to the ack/nack handlers and CTRL
 * packets to the vio handshake engine.  A fatal error wakes any waiter.
 */
static void vdc_event(void *arg, int event)
{
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET)) {
		vio_link_state_change(vio, event);
		/* Full reset requires sleeping work; defer to the workqueue. */
		queue_work(sunvdc_wq, &port->ldc_reset_work);
		goto out;
	}

	if (unlikely(event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		goto out;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		pr_warn(PFX "Unexpected LDC event %d\n", event);
		goto out;
	}

	err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		/* ldc_read: <0 error, 0 no more data, >0 bytes read */
		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
			else
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vdc_handle_unknown(port, &msgbuf);
		}
		if (err < 0)
			break;
	}
	if (err < 0)
		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
out:
	spin_unlock_irqrestore(&vio->lock, flags);
}
 421 
/* Notify the server that the descriptor at dr->prod is ready, retrying
 * on -EAGAIN with exponential backoff (1us doubling, capped at 128us,
 * at most VDC_MAX_RETRIES attempts).  Called with vio->lock held.
 * -ENOTCONN triggers a full LDC reset.
 */
static int __vdc_tx_trigger(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= dr->prod,
		.end_idx		= dr->prod,
	};
	int err, delay;
	int retries = 0;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			/* Sent: advance the sequence for the next trigger. */
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VDC_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);

	if (err == -ENOTCONN)
		vdc_ldc_reset(port);
	return err;
}
 458 
/* Map a blk request's data into LDC cookies, fill the TX descriptor at
 * dr->prod, and trigger the server.  Called from vdc_queue_rq() with
 * vio->lock held.  On success the producer index and req_id advance;
 * on failure the caller ends the request with an error.
 */
static int __send_request(struct request *req)
{
	struct vdc_port *port = req->rq_disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct scatterlist sg[MAX_RING_COOKIES];
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	int nsg, err, i;
	u64 len;
	u8 op;

	/* sg[] above is sized for MAX_RING_COOKIES; refuse anything larger. */
	if (WARN_ON(port->ring_cookies > MAX_RING_COOKIES))
		return -EINVAL;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	/* Reads let the server write our buffers; writes let it read them. */
	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;
		op = VD_OP_BREAD;
	} else {
		map_perm |= LDC_MAP_R;
		op = VD_OP_BWRITE;
	}

	sg_init_table(sg, port->ring_cookies);
	nsg = blk_rq_map_sg(req->q, req, sg);

	/* Total transfer length in bytes. */
	len = 0;
	for (i = 0; i < nsg; i++)
		len += sg[i].length;

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,
			 map_perm);
	if (err < 0) {
		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
		return err;
	}

	/* Remember the request so vdc_end_one() can complete it on ACK. */
	rqe = &port->rq_arr[dr->prod];
	rqe->req = req;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
		desc->slice = 0xff;
	} else {
		desc->slice = 0;
	}
	desc->status = ~0;
	/* Convert the 512-byte sector position into device blocks. */
	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
	desc->size = len;
	desc->ncookies = err;	/* ldc_map_sg() returns the cookie count */

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err < 0) {
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
	} else {
		port->req_id++;
		dr->prod = vio_dring_next(dr, dr->prod);
	}

	return err;
}
 533 
/* blk-mq .queue_rq: submit one request to the TX ring.  Fails fast while
 * draining, stops the hw queue when the ring is full (restarted later by
 * vdc_blk_queue_start()), and otherwise hands off to __send_request()
 * under vio->lock.
 */
static blk_status_t vdc_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct vdc_port *port = hctx->queue->queuedata;
	struct vio_dring_state *dr;
	unsigned long flags;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	blk_mq_start_request(bd->rq);

	spin_lock_irqsave(&port->vio.lock, flags);

	/*
	 * Doing drain, just end the request in error
	 */
	if (unlikely(port->drain)) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return BLK_STS_IOERR;
	}

	if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		blk_mq_stop_hw_queue(hctx);
		return BLK_STS_DEV_RESOURCE;
	}

	if (__send_request(bd->rq) < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return BLK_STS_IOERR;
	}

	spin_unlock_irqrestore(&port->vio.lock, flags);
	return BLK_STS_OK;
}
 569 
 570 static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
 571 {
 572         struct vio_dring_state *dr;
 573         struct vio_completion comp;
 574         struct vio_disk_desc *desc;
 575         unsigned int map_perm;
 576         unsigned long flags;
 577         int op_len, err;
 578         void *req_buf;
 579 
 580         if (!(((u64)1 << (u64)op) & port->operations))
 581                 return -EOPNOTSUPP;
 582 
 583         switch (op) {
 584         case VD_OP_BREAD:
 585         case VD_OP_BWRITE:
 586         default:
 587                 return -EINVAL;
 588 
 589         case VD_OP_FLUSH:
 590                 op_len = 0;
 591                 map_perm = 0;
 592                 break;
 593 
 594         case VD_OP_GET_WCE:
 595                 op_len = sizeof(u32);
 596                 map_perm = LDC_MAP_W;
 597                 break;
 598 
 599         case VD_OP_SET_WCE:
 600                 op_len = sizeof(u32);
 601                 map_perm = LDC_MAP_R;
 602                 break;
 603 
 604         case VD_OP_GET_VTOC:
 605                 op_len = sizeof(struct vio_disk_vtoc);
 606                 map_perm = LDC_MAP_W;
 607                 break;
 608 
 609         case VD_OP_SET_VTOC:
 610                 op_len = sizeof(struct vio_disk_vtoc);
 611                 map_perm = LDC_MAP_R;
 612                 break;
 613 
 614         case VD_OP_GET_DISKGEOM:
 615                 op_len = sizeof(struct vio_disk_geom);
 616                 map_perm = LDC_MAP_W;
 617                 break;
 618 
 619         case VD_OP_SET_DISKGEOM:
 620                 op_len = sizeof(struct vio_disk_geom);
 621                 map_perm = LDC_MAP_R;
 622                 break;
 623 
 624         case VD_OP_SCSICMD:
 625                 op_len = 16;
 626                 map_perm = LDC_MAP_RW;
 627                 break;
 628 
 629         case VD_OP_GET_DEVID:
 630                 op_len = sizeof(struct vio_disk_devid);
 631                 map_perm = LDC_MAP_W;
 632                 break;
 633 
 634         case VD_OP_GET_EFI:
 635         case VD_OP_SET_EFI:
 636                 return -EOPNOTSUPP;
 637         };
 638 
 639         map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
 640 
 641         op_len = (op_len + 7) & ~7;
 642         req_buf = kzalloc(op_len, GFP_KERNEL);
 643         if (!req_buf)
 644                 return -ENOMEM;
 645 
 646         if (len > op_len)
 647                 len = op_len;
 648 
 649         if (map_perm & LDC_MAP_R)
 650                 memcpy(req_buf, buf, len);
 651 
 652         spin_lock_irqsave(&port->vio.lock, flags);
 653 
 654         dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 655 
 656         /* XXX If we want to use this code generically we have to
 657          * XXX handle TX ring exhaustion etc.
 658          */
 659         desc = vio_dring_cur(dr);
 660 
 661         err = ldc_map_single(port->vio.lp, req_buf, op_len,
 662                              desc->cookies, port->ring_cookies,
 663                              map_perm);
 664         if (err < 0) {
 665                 spin_unlock_irqrestore(&port->vio.lock, flags);
 666                 kfree(req_buf);
 667                 return err;
 668         }
 669 
 670         init_completion(&comp.com);
 671         comp.waiting_for = WAITING_FOR_GEN_CMD;
 672         port->vio.cmp = &comp;
 673 
 674         desc->hdr.ack = VIO_ACK_ENABLE;
 675         desc->req_id = port->req_id;
 676         desc->operation = op;
 677         desc->slice = 0;
 678         desc->status = ~0;
 679         desc->offset = 0;
 680         desc->size = op_len;
 681         desc->ncookies = err;
 682 
 683         /* This has to be a non-SMP write barrier because we are writing
 684          * to memory which is shared with the peer LDOM.
 685          */
 686         wmb();
 687         desc->hdr.state = VIO_DESC_READY;
 688 
 689         err = __vdc_tx_trigger(port);
 690         if (err >= 0) {
 691                 port->req_id++;
 692                 dr->prod = vio_dring_next(dr, dr->prod);
 693                 spin_unlock_irqrestore(&port->vio.lock, flags);
 694 
 695                 wait_for_completion(&comp.com);
 696                 err = comp.err;
 697         } else {
 698                 port->vio.cmp = NULL;
 699                 spin_unlock_irqrestore(&port->vio.lock, flags);
 700         }
 701 
 702         if (map_perm & LDC_MAP_W)
 703                 memcpy(buf, req_buf, len);
 704 
 705         kfree(req_buf);
 706 
 707         return err;
 708 }
 709 
/* Allocate and export the TX descriptor ring to the server over LDC,
 * then initialize the driver-side ring bookkeeping.
 * Returns 0 or a negative errno from ldc_alloc_exp_dring().
 */
static int vdc_alloc_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	unsigned long len, entry_size;
	int ncookies;
	void *dring;

	/* Each entry is a descriptor plus its variable cookie array. */
	entry_size = sizeof(struct vio_disk_desc) +
		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
	len = (VDC_TX_RING_SIZE * entry_size);

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring))
		return PTR_ERR(dring);

	dr->base = dring;
	dr->entry_size = entry_size;
	dr->num_entries = VDC_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VDC_TX_RING_SIZE;
	dr->ncookies = ncookies;	/* actual cookie count used */

	return 0;
}
 739 
 740 static void vdc_free_tx_ring(struct vdc_port *port)
 741 {
 742         struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 743 
 744         if (dr->base) {
 745                 ldc_free_exp_dring(port->vio.lp, dr->base,
 746                                    (dr->entry_size * dr->num_entries),
 747                                    dr->cookies, dr->ncookies);
 748                 dr->base = NULL;
 749                 dr->entry_size = 0;
 750                 dr->num_entries = 0;
 751                 dr->pending = 0;
 752                 dr->ncookies = 0;
 753         }
 754 }
 755 
/* Bring the vio link up and block until the handshake completes
 * (vdc_handshake_complete() fires the completion).  Returns the
 * waiter's error code, 0 on success.
 */
static int vdc_port_up(struct vdc_port *port)
{
	struct vio_completion comp;

	init_completion(&comp.com);
	comp.err = 0;
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);
	wait_for_completion(&comp.com);
	return comp.err;
}
 769 
/* Tear down the port's LDC channel and TX ring.  Order matters:
 * disconnect and unbind the channel before freeing the ring it maps.
 */
static void vdc_port_down(struct vdc_port *port)
{
	ldc_disconnect(port->vio.lp);
	ldc_unbind(port->vio.lp);
	vdc_free_tx_ring(port);
	vio_ldc_free(&port->vio);
}
 777 
/* blk-mq dispatch table: submission only; completion comes via LDC ACKs. */
static const struct blk_mq_ops vdc_mq_ops = {
	.queue_rq	= vdc_queue_rq,
};
 781 
/* Destroy the request queue and its tag set.  The port is fetched from
 * queuedata before blk_cleanup_queue() tears the queue down.
 */
static void cleanup_queue(struct request_queue *q)
{
	struct vdc_port *port = q->queuedata;

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&port->tag_set);
}
 789 
 790 static struct request_queue *init_queue(struct vdc_port *port)
 791 {
 792         struct request_queue *q;
 793 
 794         q = blk_mq_init_sq_queue(&port->tag_set, &vdc_mq_ops, VDC_TX_RING_SIZE,
 795                                         BLK_MQ_F_SHOULD_MERGE);
 796         if (IS_ERR(q))
 797                 return q;
 798 
 799         q->queuedata = port;
 800         return q;
 801 }
 802 
/* Bring the port up, determine the disk's size and media type, then
 * allocate and register the gendisk and its request queue.
 * Returns 0 on success or a negative errno.
 */
static int probe_disk(struct vdc_port *port)
{
	struct request_queue *q;
	struct gendisk *g;
	int err;

	err = vdc_port_up(port);
	if (err)
		return err;

	/* Using version 1.2 means vdisk_phys_blksz should be set unless the
	 * disk is reserved by another system.
	 */
	if (vdc_version_supported(port, 1, 2) && !port->vdisk_phys_blksz)
		return -ENODEV;

	if (vdc_version_supported(port, 1, 1)) {
		/* vdisk_size should be set during the handshake, if it wasn't
		 * then the underlying disk is reserved by another system
		 */
		if (port->vdisk_size == -1)
			return -ENODEV;
	} else {
		/* Protocol 1.0 has no size in the attr ACK; derive the
		 * capacity from the disk geometry instead.
		 */
		struct vio_disk_geom geom;

		err = generic_request(port, VD_OP_GET_DISKGEOM,
				      &geom, sizeof(geom));
		if (err < 0) {
			printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
			       "error %d\n", err);
			return err;
		}
		port->vdisk_size = ((u64)geom.num_cyl *
				    (u64)geom.num_hd *
				    (u64)geom.num_sec);
	}

	q = init_queue(port);
	if (IS_ERR(q)) {
		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
		       port->vio.name);
		return PTR_ERR(q);
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (!g) {
		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
		       port->vio.name);
		cleanup_queue(q);
		return -ENOMEM;
	}

	port->disk = g;

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	blk_queue_max_segment_size(q, PAGE_SIZE);

	/* One cookie per segment; max_xfer_size is in device blocks. */
	blk_queue_max_segments(q, port->ring_cookies);
	blk_queue_max_hw_sectors(q, port->max_xfer_size);
	g->major = vdc_major;
	/* Reserve PARTITION_SHIFT minor numbers per disk for partitions. */
	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
	strcpy(g->disk_name, port->disk_name);

	g->fops = &vdc_fops;
	g->queue = q;
	g->private_data = port;

	set_capacity(g, port->vdisk_size);

	if (vdc_version_supported(port, 1, 1)) {
		/* CD/DVD media are exposed read-only and removable. */
		switch (port->vdisk_mtype) {
		case VD_MEDIA_TYPE_CD:
			pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
			g->flags |= GENHD_FL_CD;
			g->flags |= GENHD_FL_REMOVABLE;
			set_disk_ro(g, 1);
			break;

		case VD_MEDIA_TYPE_DVD:
			pr_info(PFX "Virtual DVD %s\n", port->disk_name);
			g->flags |= GENHD_FL_CD;
			g->flags |= GENHD_FL_REMOVABLE;
			set_disk_ro(g, 1);
			break;

		case VD_MEDIA_TYPE_FIXED:
			pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
			break;
		}
	}

	blk_queue_physical_block_size(q, port->vdisk_phys_blksz);

	pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
	       g->disk_name,
	       port->vdisk_size, (port->vdisk_size >> (20 - 9)),
	       port->vio.ver.major, port->vio.ver.minor);

	device_add_disk(&port->vio.vdev->dev, g, NULL);

	return 0;
}
 905 
/* LDC channel configuration used for every vdc port: events are
 * delivered to vdc_event(); the small MTU and unreliable mode match
 * the descriptor-ring transport (payloads travel via mapped cookies,
 * not the channel itself).
 */
static struct ldc_channel_config vdc_ldc_cfg = {
        .event          = vdc_event,
        .mtu            = 64,
        .mode           = LDC_MODE_UNRELIABLE,
};
 911 
/* VIO handshake callbacks for the disk-client protocol. */
static struct vio_driver_ops vdc_vio_ops = {
        .send_attr              = vdc_send_attr,
        .handle_attr            = vdc_handle_attr,
        .handshake_complete     = vdc_handshake_complete,
};
 917 
 918 static void print_version(void)
 919 {
 920         static int version_printed;
 921 
 922         if (version_printed++ == 0)
 923                 printk(KERN_INFO "%s", version);
 924 }
 925 
/* Match criteria passed to vdc_device_probed() when scanning sibling
 * VIO ports for an already-configured member of the same mpgroup.
 */
struct vdc_check_port_data {
        int     dev_no;         /* device number to match */
        char    *type;          /* MD node type string to match */
};
 930 
 931 static int vdc_device_probed(struct device *dev, void *arg)
 932 {
 933         struct vio_dev *vdev = to_vio_dev(dev);
 934         struct vdc_check_port_data *port_data;
 935 
 936         port_data = (struct vdc_check_port_data *)arg;
 937 
 938         if ((vdev->dev_no == port_data->dev_no) &&
 939             (!(strcmp((char *)&vdev->type, port_data->type))) &&
 940                 dev_get_drvdata(dev)) {
 941                 /* This device has already been configured
 942                  * by vdc_port_probe()
 943                  */
 944                 return 1;
 945         } else {
 946                 return 0;
 947         }
 948 }
 949 
 950 /* Determine whether the VIO device is part of an mpgroup
 951  * by locating all the virtual-device-port nodes associated
 952  * with the parent virtual-device node for the VIO device
 953  * and checking whether any of these nodes are vdc-ports
 954  * which have already been configured.
 955  *
 956  * Returns true if this device is part of an mpgroup and has
 957  * already been probed.
 958  */
 959 static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
 960 {
 961         struct vdc_check_port_data port_data;
 962         struct device *dev;
 963 
 964         port_data.dev_no = vdev->dev_no;
 965         port_data.type = (char *)&vdev->type;
 966 
 967         dev = device_find_child(vdev->dev.parent, &port_data,
 968                                 vdc_device_probed);
 969 
 970         if (dev)
 971                 return true;
 972 
 973         return false;
 974 }
 975 
 976 static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 977 {
 978         struct mdesc_handle *hp;
 979         struct vdc_port *port;
 980         int err;
 981         const u64 *ldc_timeout;
 982 
 983         print_version();
 984 
 985         hp = mdesc_grab();
 986 
 987         err = -ENODEV;
 988         if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
 989                 printk(KERN_ERR PFX "Port id [%llu] too large.\n",
 990                        vdev->dev_no);
 991                 goto err_out_release_mdesc;
 992         }
 993 
 994         /* Check if this device is part of an mpgroup */
 995         if (vdc_port_mpgroup_check(vdev)) {
 996                 printk(KERN_WARNING
 997                         "VIO: Ignoring extra vdisk port %s",
 998                         dev_name(&vdev->dev));
 999                 goto err_out_release_mdesc;
1000         }
1001 
1002         port = kzalloc(sizeof(*port), GFP_KERNEL);
1003         err = -ENOMEM;
1004         if (!port) {
1005                 printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
1006                 goto err_out_release_mdesc;
1007         }
1008 
1009         if (vdev->dev_no >= 26)
1010                 snprintf(port->disk_name, sizeof(port->disk_name),
1011                          VDCBLK_NAME "%c%c",
1012                          'a' + ((int)vdev->dev_no / 26) - 1,
1013                          'a' + ((int)vdev->dev_no % 26));
1014         else
1015                 snprintf(port->disk_name, sizeof(port->disk_name),
1016                          VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
1017         port->vdisk_size = -1;
1018 
1019         /* Actual wall time may be double due to do_generic_file_read() doing
1020          * a readahead I/O first, and once that fails it will try to read a
1021          * single page.
1022          */
1023         ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
1024         port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
1025         INIT_DELAYED_WORK(&port->ldc_reset_timer_work, vdc_ldc_reset_timer_work);
1026         INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
1027 
1028         err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
1029                               vdc_versions, ARRAY_SIZE(vdc_versions),
1030                               &vdc_vio_ops, port->disk_name);
1031         if (err)
1032                 goto err_out_free_port;
1033 
1034         port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
1035         port->max_xfer_size = MAX_XFER_SIZE;
1036         port->ring_cookies = MAX_RING_COOKIES;
1037 
1038         err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
1039         if (err)
1040                 goto err_out_free_port;
1041 
1042         err = vdc_alloc_tx_ring(port);
1043         if (err)
1044                 goto err_out_free_ldc;
1045 
1046         err = probe_disk(port);
1047         if (err)
1048                 goto err_out_free_tx_ring;
1049 
1050         /* Note that the device driver_data is used to determine
1051          * whether the port has been probed.
1052          */
1053         dev_set_drvdata(&vdev->dev, port);
1054 
1055         mdesc_release(hp);
1056 
1057         return 0;
1058 
1059 err_out_free_tx_ring:
1060         vdc_free_tx_ring(port);
1061 
1062 err_out_free_ldc:
1063         vio_ldc_free(&port->vio);
1064 
1065 err_out_free_port:
1066         kfree(port);
1067 
1068 err_out_release_mdesc:
1069         mdesc_release(hp);
1070         return err;
1071 }
1072 
1073 static int vdc_port_remove(struct vio_dev *vdev)
1074 {
1075         struct vdc_port *port = dev_get_drvdata(&vdev->dev);
1076 
1077         if (port) {
1078                 blk_mq_stop_hw_queues(port->disk->queue);
1079 
1080                 flush_work(&port->ldc_reset_work);
1081                 cancel_delayed_work_sync(&port->ldc_reset_timer_work);
1082                 del_timer_sync(&port->vio.timer);
1083 
1084                 del_gendisk(port->disk);
1085                 cleanup_queue(port->disk->queue);
1086                 put_disk(port->disk);
1087                 port->disk = NULL;
1088 
1089                 vdc_free_tx_ring(port);
1090                 vio_ldc_free(&port->vio);
1091 
1092                 dev_set_drvdata(&vdev->dev, NULL);
1093 
1094                 kfree(port);
1095         }
1096         return 0;
1097 }
1098 
/* Retire every in-flight descriptor on the TX dring (consumer up to
 * producer) after an LDC link reset.  Block requests are handed back
 * to blk-mq for requeue; descriptors with no associated request are
 * "special" operations completed via vdc_end_special().
 */
static void vdc_requeue_inflight(struct vdc_port *port)
{
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
        u32 idx;

        for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
                struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
                struct vdc_req_entry *rqe = &port->rq_arr[idx];
                struct request *req;

                /* Unmap the LDC cookies and free the descriptor before
                 * advancing the consumer index past it.
                 */
                ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
                desc->hdr.state = VIO_DESC_FREE;
                dr->cons = vio_dring_next(dr, idx);

                req = rqe->req;
                if (req == NULL) {
                        /* Non-block (special) operation: complete it. */
                        vdc_end_special(port, desc);
                        continue;
                }

                rqe->req = NULL;
                blk_mq_requeue_request(req, false);
        }
}
1123 
/* Fail all outstanding block requests for a port whose LDC link has
 * been down too long.  Called with port->vio.lock held; the lock is
 * dropped around the freeze/quiesce calls, which may sleep.
 */
static void vdc_queue_drain(struct vdc_port *port)
{
        struct request_queue *q = port->disk->queue;

        /*
         * Mark the queue as draining, then freeze/quiesce to ensure
         * that all existing requests are seen in ->queue_rq() and killed
         */
        port->drain = 1;
        spin_unlock_irq(&port->vio.lock);

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        /* Re-acquire the lock before clearing drain so we return to
         * the caller in the same locking state we were entered with.
         */
        spin_lock_irq(&port->vio.lock);
        port->drain = 0;
        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);
}
1143 
1144 static void vdc_ldc_reset_timer_work(struct work_struct *work)
1145 {
1146         struct vdc_port *port;
1147         struct vio_driver_state *vio;
1148 
1149         port = container_of(work, struct vdc_port, ldc_reset_timer_work.work);
1150         vio = &port->vio;
1151 
1152         spin_lock_irq(&vio->lock);
1153         if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
1154                 pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
1155                         port->disk_name, port->ldc_timeout);
1156                 vdc_queue_drain(port);
1157                 vdc_blk_queue_start(port);
1158         }
1159         spin_unlock_irq(&vio->lock);
1160 }
1161 
1162 static void vdc_ldc_reset_work(struct work_struct *work)
1163 {
1164         struct vdc_port *port;
1165         struct vio_driver_state *vio;
1166         unsigned long flags;
1167 
1168         port = container_of(work, struct vdc_port, ldc_reset_work);
1169         vio = &port->vio;
1170 
1171         spin_lock_irqsave(&vio->lock, flags);
1172         vdc_ldc_reset(port);
1173         spin_unlock_irqrestore(&vio->lock, flags);
1174 }
1175 
1176 static void vdc_ldc_reset(struct vdc_port *port)
1177 {
1178         int err;
1179 
1180         assert_spin_locked(&port->vio.lock);
1181 
1182         pr_warn(PFX "%s ldc link reset\n", port->disk_name);
1183         blk_mq_stop_hw_queues(port->disk->queue);
1184         vdc_requeue_inflight(port);
1185         vdc_port_down(port);
1186 
1187         err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
1188         if (err) {
1189                 pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
1190                 return;
1191         }
1192 
1193         err = vdc_alloc_tx_ring(port);
1194         if (err) {
1195                 pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
1196                 goto err_free_ldc;
1197         }
1198 
1199         if (port->ldc_timeout)
1200                 mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
1201                           round_jiffies(jiffies + HZ * port->ldc_timeout));
1202         mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
1203         return;
1204 
1205 err_free_ldc:
1206         vio_ldc_free(&port->vio);
1207 }
1208 
/* Bind to every MD node of type "vdc-port". */
static const struct vio_device_id vdc_port_match[] = {
        {
                .type = "vdc-port",
        },
        {},
};
MODULE_DEVICE_TABLE(vio, vdc_port_match);
1216 
/* VIO bus driver registration for vdc ports. */
static struct vio_driver vdc_port_driver = {
        .id_table       = vdc_port_match,
        .probe          = vdc_port_probe,
        .remove         = vdc_port_remove,
        .name           = "vdc_port",
};
1223 
1224 static int __init vdc_init(void)
1225 {
1226         int err;
1227 
1228         sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
1229         if (!sunvdc_wq)
1230                 return -ENOMEM;
1231 
1232         err = register_blkdev(0, VDCBLK_NAME);
1233         if (err < 0)
1234                 goto out_free_wq;
1235 
1236         vdc_major = err;
1237 
1238         err = vio_register_driver(&vdc_port_driver);
1239         if (err)
1240                 goto out_unregister_blkdev;
1241 
1242         return 0;
1243 
1244 out_unregister_blkdev:
1245         unregister_blkdev(vdc_major, VDCBLK_NAME);
1246         vdc_major = 0;
1247 
1248 out_free_wq:
1249         destroy_workqueue(sunvdc_wq);
1250         return err;
1251 }
1252 
/* Module teardown: release resources in reverse order of vdc_init(). */
static void __exit vdc_exit(void)
{
        vio_unregister_driver(&vdc_port_driver);
        unregister_blkdev(vdc_major, VDCBLK_NAME);
        destroy_workqueue(sunvdc_wq);
}
1259 
/* Module load/unload entry points. */
module_init(vdc_init);
module_exit(vdc_exit);

/* [<][>][^][v][top][bottom][index][help] */