/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci_common.h"

/*
 * Type-safe wrappers for io accesses.
 * Use these to enforce at compile time the following spec requirement:
 *
 * The driver MUST access each field using the “natural” access
 * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
 * for 16-bit fields and 8-bit accesses for 8-bit fields.
 */
static inline u8 vp_ioread8(u8 __iomem *addr)
{
	return ioread8(addr);
}

static inline u16 vp_ioread16(u16 __iomem *addr)
{
	return ioread16(addr);
}

static inline u32 vp_ioread32(u32 __iomem *addr)
{
	return ioread32(addr);
}

static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
{
	iowrite8(value, addr);
}

static inline void vp_iowrite16(u16 value, u16 __iomem *addr)
{
	iowrite16(value, addr);
}

static inline void vp_iowrite32(u32 value, u32 __iomem *addr)
{
	iowrite32(value, addr);
}

static void vp_iowrite64_twopart(u64 val,
				 __le32 __iomem *lo, __le32 __iomem *hi)
{
	vp_iowrite32((u32)val, lo);
	vp_iowrite32(val >> 32, hi);
}

static void __iomem *map_capability(struct pci_dev *dev, int off,
				    size_t minlen,
				    u32 align,
				    u32 start, u32 size,
				    size_t *len)
{
	u8 bar;
	u32 offset, length;
	void __iomem *p;

	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
						 bar),
			     &bar);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
			      &offset);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
			      &length);

	if (length <= start) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>%u expected)\n",
			length, start);
		return NULL;
	}

	if (length - start < minlen) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>=%zu expected)\n",
			length, minlen);
		return NULL;
	}

	length -= start;

	if (start + offset < offset) {
		dev_err(&dev->dev,
			"virtio_pci: map wrap-around %u+%u\n",
			start, offset);
		return NULL;
	}

	offset += start;

	if (offset & (align - 1)) {
		dev_err(&dev->dev,
			"virtio_pci: offset %u not aligned to %u\n",
			offset, align);
		return NULL;
	}

	if (length > size)
		length = size;

	if (len)
		*len = length;

	if (minlen + offset < minlen ||
	    minlen + offset > pci_resource_len(dev, bar)) {
		dev_err(&dev->dev,
			"virtio_pci: map virtio %zu@%u "
			"out of range on bar %i length %lu\n",
			minlen, offset,
			bar, (unsigned long)pci_resource_len(dev, bar));
		return NULL;
	}

	p = pci_iomap_range(dev, bar, offset, length);
	if (!p)
		dev_err(&dev->dev,
			"virtio_pci: unable to map virtio %u@%u on bar %i\n",
			length, offset, bar);
	return p;
}

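/*
 * Feature bits are 64 bits wide, but the common config only exposes
 * 32-bit windows: writing 0 or 1 to device_feature_select (or
 * guest_feature_select) picks the low or high word before the matching
 * 32-bit access to device_feature (or guest_feature). Both
 * vp_get_features() and vp_finalize_features() below rely on this.
 */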
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features;

	vp_iowrite32(0, &vp_dev->common->device_feature_select);
	features = vp_ioread32(&vp_dev->common->device_feature);
	vp_iowrite32(1, &vp_dev->common->device_feature_select);
	features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);

	return features;
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses modern interface "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	vp_iowrite32(0, &vp_dev->common->guest_feature_select);
	vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
	vp_iowrite32(1, &vp_dev->common->guest_feature_select);
	vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);

	return 0;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > vp_dev->device_len);

	switch (len) {
	case 1:
		b = ioread8(vp_dev->device + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(ioread16(vp_dev->device + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(ioread32(vp_dev->device + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(ioread32(vp_dev->device + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > vp_dev->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		iowrite8(b, vp_dev->device + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		iowrite16(le16_to_cpu(w), vp_dev->device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

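/*
 * config_generation lets the driver detect torn reads of multi-byte
 * config fields: the virtio core reads the generation before and after
 * a config access and retries the access if the value changed in
 * between (see __virtio_cread_many()).
 */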
static u32 vp_generation(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return vp_ioread8(&vp_dev->common->config_generation);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return vp_ioread8(&vp_dev->common->device_status);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	vp_iowrite8(status, &vp_dev->common->device_status);
}

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	vp_iowrite8(0, &vp_dev->common->device_status);
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 */
	while (vp_ioread8(&vp_dev->common->device_status))
		msleep(1);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	/* Setup the vector used for configuration events */
	vp_iowrite16(vector, &vp_dev->common->msix_config);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return vp_ioread16(&vp_dev->common->msix_config);
}

static size_t vring_pci_size(u16 num)
{
	/* We only need a cacheline separation. */
	return PAGE_ALIGN(vring_size(num, SMP_CACHE_BYTES));
}

static void *alloc_virtqueue_pages(int *num)
{
	void *pages;

	/* TODO: allocate each queue chunk individually */
	for (; *num && vring_pci_size(*num) > PAGE_SIZE; *num /= 2) {
		pages = alloc_pages_exact(vring_pci_size(*num),
					  GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
		if (pages)
			return pages;
	}

	if (!*num)
		return NULL;

	/* Try to get a single page. You are my only hope! */
	return alloc_pages_exact(vring_pci_size(*num), GFP_KERNEL|__GFP_ZERO);
}

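/*
 * setup_vq() brings up one virtqueue: select it via queue_select, read
 * its maximum size, allocate the vring, program the descriptor/avail/used
 * addresses into the common config, and record the per-queue notification
 * address (queue_notify_off scaled by notify_off_multiplier) in vq->priv
 * for vp_notify() to write to. The queue is only enabled later, in
 * vp_modern_find_vqs().
 */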
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  u16 msix_vec)
{
	struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
	struct virtqueue *vq;
	u16 num, off;
	int err;

	if (index >= vp_ioread16(&cfg->num_queues))
		return ERR_PTR(-ENOENT);

	/* Select the queue we're interested in */
	vp_iowrite16(index, &cfg->queue_select);

	/* Check if queue is either not available or already active. */
	num = vp_ioread16(&cfg->queue_size);
	if (!num || vp_ioread16(&cfg->queue_enable))
		return ERR_PTR(-ENOENT);

	if (num & (num - 1)) {
		dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
		return ERR_PTR(-EINVAL);
	}

	/* get offset of notification word for this vq */
	off = vp_ioread16(&cfg->queue_notify_off);

	info->num = num;
	info->msix_vector = msix_vec;

	info->queue = alloc_virtqueue_pages(&info->num);
	if (info->queue == NULL)
		return ERR_PTR(-ENOMEM);

	/* create the vring */
	vq = vring_new_virtqueue(index, info->num,
				 SMP_CACHE_BYTES, &vp_dev->vdev,
				 true, info->queue, vp_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto err_new_queue;
	}

	/* activate the queue */
	vp_iowrite16(num, &cfg->queue_size);
	vp_iowrite64_twopart(virt_to_phys(info->queue),
			     &cfg->queue_desc_lo, &cfg->queue_desc_hi);
	vp_iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
			     &cfg->queue_avail_lo, &cfg->queue_avail_hi);
	vp_iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
			     &cfg->queue_used_lo, &cfg->queue_used_hi);

	if (vp_dev->notify_base) {
		/* offset should not wrap */
		if ((u64)off * vp_dev->notify_offset_multiplier + 2
		    > vp_dev->notify_len) {
			dev_warn(&vp_dev->pci_dev->dev,
				 "bad notification offset %u (x %u) "
				 "for queue %u > %zd",
				 off, vp_dev->notify_offset_multiplier,
				 index, vp_dev->notify_len);
			err = -EINVAL;
			goto err_map_notify;
		}
		vq->priv = (void __force *)vp_dev->notify_base +
			   off * vp_dev->notify_offset_multiplier;
	} else {
		vq->priv = (void __force *)map_capability(vp_dev->pci_dev,
					  vp_dev->notify_map_cap, 2, 2,
					  off * vp_dev->notify_offset_multiplier, 2,
					  NULL);
	}

	if (!vq->priv) {
		err = -ENOMEM;
		goto err_map_notify;
	}

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
		msix_vec = vp_ioread16(&cfg->queue_msix_vector);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto err_assign_vector;
		}
	}

	return vq;

err_assign_vector:
	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
	vring_del_virtqueue(vq);
err_new_queue:
	free_pages_exact(info->queue, vring_pci_size(info->num));
	return ERR_PTR(err);
}

static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char *names[])
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names);

	if (rc)
		return rc;

	/* Select and activate all queues. Has to be done last: once we do
	 * this, there's no way to go back except reset.
	 */
	list_for_each_entry(vq, &vdev->vqs, list) {
		vp_iowrite16(vq->index, &vp_dev->common->queue_select);
		vp_iowrite16(1, &vp_dev->common->queue_enable);
	}

	return 0;
}

static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

	vp_iowrite16(vq->index, &vp_dev->common->queue_select);

	if (vp_dev->msix_enabled) {
		vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
			     &vp_dev->common->queue_msix_vector);
		/* Flush the write out to device */
		vp_ioread16(&vp_dev->common->queue_msix_vector);
	}

	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);

	free_pages_exact(info->queue, vring_pci_size(info->num));
}

static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.get		= NULL,
	.set		= NULL,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
};

static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
};

/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the pci device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
					     u32 ioresource_types)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, bar;
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type),
				     &type);
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar),
				     &bar);

		/* Ignore structures with reserved BAR values */
		if (bar > 0x5)
			continue;

		if (type == cfg_type) {
			if (pci_resource_len(dev, bar) &&
			    pci_resource_flags(dev, bar) & ioresource_types)
				return pos;
		}
	}
	return 0;
}

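/*
 * check_offsets() pins the layout assumptions: each VIRTIO_PCI_CAP_* and
 * VIRTIO_PCI_COMMON_* constant from the uapi header must equal the offset
 * of the corresponding structure field, otherwise the build fails via
 * BUILD_BUG_ON.
 */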
/* This is part of the ABI. Don't screw with it. */
static inline void check_offsets(void)
{
	/* Note: disk space was harmed in compilation of this function. */
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}

/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;

	check_offsets();

	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
		return -ENODEV;

	if (pci_dev->device < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		vp_dev->vdev.id.device = pci_dev->subsystem_device;
	} else {
		/* Modern devices: simply use PCI device id, but start from
		 * 0x1040.
		 */
		vp_dev->vdev.id.device = pci_dev->device - 0x1040;
	}
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;

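	/*
	 * The modern interface is described by vendor-specific PCI
	 * capabilities (struct virtio_pci_cap), each naming a BAR plus an
	 * offset and length within it; virtio_pci_find_capability() walks
	 * the capability list and map_capability() ioremaps the region.
	 */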
	/* check for a common config: if not, use legacy mode (bar 0). */
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}

	/* If common is there, these should be too... */
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM);

	err = -EINVAL;
	vp_dev->common = map_capability(pci_dev, common,
					sizeof(struct virtio_pci_common_cfg), 4,
					0, sizeof(struct virtio_pci_common_cfg),
					NULL);
	if (!vp_dev->common)
		goto err_map_common;
	vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
				     0, 1,
				     NULL);
	if (!vp_dev->isr)
		goto err_map_isr;

	/* Read notify_off_multiplier from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &vp_dev->notify_offset_multiplier);
	/* Read notify length and offset from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);

	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);

	/* We don't know how many VQs we'll map, ahead of the time.
	 * If notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
						     0, notify_length,
						     &vp_dev->notify_len);
		if (!vp_dev->notify_base)
			goto err_map_notify;
	} else {
		vp_dev->notify_map_cap = notify;
	}

	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */
	if (device) {
		vp_dev->device = map_capability(pci_dev, device, 0, 4,
						0, PAGE_SIZE,
						&vp_dev->device_len);
		if (!vp_dev->device)
			goto err_map_device;

		vp_dev->vdev.config = &virtio_pci_config_ops;
	} else {
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
	}

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	return 0;

err_map_device:
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, vp_dev->isr);
err_map_isr:
	pci_iounmap(pci_dev, vp_dev->common);
err_map_common:
	return err;
}

void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if (vp_dev->device)
		pci_iounmap(pci_dev, vp_dev->device);
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
	pci_iounmap(pci_dev, vp_dev->isr);
	pci_iounmap(pci_dev, vp_dev->common);
}