root/drivers/virtio/virtio_pci_common.c

DEFINITIONS

This source file includes the following definitions:
  1. vp_synchronize_vectors
  2. vp_notify
  3. vp_config_changed
  4. vp_vring_interrupt
  5. vp_interrupt
  6. vp_request_msix_vectors
  7. vp_setup_vq
  8. vp_del_vq
  9. vp_del_vqs
  10. vp_find_vqs_msix
  11. vp_find_vqs_intx
  12. vp_find_vqs
  13. vp_bus_name
  14. vp_set_vq_affinity
  15. vp_get_vq_affinity
  16. virtio_pci_freeze
  17. virtio_pci_restore
  18. virtio_pci_release_dev
  19. virtio_pci_probe
  20. virtio_pci_remove
  21. virtio_pci_sriov_configure

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU-based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
                 "Force legacy mode for transitional virtio 1 devices");
#endif
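
/*
 * Usage note (editor's addition): force_legacy is read-only at runtime
 * (mode 0444), so it has to be set at load time, e.g. with
 * "modprobe virtio_pci force_legacy=1" or the kernel command line
 * parameter "virtio_pci.force_legacy=1".
 */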

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;

        if (vp_dev->intx_enabled)
                synchronize_irq(vp_dev->pci_dev->irq);

        for (i = 0; i < vp_dev->msix_vectors; ++i)
                synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
        /* we write the queue's selector into the notification register to
         * signal the other end */
        iowrite16(vq->index, (void __iomem *)vq->priv);
        return true;
}
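
/*
 * Note (editor's addition): vq->priv is filled in by the transport-specific
 * setup_vq() (legacy or modern), which stores the address of this queue's
 * notification register there, so the single iowrite16() above works for
 * either device version.
 */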

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;

        virtio_config_changed(&vp_dev->vdev);
        return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        struct virtio_pci_vq_info *info;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        spin_lock_irqsave(&vp_dev->lock, flags);
        list_for_each_entry(info, &vp_dev->virtqueues, node) {
                if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        u8 isr;

        /* reading the ISR has the effect of also clearing it so it's very
         * important to save off the value. */
        isr = ioread8(vp_dev->isr);

        /* It's definitely not us if the ISR was not high */
        if (!isr)
                return IRQ_NONE;

        /* Configuration change?  Tell driver if it wants to know. */
        if (isr & VIRTIO_PCI_ISR_CONFIG)
                vp_config_changed(irq, opaque);

        return vp_vring_interrupt(irq, opaque);
}
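
/*
 * Note (editor's addition): the ISR status byte is read-to-clear; bit 0
 * signals a queue interrupt and bit 1 (VIRTIO_PCI_ISR_CONFIG) a
 * configuration change, which is why both causes are tested against the
 * single saved value above.
 */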

static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
                                   bool per_vq_vectors, struct irq_affinity *desc)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        const char *name = dev_name(&vp_dev->vdev.dev);
        unsigned flags = PCI_IRQ_MSIX;
        unsigned i, v;
        int err = -ENOMEM;

        vp_dev->msix_vectors = nvectors;

        vp_dev->msix_names = kmalloc_array(nvectors,
                                           sizeof(*vp_dev->msix_names),
                                           GFP_KERNEL);
        if (!vp_dev->msix_names)
                goto error;
        vp_dev->msix_affinity_masks
                = kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
                          GFP_KERNEL);
        if (!vp_dev->msix_affinity_masks)
                goto error;
        for (i = 0; i < nvectors; ++i)
                if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
                                        GFP_KERNEL))
                        goto error;

        if (desc) {
                flags |= PCI_IRQ_AFFINITY;
                desc->pre_vectors++; /* virtio config vector */
        }

        err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
                                             nvectors, flags, desc);
        if (err < 0)
                goto error;
        vp_dev->msix_enabled = 1;

        /* Set the vector used for configuration */
        v = vp_dev->msix_used_vectors;
        snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                 "%s-config", name);
        err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
                          vp_config_changed, 0, vp_dev->msix_names[v],
                          vp_dev);
        if (err)
                goto error;
        ++vp_dev->msix_used_vectors;

        v = vp_dev->config_vector(vp_dev, v);
        /* Verify we had enough resources to assign the vector */
        if (v == VIRTIO_MSI_NO_VECTOR) {
                err = -EBUSY;
                goto error;
        }

        if (!per_vq_vectors) {
                /* Shared vector for all VQs */
                v = vp_dev->msix_used_vectors;
                snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                         "%s-virtqueues", name);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
                                  vp_vring_interrupt, 0, vp_dev->msix_names[v],
                                  vp_dev);
                if (err)
                        goto error;
                ++vp_dev->msix_used_vectors;
        }
        return 0;
error:
        return err;
}
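
/*
 * Example (illustrative, editor's addition): with per_vq_vectors set and
 * three virtqueues that all have callbacks, the resulting MSI-X layout is
 *
 *      vector 0: <dev>-config      (configuration changes)
 *      vector 1: <dev>-<vq0 name>
 *      vector 2: <dev>-<vq1 name>
 *      vector 3: <dev>-<vq2 name>
 *
 * (the per-vq irqs themselves are requested later, in vp_find_vqs_msix()),
 * whereas with per_vq_vectors clear only vector 0 (<dev>-config) and
 * vector 1 (<dev>-virtqueues, shared by all queues) are used.
 */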

static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name,
                                     bool ctx,
                                     u16 msix_vec)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
        struct virtqueue *vq;
        unsigned long flags;

        /* fill out our structure that represents an active queue */
        if (!info)
                return ERR_PTR(-ENOMEM);

        vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
                              msix_vec);
        if (IS_ERR(vq))
                goto out_info;

        info->vq = vq;
        if (callback) {
                spin_lock_irqsave(&vp_dev->lock, flags);
                list_add(&info->node, &vp_dev->virtqueues);
                spin_unlock_irqrestore(&vp_dev->lock, flags);
        } else {
                INIT_LIST_HEAD(&info->node);
        }

        vp_dev->vqs[index] = info;
        return vq;

out_info:
        kfree(info);
        return vq;
}

static void vp_del_vq(struct virtqueue *vq)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
        struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
        unsigned long flags;

        spin_lock_irqsave(&vp_dev->lock, flags);
        list_del(&info->node);
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        vp_dev->del_vq(info);
        kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtqueue *vq, *n;
        int i;

        list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
                if (vp_dev->per_vq_vectors) {
                        int v = vp_dev->vqs[vq->index]->msix_vector;

                        if (v != VIRTIO_MSI_NO_VECTOR) {
                                int irq = pci_irq_vector(vp_dev->pci_dev, v);

                                irq_set_affinity_hint(irq, NULL);
                                free_irq(irq, vq);
                        }
                }
                vp_del_vq(vq);
        }
        vp_dev->per_vq_vectors = false;

        if (vp_dev->intx_enabled) {
                free_irq(vp_dev->pci_dev->irq, vp_dev);
                vp_dev->intx_enabled = 0;
        }

        for (i = 0; i < vp_dev->msix_used_vectors; ++i)
                free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

        if (vp_dev->msix_affinity_masks) {
                for (i = 0; i < vp_dev->msix_vectors; i++)
                        if (vp_dev->msix_affinity_masks[i])
                                free_cpumask_var(vp_dev->msix_affinity_masks[i]);
        }

        if (vp_dev->msix_enabled) {
                /* Disable the vector used for configuration */
                vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

                pci_free_irq_vectors(vp_dev->pci_dev);
                vp_dev->msix_enabled = 0;
        }

        vp_dev->msix_vectors = 0;
        vp_dev->msix_used_vectors = 0;
        kfree(vp_dev->msix_names);
        vp_dev->msix_names = NULL;
        kfree(vp_dev->msix_affinity_masks);
        vp_dev->msix_affinity_masks = NULL;
        kfree(vp_dev->vqs);
        vp_dev->vqs = NULL;
}

static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], bool per_vq_vectors,
                const bool *ctx,
                struct irq_affinity *desc)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        u16 msix_vec;
        int i, err, nvectors, allocated_vectors, queue_idx = 0;

        vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
        if (!vp_dev->vqs)
                return -ENOMEM;

        if (per_vq_vectors) {
                /* Best option: one for change interrupt, one per vq. */
                nvectors = 1;
                for (i = 0; i < nvqs; ++i)
                        if (names[i] && callbacks[i])
                                ++nvectors;
        } else {
                /* Second best: one for change, shared for all vqs. */
                nvectors = 2;
        }

        err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
                                      per_vq_vectors ? desc : NULL);
        if (err)
                goto error_find;

        vp_dev->per_vq_vectors = per_vq_vectors;
        allocated_vectors = vp_dev->msix_used_vectors;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }

                if (!callbacks[i])
                        msix_vec = VIRTIO_MSI_NO_VECTOR;
                else if (vp_dev->per_vq_vectors)
                        msix_vec = allocated_vectors++;
                else
                        msix_vec = VP_MSIX_VQ_VECTOR;
                vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
                                     ctx ? ctx[i] : false,
                                     msix_vec);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto error_find;
                }

                if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
                        continue;

                /* allocate per-vq irq if available and necessary */
                snprintf(vp_dev->msix_names[msix_vec],
                         sizeof *vp_dev->msix_names,
                         "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
                                  vring_interrupt, 0,
                                  vp_dev->msix_names[msix_vec],
                                  vqs[i]);
                if (err)
                        goto error_find;
        }
        return 0;

error_find:
        vp_del_vqs(vdev);
        return err;
}

static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], const bool *ctx)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i, err, queue_idx = 0;

        vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
        if (!vp_dev->vqs)
                return -ENOMEM;

        err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
                        dev_name(&vdev->dev), vp_dev);
        if (err)
                goto out_del_vqs;

        vp_dev->intx_enabled = 1;
        vp_dev->per_vq_vectors = false;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }
                vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
                                     ctx ? ctx[i] : false,
                                     VIRTIO_MSI_NO_VECTOR);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto out_del_vqs;
                }
        }

        return 0;
out_del_vqs:
        vp_del_vqs(vdev);
        return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], const bool *ctx,
                struct irq_affinity *desc)
{
        int err;

        /* Try MSI-X with one vector per queue. */
        err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
        if (!err)
                return 0;
        /* Fallback: MSI-X with one vector for config, one shared for queues. */
        err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
        if (!err)
                return 0;
        /* Finally fall back to regular interrupts. */
        return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}
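
/*
 * Example (editor's sketch, hypothetical driver code): a virtio driver
 * normally reaches vp_find_vqs() through the virtio_find_vqs() wrapper
 * from <linux/virtio_config.h>; the callbacks and queue names below are
 * made up for illustration:
 *
 *      static void my_rx_done(struct virtqueue *vq) { ... }
 *      static void my_tx_done(struct virtqueue *vq) { ... }
 *
 *      struct virtqueue *vqs[2];
 *      vq_callback_t *callbacks[] = { my_rx_done, my_tx_done };
 *      static const char * const names[] = { "rx", "tx" };
 *      int err = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
 */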

const char *vp_bus_name(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
        struct virtio_device *vdev = vq->vdev;
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
        struct cpumask *mask;
        unsigned int irq;

        if (!vq->callback)
                return -EINVAL;

        if (vp_dev->msix_enabled) {
                mask = vp_dev->msix_affinity_masks[info->msix_vector];
                irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
                if (!cpu_mask)
                        irq_set_affinity_hint(irq, NULL);
                else {
                        cpumask_copy(mask, cpu_mask);
                        irq_set_affinity_hint(irq, mask);
                }
        }
        return 0;
}
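
/*
 * Example (editor's sketch): drivers reach this through the
 * virtqueue_set_affinity() wrapper; a multiqueue driver might spread its
 * queues across CPUs roughly like
 *
 *      virtqueue_set_affinity(vq, cpumask_of(i % num_online_cpus()));
 *
 * which, per the comment above, is silently ignored when the device fell
 * back to INTx.
 */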

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        if (!vp_dev->per_vq_vectors ||
            vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
                return NULL;

        return pci_irq_get_affinity(vp_dev->pci_dev,
                                    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        int ret;

        ret = virtio_device_freeze(&vp_dev->vdev);

        if (!ret)
                pci_disable_device(pci_dev);
        return ret;
}

static int virtio_pci_restore(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        int ret;

        ret = pci_enable_device(pci_dev);
        if (ret)
                return ret;

        pci_set_master(pci_dev);
        return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif


/* Qumranet donated their vendor ID for devices 0x1000 through 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
        struct virtio_device *vdev = dev_to_virtio(_d);
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* As struct device is a kobject, it's not safe to
         * free the memory (including the reference counter itself)
         * until its release callback has run. */
        kfree(vp_dev);
}

static int virtio_pci_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id)
{
        struct virtio_pci_device *vp_dev, *reg_dev = NULL;
        int rc;

        /* allocate our structure and fill it out */
        vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
        if (!vp_dev)
                return -ENOMEM;

        pci_set_drvdata(pci_dev, vp_dev);
        vp_dev->vdev.dev.parent = &pci_dev->dev;
        vp_dev->vdev.dev.release = virtio_pci_release_dev;
        vp_dev->pci_dev = pci_dev;
        INIT_LIST_HEAD(&vp_dev->virtqueues);
        spin_lock_init(&vp_dev->lock);

        /* enable the device */
        rc = pci_enable_device(pci_dev);
        if (rc)
                goto err_enable_device;

        if (force_legacy) {
                rc = virtio_pci_legacy_probe(vp_dev);
                /* Also try modern mode if we can't map BAR0 (no IO space). */
                if (rc == -ENODEV || rc == -ENOMEM)
                        rc = virtio_pci_modern_probe(vp_dev);
                if (rc)
                        goto err_probe;
        } else {
                rc = virtio_pci_modern_probe(vp_dev);
                if (rc == -ENODEV)
                        rc = virtio_pci_legacy_probe(vp_dev);
                if (rc)
                        goto err_probe;
        }

        pci_set_master(pci_dev);

        rc = register_virtio_device(&vp_dev->vdev);
        reg_dev = vp_dev;
        if (rc)
                goto err_register;

        return 0;

err_register:
        if (vp_dev->ioaddr)
                virtio_pci_legacy_remove(vp_dev);
        else
                virtio_pci_modern_remove(vp_dev);
err_probe:
        pci_disable_device(pci_dev);
err_enable_device:
        if (reg_dev)
                put_device(&vp_dev->vdev.dev);
        else
                kfree(vp_dev);
        return rc;
}

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        struct device *dev = get_device(&vp_dev->vdev.dev);

        pci_disable_sriov(pci_dev);

        unregister_virtio_device(&vp_dev->vdev);

        if (vp_dev->ioaddr)
                virtio_pci_legacy_remove(vp_dev);
        else
                virtio_pci_modern_remove(vp_dev);

        pci_disable_device(pci_dev);
        put_device(dev);
}

static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        struct virtio_device *vdev = &vp_dev->vdev;
        int ret;

        if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
                return -EBUSY;

        if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
                return -EINVAL;

        if (pci_vfs_assigned(pci_dev))
                return -EPERM;

        if (num_vfs == 0) {
                pci_disable_sriov(pci_dev);
                return 0;
        }

        ret = pci_enable_sriov(pci_dev, num_vfs);
        if (ret < 0)
                return ret;

        return num_vfs;
}
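
/*
 * Usage note (editor's addition): this callback is driven from userspace
 * through the standard PCI sysfs interface, e.g. (device address
 * hypothetical)
 *
 *      echo 4 > /sys/bus/pci/devices/0000:00:05.0/sriov_numvfs
 *
 * and only succeeds once the guest driver has set DRIVER_OK on a device
 * that offered VIRTIO_F_SR_IOV.
 */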

static struct pci_driver virtio_pci_driver = {
        .name           = "virtio-pci",
        .id_table       = virtio_pci_id_table,
        .probe          = virtio_pci_probe,
        .remove         = virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
        .driver.pm      = &virtio_pci_pm_ops,
#endif
        .sriov_configure = virtio_pci_sriov_configure,
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");
