Lines Matching refs:vp_dev

33 struct virtio_pci_device *vp_dev = to_vp_device(vdev); in vp_synchronize_vectors() local
36 if (vp_dev->intx_enabled) in vp_synchronize_vectors()
37 synchronize_irq(vp_dev->pci_dev->irq); in vp_synchronize_vectors()
39 for (i = 0; i < vp_dev->msix_vectors; ++i) in vp_synchronize_vectors()
40 synchronize_irq(vp_dev->msix_entries[i].vector); in vp_synchronize_vectors()
55 struct virtio_pci_device *vp_dev = opaque; in vp_config_changed() local
57 virtio_config_changed(&vp_dev->vdev); in vp_config_changed()
64 struct virtio_pci_device *vp_dev = opaque; in vp_vring_interrupt() local
69 spin_lock_irqsave(&vp_dev->lock, flags); in vp_vring_interrupt()
70 list_for_each_entry(info, &vp_dev->virtqueues, node) { in vp_vring_interrupt()
74 spin_unlock_irqrestore(&vp_dev->lock, flags); in vp_vring_interrupt()
87 struct virtio_pci_device *vp_dev = opaque; in vp_interrupt() local
92 isr = ioread8(vp_dev->isr); in vp_interrupt()
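
Nearly every match above begins by recovering vp_dev, either with to_vp_device(vdev) or by casting back the opaque pointer that was registered with request_irq(). The following is a minimal userspace sketch, not the kernel's actual helper and with the struct trimmed to two illustrative fields, of the container_of cast that to_vp_device() performs to step from the embedded struct virtio_device back to its enclosing struct virtio_pci_device (these matches appear to come from the kernel's virtio_pci_common.c):

    /*
     * Minimal userspace sketch of the container_of pattern behind
     * to_vp_device(); fields and types are trimmed, not kernel code.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct virtio_device { int index; };

    struct virtio_pci_device {
            struct virtio_device vdev;   /* embedded generic device */
            int msix_enabled;
    };

    static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
    {
            return container_of(vdev, struct virtio_pci_device, vdev);
    }

    int main(void)
    {
            struct virtio_pci_device vp_dev = { .vdev = { .index = 0 }, .msix_enabled = 1 };
            struct virtio_device *vdev = &vp_dev.vdev;

            /* Same recovery step the vp_*() helpers in the listing perform on entry. */
            printf("msix_enabled=%d\n", to_vp_device(vdev)->msix_enabled);
            return 0;
    }
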
107 struct virtio_pci_device *vp_dev = to_vp_device(vdev); in vp_free_vectors() local
110 if (vp_dev->intx_enabled) { in vp_free_vectors()
111 free_irq(vp_dev->pci_dev->irq, vp_dev); in vp_free_vectors()
112 vp_dev->intx_enabled = 0; in vp_free_vectors()
115 for (i = 0; i < vp_dev->msix_used_vectors; ++i) in vp_free_vectors()
116 free_irq(vp_dev->msix_entries[i].vector, vp_dev); in vp_free_vectors()
118 for (i = 0; i < vp_dev->msix_vectors; i++) in vp_free_vectors()
119 if (vp_dev->msix_affinity_masks[i]) in vp_free_vectors()
120 free_cpumask_var(vp_dev->msix_affinity_masks[i]); in vp_free_vectors()
122 if (vp_dev->msix_enabled) { in vp_free_vectors()
124 vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); in vp_free_vectors()
126 pci_disable_msix(vp_dev->pci_dev); in vp_free_vectors()
127 vp_dev->msix_enabled = 0; in vp_free_vectors()
130 vp_dev->msix_vectors = 0; in vp_free_vectors()
131 vp_dev->msix_used_vectors = 0; in vp_free_vectors()
132 kfree(vp_dev->msix_names); in vp_free_vectors()
133 vp_dev->msix_names = NULL; in vp_free_vectors()
134 kfree(vp_dev->msix_entries); in vp_free_vectors()
135 vp_dev->msix_entries = NULL; in vp_free_vectors()
136 kfree(vp_dev->msix_affinity_masks); in vp_free_vectors()
137 vp_dev->msix_affinity_masks = NULL; in vp_free_vectors()
143 struct virtio_pci_device *vp_dev = to_vp_device(vdev); in vp_request_msix_vectors() local
144 const char *name = dev_name(&vp_dev->vdev.dev); in vp_request_msix_vectors()
148 vp_dev->msix_vectors = nvectors; in vp_request_msix_vectors()
150 vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, in vp_request_msix_vectors()
152 if (!vp_dev->msix_entries) in vp_request_msix_vectors()
154 vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, in vp_request_msix_vectors()
156 if (!vp_dev->msix_names) in vp_request_msix_vectors()
158 vp_dev->msix_affinity_masks in vp_request_msix_vectors()
159 = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, in vp_request_msix_vectors()
161 if (!vp_dev->msix_affinity_masks) in vp_request_msix_vectors()
164 if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], in vp_request_msix_vectors()
169 vp_dev->msix_entries[i].entry = i; in vp_request_msix_vectors()
171 err = pci_enable_msix_exact(vp_dev->pci_dev, in vp_request_msix_vectors()
172 vp_dev->msix_entries, nvectors); in vp_request_msix_vectors()
175 vp_dev->msix_enabled = 1; in vp_request_msix_vectors()
178 v = vp_dev->msix_used_vectors; in vp_request_msix_vectors()
179 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, in vp_request_msix_vectors()
181 err = request_irq(vp_dev->msix_entries[v].vector, in vp_request_msix_vectors()
182 vp_config_changed, 0, vp_dev->msix_names[v], in vp_request_msix_vectors()
183 vp_dev); in vp_request_msix_vectors()
186 ++vp_dev->msix_used_vectors; in vp_request_msix_vectors()
188 v = vp_dev->config_vector(vp_dev, v); in vp_request_msix_vectors()
197 v = vp_dev->msix_used_vectors; in vp_request_msix_vectors()
198 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, in vp_request_msix_vectors()
200 err = request_irq(vp_dev->msix_entries[v].vector, in vp_request_msix_vectors()
201 vp_vring_interrupt, 0, vp_dev->msix_names[v], in vp_request_msix_vectors()
202 vp_dev); in vp_request_msix_vectors()
205 ++vp_dev->msix_used_vectors; in vp_request_msix_vectors()
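
The vp_free_vectors() and vp_request_msix_vectors() matches above form a paired allocate/unwind scheme: parallel arrays (msix_entries, msix_names, msix_affinity_masks) are allocated, MSI-X and the per-vector IRQs are set up, and any failure funnels back through the same free routine, which also resets the counters and pointers so it is safe to call again. Below is a compressed userspace sketch of that error-handling shape, with hypothetical names and plain malloc/free standing in for the kernel allocators, PCI and IRQ calls:

    /* Hypothetical stand-in mirroring the allocate-then-unwind shape of
     * vp_request_msix_vectors()/vp_free_vectors(); not kernel code. */
    #include <stdlib.h>

    struct fake_vp_dev {
            unsigned msix_vectors;
            unsigned msix_used_vectors;
            int *msix_entries;        /* stands in for struct msix_entry[] */
            char (*msix_names)[64];   /* stands in for the per-vector name buffers */
    };

    static void fake_free_vectors(struct fake_vp_dev *vp)
    {
            /* Like vp_free_vectors(): release everything, then zero the counters
             * and pointers so a second call is harmless. */
            free(vp->msix_names);
            vp->msix_names = NULL;
            free(vp->msix_entries);
            vp->msix_entries = NULL;
            vp->msix_vectors = 0;
            vp->msix_used_vectors = 0;
    }

    static int fake_request_vectors(struct fake_vp_dev *vp, unsigned nvectors)
    {
            vp->msix_vectors = nvectors;

            vp->msix_entries = malloc(nvectors * sizeof *vp->msix_entries);
            if (!vp->msix_entries)
                    goto error;
            vp->msix_names = malloc(nvectors * sizeof *vp->msix_names);
            if (!vp->msix_names)
                    goto error;
            /* ... enable MSI-X and request the per-vector IRQs here ... */
            return 0;

    error:
            /* Every failure path funnels through the single teardown routine. */
            fake_free_vectors(vp);
            return -1;
    }

    int main(void)
    {
            struct fake_vp_dev vp = { 0 };

            if (fake_request_vectors(&vp, 4) == 0)
                    fake_free_vectors(&vp);
            return 0;
    }
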
216 struct virtio_pci_device *vp_dev = to_vp_device(vdev); in vp_request_intx() local
218 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, in vp_request_intx()
219 IRQF_SHARED, dev_name(&vdev->dev), vp_dev); in vp_request_intx()
221 vp_dev->intx_enabled = 1; in vp_request_intx()
230 struct virtio_pci_device *vp_dev = to_vp_device(vdev); in vp_setup_vq() local
239 vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec); in vp_setup_vq()
245 spin_lock_irqsave(&vp_dev->lock, flags); in vp_setup_vq()
246 list_add(&info->node, &vp_dev->virtqueues); in vp_setup_vq()
247 spin_unlock_irqrestore(&vp_dev->lock, flags); in vp_setup_vq()
252 vp_dev->vqs[index] = info; in vp_setup_vq()
262 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); in vp_del_vq() local
263 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; in vp_del_vq()
266 spin_lock_irqsave(&vp_dev->lock, flags); in vp_del_vq()
268 spin_unlock_irqrestore(&vp_dev->lock, flags); in vp_del_vq()
270 vp_dev->del_vq(info); in vp_del_vq()
277 struct virtio_pci_device *vp_dev = to_vp_device(vdev); in vp_del_vqs() local
282 info = vp_dev->vqs[vq->index]; in vp_del_vqs()
283 if (vp_dev->per_vq_vectors && in vp_del_vqs()
285 free_irq(vp_dev->msix_entries[info->msix_vector].vector, in vp_del_vqs()
289 vp_dev->per_vq_vectors = false; in vp_del_vqs()
292 kfree(vp_dev->vqs); in vp_del_vqs()
293 vp_dev->vqs = NULL; in vp_del_vqs()
303 struct virtio_pci_device *vp_dev = to_vp_device(vdev); in vp_try_to_find_vqs() local
307 vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL); in vp_try_to_find_vqs()
308 if (!vp_dev->vqs) in vp_try_to_find_vqs()
333 vp_dev->per_vq_vectors = per_vq_vectors; in vp_try_to_find_vqs()
334 allocated_vectors = vp_dev->msix_used_vectors; in vp_try_to_find_vqs()
339 } else if (!callbacks[i] || !vp_dev->msix_enabled) in vp_try_to_find_vqs()
341 else if (vp_dev->per_vq_vectors) in vp_try_to_find_vqs()
351 if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) in vp_try_to_find_vqs()
355 snprintf(vp_dev->msix_names[msix_vec], in vp_try_to_find_vqs()
356 sizeof *vp_dev->msix_names, in vp_try_to_find_vqs()
358 dev_name(&vp_dev->vdev.dev), names[i]); in vp_try_to_find_vqs()
359 err = request_irq(vp_dev->msix_entries[msix_vec].vector, in vp_try_to_find_vqs()
361 vp_dev->msix_names[msix_vec], in vp_try_to_find_vqs()
399 struct virtio_pci_device *vp_dev = to_vp_device(vdev); in vp_bus_name() local
401 return pci_name(vp_dev->pci_dev); in vp_bus_name()
412 struct virtio_pci_device *vp_dev = to_vp_device(vdev); in vp_set_vq_affinity() local
413 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; in vp_set_vq_affinity()
420 if (vp_dev->msix_enabled) { in vp_set_vq_affinity()
421 mask = vp_dev->msix_affinity_masks[info->msix_vector]; in vp_set_vq_affinity()
422 irq = vp_dev->msix_entries[info->msix_vector].vector; in vp_set_vq_affinity()
438 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); in virtio_pci_freeze() local
441 ret = virtio_device_freeze(&vp_dev->vdev); in virtio_pci_freeze()
451 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); in virtio_pci_restore() local
459 return virtio_device_restore(&vp_dev->vdev); in virtio_pci_restore()
479 struct virtio_pci_device *vp_dev = to_vp_device(vdev); in virtio_pci_release_dev() local
484 kfree(vp_dev); in virtio_pci_release_dev()
490 struct virtio_pci_device *vp_dev; in virtio_pci_probe() local
494 vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); in virtio_pci_probe()
495 if (!vp_dev) in virtio_pci_probe()
498 pci_set_drvdata(pci_dev, vp_dev); in virtio_pci_probe()
499 vp_dev->vdev.dev.parent = &pci_dev->dev; in virtio_pci_probe()
500 vp_dev->vdev.dev.release = virtio_pci_release_dev; in virtio_pci_probe()
501 vp_dev->pci_dev = pci_dev; in virtio_pci_probe()
502 INIT_LIST_HEAD(&vp_dev->virtqueues); in virtio_pci_probe()
503 spin_lock_init(&vp_dev->lock); in virtio_pci_probe()
518 rc = virtio_pci_legacy_probe(vp_dev); in virtio_pci_probe()
521 rc = virtio_pci_modern_probe(vp_dev); in virtio_pci_probe()
525 rc = virtio_pci_modern_probe(vp_dev); in virtio_pci_probe()
527 rc = virtio_pci_legacy_probe(vp_dev); in virtio_pci_probe()
534 rc = register_virtio_device(&vp_dev->vdev); in virtio_pci_probe()
541 if (vp_dev->ioaddr) in virtio_pci_probe()
542 virtio_pci_legacy_remove(vp_dev); in virtio_pci_probe()
544 virtio_pci_modern_remove(vp_dev); in virtio_pci_probe()
550 kfree(vp_dev); in virtio_pci_probe()
556 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); in virtio_pci_remove() local
557 struct device *dev = get_device(&vp_dev->vdev.dev); in virtio_pci_remove()
559 unregister_virtio_device(&vp_dev->vdev); in virtio_pci_remove()
561 if (vp_dev->ioaddr) in virtio_pci_remove()
562 virtio_pci_legacy_remove(vp_dev); in virtio_pci_remove()
564 virtio_pci_modern_remove(vp_dev); in virtio_pci_remove()
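
Finally, the virtio_pci_probe()/virtio_pci_remove() matches show a probe that tries one transport and falls back to the other (legacy first or modern first, depending on a force option), with both the error path and remove using vp_dev->ioaddr to decide which backend to tear down. A hedged userspace sketch of that fallback-plus-matching-teardown pattern follows, with stub probe/remove functions and a simplified fallback condition (the real driver only falls back on specific error codes):

    /* Userspace sketch of the probe fallback + matching teardown seen in
     * virtio_pci_probe()/virtio_pci_remove(); all functions here are stubs. */
    #include <errno.h>
    #include <stdbool.h>

    struct fake_dev {
            void *ioaddr;   /* non-NULL only when the legacy backend is bound */
    };

    static int legacy_probe(struct fake_dev *d)   { d->ioaddr = (void *)0x1; return 0; }
    static int modern_probe(struct fake_dev *d)   { (void)d; return -ENODEV; }
    static void legacy_remove(struct fake_dev *d) { d->ioaddr = NULL; }
    static void modern_remove(struct fake_dev *d) { (void)d; }

    static int probe(struct fake_dev *d, bool force_legacy)
    {
            int rc;

            if (force_legacy) {
                    rc = legacy_probe(d);
                    if (rc)                 /* real driver checks specific errors */
                            rc = modern_probe(d);
            } else {
                    rc = modern_probe(d);
                    if (rc)
                            rc = legacy_probe(d);
            }
            return rc;
    }

    static void removal(struct fake_dev *d)
    {
            /* ioaddr doubles as the "which backend bound?" flag, as in the listing. */
            if (d->ioaddr)
                    legacy_remove(d);
            else
                    modern_remove(d);
    }

    int main(void)
    {
            struct fake_dev d = { 0 };

            if (!probe(&d, false))
                    removal(&d);
            return 0;
    }
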