This source file includes the following definitions:
- list_first
- __xen_pcibk_get_pci_dev
- match_slot
- __xen_pcibk_add_pci_dev
- __xen_pcibk_release_pci_dev
- __xen_pcibk_init_devices
- __xen_pcibk_publish_pci_roots
- __xen_pcibk_release_devices
- __xen_pcibk_get_pcifront_dev

/*
 * PCI Backend - Provides a Virtual PCI bus (with real devices)
 *               to the frontend.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"

#define PCI_SLOT_MAX 32

struct vpci_dev_data {
        /* Access to dev_list must be protected by lock */
        struct list_head dev_list[PCI_SLOT_MAX];
        struct mutex lock;
};

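/* Return the first entry of a (non-empty) list. */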
static inline struct list_head *list_first(struct list_head *head)
{
        return head->next;
}

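/*
 * Look up the real pci_dev behind a virtual domain/bus/devfn. The virtual
 * PCI bus only exposes domain 0, bus 0; the virtual slot selects the
 * per-slot list and the function number is matched within it.
 */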
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
                                               unsigned int domain,
                                               unsigned int bus,
                                               unsigned int devfn)
{
        struct pci_dev_entry *entry;
        struct pci_dev *dev = NULL;
        struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

        if (domain != 0 || bus != 0)
                return NULL;

        if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
                mutex_lock(&vpci_dev->lock);

                list_for_each_entry(entry,
                                    &vpci_dev->dev_list[PCI_SLOT(devfn)],
                                    list) {
                        if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
                                dev = entry->dev;
                                break;
                        }
                }

                mutex_unlock(&vpci_dev->lock);
        }
        return dev;
}

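/*
 * Two devices match if they are functions of the same physical device,
 * i.e. they share PCI domain, bus and slot.
 */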
static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
{
        if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
            && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
                return 1;

        return 0;
}

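/*
 * Add a real device to the virtual PCI bus: pick a virtual slot/function
 * for it and publish the assignment to the frontend via publish_cb.
 */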
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
                                   struct pci_dev *dev, int devid,
                                   publish_pci_dev_cb publish_cb)
{
        int err = 0, slot, func = -1;
        struct pci_dev_entry *t, *dev_entry;
        struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

        if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
                err = -EFAULT;
                xenbus_dev_fatal(pdev->xdev, err,
                                 "Can't export bridges on the virtual PCI bus");
                goto out;
        }

        dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
        if (!dev_entry) {
                err = -ENOMEM;
                xenbus_dev_fatal(pdev->xdev, err,
                                 "Error adding entry to virtual PCI bus");
                goto out;
        }

        dev_entry->dev = dev;

        mutex_lock(&vpci_dev->lock);

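        /*
         * Keep functions of the same physical device together on one
         * virtual slot, except for SR-IOV virtual functions, which always
         * get a slot of their own at function 0.
         */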
        if (!dev->is_virtfn) {
                for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
                        if (list_empty(&vpci_dev->dev_list[slot]))
                                continue;

                        t = list_entry(list_first(&vpci_dev->dev_list[slot]),
                                       struct pci_dev_entry, list);

                        if (match_slot(dev, t->dev)) {
                                pr_info("vpci: %s: assign to virtual slot %d func %d\n",
                                        pci_name(dev), slot,
                                        PCI_FUNC(dev->devfn));
                                list_add_tail(&dev_entry->list,
                                              &vpci_dev->dev_list[slot]);
                                func = PCI_FUNC(dev->devfn);
                                goto unlock;
                        }
                }
        }

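        /* Otherwise, assign the device to the first empty virtual slot. */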
        for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
                if (list_empty(&vpci_dev->dev_list[slot])) {
                        pr_info("vpci: %s: assign to virtual slot %d\n",
                                pci_name(dev), slot);
                        list_add_tail(&dev_entry->list,
                                      &vpci_dev->dev_list[slot]);
                        func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
                        goto unlock;
                }
        }

        err = -ENOMEM;
        xenbus_dev_fatal(pdev->xdev, err,
                         "No more space on root virtual PCI bus");

unlock:
        mutex_unlock(&vpci_dev->lock);

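        /* Publish the chosen virtual slot/function to the frontend. */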
        if (!err)
                err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
        else
                kfree(dev_entry);

out:
        return err;
}

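/*
 * Remove a real device from the virtual PCI bus and hand it back to
 * pcistub. If @lock is set, the device lock is taken around the release.
 */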
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
                                        struct pci_dev *dev, bool lock)
{
        int slot;
        struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
        struct pci_dev *found_dev = NULL;

        mutex_lock(&vpci_dev->lock);

        for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
                struct pci_dev_entry *e;

                list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
                        if (e->dev == dev) {
                                list_del(&e->list);
                                found_dev = e->dev;
                                kfree(e);
                                goto out;
                        }
                }
        }

out:
        mutex_unlock(&vpci_dev->lock);

        if (found_dev) {
                if (lock)
                        device_lock(&found_dev->dev);
                pcistub_put_pci_dev(found_dev);
                if (lock)
                        device_unlock(&found_dev->dev);
        }
}

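/* Allocate and initialise the per-pdev virtual PCI bus state. */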
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
        int slot;
        struct vpci_dev_data *vpci_dev;

        vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
        if (!vpci_dev)
                return -ENOMEM;

        mutex_init(&vpci_dev->lock);

        for (slot = 0; slot < PCI_SLOT_MAX; slot++)
                INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);

        pdev->pci_dev_data = vpci_dev;

        return 0;
}

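/* The virtual PCI bus exposes a single root: domain 0, bus 0. */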
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
                                         publish_pci_root_cb publish_cb)
{
        return publish_cb(pdev, 0, 0);
}

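/* Tear down the virtual PCI bus, returning every device to pcistub. */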
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
        int slot;
        struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

        for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
                struct pci_dev_entry *e, *tmp;
                list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
                                         list) {
                        struct pci_dev *dev = e->dev;
                        list_del(&e->list);
                        device_lock(&dev->dev);
                        pcistub_put_pci_dev(dev);
                        device_unlock(&dev->dev);
                        kfree(e);
                }
        }

        kfree(vpci_dev);
        pdev->pci_dev_data = NULL;
}

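/*
 * Translate a real device back to the virtual domain/bus/devfn that the
 * frontend sees. Returns 1 and fills in the coordinates if found, else 0.
 */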
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
                                        struct xen_pcibk_device *pdev,
                                        unsigned int *domain, unsigned int *bus,
                                        unsigned int *devfn)
{
        struct pci_dev_entry *entry;
        struct pci_dev *dev = NULL;
        struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
        int found = 0, slot;

        mutex_lock(&vpci_dev->lock);
        for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
                list_for_each_entry(entry,
                                    &vpci_dev->dev_list[slot],
                                    list) {
                        dev = entry->dev;
                        if (dev && dev->bus->number == pcidev->bus->number
                            && pci_domain_nr(dev->bus) ==
                               pci_domain_nr(pcidev->bus)
                            && dev->devfn == pcidev->devfn) {
                                found = 1;
                                *domain = 0;
                                *bus = 0;
                                *devfn = PCI_DEVFN(slot,
                                                   PCI_FUNC(pcidev->devfn));
                        }
                }
        }
        mutex_unlock(&vpci_dev->lock);
        return found;
}

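/* Backend operations for the "vpci" (virtual PCI bus) device model. */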
const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
        .name           = "vpci",
        .init           = __xen_pcibk_init_devices,
        .free           = __xen_pcibk_release_devices,
        .find           = __xen_pcibk_get_pcifront_dev,
        .publish        = __xen_pcibk_publish_pci_roots,
        .release        = __xen_pcibk_release_pci_dev,
        .add            = __xen_pcibk_add_pci_dev,
        .get            = __xen_pcibk_get_pci_dev,
};