This source file includes the following definitions.
- xen_add_device
- xen_remove_device
- xen_pci_notifier
- register_xen_pci_notifier
- xen_mcfg_late

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>

static int xen_mcfg_late(void);
#endif

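/*
 * Cleared the first time the segment-aware PHYSDEVOP_pci_device_add hypercall
 * returns -ENOSYS; subsequent devices then fall back to the legacy,
 * segment-0-only PHYSDEVOP_manage_pci_* interface.
 */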
static bool __read_mostly pci_seg_supported = true;

static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
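	/*
	 * Reserve MCFG areas in Xen on the first invocation, since this can
	 * be reached from inside acpi_init() immediately after MCFG table
	 * parsing has completed.
	 */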
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif
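	/*
	 * Preferred path: the segment-aware PHYSDEVOP_pci_device_add call,
	 * optionally extended with the device's NUMA proximity domain (_PXM).
	 */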
	if (pci_seg_supported) {
		struct {
			struct physdev_pci_device_add add;
			uint32_t pxm;
		} add_ext = {
			.add.seg = pci_domain_nr(pci_dev->bus),
			.add.bus = pci_dev->bus->number,
			.add.devfn = pci_dev->devfn
		};
		struct physdev_pci_device_add *add = &add_ext.add;

#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

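		/*
		 * Flag SR-IOV virtual functions (together with their parent
		 * PF) and ARI extended functions so the hypervisor interprets
		 * the devfn correctly.
		 */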
#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
			if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
				add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
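			/*
			 * This device is not listed in the ACPI name space at
			 * all, so try the ACPI handle of a parent PCI bridge
			 * instead.
			 */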
			struct pci_bus *pbus;
			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

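			/*
			 * Walk up the ACPI namespace until a _PXM method is
			 * found, and pass that NUMA proximity domain along
			 * with the device.
			 */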
			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif

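		/*
		 * -ENOSYS means the hypervisor does not implement the
		 * segment-aware interface at all; remember that and fall back
		 * to the legacy hypercalls below.
		 */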
		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		pci_seg_supported = false;
	}

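	/*
	 * Legacy fallback: PHYSDEVOP_manage_pci_add{,_ext} carry no segment
	 * number, so only devices on segment 0 can be reported this way.
	 */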
	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_virtfn = 1,
			.physfn.bus = physfn->bus->number,
			.physfn.devfn = physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_extfn = 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
					  &manage_pci);
	}

	return r;
}

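/* Counterpart of xen_add_device(): tell Xen that a PCI device has gone away. */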
static int xen_remove_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (pci_seg_supported) {
		struct physdev_pci_device device = {
			.seg = pci_domain_nr(pci_dev->bus),
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
					  &device);
	} else if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
	else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
					  &manage_pci);
	}

	return r;
}

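/*
 * Bus notifier: forward PCI device add/remove events on pci_bus_type to the
 * hypervisor. Errors are only logged, since the notifier cannot veto the
 * device, but passthrough or MSI/MSI-X setup may fail later.
 */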
static int xen_pci_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct device *dev = data;
	int r = 0;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		r = xen_add_device(dev);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		r = xen_remove_device(dev);
		break;
	default:
		return NOTIFY_DONE;
	}
	if (r)
		dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
			action == BUS_NOTIFY_ADD_DEVICE ? "add" :
			(action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
	return NOTIFY_OK;
}

static struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};

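/*
 * Only the initial domain (dom0) manages physical PCI devices on behalf of
 * Xen, so the notifier is registered there only.
 */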
static int __init register_xen_pci_notifier(void)
{
	if (!xen_initial_domain())
		return 0;

	return bus_register_notifier(&pci_bus_type, &device_nb);
}

arch_initcall(register_xen_pci_notifier);

#ifdef CONFIG_PCI_MMCONFIG
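/*
 * Report the kernel's MMCONFIG (MCFG) regions to the hypervisor as reserved
 * via PHYSDEVOP_pci_mmcfg_reserved. Called from xen_add_device() before the
 * first device is reported.
 */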
static int xen_mcfg_late(void)
{
	struct pci_mmcfg_region *cfg;
	int rc;

	if (!xen_initial_domain())
		return 0;

	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 0;

	if (list_empty(&pci_mmcfg_list))
		return 0;

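	/* Report each region; -ENOSYS (hypercall not implemented) is tolerated. */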
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		struct physdev_pci_mmcfg_reserved r;

		r.address = cfg->address;
		r.segment = cfg->segment;
		r.start_bus = cfg->start_bus;
		r.end_bus = cfg->end_bus;
		r.flags = XEN_PCI_MMCFG_RESERVED;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
		switch (rc) {
		case 0:
		case -ENOSYS:
			continue;

		default:
			pr_warn("Failed to report MMCONFIG reservation state for %s to hypervisor (%d)\n",
				cfg->name, rc);
		}
	}
	return 0;
}
#endif