This source file includes the following definitions:
- vgpu_pci_cfg_mem_write
- intel_vgpu_emulate_cfg_read
- map_aperture
- trap_gttmmio
- emulate_pci_command_write
- emulate_pci_rom_bar_write
- emulate_pci_bar_write
- intel_vgpu_emulate_cfg_write
- intel_vgpu_init_cfg_space
- intel_vgpu_reset_cfg_space
#include "i915_drv.h"
#include "gvt.h"

enum {
        INTEL_GVT_PCI_BAR_GTTMMIO = 0,
        INTEL_GVT_PCI_BAR_APERTURE,
        INTEL_GVT_PCI_BAR_PIO,
        INTEL_GVT_PCI_BAR_MAX,
};

/* Bitmap of writable bits (RW or RW1C), byte by byte, in the standard
 * PCI configuration space up through PCI_INTERRUPT_LINE (not the full
 * 256 bytes).
 */
static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
        [PCI_COMMAND]           = 0xff, 0x07,
        [PCI_STATUS]            = 0x00, 0xf9, /* the only RW1C byte */
        [PCI_CACHE_LINE_SIZE]   = 0xff,
        [PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
        [PCI_ROM_ADDRESS]       = 0x01, 0xf8, 0xff, 0xff,
        [PCI_INTERRUPT_LINE]    = 0xff,
};
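
/*
 * Illustrative sketch (annotation, not part of the driver): how the
 * bitmap is consumed by vgpu_pci_cfg_mem_write() below. A guest write
 * to PCI_COMMAND + 1 can only change bits 0-2, since its bitmap byte
 * is 0x07:
 *
 *      mask = pci_cfg_space_rw_bmp[PCI_COMMAND + 1];   // 0x07
 *      cfg_base[PCI_COMMAND + 1] =
 *              (old & ~mask) | (guest_byte & mask);    // RO bits kept
 */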

/**
 * vgpu_pci_cfg_mem_write - write virtual cfg space memory
 * @vgpu: target vgpu
 * @off: offset into cfg space
 * @src: source data to write
 * @bytes: number of bytes
 *
 * Within the standard part of cfg space, only bits flagged writable in
 * pci_cfg_space_rw_bmp can change, and the RW1C (write-1-to-clear)
 * semantics of the PCI_STATUS high byte are emulated.
 */
static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
                                   u8 *src, unsigned int bytes)
{
        u8 *cfg_base = vgpu_cfg_space(vgpu);
        u8 mask, new, old;
        int i = 0;

        for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
                mask = pci_cfg_space_rw_bmp[off + i];
                old = cfg_base[off + i];
                new = src[i] & mask;

                /*
                 * The PCI_STATUS high byte has RW1C bits: writing 1
                 * clears a bit, writing 0 leaves it unchanged.
                 */
                if (off + i == PCI_STATUS + 1)
                        new = (~new & old) & mask;

                cfg_base[off + i] = (old & ~mask) | new;
        }

        /* Writes past the standard header are copied through as-is. */
        if (i < bytes)
                memcpy(cfg_base + off + i, src + i, bytes - i);
}
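
/*
 * Illustrative sketch of the RW1C emulation above: if the PCI_STATUS
 * high byte currently reads 0xf9, a guest write of 0x08 to that byte
 * clears only bit 3 and leaves the other status bits set:
 *
 *      u8 v = 0x08;
 *      vgpu_pci_cfg_mem_write(vgpu, PCI_STATUS + 1, &v, 1);
 *      // new = (~0x08 & 0xf9) & 0xf9 == 0xf1
 */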

/**
 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
 * @vgpu: target vgpu
 * @offset: offset into cfg space
 * @p_data: buffer that receives the data
 * @bytes: number of bytes to read
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
        void *p_data, unsigned int bytes)
{
        if (WARN_ON(bytes > 4))
                return -EINVAL;

        if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
                return -EINVAL;

        memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
        return 0;
}
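
/*
 * Note (annotation): reads need no per-register emulation; the write
 * handlers below keep the shadow cfg space coherent, so a read is a
 * plain memcpy from the vGPU's cached configuration space.
 */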

static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
        phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
        unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
        u64 first_gfn;
        u64 val;
        int ret;

        if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
                return 0;

        /* Read the full BAR value; width depends on the 64-bit type flag. */
        val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
        if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
                val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
        else
                val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);

        first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;

        ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
                                                  aperture_pa >> PAGE_SHIFT,
                                                  aperture_sz >> PAGE_SHIFT,
                                                  map);
        if (ret)
                return ret;

        vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
        return 0;
}
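
/*
 * Illustrative sketch with made-up numbers: if the guest programmed BAR2
 * to GPA 0x80000000 and this vGPU's slice starts 0x10000000 into the
 * host aperture, then (with 4K pages)
 *
 *      first_gfn = (0x80000000 + 0x10000000) >> PAGE_SHIFT;  // 0x90000
 *
 * and map_aperture(vgpu, true) asks the hypervisor to back that guest
 * range with the host aperture pages at aperture_pa >> PAGE_SHIFT.
 */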

static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
        u64 start, end;
        u64 val;
        int ret;

        if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
                return 0;

        val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
        if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
                start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
        else
                start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);

        /* Mask off the low flag bits to recover the base address. */
        start &= ~GENMASK(3, 0);
        end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;

        ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
        if (ret)
                return ret;

        vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
        return 0;
}
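
/*
 * Note (annotation): a memory BAR's low four bits are flags (memory
 * space indicator, type, prefetchable), so "start &= ~GENMASK(3, 0)"
 * leaves only the base address; e.g. a BAR value of 0x9000000c yields
 * a trap range starting at 0x90000000.
 */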

static int emulate_pci_command_write(struct intel_vgpu *vgpu,
        unsigned int offset, void *p_data, unsigned int bytes)
{
        u8 old = vgpu_cfg_space(vgpu)[offset];
        u8 new = *(u8 *)p_data;
        u8 changed = old ^ new;
        int ret;

        vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
        if (!(changed & PCI_COMMAND_MEMORY))
                return 0;

        if (old & PCI_COMMAND_MEMORY) {
                /* Memory decoding was just disabled: tear down. */
                ret = trap_gttmmio(vgpu, false);
                if (ret)
                        return ret;
                ret = map_aperture(vgpu, false);
                if (ret)
                        return ret;
        } else {
                /* Memory decoding was just enabled: set up. */
                ret = trap_gttmmio(vgpu, true);
                if (ret)
                        return ret;
                ret = map_aperture(vgpu, true);
                if (ret)
                        return ret;
        }

        return 0;
}
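
/*
 * Illustrative sketch: a guest doing pci_write_config_word(pdev,
 * PCI_COMMAND, cmd | PCI_COMMAND_MEMORY) lands here. PCI_COMMAND_MEMORY
 * flips from 0 to 1, so the handler installs both the GTTMMIO trap and
 * the aperture mapping before the guest starts decoding memory cycles.
 */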

static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
        unsigned int offset, void *p_data, unsigned int bytes)
{
        u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
        u32 new = *(u32 *)(p_data);

        /* We don't have a ROM, so a size probe reads back 0. */
        if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
                *pval = 0;
        else
                vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
        return 0;
}
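
/*
 * Illustrative sketch of the sizing handshake above: the guest probes
 * the ROM BAR by writing all 1's to the address bits; since the vGPU
 * exposes no option ROM, the register reads back 0, i.e. "no ROM":
 *
 *      u32 probe = 0xfffff800;         // PCI_ROM_ADDRESS_MASK
 *      intel_vgpu_emulate_cfg_write(vgpu, PCI_ROM_ADDRESS, &probe, 4);
 *      intel_vgpu_emulate_cfg_read(vgpu, PCI_ROM_ADDRESS, &probe, 4);
 *      // probe == 0
 */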

static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
        void *p_data, unsigned int bytes)
{
        u32 new = *(u32 *)(p_data);
        bool lo = IS_ALIGNED(offset, 8);
        u64 size;
        int ret = 0;
        bool mmio_enabled =
                vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
        struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;

        /*
         * Power-up software can determine how much address space the
         * device requires by writing a value of all 1's to the register
         * and then reading the value back. The device returns 0's in
         * all don't-care address bits.
         */
        if (new == 0xffffffff) {
                switch (offset) {
                case PCI_BASE_ADDRESS_0:
                case PCI_BASE_ADDRESS_1:
                        size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1);
                        intel_vgpu_write_pci_bar(vgpu, offset,
                                                 size >> (lo ? 0 : 32), lo);
                        /*
                         * Untrap the BAR, since the guest hasn't
                         * configured a valid GPA.
                         */
                        ret = trap_gttmmio(vgpu, false);
                        break;
                case PCI_BASE_ADDRESS_2:
                case PCI_BASE_ADDRESS_3:
                        size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size - 1);
                        intel_vgpu_write_pci_bar(vgpu, offset,
                                                 size >> (lo ? 0 : 32), lo);
                        ret = map_aperture(vgpu, false);
                        break;
                default:
                        /* Unimplemented BARs */
                        intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
                }
        } else {
                switch (offset) {
                case PCI_BASE_ADDRESS_0:
                case PCI_BASE_ADDRESS_1:
                        /*
                         * Untrap the old BAR first, since the guest has
                         * re-configured the BAR.
                         */
                        trap_gttmmio(vgpu, false);
                        intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
                        ret = trap_gttmmio(vgpu, mmio_enabled);
                        break;
                case PCI_BASE_ADDRESS_2:
                case PCI_BASE_ADDRESS_3:
                        map_aperture(vgpu, false);
                        intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
                        ret = map_aperture(vgpu, mmio_enabled);
                        break;
                default:
                        intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
                }
        }
        return ret;
}
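
/*
 * Illustrative sketch of BAR sizing, assuming a 16 MB GTTMMIO BAR: the
 * guest writes 0xffffffff to PCI_BASE_ADDRESS_0, this handler stores
 * ~(size - 1) = 0xff000000, and the guest recovers the size from what
 * it reads back:
 *
 *      size = ~(val & PCI_BASE_ADDRESS_MEM_MASK) + 1;  // 0x01000000
 */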

/**
 * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
 * @vgpu: target vgpu
 * @offset: offset into cfg space
 * @p_data: write data ptr
 * @bytes: number of bytes to write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
        void *p_data, unsigned int bytes)
{
        int ret;

        if (WARN_ON(bytes > 4))
                return -EINVAL;

        if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
                return -EINVAL;

        /* First check if it's PCI_COMMAND */
        if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
                if (WARN_ON(bytes > 2))
                        return -EINVAL;
                return emulate_pci_command_write(vgpu, offset, p_data, bytes);
        }

        switch (rounddown(offset, 4)) {
        case PCI_ROM_ADDRESS:
                if (WARN_ON(!IS_ALIGNED(offset, 4)))
                        return -EINVAL;
                return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);

        case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
                if (WARN_ON(!IS_ALIGNED(offset, 4)))
                        return -EINVAL;
                return emulate_pci_bar_write(vgpu, offset, p_data, bytes);

        case INTEL_GVT_PCI_SWSCI:
                if (WARN_ON(!IS_ALIGNED(offset, 4)))
                        return -EINVAL;
                ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
                if (ret)
                        return ret;
                break;

        case INTEL_GVT_PCI_OPREGION:
                if (WARN_ON(!IS_ALIGNED(offset, 4)))
                        return -EINVAL;
                ret = intel_vgpu_opregion_base_write_handler(vgpu,
                                                             *(u32 *)p_data);
                if (ret)
                        return ret;

                vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
                break;
        default:
                vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
                break;
        }
        return 0;
}
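
/*
 * Note (annotation): rounddown(offset, 4) makes a sub-dword write select
 * the dword register it falls inside (e.g. offset PCI_BASE_ADDRESS_0 + 2
 * still dispatches to the BAR case); the WARN_ON(!IS_ALIGNED(offset, 4))
 * checks then reject such partial writes to those registers.
 */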

/**
 * intel_vgpu_init_cfg_space - init the vGPU configuration space on
 * vGPU creation
 * @vgpu: a vGPU
 * @primary: whether the vGPU is presented as the primary display adapter
 */
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
                               bool primary)
{
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;
        u16 *gmch_ctl;

        memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
               info->cfg_space_size);

        if (!primary) {
                vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
                        INTEL_GVT_PCI_CLASS_VGA_OTHER;
                vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
                        INTEL_GVT_PCI_CLASS_VGA_OTHER;
        }

        /* Show the guest that there isn't any stolen memory. */
        gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
        *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

        intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
                                 gvt_aperture_pa_base(gvt), true);

        vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
                                             | PCI_COMMAND_MEMORY
                                             | PCI_COMMAND_MASTER);
        /*
         * Clear the upper 32 bits of the BARs and let the guest assign
         * new values.
         */
        memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
        memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
        memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
        memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

        vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
                pci_resource_len(gvt->dev_priv->drm.pdev, 0);
        vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
                pci_resource_len(gvt->dev_priv->drm.pdev, 2);

        memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
}
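
/*
 * Note (annotation): after init, the vGPU cfg space mirrors the host
 * firmware snapshot, except that stolen memory is hidden, the BAR upper
 * halves and the ROM BAR read as zero, and I/O, memory and bus-master
 * decoding start disabled, forcing the guest to do a full BAR setup.
 */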

/**
 * intel_vgpu_reset_cfg_space - reset the vGPU configuration space
 * @vgpu: a vGPU
 */
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
{
        u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
        bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
                                INTEL_GVT_PCI_CLASS_VGA_OTHER;

        if (cmd & PCI_COMMAND_MEMORY) {
                trap_gttmmio(vgpu, false);
                map_aperture(vgpu, false);
        }

        /*
         * Currently we only do such a reset when the vGPU is not owned
         * by any VM, so we simply restore the entire cfg space to its
         * default values.
         */
        intel_vgpu_init_cfg_space(vgpu, primary);
}