This source file includes the following definitions:
- virt_vbt_generation
- intel_vgpu_init_opregion
- map_vgpu_opregion
- intel_vgpu_opregion_base_write_handler
- intel_vgpu_clean_opregion
- opregion_func_name
- opregion_subfunc_name
- querying_capabilities
- intel_vgpu_emulate_opregion_request

#include <linux/acpi.h>
#include "i915_drv.h"
#include "gvt.h"

/*
 * Note: this is only for the virtual VBT generation below; nothing else
 * may reach into the private VBT definitions this way.
 */
#define _INTEL_BIOS_PRIVATE
#include "display/intel_vbt_defs.h"

#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_VBT	(1 << 3)

/* device handles used by the emulated child device configs */
#define DEVICE_TYPE_CRT		0x01
#define DEVICE_TYPE_EFP1	0x04
#define DEVICE_TYPE_EFP2	0x40
#define DEVICE_TYPE_EFP3	0x20
#define DEVICE_TYPE_EFP4	0x10

struct opregion_header {
	u8 signature[16];
	u32 size;
	u32 opregion_ver;
	u8 bios_ver[32];
	u8 vbios_ver[16];
	u8 driver_ver[16];
	u32 mboxes;
	u32 driver_model;
	u32 pcon;
	u8 dver[32];
	u8 rsvd[124];
} __packed;

struct bdb_data_header {
	u8 id;
	u16 size; /* data size, this header excluded */
} __packed;

/*
 * Child device config layout used by the emulated VBT (BDB version 186,
 * 33 bytes per child device); one entry per virtual display port below.
 */
struct efp_child_device_config {
	u16 handle;
	u16 device_type;
	u16 device_class;
	u8 i2c_speed;
	u8 dp_onboard_redriver;
	u8 dp_ondock_redriver;
	u8 hdmi_level_shifter_value:4;
	u8 hdmi_max_data_rate:4;
	u16 dtd_buf_ptr;
	u8 edidless_efp:1;
	u8 compression_enable:1;
	u8 compression_method:1;
	u8 ganged_edp:1;
	u8 skip0:4;
	u8 compression_structure_index:4;
	u8 skip1:4;
	u8 slave_port;
	u8 skip2;
	u8 dvo_port;
	u8 i2c_pin;
	u8 slave_addr;
	u8 ddc_pin;
	u16 edid_ptr;
	u8 dvo_config;
	u8 efp_docked_port:1;
	u8 lane_reversal:1;
	u8 onboard_lspcon:1;
	u8 iboost_enable:1;
	u8 hpd_invert:1;
	u8 skip3:3;
	u8 hdmi_compat:1;
	u8 dp_compat:1;
	u8 tmds_compat:1;
	u8 skip4:5;
	u8 aux_channel;
	u8 dongle_detect;
	u8 pipe_cap:2;
	u8 sdvo_stall:1;
	u8 hpd_status:2;
	u8 integrated_encoder:1;
	u8 skip5:2;
	u8 dvo_wiring;
	u8 mipi_bridge_type;
	u16 device_class_ext;
	u8 dvo_function;
} __packed;

struct vbt {
	/* header.bdb_offset below points at bdb_header */
	struct vbt_header header;
	struct bdb_header bdb_header;

	struct bdb_data_header general_features_header;
	struct bdb_general_features general_features;

	struct bdb_data_header general_definitions_header;
	struct bdb_general_definitions general_definitions;

	struct efp_child_device_config child0;
	struct efp_child_device_config child1;
	struct efp_child_device_config child2;
	struct efp_child_device_config child3;

	struct bdb_data_header driver_features_header;
	struct bdb_driver_features driver_features;
};
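
/*
 * Build a minimal virtual VBT: general features, general definitions with
 * four child devices (one external DP per port A-D), and driver features.
 */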
static void virt_vbt_generation(struct vbt *v)
{
	int num_child;

	memset(v, 0, sizeof(struct vbt));

	v->header.signature[0] = '$';
	v->header.signature[1] = 'V';
	v->header.signature[2] = 'B';
	v->header.signature[3] = 'T';

	/* some VBT features depend on the version */
	v->header.version = 155;
	v->header.header_size = sizeof(v->header);
	v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
	v->header.bdb_offset = offsetof(struct vbt, bdb_header);

	strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
	v->bdb_header.version = 186;
	v->bdb_header.header_size = sizeof(v->bdb_header);

	v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
		- sizeof(struct bdb_header);

	/* general features */
	v->general_features_header.id = BDB_GENERAL_FEATURES;
	v->general_features_header.size = sizeof(struct bdb_general_features);
	v->general_features.int_crt_support = 0;
	v->general_features.int_tv_support = 0;

	/* general definitions: one child device per port */
	num_child = 4;
	v->general_definitions.child_dev_size =
		sizeof(struct efp_child_device_config);
	v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
	v->general_definitions_header.size =
		sizeof(struct bdb_general_definitions) +
		num_child * v->general_definitions.child_dev_size;

	/* port A */
	v->child0.handle = DEVICE_TYPE_EFP1;
	v->child0.device_type = DEVICE_TYPE_DP;
	v->child0.dvo_port = DVO_PORT_DPA;
	v->child0.aux_channel = DP_AUX_A;
	v->child0.dp_compat = true;
	v->child0.integrated_encoder = true;

	/* port B */
	v->child1.handle = DEVICE_TYPE_EFP2;
	v->child1.device_type = DEVICE_TYPE_DP;
	v->child1.dvo_port = DVO_PORT_DPB;
	v->child1.aux_channel = DP_AUX_B;
	v->child1.dp_compat = true;
	v->child1.integrated_encoder = true;

	/* port C */
	v->child2.handle = DEVICE_TYPE_EFP3;
	v->child2.device_type = DEVICE_TYPE_DP;
	v->child2.dvo_port = DVO_PORT_DPC;
	v->child2.aux_channel = DP_AUX_C;
	v->child2.dp_compat = true;
	v->child2.integrated_encoder = true;

	/* port D */
	v->child3.handle = DEVICE_TYPE_EFP4;
	v->child3.device_type = DEVICE_TYPE_DP;
	v->child3.dvo_port = DVO_PORT_DPD;
	v->child3.aux_channel = DP_AUX_D;
	v->child3.dp_compat = true;
	v->child3.integrated_encoder = true;

	/* driver features */
	v->driver_features_header.id = BDB_DRIVER_FEATURES;
	v->driver_features_header.size = sizeof(struct bdb_driver_features);
	v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS;
}
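
/**
 * intel_vgpu_init_opregion - allocate and populate the emulated OpRegion
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */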
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
{
	u8 *buf;
	struct opregion_header *header;
	struct vbt v;
	const char opregion_signature[16] = OPREGION_SIGNATURE;

	gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
			__GFP_ZERO,
			get_order(INTEL_GVT_OPREGION_SIZE));
	if (!vgpu_opregion(vgpu)->va) {
		gvt_err("failed to get memory for vgpu virt opregion\n");
		return -ENOMEM;
	}

	/* emulated OpRegion advertises the VBT mailbox only */
	buf = (u8 *)vgpu_opregion(vgpu)->va;
	header = (struct opregion_header *)buf;
	memcpy(header->signature, opregion_signature,
	       sizeof(opregion_signature));
	header->size = 0x8; /* in KB */
	header->opregion_ver = 0x02000000;
	header->mboxes = MBOX_VBT;

	/* force the current lid state (CLID) field to "open" (0x3) */
	buf[INTEL_GVT_OPREGION_CLID] = 0x3;

	/* fill in the emulated VBT */
	virt_vbt_generation(&v);
	memcpy(buf + INTEL_GVT_OPREGION_VBT_OFFSET, &v, sizeof(struct vbt));

	return 0;
}
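
/*
 * Map (or unmap) the host pages backing the vGPU OpRegion at the guest
 * page frames recorded in gfn[]; used on the Xen path.
 */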
static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
{
	u64 mfn;
	int i, ret;

	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
			+ i * PAGE_SIZE);
		if (mfn == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("failed to get MFN from VA\n");
			return -EINVAL;
		}
		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
				vgpu_opregion(vgpu)->gfn[i],
				mfn, 1, map);
		if (ret) {
			gvt_vgpu_err("failed to map GFN to MFN, errno: %d\n",
				ret);
			return ret;
		}
	}

	vgpu_opregion(vgpu)->mapped = map;

	return 0;
}
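
/**
 * intel_vgpu_opregion_base_write_handler - emulate a guest write to the
 *	OpRegion base register (ASLS)
 * @vgpu: a vGPU
 * @gpa: guest physical address of the guest OpRegion
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */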
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{
	int i, ret = 0;

	gvt_dbg_core("emulate opregion from kernel\n");

	switch (intel_gvt_host.hypervisor_type) {
	case INTEL_GVT_HYPERVISOR_KVM:
		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
		break;
	case INTEL_GVT_HYPERVISOR_XEN:
		/*
		 * The guest may write this register more than once (e.g. by
		 * firmware and then by the graphics driver), so drop any
		 * previous mapping before remapping at the new base.
		 */
		if (vgpu_opregion(vgpu)->mapped)
			map_vgpu_opregion(vgpu, false);

		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;

		ret = map_vgpu_opregion(vgpu, true);
		break;
	default:
		ret = -EINVAL;
		gvt_vgpu_err("unsupported hypervisor\n");
	}

	return ret;
}
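
/**
 * intel_vgpu_clean_opregion - free the resources of the emulated OpRegion
 * @vgpu: a vGPU
 */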
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
	gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);

	if (!vgpu_opregion(vgpu)->va)
		return;

	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
		if (vgpu_opregion(vgpu)->mapped)
			map_vgpu_opregion(vgpu, false);
	} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
		/* nothing to do: the guest OpRegion is released by VFIO */
	}
	free_pages((unsigned long)vgpu_opregion(vgpu)->va,
		   get_order(INTEL_GVT_OPREGION_SIZE));

	vgpu_opregion(vgpu)->va = NULL;
}
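
/* decode the function/subfunction fields of a SWSCI SCIC request */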
#define GVT_OPREGION_FUNC(scic)					\
	({							\
		u32 __ret;					\
		__ret = (scic & OPREGION_SCIC_FUNC_MASK) >>	\
			OPREGION_SCIC_FUNC_SHIFT;		\
		__ret;						\
	})

#define GVT_OPREGION_SUBFUNC(scic)				\
	({							\
		u32 __ret;					\
		__ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >>	\
			OPREGION_SCIC_SUBFUNC_SHIFT;		\
		__ret;						\
	})

static const char *opregion_func_name(u32 func)
{
	const char *name = NULL;

	switch (func) {
	case 0 ... 3:
	case 5:
	case 7 ... 15:
		name = "Reserved";
		break;

	case 4:
		name = "Get BIOS Data";
		break;

	case 6:
		name = "System BIOS Callbacks";
		break;

	default:
		name = "Unknown";
		break;
	}
	return name;
}

static const char *opregion_subfunc_name(u32 subfunc)
{
	const char *name = NULL;

	switch (subfunc) {
	case 0:
		name = "Supported Calls";
		break;

	case 1:
		name = "Requested Callbacks";
		break;

	case 2 ... 3:
	case 8 ... 9:
		name = "Reserved";
		break;

	case 5:
		name = "Boot Display";
		break;

	case 6:
		name = "TV-Standard/Video-Connector";
		break;

	case 7:
		name = "Internal Graphics";
		break;

	case 10:
		name = "Spread Spectrum Clocks";
		break;

	case 11:
		name = "Get AKSV";
		break;

	default:
		name = "Unknown";
		break;
	}
	return name;
}
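
/*
 * Capability queries ("Get BIOS Data"/"System BIOS Callbacks" with the
 * "Supported Calls"/"Requested Callbacks" subfunctions) are answered with
 * an empty capability set; any other request is rejected by the caller.
 */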
static bool querying_capabilities(u32 scic)
{
	u32 func, subfunc;

	func = GVT_OPREGION_FUNC(scic);
	subfunc = GVT_OPREGION_SUBFUNC(scic);

	if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
	     subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS) ||
	    (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
	     subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS) ||
	    (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
	     subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
		return true;
	}
	return false;
}
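
/**
 * intel_vgpu_emulate_opregion_request - emulate an OpRegion SWSCI request
 * @vgpu: a vGPU
 * @swsci: the value the guest wrote to the SWSCI register
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */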
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
{
	u32 scic, parm;
	u32 func, subfunc;
	u64 scic_pa = 0, parm_pa = 0;
	int ret;

	switch (intel_gvt_host.hypervisor_type) {
	case INTEL_GVT_HYPERVISOR_XEN:
		scic = *((u32 *)vgpu_opregion(vgpu)->va +
					INTEL_GVT_OPREGION_SCIC);
		parm = *((u32 *)vgpu_opregion(vgpu)->va +
					INTEL_GVT_OPREGION_PARM);
		break;
	case INTEL_GVT_HYPERVISOR_KVM:
		scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
					INTEL_GVT_OPREGION_SCIC;
		parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
					INTEL_GVT_OPREGION_PARM;

		ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
						    &scic, sizeof(scic));
		if (ret) {
			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
				ret, scic_pa, sizeof(scic));
			return ret;
		}

		ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
						    &parm, sizeof(parm));
		if (ret) {
			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
				ret, parm_pa, sizeof(parm));
			return ret;
		}

		break;
	default:
		gvt_vgpu_err("unsupported hypervisor\n");
		return -EINVAL;
	}

	if (!(swsci & SWSCI_SCI_SELECT)) {
		/* only SCI requests are emulated, not SMI */
		gvt_vgpu_err("requesting SMI service\n");
		return 0;
	}

	/* ignore anything but 0->1 transitions of the SCI trigger bit */
	if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
			& SWSCI_SCI_TRIGGER) ||
			!(swsci & SWSCI_SCI_TRIGGER)) {
		return 0;
	}

	func = GVT_OPREGION_FUNC(scic);
	subfunc = GVT_OPREGION_SUBFUNC(scic);
	if (!querying_capabilities(scic)) {
		gvt_vgpu_err("requesting runtime service: func \"%s\", subfunc \"%s\"\n",
				opregion_func_name(func),
				opregion_subfunc_name(subfunc));
		/*
		 * Emulate the exit status of the call: '0' means "failure,
		 * generic, unsupported or unknown cause".
		 */
		scic &= ~OPREGION_SCIC_EXIT_MASK;
		goto out;
	}

	/* capability query: report an empty set of supported calls */
	scic = 0;
	parm = 0;

out:
	switch (intel_gvt_host.hypervisor_type) {
	case INTEL_GVT_HYPERVISOR_XEN:
		*((u32 *)vgpu_opregion(vgpu)->va +
					INTEL_GVT_OPREGION_SCIC) = scic;
		*((u32 *)vgpu_opregion(vgpu)->va +
					INTEL_GVT_OPREGION_PARM) = parm;
		break;
	case INTEL_GVT_HYPERVISOR_KVM:
		ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
						     &scic, sizeof(scic));
		if (ret) {
			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
				ret, scic_pa, sizeof(scic));
			return ret;
		}

		ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
						     &parm, sizeof(parm));
		if (ret) {
			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
				ret, parm_pa, sizeof(parm));
			return ret;
		}

		break;
	default:
		gvt_vgpu_err("unsupported hypervisor\n");
		return -EINVAL;
	}

	return 0;
}