This source file includes the following definitions:
- populate_pvinfo_page
- intel_gvt_init_vgpu_types
- intel_gvt_clean_vgpu_types
- intel_gvt_update_vgpu_types
- intel_gvt_activate_vgpu
- intel_gvt_deactivate_vgpu
- intel_gvt_release_vgpu
- intel_gvt_destroy_vgpu
- intel_gvt_create_idle_vgpu
- intel_gvt_destroy_idle_vgpu
- __intel_gvt_create_vgpu
- intel_gvt_create_vgpu
- intel_gvt_reset_vgpu_locked
- intel_gvt_reset_vgpu

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

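/* Fill the PVINFO page of a vGPU with its resource ballooning layout
 * (mappable/non-mappable GM ranges, fence count) and the capabilities
 * the host exposes to the guest driver.
 */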
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
	vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num)	\
	(VGPU_MAX_WEIGHT / (vgpu_num))

static struct {
	unsigned int low_mm;
	unsigned int high_mm;
	unsigned int fence;

	/* A vGPU with a weight of 8 will get twice as much GPU as a vGPU
	 * with a weight of 4 on a contended host; different vGPU types
	 * have different weights set. Legal weights range from 1 to 16.
	 */
	unsigned int weight;
	enum intel_vgpu_edid edid;
	char *name;
} vgpu_types[] = {
/* Fixed vGPU type table */
	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
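
/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on the available resource.
 *
 */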
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
	unsigned int min_low;

	/* A vgpu type name is defined as GVTg_Vx_y, where Vx is the
	 * physical GPU generation (e.g. V4 for a BDW server, V5 for a
	 * SKL server).
	 *
	 * Depending on the physical SKU resource, we may see vgpu types
	 * like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different types of
	 * vgpu can be created on the same physical GPU, depending on the
	 * available resource. Each vgpu type carries an "avail_instance"
	 * counter indicating how many vgpu instances of this type can
	 * still be created.
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = ARRAY_SIZE(vgpu_types);

	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;

		if (vgpu_types[i].weight < 1 ||
		    vgpu_types[i].weight > VGPU_MAX_WEIGHT) {
			kfree(gvt->types);
			return -EINVAL;
		}

		gvt->types[i].weight = vgpu_types[i].weight;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (IS_GEN(gvt->dev_priv, 8))
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
				vgpu_types[i].name);
		else if (IS_GEN(gvt->dev_priv, 9))
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
				vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     gvt->types[i].weight,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}
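
/*
 * Worked example (illustrative numbers, not tied to any specific SKU):
 * with low_avail = 512MB and high_avail = 2048MB, the 256MB/1024MB type
 * gets avail_instance = min(512/256, 2048/1024) = 2, while the
 * 512MB/2048MB type gets min(512/512, 2048/2048) = 1.
 */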

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}

static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min;

	/* Need to depend on the maximum hw resource size, but keep the
	 * static config for now.
	 */
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
						   fence_min);

		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}
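
/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to activate a virtual GPU.
 *
 */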
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	vgpu->active = true;
	mutex_unlock(&vgpu->gvt->lock);
}
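
/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to deactivate a virtual GPU.
 * The virtual GPU will be stopped.
 *
 */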
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);

	vgpu->active = false;

	if (atomic_read(&vgpu->submission.running_workload_num)) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_stop_schedule(vgpu);

	mutex_unlock(&vgpu->vgpu_lock);
}
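
/**
 * intel_gvt_release_vgpu - release a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to release a virtual GPU.
 * The virtual GPU will be stopped and all runtime information will be
 * destroyed.
 *
 */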
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
{
	intel_gvt_deactivate_vgpu(vgpu);

	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);
}
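
/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy a virtual GPU.
 *
 */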
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	WARN(vgpu->active, "vGPU is still active!\n");

	/*
	 * Remove the vGPU from the idr first, so the later cleanup can
	 * tell whether the irq service needs to be stopped because no
	 * active vgpu is left.
	 */
	mutex_lock(&gvt->lock);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	mutex_unlock(&gvt->lock);

	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_debugfs_remove_vgpu(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	mutex_lock(&gvt->lock);
	if (idr_is_empty(&gvt->vgpu_idr))
		intel_gvt_clean_irq(gvt);
	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	vfree(vgpu);
}

#define IDLE_VGPU_IDR 0
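
/**
 * intel_gvt_create_idle_vgpu - create an idle virtual GPU
 * @gvt: GVT device
 *
 * This function is called when user wants to create an idle virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */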
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	enum intel_engine_id i;
	int ret;

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	vgpu->id = IDLE_VGPU_IDR;
	vgpu->gvt = gvt;
	mutex_init(&vgpu->vgpu_lock);

	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_free_vgpu;

	vgpu->active = false;

	return vgpu;

out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}
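
/**
 * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy an idle virtual GPU.
 *
 */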
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_sched_policy(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	vfree(vgpu);
}

static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
		     param->handle, param->low_gm_sz, param->high_gm_sz,
		     param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
			GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	mutex_init(&vgpu->vgpu_lock);
	mutex_init(&vgpu->dmabuf_lock);
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
	idr_init(&vgpu->object_idr);
	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	intel_gvt_debugfs_add_vgpu(vgpu);

	ret = intel_gvt_hypervisor_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	/* TODO: add more platform support. Note that ret is still zero
	 * here, so the check below only fires when set_edid() fails.
	 */
	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
	if (ret)
		goto out_clean_sched_policy;

	return vgpu;

out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}
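
/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the vGPU to create
 *
 * This function is called when user wants to create a virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */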
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.handle = 0;
	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;
	param.weight = type->weight;
	param.resolution = type->resolution;

	/* XXX: the creation params are currently expressed in MB */
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	mutex_lock(&gvt->lock);
	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (!IS_ERR(vgpu))
		/* recalculate the remaining instances for each type */
		intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	return vgpu;
}
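
/*
 * Illustrative usage sketch (not part of this file): a hypervisor backend
 * would typically drive the lifecycle with a type picked from gvt->types[],
 * roughly as follows:
 *
 *	struct intel_vgpu *vgpu = intel_gvt_create_vgpu(gvt, type);
 *
 *	if (IS_ERR(vgpu))
 *		return PTR_ERR(vgpu);
 *	intel_gvt_activate_vgpu(vgpu);
 *	...
 *	intel_gvt_deactivate_vgpu(vgpu);
 *	intel_gvt_destroy_vgpu(vgpu);
 */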
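
/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: vGPU Device Model Level Reset or GT Reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when user wants to reset a virtual GPU through
 * device model reset or GT reset. The caller should hold the vgpu lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to
 * reset the whole vGPU to the default state, as when it is created. This
 * vGPU function is required both for functionality and security concerns.
 * The ultimate goal of vGPU FLR is to allow a vGPU instance to be reused
 * by virtual machines; before a vGPU is assigned to a virtual machine,
 * such a reset must be issued first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU
 * engines (Render, Blitter, Video, Video Enhancement). Unlike the FLR, a
 * GT reset only resets the particular resources of a vGPU named in the
 * reset request. A guest driver can issue a GT reset by programming the
 * virtual GDRST register to reset a specific virtual GPU engine or all
 * engines.
 *
 * The parameter @dmlr selects between DMLR and GT reset. The parameter
 * @engine_mask specifies the engines to reset; ALL_ENGINES means the vGPU
 * is reset for all engines.
 */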
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);

	/* current_vgpu will be set to NULL after stopping the scheduler
	 * when the reset is triggered by the current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_reset_submission(vgpu, resetting_eng);

	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
		intel_vgpu_invalidate_ppgtt(vgpu);

		if (dmlr) {
			intel_vgpu_reset_gtt(vgpu);
			intel_vgpu_reset_resource(vgpu);
		}

		intel_vgpu_reset_mmio(vgpu, dmlr);
		populate_pvinfo_page(vgpu);

		if (dmlr) {
			intel_vgpu_reset_display(vgpu);
			intel_vgpu_reset_cfg_space(vgpu);

			vgpu->failsafe = false;
			vgpu->pv_notified = false;
		}
	}

	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}
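
/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Full vGPU reset path)
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to reset a virtual GPU.
 *
 */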
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->vgpu_lock);
}