This source file includes the following definitions:
- set_context_pdp_root_pointer
- update_shadow_pdps
- sr_oa_regs
- populate_shadow_context
- is_gvt_request
- save_ring_hw_state
- shadow_context_status_change
- shadow_context_descriptor_update
- copy_workload_to_ring_buffer
- release_shadow_wa_ctx
- set_context_ppgtt_from_shadow
- intel_gvt_workload_req_alloc
- intel_gvt_scan_and_shadow_workload
- prepare_shadow_batch_buffer
- update_wa_ctx_2_shadow_ctx
- prepare_shadow_wa_ctx
- update_vreg_in_ctx
- release_shadow_batch_buffer
- prepare_workload
- dispatch_workload
- pick_next_workload
- update_guest_context
- intel_vgpu_clean_workloads
- complete_current_workload
- workload_thread
- intel_gvt_wait_vgpu_idle
- intel_gvt_clean_workload_scheduler
- intel_gvt_init_workload_scheduler
- i915_context_ppgtt_root_restore
- intel_vgpu_clean_submission
- intel_vgpu_reset_submission
- i915_context_ppgtt_root_save
- intel_vgpu_setup_submission
- intel_vgpu_select_submission_ops
- intel_vgpu_destroy_workload
- alloc_workload
- read_guest_pdps
- prepare_mm
- intel_vgpu_create_workload
- intel_vgpu_queue_workload
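Together, these implement the GVT-g per-engine workload scheduler: guest execlist submissions are captured as intel_vgpu_workload objects, scanned and shadowed into host i915 shadow contexts, dispatched to the hardware by per-ring worker threads, and written back to the guest context on completion.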
36 #include <linux/kthread.h>
37
38 #include "gem/i915_gem_context.h"
39 #include "gem/i915_gem_pm.h"
40 #include "gt/intel_context.h"
41
42 #include "i915_drv.h"
43 #include "gvt.h"
44
45 #define RING_CTX_OFF(x) \
46 offsetof(struct execlist_ring_context, x)
47
48 static void set_context_pdp_root_pointer(
49 struct execlist_ring_context *ring_context,
50 u32 pdp[8])
51 {
52 int i;
53
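/* The PDP root pointers live in the ring context in the reverse order of the
 * pdp[] array built by read_guest_pdps(), hence the pdp[7 - i] indexing. */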
54 for (i = 0; i < 8; i++)
55 ring_context->pdps[i].val = pdp[7 - i];
56 }
57
58 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
59 {
60 struct drm_i915_gem_object *ctx_obj =
61 workload->req->hw_context->state->obj;
62 struct execlist_ring_context *shadow_ring_context;
63 struct page *page;
64
65 if (WARN_ON(!workload->shadow_mm))
66 return;
67
68 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
69 return;
70
71 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
72 shadow_ring_context = kmap(page);
73 set_context_pdp_root_pointer(shadow_ring_context,
74 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
75 kunmap(page);
76 }
77
78
79 /* When populating the shadow context from the guest, the OA related
80  * registers are saved before the copy and restored afterwards (see
81  * sr_oa_regs() below), so guest OA configs do not clobber the host's.
82  */
83 static void sr_oa_regs(struct intel_vgpu_workload *workload,
84 u32 *reg_state, bool save)
85 {
86 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
87 u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
88 u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
89 int i = 0;
90 u32 flex_mmio[] = {
91 i915_mmio_reg_offset(EU_PERF_CNTL0),
92 i915_mmio_reg_offset(EU_PERF_CNTL1),
93 i915_mmio_reg_offset(EU_PERF_CNTL2),
94 i915_mmio_reg_offset(EU_PERF_CNTL3),
95 i915_mmio_reg_offset(EU_PERF_CNTL4),
96 i915_mmio_reg_offset(EU_PERF_CNTL5),
97 i915_mmio_reg_offset(EU_PERF_CNTL6),
98 };
99
100 if (workload->ring_id != RCS0)
101 return;
102
103 if (save) {
104 workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
105
106 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
107 u32 state_offset = ctx_flexeu0 + i * 2;
108
109 workload->flex_mmio[i] = reg_state[state_offset + 1];
110 }
111 } else {
112 reg_state[ctx_oactxctrl] =
113 i915_mmio_reg_offset(GEN8_OACTXCONTROL);
114 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
115
116 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
117 u32 state_offset = ctx_flexeu0 + i * 2;
118 u32 mmio = flex_mmio[i];
119
120 reg_state[state_offset] = mmio;
121 reg_state[state_offset + 1] = workload->flex_mmio[i];
122 }
123 }
124 }
125
126 static int populate_shadow_context(struct intel_vgpu_workload *workload)
127 {
128 struct intel_vgpu *vgpu = workload->vgpu;
129 struct intel_gvt *gvt = vgpu->gvt;
130 int ring_id = workload->ring_id;
131 struct drm_i915_gem_object *ctx_obj =
132 workload->req->hw_context->state->obj;
133 struct execlist_ring_context *shadow_ring_context;
134 struct page *page;
135 void *dst;
136 unsigned long context_gpa, context_page_num;
137 int i;
138
139 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
140 shadow_ring_context = kmap(page);
141
142 sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
143 #define COPY_REG(name) \
144 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
145 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
146 #define COPY_REG_MASKED(name) {\
147 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
148 + RING_CTX_OFF(name.val),\
149 &shadow_ring_context->name.val, 4);\
150 shadow_ring_context->name.val |= 0xffff << 16;\
151 }
152
153 COPY_REG_MASKED(ctx_ctrl);
154 COPY_REG(ctx_timestamp);
155
156 if (ring_id == RCS0) {
157 COPY_REG(bb_per_ctx_ptr);
158 COPY_REG(rcs_indirect_ctx);
159 COPY_REG(rcs_indirect_ctx_offset);
160 }
161 #undef COPY_REG
162 #undef COPY_REG_MASKED
163
164 intel_gvt_hypervisor_read_gpa(vgpu,
165 workload->ring_context_gpa +
166 sizeof(*shadow_ring_context),
167 (void *)shadow_ring_context +
168 sizeof(*shadow_ring_context),
169 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
170
171 sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
172 kunmap(page);
173
174 if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
175 return 0;
176
177 gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
178 workload->ctx_desc.lrca);
179
180 context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
181
182 context_page_num = context_page_num >> PAGE_SHIFT;
183
184 if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
185 context_page_num = 19;
186
187 i = 2;
188 while (i < context_page_num) {
189 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
190 (u32)((workload->ctx_desc.lrca + i) <<
191 I915_GTT_PAGE_SHIFT));
192 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
193 gvt_vgpu_err("Invalid guest context descriptor\n");
194 return -EFAULT;
195 }
196
197 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
198 dst = kmap(page);
199 intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
200 I915_GTT_PAGE_SIZE);
201 kunmap(page);
202 i++;
203 }
204 return 0;
205 }
206
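/* GVT's own shadow contexts are created with force-single-submission set
 * (see intel_vgpu_setup_submission()), which is how requests issued by GVT
 * are told apart from regular host i915 requests. */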
207 static inline bool is_gvt_request(struct i915_request *req)
208 {
209 return i915_gem_context_force_single_submission(req->gem_context);
210 }
211
212 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
213 {
214 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
215 u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
216 i915_reg_t reg;
217
218 reg = RING_INSTDONE(ring_base);
219 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
220 reg = RING_ACTHD(ring_base);
221 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
222 reg = RING_ACTHD_UDW(ring_base);
223 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
224 }
225
226 static int shadow_context_status_change(struct notifier_block *nb,
227 unsigned long action, void *data)
228 {
229 struct i915_request *req = data;
230 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
231 shadow_ctx_notifier_block[req->engine->id]);
232 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
233 enum intel_engine_id ring_id = req->engine->id;
234 struct intel_vgpu_workload *workload;
235 unsigned long flags;
236
237 if (!is_gvt_request(req)) {
238 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
239 if (action == INTEL_CONTEXT_SCHEDULE_IN &&
240 scheduler->engine_owner[ring_id]) {
241 /* Switch the ring mmio context from the owning vGPU back to host. */
242 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
243 NULL, ring_id);
244 scheduler->engine_owner[ring_id] = NULL;
245 }
246 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
247
248 return NOTIFY_OK;
249 }
250
251 workload = scheduler->current_workload[ring_id];
252 if (unlikely(!workload))
253 return NOTIFY_OK;
254
255 switch (action) {
256 case INTEL_CONTEXT_SCHEDULE_IN:
257 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
258 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
259 /* Switch the ring mmio context to the vGPU that owns this workload. */
260 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
261 workload->vgpu, ring_id);
262 scheduler->engine_owner[ring_id] = workload->vgpu;
263 } else
264 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
265 ring_id, workload->vgpu->id);
266 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
267 atomic_set(&workload->shadow_ctx_active, 1);
268 break;
269 case INTEL_CONTEXT_SCHEDULE_OUT:
270 save_ring_hw_state(workload->vgpu, ring_id);
271 atomic_set(&workload->shadow_ctx_active, 0);
272 break;
273 case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
274 save_ring_hw_state(workload->vgpu, ring_id);
275 break;
276 default:
277 WARN_ON(1);
278 return NOTIFY_OK;
279 }
280 wake_up(&workload->shadow_ctx_status_wq);
281 return NOTIFY_OK;
282 }
283
284 static void
285 shadow_context_descriptor_update(struct intel_context *ce,
286 struct intel_vgpu_workload *workload)
287 {
288 u64 desc = ce->lrc_desc;
289
290
291 /* Update the addressing mode bits of the shadow context descriptor with
292  * the addressing mode used by the guest context.
293  */
294 desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
295 desc |= workload->ctx_desc.addressing_mode <<
296 GEN8_CTX_ADDRESSING_MODE_SHIFT;
297
298 ce->lrc_desc = desc;
299 }
300
301 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
302 {
303 struct intel_vgpu *vgpu = workload->vgpu;
304 struct i915_request *req = workload->req;
305 void *shadow_ring_buffer_va;
306 u32 *cs;
307 int err;
308
309 if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
310 intel_vgpu_restore_inhibit_context(vgpu, req);
311
312 /*
313  * To track whether a request has started on HW, i915 can emit a
314  * breadcrumb at the beginning of the request and check its
315  * timeline's HWSP to see if the breadcrumb has advanced past the
316  * start of this request. The request must carry this init
317  * breadcrumb if its timeline expects one, otherwise the scheduler
318  * may misjudge the request's state during a reset. Requests built
319  * by GVT fall into that case, so check for and emit the init
320  * breadcrumb here.
321  */
322 if (req->engine->emit_init_breadcrumb) {
323 err = req->engine->emit_init_breadcrumb(req);
324 if (err) {
325 gvt_vgpu_err("fail to emit init breadcrumb\n");
326 return err;
327 }
328 }
329
330 /* allocate space in the shadow ring buffer for the guest commands */
331 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
332 if (IS_ERR(cs)) {
333 gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
334 workload->rb_len);
335 return PTR_ERR(cs);
336 }
337
338 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
339
340 /* get the shadow ring buffer va */
341 workload->shadow_ring_buffer_va = cs;
342
343 memcpy(cs, shadow_ring_buffer_va,
344 workload->rb_len);
345
346 cs += workload->rb_len / sizeof(u32);
347 intel_ring_advance(workload->req, cs);
348
349 return 0;
350 }
351
352 static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
353 {
354 if (!wa_ctx->indirect_ctx.obj)
355 return;
356
357 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
358 i915_gem_object_put(wa_ctx->indirect_ctx.obj);
359
360 wa_ctx->indirect_ctx.obj = NULL;
361 wa_ctx->indirect_ctx.shadow_va = NULL;
362 }
363
364 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
365 struct i915_gem_context *ctx)
366 {
367 struct intel_vgpu_mm *mm = workload->shadow_mm;
368 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
369 int i = 0;
370
371 if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
372 px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
373 } else {
374 for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
375 struct i915_page_directory * const pd =
376 i915_pd_entry(ppgtt->pd, i);
377
378 /* Stop at the first PDP slot for which i915 has not allocated a
379  * page directory. */
380 if (!pd)
381 break;
382 px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
383 }
384 }
385 }
386
387 static int
388 intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
389 {
390 struct intel_vgpu *vgpu = workload->vgpu;
391 struct intel_vgpu_submission *s = &vgpu->submission;
392 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
393 struct i915_request *rq;
394
395 lockdep_assert_held(&dev_priv->drm.struct_mutex);
396
397 if (workload->req)
398 return 0;
399
400 rq = i915_request_create(s->shadow[workload->ring_id]);
401 if (IS_ERR(rq)) {
402 gvt_vgpu_err("fail to allocate gem request\n");
403 return PTR_ERR(rq);
404 }
405
406 workload->req = i915_request_get(rq);
407 return 0;
408 }
409
410 /**
411  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
412  * shadowing it to get a valid request before dispatching it to host i915
413  * @workload: an abstract entity for each execlist submission
414  *
415  * This function is called before the workload is submitted to i915, to make
416  * sure the content of the workload is valid.
417  */
418 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
419 {
420 struct intel_vgpu *vgpu = workload->vgpu;
421 struct intel_vgpu_submission *s = &vgpu->submission;
422 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
423 int ret;
424
425 lockdep_assert_held(&dev_priv->drm.struct_mutex);
426
427 if (workload->shadow)
428 return 0;
429
430 if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
431 shadow_context_descriptor_update(s->shadow[workload->ring_id],
432 workload);
433
434 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
435 if (ret)
436 return ret;
437
438 if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
439 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
440 if (ret)
441 goto err_shadow;
442 }
443
444 workload->shadow = true;
445 return 0;
446 err_shadow:
447 release_shadow_wa_ctx(&workload->wa_ctx);
448 return ret;
449 }
450
451 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
452
453 static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
454 {
455 struct intel_gvt *gvt = workload->vgpu->gvt;
456 const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
457 struct intel_vgpu_shadow_bb *bb;
458 int ret;
459
460 list_for_each_entry(bb, &workload->shadow_bb, list) {
461 /* bb_start_cmd_va was set up during scanning, when the ring contents
462  * still lived in the scan buffer. copy_workload_to_ring_buffer() has
463  * since pointed workload->shadow_ring_buffer_va at the real shadow
464  * ring buffer, so for batch buffers that were located via a ring
465  * offset, recompute the BB_START command pointer against the real
466  * ring buffer here.
467  */
468 if (bb->bb_offset)
469 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
470 + bb->bb_offset;
471
472 if (bb->ppgtt) {
473 /* For a non-privileged batch buffer, the scanned/shadowed copy is
474  * only kept for auditing; the buffer the hardware actually executes
475  * stays in the guest PPGTT at its original graphics address. There
476  * is therefore nothing to relocate and no GGTT pin is needed; just
477  * flush any CPU writes made while scanning and finish our access to
478  * the object.
479  */
480
481 if (bb->clflush & CLFLUSH_AFTER) {
482 drm_clflush_virt_range(bb->va,
483 bb->obj->base.size);
484 bb->clflush &= ~CLFLUSH_AFTER;
485 }
486 i915_gem_object_finish_access(bb->obj);
487 bb->accessing = false;
488
489 } else {
490 bb->vma = i915_gem_object_ggtt_pin(bb->obj,
491 NULL, 0, 0, 0);
492 if (IS_ERR(bb->vma)) {
493 ret = PTR_ERR(bb->vma);
494 goto err;
495 }
496
497 /* relocate the shadow batch buffer: patch its GGTT address into BB_START */
498 bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
499 if (gmadr_bytes == 8)
500 bb->bb_start_cmd_va[2] = 0;
501
502 /* No one is going to touch the shadow bb from now on. */
503 if (bb->clflush & CLFLUSH_AFTER) {
504 drm_clflush_virt_range(bb->va,
505 bb->obj->base.size);
506 bb->clflush &= ~CLFLUSH_AFTER;
507 }
508
509 ret = i915_gem_object_set_to_gtt_domain(bb->obj,
510 false);
511 if (ret)
512 goto err;
513
514 ret = i915_vma_move_to_active(bb->vma,
515 workload->req,
516 0);
517 if (ret)
518 goto err;
519
520 i915_gem_object_finish_access(bb->obj);
521 bb->accessing = false;
522 }
523 }
524 return 0;
525 err:
526 release_shadow_batch_buffer(workload);
527 return ret;
528 }
529
530 static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
531 {
532 struct intel_vgpu_workload *workload =
533 container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
534 struct i915_request *rq = workload->req;
535 struct execlist_ring_context *shadow_ring_context =
536 (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
537
538 shadow_ring_context->bb_per_ctx_ptr.val =
539 (shadow_ring_context->bb_per_ctx_ptr.val &
540 (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
541 shadow_ring_context->rcs_indirect_ctx.val =
542 (shadow_ring_context->rcs_indirect_ctx.val &
543 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
544 }
545
546 static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
547 {
548 struct i915_vma *vma;
549 unsigned char *per_ctx_va =
550 (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
551 wa_ctx->indirect_ctx.size;
552
553 if (wa_ctx->indirect_ctx.size == 0)
554 return 0;
555
556 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
557 0, CACHELINE_BYTES, 0);
558 if (IS_ERR(vma))
559 return PTR_ERR(vma);
560
561 /* Record the GGTT offset of the pinned indirect context. The per-ctx
562  * batch pointer sits right after the indirect context in the same
563  * shadow object; pull it out and then clear that trailing cacheline
564  * before the context is handed to hardware.
565  */
566 wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
567
568 wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
569 memset(per_ctx_va, 0, CACHELINE_BYTES);
570
571 update_wa_ctx_2_shadow_ctx(wa_ctx);
572 return 0;
573 }
574
575 static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
576 {
577 struct intel_vgpu *vgpu = workload->vgpu;
578 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
579 u32 ring_base;
580
581 ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
582 vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
583 }
584
585 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
586 {
587 struct intel_vgpu *vgpu = workload->vgpu;
588 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
589 struct intel_vgpu_shadow_bb *bb, *pos;
590
591 if (list_empty(&workload->shadow_bb))
592 return;
593
594 bb = list_first_entry(&workload->shadow_bb,
595 struct intel_vgpu_shadow_bb, list);
596
597 mutex_lock(&dev_priv->drm.struct_mutex);
598
599 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
600 if (bb->obj) {
601 if (bb->accessing)
602 i915_gem_object_finish_access(bb->obj);
603
604 if (bb->va && !IS_ERR(bb->va))
605 i915_gem_object_unpin_map(bb->obj);
606
607 if (bb->vma && !IS_ERR(bb->vma)) {
608 i915_vma_unpin(bb->vma);
609 i915_vma_close(bb->vma);
610 }
611 i915_gem_object_put(bb->obj);
612 }
613 list_del(&bb->list);
614 kfree(bb);
615 }
616
617 mutex_unlock(&dev_priv->drm.struct_mutex);
618 }
619
620 static int prepare_workload(struct intel_vgpu_workload *workload)
621 {
622 struct intel_vgpu *vgpu = workload->vgpu;
623 struct intel_vgpu_submission *s = &vgpu->submission;
624 int ring = workload->ring_id;
625 int ret = 0;
626
627 ret = intel_vgpu_pin_mm(workload->shadow_mm);
628 if (ret) {
629 gvt_vgpu_err("fail to vgpu pin mm\n");
630 return ret;
631 }
632
633 if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
634 !workload->shadow_mm->ppgtt_mm.shadowed) {
635 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
636 return -EINVAL;
637 }
638
639 update_shadow_pdps(workload);
640
641 set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
642
643 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
644 if (ret) {
645 gvt_vgpu_err("fail to vgpu sync oos pages\n");
646 goto err_unpin_mm;
647 }
648
649 ret = intel_vgpu_flush_post_shadow(workload->vgpu);
650 if (ret) {
651 gvt_vgpu_err("fail to flush post shadow\n");
652 goto err_unpin_mm;
653 }
654
655 ret = copy_workload_to_ring_buffer(workload);
656 if (ret) {
657 gvt_vgpu_err("fail to generate request\n");
658 goto err_unpin_mm;
659 }
660
661 ret = prepare_shadow_batch_buffer(workload);
662 if (ret) {
663 gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
664 goto err_unpin_mm;
665 }
666
667 ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
668 if (ret) {
669 gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
670 goto err_shadow_batch;
671 }
672
673 if (workload->prepare) {
674 ret = workload->prepare(workload);
675 if (ret)
676 goto err_shadow_wa_ctx;
677 }
678
679 return 0;
680 err_shadow_wa_ctx:
681 release_shadow_wa_ctx(&workload->wa_ctx);
682 err_shadow_batch:
683 release_shadow_batch_buffer(workload);
684 err_unpin_mm:
685 intel_vgpu_unpin_mm(workload->shadow_mm);
686 return ret;
687 }
688
689 static int dispatch_workload(struct intel_vgpu_workload *workload)
690 {
691 struct intel_vgpu *vgpu = workload->vgpu;
692 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
693 struct i915_request *rq;
694 int ring_id = workload->ring_id;
695 int ret;
696
697 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
698 ring_id, workload);
699
700 mutex_lock(&vgpu->vgpu_lock);
701 mutex_lock(&dev_priv->drm.struct_mutex);
702
703 ret = intel_gvt_workload_req_alloc(workload);
704 if (ret)
705 goto err_req;
706
707 ret = intel_gvt_scan_and_shadow_workload(workload);
708 if (ret)
709 goto out;
710
711 ret = populate_shadow_context(workload);
712 if (ret) {
713 release_shadow_wa_ctx(&workload->wa_ctx);
714 goto out;
715 }
716
717 ret = prepare_workload(workload);
718 out:
719 if (ret) {
720 /* Something went wrong after the request was allocated: drop our
721  * reference and make sure it is not submitted below.
722  */
723 rq = fetch_and_zero(&workload->req);
724 i915_request_put(rq);
725 }
726
727 if (!IS_ERR_OR_NULL(workload->req)) {
728 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
729 ring_id, workload->req);
730 i915_request_add(workload->req);
731 workload->dispatched = true;
732 }
733 err_req:
734 if (ret)
735 workload->status = ret;
736 mutex_unlock(&dev_priv->drm.struct_mutex);
737 mutex_unlock(&vgpu->vgpu_lock);
738 return ret;
739 }
740
741 static struct intel_vgpu_workload *pick_next_workload(
742 struct intel_gvt *gvt, int ring_id)
743 {
744 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
745 struct intel_vgpu_workload *workload = NULL;
746
747 mutex_lock(&gvt->sched_lock);
748
749 /*
750  * no current vgpu / will be scheduled out / no workload
751  * bail out
752  */
753 if (!scheduler->current_vgpu) {
754 gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
755 goto out;
756 }
757
758 if (scheduler->need_reschedule) {
759 gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
760 goto out;
761 }
762
763 if (!scheduler->current_vgpu->active ||
764 list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
765 goto out;
766
767 /*
768  * still have a current workload, maybe the workload dispatcher
769  * is waiting for the GPU to finish.
770  */
771 if (scheduler->current_workload[ring_id]) {
772 workload = scheduler->current_workload[ring_id];
773 gvt_dbg_sched("ring id %d still have current workload %p\n",
774 ring_id, workload);
775 goto out;
776 }
777
778 /*
779  * pick a workload as the current workload.
780  * once the current workload is set, schedule policy routines
781  * will wait until the current workload is finished when trying
782  * to schedule out a vgpu.
783  */
784 scheduler->current_workload[ring_id] = container_of(
785 workload_q_head(scheduler->current_vgpu, ring_id)->next,
786 struct intel_vgpu_workload, list);
787
788 workload = scheduler->current_workload[ring_id];
789
790 gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
791
792 atomic_inc(&workload->vgpu->submission.running_workload_num);
793 out:
794 mutex_unlock(&gvt->sched_lock);
795 return workload;
796 }
797
798 static void update_guest_context(struct intel_vgpu_workload *workload)
799 {
800 struct i915_request *rq = workload->req;
801 struct intel_vgpu *vgpu = workload->vgpu;
802 struct intel_gvt *gvt = vgpu->gvt;
803 struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
804 struct execlist_ring_context *shadow_ring_context;
805 struct page *page;
806 void *src;
807 unsigned long context_gpa, context_page_num;
808 int i;
809 struct drm_i915_private *dev_priv = gvt->dev_priv;
810 u32 ring_base;
811 u32 head, tail;
812 u16 wrap_count;
813
814 gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
815 workload->ctx_desc.lrca);
816
817 head = workload->rb_head;
818 tail = workload->rb_tail;
819 wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
820
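/* The guest RING_HEAD keeps a wrap count in its upper bits; if the tail we
 * are reporting ended up below the head we started from, the ring wrapped
 * during this workload, so advance the wrap count. */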
821 if (tail < head) {
822 if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
823 wrap_count = 0;
824 else
825 wrap_count += 1;
826 }
827
828 head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
829
830 ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
831 vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
832 vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
833
834 context_page_num = rq->engine->context_size;
835 context_page_num = context_page_num >> PAGE_SHIFT;
836
837 if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
838 context_page_num = 19;
839
840 i = 2;
841
842 while (i < context_page_num) {
843 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
844 (u32)((workload->ctx_desc.lrca + i) <<
845 I915_GTT_PAGE_SHIFT));
846 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
847 gvt_vgpu_err("invalid guest context descriptor\n");
848 return;
849 }
850
851 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
852 src = kmap(page);
853 intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
854 I915_GTT_PAGE_SIZE);
855 kunmap(page);
856 i++;
857 }
858
859 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
860 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
861
862 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
863 shadow_ring_context = kmap(page);
864
865 #define COPY_REG(name) \
866 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
867 RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
868
869 COPY_REG(ctx_ctrl);
870 COPY_REG(ctx_timestamp);
871
872 #undef COPY_REG
873
874 intel_gvt_hypervisor_write_gpa(vgpu,
875 workload->ring_context_gpa +
876 sizeof(*shadow_ring_context),
877 (void *)shadow_ring_context +
878 sizeof(*shadow_ring_context),
879 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
880
881 kunmap(page);
882 }
883
884 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
885 intel_engine_mask_t engine_mask)
886 {
887 struct intel_vgpu_submission *s = &vgpu->submission;
888 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
889 struct intel_engine_cs *engine;
890 struct intel_vgpu_workload *pos, *n;
891 intel_engine_mask_t tmp;
892
893 /* free the unsubmitted workloads in the queues. */
894 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
895 list_for_each_entry_safe(pos, n,
896 &s->workload_q_head[engine->id], list) {
897 list_del_init(&pos->list);
898 intel_vgpu_destroy_workload(pos);
899 }
900 clear_bit(engine->id, s->shadow_ctx_desc_updated);
901 }
902 }
903
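/* Runs on the workload thread after the dispatched request has finished (or
 * failed): write results back into the guest context, fire pending virtual
 * events and retire the workload. */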
904 static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
905 {
906 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
907 struct intel_vgpu_workload *workload =
908 scheduler->current_workload[ring_id];
909 struct intel_vgpu *vgpu = workload->vgpu;
910 struct intel_vgpu_submission *s = &vgpu->submission;
911 struct i915_request *rq = workload->req;
912 int event;
913
914 mutex_lock(&vgpu->vgpu_lock);
915 mutex_lock(&gvt->sched_lock);
916
917 /* For a workload with a request, wait until its shadow context has
918  * been switched out on the hardware before touching the guest state;
919  * a workload that never got a request is completed directly.
920  */
921 if (rq) {
922 wait_event(workload->shadow_ctx_status_wq,
923 !atomic_read(&workload->shadow_ctx_active));
924
925 /* If this request caused a GPU hang, req->fence.error will have been
926  * set to -EIO by i915. Propagate -EIO into the workload status so the
927  * error is reported to the guest; otherwise the workload completed
928  * successfully.
929  */
930 if (likely(workload->status == -EINPROGRESS)) {
931 if (workload->req->fence.error == -EIO)
932 workload->status = -EIO;
933 else
934 workload->status = 0;
935 }
936
937 if (!workload->status &&
938 !(vgpu->resetting_eng & BIT(ring_id))) {
939 update_guest_context(workload);
940
941 for_each_set_bit(event, workload->pending_events,
942 INTEL_GVT_EVENT_MAX)
943 intel_vgpu_trigger_virtual_event(vgpu, event);
944 }
945
946 i915_request_put(fetch_and_zero(&workload->req));
947 }
948
949 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
950 ring_id, workload, workload->status);
951
952 scheduler->current_workload[ring_id] = NULL;
953
954 list_del_init(&workload->list);
955
956 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
957 /* A non-zero workload->status means the hardware hit a GPU hang or
958  * something went wrong in i915/GVT, and GVT will not inject a context
959  * switch interrupt into the guest, so from the guest's point of view
960  * this is a vGPU hang. Emulate it like real hardware would: any
961  * pending workloads the guest already submitted for this engine must
962  * be cleaned up.
963  *
964  * If we are in the middle of an engine reset, the pending workloads
965  * will not reach the hardware anyway and are cleaned up later in the
966  * reset path, so doing the cleanup here as well has no adverse
967  * effect.
968  */
969
970 intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
971 }
972
973 workload->complete(workload);
974
975 atomic_dec(&s->running_workload_num);
976 wake_up(&scheduler->workload_complete_wq);
977
978 if (gvt->scheduler.need_reschedule)
979 intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
980
981 mutex_unlock(&gvt->sched_lock);
982 mutex_unlock(&vgpu->vgpu_lock);
983 }
984
985 struct workload_thread_param {
986 struct intel_gvt *gvt;
987 int ring_id;
988 };
989
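/* Per-ring worker thread: picks workloads off this ring's queue, dispatches
 * them to host i915 one at a time, waits for the request to finish and then
 * completes the workload back to the guest. */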
990 static int workload_thread(void *priv)
991 {
992 struct workload_thread_param *p = (struct workload_thread_param *)priv;
993 struct intel_gvt *gvt = p->gvt;
994 int ring_id = p->ring_id;
995 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
996 struct intel_vgpu_workload *workload = NULL;
997 struct intel_vgpu *vgpu = NULL;
998 int ret;
999 bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
1000 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1001 struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
1002
1003 kfree(p);
1004
1005 gvt_dbg_core("workload thread for ring %d started\n", ring_id);
1006
1007 while (!kthread_should_stop()) {
1008 add_wait_queue(&scheduler->waitq[ring_id], &wait);
1009 do {
1010 workload = pick_next_workload(gvt, ring_id);
1011 if (workload)
1012 break;
1013 wait_woken(&wait, TASK_INTERRUPTIBLE,
1014 MAX_SCHEDULE_TIMEOUT);
1015 } while (!kthread_should_stop());
1016 remove_wait_queue(&scheduler->waitq[ring_id], &wait);
1017
1018 if (!workload)
1019 break;
1020
1021 gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
1022 workload->ring_id, workload,
1023 workload->vgpu->id);
1024
1025 intel_runtime_pm_get(rpm);
1026
1027 gvt_dbg_sched("ring id %d will dispatch workload %p\n",
1028 workload->ring_id, workload);
1029
1030 if (need_force_wake)
1031 intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
1032 FORCEWAKE_ALL);
1033
1034 /*
1035  * Update the vGPU's ring register state (RING_START) for this
1036  * workload before dispatching, so that any code which sources
1037  * register values from the vreg sees the ring of this workload.
1038  */
1039 update_vreg_in_ctx(workload);
1040
1041 ret = dispatch_workload(workload);
1042
1043 if (ret) {
1044 vgpu = workload->vgpu;
1045 gvt_vgpu_err("fail to dispatch workload, skip\n");
1046 goto complete;
1047 }
1048
1049 gvt_dbg_sched("ring id %d wait workload %p\n",
1050 workload->ring_id, workload);
1051 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
1052
1053 complete:
1054 gvt_dbg_sched("will complete workload %p, status: %d\n",
1055 workload, workload->status);
1056
1057 complete_current_workload(gvt, ring_id);
1058
1059 if (need_force_wake)
1060 intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
1061 FORCEWAKE_ALL);
1062
1063 intel_runtime_pm_put_unchecked(rpm);
1064 if (ret && (vgpu_is_vm_unhealthy(ret)))
1065 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1066 }
1067 return 0;
1068 }
1069
1070 void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
1071 {
1072 struct intel_vgpu_submission *s = &vgpu->submission;
1073 struct intel_gvt *gvt = vgpu->gvt;
1074 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1075
1076 if (atomic_read(&s->running_workload_num)) {
1077 gvt_dbg_sched("wait vgpu idle\n");
1078
1079 wait_event(scheduler->workload_complete_wq,
1080 !atomic_read(&s->running_workload_num));
1081 }
1082 }
1083
1084 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
1085 {
1086 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1087 struct intel_engine_cs *engine;
1088 enum intel_engine_id i;
1089
1090 gvt_dbg_core("clean workload scheduler\n");
1091
1092 for_each_engine(engine, gvt->dev_priv, i) {
1093 atomic_notifier_chain_unregister(
1094 &engine->context_status_notifier,
1095 &gvt->shadow_ctx_notifier_block[i]);
1096 kthread_stop(scheduler->thread[i]);
1097 }
1098 }
1099
1100 int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
1101 {
1102 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1103 struct workload_thread_param *param = NULL;
1104 struct intel_engine_cs *engine;
1105 enum intel_engine_id i;
1106 int ret;
1107
1108 gvt_dbg_core("init workload scheduler\n");
1109
1110 init_waitqueue_head(&scheduler->workload_complete_wq);
1111
1112 for_each_engine(engine, gvt->dev_priv, i) {
1113 init_waitqueue_head(&scheduler->waitq[i]);
1114
1115 param = kzalloc(sizeof(*param), GFP_KERNEL);
1116 if (!param) {
1117 ret = -ENOMEM;
1118 goto err;
1119 }
1120
1121 param->gvt = gvt;
1122 param->ring_id = i;
1123
1124 scheduler->thread[i] = kthread_run(workload_thread, param,
1125 "gvt workload %d", i);
1126 if (IS_ERR(scheduler->thread[i])) {
1127 gvt_err("fail to create workload thread\n");
1128 ret = PTR_ERR(scheduler->thread[i]);
1129 goto err;
1130 }
1131
1132 gvt->shadow_ctx_notifier_block[i].notifier_call =
1133 shadow_context_status_change;
1134 atomic_notifier_chain_register(&engine->context_status_notifier,
1135 &gvt->shadow_ctx_notifier_block[i]);
1136 }
1137 return 0;
1138 err:
1139 intel_gvt_clean_workload_scheduler(gvt);
1140 kfree(param);
1141 param = NULL;
1142 return ret;
1143 }
1144
1145 static void
1146 i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
1147 struct i915_ppgtt *ppgtt)
1148 {
1149 int i;
1150
1151 if (i915_vm_is_4lvl(&ppgtt->vm)) {
1152 px_dma(ppgtt->pd) = s->i915_context_pml4;
1153 } else {
1154 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1155 struct i915_page_directory * const pd =
1156 i915_pd_entry(ppgtt->pd, i);
1157
1158 px_dma(pd) = s->i915_context_pdps[i];
1159 }
1160 }
1161 }
1162
1163 /**
1164  * intel_vgpu_clean_submission - free submission-related resources of a vGPU
1165  * @vgpu: a vGPU
1166  *
1167  * This function is called when a vGPU is being destroyed.
1168  *
1169  */
1170 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
1171 {
1172 struct intel_vgpu_submission *s = &vgpu->submission;
1173 struct intel_engine_cs *engine;
1174 enum intel_engine_id id;
1175
1176 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
1177
1178 i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
1179 for_each_engine(engine, vgpu->gvt->dev_priv, id)
1180 intel_context_unpin(s->shadow[id]);
1181
1182 kmem_cache_destroy(s->workloads);
1183 }
1184
1185
1186 /**
1187  * intel_vgpu_reset_submission - reset submission-related resources of a vGPU
1188  * @vgpu: a vGPU
1189  * @engine_mask: engines expected to be reset
1190  *
1191  * This function is called when a vGPU is being reset.
1192  *
1193  */
1194 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
1195 intel_engine_mask_t engine_mask)
1196 {
1197 struct intel_vgpu_submission *s = &vgpu->submission;
1198
1199 if (!s->active)
1200 return;
1201
1202 intel_vgpu_clean_workloads(vgpu, engine_mask);
1203 s->ops->reset(vgpu, engine_mask);
1204 }
1205
1206 static void
1207 i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
1208 struct i915_ppgtt *ppgtt)
1209 {
1210 int i;
1211
1212 if (i915_vm_is_4lvl(&ppgtt->vm)) {
1213 s->i915_context_pml4 = px_dma(ppgtt->pd);
1214 } else {
1215 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1216 struct i915_page_directory * const pd =
1217 i915_pd_entry(ppgtt->pd, i);
1218
1219 s->i915_context_pdps[i] = px_dma(pd);
1220 }
1221 }
1222 }
1223
1224
1225 /**
1226  * intel_vgpu_setup_submission - setup submission-related resources for a vGPU
1227  * @vgpu: a vGPU
1228  *
1229  * This function is called when a vGPU is being created.
1230  *
1231  * Returns:
1232  * Zero on success, negative error code if failed.
1233  */
1234 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
1235 {
1236 struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
1237 struct intel_vgpu_submission *s = &vgpu->submission;
1238 struct intel_engine_cs *engine;
1239 struct i915_gem_context *ctx;
1240 enum intel_engine_id i;
1241 int ret;
1242
1243 mutex_lock(&i915->drm.struct_mutex);
1244
1245 ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
1246 if (IS_ERR(ctx)) {
1247 ret = PTR_ERR(ctx);
1248 goto out_unlock;
1249 }
1250
1251 i915_gem_context_set_force_single_submission(ctx);
1252
1253 i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
1254
1255 for_each_engine(engine, i915, i) {
1256 struct intel_context *ce;
1257
1258 INIT_LIST_HEAD(&s->workload_q_head[i]);
1259 s->shadow[i] = ERR_PTR(-EINVAL);
1260
1261 ce = intel_context_create(ctx, engine);
1262 if (IS_ERR(ce)) {
1263 ret = PTR_ERR(ce);
1264 goto out_shadow_ctx;
1265 }
1266
1267 if (!USES_GUC_SUBMISSION(i915)) {
1268 const unsigned int ring_size = 512 * SZ_4K;
1269
1270 ce->ring = __intel_context_ring_size(ring_size);
1271 }
1272
1273 ret = intel_context_pin(ce);
1274 intel_context_put(ce);
1275 if (ret)
1276 goto out_shadow_ctx;
1277
1278 s->shadow[i] = ce;
1279 }
1280
1281 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
1282
1283 s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
1284 sizeof(struct intel_vgpu_workload), 0,
1285 SLAB_HWCACHE_ALIGN,
1286 offsetof(struct intel_vgpu_workload, rb_tail),
1287 sizeof_field(struct intel_vgpu_workload, rb_tail),
1288 NULL);
1289
1290 if (!s->workloads) {
1291 ret = -ENOMEM;
1292 goto out_shadow_ctx;
1293 }
1294
1295 atomic_set(&s->running_workload_num, 0);
1296 bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
1297
1298 i915_gem_context_put(ctx);
1299 mutex_unlock(&i915->drm.struct_mutex);
1300 return 0;
1301
1302 out_shadow_ctx:
1303 i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
1304 for_each_engine(engine, i915, i) {
1305 if (IS_ERR(s->shadow[i]))
1306 break;
1307
1308 intel_context_unpin(s->shadow[i]);
1309 intel_context_put(s->shadow[i]);
1310 }
1311 i915_gem_context_put(ctx);
1312 out_unlock:
1313 mutex_unlock(&i915->drm.struct_mutex);
1314 return ret;
1315 }
1316
1317
1318 /**
1319  * intel_vgpu_select_submission_ops - select the virtual submission interface
1320  * @vgpu: a vGPU
1321  * @engine_mask: either ALL_ENGINES or the target engine mask
1322  * @interface: expected vGPU virtual submission interface
1323  *
1324  * This function is called when a guest configures its submission interface.
1325  *
1326  * Returns:
1327  * Zero on success, negative error code if failed.
1328  */
1329 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
1330 intel_engine_mask_t engine_mask,
1331 unsigned int interface)
1332 {
1333 struct intel_vgpu_submission *s = &vgpu->submission;
1334 const struct intel_vgpu_submission_ops *ops[] = {
1335 [INTEL_VGPU_EXECLIST_SUBMISSION] =
1336 &intel_vgpu_execlist_submission_ops,
1337 };
1338 int ret;
1339
1340 if (WARN_ON(interface >= ARRAY_SIZE(ops)))
1341 return -EINVAL;
1342
1343 if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
1344 return -EINVAL;
1345
1346 if (s->active)
1347 s->ops->clean(vgpu, engine_mask);
1348
1349 if (interface == 0) {
1350 s->ops = NULL;
1351 s->virtual_submission_interface = 0;
1352 s->active = false;
1353 gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
1354 return 0;
1355 }
1356
1357 ret = ops[interface]->init(vgpu, engine_mask);
1358 if (ret)
1359 return ret;
1360
1361 s->ops = ops[interface];
1362 s->virtual_submission_interface = interface;
1363 s->active = true;
1364
1365 gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
1366 vgpu->id, s->ops->name);
1367
1368 return 0;
1369 }
1370
1371
1372 /**
1373  * intel_vgpu_destroy_workload - destroy a vGPU workload
1374  * @workload: the workload to destroy
1375  *
1376  * This function is called when a vGPU workload is destroyed.
1377  */
1378 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1379 {
1380 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1381
1382 release_shadow_batch_buffer(workload);
1383 release_shadow_wa_ctx(&workload->wa_ctx);
1384
1385 if (workload->shadow_mm)
1386 intel_vgpu_mm_put(workload->shadow_mm);
1387
1388 kmem_cache_free(s->workloads, workload);
1389 }
1390
1391 static struct intel_vgpu_workload *
1392 alloc_workload(struct intel_vgpu *vgpu)
1393 {
1394 struct intel_vgpu_submission *s = &vgpu->submission;
1395 struct intel_vgpu_workload *workload;
1396
1397 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
1398 if (!workload)
1399 return ERR_PTR(-ENOMEM);
1400
1401 INIT_LIST_HEAD(&workload->list);
1402 INIT_LIST_HEAD(&workload->shadow_bb);
1403
1404 init_waitqueue_head(&workload->shadow_ctx_status_wq);
1405 atomic_set(&workload->shadow_ctx_active, 0);
1406
1407 workload->status = -EINPROGRESS;
1408 workload->vgpu = vgpu;
1409
1410 return workload;
1411 }
1412
1413 #define RING_CTX_OFF(x) \
1414 offsetof(struct execlist_ring_context, x)
1415
1416 static void read_guest_pdps(struct intel_vgpu *vgpu,
1417 u64 ring_context_gpa, u32 pdp[8])
1418 {
1419 u64 gpa;
1420 int i;
1421
1422 gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
1423
1424 for (i = 0; i < 8; i++)
1425 intel_gvt_hypervisor_read_gpa(vgpu,
1426 gpa + i * 8, &pdp[7 - i], 4);
1427 }
1428
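/* Look up the guest's PPGTT root entries from the ring context and find or
 * create the matching shadow mm for this workload. */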
1429 static int prepare_mm(struct intel_vgpu_workload *workload)
1430 {
1431 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
1432 struct intel_vgpu_mm *mm;
1433 struct intel_vgpu *vgpu = workload->vgpu;
1434 enum intel_gvt_gtt_type root_entry_type;
1435 u64 pdps[GVT_RING_CTX_NR_PDPS];
1436
1437 switch (desc->addressing_mode) {
1438 case 1:
1439 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1440 break;
1441 case 3:
1442 root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1443 break;
1444 default:
1445 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
1446 return -EINVAL;
1447 }
1448
1449 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
1450
1451 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
1452 if (IS_ERR(mm))
1453 return PTR_ERR(mm);
1454
1455 workload->shadow_mm = mm;
1456 return 0;
1457 }
1458
1459 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
1460 ((a)->lrca == (b)->lrca))
1461
1462
1463 /**
1464  * intel_vgpu_create_workload - create a vGPU workload
1465  * @vgpu: a vGPU
1466  * @ring_id: ring index
1467  * @desc: a guest context descriptor
1468  *
1469  * This function is called when creating a vGPU workload.
1470  *
1471  * Returns:
1472  * struct intel_vgpu_workload * on success, negative error code in
1473  * pointer if failed.
1474  */
1475 struct intel_vgpu_workload *
1476 intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1477 struct execlist_ctx_descriptor_format *desc)
1478 {
1479 struct intel_vgpu_submission *s = &vgpu->submission;
1480 struct list_head *q = workload_q_head(vgpu, ring_id);
1481 struct intel_vgpu_workload *last_workload = NULL;
1482 struct intel_vgpu_workload *workload = NULL;
1483 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1484 u64 ring_context_gpa;
1485 u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
1486 u32 guest_head;
1487 int ret;
1488
1489 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
1490 (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
1491 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
1492 gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
1493 return ERR_PTR(-EINVAL);
1494 }
1495
1496 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1497 RING_CTX_OFF(ring_header.val), &head, 4);
1498
1499 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1500 RING_CTX_OFF(ring_tail.val), &tail, 4);
1501
1502 guest_head = head;
1503
1504 head &= RB_HEAD_OFF_MASK;
1505 tail &= RB_TAIL_OFF_MASK;
1506
1507 list_for_each_entry_reverse(last_workload, q, list) {
1508
1509 if (same_context(&last_workload->ctx_desc, desc)) {
1510 gvt_dbg_el("ring id %d cur workload == last\n",
1511 ring_id);
1512 gvt_dbg_el("ctx head %x real head %lx\n", head,
1513 last_workload->rb_tail);
1514
1515 /* The guest context head pointer cannot be trusted here, as it
1516  * might not have been updated yet; continue from the previous
1517  * workload's tail instead. */
1518 head = last_workload->rb_tail;
1519 break;
1520 }
1521 }
1522
1523 gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
1524
1525 /* record some ring buffer register values for scan and shadow */
1526 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1527 RING_CTX_OFF(rb_start.val), &start, 4);
1528 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1529 RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
1530 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1531 RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
1532
1533 if (!intel_gvt_ggtt_validate_range(vgpu, start,
1534 _RING_CTL_BUF_SIZE(ctl))) {
1535 gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
1536 return ERR_PTR(-EINVAL);
1537 }
1538
1539 workload = alloc_workload(vgpu);
1540 if (IS_ERR(workload))
1541 return workload;
1542
1543 workload->ring_id = ring_id;
1544 workload->ctx_desc = *desc;
1545 workload->ring_context_gpa = ring_context_gpa;
1546 workload->rb_head = head;
1547 workload->guest_rb_head = guest_head;
1548 workload->rb_tail = tail;
1549 workload->rb_start = start;
1550 workload->rb_ctl = ctl;
1551
1552 if (ring_id == RCS0) {
1553 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1554 RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
1555 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1556 RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
1557
1558 workload->wa_ctx.indirect_ctx.guest_gma =
1559 indirect_ctx & INDIRECT_CTX_ADDR_MASK;
1560 workload->wa_ctx.indirect_ctx.size =
1561 (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
1562 CACHELINE_BYTES;
1563
1564 if (workload->wa_ctx.indirect_ctx.size != 0) {
1565 if (!intel_gvt_ggtt_validate_range(vgpu,
1566 workload->wa_ctx.indirect_ctx.guest_gma,
1567 workload->wa_ctx.indirect_ctx.size)) {
1568 gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
1569 workload->wa_ctx.indirect_ctx.guest_gma);
1570 kmem_cache_free(s->workloads, workload);
1571 return ERR_PTR(-EINVAL);
1572 }
1573 }
1574
1575 workload->wa_ctx.per_ctx.guest_gma =
1576 per_ctx & PER_CTX_ADDR_MASK;
1577 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1578 if (workload->wa_ctx.per_ctx.valid) {
1579 if (!intel_gvt_ggtt_validate_range(vgpu,
1580 workload->wa_ctx.per_ctx.guest_gma,
1581 CACHELINE_BYTES)) {
1582 gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
1583 workload->wa_ctx.per_ctx.guest_gma);
1584 kmem_cache_free(s->workloads, workload);
1585 return ERR_PTR(-EINVAL);
1586 }
1587 }
1588 }
1589
1590 gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
1591 workload, ring_id, head, tail, start, ctl);
1592
1593 ret = prepare_mm(workload);
1594 if (ret) {
1595 kmem_cache_free(s->workloads, workload);
1596 return ERR_PTR(ret);
1597 }
1598
1599 /* Only scan and shadow the first workload in the queue,
1600  * as there is only one pre-allocated buf-obj for shadowing.
1601  */
1602 if (list_empty(workload_q_head(vgpu, ring_id))) {
1603 intel_runtime_pm_get(&dev_priv->runtime_pm);
1604 mutex_lock(&dev_priv->drm.struct_mutex);
1605 ret = intel_gvt_scan_and_shadow_workload(workload);
1606 mutex_unlock(&dev_priv->drm.struct_mutex);
1607 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
1608 }
1609
1610 if (ret) {
1611 if (vgpu_is_vm_unhealthy(ret))
1612 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1613 intel_vgpu_destroy_workload(workload);
1614 return ERR_PTR(ret);
1615 }
1616
1617 return workload;
1618 }
1619
1620 /**
1621  * intel_vgpu_queue_workload - queue a vGPU workload
1622  * @workload: the workload to queue in
1623  */
1624 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
1625 {
1626 list_add_tail(&workload->list,
1627 workload_q_head(workload->vgpu, workload->ring_id));
1628 intel_gvt_kick_schedule(workload->vgpu->gvt);
1629 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
1630 }