This source file includes the following definitions.
- live_sanitycheck
- emit_semaphore_chain
- semaphore_queue
- release_queue
- slice_semaphore_queue
- live_timeslice_preempt
- live_busywait_preempt
- spinner_create_request
- live_preempt
- live_late_preempt
- preempt_client_init
- preempt_client_fini
- live_nopreempt
- live_suppress_self_preempt
- dummy_notify
- dummy_request
- dummy_request_free
- live_suppress_wait_preempt
- live_chain_preempt
- live_preempt_hang
- random_range
- random_priority
- smoke_context
- smoke_submit
- smoke_crescendo_thread
- smoke_crescendo
- smoke_random
- live_preempt_smoke
- nop_virtual_engine
- live_virtual_engine
- mask_virtual_engine
- live_virtual_mask
- bond_virtual_engine
- live_virtual_bond
- intel_execlists_live_selftests
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6
7 #include <linux/prime_numbers.h>
8
9 #include "gem/i915_gem_pm.h"
10 #include "gt/intel_reset.h"
11
12 #include "i915_selftest.h"
13 #include "selftests/i915_random.h"
14 #include "selftests/igt_flush_test.h"
15 #include "selftests/igt_live_test.h"
16 #include "selftests/igt_spinner.h"
17 #include "selftests/lib_sw_fence.h"
18
19 #include "gem/selftests/igt_gem_utils.h"
20 #include "gem/selftests/mock_context.h"
21
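/*
 * live_sanitycheck: submit a single spinning request on each engine of a
 * fresh kernel context, wait for the spinner to start executing on the GPU,
 * then terminate it. Any engine on which the spinner fails to start wedges
 * the GT and fails the test.
 */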
22 static int live_sanitycheck(void *arg)
23 {
24 struct drm_i915_private *i915 = arg;
25 struct i915_gem_engines_iter it;
26 struct i915_gem_context *ctx;
27 struct intel_context *ce;
28 struct igt_spinner spin;
29 intel_wakeref_t wakeref;
30 int err = -ENOMEM;
31
32 if (!HAS_LOGICAL_RING_CONTEXTS(i915))
33 return 0;
34
35 mutex_lock(&i915->drm.struct_mutex);
36 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
37
38 if (igt_spinner_init(&spin, &i915->gt))
39 goto err_unlock;
40
41 ctx = kernel_context(i915);
42 if (!ctx)
43 goto err_spin;
44
45 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
46 struct i915_request *rq;
47
48 rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
49 if (IS_ERR(rq)) {
50 err = PTR_ERR(rq);
51 goto err_ctx;
52 }
53
54 i915_request_add(rq);
55 if (!igt_wait_for_spinner(&spin, rq)) {
56 GEM_TRACE("spinner failed to start\n");
57 GEM_TRACE_DUMP();
58 intel_gt_set_wedged(&i915->gt);
59 err = -EIO;
60 goto err_ctx;
61 }
62
63 igt_spinner_end(&spin);
64 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
65 err = -EIO;
66 goto err_ctx;
67 }
68 }
69
70 err = 0;
71 err_ctx:
72 i915_gem_context_unlock_engines(ctx);
73 kernel_context_close(ctx);
74 err_spin:
75 igt_spinner_fini(&spin);
76 err_unlock:
77 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
78 mutex_unlock(&i915->drm.struct_mutex);
79 return err;
80 }
81
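/*
 * emit_semaphore_chain: each link in the chain spins (MI_SEMAPHORE_WAIT in
 * polling mode) on dword idx of the shared semaphore page and, once that
 * dword becomes non-zero, writes dword idx - 1 to release the previous
 * link. Arbitration is enabled around the wait so the spinning batch
 * remains preemptible.
 */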
82 static int
83 emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
84 {
85 u32 *cs;
86
87 cs = intel_ring_begin(rq, 10);
88 if (IS_ERR(cs))
89 return PTR_ERR(cs);
90
91 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
92
93 *cs++ = MI_SEMAPHORE_WAIT |
94 MI_SEMAPHORE_GLOBAL_GTT |
95 MI_SEMAPHORE_POLL |
96 MI_SEMAPHORE_SAD_NEQ_SDD;
97 *cs++ = 0;
98 *cs++ = i915_ggtt_offset(vma) + 4 * idx;
99 *cs++ = 0;
100
101 if (idx > 0) {
102 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
103 *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
104 *cs++ = 0;
105 *cs++ = 1;
106 } else {
107 *cs++ = MI_NOOP;
108 *cs++ = MI_NOOP;
109 *cs++ = MI_NOOP;
110 *cs++ = MI_NOOP;
111 }
112
113 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
114
115 intel_ring_advance(rq, cs);
116 return 0;
117 }
118
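/*
 * semaphore_queue: submit one link of the semaphore chain on the given
 * engine using a throwaway kernel context.
 */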
119 static struct i915_request *
120 semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
121 {
122 struct i915_gem_context *ctx;
123 struct i915_request *rq;
124 int err;
125
126 ctx = kernel_context(engine->i915);
127 if (!ctx)
128 return ERR_PTR(-ENOMEM);
129
130 rq = igt_request_alloc(ctx, engine);
131 if (IS_ERR(rq))
132 goto out_ctx;
133
134 err = emit_semaphore_chain(rq, vma, idx);
135 i915_request_add(rq);
136 if (err)
137 rq = ERR_PTR(err);
138
139 out_ctx:
140 kernel_context_close(ctx);
141 return rq;
142 }
143
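/*
 * release_queue: from the engine's kernel context, write to the last
 * semaphore slot in the chain to start it unwinding, and bump the request
 * to maximum priority so it is scheduled ahead of the spinning waiters.
 */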
144 static int
145 release_queue(struct intel_engine_cs *engine,
146 struct i915_vma *vma,
147 int idx)
148 {
149 struct i915_sched_attr attr = {
150 .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
151 };
152 struct i915_request *rq;
153 u32 *cs;
154
155 rq = i915_request_create(engine->kernel_context);
156 if (IS_ERR(rq))
157 return PTR_ERR(rq);
158
159 cs = intel_ring_begin(rq, 4);
160 if (IS_ERR(cs)) {
161 i915_request_add(rq);
162 return PTR_ERR(cs);
163 }
164
165 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
166 *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
167 *cs++ = 0;
168 *cs++ = 1;
169
170 intel_ring_advance(rq, cs);
171 i915_request_add(rq);
172
173 engine->schedule(rq, &attr);
174
175 return 0;
176 }
177
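/*
 * slice_semaphore_queue: queue the head of the chain on the outer engine,
 * add count further links on every engine, then kick the last slot from a
 * max priority request. Since all the links busywait at equal priority,
 * the chain can only unwind back to the head if the scheduler timeslices
 * between the queued requests on each engine.
 */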
178 static int
179 slice_semaphore_queue(struct intel_engine_cs *outer,
180 struct i915_vma *vma,
181 int count)
182 {
183 struct intel_engine_cs *engine;
184 struct i915_request *head;
185 enum intel_engine_id id;
186 int err, i, n = 0;
187
188 head = semaphore_queue(outer, vma, n++);
189 if (IS_ERR(head))
190 return PTR_ERR(head);
191
192 i915_request_get(head);
193 for_each_engine(engine, outer->i915, id) {
194 for (i = 0; i < count; i++) {
195 struct i915_request *rq;
196
197 rq = semaphore_queue(engine, vma, n++);
198 if (IS_ERR(rq)) {
199 err = PTR_ERR(rq);
200 goto out;
201 }
202 }
203 }
204
205 err = release_queue(outer, vma, n);
206 if (err)
207 goto out;
208
209 if (i915_request_wait(head,
210 I915_WAIT_LOCKED,
211 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
212 pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
213 count, n);
214 GEM_TRACE_DUMP();
215 intel_gt_set_wedged(outer->gt);
216 err = -EIO;
217 }
218
219 out:
220 i915_request_put(head);
221 return err;
222 }
223
224 static int live_timeslice_preempt(void *arg)
225 {
226 struct drm_i915_private *i915 = arg;
227 struct drm_i915_gem_object *obj;
228 intel_wakeref_t wakeref;
229 struct i915_vma *vma;
230 void *vaddr;
231 int err = 0;
232 int count;
233
234 /*
235 * Timeslicing: if a request runs for too long, we want to give other
236 * users a fair go on the GPU rather than letting a single client hog
237 * the engine. Build chains of semaphore waits spread across every
238 * engine and check that the scheduler slices between the queued
239 * contexts, so that the final release is able to unwind each earlier
240 * semaphore in turn and the whole chain completes.
241 */
242
243 mutex_lock(&i915->drm.struct_mutex);
244 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
245
246 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
247 if (IS_ERR(obj)) {
248 err = PTR_ERR(obj);
249 goto err_unlock;
250 }
251
252 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
253 if (IS_ERR(vma)) {
254 err = PTR_ERR(vma);
255 goto err_obj;
256 }
257
258 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
259 if (IS_ERR(vaddr)) {
260 err = PTR_ERR(vaddr);
261 goto err_obj;
262 }
263
264 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
265 if (err)
266 goto err_map;
267
268 for_each_prime_number_from(count, 1, 16) {
269 struct intel_engine_cs *engine;
270 enum intel_engine_id id;
271
272 for_each_engine(engine, i915, id) {
273 if (!intel_engine_has_preemption(engine))
274 continue;
275
276 memset(vaddr, 0, PAGE_SIZE);
277
278 err = slice_semaphore_queue(engine, vma, count);
279 if (err)
280 goto err_pin;
281
282 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
283 err = -EIO;
284 goto err_pin;
285 }
286 }
287 }
288
289 err_pin:
290 i915_vma_unpin(vma);
291 err_map:
292 i915_gem_object_unpin_map(obj);
293 err_obj:
294 i915_gem_object_put(obj);
295 err_unlock:
296 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
297 mutex_unlock(&i915->drm.struct_mutex);
298
299 return err;
300 }
301
302 static int live_busywait_preempt(void *arg)
303 {
304 struct drm_i915_private *i915 = arg;
305 struct i915_gem_context *ctx_hi, *ctx_lo;
306 struct intel_engine_cs *engine;
307 struct drm_i915_gem_object *obj;
308 struct i915_vma *vma;
309 enum intel_engine_id id;
310 intel_wakeref_t wakeref;
311 int err = -ENOMEM;
312 u32 *map;
313
314 /*
315 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
316 * preempt the busywaits used to synchronise between rings.
317 */
318
319 mutex_lock(&i915->drm.struct_mutex);
320 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
321
322 ctx_hi = kernel_context(i915);
323 if (!ctx_hi)
324 goto err_unlock;
325 ctx_hi->sched.priority =
326 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
327
328 ctx_lo = kernel_context(i915);
329 if (!ctx_lo)
330 goto err_ctx_hi;
331 ctx_lo->sched.priority =
332 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
333
334 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
335 if (IS_ERR(obj)) {
336 err = PTR_ERR(obj);
337 goto err_ctx_lo;
338 }
339
340 map = i915_gem_object_pin_map(obj, I915_MAP_WC);
341 if (IS_ERR(map)) {
342 err = PTR_ERR(map);
343 goto err_obj;
344 }
345
346 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
347 if (IS_ERR(vma)) {
348 err = PTR_ERR(vma);
349 goto err_map;
350 }
351
352 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
353 if (err)
354 goto err_map;
355
356 for_each_engine(engine, i915, id) {
357 struct i915_request *lo, *hi;
358 struct igt_live_test t;
359 u32 *cs;
360
361 if (!intel_engine_has_preemption(engine))
362 continue;
363
364 if (!intel_engine_can_store_dword(engine))
365 continue;
366
367 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
368 err = -EIO;
369 goto err_vma;
370 }
371
372 /*
373 * We create two requests. The low priority request busywaits on a
374 * semaphore (inside the ringbuffer, where it should be preemptible)
375 * and the high priority request uses a MI_STORE_DWORD_IMM to release
376 * that semaphore. If preemption works, the high priority write runs
377 * while the low priority request is still spinning, terminating the
378 * busywait and letting the low priority request complete.
379 */
380
381 lo = igt_request_alloc(ctx_lo, engine);
382 if (IS_ERR(lo)) {
383 err = PTR_ERR(lo);
384 goto err_vma;
385 }
386
387 cs = intel_ring_begin(lo, 8);
388 if (IS_ERR(cs)) {
389 err = PTR_ERR(cs);
390 i915_request_add(lo);
391 goto err_vma;
392 }
393
394 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
395 *cs++ = i915_ggtt_offset(vma);
396 *cs++ = 0;
397 *cs++ = 1;
398
399
400 /* Then busywait until the semaphore is reset to zero by the preempting request */
401 *cs++ = MI_SEMAPHORE_WAIT |
402 MI_SEMAPHORE_GLOBAL_GTT |
403 MI_SEMAPHORE_POLL |
404 MI_SEMAPHORE_SAD_EQ_SDD;
405 *cs++ = 0;
406 *cs++ = i915_ggtt_offset(vma);
407 *cs++ = 0;
408
409 intel_ring_advance(lo, cs);
410 i915_request_add(lo);
411
412 if (wait_for(READ_ONCE(*map), 10)) {
413 err = -ETIMEDOUT;
414 goto err_vma;
415 }
416
417 /* Low priority request should be busywaiting now */
418 if (i915_request_wait(lo, 0, 1) != -ETIME) {
419 pr_err("%s: Busywaiting request did not!\n",
420 engine->name);
421 err = -EIO;
422 goto err_vma;
423 }
424
425 hi = igt_request_alloc(ctx_hi, engine);
426 if (IS_ERR(hi)) {
427 err = PTR_ERR(hi);
428 goto err_vma;
429 }
430
431 cs = intel_ring_begin(hi, 4);
432 if (IS_ERR(cs)) {
433 err = PTR_ERR(cs);
434 i915_request_add(hi);
435 goto err_vma;
436 }
437
438 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
439 *cs++ = i915_ggtt_offset(vma);
440 *cs++ = 0;
441 *cs++ = 0;
442
443 intel_ring_advance(hi, cs);
444 i915_request_add(hi);
445
446 if (i915_request_wait(lo, 0, HZ / 5) < 0) {
447 struct drm_printer p = drm_info_printer(i915->drm.dev);
448
449 pr_err("%s: Failed to preempt semaphore busywait!\n",
450 engine->name);
451
452 intel_engine_dump(engine, &p, "%s\n", engine->name);
453 GEM_TRACE_DUMP();
454
455 intel_gt_set_wedged(&i915->gt);
456 err = -EIO;
457 goto err_vma;
458 }
459 GEM_BUG_ON(READ_ONCE(*map));
460
461 if (igt_live_test_end(&t)) {
462 err = -EIO;
463 goto err_vma;
464 }
465 }
466
467 err = 0;
468 err_vma:
469 i915_vma_unpin(vma);
470 err_map:
471 i915_gem_object_unpin_map(obj);
472 err_obj:
473 i915_gem_object_put(obj);
474 err_ctx_lo:
475 kernel_context_close(ctx_lo);
476 err_ctx_hi:
477 kernel_context_close(ctx_hi);
478 err_unlock:
479 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
480 mutex_unlock(&i915->drm.struct_mutex);
481 return err;
482 }
483
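/*
 * spinner_create_request: resolve the (context, engine) pair to an
 * intel_context and build a spinning request on it, terminated by the
 * given arbitration command (MI_ARB_CHECK for a preemptible spinner,
 * MI_NOOP for one that cannot be preempted mid-batch).
 */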
484 static struct i915_request *
485 spinner_create_request(struct igt_spinner *spin,
486 struct i915_gem_context *ctx,
487 struct intel_engine_cs *engine,
488 u32 arb)
489 {
490 struct intel_context *ce;
491 struct i915_request *rq;
492
493 ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
494 if (IS_ERR(ce))
495 return ERR_CAST(ce);
496
497 rq = igt_spinner_create_request(spin, ce, arb);
498 intel_context_put(ce);
499 return rq;
500 }
501
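/*
 * live_preempt: start a spinner from a minimum priority context, then
 * submit a second spinner from a maximum priority context on the same
 * engine. The high priority spinner must start executing while the low
 * priority one is still spinning, i.e. it must preempt into the engine
 * rather than queue behind the first request.
 */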
502 static int live_preempt(void *arg)
503 {
504 struct drm_i915_private *i915 = arg;
505 struct i915_gem_context *ctx_hi, *ctx_lo;
506 struct igt_spinner spin_hi, spin_lo;
507 struct intel_engine_cs *engine;
508 enum intel_engine_id id;
509 intel_wakeref_t wakeref;
510 int err = -ENOMEM;
511
512 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
513 return 0;
514
515 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
516 pr_err("Logical preemption supported, but not exposed\n");
517
518 mutex_lock(&i915->drm.struct_mutex);
519 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
520
521 if (igt_spinner_init(&spin_hi, &i915->gt))
522 goto err_unlock;
523
524 if (igt_spinner_init(&spin_lo, &i915->gt))
525 goto err_spin_hi;
526
527 ctx_hi = kernel_context(i915);
528 if (!ctx_hi)
529 goto err_spin_lo;
530 ctx_hi->sched.priority =
531 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
532
533 ctx_lo = kernel_context(i915);
534 if (!ctx_lo)
535 goto err_ctx_hi;
536 ctx_lo->sched.priority =
537 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
538
539 for_each_engine(engine, i915, id) {
540 struct igt_live_test t;
541 struct i915_request *rq;
542
543 if (!intel_engine_has_preemption(engine))
544 continue;
545
546 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
547 err = -EIO;
548 goto err_ctx_lo;
549 }
550
551 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
552 MI_ARB_CHECK);
553 if (IS_ERR(rq)) {
554 err = PTR_ERR(rq);
555 goto err_ctx_lo;
556 }
557
558 i915_request_add(rq);
559 if (!igt_wait_for_spinner(&spin_lo, rq)) {
560 GEM_TRACE("lo spinner failed to start\n");
561 GEM_TRACE_DUMP();
562 intel_gt_set_wedged(&i915->gt);
563 err = -EIO;
564 goto err_ctx_lo;
565 }
566
567 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
568 MI_ARB_CHECK);
569 if (IS_ERR(rq)) {
570 igt_spinner_end(&spin_lo);
571 err = PTR_ERR(rq);
572 goto err_ctx_lo;
573 }
574
575 i915_request_add(rq);
576 if (!igt_wait_for_spinner(&spin_hi, rq)) {
577 GEM_TRACE("hi spinner failed to start\n");
578 GEM_TRACE_DUMP();
579 intel_gt_set_wedged(&i915->gt);
580 err = -EIO;
581 goto err_ctx_lo;
582 }
583
584 igt_spinner_end(&spin_hi);
585 igt_spinner_end(&spin_lo);
586
587 if (igt_live_test_end(&t)) {
588 err = -EIO;
589 goto err_ctx_lo;
590 }
591 }
592
593 err = 0;
594 err_ctx_lo:
595 kernel_context_close(ctx_lo);
596 err_ctx_hi:
597 kernel_context_close(ctx_hi);
598 err_spin_lo:
599 igt_spinner_fini(&spin_lo);
600 err_spin_hi:
601 igt_spinner_fini(&spin_hi);
602 err_unlock:
603 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
604 mutex_unlock(&i915->drm.struct_mutex);
605 return err;
606 }
607
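/*
 * live_late_preempt: submit the second client's spinner at normal priority
 * so that it queues behind the first client's spinner, then raise its
 * priority after submission and check that the late priority bump still
 * triggers preemption.
 */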
608 static int live_late_preempt(void *arg)
609 {
610 struct drm_i915_private *i915 = arg;
611 struct i915_gem_context *ctx_hi, *ctx_lo;
612 struct igt_spinner spin_hi, spin_lo;
613 struct intel_engine_cs *engine;
614 struct i915_sched_attr attr = {};
615 enum intel_engine_id id;
616 intel_wakeref_t wakeref;
617 int err = -ENOMEM;
618
619 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
620 return 0;
621
622 mutex_lock(&i915->drm.struct_mutex);
623 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
624
625 if (igt_spinner_init(&spin_hi, &i915->gt))
626 goto err_unlock;
627
628 if (igt_spinner_init(&spin_lo, &i915->gt))
629 goto err_spin_hi;
630
631 ctx_hi = kernel_context(i915);
632 if (!ctx_hi)
633 goto err_spin_lo;
634
635 ctx_lo = kernel_context(i915);
636 if (!ctx_lo)
637 goto err_ctx_hi;
638
639 /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
640 ctx_lo->sched.priority = I915_USER_PRIORITY(1);
641
642 for_each_engine(engine, i915, id) {
643 struct igt_live_test t;
644 struct i915_request *rq;
645
646 if (!intel_engine_has_preemption(engine))
647 continue;
648
649 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
650 err = -EIO;
651 goto err_ctx_lo;
652 }
653
654 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
655 MI_ARB_CHECK);
656 if (IS_ERR(rq)) {
657 err = PTR_ERR(rq);
658 goto err_ctx_lo;
659 }
660
661 i915_request_add(rq);
662 if (!igt_wait_for_spinner(&spin_lo, rq)) {
663 pr_err("First context failed to start\n");
664 goto err_wedged;
665 }
666
667 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
668 MI_NOOP);
669 if (IS_ERR(rq)) {
670 igt_spinner_end(&spin_lo);
671 err = PTR_ERR(rq);
672 goto err_ctx_lo;
673 }
674
675 i915_request_add(rq);
676 if (igt_wait_for_spinner(&spin_hi, rq)) {
677 pr_err("Second context overtook first?\n");
678 goto err_wedged;
679 }
680
681 attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
682 engine->schedule(rq, &attr);
683
684 if (!igt_wait_for_spinner(&spin_hi, rq)) {
685 pr_err("High priority context failed to preempt the low priority context\n");
686 GEM_TRACE_DUMP();
687 goto err_wedged;
688 }
689
690 igt_spinner_end(&spin_hi);
691 igt_spinner_end(&spin_lo);
692
693 if (igt_live_test_end(&t)) {
694 err = -EIO;
695 goto err_ctx_lo;
696 }
697 }
698
699 err = 0;
700 err_ctx_lo:
701 kernel_context_close(ctx_lo);
702 err_ctx_hi:
703 kernel_context_close(ctx_hi);
704 err_spin_lo:
705 igt_spinner_fini(&spin_lo);
706 err_spin_hi:
707 igt_spinner_fini(&spin_hi);
708 err_unlock:
709 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
710 mutex_unlock(&i915->drm.struct_mutex);
711 return err;
712
713 err_wedged:
714 igt_spinner_end(&spin_hi);
715 igt_spinner_end(&spin_lo);
716 intel_gt_set_wedged(&i915->gt);
717 err = -EIO;
718 goto err_ctx_lo;
719 }
720
721 struct preempt_client {
722 struct igt_spinner spin;
723 struct i915_gem_context *ctx;
724 };
725
726 static int preempt_client_init(struct drm_i915_private *i915,
727 struct preempt_client *c)
728 {
729 c->ctx = kernel_context(i915);
730 if (!c->ctx)
731 return -ENOMEM;
732
733 if (igt_spinner_init(&c->spin, &i915->gt))
734 goto err_ctx;
735
736 return 0;
737
738 err_ctx:
739 kernel_context_close(c->ctx);
740 return -ENOMEM;
741 }
742
743 static void preempt_client_fini(struct preempt_client *c)
744 {
745 igt_spinner_fini(&c->spin);
746 kernel_context_close(c->ctx);
747 }
748
749 static int live_nopreempt(void *arg)
750 {
751 struct drm_i915_private *i915 = arg;
752 struct intel_engine_cs *engine;
753 struct preempt_client a, b;
754 enum intel_engine_id id;
755 intel_wakeref_t wakeref;
756 int err = -ENOMEM;
757
758 /*
759 * Verify that we can disable preemption for an individual request
760 * that may be being observed and not want to be interrupted.
761 */
762
763 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
764 return 0;
765
766 mutex_lock(&i915->drm.struct_mutex);
767 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
768
769 if (preempt_client_init(i915, &a))
770 goto err_unlock;
771 if (preempt_client_init(i915, &b))
772 goto err_client_a;
773 b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
774
775 for_each_engine(engine, i915, id) {
776 struct i915_request *rq_a, *rq_b;
777
778 if (!intel_engine_has_preemption(engine))
779 continue;
780
781 engine->execlists.preempt_hang.count = 0;
782
783 rq_a = spinner_create_request(&a.spin,
784 a.ctx, engine,
785 MI_ARB_CHECK);
786 if (IS_ERR(rq_a)) {
787 err = PTR_ERR(rq_a);
788 goto err_client_b;
789 }
790
791 /* Low priority client, but unpreemptable! */
792 rq_a->flags |= I915_REQUEST_NOPREEMPT;
793
794 i915_request_add(rq_a);
795 if (!igt_wait_for_spinner(&a.spin, rq_a)) {
796 pr_err("First client failed to start\n");
797 goto err_wedged;
798 }
799
800 rq_b = spinner_create_request(&b.spin,
801 b.ctx, engine,
802 MI_ARB_CHECK);
803 if (IS_ERR(rq_b)) {
804 err = PTR_ERR(rq_b);
805 goto err_client_b;
806 }
807
808 i915_request_add(rq_b);
809
810 /* B is much more important than A! (But A is unpreemptable.) */
811 GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
812
813 /* B must not be able to start while the nopreempt A request is still spinning */
814 if (igt_wait_for_spinner(&b.spin, rq_b)) {
815 pr_err("Second client started too early!\n");
816 goto err_wedged;
817 }
818
819 igt_spinner_end(&a.spin);
820
821 if (!igt_wait_for_spinner(&b.spin, rq_b)) {
822 pr_err("Second client failed to start\n");
823 goto err_wedged;
824 }
825
826 igt_spinner_end(&b.spin);
827
828 if (engine->execlists.preempt_hang.count) {
829 pr_err("Preemption recorded x%d; should have been suppressed!\n",
830 engine->execlists.preempt_hang.count);
831 err = -EINVAL;
832 goto err_wedged;
833 }
834
835 if (igt_flush_test(i915, I915_WAIT_LOCKED))
836 goto err_wedged;
837 }
838
839 err = 0;
840 err_client_b:
841 preempt_client_fini(&b);
842 err_client_a:
843 preempt_client_fini(&a);
844 err_unlock:
845 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
846 mutex_unlock(&i915->drm.struct_mutex);
847 return err;
848
849 err_wedged:
850 igt_spinner_end(&b.spin);
851 igt_spinner_end(&a.spin);
852 intel_gt_set_wedged(&i915->gt);
853 err = -EIO;
854 goto err_client_b;
855 }
856
857 static int live_suppress_self_preempt(void *arg)
858 {
859 struct drm_i915_private *i915 = arg;
860 struct intel_engine_cs *engine;
861 struct i915_sched_attr attr = {
862 .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
863 };
864 struct preempt_client a, b;
865 enum intel_engine_id id;
866 intel_wakeref_t wakeref;
867 int err = -ENOMEM;
868
869 /*
870 * Verify that if a preemption request does not cause a change in
871 * the current execution order, the preempt-to-idle injection is
872 * skipped and that we do not accidentally apply it after the
873 * preemption completes.
874 */
875
876 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
877 return 0;
878
879 if (USES_GUC_SUBMISSION(i915))
880 return 0;
881
882 if (intel_vgpu_active(i915))
883 return 0;
884
885 mutex_lock(&i915->drm.struct_mutex);
886 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
887
888 if (preempt_client_init(i915, &a))
889 goto err_unlock;
890 if (preempt_client_init(i915, &b))
891 goto err_client_a;
892
893 for_each_engine(engine, i915, id) {
894 struct i915_request *rq_a, *rq_b;
895 int depth;
896
897 if (!intel_engine_has_preemption(engine))
898 continue;
899
900 engine->execlists.preempt_hang.count = 0;
901
902 rq_a = spinner_create_request(&a.spin,
903 a.ctx, engine,
904 MI_NOOP);
905 if (IS_ERR(rq_a)) {
906 err = PTR_ERR(rq_a);
907 goto err_client_b;
908 }
909
910 i915_request_add(rq_a);
911 if (!igt_wait_for_spinner(&a.spin, rq_a)) {
912 pr_err("First client failed to start\n");
913 goto err_wedged;
914 }
915
916 /* Keep postponing the timer to avoid premature slicing */
917 mod_timer(&engine->execlists.timer, jiffies + HZ);
918 for (depth = 0; depth < 8; depth++) {
919 rq_b = spinner_create_request(&b.spin,
920 b.ctx, engine,
921 MI_NOOP);
922 if (IS_ERR(rq_b)) {
923 err = PTR_ERR(rq_b);
924 goto err_client_b;
925 }
926 i915_request_add(rq_b);
927
928 GEM_BUG_ON(i915_request_completed(rq_a));
929 engine->schedule(rq_a, &attr);
930 igt_spinner_end(&a.spin);
931
932 if (!igt_wait_for_spinner(&b.spin, rq_b)) {
933 pr_err("Second client failed to start\n");
934 goto err_wedged;
935 }
936
937 swap(a, b);
938 rq_a = rq_b;
939 }
940 igt_spinner_end(&a.spin);
941
942 if (engine->execlists.preempt_hang.count) {
943 pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
944 engine->name,
945 engine->execlists.preempt_hang.count,
946 depth);
947 err = -EINVAL;
948 goto err_client_b;
949 }
950
951 if (igt_flush_test(i915, I915_WAIT_LOCKED))
952 goto err_wedged;
953 }
954
955 err = 0;
956 err_client_b:
957 preempt_client_fini(&b);
958 err_client_a:
959 preempt_client_fini(&a);
960 err_unlock:
961 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
962 mutex_unlock(&i915->drm.struct_mutex);
963 return err;
964
965 err_wedged:
966 igt_spinner_end(&b.spin);
967 igt_spinner_end(&a.spin);
968 intel_gt_set_wedged(&i915->gt);
969 err = -EIO;
970 goto err_client_b;
971 }
972
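/*
 * dummy_request: hand-roll a minimal i915_request that bypasses the normal
 * submission path and is marked permanently incomplete. It is installed
 * below as the timeline's last_request before the real spinners are added,
 * so that each spinner sees an unfinished predecessor on its timeline.
 */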
973 static int __i915_sw_fence_call
974 dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
975 {
976 return NOTIFY_DONE;
977 }
978
979 static struct i915_request *dummy_request(struct intel_engine_cs *engine)
980 {
981 struct i915_request *rq;
982
983 rq = kzalloc(sizeof(*rq), GFP_KERNEL);
984 if (!rq)
985 return NULL;
986
987 INIT_LIST_HEAD(&rq->active_list);
988 rq->engine = engine;
989
990 i915_sched_node_init(&rq->sched);
991
992 /* Mark this request as permanently incomplete */
993 rq->fence.seqno = 1;
994 BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8);
995 rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
996 GEM_BUG_ON(i915_request_completed(rq));
997
998 i915_sw_fence_init(&rq->submit, dummy_notify);
999 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
1000
1001 spin_lock_init(&rq->lock);
1002 rq->fence.lock = &rq->lock;
1003 INIT_LIST_HEAD(&rq->fence.cb_list);
1004
1005 return rq;
1006 }
1007
1008 static void dummy_request_free(struct i915_request *dummy)
1009 {
1010 /* Flush the fake submit fence so anything waiting on the dummy is released */
1011 i915_sw_fence_commit(&dummy->submit);
1012
1013 i915_request_mark_complete(dummy);
1014 dma_fence_signal(&dummy->fence);
1015
1016 i915_sched_node_fini(&dummy->sched);
1017 i915_sw_fence_fini(&dummy->submit);
1018
1019 dma_fence_free(&dummy->fence);
1020 }
1021
1022 static int live_suppress_wait_preempt(void *arg)
1023 {
1024 struct drm_i915_private *i915 = arg;
1025 struct preempt_client client[4];
1026 struct intel_engine_cs *engine;
1027 enum intel_engine_id id;
1028 intel_wakeref_t wakeref;
1029 int err = -ENOMEM;
1030 int i;
1031
1032 /*
1033 * Waiters are given a little priority nudge, but not enough to
1034 * actually cause any preemption; double check that we do not
1035 * needlessly generate preempt-to-idle cycles.
1036 */
1037
1038 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
1039 return 0;
1040
1041 mutex_lock(&i915->drm.struct_mutex);
1042 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1043
1044 if (preempt_client_init(i915, &client[0]))
1045 goto err_unlock;
1046 if (preempt_client_init(i915, &client[1]))
1047 goto err_client_0;
1048 if (preempt_client_init(i915, &client[2]))
1049 goto err_client_1;
1050 if (preempt_client_init(i915, &client[3]))
1051 goto err_client_2;
1052
1053 for_each_engine(engine, i915, id) {
1054 int depth;
1055
1056 if (!intel_engine_has_preemption(engine))
1057 continue;
1058
1059 if (!engine->emit_init_breadcrumb)
1060 continue;
1061
1062 for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
1063 struct i915_request *rq[ARRAY_SIZE(client)];
1064 struct i915_request *dummy;
1065
1066 engine->execlists.preempt_hang.count = 0;
1067
1068 dummy = dummy_request(engine);
1069 if (!dummy)
1070 goto err_client_3;
1071
1072 for (i = 0; i < ARRAY_SIZE(client); i++) {
1073 rq[i] = spinner_create_request(&client[i].spin,
1074 client[i].ctx, engine,
1075 MI_NOOP);
1076 if (IS_ERR(rq[i])) {
1077 err = PTR_ERR(rq[i]);
1078 goto err_wedged;
1079 }
1080
1081 /* Give each spinner an unfinished predecessor on its timeline */
1082 __i915_active_request_set(&rq[i]->timeline->last_request,
1083 dummy);
1084 i915_request_add(rq[i]);
1085 }
1086
1087 dummy_request_free(dummy);
1088
1089 GEM_BUG_ON(i915_request_completed(rq[0]));
1090 if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
1091 pr_err("%s: First client failed to start\n",
1092 engine->name);
1093 goto err_wedged;
1094 }
1095 GEM_BUG_ON(!i915_request_started(rq[0]));
1096
1097 if (i915_request_wait(rq[depth],
1098 I915_WAIT_PRIORITY,
1099 1) != -ETIME) {
1100 pr_err("%s: Waiter depth:%d completed!\n",
1101 engine->name, depth);
1102 goto err_wedged;
1103 }
1104
1105 for (i = 0; i < ARRAY_SIZE(client); i++)
1106 igt_spinner_end(&client[i].spin);
1107
1108 if (igt_flush_test(i915, I915_WAIT_LOCKED))
1109 goto err_wedged;
1110
1111 if (engine->execlists.preempt_hang.count) {
1112 pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
1113 engine->name,
1114 engine->execlists.preempt_hang.count,
1115 depth);
1116 err = -EINVAL;
1117 goto err_client_3;
1118 }
1119 }
1120 }
1121
1122 err = 0;
1123 err_client_3:
1124 preempt_client_fini(&client[3]);
1125 err_client_2:
1126 preempt_client_fini(&client[2]);
1127 err_client_1:
1128 preempt_client_fini(&client[1]);
1129 err_client_0:
1130 preempt_client_fini(&client[0]);
1131 err_unlock:
1132 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1133 mutex_unlock(&i915->drm.struct_mutex);
1134 return err;
1135
1136 err_wedged:
1137 for (i = 0; i < ARRAY_SIZE(client); i++)
1138 igt_spinner_end(&client[i].spin);
1139 intel_gt_set_wedged(&i915->gt);
1140 err = -EIO;
1141 goto err_client_3;
1142 }
1143
1144 static int live_chain_preempt(void *arg)
1145 {
1146 struct drm_i915_private *i915 = arg;
1147 struct intel_engine_cs *engine;
1148 struct preempt_client hi, lo;
1149 enum intel_engine_id id;
1150 intel_wakeref_t wakeref;
1151 int err = -ENOMEM;
1152
1153 /*
1154 * Build a long chain of requests from the low priority client, capped
1155 * by a single maximum priority request, and check that the scheduler
1156 * can preempt over the whole chain regardless of its length.
1157 */
1158
1159 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
1160 return 0;
1161
1162 mutex_lock(&i915->drm.struct_mutex);
1163 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1164
1165 if (preempt_client_init(i915, &hi))
1166 goto err_unlock;
1167
1168 if (preempt_client_init(i915, &lo))
1169 goto err_client_hi;
1170
1171 for_each_engine(engine, i915, id) {
1172 struct i915_sched_attr attr = {
1173 .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
1174 };
1175 struct igt_live_test t;
1176 struct i915_request *rq;
1177 int ring_size, count, i;
1178
1179 if (!intel_engine_has_preemption(engine))
1180 continue;
1181
1182 rq = spinner_create_request(&lo.spin,
1183 lo.ctx, engine,
1184 MI_ARB_CHECK);
1185 if (IS_ERR(rq))
1186 goto err_wedged;
1187 i915_request_add(rq);
1188
1189 ring_size = rq->wa_tail - rq->head;
1190 if (ring_size < 0)
1191 ring_size += rq->ring->size;
1192 ring_size = rq->ring->size / ring_size;
1193 pr_debug("%s(%s): Using maximum of %d requests\n",
1194 __func__, engine->name, ring_size);
1195
1196 igt_spinner_end(&lo.spin);
1197 if (i915_request_wait(rq, 0, HZ / 2) < 0) {
1198 pr_err("Timed out waiting to flush %s\n", engine->name);
1199 goto err_wedged;
1200 }
1201
1202 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
1203 err = -EIO;
1204 goto err_wedged;
1205 }
1206
1207 for_each_prime_number_from(count, 1, ring_size) {
1208 rq = spinner_create_request(&hi.spin,
1209 hi.ctx, engine,
1210 MI_ARB_CHECK);
1211 if (IS_ERR(rq))
1212 goto err_wedged;
1213 i915_request_add(rq);
1214 if (!igt_wait_for_spinner(&hi.spin, rq))
1215 goto err_wedged;
1216
1217 rq = spinner_create_request(&lo.spin,
1218 lo.ctx, engine,
1219 MI_ARB_CHECK);
1220 if (IS_ERR(rq))
1221 goto err_wedged;
1222 i915_request_add(rq);
1223
1224 for (i = 0; i < count; i++) {
1225 rq = igt_request_alloc(lo.ctx, engine);
1226 if (IS_ERR(rq))
1227 goto err_wedged;
1228 i915_request_add(rq);
1229 }
1230
1231 rq = igt_request_alloc(hi.ctx, engine);
1232 if (IS_ERR(rq))
1233 goto err_wedged;
1234 i915_request_add(rq);
1235 engine->schedule(rq, &attr);
1236
1237 igt_spinner_end(&hi.spin);
1238 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1239 struct drm_printer p =
1240 drm_info_printer(i915->drm.dev);
1241
1242 pr_err("Failed to preempt over chain of %d\n",
1243 count);
1244 intel_engine_dump(engine, &p,
1245 "%s\n", engine->name);
1246 goto err_wedged;
1247 }
1248 igt_spinner_end(&lo.spin);
1249
1250 rq = igt_request_alloc(lo.ctx, engine);
1251 if (IS_ERR(rq))
1252 goto err_wedged;
1253 i915_request_add(rq);
1254 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1255 struct drm_printer p =
1256 drm_info_printer(i915->drm.dev);
1257
1258 pr_err("Failed to flush low priority chain of %d requests\n",
1259 count);
1260 intel_engine_dump(engine, &p,
1261 "%s\n", engine->name);
1262 goto err_wedged;
1263 }
1264 }
1265
1266 if (igt_live_test_end(&t)) {
1267 err = -EIO;
1268 goto err_wedged;
1269 }
1270 }
1271
1272 err = 0;
1273 err_client_lo:
1274 preempt_client_fini(&lo);
1275 err_client_hi:
1276 preempt_client_fini(&hi);
1277 err_unlock:
1278 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1279 mutex_unlock(&i915->drm.struct_mutex);
1280 return err;
1281
1282 err_wedged:
1283 igt_spinner_end(&hi.spin);
1284 igt_spinner_end(&lo.spin);
1285 intel_gt_set_wedged(&i915->gt);
1286 err = -EIO;
1287 goto err_client_lo;
1288 }
1289
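/*
 * live_preempt_hang: arm the execlists preempt_hang injection so that the
 * next preemption attempt deliberately stalls, wait for the injection to
 * fire, recover the engine with a per-engine reset, and then check that
 * the high priority spinner gets to run once preemption resumes.
 */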
1290 static int live_preempt_hang(void *arg)
1291 {
1292 struct drm_i915_private *i915 = arg;
1293 struct i915_gem_context *ctx_hi, *ctx_lo;
1294 struct igt_spinner spin_hi, spin_lo;
1295 struct intel_engine_cs *engine;
1296 enum intel_engine_id id;
1297 intel_wakeref_t wakeref;
1298 int err = -ENOMEM;
1299
1300 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
1301 return 0;
1302
1303 if (!intel_has_reset_engine(i915))
1304 return 0;
1305
1306 mutex_lock(&i915->drm.struct_mutex);
1307 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1308
1309 if (igt_spinner_init(&spin_hi, &i915->gt))
1310 goto err_unlock;
1311
1312 if (igt_spinner_init(&spin_lo, &i915->gt))
1313 goto err_spin_hi;
1314
1315 ctx_hi = kernel_context(i915);
1316 if (!ctx_hi)
1317 goto err_spin_lo;
1318 ctx_hi->sched.priority =
1319 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
1320
1321 ctx_lo = kernel_context(i915);
1322 if (!ctx_lo)
1323 goto err_ctx_hi;
1324 ctx_lo->sched.priority =
1325 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
1326
1327 for_each_engine(engine, i915, id) {
1328 struct i915_request *rq;
1329
1330 if (!intel_engine_has_preemption(engine))
1331 continue;
1332
1333 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
1334 MI_ARB_CHECK);
1335 if (IS_ERR(rq)) {
1336 err = PTR_ERR(rq);
1337 goto err_ctx_lo;
1338 }
1339
1340 i915_request_add(rq);
1341 if (!igt_wait_for_spinner(&spin_lo, rq)) {
1342 GEM_TRACE("lo spinner failed to start\n");
1343 GEM_TRACE_DUMP();
1344 intel_gt_set_wedged(&i915->gt);
1345 err = -EIO;
1346 goto err_ctx_lo;
1347 }
1348
1349 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
1350 MI_ARB_CHECK);
1351 if (IS_ERR(rq)) {
1352 igt_spinner_end(&spin_lo);
1353 err = PTR_ERR(rq);
1354 goto err_ctx_lo;
1355 }
1356
1357 init_completion(&engine->execlists.preempt_hang.completion);
1358 engine->execlists.preempt_hang.inject_hang = true;
1359
1360 i915_request_add(rq);
1361
1362 if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
1363 HZ / 10)) {
1364 pr_err("Preemption did not occur within timeout!");
1365 GEM_TRACE_DUMP();
1366 intel_gt_set_wedged(&i915->gt);
1367 err = -EIO;
1368 goto err_ctx_lo;
1369 }
1370
1371 set_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
1372 intel_engine_reset(engine, NULL);
1373 clear_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
1374
1375 engine->execlists.preempt_hang.inject_hang = false;
1376
1377 if (!igt_wait_for_spinner(&spin_hi, rq)) {
1378 GEM_TRACE("hi spinner failed to start\n");
1379 GEM_TRACE_DUMP();
1380 intel_gt_set_wedged(&i915->gt);
1381 err = -EIO;
1382 goto err_ctx_lo;
1383 }
1384
1385 igt_spinner_end(&spin_hi);
1386 igt_spinner_end(&spin_lo);
1387 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
1388 err = -EIO;
1389 goto err_ctx_lo;
1390 }
1391 }
1392
1393 err = 0;
1394 err_ctx_lo:
1395 kernel_context_close(ctx_lo);
1396 err_ctx_hi:
1397 kernel_context_close(ctx_hi);
1398 err_spin_lo:
1399 igt_spinner_fini(&spin_lo);
1400 err_spin_hi:
1401 igt_spinner_fini(&spin_hi);
1402 err_unlock:
1403 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1404 mutex_unlock(&i915->drm.struct_mutex);
1405 return err;
1406 }
1407
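/*
 * Preemption smoke test helpers. struct preempt_smoke carries the shared
 * state: a pool of kernel contexts, an optional MI_ARB_CHECK batch and a
 * PRNG used to pick contexts and priorities, so that every submission
 * gives the scheduler fresh reordering work to do.
 */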
1408 static int random_range(struct rnd_state *rnd, int min, int max)
1409 {
1410 return i915_prandom_u32_max_state(max - min, rnd) + min;
1411 }
1412
1413 static int random_priority(struct rnd_state *rnd)
1414 {
1415 return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
1416 }
1417
1418 struct preempt_smoke {
1419 struct drm_i915_private *i915;
1420 struct i915_gem_context **contexts;
1421 struct intel_engine_cs *engine;
1422 struct drm_i915_gem_object *batch;
1423 unsigned int ncontext;
1424 struct rnd_state prng;
1425 unsigned long count;
1426 };
1427
1428 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
1429 {
1430 return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
1431 &smoke->prng)];
1432 }
1433
1434 static int smoke_submit(struct preempt_smoke *smoke,
1435 struct i915_gem_context *ctx, int prio,
1436 struct drm_i915_gem_object *batch)
1437 {
1438 struct i915_request *rq;
1439 struct i915_vma *vma = NULL;
1440 int err = 0;
1441
1442 if (batch) {
1443 vma = i915_vma_instance(batch, ctx->vm, NULL);
1444 if (IS_ERR(vma))
1445 return PTR_ERR(vma);
1446
1447 err = i915_vma_pin(vma, 0, 0, PIN_USER);
1448 if (err)
1449 return err;
1450 }
1451
1452 ctx->sched.priority = prio;
1453
1454 rq = igt_request_alloc(ctx, smoke->engine);
1455 if (IS_ERR(rq)) {
1456 err = PTR_ERR(rq);
1457 goto unpin;
1458 }
1459
1460 if (vma) {
1461 i915_vma_lock(vma);
1462 err = i915_request_await_object(rq, vma->obj, false);
1463 if (!err)
1464 err = i915_vma_move_to_active(vma, rq, 0);
1465 if (!err)
1466 err = rq->engine->emit_bb_start(rq,
1467 vma->node.start,
1468 PAGE_SIZE, 0);
1469 i915_vma_unlock(vma);
1470 }
1471
1472 i915_request_add(rq);
1473
1474 unpin:
1475 if (vma)
1476 i915_vma_unpin(vma);
1477
1478 return err;
1479 }
1480
1481 static int smoke_crescendo_thread(void *arg)
1482 {
1483 struct preempt_smoke *smoke = arg;
1484 IGT_TIMEOUT(end_time);
1485 unsigned long count;
1486
1487 count = 0;
1488 do {
1489 struct i915_gem_context *ctx = smoke_context(smoke);
1490 int err;
1491
1492 mutex_lock(&smoke->i915->drm.struct_mutex);
1493 err = smoke_submit(smoke,
1494 ctx, count % I915_PRIORITY_MAX,
1495 smoke->batch);
1496 mutex_unlock(&smoke->i915->drm.struct_mutex);
1497 if (err)
1498 return err;
1499
1500 count++;
1501 } while (!__igt_timeout(end_time, NULL));
1502
1503 smoke->count = count;
1504 return 0;
1505 }
1506
1507 static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
1508 #define BATCH BIT(0)
1509 {
1510 struct task_struct *tsk[I915_NUM_ENGINES] = {};
1511 struct preempt_smoke arg[I915_NUM_ENGINES];
1512 struct intel_engine_cs *engine;
1513 enum intel_engine_id id;
1514 unsigned long count;
1515 int err = 0;
1516
1517 mutex_unlock(&smoke->i915->drm.struct_mutex);
1518
1519 for_each_engine(engine, smoke->i915, id) {
1520 arg[id] = *smoke;
1521 arg[id].engine = engine;
1522 if (!(flags & BATCH))
1523 arg[id].batch = NULL;
1524 arg[id].count = 0;
1525
1526 tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
1527 "igt/smoke:%d", id);
1528 if (IS_ERR(tsk[id])) {
1529 err = PTR_ERR(tsk[id]);
1530 break;
1531 }
1532 get_task_struct(tsk[id]);
1533 }
1534
1535 count = 0;
1536 for_each_engine(engine, smoke->i915, id) {
1537 int status;
1538
1539 if (IS_ERR_OR_NULL(tsk[id]))
1540 continue;
1541
1542 status = kthread_stop(tsk[id]);
1543 if (status && !err)
1544 err = status;
1545
1546 count += arg[id].count;
1547
1548 put_task_struct(tsk[id]);
1549 }
1550
1551 mutex_lock(&smoke->i915->drm.struct_mutex);
1552
1553 pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
1554 count, flags,
1555 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1556 return err;
1557 }
1558
1559 static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
1560 {
1561 enum intel_engine_id id;
1562 IGT_TIMEOUT(end_time);
1563 unsigned long count;
1564
1565 count = 0;
1566 do {
1567 for_each_engine(smoke->engine, smoke->i915, id) {
1568 struct i915_gem_context *ctx = smoke_context(smoke);
1569 int err;
1570
1571 err = smoke_submit(smoke,
1572 ctx, random_priority(&smoke->prng),
1573 flags & BATCH ? smoke->batch : NULL);
1574 if (err)
1575 return err;
1576
1577 count++;
1578 }
1579 } while (!__igt_timeout(end_time, NULL));
1580
1581 pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
1582 count, flags,
1583 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1584 return 0;
1585 }
1586
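/*
 * live_preempt_smoke: build a page full of MI_ARB_CHECK instructions as a
 * preemptible dummy batch, create 1024 kernel contexts, and run both the
 * crescendo phase (per-engine threads with steadily climbing priority) and
 * the random phase, with and without the batch attached.
 */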
1587 static int live_preempt_smoke(void *arg)
1588 {
1589 struct preempt_smoke smoke = {
1590 .i915 = arg,
1591 .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
1592 .ncontext = 1024,
1593 };
1594 const unsigned int phase[] = { 0, BATCH };
1595 intel_wakeref_t wakeref;
1596 struct igt_live_test t;
1597 int err = -ENOMEM;
1598 u32 *cs;
1599 int n;
1600
1601 if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
1602 return 0;
1603
1604 smoke.contexts = kmalloc_array(smoke.ncontext,
1605 sizeof(*smoke.contexts),
1606 GFP_KERNEL);
1607 if (!smoke.contexts)
1608 return -ENOMEM;
1609
1610 mutex_lock(&smoke.i915->drm.struct_mutex);
1611 wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);
1612
1613 smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
1614 if (IS_ERR(smoke.batch)) {
1615 err = PTR_ERR(smoke.batch);
1616 goto err_unlock;
1617 }
1618
1619 cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
1620 if (IS_ERR(cs)) {
1621 err = PTR_ERR(cs);
1622 goto err_batch;
1623 }
1624 for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
1625 cs[n] = MI_ARB_CHECK;
1626 cs[n] = MI_BATCH_BUFFER_END;
1627 i915_gem_object_flush_map(smoke.batch);
1628 i915_gem_object_unpin_map(smoke.batch);
1629
1630 if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
1631 err = -EIO;
1632 goto err_batch;
1633 }
1634
1635 for (n = 0; n < smoke.ncontext; n++) {
1636 smoke.contexts[n] = kernel_context(smoke.i915);
1637 if (!smoke.contexts[n])
1638 goto err_ctx;
1639 }
1640
1641 for (n = 0; n < ARRAY_SIZE(phase); n++) {
1642 err = smoke_crescendo(&smoke, phase[n]);
1643 if (err)
1644 goto err_ctx;
1645
1646 err = smoke_random(&smoke, phase[n]);
1647 if (err)
1648 goto err_ctx;
1649 }
1650
1651 err_ctx:
1652 if (igt_live_test_end(&t))
1653 err = -EIO;
1654
1655 for (n = 0; n < smoke.ncontext; n++) {
1656 if (!smoke.contexts[n])
1657 break;
1658 kernel_context_close(smoke.contexts[n]);
1659 }
1660
1661 err_batch:
1662 i915_gem_object_put(smoke.batch);
1663 err_unlock:
1664 intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
1665 mutex_unlock(&smoke.i915->drm.struct_mutex);
1666 kfree(smoke.contexts);
1667
1668 return err;
1669 }
1670
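/*
 * nop_virtual_engine: create nctx virtual engines over the given siblings
 * and submit batches of empty requests to them (per-context chains when
 * CHAIN is set, interleaved otherwise), measuring the wall-clock latency
 * for increasing, prime-sized batches.
 */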
1671 static int nop_virtual_engine(struct drm_i915_private *i915,
1672 struct intel_engine_cs **siblings,
1673 unsigned int nsibling,
1674 unsigned int nctx,
1675 unsigned int flags)
1676 #define CHAIN BIT(0)
1677 {
1678 IGT_TIMEOUT(end_time);
1679 struct i915_request *request[16];
1680 struct i915_gem_context *ctx[16];
1681 struct intel_context *ve[16];
1682 unsigned long n, prime, nc;
1683 struct igt_live_test t;
1684 ktime_t times[2] = {};
1685 int err;
1686
1687 GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
1688
1689 for (n = 0; n < nctx; n++) {
1690 ctx[n] = kernel_context(i915);
1691 if (!ctx[n]) {
1692 err = -ENOMEM;
1693 nctx = n;
1694 goto out;
1695 }
1696
1697 ve[n] = intel_execlists_create_virtual(ctx[n],
1698 siblings, nsibling);
1699 if (IS_ERR(ve[n])) {
1700 kernel_context_close(ctx[n]);
1701 err = PTR_ERR(ve[n]);
1702 nctx = n;
1703 goto out;
1704 }
1705
1706 err = intel_context_pin(ve[n]);
1707 if (err) {
1708 intel_context_put(ve[n]);
1709 kernel_context_close(ctx[n]);
1710 nctx = n;
1711 goto out;
1712 }
1713 }
1714
1715 err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
1716 if (err)
1717 goto out;
1718
1719 for_each_prime_number_from(prime, 1, 8192) {
1720 times[1] = ktime_get_raw();
1721
1722 if (flags & CHAIN) {
1723 for (nc = 0; nc < nctx; nc++) {
1724 for (n = 0; n < prime; n++) {
1725 request[nc] =
1726 i915_request_create(ve[nc]);
1727 if (IS_ERR(request[nc])) {
1728 err = PTR_ERR(request[nc]);
1729 goto out;
1730 }
1731
1732 i915_request_add(request[nc]);
1733 }
1734 }
1735 } else {
1736 for (n = 0; n < prime; n++) {
1737 for (nc = 0; nc < nctx; nc++) {
1738 request[nc] =
1739 i915_request_create(ve[nc]);
1740 if (IS_ERR(request[nc])) {
1741 err = PTR_ERR(request[nc]);
1742 goto out;
1743 }
1744
1745 i915_request_add(request[nc]);
1746 }
1747 }
1748 }
1749
1750 for (nc = 0; nc < nctx; nc++) {
1751 if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
1752 pr_err("%s(%s): wait for %llx:%lld timed out\n",
1753 __func__, ve[0]->engine->name,
1754 request[nc]->fence.context,
1755 request[nc]->fence.seqno);
1756
1757 GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
1758 __func__, ve[0]->engine->name,
1759 request[nc]->fence.context,
1760 request[nc]->fence.seqno);
1761 GEM_TRACE_DUMP();
1762 intel_gt_set_wedged(&i915->gt);
1763 break;
1764 }
1765 }
1766
1767 times[1] = ktime_sub(ktime_get_raw(), times[1]);
1768 if (prime == 1)
1769 times[0] = times[1];
1770
1771 if (__igt_timeout(end_time, NULL))
1772 break;
1773 }
1774
1775 err = igt_live_test_end(&t);
1776 if (err)
1777 goto out;
1778
1779 pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
1780 nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
1781 prime, div64_u64(ktime_to_ns(times[1]), prime));
1782
1783 out:
1784 if (igt_flush_test(i915, I915_WAIT_LOCKED))
1785 err = -EIO;
1786
1787 for (nc = 0; nc < nctx; nc++) {
1788 intel_context_unpin(ve[nc]);
1789 intel_context_put(ve[nc]);
1790 kernel_context_close(ctx[nc]);
1791 }
1792 return err;
1793 }
1794
1795 static int live_virtual_engine(void *arg)
1796 {
1797 struct drm_i915_private *i915 = arg;
1798 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
1799 struct intel_engine_cs *engine;
1800 struct intel_gt *gt = &i915->gt;
1801 enum intel_engine_id id;
1802 unsigned int class, inst;
1803 int err = -ENODEV;
1804
1805 if (USES_GUC_SUBMISSION(i915))
1806 return 0;
1807
1808 mutex_lock(&i915->drm.struct_mutex);
1809
1810 for_each_engine(engine, i915, id) {
1811 err = nop_virtual_engine(i915, &engine, 1, 1, 0);
1812 if (err) {
1813 pr_err("Failed to wrap engine %s: err=%d\n",
1814 engine->name, err);
1815 goto out_unlock;
1816 }
1817 }
1818
1819 for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
1820 int nsibling, n;
1821
1822 nsibling = 0;
1823 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
1824 if (!gt->engine_class[class][inst])
1825 continue;
1826
1827 siblings[nsibling++] = gt->engine_class[class][inst];
1828 }
1829 if (nsibling < 2)
1830 continue;
1831
1832 for (n = 1; n <= nsibling + 1; n++) {
1833 err = nop_virtual_engine(i915, siblings, nsibling,
1834 n, 0);
1835 if (err)
1836 goto out_unlock;
1837 }
1838
1839 err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
1840 if (err)
1841 goto out_unlock;
1842 }
1843
1844 out_unlock:
1845 mutex_unlock(&i915->drm.struct_mutex);
1846 return err;
1847 }
1848
1849 static int mask_virtual_engine(struct drm_i915_private *i915,
1850 struct intel_engine_cs **siblings,
1851 unsigned int nsibling)
1852 {
1853 struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
1854 struct i915_gem_context *ctx;
1855 struct intel_context *ve;
1856 struct igt_live_test t;
1857 unsigned int n;
1858 int err;
1859
1860 /*
1861 * Check that by setting the execution mask on a request, we can
1862 * restrict it to our desired engine within the virtual engine.
1863 */
1864
1865 ctx = kernel_context(i915);
1866 if (!ctx)
1867 return -ENOMEM;
1868
1869 ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
1870 if (IS_ERR(ve)) {
1871 err = PTR_ERR(ve);
1872 goto out_close;
1873 }
1874
1875 err = intel_context_pin(ve);
1876 if (err)
1877 goto out_put;
1878
1879 err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
1880 if (err)
1881 goto out_unpin;
1882
1883 for (n = 0; n < nsibling; n++) {
1884 request[n] = i915_request_create(ve);
1885 if (IS_ERR(request[n])) {
1886 err = PTR_ERR(request[n]);
1887 nsibling = n;
1888 goto out;
1889 }
1890
1891 /* Reverse order as it's more likely to be unnatural */
1892 request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
1893
1894 i915_request_get(request[n]);
1895 i915_request_add(request[n]);
1896 }
1897
1898 for (n = 0; n < nsibling; n++) {
1899 if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
1900 pr_err("%s(%s): wait for %llx:%lld timed out\n",
1901 __func__, ve->engine->name,
1902 request[n]->fence.context,
1903 request[n]->fence.seqno);
1904
1905 GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
1906 __func__, ve->engine->name,
1907 request[n]->fence.context,
1908 request[n]->fence.seqno);
1909 GEM_TRACE_DUMP();
1910 intel_gt_set_wedged(&i915->gt);
1911 err = -EIO;
1912 goto out;
1913 }
1914
1915 if (request[n]->engine != siblings[nsibling - n - 1]) {
1916 pr_err("Executed on wrong sibling '%s', expected '%s'\n",
1917 request[n]->engine->name,
1918 siblings[nsibling - n - 1]->name);
1919 err = -EINVAL;
1920 goto out;
1921 }
1922 }
1923
1924 err = igt_live_test_end(&t);
1925 if (err)
1926 goto out;
1927
1928 out:
1929 if (igt_flush_test(i915, I915_WAIT_LOCKED))
1930 err = -EIO;
1931
1932 for (n = 0; n < nsibling; n++)
1933 i915_request_put(request[n]);
1934
1935 out_unpin:
1936 intel_context_unpin(ve);
1937 out_put:
1938 intel_context_put(ve);
1939 out_close:
1940 kernel_context_close(ctx);
1941 return err;
1942 }
1943
1944 static int live_virtual_mask(void *arg)
1945 {
1946 struct drm_i915_private *i915 = arg;
1947 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
1948 struct intel_gt *gt = &i915->gt;
1949 unsigned int class, inst;
1950 int err = 0;
1951
1952 if (USES_GUC_SUBMISSION(i915))
1953 return 0;
1954
1955 mutex_lock(&i915->drm.struct_mutex);
1956
1957 for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
1958 unsigned int nsibling;
1959
1960 nsibling = 0;
1961 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
1962 if (!gt->engine_class[class][inst])
1963 break;
1964
1965 siblings[nsibling++] = gt->engine_class[class][inst];
1966 }
1967 if (nsibling < 2)
1968 continue;
1969
1970 err = mask_virtual_engine(i915, siblings, nsibling);
1971 if (err)
1972 goto out_unlock;
1973 }
1974
1975 out_unlock:
1976 mutex_unlock(&i915->drm.struct_mutex);
1977 return err;
1978 }
1979
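/*
 * bond_virtual_engine: for each possible master engine outside the target
 * class, submit a master request (optionally held back behind an external
 * fence when BOND_SCHEDULE is set) and one bonded request per sibling via a
 * virtual engine. Each bonded request must execute on exactly the sibling
 * it was bonded to, once the master has run.
 */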
1980 static int bond_virtual_engine(struct drm_i915_private *i915,
1981 unsigned int class,
1982 struct intel_engine_cs **siblings,
1983 unsigned int nsibling,
1984 unsigned int flags)
1985 #define BOND_SCHEDULE BIT(0)
1986 {
1987 struct intel_engine_cs *master;
1988 struct i915_gem_context *ctx;
1989 struct i915_request *rq[16];
1990 enum intel_engine_id id;
1991 unsigned long n;
1992 int err;
1993
1994 GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
1995
1996 ctx = kernel_context(i915);
1997 if (!ctx)
1998 return -ENOMEM;
1999
2000 err = 0;
2001 rq[0] = ERR_PTR(-ENOMEM);
2002 for_each_engine(master, i915, id) {
2003 struct i915_sw_fence fence = {};
2004
2005 if (master->class == class)
2006 continue;
2007
2008 memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
2009
2010 rq[0] = igt_request_alloc(ctx, master);
2011 if (IS_ERR(rq[0])) {
2012 err = PTR_ERR(rq[0]);
2013 goto out;
2014 }
2015 i915_request_get(rq[0]);
2016
2017 if (flags & BOND_SCHEDULE) {
2018 onstack_fence_init(&fence);
2019 err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
2020 &fence,
2021 GFP_KERNEL);
2022 }
2023 i915_request_add(rq[0]);
2024 if (err < 0)
2025 goto out;
2026
2027 for (n = 0; n < nsibling; n++) {
2028 struct intel_context *ve;
2029
2030 ve = intel_execlists_create_virtual(ctx,
2031 siblings,
2032 nsibling);
2033 if (IS_ERR(ve)) {
2034 err = PTR_ERR(ve);
2035 onstack_fence_fini(&fence);
2036 goto out;
2037 }
2038
2039 err = intel_virtual_engine_attach_bond(ve->engine,
2040 master,
2041 siblings[n]);
2042 if (err) {
2043 intel_context_put(ve);
2044 onstack_fence_fini(&fence);
2045 goto out;
2046 }
2047
2048 err = intel_context_pin(ve);
2049 intel_context_put(ve);
2050 if (err) {
2051 onstack_fence_fini(&fence);
2052 goto out;
2053 }
2054
2055 rq[n + 1] = i915_request_create(ve);
2056 intel_context_unpin(ve);
2057 if (IS_ERR(rq[n + 1])) {
2058 err = PTR_ERR(rq[n + 1]);
2059 onstack_fence_fini(&fence);
2060 goto out;
2061 }
2062 i915_request_get(rq[n + 1]);
2063
2064 err = i915_request_await_execution(rq[n + 1],
2065 &rq[0]->fence,
2066 ve->engine->bond_execute);
2067 i915_request_add(rq[n + 1]);
2068 if (err < 0) {
2069 onstack_fence_fini(&fence);
2070 goto out;
2071 }
2072 }
2073 onstack_fence_fini(&fence);
2074
2075 if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
2076 pr_err("Master request did not execute (on %s)!\n",
2077 rq[0]->engine->name);
2078 err = -EIO;
2079 goto out;
2080 }
2081
2082 for (n = 0; n < nsibling; n++) {
2083 if (i915_request_wait(rq[n + 1], 0,
2084 MAX_SCHEDULE_TIMEOUT) < 0) {
2085 err = -EIO;
2086 goto out;
2087 }
2088
2089 if (rq[n + 1]->engine != siblings[n]) {
2090 pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
2091 siblings[n]->name,
2092 rq[n + 1]->engine->name,
2093 rq[0]->engine->name);
2094 err = -EINVAL;
2095 goto out;
2096 }
2097 }
2098
2099 for (n = 0; !IS_ERR(rq[n]); n++)
2100 i915_request_put(rq[n]);
2101 rq[0] = ERR_PTR(-ENOMEM);
2102 }
2103
2104 out:
2105 for (n = 0; !IS_ERR(rq[n]); n++)
2106 i915_request_put(rq[n]);
2107 if (igt_flush_test(i915, I915_WAIT_LOCKED))
2108 err = -EIO;
2109
2110 kernel_context_close(ctx);
2111 return err;
2112 }
2113
2114 static int live_virtual_bond(void *arg)
2115 {
2116 static const struct phase {
2117 const char *name;
2118 unsigned int flags;
2119 } phases[] = {
2120 { "", 0 },
2121 { "schedule", BOND_SCHEDULE },
2122 { },
2123 };
2124 struct drm_i915_private *i915 = arg;
2125 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
2126 struct intel_gt *gt = &i915->gt;
2127 unsigned int class, inst;
2128 int err = 0;
2129
2130 if (USES_GUC_SUBMISSION(i915))
2131 return 0;
2132
2133 mutex_lock(&i915->drm.struct_mutex);
2134
2135 for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
2136 const struct phase *p;
2137 int nsibling;
2138
2139 nsibling = 0;
2140 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
2141 if (!gt->engine_class[class][inst])
2142 break;
2143
2144 GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
2145 siblings[nsibling++] = gt->engine_class[class][inst];
2146 }
2147 if (nsibling < 2)
2148 continue;
2149
2150 for (p = phases; p->name; p++) {
2151 err = bond_virtual_engine(i915,
2152 class, siblings, nsibling,
2153 p->flags);
2154 if (err) {
2155 pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
2156 __func__, p->name, class, nsibling, err);
2157 goto out_unlock;
2158 }
2159 }
2160 }
2161
2162 out_unlock:
2163 mutex_unlock(&i915->drm.struct_mutex);
2164 return err;
2165 }
2166
2167 int intel_execlists_live_selftests(struct drm_i915_private *i915)
2168 {
2169 static const struct i915_subtest tests[] = {
2170 SUBTEST(live_sanitycheck),
2171 SUBTEST(live_timeslice_preempt),
2172 SUBTEST(live_busywait_preempt),
2173 SUBTEST(live_preempt),
2174 SUBTEST(live_late_preempt),
2175 SUBTEST(live_nopreempt),
2176 SUBTEST(live_suppress_self_preempt),
2177 SUBTEST(live_suppress_wait_preempt),
2178 SUBTEST(live_chain_preempt),
2179 SUBTEST(live_preempt_hang),
2180 SUBTEST(live_preempt_smoke),
2181 SUBTEST(live_virtual_engine),
2182 SUBTEST(live_virtual_mask),
2183 SUBTEST(live_virtual_bond),
2184 };
2185
2186 if (!HAS_EXECLISTS(i915))
2187 return 0;
2188
2189 if (intel_gt_is_wedged(&i915->gt))
2190 return 0;
2191
2192 return i915_live_subtests(tests, i915);
2193 }