This source file includes the following definitions:
- intel_ring_update_space
- gen2_render_ring_flush
- gen4_render_ring_flush
- gen6_emit_post_sync_nonzero_flush
- gen6_render_ring_flush
- gen6_rcs_emit_breadcrumb
- gen7_render_ring_cs_stall_wa
- gen7_render_ring_flush
- gen7_rcs_emit_breadcrumb
- gen6_xcs_emit_breadcrumb
- gen7_xcs_emit_breadcrumb
- set_hwstam
- set_hws_pga
- status_page
- ring_setup_phys_status_page
- set_hwsp
- flush_cs_tlb
- ring_setup_status_page
- stop_ring
- xcs_resume
- reset_prepare
- reset_ring
- reset_finish
- rcs_resume
- cancel_requests
- i9xx_submit_request
- i9xx_emit_breadcrumb
- gen5_emit_breadcrumb
- gen5_irq_enable
- gen5_irq_disable
- i9xx_irq_enable
- i9xx_irq_disable
- i8xx_irq_enable
- i8xx_irq_disable
- bsd_ring_flush
- gen6_irq_enable
- gen6_irq_disable
- hsw_vebox_irq_enable
- hsw_vebox_irq_disable
- i965_emit_bb_start
- i830_emit_bb_start
- i915_emit_bb_start
- intel_ring_pin
- intel_ring_reset
- intel_ring_unpin
- create_ring_vma
- intel_engine_create_ring
- intel_ring_free
- __ring_context_fini
- ring_context_destroy
- vm_alias
- __context_pin_ppgtt
- __context_unpin_ppgtt
- ring_context_unpin
- alloc_context_vma
- ring_context_alloc
- ring_context_pin
- ring_context_reset
- load_pd_dir
- flush_pd_dir
- mi_set_context
- remap_l3_slice
- remap_l3
- switch_context
- ring_request_alloc
- wait_for_space
- intel_ring_begin
- intel_ring_cacheline_align
- gen6_bsd_submit_request
- mi_flush_dw
- gen6_flush_dw
- gen6_bsd_ring_flush
- hsw_emit_bb_start
- gen6_emit_bb_start
- gen6_ring_flush
- i9xx_set_default_submission
- gen6_bsd_set_default_submission
- ring_destroy
- setup_irq
- setup_common
- setup_rcs
- setup_vcs
- setup_bcs
- setup_vecs
- intel_ring_submission_setup
- intel_ring_submission_init
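Together these implement the legacy ring-buffer submission backend used by gen2-gen7 engines (gen8+ parts are driven by the execlists backend instead).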
30 #include <linux/log2.h>
31
32 #include <drm/i915_drm.h>
33
34 #include "gem/i915_gem_context.h"
35
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_context.h"
39 #include "intel_gt.h"
40 #include "intel_gt_irq.h"
41 #include "intel_gt_pm_irq.h"
42 #include "intel_reset.h"
43 #include "intel_workarounds.h"
44
45 /* Rough estimate of the typical request size, performing a flush,
46 * set-context and then emitting the batch.
47 */
48 #define LEGACY_REQUEST_SIZE 200
49
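/*
 * Recompute the free space between the software emit pointer and the last
 * known hardware HEAD; __intel_ring_space() keeps a cacheline of slack so
 * that HEAD and TAIL never collide exactly.
 */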
50 unsigned int intel_ring_update_space(struct intel_ring *ring)
51 {
52 unsigned int space;
53
54 space = __intel_ring_space(ring->head, ring->emit, ring->size);
55
56 ring->space = space;
57 return space;
58 }
59
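/*
 * gen2 flush: MI_FLUSH, plus MI_READ_FLUSH when invalidating; for a write
 * flush a few MI_STORE_DWORD_IMM writes to the scratch page are emitted as
 * well, presumably to give the flush time to complete.
 */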
60 static int
61 gen2_render_ring_flush(struct i915_request *rq, u32 mode)
62 {
63 unsigned int num_store_dw;
64 u32 cmd, *cs;
65
66 cmd = MI_FLUSH;
67 num_store_dw = 0;
68 if (mode & EMIT_INVALIDATE)
69 cmd |= MI_READ_FLUSH;
70 if (mode & EMIT_FLUSH)
71 num_store_dw = 4;
72
73 cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
74 if (IS_ERR(cs))
75 return PTR_ERR(cs);
76
77 *cs++ = cmd;
78 while (num_store_dw--) {
79 *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
80 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
81 INTEL_GT_SCRATCH_FIELD_DEFAULT);
82 *cs++ = 0;
83 }
84 *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
85
86 intel_ring_advance(rq, cs);
87
88 return 0;
89 }
90
91 static int
92 gen4_render_ring_flush(struct i915_request *rq, u32 mode)
93 {
94 u32 cmd, *cs;
95 int i;
96
97 /*
98 * read/write caches: the render cache is always invalidated by
99 * MI_FLUSH and only flushed when MI_NO_WRITE_FLUSH is unset.
100 * Read-only caches (sampler, instruction, vertex) and their TLBs
101 * are invalidated depending on MI_READ_FLUSH / MI_EXE_FLUSH, so
102 * MI_EXE_FLUSH is set when the instruction cache must be cleared,
103 * and on g4x/ilk the ISP must be invalidated explicitly as well.
104 */
125 cmd = MI_FLUSH;
126 if (mode & EMIT_INVALIDATE) {
127 cmd |= MI_EXE_FLUSH;
128 if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
129 cmd |= MI_INVALIDATE_ISP;
130 }
131
132 i = 2;
133 if (mode & EMIT_INVALIDATE)
134 i += 20;
135
136 cs = intel_ring_begin(rq, i);
137 if (IS_ERR(cs))
138 return PTR_ERR(cs);
139
140 *cs++ = cmd;
141
142 /*
143 * For an invalidate, bracket a burst of MI_FLUSHes with two
144 * PIPE_CONTROL QW writes to the scratch page, seemingly to give
145 * the invalidation time to complete before the batch executes.
146 */
152 if (mode & EMIT_INVALIDATE) {
153 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
154 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
155 INTEL_GT_SCRATCH_FIELD_DEFAULT) |
156 PIPE_CONTROL_GLOBAL_GTT;
157 *cs++ = 0;
158 *cs++ = 0;
159
160 for (i = 0; i < 12; i++)
161 *cs++ = MI_FLUSH;
162
163 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
164 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
165 INTEL_GT_SCRATCH_FIELD_DEFAULT) |
166 PIPE_CONTROL_GLOBAL_GTT;
167 *cs++ = 0;
168 *cs++ = 0;
169 }
170
171 *cs++ = cmd;
172
173 intel_ring_advance(rq, cs);
174
175 return 0;
176 }
177
178 /*
179 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
180 * implementing two workarounds on gen6.  From section 1.4.7.1
181 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
182 *
183 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
184 * produced by non-pipelined state commands), software needs to first
185 * send a PIPE_CONTROL with no bits set except Post-Sync Operation != 0.
186 *
187 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
188 * = 1, a PIPE_CONTROL with any non-zero post-sync-op is required.
189 *
190 * And the workaround for these two requires this workaround first:
191 *
192 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
193 * BEFORE the pipe-control with a post-sync op and no write-cache
194 * flushes. Hence the pair of pipe controls emitted below.
195 */
215 static int
216 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
217 {
218 u32 scratch_addr =
219 intel_gt_scratch_offset(rq->engine->gt,
220 INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
221 u32 *cs;
222
223 cs = intel_ring_begin(rq, 6);
224 if (IS_ERR(cs))
225 return PTR_ERR(cs);
226
227 *cs++ = GFX_OP_PIPE_CONTROL(5);
228 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
229 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
230 *cs++ = 0;
231 *cs++ = 0;
232 *cs++ = MI_NOOP;
233 intel_ring_advance(rq, cs);
234
235 cs = intel_ring_begin(rq, 6);
236 if (IS_ERR(cs))
237 return PTR_ERR(cs);
238
239 *cs++ = GFX_OP_PIPE_CONTROL(5);
240 *cs++ = PIPE_CONTROL_QW_WRITE;
241 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
242 *cs++ = 0;
243 *cs++ = 0;
244 *cs++ = MI_NOOP;
245 intel_ring_advance(rq, cs);
246
247 return 0;
248 }
249
250 static int
251 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
252 {
253 u32 scratch_addr =
254 intel_gt_scratch_offset(rq->engine->gt,
255 INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
256 u32 *cs, flags = 0;
257 int ret;
258
259 /* Force SNB workarounds for PIPE_CONTROL flushes */
260 ret = gen6_emit_post_sync_nonzero_flush(rq);
261 if (ret)
262 return ret;
263
264 /* Just flush everything.  Experiments have shown that reducing the
265 * number of bits based on the write domains has little performance
266 * impact.
267 */
268 if (mode & EMIT_FLUSH) {
269 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
270 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
271 /*
272 * Ensure that any following seqno writes only happen
273 * when the render cache is indeed flushed.
274 */
275 flags |= PIPE_CONTROL_CS_STALL;
276 }
277 if (mode & EMIT_INVALIDATE) {
278 flags |= PIPE_CONTROL_TLB_INVALIDATE;
279 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
280 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
281 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
282 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
283 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
284 /*
285 * TLB invalidate requires a post-sync write.
286 */
287 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
288 }
289
290 cs = intel_ring_begin(rq, 4);
291 if (IS_ERR(cs))
292 return PTR_ERR(cs);
293
294 *cs++ = GFX_OP_PIPE_CONTROL(4);
295 *cs++ = flags;
296 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
297 *cs++ = 0;
298 intel_ring_advance(rq, cs);
299
300 return 0;
301 }
302
303 static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
304 {
305 /* First we do the gen6_emit_post_sync_nonzero_flush w/a */
306 *cs++ = GFX_OP_PIPE_CONTROL(4);
307 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
308 *cs++ = 0;
309 *cs++ = 0;
310
311 *cs++ = GFX_OP_PIPE_CONTROL(4);
312 *cs++ = PIPE_CONTROL_QW_WRITE;
313 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
314 INTEL_GT_SCRATCH_FIELD_DEFAULT) |
315 PIPE_CONTROL_GLOBAL_GTT;
316 *cs++ = 0;
317
318 /* Finally we can flush and with it emit the breadcrumb */
319 *cs++ = GFX_OP_PIPE_CONTROL(4);
320 *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
321 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
322 PIPE_CONTROL_DC_FLUSH_ENABLE |
323 PIPE_CONTROL_QW_WRITE |
324 PIPE_CONTROL_CS_STALL);
325 *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
326 *cs++ = rq->fence.seqno;
327
328 *cs++ = MI_USER_INTERRUPT;
329 *cs++ = MI_NOOP;
330
331 rq->tail = intel_ring_offset(rq, cs);
332 assert_ring_tail_valid(rq->ring, rq->tail);
333
334 return cs;
335 }
336
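/*
 * gen7 workaround helper: emit a PIPE_CONTROL with only CS_STALL |
 * STALL_AT_SCOREBOARD set; gen7_render_ring_flush() issues this before a
 * PIPE_CONTROL that invalidates the state cache.
 */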
337 static int
338 gen7_render_ring_cs_stall_wa(struct i915_request *rq)
339 {
340 u32 *cs;
341
342 cs = intel_ring_begin(rq, 4);
343 if (IS_ERR(cs))
344 return PTR_ERR(cs);
345
346 *cs++ = GFX_OP_PIPE_CONTROL(4);
347 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
348 *cs++ = 0;
349 *cs++ = 0;
350 intel_ring_advance(rq, cs);
351
352 return 0;
353 }
354
355 static int
356 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
357 {
358 u32 scratch_addr =
359 intel_gt_scratch_offset(rq->engine->gt,
360 INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
361 u32 *cs, flags = 0;
362
363 /*
364 * Ensure that any following seqno writes only happen when the render
365 * cache is indeed flushed.
366 *
367 * Workaround: 4th PIPE_CONTROL command (except the ones with only
368 * read-cache invalidate bits set) must have the CS_STALL bit set. We
369 * don't try to be clever and just set it unconditionally.
370 */
371 flags |= PIPE_CONTROL_CS_STALL;
372
373 /* Just flush everything.  Experiments have shown that reducing the
374 * number of bits based on the write domains has little performance
375 * impact.
376 */
377 if (mode & EMIT_FLUSH) {
378 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
379 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
380 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
381 flags |= PIPE_CONTROL_FLUSH_ENABLE;
382 }
383 if (mode & EMIT_INVALIDATE) {
384 flags |= PIPE_CONTROL_TLB_INVALIDATE;
385 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
386 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
387 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
388 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
389 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
390 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
391 /*
392 * TLB invalidate requires a post-sync write.
393 */
394 flags |= PIPE_CONTROL_QW_WRITE;
395 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
396
397 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
398
399 /* Workaround: we must issue a pipe_control with CS-stall bit
400 * set before a pipe_control command that has the state cache
401 * invalidation bit set. */
402 gen7_render_ring_cs_stall_wa(rq);
403 }
404
405 cs = intel_ring_begin(rq, 4);
406 if (IS_ERR(cs))
407 return PTR_ERR(cs);
408
409 *cs++ = GFX_OP_PIPE_CONTROL(4);
410 *cs++ = flags;
411 *cs++ = scratch_addr;
412 *cs++ = 0;
413 intel_ring_advance(rq, cs);
414
415 return 0;
416 }
417
418 static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
419 {
420 *cs++ = GFX_OP_PIPE_CONTROL(4);
421 *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
422 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
423 PIPE_CONTROL_DC_FLUSH_ENABLE |
424 PIPE_CONTROL_FLUSH_ENABLE |
425 PIPE_CONTROL_QW_WRITE |
426 PIPE_CONTROL_GLOBAL_GTT_IVB |
427 PIPE_CONTROL_CS_STALL);
428 *cs++ = rq->timeline->hwsp_offset;
429 *cs++ = rq->fence.seqno;
430
431 *cs++ = MI_USER_INTERRUPT;
432 *cs++ = MI_NOOP;
433
434 rq->tail = intel_ring_offset(rq, cs);
435 assert_ring_tail_valid(rq->ring, rq->tail);
436
437 return cs;
438 }
439
440 static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
441 {
442 GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
443 GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
444
445 *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
446 *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
447 *cs++ = rq->fence.seqno;
448
449 *cs++ = MI_USER_INTERRUPT;
450
451 rq->tail = intel_ring_offset(rq, cs);
452 assert_ring_tail_valid(rq->ring, rq->tail);
453
454 return cs;
455 }
456
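/*
 * gen7 video/blitter breadcrumb: after the MI_FLUSH_DW seqno store, the
 * seqno is rewritten GEN7_XCS_WA (32) more times with MI_STORE_DWORD_INDEX
 * and followed by another MI_FLUSH_DW, apparently to make sure the seqno
 * write has landed before the user interrupt is raised.
 */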
457 #define GEN7_XCS_WA 32
458 static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
459 {
460 int i;
461
462 GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
463 GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
464
465 *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
466 *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
467 *cs++ = rq->fence.seqno;
468
469 for (i = 0; i < GEN7_XCS_WA; i++) {
470 *cs++ = MI_STORE_DWORD_INDEX;
471 *cs++ = I915_GEM_HWS_SEQNO_ADDR;
472 *cs++ = rq->fence.seqno;
473 }
474
475 *cs++ = MI_FLUSH_DW;
476 *cs++ = 0;
477 *cs++ = 0;
478
479 *cs++ = MI_USER_INTERRUPT;
480 *cs++ = MI_NOOP;
481
482 rq->tail = intel_ring_offset(rq, cs);
483 assert_ring_tail_valid(rq->ring, rq->tail);
484
485 return cs;
486 }
487 #undef GEN7_XCS_WA
488
489 static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
490 {
491 /*
492 * Keep the render interrupt unmasked as this papers over
493 * lost interrupts following a reset.
494 */
495 if (engine->class == RENDER_CLASS) {
496 if (INTEL_GEN(engine->i915) >= 6)
497 mask &= ~BIT(0);
498 else
499 mask &= ~I915_USER_INTERRUPT;
500 }
501
502 intel_engine_set_hwsp_writemask(engine, mask);
503 }
504
505 static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
506 {
507 struct drm_i915_private *dev_priv = engine->i915;
508 u32 addr;
509
510 addr = lower_32_bits(phys);
511 if (INTEL_GEN(dev_priv) >= 4)
512 addr |= (phys >> 28) & 0xf0;
513
514 I915_WRITE(HWS_PGA, addr);
515 }
516
517 static struct page *status_page(struct intel_engine_cs *engine)
518 {
519 struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
520
521 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
522 return sg_page(obj->mm.pages->sgl);
523 }
524
525 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
526 {
527 set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
528 set_hwstam(engine, ~0u);
529 }
530
531 static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
532 {
533 struct drm_i915_private *dev_priv = engine->i915;
534 i915_reg_t hwsp;
535
536 /*
537 * The ring status page addresses are no longer next to the rest of
538 * the ring registers as of gen7.
539 */
540 if (IS_GEN(dev_priv, 7)) {
541 switch (engine->id) {
542
543
544
545
546 default:
547 GEM_BUG_ON(engine->id);
548 /* fallthrough */
549 case RCS0:
550 hwsp = RENDER_HWS_PGA_GEN7;
551 break;
552 case BCS0:
553 hwsp = BLT_HWS_PGA_GEN7;
554 break;
555 case VCS0:
556 hwsp = BSD_HWS_PGA_GEN7;
557 break;
558 case VECS0:
559 hwsp = VEBOX_HWS_PGA_GEN7;
560 break;
561 }
562 } else if (IS_GEN(dev_priv, 6)) {
563 hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
564 } else {
565 hwsp = RING_HWS_PGA(engine->mmio_base);
566 }
567
568 I915_WRITE(hwsp, offset);
569 POSTING_READ(hwsp);
570 }
571
572 static void flush_cs_tlb(struct intel_engine_cs *engine)
573 {
574 struct drm_i915_private *dev_priv = engine->i915;
575
576 if (!IS_GEN_RANGE(dev_priv, 6, 7))
577 return;
578
579 /* ring should be idle before issuing a sync flush */
580 WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
581
582 ENGINE_WRITE(engine, RING_INSTPM,
583 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
584 INSTPM_SYNC_FLUSH));
585 if (intel_wait_for_register(engine->uncore,
586 RING_INSTPM(engine->mmio_base),
587 INSTPM_SYNC_FLUSH, 0,
588 1000))
589 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
590 engine->name);
591 }
592
593 static void ring_setup_status_page(struct intel_engine_cs *engine)
594 {
595 set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
596 set_hwstam(engine, ~0u);
597
598 flush_cs_tlb(engine);
599 }
600
601 static bool stop_ring(struct intel_engine_cs *engine)
602 {
603 struct drm_i915_private *dev_priv = engine->i915;
604
605 if (INTEL_GEN(dev_priv) > 2) {
606 ENGINE_WRITE(engine,
607 RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
608 if (intel_wait_for_register(engine->uncore,
609 RING_MI_MODE(engine->mmio_base),
610 MODE_IDLE,
611 MODE_IDLE,
612 1000)) {
613 DRM_ERROR("%s : timed out trying to stop ring\n",
614 engine->name);
615
616 /*
617 * Sometimes we observe that the idle flag is not
618 * set even though the ring is empty. So double
619 * check before giving up.
620 */
621 if (ENGINE_READ(engine, RING_HEAD) !=
622 ENGINE_READ(engine, RING_TAIL))
623 return false;
624 }
625 }
626
627 ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
628
629 ENGINE_WRITE(engine, RING_HEAD, 0);
630 ENGINE_WRITE(engine, RING_TAIL, 0);
631
632
633 ENGINE_WRITE(engine, RING_CTL, 0);
634
635 return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
636 }
637
638 static int xcs_resume(struct intel_engine_cs *engine)
639 {
640 struct drm_i915_private *dev_priv = engine->i915;
641 struct intel_ring *ring = engine->legacy.ring;
642 int ret = 0;
643
644 GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
645 engine->name, ring->head, ring->tail);
646
647 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
648
649 /* WaClearRingBufHeadRegAtInit:ctg,elk */
650 if (!stop_ring(engine)) {
651 /* G45 ring initialization often fails to reset head to zero */
652 DRM_DEBUG_DRIVER("%s head not reset to zero "
653 "ctl %08x head %08x tail %08x start %08x\n",
654 engine->name,
655 ENGINE_READ(engine, RING_CTL),
656 ENGINE_READ(engine, RING_HEAD),
657 ENGINE_READ(engine, RING_TAIL),
658 ENGINE_READ(engine, RING_START));
659
660 if (!stop_ring(engine)) {
661 DRM_ERROR("failed to set %s head to zero "
662 "ctl %08x head %08x tail %08x start %08x\n",
663 engine->name,
664 ENGINE_READ(engine, RING_CTL),
665 ENGINE_READ(engine, RING_HEAD),
666 ENGINE_READ(engine, RING_TAIL),
667 ENGINE_READ(engine, RING_START));
668 ret = -EIO;
669 goto out;
670 }
671 }
672
673 if (HWS_NEEDS_PHYSICAL(dev_priv))
674 ring_setup_phys_status_page(engine);
675 else
676 ring_setup_status_page(engine);
677
678 intel_engine_reset_breadcrumbs(engine);
679
680
681 ENGINE_POSTING_READ(engine, RING_HEAD);
682
683 /*
684 * Initialize the ring. This must happen _after_ we've cleared the ring
685 * registers with the above sequence (the readback of the HEAD registers
686 * also enforces ordering), otherwise the hw might lose the new ring
687 * register values.
688 */
689 ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
690
691
692 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
693 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
694 intel_ring_update_space(ring);
695
696
697 ENGINE_WRITE(engine, RING_HEAD, ring->head);
698 ENGINE_WRITE(engine, RING_TAIL, ring->head);
699 ENGINE_POSTING_READ(engine, RING_TAIL);
700
701 ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
702
703
704 if (intel_wait_for_register(engine->uncore,
705 RING_CTL(engine->mmio_base),
706 RING_VALID, RING_VALID,
707 50)) {
708 DRM_ERROR("%s initialization failed "
709 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
710 engine->name,
711 ENGINE_READ(engine, RING_CTL),
712 ENGINE_READ(engine, RING_CTL) & RING_VALID,
713 ENGINE_READ(engine, RING_HEAD), ring->head,
714 ENGINE_READ(engine, RING_TAIL), ring->tail,
715 ENGINE_READ(engine, RING_START),
716 i915_ggtt_offset(ring->vma));
717 ret = -EIO;
718 goto out;
719 }
720
721 if (INTEL_GEN(dev_priv) > 2)
722 ENGINE_WRITE(engine,
723 RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
724
725
726 if (ring->tail != ring->head) {
727 ENGINE_WRITE(engine, RING_TAIL, ring->tail);
728 ENGINE_POSTING_READ(engine, RING_TAIL);
729 }
730
731
732 intel_engine_queue_breadcrumbs(engine);
733 out:
734 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
735
736 return ret;
737 }
738
739 static void reset_prepare(struct intel_engine_cs *engine)
740 {
741 struct intel_uncore *uncore = engine->uncore;
742 const u32 base = engine->mmio_base;
743
744 /*
745 * Stop the engine before reset: otherwise the reset may fail and
746 * leave a dead GPU (seen on elk), and even modern parts can hang if
747 * a batchbuffer is still progressing when the reset is issued,
748 * regardless of the READY_TO_RESET ack. So in addition to STOP_RING,
749 * park the ring by making HEAD == TAIL, clearing HEAD/TAIL and
750 * disabling RING_CTL below.
751 */
758 GEM_TRACE("%s\n", engine->name);
759
760 if (intel_engine_stop_cs(engine))
761 GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
762
763 intel_uncore_write_fw(uncore,
764 RING_HEAD(base),
765 intel_uncore_read_fw(uncore, RING_TAIL(base)));
766 intel_uncore_posting_read_fw(uncore, RING_HEAD(base));
767
768 intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
769 intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
770 intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
771
772
773 intel_uncore_write_fw(uncore, RING_CTL(base), 0);
774
775
776 if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
777 GEM_TRACE("%s: ring head [%x] not parked\n",
778 engine->name,
779 intel_uncore_read_fw(uncore, RING_HEAD(base)));
780 }
781
782 static void reset_ring(struct intel_engine_cs *engine, bool stalled)
783 {
784 struct i915_request *pos, *rq;
785 unsigned long flags;
786 u32 head;
787
788 rq = NULL;
789 spin_lock_irqsave(&engine->active.lock, flags);
790 list_for_each_entry(pos, &engine->active.requests, sched.link) {
791 if (!i915_request_completed(pos)) {
792 rq = pos;
793 break;
794 }
795 }
796 /*
797 * The guilty request will get skipped on a hung engine.
798 *
799 * Users of client default contexts do not rely on logical
800 * state preserved between batches, so it is safe to execute the
801 * queued requests following the hang. Non-default contexts do rely
802 * on preserved state, so skipping a batch loses the evolution of
803 * that state and it must be considered corrupted. We still replay
804 * the queue to keep client behaviour predictable, and rely on the
805 * context/client ban mechanism as the safety valve if that only
806 * results in further hangs.
807 */
819 if (rq) {
820 /*
821 * Try to restore the logical GPU state to match the
822 * continuation of the request queue. If we skip the
823 * context/PD restore, then the next request may try to execute
824 * assuming that its context is valid and loaded on the GPU and
825 * so may try to access invalid memory, prompting repeated GPU
826 * hangs.
827 *
828 * If the request was guilty, we still restore the logical
829 * state in case the next request requires it (e.g. the
830 * aliasing ppgtt), but skip over the hung batch.
831 *
832 * If the request was innocent, we try to replay the request
833 * with the restored context.
834 */
835 __i915_request_reset(rq, stalled);
836
837 GEM_BUG_ON(rq->ring != engine->legacy.ring);
838 head = rq->head;
839 } else {
840 head = engine->legacy.ring->tail;
841 }
842 engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
843
844 spin_unlock_irqrestore(&engine->active.lock, flags);
845 }
846
847 static void reset_finish(struct intel_engine_cs *engine)
848 {
849 }
850
851 static int rcs_resume(struct intel_engine_cs *engine)
852 {
853 struct drm_i915_private *dev_priv = engine->i915;
854
855
856
857
858
859
860
861
862
863
864
865 if (IS_GEN(dev_priv, 4))
866 I915_WRITE(ECOSKPD,
867 _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
868
869
870 if (IS_GEN_RANGE(dev_priv, 4, 6))
871 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
872
873
874
875
876
877
878
879 if (IS_GEN_RANGE(dev_priv, 6, 7))
880 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
881
882
883
884 if (IS_GEN(dev_priv, 6))
885 I915_WRITE(GFX_MODE,
886 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
887
888
889 if (IS_GEN(dev_priv, 7))
890 I915_WRITE(GFX_MODE_GEN7,
891 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
892 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
893
894 if (IS_GEN(dev_priv, 6)) {
895 /*
896 * From the Sandybridge PRM, volume 1 part 3, page 24:
897 * "If this bit is set, STCunit will have LRA as replacement
898 *  policy. [...] This bit must be reset." (LRA is not supported.)
899 */
900 I915_WRITE(CACHE_MODE_0,
901 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
902 }
903
904 if (IS_GEN_RANGE(dev_priv, 6, 7))
905 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
906
907 return xcs_resume(engine);
908 }
909
910 static void cancel_requests(struct intel_engine_cs *engine)
911 {
912 struct i915_request *request;
913 unsigned long flags;
914
915 spin_lock_irqsave(&engine->active.lock, flags);
916
917 /* Mark all submitted requests as skipped. */
918 list_for_each_entry(request, &engine->active.requests, sched.link) {
919 if (!i915_request_signaled(request))
920 dma_fence_set_error(&request->fence, -EIO);
921
922 i915_request_mark_complete(request);
923 }
924
925
926
927 spin_unlock_irqrestore(&engine->active.lock, flags);
928 }
929
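/* Submission on the legacy engines is just a write of the new ring TAIL. */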
930 static void i9xx_submit_request(struct i915_request *request)
931 {
932 i915_request_submit(request);
933
934 ENGINE_WRITE(request->engine, RING_TAIL,
935 intel_ring_set_tail(request->ring, request->tail));
936 }
937
938 static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
939 {
940 GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
941 GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
942
943 *cs++ = MI_FLUSH;
944
945 *cs++ = MI_STORE_DWORD_INDEX;
946 *cs++ = I915_GEM_HWS_SEQNO_ADDR;
947 *cs++ = rq->fence.seqno;
948
949 *cs++ = MI_USER_INTERRUPT;
950 *cs++ = MI_NOOP;
951
952 rq->tail = intel_ring_offset(rq, cs);
953 assert_ring_tail_valid(rq->ring, rq->tail);
954
955 return cs;
956 }
957
958 #define GEN5_WA_STORES 8
959 static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
960 {
961 int i;
962
963 GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
964 GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
965
966 *cs++ = MI_FLUSH;
967
968 BUILD_BUG_ON(GEN5_WA_STORES < 1);
969 for (i = 0; i < GEN5_WA_STORES; i++) {
970 *cs++ = MI_STORE_DWORD_INDEX;
971 *cs++ = I915_GEM_HWS_SEQNO_ADDR;
972 *cs++ = rq->fence.seqno;
973 }
974
975 *cs++ = MI_USER_INTERRUPT;
976
977 rq->tail = intel_ring_offset(rq, cs);
978 assert_ring_tail_valid(rq->ring, rq->tail);
979
980 return cs;
981 }
982 #undef GEN5_WA_STORES
983
984 static void
985 gen5_irq_enable(struct intel_engine_cs *engine)
986 {
987 gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
988 }
989
990 static void
991 gen5_irq_disable(struct intel_engine_cs *engine)
992 {
993 gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
994 }
995
996 static void
997 i9xx_irq_enable(struct intel_engine_cs *engine)
998 {
999 engine->i915->irq_mask &= ~engine->irq_enable_mask;
1000 intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
1001 intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
1002 }
1003
1004 static void
1005 i9xx_irq_disable(struct intel_engine_cs *engine)
1006 {
1007 engine->i915->irq_mask |= engine->irq_enable_mask;
1008 intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
1009 }
1010
1011 static void
1012 i8xx_irq_enable(struct intel_engine_cs *engine)
1013 {
1014 struct drm_i915_private *i915 = engine->i915;
1015
1016 i915->irq_mask &= ~engine->irq_enable_mask;
1017 intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
1018 ENGINE_POSTING_READ16(engine, RING_IMR);
1019 }
1020
1021 static void
1022 i8xx_irq_disable(struct intel_engine_cs *engine)
1023 {
1024 struct drm_i915_private *i915 = engine->i915;
1025
1026 i915->irq_mask |= engine->irq_enable_mask;
1027 intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
1028 }
1029
1030 static int
1031 bsd_ring_flush(struct i915_request *rq, u32 mode)
1032 {
1033 u32 *cs;
1034
1035 cs = intel_ring_begin(rq, 2);
1036 if (IS_ERR(cs))
1037 return PTR_ERR(cs);
1038
1039 *cs++ = MI_FLUSH;
1040 *cs++ = MI_NOOP;
1041 intel_ring_advance(rq, cs);
1042 return 0;
1043 }
1044
1045 static void
1046 gen6_irq_enable(struct intel_engine_cs *engine)
1047 {
1048 ENGINE_WRITE(engine, RING_IMR,
1049 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1050
1051
1052 ENGINE_POSTING_READ(engine, RING_IMR);
1053
1054 gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
1055 }
1056
1057 static void
1058 gen6_irq_disable(struct intel_engine_cs *engine)
1059 {
1060 ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
1061 gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
1062 }
1063
1064 static void
1065 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1066 {
1067 ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
1068
1069
1070 ENGINE_POSTING_READ(engine, RING_IMR);
1071
1072 gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
1073 }
1074
1075 static void
1076 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1077 {
1078 ENGINE_WRITE(engine, RING_IMR, ~0);
1079 gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
1080 }
1081
1082 static int
1083 i965_emit_bb_start(struct i915_request *rq,
1084 u64 offset, u32 length,
1085 unsigned int dispatch_flags)
1086 {
1087 u32 *cs;
1088
1089 cs = intel_ring_begin(rq, 2);
1090 if (IS_ERR(cs))
1091 return PTR_ERR(cs);
1092
1093 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
1094 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
1095 *cs++ = offset;
1096 intel_ring_advance(rq, cs);
1097
1098 return 0;
1099 }
1100
1101
1102 #define I830_BATCH_LIMIT SZ_256K
1103 #define I830_TLB_ENTRIES (2)
1104 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
1105 static int
1106 i830_emit_bb_start(struct i915_request *rq,
1107 u64 offset, u32 len,
1108 unsigned int dispatch_flags)
1109 {
1110 u32 *cs, cs_offset =
1111 intel_gt_scratch_offset(rq->engine->gt,
1112 INTEL_GT_SCRATCH_FIELD_DEFAULT);
1113
1114 GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
1115
1116 cs = intel_ring_begin(rq, 6);
1117 if (IS_ERR(cs))
1118 return PTR_ERR(cs);
1119
1120
1121 *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
1122 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
1123 *cs++ = I830_TLB_ENTRIES << 16 | 4;
1124 *cs++ = cs_offset;
1125 *cs++ = 0xdeadbeef;
1126 *cs++ = MI_NOOP;
1127 intel_ring_advance(rq, cs);
1128
1129 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1130 if (len > I830_BATCH_LIMIT)
1131 return -ENOSPC;
1132
1133 cs = intel_ring_begin(rq, 6 + 2);
1134 if (IS_ERR(cs))
1135 return PTR_ERR(cs);
1136
1137 /* Blit the batch (which has now all relocs applied) to the
1138 * stable batch scratch bo area (so that the CS never
1139 * stumbles over its tlb invalidation bug) ...
1140 */
1141 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
1142 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
1143 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
1144 *cs++ = cs_offset;
1145 *cs++ = 4096;
1146 *cs++ = offset;
1147
1148 *cs++ = MI_FLUSH;
1149 *cs++ = MI_NOOP;
1150 intel_ring_advance(rq, cs);
1151
1152
1153 offset = cs_offset;
1154 }
1155
1156 cs = intel_ring_begin(rq, 2);
1157 if (IS_ERR(cs))
1158 return PTR_ERR(cs);
1159
1160 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1161 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1162 MI_BATCH_NON_SECURE);
1163 intel_ring_advance(rq, cs);
1164
1165 return 0;
1166 }
1167
1168 static int
1169 i915_emit_bb_start(struct i915_request *rq,
1170 u64 offset, u32 len,
1171 unsigned int dispatch_flags)
1172 {
1173 u32 *cs;
1174
1175 cs = intel_ring_begin(rq, 2);
1176 if (IS_ERR(cs))
1177 return PTR_ERR(cs);
1178
1179 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1180 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1181 MI_BATCH_NON_SECURE);
1182 intel_ring_advance(rq, cs);
1183
1184 return 0;
1185 }
1186
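/*
 * Pin the ring into the GGTT and map it: through the GTT aperture when
 * the backing store is stolen/mappable, otherwise with a CPU vmap.
 */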
1187 int intel_ring_pin(struct intel_ring *ring)
1188 {
1189 struct i915_vma *vma = ring->vma;
1190 unsigned int flags;
1191 void *addr;
1192 int ret;
1193
1194 if (atomic_fetch_inc(&ring->pin_count))
1195 return 0;
1196
1197 flags = PIN_GLOBAL;
1198
1199
1200 flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
1201
1202 if (vma->obj->stolen)
1203 flags |= PIN_MAPPABLE;
1204 else
1205 flags |= PIN_HIGH;
1206
1207 ret = i915_vma_pin(vma, 0, 0, flags);
1208 if (unlikely(ret))
1209 goto err_unpin;
1210
1211 if (i915_vma_is_map_and_fenceable(vma))
1212 addr = (void __force *)i915_vma_pin_iomap(vma);
1213 else
1214 addr = i915_gem_object_pin_map(vma->obj,
1215 i915_coherent_map_type(vma->vm->i915));
1216 if (IS_ERR(addr)) {
1217 ret = PTR_ERR(addr);
1218 goto err_ring;
1219 }
1220
1221 i915_vma_make_unshrinkable(vma);
1222
1223 GEM_BUG_ON(ring->vaddr);
1224 ring->vaddr = addr;
1225
1226 return 0;
1227
1228 err_ring:
1229 i915_vma_unpin(vma);
1230 err_unpin:
1231 atomic_dec(&ring->pin_count);
1232 return ret;
1233 }
1234
1235 void intel_ring_reset(struct intel_ring *ring, u32 tail)
1236 {
1237 tail = intel_ring_wrap(ring, tail);
1238 ring->tail = tail;
1239 ring->head = tail;
1240 ring->emit = tail;
1241 intel_ring_update_space(ring);
1242 }
1243
1244 void intel_ring_unpin(struct intel_ring *ring)
1245 {
1246 struct i915_vma *vma = ring->vma;
1247
1248 if (!atomic_dec_and_test(&ring->pin_count))
1249 return;
1250
1251
1252 intel_ring_reset(ring, ring->emit);
1253
1254 i915_vma_unset_ggtt_write(vma);
1255 if (i915_vma_is_map_and_fenceable(vma))
1256 i915_vma_unpin_iomap(vma);
1257 else
1258 i915_gem_object_unpin_map(vma->obj);
1259
1260 GEM_BUG_ON(!ring->vaddr);
1261 ring->vaddr = NULL;
1262
1263 i915_vma_unpin(vma);
1264 i915_vma_make_purgeable(vma);
1265 }
1266
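/*
 * Ring buffers are allocated from stolen memory when possible, falling
 * back to internal (unswappable) pages, and may be mapped read-only to
 * the GPU where the GGTT supports it.
 */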
1267 static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
1268 {
1269 struct i915_address_space *vm = &ggtt->vm;
1270 struct drm_i915_private *i915 = vm->i915;
1271 struct drm_i915_gem_object *obj;
1272 struct i915_vma *vma;
1273
1274 obj = i915_gem_object_create_stolen(i915, size);
1275 if (!obj)
1276 obj = i915_gem_object_create_internal(i915, size);
1277 if (IS_ERR(obj))
1278 return ERR_CAST(obj);
1279
1280 /*
1281 * Mark ring buffers as read-only from the GPU side (so no stray
1282 * overwrites) if supported by the platform's GGTT.
1283 */
1284 if (vm->has_read_only)
1285 i915_gem_object_set_readonly(obj);
1286
1287 vma = i915_vma_instance(obj, vm, NULL);
1288 if (IS_ERR(vma))
1289 goto err;
1290
1291 return vma;
1292
1293 err:
1294 i915_gem_object_put(obj);
1295 return vma;
1296 }
1297
1298 struct intel_ring *
1299 intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1300 {
1301 struct drm_i915_private *i915 = engine->i915;
1302 struct intel_ring *ring;
1303 struct i915_vma *vma;
1304
1305 GEM_BUG_ON(!is_power_of_2(size));
1306 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1307
1308 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1309 if (!ring)
1310 return ERR_PTR(-ENOMEM);
1311
1312 kref_init(&ring->ref);
1313
1314 ring->size = size;
1315 ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
1316
1317 /* Workaround an erratum on the i830 which causes a hang if
1318 * the TAIL pointer points to within the last 2 cachelines
1319 * of the buffer.
1320 */
1321 ring->effective_size = size;
1322 if (IS_I830(i915) || IS_I845G(i915))
1323 ring->effective_size -= 2 * CACHELINE_BYTES;
1324
1325 intel_ring_update_space(ring);
1326
1327 vma = create_ring_vma(engine->gt->ggtt, size);
1328 if (IS_ERR(vma)) {
1329 kfree(ring);
1330 return ERR_CAST(vma);
1331 }
1332 ring->vma = vma;
1333
1334 return ring;
1335 }
1336
1337 void intel_ring_free(struct kref *ref)
1338 {
1339 struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
1340
1341 i915_vma_close(ring->vma);
1342 i915_vma_put(ring->vma);
1343
1344 kfree(ring);
1345 }
1346
1347 static void __ring_context_fini(struct intel_context *ce)
1348 {
1349 i915_gem_object_put(ce->state->obj);
1350 }
1351
1352 static void ring_context_destroy(struct kref *ref)
1353 {
1354 struct intel_context *ce = container_of(ref, typeof(*ce), ref);
1355
1356 GEM_BUG_ON(intel_context_is_pinned(ce));
1357
1358 if (ce->state)
1359 __ring_context_fini(ce);
1360
1361 intel_context_fini(ce);
1362 intel_context_free(ce);
1363 }
1364
1365 static struct i915_address_space *vm_alias(struct intel_context *ce)
1366 {
1367 struct i915_address_space *vm;
1368
1369 vm = ce->vm;
1370 if (i915_is_ggtt(vm))
1371 vm = &i915_vm_to_ggtt(vm)->alias->vm;
1372
1373 return vm;
1374 }
1375
1376 static int __context_pin_ppgtt(struct intel_context *ce)
1377 {
1378 struct i915_address_space *vm;
1379 int err = 0;
1380
1381 vm = vm_alias(ce);
1382 if (vm)
1383 err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
1384
1385 return err;
1386 }
1387
1388 static void __context_unpin_ppgtt(struct intel_context *ce)
1389 {
1390 struct i915_address_space *vm;
1391
1392 vm = vm_alias(ce);
1393 if (vm)
1394 gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
1395 }
1396
1397 static void ring_context_unpin(struct intel_context *ce)
1398 {
1399 __context_unpin_ppgtt(ce);
1400 }
1401
1402 static struct i915_vma *
1403 alloc_context_vma(struct intel_engine_cs *engine)
1404 {
1405 struct drm_i915_private *i915 = engine->i915;
1406 struct drm_i915_gem_object *obj;
1407 struct i915_vma *vma;
1408 int err;
1409
1410 obj = i915_gem_object_create_shmem(i915, engine->context_size);
1411 if (IS_ERR(obj))
1412 return ERR_CAST(obj);
1413
1414 /*
1415 * Try to make the context utilize L3 as well as LLC.
1416 *
1417 * On VLV we don't have L3 controls in the PTEs so we
1418 * shouldn't touch the cache level, especially as that
1419 * would make the object snooped which might have a
1420 * negative performance impact.
1421 *
1422 * Snooping is required on non-llc platforms in execlist
1423 * mode, but since all GGTT accesses use PAT entry 0 we
1424 * get snooping anyway regardless of cache_level.
1425 *
1426 * This is only applicable for Ivy Bridge devices since
1427 * later platforms don't have L3 control bits in the PTE.
1428 */
1429 if (IS_IVYBRIDGE(i915))
1430 i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
1431
1432 if (engine->default_state) {
1433 void *defaults, *vaddr;
1434
1435 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1436 if (IS_ERR(vaddr)) {
1437 err = PTR_ERR(vaddr);
1438 goto err_obj;
1439 }
1440
1441 defaults = i915_gem_object_pin_map(engine->default_state,
1442 I915_MAP_WB);
1443 if (IS_ERR(defaults)) {
1444 err = PTR_ERR(defaults);
1445 goto err_map;
1446 }
1447
1448 memcpy(vaddr, defaults, engine->context_size);
1449 i915_gem_object_unpin_map(engine->default_state);
1450
1451 i915_gem_object_flush_map(obj);
1452 i915_gem_object_unpin_map(obj);
1453 }
1454
1455 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
1456 if (IS_ERR(vma)) {
1457 err = PTR_ERR(vma);
1458 goto err_obj;
1459 }
1460
1461 return vma;
1462
1463 err_map:
1464 i915_gem_object_unpin_map(obj);
1465 err_obj:
1466 i915_gem_object_put(obj);
1467 return ERR_PTR(err);
1468 }
1469
1470 static int ring_context_alloc(struct intel_context *ce)
1471 {
1472 struct intel_engine_cs *engine = ce->engine;
1473
1474
1475 GEM_BUG_ON(!engine->legacy.ring);
1476 ce->ring = engine->legacy.ring;
1477 ce->timeline = intel_timeline_get(engine->legacy.timeline);
1478
1479 GEM_BUG_ON(ce->state);
1480 if (engine->context_size) {
1481 struct i915_vma *vma;
1482
1483 vma = alloc_context_vma(engine);
1484 if (IS_ERR(vma))
1485 return PTR_ERR(vma);
1486
1487 ce->state = vma;
1488 }
1489
1490 return 0;
1491 }
1492
1493 static int ring_context_pin(struct intel_context *ce)
1494 {
1495 int err;
1496
1497 err = intel_context_active_acquire(ce);
1498 if (err)
1499 return err;
1500
1501 err = __context_pin_ppgtt(ce);
1502 if (err)
1503 goto err_active;
1504
1505 return 0;
1506
1507 err_active:
1508 intel_context_active_release(ce);
1509 return err;
1510 }
1511
1512 static void ring_context_reset(struct intel_context *ce)
1513 {
1514 intel_ring_reset(ce->ring, 0);
1515 }
1516
1517 static const struct intel_context_ops ring_context_ops = {
1518 .alloc = ring_context_alloc,
1519
1520 .pin = ring_context_pin,
1521 .unpin = ring_context_unpin,
1522
1523 .enter = intel_context_enter_engine,
1524 .exit = intel_context_exit_engine,
1525
1526 .reset = ring_context_reset,
1527 .destroy = ring_context_destroy,
1528 };
1529
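/*
 * Point the engine at a new ppgtt by loading PP_DIR_DCLV and
 * PP_DIR_BASE via MI_LOAD_REGISTER_IMM.
 */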
1530 static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
1531 {
1532 const struct intel_engine_cs * const engine = rq->engine;
1533 u32 *cs;
1534
1535 cs = intel_ring_begin(rq, 6);
1536 if (IS_ERR(cs))
1537 return PTR_ERR(cs);
1538
1539 *cs++ = MI_LOAD_REGISTER_IMM(1);
1540 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
1541 *cs++ = PP_DIR_DCLV_2G;
1542
1543 *cs++ = MI_LOAD_REGISTER_IMM(1);
1544 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
1545 *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;
1546
1547 intel_ring_advance(rq, cs);
1548
1549 return 0;
1550 }
1551
1552 static int flush_pd_dir(struct i915_request *rq)
1553 {
1554 const struct intel_engine_cs * const engine = rq->engine;
1555 u32 *cs;
1556
1557 cs = intel_ring_begin(rq, 4);
1558 if (IS_ERR(cs))
1559 return PTR_ERR(cs);
1560
1561 /* Stall until the page table load is complete */
1562 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1563 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
1564 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
1565 INTEL_GT_SCRATCH_FIELD_DEFAULT);
1566 *cs++ = MI_NOOP;
1567
1568 intel_ring_advance(rq, cs);
1569 return 0;
1570 }
1571
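/*
 * Emit MI_SET_CONTEXT for the request's context, wrapped in the
 * per-platform workarounds (MI_ARB_ON_OFF and PSMI sleep-message toggling
 * on gen7, MI_SUSPEND_FLUSH on ilk), with an optional forced switch away
 * to the kernel context first when MI_FORCE_RESTORE is requested.
 */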
1572 static inline int mi_set_context(struct i915_request *rq, u32 flags)
1573 {
1574 struct drm_i915_private *i915 = rq->i915;
1575 struct intel_engine_cs *engine = rq->engine;
1576 enum intel_engine_id id;
1577 const int num_engines =
1578 IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
1579 bool force_restore = false;
1580 int len;
1581 u32 *cs;
1582
1583 flags |= MI_MM_SPACE_GTT;
1584 if (IS_HASWELL(i915))
1585
1586 flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
1587 else
1588
1589 flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
1590
1591 len = 4;
1592 if (IS_GEN(i915, 7))
1593 len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
1594 else if (IS_GEN(i915, 5))
1595 len += 2;
1596 if (flags & MI_FORCE_RESTORE) {
1597 GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
1598 flags &= ~MI_FORCE_RESTORE;
1599 force_restore = true;
1600 len += 2;
1601 }
1602
1603 cs = intel_ring_begin(rq, len);
1604 if (IS_ERR(cs))
1605 return PTR_ERR(cs);
1606
1607
1608 if (IS_GEN(i915, 7)) {
1609 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1610 if (num_engines) {
1611 struct intel_engine_cs *signaller;
1612
1613 *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
1614 for_each_engine(signaller, i915, id) {
1615 if (signaller == engine)
1616 continue;
1617
1618 *cs++ = i915_mmio_reg_offset(
1619 RING_PSMI_CTL(signaller->mmio_base));
1620 *cs++ = _MASKED_BIT_ENABLE(
1621 GEN6_PSMI_SLEEP_MSG_DISABLE);
1622 }
1623 }
1624 } else if (IS_GEN(i915, 5)) {
1625
1626
1627
1628
1629
1630
1631 *cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
1632 }
1633
1634 if (force_restore) {
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647 *cs++ = MI_SET_CONTEXT;
1648 *cs++ = i915_ggtt_offset(engine->kernel_context->state) |
1649 MI_MM_SPACE_GTT |
1650 MI_RESTORE_INHIBIT;
1651 }
1652
1653 *cs++ = MI_NOOP;
1654 *cs++ = MI_SET_CONTEXT;
1655 *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
1656 /*
1657 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
1658 * WaMiSetContext_Hang:snb,ivb,vlv
1659 */
1660 *cs++ = MI_NOOP;
1661
1662 if (IS_GEN(i915, 7)) {
1663 if (num_engines) {
1664 struct intel_engine_cs *signaller;
1665 i915_reg_t last_reg = {};
1666
1667 *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
1668 for_each_engine(signaller, i915, id) {
1669 if (signaller == engine)
1670 continue;
1671
1672 last_reg = RING_PSMI_CTL(signaller->mmio_base);
1673 *cs++ = i915_mmio_reg_offset(last_reg);
1674 *cs++ = _MASKED_BIT_DISABLE(
1675 GEN6_PSMI_SLEEP_MSG_DISABLE);
1676 }
1677
1678
1679 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1680 *cs++ = i915_mmio_reg_offset(last_reg);
1681 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
1682 INTEL_GT_SCRATCH_FIELD_DEFAULT);
1683 *cs++ = MI_NOOP;
1684 }
1685 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1686 } else if (IS_GEN(i915, 5)) {
1687 *cs++ = MI_SUSPEND_FLUSH;
1688 }
1689
1690 intel_ring_advance(rq, cs);
1691
1692 return 0;
1693 }
1694
1695 static int remap_l3_slice(struct i915_request *rq, int slice)
1696 {
1697 u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
1698 int i;
1699
1700 if (!remap_info)
1701 return 0;
1702
1703 cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
1704 if (IS_ERR(cs))
1705 return PTR_ERR(cs);
1706
1707 /*
1708 * Note: We do not worry about the concurrent register cacheline hang
1709 * here because no other code should access these registers other than
1710 * at initialization time.
1711 */
1712 *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
1713 for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
1714 *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
1715 *cs++ = remap_info[i];
1716 }
1717 *cs++ = MI_NOOP;
1718 intel_ring_advance(rq, cs);
1719
1720 return 0;
1721 }
1722
1723 static int remap_l3(struct i915_request *rq)
1724 {
1725 struct i915_gem_context *ctx = rq->gem_context;
1726 int i, err;
1727
1728 if (!ctx->remap_slice)
1729 return 0;
1730
1731 for (i = 0; i < MAX_L3_SLICES; i++) {
1732 if (!(ctx->remap_slice & BIT(i)))
1733 continue;
1734
1735 err = remap_l3_slice(rq, i);
1736 if (err)
1737 return err;
1738 }
1739
1740 ctx->remap_slice = 0;
1741 return 0;
1742 }
1743
1744 static int switch_context(struct i915_request *rq)
1745 {
1746 struct intel_engine_cs *engine = rq->engine;
1747 struct i915_address_space *vm = vm_alias(rq->hw_context);
1748 unsigned int unwind_mm = 0;
1749 u32 hw_flags = 0;
1750 int ret;
1751
1752 GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
1753
1754 if (vm) {
1755 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1756 int loops;
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767 loops = 1;
1768 if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915))
1769 loops = 32;
1770
1771 do {
1772 ret = load_pd_dir(rq, ppgtt);
1773 if (ret)
1774 goto err;
1775 } while (--loops);
1776
1777 if (ppgtt->pd_dirty_engines & engine->mask) {
1778 unwind_mm = engine->mask;
1779 ppgtt->pd_dirty_engines &= ~unwind_mm;
1780 hw_flags = MI_FORCE_RESTORE;
1781 }
1782 }
1783
1784 if (rq->hw_context->state) {
1785 GEM_BUG_ON(engine->id != RCS0);
1786
1787
1788
1789
1790
1791
1792
1793
1794 if (i915_gem_context_is_kernel(rq->gem_context))
1795 hw_flags = MI_RESTORE_INHIBIT;
1796
1797 ret = mi_set_context(rq, hw_flags);
1798 if (ret)
1799 goto err_mm;
1800 }
1801
1802 if (vm) {
1803 ret = engine->emit_flush(rq, EMIT_INVALIDATE);
1804 if (ret)
1805 goto err_mm;
1806
1807 ret = flush_pd_dir(rq);
1808 if (ret)
1809 goto err_mm;
1810
1811 /*
1812 * Not only do we need a full barrier (post-sync write) after
1813 * invalidating the TLBs, but we need to wait a little bit
1814 * longer. Whether this is merely delaying us, or the
1815 * subsequent flush is a key part of serialising with the
1816 * post-sync op, this extra pass appears vital before a
1817 * mm switch!
1818 */
1819 ret = engine->emit_flush(rq, EMIT_INVALIDATE);
1820 if (ret)
1821 goto err_mm;
1822
1823 ret = engine->emit_flush(rq, EMIT_FLUSH);
1824 if (ret)
1825 goto err_mm;
1826 }
1827
1828 ret = remap_l3(rq);
1829 if (ret)
1830 goto err_mm;
1831
1832 return 0;
1833
1834 err_mm:
1835 if (unwind_mm)
1836 i915_vm_to_ppgtt(vm)->pd_dirty_engines |= unwind_mm;
1837 err:
1838 return ret;
1839 }
1840
1841 static int ring_request_alloc(struct i915_request *request)
1842 {
1843 int ret;
1844
1845 GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
1846 GEM_BUG_ON(request->timeline->has_initial_breadcrumb);
1847
1848 /*
1849 * Flush enough space to reduce the likelihood of waiting after
1850 * we start building the request - in which case we will just
1851 * have to repeat work.
1852 */
1853 request->reserved_space += LEGACY_REQUEST_SIZE;
1854
1855
1856 ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
1857 if (ret)
1858 return ret;
1859
1860 ret = switch_context(request);
1861 if (ret)
1862 return ret;
1863
1864 request->reserved_space -= LEGACY_REQUEST_SIZE;
1865 return 0;
1866 }
1867
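/*
 * Out of ring space: find the oldest request on this timeline whose
 * completion would free enough bytes, wait for it and retire up to it.
 */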
1868 static noinline int
1869 wait_for_space(struct intel_ring *ring,
1870 struct intel_timeline *tl,
1871 unsigned int bytes)
1872 {
1873 struct i915_request *target;
1874 long timeout;
1875
1876 if (intel_ring_update_space(ring) >= bytes)
1877 return 0;
1878
1879 GEM_BUG_ON(list_empty(&tl->requests));
1880 list_for_each_entry(target, &tl->requests, link) {
1881 if (target->ring != ring)
1882 continue;
1883
1884
1885 if (bytes <= __intel_ring_space(target->postfix,
1886 ring->emit, ring->size))
1887 break;
1888 }
1889
1890 if (GEM_WARN_ON(&target->link == &tl->requests))
1891 return -ENOSPC;
1892
1893 timeout = i915_request_wait(target,
1894 I915_WAIT_INTERRUPTIBLE,
1895 MAX_SCHEDULE_TIMEOUT);
1896 if (timeout < 0)
1897 return timeout;
1898
1899 i915_request_retire_upto(target);
1900
1901 intel_ring_update_space(ring);
1902 GEM_BUG_ON(ring->space < bytes);
1903 return 0;
1904 }
1905
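/*
 * Reserve space for num_dwords (which must be even) in the ring, waiting
 * for old requests to retire if necessary.  The canonical usage, as seen
 * throughout this file:
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 */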
1906 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
1907 {
1908 struct intel_ring *ring = rq->ring;
1909 const unsigned int remain_usable = ring->effective_size - ring->emit;
1910 const unsigned int bytes = num_dwords * sizeof(u32);
1911 unsigned int need_wrap = 0;
1912 unsigned int total_bytes;
1913 u32 *cs;
1914
1915
1916 GEM_BUG_ON(num_dwords & 1);
1917
1918 total_bytes = bytes + rq->reserved_space;
1919 GEM_BUG_ON(total_bytes > ring->effective_size);
1920
1921 if (unlikely(total_bytes > remain_usable)) {
1922 const int remain_actual = ring->size - ring->emit;
1923
1924 if (bytes > remain_usable) {
1925
1926
1927
1928
1929
1930 total_bytes += remain_actual;
1931 need_wrap = remain_actual | 1;
1932 } else {
1933
1934
1935
1936
1937
1938
1939 total_bytes = rq->reserved_space + remain_actual;
1940 }
1941 }
1942
1943 if (unlikely(total_bytes > ring->space)) {
1944 int ret;
1945
1946 /*
1947 * Space is reserved in the ringbuffer for finalising the
1948 * request, as that cannot be allowed to fail. During request
1949 * finalisation, reserved_space is set to 0 to stop the
1950 * overallocation and the assumption is that then we never need
1951 * to wait (which has the risk of failing with EINTR).
1952 * It is also possible to fail after the request has been started,
1953 * so the reserved space can only be added at the start of a request.
1954 */
1955 GEM_BUG_ON(!rq->reserved_space);
1956
1957 ret = wait_for_space(ring, rq->timeline, total_bytes);
1958 if (unlikely(ret))
1959 return ERR_PTR(ret);
1960 }
1961
1962 if (unlikely(need_wrap)) {
1963 need_wrap &= ~1;
1964 GEM_BUG_ON(need_wrap > ring->space);
1965 GEM_BUG_ON(ring->emit + need_wrap > ring->size);
1966 GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
1967
1968
1969 memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
1970 ring->space -= need_wrap;
1971 ring->emit = 0;
1972 }
1973
1974 GEM_BUG_ON(ring->emit > ring->size - bytes);
1975 GEM_BUG_ON(ring->space < bytes);
1976 cs = ring->vaddr + ring->emit;
1977 GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
1978 ring->emit += bytes;
1979 ring->space -= bytes;
1980
1981 return cs;
1982 }
1983
1984
1985 int intel_ring_cacheline_align(struct i915_request *rq)
1986 {
1987 int num_dwords;
1988 void *cs;
1989
1990 num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
1991 if (num_dwords == 0)
1992 return 0;
1993
1994 num_dwords = CACHELINE_DWORDS - num_dwords;
1995 GEM_BUG_ON(num_dwords & 1);
1996
1997 cs = intel_ring_begin(rq, num_dwords);
1998 if (IS_ERR(cs))
1999 return PTR_ERR(cs);
2000
2001 memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
2002 intel_ring_advance(rq, cs);
2003
2004 GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
2005 return 0;
2006 }
2007
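/*
 * gen6 BSD tail writes are bracketed by disabling the ring's idle
 * messaging (forcing the GT out of rc6), waiting for the sleep indicator
 * to clear, and re-enabling idle messages once the tail has been written.
 */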
2008 static void gen6_bsd_submit_request(struct i915_request *request)
2009 {
2010 struct intel_uncore *uncore = request->engine->uncore;
2011
2012 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
2013
2014
2015
2016
2017
2018
2019 intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
2020 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2021
2022
2023 intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
2024
2025
2026 if (__intel_wait_for_register_fw(uncore,
2027 GEN6_BSD_SLEEP_PSMI_CONTROL,
2028 GEN6_BSD_SLEEP_INDICATOR,
2029 0,
2030 1000, 0, NULL))
2031 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2032
2033
2034 i9xx_submit_request(request);
2035
2036
2037
2038
2039 intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
2040 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2041
2042 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
2043 }
2044
2045 static int mi_flush_dw(struct i915_request *rq, u32 flags)
2046 {
2047 u32 cmd, *cs;
2048
2049 cs = intel_ring_begin(rq, 4);
2050 if (IS_ERR(cs))
2051 return PTR_ERR(cs);
2052
2053 cmd = MI_FLUSH_DW;
2054
2055 /*
2056 * We always require a command barrier so that subsequent
2057 * commands, such as breadcrumb interrupts, are strictly ordered
2058 * wrt the contents of the write cache being flushed to memory
2059 * (and thus being coherent from the CPU).
2060 */
2061 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2062
2063
2064
2065
2066
2067
2068
2069 cmd |= flags;
2070
2071 *cs++ = cmd;
2072 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
2073 *cs++ = 0;
2074 *cs++ = MI_NOOP;
2075
2076 intel_ring_advance(rq, cs);
2077
2078 return 0;
2079 }
2080
2081 static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
2082 {
2083 return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
2084 }
2085
2086 static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
2087 {
2088 return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
2089 }
2090
2091 static int
2092 hsw_emit_bb_start(struct i915_request *rq,
2093 u64 offset, u32 len,
2094 unsigned int dispatch_flags)
2095 {
2096 u32 *cs;
2097
2098 cs = intel_ring_begin(rq, 2);
2099 if (IS_ERR(cs))
2100 return PTR_ERR(cs);
2101
2102 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
2103 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
2104
2105 *cs++ = offset;
2106 intel_ring_advance(rq, cs);
2107
2108 return 0;
2109 }
2110
2111 static int
2112 gen6_emit_bb_start(struct i915_request *rq,
2113 u64 offset, u32 len,
2114 unsigned int dispatch_flags)
2115 {
2116 u32 *cs;
2117
2118 cs = intel_ring_begin(rq, 2);
2119 if (IS_ERR(cs))
2120 return PTR_ERR(cs);
2121
2122 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
2123 0 : MI_BATCH_NON_SECURE_I965);
2124
2125 *cs++ = offset;
2126 intel_ring_advance(rq, cs);
2127
2128 return 0;
2129 }
2130
2131
2132
2133 static int gen6_ring_flush(struct i915_request *rq, u32 mode)
2134 {
2135 return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
2136 }
2137
2138 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
2139 {
2140 engine->submit_request = i9xx_submit_request;
2141 engine->cancel_requests = cancel_requests;
2142
2143 engine->park = NULL;
2144 engine->unpark = NULL;
2145 }
2146
2147 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
2148 {
2149 i9xx_set_default_submission(engine);
2150 engine->submit_request = gen6_bsd_submit_request;
2151 }
2152
2153 static void ring_destroy(struct intel_engine_cs *engine)
2154 {
2155 struct drm_i915_private *dev_priv = engine->i915;
2156
2157 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
2158 (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
2159
2160 intel_engine_cleanup_common(engine);
2161
2162 intel_ring_unpin(engine->legacy.ring);
2163 intel_ring_put(engine->legacy.ring);
2164
2165 intel_timeline_unpin(engine->legacy.timeline);
2166 intel_timeline_put(engine->legacy.timeline);
2167
2168 kfree(engine);
2169 }
2170
2171 static void setup_irq(struct intel_engine_cs *engine)
2172 {
2173 struct drm_i915_private *i915 = engine->i915;
2174
2175 if (INTEL_GEN(i915) >= 6) {
2176 engine->irq_enable = gen6_irq_enable;
2177 engine->irq_disable = gen6_irq_disable;
2178 } else if (INTEL_GEN(i915) >= 5) {
2179 engine->irq_enable = gen5_irq_enable;
2180 engine->irq_disable = gen5_irq_disable;
2181 } else if (INTEL_GEN(i915) >= 3) {
2182 engine->irq_enable = i9xx_irq_enable;
2183 engine->irq_disable = i9xx_irq_disable;
2184 } else {
2185 engine->irq_enable = i8xx_irq_enable;
2186 engine->irq_disable = i8xx_irq_disable;
2187 }
2188 }
2189
2190 static void setup_common(struct intel_engine_cs *engine)
2191 {
2192 struct drm_i915_private *i915 = engine->i915;
2193
2194 /* gen8+ are only supported with execlists */
2195 GEM_BUG_ON(INTEL_GEN(i915) >= 8);
2196
2197 setup_irq(engine);
2198
2199 engine->destroy = ring_destroy;
2200
2201 engine->resume = xcs_resume;
2202 engine->reset.prepare = reset_prepare;
2203 engine->reset.reset = reset_ring;
2204 engine->reset.finish = reset_finish;
2205
2206 engine->cops = &ring_context_ops;
2207 engine->request_alloc = ring_request_alloc;
2208
2209 /*
2210 * Using a global execution timeline; the previous final breadcrumb is
2211 * equivalent to our next initial breadcrumb, so we can elide
2212 * engine->emit_init_breadcrumb().
2213 */
2214 engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
2215 if (IS_GEN(i915, 5))
2216 engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
2217
2218 engine->set_default_submission = i9xx_set_default_submission;
2219
2220 if (INTEL_GEN(i915) >= 6)
2221 engine->emit_bb_start = gen6_emit_bb_start;
2222 else if (INTEL_GEN(i915) >= 4)
2223 engine->emit_bb_start = i965_emit_bb_start;
2224 else if (IS_I830(i915) || IS_I845G(i915))
2225 engine->emit_bb_start = i830_emit_bb_start;
2226 else
2227 engine->emit_bb_start = i915_emit_bb_start;
2228 }
2229
2230 static void setup_rcs(struct intel_engine_cs *engine)
2231 {
2232 struct drm_i915_private *i915 = engine->i915;
2233
2234 if (HAS_L3_DPF(i915))
2235 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2236
2237 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2238
2239 if (INTEL_GEN(i915) >= 7) {
2240 engine->emit_flush = gen7_render_ring_flush;
2241 engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
2242 } else if (IS_GEN(i915, 6)) {
2243 engine->emit_flush = gen6_render_ring_flush;
2244 engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
2245 } else if (IS_GEN(i915, 5)) {
2246 engine->emit_flush = gen4_render_ring_flush;
2247 } else {
2248 if (INTEL_GEN(i915) < 4)
2249 engine->emit_flush = gen2_render_ring_flush;
2250 else
2251 engine->emit_flush = gen4_render_ring_flush;
2252 engine->irq_enable_mask = I915_USER_INTERRUPT;
2253 }
2254
2255 if (IS_HASWELL(i915))
2256 engine->emit_bb_start = hsw_emit_bb_start;
2257
2258 engine->resume = rcs_resume;
2259 }
2260
2261 static void setup_vcs(struct intel_engine_cs *engine)
2262 {
2263 struct drm_i915_private *i915 = engine->i915;
2264
2265 if (INTEL_GEN(i915) >= 6) {
2266
2267 if (IS_GEN(i915, 6))
2268 engine->set_default_submission = gen6_bsd_set_default_submission;
2269 engine->emit_flush = gen6_bsd_ring_flush;
2270 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2271
2272 if (IS_GEN(i915, 6))
2273 engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
2274 else
2275 engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
2276 } else {
2277 engine->emit_flush = bsd_ring_flush;
2278 if (IS_GEN(i915, 5))
2279 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2280 else
2281 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2282 }
2283 }
2284
2285 static void setup_bcs(struct intel_engine_cs *engine)
2286 {
2287 struct drm_i915_private *i915 = engine->i915;
2288
2289 engine->emit_flush = gen6_ring_flush;
2290 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2291
2292 if (IS_GEN(i915, 6))
2293 engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
2294 else
2295 engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
2296 }
2297
2298 static void setup_vecs(struct intel_engine_cs *engine)
2299 {
2300 struct drm_i915_private *i915 = engine->i915;
2301
2302 GEM_BUG_ON(INTEL_GEN(i915) < 7);
2303
2304 engine->emit_flush = gen6_ring_flush;
2305 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2306 engine->irq_enable = hsw_vebox_irq_enable;
2307 engine->irq_disable = hsw_vebox_irq_disable;
2308
2309 engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
2310 }
2311
2312 int intel_ring_submission_setup(struct intel_engine_cs *engine)
2313 {
2314 setup_common(engine);
2315
2316 switch (engine->class) {
2317 case RENDER_CLASS:
2318 setup_rcs(engine);
2319 break;
2320 case VIDEO_DECODE_CLASS:
2321 setup_vcs(engine);
2322 break;
2323 case COPY_ENGINE_CLASS:
2324 setup_bcs(engine);
2325 break;
2326 case VIDEO_ENHANCEMENT_CLASS:
2327 setup_vecs(engine);
2328 break;
2329 default:
2330 MISSING_CASE(engine->class);
2331 return -ENODEV;
2332 }
2333
2334 return 0;
2335 }
2336
2337 int intel_ring_submission_init(struct intel_engine_cs *engine)
2338 {
2339 struct intel_timeline *timeline;
2340 struct intel_ring *ring;
2341 int err;
2342
2343 timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
2344 if (IS_ERR(timeline)) {
2345 err = PTR_ERR(timeline);
2346 goto err;
2347 }
2348 GEM_BUG_ON(timeline->has_initial_breadcrumb);
2349
2350 err = intel_timeline_pin(timeline);
2351 if (err)
2352 goto err_timeline;
2353
2354 ring = intel_engine_create_ring(engine, SZ_16K);
2355 if (IS_ERR(ring)) {
2356 err = PTR_ERR(ring);
2357 goto err_timeline_unpin;
2358 }
2359
2360 err = intel_ring_pin(ring);
2361 if (err)
2362 goto err_ring;
2363
2364 GEM_BUG_ON(engine->legacy.ring);
2365 engine->legacy.ring = ring;
2366 engine->legacy.timeline = timeline;
2367
2368 err = intel_engine_init_common(engine);
2369 if (err)
2370 goto err_ring_unpin;
2371
2372 GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
2373
2374 return 0;
2375
2376 err_ring_unpin:
2377 intel_ring_unpin(ring);
2378 err_ring:
2379 intel_ring_put(ring);
2380 err_timeline_unpin:
2381 intel_timeline_unpin(timeline);
2382 err_timeline:
2383 intel_timeline_put(timeline);
2384 err:
2385 intel_engine_cleanup_common(engine);
2386 return err;
2387 }