Lines Matching refs:ring

206 static int intel_lr_context_pin(struct intel_engine_cs *ring,
257 static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring, in execlists_ctx_descriptor() argument
260 struct drm_device *dev = ring->dev; in execlists_ctx_descriptor()
280 (ring->id == BCS || ring->id == VCS || in execlists_ctx_descriptor()
281 ring->id == VECS || ring->id == VCS2)) in execlists_ctx_descriptor()
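execlists_ctx_descriptor() packs the context's GGTT address (the LRCA), a valid bit, addressing-mode bits and a context ID into one 64-bit execlist descriptor; the check on ring->id above is a GEN9 workaround that additionally sets a force-restore bit on the non-render rings. A minimal standalone sketch of that packing; the valid bit at bit 0 and the context ID in the upper dword match the driver, the other bit positions are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

#define GEN8_CTX_VALID          (1ULL << 0)  /* element is valid */
#define GEN8_CTX_FORCE_RESTORE  (1ULL << 2)  /* early-GEN9 WA bit (illustrative) */
#define GEN8_CTX_ADDR_MODE      (1ULL << 3)  /* legacy addressing mode (illustrative) */
#define GEN8_CTX_ID_SHIFT       32           /* context ID lives in the upper dword */

/* Pack a descriptor: the LRCA is page aligned, so the low 12 bits are free for flags. */
static uint64_t ctx_descriptor(uint64_t lrca, uint32_t ctx_id, int needs_force_restore)
{
	uint64_t desc = GEN8_CTX_VALID | GEN8_CTX_ADDR_MODE;

	desc |= lrca & 0xFFFFF000ULL;                 /* page-aligned context address */
	desc |= (uint64_t)ctx_id << GEN8_CTX_ID_SHIFT;
	if (needs_force_restore)
		desc |= GEN8_CTX_FORCE_RESTORE;
	return desc;
}

int main(void)
{
	printf("desc = 0x%016llx\n",
	       (unsigned long long)ctx_descriptor(0x12345000ULL, 7, 1));
	return 0;
}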
287 static void execlists_elsp_write(struct intel_engine_cs *ring, in execlists_elsp_write() argument
291 struct drm_device *dev = ring->dev; in execlists_elsp_write()
298 temp = execlists_ctx_descriptor(ring, ctx_obj1); in execlists_elsp_write()
304 temp = execlists_ctx_descriptor(ring, ctx_obj0); in execlists_elsp_write()
309 I915_WRITE(RING_ELSP(ring), desc[1]); in execlists_elsp_write()
310 I915_WRITE(RING_ELSP(ring), desc[0]); in execlists_elsp_write()
311 I915_WRITE(RING_ELSP(ring), desc[3]); in execlists_elsp_write()
314 I915_WRITE(RING_ELSP(ring), desc[2]); in execlists_elsp_write()
317 POSTING_READ(RING_EXECLIST_STATUS(ring)); in execlists_elsp_write()
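The four I915_WRITE()s above are order-sensitive: ELSP is a single 32-bit port, and the two 64-bit descriptors must be streamed as upper/lower dwords of element 1 followed by upper/lower dwords of element 0. The hardware only kicks off submission on the final write, and the POSTING_READ flushes it. A self-contained sketch of that split, with a plain callback standing in for I915_WRITE:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for I915_WRITE(RING_ELSP(ring), val). */
static void mmio_write_elsp(uint32_t val)
{
	printf("ELSP <- 0x%08x\n", val);
}

/* Submit two execlist elements; desc1 may be 0 for an idle second port.
 * Order matters: upper/lower of element 1, then upper/lower of element 0.
 * The last write is what actually triggers the context switch. */
static void elsp_write(uint64_t desc0, uint64_t desc1)
{
	mmio_write_elsp((uint32_t)(desc1 >> 32)); /* desc[1]: element 1, upper dword */
	mmio_write_elsp((uint32_t)desc1);         /* desc[0]: element 1, lower dword */
	mmio_write_elsp((uint32_t)(desc0 >> 32)); /* desc[3]: element 0, upper dword */
	mmio_write_elsp((uint32_t)desc0);         /* desc[2]: element 0, lower; submits */
}

int main(void)
{
	elsp_write(0x0000000712345001ULL, 0);
	return 0;
}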
339 static void execlists_submit_contexts(struct intel_engine_cs *ring, in execlists_submit_contexts() argument
343 struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state; in execlists_submit_contexts()
344 struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf; in execlists_submit_contexts()
355 ringbuf1 = to1->engine[ring->id].ringbuf; in execlists_submit_contexts()
356 ctx_obj1 = to1->engine[ring->id].state; in execlists_submit_contexts()
364 execlists_elsp_write(ring, ctx_obj0, ctx_obj1); in execlists_submit_contexts()
367 static void execlists_context_unqueue(struct intel_engine_cs *ring) in execlists_context_unqueue() argument
372 assert_spin_locked(&ring->execlist_lock); in execlists_context_unqueue()
374 if (list_empty(&ring->execlist_queue)) in execlists_context_unqueue()
378 list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue, in execlists_context_unqueue()
388 &ring->execlist_retired_req_list); in execlists_context_unqueue()
396 if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) { in execlists_context_unqueue()
410 ringbuf = req0->ctx->engine[ring->id].ringbuf; in execlists_context_unqueue()
418 execlists_submit_contexts(ring, req0->ctx, req0->tail, in execlists_context_unqueue()
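execlists_context_unqueue() walks the software queue and coalesces consecutive requests that target the same context, retiring the overwritten ones, so that at most two distinct contexts (req0 and req1) go to the two ELSP ports. A minimal sketch of that coalescing pass, with a plain array standing in for execlist_queue:

#include <stdio.h>

struct request { int ctx_id; unsigned int tail; };

/* Pick up to two submissions from a FIFO queue, merging runs of requests
 * for the same context by keeping only the newest tail. Returns how many
 * ports were filled; anything past req1 stays queued. */
static int pick_two(const struct request *q, int n,
		    struct request *req0, struct request *req1)
{
	int found = 0;
	for (int i = 0; i < n; i++) {
		if (found == 0) {
			*req0 = q[i];
			found = 1;
		} else if (q[i].ctx_id == req0->ctx_id) {
			req0->tail = q[i].tail;   /* coalesce: newer tail wins */
		} else {
			*req1 = q[i];
			found = 2;
			break;
		}
	}
	return found;
}

int main(void)
{
	struct request q[] = { {1, 0x100}, {1, 0x180}, {2, 0x040}, {1, 0x200} };
	struct request r0, r1;
	int n = pick_two(q, 4, &r0, &r1);
	printf("ports=%d req0={ctx %d, tail 0x%x}\n", n, r0.ctx_id, r0.tail);
	return 0;
}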
427 static bool execlists_check_remove_request(struct intel_engine_cs *ring, in execlists_check_remove_request() argument
432 assert_spin_locked(&ring->execlist_lock); in execlists_check_remove_request()
434 head_req = list_first_entry_or_null(&ring->execlist_queue, in execlists_check_remove_request()
440 head_req->ctx->engine[ring->id].state; in execlists_check_remove_request()
448 &ring->execlist_retired_req_list); in execlists_check_remove_request()
464 void intel_lrc_irq_handler(struct intel_engine_cs *ring) in intel_lrc_irq_handler() argument
466 struct drm_i915_private *dev_priv = ring->dev->dev_private; in intel_lrc_irq_handler()
474 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); in intel_lrc_irq_handler()
476 read_pointer = ring->next_context_status_buffer; in intel_lrc_irq_handler()
481 spin_lock(&ring->execlist_lock); in intel_lrc_irq_handler()
485 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + in intel_lrc_irq_handler()
487 status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + in intel_lrc_irq_handler()
492 if (execlists_check_remove_request(ring, status_id)) in intel_lrc_irq_handler()
500 if (execlists_check_remove_request(ring, status_id)) in intel_lrc_irq_handler()
506 execlists_context_unqueue(ring); in intel_lrc_irq_handler()
508 spin_unlock(&ring->execlist_lock); in intel_lrc_irq_handler()
511 ring->next_context_status_buffer = write_pointer % 6; in intel_lrc_irq_handler()
513 I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), in intel_lrc_irq_handler()
514 ((u32)ring->next_context_status_buffer & 0x07) << 8); in intel_lrc_irq_handler()
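The interrupt handler drains the context status buffer (CSB): six two-dword entries in a ring, with the hardware's write pointer published in RING_CONTEXT_STATUS_PTR and the software read pointer kept in next_context_status_buffer (the final write above reports the new read pointer back through the same register's 3-bit field, hence the 0x07 mask). A sketch of the pointer walk modulo the six entries, assuming a simple array stands in for the MMIO status buffer:

#include <stdint.h>
#include <stdio.h>

#define CSB_ENTRIES 6

struct csb_entry { uint32_t status; uint32_t ctx_id; };

/* Walk the status buffer from the software read pointer up to the hardware
 * write pointer, wrapping modulo CSB_ENTRIES. Returns the new read pointer
 * to be written back to the status-pointer register. */
static unsigned int drain_csb(const struct csb_entry *csb,
			      unsigned int read_ptr, unsigned int write_ptr)
{
	if (read_ptr > write_ptr)
		write_ptr += CSB_ENTRIES;        /* unwrap */

	while (read_ptr < write_ptr) {
		const struct csb_entry *e = &csb[++read_ptr % CSB_ENTRIES];
		printf("event: status=0x%08x ctx=0x%08x\n", e->status, e->ctx_id);
	}
	return read_ptr % CSB_ENTRIES;
}

int main(void)
{
	struct csb_entry csb[CSB_ENTRIES] = { {0, 0}, {0x8, 1}, {0x10, 1} };
	printf("next read pointer: %u\n", drain_csb(csb, 0, 2));
	return 0;
}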
517 static int execlists_context_queue(struct intel_engine_cs *ring, in execlists_context_queue() argument
523 struct drm_i915_private *dev_priv = ring->dev->dev_private; in execlists_context_queue()
527 if (to != ring->default_context) in execlists_context_queue()
528 intel_lr_context_pin(ring, to); in execlists_context_queue()
538 request->ring = ring; in execlists_context_queue()
551 spin_lock_irqsave(&ring->execlist_lock, flags); in execlists_context_queue()
553 list_for_each_entry(cursor, &ring->execlist_queue, execlist_link) in execlists_context_queue()
560 tail_req = list_last_entry(&ring->execlist_queue, in execlists_context_queue()
569 &ring->execlist_retired_req_list); in execlists_context_queue()
573 list_add_tail(&request->execlist_link, &ring->execlist_queue); in execlists_context_queue()
575 execlists_context_unqueue(ring); in execlists_context_queue()
577 spin_unlock_irqrestore(&ring->execlist_lock, flags); in execlists_context_queue()
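execlists_context_queue() appends under execlist_lock with interrupts disabled, since the same lock is taken from the interrupt handler above. Once more than two requests are queued (the first two may already sit in the ELSP ports), a new request for the same context as the queue tail subsumes it, and the old tail request moves straight to the retired list. A sketch of that tail-coalescing append, with a pthread mutex standing in for the irqsave spinlock:

#include <stdio.h>
#include <pthread.h>

struct request { int ctx_id; unsigned int tail; };

static struct request queue[16];
static int queue_len;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Append a request; if at least two are already queued and the current tail
 * is for the same context, drop it first -- the new tail covers its work. */
static void context_queue(int ctx_id, unsigned int tail)
{
	pthread_mutex_lock(&queue_lock);
	if (queue_len >= 2 && queue[queue_len - 1].ctx_id == ctx_id)
		queue_len--;                      /* retire the subsumed request */
	queue[queue_len].ctx_id = ctx_id;
	queue[queue_len].tail = tail;
	queue_len++;
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	context_queue(1, 0x100);
	context_queue(2, 0x040);
	context_queue(1, 0x0c0);
	context_queue(1, 0x180);  /* subsumes the previous ctx-1 request */
	printf("%d requests queued\n", queue_len);  /* 3 */
	return 0;
}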
585 struct intel_engine_cs *ring = ringbuf->ring; in logical_ring_invalidate_all_caches() local
590 if (ring->gpu_caches_dirty) in logical_ring_invalidate_all_caches()
593 ret = ring->emit_flush(ringbuf, ctx, in logical_ring_invalidate_all_caches()
598 ring->gpu_caches_dirty = false; in logical_ring_invalidate_all_caches()
606 struct intel_engine_cs *ring = ringbuf->ring; in execlists_move_to_gpu() local
615 ret = i915_gem_object_sync(obj, ring); in execlists_move_to_gpu()
652 struct intel_engine_cs *ring, in intel_execlists_submission() argument
660 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; in intel_execlists_submission()
671 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) { in intel_execlists_submission()
715 if (ring == &dev_priv->ring[RCS] && in intel_execlists_submission()
730 ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags); in intel_execlists_submission()
734 trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags); in intel_execlists_submission()
736 i915_gem_execbuffer_move_to_active(vmas, ring); in intel_execlists_submission()
737 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); in intel_execlists_submission()
742 void intel_execlists_retire_requests(struct intel_engine_cs *ring) in intel_execlists_retire_requests() argument
745 struct drm_i915_private *dev_priv = ring->dev->dev_private; in intel_execlists_retire_requests()
749 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); in intel_execlists_retire_requests()
750 if (list_empty(&ring->execlist_retired_req_list)) in intel_execlists_retire_requests()
754 spin_lock_irqsave(&ring->execlist_lock, flags); in intel_execlists_retire_requests()
755 list_replace_init(&ring->execlist_retired_req_list, &retired_list); in intel_execlists_retire_requests()
756 spin_unlock_irqrestore(&ring->execlist_lock, flags); in intel_execlists_retire_requests()
761 ctx->engine[ring->id].state; in intel_execlists_retire_requests()
763 if (ctx_obj && (ctx != ring->default_context)) in intel_execlists_retire_requests()
764 intel_lr_context_unpin(ring, ctx); in intel_execlists_retire_requests()
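intel_execlists_retire_requests() uses a classic two-phase pattern: list_replace_init() detaches the whole retired list onto a local head while the spinlock is held, and the per-request work (unpinning the context, dropping references) happens with the lock dropped. A standalone sketch of the same grab-then-drain idea, using a singly linked list and a pthread mutex:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct node { int id; struct node *next; };

static struct node *retired_head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void retire_add(int id)
{
	struct node *n = malloc(sizeof(*n));
	n->id = id;
	pthread_mutex_lock(&lock);
	n->next = retired_head;
	retired_head = n;
	pthread_mutex_unlock(&lock);
}

/* Detach the whole list under the lock (the list_replace_init() step),
 * then do the expensive per-node work with the lock dropped. */
static void retire_all(void)
{
	pthread_mutex_lock(&lock);
	struct node *local = retired_head;
	retired_head = NULL;
	pthread_mutex_unlock(&lock);

	while (local) {
		struct node *next = local->next;
		printf("retiring request %d\n", local->id);  /* unpin ctx, free req */
		free(local);
		local = next;
	}
}

int main(void)
{
	retire_add(1);
	retire_add(2);
	retire_all();
	return 0;
}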
771 void intel_logical_ring_stop(struct intel_engine_cs *ring) in intel_logical_ring_stop() argument
773 struct drm_i915_private *dev_priv = ring->dev->dev_private; in intel_logical_ring_stop()
776 if (!intel_ring_initialized(ring)) in intel_logical_ring_stop()
779 ret = intel_ring_idle(ring); in intel_logical_ring_stop()
780 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error)) in intel_logical_ring_stop()
782 ring->name, ret); in intel_logical_ring_stop()
785 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); in intel_logical_ring_stop()
786 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { in intel_logical_ring_stop()
787 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); in intel_logical_ring_stop()
790 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); in intel_logical_ring_stop()
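STOP_RING is set through a masked register: the top 16 bits of the written value select which bits to change and the low 16 bits give their new values, so no read-modify-write cycle is needed. _MASKED_BIT_ENABLE(x) expands to roughly (x << 16) | x and _MASKED_BIT_DISABLE(x) to x << 16. A sketch of how such a masked write updates the register (the bit position is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(x)  (((x) << 16) | (x))  /* select bit, set it */
#define MASKED_BIT_DISABLE(x) ((x) << 16)          /* select bit, clear it */

/* Apply a masked write: only bits whose mask half is set are touched. */
static uint16_t masked_write(uint16_t reg, uint32_t val)
{
	uint16_t mask = val >> 16;
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t mode = 0x0004;       /* some unrelated bit already set */
	uint16_t stop_ring = 0x0100;  /* illustrative bit position */

	mode = masked_write(mode, MASKED_BIT_ENABLE(stop_ring));
	printf("after enable:  0x%04x\n", mode);   /* 0x0104: other bits kept */
	mode = masked_write(mode, MASKED_BIT_DISABLE(stop_ring));
	printf("after disable: 0x%04x\n", mode);   /* 0x0004 */
	return 0;
}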
796 struct intel_engine_cs *ring = ringbuf->ring; in logical_ring_flush_all_caches() local
799 if (!ring->gpu_caches_dirty) in logical_ring_flush_all_caches()
802 ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS); in logical_ring_flush_all_caches()
806 ring->gpu_caches_dirty = false; in logical_ring_flush_all_caches()
824 struct intel_engine_cs *ring = ringbuf->ring; in intel_logical_ring_advance_and_submit() local
828 if (intel_ring_stopped(ring)) in intel_logical_ring_advance_and_submit()
831 execlists_context_queue(ring, ctx, ringbuf->tail, request); in intel_logical_ring_advance_and_submit()
834 static int intel_lr_context_pin(struct intel_engine_cs *ring, in intel_lr_context_pin() argument
837 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; in intel_lr_context_pin()
838 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; in intel_lr_context_pin()
841 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); in intel_lr_context_pin()
842 if (ctx->engine[ring->id].pin_count++ == 0) { in intel_lr_context_pin()
848 ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf); in intel_lr_context_pin()
860 ctx->engine[ring->id].pin_count = 0; in intel_lr_context_pin()
865 void intel_lr_context_unpin(struct intel_engine_cs *ring, in intel_lr_context_unpin() argument
868 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; in intel_lr_context_unpin()
869 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; in intel_lr_context_unpin()
872 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); in intel_lr_context_unpin()
873 if (--ctx->engine[ring->id].pin_count == 0) { in intel_lr_context_unpin()
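intel_lr_context_pin()/unpin() keep a per-engine pin count: the expensive work (pinning the context object, mapping the ring buffer) runs only on the 0-to-1 transition, is undone only on the 1-to-0 transition, and on failure the count is reset so a later pin retries (the pin_count = 0 at line 860 above). A sketch of that transition-counting pattern:

#include <stdio.h>

struct lr_context { int pin_count; int mapped; };

static int expensive_pin(struct lr_context *c)    { c->mapped = 1; return 0; }
static void expensive_unpin(struct lr_context *c) { c->mapped = 0; }

/* Pin: real work only on the first reference. */
static int context_pin(struct lr_context *c)
{
	if (c->pin_count++ == 0) {
		int ret = expensive_pin(c);
		if (ret) {
			c->pin_count = 0;  /* reset so the next pin retries */
			return ret;
		}
	}
	return 0;
}

/* Unpin: real work only when the last reference drops. */
static void context_unpin(struct lr_context *c)
{
	if (--c->pin_count == 0)
		expensive_unpin(c);
}

int main(void)
{
	struct lr_context ctx = {0, 0};
	context_pin(&ctx);
	context_pin(&ctx);               /* nested pin: no extra work */
	context_unpin(&ctx);
	printf("mapped=%d pin_count=%d\n", ctx.mapped, ctx.pin_count); /* 1 1 */
	context_unpin(&ctx);
	printf("mapped=%d pin_count=%d\n", ctx.mapped, ctx.pin_count); /* 0 0 */
	return 0;
}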
880 static int logical_ring_alloc_request(struct intel_engine_cs *ring, in logical_ring_alloc_request() argument
884 struct drm_i915_private *dev_private = ring->dev->dev_private; in logical_ring_alloc_request()
887 if (ring->outstanding_lazy_request) in logical_ring_alloc_request()
894 if (ctx != ring->default_context) { in logical_ring_alloc_request()
895 ret = intel_lr_context_pin(ring, ctx); in logical_ring_alloc_request()
903 request->ring = ring; in logical_ring_alloc_request()
906 ret = i915_gem_get_seqno(ring->dev, &request->seqno); in logical_ring_alloc_request()
908 intel_lr_context_unpin(ring, ctx); in logical_ring_alloc_request()
915 request->ringbuf = ctx->engine[ring->id].ringbuf; in logical_ring_alloc_request()
917 ring->outstanding_lazy_request = request; in logical_ring_alloc_request()
924 struct intel_engine_cs *ring = ringbuf->ring; in logical_ring_wait_request() local
931 list_for_each_entry(request, &ring->request_list, list) { in logical_ring_wait_request()
938 if (ctx->engine[ring->id].ringbuf != ringbuf) in logical_ring_wait_request()
948 if (&request->list == &ring->request_list) in logical_ring_wait_request()
955 i915_gem_retire_requests_ring(ring); in logical_ring_wait_request()
964 struct intel_engine_cs *ring = ringbuf->ring; in logical_ring_wait_for_space() local
965 struct drm_device *dev = ring->dev; in logical_ring_wait_for_space()
1070 struct intel_engine_cs *ring = ringbuf->ring; in intel_logical_ring_begin() local
1071 struct drm_device *dev = ring->dev; in intel_logical_ring_begin()
1085 ret = logical_ring_alloc_request(ring, ctx); in intel_logical_ring_begin()
1093 static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring, in intel_logical_ring_workarounds_emit() argument
1097 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; in intel_logical_ring_workarounds_emit()
1098 struct drm_device *dev = ring->dev; in intel_logical_ring_workarounds_emit()
1105 ring->gpu_caches_dirty = true; in intel_logical_ring_workarounds_emit()
1123 ring->gpu_caches_dirty = true; in intel_logical_ring_workarounds_emit()
1131 static int gen8_init_common_ring(struct intel_engine_cs *ring) in gen8_init_common_ring() argument
1133 struct drm_device *dev = ring->dev; in gen8_init_common_ring()
1136 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); in gen8_init_common_ring()
1137 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); in gen8_init_common_ring()
1139 if (ring->status_page.obj) { in gen8_init_common_ring()
1140 I915_WRITE(RING_HWS_PGA(ring->mmio_base), in gen8_init_common_ring()
1141 (u32)ring->status_page.gfx_addr); in gen8_init_common_ring()
1142 POSTING_READ(RING_HWS_PGA(ring->mmio_base)); in gen8_init_common_ring()
1145 I915_WRITE(RING_MODE_GEN7(ring), in gen8_init_common_ring()
1148 POSTING_READ(RING_MODE_GEN7(ring)); in gen8_init_common_ring()
1149 ring->next_context_status_buffer = 0; in gen8_init_common_ring()
1150 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); in gen8_init_common_ring()
1152 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); in gen8_init_common_ring()
1157 static int gen8_init_render_ring(struct intel_engine_cs *ring) in gen8_init_render_ring() argument
1159 struct drm_device *dev = ring->dev; in gen8_init_render_ring()
1163 ret = gen8_init_common_ring(ring); in gen8_init_render_ring()
1177 return init_workarounds_ring(ring); in gen8_init_render_ring()
1180 static int gen9_init_render_ring(struct intel_engine_cs *ring) in gen9_init_render_ring() argument
1184 ret = gen8_init_common_ring(ring); in gen9_init_render_ring()
1188 return init_workarounds_ring(ring); in gen9_init_render_ring()
1212 static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring) in gen8_logical_ring_get_irq() argument
1214 struct drm_device *dev = ring->dev; in gen8_logical_ring_get_irq()
1222 if (ring->irq_refcount++ == 0) { in gen8_logical_ring_get_irq()
1223 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); in gen8_logical_ring_get_irq()
1224 POSTING_READ(RING_IMR(ring->mmio_base)); in gen8_logical_ring_get_irq()
1231 static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring) in gen8_logical_ring_put_irq() argument
1233 struct drm_device *dev = ring->dev; in gen8_logical_ring_put_irq()
1238 if (--ring->irq_refcount == 0) { in gen8_logical_ring_put_irq()
1239 I915_WRITE_IMR(ring, ~ring->irq_keep_mask); in gen8_logical_ring_put_irq()
1240 POSTING_READ(RING_IMR(ring->mmio_base)); in gen8_logical_ring_put_irq()
1250 struct intel_engine_cs *ring = ringbuf->ring; in gen8_emit_flush() local
1251 struct drm_device *dev = ring->dev; in gen8_emit_flush()
1271 if (ring == &dev_priv->ring[VCS]) in gen8_emit_flush()
1291 struct intel_engine_cs *ring = ringbuf->ring; in gen8_emit_flush_render() local
1292 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; in gen8_emit_flush_render()
1330 static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) in gen8_get_seqno() argument
1332 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); in gen8_get_seqno()
1335 static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno) in gen8_set_seqno() argument
1337 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); in gen8_set_seqno()
1343 struct intel_engine_cs *ring = ringbuf->ring; in gen8_emit_request() local
1361 (ring->status_page.gfx_addr + in gen8_emit_request()
1365 i915_gem_request_get_seqno(ring->outstanding_lazy_request)); in gen8_emit_request()
1381 static int intel_lr_context_render_state_init(struct intel_engine_cs *ring, in intel_lr_context_render_state_init() argument
1384 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; in intel_lr_context_render_state_init()
1390 ret = i915_gem_render_state_prepare(ring, &so); in intel_lr_context_render_state_init()
1397 ret = ring->emit_bb_start(ringbuf, in intel_lr_context_render_state_init()
1404 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring); in intel_lr_context_render_state_init()
1406 ret = __i915_add_request(ring, file, so.obj); in intel_lr_context_render_state_init()
1414 static int gen8_init_rcs_context(struct intel_engine_cs *ring, in gen8_init_rcs_context() argument
1419 ret = intel_logical_ring_workarounds_emit(ring, ctx); in gen8_init_rcs_context()
1423 return intel_lr_context_render_state_init(ring, ctx); in gen8_init_rcs_context()
1432 void intel_logical_ring_cleanup(struct intel_engine_cs *ring) in intel_logical_ring_cleanup() argument
1436 if (!intel_ring_initialized(ring)) in intel_logical_ring_cleanup()
1439 dev_priv = ring->dev->dev_private; in intel_logical_ring_cleanup()
1441 intel_logical_ring_stop(ring); in intel_logical_ring_cleanup()
1442 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); in intel_logical_ring_cleanup()
1443 i915_gem_request_assign(&ring->outstanding_lazy_request, NULL); in intel_logical_ring_cleanup()
1445 if (ring->cleanup) in intel_logical_ring_cleanup()
1446 ring->cleanup(ring); in intel_logical_ring_cleanup()
1448 i915_cmd_parser_fini_ring(ring); in intel_logical_ring_cleanup()
1450 if (ring->status_page.obj) { in intel_logical_ring_cleanup()
1451 kunmap(sg_page(ring->status_page.obj->pages->sgl)); in intel_logical_ring_cleanup()
1452 ring->status_page.obj = NULL; in intel_logical_ring_cleanup()
1456 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) in logical_ring_init() argument
1461 ring->buffer = NULL; in logical_ring_init()
1463 ring->dev = dev; in logical_ring_init()
1464 INIT_LIST_HEAD(&ring->active_list); in logical_ring_init()
1465 INIT_LIST_HEAD(&ring->request_list); in logical_ring_init()
1466 init_waitqueue_head(&ring->irq_queue); in logical_ring_init()
1468 INIT_LIST_HEAD(&ring->execlist_queue); in logical_ring_init()
1469 INIT_LIST_HEAD(&ring->execlist_retired_req_list); in logical_ring_init()
1470 spin_lock_init(&ring->execlist_lock); in logical_ring_init()
1472 ret = i915_cmd_parser_init_ring(ring); in logical_ring_init()
1476 ret = intel_lr_context_deferred_create(ring->default_context, ring); in logical_ring_init()
1484 struct intel_engine_cs *ring = &dev_priv->ring[RCS]; in logical_render_ring_init() local
1487 ring->name = "render ring"; in logical_render_ring_init()
1488 ring->id = RCS; in logical_render_ring_init()
1489 ring->mmio_base = RENDER_RING_BASE; in logical_render_ring_init()
1490 ring->irq_enable_mask = in logical_render_ring_init()
1492 ring->irq_keep_mask = in logical_render_ring_init()
1495 ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; in logical_render_ring_init()
1498 ring->init_hw = gen9_init_render_ring; in logical_render_ring_init()
1500 ring->init_hw = gen8_init_render_ring; in logical_render_ring_init()
1501 ring->init_context = gen8_init_rcs_context; in logical_render_ring_init()
1502 ring->cleanup = intel_fini_pipe_control; in logical_render_ring_init()
1503 ring->get_seqno = gen8_get_seqno; in logical_render_ring_init()
1504 ring->set_seqno = gen8_set_seqno; in logical_render_ring_init()
1505 ring->emit_request = gen8_emit_request; in logical_render_ring_init()
1506 ring->emit_flush = gen8_emit_flush_render; in logical_render_ring_init()
1507 ring->irq_get = gen8_logical_ring_get_irq; in logical_render_ring_init()
1508 ring->irq_put = gen8_logical_ring_put_irq; in logical_render_ring_init()
1509 ring->emit_bb_start = gen8_emit_bb_start; in logical_render_ring_init()
1511 ring->dev = dev; in logical_render_ring_init()
1512 ret = logical_ring_init(dev, ring); in logical_render_ring_init()
1516 return intel_init_pipe_control(ring); in logical_render_ring_init()
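Each logical_*_ring_init() below fills an intel_engine_cs with function pointers, so common code can call ring->emit_flush() and friends without knowing which engine it is driving; only the render ring swaps in gen9_init_render_ring and the render-specific flush. A condensed sketch of the same vtable pattern (the types and function names here are illustrative, not the driver's):

#include <stdio.h>

struct engine;
struct engine_ops {
	int  (*init_hw)(struct engine *e);
	void (*emit_flush)(struct engine *e);
};

struct engine { const char *name; struct engine_ops ops; };

static int  gen8_init_common(struct engine *e) { printf("%s: common init\n", e->name); return 0; }
static int  gen9_init_render(struct engine *e) { printf("%s: gen9 render init\n", e->name); return 0; }
static void gen8_flush(struct engine *e)        { printf("%s: flush\n", e->name); }
static void gen8_flush_render(struct engine *e) { printf("%s: render flush\n", e->name); }

static void render_ring_init(struct engine *e, int gen)
{
	e->name = "render ring";
	e->ops.init_hw = (gen >= 9) ? gen9_init_render : gen8_init_common;
	e->ops.emit_flush = gen8_flush_render;  /* render takes the PIPE_CONTROL path */
}

static void bsd_ring_init(struct engine *e)
{
	e->name = "bsd ring";
	e->ops.init_hw = gen8_init_common;
	e->ops.emit_flush = gen8_flush;
}

int main(void)
{
	struct engine rcs, vcs;
	render_ring_init(&rcs, 9);
	bsd_ring_init(&vcs);
	rcs.ops.init_hw(&rcs);
	vcs.ops.emit_flush(&vcs);
	return 0;
}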
1522 struct intel_engine_cs *ring = &dev_priv->ring[VCS]; in logical_bsd_ring_init() local
1524 ring->name = "bsd ring"; in logical_bsd_ring_init()
1525 ring->id = VCS; in logical_bsd_ring_init()
1526 ring->mmio_base = GEN6_BSD_RING_BASE; in logical_bsd_ring_init()
1527 ring->irq_enable_mask = in logical_bsd_ring_init()
1529 ring->irq_keep_mask = in logical_bsd_ring_init()
1532 ring->init_hw = gen8_init_common_ring; in logical_bsd_ring_init()
1533 ring->get_seqno = gen8_get_seqno; in logical_bsd_ring_init()
1534 ring->set_seqno = gen8_set_seqno; in logical_bsd_ring_init()
1535 ring->emit_request = gen8_emit_request; in logical_bsd_ring_init()
1536 ring->emit_flush = gen8_emit_flush; in logical_bsd_ring_init()
1537 ring->irq_get = gen8_logical_ring_get_irq; in logical_bsd_ring_init()
1538 ring->irq_put = gen8_logical_ring_put_irq; in logical_bsd_ring_init()
1539 ring->emit_bb_start = gen8_emit_bb_start; in logical_bsd_ring_init()
1541 return logical_ring_init(dev, ring); in logical_bsd_ring_init()
1547 struct intel_engine_cs *ring = &dev_priv->ring[VCS2]; in logical_bsd2_ring_init() local
1549 ring->name = "bds2 ring"; in logical_bsd2_ring_init()
1550 ring->id = VCS2; in logical_bsd2_ring_init()
1551 ring->mmio_base = GEN8_BSD2_RING_BASE; in logical_bsd2_ring_init()
1552 ring->irq_enable_mask = in logical_bsd2_ring_init()
1554 ring->irq_keep_mask = in logical_bsd2_ring_init()
1557 ring->init_hw = gen8_init_common_ring; in logical_bsd2_ring_init()
1558 ring->get_seqno = gen8_get_seqno; in logical_bsd2_ring_init()
1559 ring->set_seqno = gen8_set_seqno; in logical_bsd2_ring_init()
1560 ring->emit_request = gen8_emit_request; in logical_bsd2_ring_init()
1561 ring->emit_flush = gen8_emit_flush; in logical_bsd2_ring_init()
1562 ring->irq_get = gen8_logical_ring_get_irq; in logical_bsd2_ring_init()
1563 ring->irq_put = gen8_logical_ring_put_irq; in logical_bsd2_ring_init()
1564 ring->emit_bb_start = gen8_emit_bb_start; in logical_bsd2_ring_init()
1566 return logical_ring_init(dev, ring); in logical_bsd2_ring_init()
1572 struct intel_engine_cs *ring = &dev_priv->ring[BCS]; in logical_blt_ring_init() local
1574 ring->name = "blitter ring"; in logical_blt_ring_init()
1575 ring->id = BCS; in logical_blt_ring_init()
1576 ring->mmio_base = BLT_RING_BASE; in logical_blt_ring_init()
1577 ring->irq_enable_mask = in logical_blt_ring_init()
1579 ring->irq_keep_mask = in logical_blt_ring_init()
1582 ring->init_hw = gen8_init_common_ring; in logical_blt_ring_init()
1583 ring->get_seqno = gen8_get_seqno; in logical_blt_ring_init()
1584 ring->set_seqno = gen8_set_seqno; in logical_blt_ring_init()
1585 ring->emit_request = gen8_emit_request; in logical_blt_ring_init()
1586 ring->emit_flush = gen8_emit_flush; in logical_blt_ring_init()
1587 ring->irq_get = gen8_logical_ring_get_irq; in logical_blt_ring_init()
1588 ring->irq_put = gen8_logical_ring_put_irq; in logical_blt_ring_init()
1589 ring->emit_bb_start = gen8_emit_bb_start; in logical_blt_ring_init()
1591 return logical_ring_init(dev, ring); in logical_blt_ring_init()
1597 struct intel_engine_cs *ring = &dev_priv->ring[VECS]; in logical_vebox_ring_init() local
1599 ring->name = "video enhancement ring"; in logical_vebox_ring_init()
1600 ring->id = VECS; in logical_vebox_ring_init()
1601 ring->mmio_base = VEBOX_RING_BASE; in logical_vebox_ring_init()
1602 ring->irq_enable_mask = in logical_vebox_ring_init()
1604 ring->irq_keep_mask = in logical_vebox_ring_init()
1607 ring->init_hw = gen8_init_common_ring; in logical_vebox_ring_init()
1608 ring->get_seqno = gen8_get_seqno; in logical_vebox_ring_init()
1609 ring->set_seqno = gen8_set_seqno; in logical_vebox_ring_init()
1610 ring->emit_request = gen8_emit_request; in logical_vebox_ring_init()
1611 ring->emit_flush = gen8_emit_flush; in logical_vebox_ring_init()
1612 ring->irq_get = gen8_logical_ring_get_irq; in logical_vebox_ring_init()
1613 ring->irq_put = gen8_logical_ring_put_irq; in logical_vebox_ring_init()
1614 ring->emit_bb_start = gen8_emit_bb_start; in logical_vebox_ring_init()
1616 return logical_ring_init(dev, ring); in logical_vebox_ring_init()
1669 intel_logical_ring_cleanup(&dev_priv->ring[VCS2]); in intel_logical_rings_init()
1671 intel_logical_ring_cleanup(&dev_priv->ring[VECS]); in intel_logical_rings_init()
1673 intel_logical_ring_cleanup(&dev_priv->ring[BCS]); in intel_logical_rings_init()
1675 intel_logical_ring_cleanup(&dev_priv->ring[VCS]); in intel_logical_rings_init()
1677 intel_logical_ring_cleanup(&dev_priv->ring[RCS]); in intel_logical_rings_init()
1727 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf) in populate_lr_context() argument
1729 struct drm_device *dev = ring->dev; in populate_lr_context()
1763 if (ring->id == RCS) in populate_lr_context()
1768 reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring); in populate_lr_context()
1772 reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base); in populate_lr_context()
1774 reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base); in populate_lr_context()
1776 reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base); in populate_lr_context()
1780 reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base); in populate_lr_context()
1783 reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168; in populate_lr_context()
1785 reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140; in populate_lr_context()
1787 reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110; in populate_lr_context()
1789 reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c; in populate_lr_context()
1791 reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114; in populate_lr_context()
1793 reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118; in populate_lr_context()
1795 if (ring->id == RCS) { in populate_lr_context()
1799 reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0; in populate_lr_context()
1801 reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4; in populate_lr_context()
1803 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8; in populate_lr_context()
1808 reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8; in populate_lr_context()
1810 reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3); in populate_lr_context()
1811 reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3); in populate_lr_context()
1812 reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2); in populate_lr_context()
1813 reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2); in populate_lr_context()
1814 reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1); in populate_lr_context()
1815 reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1); in populate_lr_context()
1816 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0); in populate_lr_context()
1817 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0); in populate_lr_context()
1826 if (ring->id == RCS) { in populate_lr_context()
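populate_lr_context() fills the context image the way the hardware expects to restore it: MI_LOAD_REGISTER_IMM blocks in which each CTX_* slot holds a register offset (RING_TAIL, RING_START, the PDP entries, all relative to the engine's mmio_base) and the following slot its value. A sketch of writing such offset/value pairs into a u32 array; the command encoding and register offsets follow the usual i915 conventions but should be treated as illustrative here:

#include <stdint.h>
#include <stdio.h>

#define MI_LOAD_REGISTER_IMM(n)  ((0x22U << 23) | (2 * (n) - 1))

/* Append one register/value pair to the context image. */
static int emit_reg(uint32_t *state, int i, uint32_t reg, uint32_t val)
{
	state[i++] = reg;
	state[i++] = val;
	return i;
}

int main(void)
{
	uint32_t mmio_base = 0x02000;  /* e.g. the render ring's register block */
	uint32_t state[64];
	int i = 0;

	state[i++] = MI_LOAD_REGISTER_IMM(3);
	i = emit_reg(state, i, mmio_base + 0x34, 0);        /* RING_HEAD  */
	i = emit_reg(state, i, mmio_base + 0x30, 0);        /* RING_TAIL  */
	i = emit_reg(state, i, mmio_base + 0x38, 0x100000); /* RING_START */

	for (int j = 0; j < i; j++)
		printf("ctx[%d] = 0x%08x\n", j, state[j]);
	return 0;
}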
1859 struct intel_engine_cs *ring = ringbuf->ring; in intel_lr_context_free() local
1861 if (ctx == ring->default_context) { in intel_lr_context_free()
1865 WARN_ON(ctx->engine[ring->id].pin_count); in intel_lr_context_free()
1873 static uint32_t get_lr_context_size(struct intel_engine_cs *ring) in get_lr_context_size() argument
1877 WARN_ON(INTEL_INFO(ring->dev)->gen < 8); in get_lr_context_size()
1879 switch (ring->id) { in get_lr_context_size()
1881 if (INTEL_INFO(ring->dev)->gen >= 9) in get_lr_context_size()
1897 static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, in lrc_setup_hardware_status_page() argument
1900 struct drm_i915_private *dev_priv = ring->dev->dev_private; in lrc_setup_hardware_status_page()
1904 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj); in lrc_setup_hardware_status_page()
1905 ring->status_page.page_addr = in lrc_setup_hardware_status_page()
1907 ring->status_page.obj = default_ctx_obj; in lrc_setup_hardware_status_page()
1909 I915_WRITE(RING_HWS_PGA(ring->mmio_base), in lrc_setup_hardware_status_page()
1910 (u32)ring->status_page.gfx_addr); in lrc_setup_hardware_status_page()
1911 POSTING_READ(RING_HWS_PGA(ring->mmio_base)); in lrc_setup_hardware_status_page()
1928 struct intel_engine_cs *ring) in intel_lr_context_deferred_create() argument
1930 const bool is_global_default_ctx = (ctx == ring->default_context); in intel_lr_context_deferred_create()
1931 struct drm_device *dev = ring->dev; in intel_lr_context_deferred_create()
1938 WARN_ON(ctx->engine[ring->id].state); in intel_lr_context_deferred_create()
1940 context_size = round_up(get_lr_context_size(ring), 4096); in intel_lr_context_deferred_create()
1962 ring->name); in intel_lr_context_deferred_create()
1967 ringbuf->ring = ring; in intel_lr_context_deferred_create()
1981 ring->name, ret); in intel_lr_context_deferred_create()
1990 ring->name, ret); in intel_lr_context_deferred_create()
1997 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); in intel_lr_context_deferred_create()
2003 ctx->engine[ring->id].ringbuf = ringbuf; in intel_lr_context_deferred_create()
2004 ctx->engine[ring->id].state = ctx_obj; in intel_lr_context_deferred_create()
2006 if (ctx == ring->default_context) in intel_lr_context_deferred_create()
2007 lrc_setup_hardware_status_page(ring, ctx_obj); in intel_lr_context_deferred_create()
2008 else if (ring->id == RCS && !ctx->rcs_initialized) { in intel_lr_context_deferred_create()
2009 if (ring->init_context) { in intel_lr_context_deferred_create()
2010 ret = ring->init_context(ring, ctx); in intel_lr_context_deferred_create()
2013 ctx->engine[ring->id].ringbuf = NULL; in intel_lr_context_deferred_create()
2014 ctx->engine[ring->id].state = NULL; in intel_lr_context_deferred_create()
2042 struct intel_engine_cs *ring; in intel_lr_context_reset() local
2045 for_each_ring(ring, dev_priv, i) { in intel_lr_context_reset()
2047 ctx->engine[ring->id].state; in intel_lr_context_reset()
2049 ctx->engine[ring->id].ringbuf; in intel_lr_context_reset()