Lines Matching refs:ring
224 static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
283 static bool disable_lite_restore_wa(struct intel_engine_cs *ring) in disable_lite_restore_wa() argument
285 struct drm_device *dev = ring->dev; in disable_lite_restore_wa()
289 (ring->id == VCS || ring->id == VCS2); in disable_lite_restore_wa()
293 struct intel_engine_cs *ring) in intel_lr_context_descriptor() argument
295 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; in intel_lr_context_descriptor()
316 if (disable_lite_restore_wa(ring)) in intel_lr_context_descriptor()
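
intel_lr_context_descriptor() packs the context image's GGTT address into the 64-bit descriptor the hardware later consumes from the ELSP, optionally OR-ing in the force-restore bit when the lite-restore workaround applies. A minimal userspace sketch of that packing follows; the flag names and bit positions are illustrative assumptions, not the driver's GEN8_CTX_* macros.

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag encodings; the real GEN8_CTX_* values live in the driver. */
#define CTX_VALID         (1ull << 0)
#define CTX_ADDR_MODE     (3ull << 3)  /* assumed "legacy 64-bit" addressing mode */
#define CTX_FORCE_RESTORE (1ull << 2)  /* assumed lite-restore workaround bit */

static uint64_t lr_context_descriptor(uint64_t lrca, int lite_restore_wa)
{
	uint64_t desc = CTX_VALID | CTX_ADDR_MODE;

	desc |= lrca;          /* low bits: 4K-aligned GGTT offset of the context image */
	desc |= lrca << 32;    /* reused in the high dword as the context ID that the
	                          hardware reports back through the status buffer */
	if (lite_restore_wa)
		desc |= CTX_FORCE_RESTORE;
	return desc;
}

int main(void)
{
	printf("desc=%#llx\n",
	       (unsigned long long)lr_context_descriptor(0x12345000ull, 0));
	return 0;
}
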
326 struct intel_engine_cs *ring = rq0->ring; in execlists_elsp_write() local
327 struct drm_device *dev = ring->dev; in execlists_elsp_write()
332 desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring); in execlists_elsp_write()
338 desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring); in execlists_elsp_write()
344 I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1])); in execlists_elsp_write()
345 I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1])); in execlists_elsp_write()
347 I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0])); in execlists_elsp_write()
349 I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0])); in execlists_elsp_write()
352 POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring)); in execlists_elsp_write()
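
execlists_elsp_write() pushes up to two context descriptors into the engine's ELSP register as four dword writes: element 1 upper then lower, followed by element 0 upper then lower, with a posting read of the execlist status register to flush them out. A short sketch of that ordering over a mock register write (the real driver uses the forcewake-protected I915_WRITE_FW helpers); the claim that the final write of element 0's low dword is what completes the submission is an interpretation of the ordering above.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the forcewake-protected MMIO write to RING_ELSP. */
static void elsp_write(uint32_t dword)
{
	printf("ELSP <- %#010x\n", dword);
}

/* Write element 1 first, element 0 last; the hardware acts on the pair
 * once all four dwords have landed. */
static void submit_pair(uint64_t desc0, uint64_t desc1)
{
	elsp_write((uint32_t)(desc1 >> 32));
	elsp_write((uint32_t)desc1);
	elsp_write((uint32_t)(desc0 >> 32));
	elsp_write((uint32_t)desc0);   /* last write completes the submission */
}

int main(void)
{
	submit_pair(0x12345001ull, 0);   /* second element may be empty (all zeroes) */
	return 0;
}
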
359 struct intel_engine_cs *ring = rq->ring; in execlists_update_context() local
361 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; in execlists_update_context()
404 static void execlists_context_unqueue(struct intel_engine_cs *ring) in execlists_context_unqueue() argument
409 assert_spin_locked(&ring->execlist_lock); in execlists_context_unqueue()
415 WARN_ON(!intel_irqs_enabled(ring->dev->dev_private)); in execlists_context_unqueue()
417 if (list_empty(&ring->execlist_queue)) in execlists_context_unqueue()
421 list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue, in execlists_context_unqueue()
431 &ring->execlist_retired_req_list); in execlists_context_unqueue()
439 if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) { in execlists_context_unqueue()
453 ringbuf = req0->ctx->engine[ring->id].ringbuf; in execlists_context_unqueue()
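
execlists_context_unqueue() walks execlist_queue under execlist_lock and picks at most two pending requests (req0 and req1) to fill the two ELSP ports, folding consecutive requests for the same context into req0 and retiring the superseded one. A simplified model of that selection, using an array in place of the kernel's list_for_each_entry_safe() walk:

#include <stddef.h>
#include <stdio.h>

struct request { int ctx_id; int tail; };

/* Pick up to two requests: later requests for the same context as req0
 * replace it (the "lite restore" coalescing); the first request for a
 * different context becomes req1 and ends the scan.  Returns the number
 * of ports filled. */
static int pick_two(struct request *queue, size_t n,
		    struct request **req0, struct request **req1)
{
	*req0 = *req1 = NULL;
	for (size_t i = 0; i < n; i++) {
		if (!*req0)
			*req0 = &queue[i];
		else if (queue[i].ctx_id == (*req0)->ctx_id)
			*req0 = &queue[i];     /* coalesce: newest tail wins */
		else {
			*req1 = &queue[i];
			break;
		}
	}
	return !!*req0 + !!*req1;
}

int main(void)
{
	struct request q[] = { {1, 10}, {1, 20}, {2, 5} };
	struct request *r0, *r1;

	printf("filled %d ports\n", pick_two(q, 3, &r0, &r1));
	return 0;
}
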
464 static bool execlists_check_remove_request(struct intel_engine_cs *ring, in execlists_check_remove_request() argument
469 assert_spin_locked(&ring->execlist_lock); in execlists_check_remove_request()
471 head_req = list_first_entry_or_null(&ring->execlist_queue, in execlists_check_remove_request()
477 head_req->ctx->engine[ring->id].state; in execlists_check_remove_request()
485 &ring->execlist_retired_req_list); in execlists_check_remove_request()
501 void intel_lrc_irq_handler(struct intel_engine_cs *ring) in intel_lrc_irq_handler() argument
503 struct drm_i915_private *dev_priv = ring->dev->dev_private; in intel_lrc_irq_handler()
511 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); in intel_lrc_irq_handler()
513 read_pointer = ring->next_context_status_buffer; in intel_lrc_irq_handler()
518 spin_lock(&ring->execlist_lock); in intel_lrc_irq_handler()
522 status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES)); in intel_lrc_irq_handler()
523 status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES)); in intel_lrc_irq_handler()
530 if (execlists_check_remove_request(ring, status_id)) in intel_lrc_irq_handler()
538 if (execlists_check_remove_request(ring, status_id)) in intel_lrc_irq_handler()
543 if (disable_lite_restore_wa(ring)) { in intel_lrc_irq_handler()
547 execlists_context_unqueue(ring); in intel_lrc_irq_handler()
549 execlists_context_unqueue(ring); in intel_lrc_irq_handler()
552 spin_unlock(&ring->execlist_lock); in intel_lrc_irq_handler()
555 ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; in intel_lrc_irq_handler()
557 I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), in intel_lrc_irq_handler()
559 ((u32)ring->next_context_status_buffer & in intel_lrc_irq_handler()
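
intel_lrc_irq_handler() drains the per-engine context status buffer (CSB): it reads the hardware write pointer from RING_CONTEXT_STATUS_PTR, walks from its cached read pointer (next_context_status_buffer) up to that write pointer modulo the number of CSB entries, retires the requests reported complete, and writes the new read pointer back. The wraparound arithmetic is the subtle part; a standalone model of it, assuming the GEN8 depth of six entries:

#include <stdio.h>

#define CSB_ENTRIES 6   /* GEN8 context status buffer depth (assumed here) */

/* Count pending CSB entries and advance *read_ptr the way the interrupt
 * handler advances next_context_status_buffer. */
static int csb_pending(unsigned int *read_ptr, unsigned int write_ptr)
{
	unsigned int rp = *read_ptr;
	int count = 0;

	if (rp > write_ptr)
		write_ptr += CSB_ENTRIES;      /* hardware pointer has wrapped */

	while (rp < write_ptr) {
		/* real handler: read STATUS_BUF_LO/HI at (rp % CSB_ENTRIES)
		 * and retire the matching request */
		rp++;
		count++;
	}
	*read_ptr = rp % CSB_ENTRIES;
	return count;
}

int main(void)
{
	unsigned int read_ptr = 5;

	printf("processed %d entries, read_ptr now %u\n",
	       csb_pending(&read_ptr, 2), read_ptr);
	return 0;
}
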
565 struct intel_engine_cs *ring = request->ring; in execlists_context_queue() local
569 if (request->ctx != ring->default_context) in execlists_context_queue()
574 spin_lock_irq(&ring->execlist_lock); in execlists_context_queue()
576 list_for_each_entry(cursor, &ring->execlist_queue, execlist_link) in execlists_context_queue()
583 tail_req = list_last_entry(&ring->execlist_queue, in execlists_context_queue()
592 &ring->execlist_retired_req_list); in execlists_context_queue()
596 list_add_tail(&request->execlist_link, &ring->execlist_queue); in execlists_context_queue()
598 execlists_context_unqueue(ring); in execlists_context_queue()
600 spin_unlock_irq(&ring->execlist_lock); in execlists_context_queue()
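
execlists_context_queue() always puts the new request on the software execlist_queue; the ELSP is only written immediately when the queue was empty, otherwise the context-switch interrupt will unqueue it later. A deliberately small model of that decision (the tail coalescing of same-context requests visible in the lines above is omitted):

#include <stdbool.h>
#include <stdio.h>

static unsigned int queue_depth;   /* software execlist_queue depth */

static void context_queue(int ctx_id)
{
	bool was_empty = (queue_depth == 0);

	queue_depth++;
	if (was_empty)
		printf("ctx %d: queue was empty, unqueue and write ELSP now\n", ctx_id);
	else
		printf("ctx %d: %u ahead of us, wait for the CSB interrupt\n",
		       ctx_id, queue_depth - 1);
}

int main(void)
{
	context_queue(1);
	context_queue(2);
	return 0;
}
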
607 struct intel_engine_cs *ring = req->ring; in logical_ring_invalidate_all_caches() local
612 if (ring->gpu_caches_dirty) in logical_ring_invalidate_all_caches()
615 ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains); in logical_ring_invalidate_all_caches()
619 ring->gpu_caches_dirty = false; in logical_ring_invalidate_all_caches()
626 const unsigned other_rings = ~intel_ring_flag(req->ring); in execlists_move_to_gpu()
636 ret = i915_gem_object_sync(obj, req->ring, &req); in execlists_move_to_gpu()
660 request->ringbuf = request->ctx->engine[request->ring->id].ringbuf; in intel_logical_ring_alloc_request_extras()
662 if (request->ctx != request->ring->default_context) { in intel_logical_ring_alloc_request_extras()
675 struct intel_engine_cs *ring = req->ring; in logical_ring_wait_for_space() local
686 list_for_each_entry(target, &ring->request_list, list) { in logical_ring_wait_for_space()
702 if (WARN_ON(&target->list == &ring->request_list)) in logical_ring_wait_for_space()
725 struct intel_engine_cs *ring = request->ring; in intel_logical_ring_advance_and_submit() local
732 if (intel_ring_stopped(ring)) in intel_logical_ring_advance_and_submit()
821 dev_priv = req->ring->dev->dev_private; in intel_logical_ring_begin()
873 struct intel_engine_cs *ring = params->ring; in intel_execlists_submission() local
875 struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf; in intel_execlists_submission()
887 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) { in intel_execlists_submission()
916 if (ring == &dev_priv->ring[RCS] && in intel_execlists_submission()
934 ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags); in intel_execlists_submission()
946 void intel_execlists_retire_requests(struct intel_engine_cs *ring) in intel_execlists_retire_requests() argument
951 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); in intel_execlists_retire_requests()
952 if (list_empty(&ring->execlist_retired_req_list)) in intel_execlists_retire_requests()
956 spin_lock_irq(&ring->execlist_lock); in intel_execlists_retire_requests()
957 list_replace_init(&ring->execlist_retired_req_list, &retired_list); in intel_execlists_retire_requests()
958 spin_unlock_irq(&ring->execlist_lock); in intel_execlists_retire_requests()
963 ctx->engine[ring->id].state; in intel_execlists_retire_requests()
965 if (ctx_obj && (ctx != ring->default_context)) in intel_execlists_retire_requests()
972 void intel_logical_ring_stop(struct intel_engine_cs *ring) in intel_logical_ring_stop() argument
974 struct drm_i915_private *dev_priv = ring->dev->dev_private; in intel_logical_ring_stop()
977 if (!intel_ring_initialized(ring)) in intel_logical_ring_stop()
980 ret = intel_ring_idle(ring); in intel_logical_ring_stop()
981 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error)) in intel_logical_ring_stop()
983 ring->name, ret); in intel_logical_ring_stop()
986 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); in intel_logical_ring_stop()
987 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { in intel_logical_ring_stop()
988 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); in intel_logical_ring_stop()
991 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); in intel_logical_ring_stop()
996 struct intel_engine_cs *ring = req->ring; in logical_ring_flush_all_caches() local
999 if (!ring->gpu_caches_dirty) in logical_ring_flush_all_caches()
1002 ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS); in logical_ring_flush_all_caches()
1006 ring->gpu_caches_dirty = false; in logical_ring_flush_all_caches()
1010 static int intel_lr_context_do_pin(struct intel_engine_cs *ring, in intel_lr_context_do_pin() argument
1014 struct drm_device *dev = ring->dev; in intel_lr_context_do_pin()
1018 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); in intel_lr_context_do_pin()
1024 ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf); in intel_lr_context_do_pin()
1045 struct intel_engine_cs *ring = rq->ring; in intel_lr_context_pin() local
1046 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; in intel_lr_context_pin()
1049 if (rq->ctx->engine[ring->id].pin_count++ == 0) { in intel_lr_context_pin()
1050 ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf); in intel_lr_context_pin()
1057 rq->ctx->engine[ring->id].pin_count = 0; in intel_lr_context_pin()
1063 struct intel_engine_cs *ring = rq->ring; in intel_lr_context_unpin() local
1064 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; in intel_lr_context_unpin()
1068 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); in intel_lr_context_unpin()
1069 if (--rq->ctx->engine[ring->id].pin_count == 0) { in intel_lr_context_unpin()
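
intel_lr_context_pin()/intel_lr_context_unpin() are reference counted per context and engine: only the 0 -> 1 transition does the expensive work (GGTT-pinning the context object and mapping the ringbuffer via intel_lr_context_do_pin), and only the final unpin releases it. A minimal sketch of that counting, with the heavy lifting reduced to a printf:

#include <assert.h>
#include <stdio.h>

struct engine_ctx { unsigned int pin_count; };

/* Only the first pin touches the hardware-visible state. */
static int lr_context_pin(struct engine_ctx *ce)
{
	if (ce->pin_count++ == 0)
		printf("pinning context image and mapping ringbuffer\n");
	return 0;
}

/* Only the last unpin undoes it. */
static void lr_context_unpin(struct engine_ctx *ce)
{
	assert(ce->pin_count > 0);
	if (--ce->pin_count == 0)
		printf("unpinning context image and ringbuffer\n");
}

int main(void)
{
	struct engine_ctx ce = { 0 };

	lr_context_pin(&ce);
	lr_context_pin(&ce);
	lr_context_unpin(&ce);
	lr_context_unpin(&ce);   /* last reference actually releases */
	return 0;
}
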
1079 struct intel_engine_cs *ring = req->ring; in intel_logical_ring_workarounds_emit() local
1081 struct drm_device *dev = ring->dev; in intel_logical_ring_workarounds_emit()
1088 ring->gpu_caches_dirty = true; in intel_logical_ring_workarounds_emit()
1106 ring->gpu_caches_dirty = true; in intel_logical_ring_workarounds_emit()
1140 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring, in gen8_emit_flush_coherentl3_wa() argument
1152 if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0) in gen8_emit_flush_coherentl3_wa()
1158 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); in gen8_emit_flush_coherentl3_wa()
1176 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); in gen8_emit_flush_coherentl3_wa()
1229 static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring, in gen8_init_indirectctx_bb() argument
1241 if (IS_BROADWELL(ring->dev)) { in gen8_init_indirectctx_bb()
1242 int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index); in gen8_init_indirectctx_bb()
1250 scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES; in gen8_init_indirectctx_bb()
1292 static int gen8_init_perctx_bb(struct intel_engine_cs *ring, in gen8_init_perctx_bb() argument
1307 static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring, in gen9_init_indirectctx_bb() argument
1313 struct drm_device *dev = ring->dev; in gen9_init_indirectctx_bb()
1322 ret = gen8_emit_flush_coherentl3_wa(ring, batch, index); in gen9_init_indirectctx_bb()
1334 static int gen9_init_perctx_bb(struct intel_engine_cs *ring, in gen9_init_perctx_bb() argument
1339 struct drm_device *dev = ring->dev; in gen9_init_perctx_bb()
1362 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size) in lrc_setup_wa_ctx_obj() argument
1366 ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size)); in lrc_setup_wa_ctx_obj()
1367 if (!ring->wa_ctx.obj) { in lrc_setup_wa_ctx_obj()
1372 ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0); in lrc_setup_wa_ctx_obj()
1376 drm_gem_object_unreference(&ring->wa_ctx.obj->base); in lrc_setup_wa_ctx_obj()
1383 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring) in lrc_destroy_wa_ctx_obj() argument
1385 if (ring->wa_ctx.obj) { in lrc_destroy_wa_ctx_obj()
1386 i915_gem_object_ggtt_unpin(ring->wa_ctx.obj); in lrc_destroy_wa_ctx_obj()
1387 drm_gem_object_unreference(&ring->wa_ctx.obj->base); in lrc_destroy_wa_ctx_obj()
1388 ring->wa_ctx.obj = NULL; in lrc_destroy_wa_ctx_obj()
1392 static int intel_init_workaround_bb(struct intel_engine_cs *ring) in intel_init_workaround_bb() argument
1398 struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; in intel_init_workaround_bb()
1400 WARN_ON(ring->id != RCS); in intel_init_workaround_bb()
1403 if (INTEL_INFO(ring->dev)->gen > 9) { in intel_init_workaround_bb()
1405 INTEL_INFO(ring->dev)->gen); in intel_init_workaround_bb()
1410 if (ring->scratch.obj == NULL) { in intel_init_workaround_bb()
1411 DRM_ERROR("scratch page not allocated for %s\n", ring->name); in intel_init_workaround_bb()
1415 ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE); in intel_init_workaround_bb()
1425 if (INTEL_INFO(ring->dev)->gen == 8) { in intel_init_workaround_bb()
1426 ret = gen8_init_indirectctx_bb(ring, in intel_init_workaround_bb()
1433 ret = gen8_init_perctx_bb(ring, in intel_init_workaround_bb()
1439 } else if (INTEL_INFO(ring->dev)->gen == 9) { in intel_init_workaround_bb()
1440 ret = gen9_init_indirectctx_bb(ring, in intel_init_workaround_bb()
1447 ret = gen9_init_perctx_bb(ring, in intel_init_workaround_bb()
1458 lrc_destroy_wa_ctx_obj(ring); in intel_init_workaround_bb()
1463 static int gen8_init_common_ring(struct intel_engine_cs *ring) in gen8_init_common_ring() argument
1465 struct drm_device *dev = ring->dev; in gen8_init_common_ring()
1469 lrc_setup_hardware_status_page(ring, in gen8_init_common_ring()
1470 ring->default_context->engine[ring->id].state); in gen8_init_common_ring()
1472 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); in gen8_init_common_ring()
1473 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); in gen8_init_common_ring()
1475 if (ring->status_page.obj) { in gen8_init_common_ring()
1476 I915_WRITE(RING_HWS_PGA(ring->mmio_base), in gen8_init_common_ring()
1477 (u32)ring->status_page.gfx_addr); in gen8_init_common_ring()
1478 POSTING_READ(RING_HWS_PGA(ring->mmio_base)); in gen8_init_common_ring()
1481 I915_WRITE(RING_MODE_GEN7(ring), in gen8_init_common_ring()
1484 POSTING_READ(RING_MODE_GEN7(ring)); in gen8_init_common_ring()
1496 next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring)) in gen8_init_common_ring()
1507 ring->next_context_status_buffer = next_context_status_buffer_hw; in gen8_init_common_ring()
1508 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); in gen8_init_common_ring()
1510 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); in gen8_init_common_ring()
1515 static int gen8_init_render_ring(struct intel_engine_cs *ring) in gen8_init_render_ring() argument
1517 struct drm_device *dev = ring->dev; in gen8_init_render_ring()
1521 ret = gen8_init_common_ring(ring); in gen8_init_render_ring()
1535 return init_workarounds_ring(ring); in gen8_init_render_ring()
1538 static int gen9_init_render_ring(struct intel_engine_cs *ring) in gen9_init_render_ring() argument
1542 ret = gen8_init_common_ring(ring); in gen9_init_render_ring()
1546 return init_workarounds_ring(ring); in gen9_init_render_ring()
1552 struct intel_engine_cs *ring = req->ring; in intel_logical_ring_emit_pdps() local
1565 intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i)); in intel_logical_ring_emit_pdps()
1567 intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i)); in intel_logical_ring_emit_pdps()
1591 (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) { in gen8_emit_bb_start()
1599 req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring); in gen8_emit_bb_start()
1619 static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring) in gen8_logical_ring_get_irq() argument
1621 struct drm_device *dev = ring->dev; in gen8_logical_ring_get_irq()
1629 if (ring->irq_refcount++ == 0) { in gen8_logical_ring_get_irq()
1630 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); in gen8_logical_ring_get_irq()
1631 POSTING_READ(RING_IMR(ring->mmio_base)); in gen8_logical_ring_get_irq()
1638 static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring) in gen8_logical_ring_put_irq() argument
1640 struct drm_device *dev = ring->dev; in gen8_logical_ring_put_irq()
1645 if (--ring->irq_refcount == 0) { in gen8_logical_ring_put_irq()
1646 I915_WRITE_IMR(ring, ~ring->irq_keep_mask); in gen8_logical_ring_put_irq()
1647 POSTING_READ(RING_IMR(ring->mmio_base)); in gen8_logical_ring_put_irq()
1657 struct intel_engine_cs *ring = ringbuf->ring; in gen8_emit_flush() local
1658 struct drm_device *dev = ring->dev; in gen8_emit_flush()
1678 if (ring == &dev_priv->ring[VCS]) in gen8_emit_flush()
1698 struct intel_engine_cs *ring = ringbuf->ring; in gen8_emit_flush_render() local
1699 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; in gen8_emit_flush_render()
1728 vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 && in gen8_emit_flush_render()
1755 static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) in gen8_get_seqno() argument
1757 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); in gen8_get_seqno()
1760 static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno) in gen8_set_seqno() argument
1762 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); in gen8_set_seqno()
1765 static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) in bxt_a_get_seqno() argument
1780 intel_flush_status_page(ring, I915_GEM_HWS_INDEX); in bxt_a_get_seqno()
1782 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); in bxt_a_get_seqno()
1785 static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno) in bxt_a_set_seqno() argument
1787 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); in bxt_a_set_seqno()
1790 intel_flush_status_page(ring, I915_GEM_HWS_INDEX); in bxt_a_set_seqno()
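
gen8_get_seqno()/gen8_set_seqno() are plain dword accesses into the engine's hardware status page at I915_GEM_HWS_INDEX; the bxt_a_* variants additionally flush that status-page cacheline to avoid reading a stale value on early Broxton steppings. A tiny model of the access pattern, with the HWS slot index assumed rather than taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define HWS_INDEX 0x30                 /* dword slot for the seqno; value assumed */

static uint32_t status_page[1024];     /* stands in for the kmapped HWS page */

static uint32_t get_seqno(void)        { return status_page[HWS_INDEX]; }
static void set_seqno(uint32_t seqno)  { status_page[HWS_INDEX] = seqno; }

int main(void)
{
	set_seqno(42);
	printf("seqno=%u\n", get_seqno());
	return 0;
}
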
1796 struct intel_engine_cs *ring = ringbuf->ring; in gen8_emit_request() local
1814 (ring->status_page.gfx_addr + in gen8_emit_request()
1838 ret = i915_gem_render_state_prepare(req->ring, &so); in intel_lr_context_render_state_init()
1845 ret = req->ring->emit_bb_start(req, so.ggtt_offset, in intel_lr_context_render_state_init()
1850 ret = req->ring->emit_bb_start(req, in intel_lr_context_render_state_init()
1888 void intel_logical_ring_cleanup(struct intel_engine_cs *ring) in intel_logical_ring_cleanup() argument
1892 if (!intel_ring_initialized(ring)) in intel_logical_ring_cleanup()
1895 dev_priv = ring->dev->dev_private; in intel_logical_ring_cleanup()
1897 intel_logical_ring_stop(ring); in intel_logical_ring_cleanup()
1898 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); in intel_logical_ring_cleanup()
1900 if (ring->cleanup) in intel_logical_ring_cleanup()
1901 ring->cleanup(ring); in intel_logical_ring_cleanup()
1903 i915_cmd_parser_fini_ring(ring); in intel_logical_ring_cleanup()
1904 i915_gem_batch_pool_fini(&ring->batch_pool); in intel_logical_ring_cleanup()
1906 if (ring->status_page.obj) { in intel_logical_ring_cleanup()
1907 kunmap(sg_page(ring->status_page.obj->pages->sgl)); in intel_logical_ring_cleanup()
1908 ring->status_page.obj = NULL; in intel_logical_ring_cleanup()
1911 lrc_destroy_wa_ctx_obj(ring); in intel_logical_ring_cleanup()
1914 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) in logical_ring_init() argument
1919 ring->buffer = NULL; in logical_ring_init()
1921 ring->dev = dev; in logical_ring_init()
1922 INIT_LIST_HEAD(&ring->active_list); in logical_ring_init()
1923 INIT_LIST_HEAD(&ring->request_list); in logical_ring_init()
1924 i915_gem_batch_pool_init(dev, &ring->batch_pool); in logical_ring_init()
1925 init_waitqueue_head(&ring->irq_queue); in logical_ring_init()
1927 INIT_LIST_HEAD(&ring->execlist_queue); in logical_ring_init()
1928 INIT_LIST_HEAD(&ring->execlist_retired_req_list); in logical_ring_init()
1929 spin_lock_init(&ring->execlist_lock); in logical_ring_init()
1931 ret = i915_cmd_parser_init_ring(ring); in logical_ring_init()
1935 ret = intel_lr_context_deferred_alloc(ring->default_context, ring); in logical_ring_init()
1941 ring, in logical_ring_init()
1942 ring->default_context->engine[ring->id].state, in logical_ring_init()
1943 ring->default_context->engine[ring->id].ringbuf); in logical_ring_init()
1947 ring->name, ret); in logical_ring_init()
1957 struct intel_engine_cs *ring = &dev_priv->ring[RCS]; in logical_render_ring_init() local
1960 ring->name = "render ring"; in logical_render_ring_init()
1961 ring->id = RCS; in logical_render_ring_init()
1962 ring->mmio_base = RENDER_RING_BASE; in logical_render_ring_init()
1963 ring->irq_enable_mask = in logical_render_ring_init()
1965 ring->irq_keep_mask = in logical_render_ring_init()
1968 ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; in logical_render_ring_init()
1971 ring->init_hw = gen9_init_render_ring; in logical_render_ring_init()
1973 ring->init_hw = gen8_init_render_ring; in logical_render_ring_init()
1974 ring->init_context = gen8_init_rcs_context; in logical_render_ring_init()
1975 ring->cleanup = intel_fini_pipe_control; in logical_render_ring_init()
1977 ring->get_seqno = bxt_a_get_seqno; in logical_render_ring_init()
1978 ring->set_seqno = bxt_a_set_seqno; in logical_render_ring_init()
1980 ring->get_seqno = gen8_get_seqno; in logical_render_ring_init()
1981 ring->set_seqno = gen8_set_seqno; in logical_render_ring_init()
1983 ring->emit_request = gen8_emit_request; in logical_render_ring_init()
1984 ring->emit_flush = gen8_emit_flush_render; in logical_render_ring_init()
1985 ring->irq_get = gen8_logical_ring_get_irq; in logical_render_ring_init()
1986 ring->irq_put = gen8_logical_ring_put_irq; in logical_render_ring_init()
1987 ring->emit_bb_start = gen8_emit_bb_start; in logical_render_ring_init()
1989 ring->dev = dev; in logical_render_ring_init()
1991 ret = intel_init_pipe_control(ring); in logical_render_ring_init()
1995 ret = intel_init_workaround_bb(ring); in logical_render_ring_init()
2006 ret = logical_ring_init(dev, ring); in logical_render_ring_init()
2008 lrc_destroy_wa_ctx_obj(ring); in logical_render_ring_init()
2017 struct intel_engine_cs *ring = &dev_priv->ring[VCS]; in logical_bsd_ring_init() local
2019 ring->name = "bsd ring"; in logical_bsd_ring_init()
2020 ring->id = VCS; in logical_bsd_ring_init()
2021 ring->mmio_base = GEN6_BSD_RING_BASE; in logical_bsd_ring_init()
2022 ring->irq_enable_mask = in logical_bsd_ring_init()
2024 ring->irq_keep_mask = in logical_bsd_ring_init()
2027 ring->init_hw = gen8_init_common_ring; in logical_bsd_ring_init()
2029 ring->get_seqno = bxt_a_get_seqno; in logical_bsd_ring_init()
2030 ring->set_seqno = bxt_a_set_seqno; in logical_bsd_ring_init()
2032 ring->get_seqno = gen8_get_seqno; in logical_bsd_ring_init()
2033 ring->set_seqno = gen8_set_seqno; in logical_bsd_ring_init()
2035 ring->emit_request = gen8_emit_request; in logical_bsd_ring_init()
2036 ring->emit_flush = gen8_emit_flush; in logical_bsd_ring_init()
2037 ring->irq_get = gen8_logical_ring_get_irq; in logical_bsd_ring_init()
2038 ring->irq_put = gen8_logical_ring_put_irq; in logical_bsd_ring_init()
2039 ring->emit_bb_start = gen8_emit_bb_start; in logical_bsd_ring_init()
2041 return logical_ring_init(dev, ring); in logical_bsd_ring_init()
2047 struct intel_engine_cs *ring = &dev_priv->ring[VCS2]; in logical_bsd2_ring_init() local
2049 ring->name = "bsd2 ring"; in logical_bsd2_ring_init()
2050 ring->id = VCS2; in logical_bsd2_ring_init()
2051 ring->mmio_base = GEN8_BSD2_RING_BASE; in logical_bsd2_ring_init()
2052 ring->irq_enable_mask = in logical_bsd2_ring_init()
2054 ring->irq_keep_mask = in logical_bsd2_ring_init()
2057 ring->init_hw = gen8_init_common_ring; in logical_bsd2_ring_init()
2058 ring->get_seqno = gen8_get_seqno; in logical_bsd2_ring_init()
2059 ring->set_seqno = gen8_set_seqno; in logical_bsd2_ring_init()
2060 ring->emit_request = gen8_emit_request; in logical_bsd2_ring_init()
2061 ring->emit_flush = gen8_emit_flush; in logical_bsd2_ring_init()
2062 ring->irq_get = gen8_logical_ring_get_irq; in logical_bsd2_ring_init()
2063 ring->irq_put = gen8_logical_ring_put_irq; in logical_bsd2_ring_init()
2064 ring->emit_bb_start = gen8_emit_bb_start; in logical_bsd2_ring_init()
2066 return logical_ring_init(dev, ring); in logical_bsd2_ring_init()
2072 struct intel_engine_cs *ring = &dev_priv->ring[BCS]; in logical_blt_ring_init() local
2074 ring->name = "blitter ring"; in logical_blt_ring_init()
2075 ring->id = BCS; in logical_blt_ring_init()
2076 ring->mmio_base = BLT_RING_BASE; in logical_blt_ring_init()
2077 ring->irq_enable_mask = in logical_blt_ring_init()
2079 ring->irq_keep_mask = in logical_blt_ring_init()
2082 ring->init_hw = gen8_init_common_ring; in logical_blt_ring_init()
2084 ring->get_seqno = bxt_a_get_seqno; in logical_blt_ring_init()
2085 ring->set_seqno = bxt_a_set_seqno; in logical_blt_ring_init()
2087 ring->get_seqno = gen8_get_seqno; in logical_blt_ring_init()
2088 ring->set_seqno = gen8_set_seqno; in logical_blt_ring_init()
2090 ring->emit_request = gen8_emit_request; in logical_blt_ring_init()
2091 ring->emit_flush = gen8_emit_flush; in logical_blt_ring_init()
2092 ring->irq_get = gen8_logical_ring_get_irq; in logical_blt_ring_init()
2093 ring->irq_put = gen8_logical_ring_put_irq; in logical_blt_ring_init()
2094 ring->emit_bb_start = gen8_emit_bb_start; in logical_blt_ring_init()
2096 return logical_ring_init(dev, ring); in logical_blt_ring_init()
2102 struct intel_engine_cs *ring = &dev_priv->ring[VECS]; in logical_vebox_ring_init() local
2104 ring->name = "video enhancement ring"; in logical_vebox_ring_init()
2105 ring->id = VECS; in logical_vebox_ring_init()
2106 ring->mmio_base = VEBOX_RING_BASE; in logical_vebox_ring_init()
2107 ring->irq_enable_mask = in logical_vebox_ring_init()
2109 ring->irq_keep_mask = in logical_vebox_ring_init()
2112 ring->init_hw = gen8_init_common_ring; in logical_vebox_ring_init()
2114 ring->get_seqno = bxt_a_get_seqno; in logical_vebox_ring_init()
2115 ring->set_seqno = bxt_a_set_seqno; in logical_vebox_ring_init()
2117 ring->get_seqno = gen8_get_seqno; in logical_vebox_ring_init()
2118 ring->set_seqno = gen8_set_seqno; in logical_vebox_ring_init()
2120 ring->emit_request = gen8_emit_request; in logical_vebox_ring_init()
2121 ring->emit_flush = gen8_emit_flush; in logical_vebox_ring_init()
2122 ring->irq_get = gen8_logical_ring_get_irq; in logical_vebox_ring_init()
2123 ring->irq_put = gen8_logical_ring_put_irq; in logical_vebox_ring_init()
2124 ring->emit_bb_start = gen8_emit_bb_start; in logical_vebox_ring_init()
2126 return logical_ring_init(dev, ring); in logical_vebox_ring_init()
2175 intel_logical_ring_cleanup(&dev_priv->ring[VECS]); in intel_logical_rings_init()
2177 intel_logical_ring_cleanup(&dev_priv->ring[BCS]); in intel_logical_rings_init()
2179 intel_logical_ring_cleanup(&dev_priv->ring[VCS]); in intel_logical_rings_init()
2181 intel_logical_ring_cleanup(&dev_priv->ring[RCS]); in intel_logical_rings_init()
2231 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf) in populate_lr_context() argument
2233 struct drm_device *dev = ring->dev; in populate_lr_context()
2267 if (ring->id == RCS) in populate_lr_context()
2272 reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring); in populate_lr_context()
2277 reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base); in populate_lr_context()
2279 reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base); in populate_lr_context()
2281 reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base); in populate_lr_context()
2285 reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base); in populate_lr_context()
2288 reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168; in populate_lr_context()
2290 reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140; in populate_lr_context()
2292 reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110; in populate_lr_context()
2294 reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c; in populate_lr_context()
2296 reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114; in populate_lr_context()
2298 reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118; in populate_lr_context()
2300 if (ring->id == RCS) { in populate_lr_context()
2301 reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0; in populate_lr_context()
2303 reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4; in populate_lr_context()
2305 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8; in populate_lr_context()
2307 if (ring->wa_ctx.obj) { in populate_lr_context()
2308 struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; in populate_lr_context()
2325 reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8; in populate_lr_context()
2327 reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3); in populate_lr_context()
2328 reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3); in populate_lr_context()
2329 reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2); in populate_lr_context()
2330 reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2); in populate_lr_context()
2331 reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1); in populate_lr_context()
2332 reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1); in populate_lr_context()
2333 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0); in populate_lr_context()
2334 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0); in populate_lr_context()
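
populate_lr_context() records each of the four legacy page-directory pointers as an upper/lower register pair (PDPn_UDW / PDPn_LDW), highest index first, so the hardware reloads the PPGTT when it switches to the context. A small sketch of that split, printing instead of writing register state:

#include <stdint.h>
#include <stdio.h>

#define GEN8_LEGACY_PDPES 4   /* four page-directory pointers in legacy mode */

static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }

static void write_pdps(const uint64_t pd_addr[GEN8_LEGACY_PDPES])
{
	for (int i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
		printf("PDP%d_UDW = %#010x\n", i, upper_32_bits(pd_addr[i]));
		printf("PDP%d_LDW = %#010x\n", i, lower_32_bits(pd_addr[i]));
	}
}

int main(void)
{
	const uint64_t pds[GEN8_LEGACY_PDPES] = {
		0x100000000ull, 0x100001000ull, 0x100002000ull, 0x100003000ull,
	};

	write_pdps(pds);
	return 0;
}
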
2354 if (ring->id == RCS) { in populate_lr_context()
2387 struct intel_engine_cs *ring = ringbuf->ring; in intel_lr_context_free() local
2389 if (ctx == ring->default_context) { in intel_lr_context_free()
2393 WARN_ON(ctx->engine[ring->id].pin_count); in intel_lr_context_free()
2400 static uint32_t get_lr_context_size(struct intel_engine_cs *ring) in get_lr_context_size() argument
2404 WARN_ON(INTEL_INFO(ring->dev)->gen < 8); in get_lr_context_size()
2406 switch (ring->id) { in get_lr_context_size()
2408 if (INTEL_INFO(ring->dev)->gen >= 9) in get_lr_context_size()
2424 static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, in lrc_setup_hardware_status_page() argument
2427 struct drm_i915_private *dev_priv = ring->dev->dev_private; in lrc_setup_hardware_status_page()
2431 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj) in lrc_setup_hardware_status_page()
2434 ring->status_page.page_addr = kmap(page); in lrc_setup_hardware_status_page()
2435 ring->status_page.obj = default_ctx_obj; in lrc_setup_hardware_status_page()
2437 I915_WRITE(RING_HWS_PGA(ring->mmio_base), in lrc_setup_hardware_status_page()
2438 (u32)ring->status_page.gfx_addr); in lrc_setup_hardware_status_page()
2439 POSTING_READ(RING_HWS_PGA(ring->mmio_base)); in lrc_setup_hardware_status_page()
2457 struct intel_engine_cs *ring) in intel_lr_context_deferred_alloc() argument
2459 struct drm_device *dev = ring->dev; in intel_lr_context_deferred_alloc()
2466 WARN_ON(ctx->engine[ring->id].state); in intel_lr_context_deferred_alloc()
2468 context_size = round_up(get_lr_context_size(ring), 4096); in intel_lr_context_deferred_alloc()
2479 ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE); in intel_lr_context_deferred_alloc()
2485 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); in intel_lr_context_deferred_alloc()
2491 ctx->engine[ring->id].ringbuf = ringbuf; in intel_lr_context_deferred_alloc()
2492 ctx->engine[ring->id].state = ctx_obj; in intel_lr_context_deferred_alloc()
2494 if (ctx != ring->default_context && ring->init_context) { in intel_lr_context_deferred_alloc()
2497 ret = i915_gem_request_alloc(ring, in intel_lr_context_deferred_alloc()
2505 ret = ring->init_context(req); in intel_lr_context_deferred_alloc()
2520 ctx->engine[ring->id].ringbuf = NULL; in intel_lr_context_deferred_alloc()
2521 ctx->engine[ring->id].state = NULL; in intel_lr_context_deferred_alloc()
2529 struct intel_engine_cs *ring; in intel_lr_context_reset() local
2532 for_each_ring(ring, dev_priv, i) { in intel_lr_context_reset()
2534 ctx->engine[ring->id].state; in intel_lr_context_reset()
2536 ctx->engine[ring->id].ringbuf; in intel_lr_context_reset()