Lines matching refs:ring (drivers/gpu/drm/i915/i915_gem_context.c, legacy ring-buffer context switching)

333 struct intel_engine_cs *ring = &dev_priv->ring[i]; in i915_gem_context_reset() local
334 struct intel_context *lctx = ring->last_context; in i915_gem_context_reset()
341 ring->last_context = NULL; in i915_gem_context_reset()
345 if (ring->default_context) in i915_gem_context_reset()
346 ring->default_context->legacy_hw_ctx.initialized = false; in i915_gem_context_reset()
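The hits at 333-346 come from i915_gem_context_reset(): after a GPU reset, every engine forgets its last context so the next request forces a full context load, and the legacy render context is marked uninitialized. A minimal sketch of that loop, reconstructed from the matched lines (the unreference call is an assumption, mirroring the teardown pattern in i915_gem_context_fini() below):

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *lctx = ring->last_context;

		if (lctx) {
			/* Assumed: drop the engine's reference on the context
			 * it was running when the GPU hung. */
			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}

		/* Force MI_SET_CONTEXT to treat the default context as new. */
		if (ring->default_context)
			ring->default_context->legacy_hw_ctx.initialized = false;
	}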
358 if (WARN_ON(dev_priv->ring[RCS].default_context)) in i915_gem_context_init()
389 struct intel_engine_cs *ring = &dev_priv->ring[i]; in i915_gem_context_init() local
392 ring->default_context = ctx; in i915_gem_context_init()
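Lines 358-392 are from i915_gem_context_init(), which allocates a single default context and shares it across all engines; the WARN_ON at 358 guards against double initialization. A hedged sketch of the relevant part (the creation helper and error handling are assumptions):

	/* Guard against re-init: the default context must not exist yet. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	ctx = i915_gem_create_context(dev, NULL);	/* assumed helper */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* Every engine falls back to the same default context. */
		ring->default_context = ctx;
	}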
404 struct intel_context *dctx = dev_priv->ring[RCS].default_context; in i915_gem_context_fini()
419 WARN_ON(!dev_priv->ring[RCS].last_context); in i915_gem_context_fini()
420 if (dev_priv->ring[RCS].last_context == dctx) { in i915_gem_context_fini()
425 dev_priv->ring[RCS].last_context = NULL; in i915_gem_context_fini()
432 struct intel_engine_cs *ring = &dev_priv->ring[i]; in i915_gem_context_fini() local
434 if (ring->last_context) in i915_gem_context_fini()
435 i915_gem_context_unreference(ring->last_context); in i915_gem_context_fini()
437 ring->default_context = NULL; in i915_gem_context_fini()
438 ring->last_context = NULL; in i915_gem_context_fini()
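The 404-438 hits are the mirror-image teardown in i915_gem_context_fini(). RCS is special-cased because the default context may still be pinned as that engine's current context. A condensed sketch (reference-count balancing beyond the matched lines is an assumption):

	struct intel_context *dctx = dev_priv->ring[RCS].default_context;

	WARN_ON(!dev_priv->ring[RCS].last_context);
	if (dev_priv->ring[RCS].last_context == dctx) {
		/* Assumed: balance the reference taken when dctx became
		 * the render engine's last context. */
		i915_gem_context_unreference(dctx);
		dev_priv->ring[RCS].last_context = NULL;
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);	/* drop the init-time reference */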
446 struct intel_engine_cs *ring = req->ring; in i915_gem_context_enable() local
450 if (ring->init_context == NULL) in i915_gem_context_enable()
453 ret = ring->init_context(req); in i915_gem_context_enable()
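Lines 446-453 show i915_gem_context_enable() delegating to the engine's optional init_context() hook. A sketch under the assumption that the execlists/legacy branching around it is elided:

	int i915_gem_context_enable(struct drm_i915_gem_request *req)
	{
		struct intel_engine_cs *ring = req->ring;
		int ret;

		/* Engines needing no golden-state setup leave the hook NULL. */
		if (ring->init_context == NULL)
			return 0;

		ret = ring->init_context(req);
		if (ret)
			DRM_ERROR("ring init context: %d\n", ret);

		return ret;
	}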
515 struct intel_engine_cs *ring = req->ring; in mi_set_context() local
519 i915_semaphore_is_enabled(ring->dev) ? in mi_set_context()
520 hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 : in mi_set_context()
529 if (IS_GEN6(ring->dev)) { in mi_set_context()
530 ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0); in mi_set_context()
536 if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8) in mi_set_context()
538 else if (INTEL_INFO(ring->dev)->gen < 8) in mi_set_context()
543 if (INTEL_INFO(ring->dev)->gen >= 7) in mi_set_context()
551 if (INTEL_INFO(ring->dev)->gen >= 7) { in mi_set_context()
552 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); in mi_set_context()
556 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); in mi_set_context()
557 for_each_ring(signaller, to_i915(ring->dev), i) { in mi_set_context()
558 if (signaller == ring) in mi_set_context()
561 intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); in mi_set_context()
562 intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); in mi_set_context()
567 intel_ring_emit(ring, MI_NOOP); in mi_set_context()
568 intel_ring_emit(ring, MI_SET_CONTEXT); in mi_set_context()
569 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) | in mi_set_context()
575 intel_ring_emit(ring, MI_NOOP); in mi_set_context()
577 if (INTEL_INFO(ring->dev)->gen >= 7) { in mi_set_context()
581 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); in mi_set_context()
582 for_each_ring(signaller, to_i915(ring->dev), i) { in mi_set_context()
583 if (signaller == ring) in mi_set_context()
586 intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); in mi_set_context()
587 intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); in mi_set_context()
590 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); in mi_set_context()
593 intel_ring_advance(ring); in mi_set_context()
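The 515-593 block is mi_set_context(), which emits the actual MI_SET_CONTEXT command. On gen6 an extra ring->flush() is required first (the w/a at 529-530), and num_rings counts the other engines only when semaphores are enabled, because on gen7 the switch is bracketed by a workaround: arbitration is disabled and every other engine gets GEN6_PSMI_SLEEP_MSG_DISABLE set via MI_LOAD_REGISTER_IMM, both undone after the switch. A trimmed sketch of the emitted stream (ring-space reservation via intel_ring_begin() is elided, and the flags composition is an assumption):

	struct intel_engine_cs *signaller;
	const int num_rings = i915_semaphore_is_enabled(ring->dev) ?
			hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 : 0;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;	/* assumed composition */
	int i;

	if (INTEL_INFO(ring->dev)->gen >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;
				/* Keep the other engines awake across the switch. */
				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring,
						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring,
			i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
			flags);
	/* MI_SET_CONTEXT must always be followed by MI_NOOP (hardware w/a). */
	intel_ring_emit(ring, MI_NOOP);

	/* gen7+: the mirror image runs here, _MASKED_BIT_DISABLE on the same
	 * PSMI bit per engine and MI_ARB_ON_OFF | MI_ARB_ENABLE, before: */
	intel_ring_advance(ring);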
598 static inline bool should_skip_switch(struct intel_engine_cs *ring, in should_skip_switch() argument
606 !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) in should_skip_switch()
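should_skip_switch() (598-606) is the fast path: no switch is needed when the engine is already running the target context and the target's PPGTT page directories are clean for this ring. Reconstructed from the matched lines, with the remap check hedged as an assumption:

	static inline bool should_skip_switch(struct intel_engine_cs *ring,
					      struct intel_context *from,
					      struct intel_context *to)
	{
		if (to->remap_slice)	/* assumed: pending L3 remap forces a switch */
			return false;

		if (to->ppgtt && from == to &&
		    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
			return true;

		return false;
	}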
613 needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to) in needs_pd_load_pre() argument
615 struct drm_i915_private *dev_priv = ring->dev->dev_private; in needs_pd_load_pre()
620 if (INTEL_INFO(ring->dev)->gen < 8) in needs_pd_load_pre()
623 if (ring != &dev_priv->ring[RCS]) in needs_pd_load_pre()
630 needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to, in needs_pd_load_post() argument
633 struct drm_i915_private *dev_priv = ring->dev->dev_private; in needs_pd_load_post()
638 if (!IS_GEN8(ring->dev)) in needs_pd_load_post()
641 if (ring != &dev_priv->ring[RCS]) in needs_pd_load_post()
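The two predicates at 613-641 decide when the PPGTT page directory must be (re)loaded relative to MI_SET_CONTEXT: before it on pre-gen8 hardware and on engines with no HW context image, after it only on gen8's render ring. A sketch of both (the final hw_flags test is an assumption):

	static bool
	needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
	{
		struct drm_i915_private *dev_priv = ring->dev->dev_private;

		if (!to->ppgtt)
			return false;

		if (INTEL_INFO(ring->dev)->gen < 8)
			return true;	/* PD loaded by LRIs ahead of the switch */

		if (ring != &dev_priv->ring[RCS])
			return true;	/* no MI_SET_CONTEXT to carry the PD */

		return false;
	}

	static bool
	needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
			   u32 hw_flags)
	{
		struct drm_i915_private *dev_priv = ring->dev->dev_private;

		if (!to->ppgtt)
			return false;

		if (!IS_GEN8(ring->dev))
			return false;

		if (ring != &dev_priv->ring[RCS])
			return false;

		if (hw_flags & MI_RESTORE_INHIBIT)	/* assumed flag check */
			return true;

		return false;
	}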
653 struct intel_engine_cs *ring = req->ring; in do_switch() local
654 struct drm_i915_private *dev_priv = ring->dev->dev_private; in do_switch()
655 struct intel_context *from = ring->last_context; in do_switch()
660 if (from != NULL && ring == &dev_priv->ring[RCS]) { in do_switch()
665 if (should_skip_switch(ring, from, to)) in do_switch()
669 if (ring == &dev_priv->ring[RCS]) { in do_switch()
671 get_context_alignment(ring->dev), 0); in do_switch()
681 from = ring->last_context; in do_switch()
683 if (needs_pd_load_pre(ring, to)) { in do_switch()
688 trace_switch_mm(ring, to); in do_switch()
694 to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring); in do_switch()
697 if (ring != &dev_priv->ring[RCS]) { in do_switch()
722 (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) { in do_switch()
724 to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring); in do_switch()
728 WARN_ON(needs_pd_load_pre(ring, to) && in do_switch()
729 needs_pd_load_post(ring, to, hw_flags)); in do_switch()
738 if (needs_pd_load_post(ring, to, hw_flags)) { in do_switch()
739 trace_switch_mm(ring, to); in do_switch()
792 ring->last_context = to; in do_switch()
795 if (ring->init_context) { in do_switch()
796 ret = ring->init_context(req); in do_switch()
805 if (ring->id == RCS) in do_switch()
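do_switch() (653-805) ties it together. Reading the matches in order: skip if possible, pin the target's render context image, run switch_mm() before MI_SET_CONTEXT when needs_pd_load_pre() says so, skip the emit entirely on non-render engines, optionally reload the PD afterwards, then install 'to' as last_context. A condensed control-flow sketch (error paths, context referencing, and the L3 remap loop are elided or assumed):

	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *to = req->ctx;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;	/* assumed: derived from legacy_hw_ctx state */
	int ret = 0;

	if (from != NULL && ring == &dev_priv->ring[RCS] &&
	    should_skip_switch(ring, from, to))
		return 0;

	if (ring == &dev_priv->ring[RCS]) {
		/* Pin the target's render context image in the GGTT. */
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	if (needs_pd_load_pre(ring, to)) {
		/* Load the PPGTT page directory before MI_SET_CONTEXT. */
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		if (ret)
			goto unpin_out;
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	if (ring != &dev_priv->ring[RCS])
		goto done;	/* nothing to emit outside the render ring */

	WARN_ON(needs_pd_load_pre(ring, to) &&
		needs_pd_load_post(ring, to, hw_flags));	/* never both */

	ret = mi_set_context(req, hw_flags);
	if (ret)
		goto unpin_out;

	if (needs_pd_load_post(ring, to, hw_flags)) {
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
	}

done:
	ring->last_context = to;
	if (ring->init_context)
		ret = ring->init_context(req);	/* assumed: first use only */
	return ret;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;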
825 struct intel_engine_cs *ring = req->ring; in i915_switch_context() local
826 struct drm_i915_private *dev_priv = ring->dev->dev_private; in i915_switch_context()
832 if (req->ctx != ring->last_context) { in i915_switch_context()
834 if (ring->last_context) in i915_switch_context()
835 i915_gem_context_unreference(ring->last_context); in i915_switch_context()
836 ring->last_context = req->ctx; in i915_switch_context()
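Finally, i915_switch_context() (825-836) is the top-level entry point; the matched branch handles contexts without a legacy render state object, where "switching" reduces to a reference handoff on last_context before do_switch() is even considered:

	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) {
		if (req->ctx != ring->last_context) {
			i915_gem_context_reference(req->ctx);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = req->ctx;
		}
		return 0;
	}

	return do_switch(req);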