Lines matching refs: ring
1164 WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex)); in i915_gem_check_olr()
1167 if (req == req->ring->outstanding_lazy_request) in i915_gem_check_olr()
1168 ret = i915_add_request(req->ring); in i915_gem_check_olr()
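
The references above are the outstanding-lazy-request (OLR) check, apparently from the i915 GEM code: if the request being waited on is still the ring's lazy request, it has not been submitted yet and must be flushed first. A condensed sketch, reassembled from the lines above (surrounding declarations assumed):

	static int check_olr_sketch(struct drm_i915_gem_request *req)
	{
		int ret = 0;

		/* the caller must already hold the device struct_mutex */
		WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));

		/* an unsubmitted lazy request is flushed before waiting on it */
		if (req == req->ring->outstanding_lazy_request)
			ret = i915_add_request(req->ring);

		return ret;
	}
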
1179 struct intel_engine_cs *ring) in missed_irq() argument
1181 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); in missed_irq()
1215 struct intel_engine_cs *ring = i915_gem_request_get_ring(req); in __i915_wait_request() local
1216 struct drm_device *dev = ring->dev; in __i915_wait_request()
1219 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); in __i915_wait_request()
1233 if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) { in __i915_wait_request()
1241 if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) in __i915_wait_request()
1250 prepare_to_wait(&ring->irq_queue, &wait, in __i915_wait_request()
1280 if (timeout || missed_irq(dev_priv, ring)) { in __i915_wait_request()
1284 expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire; in __i915_wait_request()
1299 ring->irq_put(ring); in __i915_wait_request()
1301 finish_wait(&ring->irq_queue, &wait); in __i915_wait_request()
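
The __i915_wait_request() references form an interruptible-wait bracket: ring->irq_get() enables the user interrupt, prepare_to_wait() parks the task on ring->irq_queue, and the pairing ring->irq_put()/finish_wait() run on every exit path. A minimal sketch of that bracket, assuming simplified completion and timeout checks (the real function also handles GPU reset, wait-boosting, missed-irq polling, and precise timeout accounting):

	static int wait_sketch(struct drm_i915_gem_request *req,
			       struct intel_engine_cs *ring,
			       unsigned long timeout_expire)
	{
		DEFINE_WAIT(wait);
		int ret = 0;

		/* enable the user interrupt for this ring */
		if (WARN_ON(!ring->irq_get(ring)))
			return -ENODEV;

		for (;;) {
			prepare_to_wait(&ring->irq_queue, &wait, TASK_INTERRUPTIBLE);

			if (i915_gem_request_completed(req, false))
				break;			/* seqno has passed */

			if (signal_pending(current)) {
				ret = -ERESTARTSYS;	/* caught a signal */
				break;
			}

			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;		/* timed out */
				break;
			}

			schedule_timeout(1);	/* sleep until irq or next tick */
		}

		ring->irq_put(ring);		/* matching interrupt disable */
		finish_wait(&ring->irq_queue, &wait);
		return ret;
	}
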
1337 dev = req->ring->dev; in i915_wait_request()
2186 struct intel_engine_cs *ring) in i915_gem_object_move_to_active() argument
2191 BUG_ON(ring == NULL); in i915_gem_object_move_to_active()
2193 req = intel_ring_get_request(ring); in i915_gem_object_move_to_active()
2196 if (old_ring != ring && obj->last_write_req) { in i915_gem_object_move_to_active()
2207 list_move_tail(&obj->ring_list, &ring->active_list); in i915_gem_object_move_to_active()
2213 struct intel_engine_cs *ring) in i915_vma_move_to_active() argument
2216 return i915_gem_object_move_to_active(vma->obj, ring); in i915_vma_move_to_active()
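
i915_gem_object_move_to_active() binds an object to the ring's request under construction: intel_ring_get_request() fetches that request, a write still outstanding on a different ring is carried over to it, and the object moves to the tail of ring->active_list; i915_vma_move_to_active() is a thin wrapper that delegates to the object-level helper. In outline (simplified; reference counting and the read-request bookkeeping are omitted):

	BUG_ON(ring == NULL);
	req = intel_ring_get_request(ring);

	/* a pending write on another ring now belongs to this request */
	if (old_ring != ring && obj->last_write_req)
		obj->last_write_req = req;

	/* active objects live on the ring's list, in submission order */
	list_move_tail(&obj->ring_list, &ring->active_list);
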
2262 struct intel_engine_cs *ring; in i915_gem_init_seqno() local
2266 for_each_ring(ring, dev_priv, i) { in i915_gem_init_seqno()
2267 ret = intel_ring_idle(ring); in i915_gem_init_seqno()
2274 for_each_ring(ring, dev_priv, i) { in i915_gem_init_seqno()
2275 intel_ring_init_seqno(ring, seqno); in i915_gem_init_seqno()
2277 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++) in i915_gem_init_seqno()
2278 ring->semaphore.sync_seqno[j] = 0; in i915_gem_init_seqno()
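
i915_gem_init_seqno() is a two-pass operation: every ring is idled before any seqno state is touched, so no in-flight request can observe the new numbering, and only then are the ring seqno and the per-ring semaphore sync_seqno[] values rewritten. Reassembled from the lines above (declarations assumed):

	/* first pass: drain all rings completely */
	for_each_ring(ring, dev_priv, i) {
		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}

	/* second pass: rewrite the seqno bookkeeping on the now-idle rings */
	for_each_ring(ring, dev_priv, i) {
		intel_ring_init_seqno(ring, seqno);

		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
			ring->semaphore.sync_seqno[j] = 0;
	}
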
2328 int __i915_add_request(struct intel_engine_cs *ring, in __i915_add_request() argument
2332 struct drm_i915_private *dev_priv = ring->dev->dev_private; in __i915_add_request()
2338 request = ring->outstanding_lazy_request; in __i915_add_request()
2343 ringbuf = request->ctx->engine[ring->id].ringbuf; in __i915_add_request()
2345 ringbuf = ring->buffer; in __i915_add_request()
2360 ret = intel_ring_flush_all_caches(ring); in __i915_add_request()
2373 ret = ring->emit_request(ringbuf, request); in __i915_add_request()
2377 ret = ring->add_request(ring); in __i915_add_request()
2398 request->ctx = ring->last_context; in __i915_add_request()
2404 ring->last_submitted_seqno = request->seqno; in __i915_add_request()
2405 list_add_tail(&request->list, &ring->request_list); in __i915_add_request()
2421 ring->outstanding_lazy_request = NULL; in __i915_add_request()
2423 i915_queue_hangcheck(ring->dev); in __i915_add_request()
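
Together, the __i915_add_request() references trace the submission sequence: pick the ringbuffer (the per-context one under execlists, ring->buffer on the legacy path), flush caches, emit the request (emit_request() for execlists, add_request() for legacy), record the context and seqno, queue the request on ring->request_list, clear the OLR pointer, and arm the hangcheck timer. A skeleton built from those lines, with error handling and tracing trimmed (the execlists path actually uses a logical-ring variant of the cache flush; simplified here):

	struct drm_i915_gem_request *request = ring->outstanding_lazy_request;
	struct intel_ringbuffer *ringbuf;
	int ret;

	if (i915.enable_execlists)
		ringbuf = request->ctx->engine[ring->id].ringbuf;
	else
		ringbuf = ring->buffer;

	/* make all rendering visible before the request is emitted */
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;

	ret = i915.enable_execlists ? ring->emit_request(ringbuf, request)
				    : ring->add_request(ring);
	if (ret)
		return ret;

	request->ctx = ring->last_context;
	ring->last_submitted_seqno = request->seqno;
	list_add_tail(&request->list, &ring->request_list);

	ring->outstanding_lazy_request = NULL;
	i915_queue_hangcheck(ring->dev);
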
2511 struct intel_engine_cs *ring = req->ring; in i915_gem_request_free() local
2513 if (ctx != ring->default_context) in i915_gem_request_free()
2514 intel_lr_context_unpin(ring, ctx); in i915_gem_request_free()
2524 i915_gem_find_active_request(struct intel_engine_cs *ring) in i915_gem_find_active_request() argument
2528 list_for_each_entry(request, &ring->request_list, list) { in i915_gem_find_active_request()
2539 struct intel_engine_cs *ring) in i915_gem_reset_ring_status() argument
2544 request = i915_gem_find_active_request(ring); in i915_gem_reset_ring_status()
2549 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; in i915_gem_reset_ring_status()
2553 list_for_each_entry_continue(request, &ring->request_list, list) in i915_gem_reset_ring_status()
2558 struct intel_engine_cs *ring) in i915_gem_reset_ring_cleanup() argument
2560 while (!list_empty(&ring->active_list)) { in i915_gem_reset_ring_cleanup()
2563 obj = list_first_entry(&ring->active_list, in i915_gem_reset_ring_cleanup()
2575 while (!list_empty(&ring->execlist_queue)) { in i915_gem_reset_ring_cleanup()
2578 submit_req = list_first_entry(&ring->execlist_queue, in i915_gem_reset_ring_cleanup()
2584 if (submit_req->ctx != ring->default_context) in i915_gem_reset_ring_cleanup()
2585 intel_lr_context_unpin(ring, submit_req->ctx); in i915_gem_reset_ring_cleanup()
2597 while (!list_empty(&ring->request_list)) { in i915_gem_reset_ring_cleanup()
2600 request = list_first_entry(&ring->request_list, in i915_gem_reset_ring_cleanup()
2608 i915_gem_request_assign(&ring->outstanding_lazy_request, NULL); in i915_gem_reset_ring_cleanup()
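
i915_gem_reset_ring_cleanup() drains three queues with the same head-pop idiom: objects on ring->active_list, pending submissions on ring->execlist_queue (unpinning any non-default logical-ring context on the way out), and queued requests on ring->request_list, before dropping the lazy-request reference. The idiom in isolation (retire_request() is a hypothetical stand-in for the driver's unlink-and-free step):

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		retire_request(request);	/* hypothetical: unlinks + frees */
	}

	/* the OLR holds the last reference; drop it explicitly */
	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
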
2635 struct intel_engine_cs *ring; in i915_gem_reset() local
2643 for_each_ring(ring, dev_priv, i) in i915_gem_reset()
2644 i915_gem_reset_ring_status(dev_priv, ring); in i915_gem_reset()
2646 for_each_ring(ring, dev_priv, i) in i915_gem_reset()
2647 i915_gem_reset_ring_cleanup(dev_priv, ring); in i915_gem_reset()
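
i915_gem_reset() deliberately walks the rings twice: the status pass must finish for every ring before any cleanup runs, because cleanup frees the very requests the status pass inspects to identify the guilty context:

	/* record hung/innocent status first... */
	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_status(dev_priv, ring);

	/* ...then throw away the requests that were just inspected */
	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_cleanup(dev_priv, ring);
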
2658 i915_gem_retire_requests_ring(struct intel_engine_cs *ring) in i915_gem_retire_requests_ring() argument
2660 if (list_empty(&ring->request_list)) in i915_gem_retire_requests_ring()
2663 WARN_ON(i915_verify_lists(ring->dev)); in i915_gem_retire_requests_ring()
2670 while (!list_empty(&ring->request_list)) { in i915_gem_retire_requests_ring()
2673 request = list_first_entry(&ring->request_list, in i915_gem_retire_requests_ring()
2696 while (!list_empty(&ring->active_list)) { in i915_gem_retire_requests_ring()
2699 obj = list_first_entry(&ring->active_list, in i915_gem_retire_requests_ring()
2709 if (unlikely(ring->trace_irq_req && in i915_gem_retire_requests_ring()
2710 i915_gem_request_completed(ring->trace_irq_req, true))) { in i915_gem_retire_requests_ring()
2711 ring->irq_put(ring); in i915_gem_retire_requests_ring()
2712 i915_gem_request_assign(&ring->trace_irq_req, NULL); in i915_gem_retire_requests_ring()
2715 WARN_ON(i915_verify_lists(ring->dev)); in i915_gem_retire_requests_ring()
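
i915_gem_retire_requests_ring() also uses head-pop loops, but it can stop at the first incomplete entry because ring->request_list and ring->active_list are kept in submission order; afterwards a completed trace-irq reference releases the interrupt. A condensed sketch of the request loop (the retire step is simplified to a hypothetical helper):

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		/* submission order: everything after this is unfinished too */
		if (!i915_gem_request_completed(request, true))
			break;

		retire_request(request);	/* hypothetical unlink + free */
	}

	if (unlikely(ring->trace_irq_req &&
		     i915_gem_request_completed(ring->trace_irq_req, true))) {
		ring->irq_put(ring);
		i915_gem_request_assign(&ring->trace_irq_req, NULL);
	}
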
2722 struct intel_engine_cs *ring; in i915_gem_retire_requests() local
2726 for_each_ring(ring, dev_priv, i) { in i915_gem_retire_requests()
2727 i915_gem_retire_requests_ring(ring); in i915_gem_retire_requests()
2728 idle &= list_empty(&ring->request_list); in i915_gem_retire_requests()
2732 spin_lock_irqsave(&ring->execlist_lock, flags); in i915_gem_retire_requests()
2733 idle &= list_empty(&ring->execlist_queue); in i915_gem_retire_requests()
2734 spin_unlock_irqrestore(&ring->execlist_lock, flags); in i915_gem_retire_requests()
2736 intel_execlists_retire_requests(ring); in i915_gem_retire_requests()
2784 struct intel_engine_cs *ring; in i915_gem_object_flush_active() local
2788 ring = i915_gem_request_get_ring(obj->last_read_req); in i915_gem_object_flush_active()
2794 i915_gem_retire_requests_ring(ring); in i915_gem_object_flush_active()
3037 struct intel_engine_cs *ring; in i915_gpu_idle() local
3041 for_each_ring(ring, dev_priv, i) { in i915_gpu_idle()
3043 ret = i915_switch_context(ring, ring->default_context); in i915_gpu_idle()
3048 ret = intel_ring_idle(ring); in i915_gpu_idle()
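
i915_gpu_idle() quiesces the GPU by switching each ring back to its default context on the legacy path and then waiting for the ring to drain. In outline, assuming the execlists guard used by the driver of this era:

	for_each_ring(ring, dev_priv, i) {
		if (!i915.enable_execlists) {
			ret = i915_switch_context(ring, ring->default_context);
			if (ret)
				return ret;
		}

		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}
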
4296 struct intel_engine_cs *ring; in i915_gem_busy_ioctl() local
4298 ring = i915_gem_request_get_ring(obj->last_read_req); in i915_gem_busy_ioctl()
4299 args->busy |= intel_ring_flag(ring) << 16; in i915_gem_busy_ioctl()
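
i915_gem_busy_ioctl() reports not just that an object is busy but where: the flag of the last-read ring is packed into the upper 16 bits of the ioctl's busy field, so userspace can tell which engine still references the object:

	args->busy = obj->active;
	if (obj->last_read_req) {
		/* high 16 bits carry the flag of the last-read ring */
		ring = i915_gem_request_get_ring(obj->last_read_req);
		args->busy |= intel_ring_flag(ring) << 16;
	}
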
4588 struct intel_engine_cs *ring; in i915_gem_stop_ringbuffers() local
4591 for_each_ring(ring, dev_priv, i) in i915_gem_stop_ringbuffers()
4592 dev_priv->gt.stop_ring(ring); in i915_gem_stop_ringbuffers()
4627 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice) in i915_gem_l3_remap() argument
4629 struct drm_device *dev = ring->dev; in i915_gem_l3_remap()
4638 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3); in i915_gem_l3_remap()
4648 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); in i915_gem_l3_remap()
4649 intel_ring_emit(ring, reg_base + i); in i915_gem_l3_remap()
4650 intel_ring_emit(ring, remap_info[i/4]); in i915_gem_l3_remap()
4653 intel_ring_advance(ring); in i915_gem_l3_remap()
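
i915_gem_l3_remap() shows the standard ring-emission bracket: intel_ring_begin() reserves space up front (three dwords per remap entry, hence GEN7_L3LOG_SIZE / 4 * 3), each entry is written as an MI_LOAD_REGISTER_IMM(1) packet of header, register offset, and value, and intel_ring_advance() commits the writes. Reassembled from the lines above:

	/* each remap entry costs 3 dwords: LRI header, register, value */
	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, reg_base + i);
		intel_ring_emit(ring, remap_info[i/4]);
	}

	intel_ring_advance(ring);
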
4766 intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]); in i915_gem_init_rings()
4768 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]); in i915_gem_init_rings()
4770 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]); in i915_gem_init_rings()
4772 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); in i915_gem_init_rings()
4774 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); in i915_gem_init_rings()
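
The i915_gem_init_rings() references are the tail of a classic goto-unwind ladder: rings are initialized in the order RCS, VCS, BCS, VECS, VCS2, and on failure the cleanup labels tear them down in exactly the reverse order, so each label undoes only what was already set up (label names assumed):

	cleanup_bsd2_ring:
		intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
	cleanup_vebox_ring:
		intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
	cleanup_blt_ring:
		intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
	cleanup_bsd_ring:
		intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
	cleanup_render_ring:
		intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
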
4783 struct intel_engine_cs *ring; in i915_gem_init_hw() local
4821 for_each_ring(ring, dev_priv, i) { in i915_gem_init_hw()
4822 ret = ring->init_hw(ring); in i915_gem_init_hw()
4828 i915_gem_l3_remap(&dev_priv->ring[RCS], i); in i915_gem_init_hw()
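
i915_gem_init_hw() brings the engines up through the per-ring init_hw() hook and then re-emits the L3 remapping on the render ring for each slice, apparently because freshly initialized hardware comes up with the default L3 mapping. In outline (NUM_L3_SLICES() as used elsewhere in the driver):

	for_each_ring(ring, dev_priv, i) {
		ret = ring->init_hw(ring);
		if (ret)
			return ret;
	}

	/* reprogram the L3 remap registers, one pass per slice */
	for (i = 0; i < NUM_L3_SLICES(dev); i++)
		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
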
4923 struct intel_engine_cs *ring; in i915_gem_cleanup_ringbuffer() local
4926 for_each_ring(ring, dev_priv, i) in i915_gem_cleanup_ringbuffer()
4927 dev_priv->gt.cleanup_ring(ring); in i915_gem_cleanup_ringbuffer()
4931 init_ring_lists(struct intel_engine_cs *ring) in init_ring_lists() argument
4933 INIT_LIST_HEAD(&ring->active_list); in init_ring_lists()
4934 INIT_LIST_HEAD(&ring->request_list); in init_ring_lists()
4969 init_ring_lists(&dev_priv->ring[i]); in i915_gem_load()
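
Finally, i915_gem_load() calls init_ring_lists() for every ring slot up front, so ring->active_list and ring->request_list are valid (empty) lists even for engines the hardware never instantiates, and the list walks above are always safe:

	static void init_ring_lists(struct intel_engine_cs *ring)
	{
		INIT_LIST_HEAD(&ring->active_list);
		INIT_LIST_HEAD(&ring->request_list);
	}

	/* in i915_gem_load(): initialize every slot, present or not */
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
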