drivers/gpu/drm/i915/gem/i915_gem_busy.c

DEFINITIONS

This source file includes the following definitions:
  1. __busy_read_flag
  2. __busy_write_id
  3. __busy_set_if_active
  4. busy_check_reader
  5. busy_check_writer
  6. i915_gem_busy_ioctl
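
The busy word returned by i915_gem_busy_ioctl() packs the busy writer's
engine class into the low 16 bits (class + 1, so 0 means "no writer") and a
per-engine-class reader bitmask into the high 16 bits. A minimal userspace
sketch of querying it, assuming libdrm's drmIoctl() and the i915 uAPI
header; gem_busy() and its fd/handle arguments are illustrative, not part
of this file:

	#include <stdint.h>
	#include <xf86drm.h>       /* drmIoctl(), from libdrm */
	#include <drm/i915_drm.h>  /* struct drm_i915_gem_busy */

	static uint32_t gem_busy(int fd, uint32_t handle)
	{
		struct drm_i915_gem_busy busy = { .handle = handle };

		/* On error, report idle for the purposes of this sketch. */
		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
			return 0;

		/* Low 16 bits: writer class + 1; high 16 bits: readers. */
		return busy.busy;
	}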

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "gt/intel_engine.h"

#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"

static __always_inline u32 __busy_read_flag(u16 id)
{
	if (id == (u16)I915_ENGINE_CLASS_INVALID)
		return 0xffff0000u;

	GEM_BUG_ON(id >= 16);
	return 0x10000u << id;
}

static __always_inline u32 __busy_write_id(u16 id)
{
	/*
	 * The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we cannot guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	if (id == (u16)I915_ENGINE_CLASS_INVALID)
		return 0xffffffffu;

	return (id + 1) | __busy_read_flag(id);
}
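
/*
 * Worked example of the encoding above (values follow directly from
 * __busy_read_flag() and __busy_write_id(); the class numbers are the
 * uAPI I915_ENGINE_CLASS_* values, e.g. RENDER == 0, VIDEO == 2):
 *
 *	__busy_read_flag(0) == 0x00010000	RENDER busy reading
 *	__busy_write_id(0)  == 0x00010001	RENDER busy writing
 *	__busy_write_id(2)  == 0x00040003	VIDEO busy writing
 *
 * so a busy word of 0x00050003 means RENDER and VIDEO readers plus a
 * VIDEO writer.
 */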

static __always_inline unsigned int
__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
{
	const struct i915_request *rq;

	/*
	 * We have to check the current hw status of the fence as the uABI
	 * guarantees forward progress. We could rely on the idle worker
	 * to eventually flush us, but to minimise latency just ask the
	 * hardware.
	 *
	 * Note we only report on the status of native fences.
	 */
	if (!dma_fence_is_i915(fence))
		return 0;

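	/*
	 * dma_fence_is_i915() is an ops comparison, so a fence that passes
	 * it is known to be embedded in an i915_request and can be safely
	 * downcast.
	 */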
	/* opencode to_request() in order to avoid const warnings */
	rq = container_of(fence, const struct i915_request, fence);
	if (i915_request_completed(rq))
		return 0;

	/* Beware type-expansion follies! */
	BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
	return flag(rq->engine->uabi_class);
}

static __always_inline unsigned int
busy_check_reader(const struct dma_fence *fence)
{
	return __busy_set_if_active(fence, __busy_read_flag);
}

static __always_inline unsigned int
busy_check_writer(const struct dma_fence *fence)
{
	if (!fence)
		return 0;

	return __busy_set_if_active(fence, __busy_write_id);
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	struct dma_resv_list *list;
	unsigned int seq;
	int err;

	err = -ENOENT;
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj)
		goto out;

	/*
	 * A discrepancy here is that we do not report the status of
	 * non-i915 fences, i.e. even though we may report the object as idle,
	 * a call to set-domain may still stall waiting for foreign rendering.
	 * This also means that wait-ioctl may report an object as busy,
	 * where busy-ioctl considers it idle.
	 *
	 * We trade the ability to warn of foreign fences to report on which
	 * i915 engines are active for the object.
	 *
	 * Alternatively, we can trade that extra information on read/write
	 * activity with
	 *	args->busy =
	 *		!dma_resv_test_signaled_rcu(obj->resv, true);
	 * to report the overall busyness. This is what the wait-ioctl does.
	 */
retry:
	seq = raw_read_seqcount(&obj->base.resv->seq);

	/* Translate the exclusive fence to the READ *and* WRITE engine */
	args->busy =
		busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));

	/* Translate shared fences to READ set of engines */
	list = rcu_dereference(obj->base.resv->fence);
	if (list) {
		unsigned int shared_count = list->shared_count, i;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence =
				rcu_dereference(list->shared[i]);

			args->busy |= busy_check_reader(fence);
		}
	}

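	/*
	 * Only a busy result needs to be double-checked against the
	 * seqcount: idle is always an acceptable answer to an inherently
	 * racy query, whereas a busy word assembled while the fences were
	 * concurrently replaced could mix two snapshots.
	 */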
	if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
		goto retry;

	err = 0;
out:
	rcu_read_unlock();
	return err;
}
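
The open-coded loop in i915_gem_busy_ioctl() is a variant of the kernel's
standard seqcount read pattern. A minimal generic sketch using the
<linux/seqlock.h> API (the seqcount 's' and the data it guards are
placeholders, not anything from this file):

	static seqcount_t s;		/* guards shared_value */
	static int shared_value;

	static int read_shared(void)
	{
		unsigned int seq;
		int val;

		do {
			/* read_seqcount_begin() waits out in-flight writers */
			seq = read_seqcount_begin(&s);
			val = shared_value;
		} while (read_seqcount_retry(&s, seq));

		return val;
	}

The ioctl deliberately deviates from this sketch: raw_read_seqcount()
never waits for a writer to finish, and the result is re-checked only when
it came back busy, since an idle answer is already acceptable for a racy
query.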
