This source file includes the following definitions:
- __do_clflush
- clflush_work
- clflush_release
- clflush_work_create
- i915_gem_clflush_object
 
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

struct clflush {
        struct dma_fence_work base;
        struct drm_i915_gem_object *obj;
};

static void __do_clflush(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        drm_clflush_sg(obj->mm.pages);
        intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
}
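
On x86, drm_clflush_sg() walks the object's sg_table of backing pages and flushes every CPU cache line they cover. The sketch below shows the same primitive in standalone userspace C over a flat buffer, using the SSE2 clflush intrinsic; clflush_range() and the fixed cache-line size are illustrative assumptions, not part of the DRM API.

/* Illustrative only: flush the CPU cache lines covering [addr, addr + len).
 * drm_clflush_sg() does the equivalent per page over an sg_table, and may
 * pick clflushopt or a wbinvd fallback depending on the CPU.
 */
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>          /* _mm_clflush(), _mm_mfence() (SSE2) */

#define CACHELINE_BYTES 64      /* assumed here; real code queries the CPU */

static void clflush_range(void *addr, size_t len)
{
        char *p = (char *)((uintptr_t)addr & ~(uintptr_t)(CACHELINE_BYTES - 1));
        char *end = (char *)addr + len;

        for (; p < end; p += CACHELINE_BYTES)
                _mm_clflush(p); /* evict this line from the cache hierarchy */
        _mm_mfence();           /* order the flushes before later accesses */
}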

static int clflush_work(struct dma_fence_work *base)
{
        struct clflush *clflush = container_of(base, typeof(*clflush), base);
        struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                goto put;

        __do_clflush(obj);
        i915_gem_object_unpin_pages(obj);

put:
        i915_gem_object_put(obj);
        return err;
}

static void clflush_release(struct dma_fence_work *base)
{
        struct clflush *clflush = container_of(base, typeof(*clflush), base);

        if (clflush->obj)
                i915_gem_object_put(clflush->obj);
}
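
Both callbacks lean on two small kernel idioms: container_of(), which recovers the wrapping struct clflush from the embedded dma_fence_work pointer the callback is handed, and fetch_and_zero(), an i915 macro that reads a pointer and clears it in one expression, so clflush_release() later sees NULL and does not drop the object reference a second time. A standalone userspace rendition of both idioms (the macro bodies mirror the kernel's, but this is not i915 code):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define fetch_and_zero(ptr) ({                  \
        __typeof__(*(ptr)) __T = *(ptr);        \
        *(ptr) = (__typeof__(*(ptr)))0;         \
        __T;                                    \
})

struct base { int id; };

struct wrapper {
        struct base base;       /* embedded, like dma_fence_work in clflush */
        const char *obj;
};

int main(void)
{
        struct wrapper w = { .base = { .id = 1 }, .obj = "gem object" };
        struct base *b = &w.base;       /* what the callback is handed */

        /* Step back from the embedded member to the containing struct. */
        struct wrapper *back = container_of(b, struct wrapper, base);

        /* Claim ownership of the pointer, leaving NULL behind. */
        const char *obj = fetch_and_zero(&back->obj);

        printf("%s, remaining: %p\n", obj, (void *)back->obj);
        return 0;
}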

static const struct dma_fence_work_ops clflush_ops = {
        .name = "clflush",
        .work = clflush_work,
        .release = clflush_release,
};

static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
        struct clflush *clflush;

        GEM_BUG_ON(!obj->cache_dirty);

        clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
        if (!clflush)
                return NULL;

        dma_fence_work_init(&clflush->base, &clflush_ops);
        clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

        return clflush;
}
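
clflush_work_create() packages the flush as fence work so it can be ordered behind other fences instead of running immediately. The same i915_sw_fence_work recipe recurs in the driver; its general shape, as used in this file, is sketched below (my_ops and my_work() are hypothetical stand-ins, while the dma_fence_work_* and i915_sw_fence_* calls are the ones this file actually makes):

/* Hypothetical consumer of the fence-work pattern used above. */
static int my_work(struct dma_fence_work *base)
{
        /* ... perform the deferred operation ... */
        return 0;
}

static const struct dma_fence_work_ops my_ops = {
        .name = "my-work",
        .work = my_work,
};

/* 1. allocate and init:   dma_fence_work_init(&w->base, &my_ops);
 * 2. order after others:  i915_sw_fence_await_reservation(&w->base.chain, ...);
 * 3. publish the fence:   dma_resv_add_excl_fence(resv, &w->base.dma);
 * 4. arm the work:        dma_fence_work_commit(&w->base);
 */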

bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                             unsigned int flags)
{
        struct clflush *clflush;

        assert_object_held(obj);

        /*
         * Stolen memory is always coherent with the GPU as it is explicitly
         * marked as wc by the system, or the system is cache-coherent.
         * Similarly, we only access struct pages through the CPU cache, so
         * anything not backed by physical memory we consider to be always
         * coherent and not need clflushing.
         */
        if (!i915_gem_object_has_struct_page(obj)) {
                obj->cache_dirty = false;
                return false;
        }

        /*
         * If the GPU is snooping the contents of the CPU cache,
         * we do not need to manually clear the CPU cache lines. However,
         * the caches are only snooped when the render cache is
         * flushed/invalidated. As we always have to emit invalidations
         * and flushes when moving into and out of the RENDER domain, correct
         * snooping ensures that all commands issued before the invalidation
         * are complete and the caches will be snooped correctly.
         */
        if (!(flags & I915_CLFLUSH_FORCE) &&
            obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
                return false;

        trace_i915_gem_object_clflush(obj);

        clflush = NULL;
        if (!(flags & I915_CLFLUSH_SYNC))
                clflush = clflush_work_create(obj);
        if (clflush) {
                i915_sw_fence_await_reservation(&clflush->base.chain,
                                                obj->base.resv, NULL, true,
                                                I915_FENCE_TIMEOUT,
                                                I915_FENCE_GFP);
                dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
                dma_fence_work_commit(&clflush->base);
        } else if (obj->mm.pages) {
                __do_clflush(obj);
        } else {
                GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
        }

        obj->cache_dirty = false;
        return true;
}
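
For context, i915_gem_clflush_object() is called with the object lock held, typically when an object whose CPU caches may be dirty is about to be handed to the GPU. A hypothetical caller, sketched under that assumption (example_prepare_for_gpu() is not a real i915 function; the calls and flag inside it come from this file):

/* Hypothetical caller: push dirty CPU cache lines to memory before the GPU
 * reads the object. I915_CLFLUSH_SYNC forces the flush to happen here
 * rather than as deferred fence work.
 */
static void example_prepare_for_gpu(struct drm_i915_gem_object *obj)
{
        assert_object_held(obj);        /* same precondition as above */

        if (obj->cache_dirty)
                i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
}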