root/drivers/gpu/drm/i915/display/intel_frontbuffer.c

DEFINITIONS

This source file includes the following definitions.
  1. frontbuffer_flush
  2. intel_frontbuffer_flip_prepare
  3. intel_frontbuffer_flip_complete
  4. intel_frontbuffer_flip
  5. __intel_fb_invalidate
  6. __intel_fb_flush
  7. frontbuffer_active
  8. frontbuffer_retire
  9. frontbuffer_release
  10. intel_frontbuffer_get
  11. intel_frontbuffer_put
  12. intel_frontbuffer_track

/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Daniel Vetter <daniel.vetter@ffwll.ch>
 */

/**
 * DOC: frontbuffer tracking
 *
 * Many features require us to track changes to the currently active
 * frontbuffer, especially rendering targeted at the frontbuffer.
 *
 * To be able to do so we track frontbuffers using a bitmask for all possible
 * frontbuffer slots through intel_frontbuffer_track(). The functions in this
 * file are then called when the contents of the frontbuffer are invalidated,
 * when frontbuffer rendering has stopped again to flush out all the changes
 * and when the frontbuffer is exchanged with a flip. Subsystems interested in
 * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
 * into the relevant places and filter for the frontbuffer slots that they are
 * interested in.
 *
 * On a high level there are two types of powersaving features. The first type
 * works like a special cache (FBC and PSR) and is interested in when it should
 * stop caching and when to restart caching. This is done by placing callbacks
 * into the invalidate and the flush functions: at invalidate time the caching
 * must be stopped and at flush time it can be restarted. Such features may also
 * need to know when the frontbuffer changes (e.g. when the hw doesn't initiate
 * an invalidate and flush on its own), which can be achieved by placing
 * callbacks into the flip functions.
 *
 * The other type of display power saving feature only cares about busyness
 * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
 * busyness. There is no direct way to detect idleness. Instead, an idle timer
 * (delayed work) should be started from the flush and flip functions and
 * cancelled as soon as busyness is detected. A sketch of such a consumer
 * follows below.
 */
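
/*
 * Illustrative sketch (not part of the original file): a minimal busyness-only
 * consumer of the tracking scheme described above, built around a hypothetical
 * "foo" feature. It arms an idle timer (delayed work) from the flush/flip
 * notifications and cancels it again when an invalidate signals new busyness.
 * All "foo" names here are assumptions for illustration only.
 *
 *      struct foo_state {
 *              struct delayed_work idle_work;
 *              unsigned int busy_frontbuffer_bits;
 *              spinlock_t lock;
 *      };
 *
 *      static void foo_flush(struct foo_state *foo, unsigned int frontbuffer_bits)
 *      {
 *              spin_lock(&foo->lock);
 *              foo->busy_frontbuffer_bits &= ~frontbuffer_bits;
 *              if (!foo->busy_frontbuffer_bits)
 *                      mod_delayed_work(system_wq, &foo->idle_work,
 *                                       msecs_to_jiffies(1000));
 *              spin_unlock(&foo->lock);
 *      }
 *
 *      static void foo_invalidate(struct foo_state *foo, unsigned int frontbuffer_bits)
 *      {
 *              spin_lock(&foo->lock);
 *              foo->busy_frontbuffer_bits |= frontbuffer_bits;
 *              cancel_delayed_work(&foo->idle_work);
 *              spin_unlock(&foo->lock);
 *      }
 */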

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"

/**
 * frontbuffer_flush - flush frontbuffer
 * @i915: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * This function gets called every time rendering on the given planes has
 * completed and frontbuffer caching can be started again. Flushes will get
 * delayed if they're blocked by some outstanding asynchronous rendering.
 *
 * Can be called without any locks held.
 */
static void frontbuffer_flush(struct drm_i915_private *i915,
                              unsigned int frontbuffer_bits,
                              enum fb_op_origin origin)
{
        /* Delay flushing when rings are still busy. */
        spin_lock(&i915->fb_tracking.lock);
        frontbuffer_bits &= ~i915->fb_tracking.busy_bits;
        spin_unlock(&i915->fb_tracking.lock);

        if (!frontbuffer_bits)
                return;

        might_sleep();
        intel_edp_drrs_flush(i915, frontbuffer_bits);
        intel_psr_flush(i915, frontbuffer_bits, origin);
        intel_fbc_flush(i915, frontbuffer_bits, origin);
}

/**
 * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
 * @i915: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after scheduling a flip on the planes given by
 * @frontbuffer_bits. The actual frontbuffer flushing will be delayed until
 * completion is signalled with intel_frontbuffer_flip_complete(). If an
 * invalidate happens in between, this flush will be cancelled.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
                                    unsigned frontbuffer_bits)
{
        spin_lock(&i915->fb_tracking.lock);
        i915->fb_tracking.flip_bits |= frontbuffer_bits;
        /* Remove stale busy bits due to the old buffer. */
        i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
        spin_unlock(&i915->fb_tracking.lock);
}

/**
 * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
 * @i915: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after the flip has been latched and will complete
 * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
                                     unsigned frontbuffer_bits)
{
        spin_lock(&i915->fb_tracking.lock);
        /* Mask any cancelled flips. */
        frontbuffer_bits &= i915->fb_tracking.flip_bits;
        i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
        spin_unlock(&i915->fb_tracking.lock);

        if (frontbuffer_bits)
                frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
}
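
/*
 * Illustrative sketch (not part of the original file): the expected pairing of
 * the two functions above around an asynchronous page flip, using hypothetical
 * helper names. Prepare is called when the flip is queued; complete is called
 * once the hardware has latched the flip (e.g. from the flip-done interrupt
 * handler), which then triggers the deferred flush. foo_write_surface_address()
 * stands in for the actual hardware programming.
 *
 *      static void foo_queue_async_flip(struct drm_i915_private *i915,
 *                                       unsigned int frontbuffer_bits)
 *      {
 *              intel_frontbuffer_flip_prepare(i915, frontbuffer_bits);
 *              foo_write_surface_address(i915);
 *      }
 *
 *      static void foo_flip_done_irq(struct drm_i915_private *i915,
 *                                    unsigned int frontbuffer_bits)
 *      {
 *              intel_frontbuffer_flip_complete(i915, frontbuffer_bits);
 *      }
 */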

/**
 * intel_frontbuffer_flip - synchronous frontbuffer flip
 * @i915: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after scheduling a flip on the planes given by
 * @frontbuffer_bits. This is for synchronous plane updates which will happen on
 * the next vblank and which will not get delayed by pending gpu rendering.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip(struct drm_i915_private *i915,
                            unsigned frontbuffer_bits)
{
        spin_lock(&i915->fb_tracking.lock);
        /* Remove stale busy bits due to the old buffer. */
        i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
        spin_unlock(&i915->fb_tracking.lock);

        frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
}

void __intel_fb_invalidate(struct intel_frontbuffer *front,
                           enum fb_op_origin origin,
                           unsigned int frontbuffer_bits)
{
        struct drm_i915_private *i915 = to_i915(front->obj->base.dev);

        if (origin == ORIGIN_CS) {
                spin_lock(&i915->fb_tracking.lock);
                i915->fb_tracking.busy_bits |= frontbuffer_bits;
                i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
                spin_unlock(&i915->fb_tracking.lock);
        }

        might_sleep();
        intel_psr_invalidate(i915, frontbuffer_bits, origin);
        intel_edp_drrs_invalidate(i915, frontbuffer_bits);
        intel_fbc_invalidate(i915, frontbuffer_bits, origin);
}

void __intel_fb_flush(struct intel_frontbuffer *front,
                      enum fb_op_origin origin,
                      unsigned int frontbuffer_bits)
{
        struct drm_i915_private *i915 = to_i915(front->obj->base.dev);

        if (origin == ORIGIN_CS) {
                spin_lock(&i915->fb_tracking.lock);
                /* Filter out new bits since rendering started. */
                frontbuffer_bits &= i915->fb_tracking.busy_bits;
                i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
                spin_unlock(&i915->fb_tracking.lock);
        }

        if (frontbuffer_bits)
                frontbuffer_flush(i915, frontbuffer_bits, origin);
}
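
/*
 * Illustrative sketch (not part of the original file): how a direct CPU write
 * to a frontbuffer would typically be bracketed, assuming the inline
 * intel_frontbuffer_invalidate()/intel_frontbuffer_flush() wrappers from
 * intel_frontbuffer.h that call into the helpers above. The "foo" names are
 * hypothetical; foo_write_pixels() stands in for the actual CPU writes to the
 * object's pages.
 *
 *      static void foo_cpu_write_frontbuffer(struct intel_frontbuffer *front)
 *      {
 *              intel_frontbuffer_invalidate(front, ORIGIN_CPU);
 *              foo_write_pixels(front->obj);
 *              intel_frontbuffer_flush(front, ORIGIN_CPU);
 *      }
 */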

static int frontbuffer_active(struct i915_active *ref)
{
        struct intel_frontbuffer *front =
                container_of(ref, typeof(*front), write);

        kref_get(&front->ref);
        return 0;
}

static void frontbuffer_retire(struct i915_active *ref)
{
        struct intel_frontbuffer *front =
                container_of(ref, typeof(*front), write);

        intel_frontbuffer_flush(front, ORIGIN_CS);
        intel_frontbuffer_put(front);
}

static void frontbuffer_release(struct kref *ref)
        __releases(&to_i915(front->obj->base.dev)->fb_tracking.lock)
{
        struct intel_frontbuffer *front =
                container_of(ref, typeof(*front), ref);

        front->obj->frontbuffer = NULL;
        spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock);

        i915_gem_object_put(front->obj);
        kfree(front);
}

struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct intel_frontbuffer *front;

        spin_lock(&i915->fb_tracking.lock);
        front = obj->frontbuffer;
        if (front)
                kref_get(&front->ref);
        spin_unlock(&i915->fb_tracking.lock);
        if (front)
                return front;

        front = kmalloc(sizeof(*front), GFP_KERNEL);
        if (!front)
                return NULL;

        front->obj = obj;
        kref_init(&front->ref);
        atomic_set(&front->bits, 0);
        i915_active_init(i915, &front->write,
                         frontbuffer_active, frontbuffer_retire);

        spin_lock(&i915->fb_tracking.lock);
        if (obj->frontbuffer) {
                kfree(front);
                front = obj->frontbuffer;
                kref_get(&front->ref);
        } else {
                i915_gem_object_get(obj);
                obj->frontbuffer = front;
        }
        spin_unlock(&i915->fb_tracking.lock);

        return front;
}

void intel_frontbuffer_put(struct intel_frontbuffer *front)
{
        kref_put_lock(&front->ref,
                      frontbuffer_release,
                      &to_i915(front->obj->base.dev)->fb_tracking.lock);
}

/**
 * intel_frontbuffer_track - update frontbuffer tracking
 * @old: current buffer for the frontbuffer slots
 * @new: new buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void intel_frontbuffer_track(struct intel_frontbuffer *old,
                             struct intel_frontbuffer *new,
                             unsigned int frontbuffer_bits)
{
        /*
         * Control of individual bits within the mask is guarded by
         * the owning plane->mutex, i.e. we can never see concurrent
         * manipulation of individual bits. But since the bitfield as a whole
         * is updated using RMW, we need to use atomics in order to update
         * the bits.
         */
        BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
                     BITS_PER_TYPE(atomic_t));

        if (old) {
                WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
                atomic_andnot(frontbuffer_bits, &old->bits);
        }

        if (new) {
                WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
                atomic_or(frontbuffer_bits, &new->bits);
        }
}
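
/*
 * Illustrative sketch (not part of the original file): moving the tracking bits
 * from an outgoing frontbuffer to an incoming one during a plane update. The
 * helper name is a hypothetical assumption; the INTEL_FRONTBUFFER() slot macro
 * is assumed to be the per-pipe/per-plane bit helper from intel_frontbuffer.h.
 * Either frontbuffer pointer may be NULL when the plane is being enabled or
 * disabled.
 *
 *      static void foo_track_plane_update(struct intel_frontbuffer *old,
 *                                         struct intel_frontbuffer *new,
 *                                         enum pipe pipe, enum plane_id plane_id)
 *      {
 *              intel_frontbuffer_track(old, new,
 *                                      INTEL_FRONTBUFFER(pipe, plane_id));
 *      }
 */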
