root/drivers/gpu/drm/drm_lock.c


DEFINITIONS

This source file includes the following definitions.
  1. drm_lock_take
  2. drm_lock_transfer
  3. drm_legacy_lock_free
  4. drm_legacy_lock
  5. drm_legacy_unlock
  6. drm_legacy_idlelock_take
  7. drm_legacy_idlelock_release
  8. drm_legacy_i_have_hw_lock
  9. drm_legacy_lock_release
  10. drm_legacy_lock_master_cleanup

/*
 * \file drm_lock.c
 * IOCTLs for locking
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/sched/signal.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);

/**
 * Take the heavyweight lock.
 *
 * \param lock_data lock data pointer.
 * \param context locking context.
 * \return one if the lock is held, or zero otherwise.
 *
 * Attempts to mark the lock as held by the given context, via the \p cmpxchg
 * instruction.
 */
static
int drm_lock_take(struct drm_lock_data *lock_data,
                  unsigned int context)
{
        unsigned int old, new, prev;
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        spin_lock_bh(&lock_data->spinlock);
        do {
                old = *lock;
                if (old & _DRM_LOCK_HELD)
                        new = old | _DRM_LOCK_CONT;
                else {
                        new = context | _DRM_LOCK_HELD |
                                ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
                                 _DRM_LOCK_CONT : 0);
                }
                prev = cmpxchg(lock, old, new);
        } while (prev != old);
        spin_unlock_bh(&lock_data->spinlock);

        if (_DRM_LOCKING_CONTEXT(old) == context) {
                if (old & _DRM_LOCK_HELD) {
                        if (context != DRM_KERNEL_CONTEXT) {
                                DRM_ERROR("%d holds heavyweight lock\n",
                                          context);
                        }
                        return 0;
                }
        }

        if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
                /* Have lock */
                return 1;
        }
        return 0;
}

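/*
 * A worked example of the lock-word encoding manipulated above, using the
 * _DRM_LOCK_* definitions from <drm/drm.h>: the top two bits of the word
 * are flags and the remaining bits hold the owner's context number.
 *
 *	0x80000003 == _DRM_LOCK_HELD | 3		held by context 3
 *	0xc0000003 == ...  | _DRM_LOCK_CONT		held by 3, contended
 *	_DRM_LOCKING_CONTEXT(0xc0000003) == 3		masks off both flags
 */
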
/**
 * This takes the lock forcibly and hands it to context.  Should ONLY be used
 * inside *_unlock to give the lock to the kernel before calling
 * *_dma_schedule.
 *
 * \param lock_data lock data pointer.
 * \param context locking context.
 * \return always one.
 *
 * Resets the lock file pointer.
 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
 */
static int drm_lock_transfer(struct drm_lock_data *lock_data,
                             unsigned int context)
{
        unsigned int old, new, prev;
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        lock_data->file_priv = NULL;
        do {
                old = *lock;
                new = context | _DRM_LOCK_HELD;
                prev = cmpxchg(lock, old, new);
        } while (prev != old);
        return 1;
}

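/*
 * Free the heavyweight lock on behalf of the given context.
 *
 * If a kernel waiter is queued, the lock is handed to the kernel (idlelock)
 * instead of being dropped; otherwise the HELD flag is cleared and anyone
 * sleeping on the lock queue is woken.  Returns 1 if the lock was
 * transferred or was not actually held by \p context, 0 on a normal free.
 */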
static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
                                unsigned int context)
{
        unsigned int old, new, prev;
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        spin_lock_bh(&lock_data->spinlock);
        if (lock_data->kernel_waiters != 0) {
                drm_lock_transfer(lock_data, 0);
                lock_data->idle_has_lock = 1;
                spin_unlock_bh(&lock_data->spinlock);
                return 1;
        }
        spin_unlock_bh(&lock_data->spinlock);

        do {
                old = *lock;
                new = _DRM_LOCKING_CONTEXT(old);
                prev = cmpxchg(lock, old, new);
        } while (prev != old);

        if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
                DRM_ERROR("%d freed heavyweight lock held by %d\n",
                          context, _DRM_LOCKING_CONTEXT(old));
                return 1;
        }
        wake_up_interruptible(&lock_data->lock_queue);
        return 0;
}

/**
 * Lock ioctl.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a drm_lock structure.
 * \param file_priv DRM file private.
 * \return zero on success or negative number on failure.
 *
 * Add the current task to the lock wait queue, and attempt to take the lock.
 * (A userspace sketch of the lock/unlock pairing follows drm_legacy_unlock()
 * below.)
 */
int drm_legacy_lock(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        DECLARE_WAITQUEUE(entry, current);
        struct drm_lock *lock = data;
        struct drm_master *master = file_priv->master;
        int ret = 0;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        ++file_priv->lock_count;

        if (lock->context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          task_pid_nr(current), lock->context);
                return -EINVAL;
        }

        DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                  lock->context, task_pid_nr(current),
                  master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
                  lock->flags);

        add_wait_queue(&master->lock.lock_queue, &entry);
        spin_lock_bh(&master->lock.spinlock);
        master->lock.user_waiters++;
        spin_unlock_bh(&master->lock.spinlock);

        for (;;) {
                __set_current_state(TASK_INTERRUPTIBLE);
                if (!master->lock.hw_lock) {
                        /* Device has been unregistered */
                        send_sig(SIGTERM, current, 0);
                        ret = -EINTR;
                        break;
                }
                if (drm_lock_take(&master->lock, lock->context)) {
                        master->lock.file_priv = file_priv;
                        master->lock.lock_time = jiffies;
                        break;  /* Got lock */
                }

                /* Contention */
                mutex_unlock(&drm_global_mutex);
                schedule();
                mutex_lock(&drm_global_mutex);
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
        }
        spin_lock_bh(&master->lock.spinlock);
        master->lock.user_waiters--;
        spin_unlock_bh(&master->lock.spinlock);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&master->lock.lock_queue, &entry);

        DRM_DEBUG("%d %s\n", lock->context,
                  ret ? "interrupted" : "has lock");
        if (ret)
                return ret;

        /* Don't block all signals on the master process for now.
         * This is probably not the correct answer, but it lets us
         * debug the xkb X server for now. */
        if (!drm_is_current_master(file_priv)) {
                dev->sigdata.context = lock->context;
                dev->sigdata.lock = master->lock.hw_lock;
        }

        if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) {
                if (dev->driver->dma_quiescent(dev)) {
                        DRM_DEBUG("%d waiting for DMA quiescent\n",
                                  lock->context);
                        return -EBUSY;
                }
        }

        return 0;
}

/**
 * Unlock ioctl.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a drm_lock structure.
 * \param file_priv DRM file private.
 * \return zero on success or negative number on failure.
 *
 * Transfer and free the lock.
 */
int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_lock *lock = data;
        struct drm_master *master = file_priv->master;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (lock->context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          task_pid_nr(current), lock->context);
                return -EINVAL;
        }

        if (drm_legacy_lock_free(&master->lock, lock->context)) {
                /* FIXME: Should really bail out here. */
        }

        return 0;
}

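/*
 * Userspace pairing of the two ioctls above, as a minimal illustrative
 * sketch (not part of this file; assumes fd is open on a legacy DRM node
 * and ctx was obtained earlier, e.g. via DRM_IOCTL_ADD_CTX):
 *
 *	struct drm_lock lk = { .context = ctx, .flags = 0 };
 *
 *	if (ioctl(fd, DRM_IOCTL_LOCK, &lk))
 *		return -errno;		// interrupted, or device gone
 *	// ... program the hardware ...
 *	ioctl(fd, DRM_IOCTL_UNLOCK, &lk);
 */
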
/**
 * This function returns immediately and takes the hw lock with the kernel
 * context if it is free, otherwise it gets the highest priority when and if
 * it is eventually released.
 *
 * This guarantees that the kernel will _eventually_ have the lock _unless_
 * it is held by a blocked process.  (In the latter case an explicit wait
 * for the hardware lock would cause a deadlock, which is why the "idlelock"
 * was invented.)
 *
 * This should be sufficient to wait for GPU idle without having to worry
 * about starvation.
 */
void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{
        int ret;

        spin_lock_bh(&lock_data->spinlock);
        lock_data->kernel_waiters++;
        if (!lock_data->idle_has_lock) {
                spin_unlock_bh(&lock_data->spinlock);
                ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
                spin_lock_bh(&lock_data->spinlock);

                if (ret == 1)
                        lock_data->idle_has_lock = 1;
        }
        spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_legacy_idlelock_take);

void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
{
        unsigned int old, prev;
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        spin_lock_bh(&lock_data->spinlock);
        if (--lock_data->kernel_waiters == 0) {
                if (lock_data->idle_has_lock) {
                        do {
                                old = *lock;
                                prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
                        } while (prev != old);
                        wake_up_interruptible(&lock_data->lock_queue);
                        lock_data->idle_has_lock = 0;
                }
        }
        spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_legacy_idlelock_release);
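
/*
 * A sketch of how a driver might use the idlelock pair to wait for GPU
 * idle without racing new clients for the heavyweight lock (illustrative
 * only; wait_for_engine_idle() is a hypothetical helper, not a DRM API):
 *
 *	drm_legacy_idlelock_take(&master->lock);
 *	ret = wait_for_engine_idle(dev);
 *	drm_legacy_idlelock_release(&master->lock);
 */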

/* True iff file_priv currently holds the heavyweight hardware lock. */
static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
                                     struct drm_file *file_priv)
{
        struct drm_master *master = file_priv->master;

        return (file_priv->lock_count && master->lock.hw_lock &&
                _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
                master->lock.file_priv == file_priv);
}

/* If the given file still holds the hardware lock, free it on its behalf
 * (called when the file is closed) so other clients are not blocked forever. */
void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
{
        struct drm_file *file_priv = filp->private_data;

        /* if the master has gone away we can't do anything with the lock */
        if (!dev->master)
                return;

        if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
                DRM_DEBUG("File %p released, freeing lock for context %d\n",
                          filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
                drm_legacy_lock_free(&file_priv->master->lock,
                                     _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
        }
}

void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
{
        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return;

        /*
         * Since the master is disappearing, so is the
         * possibility to lock.
         */
        mutex_lock(&dev->struct_mutex);
        if (master->lock.hw_lock) {
                if (dev->sigdata.lock == master->lock.hw_lock)
                        dev->sigdata.lock = NULL;
                master->lock.hw_lock = NULL;
                master->lock.file_priv = NULL;
                wake_up_interruptible_all(&master->lock.lock_queue);
        }
        mutex_unlock(&dev->struct_mutex);
}
