drivers/gpu/drm/vmwgfx/vmwgfx_irq.c


DEFINITIONS

This source file includes the following definitions:
  1. vmw_thread_fn
  2. vmw_irq_handler
  3. vmw_fifo_idle
  4. vmw_update_seqno
  5. vmw_seqno_passed
  6. vmw_fallback_wait
  7. vmw_generic_waiter_add
  8. vmw_generic_waiter_remove
  9. vmw_seqno_waiter_add
  10. vmw_seqno_waiter_remove
  11. vmw_goal_waiter_add
  12. vmw_goal_waiter_remove
  13. vmw_wait_seqno
  14. vmw_irq_preinstall
  15. vmw_irq_uninstall
  16. vmw_irq_install

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)
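
/*
 * Fence seqnos are 32-bit values that are allowed to wrap around, so
 * throughout this file "a has passed b" is tested with unsigned
 * subtraction, (u32)(a - b) < VMW_FENCE_WRAP, rather than a direct
 * comparison. For example, with last_read_seqno == 0x00000002 and
 * seqno == 0xfffffffe, the difference 0x00000002 - 0xfffffffe == 4
 * (mod 2^32), which is well below VMW_FENCE_WRAP, so the seqno is
 * correctly treated as already passed even though it is numerically
 * larger.
 */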

/**
 * vmw_thread_fn - Deferred (process context) irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the deferred part of irq processing.
 * It is guaranteed to run at least once after vmw_irq_handler has
 * returned with IRQ_WAKE_THREAD.
 */
static irqreturn_t vmw_thread_fn(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *)arg;
        struct vmw_private *dev_priv = vmw_priv(dev);
        irqreturn_t ret = IRQ_NONE;

        if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
                               dev_priv->irqthread_pending)) {
                vmw_fences_update(dev_priv->fman);
                wake_up_all(&dev_priv->fence_queue);
                ret = IRQ_HANDLED;
        }

        if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
                               dev_priv->irqthread_pending)) {
                vmw_cmdbuf_irqthread(dev_priv->cman);
                ret = IRQ_HANDLED;
        }

        return ret;
}

/**
 * vmw_irq_handler - Primary (hard) irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the quick part of irq processing.
 * The function performs fast actions like clearing the device interrupt
 * flags and also reasonably quick actions like waking processes waiting for
 * FIFO space. Other IRQ actions are deferred to the IRQ thread.
 */
static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *)arg;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status, masked_status;
        irqreturn_t ret = IRQ_HANDLED;

        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        masked_status = status & READ_ONCE(dev_priv->irq_mask);

        if (likely(status))
                outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

        if (!status)
                return IRQ_NONE;

        if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
                wake_up_all(&dev_priv->fifo_queue);

        if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
                              SVGA_IRQFLAG_FENCE_GOAL)) &&
            !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
                ret = IRQ_WAKE_THREAD;

        if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
                              SVGA_IRQFLAG_ERROR)) &&
            !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
                              dev_priv->irqthread_pending))
                ret = IRQ_WAKE_THREAD;

        return ret;
}
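
/*
 * Note on the handshake between vmw_irq_handler() and vmw_thread_fn():
 * the hard irq handler latches work for the thread with test_and_set_bit()
 * and only returns IRQ_WAKE_THREAD on a 0 -> 1 transition, while the thread
 * consumes the latch with test_and_clear_bit() before processing. Interrupts
 * arriving while a bit is already set are thus coalesced into the pending
 * thread run instead of generating redundant wakeups, and none are lost.
 */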

static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
        return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
 116 
 117 void vmw_update_seqno(struct vmw_private *dev_priv,
 118                          struct vmw_fifo_state *fifo_state)
 119 {
 120         u32 *fifo_mem = dev_priv->mmio_virt;
 121         uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 122 
 123         if (dev_priv->last_read_seqno != seqno) {
 124                 dev_priv->last_read_seqno = seqno;
 125                 vmw_marker_pull(&fifo_state->marker_queue, seqno);
 126                 vmw_fences_update(dev_priv->fman);
 127         }
 128 }
 129 
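/**
 * vmw_seqno_passed - Determine whether a fence seqno has passed
 *
 * @dev_priv: Pointer to the device private structure.
 * @seqno: The seqno to check.
 *
 * Checks the cached last_read_seqno first, then re-reads the seqno from
 * the FIFO. On FIFOs without fence support an idle device counts as
 * passed, and a seqno that lies beyond anything actually emitted is
 * treated as stale and hence signaled.
 */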
bool vmw_seqno_passed(struct vmw_private *dev_priv,
                      uint32_t seqno)
{
        struct vmw_fifo_state *fifo_state;
        bool ret;

        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;

        fifo_state = &dev_priv->fifo;
        vmw_update_seqno(dev_priv, fifo_state);
        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;

        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
            vmw_fifo_idle(dev_priv, seqno))
                return true;

        /*
         * Finally, check whether the seqno is ahead of what has actually
         * been emitted. In that case the fence is stale and signaled.
         */
        ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
               > VMW_FENCE_WRAP);

        return ret;
}

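/**
 * vmw_fallback_wait - Polling fallback for devices without usable fence irqs
 *
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Sleep one jiffy between polls instead of busy-waiting.
 * @fifo_idle: Wait for the FIFO to go idle rather than for @seqno,
 * blocking command submission while waiting.
 * @seqno: The seqno to wait for when @fifo_idle is false.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies before declaring a device lockup.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted by a signal, or
 * the error returned when idling the command buffer manager.
 */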
int vmw_fallback_wait(struct vmw_private *dev_priv,
                      bool lazy,
                      bool fifo_idle,
                      uint32_t seqno,
                      bool interruptible,
                      unsigned long timeout)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

        uint32_t count = 0;
        uint32_t signal_seq;
        int ret;
        unsigned long end_jiffies = jiffies + timeout;
        bool (*wait_condition)(struct vmw_private *, uint32_t);
        DEFINE_WAIT(__wait);

        wait_condition = (fifo_idle) ? &vmw_fifo_idle :
                &vmw_seqno_passed;

        /*
         * Block command submission while waiting for idle.
         */
        if (fifo_idle) {
                down_read(&fifo_state->rwsem);
                if (dev_priv->cman) {
                        ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
                                              10*HZ);
                        if (ret)
                                goto out_err;
                }
        }

        signal_seq = atomic_read(&dev_priv->marker_seq);
        ret = 0;

        for (;;) {
                prepare_to_wait(&dev_priv->fence_queue, &__wait,
                                (interruptible) ?
                                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                if (wait_condition(dev_priv, seqno))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                if (lazy)
                        schedule_timeout(1);
                else if ((++count & 0x0F) == 0) {
                        /*
                         * FIXME: Use schedule_hrtimeout here for
                         * newer kernels and lower CPU utilization.
                         */
                        __set_current_state(TASK_RUNNING);
                        schedule();
                        __set_current_state((interruptible) ?
                                            TASK_INTERRUPTIBLE :
                                            TASK_UNINTERRUPTIBLE);
                }
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fence_queue, &__wait);
        if (ret == 0 && fifo_idle) {
                u32 *fifo_mem = dev_priv->mmio_virt;

                vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
        }
        wake_up_all(&dev_priv->fence_queue);
out_err:
        if (fifo_idle)
                up_read(&fifo_state->rwsem);

        return ret;
}

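/**
 * vmw_generic_waiter_add - Reference-count a device irq flag
 *
 * @dev_priv: Pointer to the device private structure.
 * @flag: The SVGA_IRQFLAG_* bit the caller wants delivered.
 * @waiter_count: Per-flag count of threads currently waiting on it.
 *
 * On the first waiter, any stale pending instance of @flag is cleared in
 * the irq status port and the flag is enabled in the device irq mask.
 * vmw_generic_waiter_remove() below disables the flag again when the
 * last waiter leaves.
 */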
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
                            u32 flag, int *waiter_count)
{
        spin_lock_bh(&dev_priv->waiter_lock);
        if ((*waiter_count)++ == 0) {
                outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= flag;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        }
        spin_unlock_bh(&dev_priv->waiter_lock);
}

void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
                               u32 flag, int *waiter_count)
{
        spin_lock_bh(&dev_priv->waiter_lock);
        if (--(*waiter_count) == 0) {
                dev_priv->irq_mask &= ~flag;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        }
        spin_unlock_bh(&dev_priv->waiter_lock);
}
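
/*
 * Illustrative usage sketch (not a function in this file): a sleep on a
 * fence seqno is bracketed by the waiter helpers so that the fence irq is
 * only unmasked while somebody actually cares about it, exactly as
 * vmw_wait_seqno() does below:
 *
 *      vmw_seqno_waiter_add(dev_priv);
 *      ret = wait_event_timeout(dev_priv->fence_queue,
 *                               vmw_seqno_passed(dev_priv, seqno), timeout);
 *      vmw_seqno_waiter_remove(dev_priv);
 */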

void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
                               &dev_priv->fence_queue_waiters);
}

void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
        vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
                                  &dev_priv->fence_queue_waiters);
}

void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
                               &dev_priv->goal_queue_waiters);
}

void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
        vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
                                  &dev_priv->goal_queue_waiters);
}

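/**
 * vmw_wait_seqno - Wait for a fence seqno, using irqs when available
 *
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Passed through to vmw_fallback_wait() to select a sleeping poll.
 * @seqno: The seqno to wait for.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Falls back to vmw_fallback_wait() when the device lacks fence or irq
 * masking support. Return: Zero on success, -EBUSY on timeout, or
 * -ERESTARTSYS if interrupted by a signal.
 */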
int vmw_wait_seqno(struct vmw_private *dev_priv,
                   bool lazy, uint32_t seqno,
                   bool interruptible, unsigned long timeout)
{
        long ret;
        struct vmw_fifo_state *fifo = &dev_priv->fifo;

        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return 0;

        if (likely(vmw_seqno_passed(dev_priv, seqno)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

        if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
                return vmw_fallback_wait(dev_priv, lazy, true, seqno,
                                         interruptible, timeout);

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fallback_wait(dev_priv, lazy, false, seqno,
                                         interruptible, timeout);

        vmw_seqno_waiter_add(dev_priv);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                    (dev_priv->fence_queue,
                     vmw_seqno_passed(dev_priv, seqno),
                     timeout);
        else
                ret = wait_event_timeout
                    (dev_priv->fence_queue,
                     vmw_seqno_passed(dev_priv, seqno),
                     timeout);

        vmw_seqno_waiter_remove(dev_priv);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        return ret;
}

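/*
 * Reading the irq status port and writing the value straight back
 * acknowledges any interrupts the device has pending, so the handler
 * installed afterwards starts from a clean slate.
 */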
static void vmw_irq_preinstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

void vmw_irq_uninstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        if (!dev->irq_enabled)
                return;

        vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

        dev->irq_enabled = false;
        free_irq(dev->irq, dev);
}

/**
 * vmw_irq_install - Install the irq handlers
 *
 * @dev:  Pointer to the drm device.
 * @irq:  The irq number.
 * Return:  Zero if successful. Negative number otherwise.
 */
int vmw_irq_install(struct drm_device *dev, int irq)
{
        int ret;

        if (dev->irq_enabled)
                return -EBUSY;

        vmw_irq_preinstall(dev);

        ret = request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn,
                                   IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
        if (ret < 0)
                return ret;

        dev->irq_enabled = true;
        dev->irq = irq;

        return ret;
}
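
/*
 * Illustrative call site (a sketch; the actual driver-load path lives
 * outside this file): a caller would typically install the handlers once
 * during device initialization, using the interrupt line assigned to the
 * underlying PCI device, and pair this with vmw_irq_uninstall() on
 * teardown:
 *
 *      ret = vmw_irq_install(dev, pdev->irq);
 *      if (ret)
 *              goto out_no_irq;
 */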
