root/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c


DEFINITIONS

This source file includes the following definitions:
  1. kfd_interrupt_init
  2. kfd_interrupt_exit
  3. enqueue_ih_ring_entry
  4. dequeue_ih_ring_entry
  5. interrupt_wq
  6. interrupt_is_wanted

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * KFD Interrupts.
 *
 * AMD GPUs deliver interrupts by pushing an interrupt description onto the
 * interrupt ring and then raising an interrupt. KGD receives the interrupt
 * in its ISR and passes us a pointer to each new entry on the interrupt ring.
 *
 * We generally can't process interrupt-signaled events from the ISR, so we
 * call out to each interrupt client module (currently only the scheduler)
 * to ask whether each interrupt is interesting. If a client returns true,
 * the entry requires further processing, so we copy it to an internal
 * interrupt ring and call each interrupt client again from a work queue.
 *
 * There's no acknowledgment for the interrupts we use. The hardware simply
 * queues a new interrupt each time without waiting.
 *
 * The fixed-size internal queue means that it's possible for us to lose
 * interrupts because we have no back-pressure to the hardware.
 */
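
/*
 * For context: the ISR-side caller (kgd2kfd_interrupt() in kfd_device.c)
 * follows roughly this pattern. This is a sketch, not the verbatim
 * implementation:
 *
 *	spin_lock_irqsave(&kfd->interrupt_lock, flags);
 *	if (kfd->interrupts_active
 *	    && interrupt_is_wanted(kfd, ih_ring_entry,
 *				   patched_ihre, &is_patched)
 *	    && enqueue_ih_ring_entry(kfd,
 *				   is_patched ? patched_ihre : ih_ring_entry))
 *		queue_work(kfd->ih_wq, &kfd->interrupt_work);
 *	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
 */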

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kfifo.h>
#include "kfd_priv.h"

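/* Number of IH ring entries buffered between the ISR and the work queue */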
#define KFD_IH_NUM_ENTRIES 8192

static void interrupt_wq(struct work_struct *);

int kfd_interrupt_init(struct kfd_dev *kfd)
{
	int r;

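	/*
	 * kfifo_alloc() rounds the buffer size up to a power of two; with
	 * 8192 entries and a power-of-two entry size the requested size is
	 * already exact.
	 */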
	r = kfifo_alloc(&kfd->ih_fifo,
		KFD_IH_NUM_ENTRIES * kfd->device_info->ih_ring_entry_size,
		GFP_KERNEL);
	if (r) {
		dev_err(kfd_chardev(), "Failed to allocate IH fifo\n");
		return r;
	}

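	/*
	 * A high-priority workqueue with max_active = 1: at most one
	 * interrupt_wq() work item runs at a time, which is what makes
	 * the single-reader assumption of dequeue_ih_ring_entry() hold.
	 */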
	kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
	if (unlikely(!kfd->ih_wq)) {
		kfifo_free(&kfd->ih_fifo);
		dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n");
		return -ENOMEM;
	}
	spin_lock_init(&kfd->interrupt_lock);

	INIT_WORK(&kfd->interrupt_work, interrupt_wq);

	kfd->interrupts_active = true;

	/*
	 * After this function returns, the interrupt will be enabled. This
	 * barrier ensures that an interrupt handler running on a different
	 * processor sees all the above writes.
	 */
	smp_wmb();

	return 0;
}

void kfd_interrupt_exit(struct kfd_dev *kfd)
{
	/*
	 * Stop the interrupt handler from writing to the fifo and scheduling
	 * work-queue items. The spinlock ensures that any interrupt running
	 * after we have unlocked sees interrupts_active = false.
	 */
	unsigned long flags;

	spin_lock_irqsave(&kfd->interrupt_lock, flags);
	kfd->interrupts_active = false;
	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);

	/*
	 * flush_workqueue() ensures that there are no outstanding work-queue
	 * items that will access ih_fifo. New work items can't be created
	 * because we stopped interrupt handling above.
	 */
	flush_workqueue(kfd->ih_wq);

	kfifo_free(&kfd->ih_fifo);
}

/*
 * Assumption: single reader/writer. This function is not re-entrant.
 */
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	int count;

	count = kfifo_in(&kfd->ih_fifo, ih_ring_entry,
				kfd->device_info->ih_ring_entry_size);
	if (count != kfd->device_info->ih_ring_entry_size) {
		dev_err_ratelimited(kfd_chardev(),
			"Interrupt ring overflow, dropping interrupt %d\n",
			count);
		return false;
	}

	return true;
}

/*
 * Assumption: single reader/writer. This function is not re-entrant.
 */
static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
{
	int count;

	count = kfifo_out(&kfd->ih_fifo, ih_ring_entry,
				kfd->device_info->ih_ring_entry_size);

	WARN_ON(count && count != kfd->device_info->ih_ring_entry_size);

	return count == kfd->device_info->ih_ring_entry_size;
}

static void interrupt_wq(struct work_struct *work)
{
	struct kfd_dev *dev = container_of(work, struct kfd_dev,
						interrupt_work);
	uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];

	if (dev->device_info->ih_ring_entry_size > sizeof(ih_ring_entry)) {
		dev_err_once(kfd_chardev(), "Ring entry too small\n");
		return;
	}

	while (dequeue_ih_ring_entry(dev, ih_ring_entry))
		dev->device_info->event_interrupt_class->interrupt_wq(dev,
								ih_ring_entry);
}

bool interrupt_is_wanted(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry,
			uint32_t *patched_ihre, bool *flag)
{
	/* integer and bitwise OR so there is no boolean short-circuiting */
	unsigned int wanted = 0;

	wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
					 ih_ring_entry, patched_ihre, flag);

	return wanted != 0;
}
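
/*
 * A client module plugs in through struct kfd_event_interrupt_class
 * (declared in kfd_priv.h). Roughly, the interface looks like this -
 * a sketch; see kfd_priv.h for the authoritative declaration:
 *
 *	struct kfd_event_interrupt_class {
 *		bool (*interrupt_isr)(struct kfd_dev *dev,
 *				const uint32_t *ih_ring_entry,
 *				uint32_t *patched_ihre, bool *patched_flag);
 *		void (*interrupt_wq)(struct kfd_dev *dev,
 *				const uint32_t *ih_ring_entry);
 *	};
 *
 * interrupt_isr() runs in interrupt context and decides whether an entry is
 * interesting (optionally patching it); interrupt_wq() does the heavier
 * processing later, from the work queue.
 */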
