root/drivers/gpu/drm/v3d/v3d_irq.c


DEFINITIONS

This source file includes the following definitions:
  1. v3d_overflow_mem_work
  2. v3d_irq
  3. v3d_hub_irq
  4. v3d_irq_init
  5. v3d_irq_enable
  6. v3d_irq_disable
  7. v3d_irq_reset

// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a bin, render, TFU done, or CSD done interrupt, we
 * need to signal the fence for that job so that the scheduler can
 * queue up the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
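 *
 * This file also decodes and logs MMU faults and GMP violations
 * reported via the hub and core interrupt lines.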
 */

#include <linux/platform_device.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

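/* Per-core interrupts we handle: binner out-of-memory, bin/render/CSD
 * job completion, and GMP violations.
 */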
#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |  \
                             V3D_INT_FLDONE |   \
                             V3D_INT_FRDONE |   \
                             V3D_INT_CSDDONE |  \
                             V3D_INT_GMPV))

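/* Hub interrupts we handle: MMU faults and TFU job completion. */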
#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |       \
                            V3D_HUB_INT_MMU_PTI |       \
                            V3D_HUB_INT_MMU_CAP |       \
                            V3D_HUB_INT_TFUC))

static irqreturn_t
v3d_hub_irq(int irq, void *arg);

static void
v3d_overflow_mem_work(struct work_struct *work)
{
        struct v3d_dev *v3d =
                container_of(work, struct v3d_dev, overflow_mem_work);
        struct drm_device *dev = &v3d->drm;
        struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
        struct drm_gem_object *obj;
        unsigned long irqflags;

        if (IS_ERR(bo)) {
                DRM_ERROR("Couldn't allocate binner overflow mem\n");
                return;
        }
        obj = &bo->base.base;

        /* If there is no bin job running, we lost a race: our work
         * task came in after the bin job completed and exited.  This
         * can happen because the HW signals OOM before it's fully
         * OOM, so the binner might just barely complete.
         *
         * If we lose the race and our work task comes in after a new
         * bin job got scheduled, that's fine.  We'll just give it
         * some binner pool anyway.
         */
        spin_lock_irqsave(&v3d->job_lock, irqflags);
        if (!v3d->bin_job) {
                spin_unlock_irqrestore(&v3d->job_lock, irqflags);
                goto out;
        }

        drm_gem_object_get(obj);
        list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
        spin_unlock_irqrestore(&v3d->job_lock, irqflags);

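        /* Point the binner at the new overflow memory: its address in
         * the GPU's address space and its size in bytes.
         */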
        V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
        V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);

out:
        drm_gem_object_put_unlocked(obj);
}

static irqreturn_t
v3d_irq(int irq, void *arg)
{
        struct v3d_dev *v3d = arg;
        u32 intsts;
        irqreturn_t status = IRQ_NONE;

        intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

        /* Acknowledge the interrupts we're handling here. */
        V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

        if (intsts & V3D_INT_OUTOMEM) {
                /* Note that the OOM status is edge signaled, so the
                 * interrupt won't happen again until we actually
                 * add more memory.  Also, as of V3D 4.1, FLDONE won't
                 * be reported until any OOM state has been cleared.
                 */
                schedule_work(&v3d->overflow_mem_work);
                status = IRQ_HANDLED;
        }

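        /* The binner finished its command list; signal the bin job's
         * fence.
         */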
        if (intsts & V3D_INT_FLDONE) {
                struct v3d_fence *fence =
                        to_v3d_fence(v3d->bin_job->base.irq_fence);

                trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
                dma_fence_signal(&fence->base);
                status = IRQ_HANDLED;
        }

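        /* The render job finished; signal its fence. */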
        if (intsts & V3D_INT_FRDONE) {
                struct v3d_fence *fence =
                        to_v3d_fence(v3d->render_job->base.irq_fence);

                trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
                dma_fence_signal(&fence->base);
                status = IRQ_HANDLED;
        }

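        /* The compute (CSD) job finished; signal its fence. */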
        if (intsts & V3D_INT_CSDDONE) {
                struct v3d_fence *fence =
                        to_v3d_fence(v3d->csd_job->base.irq_fence);

                trace_v3d_csd_irq(&v3d->drm, fence->seqno);
                dma_fence_signal(&fence->base);
                status = IRQ_HANDLED;
        }

        /* We shouldn't be triggering these if we have GMP in
         * always-allowed mode.
         */
        if (intsts & V3D_INT_GMPV)
                dev_err(v3d->dev, "GMP violation\n");

        /* V3D 4.2 wires the hub and core IRQs together, so if we
         * didn't see the common one then check the hub for MMU IRQs.
         */
        if (v3d->single_irq_line && status == IRQ_NONE)
                return v3d_hub_irq(irq, arg);

        return status;
}

static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
        struct v3d_dev *v3d = arg;
        u32 intsts;
        irqreturn_t status = IRQ_NONE;

        intsts = V3D_READ(V3D_HUB_INT_STS);

        /* Acknowledge the interrupts we're handling here. */
        V3D_WRITE(V3D_HUB_INT_CLR, intsts);

        if (intsts & V3D_HUB_INT_TFUC) {
                struct v3d_fence *fence =
                        to_v3d_fence(v3d->tfu_job->base.irq_fence);

                trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
                dma_fence_signal(&fence->base);
                status = IRQ_HANDLED;
        }

        if (intsts & (V3D_HUB_INT_MMU_WRV |
                      V3D_HUB_INT_MMU_PTI |
                      V3D_HUB_INT_MMU_CAP)) {
                u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
                u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
                                (v3d->va_width - 32));
                static const char *const v3d41_axi_ids[] = {
                        "L2T",
                        "PTB",
                        "PSE",
                        "TLB",
                        "CLE",
                        "TFU",
                        "MMU",
                        "GMP",
                };
                const char *client = "?";

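                /* Clear the recorded fault state in the MMU control
                 * register before reporting the fault.
                 */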
                V3D_WRITE(V3D_MMU_CTL,
                          V3D_READ(V3D_MMU_CTL) & (V3D_MMU_CTL_CAP_EXCEEDED |
                                                   V3D_MMU_CTL_PT_INVALID |
                                                   V3D_MMU_CTL_WRITE_VIOLATION));

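                /* On V3D 4.1+, the upper bits of the AXI ID select the
                 * client unit that faulted; decode them to a name.
                 */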
                if (v3d->ver >= 41) {
                        axi_id = axi_id >> 5;
                        if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
                                client = v3d41_axi_ids[axi_id];
                }

                dev_err(v3d->dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
                        client, axi_id, (long long)vio_addr,
                        ((intsts & V3D_HUB_INT_MMU_WRV) ?
                         ", write violation" : ""),
                        ((intsts & V3D_HUB_INT_MMU_PTI) ?
                         ", pte invalid" : ""),
                        ((intsts & V3D_HUB_INT_MMU_CAP) ?
                         ", cap exceeded" : ""));
                status = IRQ_HANDLED;
        }

        return status;
}

int
v3d_irq_init(struct v3d_dev *v3d)
{
        int irq1, ret, core;

        INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

        /* Clear any pending interrupts someone might have left around
         * for us.
         */
        for (core = 0; core < v3d->cores; core++)
                V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
        V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

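        /* Some platforms expose separate hub and core interrupt lines
         * (indices 0 and 1); others share a single line for both.
         */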
        irq1 = platform_get_irq(v3d->pdev, 1);
        if (irq1 == -EPROBE_DEFER)
                return irq1;
        if (irq1 > 0) {
                ret = devm_request_irq(v3d->dev, irq1,
                                       v3d_irq, IRQF_SHARED,
                                       "v3d_core0", v3d);
                if (ret)
                        goto fail;
                ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
                                       v3d_hub_irq, IRQF_SHARED,
                                       "v3d_hub", v3d);
                if (ret)
                        goto fail;
        } else {
                v3d->single_irq_line = true;

                ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
                                       v3d_irq, IRQF_SHARED,
                                       "v3d", v3d);
                if (ret)
                        goto fail;
        }

        v3d_irq_enable(v3d);
        return 0;

fail:
        if (ret != -EPROBE_DEFER)
                dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
        return ret;
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
        int core;

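        /* MSK_SET masks (disables) the interrupt bits written to it;
         * MSK_CLR unmasks (enables) them.
         */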
        /* Enable our set of interrupts, masking out any others. */
        for (core = 0; core < v3d->cores; core++) {
                V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
                V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
        }

        V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
        V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}

void
v3d_irq_disable(struct v3d_dev *v3d)
{
        int core;

        /* Disable all interrupts. */
        for (core = 0; core < v3d->cores; core++)
                V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
        V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

        /* Clear any pending interrupts we might have left. */
        for (core = 0; core < v3d->cores; core++)
                V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
        V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

        cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
        v3d_irq_enable(v3d);
}
