root/drivers/gpu/drm/amd/amdgpu/cik_ih.c


DEFINITIONS

This source file includes the following definitions.
  1. cik_ih_enable_interrupts
  2. cik_ih_disable_interrupts
  3. cik_ih_irq_init
  4. cik_ih_irq_disable
  5. cik_ih_get_wptr
  6. cik_ih_decode_iv
  7. cik_ih_set_rptr
  8. cik_ih_early_init
  9. cik_ih_sw_init
  10. cik_ih_sw_fini
  11. cik_ih_hw_init
  12. cik_ih_hw_fini
  13. cik_ih_suspend
  14. cik_ih_resume
  15. cik_ih_is_idle
  16. cik_ih_wait_for_idle
  17. cik_ih_soft_reset
  18. cik_ih_set_clockgating_state
  19. cik_ih_set_powergating_state
  20. cik_ih_set_interrupt_funcs

/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "cikd.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

/*
 * Interrupts
 * Starting with r6xx, interrupts are handled via a ring buffer.
 * Ring buffers are areas of GPU accessible memory that the GPU
 * writes interrupt vectors into and the host reads vectors out of.
 * There is a rptr (read pointer) that determines where the
 * host is currently reading, and a wptr (write pointer)
 * which determines where the GPU has written.  When the
 * pointers are equal, the ring is idle.  When the GPU
 * writes vectors to the ring buffer, it increments the
 * wptr.  When there is an interrupt, the host then starts
 * fetching commands and processing them until the pointers are
 * equal again at which point it updates the rptr.
 */

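/*
 * Minimal sketch of the rptr/wptr protocol described above, kept under
 * "#if 0" because it is illustrative only and not part of the driver:
 * process_one_vector() is a hypothetical helper, and the real consumer
 * loop lives in the generic amdgpu IH code.
 */
#if 0
static void example_drain_ih_ring(struct amdgpu_device *adev,
                                  struct amdgpu_ih_ring *ih)
{
        /* Where the GPU has written to so far (register or writeback memory). */
        u32 wptr = cik_ih_get_wptr(adev, ih);

        /* Consume vectors until the host catches up with the GPU. */
        while (ih->rptr != wptr) {
                process_one_vector(adev, ih);   /* advances ih->rptr by 16 */
                ih->rptr &= ih->ptr_mask;
        }

        /* Publish the new read position back to the hardware. */
        cik_ih_set_rptr(adev, ih);
}
#endif
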
static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * cik_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (CIK).
 */
static void cik_ih_enable_interrupts(struct amdgpu_device *adev)
{
        u32 ih_cntl = RREG32(mmIH_CNTL);
        u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);

        ih_cntl |= IH_CNTL__ENABLE_INTR_MASK;
        ih_rb_cntl |= IH_RB_CNTL__RB_ENABLE_MASK;
        WREG32(mmIH_CNTL, ih_cntl);
        WREG32(mmIH_RB_CNTL, ih_rb_cntl);
        adev->irq.ih.enabled = true;
}

/**
 * cik_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer (CIK).
 */
static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
{
        u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
        u32 ih_cntl = RREG32(mmIH_CNTL);

        ih_rb_cntl &= ~IH_RB_CNTL__RB_ENABLE_MASK;
        ih_cntl &= ~IH_CNTL__ENABLE_INTR_MASK;
        WREG32(mmIH_RB_CNTL, ih_rb_cntl);
        WREG32(mmIH_CNTL, ih_cntl);
        /* set rptr, wptr to 0 */
        WREG32(mmIH_RB_RPTR, 0);
        WREG32(mmIH_RB_WPTR, 0);
        adev->irq.ih.enabled = false;
        adev->irq.ih.rptr = 0;
}

/**
 * cik_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, then set up and
 * enable the IH ring buffer (CIK).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int cik_ih_irq_init(struct amdgpu_device *adev)
{
        struct amdgpu_ih_ring *ih = &adev->irq.ih;
        int rb_bufsz;
        u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

        /* disable irqs */
        cik_ih_disable_interrupts(adev);

        /* setup interrupt control */
        WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
        interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
        /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
         * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
         */
        interrupt_cntl &= ~INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK;
        /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
        interrupt_cntl &= ~INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK;
        WREG32(mmINTERRUPT_CNTL, interrupt_cntl);

        WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
        rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

        ih_rb_cntl = (IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK |
                      IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK |
                      (rb_bufsz << 1));

        ih_rb_cntl |= IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK;

        /* set the writeback address whether it's enabled or not */
        WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
        WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);

        WREG32(mmIH_RB_CNTL, ih_rb_cntl);

        /* set rptr, wptr to 0 */
        WREG32(mmIH_RB_RPTR, 0);
        WREG32(mmIH_RB_WPTR, 0);

        /* Default settings for IH_CNTL (disabled at first) */
        ih_cntl = (0x10 << IH_CNTL__MC_WRREQ_CREDIT__SHIFT) |
                (0x10 << IH_CNTL__MC_WR_CLEAN_CNT__SHIFT) |
                (0 << IH_CNTL__MC_VMID__SHIFT);
        /* IH_CNTL__RPTR_REARM_MASK only works if msi's are enabled */
        if (adev->irq.msi_enabled)
                ih_cntl |= IH_CNTL__RPTR_REARM_MASK;
        WREG32(mmIH_CNTL, ih_cntl);

        pci_set_master(adev->pdev);

        /* enable irqs */
        cik_ih_enable_interrupts(adev);

        return 0;
}

/**
 * cik_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (CIK).
 */
static void cik_ih_irq_disable(struct amdgpu_device *adev)
{
        cik_ih_disable_interrupts(adev);
        /* Wait and acknowledge irq */
        mdelay(1);
}

/**
 * cik_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch wptr from
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (CIK).  Also check for
 * ring buffer overflow and deal with it.
 * Used by the interrupt ring processing code.
 * Returns the value of the wptr.
 */
static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
                           struct amdgpu_ih_ring *ih)
{
        u32 wptr, tmp;

        wptr = le32_to_cpu(*ih->wptr_cpu);

        if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
                wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
                /* When a ring buffer overflow happens, start parsing interrupts
                 * from the last vector that was not overwritten (wptr + 16).
                 * Hopefully this allows us to catch up.
                 */
                dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
                         wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
                ih->rptr = (wptr + 16) & ih->ptr_mask;
                tmp = RREG32(mmIH_RB_CNTL);
                tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
                WREG32(mmIH_RB_CNTL, tmp);
        }
        return (wptr & ih->ptr_mask);
}

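/* Worked example of the overflow recovery above, with illustrative numbers
 * and assuming ptr_mask == ring_size - 1 as set up by amdgpu_ih_ring_init():
 * with a 64 KiB ring, ptr_mask = 0xFFFF.  If the overflowed wptr reads back
 * as 0xFFF8, processing resumes at rptr = (0xFFF8 + 16) & 0xFFFF = 0x0008,
 * i.e. one 16-byte IV entry past the write pointer, which is the oldest
 * entry that has not been overwritten.
 */
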
/*        CIK IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [63:60]  - reserved
 * [71:64]  - RINGID
 *            CP:
 *            ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
 *            QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
 *                     - for gfx, hw shader state (0=PS...5=LS, 6=CS)
 *            ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
 *            PIPE_ID - ME0 0=3D
 *                    - ME1&2 compute dispatcher (4 pipes each)
 *            SDMA:
 *            INSTANCE_ID [1:0], QUEUE_ID[1:0]
 *            INSTANCE_ID - 0 = sdma0, 1 = sdma1
 *            QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
 * [79:72]  - VMID
 * [95:80]  - PASID
 * [127:96] - reserved
 */

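/* Worked example of the layout above, using purely illustrative values.
 * Suppose the four little-endian dwords of one IV entry read back as:
 *
 *   dw[0] = 0x00000092   ->  src_id      = 0x92
 *   dw[1] = 0x00000004   ->  src_data[0] = 0x4
 *   dw[2] = 0x00050801   ->  ring_id     = 0x01, vmid = 0x08, pasid = 0x0005
 *   dw[3] = 0x00000000   ->  reserved, ignored
 *
 * which matches the field extraction performed by cik_ih_decode_iv() below.
 */
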
/**
 * cik_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to decode from
 * @entry: IV entry to place decoded information into
 *
 * Decodes the interrupt vector at the current rptr
 * position and also advances the position.
 */
static void cik_ih_decode_iv(struct amdgpu_device *adev,
                             struct amdgpu_ih_ring *ih,
                             struct amdgpu_iv_entry *entry)
{
        /* wptr/rptr are in bytes! */
        u32 ring_index = ih->rptr >> 2;
        uint32_t dw[4];

        dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
        dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
        dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
        dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);

        entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
        entry->src_id = dw[0] & 0xff;
        entry->src_data[0] = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
        entry->vmid = (dw[2] >> 8) & 0xff;
        entry->pasid = (dw[2] >> 16) & 0xffff;

        /* wptr/rptr are in bytes! */
        ih->rptr += 16;
}

/**
 * cik_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to set rptr for
 *
 * Set the IH ring buffer rptr.
 */
static void cik_ih_set_rptr(struct amdgpu_device *adev,
                            struct amdgpu_ih_ring *ih)
{
        WREG32(mmIH_RB_RPTR, ih->rptr);
}

static int cik_ih_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret;

        ret = amdgpu_irq_add_domain(adev);
        if (ret)
                return ret;

        cik_ih_set_interrupt_funcs(adev);

        return 0;
}

static int cik_ih_sw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
        if (r)
                return r;

        r = amdgpu_irq_init(adev);

        return r;
}

static int cik_ih_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_irq_fini(adev);
        amdgpu_ih_ring_fini(adev, &adev->irq.ih);
        amdgpu_irq_remove_domain(adev);

        return 0;
}

static int cik_ih_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = cik_ih_irq_init(adev);
        if (r)
                return r;

        return 0;
}

static int cik_ih_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        cik_ih_irq_disable(adev);

        return 0;
}

static int cik_ih_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return cik_ih_hw_fini(adev);
}

static int cik_ih_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return cik_ih_hw_init(adev);
}

static bool cik_ih_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (tmp & SRBM_STATUS__IH_BUSY_MASK)
                return false;

        return true;
}

static int cik_ih_wait_for_idle(void *handle)
{
        unsigned i;
        u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                /* read SRBM_STATUS */
                tmp = RREG32(mmSRBM_STATUS) & SRBM_STATUS__IH_BUSY_MASK;
                if (!tmp)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

static int cik_ih_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (tmp & SRBM_STATUS__IH_BUSY_MASK)
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

        if (srbm_soft_reset) {
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }

        return 0;
}

static int cik_ih_set_clockgating_state(void *handle,
                                        enum amd_clockgating_state state)
{
        return 0;
}

static int cik_ih_set_powergating_state(void *handle,
                                        enum amd_powergating_state state)
{
        return 0;
}

static const struct amd_ip_funcs cik_ih_ip_funcs = {
        .name = "cik_ih",
        .early_init = cik_ih_early_init,
        .late_init = NULL,
        .sw_init = cik_ih_sw_init,
        .sw_fini = cik_ih_sw_fini,
        .hw_init = cik_ih_hw_init,
        .hw_fini = cik_ih_hw_fini,
        .suspend = cik_ih_suspend,
        .resume = cik_ih_resume,
        .is_idle = cik_ih_is_idle,
        .wait_for_idle = cik_ih_wait_for_idle,
        .soft_reset = cik_ih_soft_reset,
        .set_clockgating_state = cik_ih_set_clockgating_state,
        .set_powergating_state = cik_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs cik_ih_funcs = {
        .get_wptr = cik_ih_get_wptr,
        .decode_iv = cik_ih_decode_iv,
        .set_rptr = cik_ih_set_rptr
};

static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
        adev->irq.ih_funcs = &cik_ih_funcs;
}

const struct amdgpu_ip_block_version cik_ih_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_IH,
        .major = 2,
        .minor = 0,
        .rev = 0,
        .funcs = &cik_ih_ip_funcs,
};
