root/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c

DEFINITIONS

This source file includes the following definitions:
  1. uvd_v4_2_ring_get_rptr
  2. uvd_v4_2_ring_get_wptr
  3. uvd_v4_2_ring_set_wptr
  4. uvd_v4_2_early_init
  5. uvd_v4_2_sw_init
  6. uvd_v4_2_sw_fini
  7. uvd_v4_2_hw_init
  8. uvd_v4_2_hw_fini
  9. uvd_v4_2_suspend
  10. uvd_v4_2_resume
  11. uvd_v4_2_start
  12. uvd_v4_2_stop
  13. uvd_v4_2_ring_emit_fence
  14. uvd_v4_2_ring_test_ring
  15. uvd_v4_2_ring_emit_ib
  16. uvd_v4_2_ring_insert_nop
  17. uvd_v4_2_mc_resume
  18. uvd_v4_2_enable_mgcg
  19. uvd_v4_2_set_dcm
  20. uvd_v4_2_is_idle
  21. uvd_v4_2_wait_for_idle
  22. uvd_v4_2_soft_reset
  23. uvd_v4_2_set_interrupt_state
  24. uvd_v4_2_process_interrupt
  25. uvd_v4_2_set_clockgating_state
  26. uvd_v4_2_set_powergating_state
  27. uvd_v4_2_set_ring_funcs
  28. uvd_v4_2_set_irq_funcs

/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
                                enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
                             bool sw_mode);
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable);

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

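/**
 * uvd_v4_2_early_init - set ring and irq callbacks
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Set the number of UVD instances and install the ring and
 * interrupt source callbacks.
 */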
static int uvd_v4_2_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->uvd.num_uvd_inst = 1;

        uvd_v4_2_set_ring_funcs(adev);
        uvd_v4_2_set_irq_funcs(adev);

        return 0;
}

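/**
 * uvd_v4_2_sw_init - software init
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Register the UVD TRAP interrupt source, initialize the common UVD
 * code and the UVD ring, and set up the scheduler entity.
 */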
static int uvd_v4_2_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        r = amdgpu_uvd_entity_init(adev);

        return r;
}

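/**
 * uvd_v4_2_sw_fini - software teardown
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Suspend UVD and tear down the structures created in sw_init.
 */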
static int uvd_v4_2_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t tmp;
        int r;

        uvd_v4_2_enable_mgcg(adev, true);
        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);

        r = amdgpu_ring_test_helper(ring);
        if (r)
                goto done;

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
                goto done;
        }

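        /* program the semaphore timeout values */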
        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;

        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v4_2_stop(adev);

        ring->sched.ready = false;

        return 0;
}

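/**
 * uvd_v4_2_suspend - stop UVD for suspend
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Shut the hardware block down and let the common UVD code save its state.
 */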
static int uvd_v4_2_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v4_2_hw_fini(adev);
        if (r)
                return r;

        return amdgpu_uvd_suspend(adev);
}

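/**
 * uvd_v4_2_resume - restart UVD after suspend
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Let the common UVD code restore its state, then re-run hw_init.
 */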
static int uvd_v4_2_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        return uvd_v4_2_hw_init(adev);
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t rb_bufsz;
        int i, j, r;
        u32 tmp;
        /* disable byte swapping */
        u32 lmi_swap_cntl = 0;
        u32 mp_swap_cntl = 0;

        /* set uvd busy */
        WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));

        uvd_v4_2_set_dcm(adev, true);
        WREG32(mmUVD_CGC_GATE, 0);

        /* take UVD block out of reset */
        WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 1 << 9);

        /* disable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL, 0x203108);

        tmp = RREG32(mmUVD_MPC_CNTL);
        WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        uvd_v4_2_mc_resume(adev);

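        /* clear bit 4 of the LMI cache control, in the UVD context register space */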
        tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
        WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

        /* enable UMC */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

        WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

        WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

        mdelay(10);

        for (i = 0; i < 10; ++i) {
                uint32_t status;
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                                ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }

        /* enable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));

        WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* program the 4GB memory segment for rptr and ring buffer */
        WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
                                   (0x7 << 16) | (0x1 << 31));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0x0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

        /* set the ring address */
        WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

        /* Set ring buffer size */
        rb_bufsz = order_base_2(ring->ring_size);
        rb_bufsz = (0x1 << 8) | rb_bufsz;
        WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

        return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
        uint32_t i, j;
        uint32_t status;

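        /* force RBC into idle state, as in uvd_v4_2_start() */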
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

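        /* wait (up to ~1 s) for UVD_STATUS bit 1, the same bit polled in uvd_v4_2_start() */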
        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(1);
                }
                if (status & 2)
                        break;
        }

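        /* likewise wait for the LMI status bits to assert */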
        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_LMI_STATUS);
                        if (status & 0xf)
                                break;
                        mdelay(1);
                }
                if (status & 0xf)
                        break;
        }

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_LMI_STATUS);
                        if (status & 0x240)
                                break;
                        mdelay(1);
                }
                if (status & 0x240)
                        break;
        }

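        /* clear bit 2 of register 0x3D49; no symbolic name for this register is available here */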
        WREG32_P(0x3D49, 0, ~(1 << 2));

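        /* disable the VCPU clock (bit 9, enabled in uvd_v4_2_start()) */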
        WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

        WREG32(mmUVD_STATUS, 0);

        uvd_v4_2_set_dcm(adev, false);
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence is written to
 * @seq: sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* flags for this fence
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job the indirect buffer belongs to
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
                                  uint32_t flags)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
        amdgpu_ring_write(ring, ib->gpu_addr);
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

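/**
 * uvd_v4_2_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to pad; expected to be even
 *
 * Pad the ring with two-dword NOP packets.
 */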
static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        int i;

        WARN_ON(ring->wptr % 2 || count % 2);

        for (i = 0; i < count / 2; i++) {
                amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
                amdgpu_ring_write(ring, 0);
        }
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
        uint64_t addr;
        uint32_t size;

        /* program the VCPU memory controller bits 0-27 */
        addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
        size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        addr += size;
        size = AMDGPU_UVD_HEAP_SIZE >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        addr += size;
        size = (AMDGPU_UVD_STACK_SIZE +
               (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        /* bits 28-31 */
        addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
        WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

        /* bits 32-39 */
        addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
        WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

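/**
 * uvd_v4_2_enable_mgcg - enable/disable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable MGCG; honoured only when AMD_CG_SUPPORT_UVD_MGCG
 *          is set in adev->cg_flags
 */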
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data |= 0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        } else {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data &= ~0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        }
}

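/**
 * uvd_v4_2_set_dcm - set the dynamic clock mode
 *
 * @adev: amdgpu_device pointer
 * @sw_mode: true selects the software-controlled dynamic clock mode
 */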
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
                             bool sw_mode)
{
        u32 tmp, tmp2;

        WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

        tmp = RREG32(mmUVD_CGC_CTRL);
        tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
        tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
                (4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

        if (sw_mode) {
                tmp &= ~0x7ffff800;
                tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
                        UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
                        (7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
        } else {
                tmp |= 0x7ffff800;
                tmp2 = 0;
        }

        WREG32(mmUVD_CGC_CTRL, tmp);
        WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

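/**
 * uvd_v4_2_is_idle - check UVD idle status
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Returns true if UVD is idle according to SRBM_STATUS.
 */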
static bool uvd_v4_2_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

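/**
 * uvd_v4_2_wait_for_idle - busy-wait until UVD is idle
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Polls SRBM_STATUS up to adev->usec_timeout times; returns 0 once
 * idle, -ETIMEDOUT otherwise.
 */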
static int uvd_v4_2_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
                        return 0;
        }
        return -ETIMEDOUT;
}

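/**
 * uvd_v4_2_soft_reset - soft reset the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop UVD, assert the SRBM soft reset for the block, then start UVD
 * again (uvd_v4_2_start() releases the reset).
 */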
static int uvd_v4_2_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v4_2_stop(adev);

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
                        ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        /* TODO */
        return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.inst->ring);
        return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks.
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_PG_STATE_GATE) {
                uvd_v4_2_stop(adev);
                if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
                        if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
                                CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
                                WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
                                                        UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
                                                        UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
                                mdelay(20);
                        }
                }
                return 0;
        } else {
                if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
                        if (RREG32_SMC(ixCURRENT_PG_STATUS) &
                                CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
                                WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
                                                UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
                                                UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
                                mdelay(30);
                        }
                }
                return uvd_v4_2_start(adev);
        }
}


static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
        .name = "uvd_v4_2",
        .early_init = uvd_v4_2_early_init,
        .late_init = NULL,
        .sw_init = uvd_v4_2_sw_init,
        .sw_fini = uvd_v4_2_sw_fini,
        .hw_init = uvd_v4_2_hw_init,
        .hw_fini = uvd_v4_2_hw_fini,
        .suspend = uvd_v4_2_suspend,
        .resume = uvd_v4_2_resume,
        .is_idle = uvd_v4_2_is_idle,
        .wait_for_idle = uvd_v4_2_wait_for_idle,
        .soft_reset = uvd_v4_2_soft_reset,
        .set_clockgating_state = uvd_v4_2_set_clockgating_state,
        .set_powergating_state = uvd_v4_2_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = uvd_v4_2_ring_get_rptr,
        .get_wptr = uvd_v4_2_ring_get_wptr,
        .set_wptr = uvd_v4_2_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_frame_size =
                14, /* uvd_v4_2_ring_emit_fence  x1 no user fence */
        .emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
        .emit_ib = uvd_v4_2_ring_emit_ib,
        .emit_fence = uvd_v4_2_ring_emit_fence,
        .test_ring = uvd_v4_2_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = uvd_v4_2_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
        .set = uvd_v4_2_set_interrupt_state,
        .process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.inst->irq.num_types = 1;
        adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 4,
        .minor = 2,
        .rev = 0,
        .funcs = &uvd_v4_2_ip_funcs,
};
