root/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c


DEFINITIONS

This source file includes the following definitions:
  1. uvd_v5_0_ring_get_rptr
  2. uvd_v5_0_ring_get_wptr
  3. uvd_v5_0_ring_set_wptr
  4. uvd_v5_0_early_init
  5. uvd_v5_0_sw_init
  6. uvd_v5_0_sw_fini
  7. uvd_v5_0_hw_init
  8. uvd_v5_0_hw_fini
  9. uvd_v5_0_suspend
  10. uvd_v5_0_resume
  11. uvd_v5_0_mc_resume
  12. uvd_v5_0_start
  13. uvd_v5_0_stop
  14. uvd_v5_0_ring_emit_fence
  15. uvd_v5_0_ring_test_ring
  16. uvd_v5_0_ring_emit_ib
  17. uvd_v5_0_ring_insert_nop
  18. uvd_v5_0_is_idle
  19. uvd_v5_0_wait_for_idle
  20. uvd_v5_0_soft_reset
  21. uvd_v5_0_set_interrupt_state
  22. uvd_v5_0_process_interrupt
  23. uvd_v5_0_enable_clock_gating
  24. uvd_v5_0_set_sw_clock_gating
  25. uvd_v5_0_set_hw_clock_gating
  26. uvd_v5_0_enable_mgcg
  27. uvd_v5_0_set_clockgating_state
  28. uvd_v5_0_set_powergating_state
  29. uvd_v5_0_get_clockgating_state
  30. uvd_v5_0_set_ring_funcs
  31. uvd_v5_0_set_irq_funcs

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/delay.h>
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

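/**
 * uvd_v5_0_early_init - set ring and irq function pointers
 *
 * @handle: handle used to identify the IP block
 *
 * Set the number of UVD instances and hook up the ring and irq functions
 */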
static int uvd_v5_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->uvd.num_uvd_inst = 1;

        uvd_v5_0_set_ring_funcs(adev);
        uvd_v5_0_set_irq_funcs(adev);

        return 0;
}

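/**
 * uvd_v5_0_sw_init - sw init for UVD block
 *
 * @handle: handle used to identify the IP block
 *
 * Register the UVD trap interrupt, load the firmware and
 * initialize the UVD ring
 */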
static int uvd_v5_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
                              VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE,
                              &adev->uvd.inst->irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        r = amdgpu_uvd_entity_init(adev);

        return r;
}

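/**
 * uvd_v5_0_sw_fini - sw fini for UVD block
 *
 * @handle: handle used to identify the IP block
 *
 * Suspend the UVD block and tear down its software state
 */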
static int uvd_v5_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: handle used to identify the IP block
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t tmp;
        int r;

        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
        uvd_v5_0_enable_mgcg(adev, true);

        r = amdgpu_ring_test_helper(ring);
        if (r)
                goto done;

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }

        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to identify the IP block
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;

        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v5_0_stop(adev);

        ring->sched.ready = false;

        return 0;
}

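/**
 * uvd_v5_0_suspend - suspend UVD block
 *
 * @handle: handle used to identify the IP block
 *
 * Stop the hardware, gate the clocks and suspend the UVD firmware state
 */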
static int uvd_v5_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v5_0_hw_fini(adev);
        if (r)
                return r;

        uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

        return amdgpu_uvd_suspend(adev);
}

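/**
 * uvd_v5_0_resume - resume UVD block
 *
 * @handle: handle used to identify the IP block
 *
 * Restore the UVD firmware state and re-initialize the hardware
 */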
static int uvd_v5_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.inst->gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        upper_32_bits(adev->uvd.inst->gpu_addr));

        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE +
               (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v5_0_mc_resume(adev);

        /* disable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

        /* stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
                             (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 1 << 9);

        /* enable UMC */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);
        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                                ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }

        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

        /* clear bit 2 (value 0x4) of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

        rb_bufsz = order_base_2(ring->ring_size);
        tmp = 0;
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

        WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

        return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job associated with the indirect buffer (unused)
 * @ib: indirect buffer to execute
 * @flags: flags associated with the indirect buffer (unused)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
                                  uint32_t flags)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

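/**
 * uvd_v5_0_ring_insert_nop - insert dwords of no-ops
 *
 * @ring: amdgpu_ring pointer
 * @count: number of no-op dwords to insert
 *
 * Pad the ring with UVD no-op packets; they are emitted in pairs,
 * so @count is expected to be even
 */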
static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        int i;

        WARN_ON(ring->wptr % 2 || count % 2);

        for (i = 0; i < count / 2; i++) {
                amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
                amdgpu_ring_write(ring, 0);
        }
}

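/**
 * uvd_v5_0_is_idle - check UVD block idle status
 *
 * @handle: handle used to identify the IP block
 *
 * Check the SRBM status to see whether the UVD block is idle
 */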
static bool uvd_v5_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

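/**
 * uvd_v5_0_wait_for_idle - wait for UVD block to become idle
 *
 * @handle: handle used to identify the IP block
 *
 * Poll the SRBM status until UVD reports idle or the timeout expires
 */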
static int uvd_v5_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
                        return 0;
        }

        return -ETIMEDOUT;
}

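/**
 * uvd_v5_0_soft_reset - soft reset UVD block
 *
 * @handle: handle used to identify the IP block
 *
 * Stop UVD, toggle the SRBM soft reset for UVD and start the block again
 */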
static int uvd_v5_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v5_0_stop(adev);

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
                        ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        /* TODO */
        return 0;
}

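/**
 * uvd_v5_0_process_interrupt - process a UVD interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this interrupt came from
 * @entry: interrupt vector entry
 *
 * Handle the UVD trap interrupt and process ring fences
 */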
static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.inst->ring);
        return 0;
}

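/**
 * uvd_v5_0_enable_clock_gating - toggle coarse clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the clock gates
 *
 * Program the UVD and SUVD CGC_GATE registers to gate or ungate
 * the clocks of the UVD sub-blocks
 */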
static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
        uint32_t data1, data3, suvd_flags;

        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        data3 = RREG32(mmUVD_CGC_GATE);

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                     UVD_SUVD_CGC_GATE__SIT_MASK |
                     UVD_SUVD_CGC_GATE__SMP_MASK |
                     UVD_SUVD_CGC_GATE__SCM_MASK |
                     UVD_SUVD_CGC_GATE__SDB_MASK;

        if (enable) {
                data3 |= (UVD_CGC_GATE__SYS_MASK     |
                        UVD_CGC_GATE__UDEC_MASK      |
                        UVD_CGC_GATE__MPEG2_MASK     |
                        UVD_CGC_GATE__RBC_MASK       |
                        UVD_CGC_GATE__LMI_MC_MASK    |
                        UVD_CGC_GATE__IDCT_MASK      |
                        UVD_CGC_GATE__MPRD_MASK      |
                        UVD_CGC_GATE__MPC_MASK       |
                        UVD_CGC_GATE__LBSI_MASK      |
                        UVD_CGC_GATE__LRBBM_MASK     |
                        UVD_CGC_GATE__UDEC_RE_MASK   |
                        UVD_CGC_GATE__UDEC_CM_MASK   |
                        UVD_CGC_GATE__UDEC_IT_MASK   |
                        UVD_CGC_GATE__UDEC_DB_MASK   |
                        UVD_CGC_GATE__UDEC_MP_MASK   |
                        UVD_CGC_GATE__WCB_MASK       |
                        UVD_CGC_GATE__JPEG_MASK      |
                        UVD_CGC_GATE__SCPU_MASK);
                /* only when powergating is enabled can we gate the clock to the vcpu */
                if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
                        data3 |= UVD_CGC_GATE__VCPU_MASK;
                data3 &= ~UVD_CGC_GATE__REGS_MASK;
                data1 |= suvd_flags;
        } else {
                data3 = 0;
                data1 = 0;
        }

        WREG32(mmUVD_SUVD_CGC_GATE, data1);
        WREG32(mmUVD_CGC_GATE, data3);
}

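/**
 * uvd_v5_0_set_sw_clock_gating - set software controlled clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Select dynamic clock mode, program the gate delay and clock-off
 * timers, and put the UVD and SUVD sub-blocks into software
 * controlled clock gating mode
 */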
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data2;

        data = RREG32(mmUVD_CGC_CTRL);
        data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
                (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                        UVD_CGC_CTRL__SYS_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MODE_MASK |
                        UVD_CGC_CTRL__MPEG2_MODE_MASK |
                        UVD_CGC_CTRL__REGS_MODE_MASK |
                        UVD_CGC_CTRL__RBC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                        UVD_CGC_CTRL__IDCT_MODE_MASK |
                        UVD_CGC_CTRL__MPRD_MODE_MASK |
                        UVD_CGC_CTRL__MPC_MODE_MASK |
                        UVD_CGC_CTRL__LBSI_MODE_MASK |
                        UVD_CGC_CTRL__LRBBM_MODE_MASK |
                        UVD_CGC_CTRL__WCB_MODE_MASK |
                        UVD_CGC_CTRL__VCPU_MODE_MASK |
                        UVD_CGC_CTRL__JPEG_MODE_MASK |
                        UVD_CGC_CTRL__SCPU_MODE_MASK);
        data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

        WREG32(mmUVD_CGC_CTRL, data);
        WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, cgc_flags, suvd_flags;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);

        cgc_flags = UVD_CGC_GATE__SYS_MASK |
                                UVD_CGC_GATE__UDEC_MASK |
                                UVD_CGC_GATE__MPEG2_MASK |
                                UVD_CGC_GATE__RBC_MASK |
                                UVD_CGC_GATE__LMI_MC_MASK |
                                UVD_CGC_GATE__IDCT_MASK |
                                UVD_CGC_GATE__MPRD_MASK |
                                UVD_CGC_GATE__MPC_MASK |
                                UVD_CGC_GATE__LBSI_MASK |
                                UVD_CGC_GATE__LRBBM_MASK |
                                UVD_CGC_GATE__UDEC_RE_MASK |
                                UVD_CGC_GATE__UDEC_CM_MASK |
                                UVD_CGC_GATE__UDEC_IT_MASK |
                                UVD_CGC_GATE__UDEC_DB_MASK |
                                UVD_CGC_GATE__UDEC_MP_MASK |
                                UVD_CGC_GATE__WCB_MASK |
                                UVD_CGC_GATE__VCPU_MASK |
                                UVD_CGC_GATE__SCPU_MASK;

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= cgc_flags;
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

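/**
 * uvd_v5_0_enable_mgcg - enable/disable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Toggle the memory gating bits and the dynamic clock mode used
 * for UVD medium grain clock gating
 */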
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data |= 0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        } else {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data &= ~0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        }
}

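/**
 * uvd_v5_0_set_clockgating_state - set UVD clock gating state
 *
 * @handle: handle used to identify the IP block
 * @state: clock gating state to set
 *
 * Gate or ungate the UVD clocks, waiting for the block to go idle
 * before the gates are enabled
 */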
static int uvd_v5_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE);

        if (enable) {
                /* wait for STATUS to clear */
                if (uvd_v5_0_wait_for_idle(handle))
                        return -EBUSY;
                uvd_v5_0_enable_clock_gating(adev, true);

                /* enable HW gates because UVD is idle */
/*              uvd_v5_0_set_hw_clock_gating(adev); */
        } else {
                uvd_v5_0_enable_clock_gating(adev, false);
        }

        uvd_v5_0_set_sw_clock_gating(adev);
        return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret = 0;

        if (state == AMD_PG_STATE_GATE) {
                uvd_v5_0_stop(adev);
        } else {
                ret = uvd_v5_0_start(adev);
                if (ret)
                        goto out;
        }

out:
        return ret;
}

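/**
 * uvd_v5_0_get_clockgating_state - report current clock gating flags
 *
 * @handle: handle used to identify the IP block
 * @flags: pointer to the clock gating flags to fill in
 *
 * Report AMD_CG_SUPPORT_UVD_MGCG when dynamic clock mode is enabled;
 * the state cannot be read while the block is powergated
 */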
static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int data;

        mutex_lock(&adev->pm.mutex);

        if (RREG32_SMC(ixCURRENT_PG_STATUS) &
                                CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
                DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
                goto out;
        }

        /* AMD_CG_SUPPORT_UVD_MGCG */
        data = RREG32(mmUVD_CGC_CTRL);
        if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
                *flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
        mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
        .name = "uvd_v5_0",
        .early_init = uvd_v5_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v5_0_sw_init,
        .sw_fini = uvd_v5_0_sw_fini,
        .hw_init = uvd_v5_0_hw_init,
        .hw_fini = uvd_v5_0_hw_fini,
        .suspend = uvd_v5_0_suspend,
        .resume = uvd_v5_0_resume,
        .is_idle = uvd_v5_0_is_idle,
        .wait_for_idle = uvd_v5_0_wait_for_idle,
        .soft_reset = uvd_v5_0_soft_reset,
        .set_clockgating_state = uvd_v5_0_set_clockgating_state,
        .set_powergating_state = uvd_v5_0_set_powergating_state,
        .get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = uvd_v5_0_ring_get_rptr,
        .get_wptr = uvd_v5_0_ring_get_wptr,
        .set_wptr = uvd_v5_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_frame_size =
                14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
        .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
        .emit_ib = uvd_v5_0_ring_emit_ib,
        .emit_fence = uvd_v5_0_ring_emit_fence,
        .test_ring = uvd_v5_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = uvd_v5_0_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
        .set = uvd_v5_0_set_interrupt_state,
        .process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.inst->irq.num_types = 1;
        adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 5,
        .minor = 0,
        .rev = 0,
        .funcs = &uvd_v5_0_ip_funcs,
};
