root/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c

DEFINITIONS

This source file includes the following definitions:
  1. vcn_v1_0_early_init
  2. vcn_v1_0_sw_init
  3. vcn_v1_0_sw_fini
  4. vcn_v1_0_hw_init
  5. vcn_v1_0_hw_fini
  6. vcn_v1_0_suspend
  7. vcn_v1_0_resume
  8. vcn_v1_0_mc_resume_spg_mode
  9. vcn_v1_0_mc_resume_dpg_mode
  10. vcn_v1_0_disable_clock_gating
  11. vcn_v1_0_enable_clock_gating
  12. vcn_v1_0_clock_gating_dpg_mode
  13. vcn_1_0_disable_static_power_gating
  14. vcn_1_0_enable_static_power_gating
  15. vcn_v1_0_start_spg_mode
  16. vcn_v1_0_start_dpg_mode
  17. vcn_v1_0_start
  18. vcn_v1_0_stop_spg_mode
  19. vcn_v1_0_stop_dpg_mode
  20. vcn_v1_0_stop
  21. vcn_v1_0_pause_dpg_mode
  22. vcn_v1_0_is_idle
  23. vcn_v1_0_wait_for_idle
  24. vcn_v1_0_set_clockgating_state
  25. vcn_v1_0_dec_ring_get_rptr
  26. vcn_v1_0_dec_ring_get_wptr
  27. vcn_v1_0_dec_ring_set_wptr
  28. vcn_v1_0_dec_ring_insert_start
  29. vcn_v1_0_dec_ring_insert_end
  30. vcn_v1_0_dec_ring_emit_fence
  31. vcn_v1_0_dec_ring_emit_ib
  32. vcn_v1_0_dec_ring_emit_reg_wait
  33. vcn_v1_0_dec_ring_emit_vm_flush
  34. vcn_v1_0_dec_ring_emit_wreg
  35. vcn_v1_0_enc_ring_get_rptr
  36. vcn_v1_0_enc_ring_get_wptr
  37. vcn_v1_0_enc_ring_set_wptr
  38. vcn_v1_0_enc_ring_emit_fence
  39. vcn_v1_0_enc_ring_insert_end
  40. vcn_v1_0_enc_ring_emit_ib
  41. vcn_v1_0_enc_ring_emit_reg_wait
  42. vcn_v1_0_enc_ring_emit_vm_flush
  43. vcn_v1_0_enc_ring_emit_wreg
  44. vcn_v1_0_jpeg_ring_get_rptr
  45. vcn_v1_0_jpeg_ring_get_wptr
  46. vcn_v1_0_jpeg_ring_set_wptr
  47. vcn_v1_0_jpeg_ring_insert_start
  48. vcn_v1_0_jpeg_ring_insert_end
  49. vcn_v1_0_jpeg_ring_emit_fence
  50. vcn_v1_0_jpeg_ring_emit_ib
  51. vcn_v1_0_jpeg_ring_emit_reg_wait
  52. vcn_v1_0_jpeg_ring_emit_vm_flush
  53. vcn_v1_0_jpeg_ring_emit_wreg
  54. vcn_v1_0_jpeg_ring_nop
  55. vcn_v1_0_jpeg_ring_patch_wreg
  56. vcn_v1_0_jpeg_ring_set_patch_ring
  57. vcn_v1_0_set_interrupt_state
  58. vcn_v1_0_process_interrupt
  59. vcn_v1_0_dec_ring_insert_nop
  60. vcn_v1_0_set_powergating_state
  61. vcn_v1_0_set_dec_ring_funcs
  62. vcn_v1_0_set_enc_ring_funcs
  63. vcn_v1_0_set_jpeg_ring_funcs
  64. vcn_v1_0_set_irq_funcs

   1 /*
   2  * Copyright 2016 Advanced Micro Devices, Inc.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice shall be included in
  12  * all copies or substantial portions of the Software.
  13  *
  14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20  * OTHER DEALINGS IN THE SOFTWARE.
  21  *
  22  */
  23 
  24 #include <linux/firmware.h>
  25 
  26 #include "amdgpu.h"
  27 #include "amdgpu_vcn.h"
  28 #include "soc15.h"
  29 #include "soc15d.h"
  30 #include "soc15_common.h"
  31 
  32 #include "vcn/vcn_1_0_offset.h"
  33 #include "vcn/vcn_1_0_sh_mask.h"
  34 #include "hdp/hdp_4_0_offset.h"
  35 #include "mmhub/mmhub_9_1_offset.h"
  36 #include "mmhub/mmhub_9_1_sh_mask.h"
  37 
  38 #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
  39 
   40 #define mmUVD_RBC_XX_IB_REG_CHECK               0x05ab
   41 #define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX      1
   42 #define mmUVD_REG_XX_MASK                       0x05ac
   43 #define mmUVD_REG_XX_MASK_BASE_IDX              1
  44 
  45 static int vcn_v1_0_stop(struct amdgpu_device *adev);
  46 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
  47 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
  48 static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
  49 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
  50 static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
  51 static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
  52 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
  53                                 struct dpg_pause_state *new_state);
  54 
  55 /**
  56  * vcn_v1_0_early_init - set function pointers
  57  *
  58  * @handle: amdgpu_device pointer
  59  *
  60  * Set ring and irq function pointers
  61  */
  62 static int vcn_v1_0_early_init(void *handle)
  63 {
  64         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  65 
  66         adev->vcn.num_vcn_inst = 1;
  67         adev->vcn.num_enc_rings = 2;
  68 
  69         vcn_v1_0_set_dec_ring_funcs(adev);
  70         vcn_v1_0_set_enc_ring_funcs(adev);
  71         vcn_v1_0_set_jpeg_ring_funcs(adev);
  72         vcn_v1_0_set_irq_funcs(adev);
  73 
  74         return 0;
  75 }
  76 
  77 /**
  78  * vcn_v1_0_sw_init - sw init for VCN block
  79  *
  80  * @handle: amdgpu_device pointer
  81  *
  82  * Load firmware and sw initialization
  83  */
  84 static int vcn_v1_0_sw_init(void *handle)
  85 {
  86         struct amdgpu_ring *ring;
  87         int i, r;
  88         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  89 
  90         /* VCN DEC TRAP */
  91         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
  92                         VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
  93         if (r)
  94                 return r;
  95 
  96         /* VCN ENC TRAP */
  97         for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
  98                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
  99                                         &adev->vcn.inst->irq);
 100                 if (r)
 101                         return r;
 102         }
 103 
 104         /* VCN JPEG TRAP */
 105         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.inst->irq);
 106         if (r)
 107                 return r;
 108 
 109         r = amdgpu_vcn_sw_init(adev);
 110         if (r)
 111                 return r;
 112 
 113         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 114                 const struct common_firmware_header *hdr;
 115                 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 116                 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
 117                 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
 118                 adev->firmware.fw_size +=
 119                         ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 120                 DRM_INFO("PSP loading VCN firmware\n");
 121         }
 122 
 123         r = amdgpu_vcn_resume(adev);
 124         if (r)
 125                 return r;
 126 
 127         ring = &adev->vcn.inst->ring_dec;
 128         sprintf(ring->name, "vcn_dec");
 129         r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
 130         if (r)
 131                 return r;
 132 
 133         adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
 134                 SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
 135         adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
 136                 SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
 137         adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
 138                 SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
 139         adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
 140                 SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
 141         adev->vcn.internal.nop = adev->vcn.inst->external.nop =
 142                 SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
 143 
 144         for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 145                 ring = &adev->vcn.inst->ring_enc[i];
 146                 sprintf(ring->name, "vcn_enc%d", i);
 147                 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
 148                 if (r)
 149                         return r;
 150         }
 151 
 152         ring = &adev->vcn.inst->ring_jpeg;
 153         sprintf(ring->name, "vcn_jpeg");
 154         r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
 155         if (r)
 156                 return r;
 157 
 158         adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
 159         adev->vcn.internal.jpeg_pitch = adev->vcn.inst->external.jpeg_pitch =
 160                 SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
 161 
 162         return 0;
 163 }
 164 
 165 /**
 166  * vcn_v1_0_sw_fini - sw fini for VCN block
 167  *
 168  * @handle: amdgpu_device pointer
 169  *
 170  * VCN suspend and free up sw allocation
 171  */
 172 static int vcn_v1_0_sw_fini(void *handle)
 173 {
 174         int r;
 175         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 176 
 177         r = amdgpu_vcn_suspend(adev);
 178         if (r)
 179                 return r;
 180 
 181         r = amdgpu_vcn_sw_fini(adev);
 182 
 183         return r;
 184 }
 185 
 186 /**
 187  * vcn_v1_0_hw_init - start and test VCN block
 188  *
 189  * @handle: amdgpu_device pointer
 190  *
 191  * Initialize the hardware, boot up the VCPU and do some testing
 192  */
 193 static int vcn_v1_0_hw_init(void *handle)
 194 {
 195         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 196         struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 197         int i, r;
 198 
 199         r = amdgpu_ring_test_helper(ring);
 200         if (r)
 201                 goto done;
 202 
 203         for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 204                 ring = &adev->vcn.inst->ring_enc[i];
 205                 ring->sched.ready = true;
 206                 r = amdgpu_ring_test_helper(ring);
 207                 if (r)
 208                         goto done;
 209         }
 210 
 211         ring = &adev->vcn.inst->ring_jpeg;
 212         r = amdgpu_ring_test_helper(ring);
 213         if (r)
 214                 goto done;
 215 
 216 done:
 217         if (!r)
  218                 DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
  219                         (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");
 220 
 221         return r;
 222 }
 223 
 224 /**
 225  * vcn_v1_0_hw_fini - stop the hardware block
 226  *
 227  * @handle: amdgpu_device pointer
 228  *
 229  * Stop the VCN block, mark ring as not ready any more
 230  */
 231 static int vcn_v1_0_hw_fini(void *handle)
 232 {
 233         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 234         struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 235 
 236         if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
 237                 RREG32_SOC15(VCN, 0, mmUVD_STATUS))
 238                 vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 239 
 240         ring->sched.ready = false;
 241 
 242         return 0;
 243 }
 244 
 245 /**
 246  * vcn_v1_0_suspend - suspend VCN block
 247  *
 248  * @handle: amdgpu_device pointer
 249  *
 250  * HW fini and suspend VCN block
 251  */
 252 static int vcn_v1_0_suspend(void *handle)
 253 {
 254         int r;
 255         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 256 
 257         r = vcn_v1_0_hw_fini(adev);
 258         if (r)
 259                 return r;
 260 
 261         r = amdgpu_vcn_suspend(adev);
 262 
 263         return r;
 264 }
 265 
 266 /**
 267  * vcn_v1_0_resume - resume VCN block
 268  *
 269  * @handle: amdgpu_device pointer
 270  *
 271  * Resume firmware and hw init VCN block
 272  */
 273 static int vcn_v1_0_resume(void *handle)
 274 {
 275         int r;
 276         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 277 
 278         r = amdgpu_vcn_resume(adev);
 279         if (r)
 280                 return r;
 281 
 282         r = vcn_v1_0_hw_init(adev);
 283 
 284         return r;
 285 }
 286 
 287 /**
 288  * vcn_v1_0_mc_resume_spg_mode - memory controller programming
 289  *
 290  * @adev: amdgpu_device pointer
 291  *
  292  * Let the VCN memory controller know its offsets
 293  */
 294 static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
 295 {
 296         uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
 297         uint32_t offset;
 298 
 299         /* cache window 0: fw */
 300         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 301                 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 302                              (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
 303                 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 304                              (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
 305                 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
 306                 offset = 0;
 307         } else {
 308                 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 309                         lower_32_bits(adev->vcn.inst->gpu_addr));
 310                 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 311                         upper_32_bits(adev->vcn.inst->gpu_addr));
 312                 offset = size;
 313                 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
 314                              AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 315         }
 316 
 317         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
 318 
 319         /* cache window 1: stack */
 320         WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
 321                      lower_32_bits(adev->vcn.inst->gpu_addr + offset));
 322         WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
 323                      upper_32_bits(adev->vcn.inst->gpu_addr + offset));
 324         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
 325         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
 326 
 327         /* cache window 2: context */
 328         WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
 329                      lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
 330         WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
 331                      upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
 332         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
 333         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
 334 
 335         WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
 336                         adev->gfx.config.gb_addr_config);
 337         WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
 338                         adev->gfx.config.gb_addr_config);
 339         WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
 340                         adev->gfx.config.gb_addr_config);
 341         WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
 342                         adev->gfx.config.gb_addr_config);
 343         WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
 344                         adev->gfx.config.gb_addr_config);
 345         WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
 346                         adev->gfx.config.gb_addr_config);
 347         WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
 348                         adev->gfx.config.gb_addr_config);
 349         WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
 350                         adev->gfx.config.gb_addr_config);
 351         WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
 352                         adev->gfx.config.gb_addr_config);
 353         WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
 354                         adev->gfx.config.gb_addr_config);
 355         WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
 356                         adev->gfx.config.gb_addr_config);
 357         WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
 358                         adev->gfx.config.gb_addr_config);
 359 }
 360 
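      /**
       * vcn_v1_0_mc_resume_dpg_mode - memory controller programming for DPG mode
       *
       * @adev: amdgpu_device pointer
       *
       * Program the firmware, stack and context cache windows and the global
       * tiling registers through the DPG register write path
       */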
 361 static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
 362 {
 363         uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
 364         uint32_t offset;
 365 
 366         /* cache window 0: fw */
 367         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 368                 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 369                              (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
 370                              0xFFFFFFFF, 0);
 371                 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 372                              (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
 373                              0xFFFFFFFF, 0);
 374                 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
 375                              0xFFFFFFFF, 0);
 376                 offset = 0;
 377         } else {
 378                 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 379                         lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
 380                 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 381                         upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
 382                 offset = size;
 383                 WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
 384                              AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
 385         }
 386 
 387         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);
 388 
 389         /* cache window 1: stack */
 390         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
 391                      lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
 392         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
 393                      upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
 394         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
 395                              0xFFFFFFFF, 0);
 396         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
 397                              0xFFFFFFFF, 0);
 398 
 399         /* cache window 2: context */
 400         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
 401                      lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
 402                              0xFFFFFFFF, 0);
 403         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
 404                      upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
 405                              0xFFFFFFFF, 0);
 406         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
 407         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
 408                              0xFFFFFFFF, 0);
 409 
 410         /* VCN global tiling registers */
 411         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
 412                         adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
 413         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
 414                         adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
 415         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
 416                         adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
 417         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
 418                 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
 419         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
 420                 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
 421         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
 422                 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
 423         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
 424                 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
 425         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
 426                 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
 427         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
 428                 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
 429         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
 430                 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
 431 }
 432 
 433 /**
 434  * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 435  *
 436  * @adev: amdgpu_device pointer
 438  *
 439  * Disable clock gating for VCN block
 440  */
 441 static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
 442 {
 443         uint32_t data;
 444 
 445         /* JPEG disable CGC */
 446         data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
 447 
 448         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
 449                 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 450         else
 451                 data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
 452 
 453         data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
 454         data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
 455         WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
 456 
 457         data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
 458         data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
 459         WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
 460 
 461         /* UVD disable CGC */
 462         data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
 463         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
 464                 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 465         else
  466                 data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
 467 
 468         data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
 469         data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
 470         WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
 471 
 472         data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
 473         data &= ~(UVD_CGC_GATE__SYS_MASK
 474                 | UVD_CGC_GATE__UDEC_MASK
 475                 | UVD_CGC_GATE__MPEG2_MASK
 476                 | UVD_CGC_GATE__REGS_MASK
 477                 | UVD_CGC_GATE__RBC_MASK
 478                 | UVD_CGC_GATE__LMI_MC_MASK
 479                 | UVD_CGC_GATE__LMI_UMC_MASK
 480                 | UVD_CGC_GATE__IDCT_MASK
 481                 | UVD_CGC_GATE__MPRD_MASK
 482                 | UVD_CGC_GATE__MPC_MASK
 483                 | UVD_CGC_GATE__LBSI_MASK
 484                 | UVD_CGC_GATE__LRBBM_MASK
 485                 | UVD_CGC_GATE__UDEC_RE_MASK
 486                 | UVD_CGC_GATE__UDEC_CM_MASK
 487                 | UVD_CGC_GATE__UDEC_IT_MASK
 488                 | UVD_CGC_GATE__UDEC_DB_MASK
 489                 | UVD_CGC_GATE__UDEC_MP_MASK
 490                 | UVD_CGC_GATE__WCB_MASK
 491                 | UVD_CGC_GATE__VCPU_MASK
 492                 | UVD_CGC_GATE__SCPU_MASK);
 493         WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);
 494 
 495         data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
 496         data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
 497                 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
 498                 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
 499                 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
 500                 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
 501                 | UVD_CGC_CTRL__SYS_MODE_MASK
 502                 | UVD_CGC_CTRL__UDEC_MODE_MASK
 503                 | UVD_CGC_CTRL__MPEG2_MODE_MASK
 504                 | UVD_CGC_CTRL__REGS_MODE_MASK
 505                 | UVD_CGC_CTRL__RBC_MODE_MASK
 506                 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
 507                 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
 508                 | UVD_CGC_CTRL__IDCT_MODE_MASK
 509                 | UVD_CGC_CTRL__MPRD_MODE_MASK
 510                 | UVD_CGC_CTRL__MPC_MODE_MASK
 511                 | UVD_CGC_CTRL__LBSI_MODE_MASK
 512                 | UVD_CGC_CTRL__LRBBM_MODE_MASK
 513                 | UVD_CGC_CTRL__WCB_MODE_MASK
 514                 | UVD_CGC_CTRL__VCPU_MODE_MASK
 515                 | UVD_CGC_CTRL__SCPU_MODE_MASK);
 516         WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
 517 
 518         /* turn on */
 519         data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
 520         data |= (UVD_SUVD_CGC_GATE__SRE_MASK
 521                 | UVD_SUVD_CGC_GATE__SIT_MASK
 522                 | UVD_SUVD_CGC_GATE__SMP_MASK
 523                 | UVD_SUVD_CGC_GATE__SCM_MASK
 524                 | UVD_SUVD_CGC_GATE__SDB_MASK
 525                 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
 526                 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
 527                 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
 528                 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
 529                 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
 530                 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
 531                 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
 532                 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
 533                 | UVD_SUVD_CGC_GATE__SCLR_MASK
 534                 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
 535                 | UVD_SUVD_CGC_GATE__ENT_MASK
 536                 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
 537                 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
 538                 | UVD_SUVD_CGC_GATE__SITE_MASK
 539                 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
 540                 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
 541                 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
 542                 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
 543                 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
 544         WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);
 545 
 546         data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
 547         data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
 548                 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
 549                 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
 550                 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
 551                 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
 552                 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
 553                 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
 554                 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
 555                 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
 556                 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
 557         WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
 558 }
 559 
 560 /**
 561  * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 562  *
 563  * @adev: amdgpu_device pointer
 565  *
 566  * Enable clock gating for VCN block
 567  */
 568 static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
 569 {
 570         uint32_t data = 0;
 571 
 572         /* enable JPEG CGC */
 573         data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
 574         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
 575                 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 576         else
 577                 data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 578         data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
 579         data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
 580         WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
 581 
 582         data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
 583         data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
 584         WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
 585 
 586         /* enable UVD CGC */
 587         data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
 588         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
 589                 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 590         else
 591                 data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 592         data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
 593         data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
 594         WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
 595 
 596         data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
 597         data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
 598                 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
 599                 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
 600                 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
 601                 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
 602                 | UVD_CGC_CTRL__SYS_MODE_MASK
 603                 | UVD_CGC_CTRL__UDEC_MODE_MASK
 604                 | UVD_CGC_CTRL__MPEG2_MODE_MASK
 605                 | UVD_CGC_CTRL__REGS_MODE_MASK
 606                 | UVD_CGC_CTRL__RBC_MODE_MASK
 607                 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
 608                 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
 609                 | UVD_CGC_CTRL__IDCT_MODE_MASK
 610                 | UVD_CGC_CTRL__MPRD_MODE_MASK
 611                 | UVD_CGC_CTRL__MPC_MODE_MASK
 612                 | UVD_CGC_CTRL__LBSI_MODE_MASK
 613                 | UVD_CGC_CTRL__LRBBM_MODE_MASK
 614                 | UVD_CGC_CTRL__WCB_MODE_MASK
 615                 | UVD_CGC_CTRL__VCPU_MODE_MASK
 616                 | UVD_CGC_CTRL__SCPU_MODE_MASK);
 617         WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
 618 
 619         data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
 620         data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
 621                 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
 622                 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
 623                 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
 624                 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
 625                 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
 626                 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
 627                 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
 628                 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
 629                 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
 630         WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
 631 }
 632 
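      /**
       * vcn_v1_0_clock_gating_dpg_mode - set up VCN clock gating in DPG mode
       *
       * @adev: amdgpu_device pointer
       * @sram_sel: destination select passed through to the DPG register writes
       *
       * Program the JPEG, UVD and SUVD clock gating controls through the DPG
       * register write path
       */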
 633 static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
 634 {
 635         uint32_t reg_data = 0;
 636 
 637         /* disable JPEG CGC */
 638         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
 639                 reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 640         else
 641                 reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 642         reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
 643         reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
 644         WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
 645 
 646         WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
 647 
 648         /* enable sw clock gating control */
 649         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
 650                 reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 651         else
 652                 reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 653         reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
 654         reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
 655         reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
 656                  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
 657                  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
 658                  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
 659                  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
 660                  UVD_CGC_CTRL__SYS_MODE_MASK |
 661                  UVD_CGC_CTRL__UDEC_MODE_MASK |
 662                  UVD_CGC_CTRL__MPEG2_MODE_MASK |
 663                  UVD_CGC_CTRL__REGS_MODE_MASK |
 664                  UVD_CGC_CTRL__RBC_MODE_MASK |
 665                  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
 666                  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
 667                  UVD_CGC_CTRL__IDCT_MODE_MASK |
 668                  UVD_CGC_CTRL__MPRD_MODE_MASK |
 669                  UVD_CGC_CTRL__MPC_MODE_MASK |
 670                  UVD_CGC_CTRL__LBSI_MODE_MASK |
 671                  UVD_CGC_CTRL__LRBBM_MODE_MASK |
 672                  UVD_CGC_CTRL__WCB_MODE_MASK |
 673                  UVD_CGC_CTRL__VCPU_MODE_MASK |
 674                  UVD_CGC_CTRL__SCPU_MODE_MASK);
 675         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
 676 
 677         /* turn off clock gating */
 678         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
 679 
 680         /* turn on SUVD clock gating */
 681         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);
 682 
 683         /* turn on sw mode in UVD_SUVD_CGC_CTRL */
 684         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
 685 }
 686 
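      /**
       * vcn_1_0_disable_static_power_gating - power up the VCN power tiles
       *
       * @adev: amdgpu_device pointer
       *
       * Program UVD_PGFSM_CONFIG to power the UVD tiles back on, wait for
       * UVD_PGFSM_STATUS to confirm it, then clear the power-off indicator
       * in UVD_POWER_STATUS
       */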
 687 static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
 688 {
 689         uint32_t data = 0;
 690         int ret;
 691 
 692         if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
 693                 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
 694                         | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
 695                         | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
 696                         | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
 697                         | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
 698                         | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
 699                         | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
 700                         | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
 701                         | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
 702                         | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
 703                         | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
 704 
 705                 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
 706                 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
 707         } else {
 708                 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
 709                         | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
 710                         | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
 711                         | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
 712                         | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
 713                         | 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
 714                         | 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
 715                         | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
 716                         | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
 717                         | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
 718                         | 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
 719                 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
 720                 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0,  0xFFFFFFFF, ret);
 721         }
 722 
  723         /* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS and UVDU_PWR_STATUS are 0 (power on) */
 724 
 725         data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
 726         data &= ~0x103;
 727         if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
 728                 data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;
 729 
 730         WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
 731 }
 732 
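      /**
       * vcn_1_0_enable_static_power_gating - power down the VCN power tiles
       *
       * @adev: amdgpu_device pointer
       *
       * If VCN power gating is supported, flag the tiles as off in
       * UVD_POWER_STATUS, then program UVD_PGFSM_CONFIG to power them down
       * and wait for UVD_PGFSM_STATUS to confirm it
       */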
 733 static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
 734 {
 735         uint32_t data = 0;
 736         int ret;
 737 
 738         if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
 739                 /* Before power off, this indicator has to be turned on */
 740                 data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
 741                 data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
 742                 data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
 743                 WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
 744 
 745 
 746                 data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
 747                         | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
 748                         | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
 749                         | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
 750                         | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
 751                         | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
 752                         | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
 753                         | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
 754                         | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
 755                         | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
 756                         | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
 757 
 758                 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
 759 
 760                 data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
 761                         | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
 762                         | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
 763                         | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
 764                         | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
 765                         | 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
 766                         | 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
 767                         | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
 768                         | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
 769                         | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
 770                         | 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
 771                 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
 772         }
 773 }
 774 
 775 /**
  776  * vcn_v1_0_start_spg_mode - start VCN block with SPG mode
 777  *
 778  * @adev: amdgpu_device pointer
 779  *
 780  * Setup and start the VCN block
 781  */
 782 static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
 783 {
 784         struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 785         uint32_t rb_bufsz, tmp;
 786         uint32_t lmi_swap_cntl;
 787         int i, j, r;
 788 
 789         /* disable byte swapping */
 790         lmi_swap_cntl = 0;
 791 
 792         vcn_1_0_disable_static_power_gating(adev);
 793 
 794         tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
 795         WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
 796 
 797         /* disable clock gating */
 798         vcn_v1_0_disable_clock_gating(adev);
 799 
  800         /* disable interrupt */
 801         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
 802                         ~UVD_MASTINT_EN__VCPU_EN_MASK);
 803 
 804         /* initialize VCN memory controller */
 805         tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
 806         WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp                |
 807                 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 808                 UVD_LMI_CTRL__MASK_MC_URGENT_MASK                       |
 809                 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK            |
 810                 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
 811 
 812 #ifdef __BIG_ENDIAN
 813         /* swap (8 in 32) RB and IB */
 814         lmi_swap_cntl = 0xa;
 815 #endif
 816         WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
 817 
 818         tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
 819         tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
 820         tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
 821         WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);
 822 
 823         WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
 824                 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
 825                 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
 826                 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
 827                 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
 828 
 829         WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
 830                 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
 831                 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
 832                 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
 833                 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
 834 
 835         WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
 836                 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
 837                 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
 838                 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
 839 
 840         vcn_v1_0_mc_resume_spg_mode(adev);
 841 
 842         WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10);
 843         WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK,
 844                 RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3);
 845 
 846         /* enable VCPU clock */
 847         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
 848 
 849         /* boot up the VCPU */
 850         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
 851                         ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 852 
 853         /* enable UMC */
 854         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
 855                         ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 856 
 857         tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
 858         tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
 859         tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
 860         WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);
 861 
 862         for (i = 0; i < 10; ++i) {
 863                 uint32_t status;
 864 
 865                 for (j = 0; j < 100; ++j) {
 866                         status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
 867                         if (status & UVD_STATUS__IDLE)
 868                                 break;
 869                         mdelay(10);
 870                 }
 871                 r = 0;
 872                 if (status & UVD_STATUS__IDLE)
 873                         break;
 874 
 875                 DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
 876                 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
 877                                 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
 878                                 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 879                 mdelay(10);
 880                 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
 881                                 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 882                 mdelay(10);
 883                 r = -1;
 884         }
 885 
 886         if (r) {
 887                 DRM_ERROR("VCN decode not responding, giving up!!!\n");
 888                 return r;
 889         }
 890         /* enable master interrupt */
 891         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
 892                 UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);
 893 
  894         /* enable system interrupt for JRBC, TODO: move to set interrupt */
 895         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
 896                 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
 897                 ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);
 898 
 899         /* clear the busy bit of UVD_STATUS */
 900         tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
 901         WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
 902 
 903         /* force RBC into idle state */
 904         rb_bufsz = order_base_2(ring->ring_size);
 905         tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
 906         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
 907         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
 908         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
 909         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
 910         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
 911 
 912         /* set the write pointer delay */
 913         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
 914 
 915         /* set the wb address */
 916         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
 917                         (upper_32_bits(ring->gpu_addr) >> 2));
 918 
  919         /* program the RB_BASE for ring buffer */
 920         WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
 921                         lower_32_bits(ring->gpu_addr));
 922         WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
 923                         upper_32_bits(ring->gpu_addr));
 924 
 925         /* Initialize the ring buffer's read and write pointers */
 926         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
 927 
 928         WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
 929 
 930         ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
 931         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
 932                         lower_32_bits(ring->wptr));
 933 
 934         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
 935                         ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
 936 
 937         ring = &adev->vcn.inst->ring_enc[0];
 938         WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
 939         WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 940         WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
 941         WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
 942         WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 943 
 944         ring = &adev->vcn.inst->ring_enc[1];
 945         WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
 946         WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 947         WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
 948         WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 949         WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
 950 
 951         ring = &adev->vcn.inst->ring_jpeg;
 952         WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
 953         WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
 954                         UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
 955         WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
 956         WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
 957         WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
 958         WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
 959         WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
 960 
 961         /* initialize wptr */
 962         ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
 963 
 964         /* copy patch commands to the jpeg ring */
 965         vcn_v1_0_jpeg_ring_set_patch_ring(ring,
 966                 (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
 967 
 968         return 0;
 969 }
 970 
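      /**
       * vcn_v1_0_start_dpg_mode - start VCN block with DPG mode
       *
       * @adev: amdgpu_device pointer
       *
       * Setup and start the VCN block with dynamic power gating enabled
       */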
 971 static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
 972 {
 973         struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 974         uint32_t rb_bufsz, tmp;
 975         uint32_t lmi_swap_cntl;
 976 
 977         /* disable byte swapping */
 978         lmi_swap_cntl = 0;
 979 
 980         vcn_1_0_enable_static_power_gating(adev);
 981 
 982         /* enable dynamic power gating mode */
 983         tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
 984         tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
 985         tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
 986         WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
 987 
 988         /* enable clock gating */
 989         vcn_v1_0_clock_gating_dpg_mode(adev, 0);
 990 
 991         /* enable VCPU clock */
 992         tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
 993         tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
 994         tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
 995         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);
 996 
  997         /* disable interrupt */
 998         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
 999                         0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
1000 
1001         /* initialize VCN memory controller */
1002         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
1003                 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
1004                 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1005                 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1006                 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
1007                 UVD_LMI_CTRL__REQ_MODE_MASK |
1008                 UVD_LMI_CTRL__CRC_RESET_MASK |
1009                 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1010                 0x00100000L, 0xFFFFFFFF, 0);
1011 
1012 #ifdef __BIG_ENDIAN
1013         /* swap (8 in 32) RB and IB */
1014         lmi_swap_cntl = 0xa;
1015 #endif
1016         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);
1017 
1018         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_CNTL,
1019                 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);
1020 
1021         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0,
1022                 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
1023                  (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
1024                  (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
1025                  (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);
1026 
1027         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0,
1028                 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
1029                  (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
1030                  (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
1031                  (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);
1032 
1033         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX,
1034                 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
1035                  (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
1036                  (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);
1037 
1038         vcn_v1_0_mc_resume_dpg_mode(adev);
1039 
1040         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
1041         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
1042 
1043         /* boot up the VCPU */
1044         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);
1045 
1046         /* enable UMC */
1047         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
1048                 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
1049                 0xFFFFFFFF, 0);
1050 
1051         /* enable master interrupt */
1052         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
1053                         UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
1054 
1055         vcn_v1_0_clock_gating_dpg_mode(adev, 1);
1056         /* setup mmUVD_LMI_CTRL */
1057         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
1058                 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
1059                 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1060                 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1061                 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
1062                 UVD_LMI_CTRL__REQ_MODE_MASK |
1063                 UVD_LMI_CTRL__CRC_RESET_MASK |
1064                 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1065                 0x00100000L, 0xFFFFFFFF, 1);
1066 
1067         tmp = adev->gfx.config.gb_addr_config;
1068         /* setup VCN global tiling registers */
1069         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
1070         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
1071 
1072         /* enable System Interrupt for JRBC */
1073         WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN,
1074                                                                         UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);
1075 
1076         /* force RBC into idle state */
1077         rb_bufsz = order_base_2(ring->ring_size);
1078         tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1079         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1080         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1081         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1082         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1083         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
1084 
1085         /* set the write pointer delay */
1086         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
1087 
1088         /* set the wb address */
1089         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
1090                                                                 (upper_32_bits(ring->gpu_addr) >> 2));
1091 
 1092         /* program the RB_BASE for ring buffer */
1093         WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1094                                                                 lower_32_bits(ring->gpu_addr));
1095         WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1096                                                                 upper_32_bits(ring->gpu_addr));
1097 
1098         /* Initialize the ring buffer's read and write pointers */
1099         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
1100 
1101         WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
1102 
1103         ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1104         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1105                                                                 lower_32_bits(ring->wptr));
1106 
1107         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
1108                         ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1109 
1110         /* initialize JPEG wptr */
1111         ring = &adev->vcn.inst->ring_jpeg;
1112         ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
1113 
1114         /* copy patch commands to the jpeg ring */
1115         vcn_v1_0_jpeg_ring_set_patch_ring(ring,
1116                 (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
1117 
1118         return 0;
1119 }
1120 
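      /**
       * vcn_v1_0_start - start VCN block
       *
       * @adev: amdgpu_device pointer
       *
       * Start the VCN block in DPG or SPG mode depending on the power gating flags
       */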
1121 static int vcn_v1_0_start(struct amdgpu_device *adev)
1122 {
1123         int r;
1124 
1125         if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1126                 r = vcn_v1_0_start_dpg_mode(adev);
1127         else
1128                 r = vcn_v1_0_start_spg_mode(adev);
1129         return r;
1130 }
1131 
1132 /**
 1133  * vcn_v1_0_stop_spg_mode - stop VCN block with SPG mode
1134  *
1135  * @adev: amdgpu_device pointer
1136  *
1137  * stop the VCN block
1138  */
1139 static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
1140 {
1141         int ret_code, tmp;
1142 
1143         SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code);
1144 
1145         tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1146                 UVD_LMI_STATUS__READ_CLEAN_MASK |
1147                 UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1148                 UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1149         SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);
1150 
1151         /* put VCPU into reset */
1152         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1153                 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1154                 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1155 
1156         tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
1157                 UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1158         SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);
1159 
1160         /* disable VCPU clock */
1161         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
1162                 ~UVD_VCPU_CNTL__CLK_EN_MASK);
1163 
1164         /* reset LMI UMC/LMI */
1165         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1166                 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
1167                 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
1168 
1169         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1170                 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
1171                 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
1172 
1173         WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
1174 
1175         vcn_v1_0_enable_clock_gating(adev);
1176         vcn_1_0_enable_static_power_gating(adev);
1177         return 0;
1178 }
1179 
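     /**
      * vcn_v1_0_stop_dpg_mode - stop VCN block in dynamic power gating mode
      *
      * @adev: amdgpu_device pointer
      *
      * Wait for the power status to report tiles off, wait for the read
      * pointer of each ring to catch up with its write pointer, then clear
      * the dynamic power gating mode bit in UVD_POWER_STATUS.
      */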
1180 static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
1181 {
1182         int ret_code = 0;
1183         uint32_t tmp;
1184 
1185         /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
1186         SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1187                         UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1188                         UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1189 
1190         /* wait for read ptr to be equal to write ptr */
1191         tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
1192         SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
1193 
1194         tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
1195         SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
1196 
1197         tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
1198         SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
1199 
1200         tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
1201         SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
1202 
1203         SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1204                 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1205                 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1206 
1207         /* disable dynamic power gating mode */
1208         WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
1209                         ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1210 
1211         return 0;
1212 }
1213 
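     /**
      * vcn_v1_0_stop - stop VCN block
      *
      * @adev: amdgpu_device pointer
      *
      * Stop the VCN block through the DPG or SPG path depending on whether
      * dynamic power gating is supported.
      */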
1214 static int vcn_v1_0_stop(struct amdgpu_device *adev)
1215 {
1216         int r;
1217 
1218         if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1219                 r = vcn_v1_0_stop_dpg_mode(adev);
1220         else
1221                 r = vcn_v1_0_stop_spg_mode(adev);
1222 
1223         return r;
1224 }
1225 
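     /**
      * vcn_v1_0_pause_dpg_mode - pause or unpause the DPG domains
      *
      * @adev: amdgpu_device pointer
      * @new_state: requested pause state for the fw_based and jpeg domains
      *
      * When the requested state differs from the cached one, request or
      * release a DPG pause for the non-JPEG and/or JPEG domain, wait for the
      * corresponding acknowledge bit, re-program the encode, JPEG and decode
      * ring registers after a pause, and cache the new state.
      */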
1226 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
1227                                 struct dpg_pause_state *new_state)
1228 {
1229         int ret_code;
1230         uint32_t reg_data = 0;
1231         uint32_t reg_data2 = 0;
1232         struct amdgpu_ring *ring;
1233 
1234         /* pause/unpause if state is changed */
1235         if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
1236                 DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
1237                         adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
1238                         new_state->fw_based, new_state->jpeg);
1239 
1240                 reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
1241                         (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1242 
1243                 if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1244                         ret_code = 0;
1245 
1246                         if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
1247                                 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1248                                                    UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1249                                                    UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1250 
1251                         if (!ret_code) {
1252                                 /* pause DPG non-jpeg */
1253                                 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1254                                 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1255                                 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
1256                                                    UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1257                                                    UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
1258 
1259                                 /* Restore */
1260                                 ring = &adev->vcn.inst->ring_enc[0];
1261                                 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
1262                                 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1263                                 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
1264                                 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1265                                 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1266 
1267                                 ring = &adev->vcn.inst->ring_enc[1];
1268                                 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1269                                 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1270                                 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
1271                                 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1272                                 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1273 
1274                                 ring = &adev->vcn.inst->ring_dec;
1275                                 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1276                                                    RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
1277                                 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1278                                                    UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
1279                                                    UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1280                         }
1281                 } else {
1282                         /* unpause dpg non-jpeg, no need to wait */
1283                         reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1284                         WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1285                 }
1286                 adev->vcn.pause_state.fw_based = new_state->fw_based;
1287         }
1288 
1289         /* pause/unpause if state is changed */
1290         if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
1291                 DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
1292                         adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
1293                         new_state->fw_based, new_state->jpeg);
1294 
1295                 reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
1296                         (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
1297 
1298                 if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
1299                         ret_code = 0;
1300 
1301                         if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
1302                                 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1303                                                    UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1304                                                    UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1305 
1306                         if (!ret_code) {
1307                                 /* Make sure JRBC Snoop is disabled before sending the pause */
1308                                 reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
1309                                 reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
1310                                 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);
1311 
1312                                 /* pause DPG jpeg */
1313                                 reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
1314                                 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1315                                 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
1316                                                         UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
1317                                                         UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
1318 
1319                                 /* Restore */
1320                                 ring = &adev->vcn.inst->ring_jpeg;
1321                                 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
1322                                 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
1323                                                         UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
1324                                                         UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
1325                                 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
1326                                                         lower_32_bits(ring->gpu_addr));
1327                                 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
1328                                                         upper_32_bits(ring->gpu_addr));
1329                                 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
1330                                 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
1331                                 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
1332                                                         UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
1333 
1334                                 ring = &adev->vcn.inst->ring_dec;
1335                                 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1336                                                    RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
1337                                 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1338                                                    UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
1339                                                    UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1340                         }
1341                 } else {
1342                         /* unpause dpg jpeg, no need to wait */
1343                         reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
1344                         WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1345                 }
1346                 adev->vcn.pause_state.jpeg = new_state->jpeg;
1347         }
1348 
1349         return 0;
1350 }
1351 
1352 static bool vcn_v1_0_is_idle(void *handle)
1353 {
1354         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1355 
1356         return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
1357 }
1358 
1359 static int vcn_v1_0_wait_for_idle(void *handle)
1360 {
1361         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1362         int ret = 0;
1363 
1364         SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
1365                 UVD_STATUS__IDLE, ret);
1366 
1367         return ret;
1368 }
1369 
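     /**
      * vcn_v1_0_set_clockgating_state - enable or disable VCN clock gating
      *
      * @handle: amdgpu_device pointer
      * @state: requested clock gating state
      *
      * Clock gating is only enabled once the block reports idle, otherwise
      * -EBUSY is returned; ungating switches back to SW-controlled gating.
      */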
1370 static int vcn_v1_0_set_clockgating_state(void *handle,
1371                                           enum amd_clockgating_state state)
1372 {
1373         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1374         bool enable = (state == AMD_CG_STATE_GATE);
1375 
1376         if (enable) {
1377                 /* the block must report idle before clock gating is enabled */
1378                 if (!vcn_v1_0_is_idle(handle))
1379                         return -EBUSY;
1380                 vcn_v1_0_enable_clock_gating(adev);
1381         } else {
1382                 /* disable HW gating and enable SW gating */
1383                 vcn_v1_0_disable_clock_gating(adev);
1384         }
1385         return 0;
1386 }
1387 
1388 /**
1389  * vcn_v1_0_dec_ring_get_rptr - get read pointer
1390  *
1391  * @ring: amdgpu_ring pointer
1392  *
1393  * Returns the current hardware read pointer
1394  */
1395 static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
1396 {
1397         struct amdgpu_device *adev = ring->adev;
1398 
1399         return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1400 }
1401 
1402 /**
1403  * vcn_v1_0_dec_ring_get_wptr - get write pointer
1404  *
1405  * @ring: amdgpu_ring pointer
1406  *
1407  * Returns the current hardware write pointer
1408  */
1409 static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
1410 {
1411         struct amdgpu_device *adev = ring->adev;
1412 
1413         return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
1414 }
1415 
1416 /**
1417  * vcn_v1_0_dec_ring_set_wptr - set write pointer
1418  *
1419  * @ring: amdgpu_ring pointer
1420  *
1421  * Commits the write pointer to the hardware
1422  */
1423 static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
1424 {
1425         struct amdgpu_device *adev = ring->adev;
1426 
1427         if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1428                 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
1429                         lower_32_bits(ring->wptr) | 0x80000000);
1430 
1431         WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1432 }
1433 
1434 /**
1435  * vcn_v1_0_dec_ring_insert_start - insert a start command
1436  *
1437  * @ring: amdgpu_ring pointer
1438  *
1439  * Write a start command to the ring.
1440  */
1441 static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
1442 {
1443         struct amdgpu_device *adev = ring->adev;
1444 
1445         amdgpu_ring_write(ring,
1446                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1447         amdgpu_ring_write(ring, 0);
1448         amdgpu_ring_write(ring,
1449                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1450         amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
1451 }
1452 
1453 /**
1454  * vcn_v1_0_dec_ring_insert_end - insert an end command
1455  *
1456  * @ring: amdgpu_ring pointer
1457  *
1458  * Write an end command to the ring.
1459  */
1460 static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
1461 {
1462         struct amdgpu_device *adev = ring->adev;
1463 
1464         amdgpu_ring_write(ring,
1465                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1466         amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
1467 }
1468 
1469 /**
1470  * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
1471  *
1472  * @ring: amdgpu_ring pointer
1473  * @addr: fence GPU address
      * @seq: fence sequence number
      * @flags: fence flags
1474  *
1475  * Write a fence and a trap command to the ring.
1476  */
1477 static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1478                                      unsigned flags)
1479 {
1480         struct amdgpu_device *adev = ring->adev;
1481 
1482         WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1483 
1484         amdgpu_ring_write(ring,
1485                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
1486         amdgpu_ring_write(ring, seq);
1487         amdgpu_ring_write(ring,
1488                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1489         amdgpu_ring_write(ring, addr & 0xffffffff);
1490         amdgpu_ring_write(ring,
1491                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1492         amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1493         amdgpu_ring_write(ring,
1494                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1495         amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
1496 
1497         amdgpu_ring_write(ring,
1498                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1499         amdgpu_ring_write(ring, 0);
1500         amdgpu_ring_write(ring,
1501                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1502         amdgpu_ring_write(ring, 0);
1503         amdgpu_ring_write(ring,
1504                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1505         amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
1506 }
1507 
1508 /**
1509  * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
1510  *
1511  * @ring: amdgpu_ring pointer
1512  * @job: job to retrieve vmid from
      * @ib: indirect buffer to execute
      * @flags: unused
1513  *
1514  * Write ring commands to execute the indirect buffer
1515  */
1516 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
1517                                         struct amdgpu_job *job,
1518                                         struct amdgpu_ib *ib,
1519                                         uint32_t flags)
1520 {
1521         struct amdgpu_device *adev = ring->adev;
1522         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1523 
1524         amdgpu_ring_write(ring,
1525                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
1526         amdgpu_ring_write(ring, vmid);
1527 
1528         amdgpu_ring_write(ring,
1529                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1530         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1531         amdgpu_ring_write(ring,
1532                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1533         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1534         amdgpu_ring_write(ring,
1535                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
1536         amdgpu_ring_write(ring, ib->length_dw);
1537 }
1538 
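     /**
      * vcn_v1_0_dec_ring_emit_reg_wait - emit a register wait on the decode ring
      *
      * @ring: amdgpu_ring pointer
      * @reg: register offset (dword based)
      * @val: value to wait for
      * @mask: mask applied before the comparison
      *
      * Emit the register byte offset, reference value and mask, followed by
      * a VCN_DEC_CMD_REG_READ_COND_WAIT command.
      */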
1539 static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
1540                                             uint32_t reg, uint32_t val,
1541                                             uint32_t mask)
1542 {
1543         struct amdgpu_device *adev = ring->adev;
1544 
1545         amdgpu_ring_write(ring,
1546                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1547         amdgpu_ring_write(ring, reg << 2);
1548         amdgpu_ring_write(ring,
1549                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1550         amdgpu_ring_write(ring, val);
1551         amdgpu_ring_write(ring,
1552                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
1553         amdgpu_ring_write(ring, mask);
1554         amdgpu_ring_write(ring,
1555                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1556         amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
1557 }
1558 
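     /**
      * vcn_v1_0_dec_ring_emit_vm_flush - emit a VM TLB flush on the decode ring
      *
      * @ring: amdgpu_ring pointer
      * @vmid: VM id to flush
      * @pd_addr: page directory address
      *
      * Emit the GMC TLB flush for @vmid, then wait until the hub's context
      * page-table base register reflects the new address.
      */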
1559 static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
1560                                             unsigned vmid, uint64_t pd_addr)
1561 {
1562         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1563         uint32_t data0, data1, mask;
1564 
1565         pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1566 
1567         /* wait for register write */
1568         data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
1569         data1 = lower_32_bits(pd_addr);
1570         mask = 0xffffffff;
1571         vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
1572 }
1573 
1574 static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
1575                                         uint32_t reg, uint32_t val)
1576 {
1577         struct amdgpu_device *adev = ring->adev;
1578 
1579         amdgpu_ring_write(ring,
1580                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1581         amdgpu_ring_write(ring, reg << 2);
1582         amdgpu_ring_write(ring,
1583                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1584         amdgpu_ring_write(ring, val);
1585         amdgpu_ring_write(ring,
1586                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1587         amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
1588 }
1589 
1590 /**
1591  * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
1592  *
1593  * @ring: amdgpu_ring pointer
1594  *
1595  * Returns the current hardware enc read pointer
1596  */
1597 static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
1598 {
1599         struct amdgpu_device *adev = ring->adev;
1600 
1601         if (ring == &adev->vcn.inst->ring_enc[0])
1602                 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
1603         else
1604                 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
1605 }
1606 
1607 /**
1608  * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
1609  *
1610  * @ring: amdgpu_ring pointer
1611  *
1612  * Returns the current hardware enc write pointer
1613  */
1614 static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
1615 {
1616         struct amdgpu_device *adev = ring->adev;
1617 
1618         if (ring == &adev->vcn.inst->ring_enc[0])
1619                 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
1620         else
1621                 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
1622 }
1623 
1624 /**
1625  * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
1626  *
1627  * @ring: amdgpu_ring pointer
1628  *
1629  * Commits the enc write pointer to the hardware
1630  */
1631 static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
1632 {
1633         struct amdgpu_device *adev = ring->adev;
1634 
1635         if (ring == &adev->vcn.inst->ring_enc[0])
1636                 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
1637                         lower_32_bits(ring->wptr));
1638         else
1639                 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
1640                         lower_32_bits(ring->wptr));
1641 }
1642 
1643 /**
1644  * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
1645  *
1646  * @ring: amdgpu_ring pointer
1647  * @addr: fence GPU address
      * @seq: fence sequence number
      * @flags: fence flags
1648  *
1649  * Write an enc fence and a trap command to the ring.
1650  */
1651 static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1652                         u64 seq, unsigned flags)
1653 {
1654         WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1655 
1656         amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
1657         amdgpu_ring_write(ring, addr);
1658         amdgpu_ring_write(ring, upper_32_bits(addr));
1659         amdgpu_ring_write(ring, seq);
1660         amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
1661 }
1662 
1663 static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1664 {
1665         amdgpu_ring_write(ring, VCN_ENC_CMD_END);
1666 }
1667 
1668 /**
1669  * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
1670  *
1671  * @ring: amdgpu_ring pointer
1672  * @job: job to retrieve vmid from
      * @ib: indirect buffer to execute
      * @flags: unused
1673  *
1674  * Write enc ring commands to execute the indirect buffer
1675  */
1676 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1677                                         struct amdgpu_job *job,
1678                                         struct amdgpu_ib *ib,
1679                                         uint32_t flags)
1680 {
1681         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1682 
1683         amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
1684         amdgpu_ring_write(ring, vmid);
1685         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1686         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1687         amdgpu_ring_write(ring, ib->length_dw);
1688 }
1689 
1690 static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1691                                             uint32_t reg, uint32_t val,
1692                                             uint32_t mask)
1693 {
1694         amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
1695         amdgpu_ring_write(ring, reg << 2);
1696         amdgpu_ring_write(ring, mask);
1697         amdgpu_ring_write(ring, val);
1698 }
1699 
1700 static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1701                                             unsigned int vmid, uint64_t pd_addr)
1702 {
1703         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1704 
1705         pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1706 
1707         /* wait for reg writes */
1708         vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
1709                                         lower_32_bits(pd_addr), 0xffffffff);
1710 }
1711 
1712 static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1713                                         uint32_t reg, uint32_t val)
1714 {
1715         amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1716         amdgpu_ring_write(ring, reg << 2);
1717         amdgpu_ring_write(ring, val);
1718 }
1719 
1720 
1721 /**
1722  * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
1723  *
1724  * @ring: amdgpu_ring pointer
1725  *
1726  * Returns the current hardware read pointer
1727  */
1728 static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
1729 {
1730         struct amdgpu_device *adev = ring->adev;
1731 
1732         return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
1733 }
1734 
1735 /**
1736  * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
1737  *
1738  * @ring: amdgpu_ring pointer
1739  *
1740  * Returns the current hardware write pointer
1741  */
1742 static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
1743 {
1744         struct amdgpu_device *adev = ring->adev;
1745 
1746         return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
1747 }
1748 
1749 /**
1750  * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
1751  *
1752  * @ring: amdgpu_ring pointer
1753  *
1754  * Commits the write pointer to the hardware
1755  */
1756 static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
1757 {
1758         struct amdgpu_device *adev = ring->adev;
1759 
1760         WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
1761 }
1762 
1763 /**
1764  * vcn_v1_0_jpeg_ring_insert_start - insert a start command
1765  *
1766  * @ring: amdgpu_ring pointer
1767  *
1768  * Write a start command to the ring.
1769  */
1770 static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
1771 {
1772         struct amdgpu_device *adev = ring->adev;
1773 
1774         amdgpu_ring_write(ring,
1775                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
1776         amdgpu_ring_write(ring, 0x68e04);
1777 
1778         amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
1779         amdgpu_ring_write(ring, 0x80010000);
1780 }
1781 
1782 /**
1783  * vcn_v1_0_jpeg_ring_insert_end - insert an end command
1784  *
1785  * @ring: amdgpu_ring pointer
1786  *
1787  * Write an end command to the ring.
1788  */
1789 static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
1790 {
1791         struct amdgpu_device *adev = ring->adev;
1792 
1793         amdgpu_ring_write(ring,
1794                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
1795         amdgpu_ring_write(ring, 0x68e04);
1796 
1797         amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
1798         amdgpu_ring_write(ring, 0x00010000);
1799 }
1800 
1801 /**
1802  * vcn_v1_0_jpeg_ring_emit_fence - emit a fence & trap command
1803  *
1804  * @ring: amdgpu_ring pointer
1805  * @addr: fence GPU address
      * @seq: fence sequence number
      * @flags: fence flags
1806  *
1807  * Write a fence and a trap command to the ring.
1808  */
1809 static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1810                                      unsigned flags)
1811 {
1812         struct amdgpu_device *adev = ring->adev;
1813 
1814         WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1815 
1816         amdgpu_ring_write(ring,
1817                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
1818         amdgpu_ring_write(ring, seq);
1819 
1820         amdgpu_ring_write(ring,
1821                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
1822         amdgpu_ring_write(ring, seq);
1823 
1824         amdgpu_ring_write(ring,
1825                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
1826         amdgpu_ring_write(ring, lower_32_bits(addr));
1827 
1828         amdgpu_ring_write(ring,
1829                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
1830         amdgpu_ring_write(ring, upper_32_bits(addr));
1831 
1832         amdgpu_ring_write(ring,
1833                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
1834         amdgpu_ring_write(ring, 0x8);
1835 
1836         amdgpu_ring_write(ring,
1837                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
1838         amdgpu_ring_write(ring, 0);
1839 
1840         amdgpu_ring_write(ring,
1841                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
1842         amdgpu_ring_write(ring, 0x01400200);
1843 
1844         amdgpu_ring_write(ring,
1845                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
1846         amdgpu_ring_write(ring, seq);
1847 
1848         amdgpu_ring_write(ring,
1849                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
1850         amdgpu_ring_write(ring, lower_32_bits(addr));
1851 
1852         amdgpu_ring_write(ring,
1853                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
1854         amdgpu_ring_write(ring, upper_32_bits(addr));
1855 
1856         amdgpu_ring_write(ring,
1857                 PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
1858         amdgpu_ring_write(ring, 0xffffffff);
1859 
1860         amdgpu_ring_write(ring,
1861                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
1862         amdgpu_ring_write(ring, 0x3fbc);
1863 
1864         amdgpu_ring_write(ring,
1865                 PACKETJ(0, 0, 0, PACKETJ_TYPE0));
1866         amdgpu_ring_write(ring, 0x1);
1867 
1868         /* emit trap */
1869         amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
1870         amdgpu_ring_write(ring, 0);
1871 }
1872 
1873 /**
1874  * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
1875  *
1876  * @ring: amdgpu_ring pointer
1877  * @job: job to retrieve vmid from
      * @ib: indirect buffer to execute
      * @flags: unused
1878  *
1879  * Write ring commands to execute the indirect buffer.
1880  */
1881 static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
1882                                         struct amdgpu_job *job,
1883                                         struct amdgpu_ib *ib,
1884                                         uint32_t flags)
1885 {
1886         struct amdgpu_device *adev = ring->adev;
1887         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1888 
1889         amdgpu_ring_write(ring,
1890                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
1891         amdgpu_ring_write(ring, (vmid | (vmid << 4)));
1892 
1893         amdgpu_ring_write(ring,
1894                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
1895         amdgpu_ring_write(ring, (vmid | (vmid << 4)));
1896 
1897         amdgpu_ring_write(ring,
1898                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
1899         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1900 
1901         amdgpu_ring_write(ring,
1902                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
1903         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1904 
1905         amdgpu_ring_write(ring,
1906                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
1907         amdgpu_ring_write(ring, ib->length_dw);
1908 
1909         amdgpu_ring_write(ring,
1910                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
1911         amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
1912 
1913         amdgpu_ring_write(ring,
1914                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
1915         amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
1916 
1917         amdgpu_ring_write(ring,
1918                 PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
1919         amdgpu_ring_write(ring, 0);
1920 
1921         amdgpu_ring_write(ring,
1922                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
1923         amdgpu_ring_write(ring, 0x01400200);
1924 
1925         amdgpu_ring_write(ring,
1926                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
1927         amdgpu_ring_write(ring, 0x2);
1928 
1929         amdgpu_ring_write(ring,
1930                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
1931         amdgpu_ring_write(ring, 0x2);
1932 }
1933 
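     /**
      * vcn_v1_0_jpeg_ring_emit_reg_wait - emit a register wait on the JPEG ring
      *
      * @ring: amdgpu_ring pointer
      * @reg: register offset (dword based)
      * @val: value to wait for
      * @mask: mask applied before the comparison
      *
      * Program the JRBC condition-read timer and reference data, then issue
      * a conditional register read (PACKETJ_TYPE3).  Registers whose byte
      * offset falls in the 0x1f800-0x21fff or 0x1e000-0x1e1ff ranges are
      * encoded directly in the packet; other registers are reached
      * indirectly through UVD_JRBC_EXTERNAL_REG_BASE.
      */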
1934 static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
1935                                             uint32_t reg, uint32_t val,
1936                                             uint32_t mask)
1937 {
1938         struct amdgpu_device *adev = ring->adev;
1939         uint32_t reg_offset = (reg << 2);
1940 
1941         amdgpu_ring_write(ring,
1942                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
1943         amdgpu_ring_write(ring, 0x01400200);
1944 
1945         amdgpu_ring_write(ring,
1946                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
1947         amdgpu_ring_write(ring, val);
1948 
1949         amdgpu_ring_write(ring,
1950                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
1951         if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
1952                 ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
1953                 amdgpu_ring_write(ring, 0);
1954                 amdgpu_ring_write(ring,
1955                         PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
1956         } else {
1957                 amdgpu_ring_write(ring, reg_offset);
1958                 amdgpu_ring_write(ring,
1959                         PACKETJ(0, 0, 0, PACKETJ_TYPE3));
1960         }
1961         amdgpu_ring_write(ring, mask);
1962 }
1963 
1964 static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
1965                 unsigned vmid, uint64_t pd_addr)
1966 {
1967         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1968         uint32_t data0, data1, mask;
1969 
1970         pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1971 
1972         /* wait for register write */
1973         data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
1974         data1 = lower_32_bits(pd_addr);
1975         mask = 0xffffffff;
1976         vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
1977 }
1978 
1979 static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
1980                                         uint32_t reg, uint32_t val)
1981 {
1982         struct amdgpu_device *adev = ring->adev;
1983         uint32_t reg_offset = (reg << 2);
1984 
1985         amdgpu_ring_write(ring,
1986                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
1987         if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
1988                         ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
1989                 amdgpu_ring_write(ring, 0);
1990                 amdgpu_ring_write(ring,
1991                         PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
1992         } else {
1993                 amdgpu_ring_write(ring, reg_offset);
1994                 amdgpu_ring_write(ring,
1995                         PACKETJ(0, 0, 0, PACKETJ_TYPE0));
1996         }
1997         amdgpu_ring_write(ring, val);
1998 }
1999 
2000 static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
2001 {
2002         int i;
2003 
2004         WARN_ON(ring->wptr % 2 || count % 2);
2005 
2006         for (i = 0; i < count / 2; i++) {
2007                 amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
2008                 amdgpu_ring_write(ring, 0);
2009         }
2010 }
2011 
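     /**
      * vcn_v1_0_jpeg_ring_patch_wreg - patch a register write into the ring buffer
      *
      * @ring: amdgpu_ring pointer
      * @ptr: dword offset into the ring buffer, advanced as words are written
      * @reg_offset: byte offset of the register to write
      * @val: value to write
      *
      * Unlike the emit helpers, this writes the packet words directly into
      * ring->ring[] instead of going through amdgpu_ring_write(), using the
      * same direct/indirect addressing rule as vcn_v1_0_jpeg_ring_emit_wreg.
      */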
2012 static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
2013 {
2014         struct amdgpu_device *adev = ring->adev;
2015         ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
2016         if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
2017                 ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
2018                 ring->ring[(*ptr)++] = 0;
2019                 ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
2020         } else {
2021                 ring->ring[(*ptr)++] = reg_offset;
2022                 ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
2023         }
2024         ring->ring[(*ptr)++] = val;
2025 }
2026 
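     /**
      * vcn_v1_0_jpeg_ring_set_patch_ring - pre-fill the JPEG ring patch area
      *
      * @ring: amdgpu_ring pointer
      * @ptr: dword offset in the ring buffer where the patch sequence starts
      *
      * Write a fixed command sequence into the ring buffer beyond the area
      * used for normal submissions (see vcn_v1_0_start_spg_mode): re-program
      * the JRBC read BAR to the ring's GPU address, enable NO_FETCH and RPTR
      * writes in UVD_JRBC_RB_CNTL, poll UVD_JRBC_RB_CNTL, pad with no-ops,
      * reset the read pointer and finally re-enable fetching.
      */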
2027 static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
2028 {
2029         struct amdgpu_device *adev = ring->adev;
2030 
2031         uint32_t reg, reg_offset, val, mask, i;
2032 
2033         // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
2034         reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
2035         reg_offset = (reg << 2);
2036         val = lower_32_bits(ring->gpu_addr);
2037         vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
2038 
2039         // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
2040         reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
2041         reg_offset = (reg << 2);
2042         val = upper_32_bits(ring->gpu_addr);
2043         vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
2044 
2045         // 3rd to 5th: issue MEM_READ commands
2046         for (i = 0; i <= 2; i++) {
2047                 ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
2048                 ring->ring[ptr++] = 0;
2049         }
2050 
2051         // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
2052         reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
2053         reg_offset = (reg << 2);
2054         val = 0x13;
2055         vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
2056 
2057         // 7th: program mmUVD_JRBC_RB_REF_DATA
2058         reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
2059         reg_offset = (reg << 2);
2060         val = 0x1;
2061         vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
2062 
2063         // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
2064         reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
2065         reg_offset = (reg << 2);
2066         val = 0x1;
2067         mask = 0x1;
2068 
2069         ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
2070         ring->ring[ptr++] = 0x01400200;
2071         ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
2072         ring->ring[ptr++] = val;
2073         ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
2074         if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
2075                 ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
2076                 ring->ring[ptr++] = 0;
2077                 ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
2078         } else {
2079                 ring->ring[ptr++] = reg_offset;
2080                 ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
2081         }
2082         ring->ring[ptr++] = mask;
2083 
2084         //9th to 21st: insert no-op
2085         for (i = 0; i <= 12; i++) {
2086                 ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
2087                 ring->ring[ptr++] = 0;
2088         }
2089 
2090         //22nd: reset mmUVD_JRBC_RB_RPTR
2091         reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
2092         reg_offset = (reg << 2);
2093         val = 0;
2094         vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
2095 
2096         //23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
2097         reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
2098         reg_offset = (reg << 2);
2099         val = 0x12;
2100         vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
2101 }
2102 
2103 static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
2104                                         struct amdgpu_irq_src *source,
2105                                         unsigned type,
2106                                         enum amdgpu_interrupt_state state)
2107 {
2108         return 0;
2109 }
2110 
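     /**
      * vcn_v1_0_process_interrupt - process a VCN interrupt
      *
      * @adev: amdgpu_device pointer
      * @source: interrupt source
      * @entry: interrupt vector entry
      *
      * Dispatch fence processing to the ring matching the source id:
      * 124 for decode, 119 and 120 for the two encode rings, 126 for JPEG.
      */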
2111 static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
2112                                       struct amdgpu_irq_src *source,
2113                                       struct amdgpu_iv_entry *entry)
2114 {
2115         DRM_DEBUG("IH: VCN TRAP\n");
2116 
2117         switch (entry->src_id) {
2118         case 124:
2119                 amdgpu_fence_process(&adev->vcn.inst->ring_dec);
2120                 break;
2121         case 119:
2122                 amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
2123                 break;
2124         case 120:
2125                 amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
2126                 break;
2127         case 126:
2128                 amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
2129                 break;
2130         default:
2131                 DRM_ERROR("Unhandled interrupt: %d %d\n",
2132                           entry->src_id, entry->src_data[0]);
2133                 break;
2134         }
2135 
2136         return 0;
2137 }
2138 
2139 static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
2140 {
2141         struct amdgpu_device *adev = ring->adev;
2142         int i;
2143 
2144         WARN_ON(ring->wptr % 2 || count % 2);
2145 
2146         for (i = 0; i < count / 2; i++) {
2147                 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
2148                 amdgpu_ring_write(ring, 0);
2149         }
2150 }
2151 
2152 static int vcn_v1_0_set_powergating_state(void *handle,
2153                                           enum amd_powergating_state state)
2154 {
2155         /* This doesn't actually powergate the VCN block.
2156          * That's done in the dpm code via the SMC.  This
2157          * just re-inits the block as necessary.  The actual
2158          * gating still happens in the dpm code.  We should
2159          * revisit this when there is a cleaner line between
2160          * the smc and the hw blocks
2161          */
2162         int ret;
2163         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2164 
2165         if (state == adev->vcn.cur_state)
2166                 return 0;
2167 
2168         if (state == AMD_PG_STATE_GATE)
2169                 ret = vcn_v1_0_stop(adev);
2170         else
2171                 ret = vcn_v1_0_start(adev);
2172 
2173         if (!ret)
2174                 adev->vcn.cur_state = state;
2175         return ret;
2176 }
2177 
2178 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
2179         .name = "vcn_v1_0",
2180         .early_init = vcn_v1_0_early_init,
2181         .late_init = NULL,
2182         .sw_init = vcn_v1_0_sw_init,
2183         .sw_fini = vcn_v1_0_sw_fini,
2184         .hw_init = vcn_v1_0_hw_init,
2185         .hw_fini = vcn_v1_0_hw_fini,
2186         .suspend = vcn_v1_0_suspend,
2187         .resume = vcn_v1_0_resume,
2188         .is_idle = vcn_v1_0_is_idle,
2189         .wait_for_idle = vcn_v1_0_wait_for_idle,
2190         .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
2191         .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
2192         .soft_reset = NULL /* vcn_v1_0_soft_reset */,
2193         .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
2194         .set_clockgating_state = vcn_v1_0_set_clockgating_state,
2195         .set_powergating_state = vcn_v1_0_set_powergating_state,
2196 };
2197 
2198 static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
2199         .type = AMDGPU_RING_TYPE_VCN_DEC,
2200         .align_mask = 0xf,
2201         .support_64bit_ptrs = false,
2202         .no_user_fence = true,
2203         .vmhub = AMDGPU_MMHUB_0,
2204         .get_rptr = vcn_v1_0_dec_ring_get_rptr,
2205         .get_wptr = vcn_v1_0_dec_ring_get_wptr,
2206         .set_wptr = vcn_v1_0_dec_ring_set_wptr,
2207         .emit_frame_size =
2208                 6 + 6 + /* hdp invalidate / flush */
2209                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
2210                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
2211                 8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
2212                 14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
2213                 6,
2214         .emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
2215         .emit_ib = vcn_v1_0_dec_ring_emit_ib,
2216         .emit_fence = vcn_v1_0_dec_ring_emit_fence,
2217         .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
2218         .test_ring = amdgpu_vcn_dec_ring_test_ring,
2219         .test_ib = amdgpu_vcn_dec_ring_test_ib,
2220         .insert_nop = vcn_v1_0_dec_ring_insert_nop,
2221         .insert_start = vcn_v1_0_dec_ring_insert_start,
2222         .insert_end = vcn_v1_0_dec_ring_insert_end,
2223         .pad_ib = amdgpu_ring_generic_pad_ib,
2224         .begin_use = amdgpu_vcn_ring_begin_use,
2225         .end_use = amdgpu_vcn_ring_end_use,
2226         .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
2227         .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
2228         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2229 };
2230 
2231 static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
2232         .type = AMDGPU_RING_TYPE_VCN_ENC,
2233         .align_mask = 0x3f,
2234         .nop = VCN_ENC_CMD_NO_OP,
2235         .support_64bit_ptrs = false,
2236         .no_user_fence = true,
2237         .vmhub = AMDGPU_MMHUB_0,
2238         .get_rptr = vcn_v1_0_enc_ring_get_rptr,
2239         .get_wptr = vcn_v1_0_enc_ring_get_wptr,
2240         .set_wptr = vcn_v1_0_enc_ring_set_wptr,
2241         .emit_frame_size =
2242                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2243                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
2244                 4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
2245                 5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
2246                 1, /* vcn_v1_0_enc_ring_insert_end */
2247         .emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
2248         .emit_ib = vcn_v1_0_enc_ring_emit_ib,
2249         .emit_fence = vcn_v1_0_enc_ring_emit_fence,
2250         .emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
2251         .test_ring = amdgpu_vcn_enc_ring_test_ring,
2252         .test_ib = amdgpu_vcn_enc_ring_test_ib,
2253         .insert_nop = amdgpu_ring_insert_nop,
2254         .insert_end = vcn_v1_0_enc_ring_insert_end,
2255         .pad_ib = amdgpu_ring_generic_pad_ib,
2256         .begin_use = amdgpu_vcn_ring_begin_use,
2257         .end_use = amdgpu_vcn_ring_end_use,
2258         .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
2259         .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
2260         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2261 };
2262 
2263 static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
2264         .type = AMDGPU_RING_TYPE_VCN_JPEG,
2265         .align_mask = 0xf,
2266         .nop = PACKET0(0x81ff, 0),
2267         .support_64bit_ptrs = false,
2268         .no_user_fence = true,
2269         .vmhub = AMDGPU_MMHUB_0,
2270         .extra_dw = 64,
2271         .get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
2272         .get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
2273         .set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
2274         .emit_frame_size =
2275                 6 + 6 + /* hdp invalidate / flush */
2276                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
2277                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
2278                 8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
2279                 26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
2280                 6,
2281         .emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
2282         .emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
2283         .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
2284         .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
2285         .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
2286         .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
2287         .insert_nop = vcn_v1_0_jpeg_ring_nop,
2288         .insert_start = vcn_v1_0_jpeg_ring_insert_start,
2289         .insert_end = vcn_v1_0_jpeg_ring_insert_end,
2290         .pad_ib = amdgpu_ring_generic_pad_ib,
2291         .begin_use = amdgpu_vcn_ring_begin_use,
2292         .end_use = amdgpu_vcn_ring_end_use,
2293         .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
2294         .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
2295         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2296 };
2297 
2298 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
2299 {
2300         adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
2301         DRM_INFO("VCN decode is enabled in VM mode\n");
2302 }
2303 
2304 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
2305 {
2306         int i;
2307 
2308         for (i = 0; i < adev->vcn.num_enc_rings; ++i)
2309                 adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
2310 
2311         DRM_INFO("VCN encode is enabled in VM mode\n");
2312 }
2313 
2314 static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
2315 {
2316         adev->vcn.inst->ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
2317         DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
2318 }
2319 
2320 static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
2321         .set = vcn_v1_0_set_interrupt_state,
2322         .process = vcn_v1_0_process_interrupt,
2323 };
2324 
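     /*
      * One interrupt type is registered per encode ring, plus two more,
      * presumably for the decode and JPEG rings.
      */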
2325 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
2326 {
2327         adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
2328         adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
2329 }
2330 
2331 const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
2332 {
2333                 .type = AMD_IP_BLOCK_TYPE_VCN,
2334                 .major = 1,
2335                 .minor = 0,
2336                 .rev = 0,
2337                 .funcs = &vcn_v1_0_ip_funcs,
2338 };
