root/drivers/gpu/drm/amd/amdgpu/nv.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. nv_pcie_rreg
  2. nv_pcie_wreg
  3. nv_didt_rreg
  4. nv_didt_wreg
  5. nv_get_config_memsize
  6. nv_get_xclk
  7. nv_grbm_select
  8. nv_vga_set_state
  9. nv_read_disabled_bios
  10. nv_read_bios_from_rom
  11. nv_read_indexed_register
  12. nv_get_register_value
  13. nv_read_register
  14. nv_gpu_pci_config_reset
  15. nv_asic_mode1_reset
  16. nv_asic_reset_method
  17. nv_asic_reset
  18. nv_set_uvd_clocks
  19. nv_set_vce_clocks
  20. nv_pcie_gen3_enable
  21. nv_program_aspm
  22. nv_enable_doorbell_aperture
  23. nv_reg_base_init
  24. nv_set_ip_blocks
  25. nv_get_rev_id
  26. nv_flush_hdp
  27. nv_invalidate_hdp
  28. nv_need_full_reset
  29. nv_get_pcie_usage
  30. nv_need_reset_on_init
  31. nv_init_doorbell_index
  32. nv_common_early_init
  33. nv_common_late_init
  34. nv_common_sw_init
  35. nv_common_sw_fini
  36. nv_common_hw_init
  37. nv_common_hw_fini
  38. nv_common_suspend
  39. nv_common_resume
  40. nv_common_is_idle
  41. nv_common_wait_for_idle
  42. nv_common_soft_reset
  43. nv_update_hdp_mem_power_gating
  44. nv_update_hdp_clock_gating
  45. nv_common_set_clockgating_state
  46. nv_common_set_powergating_state
  47. nv_common_get_clockgating_state

   1 /*
   2  * Copyright 2019 Advanced Micro Devices, Inc.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice shall be included in
  12  * all copies or substantial portions of the Software.
  13  *
  14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20  * OTHER DEALINGS IN THE SOFTWARE.
  21  *
  22  */
  23 #include <linux/firmware.h>
  24 #include <linux/slab.h>
  25 #include <linux/module.h>
  26 #include <linux/pci.h>
  27 
  28 #include "amdgpu.h"
  29 #include "amdgpu_atombios.h"
  30 #include "amdgpu_ih.h"
  31 #include "amdgpu_uvd.h"
  32 #include "amdgpu_vce.h"
  33 #include "amdgpu_ucode.h"
  34 #include "amdgpu_psp.h"
  35 #include "amdgpu_smu.h"
  36 #include "atom.h"
  37 #include "amd_pcie.h"
  38 
  39 #include "gc/gc_10_1_0_offset.h"
  40 #include "gc/gc_10_1_0_sh_mask.h"
  41 #include "hdp/hdp_5_0_0_offset.h"
  42 #include "hdp/hdp_5_0_0_sh_mask.h"
  43 
  44 #include "soc15.h"
  45 #include "soc15_common.h"
  46 #include "gmc_v10_0.h"
  47 #include "gfxhub_v2_0.h"
  48 #include "mmhub_v2_0.h"
  49 #include "nv.h"
  50 #include "navi10_ih.h"
  51 #include "gfx_v10_0.h"
  52 #include "sdma_v5_0.h"
  53 #include "vcn_v2_0.h"
  54 #include "dce_virtual.h"
  55 #include "mes_v10_1.h"
  56 
  57 static const struct amd_ip_funcs nv_common_ip_funcs;
  58 
  59 /*
  60  * Indirect registers accessor
  61  */
/*
 * Read a PCIE indirect register: write the register offset into the NBIO
 * index register, then fetch the value through the data register.  The
 * index/data pair is a shared resource, so the access is serialized with
 * pcie_idx_lock (IRQ-safe).
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);	/* read back to make sure the index write landed */
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
  76 
/*
 * Write a PCIE indirect register through the NBIO index/data pair,
 * serialized with pcie_idx_lock.  Both the index and the data writes are
 * read back to flush them before the lock is dropped.
 */
static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);	/* flush the index write */
	WREG32(data, v);
	(void)RREG32(data);	/* flush the data write */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
  91 
/*
 * Read a DIDT indirect register via the GC DIDT_IND_INDEX/DATA pair,
 * serialized with didt_idx_lock.
 */
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}
 106 
/*
 * Write a DIDT indirect register via the GC DIDT_IND_INDEX/DATA pair,
 * serialized with didt_idx_lock.
 */
static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
 119 
/* Report the memory size the ASIC was configured with, as seen by NBIO. */
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}
 124 
/* Return the reference (xclk) frequency taken from the SPLL settings. */
static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
 129 
 130 
 131 void nv_grbm_select(struct amdgpu_device *adev,
 132                      u32 me, u32 pipe, u32 queue, u32 vmid)
 133 {
 134         u32 grbm_gfx_cntl = 0;
 135         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
 136         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
 137         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
 138         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
 139 
 140         WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
 141 }
 142 
/*
 * Enable/disable legacy VGA access for this device.
 * Not implemented for Navi yet.
 */
static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
 147 
/*
 * Try to read the VBIOS with the ROM access disabled.  Not implemented
 * for Navi yet; always reports failure so callers fall back to other
 * BIOS fetch methods.
 */
static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
 153 
/*
 * Read @length_bytes of VBIOS image into @bios directly from the ROM.
 * Always fails for now; see TODO below.
 */
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	/* TODO: will implement it when SMU header is available */
	return false;
}
 160 
/*
 * Whitelist of registers userspace is allowed to read through
 * nv_read_register() (AMDGPU_INFO ioctl).  Anything not listed here is
 * rejected with -EINVAL.
 */
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
 183 
 184 static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
 185                                          u32 sh_num, u32 reg_offset)
 186 {
 187         uint32_t val;
 188 
 189         mutex_lock(&adev->grbm_idx_mutex);
 190         if (se_num != 0xffffffff || sh_num != 0xffffffff)
 191                 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 192 
 193         val = RREG32(reg_offset);
 194 
 195         if (se_num != 0xffffffff || sh_num != 0xffffffff)
 196                 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 197         mutex_unlock(&adev->grbm_idx_mutex);
 198         return val;
 199 }
 200 
 201 static uint32_t nv_get_register_value(struct amdgpu_device *adev,
 202                                       bool indexed, u32 se_num,
 203                                       u32 sh_num, u32 reg_offset)
 204 {
 205         if (indexed) {
 206                 return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
 207         } else {
 208                 if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
 209                         return adev->gfx.config.gb_addr_config;
 210                 return RREG32(reg_offset);
 211         }
 212 }
 213 
 214 static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
 215                             u32 sh_num, u32 reg_offset, u32 *value)
 216 {
 217         uint32_t i;
 218         struct soc15_allowed_register_entry  *en;
 219 
 220         *value = 0;
 221         for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
 222                 en = &nv_allowed_read_registers[i];
 223                 if (reg_offset !=
 224                     (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
 225                         continue;
 226 
 227                 *value = nv_get_register_value(adev,
 228                                                nv_allowed_read_registers[i].grbm_indexed,
 229                                                se_num, sh_num, reg_offset);
 230                 return 0;
 231         }
 232         return -EINVAL;
 233 }
 234 
#if 0
/*
 * Reset the ASIC through the PCI config reset register, then poll the
 * NBIO memsize register until it reads something other than 0xffffffff,
 * i.e. until the ASIC responds again.
 * Compiled out: PCI config reset has not been functional since vega10,
 * so nv_asic_reset() uses BACO or PSP mode1 reset instead.
 */
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

}
#endif
 259 
/*
 * Perform a PSP-assisted mode1 reset of the whole ASIC.
 *
 * Bus mastering is disabled and PCI config space is saved/restored
 * around the reset.  Afterwards the NBIO memsize register is polled
 * until it returns a valid value, which indicates the ASIC is alive
 * again.  Returns 0 on success or the psp_gpu_reset() error code.
 */
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	/* flag the GPU as hung in the atombios scratch regs for the duration */
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
 293 
 294 static enum amd_reset_method
 295 nv_asic_reset_method(struct amdgpu_device *adev)
 296 {
 297         struct smu_context *smu = &adev->smu;
 298 
 299         if (smu_baco_is_support(smu))
 300                 return AMD_RESET_METHOD_BACO;
 301         else
 302                 return AMD_RESET_METHOD_MODE1;
 303 }
 304 
 305 static int nv_asic_reset(struct amdgpu_device *adev)
 306 {
 307 
 308         /* FIXME: it doesn't work since vega10 */
 309 #if 0
 310         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 311 
 312         nv_gpu_pci_config_reset(adev);
 313 
 314         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 315 #endif
 316         int ret = 0;
 317         struct smu_context *smu = &adev->smu;
 318 
 319         if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
 320                 if (!adev->in_suspend)
 321                         amdgpu_inc_vram_lost(adev);
 322                 ret = smu_baco_reset(smu);
 323         } else {
 324                 if (!adev->in_suspend)
 325                         amdgpu_inc_vram_lost(adev);
 326                 ret = nv_asic_mode1_reset(adev);
 327         }
 328 
 329         return ret;
 330 }
 331 
/*
 * Set UVD/VCN vclk and dclk.  Not implemented for Navi yet; reports
 * success so generic code proceeds.
 */
static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}
 337 
/*
 * Set VCE evclk and ecclk.  Not implemented for Navi yet; reports
 * success so generic code proceeds.
 */
static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
 343 
/*
 * Bring the PCIe link up to gen2/gen3 speed when allowed.  Bails out on
 * root-bus devices, when the amdgpu_pcie_gen2 module parameter disables
 * it, or when the platform advertises no gen2/gen3 support.  The actual
 * speed change is not implemented yet.
 */
static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
 358 
/*
 * Program PCIe ASPM (active state power management).  Honors the
 * amdgpu_aspm module parameter; the programming itself is not
 * implemented yet.
 */
static void nv_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
 367 
/*
 * Enable/disable both the regular and the self-ring doorbell apertures
 * through the NBIO callbacks.
 */
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}
 374 
/* IP block descriptor for the Navi "common" block (nv_common_ip_funcs). */
static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
 383 
 384 static int nv_reg_base_init(struct amdgpu_device *adev)
 385 {
 386         int r;
 387 
 388         if (amdgpu_discovery) {
 389                 r = amdgpu_discovery_reg_base_init(adev);
 390                 if (r) {
 391                         DRM_WARN("failed to init reg base from ip discovery table, "
 392                                         "fallback to legacy init method\n");
 393                         goto legacy_init;
 394                 }
 395 
 396                 return 0;
 397         }
 398 
 399 legacy_init:
 400         switch (adev->asic_type) {
 401         case CHIP_NAVI10:
 402                 navi10_reg_base_init(adev);
 403                 break;
 404         case CHIP_NAVI14:
 405                 navi14_reg_base_init(adev);
 406                 break;
 407         case CHIP_NAVI12:
 408                 navi12_reg_base_init(adev);
 409                 break;
 410         default:
 411                 return -EINVAL;
 412         }
 413 
 414         return 0;
 415 }
 416 
/*
 * Register all IP blocks for the detected ASIC.  Blocks are brought up
 * in the order they are added here, so the ordering below (common ->
 * gmc -> ih -> psp -> smu -> display -> gfx -> sdma -> vcn ...) is
 * significant.  The SMU block is added either before display (PSP
 * firmware loading) or after sdma (direct loading), never both.
 */
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	adev->nbio_funcs = &nbio_v2_3_funcs;

	adev->nbio_funcs->detect_hw_virt(adev);

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		/* same as NAVI10/14 but without the MES block */
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
 482 
/* Read the silicon revision id via the NBIO block. */
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}
 487 
/* Flush the HDP write cache, delegating to the NBIO implementation. */
static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}
 492 
 493 static void nv_invalidate_hdp(struct amdgpu_device *adev,
 494                                 struct amdgpu_ring *ring)
 495 {
 496         if (!ring || !ring->funcs->emit_wreg) {
 497                 WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
 498         } else {
 499                 amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
 500                                         HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
 501         }
 502 }
 503 
/* Navi always requires a full ASIC reset (no per-IP soft reset yet). */
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}
 508 
 509 static void nv_get_pcie_usage(struct amdgpu_device *adev,
 510                               uint64_t *count0,
 511                               uint64_t *count1)
 512 {
 513         /*TODO*/
 514 }
 515 
/*
 * Decide whether the ASIC must be reset during driver init (e.g. when
 * the sOS firmware is already running from a previous driver instance).
 * The sign-of-life check is compiled out until mode1 reset works, so
 * this currently always says no.
 */
static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * are already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}
 534 
/*
 * Populate the Navi10-family doorbell index layout: KIQ/MEC compute
 * rings, userqueue range, gfx rings, SDMA engines, IH, and the VCN
 * ring pairs, plus the non-CP range bounds and the total aperture size.
 */
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	/* doorbells are 64-bit on this family, hence the << 1 on the 32-bit index */
	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}
 563 
/* ASIC-level callback table for the Navi family. */
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
};
 583 
/*
 * Early init for the common IP block: hook up the indirect register
 * accessors and the ASIC callback table, then set the per-ASIC
 * clockgating/powergating capability flags and the external revision id.
 * Returns -EINVAL for unsupported ASICs.
 */
static int nv_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* no SMC register access on this family */
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * workaround it by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
 678 
/* Late init for the common block: nothing to do on Navi. */
static int nv_common_late_init(void *handle)
{
	return 0;
}
 683 
/* Software init for the common block: nothing to allocate on Navi. */
static int nv_common_sw_init(void *handle)
{
	return 0;
}
 688 
/* Software teardown for the common block: nothing to free on Navi. */
static int nv_common_sw_fini(void *handle)
{
	return 0;
}
 693 
/*
 * Hardware init for the common block: link speed / ASPM setup, NBIO
 * register programming, and opening the doorbell aperture.
 */
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}
 709 
/* Hardware teardown for the common block: close the doorbell aperture. */
static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}
 719 
/* Suspend is just hw_fini for the common block. */
static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}
 726 
/* Resume is just hw_init for the common block. */
static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}
 733 
/* The common block has no busy state of its own; always idle. */
static bool nv_common_is_idle(void *handle)
{
	return true;
}
 738 
/* Nothing to wait for; the common block is always idle. */
static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}
 743 
/* No soft reset is implemented for the common block. */
static int nv_common_soft_reset(void *handle)
{
	return 0;
}
 748 
/*
 * nv_update_hdp_mem_power_gating - switch HDP memory power-saving mode
 * @adev: amdgpu device handle
 * @enable: true to enable the supported power-saving mode, false to disable
 *
 * Programs HDP_MEM_POWER_CTRL for exactly one of light sleep (LS), deep
 * sleep (DS) or shutdown (SD), as advertised in adev->cg_flags.  The
 * register writes below are order-sensitive: clocks are forced on first,
 * all gating is cleared, then the chosen mode is programmed, and finally
 * the original clock-override settings are restored.
 */
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	/* Nothing to do if no HDP memory power-saving mode is supported. */
	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	/* hdp_clk_cntl1 keeps the original value for restoration at the end. */
	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing clock/power mode switch,
	 * forced on IPH & RC clock */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before any changing */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut down mode, fallback to ds */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}
 821 
/*
 * nv_update_hdp_clock_gating - toggle HDP medium-grain clock gating (MGCG)
 * @adev: amdgpu device handle
 * @enable: true to allow hardware clock gating, false to force clocks on
 *
 * When enabling, the SOFT_OVERRIDE bits are cleared so the hardware may
 * gate the HDP clocks dynamically; when disabling, the same bits are set
 * to keep the clocks forced on.  No-op unless AMD_CG_SUPPORT_HDP_MGCG is
 * advertised in adev->cg_flags.
 */
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		/* Clear all soft overrides so HW can gate the clocks. */
		hdp_clk_cntl &=
			~(uint32_t)
			  (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		/* Set soft overrides to keep all HDP clocks running. */
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}
 852 
 853 static int nv_common_set_clockgating_state(void *handle,
 854                                            enum amd_clockgating_state state)
 855 {
 856         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 857 
 858         if (amdgpu_sriov_vf(adev))
 859                 return 0;
 860 
 861         switch (adev->asic_type) {
 862         case CHIP_NAVI10:
 863         case CHIP_NAVI14:
 864         case CHIP_NAVI12:
 865                 adev->nbio_funcs->update_medium_grain_clock_gating(adev,
 866                                 state == AMD_CG_STATE_GATE ? true : false);
 867                 adev->nbio_funcs->update_medium_grain_light_sleep(adev,
 868                                 state == AMD_CG_STATE_GATE ? true : false);
 869                 nv_update_hdp_mem_power_gating(adev,
 870                                    state == AMD_CG_STATE_GATE ? true : false);
 871                 nv_update_hdp_clock_gating(adev,
 872                                 state == AMD_CG_STATE_GATE ? true : false);
 873                 break;
 874         default:
 875                 break;
 876         }
 877         return 0;
 878 }
 879 
/* Powergating control is not implemented for the common block yet. */
static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}
 886 
 887 static void nv_common_get_clockgating_state(void *handle, u32 *flags)
 888 {
 889         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 890         uint32_t tmp;
 891 
 892         if (amdgpu_sriov_vf(adev))
 893                 *flags = 0;
 894 
 895         adev->nbio_funcs->get_clockgating_state(adev, flags);
 896 
 897         /* AMD_CG_SUPPORT_HDP_MGCG */
 898         tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
 899         if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
 900                      HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
 901                      HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
 902                      HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
 903                      HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
 904                      HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
 905                 *flags |= AMD_CG_SUPPORT_HDP_MGCG;
 906 
 907         /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
 908         tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
 909         if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
 910                 *flags |= AMD_CG_SUPPORT_HDP_LS;
 911         else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
 912                 *flags |= AMD_CG_SUPPORT_HDP_DS;
 913         else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
 914                 *flags |= AMD_CG_SUPPORT_HDP_SD;
 915 
 916         return;
 917 }
 918 
 919 static const struct amd_ip_funcs nv_common_ip_funcs = {
 920         .name = "nv_common",
 921         .early_init = nv_common_early_init,
 922         .late_init = nv_common_late_init,
 923         .sw_init = nv_common_sw_init,
 924         .sw_fini = nv_common_sw_fini,
 925         .hw_init = nv_common_hw_init,
 926         .hw_fini = nv_common_hw_fini,
 927         .suspend = nv_common_suspend,
 928         .resume = nv_common_resume,
 929         .is_idle = nv_common_is_idle,
 930         .wait_for_idle = nv_common_wait_for_idle,
 931         .soft_reset = nv_common_soft_reset,
 932         .set_clockgating_state = nv_common_set_clockgating_state,
 933         .set_powergating_state = nv_common_set_powergating_state,
 934         .get_clockgating_state = nv_common_get_clockgating_state,
 935 };

/* [<][>][^][v][top][bottom][index][help] */