drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c


DEFINITIONS

This source file includes the following definitions:
  1. mmhub_v2_0_init_gart_pt_regs
  2. mmhub_v2_0_init_gart_aperture_regs
  3. mmhub_v2_0_init_system_aperture_regs
  4. mmhub_v2_0_init_tlb_regs
  5. mmhub_v2_0_init_cache_regs
  6. mmhub_v2_0_enable_system_domain
  7. mmhub_v2_0_disable_identity_aperture
  8. mmhub_v2_0_setup_vmid_config
  9. mmhub_v2_0_program_invalidation
  10. mmhub_v2_0_gart_enable
  11. mmhub_v2_0_gart_disable
  12. mmhub_v2_0_set_fault_enable_default
  13. mmhub_v2_0_init
  14. mmhub_v2_0_update_medium_grain_clock_gating
  15. mmhub_v2_0_update_medium_grain_light_sleep
  16. mmhub_v2_0_set_clockgating
  17. mmhub_v2_0_get_clockgating

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "mmhub_v2_0.h"

#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_default.h"
#include "navi10_enum.h"

#include "soc15_common.h"

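/*
 * Program the VM context 0 page table base address registers with the page
 * directory address of the GART table (adev->gart.bo).
 */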
static void mmhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
{
        uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);

        WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
                     lower_32_bits(value));

        WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
                     upper_32_bits(value));
}

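/*
 * Program the GART aperture (start and end GPU virtual addresses) for VM
 * context 0.  The 48-bit addresses are written as 4KB page numbers, split
 * across the LO32/HI32 register pairs (bits 43:12 and 47:44).
 */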
static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
        mmhub_v2_0_init_gart_pt_regs(adev);

        WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
                     (u32)(adev->gmc.gart_start >> 12));
        WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
                     (u32)(adev->gmc.gart_start >> 44));

        WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
                     (u32)(adev->gmc.gart_end >> 12));
        WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
                     (u32)(adev->gmc.gart_end >> 44));
}

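/*
 * Program the system aperture and fault defaults: disable the AGP aperture,
 * cover VRAM with the system aperture window (the >> 18 shift suggests the
 * registers take 256KB units), and point the default page and the L2
 * protection fault default address at the VRAM scratch page and the dummy
 * page respectively.
 */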
static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
        uint64_t value;
        uint32_t tmp;

        /* Disable AGP. */
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0);
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);

        /* Program the system aperture low logical page number. */
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                     adev->gmc.vram_start >> 18);
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                     adev->gmc.vram_end >> 18);

        /* Set default page address. */
        value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
                adev->vm_manager.vram_base_offset;
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
                     (u32)(value >> 12));
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
                     (u32)(value >> 44));

        /* Program "protection fault". */
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
                     (u32)(adev->dummy_page_addr >> 12));
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
                     (u32)((u64)adev->dummy_page_addr >> 44));

        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
                            ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

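/*
 * Set up L1 TLB control: enable the TLB and the advanced driver model,
 * select system access mode 3, and use the uncached (UC) memory type for
 * system aperture accesses.
 */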
static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
{
        uint32_t tmp;

        /* Setup TLB control */
        tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);

        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC); /* UC, uncached */

        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);
}

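/*
 * Set up the VM L2 cache: enable it, invalidate the L1 TLBs and the L2,
 * and pick the bank select and big-K fragment size depending on whether
 * translate_further is enabled.
 */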
static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
        uint32_t tmp;

        /* Setup L2 cache */
        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
                            ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
        /* XXX for emulation, Refer to closed source code. */
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
                            0);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);

        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);

        tmp = mmMMVM_L2_CNTL3_DEFAULT;
        if (adev->gmc.translate_further) {
                tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
                tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
        } else {
                tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
                tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
        }
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);

        tmp = mmMMVM_L2_CNTL4_DEFAULT;
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL4, tmp);
}

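/*
 * Enable VM context 0, the system/GART domain, with a page table depth of
 * 0 (a single-level page table).
 */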
static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
{
        uint32_t tmp;

        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
        tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
        tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
        WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}

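/*
 * Disable the context 1 identity aperture by programming an empty range
 * (low address above high address) and a zero physical offset.
 */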
static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
        WREG32_SOC15(MMHUB, 0,
                     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
                     0xFFFFFFFF);
        WREG32_SOC15(MMHUB, 0,
                     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
                     0x0000000F);

        WREG32_SOC15(MMHUB, 0,
                     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
        WREG32_SOC15(MMHUB, 0,
                     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
                     0);
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
                     0);
}

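/*
 * Configure VM contexts 1..15, which back per-process GPU address spaces:
 * page table depth and block size from the VM manager, fault-handling
 * defaults, retry behavior from the amdgpu_noretry parameter, and a
 * 0..max_pfn-1 address range.  The start/end address registers are 64-bit
 * pairs, hence the i * 2 register stride.
 */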
static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
{
        int i;
        uint32_t tmp;

        for (i = 0; i <= 14; i++) {
                tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i);
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
                                    adev->vm_manager.num_level);
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    1);
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    PAGE_TABLE_BLOCK_SIZE,
                                    adev->vm_manager.block_size - 9);
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
                                    !amdgpu_noretry);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i, tmp);
                WREG32_SOC15_OFFSET(MMHUB, 0,
                                    mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
                                    i * 2, 0);
                WREG32_SOC15_OFFSET(MMHUB, 0,
                                    mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
                                    i * 2, 0);
                WREG32_SOC15_OFFSET(MMHUB, 0,
                                    mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
                                    i * 2,
                                    lower_32_bits(adev->vm_manager.max_pfn - 1));
                WREG32_SOC15_OFFSET(MMHUB, 0,
                                    mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
                                    i * 2,
                                    upper_32_bits(adev->vm_manager.max_pfn - 1));
        }
}

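/*
 * Point all 18 VM invalidation engines at the full address range
 * (HI32 = 0x1f, LO32 = 0xffffffff), so invalidation requests cover the
 * whole address space by default.
 */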
static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
{
        unsigned i;

        for (i = 0; i < 18; ++i) {
                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
                                    2 * i, 0xffffffff);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
                                    2 * i, 0x1f);
        }
}

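/**
 * mmhub_v2_0_gart_enable - program the MMHUB for GART/VM operation
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the GART and system apertures, TLB and L2 cache, enables the
 * system VM context, and sets up the per-VMID contexts and invalidation
 * engines.  For SR-IOV VFs it also programs the FB location registers that
 * the VBIOS post does not touch.  Always returns 0.
 */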
int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev)) {
                /*
                 * MMMC_VM_FB_LOCATION_BASE/TOP are NULL for VF, because they
                 * are VF copy registers so the VBIOS post doesn't program
                 * them; for SRIOV the driver needs to program them.
                 */
                WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE,
                             adev->gmc.vram_start >> 24);
                WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP,
                             adev->gmc.vram_end >> 24);
        }

        /* GART Enable. */
        mmhub_v2_0_init_gart_aperture_regs(adev);
        mmhub_v2_0_init_system_aperture_regs(adev);
        mmhub_v2_0_init_tlb_regs(adev);
        mmhub_v2_0_init_cache_regs(adev);

        mmhub_v2_0_enable_system_domain(adev);
        mmhub_v2_0_disable_identity_aperture(adev);
        mmhub_v2_0_setup_vmid_config(adev);
        mmhub_v2_0_program_invalidation(adev);

        return 0;
}

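/**
 * mmhub_v2_0_gart_disable - shut down MMHUB GART/VM operation
 *
 * @adev: amdgpu_device pointer
 *
 * Disables all 16 VM contexts, the L1 TLB and advanced driver model, and
 * the VM L2 cache.
 */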
void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
        u32 tmp;
        u32 i;

        /* Disable all tables */
        for (i = 0; i < 16; i++)
                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, i, 0);

        /* Setup TLB control */
        tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
                            ENABLE_ADVANCED_DRIVER_MODEL, 0);
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);

        /* Setup L2 cache */
        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, 0);
}

/**
 * mmhub_v2_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
        u32 tmp;

        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                            RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                            PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                            PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                            PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                            TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
                            value);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                            NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                            DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                            VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                            READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                            WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                            EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        if (!value) {
                tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                                    CRASH_ON_NO_RETRY_FAULT, 1);
                tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                                    CRASH_ON_RETRY_FAULT, 1);
        }
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

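/**
 * mmhub_v2_0_init - record the MMHUB vmhub register offsets
 *
 * @adev: amdgpu_device pointer
 *
 * Fills adev->vmhub[AMDGPU_MMHUB_0] with the context 0 page table base,
 * invalidation engine 0, and L2 protection fault register offsets.
 */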
void mmhub_v2_0_init(struct amdgpu_device *adev)
{
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

        hub->ctx0_ptb_addr_lo32 =
                SOC15_REG_OFFSET(MMHUB, 0,
                                 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
        hub->ctx0_ptb_addr_hi32 =
                SOC15_REG_OFFSET(MMHUB, 0,
                                 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
        hub->vm_inv_eng0_sem =
                SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM);
        hub->vm_inv_eng0_req =
                SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
        hub->vm_inv_eng0_ack =
                SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ACK);
        hub->vm_context0_cntl =
                SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
        hub->vm_l2_pro_fault_status =
                SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_STATUS);
        hub->vm_l2_pro_fault_cntl =
                SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
}

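/*
 * Toggle MMHUB medium grain clock gating (MGCG): set or clear the master
 * enable in MM_ATC_L2_MISC_CG and the per-path clock-gating disable bits
 * in DAGB0_CNTL_MISC2, writing each register only if its value changed.
 */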
static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                        bool enable)
{
        uint32_t def, data, def1, data1;

        def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);

        def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
                data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;

                data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

        } else {
                data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;

                data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
        }

        if (def != data)
                WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);

        if (def1 != data1)
                WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
}

static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                       bool enable)
{
        uint32_t def, data;

        def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
                data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
        else
                data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

        if (def != data)
                WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
}

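/*
 * Apply the requested clockgating state: toggle MGCG and memory light
 * sleep for Navi1x parts.  Skipped for SR-IOV VFs (clock gating is assumed
 * to be controlled by the host in that case).
 */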
int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
                               enum amd_clockgating_state state)
{
        if (amdgpu_sriov_vf(adev))
                return 0;

        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                mmhub_v2_0_update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                mmhub_v2_0_update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE);
                break;
        default:
                break;
        }

        return 0;
}

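/*
 * Report the MMHUB clockgating features that are currently active
 * (AMD_CG_SUPPORT_MC_MGCG, AMD_CG_SUPPORT_MC_LS) by setting the
 * corresponding bits in *flags.
 */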
void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
        int data, data1;

        if (amdgpu_sriov_vf(adev))
                *flags = 0;

        data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);

        data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

        /* AMD_CG_SUPPORT_MC_MGCG */
        if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
            !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
                *flags |= AMD_CG_SUPPORT_MC_MGCG;

        /* AMD_CG_SUPPORT_MC_LS */
        if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_MC_LS;
}
