This source file includes the following definitions:
- gmc_v6_0_mc_stop
- gmc_v6_0_mc_resume
- gmc_v6_0_init_microcode
- gmc_v6_0_mc_load_microcode
- gmc_v6_0_vram_gtt_location
- gmc_v6_0_mc_program
- gmc_v6_0_mc_init
- gmc_v6_0_flush_gpu_tlb
- gmc_v6_0_emit_flush_gpu_tlb
- gmc_v6_0_get_vm_pte_flags
- gmc_v6_0_get_vm_pde
- gmc_v6_0_set_fault_enable_default
- gmc_v6_0_set_prt
- gmc_v6_0_gart_enable
- gmc_v6_0_gart_init
- gmc_v6_0_gart_disable
- gmc_v6_0_vm_decode_fault
- gmc_v6_0_convert_vram_type
- gmc_v6_0_early_init
- gmc_v6_0_late_init
- gmc_v6_0_get_vbios_fb_size
- gmc_v6_0_sw_init
- gmc_v6_0_sw_fini
- gmc_v6_0_hw_init
- gmc_v6_0_hw_fini
- gmc_v6_0_suspend
- gmc_v6_0_resume
- gmc_v6_0_is_idle
- gmc_v6_0_wait_for_idle
- gmc_v6_0_soft_reset
- gmc_v6_0_vm_fault_interrupt_state
- gmc_v6_0_process_interrupt
- gmc_v6_0_set_clockgating_state
- gmc_v6_0_set_powergating_state
- gmc_v6_0_set_gmc_funcs
- gmc_v6_0_set_irq_funcs
  24 #include <linux/firmware.h>
  25 #include <linux/module.h>
  26 #include <linux/pci.h>
  27 
  28 #include <drm/drm_cache.h>
  29 #include "amdgpu.h"
  30 #include "gmc_v6_0.h"
  31 #include "amdgpu_ucode.h"
  32 #include "amdgpu_gem.h"
  33 
  34 #include "bif/bif_3_0_d.h"
  35 #include "bif/bif_3_0_sh_mask.h"
  36 #include "oss/oss_1_0_d.h"
  37 #include "oss/oss_1_0_sh_mask.h"
  38 #include "gmc/gmc_6_0_d.h"
  39 #include "gmc/gmc_6_0_sh_mask.h"
  40 #include "dce/dce_6_0_d.h"
  41 #include "dce/dce_6_0_sh_mask.h"
  42 #include "si_enums.h"
  43 
  44 static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
  45 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
  46 static int gmc_v6_0_wait_for_idle(void *handle);
  47 
  48 MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
  49 MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
  50 MODULE_FIRMWARE("amdgpu/verde_mc.bin");
  51 MODULE_FIRMWARE("amdgpu/oland_mc.bin");
  52 MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
  53 MODULE_FIRMWARE("amdgpu/si58_mc.bin");
  54 
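     /* memory type field of MC_SEQ_MISC0, used below to report the VRAM type */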
  55 #define MC_SEQ_MISC0__MT__MASK   0xf0000000
  56 #define MC_SEQ_MISC0__MT__GDDR1  0x10000000
  57 #define MC_SEQ_MISC0__MT__DDR2   0x20000000
  58 #define MC_SEQ_MISC0__MT__GDDR3  0x30000000
  59 #define MC_SEQ_MISC0__MT__GDDR4  0x40000000
  60 #define MC_SEQ_MISC0__MT__GDDR5  0x50000000
  61 #define MC_SEQ_MISC0__MT__HBM    0x60000000
  62 #define MC_SEQ_MISC0__MT__DDR3   0xB0000000
  63 
  64 
  65 static const u32 crtc_offsets[6] =
  66 {
  67         SI_CRTC0_REGISTER_OFFSET,
  68         SI_CRTC1_REGISTER_OFFSET,
  69         SI_CRTC2_REGISTER_OFFSET,
  70         SI_CRTC3_REGISTER_OFFSET,
  71         SI_CRTC4_REGISTER_OFFSET,
  72         SI_CRTC5_REGISTER_OFFSET
  73 };
  74 
  75 static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
  76 {
  77         u32 blackout;
  78 
  79         gmc_v6_0_wait_for_idle((void *)adev);
  80 
  81         blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
  82         if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
  83                 /* Block CPU access */
  84                 WREG32(mmBIF_FB_EN, 0);
  85                 /* blackout the MC */
  86                 blackout = REG_SET_FIELD(blackout,
  87                                          MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
  88                 WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
  89         }
  90         /* wait for the MC to settle */
  91         udelay(100);
  92 
  93 }
  94 
  95 static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
  96 {
  97         u32 tmp;
  98 
  99         /* unblackout the MC */
 100         tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
 101         tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
 102         WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
 103         /* allow CPU access */
 104         tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
 105         tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
 106         WREG32(mmBIF_FB_EN, tmp);
 107 }
 108 
 109 static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
 110 {
 111         const char *chip_name;
 112         char fw_name[30];
 113         int err;
 114         bool is_58_fw = false;
 115 
 116         DRM_DEBUG("\n");
 117 
 118         switch (adev->asic_type) {
 119         case CHIP_TAHITI:
 120                 chip_name = "tahiti";
 121                 break;
 122         case CHIP_PITCAIRN:
 123                 chip_name = "pitcairn";
 124                 break;
 125         case CHIP_VERDE:
 126                 chip_name = "verde";
 127                 break;
 128         case CHIP_OLAND:
 129                 chip_name = "oland";
 130                 break;
 131         case CHIP_HAINAN:
 132                 chip_name = "hainan";
 133                 break;
 134         default: BUG();
 135         }
 136 
 137         /* this memory configuration requires special firmware */
 138         if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
 139                 is_58_fw = true;
 140 
 141         if (is_58_fw)
 142                 snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
 143         else
 144                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
 145         err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
 146         if (err)
 147                 goto out;
 148 
 149         err = amdgpu_ucode_validate(adev->gmc.fw);
 150 
 151 out:
 152         if (err) {
 153                 dev_err(adev->dev,
 154                        "si_mc: Failed to load firmware \"%s\"\n",
 155                        fw_name);
 156                 release_firmware(adev->gmc.fw);
 157                 adev->gmc.fw = NULL;
 158         }
 159         return err;
 160 }
 161 
 162 static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
 163 {
 164         const __le32 *new_fw_data = NULL;
 165         u32 running;
 166         const __le32 *new_io_mc_regs = NULL;
 167         int i, regs_size, ucode_size;
 168         const struct mc_firmware_header_v1_0 *hdr;
 169 
 170         if (!adev->gmc.fw)
 171                 return -EINVAL;
 172 
 173         hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 174 
 175         amdgpu_ucode_print_mc_hdr(&hdr->header);
 176 
 177         adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 178         regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 179         new_io_mc_regs = (const __le32 *)
 180                 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 181         ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 182         new_fw_data = (const __le32 *)
 183                 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 184 
 185         running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
 186 
 187         if (running == 0) {
 188 
 189                 /* reset the engine and set to writable */
 190                 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 191                 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 192 
 193                 /* load mc io regs */
 194                 for (i = 0; i < regs_size; i++) {
 195                         WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
 196                         WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
 197                 }
 198                 /* load the MC ucode */
 199                 for (i = 0; i < ucode_size; i++) {
 200                         WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
 201                 }
 202 
 203                 /* put the engine back into the active state */
 204                 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 205                 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
 206                 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 207 
 208                 /* wait for training to complete */
 209                 for (i = 0; i < adev->usec_timeout; i++) {
 210                         if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
 211                                 break;
 212                         udelay(1);
 213                 }
 214                 for (i = 0; i < adev->usec_timeout; i++) {
 215                         if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
 216                                 break;
 217                         udelay(1);
 218                 }
 219 
 220         }
 221 
 222         return 0;
 223 }
 224 
 225 static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
 226                                        struct amdgpu_gmc *mc)
 227 {
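             /* MC_VM_FB_LOCATION holds the framebuffer base in 16MB units */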
 228         u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
 229         base <<= 24;
 230 
 231         amdgpu_gmc_vram_location(adev, mc, base);
 232         amdgpu_gmc_gart_location(adev, mc);
 233 }
 234 
 235 static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 236 {
 237         int i, j;
 238 
 239         /* Initialize HDP */
 240         for (i = 0, j = 0; i < 32; i++, j += 0x6) {
 241                 WREG32((0xb05 + j), 0x00000000);
 242                 WREG32((0xb06 + j), 0x00000000);
 243                 WREG32((0xb07 + j), 0x00000000);
 244                 WREG32((0xb08 + j), 0x00000000);
 245                 WREG32((0xb09 + j), 0x00000000);
 246         }
 247         WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 248 
 249         if (gmc_v6_0_wait_for_idle((void *)adev)) {
 250                 dev_warn(adev->dev, "Wait for MC idle timed out!\n");
 251         }
 252 
 253         if (adev->mode_info.num_crtc) {
 254                 u32 tmp;
 255 
 256                 /* Lockout access through VGA aperture */
 257                 tmp = RREG32(mmVGA_HDP_CONTROL);
 258                 tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
 259                 WREG32(mmVGA_HDP_CONTROL, tmp);
 260 
 261                 /* disable VGA render */
 262                 tmp = RREG32(mmVGA_RENDER_CONTROL);
 263                 tmp &= ~VGA_VSTATUS_CNTL;
 264                 WREG32(mmVGA_RENDER_CONTROL, tmp);
 265         }
 266         /* Update configuration */
 267         WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 268                adev->gmc.vram_start >> 12);
 269         WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 270                adev->gmc.vram_end >> 12);
 271         WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 272                adev->vram_scratch.gpu_addr >> 12);
 273         WREG32(mmMC_VM_AGP_BASE, 0);
 274         WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
 275         WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
 276 
 277         if (gmc_v6_0_wait_for_idle((void *)adev)) {
 278                 dev_warn(adev->dev, "Wait for MC idle timed out!\n");
 279         }
 280 }
 281 
 282 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 283 {
 284 
 285         u32 tmp;
 286         int chansize, numchan;
 287         int r;
 288 
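             /* work out the VRAM bus width from the DRAM channel size and channel count */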
 289         tmp = RREG32(mmMC_ARB_RAMCFG);
 290         if (tmp & (1 << 11)) {
 291                 chansize = 16;
 292         } else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
 293                 chansize = 64;
 294         } else {
 295                 chansize = 32;
 296         }
 297         tmp = RREG32(mmMC_SHARED_CHMAP);
 298         switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 299         case 0:
 300         default:
 301                 numchan = 1;
 302                 break;
 303         case 1:
 304                 numchan = 2;
 305                 break;
 306         case 2:
 307                 numchan = 4;
 308                 break;
 309         case 3:
 310                 numchan = 8;
 311                 break;
 312         case 4:
 313                 numchan = 3;
 314                 break;
 315         case 5:
 316                 numchan = 6;
 317                 break;
 318         case 6:
 319                 numchan = 10;
 320                 break;
 321         case 7:
 322                 numchan = 12;
 323                 break;
 324         case 8:
 325                 numchan = 16;
 326                 break;
 327         }
 328         adev->gmc.vram_width = numchan * chansize;
 329         /* size in MB on si */
 330         adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 331         adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 332 
 333         if (!(adev->flags & AMD_IS_APU)) {
 334                 r = amdgpu_device_resize_fb_bar(adev);
 335                 if (r)
 336                         return r;
 337         }
 338         adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 339         adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 340         adev->gmc.visible_vram_size = adev->gmc.aper_size;
 341 
 342         /* set the gart size */
 343         if (amdgpu_gart_size == -1) {
 344                 switch (adev->asic_type) {
 345                 case CHIP_HAINAN:    /* no MM engines */
 346                 default:
 347                         adev->gmc.gart_size = 256ULL << 20;
 348                         break;
 349                 case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
 350                 case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
 351                 case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
 352                 case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
 353                         adev->gmc.gart_size = 1024ULL << 20;
 354                         break;
 355                 }
 356         } else {
 357                 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 358         }
 359 
 360         gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
 361 
 362         return 0;
 363 }
 364 
 365 static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 366                                         uint32_t vmhub, uint32_t flush_type)
 367 {
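             /* each bit of VM_INVALIDATE_REQUEST flushes the TLB entries of one VMID */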
 368         WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 369 }
 370 
 371 static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 372                                             unsigned vmid, uint64_t pd_addr)
 373 {
 374         uint32_t reg;
 375 
 376         /* write new base address */
 377         if (vmid < 8)
 378                 reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
 379         else
 380                 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
 381         amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
 382 
 383         /* bits 0-15 are the VM contexts0-15 */
 384         amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
 385 
 386         return pd_addr;
 387 }
 388 
 389 static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
 390                                           uint32_t flags)
 391 {
 392         uint64_t pte_flag = 0;
 393 
 394         if (flags & AMDGPU_VM_PAGE_READABLE)
 395                 pte_flag |= AMDGPU_PTE_READABLE;
 396         if (flags & AMDGPU_VM_PAGE_WRITEABLE)
 397                 pte_flag |= AMDGPU_PTE_WRITEABLE;
 398         if (flags & AMDGPU_VM_PAGE_PRT)
 399                 pte_flag |= AMDGPU_PTE_PRT;
 400 
 401         return pte_flag;
 402 }
 403 
 404 static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
 405                                 uint64_t *addr, uint64_t *flags)
 406 {
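             /* SI PDEs carry only a 40-bit, 4K-aligned page table address, so just sanity-check it */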
 407         BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 408 }
 409 
 410 static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
 411                                               bool value)
 412 {
 413         u32 tmp;
 414 
 415         tmp = RREG32(mmVM_CONTEXT1_CNTL);
 416         tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 417                             RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 418         tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 419                             DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 420         tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 421                             PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 422         tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 423                             VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 424         tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 425                             READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 426         tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 427                             WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 428         WREG32(mmVM_CONTEXT1_CNTL, tmp);
 429 }
 430 
 431 /**
 432  * gmc_v6_0_set_prt - set PRT VM fault
 433  *
 434  * @adev: amdgpu_device pointer
 435  * @enable: enable/disable VM fault handling for PRT
 436  */
 437 static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 438 {
 439         u32 tmp;
 440 
 441         if (enable && !adev->gmc.prt_warning) {
 442                 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
 443                 adev->gmc.prt_warning = true;
 444         }
 445 
 446         tmp = RREG32(mmVM_PRT_CNTL);
 447         tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 448                             CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
 449                             enable);
 450         tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 451                             TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
 452                             enable);
 453         tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 454                             L2_CACHE_STORE_INVALID_ENTRIES,
 455                             enable);
 456         tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 457                             L1_TLB_STORE_INVALID_ENTRIES,
 458                             enable);
 459         WREG32(mmVM_PRT_CNTL, tmp);
 460 
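             /* make the PRT apertures cover everything except the reserved pages at the bottom and top of the address space */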
 461         if (enable) {
 462                 uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
 463                 uint32_t high = adev->vm_manager.max_pfn -
 464                         (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
 465 
 466                 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
 467                 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
 468                 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
 469                 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
 470                 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
 471                 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
 472                 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
 473                 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
 474         } else {
 475                 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
 476                 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
 477                 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
 478                 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
 479                 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
 480                 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
 481                 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
 482                 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
 483         }
 484 }
 485 
 486 static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 487 {
 488         uint64_t table_addr;
 489         int r, i;
 490         u32 field;
 491 
 492         if (adev->gart.bo == NULL) {
 493                 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 494                 return -EINVAL;
 495         }
 496         r = amdgpu_gart_table_vram_pin(adev);
 497         if (r)
 498                 return r;
 499 
 500         table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 501 
 502         /* Setup TLB control */
 503         WREG32(mmMC_VM_MX_L1_TLB_CNTL,
 504                (0xA << 7) |
 505                MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
 506                MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
 507                MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
 508                MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
 509                (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
 510         /* Setup L2 cache */
 511         WREG32(mmVM_L2_CNTL,
 512                VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
 513                VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
 514                VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 515                VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 516                (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
 517                (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
 518         WREG32(mmVM_L2_CNTL2,
 519                VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
 520                VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
 521 
 522         field = adev->vm_manager.fragment_size;
 523         WREG32(mmVM_L2_CNTL3,
 524                VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
 525                (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
 526                (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 527         /* setup context0 */
 528         WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
 529         WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
 530         WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
 531         WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 532                         (u32)(adev->dummy_page_addr >> 12));
 533         WREG32(mmVM_CONTEXT0_CNTL2, 0);
 534         WREG32(mmVM_CONTEXT0_CNTL,
 535                VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
 536                (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
 537                VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
 538 
 539         WREG32(0x575, 0);
 540         WREG32(0x576, 0);
 541         WREG32(0x577, 0);
 542 
 543         /* empty context1-15 */
 544         /* set vm size, [0..max_pfn) */
 545         WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
 546         WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
 547         /* Assign the pt base to something valid for now; the real
 548          * per-VM page table base is written later through
 549          * gmc_v6_0_emit_flush_gpu_tlb()
 550          */
 551         for (i = 1; i < 16; i++) {
 552                 if (i < 8)
 553                         WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
 554                                table_addr >> 12);
 555                 else
 556                         WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
 557                                table_addr >> 12);
 558         }
 559 
 560         /* enable context1-15 */
 561         WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 562                (u32)(adev->dummy_page_addr >> 12));
 563         WREG32(mmVM_CONTEXT1_CNTL2, 4);
 564         WREG32(mmVM_CONTEXT1_CNTL,
 565                VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
 566                (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
 567                ((adev->vm_manager.block_size - 9)
 568                << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
 569         if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 570                 gmc_v6_0_set_fault_enable_default(adev, false);
 571         else
 572                 gmc_v6_0_set_fault_enable_default(adev, true);
 573 
 574         gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
 575         dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
 576                  (unsigned)(adev->gmc.gart_size >> 20),
 577                  (unsigned long long)table_addr);
 578         adev->gart.ready = true;
 579         return 0;
 580 }
 581 
 582 static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
 583 {
 584         int r;
 585 
 586         if (adev->gart.bo) {
 587                 dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
 588                 return 0;
 589         }
 590         r = amdgpu_gart_init(adev);
 591         if (r)
 592                 return r;
 593         adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 594         adev->gart.gart_pte_flags = 0;
 595         return amdgpu_gart_table_vram_alloc(adev);
 596 }
 597 
 598 static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
 599 {
 611         /* Disable all tables */
 612         WREG32(mmVM_CONTEXT0_CNTL, 0);
 613         WREG32(mmVM_CONTEXT1_CNTL, 0);
 614         /* Setup TLB control */
 615         WREG32(mmMC_VM_MX_L1_TLB_CNTL,
 616                MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
 617                (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
 618         /* Setup L2 cache */
 619         WREG32(mmVM_L2_CNTL,
 620                VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 621                VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 622                (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
 623                (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
 624         WREG32(mmVM_L2_CNTL2, 0);
 625         WREG32(mmVM_L2_CNTL3,
 626                VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
 627                (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 628         amdgpu_gart_table_vram_unpin(adev);
 629 }
 630 
 631 static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
 632                                      u32 status, u32 addr, u32 mc_client)
 633 {
 634         u32 mc_id;
 635         u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 636         u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 637                                         PROTECTIONS);
 638         char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
 639                 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 640 
 641         mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 642                               MEMORY_CLIENT_ID);
 643 
 644         dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
 645                protections, vmid, addr,
 646                REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 647                              MEMORY_CLIENT_RW) ?
 648                "write" : "read", block, mc_client, mc_id);
 649 }
 650 
 778 static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
 779 {
 780         switch (mc_seq_vram_type) {
 781         case MC_SEQ_MISC0__MT__GDDR1:
 782                 return AMDGPU_VRAM_TYPE_GDDR1;
 783         case MC_SEQ_MISC0__MT__DDR2:
 784                 return AMDGPU_VRAM_TYPE_DDR2;
 785         case MC_SEQ_MISC0__MT__GDDR3:
 786                 return AMDGPU_VRAM_TYPE_GDDR3;
 787         case MC_SEQ_MISC0__MT__GDDR4:
 788                 return AMDGPU_VRAM_TYPE_GDDR4;
 789         case MC_SEQ_MISC0__MT__GDDR5:
 790                 return AMDGPU_VRAM_TYPE_GDDR5;
 791         case MC_SEQ_MISC0__MT__DDR3:
 792                 return AMDGPU_VRAM_TYPE_DDR3;
 793         default:
 794                 return AMDGPU_VRAM_TYPE_UNKNOWN;
 795         }
 796 }
 797 
 798 static int gmc_v6_0_early_init(void *handle)
 799 {
 800         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 801 
 802         gmc_v6_0_set_gmc_funcs(adev);
 803         gmc_v6_0_set_irq_funcs(adev);
 804 
 805         return 0;
 806 }
 807 
 808 static int gmc_v6_0_late_init(void *handle)
 809 {
 810         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 811 
 812         amdgpu_bo_late_init(adev);
 813 
 814         if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
 815                 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 816         else
 817                 return 0;
 818 }
 819 
 820 static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
 821 {
 822         u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
 823         unsigned size;
 824 
 825         if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
 826                 size = 9 * 1024 * 1024; 
 827         } else {
 828                 u32 viewport = RREG32(mmVIEWPORT_SIZE);
 829                 size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
 830                         REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
 831                         4);
 832         }
 833         /* return 0 if the pre-OS buffer uses up most of vram */
 834         if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
 835                 return 0;
 836         return size;
 837 }
 838 
 839 static int gmc_v6_0_sw_init(void *handle)
 840 {
 841         int r;
 842         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 843 
 844         adev->num_vmhubs = 1;
 845 
 846         if (adev->flags & AMD_IS_APU) {
 847                 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 848         } else {
 849                 u32 tmp = RREG32(mmMC_SEQ_MISC0);
 850                 tmp &= MC_SEQ_MISC0__MT__MASK;
 851                 adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
 852         }
 853 
 854         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
 855         if (r)
 856                 return r;
 857 
 858         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
 859         if (r)
 860                 return r;
 861 
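             /* pick the VM space size and fragment size; SI uses 40-bit MC addresses (mask set below) */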
 862         amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
 863 
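             /* the MC uses 40-bit internal addresses on SI */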
 864         adev->gmc.mc_mask = 0xffffffffffULL;
 865 
 866         r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
 867         if (r) {
 868                 dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
 869                 return r;
 870         }
 871         adev->need_swiotlb = drm_need_swiotlb(44);
 872 
 873         r = gmc_v6_0_init_microcode(adev);
 874         if (r) {
 875                 dev_err(adev->dev, "Failed to load mc firmware!\n");
 876                 return r;
 877         }
 878 
 879         r = gmc_v6_0_mc_init(adev);
 880         if (r)
 881                 return r;
 882 
 883         adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);
 884 
 885         r = amdgpu_bo_init(adev);
 886         if (r)
 887                 return r;
 888 
 889         r = gmc_v6_0_gart_init(adev);
 890         if (r)
 891                 return r;
 892 
 893         /*
 894          * number of VMs
 895          * VMID 0 is reserved for System
 896          * amdgpu graphics/compute will use VMIDs 1-7
 897          * amdkfd will use VMIDs 8-15
 898          */
 899         adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
 900         amdgpu_vm_manager_init(adev);
 901 
 902         /* base offset of vram pages */
 903         if (adev->flags & AMD_IS_APU) {
 904                 u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 905 
 906                 tmp <<= 22;
 907                 adev->vm_manager.vram_base_offset = tmp;
 908         } else {
 909                 adev->vm_manager.vram_base_offset = 0;
 910         }
 911 
 912         return 0;
 913 }
 914 
 915 static int gmc_v6_0_sw_fini(void *handle)
 916 {
 917         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 918 
 919         amdgpu_gem_force_release(adev);
 920         amdgpu_vm_manager_fini(adev);
 921         amdgpu_gart_table_vram_free(adev);
 922         amdgpu_bo_fini(adev);
 923         amdgpu_gart_fini(adev);
 924         release_firmware(adev->gmc.fw);
 925         adev->gmc.fw = NULL;
 926 
 927         return 0;
 928 }
 929 
 930 static int gmc_v6_0_hw_init(void *handle)
 931 {
 932         int r;
 933         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 934 
 935         gmc_v6_0_mc_program(adev);
 936 
 937         if (!(adev->flags & AMD_IS_APU)) {
 938                 r = gmc_v6_0_mc_load_microcode(adev);
 939                 if (r) {
 940                         dev_err(adev->dev, "Failed to load MC firmware!\n");
 941                         return r;
 942                 }
 943         }
 944 
 945         r = gmc_v6_0_gart_enable(adev);
 946         if (r)
 947                 return r;
 948 
 949         return r;
 950 }
 951 
 952 static int gmc_v6_0_hw_fini(void *handle)
 953 {
 954         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 955 
 956         amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 957         gmc_v6_0_gart_disable(adev);
 958 
 959         return 0;
 960 }
 961 
 962 static int gmc_v6_0_suspend(void *handle)
 963 {
 964         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 965 
 966         gmc_v6_0_hw_fini(adev);
 967 
 968         return 0;
 969 }
 970 
 971 static int gmc_v6_0_resume(void *handle)
 972 {
 973         int r;
 974         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 975 
 976         r = gmc_v6_0_hw_init(adev);
 977         if (r)
 978                 return r;
 979 
 980         amdgpu_vmid_reset_all(adev);
 981 
 982         return 0;
 983 }
 984 
 985 static bool gmc_v6_0_is_idle(void *handle)
 986 {
 987         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 988         u32 tmp = RREG32(mmSRBM_STATUS);
 989 
 990         if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 991                    SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
 992                 return false;
 993 
 994         return true;
 995 }
 996 
 997 static int gmc_v6_0_wait_for_idle(void *handle)
 998 {
 999         unsigned i;
1000         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1001 
1002         for (i = 0; i < adev->usec_timeout; i++) {
1003                 if (gmc_v6_0_is_idle(handle))
1004                         return 0;
1005                 udelay(1);
1006         }
1007         return -ETIMEDOUT;
1008 
1009 }
1010 
1011 static int gmc_v6_0_soft_reset(void *handle)
1012 {
1013         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1014         u32 srbm_soft_reset = 0;
1015         u32 tmp = RREG32(mmSRBM_STATUS);
1016 
1017         if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1018                 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1019                                                 SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1020 
1021         if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1022                    SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1023                 if (!(adev->flags & AMD_IS_APU))
1024                         srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1025                                                         SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1026         }
1027 
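             /* stop the MC, pulse the selected SRBM_SOFT_RESET bits, then resume the MC */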
1028         if (srbm_soft_reset) {
1029                 gmc_v6_0_mc_stop(adev);
1030                 if (gmc_v6_0_wait_for_idle(adev)) {
1031                         dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
1032                 }
1033 
1034 
1035                 tmp = RREG32(mmSRBM_SOFT_RESET);
1036                 tmp |= srbm_soft_reset;
1037                 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1038                 WREG32(mmSRBM_SOFT_RESET, tmp);
1039                 tmp = RREG32(mmSRBM_SOFT_RESET);
1040 
1041                 udelay(50);
1042 
1043                 tmp &= ~srbm_soft_reset;
1044                 WREG32(mmSRBM_SOFT_RESET, tmp);
1045                 tmp = RREG32(mmSRBM_SOFT_RESET);
1046 
1047                 udelay(50);
1048 
1049                 gmc_v6_0_mc_resume(adev);
1050                 udelay(50);
1051         }
1052 
1053         return 0;
1054 }
1055 
1056 static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1057                                              struct amdgpu_irq_src *src,
1058                                              unsigned type,
1059                                              enum amdgpu_interrupt_state state)
1060 {
1061         u32 tmp;
1062         u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1063                     VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1064                     VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1065                     VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1066                     VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1067                     VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1068 
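             /* enable or disable every protection fault interrupt source in VM contexts 0 and 1 */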
1069         switch (state) {
1070         case AMDGPU_IRQ_STATE_DISABLE:
1071                 tmp = RREG32(mmVM_CONTEXT0_CNTL);
1072                 tmp &= ~bits;
1073                 WREG32(mmVM_CONTEXT0_CNTL, tmp);
1074                 tmp = RREG32(mmVM_CONTEXT1_CNTL);
1075                 tmp &= ~bits;
1076                 WREG32(mmVM_CONTEXT1_CNTL, tmp);
1077                 break;
1078         case AMDGPU_IRQ_STATE_ENABLE:
1079                 tmp = RREG32(mmVM_CONTEXT0_CNTL);
1080                 tmp |= bits;
1081                 WREG32(mmVM_CONTEXT0_CNTL, tmp);
1082                 tmp = RREG32(mmVM_CONTEXT1_CNTL);
1083                 tmp |= bits;
1084                 WREG32(mmVM_CONTEXT1_CNTL, tmp);
1085                 break;
1086         default:
1087                 break;
1088         }
1089 
1090         return 0;
1091 }
1092 
1093 static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
1094                                       struct amdgpu_irq_src *source,
1095                                       struct amdgpu_iv_entry *entry)
1096 {
1097         u32 addr, status;
1098 
1099         addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1100         status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
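             /* clear the latched fault address and status (bit 0 of VM_CONTEXT1_CNTL2) */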
1101         WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1102 
1103         if (!addr && !status)
1104                 return 0;
1105 
1106         if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1107                 gmc_v6_0_set_fault_enable_default(adev, false);
1108 
1109         if (printk_ratelimit()) {
1110                 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1111                         entry->src_id, entry->src_data[0]);
1112                 dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1113                         addr);
1114                 dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1115                         status);
1116                 gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
1117         }
1118 
1119         return 0;
1120 }
1121 
1122 static int gmc_v6_0_set_clockgating_state(void *handle,
1123                                           enum amd_clockgating_state state)
1124 {
1125         return 0;
1126 }
1127 
1128 static int gmc_v6_0_set_powergating_state(void *handle,
1129                                           enum amd_powergating_state state)
1130 {
1131         return 0;
1132 }
1133 
1134 static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
1135         .name = "gmc_v6_0",
1136         .early_init = gmc_v6_0_early_init,
1137         .late_init = gmc_v6_0_late_init,
1138         .sw_init = gmc_v6_0_sw_init,
1139         .sw_fini = gmc_v6_0_sw_fini,
1140         .hw_init = gmc_v6_0_hw_init,
1141         .hw_fini = gmc_v6_0_hw_fini,
1142         .suspend = gmc_v6_0_suspend,
1143         .resume = gmc_v6_0_resume,
1144         .is_idle = gmc_v6_0_is_idle,
1145         .wait_for_idle = gmc_v6_0_wait_for_idle,
1146         .soft_reset = gmc_v6_0_soft_reset,
1147         .set_clockgating_state = gmc_v6_0_set_clockgating_state,
1148         .set_powergating_state = gmc_v6_0_set_powergating_state,
1149 };
1150 
1151 static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
1152         .flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
1153         .emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
1154         .set_prt = gmc_v6_0_set_prt,
1155         .get_vm_pde = gmc_v6_0_get_vm_pde,
1156         .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
1157 };
1158 
1159 static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
1160         .set = gmc_v6_0_vm_fault_interrupt_state,
1161         .process = gmc_v6_0_process_interrupt,
1162 };
1163 
1164 static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
1165 {
1166         adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
1167 }
1168 
1169 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
1170 {
1171         adev->gmc.vm_fault.num_types = 1;
1172         adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
1173 }
1174 
1175 const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
1176 {
1177         .type = AMD_IP_BLOCK_TYPE_GMC,
1178         .major = 6,
1179         .minor = 0,
1180         .rev = 0,
1181         .funcs = &gmc_v6_0_ip_funcs,
1182 };