Lines Matching refs:adev in drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

40 static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
41 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
60 static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) in gmc_v7_0_init_golden_registers() argument
62 switch (adev->asic_type) { in gmc_v7_0_init_golden_registers()
64 amdgpu_program_register_sequence(adev, in gmc_v7_0_init_golden_registers()
67 amdgpu_program_register_sequence(adev, in gmc_v7_0_init_golden_registers()
85 int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev) in gmc_v7_0_mc_wait_for_idle() argument
90 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v7_0_mc_wait_for_idle()
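
gmc_v7_0_mc_wait_for_idle() is the driver's standard bounded-poll idiom: read a status register once per microsecond for at most adev->usec_timeout iterations, returning success as soon as the busy bits clear. A minimal user-space sketch of the same pattern; read_status(), MC_BUSY_MASK, and usleep() are stand-ins for the driver's masked status-register read and udelay(1):

    #include <stdio.h>
    #include <unistd.h>   /* usleep() stands in for the kernel's udelay() */

    /* Hypothetical status read; the driver masks MC busy bits out of a
     * status register via RREG32(). */
    static unsigned int read_status(void)
    {
        static int calls;
        return (++calls < 5) ? 0x200u : 0u;  /* busy bit clears after a few polls */
    }

    #define MC_BUSY_MASK 0x200u   /* placeholder for the real busy-bit mask */

    static int mc_wait_for_idle(int usec_timeout)
    {
        for (int i = 0; i < usec_timeout; i++) {
            if (!(read_status() & MC_BUSY_MASK))
                return 0;    /* idle */
            usleep(1);       /* the driver uses udelay(1) */
        }
        return -1;           /* timed out */
    }

    int main(void)
    {
        printf("wait_for_idle: %d\n", mc_wait_for_idle(100));
        return 0;
    }
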
100 void gmc_v7_0_mc_stop(struct amdgpu_device *adev, in gmc_v7_0_mc_stop() argument
105 if (adev->mode_info.num_crtc) in gmc_v7_0_mc_stop()
106 amdgpu_display_stop_mc_access(adev, save); in gmc_v7_0_mc_stop()
108 amdgpu_asic_wait_for_mc_idle(adev); in gmc_v7_0_mc_stop()
123 void gmc_v7_0_mc_resume(struct amdgpu_device *adev, in gmc_v7_0_mc_resume() argument
137 if (adev->mode_info.num_crtc) in gmc_v7_0_mc_resume()
138 amdgpu_display_resume_mc_access(adev, save); in gmc_v7_0_mc_resume()
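
mc_stop()/mc_resume() form the bracket around any MC reprogramming: stop display-controller access to memory, wait for the MC to drain, make the change, then restore. A schematic of that bracket, assuming a simplified save structure in place of struct amdgpu_mode_mc_save:

    #include <stdio.h>

    /* The driver passes a save struct so mc_resume() can restore exactly
     * what mc_stop() disabled; this one-field version mirrors the shape. */
    struct mc_save { int crtc_was_enabled; };

    static void mc_stop(struct mc_save *save)
    {
        save->crtc_was_enabled = 1;   /* stop display MC access, wait for idle */
        printf("MC access stopped\n");
    }

    static void mc_resume(const struct mc_save *save)
    {
        if (save->crtc_was_enabled)
            printf("MC access restored\n");
    }

    int main(void)
    {
        struct mc_save save;

        mc_stop(&save);
        /* ... safe to reprogram FB location / apertures here ... */
        mc_resume(&save);
        return 0;
    }
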
150 static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) in gmc_v7_0_init_microcode() argument
158 switch (adev->asic_type) { in gmc_v7_0_init_microcode()
174 if (adev->asic_type == CHIP_TOPAZ) in gmc_v7_0_init_microcode()
179 err = request_firmware(&adev->mc.fw, fw_name, adev->dev); in gmc_v7_0_init_microcode()
182 err = amdgpu_ucode_validate(adev->mc.fw); in gmc_v7_0_init_microcode()
189 release_firmware(adev->mc.fw); in gmc_v7_0_init_microcode()
190 adev->mc.fw = NULL; in gmc_v7_0_init_microcode()
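
gmc_v7_0_init_microcode() selects a chip-specific firmware name (the CIK parts here load radeon/<chip>_mc.bin), fetches it with request_firmware(), validates it, and on any failure releases the blob and NULLs adev->mc.fw so later stages can cheaply test whether MC firmware is available. A user-space sketch of that cleanup discipline; load_fw() and validate_fw() are stand-ins for request_firmware() and amdgpu_ucode_validate():

    #include <stdio.h>
    #include <stdlib.h>

    struct fw_blob { char name[64]; };

    /* Stand-ins: load always succeeds unless allocation fails, validate
     * accepts anything non-NULL. */
    static int load_fw(struct fw_blob **out, const char *name)
    {
        *out = calloc(1, sizeof(**out));
        if (!*out)
            return -1;
        snprintf((*out)->name, sizeof((*out)->name), "%s", name);
        return 0;
    }
    static int validate_fw(const struct fw_blob *fw) { return fw ? 0 : -1; }

    static struct fw_blob *mc_fw;   /* plays the role of adev->mc.fw */

    static int init_microcode(const char *chip)
    {
        char fw_name[96];
        int err;

        snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip);
        err = load_fw(&mc_fw, fw_name);
        if (!err)
            err = validate_fw(mc_fw);
        if (err) {
            fprintf(stderr, "mc: failed to load %s\n", fw_name);
            free(mc_fw);    /* release_firmware() in the driver */
            mc_fw = NULL;   /* later tests of the pointer now fail cleanly */
        }
        return err;
    }

    int main(void) { return init_microcode("bonaire"); }
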
203 static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) in gmc_v7_0_mc_load_microcode() argument
211 if (!adev->mc.fw) in gmc_v7_0_mc_load_microcode()
214 hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; in gmc_v7_0_mc_load_microcode()
217 adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version); in gmc_v7_0_mc_load_microcode()
220 (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); in gmc_v7_0_mc_load_microcode()
223 (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in gmc_v7_0_mc_load_microcode()
252 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v7_0_mc_load_microcode()
258 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v7_0_mc_load_microcode()
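
gmc_v7_0_mc_load_microcode() never hardcodes offsets: the firmware header stores the ucode version plus the byte offsets of the io-debug register array and the ucode array as little-endian u32s, and the loader adds them to the blob base. A sketch of that offset arithmetic with a hand-rolled le32_to_cpu(); the three-field header is a simplified stand-in for struct mc_firmware_header_v1_0, and the test blob assumes a little-endian host:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in: all fields are little-endian u32. */
    struct mc_fw_header {
        uint32_t ucode_version;
        uint32_t io_debug_array_offset_bytes;
        uint32_t ucode_array_offset_bytes;
    };

    static uint32_t le32(uint32_t v)   /* le32_to_cpu() stand-in */
    {
        const uint8_t *b = (const uint8_t *)&v;
        return b[0] | b[1] << 8 | (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
    }

    int main(void)
    {
        /* Build a fake blob (header, then data) on a little-endian host. */
        uint8_t blob[64] = {0};
        struct mc_fw_header hdr = { 0x1000, sizeof(struct mc_fw_header), 32 };
        memcpy(blob, &hdr, sizeof(hdr));

        /* What the driver does with adev->mc.fw->data: */
        struct mc_fw_header h;
        memcpy(&h, blob, sizeof(h));   /* avoid unaligned access */
        const uint8_t *io_mc_regs = blob + le32(h.io_debug_array_offset_bytes);
        const uint8_t *fw_data    = blob + le32(h.ucode_array_offset_bytes);

        printf("version %#x, io at +%td, ucode at +%td\n",
               le32(h.ucode_version), io_mc_regs - blob, fw_data - blob);
        return 0;
    }
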
272 static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v7_0_vram_gtt_location() argument
277 dev_warn(adev->dev, "limiting VRAM\n"); in gmc_v7_0_vram_gtt_location()
281 amdgpu_vram_location(adev, &adev->mc, 0); in gmc_v7_0_vram_gtt_location()
282 adev->mc.gtt_base_align = 0; in gmc_v7_0_vram_gtt_location()
283 amdgpu_gtt_location(adev, mc); in gmc_v7_0_vram_gtt_location()
294 static void gmc_v7_0_mc_program(struct amdgpu_device *adev) in gmc_v7_0_mc_program() argument
310 if (adev->mode_info.num_crtc) in gmc_v7_0_mc_program()
311 amdgpu_display_set_vga_render_state(adev, false); in gmc_v7_0_mc_program()
313 gmc_v7_0_mc_stop(adev, &save); in gmc_v7_0_mc_program()
314 if (amdgpu_asic_wait_for_mc_idle(adev)) { in gmc_v7_0_mc_program()
315 dev_warn(adev->dev, "Wait for MC idle timed out!\n"); in gmc_v7_0_mc_program()
319 adev->mc.vram_start >> 12); in gmc_v7_0_mc_program()
321 adev->mc.vram_end >> 12); in gmc_v7_0_mc_program()
323 adev->vram_scratch.gpu_addr >> 12); in gmc_v7_0_mc_program()
324 tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; in gmc_v7_0_mc_program()
325 tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); in gmc_v7_0_mc_program()
328 WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); in gmc_v7_0_mc_program()
334 if (amdgpu_asic_wait_for_mc_idle(adev)) { in gmc_v7_0_mc_program()
335 dev_warn(adev->dev, "Wait for MC idle timed out!\n"); in gmc_v7_0_mc_program()
337 gmc_v7_0_mc_resume(adev, &save); in gmc_v7_0_mc_program()
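
gmc_v7_0_mc_program() uses a different shift for each register granularity: the system aperture takes 4 KiB page numbers (>> 12), HDP_NONSURFACE_BASE takes 256-byte units (>> 8), and MC_VM_FB_LOCATION packs the framebuffer range in 16 MiB units (>> 24), end address in the high half-word and start in the low. A worked example of the FB-location packing with an invented 40-bit MC address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t vram_start = 0xF400000000ull;               /* example MC address */
        uint64_t vram_end   = vram_start + (4ull << 30) - 1; /* 4 GiB - 1 */

        /* Same packing as the driver: 16 MiB units, top in bits 31:16,
         * bottom in bits 15:0. */
        uint32_t fb_loc  = (uint32_t)(((vram_end >> 24) & 0xFFFF) << 16);
        fb_loc          |= (uint32_t)((vram_start >> 24) & 0xFFFF);

        printf("MC_VM_FB_LOCATION = 0x%08X\n", fb_loc);  /* 0x0F4F0F40 */
        return 0;
    }
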
358 static int gmc_v7_0_mc_init(struct amdgpu_device *adev) in gmc_v7_0_mc_init() argument
401 adev->mc.vram_width = numchan * chansize; in gmc_v7_0_mc_init()
403 adev->mc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v7_0_mc_init()
404 adev->mc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v7_0_mc_init()
406 adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; in gmc_v7_0_mc_init()
407 adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; in gmc_v7_0_mc_init()
408 adev->mc.visible_vram_size = adev->mc.aper_size; in gmc_v7_0_mc_init()
414 adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); in gmc_v7_0_mc_init()
416 adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; in gmc_v7_0_mc_init()
418 gmc_v7_0_vram_gtt_location(adev, &adev->mc); in gmc_v7_0_mc_init()
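
gmc_v7_0_mc_init() treats CONFIG_MEMSIZE as a MiB count and scales it to bytes, then sizes the GTT: with the amdgpu_gart_size module parameter left at its default the GTT is max(1 GiB, VRAM size), otherwise the parameter is taken as MiB. The same arithmetic in a standalone sketch (the register value is invented):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

    int main(void)
    {
        uint32_t config_memsize_mib = 2048;   /* pretend RREG32(mmCONFIG_MEMSIZE) */
        int      amdgpu_gart_size   = -1;     /* module param; -1 = auto */

        uint64_t vram_size = (uint64_t)config_memsize_mib * 1024ull * 1024ull;
        uint64_t gtt_size  = (amdgpu_gart_size == -1)
                           ? max_u64(1024ull << 20, vram_size)  /* >= 1 GiB */
                           : (uint64_t)amdgpu_gart_size << 20;  /* MiB -> bytes */

        printf("VRAM %llu MiB, GTT %llu MiB\n",
               (unsigned long long)(vram_size >> 20),
               (unsigned long long)(gtt_size >> 20));
        return 0;
    }
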
438 static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev, in gmc_v7_0_gart_flush_gpu_tlb() argument
459 static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev, in gmc_v7_0_gart_set_pte_pde() argument
481 static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev, in gmc_v7_0_set_fault_enable_default() argument
513 static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) in gmc_v7_0_gart_enable() argument
518 if (adev->gart.robj == NULL) { in gmc_v7_0_gart_enable()
519 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v7_0_gart_enable()
522 r = amdgpu_gart_table_vram_pin(adev); in gmc_v7_0_gart_enable()
552 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); in gmc_v7_0_gart_enable()
553 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); in gmc_v7_0_gart_enable()
554 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); in gmc_v7_0_gart_enable()
556 (u32)(adev->dummy_page.addr >> 12)); in gmc_v7_0_gart_enable()
574 WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1); in gmc_v7_0_gart_enable()
578 adev->gart.table_addr >> 12); in gmc_v7_0_gart_enable()
581 adev->gart.table_addr >> 12); in gmc_v7_0_gart_enable()
586 (u32)(adev->dummy_page.addr >> 12)); in gmc_v7_0_gart_enable()
595 gmc_v7_0_set_fault_enable_default(adev, false); in gmc_v7_0_gart_enable()
597 gmc_v7_0_set_fault_enable_default(adev, true); in gmc_v7_0_gart_enable()
599 if (adev->asic_type == CHIP_KAVERI) { in gmc_v7_0_gart_enable()
605 gmc_v7_0_gart_flush_gpu_tlb(adev, 0); in gmc_v7_0_gart_enable()
607 (unsigned)(adev->mc.gtt_size >> 20), in gmc_v7_0_gart_enable()
608 (unsigned long long)adev->gart.table_addr); in gmc_v7_0_gart_enable()
609 adev->gart.ready = true; in gmc_v7_0_gart_enable()
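
Throughout gmc_v7_0_gart_enable(), addresses are written right-shifted by 12 because the VM context registers take 4 KiB page frame numbers, not byte addresses; that applies to the GTT start/end, the page-table base, and the dummy-page fault-default address alike. A small sketch of the conversion with invented MC addresses:

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SHIFT 12   /* VM context registers take 4 KiB PFNs */

    int main(void)
    {
        uint64_t gtt_start  = 0x0000000100000000ull;  /* example MC addresses */
        uint64_t gtt_end    = gtt_start + (1ull << 30) - 1;
        uint64_t table_addr = 0x00000000F4000000ull;  /* page-table BO in VRAM */

        printf("PAGE_TABLE_START_ADDR = 0x%08X\n", (uint32_t)(gtt_start  >> GPU_PAGE_SHIFT));
        printf("PAGE_TABLE_END_ADDR   = 0x%08X\n", (uint32_t)(gtt_end    >> GPU_PAGE_SHIFT));
        printf("PAGE_TABLE_BASE_ADDR  = 0x%08X\n", (uint32_t)(table_addr >> GPU_PAGE_SHIFT));
        return 0;
    }
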
613 static int gmc_v7_0_gart_init(struct amdgpu_device *adev) in gmc_v7_0_gart_init() argument
617 if (adev->gart.robj) { in gmc_v7_0_gart_init()
622 r = amdgpu_gart_init(adev); in gmc_v7_0_gart_init()
625 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v7_0_gart_init()
626 return amdgpu_gart_table_vram_alloc(adev); in gmc_v7_0_gart_init()
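
gmc_v7_0_gart_init() sizes the page table as num_gpu_pages * 8: one 8-byte PTE per 4 KiB GPU page, so a 1 GiB GTT costs 2 MiB of table in VRAM. The arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t gtt_size      = 1ull << 30;        /* 1 GiB GTT */
        uint64_t num_gpu_pages = gtt_size >> 12;    /* 4 KiB pages */
        uint64_t table_size    = num_gpu_pages * 8; /* 8-byte PTEs */

        printf("%llu pages -> %llu KiB page table\n",
               (unsigned long long)num_gpu_pages,
               (unsigned long long)(table_size >> 10));  /* 262144 -> 2048 KiB */
        return 0;
    }
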
636 static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) in gmc_v7_0_gart_disable() argument
654 amdgpu_gart_table_vram_unpin(adev); in gmc_v7_0_gart_disable()
664 static void gmc_v7_0_gart_fini(struct amdgpu_device *adev) in gmc_v7_0_gart_fini() argument
666 amdgpu_gart_table_vram_free(adev); in gmc_v7_0_gart_fini()
667 amdgpu_gart_fini(adev); in gmc_v7_0_gart_fini()
685 static int gmc_v7_0_vm_init(struct amdgpu_device *adev) in gmc_v7_0_vm_init() argument
693 adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; in gmc_v7_0_vm_init()
696 if (adev->flags & AMD_IS_APU) { in gmc_v7_0_vm_init()
699 adev->vm_manager.vram_base_offset = tmp; in gmc_v7_0_vm_init()
701 adev->vm_manager.vram_base_offset = 0; in gmc_v7_0_vm_init()
713 static void gmc_v7_0_vm_fini(struct amdgpu_device *adev) in gmc_v7_0_vm_fini() argument
726 static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, in gmc_v7_0_vm_decode_fault() argument
783 static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev, in gmc_v7_0_enable_mc_ls() argument
791 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) in gmc_v7_0_enable_mc_ls()
800 static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev, in gmc_v7_0_enable_mc_mgcg() argument
808 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) in gmc_v7_0_enable_mc_mgcg()
817 static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev, in gmc_v7_0_enable_bif_mgls() argument
824 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { in gmc_v7_0_enable_bif_mgls()
840 static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev, in gmc_v7_0_enable_hdp_mgcg() argument
847 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) in gmc_v7_0_enable_hdp_mgcg()
856 static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev, in gmc_v7_0_enable_hdp_ls() argument
863 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) in gmc_v7_0_enable_hdp_ls()
896 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_early_init() local
898 gmc_v7_0_set_gart_funcs(adev); in gmc_v7_0_early_init()
899 gmc_v7_0_set_irq_funcs(adev); in gmc_v7_0_early_init()
906 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_late_init() local
908 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); in gmc_v7_0_late_init()
915 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_sw_init() local
917 r = amdgpu_gem_init(adev); in gmc_v7_0_sw_init()
921 if (adev->flags & AMD_IS_APU) { in gmc_v7_0_sw_init()
922 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; in gmc_v7_0_sw_init()
926 adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp); in gmc_v7_0_sw_init()
929 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); in gmc_v7_0_sw_init()
933 r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault); in gmc_v7_0_sw_init()
941 adev->vm_manager.max_pfn = amdgpu_vm_size << 18; in gmc_v7_0_sw_init()
947 adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ in gmc_v7_0_sw_init()
954 adev->need_dma32 = false; in gmc_v7_0_sw_init()
955 dma_bits = adev->need_dma32 ? 32 : 40; in gmc_v7_0_sw_init()
956 r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); in gmc_v7_0_sw_init()
958 adev->need_dma32 = true; in gmc_v7_0_sw_init()
962 r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); in gmc_v7_0_sw_init()
964 pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); in gmc_v7_0_sw_init()
968 r = gmc_v7_0_init_microcode(adev); in gmc_v7_0_sw_init()
974 r = gmc_v7_0_mc_init(adev); in gmc_v7_0_sw_init()
979 r = amdgpu_bo_init(adev); in gmc_v7_0_sw_init()
983 r = gmc_v7_0_gart_init(adev); in gmc_v7_0_sw_init()
987 if (!adev->vm_manager.enabled) { in gmc_v7_0_sw_init()
988 r = gmc_v7_0_vm_init(adev); in gmc_v7_0_sw_init()
990 dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); in gmc_v7_0_sw_init()
993 adev->vm_manager.enabled = true; in gmc_v7_0_sw_init()
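
Two computations in gmc_v7_0_sw_init() are worth spelling out: vm_manager.max_pfn converts the amdgpu_vm_size module parameter from GiB to 4 KiB pages (<< 30 for bytes, >> 12 for pages, hence << 18), and DMA setup first tries the chip's 40-bit mask and falls back to 32-bit if the platform rejects it. A sketch of both, with set_dma_mask() standing in for pci_set_dma_mask():

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in; pretend the platform only supports 32-bit DMA. */
    static int set_dma_mask(int bits) { return bits > 32 ? -1 : 0; }

    int main(void)
    {
        int amdgpu_vm_size = 8;   /* module param, in GiB */

        /* GiB -> 4 KiB pages: << 30 then >> 12, i.e. << 18. */
        uint64_t max_pfn = (uint64_t)amdgpu_vm_size << 18;
        printf("max_pfn = %llu\n", (unsigned long long)max_pfn);

        /* GMC v7 addresses 40 bits; fall back to 32-bit DMA on failure. */
        int dma_bits = 40;
        if (set_dma_mask(dma_bits)) {
            dma_bits = 32;
            set_dma_mask(dma_bits);
            printf("no 40-bit DMA, falling back to 32 bits\n");
        }
        printf("using %d-bit DMA mask\n", dma_bits);
        return 0;
    }
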
1001 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_sw_fini() local
1003 if (adev->vm_manager.enabled) { in gmc_v7_0_sw_fini()
1004 amdgpu_vm_manager_fini(adev); in gmc_v7_0_sw_fini()
1005 gmc_v7_0_vm_fini(adev); in gmc_v7_0_sw_fini()
1006 adev->vm_manager.enabled = false; in gmc_v7_0_sw_fini()
1008 gmc_v7_0_gart_fini(adev); in gmc_v7_0_sw_fini()
1009 amdgpu_gem_fini(adev); in gmc_v7_0_sw_fini()
1010 amdgpu_bo_fini(adev); in gmc_v7_0_sw_fini()
1018 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_hw_init() local
1020 gmc_v7_0_init_golden_registers(adev); in gmc_v7_0_hw_init()
1022 gmc_v7_0_mc_program(adev); in gmc_v7_0_hw_init()
1024 if (!(adev->flags & AMD_IS_APU)) { in gmc_v7_0_hw_init()
1025 r = gmc_v7_0_mc_load_microcode(adev); in gmc_v7_0_hw_init()
1032 r = gmc_v7_0_gart_enable(adev); in gmc_v7_0_hw_init()
1041 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_hw_fini() local
1043 amdgpu_irq_put(adev, &adev->mc.vm_fault, 0); in gmc_v7_0_hw_fini()
1044 gmc_v7_0_gart_disable(adev); in gmc_v7_0_hw_fini()
1051 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_suspend() local
1053 if (adev->vm_manager.enabled) { in gmc_v7_0_suspend()
1054 amdgpu_vm_manager_fini(adev); in gmc_v7_0_suspend()
1055 gmc_v7_0_vm_fini(adev); in gmc_v7_0_suspend()
1056 adev->vm_manager.enabled = false; in gmc_v7_0_suspend()
1058 gmc_v7_0_hw_fini(adev); in gmc_v7_0_suspend()
1066 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_resume() local
1068 r = gmc_v7_0_hw_init(adev); in gmc_v7_0_resume()
1072 if (!adev->vm_manager.enabled) { in gmc_v7_0_resume()
1073 r = gmc_v7_0_vm_init(adev); in gmc_v7_0_resume()
1075 dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); in gmc_v7_0_resume()
1078 adev->vm_manager.enabled = true; in gmc_v7_0_resume()
1086 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_is_idle() local
1100 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_wait_for_idle() local
1102 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v7_0_wait_for_idle()
1120 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_print_status() local
1122 dev_info(adev->dev, "GMC 7.x registers\n"); in gmc_v7_0_print_status()
1123 dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", in gmc_v7_0_print_status()
1125 dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", in gmc_v7_0_print_status()
1128 dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", in gmc_v7_0_print_status()
1130 dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", in gmc_v7_0_print_status()
1132 dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n", in gmc_v7_0_print_status()
1134 dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n", in gmc_v7_0_print_status()
1136 dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n", in gmc_v7_0_print_status()
1138 dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n", in gmc_v7_0_print_status()
1140 dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n", in gmc_v7_0_print_status()
1142 dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n", in gmc_v7_0_print_status()
1144 dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", in gmc_v7_0_print_status()
1146 dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n", in gmc_v7_0_print_status()
1148 dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n", in gmc_v7_0_print_status()
1150 dev_info(adev->dev, " 0x15D4=0x%08X\n", in gmc_v7_0_print_status()
1152 dev_info(adev->dev, " 0x15D8=0x%08X\n", in gmc_v7_0_print_status()
1154 dev_info(adev->dev, " 0x15DC=0x%08X\n", in gmc_v7_0_print_status()
1156 dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n", in gmc_v7_0_print_status()
1158 dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n", in gmc_v7_0_print_status()
1160 dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", in gmc_v7_0_print_status()
1162 dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n", in gmc_v7_0_print_status()
1164 dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n", in gmc_v7_0_print_status()
1168 dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", in gmc_v7_0_print_status()
1171 dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", in gmc_v7_0_print_status()
1174 dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n", in gmc_v7_0_print_status()
1176 dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n", in gmc_v7_0_print_status()
1178 dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n", in gmc_v7_0_print_status()
1180 dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n", in gmc_v7_0_print_status()
1182 dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n", in gmc_v7_0_print_status()
1184 dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n", in gmc_v7_0_print_status()
1186 dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n", in gmc_v7_0_print_status()
1189 if (adev->asic_type == CHIP_KAVERI) { in gmc_v7_0_print_status()
1190 dev_info(adev->dev, " CHUB_CONTROL=0x%08X\n", in gmc_v7_0_print_status()
1194 dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n", in gmc_v7_0_print_status()
1196 dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n", in gmc_v7_0_print_status()
1198 dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n", in gmc_v7_0_print_status()
1200 dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n", in gmc_v7_0_print_status()
1202 dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n", in gmc_v7_0_print_status()
1204 dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n", in gmc_v7_0_print_status()
1208 dev_info(adev->dev, " %d:\n", i); in gmc_v7_0_print_status()
1209 dev_info(adev->dev, " 0x%04X=0x%08X\n", in gmc_v7_0_print_status()
1211 dev_info(adev->dev, " 0x%04X=0x%08X\n", in gmc_v7_0_print_status()
1213 dev_info(adev->dev, " 0x%04X=0x%08X\n", in gmc_v7_0_print_status()
1215 dev_info(adev->dev, " 0x%04X=0x%08X\n", in gmc_v7_0_print_status()
1217 dev_info(adev->dev, " 0x%04X=0x%08X\n", in gmc_v7_0_print_status()
1221 dev_info(adev->dev, " BIF_FB_EN=0x%08X\n", in gmc_v7_0_print_status()
1227 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_soft_reset() local
1238 if (!(adev->flags & AMD_IS_APU)) in gmc_v7_0_soft_reset()
1244 gmc_v7_0_print_status((void *)adev); in gmc_v7_0_soft_reset()
1246 gmc_v7_0_mc_stop(adev, &save); in gmc_v7_0_soft_reset()
1247 if (gmc_v7_0_wait_for_idle(adev)) { in gmc_v7_0_soft_reset()
1248 dev_warn(adev->dev, "Wait for GMC idle timed out!\n"); in gmc_v7_0_soft_reset()
1254 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); in gmc_v7_0_soft_reset()
1267 gmc_v7_0_mc_resume(adev, &save); in gmc_v7_0_soft_reset()
1270 gmc_v7_0_print_status((void *)adev); in gmc_v7_0_soft_reset()
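
gmc_v7_0_soft_reset() wraps the reset in the same stop/wait/resume bracket as mc_program(), and pulses SRBM_SOFT_RESET rather than leaving it set: write the reset bits, read back to post the write, hold about 50 us, clear the bits, read back, hold again. A sketch of the pulse against a fake register; the bit position is a placeholder, not the documented SOFT_RESET_MC field:

    #include <stdio.h>
    #include <unistd.h>

    static unsigned int srbm_soft_reset;   /* stands in for the MMIO register */

    static void         wreg(unsigned int v) { srbm_soft_reset = v; }
    static unsigned int rreg(void)           { return srbm_soft_reset; }

    int main(void)
    {
        unsigned int reset_mask = 1u << 11;   /* placeholder MC reset bit */

        unsigned int tmp = rreg() | reset_mask;
        printf("SRBM_SOFT_RESET=0x%08X\n", tmp);
        wreg(tmp);
        (void)rreg();     /* read back to post the write */
        usleep(50);       /* udelay(50) in the driver */
        wreg(tmp & ~reset_mask);
        (void)rreg();
        usleep(50);
        return 0;
    }
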
1276 static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v7_0_vm_fault_interrupt_state() argument
1317 static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, in gmc_v7_0_process_interrupt() argument
1333 gmc_v7_0_set_fault_enable_default(adev, false); in gmc_v7_0_process_interrupt()
1335 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", in gmc_v7_0_process_interrupt()
1337 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", in gmc_v7_0_process_interrupt()
1339 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", in gmc_v7_0_process_interrupt()
1341 gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); in gmc_v7_0_process_interrupt()
1350 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v7_0_set_clockgating_state() local
1355 if (!(adev->flags & AMD_IS_APU)) { in gmc_v7_0_set_clockgating_state()
1356 gmc_v7_0_enable_mc_mgcg(adev, gate); in gmc_v7_0_set_clockgating_state()
1357 gmc_v7_0_enable_mc_ls(adev, gate); in gmc_v7_0_set_clockgating_state()
1359 gmc_v7_0_enable_bif_mgls(adev, gate); in gmc_v7_0_set_clockgating_state()
1360 gmc_v7_0_enable_hdp_mgcg(adev, gate); in gmc_v7_0_set_clockgating_state()
1361 gmc_v7_0_enable_hdp_ls(adev, gate); in gmc_v7_0_set_clockgating_state()
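
Every gmc_v7_0_enable_*() helper gates twice: the feature is turned on only when the caller requests gating and the matching AMDGPU_CG_SUPPORT_* capability bit is set in adev->cg_flags, so chips that lack the feature silently take the "off" path. A sketch of that double gate with placeholder flag bits:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder capability bits mirroring AMDGPU_CG_SUPPORT_*. */
    #define CG_SUPPORT_MC_LS   (1u << 0)
    #define CG_SUPPORT_MC_MGCG (1u << 1)

    static unsigned int cg_flags = CG_SUPPORT_MC_LS;   /* chip supports LS only */

    static void enable_mc_ls(bool enable)
    {
        /* Enabled only if requested AND supported. */
        if (enable && (cg_flags & CG_SUPPORT_MC_LS))
            printf("MC light sleep: on\n");
        else
            printf("MC light sleep: off\n");
    }

    int main(void)
    {
        bool gate = true;   /* state == AMD_CG_STATE_GATE */
        enable_mc_ls(gate);
        return 0;
    }
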
1399 static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev) in gmc_v7_0_set_gart_funcs() argument
1401 if (adev->gart.gart_funcs == NULL) in gmc_v7_0_set_gart_funcs()
1402 adev->gart.gart_funcs = &gmc_v7_0_gart_funcs; in gmc_v7_0_set_gart_funcs()
1405 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v7_0_set_irq_funcs() argument
1407 adev->mc.vm_fault.num_types = 1; in gmc_v7_0_set_irq_funcs()
1408 adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs; in gmc_v7_0_set_irq_funcs()