Lines matching refs:adev (gmc_v8_0.c; left-hand numbers are file line numbers)

42 static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
43 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
87 static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) in gmc_v8_0_init_golden_registers() argument
89 switch (adev->asic_type) { in gmc_v8_0_init_golden_registers()
91 amdgpu_program_register_sequence(adev, in gmc_v8_0_init_golden_registers()
94 amdgpu_program_register_sequence(adev, in gmc_v8_0_init_golden_registers()
99 amdgpu_program_register_sequence(adev, in gmc_v8_0_init_golden_registers()
102 amdgpu_program_register_sequence(adev, in gmc_v8_0_init_golden_registers()
107 amdgpu_program_register_sequence(adev, in gmc_v8_0_init_golden_registers()
112 amdgpu_program_register_sequence(adev, in gmc_v8_0_init_golden_registers()
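
The switch at line 89 dispatches on adev->asic_type and plays back per-ASIC "golden" register tables via amdgpu_program_register_sequence(). A minimal sketch of one case; the table names are illustrative assumptions, not taken from this listing:

	switch (adev->asic_type) {
	case CHIP_TONGA:
		/* table names assumed; the real tables are defined elsewhere in the file */
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	default:
		break;
	}
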
130 int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev) in gmc_v8_0_mc_wait_for_idle() argument
135 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v8_0_mc_wait_for_idle()
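
gmc_v8_0_mc_wait_for_idle() uses the polling idiom that recurs throughout the file (see also lines 293, 299, and 1063): test busy bits once per microsecond, bounded by adev->usec_timeout. A sketch, with the exact status mask assumed:

	for (i = 0; i < adev->usec_timeout; i++) {
		/* MC_BUSY_MASK is an assumed name for the VMC/MCB/MCC/MCD busy bits */
		if (!(RREG32(mmSRBM_STATUS) & MC_BUSY_MASK))
			return 0;	/* MC is idle */
		udelay(1);
	}
	return -1;	/* timed out */
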
150 void gmc_v8_0_mc_stop(struct amdgpu_device *adev, in gmc_v8_0_mc_stop() argument
155 if (adev->mode_info.num_crtc) in gmc_v8_0_mc_stop()
156 amdgpu_display_stop_mc_access(adev, save); in gmc_v8_0_mc_stop()
158 amdgpu_asic_wait_for_mc_idle(adev); in gmc_v8_0_mc_stop()
173 void gmc_v8_0_mc_resume(struct amdgpu_device *adev, in gmc_v8_0_mc_resume() argument
187 if (adev->mode_info.num_crtc) in gmc_v8_0_mc_resume()
188 amdgpu_display_resume_mc_access(adev, save); in gmc_v8_0_mc_resume()
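
gmc_v8_0_mc_stop() and gmc_v8_0_mc_resume() form a bracket around MC reprogramming: the display controllers are the heaviest MC clients, so their accesses are halted and saved first, then restored afterwards. Typical usage, matching gmc_v8_0_mc_program() and gmc_v8_0_soft_reset() below:

	struct amdgpu_mode_mc_save save;

	gmc_v8_0_mc_stop(adev, &save);		/* blank CRTCs, quiesce MC traffic */
	/* ... reprogram MC registers while no client is accessing memory ... */
	gmc_v8_0_mc_resume(adev, &save);	/* restore display MC access */
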
200 static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) in gmc_v8_0_init_microcode() argument
208 switch (adev->asic_type) { in gmc_v8_0_init_microcode()
220 err = request_firmware(&adev->mc.fw, fw_name, adev->dev); in gmc_v8_0_init_microcode()
223 err = amdgpu_ucode_validate(adev->mc.fw); in gmc_v8_0_init_microcode()
230 release_firmware(adev->mc.fw); in gmc_v8_0_init_microcode()
231 adev->mc.fw = NULL; in gmc_v8_0_init_microcode()
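
Lines 220-231 follow the kernel's standard firmware-load shape: request the blob, validate the ucode header, and on any failure release the firmware and NULL the pointer so later stages can simply test adev->mc.fw. A sketch, with the firmware path format assumed:

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);	/* path format assumed */
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
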
244 static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) in gmc_v8_0_mc_load_microcode() argument
252 if (!adev->mc.fw) in gmc_v8_0_mc_load_microcode()
255 hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; in gmc_v8_0_mc_load_microcode()
258 adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version); in gmc_v8_0_mc_load_microcode()
261 (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); in gmc_v8_0_mc_load_microcode()
264 (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in gmc_v8_0_mc_load_microcode()
293 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v8_0_mc_load_microcode()
299 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v8_0_mc_load_microcode()
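
gmc_v8_0_mc_load_microcode() locates the two payloads (IO debug register writes and the MC ucode proper) inside the validated blob through little-endian byte offsets stored in the header, then polls until the MC reports the ucode running. The header-parsing core, reconstructed from lines 252-264:

	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data, *io_mc_regs;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
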
313 static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v8_0_vram_gtt_location() argument
318 dev_warn(adev->dev, "limiting VRAM\n"); in gmc_v8_0_vram_gtt_location()
322 amdgpu_vram_location(adev, &adev->mc, 0); in gmc_v8_0_vram_gtt_location()
323 adev->mc.gtt_base_align = 0; in gmc_v8_0_vram_gtt_location()
324 amdgpu_gtt_location(adev, mc); in gmc_v8_0_vram_gtt_location()
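
gmc_v8_0_vram_gtt_location() lays out the MC address space: VRAM at offset 0, GTT placed after it, with VRAM clamped first if it would crowd the GTT out of the 40-bit space (the dev_warn at line 318). A sketch, with the clamp threshold assumed:

	if (mc->mc_vram_size > 0xFFC0000000ULL) {	/* threshold assumed: leave room for GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
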
335 static void gmc_v8_0_mc_program(struct amdgpu_device *adev) in gmc_v8_0_mc_program() argument
351 if (adev->mode_info.num_crtc) in gmc_v8_0_mc_program()
352 amdgpu_display_set_vga_render_state(adev, false); in gmc_v8_0_mc_program()
354 gmc_v8_0_mc_stop(adev, &save); in gmc_v8_0_mc_program()
355 if (amdgpu_asic_wait_for_mc_idle(adev)) { in gmc_v8_0_mc_program()
356 dev_warn(adev->dev, "Wait for MC idle timed out!\n"); in gmc_v8_0_mc_program()
360 adev->mc.vram_start >> 12); in gmc_v8_0_mc_program()
362 adev->mc.vram_end >> 12); in gmc_v8_0_mc_program()
364 adev->vram_scratch.gpu_addr >> 12); in gmc_v8_0_mc_program()
365 tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; in gmc_v8_0_mc_program()
366 tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); in gmc_v8_0_mc_program()
369 WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); in gmc_v8_0_mc_program()
375 if (amdgpu_asic_wait_for_mc_idle(adev)) { in gmc_v8_0_mc_program()
376 dev_warn(adev->dev, "Wait for MC idle timed out!\n"); in gmc_v8_0_mc_program()
378 gmc_v8_0_mc_resume(adev, &save); in gmc_v8_0_mc_program()
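
gmc_v8_0_mc_program() shows the complete stop/program/resume bracket. Note the unit conversions: the system aperture registers take 4 KB granularity (>> 12), MC_VM_FB_LOCATION packs start and end as 16-bit fields in 16 MB units (>> 24), and HDP_NONSURFACE_BASE takes 256-byte units (>> 8). Reconstructed around lines 354-378, with the aperture register names taken from the print_status dump below:

	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));

	if (amdgpu_asic_wait_for_mc_idle(adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	gmc_v8_0_mc_resume(adev, &save);
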
399 static int gmc_v8_0_mc_init(struct amdgpu_device *adev) in gmc_v8_0_mc_init() argument
442 adev->mc.vram_width = numchan * chansize; in gmc_v8_0_mc_init()
444 adev->mc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v8_0_mc_init()
445 adev->mc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v8_0_mc_init()
447 adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; in gmc_v8_0_mc_init()
448 adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; in gmc_v8_0_mc_init()
449 adev->mc.visible_vram_size = adev->mc.aper_size; in gmc_v8_0_mc_init()
455 adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); in gmc_v8_0_mc_init()
457 adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; in gmc_v8_0_mc_init()
459 gmc_v8_0_vram_gtt_location(adev, &adev->mc); in gmc_v8_0_mc_init()
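
gmc_v8_0_mc_init() derives sizes rather than probing memory: VRAM width is channel count times channel size, CONFIG_MEMSIZE reports VRAM in megabytes, the CPU-visible portion is bounded by PCI BAR 0, and the GTT defaults to max(1 GB, VRAM size) unless the amdgpu_gart_size module parameter overrides it. The sizing logic around lines 442-459, with the parameter's auto sentinel assumed:

	adev->mc.vram_width = numchan * chansize;	/* from MC config registers */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);

	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	if (amdgpu_gart_size == -1)	/* -1 = auto; sentinel assumed */
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);
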
479 static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev, in gmc_v8_0_gart_flush_gpu_tlb() argument
500 static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev, in gmc_v8_0_gart_set_pte_pde() argument
542 static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev, in gmc_v8_0_set_fault_enable_default() argument
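
The three helpers declared here implement the GART ops: the TLB flush presumably writes the VMID bit to a VM invalidate-request register, and set_pte_pde writes one 64-bit entry per page, combining a 40-bit physical address with low flag bits. A minimal sketch of the PTE write, assuming the VI-era entry layout:

	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	value = addr & 0x000000FFFFFFF000ULL;	/* 40-bit address, 4 KB aligned */
	value |= flags;				/* valid/system/readable/writeable bits */
	writeq(value, ptr + (gpu_page_idx * 8));
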
576 static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) in gmc_v8_0_gart_enable() argument
581 if (adev->gart.robj == NULL) { in gmc_v8_0_gart_enable()
582 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v8_0_gart_enable()
585 r = amdgpu_gart_table_vram_pin(adev); in gmc_v8_0_gart_enable()
631 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); in gmc_v8_0_gart_enable()
632 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); in gmc_v8_0_gart_enable()
633 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); in gmc_v8_0_gart_enable()
635 (u32)(adev->dummy_page.addr >> 12)); in gmc_v8_0_gart_enable()
653 WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1); in gmc_v8_0_gart_enable()
657 adev->gart.table_addr >> 12); in gmc_v8_0_gart_enable()
660 adev->gart.table_addr >> 12); in gmc_v8_0_gart_enable()
665 (u32)(adev->dummy_page.addr >> 12)); in gmc_v8_0_gart_enable()
681 gmc_v8_0_set_fault_enable_default(adev, false); in gmc_v8_0_gart_enable()
683 gmc_v8_0_set_fault_enable_default(adev, true); in gmc_v8_0_gart_enable()
685 gmc_v8_0_gart_flush_gpu_tlb(adev, 0); in gmc_v8_0_gart_enable()
687 (unsigned)(adev->mc.gtt_size >> 20), in gmc_v8_0_gart_enable()
688 (unsigned long long)adev->gart.table_addr); in gmc_v8_0_gart_enable()
689 adev->gart.ready = true; in gmc_v8_0_gart_enable()
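
gmc_v8_0_gart_enable() dedicates VM context 0 to the GART: its page-table window spans gtt_start..gtt_end, the table itself sits in VRAM at gart.table_addr, and faulting accesses are redirected to the dummy page rather than hanging the bus. Contexts 1+ (lines 653-665) cover the per-process VM range up to vm_manager.max_pfn and share the same fallback. The context-0 core from lines 631-635, with the fault-default register name taken from print_status below:

	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
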
693 static int gmc_v8_0_gart_init(struct amdgpu_device *adev) in gmc_v8_0_gart_init() argument
697 if (adev->gart.robj) { in gmc_v8_0_gart_init()
702 r = amdgpu_gart_init(adev); in gmc_v8_0_gart_init()
705 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v8_0_gart_init()
706 return amdgpu_gart_table_vram_alloc(adev); in gmc_v8_0_gart_init()
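
Line 705 sizes the table at one 8-byte PTE per GPU page. For example, a 1 GB GTT with 4 KB pages needs (1 << 30) / (4 << 10) = 262144 entries, i.e. a 2 MB table, which is then allocated in VRAM:

	/* 1 GB GTT / 4 KB pages = 262144 PTEs x 8 B = 2 MB table */
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
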
716 static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) in gmc_v8_0_gart_disable() argument
734 amdgpu_gart_table_vram_unpin(adev); in gmc_v8_0_gart_disable()
744 static void gmc_v8_0_gart_fini(struct amdgpu_device *adev) in gmc_v8_0_gart_fini() argument
746 amdgpu_gart_table_vram_free(adev); in gmc_v8_0_gart_fini()
747 amdgpu_gart_fini(adev); in gmc_v8_0_gart_fini()
765 static int gmc_v8_0_vm_init(struct amdgpu_device *adev) in gmc_v8_0_vm_init() argument
773 adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; in gmc_v8_0_vm_init()
776 if (adev->flags & AMD_IS_APU) { in gmc_v8_0_vm_init()
779 adev->vm_manager.vram_base_offset = tmp; in gmc_v8_0_vm_init()
781 adev->vm_manager.vram_base_offset = 0; in gmc_v8_0_vm_init()
793 static void gmc_v8_0_vm_fini(struct amdgpu_device *adev) in gmc_v8_0_vm_fini() argument
806 static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, in gmc_v8_0_vm_decode_fault() argument
850 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v8_0_early_init() local
852 gmc_v8_0_set_gart_funcs(adev); in gmc_v8_0_early_init()
853 gmc_v8_0_set_irq_funcs(adev); in gmc_v8_0_early_init()
860 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v8_0_late_init() local
862 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); in gmc_v8_0_late_init()
871 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v8_0_sw_init() local
873 r = amdgpu_gem_init(adev); in gmc_v8_0_sw_init()
877 if (adev->flags & AMD_IS_APU) { in gmc_v8_0_sw_init()
878 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; in gmc_v8_0_sw_init()
882 if (adev->asic_type == CHIP_FIJI) in gmc_v8_0_sw_init()
887 adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp); in gmc_v8_0_sw_init()
890 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); in gmc_v8_0_sw_init()
894 r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault); in gmc_v8_0_sw_init()
902 adev->vm_manager.max_pfn = amdgpu_vm_size << 18; in gmc_v8_0_sw_init()
908 adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ in gmc_v8_0_sw_init()
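
The shift at line 902 is a unit conversion: amdgpu_vm_size is given in gigabytes, and one gigabyte holds 2^30 / 2^12 = 2^18 4-KB pages, hence max_pfn = amdgpu_vm_size << 18. Likewise the 40-bit MC mask at line 908 is (1ULL << 40) - 1 = 0xffffffffff.
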
915 adev->need_dma32 = false; in gmc_v8_0_sw_init()
916 dma_bits = adev->need_dma32 ? 32 : 40; in gmc_v8_0_sw_init()
917 r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); in gmc_v8_0_sw_init()
919 adev->need_dma32 = true; in gmc_v8_0_sw_init()
923 r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); in gmc_v8_0_sw_init()
925 pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); in gmc_v8_0_sw_init()
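
Because the MC addresses only 40 bits, sw_init first tries a 40-bit DMA mask and falls back to 32-bit, recording the narrower mode in need_dma32 so buffer placement can be restricted accordingly. Reconstructed from lines 915-925; only the warning text is assumed:

	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		dev_warn(adev->dev, "No suitable DMA available\n");	/* message assumed */
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r)
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
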
929 r = gmc_v8_0_init_microcode(adev); in gmc_v8_0_sw_init()
935 r = gmc_v8_0_mc_init(adev); in gmc_v8_0_sw_init()
940 r = amdgpu_bo_init(adev); in gmc_v8_0_sw_init()
944 r = gmc_v8_0_gart_init(adev); in gmc_v8_0_sw_init()
948 if (!adev->vm_manager.enabled) { in gmc_v8_0_sw_init()
949 r = gmc_v8_0_vm_init(adev); in gmc_v8_0_sw_init()
951 dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); in gmc_v8_0_sw_init()
954 adev->vm_manager.enabled = true; in gmc_v8_0_sw_init()
962 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v8_0_sw_fini() local
964 if (adev->vm_manager.enabled) { in gmc_v8_0_sw_fini()
965 amdgpu_vm_manager_fini(adev); in gmc_v8_0_sw_fini()
966 gmc_v8_0_vm_fini(adev); in gmc_v8_0_sw_fini()
967 adev->vm_manager.enabled = false; in gmc_v8_0_sw_fini()
969 gmc_v8_0_gart_fini(adev); in gmc_v8_0_sw_fini()
970 amdgpu_gem_fini(adev); in gmc_v8_0_sw_fini()
971 amdgpu_bo_fini(adev); in gmc_v8_0_sw_fini()
979 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v8_0_hw_init() local
981 gmc_v8_0_init_golden_registers(adev); in gmc_v8_0_hw_init()
983 gmc_v8_0_mc_program(adev); in gmc_v8_0_hw_init()
985 if (adev->asic_type == CHIP_TONGA) { in gmc_v8_0_hw_init()
986 r = gmc_v8_0_mc_load_microcode(adev); in gmc_v8_0_hw_init()
993 r = gmc_v8_0_gart_enable(adev); in gmc_v8_0_hw_init()
1002 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v8_0_hw_fini() local
1004 amdgpu_irq_put(adev, &adev->mc.vm_fault, 0); in gmc_v8_0_hw_fini()
1005 gmc_v8_0_gart_disable(adev); in gmc_v8_0_hw_fini()
1012 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v8_0_suspend() local
1014 if (adev->vm_manager.enabled) { in gmc_v8_0_suspend()
1015 amdgpu_vm_manager_fini(adev); in gmc_v8_0_suspend()
1016 gmc_v8_0_vm_fini(adev); in gmc_v8_0_suspend()
1017 adev->vm_manager.enabled = false; in gmc_v8_0_suspend()
1019 gmc_v8_0_hw_fini(adev); in gmc_v8_0_suspend()
1027 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v8_0_resume() local
1029 r = gmc_v8_0_hw_init(adev); in gmc_v8_0_resume()
1033 if (!adev->vm_manager.enabled) { in gmc_v8_0_resume()
1034 r = gmc_v8_0_vm_init(adev); in gmc_v8_0_resume()
1036 dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); in gmc_v8_0_resume()
1039 adev->vm_manager.enabled = true; in gmc_v8_0_resume()
1047 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v8_0_is_idle() local
1061 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v8_0_wait_for_idle() local
1063 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v8_0_wait_for_idle()
1082 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v8_0_print_status() local
1084 dev_info(adev->dev, "GMC 8.x registers\n"); in gmc_v8_0_print_status()
1085 dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", in gmc_v8_0_print_status()
1087 dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", in gmc_v8_0_print_status()
1090 dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", in gmc_v8_0_print_status()
1092 dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", in gmc_v8_0_print_status()
1094 dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n", in gmc_v8_0_print_status()
1096 dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n", in gmc_v8_0_print_status()
1098 dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n", in gmc_v8_0_print_status()
1100 dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n", in gmc_v8_0_print_status()
1102 dev_info(adev->dev, " VM_L2_CNTL4=0x%08X\n", in gmc_v8_0_print_status()
1104 dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1106 dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1108 dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1110 dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n", in gmc_v8_0_print_status()
1112 dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n", in gmc_v8_0_print_status()
1114 dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1116 dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1118 dev_info(adev->dev, " mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n", in gmc_v8_0_print_status()
1120 dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1122 dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1124 dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1126 dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n", in gmc_v8_0_print_status()
1128 dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n", in gmc_v8_0_print_status()
1132 dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1135 dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1138 dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1140 dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1142 dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n", in gmc_v8_0_print_status()
1144 dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n", in gmc_v8_0_print_status()
1146 dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n", in gmc_v8_0_print_status()
1148 dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n", in gmc_v8_0_print_status()
1150 dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n", in gmc_v8_0_print_status()
1153 dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n", in gmc_v8_0_print_status()
1155 dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n", in gmc_v8_0_print_status()
1157 dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n", in gmc_v8_0_print_status()
1159 dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n", in gmc_v8_0_print_status()
1161 dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n", in gmc_v8_0_print_status()
1163 dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n", in gmc_v8_0_print_status()
1167 dev_info(adev->dev, " %d:\n", i); in gmc_v8_0_print_status()
1168 dev_info(adev->dev, " 0x%04X=0x%08X\n", in gmc_v8_0_print_status()
1170 dev_info(adev->dev, " 0x%04X=0x%08X\n", in gmc_v8_0_print_status()
1172 dev_info(adev->dev, " 0x%04X=0x%08X\n", in gmc_v8_0_print_status()
1174 dev_info(adev->dev, " 0x%04X=0x%08X\n", in gmc_v8_0_print_status()
1176 dev_info(adev->dev, " 0x%04X=0x%08X\n", in gmc_v8_0_print_status()
1180 dev_info(adev->dev, " BIF_FB_EN=0x%08X\n", in gmc_v8_0_print_status()
1188 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v8_0_soft_reset() local
1197 if (!(adev->flags & AMD_IS_APU)) in gmc_v8_0_soft_reset()
1203 gmc_v8_0_print_status((void *)adev); in gmc_v8_0_soft_reset()
1205 gmc_v8_0_mc_stop(adev, &save); in gmc_v8_0_soft_reset()
1206 if (gmc_v8_0_wait_for_idle(adev)) { in gmc_v8_0_soft_reset()
1207 dev_warn(adev->dev, "Wait for GMC idle timed out!\n"); in gmc_v8_0_soft_reset()
1213 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); in gmc_v8_0_soft_reset()
1226 gmc_v8_0_mc_resume(adev, &save); in gmc_v8_0_soft_reset()
1229 gmc_v8_0_print_status((void *)adev); in gmc_v8_0_soft_reset()
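
gmc_v8_0_soft_reset() reuses the mc_stop/mc_resume bracket around a reset pulse on SRBM_SOFT_RESET; the AMD_IS_APU check at line 1197 skips the MC reset bit on APUs, whose memory carve-out is owned by the system BIOS. A sketch of the pulse, with the assert/deassert delay values assumed:

	if (srbm_soft_reset) {
		gmc_v8_0_mc_stop(adev, &save);
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		udelay(50);		/* delay values assumed */
		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		udelay(50);
		gmc_v8_0_mc_resume(adev, &save);
	}
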
1235 static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v8_0_vm_fault_interrupt_state() argument
1277 static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, in gmc_v8_0_process_interrupt() argument
1293 gmc_v8_0_set_fault_enable_default(adev, false); in gmc_v8_0_process_interrupt()
1295 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", in gmc_v8_0_process_interrupt()
1297 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", in gmc_v8_0_process_interrupt()
1299 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", in gmc_v8_0_process_interrupt()
1301 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); in gmc_v8_0_process_interrupt()
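
On a VM protection fault, the handler reads the address and status latched in the CONTEXT1 fault registers (the same ones dumped by print_status), logs them, and hands the status word to the decoder. Reconstructed from lines 1293-1301; the interrupt-entry field names are assumed:

	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);	/* field names assumed */
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n", addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status);
	gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
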
1345 static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev) in gmc_v8_0_set_gart_funcs() argument
1347 if (adev->gart.gart_funcs == NULL) in gmc_v8_0_set_gart_funcs()
1348 adev->gart.gart_funcs = &gmc_v8_0_gart_funcs; in gmc_v8_0_set_gart_funcs()
1351 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v8_0_set_irq_funcs() argument
1353 adev->mc.vm_fault.num_types = 1; in gmc_v8_0_set_irq_funcs()
1354 adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs; in gmc_v8_0_set_irq_funcs()
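
These setters, called from early_init at lines 852-853, wire the block's implementations into the device's dispatch tables: a GART/TLB ops table and a single-type VM-fault IRQ source. The IRQ funcs table presumably pairs the two handlers declared at lines 1235 and 1277:

	static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
		.set = gmc_v8_0_vm_fault_interrupt_state,
		.process = gmc_v8_0_process_interrupt,
	};
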