Lines matching refs:adev in drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c (SDMA v2.4 IP block)
47 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
48 static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
49 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
50 static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
92 static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) in sdma_v2_4_init_golden_registers() argument
94 switch (adev->asic_type) { in sdma_v2_4_init_golden_registers()
96 amdgpu_program_register_sequence(adev, in sdma_v2_4_init_golden_registers()
99 amdgpu_program_register_sequence(adev, in sdma_v2_4_init_golden_registers()
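The golden-register tables passed to amdgpu_program_register_sequence() are flat arrays of {offset, and_mask, or_mask} triples applied as read-modify-write updates. A minimal self-contained sketch of that pass, with the MMIO accessors stubbed out (rreg32/wreg32 and the sample table are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    /* Stubbed MMIO accessors standing in for the driver's RREG32/WREG32. */
    static uint32_t regs_backing[16];
    static uint32_t rreg32(uint32_t off)            { return regs_backing[off]; }
    static void     wreg32(uint32_t off, uint32_t v) { regs_backing[off] = v; }

    /* Apply {offset, and_mask, or_mask} triples, as the golden-settings
     * helper does. */
    static void program_register_sequence(const uint32_t *seq, unsigned count)
    {
        for (unsigned i = 0; i < count; i += 3) {
            uint32_t off = seq[i], and_mask = seq[i + 1], or_mask = seq[i + 2];
            uint32_t tmp = (and_mask == 0xffffffff) ? or_mask
                         : ((rreg32(off) & ~and_mask) | or_mask);
            wreg32(off, tmp);
        }
    }

    int main(void)
    {
        static const uint32_t golden[] = { 2, 0x00000f00, 0x00000100 };
        program_register_sequence(golden, 3);
        printf("reg2 = 0x%08x\n", rreg32(2));
        return 0;
    }

When and_mask is all ones the register is simply overwritten, which is how the real helper fast-paths full-register writes.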
117 static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) in sdma_v2_4_init_microcode() argument
128 switch (adev->asic_type) { in sdma_v2_4_init_microcode()
135 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v2_4_init_microcode()
140 err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev); in sdma_v2_4_init_microcode()
143 err = amdgpu_ucode_validate(adev->sdma.instance[i].fw); in sdma_v2_4_init_microcode()
146 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; in sdma_v2_4_init_microcode()
147 adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version); in sdma_v2_4_init_microcode()
148 adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); in sdma_v2_4_init_microcode()
149 if (adev->sdma.instance[i].feature_version >= 20) in sdma_v2_4_init_microcode()
150 adev->sdma.instance[i].burst_nop = true; in sdma_v2_4_init_microcode()
152 if (adev->firmware.smu_load) { in sdma_v2_4_init_microcode()
153 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; in sdma_v2_4_init_microcode()
155 info->fw = adev->sdma.instance[i].fw; in sdma_v2_4_init_microcode()
157 adev->firmware.fw_size += in sdma_v2_4_init_microcode()
167 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v2_4_init_microcode()
168 release_firmware(adev->sdma.instance[i].fw); in sdma_v2_4_init_microcode()
169 adev->sdma.instance[i].fw = NULL; in sdma_v2_4_init_microcode()
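init_microcode fetches one blob per SDMA instance via request_firmware(), validates it, and lifts the version fields out of the little-endian header; feature_version >= 20 then enables burst NOP padding. A toy stand-in for the header parse (byte layout and values are made up; the real header is struct sdma_firmware_header_v1_0):

    #include <stdint.h>
    #include <stdio.h>

    /* Little-endian read of the two header fields the init path uses:
     * ucode_version -> fw_version, ucode_feature_version -> feature_version. */
    static uint32_t le32_read(const uint8_t *p)
    {
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        /* Pretend firmware header: version 0x15, feature version 21. */
        const uint8_t blob[8] = { 0x15, 0, 0, 0, 21, 0, 0, 0 };
        uint32_t fw_version = le32_read(blob);
        uint32_t feature_version = le32_read(blob + 4);
        int burst_nop = feature_version >= 20;  /* same gate as the driver */

        printf("fw 0x%x feature %u burst_nop %d\n",
               fw_version, feature_version, burst_nop);
        return 0;
    }

On any failure the error path at the end of the function releases every firmware blob and clears the pointers, as the last lines above show.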
187 rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2; in sdma_v2_4_ring_get_rptr()
201 struct amdgpu_device *adev = ring->adev; in sdma_v2_4_ring_get_wptr() local
202 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1; in sdma_v2_4_ring_get_wptr()
217 struct amdgpu_device *adev = ring->adev; in sdma_v2_4_ring_set_wptr() local
218 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1; in sdma_v2_4_ring_set_wptr()
287 if (ring == &ring->adev->sdma.instance[0].ring) in sdma_v2_4_ring_emit_hdp_flush()
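Both pointer helpers start by working out which SDMA instance owns the ring: the driver identity-compares the ring pointer against instance 0 and uses the result to index a per-instance register-offset table. A compilable model of that selection (the sdma_offsets values here are placeholders):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_SDMA 2

    /* Placeholder per-instance register block offsets, modeled on the
     * driver's sdma_offsets[] table. */
    static const uint32_t sdma_offsets[NUM_SDMA] = { 0x0000, 0x0200 };

    struct ring { int unused; };
    static struct ring rings[NUM_SDMA];

    int main(void)
    {
        struct ring *ring = &rings[1];
        /* Same trick as get_wptr/set_wptr: identity-compare against
         * instance 0's ring to pick the register bank. */
        int me = (ring == &rings[0]) ? 0 : 1;

        printf("SDMA%d registers at base + 0x%04x\n", me, sdma_offsets[me]);
        return 0;
    }

The read pointer needs no register access at all: the engine writes it, in bytes, to a write-back slot in system memory, and the >> 2 above converts that to a dword index.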
369 static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev) in sdma_v2_4_gfx_stop() argument
371 struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring; in sdma_v2_4_gfx_stop()
372 struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring; in sdma_v2_4_gfx_stop()
376 if ((adev->mman.buffer_funcs_ring == sdma0) || in sdma_v2_4_gfx_stop()
377 (adev->mman.buffer_funcs_ring == sdma1)) in sdma_v2_4_gfx_stop()
378 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); in sdma_v2_4_gfx_stop()
380 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v2_4_gfx_stop()
399 static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev) in sdma_v2_4_rlc_stop() argument
412 static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable) in sdma_v2_4_enable() argument
418 sdma_v2_4_gfx_stop(adev); in sdma_v2_4_enable()
419 sdma_v2_4_rlc_stop(adev); in sdma_v2_4_enable()
422 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v2_4_enable()
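enable(false) first quiesces the gfx and RLC queues, then toggles a halt bit per engine; enable(true) clears it again. A stub model of the halt toggle (the bit position stands in for SDMA0_F32_CNTL's HALT field and is an assumption here):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define F32_HALT (1u << 0)  /* stands in for the HALT bit */

    static uint32_t f32_cntl[2];

    static void sdma_enable_model(bool enable)
    {
        for (int i = 0; i < 2; i++) {
            if (enable)
                f32_cntl[i] &= ~F32_HALT;  /* let the f32 core run */
            else
                f32_cntl[i] |= F32_HALT;   /* halt the engine */
        }
    }

    int main(void)
    {
        sdma_enable_model(false);
        printf("halted:  0x%x 0x%x\n", f32_cntl[0], f32_cntl[1]);
        sdma_enable_model(true);
        printf("running: 0x%x 0x%x\n", f32_cntl[0], f32_cntl[1]);
        return 0;
    }

Note also the gfx_stop path above: if either SDMA ring is the active TTM buffer-funcs ring, the visible VRAM size is restored before the queues go down, since buffer moves can no longer be offloaded.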
440 static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) in sdma_v2_4_gfx_resume() argument
448 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v2_4_gfx_resume()
449 ring = &adev->sdma.instance[i].ring; in sdma_v2_4_gfx_resume()
452 mutex_lock(&adev->srbm_mutex); in sdma_v2_4_gfx_resume()
454 vi_srbm_select(adev, 0, 0, 0, j); in sdma_v2_4_gfx_resume()
459 vi_srbm_select(adev, 0, 0, 0, 0); in sdma_v2_4_gfx_resume()
460 mutex_unlock(&adev->srbm_mutex); in sdma_v2_4_gfx_resume()
481 upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); in sdma_v2_4_gfx_resume()
483 lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); in sdma_v2_4_gfx_resume()
513 if (adev->mman.buffer_funcs_ring == ring) in sdma_v2_4_gfx_resume()
514 amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); in sdma_v2_4_gfx_resume()
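gfx_resume programs the write-back read-pointer address as a hi/lo register pair, masking the low word to keep it dword-aligned. A small demo of exactly that split (the sample address is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Arbitrary write-back GPU address, split the way gfx_resume
         * programs SDMA0_GFX_RB_RPTR_ADDR_HI/LO. */
        uint64_t wb_gpu_addr = 0x123456788ull;

        uint32_t hi = (uint32_t)(wb_gpu_addr >> 32) & 0xFFFFFFFF;
        uint32_t lo = (uint32_t)wb_gpu_addr & 0xFFFFFFFC;  /* dword aligned */

        printf("RPTR_ADDR_HI=0x%08x RPTR_ADDR_LO=0x%08x\n", hi, lo);
        return 0;
    }

Once the ring is live, the same function switches the active VRAM size back to the full real_vram_size if this ring serves as the buffer-funcs ring, undoing gfx_stop's shrink.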
528 static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev) in sdma_v2_4_rlc_resume() argument
542 static int sdma_v2_4_load_microcode(struct amdgpu_device *adev) in sdma_v2_4_load_microcode() argument
550 sdma_v2_4_enable(adev, false); in sdma_v2_4_load_microcode()
552 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v2_4_load_microcode()
553 if (!adev->sdma.instance[i].fw) in sdma_v2_4_load_microcode()
555 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; in sdma_v2_4_load_microcode()
559 (adev->sdma.instance[i].fw->data + in sdma_v2_4_load_microcode()
564 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version); in sdma_v2_4_load_microcode()
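load_microcode halts the engines, then streams the firmware dwords through an address/data register pair and finally stores the firmware version in SDMA0_UCODE_ADDR. A toy model of that loop with MMIO replaced by plain variables (register behavior is simplified; the driver also converts each dword with le32_to_cpup()):

    #include <stdint.h>
    #include <stdio.h>

    /* MMIO modeled as variables: an auto-incrementing address register
     * feeding a small instruction SRAM. */
    static uint32_t ucode_addr;
    static uint32_t ucode_sram[64];

    static void wreg_ucode_addr(uint32_t v) { ucode_addr = v; }
    static void wreg_ucode_data(uint32_t v) { ucode_sram[ucode_addr++ % 64] = v; }

    int main(void)
    {
        const uint32_t fw_data[] = { 0xdead0001, 0xdead0002, 0xdead0003 };
        const uint32_t fw_version = 0x15;
        unsigned j, n = sizeof(fw_data) / sizeof(fw_data[0]);

        wreg_ucode_addr(0);               /* rewind the load pointer */
        for (j = 0; j < n; j++)
            wreg_ucode_data(fw_data[j]);  /* driver converts endianness first */
        wreg_ucode_addr(fw_version);      /* final write stores the version */

        printf("loaded %u dwords, UCODE_ADDR=0x%x\n", n, ucode_addr);
        return 0;
    }

This direct load path only runs when the SMU is not managing firmware; otherwise start() below asks the SMU to load and verify the SDMA ucode instead.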
578 static int sdma_v2_4_start(struct amdgpu_device *adev) in sdma_v2_4_start() argument
582 if (!adev->firmware.smu_load) { in sdma_v2_4_start()
583 r = sdma_v2_4_load_microcode(adev); in sdma_v2_4_start()
587 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, in sdma_v2_4_start()
591 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, in sdma_v2_4_start()
598 sdma_v2_4_enable(adev, true); in sdma_v2_4_start()
601 r = sdma_v2_4_gfx_resume(adev); in sdma_v2_4_start()
604 r = sdma_v2_4_rlc_resume(adev); in sdma_v2_4_start()
622 struct amdgpu_device *adev = ring->adev; in sdma_v2_4_ring_test_ring() local
629 r = amdgpu_wb_get(adev, &index); in sdma_v2_4_ring_test_ring()
631 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); in sdma_v2_4_ring_test_ring()
635 gpu_addr = adev->wb.gpu_addr + (index * 4); in sdma_v2_4_ring_test_ring()
637 adev->wb.wb[index] = cpu_to_le32(tmp); in sdma_v2_4_ring_test_ring()
642 amdgpu_wb_free(adev, index); in sdma_v2_4_ring_test_ring()
654 for (i = 0; i < adev->usec_timeout; i++) { in sdma_v2_4_ring_test_ring()
655 tmp = le32_to_cpu(adev->wb.wb[index]); in sdma_v2_4_ring_test_ring()
661 if (i < adev->usec_timeout) { in sdma_v2_4_ring_test_ring()
668 amdgpu_wb_free(adev, index); in sdma_v2_4_ring_test_ring()
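The ring test seeds a write-back slot with a 0xCAFEDEAD sentinel, submits a single write packet that should overwrite it with 0xDEADBEEF, and polls up to adev->usec_timeout iterations. A host-side model of the poll (fake_hw_write() stands in for the actual ring submission):

    #include <stdint.h>
    #include <stdio.h>

    static volatile uint32_t wb_slot;

    /* Stand-in for the ring write the test actually submits. */
    static void fake_hw_write(void) { wb_slot = 0xDEADBEEF; }

    int main(void)
    {
        const int usec_timeout = 100000;  /* models adev->usec_timeout */
        int i;

        wb_slot = 0xCAFEDEAD;             /* sentinel, as in the driver */
        fake_hw_write();

        for (i = 0; i < usec_timeout; i++) {
            if (wb_slot == 0xDEADBEEF)
                break;
            /* the driver delays ~1us per iteration here */
        }

        if (i < usec_timeout)
            printf("ring test passed after %d polls\n", i);
        else
            printf("ring test timed out\n");
        return 0;
    }

Win or lose, the write-back slot is handed back with amdgpu_wb_free(), which is why the free appears on both the success and error paths above.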
683 struct amdgpu_device *adev = ring->adev; in sdma_v2_4_ring_test_ib() local
692 r = amdgpu_wb_get(adev, &index); in sdma_v2_4_ring_test_ib()
694 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); in sdma_v2_4_ring_test_ib()
698 gpu_addr = adev->wb.gpu_addr + (index * 4); in sdma_v2_4_ring_test_ib()
700 adev->wb.wb[index] = cpu_to_le32(tmp); in sdma_v2_4_ring_test_ib()
719 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, in sdma_v2_4_ring_test_ib()
730 for (i = 0; i < adev->usec_timeout; i++) { in sdma_v2_4_ring_test_ib()
731 tmp = le32_to_cpu(adev->wb.wb[index]); in sdma_v2_4_ring_test_ib()
736 if (i < adev->usec_timeout) { in sdma_v2_4_ring_test_ib()
747 amdgpu_ib_free(adev, &ib); in sdma_v2_4_ring_test_ib()
749 amdgpu_wb_free(adev, index); in sdma_v2_4_ring_test_ib()
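The IB test performs the same write, but packaged as an indirect buffer submitted through the scheduler helper. The packet itself is a handful of dwords: opcode and sub-opcode in the header, 64-bit destination, count, payload. An illustrative packing, assuming the VI write-linear layout (treat the opcode values and field positions as assumptions):

    #include <stdint.h>
    #include <stdio.h>

    #define SDMA_OP_WRITE            2  /* assumed VI opcode */
    #define SDMA_SUBOP_WRITE_LINEAR  0  /* assumed sub-opcode */

    int main(void)
    {
        uint64_t gpu_addr = 0x100020ull;  /* hypothetical WB slot address */
        uint32_t ib[5];

        ib[0] = SDMA_OP_WRITE | (SDMA_SUBOP_WRITE_LINEAR << 8);  /* header */
        ib[1] = (uint32_t)gpu_addr;          /* destination, low dword  */
        ib[2] = (uint32_t)(gpu_addr >> 32);  /* destination, high dword */
        ib[3] = 1;                           /* dword count */
        ib[4] = 0xDEADBEEF;                  /* payload the test polls for */

        for (int i = 0; i < 5; i++)
            printf("ib[%d] = 0x%08x\n", i, ib[i]);
        return 0;
    }

The cleanup mirrors the ring test: the IB is freed with amdgpu_ib_free() and the write-back slot released regardless of outcome.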
820 value = amdgpu_vm_map_gart(ib->ring->adev, addr); in sdma_v2_4_vm_write_pte()
947 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in sdma_v2_4_early_init() local
949 adev->sdma.num_instances = SDMA_MAX_INSTANCE; in sdma_v2_4_early_init()
951 sdma_v2_4_set_ring_funcs(adev); in sdma_v2_4_early_init()
952 sdma_v2_4_set_buffer_funcs(adev); in sdma_v2_4_early_init()
953 sdma_v2_4_set_vm_pte_funcs(adev); in sdma_v2_4_early_init()
954 sdma_v2_4_set_irq_funcs(adev); in sdma_v2_4_early_init()
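early_init does no hardware work at all: it records the instance count and installs function-pointer tables that later stages and common code dispatch through. A minimal model of that wiring pattern, with made-up types:

    #include <stdio.h>

    struct ring_funcs { void (*set_wptr)(void); };

    static void demo_set_wptr(void) { puts("wptr updated"); }

    static const struct ring_funcs demo_ring_funcs = {
        .set_wptr = demo_set_wptr,
    };

    /* Made-up device model: just enough to show the wiring. */
    struct dev_model {
        int num_instances;
        const struct ring_funcs *funcs[2];
    };

    static void set_ring_funcs(struct dev_model *dev)
    {
        for (int i = 0; i < dev->num_instances; i++)
            dev->funcs[i] = &demo_ring_funcs;
    }

    int main(void)
    {
        struct dev_model dev = { .num_instances = 2 };
        set_ring_funcs(&dev);
        dev.funcs[0]->set_wptr();  /* common code dispatches through the table */
        return 0;
    }

Keeping this in early_init means the tables are valid before sw_init touches interrupts or rings.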
963 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in sdma_v2_4_sw_init() local
966 r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq); in sdma_v2_4_sw_init()
971 r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq); in sdma_v2_4_sw_init()
976 r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq); in sdma_v2_4_sw_init()
980 r = sdma_v2_4_init_microcode(adev); in sdma_v2_4_sw_init()
986 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v2_4_sw_init()
987 ring = &adev->sdma.instance[i].ring; in sdma_v2_4_sw_init()
991 r = amdgpu_ring_init(adev, ring, 256 * 1024, in sdma_v2_4_sw_init()
993 &adev->sdma.trap_irq, in sdma_v2_4_sw_init()
1006 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in sdma_v2_4_sw_fini() local
1009 for (i = 0; i < adev->sdma.num_instances; i++) in sdma_v2_4_sw_fini()
1010 amdgpu_ring_fini(&adev->sdma.instance[i].ring); in sdma_v2_4_sw_fini()
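sw_init binds three VI interrupt source IDs: 224 for the SDMA trap and both 241 and 247 for illegal instructions, the latter two sharing one handler object. A toy registration table showing that sharing (irq_add_id() here is a stand-in for amdgpu_irq_add_id()):

    #include <stdio.h>

    struct irq_src { const char *name; };

    static struct irq_src trap    = { "sdma_trap" };
    static struct irq_src illegal = { "sdma_illegal_inst" };
    static struct irq_src *table[256];

    /* Stand-in for amdgpu_irq_add_id(): map a source id to a handler. */
    static int irq_add_id(unsigned src_id, struct irq_src *src)
    {
        if (src_id >= 256)
            return -1;
        table[src_id] = src;
        return 0;
    }

    int main(void)
    {
        irq_add_id(224, &trap);     /* SDMA trap */
        irq_add_id(241, &illegal);  /* illegal instruction */
        irq_add_id(247, &illegal);  /* illegal instruction (shared handler) */

        printf("224->%s 241->%s 247->%s\n",
               table[224]->name, table[241]->name, table[247]->name);
        return 0;
    }

After the IRQ sources are in place, each instance's ring is created with a 256 KiB buffer against the shared trap source; sw_fini simply tears the rings down in the same loop shape.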
1018 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in sdma_v2_4_hw_init() local
1020 sdma_v2_4_init_golden_registers(adev); in sdma_v2_4_hw_init()
1022 r = sdma_v2_4_start(adev); in sdma_v2_4_hw_init()
1031 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in sdma_v2_4_hw_fini() local
1033 sdma_v2_4_enable(adev, false); in sdma_v2_4_hw_fini()
1040 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in sdma_v2_4_suspend() local
1042 return sdma_v2_4_hw_fini(adev); in sdma_v2_4_suspend()
1047 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in sdma_v2_4_resume() local
1049 return sdma_v2_4_hw_init(adev); in sdma_v2_4_resume()
1054 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in sdma_v2_4_is_idle() local
1068 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in sdma_v2_4_wait_for_idle() local
1070 for (i = 0; i < adev->usec_timeout; i++) { in sdma_v2_4_wait_for_idle()
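wait_for_idle is a bounded poll: read a status register, test the two SDMA busy bits, and give up after adev->usec_timeout iterations. A self-contained sketch (the bit masks stand in for SRBM_STATUS2's SDMA busy fields, and read_status() fakes the engines going idle after one poll):

    #include <stdint.h>
    #include <stdio.h>

    #define SDMA_BUSY  (1u << 5)  /* illustrative busy-bit positions */
    #define SDMA1_BUSY (1u << 6)

    static uint32_t srbm_status2 = SDMA_BUSY;  /* busy on the first read */

    /* Fakes the engines draining: busy once, idle afterwards. */
    static uint32_t read_status(void)
    {
        uint32_t v = srbm_status2;
        srbm_status2 = 0;
        return v;
    }

    int main(void)
    {
        const int usec_timeout = 100000;  /* models adev->usec_timeout */
        int i;

        for (i = 0; i < usec_timeout; i++) {
            if (!(read_status() & (SDMA_BUSY | SDMA1_BUSY)))
                break;  /* both engines idle */
            /* the driver delays ~1us per iteration here */
        }

        if (i < usec_timeout)
            printf("idle after %d polls\n", i);
        else
            printf("timed out waiting for idle\n");
        return 0;
    }

is_idle is the one-shot version of the same check; suspend and resume above simply delegate to hw_fini and hw_init.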
1084 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in sdma_v2_4_print_status() local
1086 dev_info(adev->dev, "VI SDMA registers\n"); in sdma_v2_4_print_status()
1087 dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", in sdma_v2_4_print_status()
1089 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v2_4_print_status()
1090 dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", in sdma_v2_4_print_status()
1092 dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", in sdma_v2_4_print_status()
1094 dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n", in sdma_v2_4_print_status()
1096 dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n", in sdma_v2_4_print_status()
1098 dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n", in sdma_v2_4_print_status()
1100 dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n", in sdma_v2_4_print_status()
1102 dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n", in sdma_v2_4_print_status()
1104 dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n", in sdma_v2_4_print_status()
1106 dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n", in sdma_v2_4_print_status()
1108 dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n", in sdma_v2_4_print_status()
1110 dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n", in sdma_v2_4_print_status()
1112 dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", in sdma_v2_4_print_status()
1114 mutex_lock(&adev->srbm_mutex); in sdma_v2_4_print_status()
1116 vi_srbm_select(adev, 0, 0, 0, j); in sdma_v2_4_print_status()
1117 dev_info(adev->dev, " VM %d:\n", j); in sdma_v2_4_print_status()
1118 dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n", in sdma_v2_4_print_status()
1120 dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n", in sdma_v2_4_print_status()
1123 vi_srbm_select(adev, 0, 0, 0, 0); in sdma_v2_4_print_status()
1124 mutex_unlock(&adev->srbm_mutex); in sdma_v2_4_print_status()
1131 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in sdma_v2_4_soft_reset() local
1150 sdma_v2_4_print_status((void *)adev); in sdma_v2_4_soft_reset()
1154 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); in sdma_v2_4_soft_reset()
1167 sdma_v2_4_print_status((void *)adev); in sdma_v2_4_soft_reset()
1173 static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev, in sdma_v2_4_set_trap_irq_state() argument
1219 static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev, in sdma_v2_4_process_trap_irq() argument
1232 amdgpu_fence_process(&adev->sdma.instance[0].ring); in sdma_v2_4_process_trap_irq()
1245 amdgpu_fence_process(&adev->sdma.instance[1].ring); in sdma_v2_4_process_trap_irq()
1259 static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev, in sdma_v2_4_process_illegal_inst_irq() argument
1264 schedule_work(&adev->reset_work); in sdma_v2_4_process_illegal_inst_irq()
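The trap handler decodes which engine and queue raised the interrupt from the IV entry before kicking fence processing; the illegal-instruction handler just schedules the device reset work. A sketch of the dispatch, assuming the instance sits in bits 1:0 of ring_id and the queue in bits 3:2 of the VI interrupt vector:

    #include <stdio.h>

    static void fence_process(unsigned inst)
    {
        printf("fence work on SDMA%u\n", inst);
    }

    /* Decode assumed from the VI interrupt vector layout. */
    static void process_trap(unsigned ring_id)
    {
        unsigned instance_id = (ring_id & 0x3) >> 0;
        unsigned queue_id    = (ring_id & 0xc) >> 2;

        if (queue_id == 0 && instance_id < 2)  /* gfx queue only */
            fence_process(instance_id);
    }

    int main(void)
    {
        process_trap(0x1);  /* instance 1, queue 0 */
        return 0;
    }

Deferring the illegal-instruction case to reset_work keeps the heavy GPU reset out of interrupt context.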
1313 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) in sdma_v2_4_set_ring_funcs() argument
1317 for (i = 0; i < adev->sdma.num_instances; i++) in sdma_v2_4_set_ring_funcs()
1318 adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs; in sdma_v2_4_set_ring_funcs()
1330 static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev) in sdma_v2_4_set_irq_funcs() argument
1332 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; in sdma_v2_4_set_irq_funcs()
1333 adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs; in sdma_v2_4_set_irq_funcs()
1334 adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs; in sdma_v2_4_set_irq_funcs()
1396 static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev) in sdma_v2_4_set_buffer_funcs() argument
1398 if (adev->mman.buffer_funcs == NULL) { in sdma_v2_4_set_buffer_funcs()
1399 adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs; in sdma_v2_4_set_buffer_funcs()
1400 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; in sdma_v2_4_set_buffer_funcs()
1411 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) in sdma_v2_4_set_vm_pte_funcs() argument
1413 if (adev->vm_manager.vm_pte_funcs == NULL) { in sdma_v2_4_set_vm_pte_funcs()
1414 adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; in sdma_v2_4_set_vm_pte_funcs()
1415 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring; in sdma_v2_4_set_vm_pte_funcs()
1416 adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; in sdma_v2_4_set_vm_pte_funcs()
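Both set_buffer_funcs and set_vm_pte_funcs use the same assign-if-unclaimed pattern: install the SDMA backend only when no earlier IP block already filled the slot, so SDMA instance 0 becomes the default buffer mover and PTE writer. A minimal model with made-up types:

    #include <stddef.h>
    #include <stdio.h>

    struct buffer_funcs { const char *name; };

    static const struct buffer_funcs sdma_buffer_funcs = { "sdma_v2_4" };

    /* Made-up container standing in for adev->mman. */
    struct mman_model { const struct buffer_funcs *buffer_funcs; };

    static void set_buffer_funcs(struct mman_model *mman)
    {
        if (mman->buffer_funcs == NULL)  /* only claim an empty slot */
            mman->buffer_funcs = &sdma_buffer_funcs;
    }

    int main(void)
    {
        struct mman_model mman = { NULL };

        set_buffer_funcs(&mman);
        set_buffer_funcs(&mman);  /* second call is a no-op */
        printf("buffer funcs: %s\n", mman.buffer_funcs->name);
        return 0;
    }

The vm_pte variant additionally flags the chosen ring with is_pte_ring, as the final line of the listing shows.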