This source file includes the following definitions.
- sdma_v2_4_init_golden_registers
- sdma_v2_4_free_microcode
- sdma_v2_4_init_microcode
- sdma_v2_4_ring_get_rptr
- sdma_v2_4_ring_get_wptr
- sdma_v2_4_ring_set_wptr
- sdma_v2_4_ring_insert_nop
- sdma_v2_4_ring_emit_ib
- sdma_v2_4_ring_emit_hdp_flush
- sdma_v2_4_ring_emit_fence
- sdma_v2_4_gfx_stop
- sdma_v2_4_rlc_stop
- sdma_v2_4_enable
- sdma_v2_4_gfx_resume
- sdma_v2_4_rlc_resume
- sdma_v2_4_start
- sdma_v2_4_ring_test_ring
- sdma_v2_4_ring_test_ib
- sdma_v2_4_vm_copy_pte
- sdma_v2_4_vm_write_pte
- sdma_v2_4_vm_set_pte_pde
- sdma_v2_4_ring_pad_ib
- sdma_v2_4_ring_emit_pipeline_sync
- sdma_v2_4_ring_emit_vm_flush
- sdma_v2_4_ring_emit_wreg
- sdma_v2_4_early_init
- sdma_v2_4_sw_init
- sdma_v2_4_sw_fini
- sdma_v2_4_hw_init
- sdma_v2_4_hw_fini
- sdma_v2_4_suspend
- sdma_v2_4_resume
- sdma_v2_4_is_idle
- sdma_v2_4_wait_for_idle
- sdma_v2_4_soft_reset
- sdma_v2_4_set_trap_irq_state
- sdma_v2_4_process_trap_irq
- sdma_v2_4_process_illegal_inst_irq
- sdma_v2_4_set_clockgating_state
- sdma_v2_4_set_powergating_state
- sdma_v2_4_set_ring_funcs
- sdma_v2_4_set_irq_funcs
- sdma_v2_4_emit_copy_buffer
- sdma_v2_4_emit_fill_buffer
- sdma_v2_4_set_buffer_funcs
- sdma_v2_4_set_vm_pte_funcs
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "iceland_sdma_pkt_open.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static const u32 golden_settings_iceland_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

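/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 rings for compute.
 */

/**
 * sdma_v2_4_init_golden_registers - program golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Program the per-ASIC golden register settings for the SDMA blocks (VI).
 */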
static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}
}

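/**
 * sdma_v2_4_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */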
static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	default:
		BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}

out:
	if (err) {
		pr_err("sdma_v2_4: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

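/**
 * sdma_v2_4_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */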
static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
	/* XXX check if swapping is necessary on BE */
	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}

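/**
 * sdma_v2_4_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */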
static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;

	return wptr;
}

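/**
 * sdma_v2_4_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */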
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
	       lower_32_bits(ring->wptr) << 2);
}

static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

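/**
 * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (VI).
 */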
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* IB packet must end on a 8 DW boundary */
	sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

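/**
 * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */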
static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring->me == 0)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

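/**
 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number at the requested address (VI).
 */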
static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

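/**
 * sdma_v2_4_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */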
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_buffer_funcs_status(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
	sdma0->sched.ready = false;
	sdma1->sched.ready = false;
}

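/**
 * sdma_v2_4_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */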
static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

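/**
 * sdma_v2_4_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */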
static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v2_4_gfx_stop(adev);
		sdma_v2_4_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}

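/**
 * sdma_v2_4_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */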
static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->sched.ready = true;
	}

	sdma_v2_4_enable(adev, true);
	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}

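/**
 * sdma_v2_4_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */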
static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

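/**
 * sdma_v2_4_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */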
static int sdma_v2_4_start(struct amdgpu_device *adev)
{
	int r;

	/* halt the engines while setting up */
	sdma_v2_4_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v2_4_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v2_4_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

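/**
 * sdma_v2_4_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */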
static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}

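/**
 * sdma_v2_4_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */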
static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r)
		goto err0;

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

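/**
 * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VI).
 */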
static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

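/**
 * sdma_v2_4_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (VI).
 */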
static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

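/**
 * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VI).
 */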
static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count; /* number of entries */
}

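/**
 * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */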
static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (-ib->length_dw) & 7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

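/**
 * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (VI).
 */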
static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

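/**
 * sdma_v2_4_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */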
static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static int sdma_v2_4_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = SDMA_MAX_INSTANCE;

	sdma_v2_4_set_ring_funcs(adev);
	sdma_v2_4_set_buffer_funcs(adev);
	sdma_v2_4_set_vm_pte_funcs(adev);
	sdma_v2_4_set_irq_funcs(adev);

	return 0;
}

static int sdma_v2_4_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	r = sdma_v2_4_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v2_4_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	sdma_v2_4_free_microcode(adev);
	return 0;
}

static int sdma_v2_4_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v2_4_init_golden_registers(adev);

	r = sdma_v2_4_start(adev);
	if (r)
		return r;

	return r;
}

static int sdma_v2_4_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v2_4_enable(adev, false);

	return 0;
}

static int sdma_v2_4_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v2_4_hw_fini(adev);
}

static int sdma_v2_4_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v2_4_hw_init(adev);
}

static bool sdma_v2_4_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int sdma_v2_4_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
						SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int sdma_v2_4_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_INSTANCE0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_INSTANCE1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;

	if (instance_id <= 1 && queue_id == 0)
		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
	return 0;
}

static int sdma_v2_4_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	/* XXX handled via the smc on VI */
	return 0;
}

static int sdma_v2_4_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
	.name = "sdma_v2_4",
	.early_init = sdma_v2_4_early_init,
	.late_init = NULL,
	.sw_init = sdma_v2_4_sw_init,
	.sw_fini = sdma_v2_4_sw_fini,
	.hw_init = sdma_v2_4_hw_init,
	.hw_fini = sdma_v2_4_hw_fini,
	.suspend = sdma_v2_4_suspend,
	.resume = sdma_v2_4_resume,
	.is_idle = sdma_v2_4_is_idle,
	.wait_for_idle = sdma_v2_4_wait_for_idle,
	.soft_reset = sdma_v2_4_soft_reset,
	.set_clockgating_state = sdma_v2_4_set_clockgating_state,
	.set_powergating_state = sdma_v2_4_set_powergating_state,
};

static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = false,
	.get_rptr = sdma_v2_4_ring_get_rptr,
	.get_wptr = sdma_v2_4_ring_get_wptr,
	.set_wptr = sdma_v2_4_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v2_4_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */
		10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
	.emit_ib = sdma_v2_4_ring_emit_ib,
	.emit_fence = sdma_v2_4_ring_emit_fence,
	.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
	.test_ring = sdma_v2_4_ring_test_ring,
	.test_ib = sdma_v2_4_ring_test_ib,
	.insert_nop = sdma_v2_4_ring_insert_nop,
	.pad_ib = sdma_v2_4_ring_pad_ib,
	.emit_wreg = sdma_v2_4_ring_emit_wreg,
};

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
	.set = sdma_v2_4_set_trap_irq_state,
	.process = sdma_v2_4_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
	.process = sdma_v2_4_process_illegal_inst_irq,
};

static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
}

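/**
 * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */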
static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

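/**
 * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */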
static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v2_4_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 7,
	.emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
};

static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v2_4_vm_copy_pte,

	.write_pte = sdma_v2_4_vm_write_pte,
	.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
};

static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	struct drm_gpu_scheduler *sched;
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		sched = &adev->sdma.instance[i].ring.sched;
		adev->vm_manager.vm_pte_rqs[i] =
			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	}
	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 2,
	.minor = 4,
	.rev = 0,
	.funcs = &sdma_v2_4_ip_funcs,
};