This source file includes the following definitions:
- uvd_v6_0_enc_support
- uvd_v6_0_ring_get_rptr
- uvd_v6_0_enc_ring_get_rptr
- uvd_v6_0_ring_get_wptr
- uvd_v6_0_enc_ring_get_wptr
- uvd_v6_0_ring_set_wptr
- uvd_v6_0_enc_ring_set_wptr
- uvd_v6_0_enc_ring_test_ring
- uvd_v6_0_enc_get_create_msg
- uvd_v6_0_enc_get_destroy_msg
- uvd_v6_0_enc_ring_test_ib
- uvd_v6_0_early_init
- uvd_v6_0_sw_init
- uvd_v6_0_sw_fini
- uvd_v6_0_hw_init
- uvd_v6_0_hw_fini
- uvd_v6_0_suspend
- uvd_v6_0_resume
- uvd_v6_0_mc_resume
- cz_set_uvd_clock_gating_branches
- uvd_v6_0_start
- uvd_v6_0_stop
- uvd_v6_0_ring_emit_fence
- uvd_v6_0_enc_ring_emit_fence
- uvd_v6_0_ring_emit_hdp_flush
- uvd_v6_0_ring_test_ring
- uvd_v6_0_ring_emit_ib
- uvd_v6_0_enc_ring_emit_ib
- uvd_v6_0_ring_emit_wreg
- uvd_v6_0_ring_emit_vm_flush
- uvd_v6_0_ring_emit_pipeline_sync
- uvd_v6_0_ring_insert_nop
- uvd_v6_0_enc_ring_emit_pipeline_sync
- uvd_v6_0_enc_ring_insert_end
- uvd_v6_0_enc_ring_emit_vm_flush
- uvd_v6_0_is_idle
- uvd_v6_0_wait_for_idle
- uvd_v6_0_check_soft_reset
- uvd_v6_0_pre_soft_reset
- uvd_v6_0_soft_reset
- uvd_v6_0_post_soft_reset
- uvd_v6_0_set_interrupt_state
- uvd_v6_0_process_interrupt
- uvd_v6_0_enable_clock_gating
- uvd_v6_0_set_sw_clock_gating
- uvd_v6_0_set_hw_clock_gating
- uvd_v6_0_enable_mgcg
- uvd_v6_0_set_clockgating_state
- uvd_v6_0_set_powergating_state
- uvd_v6_0_get_clockgating_state
- uvd_v6_0_set_ring_funcs
- uvd_v6_0_set_enc_ring_funcs
- uvd_v6_0_set_irq_funcs
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

/* firmware version 1.130.16, encoded as (major << 24) | (minor << 16) | (rev << 8) */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable);

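/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if UVD ENC is usable: Polaris10 through VegaM, with
 * firmware either not yet queried or at least version 1.130.16.
 */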
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
        return ((adev->asic_type >= CHIP_POLARIS10) &&
                (adev->asic_type <= CHIP_VEGAM) &&
                (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}

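/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */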
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

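/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */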
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->uvd.inst->ring_enc[0])
                return RREG32(mmUVD_RB_RPTR);
        else
                return RREG32(mmUVD_RB_RPTR2);
}

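/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */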
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

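/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */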
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->uvd.inst->ring_enc[0])
                return RREG32(mmUVD_RB_WPTR);
        else
                return RREG32(mmUVD_RB_WPTR2);
}

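/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */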
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

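/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */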
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->uvd.inst->ring_enc[0])
                WREG32(mmUVD_RB_WPTR,
                       lower_32_bits(ring->wptr));
        else
                WREG32(mmUVD_RB_WPTR2,
                       lower_32_bits(ring->wptr));
}

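/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 * Writes an END command and waits for the read pointer to advance.
 */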
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr;
        unsigned i;
        int r;

        r = amdgpu_ring_alloc(ring, 16);
        if (r)
                return r;

        rptr = amdgpu_ring_get_rptr(ring);

        amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

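/**
 * uvd_v6_0_enc_get_create_msg - generate a stream create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: buffer object where the message is placed
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */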
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                       struct amdgpu_bo *bo,
                                       struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00010000;
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = addr;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

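/**
 * uvd_v6_0_enc_get_destroy_msg - generate a stream destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: buffer object where the message is placed
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */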
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
                                        uint32_t handle,
                                        struct amdgpu_bo *bo,
                                        struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00010000;
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = addr;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

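/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 */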
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        struct amdgpu_bo *bo = NULL;
        long r;

        r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, NULL);
        if (r)
                return r;

        r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;

        r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

error:
        dma_fence_put(fence);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
}

static int uvd_v6_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->uvd.num_uvd_inst = 1;

        if (!(adev->flags & AMD_IS_APU) &&
            (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
                return -ENOENT;

        uvd_v6_0_set_ring_funcs(adev);

        if (uvd_v6_0_enc_support(adev)) {
                adev->uvd.num_enc_rings = 2;
                uvd_v6_0_set_enc_ring_funcs(adev);
        }

        uvd_v6_0_set_irq_funcs(adev);

        return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int i, r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
        if (r)
                return r;

        /* UVD ENC TRAP */
        if (uvd_v6_0_enc_support(adev)) {
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
                        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
                        if (r)
                                return r;
                }
        }

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        if (!uvd_v6_0_enc_support(adev)) {
                for (i = 0; i < adev->uvd.num_enc_rings; ++i)
                        adev->uvd.inst->ring_enc[i].funcs = NULL;

                adev->uvd.inst->irq.num_types = 1;
                adev->uvd.num_enc_rings = 0;

                DRM_INFO("UVD ENC is disabled\n");
        }

        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        if (uvd_v6_0_enc_support(adev)) {
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
                        ring = &adev->uvd.inst->ring_enc[i];
                        sprintf(ring->name, "uvd_enc%d", i);
                        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
                        if (r)
                                return r;
                }
        }

        r = amdgpu_uvd_entity_init(adev);

        return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
        int i, r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        if (uvd_v6_0_enc_support(adev)) {
                for (i = 0; i < adev->uvd.num_enc_rings; ++i)
                        amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
        }

        return amdgpu_uvd_sw_fini(adev);
}

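/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */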
static int uvd_v6_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t tmp;
        int i, r;

        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
        uvd_v6_0_enable_mgcg(adev, true);

        r = amdgpu_ring_test_helper(ring);
        if (r)
                goto done;

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }

        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

        if (uvd_v6_0_enc_support(adev)) {
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
                        ring = &adev->uvd.inst->ring_enc[i];
                        r = amdgpu_ring_test_helper(ring);
                        if (r)
                                goto done;
                }
        }

done:
        if (!r) {
                if (uvd_v6_0_enc_support(adev))
                        DRM_INFO("UVD and UVD ENC initialized successfully.\n");
                else
                        DRM_INFO("UVD initialized successfully.\n");
        }

        return r;
}

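/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */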
static int uvd_v6_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;

        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v6_0_stop(adev);

        ring->sched.ready = false;

        return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v6_0_hw_fini(adev);
        if (r)
                return r;

        return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        return uvd_v6_0_hw_init(adev);
}

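/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */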
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
               lower_32_bits(adev->uvd.inst->gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
               upper_32_bits(adev->uvd.inst->gpu_addr));

        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE +
               (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

        WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
                                             bool enable)
{
        u32 data, data1;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        if (enable) {
                data |= UVD_CGC_GATE__SYS_MASK |
                        UVD_CGC_GATE__UDEC_MASK |
                        UVD_CGC_GATE__MPEG2_MASK |
                        UVD_CGC_GATE__RBC_MASK |
                        UVD_CGC_GATE__LMI_MC_MASK |
                        UVD_CGC_GATE__IDCT_MASK |
                        UVD_CGC_GATE__MPRD_MASK |
                        UVD_CGC_GATE__MPC_MASK |
                        UVD_CGC_GATE__LBSI_MASK |
                        UVD_CGC_GATE__LRBBM_MASK |
                        UVD_CGC_GATE__UDEC_RE_MASK |
                        UVD_CGC_GATE__UDEC_CM_MASK |
                        UVD_CGC_GATE__UDEC_IT_MASK |
                        UVD_CGC_GATE__UDEC_DB_MASK |
                        UVD_CGC_GATE__UDEC_MP_MASK |
                        UVD_CGC_GATE__WCB_MASK |
                        UVD_CGC_GATE__VCPU_MASK |
                        UVD_CGC_GATE__SCPU_MASK;
                data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                        UVD_SUVD_CGC_GATE__SIT_MASK |
                        UVD_SUVD_CGC_GATE__SMP_MASK |
                        UVD_SUVD_CGC_GATE__SCM_MASK |
                        UVD_SUVD_CGC_GATE__SDB_MASK |
                        UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                        UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                        UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                        UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                        UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
        } else {
                data &= ~(UVD_CGC_GATE__SYS_MASK |
                          UVD_CGC_GATE__UDEC_MASK |
                          UVD_CGC_GATE__MPEG2_MASK |
                          UVD_CGC_GATE__RBC_MASK |
                          UVD_CGC_GATE__LMI_MC_MASK |
                          UVD_CGC_GATE__LMI_UMC_MASK |
                          UVD_CGC_GATE__IDCT_MASK |
                          UVD_CGC_GATE__MPRD_MASK |
                          UVD_CGC_GATE__MPC_MASK |
                          UVD_CGC_GATE__LBSI_MASK |
                          UVD_CGC_GATE__LRBBM_MASK |
                          UVD_CGC_GATE__UDEC_RE_MASK |
                          UVD_CGC_GATE__UDEC_CM_MASK |
                          UVD_CGC_GATE__UDEC_IT_MASK |
                          UVD_CGC_GATE__UDEC_DB_MASK |
                          UVD_CGC_GATE__UDEC_MP_MASK |
                          UVD_CGC_GATE__WCB_MASK |
                          UVD_CGC_GATE__VCPU_MASK |
                          UVD_CGC_GATE__SCPU_MASK);
                data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
                           UVD_SUVD_CGC_GATE__SIT_MASK |
                           UVD_SUVD_CGC_GATE__SMP_MASK |
                           UVD_SUVD_CGC_GATE__SCM_MASK |
                           UVD_SUVD_CGC_GATE__SDB_MASK |
                           UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                           UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                           UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                           UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                           UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                           UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                           UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                           UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
        }
        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

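/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */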
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v6_0_mc_resume(adev);

        /* disable interrupt */
        WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

        /* stall UMC and register bus before resetting VCPU */
        WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET,
               UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
               UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
               UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
               UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
               UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
               UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
               UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
               UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL,
               (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
               UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
               UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
               UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
               UVD_LMI_CTRL__REQ_MODE_MASK |
               UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

        /* enable UMC */
        WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);

        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
                mdelay(10);
                WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }

        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN,
                 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
                 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

        /* clear the bit 4 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

        /* force RBC into idle state */
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
               lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
               upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

        WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

        if (uvd_v6_0_enc_support(adev)) {
                ring = &adev->uvd.inst->ring_enc[0];
                WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
                WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
                WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
                WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
                WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

                ring = &adev->uvd.inst->ring_enc[1];
                WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
                WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
                WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
                WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
                WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
        }

        return 0;
}

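/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */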
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        WREG32(mmUVD_STATUS, 0);
}

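/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */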
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

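/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */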
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
                                         u64 seq, unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
        amdgpu_ring_write(ring, addr);
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

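/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 *
 * Intentionally a no-op; see the comment in the body.
 */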
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        /* The firmware doesn't seem to like touching registers at this point. */
}

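/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */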
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

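/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */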
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
                                  uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
        amdgpu_ring_write(ring, vmid);

        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

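/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */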
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
                                      struct amdgpu_job *job,
                                      struct amdgpu_ib *ib,
                                      uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
                                    uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, val);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                        unsigned vmid, uint64_t pd_addr)
{
        amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

        /* wait for the invalidate request bit to clear */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
        amdgpu_ring_write(ring, 1 << vmid); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xE);
}

static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        int i;

        WARN_ON(ring->wptr % 2 || count % 2);

        for (i = 0; i < count / 2; i++) {
                amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
                amdgpu_ring_write(ring, 0);
        }
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                            unsigned int vmid, uint64_t pd_addr)
{
        amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, pd_addr >> 12);

        amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
        amdgpu_ring_write(ring, vmid);
}

static bool uvd_v6_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (uvd_v6_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
            REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
            (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

        if (srbm_soft_reset) {
                adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
                return true;
        } else {
                adev->uvd.inst->srbm_soft_reset = 0;
                return false;
        }
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!adev->uvd.inst->srbm_soft_reset)
                return 0;

        uvd_v6_0_stop(adev);
        return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset;

        if (!adev->uvd.inst->srbm_soft_reset)
                return 0;
        srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

        if (srbm_soft_reset) {
                u32 tmp;

                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }

        return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!adev->uvd.inst->srbm_soft_reset)
                return 0;

        mdelay(5);

        return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        bool int_handled = true;

        DRM_DEBUG("IH: UVD TRAP\n");

        switch (entry->src_id) {
        case 124:
                amdgpu_fence_process(&adev->uvd.inst->ring);
                break;
        case 119:
                if (likely(uvd_v6_0_enc_support(adev)))
                        amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
                else
                        int_handled = false;
                break;
        case 120:
                if (likely(uvd_v6_0_enc_support(adev)))
                        amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
                else
                        int_handled = false;
                break;
        }

        if (!int_handled)
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);

        return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
        uint32_t data1, data3;

        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        data3 = RREG32(mmUVD_CGC_GATE);

        data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                 UVD_SUVD_CGC_GATE__SIT_MASK |
                 UVD_SUVD_CGC_GATE__SMP_MASK |
                 UVD_SUVD_CGC_GATE__SCM_MASK |
                 UVD_SUVD_CGC_GATE__SDB_MASK |
                 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

        if (enable) {
                data3 |= (UVD_CGC_GATE__SYS_MASK |
                          UVD_CGC_GATE__UDEC_MASK |
                          UVD_CGC_GATE__MPEG2_MASK |
                          UVD_CGC_GATE__RBC_MASK |
                          UVD_CGC_GATE__LMI_MC_MASK |
                          UVD_CGC_GATE__LMI_UMC_MASK |
                          UVD_CGC_GATE__IDCT_MASK |
                          UVD_CGC_GATE__MPRD_MASK |
                          UVD_CGC_GATE__MPC_MASK |
                          UVD_CGC_GATE__LBSI_MASK |
                          UVD_CGC_GATE__LRBBM_MASK |
                          UVD_CGC_GATE__UDEC_RE_MASK |
                          UVD_CGC_GATE__UDEC_CM_MASK |
                          UVD_CGC_GATE__UDEC_IT_MASK |
                          UVD_CGC_GATE__UDEC_DB_MASK |
                          UVD_CGC_GATE__UDEC_MP_MASK |
                          UVD_CGC_GATE__WCB_MASK |
                          UVD_CGC_GATE__JPEG_MASK |
                          UVD_CGC_GATE__SCPU_MASK |
                          UVD_CGC_GATE__JPEG2_MASK);
                /* only when pg is enabled can we gate the clock to the vcpu */
                if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
                        data3 |= UVD_CGC_GATE__VCPU_MASK;

                data3 &= ~UVD_CGC_GATE__REGS_MASK;
        } else {
                data3 = 0;
        }

        WREG32(mmUVD_SUVD_CGC_GATE, data1);
        WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data2;

        data = RREG32(mmUVD_CGC_CTRL);
        data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
                (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                  UVD_CGC_CTRL__SYS_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_MODE_MASK |
                  UVD_CGC_CTRL__MPEG2_MODE_MASK |
                  UVD_CGC_CTRL__REGS_MODE_MASK |
                  UVD_CGC_CTRL__RBC_MODE_MASK |
                  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                  UVD_CGC_CTRL__IDCT_MODE_MASK |
                  UVD_CGC_CTRL__MPRD_MODE_MASK |
                  UVD_CGC_CTRL__MPC_MODE_MASK |
                  UVD_CGC_CTRL__LBSI_MODE_MASK |
                  UVD_CGC_CTRL__LRBBM_MODE_MASK |
                  UVD_CGC_CTRL__WCB_MODE_MASK |
                  UVD_CGC_CTRL__VCPU_MODE_MASK |
                  UVD_CGC_CTRL__JPEG_MODE_MASK |
                  UVD_CGC_CTRL__SCPU_MODE_MASK |
                  UVD_CGC_CTRL__JPEG2_MODE_MASK);
        data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

        WREG32(mmUVD_CGC_CTRL, data);
        WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, cgc_flags, suvd_flags;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);

        cgc_flags = UVD_CGC_GATE__SYS_MASK |
                    UVD_CGC_GATE__UDEC_MASK |
                    UVD_CGC_GATE__MPEG2_MASK |
                    UVD_CGC_GATE__RBC_MASK |
                    UVD_CGC_GATE__LMI_MC_MASK |
                    UVD_CGC_GATE__IDCT_MASK |
                    UVD_CGC_GATE__MPRD_MASK |
                    UVD_CGC_GATE__MPC_MASK |
                    UVD_CGC_GATE__LBSI_MASK |
                    UVD_CGC_GATE__LRBBM_MASK |
                    UVD_CGC_GATE__UDEC_RE_MASK |
                    UVD_CGC_GATE__UDEC_CM_MASK |
                    UVD_CGC_GATE__UDEC_IT_MASK |
                    UVD_CGC_GATE__UDEC_DB_MASK |
                    UVD_CGC_GATE__UDEC_MP_MASK |
                    UVD_CGC_GATE__WCB_MASK |
                    UVD_CGC_GATE__VCPU_MASK |
                    UVD_CGC_GATE__SCPU_MASK |
                    UVD_CGC_GATE__JPEG_MASK |
                    UVD_CGC_GATE__JPEG2_MASK;

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                     UVD_SUVD_CGC_GATE__SIT_MASK |
                     UVD_SUVD_CGC_GATE__SMP_MASK |
                     UVD_SUVD_CGC_GATE__SCM_MASK |
                     UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= cgc_flags;
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data |= 0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        } else {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data &= ~0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        }
}

static int uvd_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE);

        if (enable) {
                /* wait for STATUS to clear */
                if (uvd_v6_0_wait_for_idle(handle))
                        return -EBUSY;
                uvd_v6_0_enable_clock_gating(adev, true);
                /* enable HW gates because UVD is idle */
        } else {
                /* disable HW gating and enable Sw gating */
                uvd_v6_0_enable_clock_gating(adev, false);
        }
        uvd_v6_0_set_sw_clock_gating(adev);
        return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret = 0;

        WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

        if (state == AMD_PG_STATE_GATE) {
                uvd_v6_0_stop(adev);
        } else {
                ret = uvd_v6_0_start(adev);
                if (ret)
                        goto out;
        }

out:
        return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int data;

        mutex_lock(&adev->pm.mutex);

        if (adev->flags & AMD_IS_APU)
                data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
        else
                data = RREG32_SMC(ixCURRENT_PG_STATUS);

        if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
                DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
                goto out;
        }

        /* AMD_CG_SUPPORT_UVD_MGCG */
        data = RREG32(mmUVD_CGC_CTRL);
        if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
                *flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
        mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
        .name = "uvd_v6_0",
        .early_init = uvd_v6_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v6_0_sw_init,
        .sw_fini = uvd_v6_0_sw_fini,
        .hw_init = uvd_v6_0_hw_init,
        .hw_fini = uvd_v6_0_hw_fini,
        .suspend = uvd_v6_0_suspend,
        .resume = uvd_v6_0_resume,
        .is_idle = uvd_v6_0_is_idle,
        .wait_for_idle = uvd_v6_0_wait_for_idle,
        .check_soft_reset = uvd_v6_0_check_soft_reset,
        .pre_soft_reset = uvd_v6_0_pre_soft_reset,
        .soft_reset = uvd_v6_0_soft_reset,
        .post_soft_reset = uvd_v6_0_post_soft_reset,
        .set_clockgating_state = uvd_v6_0_set_clockgating_state,
        .set_powergating_state = uvd_v6_0_set_powergating_state,
        .get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_frame_size =
                6 + /* hdp invalidate */
                10 + /* uvd_v6_0_ring_emit_pipeline_sync */
                14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
        .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = uvd_v6_0_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
        .emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .emit_frame_size =
                6 + /* hdp invalidate */
                10 + /* uvd_v6_0_ring_emit_pipeline_sync */
                VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
                14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
        .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
        .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = uvd_v6_0_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
        .emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_UVD_ENC,
        .align_mask = 0x3f,
        .nop = HEVC_ENC_CMD_NO_OP,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = uvd_v6_0_enc_ring_get_rptr,
        .get_wptr = uvd_v6_0_enc_ring_get_wptr,
        .set_wptr = uvd_v6_0_enc_ring_set_wptr,
        .emit_frame_size =
                4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
                5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
                5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
                1, /* uvd_v6_0_enc_ring_insert_end */
        .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
        .emit_ib = uvd_v6_0_enc_ring_emit_ib,
        .emit_fence = uvd_v6_0_enc_ring_emit_fence,
        .emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
        .emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
        .test_ring = uvd_v6_0_enc_ring_test_ring,
        .test_ib = uvd_v6_0_enc_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .insert_end = uvd_v6_0_enc_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
        if (adev->asic_type >= CHIP_POLARIS10) {
                adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
                DRM_INFO("UVD is enabled in VM mode\n");
        } else {
                adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
                DRM_INFO("UVD is enabled in physical mode\n");
        }
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->uvd.num_enc_rings; ++i)
                adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

        DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
        .set = uvd_v6_0_set_interrupt_state,
        .process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
        if (uvd_v6_0_enc_support(adev))
                adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
        else
                adev->uvd.inst->irq.num_types = 1;

        adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 6,
        .minor = 0,
        .rev = 0,
        .funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 6,
        .minor = 2,
        .rev = 0,
        .funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 6,
        .minor = 3,
        .rev = 0,
        .funcs = &uvd_v6_0_ip_funcs,
};