This source file includes the following definitions:
- uvd_v1_0_get_rptr
- uvd_v1_0_get_wptr
- uvd_v1_0_set_wptr
- uvd_v1_0_fence_emit
- uvd_v1_0_resume
- uvd_v1_0_init
- uvd_v1_0_fini
- uvd_v1_0_start
- uvd_v1_0_stop
- uvd_v1_0_ring_test
- uvd_v1_0_semaphore_emit
- uvd_v1_0_ib_execute
- uvd_v1_0_ib_test
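
These hooks are not called directly; the radeon core reaches them through a per-ring
callback table. A minimal sketch of that wiring is shown below. It assumes the
struct radeon_asic_ring callback layout used by the radeon driver and uses a
hypothetical table name (the real tables live in radeon_asic.c); treat the exact
field set as illustrative rather than authoritative.

/* Sketch only, not part of this file: how the uvd_v1_0_* entry points are
 * typically plugged into a radeon_asic_ring table (field names assumed from
 * the radeon driver's radeon_asic.h).
 */
static struct radeon_asic_ring uvd_v1_0_ring_callbacks_sketch = {
	.ib_execute	= &uvd_v1_0_ib_execute,
	.emit_fence	= &uvd_v1_0_fence_emit,
	.emit_semaphore	= &uvd_v1_0_semaphore_emit,
	.ring_test	= &uvd_v1_0_ring_test,
	.ib_test	= &uvd_v1_0_ib_test,
	.get_rptr	= &uvd_v1_0_get_rptr,
	.get_wptr	= &uvd_v1_0_get_wptr,
	.set_wptr	= &uvd_v1_0_set_wptr,
};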

#include <linux/firmware.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"

/**
 * uvd_v1_0_get_rptr - get read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Returns the current hardware read pointer.
 */
uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return RREG32(UVD_RBC_RB_RPTR);
}

/**
 * uvd_v1_0_get_wptr - get write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Returns the current hardware write pointer.
 */
uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return RREG32(UVD_RBC_RB_WPTR);
}

/**
 * uvd_v1_0_set_wptr - set write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Commits the write pointer to the hardware.
 */
void uvd_v1_0_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(UVD_RBC_RB_WPTR, ring->wptr);
}

/**
 * uvd_v1_0_fence_emit - emit a fence and a trap command
 *
 * @rdev: radeon_device pointer
 * @fence: fence to emit
 *
 * Write a fence and a trap command to the ring.
 */
void uvd_v1_0_fence_emit(struct radeon_device *rdev,
			 struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence sequence number to the fence address */
	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
	radeon_ring_write(ring, 0);

	/* emit the trap command */
	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
	radeon_ring_write(ring, 2);
	return;
}

/**
 * uvd_v1_0_resume - memory controller programming
 *
 * @rdev: radeon_device pointer
 *
 * Let the UVD memory controller know its offsets.
 */
int uvd_v1_0_resume(struct radeon_device *rdev)
{
	uint64_t addr;
	uint32_t size;
	int r;

	r = radeon_uvd_resume(rdev);
	if (r)
		return r;

	/* program the VCPU memory controller bits 0-27 */
	addr = (rdev->uvd.gpu_addr >> 3) + 16;
	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size) >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(UVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = RADEON_UVD_HEAP_SIZE >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(UVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (RADEON_UVD_STACK_SIZE +
	       (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(UVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
	WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
	WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(UVD_FW_START, *((uint32_t*)rdev->uvd.cpu_addr));

	return 0;
}

/**
 * uvd_v1_0_init - start and test UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing.
 */
int uvd_v1_0_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	uint32_t tmp;
	int r;

	/* raise clocks while booting up the VCPU */
	if (rdev->family < CHIP_RV740)
		radeon_set_uvd_clocks(rdev, 10000, 10000);
	else
		radeon_set_uvd_clocks(rdev, 53300, 40000);

	r = uvd_v1_0_start(rdev);
	if (r)
		goto done;

	ring->ready = true;
	r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = radeon_ring_lock(rdev, ring, 10);
	if (r) {
		DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	/* program the semaphore timeout registers */
	tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	/* clear the semaphore timeout status */
	radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
	radeon_ring_write(ring, 0x8);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
	radeon_ring_write(ring, 3);

	radeon_ring_unlock_commit(rdev, ring, false);

done:
	/* lower clocks again */
	radeon_set_uvd_clocks(rdev, 0, 0);

	if (!r) {
		switch (rdev->family) {
		case CHIP_RV610:
		case CHIP_RV630:
		case CHIP_RV620:
			/* extra MC configuration for these ASICs */
			WREG32(MC_CONFIG, 0);
			WREG32(MC_CONFIG, 1 << 4);
			WREG32(RS_DQ_RD_RET_CONF, 0x3f);
			WREG32(MC_CONFIG, 0x1f);
			/* fall through */
		case CHIP_RV670:
		case CHIP_RV635:
			/* set bit 4 of UVD_VCPU_CNTL on these ASICs as well */
			WREG32_P(UVD_VCPU_CNTL, 0x10, ~0x10);
			break;

		default:
			break;
		}

		DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

/**
 * uvd_v1_0_fini - stop the hardware block
 *
 * @rdev: radeon_device pointer
 *
 * Stop the UVD block and mark the ring as no longer ready.
 */
void uvd_v1_0_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];

	uvd_v1_0_stop(rdev);
	ring->ready = false;
}

/**
 * uvd_v1_0_start - start UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Setup and start the UVD block.
 */
int uvd_v1_0_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	uint32_t rb_bufsz;
	int i, j, r;

	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* disable clock gating */
	WREG32(UVD_CGC_GATE, 0);

	/* disable interrupts */
	WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
	       LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
	       CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(UVD_MPC_SET_MUXA1, 0x0);
	WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(UVD_MPC_SET_MUXB1, 0x0);
	WREG32(UVD_MPC_SET_ALU, 0);
	WREG32(UVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(UVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));

	/* boot up the VCPU */
	WREG32(UVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(UVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
		mdelay(10);
		WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupts */
	WREG32_P(UVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* force RBC into idle state */
	WREG32(UVD_RBC_RB_CNTL, 0x11010101);

	/* set the write pointer delay */
	WREG32(UVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
				   (0x7 << 16) | (0x1 << 31));

	/* initialize the ring buffer's read and write pointers */
	WREG32(UVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(UVD_RBC_RB_RPTR);
	WREG32(UVD_RBC_RB_WPTR, ring->wptr);

	/* set the ring address */
	WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);

	/* set the ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v1_0_stop - stop UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Stop the UVD block.
 */
void uvd_v1_0_stop(struct radeon_device *rdev)
{
	/* force RBC into idle state */
	WREG32(UVD_RBC_RB_CNTL, 0x11010101);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(UVD_VCPU_CNTL, 0x0);

	/* unstall UMC and register bus */
	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
}

/**
 * uvd_v1_0_ring_test - register write test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Test if we can successfully write to the context register.
 */
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(UVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v1_0_semaphore_emit - emit semaphore command
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 * @semaphore: semaphore to emit commands for
 * @emit_wait: true if we should emit a wait command
 *
 * Emit a semaphore command (either wait or signal) to the UVD ring.
 */
bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
			     struct radeon_ring *ring,
			     struct radeon_semaphore *semaphore,
			     bool emit_wait)
{
	/* disable semaphores for UVD V1 hardware */
	return false;
}

/**
 * uvd_v1_0_ib_execute - execute indirect buffer
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer.
 */
void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
	radeon_ring_write(ring, ib->gpu_addr);
	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
	radeon_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v1_0_ib_test - test ib execution
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Test if we can successfully execute an IB.
 */
int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	if (rdev->family < CHIP_RV740)
		r = radeon_set_uvd_clocks(rdev, 10000, 10000);
	else
		r = radeon_set_uvd_clocks(rdev, 53300, 40000);
	if (r) {
		DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
		return r;
	}

	r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
	if (r) {
		DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto error;
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
		r = -ETIMEDOUT;
		goto error;
	}
	r = 0;
	DRM_INFO("ib test on ring %d succeeded\n", ring->idx);

error:
	radeon_fence_unref(&fence);
	radeon_set_uvd_clocks(rdev, 0, 0);
	return r;
}