This source file includes the following definitions:
- _a6xx_check_idle
- a6xx_idle
- a6xx_flush
- get_stats_counter
- a6xx_submit
- a6xx_set_hwcg
- a6xx_cp_init
- a6xx_ucode_init
- a6xx_zap_shader_init
- a6xx_hw_init
- a6xx_dump
- a6xx_recover
- a6xx_fault_handler
- a6xx_cp_hw_err_irq
- a6xx_fault_detect_irq
- a6xx_irq
- a6xx_pm_resume
- a6xx_pm_suspend
- a6xx_get_timestamp
- a6xx_active_ring
- a6xx_destroy
- a6xx_gpu_busy
- a6xx_gpu_init
1
2
3
4
5 #include "msm_gem.h"
6 #include "msm_mmu.h"
7 #include "msm_gpu_trace.h"
8 #include "a6xx_gpu.h"
9 #include "a6xx_gmu.xml.h"
10
11 #include <linux/devfreq.h>
12
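/* Peripheral Authentication Service (PAS) id used when the secure world loads and authenticates the GPU zap shader */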
13 #define GPU_PAS_ID 13
14
15 static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
16 {
17 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
18 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
19
20 /* Check that the GMU is idle */
21 if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
22 return false;
23
24 /* Check that the CX master is idle */
25 if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
26 ~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
27 return false;
28
29 return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
30 A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
31 }
32
33 bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
34 {
35 /* Wait for the CP to drain the ringbuffer */
36 if (!adreno_idle(gpu, ring))
37 return false;
38
39 if (spin_until(_a6xx_check_idle(gpu))) {
40 DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
41 gpu->name, __builtin_return_address(0),
42 gpu_read(gpu, REG_A6XX_RBBM_STATUS),
43 gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
44 gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
45 gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
46 return false;
47 }
48
49 return true;
50 }
51
52 static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
53 {
54 uint32_t wptr;
55 unsigned long flags;
56
57 spin_lock_irqsave(&ring->lock, flags);
58
59 /* Copy the shadow to the actual register */
60 ring->cur = ring->next;
61
62 /* Make sure to wrap wptr if we need to */
63 wptr = get_wptr(ring);
64
65 spin_unlock_irqrestore(&ring->lock, flags);
66
67 /* Make sure everything is posted before making a decision */
68 mb();
69
70 gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
71 }
72
73 static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
74 u64 iova)
75 {
76 OUT_PKT7(ring, CP_REG_TO_MEM, 3);
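/* Copy the LO/HI counter pair to iova as one 64-bit value: (2 << 18) is the register count, (1 << 30) selects a 64-bit write */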
77 OUT_RING(ring, counter | (1 << 30) | (2 << 18));
78 OUT_RING(ring, lower_32_bits(iova));
79 OUT_RING(ring, upper_32_bits(iova));
80 }
81
82 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
83 struct msm_file_private *ctx)
84 {
85 unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
86 struct msm_drm_private *priv = gpu->dev->dev_private;
87 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
88 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
89 struct msm_ringbuffer *ring = submit->ring;
90 unsigned int i;
91
92 get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
93 rbmemptr_stats(ring, index, cpcycles_start));
94
95 /*
96 * The always-on counter lives in the GMU register space. The CP
97 * addresses GMU registers at a fixed offset (0x1a800 dwords) from the
98 * start of the GPU registers, hence the adjustment to the offset here.
99 */
100 get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
101 rbmemptr_stats(ring, index, alwayson_start));
102
103 /* Invalidate CCU depth and color */
104 OUT_PKT7(ring, CP_EVENT_WRITE, 1);
105 OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
106
107 OUT_PKT7(ring, CP_EVENT_WRITE, 1);
108 OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);
109
110 /* Submit the commands from the submit */
111 for (i = 0; i < submit->nr_cmds; i++) {
112 switch (submit->cmd[i].type) {
113 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
114 break;
115 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
116 if (priv->lastctx == ctx)
117 break;
118 /* fall-thru */
119 case MSM_SUBMIT_CMD_BUF:
120 OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
121 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
122 OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
123 OUT_RING(ring, submit->cmd[i].size);
124 break;
125 }
126 }
127
128 get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
129 rbmemptr_stats(ring, index, cpcycles_end));
130 get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
131 rbmemptr_stats(ring, index, alwayson_end));
132
133 /* Write the fence to the scratch register */
134 OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
135 OUT_RING(ring, submit->seqno);
136
137 /*
138 * Execute a CACHE_FLUSH_TS event. This ensures that the timestamp
139 * is written to memory and then triggers the interrupt
140 */
141 OUT_PKT7(ring, CP_EVENT_WRITE, 4);
142 OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
143 OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
144 OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
145 OUT_RING(ring, submit->seqno);
146
147 trace_msm_gpu_submit_flush(submit,
148 gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
149 REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
150
151 a6xx_flush(gpu, ring);
152 }
153
154 static const struct {
155 u32 offset;
156 u32 value;
157 } a6xx_hwcg[] = {
158 {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
159 {REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
160 {REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
161 {REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
162 {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
163 {REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
164 {REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
165 {REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
166 {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
167 {REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
168 {REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
169 {REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
170 {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
171 {REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
172 {REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
173 {REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
174 {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
175 {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
176 {REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
177 {REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
178 {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
179 {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
180 {REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
181 {REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
182 {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
183 {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
184 {REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
185 {REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
186 {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
187 {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
188 {REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
189 {REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
190 {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
191 {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
192 {REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
193 {REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
194 {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
195 {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
196 {REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
197 {REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
198 {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
199 {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
200 {REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
201 {REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
202 {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
203 {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
204 {REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
205 {REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
206 {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
207 {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
208 {REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
209 {REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
210 {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
211 {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
212 {REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
213 {REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
214 {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
215 {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
216 {REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
217 {REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
218 {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
219 {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
220 {REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
221 {REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
222 {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
223 {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
224 {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
225 {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
226 {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
227 {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
228 {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
229 {REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
230 {REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
231 {REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
232 {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
233 {REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
234 {REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
235 {REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
236 {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
237 {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
238 {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
239 {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
240 {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
241 {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
242 {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
243 {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
244 {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
245 {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
246 {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
247 {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
248 {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
249 {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
250 {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
251 {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
252 {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
253 {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
254 {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
255 {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
256 {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
257 {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
258 {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
259 {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
260 {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
261 {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
262 {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
263 };
264
265 static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
266 {
267 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
268 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
269 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
270 unsigned int i;
271 u32 val;
272
273 val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
274
275 /* Don't re-program the registers if they are already correct */
276 if ((!state && !val) || (state && (val == 0x8aa8aa02)))
277 return;
278
279 /* Disable SP clock before programming HWCG registers */
280 gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
281
282 for (i = 0; i < ARRAY_SIZE(a6xx_hwcg); i++)
283 gpu_write(gpu, a6xx_hwcg[i].offset,
284 state ? a6xx_hwcg[i].value : 0);
285
286 /* Re-enable SP clock */
287 gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
288
289 gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? 0x8aa8aa02 : 0);
290 }
291
292 static int a6xx_cp_init(struct msm_gpu *gpu)
293 {
294 struct msm_ringbuffer *ring = gpu->rb[0];
295
296 OUT_PKT7(ring, CP_ME_INIT, 8);
297
298 OUT_RING(ring, 0x0000002f);
299
300 /* Enable multiple hardware contexts */
301 OUT_RING(ring, 0x00000003);
302
303 /* Enable error detection */
304 OUT_RING(ring, 0x20000000);
305
306 /* Don't enable header dump */
307 OUT_RING(ring, 0x00000000);
308 OUT_RING(ring, 0x00000000);
309
310 /* No workarounds enabled */
311 OUT_RING(ring, 0x00000000);
312
313 /* Pad the rest of the cmds with 0's */
314 OUT_RING(ring, 0x00000000);
315 OUT_RING(ring, 0x00000000);
316
317 a6xx_flush(gpu, ring);
318 return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
319 }
320
321 static int a6xx_ucode_init(struct msm_gpu *gpu)
322 {
323 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
324 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
325
326 if (!a6xx_gpu->sqe_bo) {
327 a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
328 adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);
329
330 if (IS_ERR(a6xx_gpu->sqe_bo)) {
331 int ret = PTR_ERR(a6xx_gpu->sqe_bo);
332
333 a6xx_gpu->sqe_bo = NULL;
334 DRM_DEV_ERROR(&gpu->pdev->dev,
335 "Could not allocate SQE ucode: %d\n", ret);
336
337 return ret;
338 }
339
340 msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
341 }
342
343 gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
344 REG_A6XX_CP_SQE_INSTR_BASE_HI, a6xx_gpu->sqe_iova);
345
346 return 0;
347 }
348
349 static int a6xx_zap_shader_init(struct msm_gpu *gpu)
350 {
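/* The zap shader only needs to be loaded into the secure world once, so remember whether that already happened */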
351 static bool loaded;
352 int ret;
353
354 if (loaded)
355 return 0;
356
357 ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
358
359 loaded = !ret;
360 return ret;
361 }
362
363 #define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
364 A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
365 A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
366 A6XX_RBBM_INT_0_MASK_CP_IB2 | \
367 A6XX_RBBM_INT_0_MASK_CP_IB1 | \
368 A6XX_RBBM_INT_0_MASK_CP_RB | \
369 A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
370 A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
371 A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
372 A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
373 A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
374
375 static int a6xx_hw_init(struct msm_gpu *gpu)
376 {
377 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
378 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
379 int ret;
380
381 /* Make sure the GMU keeps the GPU on while we set it up */
382 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
383
384 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
385
386 /*
387 * Disable the trusted memory range - we don't actually support secure
388 * memory rendering at this point in time and we don't want to block off
389 * part of the virtual memory space.
390 */
391 gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
392 REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
393 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
394
395 /* Turn on 64 bit addressing for all blocks */
396 gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
397 gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
398 gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
399 gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
400 gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
401 gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
402 gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
403 gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
404 gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
405 gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
406 gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
407 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
408
409 /* Enable hardware clockgating */
410 a6xx_set_hwcg(gpu, true);
411
412 /* Set up VBIF gating and client QoS */
413 gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
414 gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
415
416 /* Make all blocks contribute to the GPU BUSY perf counter */
417 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
418
419 /* Disable L2 bypass in the UCHE */
420 gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
421 gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
422 gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
423 gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
424 gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
425 gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
426
427 /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
428 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
429 REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
430
431 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
432 REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
433 0x00100000 + adreno_gpu->gmem - 1);
434
435 gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
436 gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
437
438 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
439 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
440
441 /* Set the mem pool size */
442 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
443
444 /* Set the default primFifo thresholds */
445 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
446
447 /* Set the AHB default slave response to "ERROR" */
448 gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
449
450 /* Turn on performance counters */
451 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
452
453 /* Select CP0 to always count cycles */
454 gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
455
456 gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
457 gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
458 gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
459 gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
460
461 /* Enable fault detection */
462 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
463 (1 << 30) | 0x1fffff);
464
465 gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
466
467 /* Protect registers from the CP */
468 gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
469
470 gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
471 A6XX_PROTECT_RDONLY(0x600, 0x51));
472 gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
473 gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
474 gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
475 gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
476 gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
477 gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
478 gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
479 A6XX_PROTECT_RDONLY(0xfc00, 0x3));
480 gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
481 gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
482 gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
483 gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
484 A6XX_PROTECT_RDONLY(0x0, 0x4f9));
485 gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
486 A6XX_PROTECT_RDONLY(0x501, 0xa));
487 gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
488 A6XX_PROTECT_RDONLY(0x511, 0x44));
489 gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
490 gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
491 gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
492 gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
493 gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
494 A6XX_PROTECT_RW(0xbe20, 0x11f3));
495 gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
496 gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
497 gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
498 gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
499 gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
500 gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
501 A6XX_PROTECT_RDONLY(0x980, 0x4));
502 gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
503
504 /* Enable interrupts */
505 gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
506
507 ret = adreno_hw_init(gpu);
508 if (ret)
509 goto out;
510
511 ret = a6xx_ucode_init(gpu);
512 if (ret)
513 goto out;
514
515 /* Always come up on rb 0 */
516 a6xx_gpu->cur_ring = gpu->rb[0];
517
518 /* Enable the SQE to start the CP engine */
519 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
520
521 ret = a6xx_cp_init(gpu);
522 if (ret)
523 goto out;
524
525 /*
526 * Try to load a zap shader into the secure world. If successful
527 * we can use the CP to switch out of secure mode. If not then we
528 * have no resource but to try to switch ourselves out manually. If we
529 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
530 * be blocked and a permissions violation will soon follow.
531 */
532 ret = a6xx_zap_shader_init(gpu);
533 if (!ret) {
534 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
535 OUT_RING(gpu->rb[0], 0x00000000);
536
537 a6xx_flush(gpu, gpu->rb[0]);
538 if (!a6xx_idle(gpu, gpu->rb[0]))
539 return -EINVAL;
540 } else if (ret == -ENODEV) {
541 /*
542 * This device does not use a zap shader (but print a warning
543 * just in case someone got their dt wrong... hopefully they
544 * have a debug UART to realize the error of their ways -
545 * if you mess this up you are about to crash horribly)
546 */
547 dev_warn_once(gpu->dev->dev,
548 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
549 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
550 ret = 0;
551 } else {
552 return ret;
553 }
554
555 out:
556 /*
557 * Tell the GMU that we are done touching the GPU and it can start power
558 * management
559 */
560 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
561
562 /* Take the GMU out of its special boot mode */
563 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
564
565 return ret;
566 }
567
568 static void a6xx_dump(struct msm_gpu *gpu)
569 {
570 DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
571 gpu_read(gpu, REG_A6XX_RBBM_STATUS));
572 adreno_dump(gpu);
573 }
574
575 #define VBIF_RESET_ACK_TIMEOUT 100
576 #define VBIF_RESET_ACK_MASK 0x00f0
577
578 static void a6xx_recover(struct msm_gpu *gpu)
579 {
580 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
581 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
582 int i;
583
584 adreno_dump_info(gpu);
585
586 for (i = 0; i < 8; i++)
587 DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
588 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
589
590 if (hang_debug)
591 a6xx_dump(gpu);
592
593 /*
594 * Turn off keep alive that might have been enabled by the hang
595 * interrupt
596 */
597 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
598
599 gpu->funcs->pm_suspend(gpu);
600 gpu->funcs->pm_resume(gpu);
601
602 msm_gpu_hw_init(gpu);
603 }
604
605 static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
606 {
607 struct msm_gpu *gpu = arg;
608
609 pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
610 iova, flags,
611 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
612 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
613 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
614 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));
615
616 return -EFAULT;
617 }
618
619 static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
620 {
621 u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);
622
623 if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
624 u32 val;
625
626 gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
627 val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
628 dev_err_ratelimited(&gpu->pdev->dev,
629 "CP | opcode error | possible opcode=0x%8.8X\n",
630 val);
631 }
632
633 if (status & A6XX_CP_INT_CP_UCODE_ERROR)
634 dev_err_ratelimited(&gpu->pdev->dev,
635 "CP ucode error interrupt\n");
636
637 if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
638 dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
639 gpu_read(gpu, REG_A6XX_CP_HW_FAULT));
640
641 if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
642 u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);
643
644 dev_err_ratelimited(&gpu->pdev->dev,
645 "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
646 val & (1 << 20) ? "READ" : "WRITE",
647 (val & 0x3ffff), val);
648 }
649
650 if (status & A6XX_CP_INT_CP_AHB_ERROR)
651 dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
652
653 if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
654 dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");
655
656 if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
657 dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
658
659 }
660
661 static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
662 {
663 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
664 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
665 struct drm_device *dev = gpu->dev;
666 struct msm_drm_private *priv = dev->dev_private;
667 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
668
669 /*
670 * Force the GPU to stay on until after we finish
671 * collecting information
672 */
673 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
674
675 DRM_DEV_ERROR(&gpu->pdev->dev,
676 "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
677 ring ? ring->id : -1, ring ? ring->seqno : 0,
678 gpu_read(gpu, REG_A6XX_RBBM_STATUS),
679 gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
680 gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
681 gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
682 gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
683 gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
684 gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
685
686 /* Turn off the hangcheck timer to keep it from bothering us */
687 del_timer(&gpu->hangcheck_timer);
688
689 queue_work(priv->wq, &gpu->recover_work);
690 }
691
692 static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
693 {
694 u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
695
696 gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
697
698 if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
699 a6xx_fault_detect_irq(gpu);
700
701 if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
702 dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");
703
704 if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
705 a6xx_cp_hw_err_irq(gpu);
706
707 if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
708 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
709
710 if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
711 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
712
713 if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
714 dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
715
716 if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
717 msm_gpu_retire(gpu);
718
719 return IRQ_HANDLED;
720 }
721
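/* Offsets for the generic adreno registers so the common adreno code (e.g. the ringbuffer setup in adreno_hw_init) can program them */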
722 static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
723 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
724 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
725 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
726 REG_A6XX_CP_RB_RPTR_ADDR_LO),
727 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
728 REG_A6XX_CP_RB_RPTR_ADDR_HI),
729 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
730 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
731 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
732 };
733
734 static int a6xx_pm_resume(struct msm_gpu *gpu)
735 {
736 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
737 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
738 int ret;
739
740 gpu->needs_hw_init = true;
741
742 ret = a6xx_gmu_resume(a6xx_gpu);
743 if (ret)
744 return ret;
745
746 msm_gpu_resume_devfreq(gpu);
747
748 return 0;
749 }
750
751 static int a6xx_pm_suspend(struct msm_gpu *gpu)
752 {
753 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
754 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
755
756 devfreq_suspend_device(gpu->devfreq.devfreq);
757
758 return a6xx_gmu_stop(a6xx_gpu);
759 }
760
761 static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
762 {
763 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
764 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
765
766 /* Force the GPU power on so we can read this register */
767 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
768
769 *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
770 REG_A6XX_RBBM_PERFCTR_CP_0_HI);
771
772 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
773 return 0;
774 }
775
776 static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
777 {
778 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
779 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
780
781 return a6xx_gpu->cur_ring;
782 }
783
784 static void a6xx_destroy(struct msm_gpu *gpu)
785 {
786 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
787 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
788
789 if (a6xx_gpu->sqe_bo) {
790 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
791 drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
792 }
793
794 a6xx_gmu_remove(a6xx_gpu);
795
796 adreno_gpu_cleanup(adreno_gpu);
797 kfree(a6xx_gpu);
798 }
799
800 static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
801 {
802 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
803 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
804 u64 busy_cycles, busy_time;
805
806 busy_cycles = gmu_read64(&a6xx_gpu->gmu,
807 REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
808 REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
809
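/* The power counter runs from the 19.2 MHz always-on clock; convert the cycle delta to microseconds (multiply by 10, divide by 192) */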
810 busy_time = (busy_cycles - gpu->devfreq.busy_cycles) * 10;
811 do_div(busy_time, 192);
812
813 gpu->devfreq.busy_cycles = busy_cycles;
814
815 if (WARN_ON(busy_time > ~0LU))
816 return ~0LU;
817
818 return (unsigned long)busy_time;
819 }
820
821 static const struct adreno_gpu_funcs funcs = {
822 .base = {
823 .get_param = adreno_get_param,
824 .hw_init = a6xx_hw_init,
825 .pm_suspend = a6xx_pm_suspend,
826 .pm_resume = a6xx_pm_resume,
827 .recover = a6xx_recover,
828 .submit = a6xx_submit,
829 .flush = a6xx_flush,
830 .active_ring = a6xx_active_ring,
831 .irq = a6xx_irq,
832 .destroy = a6xx_destroy,
833 #if defined(CONFIG_DRM_MSM_GPU_STATE)
834 .show = a6xx_show,
835 #endif
836 .gpu_busy = a6xx_gpu_busy,
837 .gpu_get_freq = a6xx_gmu_get_freq,
838 .gpu_set_freq = a6xx_gmu_set_freq,
839 #if defined(CONFIG_DRM_MSM_GPU_STATE)
840 .gpu_state_get = a6xx_gpu_state_get,
841 .gpu_state_put = a6xx_gpu_state_put,
842 #endif
843 },
844 .get_timestamp = a6xx_get_timestamp,
845 };
846
847 struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
848 {
849 struct msm_drm_private *priv = dev->dev_private;
850 struct platform_device *pdev = priv->gpu_pdev;
851 struct device_node *node;
852 struct a6xx_gpu *a6xx_gpu;
853 struct adreno_gpu *adreno_gpu;
854 struct msm_gpu *gpu;
855 int ret;
856
857 a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
858 if (!a6xx_gpu)
859 return ERR_PTR(-ENOMEM);
860
861 adreno_gpu = &a6xx_gpu->base;
862 gpu = &adreno_gpu->base;
863
864 adreno_gpu->registers = NULL;
865 adreno_gpu->reg_offsets = a6xx_register_offsets;
866
867 ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
868 if (ret) {
869 a6xx_destroy(&(a6xx_gpu->base.base));
870 return ERR_PTR(ret);
871 }
872
873 /* Check if there is a GMU phandle and set it up */
874 node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
875
876 /* There is no graceful way to recover if the GMU phandle is missing */
877 BUG_ON(!node);
878
879 ret = a6xx_gmu_init(a6xx_gpu, node);
880 if (ret) {
881 a6xx_destroy(&(a6xx_gpu->base.base));
882 return ERR_PTR(ret);
883 }
884
885 if (gpu->aspace)
886 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
887 a6xx_fault_handler);
888
889 return gpu;
890 }