This source file includes the following definitions.
- get_lpu_event
- get_cu_event
- get_dou_event
- get_pipeline_event
- d71_irq_handler
- d71_enable_irq
- d71_disable_irq
- d71_on_off_vblank
- to_d71_opmode
- d71_change_opmode
- d71_flush
- d71_reset
- d71_read_block_header
- d71_cleanup
- d71_enum_resources
- d71_format_mod_supported
- d71_init_fmt_tbl
- d71_connect_iommu
- d71_disconnect_iommu
- d71_identify
#include <drm/drm_print.h>
#include "d71_dev.h"
#include "malidp_io.h"

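/*
 * Collect pending events from an LPU (layer processing unit): translate the
 * raw IRQ status into KOMEDA_EVENT_* flags, fold the LPU and TBU error status
 * bits into KOMEDA_ERR_* flags, write back the handled status bits to clear
 * them, and acknowledge the raw IRQ status.
 */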
static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
{
	u32 __iomem *reg = d71_pipeline->lpu_addr;
	u32 status, raw_status;
	u64 evts = 0ULL;

	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
	if (raw_status & LPU_IRQ_IBSY)
		evts |= KOMEDA_EVENT_IBSY;
	if (raw_status & LPU_IRQ_EOW)
		evts |= KOMEDA_EVENT_EOW;

	if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY)) {
		u32 restore = 0, tbu_status;

		status = malidp_read32(reg, BLK_STATUS);
		if (status & LPU_STATUS_AXIE) {
			restore |= LPU_STATUS_AXIE;
			evts |= KOMEDA_ERR_AXIE;
		}
		if (status & LPU_STATUS_ACE0) {
			restore |= LPU_STATUS_ACE0;
			evts |= KOMEDA_ERR_ACE0;
		}
		if (status & LPU_STATUS_ACE1) {
			restore |= LPU_STATUS_ACE1;
			evts |= KOMEDA_ERR_ACE1;
		}
		if (status & LPU_STATUS_ACE2) {
			restore |= LPU_STATUS_ACE2;
			evts |= KOMEDA_ERR_ACE2;
		}
		if (status & LPU_STATUS_ACE3) {
			restore |= LPU_STATUS_ACE3;
			evts |= KOMEDA_ERR_ACE3;
		}
		if (restore != 0)
			malidp_write32_mask(reg, BLK_STATUS, restore, 0);

		restore = 0;

		tbu_status = malidp_read32(reg, LPU_TBU_STATUS);
		if (tbu_status & LPU_TBU_STATUS_TCF) {
			restore |= LPU_TBU_STATUS_TCF;
			evts |= KOMEDA_ERR_TCF;
		}
		if (tbu_status & LPU_TBU_STATUS_TTNG) {
			restore |= LPU_TBU_STATUS_TTNG;
			evts |= KOMEDA_ERR_TTNG;
		}
		if (tbu_status & LPU_TBU_STATUS_TITR) {
			restore |= LPU_TBU_STATUS_TITR;
			evts |= KOMEDA_ERR_TITR;
		}
		if (tbu_status & LPU_TBU_STATUS_TEMR) {
			restore |= LPU_TBU_STATUS_TEMR;
			evts |= KOMEDA_ERR_TEMR;
		}
		if (tbu_status & LPU_TBU_STATUS_TTF) {
			restore |= LPU_TBU_STATUS_TTF;
			evts |= KOMEDA_ERR_TTF;
		}
		if (restore != 0)
			malidp_write32_mask(reg, LPU_TBU_STATUS, restore, 0);
	}

	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
	return evts;
}

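/*
 * Collect pending events from a CU (composition unit): report overrun as
 * KOMEDA_EVENT_OVR, map the error status bits to KOMEDA_ERR_* flags, clear
 * the handled status bits and acknowledge the raw IRQ status.
 */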
static u64 get_cu_event(struct d71_pipeline *d71_pipeline)
{
	u32 __iomem *reg = d71_pipeline->cu_addr;
	u32 status, raw_status;
	u64 evts = 0ULL;

	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
	if (raw_status & CU_IRQ_OVR)
		evts |= KOMEDA_EVENT_OVR;

	if (raw_status & (CU_IRQ_ERR | CU_IRQ_OVR)) {
		status = malidp_read32(reg, BLK_STATUS) & 0x7FFFFFFF;
		if (status & CU_STATUS_CPE)
			evts |= KOMEDA_ERR_CPE;
		if (status & CU_STATUS_ZME)
			evts |= KOMEDA_ERR_ZME;
		if (status & CU_STATUS_CFGE)
			evts |= KOMEDA_ERR_CFGE;
		if (status)
			malidp_write32_mask(reg, BLK_STATUS, status, 0);
	}

	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);

	return evts;
}

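/*
 * Collect pending events from a DOU (display output unit): PL0 is reported
 * as vsync and UND as underrun, while the timeout and CSC error status bits
 * are mapped to the corresponding KOMEDA_ERR_* flags.
 */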
static u64 get_dou_event(struct d71_pipeline *d71_pipeline)
{
	u32 __iomem *reg = d71_pipeline->dou_addr;
	u32 status, raw_status;
	u64 evts = 0ULL;

	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
	if (raw_status & DOU_IRQ_PL0)
		evts |= KOMEDA_EVENT_VSYNC;
	if (raw_status & DOU_IRQ_UND)
		evts |= KOMEDA_EVENT_URUN;

	if (raw_status & (DOU_IRQ_ERR | DOU_IRQ_UND)) {
		u32 restore = 0;

		status = malidp_read32(reg, BLK_STATUS);
		if (status & DOU_STATUS_DRIFTTO) {
			restore |= DOU_STATUS_DRIFTTO;
			evts |= KOMEDA_ERR_DRIFTTO;
		}
		if (status & DOU_STATUS_FRAMETO) {
			restore |= DOU_STATUS_FRAMETO;
			evts |= KOMEDA_ERR_FRAMETO;
		}
		if (status & DOU_STATUS_TETO) {
			restore |= DOU_STATUS_TETO;
			evts |= KOMEDA_ERR_TETO;
		}
		if (status & DOU_STATUS_CSCE) {
			restore |= DOU_STATUS_CSCE;
			evts |= KOMEDA_ERR_CSCE;
		}

		if (restore != 0)
			malidp_write32_mask(reg, BLK_STATUS, restore, 0);
	}

	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
	return evts;
}

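/*
 * Gather the events of one pipeline by querying its LPU, CU and DOU, but
 * only the blocks that the global IRQ status flags as pending.
 */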
static u64 get_pipeline_event(struct d71_pipeline *d71_pipeline, u32 gcu_status)
{
	u64 evts = 0ULL;

	if (gcu_status & (GLB_IRQ_STATUS_LPU0 | GLB_IRQ_STATUS_LPU1))
		evts |= get_lpu_event(d71_pipeline);

	if (gcu_status & (GLB_IRQ_STATUS_CU0 | GLB_IRQ_STATUS_CU1))
		evts |= get_cu_event(d71_pipeline);

	if (gcu_status & (GLB_IRQ_STATUS_DOU0 | GLB_IRQ_STATUS_DOU1))
		evts |= get_dou_event(d71_pipeline);

	return evts;
}

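/*
 * Top-level IRQ handler: read the global IRQ status, pick up GCU events
 * (configuration-valid flips and global errors), then collect the events of
 * each pending pipeline, and report whether anything was handled.
 */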
static irqreturn_t
d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 status, gcu_status, raw_status;

	gcu_status = malidp_read32(d71->gcu_addr, GLB_IRQ_STATUS);

	if (gcu_status & GLB_IRQ_STATUS_GCU) {
		raw_status = malidp_read32(d71->gcu_addr, BLK_IRQ_RAW_STATUS);
		if (raw_status & GCU_IRQ_CVAL0)
			evts->pipes[0] |= KOMEDA_EVENT_FLIP;
		if (raw_status & GCU_IRQ_CVAL1)
			evts->pipes[1] |= KOMEDA_EVENT_FLIP;
		if (raw_status & GCU_IRQ_ERR) {
			status = malidp_read32(d71->gcu_addr, BLK_STATUS);
			if (status & GCU_STATUS_MERR) {
				evts->global |= KOMEDA_ERR_MERR;
				malidp_write32_mask(d71->gcu_addr, BLK_STATUS,
						    GCU_STATUS_MERR, 0);
			}
		}

		malidp_write32(d71->gcu_addr, BLK_IRQ_CLEAR, raw_status);
	}

	if (gcu_status & GLB_IRQ_STATUS_PIPE0)
		evts->pipes[0] |= get_pipeline_event(d71->pipes[0], gcu_status);

	if (gcu_status & GLB_IRQ_STATUS_PIPE1)
		evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);

	return gcu_status ? IRQ_HANDLED : IRQ_NONE;
}

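/* IRQ sources unmasked by the driver in the GCU and in each pipeline block */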
#define ENABLED_GCU_IRQS	(GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
				 GCU_IRQ_MODE | GCU_IRQ_ERR)
#define ENABLED_LPU_IRQS	(LPU_IRQ_IBSY | LPU_IRQ_ERR | LPU_IRQ_EOW)
#define ENABLED_CU_IRQS		(CU_IRQ_OVR | CU_IRQ_ERR)
#define ENABLED_DOU_IRQS	(DOU_IRQ_UND | DOU_IRQ_ERR)

static int d71_enable_irq(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;
	struct d71_pipeline *pipe;
	u32 i;

	malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK,
			    ENABLED_GCU_IRQS, ENABLED_GCU_IRQS);
	for (i = 0; i < d71->num_pipelines; i++) {
		pipe = d71->pipes[i];
		malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
				    ENABLED_CU_IRQS, ENABLED_CU_IRQS);
		malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
				    ENABLED_LPU_IRQS, ENABLED_LPU_IRQS);
		malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
				    ENABLED_DOU_IRQS, ENABLED_DOU_IRQS);
	}
	return 0;
}

static int d71_disable_irq(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;
	struct d71_pipeline *pipe;
	u32 i;

	malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, ENABLED_GCU_IRQS, 0);
	for (i = 0; i < d71->num_pipelines; i++) {
		pipe = d71->pipes[i];
		malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
				    ENABLED_CU_IRQS, 0);
		malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
				    ENABLED_LPU_IRQS, 0);
		malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
				    ENABLED_DOU_IRQS, 0);
	}
	return 0;
}

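/*
 * Mask or unmask the DOU PL0 interrupt on the master pipeline; the driver
 * reports this interrupt as the vsync/vblank event.
 */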
static void d71_on_off_vblank(struct komeda_dev *mdev, int master_pipe, bool on)
{
	struct d71_dev *d71 = mdev->chip_data;
	struct d71_pipeline *pipe = d71->pipes[master_pipe];

	malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
			    DOU_IRQ_PL0, on ? DOU_IRQ_PL0 : 0);
}

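/* Translate a komeda core operation mode into the D71 GCU control encoding */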
static int to_d71_opmode(int core_mode)
{
	switch (core_mode) {
	case KOMEDA_MODE_DISP0:
		return DO0_ACTIVE_MODE;
	case KOMEDA_MODE_DISP1:
		return DO1_ACTIVE_MODE;
	case KOMEDA_MODE_DUAL_DISP:
		return DO01_ACTIVE_MODE;
	case KOMEDA_MODE_INACTIVE:
		return INACTIVE_MODE;
	default:
		WARN(1, "Unknown operation mode");
		return INACTIVE_MODE;
	}
}

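/*
 * Request a new operation mode via the GCU control register and poll until
 * the hardware reports that the mode switch has completed.
 */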
static int d71_change_opmode(struct komeda_dev *mdev, int new_mode)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 opmode = to_d71_opmode(new_mode);
	int ret;

	malidp_write32_mask(d71->gcu_addr, BLK_CONTROL, 0x7, opmode);

	ret = dp_wait_cond(((malidp_read32(d71->gcu_addr, BLK_CONTROL) & 0x7) == opmode),
			   100, 1000, 10000);

	return ret;
}

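/*
 * Kick the hardware to latch the new configuration by writing the
 * config-valid bit of the master pipeline.
 */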
static void d71_flush(struct komeda_dev *mdev,
		      int master_pipe, u32 active_pipes)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 reg_offset = (master_pipe == 0) ?
			 GCU_CONFIG_VALID0 : GCU_CONFIG_VALID1;

	malidp_write32(d71->gcu_addr, reg_offset, GCU_CONFIG_CVAL);
}

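/* Soft-reset the device and wait for the GCU to clear the reset bit */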
static int d71_reset(struct d71_dev *d71)
{
	u32 __iomem *gcu = d71->gcu_addr;
	int ret;

	malidp_write32_mask(gcu, BLK_CONTROL,
			    GCU_CONTROL_SRST, GCU_CONTROL_SRST);

	ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
			   100, 1000, 10000);

	return ret;
}

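/*
 * Read the header of a register block: the block and pipeline info words
 * plus the valid input and output IDs. Reserved blocks carry no further
 * information.
 */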
void d71_read_block_header(u32 __iomem *reg, struct block_header *blk)
{
	int i;

	blk->block_info = malidp_read32(reg, BLK_BLOCK_INFO);
	if (BLOCK_INFO_BLK_TYPE(blk->block_info) == D71_BLK_TYPE_RESERVED)
		return;

	blk->pipeline_info = malidp_read32(reg, BLK_PIPELINE_INFO);

	for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++)
		blk->input_ids[i] = malidp_read32(reg + i, BLK_VALID_INPUT_ID0);
	for (i = 0; i < PIPELINE_INFO_N_OUTPUTS(blk->pipeline_info); i++)
		blk->output_ids[i] = malidp_read32(reg + i, BLK_OUTPUT_ID0);
}

static void d71_cleanup(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;

	if (!d71)
		return;

	devm_kfree(mdev->dev, d71);
	mdev->chip_data = NULL;
}

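/*
 * Discover the D71 resources: reset the device, read the core and peripheral
 * configuration, create the pipelines, then walk the register blocks and
 * probe every non-reserved block that is found.
 */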
static int d71_enum_resources(struct komeda_dev *mdev)
{
	struct d71_dev *d71;
	struct komeda_pipeline *pipe;
	struct block_header blk;
	u32 __iomem *blk_base;
	u32 i, value, offset;
	int err;

	d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL);
	if (!d71)
		return -ENOMEM;

	mdev->chip_data = d71;
	d71->mdev = mdev;
	d71->gcu_addr = mdev->reg_base;
	d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2);

	err = d71_reset(d71);
	if (err) {
		DRM_ERROR("Failed to reset d71 device.\n");
		goto err_cleanup;
	}

	/* probe GCU */
	value = malidp_read32(d71->gcu_addr, GLB_CORE_INFO);
	d71->num_blocks = value & 0xFF;
	d71->num_pipelines = (value >> 8) & 0x7;

	if (d71->num_pipelines > D71_MAX_PIPELINE) {
		DRM_ERROR("d71 supports %d pipelines, but got: %d.\n",
			  D71_MAX_PIPELINE, d71->num_pipelines);
		err = -EINVAL;
		goto err_cleanup;
	}

	/* probe PERIPH */
	value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO);
	if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH) {
		DRM_ERROR("access blk periph but got blk: %d.\n",
			  BLOCK_INFO_BLK_TYPE(value));
		err = -EINVAL;
		goto err_cleanup;
	}

	value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);

	d71->max_line_size	= value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
	d71->max_vsize		= 4096;
	d71->num_rich_layers	= value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
	d71->supports_dual_link	= value & PERIPH_SPLIT_EN ? true : false;
	d71->integrates_tbu	= value & PERIPH_TBU_EN ? true : false;

	for (i = 0; i < d71->num_pipelines; i++) {
		pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
					   &d71_pipeline_funcs);
		if (IS_ERR(pipe)) {
			err = PTR_ERR(pipe);
			goto err_cleanup;
		}
		d71->pipes[i] = to_d71_pipeline(pipe);
	}

	/* loop the register blks and probe */
	i = 2; /* exclude GCU and PERIPH */
	offset = D71_BLOCK_SIZE; /* skip GCU */
	while (i < d71->num_blocks) {
		blk_base = mdev->reg_base + (offset >> 2);

		d71_read_block_header(blk_base, &blk);
		if (BLOCK_INFO_BLK_TYPE(blk.block_info) != D71_BLK_TYPE_RESERVED) {
			err = d71_probe_block(d71, &blk, blk_base);
			if (err)
				goto err_cleanup;
			i++;
		}

		offset += D71_BLOCK_SIZE;
	}

	DRM_DEBUG("total %d (out of %d) blocks are found.\n",
		  i, d71->num_blocks);

	return 0;

err_cleanup:
	d71_cleanup(mdev);
	return err;
}

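/*
 * Shorthand macros and capability table for the supported pixel formats.
 * Each table entry pairs a hardware format ID with a DRM fourcc and lists
 * the layer types, rotations/flips, AFBC layouts and AFBC features that the
 * format supports.
 */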
#define __HW_ID(__group, __format) \
	((((__group) & 0x7) << 3) | ((__format) & 0x7))

#define RICH		KOMEDA_FMT_RICH_LAYER
#define SIMPLE		KOMEDA_FMT_SIMPLE_LAYER
#define RICH_SIMPLE	(KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_SIMPLE_LAYER)
#define RICH_WB		(KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_WB_LAYER)
#define RICH_SIMPLE_WB	(RICH_SIMPLE | KOMEDA_FMT_WB_LAYER)

#define Rot_0		DRM_MODE_ROTATE_0
#define Flip_H_V	(DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y | Rot_0)
#define Rot_ALL_H_V	(DRM_MODE_ROTATE_MASK | Flip_H_V)

#define LYT_NM		BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16)
#define LYT_WB		BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
#define LYT_NM_WB	(LYT_NM | LYT_WB)

#define AFB_TH		AFBC(_TILED | _SPARSE)
#define AFB_TH_SC_YTR	AFBC(_TILED | _SC | _SPARSE | _YTR)
#define AFB_TH_SC_YTR_BS AFBC(_TILED | _SC | _SPARSE | _YTR | _SPLIT)

static struct komeda_format_caps d71_format_caps_table[] = {
	{__HW_ID(0, 0),	DRM_FORMAT_ARGB2101010,	RICH_SIMPLE_WB,	Flip_H_V,	0, 0},
	{__HW_ID(0, 1),	DRM_FORMAT_ABGR2101010,	RICH_SIMPLE_WB,	Flip_H_V,	0, 0},
	{__HW_ID(0, 1),	DRM_FORMAT_ABGR2101010,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS},
	{__HW_ID(0, 2),	DRM_FORMAT_RGBA1010102,	RICH_SIMPLE_WB,	Flip_H_V,	0, 0},
	{__HW_ID(0, 3),	DRM_FORMAT_BGRA1010102,	RICH_SIMPLE_WB,	Flip_H_V,	0, 0},

	{__HW_ID(1, 0),	DRM_FORMAT_ARGB8888,	RICH_SIMPLE_WB,	Flip_H_V,	0, 0},
	{__HW_ID(1, 1),	DRM_FORMAT_ABGR8888,	RICH_SIMPLE_WB,	Flip_H_V,	0, 0},
	{__HW_ID(1, 1),	DRM_FORMAT_ABGR8888,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS},
	{__HW_ID(1, 2),	DRM_FORMAT_RGBA8888,	RICH_SIMPLE_WB,	Flip_H_V,	0, 0},
	{__HW_ID(1, 3),	DRM_FORMAT_BGRA8888,	RICH_SIMPLE_WB,	Flip_H_V,	0, 0},

	{__HW_ID(2, 0),	DRM_FORMAT_XRGB8888,	RICH_SIMPLE_WB,	Flip_H_V,	0, 0},
	{__HW_ID(2, 1),	DRM_FORMAT_XBGR8888,	RICH_SIMPLE_WB,	Flip_H_V,	0, 0},
	{__HW_ID(2, 2),	DRM_FORMAT_RGBX8888,	RICH_SIMPLE_WB,	Flip_H_V,	0, 0},
	{__HW_ID(2, 3),	DRM_FORMAT_BGRX8888,	RICH_SIMPLE_WB,	Flip_H_V,	0, 0},

	{__HW_ID(3, 0),	DRM_FORMAT_RGB888,	RICH_SIMPLE_WB,	Rot_0,		0, 0},
	{__HW_ID(3, 1),	DRM_FORMAT_BGR888,	RICH_SIMPLE_WB,	Rot_0,		0, 0},
	{__HW_ID(3, 1),	DRM_FORMAT_BGR888,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS},

	{__HW_ID(4, 0),	DRM_FORMAT_RGBA5551,	RICH_SIMPLE,	Flip_H_V,	0, 0},
	{__HW_ID(4, 1),	DRM_FORMAT_ABGR1555,	RICH_SIMPLE,	Flip_H_V,	0, 0},
	{__HW_ID(4, 1),	DRM_FORMAT_ABGR1555,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR},
	{__HW_ID(4, 2),	DRM_FORMAT_RGB565,	RICH_SIMPLE,	Flip_H_V,	0, 0},
	{__HW_ID(4, 3),	DRM_FORMAT_BGR565,	RICH_SIMPLE,	Flip_H_V,	0, 0},
	{__HW_ID(4, 3),	DRM_FORMAT_BGR565,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR},
	{__HW_ID(4, 4),	DRM_FORMAT_R8,		SIMPLE,		Rot_0,		0, 0},

	{__HW_ID(5, 1),	DRM_FORMAT_YUYV,	RICH,		Rot_ALL_H_V,	LYT_NM, AFB_TH},
	{__HW_ID(5, 2),	DRM_FORMAT_YUYV,	RICH,		Flip_H_V,	0, 0},
	{__HW_ID(5, 3),	DRM_FORMAT_UYVY,	RICH,		Flip_H_V,	0, 0},
	{__HW_ID(5, 6),	DRM_FORMAT_NV12,	RICH,		Flip_H_V,	0, 0},
	{__HW_ID(5, 6),	DRM_FORMAT_YUV420_8BIT,	RICH,		Rot_ALL_H_V,	LYT_NM, AFB_TH},
	{__HW_ID(5, 7),	DRM_FORMAT_YUV420,	RICH,		Flip_H_V,	0, 0},

	{__HW_ID(6, 6),	DRM_FORMAT_X0L2,	RICH,		Flip_H_V,	0, 0},
	{__HW_ID(6, 7),	DRM_FORMAT_P010,	RICH,		Flip_H_V,	0, 0},
	{__HW_ID(6, 7),	DRM_FORMAT_YUV420_10BIT, RICH,		Rot_ALL_H_V,	LYT_NM, AFB_TH},
};

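/*
 * Extra modifier check: the AFBC wide-block (32x8) layout cannot be rotated
 * by 90/270 degrees on D71, so reject that combination.
 */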
static bool d71_format_mod_supported(const struct komeda_format_caps *caps,
				     u32 layer_type, u64 modifier, u32 rot)
{
	uint64_t layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK;

	if ((layout == AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) &&
	    drm_rotation_90_or_270(rot)) {
		DRM_DEBUG_ATOMIC("D71 doesn't support ROT90 for WB-AFBC.\n");
		return false;
	}

	return true;
}

static void d71_init_fmt_tbl(struct komeda_dev *mdev)
{
	struct komeda_format_caps_table *table = &mdev->fmt_tbl;

	table->format_caps = d71_format_caps_table;
	table->format_mod_supported = d71_format_mod_supported;
	table->n_formats = ARRAY_SIZE(d71_format_caps_table);
}

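/*
 * Switch the GCU into TBU connect mode, wait until the translation units
 * report connected, then set LPU_TBU_CTRL_TLBPEN on every pipeline's LPU.
 * Bails out early if the device has no integrated TBU.
 */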
static int d71_connect_iommu(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 __iomem *reg = d71->gcu_addr;
	u32 check_bits = (d71->num_pipelines == 2) ?
			 GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
	int i, ret;

	if (!d71->integrates_tbu)
		return -1;

	malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_CONNECT_MODE);

	ret = dp_wait_cond(has_bits(check_bits, malidp_read32(reg, BLK_STATUS)),
			   100, 1000, 1000);
	if (ret < 0) {
		DRM_ERROR("timed out connecting to TCU!\n");
		malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
		return ret;
	}

	for (i = 0; i < d71->num_pipelines; i++)
		malidp_write32_mask(d71->pipes[i]->lpu_addr, LPU_TBU_CONTROL,
				    LPU_TBU_CTRL_TLBPEN, LPU_TBU_CTRL_TLBPEN);
	return 0;
}

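/*
 * Switch the GCU into TBU disconnect mode and wait until the translation
 * units report disconnected; fall back to inactive mode on timeout.
 */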
static int d71_disconnect_iommu(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 __iomem *reg = d71->gcu_addr;
	u32 check_bits = (d71->num_pipelines == 2) ?
			 GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
	int ret;

	malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_DISCONNECT_MODE);

	ret = dp_wait_cond(((malidp_read32(reg, BLK_STATUS) & check_bits) == 0),
			   100, 1000, 1000);
	if (ret < 0) {
		DRM_ERROR("timed out disconnecting from TCU!\n");
		malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
	}

	return ret;
}

static const struct komeda_dev_funcs d71_chip_funcs = {
	.init_format_table	= d71_init_fmt_tbl,
	.enum_resources		= d71_enum_resources,
	.cleanup		= d71_cleanup,
	.irq_handler		= d71_irq_handler,
	.enable_irq		= d71_enable_irq,
	.disable_irq		= d71_disable_irq,
	.on_off_vblank		= d71_on_off_vblank,
	.change_opmode		= d71_change_opmode,
	.flush			= d71_flush,
	.connect_iommu		= d71_connect_iommu,
	.disconnect_iommu	= d71_disconnect_iommu,
};

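/*
 * Probe entry point: read the chip identification registers and hand back
 * the D71 function table.
 */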
const struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
	chip->arch_id	= malidp_read32(reg_base, GLB_ARCH_ID);
	chip->core_id	= malidp_read32(reg_base, GLB_CORE_ID);
	chip->core_info	= malidp_read32(reg_base, GLB_CORE_INFO);
	chip->bus_width	= D71_BUS_WIDTH_16_BYTES;

	return &d71_chip_funcs;
}