This source file includes the following definitions:
- vc4_hvs_dump_state
- vc4_hvs_debugfs_underrun
- vc4_hvs_upload_linear_kernel
- vc4_hvs_mask_underrun
- vc4_hvs_unmask_underrun
- vc4_hvs_report_underrun
- vc4_hvs_irq_handler
- vc4_hvs_bind
- vc4_hvs_unbind
- vc4_hvs_dev_probe
- vc4_hvs_dev_remove
#include <linux/component.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic_helper.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

static const struct debugfs_reg32 hvs_regs[] = {
	VC4_REG32(SCALER_DISPCTRL),
	VC4_REG32(SCALER_DISPSTAT),
	VC4_REG32(SCALER_DISPID),
	VC4_REG32(SCALER_DISPECTRL),
	VC4_REG32(SCALER_DISPPROF),
	VC4_REG32(SCALER_DISPDITHER),
	VC4_REG32(SCALER_DISPEOLN),
	VC4_REG32(SCALER_DISPLIST0),
	VC4_REG32(SCALER_DISPLIST1),
	VC4_REG32(SCALER_DISPLIST2),
	VC4_REG32(SCALER_DISPLSTAT),
	VC4_REG32(SCALER_DISPLACT0),
	VC4_REG32(SCALER_DISPLACT1),
	VC4_REG32(SCALER_DISPLACT2),
	VC4_REG32(SCALER_DISPCTRL0),
	VC4_REG32(SCALER_DISPBKGND0),
	VC4_REG32(SCALER_DISPSTAT0),
	VC4_REG32(SCALER_DISPBASE0),
	VC4_REG32(SCALER_DISPCTRL1),
	VC4_REG32(SCALER_DISPBKGND1),
	VC4_REG32(SCALER_DISPSTAT1),
	VC4_REG32(SCALER_DISPBASE1),
	VC4_REG32(SCALER_DISPCTRL2),
	VC4_REG32(SCALER_DISPBKGND2),
	VC4_REG32(SCALER_DISPSTAT2),
	VC4_REG32(SCALER_DISPBASE2),
	VC4_REG32(SCALER_DISPALPHA2),
	VC4_REG32(SCALER_OLEDOFFS),
	VC4_REG32(SCALER_OLEDCOEF0),
	VC4_REG32(SCALER_OLEDCOEF1),
	VC4_REG32(SCALER_OLEDCOEF2),
};

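/* Dump the HVS register set and the start of display list memory to the
 * kernel log, for debugging display state.
 */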
void vc4_hvs_dump_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_info_printer(&vc4->hvs->pdev->dev);
	int i;

	drm_print_regset32(&p, &vc4->hvs->regset);

	DRM_INFO("HVS ctx:\n");
	for (i = 0; i < 64; i += 4) {
		DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
			 readl((u32 __iomem *)vc4->hvs->dlist + i + 0),
			 readl((u32 __iomem *)vc4->hvs->dlist + i + 1),
			 readl((u32 __iomem *)vc4->hvs->dlist + i + 2),
			 readl((u32 __iomem *)vc4->hvs->dlist + i + 3));
	}
}

static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	drm_printf(&p, "%d\n", atomic_read(&vc4->underrun));

	return 0;
}

/* The filter kernel is composed of dwords, each packing three 9-bit signed
 * integer coefficients next to each other.
 */
#define VC4_INT_TO_COEFF(coeff) (coeff & 0x1ff)
#define VC4_PPF_FILTER_WORD(c0, c1, c2)				\
	((((c0) & 0x1ff) << 0) |				\
	 (((c1) & 0x1ff) << 9) |				\
	 (((c2) & 0x1ff) << 18))

/* A linear-phase kernel is specified by its first 16 coefficients, packed
 * into 6 dwords; vc4_hvs_upload_linear_kernel() mirrors the first 5 dwords
 * to produce the full 11-dword (VC4_KERNEL_DWORDS) kernel written to the
 * hardware.
 */
#define VC4_LINEAR_PHASE_KERNEL(c0, c1, c2, c3, c4, c5, c6, c7, c8,	\
				c9, c10, c11, c12, c13, c14, c15)	\
	{VC4_PPF_FILTER_WORD(c0, c1, c2),				\
	 VC4_PPF_FILTER_WORD(c3, c4, c5),				\
	 VC4_PPF_FILTER_WORD(c6, c7, c8),				\
	 VC4_PPF_FILTER_WORD(c9, c10, c11),				\
	 VC4_PPF_FILTER_WORD(c12, c13, c14),				\
	 VC4_PPF_FILTER_WORD(c15, c15, 0)}

#define VC4_LINEAR_PHASE_KERNEL_DWORDS 6
#define VC4_KERNEL_DWORDS (VC4_LINEAR_PHASE_KERNEL_DWORDS * 2 - 1)

/* Coefficients for the Mitchell-Netravali filter with B = 1/3, C = 1/3,
 * the general-purpose choice recommended in their paper.
 */
static const u32 mitchell_netravali_1_3_1_3_kernel[] =
	VC4_LINEAR_PHASE_KERNEL(0, -2, -6, -8, -10, -8, -3, 2, 18,
				50, 82, 119, 155, 187, 213, 227);

static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
					struct drm_mm_node *space,
					const u32 *kernel)
{
	int ret, i;
	u32 __iomem *dst_kernel;

	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
	if (ret) {
		DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
			  ret);
		return ret;
	}

	dst_kernel = hvs->dlist + space->start;

	for (i = 0; i < VC4_KERNEL_DWORDS; i++) {
		if (i < VC4_LINEAR_PHASE_KERNEL_DWORDS) {
			writel(kernel[i], &dst_kernel[i]);
		} else {
			/* Mirror the first half of the kernel to fill the
			 * remaining dwords.
			 */
			writel(kernel[VC4_KERNEL_DWORDS - i - 1],
			       &dst_kernel[i]);
		}
	}

	return 0;
}

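/* Disable the given channel's underrun interrupt.  The IRQ handler masks a
 * channel after reporting an underrun so the log is not flooded; the
 * interrupt is re-enabled later via vc4_hvs_unmask_underrun().
 */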
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	u32 dispctrl = HVS_READ(SCALER_DISPCTRL);

	dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);

	HVS_WRITE(SCALER_DISPCTRL, dispctrl);
}

void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	u32 dispctrl = HVS_READ(SCALER_DISPCTRL);

	dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);

	/* Clear any stale underrun status before re-enabling the interrupt. */
	HVS_WRITE(SCALER_DISPSTAT,
		  SCALER_DISPSTAT_EUFLOW(channel));
	HVS_WRITE(SCALER_DISPCTRL, dispctrl);
}

static void vc4_hvs_report_underrun(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	atomic_inc(&vc4->underrun);
	DRM_DEV_ERROR(dev->dev, "HVS underrun\n");
}

static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
{
	struct drm_device *dev = data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	irqreturn_t irqret = IRQ_NONE;
	int channel;
	u32 control;
	u32 status;

	status = HVS_READ(SCALER_DISPSTAT);
	control = HVS_READ(SCALER_DISPCTRL);

	for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
		/* Only report underruns for channels whose underrun
		 * interrupt is currently enabled.
		 */
		if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
		    control & SCALER_DISPCTRL_DSPEISLUR(channel)) {
			vc4_hvs_mask_underrun(dev, channel);
			vc4_hvs_report_underrun(dev);

			irqret = IRQ_HANDLED;
		}
	}

	/* Clear every per-channel interrupt flag. */
	HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) |
				   SCALER_DISPSTAT_IRQMASK(1) |
				   SCALER_DISPSTAT_IRQMASK(2));

	return irqret;
}

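/* Bind callback for the HVS component: maps the register space, sets up the
 * display list and LBM allocators, uploads the scaling filter kernel, and
 * enables the scaler with its error interrupts.
 */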
static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = drm->dev_private;
	struct vc4_hvs *hvs = NULL;
	int ret;
	u32 dispctrl;

	hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
	if (!hvs)
		return -ENOMEM;

	hvs->pdev = pdev;

	hvs->regs = vc4_ioremap_regs(pdev, 0);
	if (IS_ERR(hvs->regs))
		return PTR_ERR(hvs->regs);

	hvs->regset.base = hvs->regs;
	hvs->regset.regs = hvs_regs;
	hvs->regset.nregs = ARRAY_SIZE(hvs_regs);

	hvs->dlist = hvs->regs + SCALER_DLIST_START;

	spin_lock_init(&hvs->mm_lock);

	/* Set up the display list memory manager.  The region used by the
	 * bootloader's display list (up to HVS_BOOTLOADER_DLIST_END) is
	 * left alone so the screen is not scrambled before the driver
	 * takes over.
	 */
	drm_mm_init(&hvs->dlist_mm,
		    HVS_BOOTLOADER_DLIST_END,
		    (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);

	/* Set up the allocator for LBM (line buffer memory), which the
	 * planes use as scratch space while scaling.
	 */
	drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);

	/* Upload the scaling filter kernel; it stays allocated for the
	 * lifetime of the driver.
	 */
	ret = vc4_hvs_upload_linear_kernel(hvs,
					   &hvs->mitchell_netravali_filter,
					   mitchell_netravali_1_3_1_3_kernel);
	if (ret)
		return ret;

	vc4->hvs = hvs;

	dispctrl = HVS_READ(SCALER_DISPCTRL);

	dispctrl |= SCALER_DISPCTRL_ENABLE;
	dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) |
		    SCALER_DISPCTRL_DISPEIRQ(1) |
		    SCALER_DISPCTRL_DISPEIRQ(2);

	/* Route the DSP3 mux to HVS channel 2 and mask all the error and
	 * end-of-frame interrupts for now; underrun interrupts are
	 * unmasked per channel later via vc4_hvs_unmask_underrun().
	 */
	dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
	dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
		      SCALER_DISPCTRL_SLVWREIRQ |
		      SCALER_DISPCTRL_SLVRDEIRQ |
		      SCALER_DISPCTRL_DSPEIEOF(0) |
		      SCALER_DISPCTRL_DSPEIEOF(1) |
		      SCALER_DISPCTRL_DSPEIEOF(2) |
		      SCALER_DISPCTRL_DSPEIEOLN(0) |
		      SCALER_DISPCTRL_DSPEIEOLN(1) |
		      SCALER_DISPCTRL_DSPEIEOLN(2) |
		      SCALER_DISPCTRL_DSPEISLUR(0) |
		      SCALER_DISPCTRL_DSPEISLUR(1) |
		      SCALER_DISPCTRL_DSPEISLUR(2) |
		      SCALER_DISPCTRL_SCLEIRQ);
	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
			       vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
	if (ret)
		return ret;

	vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);
	vc4_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun,
			     NULL);

	return 0;
}

static void vc4_hvs_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = drm->dev_private;

	if (vc4->hvs->mitchell_netravali_filter.allocated)
		drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);

	drm_mm_takedown(&vc4->hvs->dlist_mm);
	drm_mm_takedown(&vc4->hvs->lbm_mm);

	vc4->hvs = NULL;
}

static const struct component_ops vc4_hvs_ops = {
	.bind = vc4_hvs_bind,
	.unbind = vc4_hvs_unbind,
};

static int vc4_hvs_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_hvs_ops);
}

static int vc4_hvs_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_hvs_ops);
	return 0;
}

static const struct of_device_id vc4_hvs_dt_match[] = {
	{ .compatible = "brcm,bcm2835-hvs" },
	{}
};

struct platform_driver vc4_hvs_driver = {
	.probe = vc4_hvs_dev_probe,
	.remove = vc4_hvs_dev_remove,
	.driver = {
		.name = "vc4_hvs",
		.of_match_table = vc4_hvs_dt_match,
	},
};