/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */


#include "msm_drv.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"

static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);

static int mdp4_hw_init(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_device *dev = mdp4_kms->dev;
	uint32_t version, major, minor, dmap_cfg, vg_cfg;
	unsigned long clk;
	int ret = 0;

	pm_runtime_get_sync(dev->dev);

	mdp4_enable(mdp4_kms);
	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
	mdp4_disable(mdp4_kms);

	major = FIELD(version, MDP4_VERSION_MAJOR);
	minor = FIELD(version, MDP4_VERSION_MINOR);

	DBG("found MDP4 version v%d.%d", major, minor);

	if (major != 4) {
		dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
				major, minor);
		ret = -ENXIO;
		goto out;
	}

	mdp4_kms->rev = minor;

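	/* on some hw revisions the (optional) DSI PLL supplies need to be
	 * programmed to fixed voltages before bringing up the block:
	 */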
	if (mdp4_kms->dsi_pll_vdda) {
		if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
			ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
					1200000, 1200000);
			if (ret) {
				dev_err(dev->dev,
					"failed to set dsi_pll_vdda voltage: %d\n", ret);
				goto out;
			}
		}
	}

	if (mdp4_kms->dsi_pll_vddio) {
		if (mdp4_kms->rev == 2) {
			ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
					1800000, 1800000);
			if (ret) {
				dev_err(dev->dev,
					"failed to set dsi_pll_vddio voltage: %d\n", ret);
				goto out;
			}
		}
	}

	if (mdp4_kms->rev > 1) {
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
	}

	mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);

	/* max read pending cmd config, 3 pending requests: */
	mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);

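	/* pick DMA/pipe fetch burst config based on hw rev and core clk rate: */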
	clk = clk_get_rate(mdp4_kms->clk);

	if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
		dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
		vg_cfg = 0x47;       /* 16 bytes-burst x 8 req */
	} else {
		dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
		vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
	}

	DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);

	if (mdp4_kms->rev >= 2)
		mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);

	/* disable CSC matrix / YUV by default: */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);

	if (mdp4_kms->rev > 1)
		mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

out:
	pm_runtime_put_sync(dev->dev);

	return ret;
}

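/*
 * prepare_commit/complete_commit bracket an atomic flush: the MDP clocks are
 * kept enabled and a vblank reference is held on each CRTC for the duration
 * of the commit (see 119ecb7fd):
 */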
static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	int i, ncrtcs = state->dev->mode_config.num_crtc;

	mdp4_enable(mdp4_kms);

	/* see 119ecb7fd */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		drm_crtc_vblank_get(crtc);
	}
}

static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	int i, ncrtcs = state->dev->mode_config.num_crtc;

	/* see 119ecb7fd */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		drm_crtc_vblank_put(crtc);
	}

	mdp4_disable(mdp4_kms);
}

static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	/* if we had >1 encoder, we'd need something more clever: */
	return mdp4_dtv_round_pixclk(encoder, rate);
}

static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
	unsigned i;

	for (i = 0; i < priv->num_crtcs; i++)
		mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
}

static void mdp4_destroy(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	if (mdp4_kms->blank_cursor_iova)
		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
	if (mdp4_kms->blank_cursor_bo)
		drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
	kfree(mdp4_kms);
}

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init = mdp4_hw_init,
		.irq_preinstall = mdp4_irq_preinstall,
		.irq_postinstall = mdp4_irq_postinstall,
		.irq_uninstall = mdp4_irq_uninstall,
		.irq = mdp4_irq,
		.enable_vblank = mdp4_enable_vblank,
		.disable_vblank = mdp4_disable_vblank,
		.prepare_commit = mdp4_prepare_commit,
		.complete_commit = mdp4_complete_commit,
		.get_format = mdp_get_format,
		.round_pixclk = mdp4_round_pixclk,
		.preclose = mdp4_preclose,
		.destroy = mdp4_destroy,
	},
	.set_irqmask = mdp4_set_irqmask,
};

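/* enable/disable the MDP core, interface, LUT and AXI clocks around register
 * access and scanout:
 */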
int mdp4_disable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_disable_unprepare(mdp4_kms->clk);
	if (mdp4_kms->pclk)
		clk_disable_unprepare(mdp4_kms->pclk);
	clk_disable_unprepare(mdp4_kms->lut_clk);
	if (mdp4_kms->axi_clk)
		clk_disable_unprepare(mdp4_kms->axi_clk);

	return 0;
}

int mdp4_enable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_prepare_enable(mdp4_kms->clk);
	if (mdp4_kms->pclk)
		clk_prepare_enable(mdp4_kms->pclk);
	clk_prepare_enable(mdp4_kms->lut_clk);
	if (mdp4_kms->axi_clk)
		clk_prepare_enable(mdp4_kms->axi_clk);

	return 0;
}

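/*
 * Look up the attached panel via the named DT phandle; if the phandle is
 * present but the panel driver has not probed yet, return -EPROBE_DEFER so
 * init gets retried later:
 */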
#ifdef CONFIG_OF
static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
{
	struct device_node *n;
	struct drm_panel *panel = NULL;

	n = of_parse_phandle(dev->dev->of_node, name, 0);
	if (n) {
		panel = of_drm_find_panel(n);
		if (!panel)
			panel = ERR_PTR(-EPROBE_DEFER);
	}

	return panel;
}
#else
static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
{
	// ??? maybe use a module param to specify which panel is attached?
	return NULL;
}
#endif

static int modeset_init(struct mdp4_kms *mdp4_kms)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct drm_panel *panel;
	int ret;

	/* construct non-private planes: */
	plane = mdp4_plane_init(dev, VG1, false);
	if (IS_ERR(plane)) {
		dev_err(dev->dev, "failed to construct plane for VG1\n");
		ret = PTR_ERR(plane);
		goto fail;
	}
	priv->planes[priv->num_planes++] = plane;

	plane = mdp4_plane_init(dev, VG2, false);
	if (IS_ERR(plane)) {
		dev_err(dev->dev, "failed to construct plane for VG2\n");
		ret = PTR_ERR(plane);
		goto fail;
	}
	priv->planes[priv->num_planes++] = plane;

	/*
	 * Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS:
	 */

	panel = detect_panel(dev, "qcom,lvds-panel");
	if (IS_ERR(panel)) {
		ret = PTR_ERR(panel);
		dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
		goto fail;
	}

	plane = mdp4_plane_init(dev, RGB2, true);
	if (IS_ERR(plane)) {
		dev_err(dev->dev, "failed to construct plane for RGB2\n");
		ret = PTR_ERR(plane);
		goto fail;
	}

	crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_P);
	if (IS_ERR(crtc)) {
		dev_err(dev->dev, "failed to construct crtc for DMA_P\n");
		ret = PTR_ERR(crtc);
		goto fail;
	}

	encoder = mdp4_lcdc_encoder_init(dev, panel);
	if (IS_ERR(encoder)) {
		dev_err(dev->dev, "failed to construct LCDC encoder\n");
		ret = PTR_ERR(encoder);
		goto fail;
	}

	/* LCDC can be hooked to DMA_P: */
	encoder->possible_crtcs = 1 << priv->num_crtcs;

	priv->crtcs[priv->num_crtcs++] = crtc;
	priv->encoders[priv->num_encoders++] = encoder;

	connector = mdp4_lvds_connector_init(dev, panel, encoder);
	if (IS_ERR(connector)) {
		ret = PTR_ERR(connector);
		dev_err(dev->dev, "failed to initialize LVDS connector: %d\n", ret);
		goto fail;
	}

	priv->connectors[priv->num_connectors++] = connector;

	/*
	 * Setup DTV/HDMI path: RGB1 -> DMA_E -> DTV -> HDMI:
	 */

	plane = mdp4_plane_init(dev, RGB1, true);
	if (IS_ERR(plane)) {
		dev_err(dev->dev, "failed to construct plane for RGB1\n");
		ret = PTR_ERR(plane);
		goto fail;
	}

	crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
	if (IS_ERR(crtc)) {
		dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
		ret = PTR_ERR(crtc);
		goto fail;
	}

	encoder = mdp4_dtv_encoder_init(dev);
	if (IS_ERR(encoder)) {
		dev_err(dev->dev, "failed to construct DTV encoder\n");
		ret = PTR_ERR(encoder);
		goto fail;
	}

	/* DTV can be hooked to DMA_E: */
	encoder->possible_crtcs = 1 << priv->num_crtcs;

	priv->crtcs[priv->num_crtcs++] = crtc;
	priv->encoders[priv->num_encoders++] = encoder;

	if (priv->hdmi) {
		/* Construct bridge/connector for HDMI: */
		ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
		if (ret) {
			dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
			goto fail;
		}
	}

	return 0;

fail:
	return ret;
}

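/* IOMMU ports (context banks) that the MDP scanout hardware is attached to: */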
static const char *iommu_ports[] = {
	"mdp_port0_cb0", "mdp_port1_cb0",
};

struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
	struct platform_device *pdev = dev->platformdev;
	struct mdp4_platform_config *config = mdp4_get_config(pdev);
	struct mdp4_kms *mdp4_kms;
	struct msm_kms *kms = NULL;
	struct msm_mmu *mmu;
	int ret;

	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
	if (!mdp4_kms) {
		dev_err(dev->dev, "failed to allocate kms\n");
		ret = -ENOMEM;
		goto fail;
	}

	mdp_kms_init(&mdp4_kms->base, &kms_funcs);

	kms = &mdp4_kms->base.base;

	mdp4_kms->dev = dev;

	mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
	if (IS_ERR(mdp4_kms->mmio)) {
		ret = PTR_ERR(mdp4_kms->mmio);
		goto fail;
	}

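	/* the DSI PLL supplies are optional; if the platform does not provide
	 * them, just leave the pointers NULL:
	 */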
	mdp4_kms->dsi_pll_vdda =
			devm_regulator_get_optional(&pdev->dev, "dsi_pll_vdda");
	if (IS_ERR(mdp4_kms->dsi_pll_vdda))
		mdp4_kms->dsi_pll_vdda = NULL;

	mdp4_kms->dsi_pll_vddio =
			devm_regulator_get_optional(&pdev->dev, "dsi_pll_vddio");
	if (IS_ERR(mdp4_kms->dsi_pll_vddio))
		mdp4_kms->dsi_pll_vddio = NULL;

	/* NOTE: driver for this regulator still missing upstream.. use
	 * _get_exclusive() and ignore the error if it does not exist
	 * (and hope that the bootloader left it on for us)
	 */
	mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
	if (IS_ERR(mdp4_kms->vdd))
		mdp4_kms->vdd = NULL;

	if (mdp4_kms->vdd) {
		ret = regulator_enable(mdp4_kms->vdd);
		if (ret) {
			dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
			goto fail;
		}
	}

	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(mdp4_kms->clk)) {
		dev_err(dev->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdp4_kms->clk);
		goto fail;
	}

	mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(mdp4_kms->pclk))
		mdp4_kms->pclk = NULL;

	// XXX if (rev >= MDP_REV_42) { ???
	mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
	if (IS_ERR(mdp4_kms->lut_clk)) {
		dev_err(dev->dev, "failed to get lut_clk\n");
		ret = PTR_ERR(mdp4_kms->lut_clk);
		goto fail;
	}

	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk");
	if (IS_ERR(mdp4_kms->axi_clk)) {
		dev_err(dev->dev, "failed to get axi_clk\n");
		ret = PTR_ERR(mdp4_kms->axi_clk);
		goto fail;
	}

	clk_set_rate(mdp4_kms->clk, config->max_clk);
	clk_set_rate(mdp4_kms->lut_clk, config->max_clk);

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp4_enable(mdp4_kms);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
	mdp4_disable(mdp4_kms);
	mdelay(16);

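	/* attach the IOMMU used for scanout buffers if the platform provides
	 * one; otherwise fall back to physically contiguous buffers:
	 */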
	if (config->iommu) {
		mmu = msm_iommu_new(&pdev->dev, config->iommu);
		if (IS_ERR(mmu)) {
			ret = PTR_ERR(mmu);
			goto fail;
		}
		ret = mmu->funcs->attach(mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret)
			goto fail;
	} else {
		dev_info(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		mmu = NULL;
	}

	mdp4_kms->id = msm_register_mmu(dev, mmu);
	if (mdp4_kms->id < 0) {
		ret = mdp4_kms->id;
		dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
		goto fail;
	}

	ret = modeset_init(mdp4_kms);
	if (ret) {
		dev_err(dev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

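	/* allocate and pin a small write-combined buffer that gets scanned out
	 * as the cursor image when no cursor bo has been set:
	 */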
	mutex_lock(&dev->struct_mutex);
	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
		dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
		mdp4_kms->blank_cursor_bo = NULL;
		goto fail;
	}

	ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
			&mdp4_kms->blank_cursor_iova);
	if (ret) {
		dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
		goto fail;
	}

	return kms;

fail:
	if (kms)
		mdp4_destroy(kms);
	return ERR_PTR(ret);
}

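/* platform config: max core clock rate and the iommu domain used for scanout
 * (proper device-tree probing is still a TODO, so the OF path uses fixed
 * values):
 */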
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
	static struct mdp4_platform_config config = {};
#ifdef CONFIG_OF
	/* TODO */
	config.max_clk = 266667000;
	config.iommu = iommu_domain_alloc(&platform_bus_type);
#else
	if (cpu_is_apq8064())
		config.max_clk = 266667000;
	else
		config.max_clk = 200000000;

	config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
#endif
	return &config;
}