root/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c

DEFINITIONS

This source file includes the following definitions:
  1. mdss_write
  2. mdss_read
  3. mdss_irq
  4. mdss_hw_mask_irq
  5. mdss_hw_unmask_irq
  6. mdss_hw_irqdomain_map
  7. mdss_irq_domain_init
  8. mdp5_mdss_enable
  9. mdp5_mdss_disable
  10. msm_mdss_get_clocks
  11. mdp5_mdss_destroy
  12. mdp5_mdss_init

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 */

#include <linux/irqdomain.h>
#include <linux/irq.h>

#include "msm_drv.h"
#include "mdp5_kms.h"

#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)
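
/*
 * Per-device MDSS wrapper state: register mappings for the MDSS top level
 * and the VBIF, the GDSC-backed "vdd" supply, the interface/bus/vsync
 * clocks, and the irq domain used to fan the top-level interrupt out to
 * the sub-blocks.
 */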
struct mdp5_mdss {
        struct msm_mdss base;

        void __iomem *mmio, *vbif;

        struct regulator *vdd;

        struct clk *ahb_clk;
        struct clk *axi_clk;
        struct clk *vsync_clk;

        struct {
                volatile unsigned long enabled_mask;
                struct irq_domain *domain;
        } irqcontroller;
};
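
/* Accessors for registers in the MDSS top-level (mmio) region */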
static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
{
        msm_writel(data, mdp5_mdss->mmio + reg);
}

static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
{
        return msm_readl(mdp5_mdss->mmio + reg);
}
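
/*
 * Top-level interrupt handler: read the HW interrupt status and hand each
 * pending bit to the Linux irq mapped for it in the irq domain below.
 */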
static irqreturn_t mdss_irq(int irq, void *arg)
{
        struct mdp5_mdss *mdp5_mdss = arg;
        u32 intr;

        intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);

        VERB("intr=%08x", intr);

        while (intr) {
                irq_hw_number_t hwirq = fls(intr) - 1;

                generic_handle_irq(irq_find_mapping(
                                mdp5_mdss->irqcontroller.domain, hwirq));
                intr &= ~(1 << hwirq);
        }

        return IRQ_HANDLED;
}

/*
 * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
 * can register to get their irqs delivered
 */
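
/*
 * For illustration only (the exact nodes live in the SoC devicetree, not
 * in this file): a sub-block selects this controller as its interrupt
 * parent and encodes its VALID_IRQS bit number in a single cell, e.g.
 *
 *      mdp: mdp@1a01000 {
 *              interrupt-parent = <&mdss>;
 *              interrupts = <0>;       // bit 0 = MDSS_HW_INTR_STATUS_INTR_MDP
 *              ...
 *      };
 *
 * so that platform_get_irq() in the sub-block driver returns a Linux irq
 * routed through mdss_hw_irqdomain_map() below.
 */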

#define VALID_IRQS  (MDSS_HW_INTR_STATUS_INTR_MDP | \
                MDSS_HW_INTR_STATUS_INTR_DSI0 | \
                MDSS_HW_INTR_STATUS_INTR_DSI1 | \
                MDSS_HW_INTR_STATUS_INTR_HDMI | \
                MDSS_HW_INTR_STATUS_INTR_EDP)
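
/*
 * The mask/unmask ops only track each sub-block's enable state in a
 * software bitmask; the barriers keep the bit operation ordered with
 * respect to surrounding accesses.
 */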
static void mdss_hw_mask_irq(struct irq_data *irqd)
{
        struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);

        smp_mb__before_atomic();
        clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
        smp_mb__after_atomic();
}

static void mdss_hw_unmask_irq(struct irq_data *irqd)
{
        struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);

        smp_mb__before_atomic();
        set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
        smp_mb__after_atomic();
}

static struct irq_chip mdss_hw_irq_chip = {
        .name           = "mdss",
        .irq_mask       = mdss_hw_mask_irq,
        .irq_unmask     = mdss_hw_unmask_irq,
};
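
/*
 * Refuse to map any hwirq without a bit in VALID_IRQS, so only the known
 * sub-blocks can claim a slot in the domain.
 */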
static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
                                 irq_hw_number_t hwirq)
{
        struct mdp5_mdss *mdp5_mdss = d->host_data;

        if (!(VALID_IRQS & (1 << hwirq)))
                return -EPERM;

        irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
        irq_set_chip_data(irq, mdp5_mdss);

        return 0;
}

static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
        .map = mdss_hw_irqdomain_map,
        .xlate = irq_domain_xlate_onecell,
};

static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
        struct device *dev = mdp5_mdss->base.dev->dev;
        struct irq_domain *d;

        d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
                                  mdp5_mdss);
        if (!d) {
                DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
                return -ENXIO;
        }

        mdp5_mdss->irqcontroller.enabled_mask = 0;
        mdp5_mdss->irqcontroller.domain = d;

        return 0;
}
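
/*
 * Clock on/off paths exposed through msm_mdss_funcs; in the msm driver
 * these are expected to be driven from the runtime-PM suspend/resume
 * path of the top-level device.
 */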
static int mdp5_mdss_enable(struct msm_mdss *mdss)
{
        struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
        DBG("");

        clk_prepare_enable(mdp5_mdss->ahb_clk);
        if (mdp5_mdss->axi_clk)
                clk_prepare_enable(mdp5_mdss->axi_clk);
        if (mdp5_mdss->vsync_clk)
                clk_prepare_enable(mdp5_mdss->vsync_clk);

        return 0;
}

static int mdp5_mdss_disable(struct msm_mdss *mdss)
{
        struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
        DBG("");

        if (mdp5_mdss->vsync_clk)
                clk_disable_unprepare(mdp5_mdss->vsync_clk);
        if (mdp5_mdss->axi_clk)
                clk_disable_unprepare(mdp5_mdss->axi_clk);
        clk_disable_unprepare(mdp5_mdss->ahb_clk);

        return 0;
}
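
/*
 * All three clocks are treated as optional: a failed lookup is recorded
 * as NULL and then skipped (explicitly, or by the NULL-tolerant clk API)
 * in the enable/disable paths above.
 */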
static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
        struct platform_device *pdev =
                        to_platform_device(mdp5_mdss->base.dev->dev);

        mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
        if (IS_ERR(mdp5_mdss->ahb_clk))
                mdp5_mdss->ahb_clk = NULL;

        mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
        if (IS_ERR(mdp5_mdss->axi_clk))
                mdp5_mdss->axi_clk = NULL;

        mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
        if (IS_ERR(mdp5_mdss->vsync_clk))
                mdp5_mdss->vsync_clk = NULL;

        return 0;
}
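
/* Undo what mdp5_mdss_init() set up: irq domain, vdd supply, runtime PM */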
static void mdp5_mdss_destroy(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);

        if (!mdp5_mdss)
                return;

        irq_domain_remove(mdp5_mdss->irqcontroller.domain);
        mdp5_mdss->irqcontroller.domain = NULL;

        regulator_disable(mdp5_mdss->vdd);

        pm_runtime_disable(dev->dev);
}

static const struct msm_mdss_funcs mdss_funcs = {
        .enable = mdp5_mdss_enable,
        .disable = mdp5_mdss_disable,
        .destroy = mdp5_mdss_destroy,
};
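
/*
 * Probe-time setup of the MDSS wrapper; a quiet no-op on devices whose
 * top node does not match "qcom,mdss" (i.e. generations without the
 * wrapper block).
 */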
int mdp5_mdss_init(struct drm_device *dev)
{
        struct platform_device *pdev = to_platform_device(dev->dev);
        struct msm_drm_private *priv = dev->dev_private;
        struct mdp5_mdss *mdp5_mdss;
        int ret;

        DBG("");

        if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
                return 0;

        mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
        if (!mdp5_mdss) {
                ret = -ENOMEM;
                goto fail;
        }

        mdp5_mdss->base.dev = dev;

        mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
        if (IS_ERR(mdp5_mdss->mmio)) {
                ret = PTR_ERR(mdp5_mdss->mmio);
                goto fail;
        }

        mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
        if (IS_ERR(mdp5_mdss->vbif)) {
                ret = PTR_ERR(mdp5_mdss->vbif);
                goto fail;
        }

        ret = msm_mdss_get_clocks(mdp5_mdss);
        if (ret) {
                DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
                goto fail;
        }

        /* Regulator to enable GDSCs in downstream kernels */
        mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
        if (IS_ERR(mdp5_mdss->vdd)) {
                ret = PTR_ERR(mdp5_mdss->vdd);
                goto fail;
        }

        ret = regulator_enable(mdp5_mdss->vdd);
        if (ret) {
                DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
                        ret);
                goto fail;
        }

        ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
                               mdss_irq, 0, "mdss_isr", mdp5_mdss);
        if (ret) {
                DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
                goto fail_irq;
        }

        ret = mdss_irq_domain_init(mdp5_mdss);
        if (ret) {
                DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
                goto fail_irq;
        }

        mdp5_mdss->base.funcs = &mdss_funcs;
        priv->mdss = &mdp5_mdss->base;

        pm_runtime_enable(dev->dev);

        return 0;
fail_irq:
        regulator_disable(mdp5_mdss->vdd);
fail:
        return ret;
}
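
/*
 * Caller-side sketch (illustrative, not part of this file): the top-level
 * msm driver would call this during probe and later reach the hardware
 * through the registered callbacks, roughly:
 *
 *      ret = mdp5_mdss_init(ddev);
 *      if (ret)
 *              goto err;
 *      ...
 *      priv->mdss->funcs->enable(priv->mdss);
 *      ...
 *      priv->mdss->funcs->disable(priv->mdss);
 */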
