root/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c


DEFINITIONS

This source file includes the following definitions.
  1. nvkm_device_tegra_power_up
  2. nvkm_device_tegra_power_down
  3. nvkm_device_tegra_probe_iommu
  4. nvkm_device_tegra_remove_iommu
  5. nvkm_device_tegra
  6. nvkm_device_tegra_resource
  7. nvkm_device_tegra_resource_addr
  8. nvkm_device_tegra_resource_size
  9. nvkm_device_tegra_intr
  10. nvkm_device_tegra_fini
  11. nvkm_device_tegra_init
  12. nvkm_device_tegra_dtor
  13. nvkm_device_tegra_new
  14. nvkm_device_tegra_new

/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

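/*
 * Power the GPU up: enable the VDD regulator (when one is present) and the
 * core, reference and PWR clocks, then remove the 3D power-gate clamp
 * (unless a PM domain already handles that) and take the GPU out of reset.
 * The udelay()s give each step time to settle.
 */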
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
        int ret;

        if (tdev->vdd) {
                ret = regulator_enable(tdev->vdd);
                if (ret)
                        goto err_power;
        }

        ret = clk_prepare_enable(tdev->clk);
        if (ret)
                goto err_clk;
        if (tdev->clk_ref) {
                ret = clk_prepare_enable(tdev->clk_ref);
                if (ret)
                        goto err_clk_ref;
        }
        ret = clk_prepare_enable(tdev->clk_pwr);
        if (ret)
                goto err_clk_pwr;
        clk_set_rate(tdev->clk_pwr, 204000000);
        udelay(10);

        reset_control_assert(tdev->rst);
        udelay(10);

        if (!tdev->pdev->dev.pm_domain) {
                ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
                if (ret)
                        goto err_clamp;
                udelay(10);
        }

        reset_control_deassert(tdev->rst);
        udelay(10);

        return 0;

err_clamp:
        clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
        if (tdev->clk_ref)
                clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
        clk_disable_unprepare(tdev->clk);
err_clk:
        if (tdev->vdd)
                regulator_disable(tdev->vdd);
err_power:
        return ret;
}

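/*
 * Undo nvkm_device_tegra_power_up(): gate the clocks in reverse order,
 * then cut the VDD regulator if one is in use.
 */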
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
        int ret;

        clk_disable_unprepare(tdev->clk_pwr);
        if (tdev->clk_ref)
                clk_disable_unprepare(tdev->clk_ref);
        clk_disable_unprepare(tdev->clk);
        udelay(10);

        if (tdev->vdd) {
                ret = regulator_disable(tdev->vdd);
                if (ret)
                        return ret;
        }

        return 0;
}

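/*
 * Set up an explicitly-managed IOMMU domain for the GPU.  Any mapping
 * created by the ARM DMA-IOMMU glue is torn down first, since nouveau
 * manages its own GPU virtual address space.  The nvkm_mm allocator is
 * sized so that allocated IOVAs stay below the iommu_bit address limit.
 */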
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        struct device *dev = &tdev->pdev->dev;
        unsigned long pgsize_bitmap;
        int ret;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
        if (dev->archdata.mapping) {
                struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

                arm_iommu_detach_device(dev);
                arm_iommu_release_mapping(mapping);
        }
#endif

        if (!tdev->func->iommu_bit)
                return;

        mutex_init(&tdev->iommu.mutex);

        if (iommu_present(&platform_bus_type)) {
                tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
                if (!tdev->iommu.domain)
                        goto error;

                /*
                 * An IOMMU is only usable if it supports page sizes smaller
                 * than or equal to the system's PAGE_SIZE; PAGE_SIZE itself
                 * is preferred when supported.
                 */
                pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
                if (pgsize_bitmap & PAGE_SIZE) {
                        tdev->iommu.pgshift = PAGE_SHIFT;
                } else {
                        tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
                        if (tdev->iommu.pgshift == 0) {
                                dev_warn(dev, "unsupported IOMMU page size\n");
                                goto free_domain;
                        }
                        tdev->iommu.pgshift -= 1;
                }

                ret = iommu_attach_device(tdev->iommu.domain, dev);
                if (ret)
                        goto free_domain;

                ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
                                   (1ULL << tdev->func->iommu_bit) >>
                                   tdev->iommu.pgshift, 1);
                if (ret)
                        goto detach_device;
        }

        return;

detach_device:
        iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
        iommu_domain_free(tdev->iommu.domain);

error:
        tdev->iommu.domain = NULL;
        tdev->iommu.pgshift = 0;
        dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}

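/* Tear down the allocator and domain set up by nvkm_device_tegra_probe_iommu(). */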
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        if (tdev->iommu.domain) {
                nvkm_mm_fini(&tdev->iommu.mm);
                iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
                iommu_domain_free(tdev->iommu.domain);
        }
#endif
}

static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
        return container_of(device, struct nvkm_device_tegra, device);
}

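/*
 * nvkm addresses resources by BAR index; on Tegra these map directly to
 * the platform device's IORESOURCE_MEM entries.  A missing resource
 * reports an address and size of zero.
 */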
static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? resource_size(res) : 0;
}

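/*
 * Interrupt handler: disarm the master interrupt controller, let the
 * subdevs service their pending interrupts, then rearm.  IRQ_NONE is
 * returned when nothing was handled, as the "stall" line is shared.
 */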
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
        struct nvkm_device_tegra *tdev = arg;
        struct nvkm_device *device = &tdev->device;
        bool handled = false;
        nvkm_mc_intr_unarm(device);
        nvkm_mc_intr(device, &handled);
        nvkm_mc_intr_rearm(device);
        return handled ? IRQ_HANDLED : IRQ_NONE;
}

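/* Release the "stall" IRQ requested by nvkm_device_tegra_init(). */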
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        if (tdev->irq) {
                free_irq(tdev->irq, tdev);
                tdev->irq = 0;
        }
}

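/*
 * Request the GPU's "stall" interrupt, looked up by name from the
 * platform device (i.e. the device tree's interrupt-names property).
 */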
static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        int irq, ret;

        irq = platform_get_irq_byname(tdev->pdev, "stall");
        if (irq < 0)
                return irq;

        ret = request_irq(irq, nvkm_device_tegra_intr,
                          IRQF_SHARED, "nvkm", tdev);
        if (ret)
                return ret;

        tdev->irq = irq;
        return 0;
}

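/*
 * Power the GPU down and detach from the IOMMU.  The returned tdev
 * pointer lets nvkm free the full containing allocation rather than
 * just the embedded nvkm_device.
 */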
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        nvkm_device_tegra_power_down(tdev);
        nvkm_device_tegra_remove_iommu(tdev);
        return tdev;
}

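/*
 * nvkm hooks for the Tegra platform device.  cpu_coherent is false
 * because the GPU on these SoCs is not I/O coherent with the CPU
 * caches, so nvkm must not assume coherent system-memory access.
 */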
static const struct nvkm_device_func
nvkm_device_tegra_func = {
        .tegra = nvkm_device_tegra,
        .dtor = nvkm_device_tegra_dtor,
        .init = nvkm_device_tegra_init,
        .fini = nvkm_device_tegra_fini,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
        .cpu_coherent = false,
};

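/*
 * Probe entry point: acquire the regulator, reset line and clocks
 * described by the nvkm_device_tegra_func, cap the DMA mask at the
 * IOMMU-addressable range, attach the IOMMU, power the GPU up, and
 * finally construct the common nvkm_device.
 */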
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                      struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        struct nvkm_device_tegra *tdev;
        int ret;

        if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
                return -ENOMEM;

        tdev->func = func;
        tdev->pdev = pdev;

        if (func->require_vdd) {
                tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
                if (IS_ERR(tdev->vdd)) {
                        ret = PTR_ERR(tdev->vdd);
                        goto free;
                }
        }

        tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->rst)) {
                ret = PTR_ERR(tdev->rst);
                goto free;
        }

        tdev->clk = devm_clk_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->clk)) {
                ret = PTR_ERR(tdev->clk);
                goto free;
        }

        if (func->require_ref_clk)
                tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
        if (IS_ERR(tdev->clk_ref)) {
                ret = PTR_ERR(tdev->clk_ref);
                goto free;
        }

        tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
        if (IS_ERR(tdev->clk_pwr)) {
                ret = PTR_ERR(tdev->clk_pwr);
                goto free;
        }

        /*
         * The IOMMU bit defines the upper limit of the GPU-addressable space.
         */
        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
        if (ret)
                goto free;

        nvkm_device_tegra_probe_iommu(tdev);

        ret = nvkm_device_tegra_power_up(tdev);
        if (ret)
                goto remove;

        tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
        tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
        ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
                               NVKM_DEVICE_TEGRA, pdev->id, NULL,
                               cfg, dbg, detect, mmio, subdev_mask,
                               &tdev->device);
        if (ret)
                goto powerdown;

        *pdevice = &tdev->device;

        return 0;

powerdown:
        nvkm_device_tegra_power_down(tdev);
remove:
        nvkm_device_tegra_remove_iommu(tdev);
free:
        kfree(tdev);
        return ret;
}
#else
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                      struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        return -ENOSYS;
}
#endif
