root/drivers/staging/media/ipu3/ipu3-mmu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Intel Corporation.
 * Copyright 2018 Google LLC.
 *
 * Author: Tuukka Toivonen <tuukka.toivonen@intel.com>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 * Author: Samu Onkalo <samu.onkalo@intel.com>
 * Author: Tomasz Figa <tfiga@chromium.org>
 *
 */

#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/set_memory.h>

#include "ipu3-mmu.h"

#define IPU3_PT_BITS            10
#define IPU3_PT_PTES            (1UL << IPU3_PT_BITS)
#define IPU3_PT_SIZE            (IPU3_PT_PTES << 2)
#define IPU3_PT_ORDER           (IPU3_PT_SIZE >> PAGE_SHIFT)

#define IPU3_ADDR2PTE(addr)     ((addr) >> IPU3_PAGE_SHIFT)
#define IPU3_PTE2ADDR(pte)      ((phys_addr_t)(pte) << IPU3_PAGE_SHIFT)

#define IPU3_L2PT_SHIFT         IPU3_PT_BITS
#define IPU3_L2PT_MASK          ((1UL << IPU3_L2PT_SHIFT) - 1)

#define IPU3_L1PT_SHIFT         IPU3_PT_BITS
#define IPU3_L1PT_MASK          ((1UL << IPU3_L1PT_SHIFT) - 1)

#define IPU3_MMU_ADDRESS_BITS   (IPU3_PAGE_SHIFT + \
                                 IPU3_L2PT_SHIFT + \
                                 IPU3_L1PT_SHIFT)
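
/*
 * The MMU uses a two-level page table: an IOVA is split into a 10-bit L1
 * index, a 10-bit L2 index and an IPU3_PAGE_SHIFT-bit page offset (see
 * address_to_pte_idx() below). Each page table holds 1024 32-bit entries
 * and therefore occupies 4 KiB.
 */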

#define IMGU_REG_BASE           0x4000
#define REG_TLB_INVALIDATE      (IMGU_REG_BASE + 0x300)
#define TLB_INVALIDATE          1
#define REG_L1_PHYS             (IMGU_REG_BASE + 0x304) /* 27-bit pfn */
#define REG_GP_HALT             (IMGU_REG_BASE + 0x5dc)
#define REG_GP_HALTED           (IMGU_REG_BASE + 0x5e0)

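/**
 * struct imgu_mmu - per-device IPU3 MMU state
 * @dev: parent device
 * @base: IOMEM base of the MMU registers
 * @lock: protects access to @l2pts and @l1pt
 * @dummy_page: page that all unmapped IOVAs resolve to
 * @dummy_page_pteval: PTE value pointing at @dummy_page
 * @dummy_l2pt: L2 page table with every entry set to @dummy_page_pteval
 * @dummy_l2pt_pteval: L1 PTE value pointing at @dummy_l2pt
 * @l2pts: CPU pointers to the allocated L2 page tables (NULL means dummy)
 * @l1pt: the L1 page table
 * @geometry: mappable IOVA range reported to users of this MMU
 */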
struct imgu_mmu {
        struct device *dev;
        void __iomem *base;
        /* protect access to l2pts, l1pt */
        spinlock_t lock;

        void *dummy_page;
        u32 dummy_page_pteval;

        u32 *dummy_l2pt;
        u32 dummy_l2pt_pteval;

        u32 **l2pts;
        u32 *l1pt;

        struct imgu_mmu_info geometry;
};

static inline struct imgu_mmu *to_imgu_mmu(struct imgu_mmu_info *info)
{
        return container_of(info, struct imgu_mmu, geometry);
}

/**
 * imgu_mmu_tlb_invalidate - invalidate translation look-aside buffer
 * @mmu: MMU to perform the invalidate operation on
 *
 * This function invalidates the whole TLB. Must be called when the hardware
 * is powered on.
 */
static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
{
        writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
}

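/**
 * call_if_imgu_is_powered - run a callback only while the hardware is powered
 * @mmu: MMU whose device is checked for an active runtime PM reference
 * @func: function to call with @mmu as its argument
 *
 * The callback is skipped when the device is runtime-suspended, so @func may
 * safely access hardware registers.
 */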
static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
                                    void (*func)(struct imgu_mmu *mmu))
{
        if (!pm_runtime_get_if_in_use(mmu->dev))
                return;

        func(mmu);
        pm_runtime_put(mmu->dev);
}

/**
 * imgu_mmu_set_halt - set CIO gate halt bit
 * @mmu: MMU to set the CIO gate bit in.
 * @halt: Desired state of the gate bit.
 *
 * This function sets the CIO gate bit that controls whether external memory
 * accesses are allowed. Must be called when the hardware is powered on.
 */
static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
{
        int ret;
        u32 val;

        writel(halt, mmu->base + REG_GP_HALT);
        ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
                                 val, (val & 1) == halt, 1000, 100000);

        if (ret)
                dev_err(mmu->dev, "failed to %s CIO gate halt\n",
                        halt ? "set" : "clear");
}

/**
 * imgu_mmu_alloc_page_table - allocate a pre-filled page table
 * @pteval: Value to initialize the page table entries with.
 *
 * Return: Pointer to allocated page table or NULL on failure.
 */
static u32 *imgu_mmu_alloc_page_table(u32 pteval)
{
        u32 *pt;
        int pte;

        pt = (u32 *)__get_free_page(GFP_KERNEL);
        if (!pt)
                return NULL;

        for (pte = 0; pte < IPU3_PT_PTES; pte++)
                pt[pte] = pteval;

        set_memory_uc((unsigned long)pt, IPU3_PT_ORDER);

        return pt;
}

/**
 * imgu_mmu_free_page_table - free page table
 * @pt: Page table to free.
 */
static void imgu_mmu_free_page_table(u32 *pt)
{
        set_memory_wb((unsigned long)pt, IPU3_PT_ORDER);
        free_page((unsigned long)pt);
}

/**
 * address_to_pte_idx - split IOVA into L1 and L2 page table indices
 * @iova: IOVA to split.
 * @l1pt_idx: Output for the L1 page table index.
 * @l2pt_idx: Output for the L2 page table index.
 */
static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
                                      u32 *l2pt_idx)
{
        iova >>= IPU3_PAGE_SHIFT;

        if (l2pt_idx)
                *l2pt_idx = iova & IPU3_L2PT_MASK;

        iova >>= IPU3_L2PT_SHIFT;

        if (l1pt_idx)
                *l1pt_idx = iova & IPU3_L1PT_MASK;
}

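/**
 * imgu_mmu_get_l2pt - find or allocate the L2 page table for an L1 entry
 * @mmu: MMU to get the L2 page table for
 * @l1pt_idx: index into the L1 page table
 *
 * If no L2 page table exists for @l1pt_idx yet, one is allocated (with the
 * lock dropped around the allocation) and hooked up to the L1 page table.
 *
 * Return: pointer to the L2 page table, or NULL if allocation failed.
 */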
static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
{
        unsigned long flags;
        u32 *l2pt, *new_l2pt;
        u32 pteval;

        spin_lock_irqsave(&mmu->lock, flags);

        l2pt = mmu->l2pts[l1pt_idx];
        if (l2pt)
                goto done;

        spin_unlock_irqrestore(&mmu->lock, flags);

        new_l2pt = imgu_mmu_alloc_page_table(mmu->dummy_page_pteval);
        if (!new_l2pt)
                return NULL;

        spin_lock_irqsave(&mmu->lock, flags);

        dev_dbg(mmu->dev, "allocated page table %p for l1pt_idx %u\n",
                new_l2pt, l1pt_idx);

        l2pt = mmu->l2pts[l1pt_idx];
        if (l2pt) {
                imgu_mmu_free_page_table(new_l2pt);
                goto done;
        }

        l2pt = new_l2pt;
        mmu->l2pts[l1pt_idx] = new_l2pt;

        pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt));
        mmu->l1pt[l1pt_idx] = pteval;

done:
        spin_unlock_irqrestore(&mmu->lock, flags);
        return l2pt;
}

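/**
 * __imgu_mmu_map - map a single page
 * @mmu: MMU to map the page in
 * @iova: IOVA of the page, must be IPU3_PAGE_SIZE aligned
 * @paddr: physical address to map the page to
 *
 * Return: 0 on success, -EBUSY if the entry is already mapped, -ENOMEM if
 * no L2 page table could be allocated, or -ENODEV if @mmu is NULL.
 */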
static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
                          phys_addr_t paddr)
{
        u32 l1pt_idx, l2pt_idx;
        unsigned long flags;
        u32 *l2pt;

        if (!mmu)
                return -ENODEV;

        address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

        l2pt = imgu_mmu_get_l2pt(mmu, l1pt_idx);
        if (!l2pt)
                return -ENOMEM;

        spin_lock_irqsave(&mmu->lock, flags);

        if (l2pt[l2pt_idx] != mmu->dummy_page_pteval) {
                spin_unlock_irqrestore(&mmu->lock, flags);
                return -EBUSY;
        }

        l2pt[l2pt_idx] = IPU3_ADDR2PTE(paddr);

        spin_unlock_irqrestore(&mmu->lock, flags);

        return 0;
}

/**
 * imgu_mmu_map - map a buffer to a physical address
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @paddr: the physical address
 * @size: length of the mapping
 *
 * The function has been adapted from iommu_map() in
 * drivers/iommu/iommu.c.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
                 phys_addr_t paddr, size_t size)
{
        struct imgu_mmu *mmu = to_imgu_mmu(info);
        int ret = 0;

        /*
         * Both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware.
         */
        if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
                dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
                        iova, &paddr, size);
                return -EINVAL;
        }

        dev_dbg(mmu->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
                iova, &paddr, size);

        while (size) {
                dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);

                ret = __imgu_mmu_map(mmu, iova, paddr);
                if (ret)
                        break;

                iova += IPU3_PAGE_SIZE;
                paddr += IPU3_PAGE_SIZE;
                size -= IPU3_PAGE_SIZE;
        }

        call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

        return ret;
}

/**
 * imgu_mmu_map_sg - Map a scatterlist
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @sg: the scatterlist to map
 * @nents: number of entries in the scatterlist
 *
 * The function has been adapted from default_iommu_map_sg() in
 * drivers/iommu/iommu.c.
 *
 * Return: the number of bytes mapped, or 0 on failure.
 */
size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
                       struct scatterlist *sg, unsigned int nents)
{
        struct imgu_mmu *mmu = to_imgu_mmu(info);
        struct scatterlist *s;
        size_t s_length, mapped = 0;
        unsigned int i;
        int ret;

        for_each_sg(sg, s, nents, i) {
                phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

                s_length = s->length;

                if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
                        goto out_err;

                /* must be IPU3_PAGE_SIZE aligned to be mapped singly */
                if (i == nents - 1 && !IS_ALIGNED(s->length, IPU3_PAGE_SIZE))
                        s_length = PAGE_ALIGN(s->length);

                ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
                if (ret)
                        goto out_err;

                mapped += s_length;
        }

        call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

        return mapped;

out_err:
        /* undo mappings already done */
        imgu_mmu_unmap(info, iova, mapped);

        return 0;
}

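/**
 * __imgu_mmu_unmap - clear a single page table entry
 * @mmu: MMU to unmap the page in
 * @iova: IOVA of the page
 * @size: size of the page
 *
 * The entry is pointed back at the dummy page.
 *
 * Return: @size if a mapping was removed, 0 if the IOVA was not mapped.
 */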
static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
                               unsigned long iova, size_t size)
{
        u32 l1pt_idx, l2pt_idx;
        unsigned long flags;
        size_t unmap = size;
        u32 *l2pt;

        if (!mmu)
                return 0;

        address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

        spin_lock_irqsave(&mmu->lock, flags);

        l2pt = mmu->l2pts[l1pt_idx];
        if (!l2pt) {
                spin_unlock_irqrestore(&mmu->lock, flags);
                return 0;
        }

        if (l2pt[l2pt_idx] == mmu->dummy_page_pteval)
                unmap = 0;

        l2pt[l2pt_idx] = mmu->dummy_page_pteval;

        spin_unlock_irqrestore(&mmu->lock, flags);

        return unmap;
}

/**
 * imgu_mmu_unmap - Unmap a buffer
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @size: the length of the buffer
 *
 * The function has been adapted from iommu_unmap() in
 * drivers/iommu/iommu.c.
 *
 * Return: the number of bytes unmapped.
 */
size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
                      size_t size)
{
        struct imgu_mmu *mmu = to_imgu_mmu(info);
        size_t unmapped_page, unmapped = 0;

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware.
         */
        if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
                dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
                        iova, size);
                return -EINVAL;
        }

        dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
                if (!unmapped_page)
                        break;

                dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n",
                        iova, unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

        return unmapped;
}

/**
 * imgu_mmu_init() - initialize IPU3 MMU block
 *
 * @parent:     struct device parent
 * @base:       IOMEM base of hardware registers.
 *
 * Return: Pointer to the IPU3 MMU private data or ERR_PTR() on error.
 */
struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
{
        struct imgu_mmu *mmu;
        u32 pteval;

        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        mmu->dev = parent;
        mmu->base = base;
        spin_lock_init(&mmu->lock);

        /* Disallow external memory access while there are no valid page tables. */
        imgu_mmu_set_halt(mmu, true);

        /*
         * The MMU does not have a "valid" bit, so we have to use a dummy
         * page for invalid entries.
         */
        mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
        if (!mmu->dummy_page)
                goto fail_group;
        pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
        mmu->dummy_page_pteval = pteval;

        /*
         * Allocate a dummy L2 page table with all entries pointing to
         * the dummy page.
         */
        mmu->dummy_l2pt = imgu_mmu_alloc_page_table(pteval);
        if (!mmu->dummy_l2pt)
                goto fail_dummy_page;
        pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
        mmu->dummy_l2pt_pteval = pteval;

        /*
         * Allocate the array of L2PT CPU pointers, initialized to zero,
         * which means the dummy L2PT allocated above.
         */
        mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));
        if (!mmu->l2pts)
                goto fail_l2pt;

        /* Allocate the L1 page table. */
        mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
        if (!mmu->l1pt)
                goto fail_l2pts;

        pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
        writel(pteval, mmu->base + REG_L1_PHYS);
        imgu_mmu_tlb_invalidate(mmu);
        imgu_mmu_set_halt(mmu, false);

        mmu->geometry.aperture_start = 0;
        mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);

        return &mmu->geometry;

fail_l2pts:
        vfree(mmu->l2pts);
fail_l2pt:
        imgu_mmu_free_page_table(mmu->dummy_l2pt);
fail_dummy_page:
        free_page((unsigned long)mmu->dummy_page);
fail_group:
        kfree(mmu);

        return ERR_PTR(-ENOMEM);
}

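/*
 * Typical call sequence (a sketch only; the real callers live elsewhere in
 * the ipu3 driver):
 *
 *      info = imgu_mmu_init(dev, base);
 *      imgu_mmu_map(info, iova, paddr, size);    (or imgu_mmu_map_sg())
 *      imgu_mmu_unmap(info, iova, size);
 *      imgu_mmu_exit(info);
 */
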
/**
 * imgu_mmu_exit() - clean up IPU3 MMU block
 *
 * @info: MMU mappable range
 */
void imgu_mmu_exit(struct imgu_mmu_info *info)
{
        struct imgu_mmu *mmu = to_imgu_mmu(info);

        /* We are going to free our page tables, no more memory access. */
        imgu_mmu_set_halt(mmu, true);
        imgu_mmu_tlb_invalidate(mmu);

        imgu_mmu_free_page_table(mmu->l1pt);
        vfree(mmu->l2pts);
        imgu_mmu_free_page_table(mmu->dummy_l2pt);
        free_page((unsigned long)mmu->dummy_page);
        kfree(mmu);
}

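/**
 * imgu_mmu_suspend() - halt external memory access before powering down
 *
 * @info: MMU mappable range
 */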
void imgu_mmu_suspend(struct imgu_mmu_info *info)
{
        struct imgu_mmu *mmu = to_imgu_mmu(info);

        imgu_mmu_set_halt(mmu, true);
}

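/**
 * imgu_mmu_resume() - restore MMU state after powering up
 *
 * @info: MMU mappable range
 *
 * The hardware register state is lost across a power cycle, so the L1 page
 * table address is reprogrammed and the TLB invalidated before the CIO gate
 * is reopened.
 */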
void imgu_mmu_resume(struct imgu_mmu_info *info)
{
        struct imgu_mmu *mmu = to_imgu_mmu(info);
        u32 pteval;

        imgu_mmu_set_halt(mmu, true);

        pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
        writel(pteval, mmu->base + REG_L1_PHYS);

        imgu_mmu_tlb_invalidate(mmu);
        imgu_mmu_set_halt(mmu, false);
}
