drivers/gpu/drm/v3d/v3d_mmu.c

DEFINITIONS

This source file includes the following definitions.
  1. v3d_mmu_flush_all
  2. v3d_mmu_set_page_table
  3. v3d_mmu_insert_ptes
  4. v3d_mmu_remove_ptes

// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */

/**
 * DOC: Broadcom V3D MMU
 *
 * The V3D 3.x hardware (compared to VC4) now includes an MMU.  It has
 * a single level of page tables for the V3D's 4GB address space to
 * map to AXI bus addresses, so it can need up to 4MB of physically
 * contiguous memory to store the PTEs.
 *
 * Because that 4MB of contiguous memory for page tables is precious,
 * and switching between page tables is expensive, we load all BOs
 * into the same 4GB address space.
 *
 * To protect clients from each other, we should use the GMP to
 * quickly mask out (at 128KB granularity) which pages are available
 * to each client.  This is not yet implemented.
 */
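
/* For scale: with 4KB pages, the 4GB address space needs
 * 4GB / 4KB = 2^20 PTEs, and at one u32 (4 bytes) per PTE that is
 * 2^20 * 4 = 4MB of page-table memory, matching the figure above.
 */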

#include "v3d_drv.h"
#include "v3d_regs.h"

#define V3D_MMU_PAGE_SHIFT 12

/* Note: All PTEs for the 1MB superpage must be filled with the
 * superpage bit set.
 */
#define V3D_PTE_SUPERPAGE BIT(31)
#define V3D_PTE_WRITEABLE BIT(29)
#define V3D_PTE_VALID BIT(28)
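
/* Illustrative sketch, not driver code: a PTE holds the physical
 * page number (bus address >> V3D_MMU_PAGE_SHIFT) in its low bits,
 * with the flag bits above.  Mapping one writeable 4KB page at a
 * hypothetical bus address "paddr" would look like:
 *
 *     u32 pte = V3D_PTE_VALID | V3D_PTE_WRITEABLE |
 *               (paddr >> V3D_MMU_PAGE_SHIFT);
 *
 * A 1MB superpage instead fills all 256 of its PTEs (1MB / 4KB) with
 * V3D_PTE_SUPERPAGE also set, per the note above.
 */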

static int v3d_mmu_flush_all(struct v3d_dev *v3d)
{
        int ret;

        /* Make sure that another flush isn't already running when we
         * start this one.
         */
        ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
                         V3D_MMU_CTL_TLB_CLEARING), 100);
        if (ret)
                dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n");

        /* Kick off a TLB clear and an MMU cache (MMUC) flush. */
        V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
                  V3D_MMU_CTL_TLB_CLEAR);

        V3D_WRITE(V3D_MMUC_CONTROL,
                  V3D_MMUC_CONTROL_FLUSH |
                  V3D_MMUC_CONTROL_ENABLE);

        /* Wait for both operations to finish. */
        ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
                         V3D_MMU_CTL_TLB_CLEARING), 100);
        if (ret) {
                dev_err(v3d->dev, "TLB clear wait idle failed\n");
                return ret;
        }

        ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
                         V3D_MMUC_CONTROL_FLUSHING), 100);
        if (ret)
                dev_err(v3d->dev, "MMUC flush wait idle failed\n");

        return ret;
}
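
/* The MMU caches translations, so the PTE update helpers below
 * (v3d_mmu_insert_ptes() and v3d_mmu_remove_ptes()) both finish with
 * v3d_mmu_flush_all() to keep the TLB and MMUC coherent with the
 * page table in memory.
 */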

int v3d_mmu_set_page_table(struct v3d_dev *v3d)
{
        /* The register holds a page number, so pt_paddr must be
         * 4KB-aligned.
         */
        V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT);

        /* Enable the MMU, and abort and interrupt on invalid PTEs,
         * write violations, and cap-exceeded errors.
         */
        V3D_WRITE(V3D_MMU_CTL,
                  V3D_MMU_CTL_ENABLE |
                  V3D_MMU_CTL_PT_INVALID_ENABLE |
                  V3D_MMU_CTL_PT_INVALID_ABORT |
                  V3D_MMU_CTL_PT_INVALID_INT |
                  V3D_MMU_CTL_WRITE_VIOLATION_ABORT |
                  V3D_MMU_CTL_WRITE_VIOLATION_INT |
                  V3D_MMU_CTL_CAP_EXCEEDED_ABORT |
                  V3D_MMU_CTL_CAP_EXCEEDED_INT);

        /* Back illegal addresses with the dedicated scratch page. */
        V3D_WRITE(V3D_MMU_ILLEGAL_ADDR,
                  (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) |
                  V3D_MMU_ILLEGAL_ADDR_ENABLE);

        V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE);

        return v3d_mmu_flush_all(v3d);
}
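
/* For example (hypothetical address): with v3d->pt_paddr ==
 * 0x40000000, V3D_MMU_PT_PA_BASE is written as
 * 0x40000000 >> 12 == 0x40000.
 */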

void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
        struct drm_gem_shmem_object *shmem_obj = &bo->base;
        struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
        u32 page = bo->node.start;
        u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
        unsigned int count;
        struct scatterlist *sgl;

        /* Write one PTE per 4KB page of each DMA segment, starting at
         * the BO's offset in the V3D address space.
         */
        for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) {
                u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT;
                u32 pte = page_prot | page_address;
                u32 i;

                /* Bus-address page numbers must fit in 24 bits. */
                BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >=
                       BIT(24));

                for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++)
                        v3d->pt[page++] = pte + i;
        }

        /* We should have emitted exactly one PTE per page of the BO. */
        WARN_ON_ONCE(page - bo->node.start !=
                     shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);

        if (v3d_mmu_flush_all(v3d))
                dev_err(v3d->dev, "MMU flush timeout\n");
}
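
/* Worked example with hypothetical numbers: an sg entry with
 * sg_dma_address() == 0x40000000 and sg_dma_len() == 16KB covers four
 * pages, so v3d_mmu_insert_ptes() writes four PTEs:
 *
 *     V3D_PTE_VALID | V3D_PTE_WRITEABLE | (0x40000 + i),  i = 0..3
 */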

void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
        struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
        u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
        u32 page;

        /* Clear every PTE the BO occupied, then flush so the MMU
         * drops any stale cached translations.
         */
        for (page = bo->node.start; page < bo->node.start + npages; page++)
                v3d->pt[page] = 0;

        if (v3d_mmu_flush_all(v3d))
                dev_err(v3d->dev, "MMU flush timeout\n");
}
