drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c


DEFINITIONS

This source file includes the following definitions:
  1. nv44_vmm_pgt_fill
  2. nv44_vmm_pgt_pte
  3. nv44_vmm_pgt_sgl
  4. nv44_vmm_pgt_dma
  5. nv44_vmm_pgt_unmap
  6. nv44_vmm_flush
  7. nv44_vmm_new

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <subdev/timer.h>

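/*
 * NV44 packs four PTEs into each 16-byte group of the page table.  A PTE
 * is the 27-bit page frame number (physical address >> 12) of a 4KiB page,
 * laid out across the group's four 32-bit words as follows (derived from
 * the masks and shifts below):
 *
 *   PTE0: word0 bits  0:26
 *   PTE1: word0 bits 27:31 + word1 bits  0:21
 *   PTE2: word1 bits 22:31 + word2 bits  0:16
 *   PTE3: word2 bits 17:31 + word3 bits  0:11
 *
 * Bit 30 of word3 (0x40000000) is always set on writeback; its exact
 * hardware meaning isn't documented here.
 *
 * nv44_vmm_pgt_fill() updates a subset of one group with a read-modify-
 * write cycle.  Entries with no backing address (list == NULL) are
 * pointed at the dummy page allocated in nv44_vmm_new().
 */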
static void
nv44_vmm_pgt_fill(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  dma_addr_t *list, u32 ptei, u32 ptes)
{
        u32 pteo = (ptei << 2) & ~0x0000000f;
        u32 tmp[4];

        tmp[0] = nvkm_ro32(pt->memory, pteo + 0x0);
        tmp[1] = nvkm_ro32(pt->memory, pteo + 0x4);
        tmp[2] = nvkm_ro32(pt->memory, pteo + 0x8);
        tmp[3] = nvkm_ro32(pt->memory, pteo + 0xc);

        while (ptes--) {
                u32 addr = (list ? *list++ : vmm->null) >> 12;
                switch (ptei++ & 0x3) {
                case 0:
                        tmp[0] &= ~0x07ffffff;
                        tmp[0] |= addr;
                        break;
                case 1:
                        tmp[0] &= ~0xf8000000;
                        tmp[0] |= addr << 27;
                        tmp[1] &= ~0x003fffff;
                        tmp[1] |= addr >> 5;
                        break;
                case 2:
                        tmp[1] &= ~0xffc00000;
                        tmp[1] |= addr << 22;
                        tmp[2] &= ~0x0001ffff;
                        tmp[2] |= addr >> 10;
                        break;
                case 3:
                        tmp[2] &= ~0xfffe0000;
                        tmp[2] |= addr << 17;
                        tmp[3] &= ~0x00000fff;
                        tmp[3] |= addr >> 15;
                        break;
                }
        }

        VMM_WO032(pt, vmm, pteo + 0x0, tmp[0]);
        VMM_WO032(pt, vmm, pteo + 0x4, tmp[1]);
        VMM_WO032(pt, vmm, pteo + 0x8, tmp[2]);
        VMM_WO032(pt, vmm, pteo + 0xc, tmp[3] | 0x40000000);
}

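/*
 * Write 'ptes' consecutive PTEs for a linearly increasing physical
 * address 'addr' (one 4KiB page per PTE).  Unaligned leading/trailing
 * entries go through nv44_vmm_pgt_fill(); whole groups of four are
 * packed and written directly.
 */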
static void
nv44_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
        dma_addr_t tmp[4], i;

        if (ptei & 3) {
                const u32 pten = min(ptes, 4 - (ptei & 3));
                for (i = 0; i < pten; i++, addr += 0x1000)
                        tmp[i] = addr;
                nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, pten);
                ptei += pten;
                ptes -= pten;
        }

        while (ptes >= 4) {
                for (i = 0; i < 4; i++, addr += 0x1000)
                        tmp[i] = addr >> 12;
                VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >>  0 | tmp[1] << 27);
                VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >>  5 | tmp[2] << 22);
                VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
                VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
                ptes -= 4;
        }

        if (ptes) {
                for (i = 0; i < ptes; i++, addr += 0x1000)
                        tmp[i] = addr;
                nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);
        }
}

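/* Map pages described by a scatterlist via the generic per-page iterator. */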
static void
nv44_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
}

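/*
 * Map pages from an array of DMA addresses.  When the CPU page size
 * matches the GPU's 4KiB pages (PAGE_SHIFT == 12), groups of four PTEs
 * are packed and written directly; otherwise fall back to the generic
 * per-page iterator.
 */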
static void
nv44_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
#if PAGE_SHIFT == 12
        nvkm_kmap(pt->memory);
        if (ptei & 3) {
                const u32 pten = min(ptes, 4 - (ptei & 3));
                nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten);
                ptei += pten;
                ptes -= pten;
                map->dma += pten;
        }

        while (ptes >= 4) {
                u32 tmp[4], i;
                for (i = 0; i < 4; i++)
                        tmp[i] = *map->dma++ >> 12;
                VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >>  0 | tmp[1] << 27);
                VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >>  5 | tmp[2] << 22);
                VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
                VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
                ptes -= 4;
        }

        if (ptes) {
                nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, ptes);
                map->dma += ptes;
        }
        nvkm_done(pt->memory);
#else
        VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
#endif
}

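/*
 * Invalidate a range of PTEs.  Intermediate full groups are zeroed
 * directly; the trailing entries (note 'ptes > 4', so the final group
 * always takes this path) and any unaligned leading ones are rewritten
 * via nv44_vmm_pgt_fill() with a NULL list, pointing them at the dummy
 * page.
 */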
static void
nv44_vmm_pgt_unmap(struct nvkm_vmm *vmm,
                   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        nvkm_kmap(pt->memory);
        if (ptei & 3) {
                const u32 pten = min(ptes, 4 - (ptei & 3));
                nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, pten);
                ptei += pten;
                ptes -= pten;
        }

        while (ptes > 4) {
                VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
                VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
                VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
                VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
                ptes -= 4;
        }

        if (ptes)
                nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, ptes);
        nvkm_done(pt->memory);
}

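/* PTE backend operations for NV44 page tables. */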
static const struct nvkm_vmm_desc_func
nv44_vmm_desc_pgt = {
        .unmap = nv44_vmm_pgt_unmap,
        .dma = nv44_vmm_pgt_dma,
        .sgl = nv44_vmm_pgt_sgl,
};

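/*
 * Single-level page table for 4KiB pages: reading the nvkm_vmm_desc
 * fields as (type, bits, PTE size, alignment), this covers 17 VMA bits
 * (128Ki PTEs of 4 bytes each, i.e. 512MiB per table) with 0x80000-byte
 * alignment.
 */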
static const struct nvkm_vmm_desc
nv44_vmm_desc_12[] = {
        { PGT, 17, 4, 0x80000, &nv44_vmm_desc_pgt },
        {}
};

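/*
 * Flush the MMU's TLB.  Register semantics are inferred from usage:
 * 0x100814 appears to take the highest flushable address, writing 0x20
 * to 0x100808 seems to trigger the flush, and bit 0 of 0x100808 reads
 * back as 1 on completion (polled for up to 2ms).
 */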
static void
nv44_vmm_flush(struct nvkm_vmm *vmm, int level)
{
        struct nvkm_device *device = vmm->mmu->subdev.device;
        nvkm_wr32(device, 0x100814, vmm->limit - 4096);
        nvkm_wr32(device, 0x100808, 0x00000020);
        nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x100808) & 0x00000001)
                        break;
        );
        nvkm_wr32(device, 0x100808, 0x00000000);
}

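/* NV44 VMM backend: only 4KiB host-memory pages are supported. */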
static const struct nvkm_vmm_func
nv44_vmm = {
        .valid = nv04_vmm_valid,
        .flush = nv44_vmm_flush,
        .page = {
                { 12, &nv44_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
                {}
        }
};

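/*
 * Create an NV44 VMM.  The 16KiB of coherent memory allocated here backs
 * the dummy page(s) that unmapped PTEs are pointed at, presumably to give
 * the GPU somewhere harmless to read from; allocation failure is
 * non-fatal, falling back to address zero with a warning.
 */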
int
nv44_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
             void *argv, u32 argc, struct lock_class_key *key, const char *name,
             struct nvkm_vmm **pvmm)
{
        struct nvkm_subdev *subdev = &mmu->subdev;
        struct nvkm_vmm *vmm;
        int ret;

        ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, managed, addr, size,
                            argv, argc, key, name, &vmm);
        *pvmm = vmm;
        if (ret)
                return ret;

        vmm->nullp = dma_alloc_coherent(subdev->device->dev, 16 * 1024,
                                        &vmm->null, GFP_KERNEL);
        if (!vmm->nullp) {
                nvkm_warn(subdev, "unable to allocate dummy pages\n");
                vmm->null = 0;
        }

        return 0;
}
