root/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c


DEFINITIONS

This source file includes the following definitions:
  1. nv04_vmm_pgt_pte
  2. nv04_vmm_pgt_sgl
  3. nv04_vmm_pgt_dma
  4. nv04_vmm_pgt_unmap
  5. nv04_vmm_valid
  6. nv04_vmm_new_
  7. nv04_vmm_new

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <nvif/if000d.h>
#include <nvif/unpack.h>

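/* Write 'ptes' consecutive PTEs for physically contiguous memory starting at
 * 'addr': each entry is the page address ORed with the PRESENT and RW bits,
 * advancing by one 4KiB page per entry.  PTEs begin 8 bytes into the object,
 * after the two-word DMA object header written by nv04_vmm_new().
 */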
static inline void
nv04_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
        u32 data = addr | 0x00000003; /* PRESENT, RW. */
        while (ptes--) {
                VMM_WO032(pt, vmm, 8 + ptei++ * 4, data);
                data += 0x00001000;
        }
}

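/* Map pages described by a scatter-gather list by iterating it through
 * nv04_vmm_pgt_pte().
 */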
static void
nv04_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
}

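/* Map an array of DMA addresses.  When the CPU page size is 4KiB the
 * addresses can be written directly as PTEs; otherwise fall back to the
 * generic iterator, which maps each CPU page as a run of 4KiB GPU pages
 * via nv04_vmm_pgt_pte().
 */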
static void
nv04_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
#if PAGE_SHIFT == 12
        nvkm_kmap(pt->memory);
        while (ptes--)
                VMM_WO032(pt, vmm, 8 + (ptei++ * 4), *map->dma++ | 0x00000003);
        nvkm_done(pt->memory);
#else
        VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
#endif
}

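/* Invalidate 'ptes' entries starting at 'ptei' by filling them with zero. */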
static void
nv04_vmm_pgt_unmap(struct nvkm_vmm *vmm,
                   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);
}

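/* Page-table operations for the single-level NV04 page table. */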
static const struct nvkm_vmm_desc_func
nv04_vmm_desc_pgt = {
        .unmap = nv04_vmm_pgt_unmap,
        .dma = nv04_vmm_pgt_dma,
        .sgl = nv04_vmm_pgt_sgl,
};

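/* Page-table layout for 4KiB (1 << 12) pages: a single level of 4-byte PTEs. */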
static const struct nvkm_vmm_desc
nv04_vmm_desc_12[] = {
        { PGT, 15, 4, 0x1000, &nv04_vmm_desc_pgt },
        {}
};

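/* Validate per-map arguments: only the empty, versionless nv04_vmm_map_vn
 * layout is accepted.  Left non-static so related VMM implementations can
 * share it.
 */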
int
nv04_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
               struct nvkm_vmm_map *map)
{
        union {
                struct nv04_vmm_map_vn vn;
        } *args = argv;
        int ret = -ENOSYS;
        if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
                VMM_DEBUG(vmm, "args");
        return ret;
}

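/* The NV04 VMM: argument validation as above, plus a single host-backed
 * 4KiB page size.
 */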
static const struct nvkm_vmm_func
nv04_vmm = {
        .valid = nv04_vmm_valid,
        .page = {
                { 12, &nv04_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
                {}
        }
};

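/* Common constructor shared by the NV04-family VMMs: create the VMM itself,
 * then check that the (empty, versionless) creation arguments unpack cleanly.
 */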
int
nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
              u32 pd_header, bool managed, u64 addr, u64 size,
              void *argv, u32 argc, struct lock_class_key *key,
              const char *name, struct nvkm_vmm **pvmm)
{
        union {
                struct nv04_vmm_vn vn;
        } *args = argv;
        int ret;

        ret = nvkm_vmm_new_(func, mmu, pd_header, managed, addr, size,
                            key, name, pvmm);
        if (ret)
                return ret;

        return nvif_unvers(-ENOSYS, &argv, &argc, args->vn);
}

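/* Create the NV04 VMM and write the page table's 8-byte DMA object header:
 * the first word holds the object class/flags (PCI, RW, PT, !LN per the
 * in-line comment) and the second the last valid address (vmm->limit - 1).
 */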
int
nv04_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
             void *argv, u32 argc, struct lock_class_key *key, const char *name,
             struct nvkm_vmm **pvmm)
{
        struct nvkm_memory *mem;
        struct nvkm_vmm *vmm;
        int ret;

        ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, managed, addr, size,
                            argv, argc, key, name, &vmm);
        *pvmm = vmm;
        if (ret)
                return ret;

        mem = vmm->pd->pt[0]->memory;
        nvkm_kmap(mem);
        nvkm_wo32(mem, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
        nvkm_wo32(mem, 0x00004, vmm->limit - 1);
        nvkm_done(mem);
        return 0;
}
