This source file includes the following definitions:
- nv41_vmm_pgt_pte
 
- nv41_vmm_pgt_sgl
 
- nv41_vmm_pgt_dma
 
- nv41_vmm_pgt_unmap
 
- nv41_vmm_flush
 
- nv41_vmm_new
 
   1 
   2 
   3 
   4 
   5 
   6 
   7 
   8 
   9 
  10 
  11 
  12 
  13 
  14 
  15 
  16 
  17 
  18 
  19 
  20 
  21 
  22 #include "vmm.h"
  23 
  24 #include <subdev/timer.h>
  25 
  26 static void
  27 nv41_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
  28                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
  29 {
  30         u32 data = (addr >> 7) | 0x00000001; 
  31         while (ptes--) {
  32                 VMM_WO032(pt, vmm, ptei++ * 4, data);
  33                 data += 0x00000020;
  34         }
  35 }
  36 
/* Map a scatter-gather list: iterate its segments via the shared VMM
 * helper macro, emitting each contiguous run with nv41_vmm_pgt_pte(). */
static void
nv41_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
}
  43 
  44 static void
  45 nv41_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
  46                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
  47 {
  48 #if PAGE_SHIFT == 12
  49         nvkm_kmap(pt->memory);
  50         while (ptes--) {
  51                 const u32 data = (*map->dma++ >> 7) | 0x00000001;
  52                 VMM_WO032(pt, vmm, ptei++ * 4, data);
  53         }
  54         nvkm_done(pt->memory);
  55 #else
  56         VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
  57 #endif
  58 }
  59 
/* Invalidate 'ptes' entries starting at index 'ptei' by filling them
 * with zero (a cleared PTE has the valid bit unset). */
static void
nv41_vmm_pgt_unmap(struct nvkm_vmm *vmm,
                   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        VMM_FO032(pt, vmm, ptei * 4, 0, ptes);
}
  66 
/* Page-table operations for NV41's single-level page table. */
static const struct nvkm_vmm_desc_func
nv41_vmm_desc_pgt = {
        .unmap = nv41_vmm_pgt_unmap,
        .dma = nv41_vmm_pgt_dma,
        .sgl = nv41_vmm_pgt_sgl,
};
  73 
/* Descriptor chain for 4KiB (1 << 12) pages: a single PGT level.
 * NOTE(review): the literal fields correspond to struct nvkm_vmm_desc
 * members (type, bits, pte size, alignment, presumably) — confirm
 * against the definition in vmm.h. */
static const struct nvkm_vmm_desc
nv41_vmm_desc_12[] = {
        { PGT, 17, 4, 0x1000, &nv41_vmm_desc_pgt },
        {}
};
  79 
/* Flush the GPU's TLB after page-table updates.  The 'level' argument is
 * unused on NV41 (the whole TLB is flushed unconditionally). */
static void
nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
{
        struct nvkm_subdev *subdev = &vmm->mmu->subdev;
        struct nvkm_device *device = subdev->device;

        /* Serialise against other users of the MMU flush register. */
        mutex_lock(&subdev->mutex);
        /* Writing 0x22 to 0x100810 presumably triggers the flush; bit 5
         * reads back set once it completes — NOTE(review): confirm
         * register semantics against NV41 MMU documentation. */
        nvkm_wr32(device, 0x100810, 0x00000022);
        /* Poll for completion with a 2ms timeout; fall through either way. */
        nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x100810) & 0x00000020)
                        break;
        );
        nvkm_wr32(device, 0x100810, 0x00000000);
        mutex_unlock(&subdev->mutex);
}
  95 
/* NV41 VMM function table: NV04-style validation, NV41 TLB flush, and a
 * single supported page size (4KiB, host-backed page tables). */
static const struct nvkm_vmm_func
nv41_vmm = {
        .valid = nv04_vmm_valid,
        .flush = nv41_vmm_flush,
        .page = {
                { 12, &nv41_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
                {}
        }
};
 105 
/* Construct an NV41 VMM instance.  Delegates to the NV04 constructor with
 * the NV41 function table; the extra 0 argument matches nv04_vmm_new_'s
 * signature (presumably a header/offset value — confirm in vmmnv04.c).
 * Returns 0 on success or a negative errno. */
int
nv41_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
             void *argv, u32 argc, struct lock_class_key *key, const char *name,
             struct nvkm_vmm **pvmm)
{
        return nv04_vmm_new_(&nv41_vmm, mmu, 0, managed, addr, size,
                             argv, argc, key, name, pvmm);
}