This source file includes the following definitions:
- nvif_vmm_unmap
- nvif_vmm_map
- nvif_vmm_put
- nvif_vmm_get
- nvif_vmm_fini
- nvif_vmm_init
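
For context, these entry points are normally used together: nvif_vmm_init() creates the address space, nvif_vmm_get() reserves a virtual range, nvif_vmm_map()/nvif_vmm_unmap() attach and detach backing memory, and nvif_vmm_put()/nvif_vmm_fini() release everything. The sketch below shows one plausible call sequence built only from the functions defined in this file; the wrapper function, its parameters (oclass, page, map_args/map_argc) and the chosen flag values are illustrative assumptions, not code from this file, and it relies on the same <nvif/vmm.h> and <nvif/mem.h> headers the file includes.

/*
 * Illustrative sketch only: the wrapper, its parameters and the flag
 * values are assumptions; only the nvif_vmm_*() calls come from this file.
 */
static int example_vmm_cycle(struct nvif_mmu *mmu, s32 oclass,
                             struct nvif_mem *mem, u8 page, u64 size,
                             void *map_args, u32 map_argc)
{
        struct nvif_vmm vmm;
        struct nvif_vma vma;
        int ret;

        /* Create the address space (managed=false, default range). */
        ret = nvif_vmm_init(mmu, oclass, false, 0, 0, NULL, 0, &vmm);
        if (ret)
                return ret;

        /* Reserve "size" bytes of address space with page tables. */
        ret = nvif_vmm_get(&vmm, PTES, false, page, 0, size, &vma);
        if (ret)
                goto done_vmm;

        /* Back the range with "mem", passing class-specific map args. */
        ret = nvif_vmm_map(&vmm, vma.addr, size, map_args, map_argc, mem, 0);
        if (ret)
                goto done_vma;

        /* ... use the mapping ... */

        nvif_vmm_unmap(&vmm, vma.addr);
done_vma:
        nvif_vmm_put(&vmm, &vma);
done_vmm:
        nvif_vmm_fini(&vmm);
        return ret;
}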
#include <nvif/vmm.h>
#include <nvif/mem.h>

#include <nvif/if000c.h>

int
nvif_vmm_unmap(struct nvif_vmm *vmm, u64 addr)
{
        return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_UNMAP,
                                &(struct nvif_vmm_unmap_v0) { .addr = addr },
                                sizeof(struct nvif_vmm_unmap_v0));
}

int
nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
             struct nvif_mem *mem, u64 offset)
{
        struct nvif_vmm_map_v0 *args;
        u8 stack[48];
        int ret;

        /* Marshal the variable-length map arguments: use the on-stack
         * buffer when they fit, otherwise fall back to kmalloc(). */
        if (sizeof(*args) + argc > sizeof(stack)) {
                if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
                        return -ENOMEM;
        } else {
                args = (void *)stack;
        }

        args->version = 0;
        args->addr = addr;
        args->size = size;
        args->memory = nvif_handle(&mem->object);
        args->offset = offset;
        memcpy(args->data, argv, argc);

        ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP,
                               args, sizeof(*args) + argc);
        if (args != (void *)stack)
                kfree(args);
        return ret;
}

void
nvif_vmm_put(struct nvif_vmm *vmm, struct nvif_vma *vma)
{
        if (vma->size) {
                WARN_ON(nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PUT,
                                         &(struct nvif_vmm_put_v0) {
                                                .addr = vma->addr,
                                         }, sizeof(struct nvif_vmm_put_v0)));
                vma->size = 0;
        }
}

int
nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
             u8 page, u8 align, u64 size, struct nvif_vma *vma)
{
        struct nvif_vmm_get_v0 args;
        int ret;

        /* Clear vma->size up front so a failed allocation is a no-op
         * for a later nvif_vmm_put(). */
        args.version = vma->size = 0;
        args.sparse = sparse;
        args.page = page;
        args.align = align;
        args.size = size;

        switch (type) {
        case ADDR: args.type = NVIF_VMM_GET_V0_ADDR; break;
        case PTES: args.type = NVIF_VMM_GET_V0_PTES; break;
        case LAZY: args.type = NVIF_VMM_GET_V0_LAZY; break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }

        ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET,
                               &args, sizeof(args));
        if (ret == 0) {
                vma->addr = args.addr;
                vma->size = args.size;
        }
        return ret;
}

void
nvif_vmm_fini(struct nvif_vmm *vmm)
{
        kfree(vmm->page);
        nvif_object_fini(&vmm->object);
}

int
nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, bool managed, u64 addr,
              u64 size, void *argv, u32 argc, struct nvif_vmm *vmm)
{
        struct nvif_vmm_v0 *args;
        u32 argn = sizeof(*args) + argc;
        int ret = -ENOSYS, i;

        vmm->object.client = NULL;
        vmm->page = NULL;

        if (!(args = kmalloc(argn, GFP_KERNEL)))
                return -ENOMEM;
        args->version = 0;
        args->managed = managed;
        args->addr = addr;
        args->size = size;
        memcpy(args->data, argv, argc);

        ret = nvif_object_init(&mmu->object, 0, oclass, args, argn,
                               &vmm->object);
        if (ret)
                goto done;

        vmm->start = args->addr;
        vmm->limit = args->size;

        /* Query the properties of each page size supported by the VMM. */
        vmm->page_nr = args->page_nr;
        vmm->page = kmalloc_array(vmm->page_nr, sizeof(*vmm->page),
                                  GFP_KERNEL);
        if (!vmm->page) {
                ret = -ENOMEM;
                goto done;
        }

        for (i = 0; i < vmm->page_nr; i++) {
                struct nvif_vmm_page_v0 args = { .index = i };

                ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
                                       &args, sizeof(args));
                if (ret)
                        break;

                vmm->page[i].shift = args.shift;
                vmm->page[i].sparse = args.sparse;
                vmm->page[i].vram = args.vram;
                vmm->page[i].host = args.host;
                vmm->page[i].comp = args.comp;
        }

done:
        if (ret)
                nvif_vmm_fini(vmm);
        kfree(args);
        return ret;
}