root/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c

DEFINITIONS

This source file includes the following definitions.
  1. nvkm_mem_target
  2. nvkm_mem_page
  3. nvkm_mem_addr
  4. nvkm_mem_size
  5. nvkm_mem_map_dma
  6. nvkm_mem_dtor
  7. nvkm_mem_map_sgl
  8. nvkm_mem_map_host
  9. nvkm_mem_new_host
  10. nvkm_mem_new_type

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
#include "mem.h"

#include <core/memory.h>

#include <nvif/if000a.h>
#include <nvif/unpack.h>

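/* Host-memory object.  The page array (mem) is only populated when the
 * pages are allocated and DMA-mapped by this file; the dma/sgl union
 * describes the backing pages whether they were allocated here or
 * supplied by the caller.
 */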
struct nvkm_mem {
        struct nvkm_memory memory;
        enum nvkm_memory_target target;
        struct nvkm_mmu *mmu;
        u64 pages;
        struct page **mem;
        union {
                struct scatterlist *sgl;
                dma_addr_t *dma;
        };
};

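/* Report where this memory lives (decided at creation time). */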
static enum nvkm_memory_target
nvkm_mem_target(struct nvkm_memory *memory)
{
        return nvkm_mem(memory)->target;
}

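/* Backing page size (log2); always the CPU page size for host memory. */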
static u8
nvkm_mem_page(struct nvkm_memory *memory)
{
        return PAGE_SHIFT;
}

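/* A single linear address only exists for one-page, locally-allocated
 * objects; anything else reports an invalid address.
 */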
static u64
nvkm_mem_addr(struct nvkm_memory *memory)
{
        struct nvkm_mem *mem = nvkm_mem(memory);
        if (mem->pages == 1 && mem->mem)
                return mem->dma[0];
        return ~0ULL;
}

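/* Total size of the object, in bytes. */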
static u64
nvkm_mem_size(struct nvkm_memory *memory)
{
        return nvkm_mem(memory)->pages << PAGE_SHIFT;
}

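/* Map into a GPU virtual address space, describing the backing store
 * with the flat DMA-address array.
 */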
static int
nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
                 struct nvkm_vma *vma, void *argv, u32 argc)
{
        struct nvkm_mem *mem = nvkm_mem(memory);
        struct nvkm_vmm_map map = {
                .memory = &mem->memory,
                .offset = offset,
                .dma = mem->dma,
        };
        return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}

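/* Tear down locally-allocated backing store: unmap and free the pages in
 * reverse order, then the bookkeeping arrays.  Caller-supplied dma/sgl
 * descriptions (where mem->mem is NULL) are not ours to free.  Returns
 * the object for the core to free.
 */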
static void *
nvkm_mem_dtor(struct nvkm_memory *memory)
{
        struct nvkm_mem *mem = nvkm_mem(memory);
        if (mem->mem) {
                while (mem->pages--) {
                        dma_unmap_page(mem->mmu->subdev.device->dev,
                                       mem->dma[mem->pages], PAGE_SIZE,
                                       DMA_BIDIRECTIONAL);
                        __free_page(mem->mem[mem->pages]);
                }
                kvfree(mem->dma);
                kvfree(mem->mem);
        }
        return mem;
}

static const struct nvkm_memory_func
nvkm_mem_dma = {
        .dtor = nvkm_mem_dtor,
        .target = nvkm_mem_target,
        .page = nvkm_mem_page,
        .addr = nvkm_mem_addr,
        .size = nvkm_mem_size,
        .map = nvkm_mem_map_dma,
};

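/* As nvkm_mem_map_dma(), but the backing store is described by a
 * scatterlist instead of a DMA-address array.
 */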
static int
nvkm_mem_map_sgl(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
                 struct nvkm_vma *vma, void *argv, u32 argc)
{
        struct nvkm_mem *mem = nvkm_mem(memory);
        struct nvkm_vmm_map map = {
                .memory = &mem->memory,
                .offset = offset,
                .sgl = mem->sgl,
        };
        return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}

static const struct nvkm_memory_func
nvkm_mem_sgl = {
        .dtor = nvkm_mem_dtor,
        .target = nvkm_mem_target,
        .page = nvkm_mem_page,
        .addr = nvkm_mem_addr,
        .size = nvkm_mem_size,
        .map = nvkm_mem_map_sgl,
};

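/* Map the backing pages into a contiguous kernel virtual mapping; only
 * possible for objects whose pages were allocated here.
 */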
int
nvkm_mem_map_host(struct nvkm_memory *memory, void **pmap)
{
        struct nvkm_mem *mem = nvkm_mem(memory);
        if (mem->mem) {
                *pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);
                return *pmap ? 0 : -EFAULT;
        }
        return -EINVAL;
}

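/* Create a host-memory object.  A v0 argument supplies pre-existing
 * backing store (a DMA-address array or a scatterlist); a vn argument
 * asks for pages to be allocated and DMA-mapped here.
 */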
static int
nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
                  void *argv, u32 argc, struct nvkm_memory **pmemory)
{
        struct device *dev = mmu->subdev.device->dev;
        union {
                struct nvif_mem_ram_vn vn;
                struct nvif_mem_ram_v0 v0;
        } *args = argv;
        int ret = -ENOSYS;
        enum nvkm_memory_target target;
        struct nvkm_mem *mem;
        gfp_t gfp = GFP_USER | __GFP_ZERO;

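        /* Memory types that are coherent and cached map to the HOST
         * target; everything else is treated as non-coherent.
         */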
        if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&
            !(mmu->type[type].type & NVKM_MEM_UNCACHED))
                target = NVKM_MEM_TARGET_HOST;
        else
                target = NVKM_MEM_TARGET_NCOH;

        if (page != PAGE_SHIFT)
                return -EINVAL;

        if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
                return -ENOMEM;
        mem->target = target;
        mem->mmu = mmu;
        *pmemory = &mem->memory;

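        /* A v0 argument carries the backing store itself, so all that's
         * left to do is record it.
         */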
        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                if (args->v0.dma) {
                        nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
                        mem->dma = args->v0.dma;
                } else {
                        nvkm_memory_ctor(&nvkm_mem_sgl, &mem->memory);
                        mem->sgl = args->v0.sgl;
                }

                if (!IS_ALIGNED(size, PAGE_SIZE))
                        return -EINVAL;
                mem->pages = size >> PAGE_SHIFT;
                return 0;
        } else
        if ((ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
                /* Neither argument layout matched: free the object and
                 * clear *pmemory so the caller doesn't unref freed memory.
                 */
                kfree(mem);
                *pmemory = NULL;
                return ret;
        }

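        /* No backing store was supplied: allocate and DMA-map it. */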
        nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
        size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

        if (!(mem->mem = kvmalloc_array(size, sizeof(*mem->mem), GFP_KERNEL)))
                return -ENOMEM;
        if (!(mem->dma = kvmalloc_array(size, sizeof(*mem->dma), GFP_KERNEL)))
                return -ENOMEM;

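        /* Allow highmem pages when the device can address more than
         * 32 bits, otherwise force DMA32-capable memory.
         */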
        if (mmu->dma_bits > 32)
                gfp |= GFP_HIGHUSER;
        else
                gfp |= GFP_DMA32;

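        /* mem->pages counts only successfully-mapped pages, which is
         * exactly what nvkm_mem_dtor() unwinds if we fail mid-loop.
         */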
        for (mem->pages = 0; size; size--, mem->pages++) {
                struct page *p = alloc_page(gfp);
                if (!p)
                        return -ENOMEM;

                mem->dma[mem->pages] = dma_map_page(dev, p, 0, PAGE_SIZE,
                                                    DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, mem->dma[mem->pages])) {
                        __free_page(p);
                        return -ENOMEM;
                }

                mem->mem[mem->pages] = p;
        }

        return 0;
}

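/* Dispatch by memory type: VRAM goes to the backend-specific constructor,
 * everything else is host memory.  On failure, any partially-constructed
 * object is released before returning.
 */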
int
nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
                  void *argv, u32 argc, struct nvkm_memory **pmemory)
{
        struct nvkm_memory *memory = NULL;
        int ret;

        if (mmu->type[type].type & NVKM_MEM_VRAM) {
                ret = mmu->func->mem.vram(mmu, type, page, size,
                                          argv, argc, &memory);
        } else {
                ret = nvkm_mem_new_host(mmu, type, page, size,
                                        argv, argc, &memory);
        }

        if (ret)
                nvkm_memory_unref(&memory);
        *pmemory = memory;
        return ret;
}
