drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c

DEFINITIONS

This source file includes the following definitions.
  1. nv50_instobj_wr32_slow
  2. nv50_instobj_rd32_slow
  3. nv50_instobj_wr32
  4. nv50_instobj_rd32
  5. nv50_instobj_kmap
  6. nv50_instobj_map
  7. nv50_instobj_release
  8. nv50_instobj_acquire
  9. nv50_instobj_boot
  10. nv50_instobj_size
  11. nv50_instobj_addr
  12. nv50_instobj_bar2
  13. nv50_instobj_target
  14. nv50_instobj_dtor
  15. nv50_instobj_new
  16. nv50_instmem_fini
  17. nv50_instmem_new

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#define nv50_instmem(p) container_of((p), struct nv50_instmem, base)
#include "priv.h"

#include <core/memory.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>

struct nv50_instmem {
        struct nvkm_instmem base;
        u64 addr;

        /* Mappings that can be evicted when BAR2 space has been exhausted. */
        struct list_head lru;
};

/******************************************************************************
 * instmem object implementation
 *****************************************************************************/
#define nv50_instobj(p) container_of((p), struct nv50_instobj, base.memory)

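/* Per-object state: @ram is the backing VRAM allocation, @bar the BAR2
 * address-space the object may be mapped into, @map the CPU mapping of that
 * BAR2 range, @maps the number of outstanding acquires, and @lru the
 * object's position on the subdev-wide eviction list while it is mapped
 * but idle.
 */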
struct nv50_instobj {
        struct nvkm_instobj base;
        struct nv50_instmem *imem;
        struct nvkm_memory *ram;
        struct nvkm_vma *bar;
        refcount_t maps;
        void *map;
        struct list_head lru;
};

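/* Slow-path accessors, used when an object has no BAR2 CPU mapping.  They
 * reach VRAM indirectly through the 1MiB PRAMIN aperture at BAR0 offset
 * 0x700000: register 0x001700 is programmed with the 1MiB-aligned VRAM
 * address (>> 16) to slide the window, and the low 20 bits of the address
 * select the offset within it.  The window is shared device-wide, so both
 * accessors serialise on imem->base.lock and cache the current window base
 * in imem->addr to avoid redundant reprogramming.
 */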
static void
nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
{
        struct nv50_instobj *iobj = nv50_instobj(memory);
        struct nv50_instmem *imem = iobj->imem;
        struct nvkm_device *device = imem->base.subdev.device;
        u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL;
        u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL;
        unsigned long flags;

        spin_lock_irqsave(&imem->base.lock, flags);
        if (unlikely(imem->addr != base)) {
                nvkm_wr32(device, 0x001700, base >> 16);
                imem->addr = base;
        }
        nvkm_wr32(device, 0x700000 + addr, data);
        spin_unlock_irqrestore(&imem->base.lock, flags);
}

static u32
nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
{
        struct nv50_instobj *iobj = nv50_instobj(memory);
        struct nv50_instmem *imem = iobj->imem;
        struct nvkm_device *device = imem->base.subdev.device;
        u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL;
        u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL;
        u32 data;
        unsigned long flags;

        spin_lock_irqsave(&imem->base.lock, flags);
        if (unlikely(imem->addr != base)) {
                nvkm_wr32(device, 0x001700, base >> 16);
                imem->addr = base;
        }
        data = nvkm_rd32(device, 0x700000 + addr);
        spin_unlock_irqrestore(&imem->base.lock, flags);
        return data;
}

static const struct nvkm_memory_ptrs
nv50_instobj_slow = {
        .rd32 = nv50_instobj_rd32_slow,
        .wr32 = nv50_instobj_wr32_slow,
};

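/* Fast-path accessors, used while the object has a CPU mapping of its BAR2
 * VMA (iobj->map, set up by nv50_instobj_kmap()).  Selected by
 * nv50_instobj_acquire() in preference to the indirect PRAMIN path above.
 */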
static void
nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
        iowrite32_native(data, nv50_instobj(memory)->map + offset);
}

static u32
nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
        return ioread32_native(nv50_instobj(memory)->map + offset);
}

static const struct nvkm_memory_ptrs
nv50_instobj_fast = {
        .rd32 = nv50_instobj_rd32,
        .wr32 = nv50_instobj_wr32,
};

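/* Map an object into BAR2 address-space and create a CPU mapping of that
 * range in iobj->map.  Called with the subdev mutex held; the mutex is
 * dropped while allocating BAR2 space (page-table allocation can recurse
 * back into instmem), evicting idle mappings from the LRU until the
 * allocation succeeds or the LRU runs empty, and is re-taken before return.
 */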
static void
nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
{
        struct nv50_instmem *imem = iobj->imem;
        struct nv50_instobj *eobj;
        struct nvkm_memory *memory = &iobj->base.memory;
        struct nvkm_subdev *subdev = &imem->base.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_vma *bar = NULL, *ebar;
        u64 size = nvkm_memory_size(memory);
        void *emap;
        int ret;

        /* Attempt to allocate BAR2 address-space and map the object
         * into it.  The lock has to be dropped while doing this due
         * to the possibility of recursion for page table allocation.
         */
        mutex_unlock(&subdev->mutex);
        while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
                /* Evict unused mappings, and keep retrying until we either
                 * succeed, or there are no more objects left on the LRU.
                 */
                mutex_lock(&subdev->mutex);
                eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
                if (eobj) {
                        nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
                                   nvkm_memory_addr(&eobj->base.memory),
                                   nvkm_memory_size(&eobj->base.memory),
                                   eobj->bar->addr);
                        list_del_init(&eobj->lru);
                        ebar = eobj->bar;
                        eobj->bar = NULL;
                        emap = eobj->map;
                        eobj->map = NULL;
                }
                mutex_unlock(&subdev->mutex);
                if (!eobj)
                        break;
                iounmap(emap);
                nvkm_vmm_put(vmm, &ebar);
        }

        if (ret == 0)
                ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
        mutex_lock(&subdev->mutex);
        if (ret || iobj->bar) {
                /* We either failed, or another thread beat us. */
                mutex_unlock(&subdev->mutex);
                nvkm_vmm_put(vmm, &bar);
                mutex_lock(&subdev->mutex);
                return;
        }

        /* Make the mapping visible to the host. */
        iobj->bar = bar;
        iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
                               (u32)iobj->bar->addr, size);
        if (!iobj->map) {
                nvkm_warn(subdev, "PRAMIN ioremap failed\n");
                nvkm_vmm_put(vmm, &iobj->bar);
        }
}

static int
nv50_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
                 struct nvkm_vma *vma, void *argv, u32 argc)
{
        memory = nv50_instobj(memory)->ram;
        return nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
}

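/* Drop a reference taken by nv50_instobj_acquire(), flushing posted writes
 * out to VRAM.  When the last reference goes away, the (still live) BAR2
 * mapping is parked on the LRU so it can be reused or evicted later, and
 * the rd32/wr32 accessors are reset so that the next acquire re-selects
 * the fast or slow path.
 */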
static void
nv50_instobj_release(struct nvkm_memory *memory)
{
        struct nv50_instobj *iobj = nv50_instobj(memory);
        struct nv50_instmem *imem = iobj->imem;
        struct nvkm_subdev *subdev = &imem->base.subdev;

        wmb();
        nvkm_bar_flush(subdev->device->bar);

        if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) {
                /* Add the now-unused mapping to the LRU instead of directly
                 * unmapping it here, in case we need to map it again later.
                 */
                if (likely(iobj->lru.next) && iobj->map) {
                        BUG_ON(!list_empty(&iobj->lru));
                        list_add_tail(&iobj->lru, &imem->lru);
                }

                /* Switch back to NULL accessors when last map is gone. */
                iobj->base.memory.ptrs = NULL;
                mutex_unlock(&subdev->mutex);
        }
}

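/* Take a reference on the object's CPU mapping for rd32/wr32 access.  The
 * common case (already mapped) is handled locklessly via the maps refcount;
 * otherwise a BAR2 mapping is created if possible, the object is pulled off
 * the eviction LRU, and the fast or slow accessors are installed depending
 * on whether a direct mapping was obtained.  Returns NULL when only the
 * indirect PRAMIN path is available.
 */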
static void __iomem *
nv50_instobj_acquire(struct nvkm_memory *memory)
{
        struct nv50_instobj *iobj = nv50_instobj(memory);
        struct nvkm_instmem *imem = &iobj->imem->base;
        struct nvkm_vmm *vmm;
        void __iomem *map = NULL;

        /* Already mapped? */
        if (refcount_inc_not_zero(&iobj->maps))
                return iobj->map;

        /* Take the lock, and re-check that another thread hasn't
         * already mapped the object in the meantime.
         */
        mutex_lock(&imem->subdev.mutex);
        if (refcount_inc_not_zero(&iobj->maps)) {
                mutex_unlock(&imem->subdev.mutex);
                return iobj->map;
        }

        /* Attempt to get a direct CPU mapping of the object. */
        if ((vmm = nvkm_bar_bar2_vmm(imem->subdev.device))) {
                if (!iobj->map)
                        nv50_instobj_kmap(iobj, vmm);
                map = iobj->map;
        }

        if (!refcount_inc_not_zero(&iobj->maps)) {
                /* Exclude object from eviction while it's being accessed. */
                if (likely(iobj->lru.next))
                        list_del_init(&iobj->lru);

                if (map)
                        iobj->base.memory.ptrs = &nv50_instobj_fast;
                else
                        iobj->base.memory.ptrs = &nv50_instobj_slow;
                refcount_set(&iobj->maps, 1);
        }

        mutex_unlock(&imem->subdev.mutex);
        return map;
}

static void
nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
{
        struct nv50_instobj *iobj = nv50_instobj(memory);
        struct nvkm_instmem *imem = &iobj->imem->base;

        /* Exclude bootstrapped objects (ie. the page tables for the
         * instmem BAR itself) from eviction.
         */
        mutex_lock(&imem->subdev.mutex);
        if (likely(iobj->lru.next)) {
                list_del_init(&iobj->lru);
                iobj->lru.next = NULL;
        }

        nv50_instobj_kmap(iobj, vmm);
        nvkm_instmem_boot(imem);
        mutex_unlock(&imem->subdev.mutex);
}

static u64
nv50_instobj_size(struct nvkm_memory *memory)
{
        return nvkm_memory_size(nv50_instobj(memory)->ram);
}

static u64
nv50_instobj_addr(struct nvkm_memory *memory)
{
        return nvkm_memory_addr(nv50_instobj(memory)->ram);
}

static u64
nv50_instobj_bar2(struct nvkm_memory *memory)
{
        struct nv50_instobj *iobj = nv50_instobj(memory);
        u64 addr = ~0ULL;
        if (nv50_instobj_acquire(&iobj->base.memory)) {
                iobj->lru.next = NULL; /* Exclude from eviction. */
                addr = iobj->bar->addr;
        }
        nv50_instobj_release(&iobj->base.memory);
        return addr;
}

static enum nvkm_memory_target
nv50_instobj_target(struct nvkm_memory *memory)
{
        return nvkm_memory_target(nv50_instobj(memory)->ram);
}

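/* Object destructor: detach from the eviction LRU and steal the BAR2
 * mapping under the mutex, tear the mapping down outside of it, then
 * release the backing VRAM.
 */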
static void *
nv50_instobj_dtor(struct nvkm_memory *memory)
{
        struct nv50_instobj *iobj = nv50_instobj(memory);
        struct nvkm_instmem *imem = &iobj->imem->base;
        struct nvkm_vma *bar;
        void *map;

        mutex_lock(&imem->subdev.mutex);
        if (likely(iobj->lru.next))
                list_del(&iobj->lru);
        map = iobj->map;
        bar = iobj->bar;
        mutex_unlock(&imem->subdev.mutex);

        if (map) {
                struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
                iounmap(map);
                if (likely(vmm)) /* Can be NULL during BAR destructor. */
                        nvkm_vmm_put(vmm, &bar);
        }

        nvkm_memory_unref(&iobj->ram);
        nvkm_instobj_dtor(imem, &iobj->base);
        return iobj;
}

static const struct nvkm_memory_func
nv50_instobj_func = {
        .dtor = nv50_instobj_dtor,
        .target = nv50_instobj_target,
        .bar2 = nv50_instobj_bar2,
        .addr = nv50_instobj_addr,
        .size = nv50_instobj_size,
        .boot = nv50_instobj_boot,
        .acquire = nv50_instobj_acquire,
        .release = nv50_instobj_release,
        .map = nv50_instobj_map,
};

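/* Allocate a new instance object, backed by VRAM.  Alignment is rounded up
 * to at least a 4KiB page; the BAR2/CPU mapping is created lazily on the
 * first acquire rather than here.
 */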
static int
nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
                 struct nvkm_memory **pmemory)
{
        struct nv50_instmem *imem = nv50_instmem(base);
        struct nv50_instobj *iobj;
        struct nvkm_device *device = imem->base.subdev.device;
        u8 page = max(order_base_2(align), 12);

        if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
                return -ENOMEM;
        *pmemory = &iobj->base.memory;

        nvkm_instobj_ctor(&nv50_instobj_func, &imem->base, &iobj->base);
        iobj->imem = imem;
        refcount_set(&iobj->maps, 0);
        INIT_LIST_HEAD(&iobj->lru);

        return nvkm_ram_get(device, 0, 1, page, size, true, true, &iobj->ram);
}

/******************************************************************************
 * instmem subdev implementation
 *****************************************************************************/

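/* Invalidate the cached PRAMIN window base on fini, so the first indirect
 * access after the subdev is reinitialised reprograms 0x001700 rather than
 * trusting stale state.
 */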
static void
nv50_instmem_fini(struct nvkm_instmem *base)
{
        nv50_instmem(base)->addr = ~0ULL;
}

static const struct nvkm_instmem_func
nv50_instmem = {
        .fini = nv50_instmem_fini,
        .memory_new = nv50_instobj_new,
        .zero = false,
};

int
nv50_instmem_new(struct nvkm_device *device, int index,
                 struct nvkm_instmem **pimem)
{
        struct nv50_instmem *imem;

        if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
        INIT_LIST_HEAD(&imem->lru);
        *pimem = &imem->base;
        return 0;
}
