/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
#include "priv.h"

#include <core/memory.h>
#include <core/ramht.h>
#include <engine/gr/nv40.h>

struct nv40_instmem {
        struct nvkm_instmem base;
        struct nvkm_mm heap;
        void __iomem *iomem;
};

/******************************************************************************
 * instmem object implementation
 *****************************************************************************/
#define nv40_instobj(p) container_of((p), struct nv40_instobj, memory)

struct nv40_instobj {
        struct nvkm_memory memory;
        struct nv40_instmem *imem;
        struct nvkm_mm_node *node;
};

static enum nvkm_memory_target
nv40_instobj_target(struct nvkm_memory *memory)
{
        return NVKM_MEM_TARGET_INST;
}

static u64
nv40_instobj_addr(struct nvkm_memory *memory)
{
        return nv40_instobj(memory)->node->offset;
}

static u64
nv40_instobj_size(struct nvkm_memory *memory)
{
        return nv40_instobj(memory)->node->length;
}

static void __iomem *
nv40_instobj_acquire(struct nvkm_memory *memory)
{
        struct nv40_instobj *iobj = nv40_instobj(memory);
        return iobj->imem->iomem + iobj->node->offset;
}

static void
nv40_instobj_release(struct nvkm_memory *memory)
{
}

static u32
nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
        struct nv40_instobj *iobj = nv40_instobj(memory);
        return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
}

static void
nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
        struct nv40_instobj *iobj = nv40_instobj(memory);
        iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
}

static void *
nv40_instobj_dtor(struct nvkm_memory *memory)
{
        struct nv40_instobj *iobj = nv40_instobj(memory);
        mutex_lock(&iobj->imem->base.subdev.mutex);
        nvkm_mm_free(&iobj->imem->heap, &iobj->node);
        mutex_unlock(&iobj->imem->base.subdev.mutex);
        return iobj;
}

static const struct nvkm_memory_func
nv40_instobj_func = {
        .dtor = nv40_instobj_dtor,
        .target = nv40_instobj_target,
        .size = nv40_instobj_size,
        .addr = nv40_instobj_addr,
        .acquire = nv40_instobj_acquire,
        .release = nv40_instobj_release,
        .rd32 = nv40_instobj_rd32,
        .wr32 = nv40_instobj_wr32,
};

static int
nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
                 struct nvkm_memory **pmemory)
{
        struct nv40_instmem *imem = nv40_instmem(base);
        struct nv40_instobj *iobj;
        int ret;

        if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
                return -ENOMEM;
        *pmemory = &iobj->memory;

        nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
        iobj->imem = imem;

        mutex_lock(&imem->base.subdev.mutex);
        ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
                           align ? align : 1, &iobj->node);
        mutex_unlock(&imem->base.subdev.mutex);
        return ret;
}

/******************************************************************************
 * instmem subdev implementation
 *****************************************************************************/

static u32
nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr)
{
        return ioread32_native(nv40_instmem(base)->iomem + addr);
}

static void
nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data)
{
        iowrite32_native(data, nv40_instmem(base)->iomem + addr);
}

static int
nv40_instmem_oneinit(struct nvkm_instmem *base)
{
        struct nv40_instmem *imem = nv40_instmem(base);
        struct nvkm_device *device = imem->base.subdev.device;
        int ret, vs;

        /* PRAMIN aperture maps over the end of vram, reserve enough space
         * to fit graphics contexts for every channel, the magics come
         * from engine/gr/nv40.c
         */
        vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
        if      (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
        else if (device->chipset  < 0x43) imem->base.reserved = 0x4f00 * vs;
        else if (nv44_gr_class(device))   imem->base.reserved = 0x4980 * vs;
        else                              imem->base.reserved = 0x4a40 * vs;
        imem->base.reserved += 16 * 1024;
        imem->base.reserved *= 32;              /* per-channel */
        imem->base.reserved += 512 * 1024;      /* pci(e)gart table */
        imem->base.reserved += 512 * 1024;      /* object storage */
        imem->base.reserved = round_up(imem->base.reserved, 4096);

        ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
        if (ret)
                return ret;

        /* 0x00000-0x10000: reserve for probable vbios image */
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
                              &imem->base.vbios);
        if (ret)
                return ret;

        /* 0x10000-0x18000: reserve for RAMHT */
        ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
        if (ret)
                return ret;

        /* 0x18000-0x18200: reserve for RAMRO
         * 0x18200-0x20000: padding
         */
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
                              &imem->base.ramro);
        if (ret)
                return ret;

        /* 0x20000-0x21000: reserve for RAMFC
         * 0x21000-0x40000: padding and some unknown crap
         */
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
                              &imem->base.ramfc);
        if (ret)
                return ret;

        return 0;
}

static void *
nv40_instmem_dtor(struct nvkm_instmem *base)
{
        struct nv40_instmem *imem = nv40_instmem(base);
        nvkm_memory_del(&imem->base.ramfc);
        nvkm_memory_del(&imem->base.ramro);
        nvkm_ramht_del(&imem->base.ramht);
        nvkm_memory_del(&imem->base.vbios);
        nvkm_mm_fini(&imem->heap);
        if (imem->iomem)
                iounmap(imem->iomem);
        return imem;
}

static const struct nvkm_instmem_func
nv40_instmem = {
        .dtor = nv40_instmem_dtor,
        .oneinit = nv40_instmem_oneinit,
        .rd32 = nv40_instmem_rd32,
        .wr32 = nv40_instmem_wr32,
        .memory_new = nv40_instobj_new,
        .persistent = false,
        .zero = false,
};

int
nv40_instmem_new(struct nvkm_device *device, int index,
                 struct nvkm_instmem **pimem)
{
        struct nv40_instmem *imem;
        int bar;

        if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base);
        *pimem = &imem->base;

        /* map bar */
        if (device->func->resource_size(device, 2))
                bar = 2;
        else
                bar = 3;

        imem->iomem = ioremap(device->func->resource_addr(device, bar),
                              device->func->resource_size(device, bar));
        if (!imem->iomem) {
                nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
                return -EFAULT;
        }

        return 0;
}