/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "channv04.h"
#include "regsnv04.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/instmem.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

static bool
nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
{
	switch (engine->subdev.index) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW:
		return false;
	case NVKM_ENGINE_GR:
		*reg = 0x0032e0;
		*ctx = 0x38;
		return true;
	case NVKM_ENGINE_MPEG:
		*reg = 0x00330c;
		*ctx = 0x54;
		return true;
	default:
		WARN_ON(1);
		return false;
	}
}
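
/*
 * NV40 PFIFO keeps each engine's channel context pointer in two places:
 * a live register (0x0032e0 for GR, 0x00330c for MPEG, per the lookup
 * above) and the channel's RAMFC entry.  The fini/init hooks below
 * briefly disable PFIFO cache reassignment (0x002500 bit 0) so both
 * copies can be updated without racing a channel switch.
 */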
static int
nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine, bool suspend)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	unsigned long flags;
	u32 reg, ctx;
	int chid;

	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	/* 0x003204 holds the currently active channel id: clear the live
	 * context pointer only if it is ours, but always clear the RAMFC
	 * copy.
	 */
	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
	if (chid == chan->base.chid)
		nvkm_wr32(device, reg, 0x00000000);
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
	nvkm_done(imem->ramfc);

	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}

static int
nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	unsigned long flags;
	u32 inst, reg, ctx;
	int chid;

	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;
	/* instance address of the engine context object, in 16-byte units */
	inst = chan->engn[engine->subdev.index]->addr >> 4;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
	if (chid == chan->base.chid)
		nvkm_wr32(device, reg, inst);
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
	nvkm_done(imem->ramfc);

	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}

static void
nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
}

static int
nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine,
			  struct nvkm_object *object)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	const int engn = engine->subdev.index;
	u32 reg, ctx;

	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;

	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}

static int
nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
			  struct nvkm_object *object)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
	u32 context = chan->base.chid << 23;
	u32 handle = object->handle;
	int hash;

	/* RAMHT context value: channel id shifted to bit 23, engine
	 * routing selector in bits 22:20.
	 */
	switch (object->engine->subdev.index) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW    : context |= 0x00000000; break;
	case NVKM_ENGINE_GR    : context |= 0x00100000; break;
	case NVKM_ENGINE_MPEG  : context |= 0x00200000; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
				 handle, context);
	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
	return hash;
}

static const struct nvkm_fifo_chan_func
nv40_fifo_dma_func = {
	.dtor = nv04_fifo_dma_dtor,
	.init = nv04_fifo_dma_init,
	.fini = nv04_fifo_dma_fini,
	.engine_ctor = nv40_fifo_dma_engine_ctor,
	.engine_dtor = nv40_fifo_dma_engine_dtor,
	.engine_init = nv40_fifo_dma_engine_init,
	.engine_fini = nv40_fifo_dma_engine_fini,
	.object_ctor = nv40_fifo_dma_object_ctor,
	.object_dtor = nv04_fifo_dma_object_dtor,
};
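
/*
 * Channel constructor for NV40_CHANNEL_DMA.  Each channel owns a 128-byte
 * RAMFC slot (chan->ramfc = chid * 128).  Note: nvif_unpack() here is the
 * old-style macro that assigns to the local 'ret', so the bare
 * 'return ret' in the else branch below is not an uninitialised use.
 */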
static int
nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		  void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
		if (!args->v0.pushbuf)
			return -EINVAL;
	} else
		return ret;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;

	ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
				  (1ULL << NVKM_ENGINE_DMAOBJ) |
				  (1ULL << NVKM_ENGINE_GR) |
				  (1ULL << NVKM_ENGINE_MPEG) |
				  (1ULL << NVKM_ENGINE_SW),
				  0, 0xc00000, 0x1000, oclass, &chan->base);
	chan->fifo = fifo;
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;
	chan->ramfc = chan->base.chid * 128;

	/* Seed the channel's RAMFC slot: DMA put/get pointers at the
	 * requested offset, pushbuf instance, fetch parameters, timeslice.
	 */
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			       NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
	nvkm_done(imem->ramfc);
	return 0;
}

const struct nvkm_fifo_chan_oclass
nv40_fifo_dma_oclass = {
	.base.oclass = NV40_CHANNEL_DMA,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = nv40_fifo_dma_new,
};