/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "channv50.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

/* Offset of an engine's context pointers within the channel's engine
 * context object (chan->eng).  Returns -1 for engines that keep no
 * per-channel engine context here (DMAOBJ/SW), and for any engine this
 * chipset doesn't know about (with a WARN).
 */
static int
nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.index) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW    : return -1;
	case NVKM_ENGINE_GR    : return 0x0000;
	case NVKM_ENGINE_MPEG  : return 0x0060;
	default:
		WARN_ON(1);
		return -1;
	}
}

/* Detach an engine's context from the channel: kick the channel's
 * context off the engine, then clear its slot in the engine context
 * object.  On unload timeout during suspend, returns -EBUSY and leaves
 * the context pointers in place.
 */
static int
nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine, bool suspend)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int offset, ret = 0;
	u32 me;

	offset = nv50_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;	/* engine has no context here, nothing to do */

	/* HW bug workaround:
	 *
	 * PFIFO will hang forever if the connected engines don't report
	 * that they've processed the context switch request.
	 *
	 * In order for the kickoff to work, we need to ensure all the
	 * connected engines are in a state where they can answer.
	 *
	 * Newer chipsets don't seem to suffer from this issue, and well,
	 * there's also a "ignore these engines" bitmask reg we can use
	 * if we hit the issue there..
	 */
	me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);

	/* do the kickoff... */
	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
	/* wait up to 2ms for the engine to acknowledge the kickoff */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
			   chan->base.chid, chan->base.object.client->name);
		if (suspend)
			ret = -EBUSY;	/* context must not be lost on suspend */
	}
	nvkm_wr32(device, 0x00b860, me);	/* restore workaround register */

	if (ret == 0) {
		/* zero this engine's context pointers for the channel */
		nvkm_kmap(chan->eng);
		nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
		nvkm_done(chan->eng);
	}

	return ret;
}

/* Attach an engine's context to the channel by writing the bound
 * context object's start/limit addresses into the channel's engine
 * context object at the engine's slot.
 */
static int
nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
	u64 limit, start;
	int offset;

	offset = nv50_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;	/* engine has no context here, nothing to do */
	limit = engn->addr + engn->size - 1;
	start = engn->addr;

	nvkm_kmap(chan->eng);
	/* NOTE(review): 0x00190000 looks like a context-valid/flags word —
	 * confirm against hw docs.  Upper address bits of start/limit are
	 * packed together into the 0x0c word.
	 */
	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
					    upper_32_bits(start));
	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
	nvkm_done(chan->eng);
	return 0;
}

/* Release the gpuobj created by the matching engine_ctor (if any). */
void
nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
}

/* Bind an engine context object for this channel, stashing the
 * resulting gpuobj in chan->engn[] for engine_init/engine_dtor.
 * Engines without a context slot (addr < 0) need no object.
 */
static int
nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine,
			   struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	int engn = engine->subdev.index;

	if (nv50_fifo_chan_engine_addr(engine) < 0)
		return 0;

	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}

/* Remove a previously-inserted object handle from the channel's RAMHT. */
void
nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	nvkm_ramht_remove(chan->ramht, cookie);
}

/* Insert an object handle into the channel's RAMHT, tagged with an
 * engine-specific context value so the hardware can route methods.
 * Returns the RAMHT cookie (for object_dtor), or -EINVAL for an
 * object on an engine this chipset doesn't handle.
 */
static int
nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
			   struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	u32 handle = object->handle;
	u32 context;

	switch (object->engine->subdev.index) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW    : context = 0x00000000; break;
	case NVKM_ENGINE_GR    : context = 0x00100000; break;
	case NVKM_ENGINE_MPEG  : context = 0x00200000; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
}

/* Deactivate the channel: clear its enable bit, rebuild the runlist
 * without it, then clear its RAMFC pointer entirely.
 */
void
nv50_fifo_chan_fini(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	/* remove channel from runlist, fifo will unload context */
	nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
	nv50_fifo_runlist_update(fifo);
	nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
}

/* Activate the channel: point PFIFO's per-channel slot at the RAMFC
 * (address in 4KiB units, 0x80000000 enable bit) and rebuild the
 * runlist to include it.
 */
static void
nv50_fifo_chan_init(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u64 addr = chan->ramfc->addr >> 12;
	u32 chid = chan->base.chid;

	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
	nv50_fifo_runlist_update(fifo);
}

/* Tear down everything nv50_fifo_chan_ctor created, in reverse order.
 * Safe on partially-constructed channels (the del/ref helpers take
 * pointer-to-pointer and tolerate NULL).  Returns chan so the caller
 * can free the containing allocation.
 */
void *
nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
	nvkm_ramht_del(&chan->ramht);
	nvkm_gpuobj_del(&chan->pgd);
	nvkm_gpuobj_del(&chan->eng);
	nvkm_gpuobj_del(&chan->cache);
	nvkm_gpuobj_del(&chan->ramfc);
	return chan;
}

/* Method table wiring the nv50 channel implementation into the common
 * nvkm_fifo_chan code.
 */
static const struct nvkm_fifo_chan_func
nv50_fifo_chan_func = {
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.engine_ctor = nv50_fifo_chan_engine_ctor,
	.engine_dtor = nv50_fifo_chan_engine_dtor,
	.engine_init = nv50_fifo_chan_engine_init,
	.engine_fini = nv50_fifo_chan_engine_fini,
	.object_ctor = nv50_fifo_chan_object_ctor,
	.object_dtor = nv50_fifo_chan_object_dtor,
};

/* Construct the common parts of an nv50 channel: the base channel,
 * then the per-channel hardware structures inside its instance block —
 * RAMFC, engine context object, page directory, and RAMHT — and
 * finally a reference on the channel's VM.  On any failure the caller
 * is expected to invoke the dtor, which releases whatever was created.
 */
int
nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
		    const struct nvkm_oclass *oclass,
		    struct nv50_fifo_chan *chan)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
				  0x10000, 0x1000, false, vm, push,
				  (1ULL << NVKM_ENGINE_DMAOBJ) |
				  (1ULL << NVKM_ENGINE_SW) |
				  (1ULL << NVKM_ENGINE_GR) |
				  (1ULL << NVKM_ENGINE_MPEG),
				  0, 0xc00000, 0x2000, oclass, &chan->base);
	/* back-pointer set before the error check — NOTE(review):
	 * presumably so destructor paths can reach the fifo even after a
	 * partial construction; confirm against callers.
	 */
	chan->fifo = fifo;
	if (ret)
		return ret;

	/* RAMFC: per-channel FIFO context */
	ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst,
			      &chan->ramfc);
	if (ret)
		return ret;

	/* engine context object written by engine_init/engine_fini */
	ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst,
			      &chan->eng);
	if (ret)
		return ret;

	/* page directory for the channel's VM */
	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
			      &chan->pgd);
	if (ret)
		return ret;

	/* hash table used by object_ctor/object_dtor */
	ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
	if (ret)
		return ret;

	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
}