root/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c


DEFINITIONS

This source file includes the following definitions:
  1. nv50_fifo_chan_engine_addr
  2. nv50_fifo_chan_engine_fini
  3. nv50_fifo_chan_engine_init
  4. nv50_fifo_chan_engine_dtor
  5. nv50_fifo_chan_engine_ctor
  6. nv50_fifo_chan_object_dtor
  7. nv50_fifo_chan_object_ctor
  8. nv50_fifo_chan_fini
  9. nv50_fifo_chan_init
  10. nv50_fifo_chan_dtor
  11. nv50_fifo_chan_ctor

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "channv50.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

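/* Offset of an engine's context pointer within the channel's engine
 * context table (chan->eng), or -1 for engines without one.
 */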
static int
nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
{
        switch (engine->subdev.index) {
        case NVKM_ENGINE_DMAOBJ:
        case NVKM_ENGINE_SW    : return -1;
        case NVKM_ENGINE_GR    : return 0x0000;
        case NVKM_ENGINE_MPEG  : return 0x0060;
        default:
                WARN_ON(1);
                return -1;
        }
}

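/* Kick the channel's context off the given engine, clearing its context
 * pointers once the engine acknowledges the unload request.
 */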
static int
nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
                           struct nvkm_engine *engine, bool suspend)
{
        struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
        struct nv50_fifo *fifo = chan->fifo;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        int offset, ret = 0;
        u32 me;

        offset = nv50_fifo_chan_engine_addr(engine);
        if (offset < 0)
                return 0;

        /* HW bug workaround:
         *
         * PFIFO will hang forever if the connected engines don't report
         * that they've processed the context switch request.
         *
         * In order for the kickoff to work, we need to ensure all the
         * connected engines are in a state where they can answer.
         *
         * Newer chipsets don't seem to suffer from this issue, and well,
         * there's also an "ignore these engines" bitmask reg we can use
         * if we hit the issue there..
         */
        me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);

        /* do the kickoff, then wait for the engine to acknowledge it... */
        nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
        if (nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
                        break;
        ) < 0) {
                nvkm_error(subdev, "channel %d [%s] unload timeout\n",
                           chan->base.chid, chan->base.object.client->name);
                if (suspend)
                        ret = -EBUSY;
        }
        nvkm_wr32(device, 0x00b860, me);

        if (ret == 0) {
                nvkm_kmap(chan->eng);
                nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
                nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
                nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
                nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
                nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
                nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
                nvkm_done(chan->eng);
        }

        return ret;
}

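/* Fill in the engine's entry in the channel's context table with the
 * base/limit of the per-engine context object.
 */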
static int
nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
                           struct nvkm_engine *engine)
{
        struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
        struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
        u64 limit, start;
        int offset;

        offset = nv50_fifo_chan_engine_addr(engine);
        if (offset < 0)
                return 0;
        limit = engn->addr + engn->size - 1;
        start = engn->addr;

        nvkm_kmap(chan->eng);
        nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
        nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
        nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
        nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
                                            upper_32_bits(start));
        nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
        nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
        nvkm_done(chan->eng);
        return 0;
}

void
nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
                           struct nvkm_engine *engine)
{
        struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
        nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
}

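/* Bind an engine context object into instance memory, but only for
 * engines that have a slot in the channel's context table.
 */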
static int
nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
                           struct nvkm_engine *engine,
                           struct nvkm_object *object)
{
        struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
        int engn = engine->subdev.index;

        if (nv50_fifo_chan_engine_addr(engine) < 0)
                return 0;

        return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}

void
nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie)
{
        struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
        nvkm_ramht_remove(chan->ramht, cookie);
}

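/* Add an object handle to the channel's hash table (RAMHT); the context
 * value encodes which engine the handle belongs to.
 */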
static int
nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
                           struct nvkm_object *object)
{
        struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
        u32 handle = object->handle;
        u32 context;

        switch (object->engine->subdev.index) {
        case NVKM_ENGINE_DMAOBJ:
        case NVKM_ENGINE_SW    : context = 0x00000000; break;
        case NVKM_ENGINE_GR    : context = 0x00100000; break;
        case NVKM_ENGINE_MPEG  : context = 0x00200000; break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }

        return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
}

void
nv50_fifo_chan_fini(struct nvkm_fifo_chan *base)
{
        struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
        struct nv50_fifo *fifo = chan->fifo;
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        u32 chid = chan->base.chid;

        /* remove channel from runlist, fifo will unload context */
        nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
        nv50_fifo_runlist_update(fifo);
        nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
}

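/* point the channel's table entry at its RAMFC, then add it to the runlist */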
static void
nv50_fifo_chan_init(struct nvkm_fifo_chan *base)
{
        struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
        struct nv50_fifo *fifo = chan->fifo;
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        u64 addr = chan->ramfc->addr >> 12;
        u32 chid = chan->base.chid;

        nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
        nv50_fifo_runlist_update(fifo);
}

void *
nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
{
        struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
        nvkm_ramht_del(&chan->ramht);
        nvkm_gpuobj_del(&chan->pgd);
        nvkm_gpuobj_del(&chan->eng);
        nvkm_gpuobj_del(&chan->cache);
        nvkm_gpuobj_del(&chan->ramfc);
        return chan;
}

static const struct nvkm_fifo_chan_func
nv50_fifo_chan_func = {
        .dtor = nv50_fifo_chan_dtor,
        .init = nv50_fifo_chan_init,
        .fini = nv50_fifo_chan_fini,
        .engine_ctor = nv50_fifo_chan_engine_ctor,
        .engine_dtor = nv50_fifo_chan_engine_dtor,
        .engine_init = nv50_fifo_chan_engine_init,
        .engine_fini = nv50_fifo_chan_engine_fini,
        .object_ctor = nv50_fifo_chan_object_ctor,
        .object_dtor = nv50_fifo_chan_object_dtor,
};

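/* Common channel constructor: creates the base channel, then allocates the
 * RAMFC, engine context table, page directory and hash table within the
 * channel's instance block.
 */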
int
nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
                    const struct nvkm_oclass *oclass,
                    struct nv50_fifo_chan *chan)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        int ret;

        if (!vmm)
                return -EINVAL;

        ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
                                  0x10000, 0x1000, false, vmm, push,
                                  (1ULL << NVKM_ENGINE_DMAOBJ) |
                                  (1ULL << NVKM_ENGINE_SW) |
                                  (1ULL << NVKM_ENGINE_GR) |
                                  (1ULL << NVKM_ENGINE_MPEG),
                                  0, 0xc00000, 0x2000, oclass, &chan->base);
        chan->fifo = fifo;
        if (ret)
                return ret;

        ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst,
                              &chan->ramfc);
        if (ret)
                return ret;

        ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst,
                              &chan->eng);
        if (ret)
                return ret;

        ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
                              &chan->pgd);
        if (ret)
                return ret;

        return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
}