Lines matching refs:engine

38 struct nvkm_device *device = fifo->engine.subdev.device; in gk104_fifo_uevent_fini()
45 struct nvkm_device *device = fifo->engine.subdev.device; in gk104_fifo_uevent_init()
50 gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) in gk104_fifo_runlist_update() argument
52 struct gk104_fifo_engn *engn = &fifo->engine[engine]; in gk104_fifo_runlist_update()
54 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_runlist_update()
72 nvkm_wr32(device, 0x002274, (engine << 20) | nr); in gk104_fifo_runlist_update()
75 (engine * 0x08)) & 0x00100000), in gk104_fifo_runlist_update()
77 nvkm_error(subdev, "runlist %d update timeout\n", engine); in gk104_fifo_runlist_update()
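
Taken together, the gk104_fifo_runlist_update() matches outline the runlist commit handshake: the new runlist is submitted by writing (engine << 20) | nr to 0x002274, then the code waits for the pending bit in 0x002284 + (engine * 0x08) to drop, logging an error on timeout. A minimal sketch assembled from those fragments; the wait_event_timeout() form, the 2000 ms timeout, and the preceding runlist-buffer fill are assumptions, with engn->wait woken from the gk104_fifo_intr_runlist() match further down:

    /* Sketch reconstructed from the matched fragments, not verbatim:
     * submit an `nr`-entry runlist for `engine`, then wait for the
     * hardware to ack.  engn->wait is woken by the runlist interrupt. */
    nvkm_wr32(device, 0x002274, (engine << 20) | nr);
    if (!wait_event_timeout(engn->wait,
                            !(nvkm_rd32(device, 0x002284 +
                                        (engine * 0x08)) & 0x00100000),
                            msecs_to_jiffies(2000)))
        nvkm_error(subdev, "runlist %d update timeout\n", engine);
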
84 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_engine()
95 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_recover_work()
96 struct nvkm_engine *engine; in gk104_fifo_recover_work() local
111 if ((engine = nvkm_device_engine(device, engn))) { in gk104_fifo_recover_work()
112 nvkm_subdev_fini(&engine->subdev, false); in gk104_fifo_recover_work()
113 WARN_ON(nvkm_subdev_init(&engine->subdev)); in gk104_fifo_recover_work()
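
The gk104_fifo_recover_work() matches show the deferred recovery action: each engine flagged for recovery is looked up and reset by cycling its subdev through fini/init. A sketch with the surrounding mask walk reconstructed; only the inner three lines appear in the matches, and the mask snapshotting and locking are assumptions:

    /* Sketch: walk a snapshot of the faulting-engine mask (the snapshot
     * and its locking are assumptions) and reset each engine by cycling
     * its subdev through fini/init, as in the matches above. */
    u64 mask = fifo->mask, todo;
    int engn;
    struct nvkm_engine *engine;

    for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
        if ((engine = nvkm_device_engine(device, engn))) {
            nvkm_subdev_fini(&engine->subdev, false);
            WARN_ON(nvkm_subdev_init(&engine->subdev));
        }
    }
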
123 gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine, in gk104_fifo_recover() argument
126 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_recover()
131 nvkm_subdev_name[engine->subdev.index], chid); in gk104_fifo_recover()
138 fifo->mask |= 1ULL << engine->subdev.index; in gk104_fifo_recover()
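
gk104_fifo_recover() itself stays in interrupt context and defers the reset: it logs the faulting engine and channel, sets the engine's bit in fifo->mask, and schedules the recovery work. A sketch of that tail; the log message wording and the fifo->fault work-item name are assumptions, the rest follows the matched lines:

    /* Sketch: flag the engine and defer the reset to process context.
     * fifo->fault is an assumed name for the work item that runs
     * gk104_fifo_recover_work(). */
    nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
               nvkm_subdev_name[engine->subdev.index], chid);
    fifo->mask |= 1ULL << engine->subdev.index;
    schedule_work(&fifo->fault);
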
156 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_bind()
175 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_intr_sched_ctxsw()
176 struct nvkm_engine *engine; in gk104_fifo_intr_sched_ctxsw() local
182 for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) { in gk104_fifo_intr_sched_ctxsw()
194 list_for_each_entry(chan, &fifo->engine[engn].chan, head) { in gk104_fifo_intr_sched_ctxsw()
196 engine = gk104_fifo_engine(fifo, engn); in gk104_fifo_intr_sched_ctxsw()
197 if (!engine) in gk104_fifo_intr_sched_ctxsw()
199 gk104_fifo_recover(fifo, engine, chan); in gk104_fifo_intr_sched_ctxsw()
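
The gk104_fifo_intr_sched_ctxsw() matches trace the context-switch timeout path: each engine slot is scanned, the channel the scheduler blames is found on that slot's channel list, and the shared recovery path is invoked. A compressed sketch; the status-register decode between the matched lines is omitted and assumed:

    /* Sketch: find the channel the scheduler blames and recover it.
     * The status decode that yields `chid` is elided (assumption). */
    u32 engn, chid;
    struct gk104_fifo_chan *chan;
    struct nvkm_engine *engine;

    for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
        /* ... read engine status, derive chid, skip idle engines ... */
        list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
            if (chan->base.chid == chid) {
                engine = gk104_fifo_engine(fifo, engn);
                if (!engine)
                    break;
                gk104_fifo_recover(fifo, engine, chan);
                break;
            }
        }
    }
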
211 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_sched()
232 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_chsw()
242 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_dropped_fault()
356 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_fault()
368 struct nvkm_engine *engine = NULL; in gk104_fifo_intr_fault() local
394 engine = nvkm_device_engine(device, eu->data2); in gk104_fifo_intr_fault()
410 if (engine && chan) in gk104_fifo_intr_fault()
411 gk104_fifo_recover(fifo, engine, (void *)chan); in gk104_fifo_intr_fault()
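
In gk104_fifo_intr_fault(), the matches show the fault source being mapped back to an engine: the decoded engine-unit entry's data2 field indexes nvkm_device_engine(), and recovery runs only when both an engine and a channel were identified. A sketch of that tail; the `eu` lookup table and the channel lookup are outside the matched lines and assumed:

    /* Sketch: translate the decoded fault unit into an engine and
     * recover the blamed channel.  `eu` and `chan` come from lookups
     * not shown in the matches (assumptions). */
    if (eu)
        engine = nvkm_device_engine(device, eu->data2);
    /* ... resolve `chan` from the fault's instance address ... */
    if (engine && chan)
        gk104_fifo_recover(fifo, engine, (void *)chan);
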
452 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_pbdma_0()
500 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_pbdma_1()
521 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_intr_runlist()
525 wake_up(&fifo->engine[engn].wait); in gk104_fifo_intr_runlist()
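
gk104_fifo_intr_runlist() is the other half of the runlist handshake, waking the waitqueue that gk104_fifo_runlist_update() sleeps on. A sketch assuming the usual pending-mask interrupt pattern; the 0x002a00 pending/ack register is an assumption based on the gk104 register layout, and only the wake_up() line appears in the matches:

    /* Sketch: ack each pending runlist interrupt and wake the matching
     * waiter in gk104_fifo_runlist_update().  The 0x002a00 register is
     * an assumption. */
    u32 mask = nvkm_rd32(device, 0x002a00);
    while (mask) {
        u32 engn = __ffs(mask);
        wake_up(&fifo->engine[engn].wait);
        nvkm_wr32(device, 0x002a00, 1 << engn);
        mask &= ~(1 << engn);
    }
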
541 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr()
633 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_fini()
643 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_oneinit()
646 for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) { in gk104_fifo_oneinit()
649 &fifo->engine[i].runlist[0]); in gk104_fifo_oneinit()
655 &fifo->engine[i].runlist[1]); in gk104_fifo_oneinit()
659 init_waitqueue_head(&fifo->engine[i].wait); in gk104_fifo_oneinit()
660 INIT_LIST_HEAD(&fifo->engine[i].chan); in gk104_fifo_oneinit()
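
The gk104_fifo_oneinit() matches show the per-engine state being created: two runlist buffers per slot (runlist[0]/runlist[1], so an update can be built in the idle copy while the other is live), plus the waitqueue and channel list used by the runlist and recovery paths above. A sketch of the loop body; the size and alignment arguments to nvkm_memory_new() are assumptions:

    /* Sketch: double-buffered runlist per engine slot, plus the
     * waitqueue/channel list referenced elsewhere in this file.
     * nvkm_memory_new() arguments are assumed, not from the matches. */
    for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x8000,
                              0x1000, false, &fifo->engine[i].runlist[0]);
        if (ret)
            return ret;
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x8000,
                              0x1000, false, &fifo->engine[i].runlist[1]);
        if (ret)
            return ret;
        init_waitqueue_head(&fifo->engine[i].wait);
        INIT_LIST_HEAD(&fifo->engine[i].chan);
    }
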
682 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_init()
719 for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) { in gk104_fifo_dtor()
720 nvkm_memory_del(&fifo->engine[i].runlist[1]); in gk104_fifo_dtor()
721 nvkm_memory_del(&fifo->engine[i].runlist[0]); in gk104_fifo_dtor()