/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/core/ |
D | engine.c | 33 struct nvkm_engine *engine = *pengine; in nvkm_engine_unref() local 34 if (engine) { in nvkm_engine_unref() 35 mutex_lock(&engine->subdev.mutex); in nvkm_engine_unref() 36 if (--engine->usecount == 0) in nvkm_engine_unref() 37 nvkm_subdev_fini(&engine->subdev, false); in nvkm_engine_unref() 38 mutex_unlock(&engine->subdev.mutex); in nvkm_engine_unref() 44 nvkm_engine_ref(struct nvkm_engine *engine) in nvkm_engine_ref() argument 46 if (engine) { in nvkm_engine_ref() 47 mutex_lock(&engine->subdev.mutex); in nvkm_engine_ref() 48 if (++engine->usecount == 1) { in nvkm_engine_ref() [all …]
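
The engine.c fragments above are nouveau's lazy power management in miniature: every user takes a reference under the subdev mutex, the first reference initialises the engine, and dropping the last one finishes it again. A minimal standalone sketch of that use-count idiom, with simplified stand-in types rather than the real nvkm structures:

/* Sketch only: a pthread mutex and function-pointer hooks stand in for
 * the nvkm subdev mutex and nvkm_subdev_init()/nvkm_subdev_fini(). */
#include <pthread.h>
#include <stddef.h>

struct engine {
	pthread_mutex_t mutex;
	int usecount;
	void (*init)(struct engine *);
	void (*fini)(struct engine *);
};

struct engine *engine_ref(struct engine *engine)
{
	if (engine) {
		pthread_mutex_lock(&engine->mutex);
		if (++engine->usecount == 1)   /* first user powers it up */
			engine->init(engine);
		pthread_mutex_unlock(&engine->mutex);
	}
	return engine;
}

void engine_unref(struct engine **pengine)
{
	struct engine *engine = *pengine;
	if (engine) {
		pthread_mutex_lock(&engine->mutex);
		if (--engine->usecount == 0)   /* last user shuts it down */
			engine->fini(engine);
		pthread_mutex_unlock(&engine->mutex);
		*pengine = NULL;   /* caller's pointer is cleared, as in nvkm_engine_unref() */
	}
}
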
|
D | object.c | 205 nvkm_engine_unref(&object->engine); in nvkm_object_dtor() 230 object->engine = nvkm_engine_ref(oclass->engine); in nvkm_object_ctor() 236 WARN_ON(oclass->engine && !object->engine); in nvkm_object_ctor()
|
D | ioctl.c | 116 if (oclass.engine) { in nvkm_ioctl_new() 117 oclass.engine = nvkm_engine_ref(oclass.engine); in nvkm_ioctl_new() 118 if (IS_ERR(oclass.engine)) in nvkm_ioctl_new() 119 return PTR_ERR(oclass.engine); in nvkm_ioctl_new() 123 nvkm_engine_unref(&oclass.engine); in nvkm_ioctl_new()
|
D | Kbuild | 2 nvkm-y += nvkm/core/engine.o
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/disp/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/disp/base.o 2 nvkm-y += nvkm/engine/disp/nv04.o 3 nvkm-y += nvkm/engine/disp/nv50.o 4 nvkm-y += nvkm/engine/disp/g84.o 5 nvkm-y += nvkm/engine/disp/g94.o 6 nvkm-y += nvkm/engine/disp/gt200.o 7 nvkm-y += nvkm/engine/disp/gt215.o 8 nvkm-y += nvkm/engine/disp/gf119.o 9 nvkm-y += nvkm/engine/disp/gk104.o 10 nvkm-y += nvkm/engine/disp/gk110.o [all …]
|
D | base.c | 126 struct nvkm_disp *disp = nvkm_disp(object->engine); in nvkm_disp_ntfy() 143 struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine); in nvkm_disp_class_del() 144 mutex_lock(&disp->engine.subdev.mutex); in nvkm_disp_class_del() 147 mutex_unlock(&disp->engine.subdev.mutex); in nvkm_disp_class_del() 161 struct nvkm_disp *disp = nvkm_disp(oclass->engine); in nvkm_disp_class_new() 170 mutex_lock(&disp->engine.subdev.mutex); in nvkm_disp_class_new() 172 mutex_unlock(&disp->engine.subdev.mutex); in nvkm_disp_class_new() 176 mutex_unlock(&disp->engine.subdev.mutex); in nvkm_disp_class_new() 190 struct nvkm_disp *disp = nvkm_disp(oclass->engine); in nvkm_disp_class_get() 202 nvkm_disp_intr(struct nvkm_engine *engine) in nvkm_disp_intr() argument [all …]
|
D | dmacnv50.c | 60 struct nvkm_device *device = root->disp->base.engine.subdev.device; in nv50_disp_dmac_child_new_() 89 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_dmac_child_get_() 92 sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ); in nv50_disp_dmac_child_get_() 93 if (sclass->engine && sclass->engine->func->base.sclass) { in nv50_disp_dmac_child_get_() 94 sclass->engine->func->base.sclass(sclass, index, &oclass); in nv50_disp_dmac_child_get_() 140 struct nvkm_device *device = root->disp->base.engine.subdev.device; in nv50_disp_dmac_new_() 191 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in nv50_disp_dmac_fini() 214 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in nv50_disp_dmac_init()
|
D | gf119.c | 36 struct nvkm_device *device = disp->base.engine.subdev.device; in gf119_disp_vblank_init() 43 struct nvkm_device *device = disp->base.engine.subdev.device; in gf119_disp_vblank_fini() 52 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in exec_lookup() 97 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in exec_script() 135 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in exec_clkcmp() 205 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in gf119_disp_intr_unk2_0() 228 struct nvkm_device *device = disp->base.engine.subdev.device; in gf119_disp_intr_unk2_1() 240 struct nvkm_device *device = disp->base.engine.subdev.device; in gf119_disp_intr_unk2_2_tu() 301 struct nvkm_device *device = disp->base.engine.subdev.device; in gf119_disp_intr_unk2_2() 355 struct nvkm_device *device = disp->base.engine.subdev.device; in gf119_disp_intr_unk4_0() [all …]
|
D | nv04.c | 35 struct nvkm_device *device = disp->engine.subdev.device; in nv04_disp_vblank_init() 42 struct nvkm_device *device = disp->engine.subdev.device; in nv04_disp_vblank_fini() 49 struct nvkm_subdev *subdev = &disp->engine.subdev; in nv04_disp_intr()
|
D | channv50.c | 39 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in nv50_disp_mthd_list() 68 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in nv50_disp_chan_mthd() 106 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_chan_uevent_fini() 115 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_chan_uevent_init() 161 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_chan_rd32() 171 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_chan_wr32() 197 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_chan_map()
|
D | sorgm204.c | 44 struct nvkm_device *device = outp->disp->engine.subdev.device; in gm204_sor_magic() 62 struct nvkm_device *device = outp->base.disp->engine.subdev.device; in gm204_sor_dp_pattern() 75 struct nvkm_device *device = outp->base.disp->engine.subdev.device; in gm204_sor_dp_lnk_pwr() 96 struct nvkm_device *device = outp->base.disp->engine.subdev.device; in gm204_sor_dp_drv_ctl()
|
D | sorg94.c | 75 struct nvkm_device *device = outp->base.disp->engine.subdev.device; in g94_sor_dp_pattern() 84 struct nvkm_device *device = outp->base.disp->engine.subdev.device; in g94_sor_dp_lnk_pwr() 104 struct nvkm_device *device = outp->base.disp->engine.subdev.device; in g94_sor_dp_lnk_ctl() 124 struct nvkm_device *device = outp->base.disp->engine.subdev.device; in g94_sor_dp_drv_ctl()
|
D | nv50.c | 166 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_vblank_fini() 173 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_vblank_init() 195 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in nv50_disp_intr_error() 231 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in exec_lookup() 287 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in exec_script() 346 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in exec_clkcmp() 442 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in nv50_disp_intr_unk20_0() 477 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_intr_unk20_1() 488 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in nv50_disp_intr_unk20_2_dp() 608 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_intr_unk20_2() [all …]
|
D | changf119.c | 30 struct nvkm_device *device = disp->base.engine.subdev.device; in gf119_disp_chan_uevent_fini() 39 struct nvkm_device *device = disp->base.engine.subdev.device; in gf119_disp_chan_uevent_init()
|
D | sorgf119.c | 42 struct nvkm_device *device = outp->base.disp->engine.subdev.device; in gf119_sor_dp_pattern() 51 struct nvkm_device *device = outp->base.disp->engine.subdev.device; in gf119_sor_dp_lnk_ctl() 71 struct nvkm_device *device = outp->base.disp->engine.subdev.device; in gf119_sor_dp_drv_ctl()
|
D | piocnv50.c | 33 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in nv50_disp_pioc_fini() 51 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in nv50_disp_pioc_init()
|
D | piocgf119.c | 33 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in gf119_disp_pioc_fini() 55 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in gf119_disp_pioc_init()
|
D | rootgf119.c | 37 struct nvkm_device *device = disp->base.engine.subdev.device; in gf119_disp_root_scanoutpos() 71 struct nvkm_device *device = root->disp->base.engine.subdev.device; in gf119_disp_root_fini() 80 struct nvkm_device *device = disp->base.engine.subdev.device; in gf119_disp_root_init()
|
D | dmacgf119.c | 43 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in gf119_disp_dmac_fini() 67 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in gf119_disp_dmac_init()
|
D | coregf119.c | 178 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in gf119_disp_core_fini() 201 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in gf119_disp_core_init()
|
D | piornv50.c | 37 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_pior_power() 120 struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c; in nv50_pior_dp_new()
|
D | dacnv50.c | 36 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_dac_power() 72 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in nv50_dac_sense()
|
D | rootnv50.c | 37 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_root_scanoutpos() 277 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_root_new_() 288 ret = nvkm_gpuobj_new(disp->base.engine.subdev.device, 0x10000, 0x10000, in nv50_disp_root_new_() 299 struct nvkm_device *device = root->disp->base.engine.subdev.device; in nv50_disp_root_fini() 309 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_disp_root_init()
|
D | corenv50.c | 171 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in nv50_disp_core_fini() 193 struct nvkm_subdev *subdev = &disp->base.engine.subdev; in nv50_disp_core_init()
|
D | conn.c | 37 struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio; in nvkm_connector_hpd() 82 struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio; in nvkm_connector_ctor()
|
D | dport.c | 53 struct nvkm_subdev *subdev = &disp->engine.subdev; in dp_set_link_config() 264 struct nvkm_subdev *subdev = &disp->engine.subdev; in dp_link_train_init() 290 struct nvkm_subdev *subdev = &disp->engine.subdev; in dp_link_train_fini() 338 if (disp->base.engine.subdev.device->chipset < 0xd0) in nvkm_dp_train()
|
D | conn.h | 27 nvkm_##l(&_conn->disp->engine.subdev, "conn %02x:%02x%02x: "f"\n", \
|
D | sornv50.c | 36 struct nvkm_device *device = disp->base.engine.subdev.device; in nv50_sor_power()
|
D | hdagt215.c | 36 struct nvkm_device *device = disp->base.engine.subdev.device; in gt215_hda_eld()
|
D | hdmigf119.c | 34 struct nvkm_device *device = disp->base.engine.subdev.device; in gf119_hdmi_ctrl()
|
D | hdagf119.c | 38 struct nvkm_device *device = disp->base.engine.subdev.device; in gf119_hda_eld()
|
D | hdmigk104.c | 34 struct nvkm_device *device = disp->base.engine.subdev.device; in gk104_hdmi_ctrl()
|
D | outp.h | 49 nvkm_##l(&_outp->disp->engine.subdev, "outp %02x:%04x:%04x: "f"\n", \
|
D | hdmig84.c | 34 struct nvkm_device *device = disp->base.engine.subdev.device; in g84_hdmi_ctrl()
|
D | hdmigt215.c | 35 struct nvkm_device *device = disp->base.engine.subdev.device; in gt215_hdmi_ctrl()
|
D | outpdp.h | 6 nvkm_##l(&outp->base.disp->engine.subdev, "%02x:%04x:%04x: "f, \
|
D | outp.c | 60 struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c; in nvkm_output_ctor()
|
D | outpdp.c | 214 struct nvkm_device *device = disp->engine.subdev.device; in nvkm_output_dp_ctor() 282 struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c; in nvkm_output_dp_new_()
|
D | rootnv04.c | 41 struct nvkm_device *device = root->disp->engine.subdev.device; in nv04_disp_scanoutpos()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/gr/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/gr/base.o 2 nvkm-y += nvkm/engine/gr/nv04.o 3 nvkm-y += nvkm/engine/gr/nv10.o 4 nvkm-y += nvkm/engine/gr/nv15.o 5 nvkm-y += nvkm/engine/gr/nv17.o 6 nvkm-y += nvkm/engine/gr/nv20.o 7 nvkm-y += nvkm/engine/gr/nv25.o 8 nvkm-y += nvkm/engine/gr/nv2a.o 9 nvkm-y += nvkm/engine/gr/nv30.o 10 nvkm-y += nvkm/engine/gr/nv34.o [all …]
|
D | base.c | 29 nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile) in nvkm_gr_tile() argument 31 struct nvkm_gr *gr = nvkm_gr(engine); in nvkm_gr_tile() 55 struct nvkm_gr *gr = nvkm_gr(oclass->engine); in nvkm_gr_oclass_get() 80 struct nvkm_gr *gr = nvkm_gr(oclass->engine); in nvkm_gr_cclass_new() 87 nvkm_gr_intr(struct nvkm_engine *engine) in nvkm_gr_intr() argument 89 struct nvkm_gr *gr = nvkm_gr(engine); in nvkm_gr_intr() 94 nvkm_gr_oneinit(struct nvkm_engine *engine) in nvkm_gr_oneinit() argument 96 struct nvkm_gr *gr = nvkm_gr(engine); in nvkm_gr_oneinit() 103 nvkm_gr_init(struct nvkm_engine *engine) in nvkm_gr_init() argument 105 struct nvkm_gr *gr = nvkm_gr(engine); in nvkm_gr_init() [all …]
|
D | nv40.c | 36 return nvkm_rd32(gr->engine.subdev.device, 0x1540); in nv40_gr_units() 47 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align, in nv40_gr_object_bind() 79 int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size, in nv40_gr_chan_bind() 84 nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj); in nv40_gr_chan_bind() 96 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in nv40_gr_chan_fini() 134 spin_lock_irqsave(&chan->gr->base.engine.lock, flags); in nv40_gr_chan_dtor() 136 spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags); in nv40_gr_chan_dtor() 162 spin_lock_irqsave(&chan->gr->base.engine.lock, flags); in nv40_gr_chan_new() 164 spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags); in nv40_gr_chan_new() 176 struct nvkm_device *device = gr->base.engine.subdev.device; in nv40_gr_tile() [all …]
|
D | gf100.c | 46 struct nvkm_device *device = gr->base.engine.subdev.device; in gf100_gr_zbc_clear_color() 62 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; in gf100_gr_zbc_color_get() 97 struct nvkm_device *device = gr->base.engine.subdev.device; in gf100_gr_zbc_clear_depth() 109 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; in gf100_gr_zbc_depth_get() 146 struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); in gf100_fermi_mthd_zbc_color() 192 struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); in gf100_fermi_mthd_zbc_depth() 287 ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size, in gf100_gr_chan_bind() 351 struct nvkm_device *device = gr->base.engine.subdev.device; in gf100_gr_chan_new() 684 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; in gf100_gr_zbc_init() 710 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in gf100_gr_wait_idle() [all …]
|
D | nv10.c | 433 struct nvkm_device *device = chan->object.engine->subdev.device; in nv17_gr_mthd_lma_window() 506 struct nvkm_device *device = chan->object.engine->subdev.device; in nv17_gr_mthd_lma_enable() 549 struct nvkm_device *device = gr->base.engine.subdev.device; in nv10_gr_channel() 564 struct nvkm_device *device = gr->base.engine.subdev.device; in nv10_gr_save_pipe() 583 struct nvkm_device *device = gr->base.engine.subdev.device; in nv10_gr_load_pipe() 633 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in nv10_gr_create_pipe() 788 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in nv10_gr_ctx_regs_find_offset() 801 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in nv17_gr_ctx_regs_find_offset() 815 struct nvkm_device *device = gr->base.engine.subdev.device; in nv10_gr_load_dma_vtxbuf() 886 struct nvkm_device *device = gr->base.engine.subdev.device; in nv10_gr_load_context() [all …]
|
D | nv50.c | 33 return nvkm_rd32(gr->engine.subdev.device, 0x1540); in nv50_gr_units() 44 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, in nv50_gr_object_bind() 71 int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size, in nv50_gr_chan_bind() 75 nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj); in nv50_gr_chan_bind() 240 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in nv50_gr_prop_trap() 282 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in nv50_gr_mp_trap() 326 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in nv50_gr_tp_trap() 396 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in nv50_gr_trap_handler() 621 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in nv50_gr_intr() 680 struct nvkm_device *device = gr->base.engine.subdev.device; in nv50_gr_init()
|
D | ctxgm204.c | 924 struct nvkm_device *device = gr->base.engine.subdev.device; in gm204_grctx_generate_tpcid() 942 struct nvkm_device *device = gr->base.engine.subdev.device; in gm204_grctx_generate_rop_active_fbps() 951 struct nvkm_device *device = gr->base.engine.subdev.device; in gm204_grctx_generate_405b60() 983 struct nvkm_device *device = gr->base.engine.subdev.device; in gm204_grctx_generate_main()
|
D | gm20b.c | 32 struct nvkm_device *device = gr->base.engine.subdev.device; in gm20b_gr_init_gpc_mmu() 57 struct nvkm_device *device = gr->base.engine.subdev.device; in gm20b_gr_set_hww_esr_report_mask()
|
D | ctxgf100.c | 1007 struct nvkm_device *device = info->gr->base.engine.subdev.device; in gf100_grctx_mmio_item() 1088 struct nvkm_device *device = gr->base.engine.subdev.device; in gf100_grctx_generate_tpcid() 1110 struct nvkm_device *device = gr->base.engine.subdev.device; in gf100_grctx_generate_r406028() 1123 struct nvkm_device *device = gr->base.engine.subdev.device; in gf100_grctx_generate_r4060a8() 1146 struct nvkm_device *device = gr->base.engine.subdev.device; in gf100_grctx_generate_r418bb8() 1205 struct nvkm_device *device = gr->base.engine.subdev.device; in gf100_grctx_generate_r406800() 1239 struct nvkm_device *device = gr->base.engine.subdev.device; in gf100_grctx_generate_main() 1273 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in gf100_grctx_generate()
|
D | ctxgk104.c | 877 struct nvkm_device *device = gr->base.engine.subdev.device; in gk104_grctx_generate_unkn() 889 struct nvkm_device *device = gr->base.engine.subdev.device; in gk104_grctx_generate_r418bb8() 948 struct nvkm_device *device = gr->base.engine.subdev.device; in gk104_grctx_generate_rop_active_fbps() 957 struct nvkm_device *device = gr->base.engine.subdev.device; in gk104_grctx_generate_main()
|
D | nv20.c | 33 struct nvkm_device *device = gr->base.engine.subdev.device; in nv20_gr_chan_fini() 88 ret = nvkm_memory_new(gr->base.engine.subdev.device, in nv20_gr_chan_new() 151 struct nvkm_device *device = gr->base.engine.subdev.device; in nv20_gr_tile() 182 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in nv20_gr_intr() 222 return nvkm_memory_new(gr->base.engine.subdev.device, in nv20_gr_oneinit() 231 struct nvkm_device *device = gr->base.engine.subdev.device; in nv20_gr_init()
|
D | ctxgm20b.c | 27 struct nvkm_device *device = gr->base.engine.subdev.device; in gm20b_grctx_generate_r406028() 41 struct nvkm_device *device = gr->base.engine.subdev.device; in gm20b_grctx_generate_main()
|
D | g84.c | 96 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in nvkm_gr_vstatus_print() 116 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in g84_gr_tlb_flush()
|
D | nv04.c | 1046 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align, in nv04_gr_object_bind() 1074 struct nvkm_device *device = gr->base.engine.subdev.device; in nv04_gr_channel() 1087 struct nvkm_device *device = chan->gr->base.engine.subdev.device; in nv04_gr_load_context() 1102 struct nvkm_device *device = chan->gr->base.engine.subdev.device; in nv04_gr_unload_context() 1116 struct nvkm_device *device = gr->base.engine.subdev.device; in nv04_gr_context_switch() 1165 struct nvkm_device *device = gr->base.engine.subdev.device; in nv04_gr_chan_fini() 1213 struct nvkm_subdev *subdev = &gr->engine.subdev; in nv04_gr_idle() 1275 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in nv04_gr_intr() 1331 struct nvkm_device *device = gr->base.engine.subdev.device; in nv04_gr_init()
|
D | gk20a.c | 152 struct nvkm_subdev *subdev = &gr->base.engine.subdev; in gk20a_gr_wait_mem_scrubbing() 177 struct nvkm_device *device = gr->base.engine.subdev.device; in gk20a_gr_set_hww_esr_report_mask() 185 struct nvkm_device *device = gr->base.engine.subdev.device; in gk20a_gr_init()
|
D | priv.h | 3 #define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)
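
nvkm_gr() is the standard container_of() downcast: the core hands around the embedded struct nvkm_engine, and each engine type recovers its wrapper struct from that pointer. A sketch of the mechanism; the struct layout below is a trimmed hypothetical, not the real nvkm_gr definition:

#include <stddef.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvkm_engine { int index; };

struct nvkm_gr {
	struct nvkm_engine engine;   /* "base class" embedded by value */
	int units;                   /* hypothetical subclass state */
};

#define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)

static int gr_units(struct nvkm_engine *engine)
{
	struct nvkm_gr *gr = nvkm_gr(engine);   /* member pointer -> wrapper */
	return gr->units;
}

The same one-line macro pattern repeats for nvkm_fifo, nvkm_pm, nvkm_sw and nvkm_dma in their respective priv.h files below.
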
|
D | nv30.c | 35 ret = nvkm_memory_new(gr->base.engine.subdev.device, in nv30_gr_chan_new() 106 struct nvkm_device *device = gr->base.engine.subdev.device; in nv30_gr_init()
|
D | ctxgk20a.c | 30 struct nvkm_device *device = gr->base.engine.subdev.device; in gk20a_grctx_generate_main()
|
D | ctxgm107.c | 937 struct nvkm_device *device = gr->base.engine.subdev.device; in gm107_grctx_generate_tpcid() 958 struct nvkm_device *device = gr->base.engine.subdev.device; in gm107_grctx_generate_main()
|
D | gm107.c | 294 struct nvkm_device *device = gr->base.engine.subdev.device; in gm107_gr_init_bios() 313 struct nvkm_device *device = gr->base.engine.subdev.device; in gm107_gr_init()
|
D | nv44.c | 34 struct nvkm_device *device = gr->base.engine.subdev.device; in nv44_gr_tile()
|
D | ctxgf108.c | 770 struct nvkm_device *device = gr->base.engine.subdev.device; in gf108_grctx_generate_unkn()
|
/linux-4.4.14/drivers/video/fbdev/via/ |
D | accel.c | 27 static int viafb_set_bpp(void __iomem *engine, u8 bpp) in viafb_set_bpp() argument 33 gemode = readl(engine + VIA_REG_GEMODE) & 0xfffffcfc; in viafb_set_bpp() 48 writel(gemode, engine + VIA_REG_GEMODE); in viafb_set_bpp() 53 static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height, in hw_bitblt_1() argument 93 ret = viafb_set_bpp(engine, dst_bpp); in hw_bitblt_1() 105 writel(tmp, engine + 0x08); in hw_bitblt_1() 114 writel(tmp, engine + 0x0C); in hw_bitblt_1() 122 writel(tmp, engine + 0x10); in hw_bitblt_1() 125 writel(fg_color, engine + 0x18); in hw_bitblt_1() 128 writel(bg_color, engine + 0x1C); in hw_bitblt_1() [all …]
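
viafb_set_bpp() above is a read-modify-write of the GEMODE register: read the register, mask off the depth field (& 0xfffffcfc), or in the bits for the new mode, and write the result back. A generic sketch of that idiom; the register pointer and masks are illustrative, not the VIA layout:

#include <stdint.h>

/* Clear the bits in 'clear', set the bits in 'set', write back. */
static inline uint32_t rmw32(volatile uint32_t *reg, uint32_t clear,
			     uint32_t set)
{
	uint32_t v = *reg;   /* read current hardware state */
	v &= ~clear;         /* drop the field being replaced */
	v |= set;            /* install the new field value */
	*reg = v;
	return v;
}
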
|
D | viafbdev.h | 66 int (*hw_bitblt)(void __iomem *engine, u8 op, u32 width, u32 height,
|
/linux-4.4.14/drivers/crypto/ |
D | picoxcell_crypto.c | 94 struct spacc_engine *engine; member 108 struct spacc_engine *engine; member 151 struct spacc_engine *engine; member 159 struct spacc_engine *engine; member 199 static inline int spacc_fifo_cmd_full(struct spacc_engine *engine) in spacc_fifo_cmd_full() argument 201 u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET); in spacc_fifo_cmd_full() 217 return is_cipher_ctx ? ctx->engine->cipher_ctx_base + in spacc_ctx_page_addr() 218 (indx * ctx->engine->cipher_pg_sz) : in spacc_ctx_page_addr() 219 ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz); in spacc_ctx_page_addr() 253 unsigned indx = ctx->engine->next_ctx++; in spacc_load_ctx() [all …]
|
D | Kconfig | 17 Some VIA processors come with an integrated crypto engine 53 tristate "Support for the Geode LX AES engine" 59 engine for the CryptoAPI AES algorithm. 269 Driver for the IXP4xx NPE crypto engine. 292 tristate "Support for OMAP AES hw engine" 301 tristate "Support for OMAP DES3DES hw engine" 366 Driver for ST-Ericsson UX500 crypto engine. 449 tristate "Qualcomm crypto engine accelerator" 459 This driver supports Qualcomm crypto engine accelerator
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/fifo/base.o 2 nvkm-y += nvkm/engine/fifo/nv04.o 3 nvkm-y += nvkm/engine/fifo/nv10.o 4 nvkm-y += nvkm/engine/fifo/nv17.o 5 nvkm-y += nvkm/engine/fifo/nv40.o 6 nvkm-y += nvkm/engine/fifo/nv50.o 7 nvkm-y += nvkm/engine/fifo/g84.o 8 nvkm-y += nvkm/engine/fifo/gf100.o 9 nvkm-y += nvkm/engine/fifo/gk104.o 10 nvkm-y += nvkm/engine/fifo/gk208.o [all …]
|
D | gpfifogk104.c | 39 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_gpfifo_kick() 57 gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine) in gk104_fifo_gpfifo_engine_addr() argument 59 switch (engine->subdev.index) { in gk104_fifo_gpfifo_engine_addr() 76 struct nvkm_engine *engine, bool suspend) in gk104_fifo_gpfifo_engine_fini() argument 78 const u32 offset = gk104_fifo_gpfifo_engine_addr(engine); in gk104_fifo_gpfifo_engine_fini() 99 struct nvkm_engine *engine) in gk104_fifo_gpfifo_engine_init() argument 101 const u32 offset = gk104_fifo_gpfifo_engine_addr(engine); in gk104_fifo_gpfifo_engine_init() 106 u64 addr = chan->engn[engine->subdev.index].vma.offset; in gk104_fifo_gpfifo_engine_init() 118 struct nvkm_engine *engine) in gk104_fifo_gpfifo_engine_dtor() argument 121 nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma); in gk104_fifo_gpfifo_engine_dtor() [all …]
|
D | chan.c | 43 struct nvkm_engine *engine = object->oproxy.object->engine; in nvkm_fifo_chan_child_fini() local 45 struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index]; in nvkm_fifo_chan_child_fini() 46 const char *name = nvkm_subdev_name[engine->subdev.index]; in nvkm_fifo_chan_child_fini() 53 ret = chan->func->engine_fini(chan, engine, suspend); in nvkm_fifo_chan_child_fini() 76 struct nvkm_engine *engine = object->oproxy.object->engine; in nvkm_fifo_chan_child_init() local 78 struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index]; in nvkm_fifo_chan_child_init() 79 const char *name = nvkm_subdev_name[engine->subdev.index]; in nvkm_fifo_chan_child_init() 92 ret = chan->func->engine_init(chan, engine); in nvkm_fifo_chan_child_init() 109 struct nvkm_engine *engine = object->oproxy.base.engine; in nvkm_fifo_chan_child_del() local 111 struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index]; in nvkm_fifo_chan_child_del() [all …]
|
D | gk104.c | 38 struct nvkm_device *device = fifo->engine.subdev.device; in gk104_fifo_uevent_fini() 45 struct nvkm_device *device = fifo->engine.subdev.device; in gk104_fifo_uevent_init() 50 gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) in gk104_fifo_runlist_update() argument 52 struct gk104_fifo_engn *engn = &fifo->engine[engine]; in gk104_fifo_runlist_update() 54 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_runlist_update() 72 nvkm_wr32(device, 0x002274, (engine << 20) | nr); in gk104_fifo_runlist_update() 75 (engine * 0x08)) & 0x00100000), in gk104_fifo_runlist_update() 77 nvkm_error(subdev, "runlist %d update timeout\n", engine); in gk104_fifo_runlist_update() 84 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_engine() 95 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_recover_work() [all …]
|
D | dmanv40.c | 35 nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx) in nv40_fifo_dma_engine() argument 37 switch (engine->subdev.index) { in nv40_fifo_dma_engine() 57 struct nvkm_engine *engine, bool suspend) in nv40_fifo_dma_engine_fini() argument 61 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv40_fifo_dma_engine_fini() 67 if (!nv40_fifo_dma_engine(engine, ®, &ctx)) in nv40_fifo_dma_engine_fini() 87 struct nvkm_engine *engine) in nv40_fifo_dma_engine_init() argument 91 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv40_fifo_dma_engine_init() 97 if (!nv40_fifo_dma_engine(engine, ®, &ctx)) in nv40_fifo_dma_engine_init() 99 inst = chan->engn[engine->subdev.index]->addr >> 4; in nv40_fifo_dma_engine_init() 118 struct nvkm_engine *engine) in nv40_fifo_dma_engine_dtor() argument [all …]
|
D | chang84.c | 48 g84_fifo_chan_engine(struct nvkm_engine *engine) in g84_fifo_chan_engine() argument 50 switch (engine->subdev.index) { in g84_fifo_chan_engine() 68 g84_fifo_chan_engine_addr(struct nvkm_engine *engine) in g84_fifo_chan_engine_addr() argument 70 switch (engine->subdev.index) { in g84_fifo_chan_engine_addr() 91 struct nvkm_engine *engine, bool suspend) in g84_fifo_chan_engine_fini() argument 95 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in g84_fifo_chan_engine_fini() 101 offset = g84_fifo_chan_engine_addr(engine); in g84_fifo_chan_engine_fini() 105 engn = g84_fifo_chan_engine(engine); in g84_fifo_chan_engine_fini() 134 struct nvkm_engine *engine) in g84_fifo_chan_engine_init() argument 137 struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index]; in g84_fifo_chan_engine_init() [all …]
|
D | channv50.c | 32 nv50_fifo_chan_engine_addr(struct nvkm_engine *engine) in nv50_fifo_chan_engine_addr() argument 34 switch (engine->subdev.index) { in nv50_fifo_chan_engine_addr() 47 struct nvkm_engine *engine, bool suspend) in nv50_fifo_chan_engine_fini() argument 51 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in nv50_fifo_chan_engine_fini() 56 offset = nv50_fifo_chan_engine_addr(engine); in nv50_fifo_chan_engine_fini() 103 struct nvkm_engine *engine) in nv50_fifo_chan_engine_init() argument 106 struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index]; in nv50_fifo_chan_engine_init() 110 offset = nv50_fifo_chan_engine_addr(engine); in nv50_fifo_chan_engine_init() 130 struct nvkm_engine *engine) in nv50_fifo_chan_engine_dtor() argument 133 nvkm_gpuobj_del(&chan->engn[engine->subdev.index]); in nv50_fifo_chan_engine_dtor() [all …]
|
D | gf100.c | 38 struct nvkm_device *device = fifo->engine.subdev.device; in gf100_fifo_uevent_init() 45 struct nvkm_device *device = fifo->engine.subdev.device; in gf100_fifo_uevent_fini() 53 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gf100_fifo_runlist_update() 100 struct nvkm_device *device = fifo->base.engine.subdev.device; in gf100_fifo_engine() 120 struct nvkm_device *device = fifo->base.engine.subdev.device; in gf100_fifo_recover_work() 121 struct nvkm_engine *engine; in gf100_fifo_recover_work() local 136 if ((engine = nvkm_device_engine(device, engn))) { in gf100_fifo_recover_work() 137 nvkm_subdev_fini(&engine->subdev, false); in gf100_fifo_recover_work() 138 WARN_ON(nvkm_subdev_init(&engine->subdev)); in gf100_fifo_recover_work() 148 gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine, in gf100_fifo_recover() argument [all …]
|
D | gpfifogf100.c | 35 gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine) in gf100_fifo_gpfifo_engine_addr() argument 37 switch (engine->subdev.index) { in gf100_fifo_gpfifo_engine_addr() 53 struct nvkm_engine *engine, bool suspend) in gf100_fifo_gpfifo_engine_fini() argument 55 const u32 offset = gf100_fifo_gpfifo_engine_addr(engine); in gf100_fifo_gpfifo_engine_fini() 57 struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev; in gf100_fifo_gpfifo_engine_fini() 86 struct nvkm_engine *engine) in gf100_fifo_gpfifo_engine_init() argument 88 const u32 offset = gf100_fifo_gpfifo_engine_addr(engine); in gf100_fifo_gpfifo_engine_init() 93 u64 addr = chan->engn[engine->subdev.index].vma.offset; in gf100_fifo_gpfifo_engine_init() 105 struct nvkm_engine *engine) in gf100_fifo_gpfifo_engine_dtor() argument 108 nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma); in gf100_fifo_gpfifo_engine_dtor() [all …]
|
D | base.c | 164 struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine); in nvkm_fifo_class_new() 177 struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine); in nvkm_fifo_class_get() 194 nvkm_fifo_intr(struct nvkm_engine *engine) in nvkm_fifo_intr() argument 196 struct nvkm_fifo *fifo = nvkm_fifo(engine); in nvkm_fifo_intr() 201 nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend) in nvkm_fifo_fini() argument 203 struct nvkm_fifo *fifo = nvkm_fifo(engine); in nvkm_fifo_fini() 210 nvkm_fifo_oneinit(struct nvkm_engine *engine) in nvkm_fifo_oneinit() argument 212 struct nvkm_fifo *fifo = nvkm_fifo(engine); in nvkm_fifo_oneinit() 219 nvkm_fifo_init(struct nvkm_engine *engine) in nvkm_fifo_init() argument 221 struct nvkm_fifo *fifo = nvkm_fifo(engine); in nvkm_fifo_init() [all …]
|
D | gk104.h | 21 struct gk104_fifo_engn engine[7]; member 38 void gk104_fifo_runlist_update(struct gk104_fifo *, u32 engine); 41 gk104_fifo_engine_subdev(int engine) in gk104_fifo_engine_subdev() argument 43 switch (engine) { in gk104_fifo_engine_subdev()
|
D | dmanv04.c | 38 struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem; in nv04_fifo_dma_object_dtor() 47 struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem; in nv04_fifo_dma_object_ctor() 52 switch (object->engine->subdev.index) { in nv04_fifo_dma_object_ctor() 62 mutex_lock(&chan->fifo->base.engine.subdev.mutex); in nv04_fifo_dma_object_ctor() 65 mutex_unlock(&chan->fifo->base.engine.subdev.mutex); in nv04_fifo_dma_object_ctor() 74 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv04_fifo_dma_fini() 125 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv04_fifo_dma_init() 138 struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; in nv04_fifo_dma_dtor() 168 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv04_fifo_dma_new()
|
D | nv50.c | 32 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv50_fifo_runlist_update_locked() 54 mutex_lock(&fifo->base.engine.subdev.mutex); in nv50_fifo_runlist_update() 56 mutex_unlock(&fifo->base.engine.subdev.mutex); in nv50_fifo_runlist_update() 63 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv50_fifo_oneinit() 79 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv50_fifo_init()
|
D | nv04.c | 52 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv04_fifo_pause() 88 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv04_fifo_start() 114 u32 engine = nvkm_rd32(device, 0x003280); in nv04_fifo_swmthd() local 119 nvkm_wr32(device, 0x003280, (engine &= ~mask)); in nv04_fifo_swmthd() 124 if (!(engine & mask) && sw) in nv04_fifo_swmthd() 137 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in nv04_fifo_cache_error() 188 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in nv04_fifo_dma_pusher() 241 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in nv04_fifo_intr() 300 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv04_fifo_init()
|
D | g84.c | 30 struct nvkm_device *device = fifo->engine.subdev.device; in g84_fifo_uevent_fini() 37 struct nvkm_device *device = fifo->engine.subdev.device; in g84_fifo_uevent_init()
|
D | changk104.h | 10 int engine; member
|
D | priv.h | 3 #define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
|
D | nv17.c | 54 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv17_fifo_init()
|
D | nv40.c | 63 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv40_fifo_init()
|
D | dmanv10.c | 44 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv10_fifo_dma_new()
|
D | dmanv17.c | 44 struct nvkm_device *device = fifo->base.engine.subdev.device; in nv17_fifo_dma_new()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/falcon.o 2 nvkm-y += nvkm/engine/xtensa.o 4 include $(src)/nvkm/engine/bsp/Kbuild 5 include $(src)/nvkm/engine/ce/Kbuild 6 include $(src)/nvkm/engine/cipher/Kbuild 7 include $(src)/nvkm/engine/device/Kbuild 8 include $(src)/nvkm/engine/disp/Kbuild 9 include $(src)/nvkm/engine/dma/Kbuild 10 include $(src)/nvkm/engine/fifo/Kbuild 11 include $(src)/nvkm/engine/gr/Kbuild [all …]
|
D | xtensa.c | 30 struct nvkm_xtensa *xtensa = nvkm_xtensa(oclass->engine); in nvkm_xtensa_oclass_get() 47 return nvkm_gpuobj_new(object->engine->subdev.device, 0x10000, align, in nvkm_xtensa_cclass_bind() 57 nvkm_xtensa_intr(struct nvkm_engine *engine) in nvkm_xtensa_intr() argument 59 struct nvkm_xtensa *xtensa = nvkm_xtensa(engine); in nvkm_xtensa_intr() 60 struct nvkm_subdev *subdev = &xtensa->engine.subdev; in nvkm_xtensa_intr() 79 nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend) in nvkm_xtensa_fini() argument 81 struct nvkm_xtensa *xtensa = nvkm_xtensa(engine); in nvkm_xtensa_fini() 82 struct nvkm_device *device = xtensa->engine.subdev.device; in nvkm_xtensa_fini() 94 nvkm_xtensa_init(struct nvkm_engine *engine) in nvkm_xtensa_init() argument 96 struct nvkm_xtensa *xtensa = nvkm_xtensa(engine); in nvkm_xtensa_init() [all …]
|
D | falcon.c | 31 struct nvkm_falcon *falcon = nvkm_falcon(oclass->engine); in nvkm_falcon_oclass_get() 48 return nvkm_gpuobj_new(object->engine->subdev.device, 256, in nvkm_falcon_cclass_bind() 58 nvkm_falcon_intr(struct nvkm_engine *engine) in nvkm_falcon_intr() argument 60 struct nvkm_falcon *falcon = nvkm_falcon(engine); in nvkm_falcon_intr() 61 struct nvkm_subdev *subdev = &falcon->engine.subdev; in nvkm_falcon_intr() 95 nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend) in nvkm_falcon_fini() argument 97 struct nvkm_falcon *falcon = nvkm_falcon(engine); in nvkm_falcon_fini() 98 struct nvkm_device *device = falcon->engine.subdev.device; in nvkm_falcon_fini() 126 nvkm_falcon_oneinit(struct nvkm_engine *engine) in nvkm_falcon_oneinit() argument 128 struct nvkm_falcon *falcon = nvkm_falcon(engine); in nvkm_falcon_oneinit() [all …]
|
/linux-4.4.14/drivers/crypto/marvell/ |
D | cesa.c | 40 static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine) in mv_cesa_dequeue_req_unlocked() argument 48 engine->req = req; in mv_cesa_dequeue_req_unlocked() 58 ctx->ops->prepare(req, engine); in mv_cesa_dequeue_req_unlocked() 64 struct mv_cesa_engine *engine = priv; in mv_cesa_int() local 73 mask = mv_cesa_get_int_mask(engine); in mv_cesa_int() 74 status = readl(engine->regs + CESA_SA_INT_STATUS); in mv_cesa_int() 83 writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS); in mv_cesa_int() 84 writel(~status, engine->regs + CESA_SA_INT_STATUS); in mv_cesa_int() 87 spin_lock_bh(&engine->lock); in mv_cesa_int() 88 req = engine->req; in mv_cesa_int() [all …]
|
D | tdma.c | 42 struct mv_cesa_engine *engine = dreq->base.engine; in mv_cesa_dma_step() local 44 writel_relaxed(0, engine->regs + CESA_SA_CFG); in mv_cesa_dma_step() 46 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE); in mv_cesa_dma_step() 49 engine->regs + CESA_TDMA_CONTROL); in mv_cesa_dma_step() 53 engine->regs + CESA_SA_CFG); in mv_cesa_dma_step() 55 engine->regs + CESA_TDMA_NEXT_ADDR); in mv_cesa_dma_step() 56 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); in mv_cesa_dma_step() 80 struct mv_cesa_engine *engine) in mv_cesa_dma_prepare() argument 86 tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma); in mv_cesa_dma_prepare() 89 tdma->src = cpu_to_le32(tdma->src + engine->sram_dma); in mv_cesa_dma_prepare() [all …]
|
D | cipher.c | 88 struct mv_cesa_engine *engine = sreq->base.engine; in mv_cesa_ablkcipher_std_step() local 93 engine->sram + CESA_SA_DATA_SRAM_OFFSET, in mv_cesa_ablkcipher_std_step() 101 memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op)); in mv_cesa_ablkcipher_std_step() 104 memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc)); in mv_cesa_ablkcipher_std_step() 107 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); in mv_cesa_ablkcipher_std_step() 108 writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); in mv_cesa_ablkcipher_std_step() 109 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); in mv_cesa_ablkcipher_std_step() 117 struct mv_cesa_engine *engine = sreq->base.engine; in mv_cesa_ablkcipher_std_process() local 121 engine->sram + CESA_SA_DATA_SRAM_OFFSET, in mv_cesa_ablkcipher_std_process() 137 struct mv_cesa_engine *engine = sreq->base.engine; in mv_cesa_ablkcipher_process() local [all …]
|
D | cesa.h | 460 struct mv_cesa_engine *engine); 515 struct mv_cesa_engine *engine; member 642 static inline void mv_cesa_adjust_op(struct mv_cesa_engine *engine, in mv_cesa_adjust_op() argument 645 u32 offset = engine->sram_dma & CESA_SA_SRAM_MSK; in mv_cesa_adjust_op() 676 static inline void mv_cesa_set_int_mask(struct mv_cesa_engine *engine, in mv_cesa_set_int_mask() argument 679 if (int_mask == engine->int_mask) in mv_cesa_set_int_mask() 682 writel_relaxed(int_mask, engine->regs + CESA_SA_INT_MSK); in mv_cesa_set_int_mask() 683 engine->int_mask = int_mask; in mv_cesa_set_int_mask() 686 static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine) in mv_cesa_get_int_mask() argument 688 return engine->int_mask; in mv_cesa_get_int_mask() [all …]
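
mv_cesa_set_int_mask()/mv_cesa_get_int_mask() above cache the last interrupt mask in the engine struct so that writing an unchanged mask costs no MMIO access. A sketch of the idiom; the register offset is hypothetical:

#include <stdint.h>

struct cesa_engine {
	volatile uint32_t *regs;   /* mapped register window (simplified) */
	uint32_t int_mask;         /* last mask actually written */
};

enum { INT_MSK_REG = 0x37 };   /* hypothetical word offset */

static void set_int_mask(struct cesa_engine *engine, uint32_t int_mask)
{
	if (int_mask == engine->int_mask)
		return;                         /* skip the redundant bus write */
	engine->regs[INT_MSK_REG] = int_mask;
	engine->int_mask = int_mask;            /* keep the cache in sync */
}
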
|
D | hash.c | 205 struct mv_cesa_engine *engine = sreq->base.engine; in mv_cesa_ahash_std_step() local 212 memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET, in mv_cesa_ahash_std_step() 225 engine->sram + in mv_cesa_ahash_std_step() 255 engine->sram + in mv_cesa_ahash_std_step() 260 engine->sram + len + in mv_cesa_ahash_std_step() 275 memcpy_toio(engine->sram, op, sizeof(*op)); in mv_cesa_ahash_std_step() 283 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); in mv_cesa_ahash_std_step() 284 writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); in mv_cesa_ahash_std_step() 285 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); in mv_cesa_ahash_std_step() 304 mv_cesa_dma_prepare(dreq, dreq->base.engine); in mv_cesa_ahash_dma_prepare() [all …]
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/dma/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/dma/base.o 2 nvkm-y += nvkm/engine/dma/nv04.o 3 nvkm-y += nvkm/engine/dma/nv50.o 4 nvkm-y += nvkm/engine/dma/gf100.o 5 nvkm-y += nvkm/engine/dma/gf119.o 7 nvkm-y += nvkm/engine/dma/user.o 8 nvkm-y += nvkm/engine/dma/usernv04.o 9 nvkm-y += nvkm/engine/dma/usernv50.o 10 nvkm-y += nvkm/engine/dma/usergf100.o 11 nvkm-y += nvkm/engine/dma/usergf119.o
|
D | base.c | 54 struct nvkm_dma *dma = nvkm_dma(oclass->engine); in nvkm_dma_oclass_new() 95 return nvkm_dma_oclass_new(oclass->engine->subdev.device, in nvkm_dma_oclass_fifo_new() 133 nvkm_dma_dtor(struct nvkm_engine *engine) in nvkm_dma_dtor() argument 135 return nvkm_dma(engine); in nvkm_dma_dtor() 156 0, true, &dma->engine); in nvkm_dma_new_()
|
D | usernv04.c | 45 struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device; in nv04_dmaobj_bind() 84 struct nvkm_device *device = dma->engine.subdev.device; in nv04_dmaobj_new()
|
D | priv.h | 3 #define nvkm_dma(p) container_of((p), struct nvkm_dma, engine)
|
D | usergf119.c | 44 struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device; in gf119_dmaobj_bind()
|
D | usergf100.c | 45 struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device; in gf100_dmaobj_bind()
|
D | user.c | 65 struct nvkm_device *device = dma->engine.subdev.device; in nvkm_dmaobj_ctor()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/pm/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/pm/base.o 2 nvkm-y += nvkm/engine/pm/nv40.o 3 nvkm-y += nvkm/engine/pm/nv50.o 4 nvkm-y += nvkm/engine/pm/g84.o 5 nvkm-y += nvkm/engine/pm/gt200.o 6 nvkm-y += nvkm/engine/pm/gt215.o 7 nvkm-y += nvkm/engine/pm/gf100.o 8 nvkm-y += nvkm/engine/pm/gf108.o 9 nvkm-y += nvkm/engine/pm/gf117.o 10 nvkm-y += nvkm/engine/pm/gk104.o
|
D | gf100.c | 131 struct nvkm_device *device = pm->engine.subdev.device; in gf100_perfctr_init() 149 struct nvkm_device *device = pm->engine.subdev.device; in gf100_perfctr_read() 163 struct nvkm_device *device = pm->engine.subdev.device; in gf100_perfctr_next() 178 struct nvkm_device *device = pm->engine.subdev.device; in gf100_pm_fini()
|
D | nv40.c | 30 struct nvkm_device *device = pm->engine.subdev.device; in nv40_perfctr_init() 47 struct nvkm_device *device = pm->engine.subdev.device; in nv40_perfctr_read() 61 struct nvkm_device *device = pm->engine.subdev.device; in nv40_perfctr_next()
|
D | base.c | 129 struct nvkm_subdev *subdev = &pm->engine.subdev; in nvkm_perfsrc_enable() 168 struct nvkm_subdev *subdev = &pm->engine.subdev; in nvkm_perfsrc_disable() 488 struct nvkm_device *device = pm->engine.subdev.device; in nvkm_perfmon_mthd_query_signal() 629 mutex_lock(&pm->engine.subdev.mutex); in nvkm_perfmon_dtor() 632 mutex_unlock(&pm->engine.subdev.mutex); in nvkm_perfmon_dtor() 665 struct nvkm_pm *pm = nvkm_pm(oclass->engine); in nvkm_pm_oclass_new() 672 mutex_lock(&pm->engine.subdev.mutex); in nvkm_pm_oclass_new() 676 mutex_unlock(&pm->engine.subdev.mutex); in nvkm_pm_oclass_new() 821 nvkm_pm_fini(struct nvkm_engine *engine, bool suspend) in nvkm_pm_fini() argument 823 struct nvkm_pm *pm = nvkm_pm(engine); in nvkm_pm_fini() [all …]
|
D | priv.h | 3 #define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
|
/linux-4.4.14/drivers/gpu/drm/via/ |
D | via_dmablit.c | 209 via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine) in via_fire_dmablit() argument 213 VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0); in via_fire_dmablit() 214 VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0); in via_fire_dmablit() 215 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD | in via_fire_dmablit() 217 VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE); in via_fire_dmablit() 218 VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0); in via_fire_dmablit() 219 VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start); in via_fire_dmablit() 221 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS); in via_fire_dmablit() 222 VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04); in via_fire_dmablit() 290 via_abort_dmablit(struct drm_device *dev, int engine) in via_abort_dmablit() argument [all …]
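
via_fire_dmablit() programs one of several identical DMA engines by adding engine*stride to each register offset, and ends with a read of the CSR, a common way to flush posted writes before returning. A sketch of the banked-register pattern; the offsets and stride below are illustrative, not the real VIA layout:

#include <stdint.h>

enum { DMA_MAR0 = 0x00, DMA_DPR0 = 0x04, DMA_CSR0 = 0x08, BANK_STRIDE = 0x10 };

static void fire_engine(volatile uint8_t *mmio, int engine, uint32_t chain_start)
{
	volatile uint32_t *bank =
		(volatile uint32_t *)(mmio + engine * BANK_STRIDE);

	bank[DMA_MAR0 / 4] = 0;
	bank[DMA_DPR0 / 4] = chain_start;   /* descriptor chain address */
	bank[DMA_CSR0 / 4] = 1;             /* start bit, illustrative */
	(void)bank[DMA_CSR0 / 4];           /* read back to flush posted writes */
}
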
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/ |
D | nv44.c | 24 #define nv44_mpeg(p) container_of((p), struct nv44_mpeg, engine) 34 struct nvkm_engine engine; member 56 int ret = nvkm_gpuobj_new(chan->object.engine->subdev.device, 264 * 4, in nv44_mpeg_chan_bind() 73 struct nvkm_device *device = mpeg->engine.subdev.device; in nv44_mpeg_chan_fini() 89 spin_lock_irqsave(&mpeg->engine.lock, flags); in nv44_mpeg_chan_dtor() 91 spin_unlock_irqrestore(&mpeg->engine.lock, flags); in nv44_mpeg_chan_dtor() 107 struct nv44_mpeg *mpeg = nv44_mpeg(oclass->engine); in nv44_mpeg_chan_new() 118 spin_lock_irqsave(&mpeg->engine.lock, flags); in nv44_mpeg_chan_new() 120 spin_unlock_irqrestore(&mpeg->engine.lock, flags); in nv44_mpeg_chan_new() 143 nv44_mpeg_intr(struct nvkm_engine *engine) in nv44_mpeg_intr() argument [all …]
|
D | nv31.c | 42 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align, in nv31_mpeg_object_bind() 71 spin_lock_irqsave(&mpeg->engine.lock, flags); in nv31_mpeg_chan_dtor() 74 spin_unlock_irqrestore(&mpeg->engine.lock, flags); in nv31_mpeg_chan_dtor() 88 struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine); in nv31_mpeg_chan_new() 100 spin_lock_irqsave(&mpeg->engine.lock, flags); in nv31_mpeg_chan_new() 105 spin_unlock_irqrestore(&mpeg->engine.lock, flags); in nv31_mpeg_chan_new() 114 nv31_mpeg_tile(struct nvkm_engine *engine, int i, struct nvkm_fb_tile *tile) in nv31_mpeg_tile() argument 116 struct nv31_mpeg *mpeg = nv31_mpeg(engine); in nv31_mpeg_tile() 117 struct nvkm_device *device = mpeg->engine.subdev.device; in nv31_mpeg_tile() 166 struct nvkm_device *device = mpeg->engine.subdev.device; in nv31_mpeg_mthd() [all …]
|
D | Kbuild | 1 nvkm-y += nvkm/engine/mpeg/nv31.o 2 nvkm-y += nvkm/engine/mpeg/nv40.o 3 nvkm-y += nvkm/engine/mpeg/nv44.o 4 nvkm-y += nvkm/engine/mpeg/nv50.o 5 nvkm-y += nvkm/engine/mpeg/g84.o
|
D | nv31.h | 3 #define nv31_mpeg(p) container_of((p), struct nv31_mpeg, engine) 9 struct nvkm_engine engine; member
|
D | nv50.c | 39 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 128 * 4, in nv50_mpeg_cclass_bind()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/sw/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/sw/base.o 2 nvkm-y += nvkm/engine/sw/nv04.o 3 nvkm-y += nvkm/engine/sw/nv10.o 4 nvkm-y += nvkm/engine/sw/nv50.o 5 nvkm-y += nvkm/engine/sw/gf100.o 7 nvkm-y += nvkm/engine/sw/chan.o 9 nvkm-y += nvkm/engine/sw/nvsw.o
|
D | base.c | 36 spin_lock_irqsave(&sw->engine.lock, flags); in nvkm_sw_mthd() 45 spin_unlock_irqrestore(&sw->engine.lock, flags); in nvkm_sw_mthd() 61 struct nvkm_sw *sw = nvkm_sw(oclass->engine); in nvkm_sw_oclass_get() 81 struct nvkm_sw *sw = nvkm_sw(oclass->engine); in nvkm_sw_cclass_get() 86 nvkm_sw_dtor(struct nvkm_engine *engine) in nvkm_sw_dtor() argument 88 return nvkm_sw(engine); in nvkm_sw_dtor() 109 return nvkm_engine_ctor(&nvkm_sw, device, index, 0, true, &sw->engine); in nvkm_sw_new_()
|
D | gf100.c | 44 struct nvkm_device *device = sw->engine.subdev.device; in gf100_sw_chan_vblsem_release() 60 struct nvkm_engine *engine = chan->base.object.engine; in gf100_sw_chan_mthd() local 61 struct nvkm_device *device = engine->subdev.device; in gf100_sw_chan_mthd() 109 struct nvkm_disp *disp = sw->engine.subdev.device->disp; in gf100_sw_chan_new()
|
D | nv50.c | 44 struct nvkm_device *device = sw->engine.subdev.device; in nv50_sw_chan_vblsem_release() 65 struct nvkm_engine *engine = chan->base.object.engine; in nv50_sw_chan_mthd() local 66 struct nvkm_device *device = engine->subdev.device; in nv50_sw_chan_mthd() 103 struct nvkm_disp *disp = sw->engine.subdev.device->disp; in nv50_sw_chan_new()
|
D | chan.c | 84 spin_lock_irqsave(&sw->engine.lock, flags); in nvkm_sw_chan_dtor() 86 spin_unlock_irqrestore(&sw->engine.lock, flags); in nvkm_sw_chan_dtor() 106 spin_lock_irqsave(&sw->engine.lock, flags); in nvkm_sw_chan_ctor() 108 spin_unlock_irqrestore(&sw->engine.lock, flags); in nvkm_sw_chan_ctor()
|
D | priv.h | 3 #define nvkm_sw(p) container_of((p), struct nvkm_sw, engine)
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/device/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/device/acpi.o 2 nvkm-y += nvkm/engine/device/base.o 3 nvkm-y += nvkm/engine/device/ctrl.o 4 nvkm-y += nvkm/engine/device/pci.o 5 nvkm-y += nvkm/engine/device/tegra.o 6 nvkm-y += nvkm/engine/device/user.o
|
D | user.c | 269 struct nvkm_engine *engine; in nvkm_udevice_child_get() local 278 if (!(engine = nvkm_device_engine(device, i)) || in nvkm_udevice_child_get() 279 !(engine->func->base.sclass)) in nvkm_udevice_child_get() 281 oclass->engine = engine; in nvkm_udevice_child_get() 283 index -= engine->func->base.sclass(oclass, index, &sclass); in nvkm_udevice_child_get()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/msvld/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/msvld/base.o 2 nvkm-y += nvkm/engine/msvld/g98.o 3 nvkm-y += nvkm/engine/msvld/gt215.o 4 nvkm-y += nvkm/engine/msvld/mcp89.o 5 nvkm-y += nvkm/engine/msvld/gf100.o 6 nvkm-y += nvkm/engine/msvld/gk104.o
|
D | g98.c | 31 struct nvkm_device *device = msvld->engine.subdev.device; in g98_msvld_init()
|
D | gf100.c | 31 struct nvkm_device *device = msvld->engine.subdev.device; in gf100_msvld_init()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/mspdec/base.o 2 nvkm-y += nvkm/engine/mspdec/g98.o 3 nvkm-y += nvkm/engine/mspdec/gt215.o 4 nvkm-y += nvkm/engine/mspdec/gf100.o 5 nvkm-y += nvkm/engine/mspdec/gk104.o
|
D | g98.c | 31 struct nvkm_device *device = mspdec->engine.subdev.device; in g98_mspdec_init()
|
D | gf100.c | 31 struct nvkm_device *device = mspdec->engine.subdev.device; in gf100_mspdec_init()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/ce/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/ce/gt215.o 2 nvkm-y += nvkm/engine/ce/gf100.o 3 nvkm-y += nvkm/engine/ce/gk104.o 4 nvkm-y += nvkm/engine/ce/gm204.o
|
D | gf100.c | 32 struct nvkm_device *device = ce->engine.subdev.device; in gf100_ce_init() 33 const int index = ce->engine.subdev.index - NVKM_ENGINE_CE0; in gf100_ce_init()
|
D | gt215.c | 45 struct nvkm_subdev *subdev = &ce->engine.subdev; in gt215_ce_intr()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/msppp/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/msppp/base.o 2 nvkm-y += nvkm/engine/msppp/g98.o 3 nvkm-y += nvkm/engine/msppp/gt215.o 4 nvkm-y += nvkm/engine/msppp/gf100.o
|
D | gf100.c | 31 struct nvkm_device *device = msppp->engine.subdev.device; in gf100_msppp_init()
|
D | g98.c | 31 struct nvkm_device *device = msppp->engine.subdev.device; in g98_msppp_init()
|
/linux-4.4.14/drivers/gpu/drm/omapdrm/ |
D | omap_dmm_tiler.c | 86 struct refill_engine *engine = txn->engine_handle; in alloc_dma() local 98 BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE); in alloc_dma() 104 static int wait_status(struct refill_engine *engine, uint32_t wait_mask) in wait_status() argument 106 struct dmm *dmm = engine->dmm; in wait_status() 111 r = readl(dmm->base + reg[PAT_STATUS][engine->id]); in wait_status() 128 static void release_engine(struct refill_engine *engine) in release_engine() argument 133 list_add(&engine->idle_node, &omap_dmm->idle_head); in release_engine() 169 struct refill_engine *engine = NULL; in dmm_txn_init() local 183 engine = list_entry(dmm->idle_head.next, struct refill_engine, in dmm_txn_init() 185 list_del(&engine->idle_node); in dmm_txn_init() [all …]
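
omap_dmm_tiler.c hands out refill engines from an idle list: dmm_txn_init() takes the first entry off idle_head, and release_engine() puts it back. A sketch of that free-list pool, with the locking simplified to a single spinlock:

#include <linux/list.h>
#include <linux/spinlock.h>

struct refill_engine { struct list_head idle_node; int id; };

static LIST_HEAD(idle_head);
static DEFINE_SPINLOCK(list_lock);

static struct refill_engine *acquire_engine(void)
{
	struct refill_engine *engine = NULL;

	spin_lock(&list_lock);
	if (!list_empty(&idle_head)) {
		engine = list_first_entry(&idle_head, struct refill_engine,
					  idle_node);
		list_del(&engine->idle_node);   /* now owned by the caller */
	}
	spin_unlock(&list_lock);
	return engine;
}

static void release_engine(struct refill_engine *engine)
{
	spin_lock(&list_lock);
	list_add(&engine->idle_node, &idle_head);   /* back in the pool */
	spin_unlock(&list_lock);
}
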
|
/linux-4.4.14/drivers/dma/ |
D | Kconfig | 2 # DMA engine configuration 21 say N here. This enables DMA engine core and driver debugging. 29 the DMA engine core and drivers. 66 which can provide DMA engine support 110 tristate "BCM2835 DMA engine support" 144 Support the DMA engine found on Intel StrongARM SA-1100 and 145 SA-1110 SoCs. This DMA engine can only be used with on-chip 166 Support for the DMA engine first found in Allwinner A31 SoCs. 187 tristate "Freescale eDMA engine support" 192 Support the Freescale eDMA engine with programmable channel [all …]
|
/linux-4.4.14/drivers/dma/sh/ |
D | usb-dmac.c | 101 struct dma_device engine; member 109 #define to_usb_dmac(d) container_of(d, struct usb_dmac, engine) 742 vchan_init(&uchan->vc, &dmac->engine); in usb_dmac_chan_probe() 772 struct dma_device *engine; in usb_dmac_probe() local 816 INIT_LIST_HEAD(&dmac->engine.channels); in usb_dmac_probe() 835 engine = &dmac->engine; in usb_dmac_probe() 836 dma_cap_set(DMA_SLAVE, engine->cap_mask); in usb_dmac_probe() 838 engine->dev = &pdev->dev; in usb_dmac_probe() 840 engine->src_addr_widths = widths; in usb_dmac_probe() 841 engine->dst_addr_widths = widths; in usb_dmac_probe() [all …]
|
D | rcar-dmac.c | 179 struct dma_device engine; member 189 #define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine) 1567 chan->device = &dmac->engine; in rcar_dmac_chan_probe() 1570 list_add_tail(&chan->device_node, &dmac->engine.channels); in rcar_dmac_chan_probe() 1602 struct dma_device *engine; in rcar_dmac_probe() local 1681 INIT_LIST_HEAD(&dmac->engine.channels); in rcar_dmac_probe() 1701 engine = &dmac->engine; in rcar_dmac_probe() 1702 dma_cap_set(DMA_MEMCPY, engine->cap_mask); in rcar_dmac_probe() 1703 dma_cap_set(DMA_SLAVE, engine->cap_mask); in rcar_dmac_probe() 1705 engine->dev = &pdev->dev; in rcar_dmac_probe() [all …]
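
Both usb-dmac.c and rcar-dmac.c embed a struct dma_device named "engine", fill in its capabilities and callbacks, then register it with the DMA core. A hedged sketch of that provider-side setup against the ~4.4 dmaengine API; the demo_* ops are stubs standing in for a real driver's implementations:

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

static struct dma_async_tx_descriptor *
demo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		   unsigned int sg_len, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	return NULL;   /* a real driver builds and returns a descriptor here */
}

static void demo_issue_pending(struct dma_chan *chan) { }

static enum dma_status demo_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *state)
{
	return DMA_COMPLETE;   /* stub */
}

static int demo_dmac_register(struct platform_device *pdev,
			      struct dma_device *engine)
{
	/* channels must already be on engine->channels (vchan_init() etc.) */
	dma_cap_set(DMA_SLAVE, engine->cap_mask);
	engine->dev = &pdev->dev;
	engine->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	engine->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	engine->device_prep_slave_sg = demo_prep_slave_sg;
	engine->device_issue_pending = demo_issue_pending;
	engine->device_tx_status = demo_tx_status;
	return dma_async_device_register(engine);
}
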
|
D | Kconfig | 2 # DMA engine configuration for sh
|
/linux-4.4.14/Documentation/netlabel/ |
D | cipso_ipv4.txt | 9 The NetLabel CIPSO/IPv4 protocol engine is based on the IETF Commercial IP 17 The CIPSO/IPv4 protocol engine applies the CIPSO IP option to packets by 28 The CIPSO/IPv4 protocol engine validates every CIPSO IP option it finds at the 37 The CIPSO/IPv4 protocol engine contains a mechanism to translate CIPSO security 48 CIPSO/IPv4 protocol engine supports this caching mechanism.
|
D | introduction.txt | 20 engine will handle those tasks as well. Other kernel subsystems should 24 Detailed information about each NetLabel protocol engine can be found in this
|
D | 00-INDEX | 4 - documentation on the IPv4 CIPSO protocol engine.
|
/linux-4.4.14/drivers/net/ethernet/hisilicon/ |
D | Kconfig | 46 is needed by any driver which provides HNS acceleration engine or make 47 use of the engine 55 acceleration engine support. The engine is used in Hisilicon hip05,
|
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/engine/ |
D | xtensa.h | 3 #define nvkm_xtensa(p) container_of((p), struct nvkm_xtensa, engine) 9 struct nvkm_engine engine; member
|
D | disp.h | 3 #define nvkm_disp(p) container_of((p), struct nvkm_disp, engine) 9 struct nvkm_engine engine; member
|
D | falcon.h | 3 #define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine) 9 struct nvkm_engine engine; member
|
D | sw.h | 7 struct nvkm_engine engine; member
|
D | pm.h | 7 struct nvkm_engine engine; member
|
D | dma.h | 22 struct nvkm_engine engine; member
|
D | fifo.h | 34 struct nvkm_engine engine; member
|
D | gr.h | 7 struct nvkm_engine engine; member
|
/linux-4.4.14/Documentation/leds/ |
D | leds-lp5562.txt | 13 All four channels can be also controlled using the engine micro programs. 20 Therefore each channel should be mapped to the engine number. 24 Unlike the LP5521/LP5523/55231, LP5562 has unique feature for the engine mux, 36 the engine selection and loading the firmware. 42 echo "RGB" > /sys/bus/i2c/devices/xxxx/engine_mux # engine mux for RGB
|
D | leds-lp55xx.txt | 66 (1) Select an engine number (1/2/3) 72 select_engine : Select which engine is used for running program 77 It is used for selecting LED output(s) at each engine number. 80 For example, run blinking pattern in engine #1 of LP5521 87 For example, run blinking pattern in engine #3 of LP55231 96 To start blinking patterns in engine #2 and #3 simultaneously, 117 Inside the callback, the selected engine is loaded and memory is updated. 148 run_engine : Control the selected engine
|
D | leds-lp5521.txt | 17 All three channels can be also controlled using the engine micro programs. 27 enginex_load : store program (visible only in engine load mode) 35 To stop the engine:
|
/linux-4.4.14/Documentation/devicetree/bindings/crypto/ |
D | picochip-spacc.txt | 7 - compatible : "picochip,spacc-ipsec" for the IPSEC offload engine 8 "picochip,spacc-l2" for the femtocell layer 2 ciphering engine.
|
D | marvell-cesa.txt | 12 - reg: base physical address of the engine and length of memory mapped 21 - clock-names: "cesaX" and "cesazX", X should be replaced by the crypto engine
|
D | sun4i-ss.txt | 17 crypto: crypto-engine@01c15000 {
|
D | qcom-qce.txt | 1 Qualcomm crypto engine driver
|
D | mv_cesa.txt | 8 - reg: base physical address of the engine and length of memory mapped
|
/linux-4.4.14/drivers/gpu/drm/i915/ |
D | intel_lrc.c | 295 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; in intel_lr_context_descriptor()
361 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; in execlists_update_context()
453 ringbuf = req0->ctx->engine[ring->id].ringbuf; in execlists_context_unqueue()
477 head_req->ctx->engine[ring->id].state; in execlists_check_remove_request()
660 request->ringbuf = request->ctx->engine[request->ring->id].ringbuf; in intel_logical_ring_alloc_request_extras()
875 struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf; in intel_execlists_submission()
963 ctx->engine[ring->id].state; in intel_execlists_retire_requests()
1046 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; in intel_lr_context_pin()
1049 if (rq->ctx->engine[ring->id].pin_count++ == 0) { in intel_lr_context_pin()
1057 rq->ctx->engine[ring->id].pin_count = 0; in intel_lr_context_pin()
[all …]
|
D | i915_guc_submission.c | 402 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf; in guc_init_ctx_desc()
414 obj = ctx->engine[i].state; in guc_init_ctx_desc()
571 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state; in lr_context_update()
947 data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state); in intel_guc_suspend()
972 data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state); in intel_guc_resume()
|
D | intel_uncore.c | 1488 struct intel_engine_cs *engine; in gen8_do_reset() local
1491 for_each_ring(engine, dev_priv, i) { in gen8_do_reset()
1492 I915_WRITE(RING_RESET_CTL(engine->mmio_base), in gen8_do_reset()
1496 RING_RESET_CTL(engine->mmio_base), in gen8_do_reset()
1500 DRM_ERROR("%s: reset request timeout\n", engine->name); in gen8_do_reset()
1508 for_each_ring(engine, dev_priv, i) in gen8_do_reset()
1509 I915_WRITE(RING_RESET_CTL(engine->mmio_base), in gen8_do_reset()
|
/linux-4.4.14/Documentation/crypto/ |
D | async-tx-api.txt | 29 the details of different hardware offload engine implementations. Code
44 operation will be offloaded when an engine is available and carried out
78 resources, under control of the offload engine driver, to be reused as
82 acknowledged by the application before the offload engine driver is allowed to
92 async_<operation> call. Offload engine drivers batch operations to
107 context if the offload engine driver supports interrupts, or it is
178 Primarily this requirement arises from cases where a DMA engine driver
220 drivers/dma/dmaengine.c: offload engine channel management routines
221 drivers/dma/: location for offload engine drivers
|
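The async-tx-api.txt hits above describe submitting operations through async_<operation> calls, acknowledging descriptors, and flushing batched work. A minimal client sketch; the pages and callback are hypothetical, while async_memcpy(), init_async_submit() and async_tx_issue_pending_all() are the interfaces provided by crypto/async_tx/:

#include <linux/async_tx.h>

static void my_copy_done(void *param)
{
	/* may run in IRQ context when the offload engine supports interrupts */
}

static void my_copy(struct page *dst, struct page *src, size_t len)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	/* ASYNC_TX_ACK: no further operations will depend on this descriptor,
	 * so the offload engine driver may recycle it once it completes. */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, my_copy_done, NULL, NULL);
	tx = async_memcpy(dst, src, 0, 0, len, &submit);

	async_tx_issue_pending_all();	/* flush batched operations to the engines */
	(void)tx;
}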
/linux-4.4.14/Documentation/dmaengine/ |
D | client.txt | 73 DMA-engine are:
115 added and the descriptor must then be submitted. Some DMA engine
133 Therefore, it is important that DMA engine drivers drop any
143 added, it must be placed on the DMA engine driver's pending queue.
148 This returns a cookie that can be used to check the progress of DMA engine
149 activity via other DMA engine calls not covered in this document.
196 Not all DMA engine drivers can return reliable information for
197 a running DMA channel. It is recommended that DMA engine users
|
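client.txt above outlines the slave-DMA client flow: request a channel, prepare a descriptor, submit it for a cookie, then issue pending work. A sketch of those steps; the channel name and buffer are hypothetical, and dmaengine_slave_config() is omitted for brevity:

#include <linux/dmaengine.h>

static int my_do_dma(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret = 0;

	/* 1. Request a channel; it is exclusive to this client */
	chan = dma_request_slave_channel(dev, "rx");	/* hypothetical name */
	if (!chan)
		return -ENODEV;

	/* 2. Prepare the descriptor (dmaengine_slave_config() omitted) */
	desc = dmaengine_prep_slave_single(chan, buf, len,
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EIO;
		goto release;
	}

	/* 3. Submit: places it on the driver's pending queue, returns a cookie */
	cookie = dmaengine_submit(desc);

	/* 4. Nothing runs until the pending queue is kicked */
	dma_async_issue_pending(chan);

	/* Later: check progress via the cookie (polling only for the sketch) */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
		cpu_relax();

release:
	dma_release_channel(chan);
	return ret;
}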
/linux-4.4.14/arch/powerpc/platforms/pasemi/ |
D | Kconfig | 24 bool "Force DMA engine to use IOMMU"
28 DMA engine. Otherwise the kernel will use it only when
|
/linux-4.4.14/Documentation/devicetree/bindings/ata/ |
D | cavium-compact-flash.txt | 20 - cavium,dma-engine-handle: Optional, a phandle for the DMA Engine connected
29 cavium,dma-engine-handle = <&dma0>;
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/cipher/ |
D | g84.c | 37 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, in g84_cipher_oclass_bind()
59 return nvkm_gpuobj_new(object->engine->subdev.device, 256, in g84_cipher_cclass_bind()
|
D | Kbuild | 1 nvkm-y += nvkm/engine/cipher/g84.o
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | sdma.h | 533 static inline int __sdma_running(struct sdma_engine *engine) in __sdma_running() argument
535 return engine->state.current_state == sdma_state_s99_running; in __sdma_running()
550 static inline int sdma_running(struct sdma_engine *engine) in sdma_running() argument
555 spin_lock_irqsave(&engine->tail_lock, flags); in sdma_running()
556 ret = __sdma_running(engine); in sdma_running()
557 spin_unlock_irqrestore(&engine->tail_lock, flags); in sdma_running()
|
/linux-4.4.14/Documentation/devicetree/bindings/dma/ |
D | mv-xor.txt | 7 registers for the XOR engine.
11 XOR engine has. Those sub-nodes have the following required
|
/linux-4.4.14/drivers/net/ethernet/packetengines/ |
D | Kconfig | 2 # Packet engine device configuration
14 the questions about packet engine devices. If you say Y, you will
|
/linux-4.4.14/drivers/leds/ |
D | leds-lp5523.c | 427 struct lp55xx_engine *engine = &chip->engines[nr - 1]; in store_engine_mode() local
435 engine->mode = LP55XX_ENGINE_RUN; in store_engine_mode()
439 engine->mode = LP55XX_ENGINE_LOAD; in store_engine_mode()
442 engine->mode = LP55XX_ENGINE_DISABLED; in store_engine_mode()
506 struct lp55xx_engine *engine = &chip->engines[nr - 1]; in lp5523_load_mux() local
528 engine->led_mux = mux; in lp5523_load_mux()
538 struct lp55xx_engine *engine = &chip->engines[nr - 1]; in store_engine_leds() local
550 if (engine->mode != LP55XX_ENGINE_LOAD) in store_engine_leds()
|
D | leds-lp5521.c | 405 struct lp55xx_engine *engine = &chip->engines[nr - 1]; in store_engine_mode() local
413 engine->mode = LP55XX_ENGINE_RUN; in store_engine_mode()
417 engine->mode = LP55XX_ENGINE_LOAD; in store_engine_mode()
420 engine->mode = LP55XX_ENGINE_DISABLED; in store_engine_mode()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/bsp/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/bsp/g84.o
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/sec/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/sec/g98.o
|
D | g98.c | 45 struct nvkm_subdev *subdev = &sec->engine.subdev; in g98_sec_intr()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/vp/ |
D | Kbuild | 1 nvkm-y += nvkm/engine/vp/g84.o
|
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/core/ |
D | object.h | 12 struct nvkm_engine *engine; member
86 struct nvkm_engine *engine; member
|
/linux-4.4.14/drivers/mfd/ |
D | jz4740-adc.c | 98 static inline void jz4740_adc_set_enabled(struct jz4740_adc *adc, int engine, in jz4740_adc_set_enabled() argument
108 val |= BIT(engine); in jz4740_adc_set_enabled()
110 val &= ~BIT(engine); in jz4740_adc_set_enabled()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/ |
D | Kbuild | 3 include $(src)/nvkm/engine/Kbuild
|
/linux-4.4.14/Documentation/devicetree/bindings/clock/ |
D | mvebu-gated-clock.txt | 22 23 crypto CESA (crypto engine)
114 23 crypto CESA engine
135 15 crypto CESA engine
158 17 crypto CESA engine
|
D | ste-u300-syscon-clock.txt | 38 2 8 XGAM graphics engine clock
|
/linux-4.4.14/drivers/dma/hsu/ |
D | Kconfig | 1 # DMA engine configuration for hsu
|
/linux-4.4.14/Documentation/ |
D | Intel-IOMMU.txt | 22 device scope relationships between PCI devices and which DMA engine controls
58 option intel_iommu=igfx_off to turn off the integrated graphics engine.
70 When errors are reported, the DMA engine signals via an interrupt. The fault
|
/linux-4.4.14/Documentation/video4linux/ |
D | README.cpia2 | 78 frame rate achieved by the camera. If the compression engine is able to
81 The compression engine starts out at maximum compression, and will
83 as the compression engine can keep up with the frame rate, after a short time
85 At low alternate settings, the compression engine may not be able to
|
D | videobuf | 197 typical driver read() implementation will start the capture engine, call
198 one of the above functions, then stop the engine before returning (though a
199 smarter implementation might leave the engine running for a little while in
227 still up to the driver to stop the capture engine. The call to
259 stopping the capture engine.
318 the engine and enqueueing buffers are done in separate steps, it's possible
319 for the engine to be running without any buffers available - in the
|
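The videobuf hits above describe the typical read() shape: start the capture engine, call one of the videobuf read helpers, then stop the engine before returning. A sketch assuming a hypothetical device structure and my_start_engine()/my_stop_engine() hooks; videobuf_read_one() is the real helper:

#include <linux/fs.h>
#include <media/videobuf-core.h>

struct my_dev {				/* hypothetical driver state */
	struct videobuf_queue vb_queue;
};

static void my_start_engine(struct my_dev *dev) { /* program capture HW */ }
static void my_stop_engine(struct my_dev *dev)  { /* halt capture HW */ }

static ssize_t my_read(struct file *file, char __user *data,
		       size_t count, loff_t *ppos)
{
	struct my_dev *dev = file->private_data;
	ssize_t ret;

	my_start_engine(dev);		/* start the capture engine */
	ret = videobuf_read_one(&dev->vb_queue, data, count, ppos,
				file->f_flags & O_NONBLOCK);
	my_stop_engine(dev);	/* a smarter driver might defer this a while */
	return ret;
}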
/linux-4.4.14/Documentation/ABI/removed/ |
D | net_dma | 6 that will be offloaded to a DMA copy engine. Removed due to
|
/linux-4.4.14/Documentation/devicetree/bindings/spi/ |
D | sh-msiof.txt | 16 by both the CPU and the DMA engine.
19 DMA engine.
|
/linux-4.4.14/Documentation/fb/ |
D | aty128fb.txt | 55 noaccel - do not use the acceleration engine. This is the default.
56 accel - use the acceleration engine. Not finished.
|
/linux-4.4.14/drivers/gpu/drm/amd/include/ |
D | cgs_common.h | 452 enum cgs_engine engine, int powered);
611 #define cgs_pm_request_engine(dev,request,engine,powered) \ argument
612 CGS_CALL(pm_request_engine,dev,request,engine,powered)
|
/linux-4.4.14/drivers/hsi/controllers/ |
D | Kconfig | 11 an application engine with a cellular modem.
|
/linux-4.4.14/Documentation/devicetree/bindings/mips/cavium/ |
D | dma-engine.txt | 17 dma0: dma-engine@1180000000100 {
|
/linux-4.4.14/drivers/gpu/host1x/ |
D | Kconfig | 7 The Tegra host1x module is the DMA engine for register access to
|
/linux-4.4.14/drivers/dma/dw/ |
D | Kconfig | 2 # DMA engine configuration for dw
|
/linux-4.4.14/Documentation/devicetree/bindings/serial/ |
D | qcom,msm-uart.txt | 4 dma-engine isn't needed. From a software perspective it's mostly compatible
|
/linux-4.4.14/drivers/media/platform/sti/c8sectpfe/ |
D | Kconfig | 20 memdma engine, and HW PID filtering.
|
/linux-4.4.14/drivers/dma/bestcomm/ |
D | Kconfig | 6 tristate "Bestcomm DMA engine support"
|
/linux-4.4.14/drivers/gpu/drm/nouveau/ |
D | nouveau_chan.c | 187 u32 engine, struct nouveau_channel **pchan) in nouveau_channel_ind() argument
215 args.kepler.engine = engine; in nouveau_channel_ind()
|
/linux-4.4.14/Documentation/devicetree/bindings/usb/ |
D | msm-hsusb.txt | 32 "core" Protocol engine clock
34 "alt_core" Protocol engine clock for targets with asynchronous
|
/linux-4.4.14/Documentation/video4linux/cx2341x/ |
D | fw-dma.txt | 2 engine.
8 engine to efficiently transfer large volumes of data between the card and main
|
/linux-4.4.14/Documentation/devicetree/bindings/iio/accel/ |
D | lis302.txt | 52 engine.
54 engine.
|
/linux-4.4.14/Documentation/devicetree/bindings/media/ |
D | renesas,vsp1.txt | 3 The VSP1 is a video processing engine that supports up-/down-scaling, alpha
|