dmm               126 drivers/gpu/drm/omapdrm/omap_dmm_priv.h struct dmm;
dmm               140 drivers/gpu/drm/omapdrm/omap_dmm_priv.h 	struct dmm *dmm;
dmm                43 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static struct dmm *omap_dmm;
dmm                83 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
dmm                85 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	struct dma_device *dma_dev = dmm->wa_dma_chan->device;
dmm                90 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	tx = dma_dev->device_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
dmm                92 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
dmm                98 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		dev_err(dmm->dev, "Failed to do DMA tx_submit\n");
dmm               102 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	dma_async_issue_pending(dmm->wa_dma_chan);
dmm               103 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	status = dma_sync_wait(dmm->wa_dma_chan, cookie);
dmm               105 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		dev_err(dmm->dev, "i878 wa DMA copy failure\n");
dmm               107 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	dmaengine_terminate_all(dmm->wa_dma_chan);
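
The dmm_dma_copy() lines above are the core primitive of the errata i878 workaround: every register access is turned into a single 4-byte memory-to-memory copy carried out by a system DMA channel instead of a CPU access. A minimal sketch of that prepare/submit/issue/wait sequence follows; the helper name my_dma_copy_word is hypothetical, and the channel is assumed to have been requested with the DMA_MEMCPY capability.

#include <linux/device.h>
#include <linux/dmaengine.h>

/*
 * Sketch only, not the driver's exact code: one-word (4-byte) DMA
 * memcpy through a dmaengine channel, mirroring the dmm_dma_copy()
 * sequence listed above.
 */
static int my_dma_copy_word(struct device *dev, struct dma_chan *chan,
			    dma_addr_t src, dma_addr_t dst)
{
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	enum dma_status status;
	dma_cookie_t cookie;
	int ret = 0;

	/* Build a 4-byte memcpy descriptor: one u32 register's worth. */
	tx = dma_dev->device_prep_dma_memcpy(chan, dst, src, 4, 0);
	if (!tx) {
		dev_err(dev, "failed to prepare DMA memcpy\n");
		return -EIO;
	}

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dev, "failed to submit DMA descriptor\n");
		return -EIO;
	}

	/* Start the transfer and poll until the cookie completes. */
	dma_async_issue_pending(chan);
	status = dma_sync_wait(chan, cookie);
	if (status != DMA_COMPLETE)
		ret = -ETIMEDOUT;

	dmaengine_terminate_all(chan);

	return ret;
}
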
dmm               111 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static u32 dmm_read_wa(struct dmm *dmm, u32 reg)
dmm               116 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	src = dmm->phys_base + reg;
dmm               117 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	dst = dmm->wa_dma_handle;
dmm               119 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	r = dmm_dma_copy(dmm, src, dst);
dmm               121 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		dev_err(dmm->dev, "sDMA read transfer timeout\n");
dmm               122 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		return readl(dmm->base + reg);
dmm               131 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	return readl(dmm->wa_dma_data);
dmm               134 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg)
dmm               139 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	writel(val, dmm->wa_dma_data);
dmm               148 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	src = dmm->wa_dma_handle;
dmm               149 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	dst = dmm->phys_base + reg;
dmm               151 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	r = dmm_dma_copy(dmm, src, dst);
dmm               153 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		dev_err(dmm->dev, "sDMA write transfer timeout\n");
dmm               154 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		writel(val, dmm->base + reg);
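
dmm_read_wa() and dmm_write_wa() above bounce every access through a single coherent word: a read DMAs the register into the bounce buffer and then loads it from the CPU side; a write stages the value in the bounce buffer and DMAs it out. Both fall back to plain MMIO if the transfer fails. A sketch built on my_dma_copy_word() above; struct my_dmm_wa is a hypothetical stand-in for the relevant fields of the driver's struct dmm (omap_dmm_priv.h).

#include <linux/io.h>
#include <linux/spinlock.h>

/*
 * Hypothetical container for the workaround state named in the
 * listing. wa_lock and use_workaround are used by the wrappers
 * sketched further down.
 */
struct my_dmm_wa {
	struct device *dev;
	void __iomem *base;		/* CPU mapping of the DMM registers */
	phys_addr_t phys_base;		/* bus address of the same registers */
	u32 *wa_dma_data;		/* one-word bounce buffer, CPU view */
	dma_addr_t wa_dma_handle;	/* same bounce buffer, DMA view */
	struct dma_chan *wa_dma_chan;
	spinlock_t wa_lock;
	bool use_workaround;
};

static u32 my_read_wa(struct my_dmm_wa *wa, u32 reg)
{
	/* DMA the register contents into the bounce word... */
	if (my_dma_copy_word(wa->dev, wa->wa_dma_chan,
			     wa->phys_base + reg, wa->wa_dma_handle)) {
		/* ...or fall back to a direct MMIO read if that fails. */
		dev_err(wa->dev, "sDMA read transfer timeout\n");
		return readl(wa->base + reg);
	}

	/* The buffer is coherent, so a plain load sees the DMA'd value. */
	return *wa->wa_dma_data;
}

static void my_write_wa(struct my_dmm_wa *wa, u32 val, u32 reg)
{
	/* Stage the value in the bounce word, then DMA it to the register. */
	*wa->wa_dma_data = val;
	if (my_dma_copy_word(wa->dev, wa->wa_dma_chan,
			     wa->wa_dma_handle, wa->phys_base + reg)) {
		dev_err(wa->dev, "sDMA write transfer timeout\n");
		writel(val, wa->base + reg);
	}
}
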
dmm               158 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static u32 dmm_read(struct dmm *dmm, u32 reg)
dmm               160 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	if (dmm->dmm_workaround) {
dmm               164 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		spin_lock_irqsave(&dmm->wa_lock, flags);
dmm               165 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		v = dmm_read_wa(dmm, reg);
dmm               166 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		spin_unlock_irqrestore(&dmm->wa_lock, flags);
dmm               170 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		return readl(dmm->base + reg);
dmm               174 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
dmm               176 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	if (dmm->dmm_workaround) {
dmm               179 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		spin_lock_irqsave(&dmm->wa_lock, flags);
dmm               180 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		dmm_write_wa(dmm, val, reg);
dmm               181 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		spin_unlock_irqrestore(&dmm->wa_lock, flags);
dmm               183 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		writel(val, dmm->base + reg);
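
The dmm_read()/dmm_write() wrappers decide per access: with the workaround active, the shared bounce word forces serialization, so the helpers above run under wa_lock with interrupts disabled (the interrupt handler performs register accesses too); without it, the access is an ordinary readl()/writel(). Sketched against the hypothetical struct above:

static u32 my_reg_read(struct my_dmm_wa *wa, u32 reg)
{
	if (wa->use_workaround) {
		unsigned long flags;
		u32 v;

		/* The bounce word is shared state: serialize, and keep
		 * interrupts off since the IRQ path reads registers too. */
		spin_lock_irqsave(&wa->wa_lock, flags);
		v = my_read_wa(wa, reg);
		spin_unlock_irqrestore(&wa->wa_lock, flags);

		return v;
	}

	return readl(wa->base + reg);
}

static void my_reg_write(struct my_dmm_wa *wa, u32 val, u32 reg)
{
	if (wa->use_workaround) {
		unsigned long flags;

		spin_lock_irqsave(&wa->wa_lock, flags);
		my_write_wa(wa, val, reg);
		spin_unlock_irqrestore(&wa->wa_lock, flags);
	} else {
		writel(val, wa->base + reg);
	}
}
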
dmm               187 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static int dmm_workaround_init(struct dmm *dmm)
dmm               191 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	spin_lock_init(&dmm->wa_lock);
dmm               193 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	dmm->wa_dma_data = dma_alloc_coherent(dmm->dev, sizeof(u32),
dmm               194 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 					      &dmm->wa_dma_handle, GFP_KERNEL);
dmm               195 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	if (!dmm->wa_dma_data)
dmm               201 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
dmm               202 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	if (!dmm->wa_dma_chan) {
dmm               203 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
dmm               210 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static void dmm_workaround_uninit(struct dmm *dmm)
dmm               212 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	dma_release_channel(dmm->wa_dma_chan);
dmm               214 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
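
dmm_workaround_init()/dmm_workaround_uninit() own the lifetime of that machinery: one coherent u32 for the bounce buffer plus any memcpy-capable channel, released in reverse order on teardown. (Note the listing frees with a literal 4 where it allocated sizeof(u32); the two are equal, but using sizeof(u32) on both sides is tidier.) A sketch:

#include <linux/dma-mapping.h>

static int my_wa_init(struct my_dmm_wa *wa)
{
	dma_cap_mask_t mask;

	spin_lock_init(&wa->wa_lock);

	/* One coherent word is enough: every transfer is 4 bytes. */
	wa->wa_dma_data = dma_alloc_coherent(wa->dev, sizeof(u32),
					     &wa->wa_dma_handle, GFP_KERNEL);
	if (!wa->wa_dma_data)
		return -ENOMEM;

	/* Any channel that can do memory-to-memory copies will do. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	wa->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!wa->wa_dma_chan) {
		dma_free_coherent(wa->dev, sizeof(u32),
				  wa->wa_dma_data, wa->wa_dma_handle);
		return -ENODEV;
	}

	wa->use_workaround = true;

	return 0;
}

static void my_wa_uninit(struct my_dmm_wa *wa)
{
	/* Tear down in reverse order of my_wa_init(). */
	dma_release_channel(wa->wa_dma_chan);
	dma_free_coherent(wa->dev, sizeof(u32),
			  wa->wa_dma_data, wa->wa_dma_handle);
}
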
dmm               241 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	struct dmm *dmm = engine->dmm;
dmm               246 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
dmm               249 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			dev_err(dmm->dev,
dmm               259 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			dev_err(dmm->dev,
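
The wait_status() lines above poll the engine's PAT_STATUS register until the requested bits are set, bailing out on error bits or after a bounded number of retries. A sketch of that poll loop; the mask and retry parameters are assumptions standing in for the DMM_PATSTATUS_* bits and the fixed retry count defined in omap_dmm_priv.h.

#include <linux/delay.h>
#include <linux/errno.h>

static int my_wait_status(struct my_dmm_wa *wa, u32 status_reg,
			  u32 wait_mask, u32 err_mask, unsigned int retries)
{
	u32 r;

	while (true) {
		r = my_reg_read(wa, status_reg);

		/* Hard failure: the engine reported an error condition. */
		if (r & err_mask) {
			dev_err(wa->dev, "engine error, status 0x%08x\n", r);
			return -EFAULT;
		}

		/* Done: all the bits we were told to wait for are set. */
		if ((r & wait_mask) == wait_mask)
			return 0;

		if (--retries == 0) {
			dev_err(wa->dev, "timeout, status 0x%08x\n", r);
			return -ETIMEDOUT;
		}

		udelay(1);
	}
}
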
dmm               285 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	struct dmm *dmm = arg;
dmm               286 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	u32 status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
dmm               290 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	dmm_write(dmm, status, DMM_PAT_IRQSTATUS);
dmm               292 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	for (i = 0; i < dmm->num_engines; i++) {
dmm               294 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			dev_err(dmm->dev,
dmm               299 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			if (dmm->engines[i].async)
dmm               300 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 				release_engine(&dmm->engines[i]);
dmm               302 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			complete(&dmm->engines[i].compl);
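
The interrupt handler follows the usual ack-then-dispatch shape: read DMM_PAT_IRQSTATUS, write the same value back (the bits are write-1-to-clear), then walk the refill engines, each of which owns one byte of the status word. Engines used asynchronously are recycled on the spot; synchronous submitters are woken through their completion. A hedged reconstruction: the per-byte error/done bit tests are simplified, my_release_engine() stands in for the driver's release_engine(), and DMM_PAT_IRQSTATUS is assumed from omap_dmm_priv.h.

#include <linux/completion.h>
#include <linux/interrupt.h>

/* Hypothetical trimmed-down types; the real ones are in omap_dmm_priv.h. */
struct my_engine {
	bool async;			/* nobody waits; recycle on IRQ */
	struct completion compl;
};

struct my_dmm {
	struct my_dmm_wa wa;
	int num_engines;
	struct my_engine *engines;
};

static void my_release_engine(struct my_engine *engine)
{
	/* In the driver, release_engine() returns the engine to the
	 * idle list and wakes anyone waiting for a free engine. */
}

static irqreturn_t my_dmm_irq_handler(int irq, void *arg)
{
	struct my_dmm *dmm = arg;
	u32 status = my_reg_read(&dmm->wa, DMM_PAT_IRQSTATUS);
	int i;

	/* The status bits are write-1-to-clear: ack everything we read. */
	my_reg_write(&dmm->wa, status, DMM_PAT_IRQSTATUS);

	for (i = 0; i < dmm->num_engines; i++) {
		/* Each refill engine owns one byte of the status word. */
		if (status & 0xff) {
			if (dmm->engines[i].async)
				my_release_engine(&dmm->engines[i]);

			complete(&dmm->engines[i].compl);
		}

		status >>= 8;
	}

	return IRQ_HANDLED;
}
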
dmm               314 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
dmm               330 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	if (!list_empty(&dmm->idle_head)) {
dmm               331 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		engine = list_entry(dmm->idle_head.next, struct refill_engine,
dmm               389 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
dmm               404 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	struct dmm *dmm = engine->dmm;
dmm               407 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		dev_err(engine->dmm->dev, "need at least one txn\n");
dmm               426 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
dmm               442 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);
dmm               447 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			dev_err(dmm->dev, "timed out waiting for done\n");
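
Finally, the dmm_txn_* lines show the submission path: dmm_txn_init() takes a refill engine off idle_head, the refill loop points each PAT descriptor at page_to_phys(pages[n]) or the shared dummy page, and dmm_txn_commit() clears the engine's PAT_DESCR register, loads it with the DMA address of the first descriptor, and (for synchronous callers) blocks on the engine's completion. A hedged sketch of the commit step: the descriptor register offset is passed in, engine->compl is assumed initialized with init_completion() at probe time, and the 100 ms timeout is illustrative.

#include <linux/jiffies.h>

static int my_txn_commit(struct my_dmm *dmm, struct my_engine *engine,
			 u32 pat_descr_reg, dma_addr_t refill_pa, bool wait)
{
	engine->async = !wait;
	reinit_completion(&engine->compl);

	/* Clear the descriptor register, then arm the engine with the
	 * DMA address of the first refill descriptor (the register is
	 * 32 bits wide, hence the narrowing). */
	my_reg_write(&dmm->wa, 0x0, pat_descr_reg);
	my_reg_write(&dmm->wa, (u32)refill_pa, pat_descr_reg);

	if (wait &&
	    !wait_for_completion_timeout(&engine->compl,
					 msecs_to_jiffies(100))) {
		dev_err(dmm->wa.dev, "timed out waiting for done\n");
		return -ETIMEDOUT;
	}

	return 0;
}
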
dmm               907 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		omap_dmm->engines[i].dmm = omap_dmm;