/*
 * Copyright 2015 Linaro.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"zx-dma"
#define DMA_ALIGN		4
#define DMA_MAX_SIZE		(0x10000 - PAGE_SIZE)
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)

#define REG_ZX_SRC_ADDR			0x00
#define REG_ZX_DST_ADDR			0x04
#define REG_ZX_TX_X_COUNT		0x08
#define REG_ZX_TX_ZY_COUNT		0x0c
#define REG_ZX_SRC_ZY_STEP		0x10
#define REG_ZX_DST_ZY_STEP		0x14
#define REG_ZX_LLI_ADDR			0x1c
#define REG_ZX_CTRL			0x20
#define REG_ZX_TC_IRQ			0x800
#define REG_ZX_SRC_ERR_IRQ		0x804
#define REG_ZX_DST_ERR_IRQ		0x808
#define REG_ZX_CFG_ERR_IRQ		0x80c
#define REG_ZX_TC_IRQ_RAW		0x810
#define REG_ZX_SRC_ERR_IRQ_RAW		0x814
#define REG_ZX_DST_ERR_IRQ_RAW		0x818
#define REG_ZX_CFG_ERR_IRQ_RAW		0x81c
#define REG_ZX_STATUS			0x820
#define REG_ZX_DMA_GRP_PRIO		0x824
#define REG_ZX_DMA_ARB			0x828

#define ZX_FORCE_CLOSE			BIT(31)
#define ZX_DST_BURST_WIDTH(x)		(((x) & 0x7) << 13)
#define ZX_MAX_BURST_LEN		16
#define ZX_SRC_BURST_LEN(x)		(((x) & 0xf) << 9)
#define ZX_SRC_BURST_WIDTH(x)		(((x) & 0x7) << 6)
#define ZX_IRQ_ENABLE_ALL		(3 << 4)
#define ZX_DST_FIFO_MODE		BIT(3)
#define ZX_SRC_FIFO_MODE		BIT(2)
#define ZX_SOFT_REQ			BIT(1)
#define ZX_CH_ENABLE			BIT(0)

#define ZX_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum zx_dma_burst_width {
	ZX_DMA_WIDTH_8BIT	= 0,
	ZX_DMA_WIDTH_16BIT	= 1,
	ZX_DMA_WIDTH_32BIT	= 2,
	ZX_DMA_WIDTH_64BIT	= 3,
};

struct zx_desc_hw {
	u32 saddr;
	u32 daddr;
	u32 src_x;
	u32 src_zy;
	u32 src_zy_step;
	u32 dst_zy_step;
	u32 reserved1;
	u32 lli;
	u32 ctr;
	u32 reserved[7]; /* pad to the size of the hardware register region */
} __aligned(32);

struct zx_dma_desc_sw {
	struct virt_dma_desc	vd;
	dma_addr_t		desc_hw_lli;
	size_t			desc_num;
	size_t			size;
	struct zx_desc_hw	*desc_hw;
};

struct zx_dma_phy;

struct zx_dma_chan {
	struct dma_slave_config slave_cfg;
	int			id; /* Request phy chan id */
	u32			ccfg;
	u32			cyclic;
	struct virt_dma_chan	vc;
	struct zx_dma_phy	*phy;
	struct list_head	node;
	dma_addr_t		dev_addr;
	enum dma_status		status;
};

struct zx_dma_phy {
	u32			idx;
	void __iomem		*base;
	struct zx_dma_chan	*vchan;
	struct zx_dma_desc_sw	*ds_run;
	struct zx_dma_desc_sw	*ds_done;
};

struct zx_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock; /* lock for ch and phy */
	struct list_head	chan_pending;
	struct zx_dma_phy	*phy;
	struct zx_dma_chan	*chans;
	struct clk		*clk;
	struct dma_pool		*pool;
	u32			dma_channels;
	u32			dma_requests;
	int			irq;
};

#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)

static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
{
	return container_of(chan, struct zx_dma_chan, vc.chan);
}

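/*
 * Force-close a physical channel and clear any raw interrupt status it
 * has latched, so the pchan starts from a clean state when it is reused.
 */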
static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
{
	u32 val = 0;

	val = readl_relaxed(phy->base + REG_ZX_CTRL);
	val &= ~ZX_CH_ENABLE;
	val |= ZX_FORCE_CLOSE;
	writel_relaxed(val, phy->base + REG_ZX_CTRL);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
	writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
	writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
	writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
}

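/*
 * Program the physical channel registers from the first hardware LLI of
 * a descriptor. The 2D (ZY) registers are cleared since this driver only
 * issues linear transfers; REG_ZX_CTRL is written last, and hw->ctr
 * carries ZX_CH_ENABLE, which enables the channel.
 */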
static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
{
	writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
	writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
	writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
	writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
	writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
	writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
	writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
	writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
}

static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
{
	return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
}

static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
{
	return readl_relaxed(d->base + REG_ZX_STATUS);
}

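/* Reset controller-global state: flat arbitration, no pending irq status. */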
static void zx_dma_init_state(struct zx_dma_dev *d)
{
	/* set the same arbitration priority for all channels */
	writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
	/* clear all pending irq status */
	writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
	writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
	writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
	writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
}

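/*
 * Start the next issued descriptor on the channel's physical channel.
 * Must be called with c->vc.lock held. Returns -EAGAIN if no pchan is
 * assigned, the pchan is still busy, or there is nothing to issue.
 */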
static int zx_dma_start_txd(struct zx_dma_chan *c)
{
	struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
		return -EAGAIN;

	if (vd) {
		struct zx_dma_desc_sw *ds =
			container_of(vd, struct zx_dma_desc_sw, vd);
		/*
		 * Fetch and remove the request from vc->desc_issued,
		 * so that vc->desc_issued only contains pending descriptors.
		 */
		list_del(&ds->vd.node);
		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		zx_dma_set_desc(c->phy, ds->desc_hw);
		return 0;
	}
	c->phy->ds_done = NULL;
	c->phy->ds_run = NULL;
	return -EAGAIN;
}

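/*
 * Core scheduling step: release physical channels whose virtual channel
 * has nothing left to run, then bind pending virtual channels to the
 * pchan matching their request id and start their first descriptor.
 */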
static void zx_dma_task(struct zx_dma_dev *d)
{
	struct zx_dma_phy *p;
	struct zx_dma_chan *c, *cn;
	unsigned int pch, pch_alloc = 0;
	unsigned long flags;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node) {
		spin_lock_irqsave(&c->vc.lock, flags);
		p = c->phy;
		if (p && p->ds_done && zx_dma_start_txd(c)) {
			/* No current txd associated with this channel */
			dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
			/* Mark this channel free */
			c->phy = NULL;
			p->vchan = NULL;
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irqsave(&d->lock, flags);
	while (!list_empty(&d->chan_pending)) {
		c = list_first_entry(&d->chan_pending,
				     struct zx_dma_chan, node);
		p = &d->phy[c->id];
		if (!p->vchan) {
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << c->id;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
		} else {
			dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
		}
	}
	spin_unlock_irqrestore(&d->lock, flags);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irqsave(&c->vc.lock, flags);
				zx_dma_start_txd(c);
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
		}
	}
}

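/*
 * Interrupt handler: complete (or, for cyclic transfers, advance) the
 * descriptor on every pchan flagged in the transfer-complete status,
 * warn about any error status, ack all handled bits in the raw status
 * registers, and reschedule if a descriptor finished.
 */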
static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
{
	struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
	struct zx_dma_phy *p;
	struct zx_dma_chan *c;
	u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
	u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
	u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
	u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
	u32 i, irq_chan = 0, task = 0;

	while (tc) {
		i = __ffs(tc);
		tc &= ~BIT(i);
		p = &d->phy[i];
		c = p->vchan;
		if (c) {
			unsigned long flags;

			spin_lock_irqsave(&c->vc.lock, flags);
			if (c->cyclic) {
				vchan_cyclic_callback(&p->ds_run->vd);
			} else {
				vchan_cookie_complete(&p->ds_run->vd);
				p->ds_done = p->ds_run;
				task = 1;
			}
			spin_unlock_irqrestore(&c->vc.lock, flags);
			irq_chan |= BIT(i);
		}
	}

	if (serr || derr || cfg)
		dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
			 serr, derr, cfg);

	writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
	writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
	writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
	writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);

	if (task)
		zx_dma_task(d);
	return IRQ_HANDLED;
}

static void zx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}

static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *state)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct zx_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = 0;
		clli = zx_dma_get_curr_lli(p);
		index = (clli - ds->desc_hw_lli) / sizeof(struct zx_desc_hw);
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].src_x;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

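/*
 * Move cookied descriptors to the issued list and, if the channel has no
 * pchan yet, queue it on d->chan_pending so zx_dma_task() can bind one.
 */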
static void zx_dma_issue_pending(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	unsigned long flags;
	int issue = 0;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy && list_empty(&c->node)) {
			/* if new channel, add chan_pending */
			list_add_tail(&c->node, &d->chan_pending);
			issue = 1;
			dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
		}
		spin_unlock(&d->lock);
	} else {
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	if (issue)
		zx_dma_task(d);
}

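/*
 * Fill one hardware LLI. Entries are chained through their DMA address
 * unless this is the last LLI of the descriptor; the caller terminates
 * or loops the chain itself.
 */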
static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
			     dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if ((num + 1) < ds->desc_num)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct zx_desc_hw);
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].src_x = len;
	ds->desc_hw[num].ctr = ccfg;
}

static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
						     struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceeds max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
	ds->desc_num = num;
	return ds;
}

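/*
 * Map a dmaengine bus width (in bytes: 1/2/4/8) to the controller's
 * burst-width encoding (0..3); anything else falls back to 32 bit.
 */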
static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(width) - 1;
	default:
		return ZX_DMA_WIDTH_32BIT;
	}
}

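/*
 * Build the channel control word (c->ccfg) for the given direction from
 * the cached slave config: burst widths, a burst length clamped to
 * ZX_MAX_BURST_LEN, FIFO mode on the device side, and a soft request
 * for memcpy.
 */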
static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
{
	struct dma_slave_config *cfg = &c->slave_cfg;
	enum zx_dma_burst_width src_width;
	enum zx_dma_burst_width dst_width;
	u32 maxburst = 0;

	switch (dir) {
	case DMA_MEM_TO_MEM:
		c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
			| ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
			| ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
			| ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
		break;
	case DMA_MEM_TO_DEV:
		c->dev_addr = cfg->dst_addr;
		/* The dst len is calculated from the src width, len and
		 * dst width, and must not exceed the maximum transfer
		 * length. A trailing single transaction that does not
		 * fill a full burst also requires identical src/dst data
		 * widths.
		 */
		dst_width = zx_dma_burst_width(cfg->dst_addr_width);
		maxburst = cfg->dst_maxburst;
		maxburst = maxburst < ZX_MAX_BURST_LEN ?
				maxburst : ZX_MAX_BURST_LEN;
		c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
			| ZX_SRC_BURST_LEN(maxburst - 1)
			| ZX_SRC_BURST_WIDTH(dst_width)
			| ZX_DST_BURST_WIDTH(dst_width);
		break;
	case DMA_DEV_TO_MEM:
		c->dev_addr = cfg->src_addr;
		src_width = zx_dma_burst_width(cfg->src_addr_width);
		maxburst = cfg->src_maxburst;
		maxburst = maxburst < ZX_MAX_BURST_LEN ?
				maxburst : ZX_MAX_BURST_LEN;
		c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
			| ZX_SRC_BURST_LEN(maxburst - 1)
			| ZX_SRC_BURST_WIDTH(src_width)
			| ZX_DST_BURST_WIDTH(src_width);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

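/*
 * Prepare a memcpy transfer, split into DMA_MAX_SIZE sized LLIs; only
 * the last LLI raises an interrupt.
 */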
static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	if (zx_pre_config(c, DMA_MEM_TO_MEM))
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = zx_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	ds->size = len;
	num = 0;

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		src += copy;
		dst += copy;
		len -= copy;
	} while (len);

	c->cyclic = 0;
	ds->desc_hw[num - 1].lli = 0;	/* end of link */
	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

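/*
 * Prepare a slave scatter-gather transfer. Each sg entry may expand into
 * several LLIs if it exceeds DMA_MAX_SIZE; the device side always uses
 * the FIFO address from the slave config.
 */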
static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (!sgl)
		return NULL;

	if (zx_pre_config(c, dir))
		return NULL;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = zx_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 0;
	num = 0;
	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num - 1].lli = 0;	/* end of link */
	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

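/*
 * Prepare a cyclic transfer: one interrupting LLI per period, with the
 * last LLI linked back to the first so the ring runs until terminated.
 */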
static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	dma_addr_t src = 0, dst = 0;
	int num_periods = buf_len / period_len;
	int buf = 0, num = 0;

	if (period_len > DMA_MAX_SIZE) {
		dev_err(chan->device->dev, "maximum period size exceeded\n");
		return NULL;
	}

	if (zx_pre_config(c, dir))
		return NULL;

	ds = zx_alloc_desc_resource(num_periods, chan);
	if (!ds)
		return NULL;
	c->cyclic = 1;

	while (buf < buf_len) {
		if (dir == DMA_MEM_TO_DEV) {
			src = dma_addr;
			dst = c->dev_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = c->dev_addr;
			dst = dma_addr;
		}
		zx_dma_fill_desc(ds, dst, src, period_len, num++,
				 c->ccfg | ZX_IRQ_ENABLE_ALL);
		dma_addr += period_len;
		buf += period_len;
	}

	ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
	ds->size = buf_len;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static int zx_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct zx_dma_chan *c = to_zx_chan(chan);

	if (!cfg)
		return -EINVAL;

	memcpy(&c->slave_cfg, cfg, sizeof(*cfg));

	return 0;
}

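/*
 * Abort everything on the channel: unqueue it from the scheduler,
 * force-close its pchan if one is bound, and free all queued
 * descriptors.
 */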
static int zx_dma_terminate_all(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	struct zx_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		zx_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		p->ds_run = NULL;
		p->ds_done = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int zx_dma_transfer_pause(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	u32 val = 0;

	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
	val &= ~ZX_CH_ENABLE;
	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);

	return 0;
}

static int zx_dma_transfer_resume(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	u32 val = 0;

	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
	val |= ZX_CH_ENABLE;
	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);

	return 0;
}

static void zx_dma_free_desc(struct virt_dma_desc *vd)
{
	struct zx_dma_desc_sw *ds =
		container_of(vd, struct zx_dma_desc_sw, vd);
	struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}

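/*
 * Hypothetical device-tree usage sketch (node and property values other
 * than the compatible string are illustrative, not taken from a binding
 * document):
 *
 *	dma: dma-controller@09c00000 {
 *		compatible = "zte,zx296702-dma";
 *		...
 *		#dma-cells = <1>;
 *	};
 *
 * Clients request a channel with a single cell holding the request line,
 * which zx_of_dma_simple_xlate() below maps onto a virtual channel.
 */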
static const struct of_device_id zx6702_dma_dt_ids[] = {
	{ .compatible = "zte,zx296702-dma", },
	{}
};
MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);

static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
					       struct of_dma *ofdma)
{
	struct zx_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];
	struct dma_chan *chan;
	struct zx_dma_chan *c;

	if (request >= d->dma_requests)
		return NULL;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan) {
		dev_err(d->slave.dev, "failed to get a channel in %s\n",
			__func__);
		return NULL;
	}
	c = to_zx_chan(chan);
	c->id = request;
	dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
		 c->id, &c->vc);
	return chan;
}

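/*
 * Probe: map the registers, read channel/request counts from the device
 * tree, create the LLI pool, then register the dmaengine device and the
 * OF translation hook. pchan register blocks sit at 0x40-byte strides
 * from the controller base.
 */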
static int zx_dma_probe(struct platform_device *op)
{
	struct zx_dma_dev *d;
	struct resource *iores;
	int i, ret = 0;

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_property_read_u32(op->dev.of_node,
			     "dma-channels", &d->dma_channels);
	of_property_read_u32(op->dev.of_node,
			     "dma-requests", &d->dma_requests);
	if (!d->dma_requests || !d->dma_channels)
		return -EINVAL;

	d->clk = devm_clk_get(&op->dev, NULL);
	if (IS_ERR(d->clk)) {
		dev_err(&op->dev, "no dma clk\n");
		return PTR_ERR(d->clk);
	}

	d->irq = platform_get_irq(op, 0);
	ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
			       0, DRIVER_NAME, d);
	if (ret)
		return ret;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
			LLI_BLOCK_SIZE, 32, 0);
	if (!d->pool)
		return -ENOMEM;

	/* init phy channel */
	d->phy = devm_kzalloc(&op->dev,
		d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL);
	if (!d->phy)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct zx_dma_phy *p = &d->phy[i];

		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
	d->slave.device_tx_status = zx_dma_tx_status;
	d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
	d->slave.device_issue_pending = zx_dma_issue_pending;
	d->slave.device_config = zx_dma_config;
	d->slave.device_terminate_all = zx_dma_terminate_all;
	d->slave.device_pause = zx_dma_transfer_pause;
	d->slave.device_resume = zx_dma_transfer_resume;
	d->slave.copy_align = DMA_ALIGN;
	d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
	d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
	d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
			| BIT(DMA_DEV_TO_MEM);
	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	/* init virtual channel */
	d->chans = devm_kzalloc(&op->dev,
		d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL);
	if (!d->chans)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct zx_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = zx_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		goto zx_dma_out;
	}

	zx_dma_init_state(d);

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	platform_set_drvdata(op, d);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto clk_dis;

	ret = of_dma_controller_register(op->dev.of_node,
					 zx_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	dev_info(&op->dev, "initialized\n");
	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
clk_dis:
	clk_disable_unprepare(d->clk);
zx_dma_out:
	return ret;
}

static int zx_dma_remove(struct platform_device *op)
{
	struct zx_dma_chan *c, *cn;
	struct zx_dma_dev *d = platform_get_drvdata(op);

	/* explicitly free the irq */
	devm_free_irq(&op->dev, d->irq, d);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free(op->dev.of_node);

	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
	}
	clk_disable_unprepare(d->clk);
	dmam_pool_destroy(d->pool);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int zx_dma_suspend_dev(struct device *dev)
{
	struct zx_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	stat = zx_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			 "chan stat 0x%x still running, refusing to suspend\n",
			 stat);
		return -EBUSY;
	}
	clk_disable_unprepare(d->clk);
	return 0;
}

static int zx_dma_resume_dev(struct device *dev)
{
	struct zx_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	zx_dma_init_state(d);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);

static struct platform_driver zx_pdma_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &zx_dma_pmops,
		.of_match_table = zx6702_dma_dt_ids,
	},
	.probe		= zx_dma_probe,
	.remove		= zx_dma_remove,
};

module_platform_driver(zx_pdma_driver);

MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
MODULE_LICENSE("GPL v2");