/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 *   Derived in part from arch/arm/mach-sa1100/dma.c,
 *   Copyright (C) 2000, 2001 by Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sa11x0-dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

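/*
 * The SA-11x0 DMA controller provides six physical channels (NR_PHY_CHAN),
 * each occupying a DMA_SIZE-sized register window.  Every channel is
 * double-buffered: buffers A and B have their own start address
 * (DBSA/DBSB) and transfer count (DBTA/DBTB) registers, and the DCSR
 * status bits (STRTA/STRTB, DONEA/DONEB, BIU) track which buffer the
 * hardware is currently working on.  This driver multiplexes virtual
 * DMA channels (one per DDAR device setting) onto these six physical
 * channels.
 */
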
#define NR_PHY_CHAN	6
#define DMA_ALIGN	3
#define DMA_MAX_SIZE	0x1fff
#define DMA_CHUNK_SIZE	0x1000

#define DMA_DDAR	0x00
#define DMA_DCSR_S	0x04
#define DMA_DCSR_C	0x08
#define DMA_DCSR_R	0x0c
#define DMA_DBSA	0x10
#define DMA_DBTA	0x14
#define DMA_DBSB	0x18
#define DMA_DBTB	0x1c
#define DMA_SIZE	0x20

#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)

#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)

struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};

struct sa11x0_dma_desc {
	struct virt_dma_desc	vd;

	u32			ddar;
	size_t			size;
	unsigned		period;
	bool			cyclic;

	unsigned		sglen;
	struct sa11x0_dma_sg	sg[];
};

struct sa11x0_dma_phy;

struct sa11x0_dma_chan {
	struct virt_dma_chan	vc;

	/* protected by c->vc.lock */
	struct sa11x0_dma_phy	*phy;
	enum dma_status		status;

	/* protected by d->lock */
	struct list_head	node;

	u32			ddar;
	const char		*name;
};

struct sa11x0_dma_phy {
	void __iomem		*base;
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;

	/* protected by c->vc.lock */
	unsigned		sg_load;
	struct sa11x0_dma_desc	*txd_load;
	unsigned		sg_done;
	struct sa11x0_dma_desc	*txd_done;
	u32			dbs[2];
	u32			dbt[2];
	u32			dcsr;
};

struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;
	struct tasklet_struct	task;
	struct list_head	chan_pending;
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};

static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}

static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}

static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->vd.node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}

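/*
 * Load the next scatterlist entry of the current descriptor into a free
 * hardware buffer (A or B).  When the current descriptor is exhausted, a
 * compatible descriptor (same DDAR) may be chained in without stopping
 * the channel; cyclic descriptors simply wrap back to their first entry.
 */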
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		if (!txd->cyclic) {
			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

			/*
			 * We have reached the end of the current descriptor.
			 * Peek at the next descriptor, and if compatible with
			 * the current, start processing it.
			 */
			if (txn && txn->ddar == txd->ddar) {
				txd = txn;
				sa11x0_dma_start_desc(p, txn);
			} else {
				p->txd_load = NULL;
				return;
			}
		} else {
			/* Cyclic: reset back to beginning */
			p->sg_load = 0;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/* Select buffer to load according to channel status */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}

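/*
 * Handle completion of one hardware buffer.  Once every scatterlist
 * entry of the descriptor has completed, either signal cookie completion
 * (and take over the descriptor that sa11x0_dma_start_sg() may already
 * have begun loading) or, for cyclic transfers, raise the period
 * callback and wrap around.  In all cases try to keep the hardware fed
 * by loading the next buffer.
 */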
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		if (!txd->cyclic) {
			vchan_cookie_complete(&txd->vd);

			p->sg_done = 0;
			p->txd_done = p->txd_load;

			if (!p->txd_done)
				tasklet_schedule(&p->dev->task);
		} else {
			if ((p->sg_done % txd->period) == 0)
				vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset back to beginning */
			p->sg_done = 0;
		}
	}

	sa11x0_dma_start_sg(p, c);
}

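/*
 * Per-physical-channel interrupt handler.  Error and buffer-done status
 * bits are acknowledged through the DCSR clear register; completion
 * handling is done under the virtual channel lock, re-checking that the
 * vchan is still bound to this pchan.
 */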
static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	return IRQ_HANDLED;
}

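/*
 * Begin the next issued descriptor on this channel's physical channel.
 * Called with the channel lock held and the physical channel idle; the
 * run/start bits are cleared before DDAR is rewritten, and then both
 * hardware buffers are primed.
 */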
static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
				      (DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			       p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}

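/*
 * Channel scheduler tasklet.  First, any physical channel whose current
 * descriptor has finished is released back to the free pool; then
 * pending virtual channels are bound to free physical channels and
 * started.  The per-vchan locks and d->lock are taken separately, never
 * nested, across the three phases.
 */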
static void sa11x0_dma_tasklet(unsigned long arg)
{
	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && !p->txd_done) {
			sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->vc.lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->vc.lock);
		}
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}

static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
}

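/*
 * Return the current hardware transfer position.  The DCSR_BIU bit,
 * combined with the start bits, determines whether the buffer A or
 * buffer B address register reflects the in-progress transfer.
 */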
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}

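/*
 * Report the status and residue of a descriptor.  If the cookie is
 * still on the issued list, the residue is the descriptor's full size;
 * if it is active on a physical channel, the residue is the remainder
 * of the current scatterlist entry (measured from the hardware
 * position) plus all entries not yet started.
 */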
static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		ret = c->status;
		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);

	return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		if (!c->phy) {
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
			spin_unlock(&d->lock);
		}
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

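/*
 * Prepare a slave scatter-gather transfer.  Hardware transfers are
 * limited to DMA_MAX_SIZE bytes, so oversized entries are split into
 * roughly equal chunks, each rounded down to the DMA_ALIGN boundary;
 * the first pass over the scatterlist counts how many extra sg slots
 * this splitting requires so the descriptor can be sized correctly.
 */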
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
				&c->vc, &addr);
			return NULL;
		}
	}

	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;
			j++;
		} while (len);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
		&c->vc, &txd->vd, txd->size, txd->sglen);

	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}

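/*
 * Prepare a cyclic (e.g. audio) transfer.  Each period is split into
 * sgperiod hardware-sized chunks; txd->period stores the chunk count
 * per period so that sa11x0_dma_complete() can raise the period
 * callback at the right points as sg_done advances.
 */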
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	unsigned i, j, k, sglen, sgperiod;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	sglen = size * sgperiod / period;

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	for (i = k = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;

			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[k].addr = addr;
			txd->sg[k].len = tlen;
			addr += tlen;
			len -= tlen;
		}

		WARN_ON(len != 0);
	}

	WARN_ON(k != sglen);

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = sglen;
	txd->cyclic = 1;
	txd->period = sgperiod;

	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

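/*
 * Apply a slave configuration.  The device address is folded into DDAR:
 * the top nibble of the address stays in place and bits 2..21 are
 * shifted up by 6 to form the DDAR device address field, while DDAR_DW
 * and DDAR_BS select 16-bit width and burst-of-8 respectively.
 */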
static int sa11x0_dma_device_config(struct dma_chan *chan,
				    struct dma_slave_config *cfg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
		&c->vc, &addr, width, maxburst);

	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}

static int sa11x0_dma_device_pause(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;

		p = c->phy;
		if (p) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static int sa11x0_dma_device_resume(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;

		p = c->phy;
		if (p) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);

	p = c->phy;
	if (p) {
		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
		/* vchan is assigned to a pchan - stop the channel */
		writel(DCSR_RUN | DCSR_IE |
		       DCSR_STRTA | DCSR_DONEA |
		       DCSR_STRTB | DCSR_DONEB,
		       p->base + DMA_DCSR_C);

		if (p->txd_load) {
			if (p->txd_load != p->txd_done)
				list_add_tail(&p->txd_load->vd.node, &head);
			p->txd_load = NULL;
		}
		if (p->txd_done) {
			list_add_tail(&p->txd_done->vd.node, &head);
			p->txd_done = NULL;
		}
		c->phy = NULL;
		spin_lock(&d->lock);
		p->vchan = NULL;
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};

static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_config = sa11x0_dma_device_config;
	dmadev->device_pause = sa11x0_dma_device_pause;
	dmadev->device_resume = sa11x0_dma_device_resume;
	dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		INIT_LIST_HEAD(&c->node);

		c->vc.desc_free = sa11x0_dma_free_desc;
		vchan_init(&c->vc, dmadev);
	}

	return dma_async_device_register(dmadev);
}

static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq > 0)
		free_irq(irq, data);
}

static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
	d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

 err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
 err_ioremap:
	kfree(d);
 err_alloc:
	return ret;
}

static int sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}

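/*
 * System suspend: stop each physical channel and save its state.  The
 * two hardware buffers are saved in a normalized order - the buffer
 * selected by DCSR_BIU first, with the start bits swapped to match -
 * so that sa11x0_dma_resume() can reload dbs/dbt[0] into buffer A and
 * dbs/dbt[1] into buffer B and restart the transfer on buffer A.
 */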
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}

static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name	= "sa11x0-dma",
		.pm	= &sa11x0_dma_pm_ops,
	},
	.probe		= sa11x0_dma_probe,
	.remove		= sa11x0_dma_remove,
};

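/*
 * Filter function for dma_request_channel(): matches a channel by its
 * DDAR device name (e.g. "Ser4SSPTr").  A minimal usage sketch,
 * assuming a caller that has set up a capability mask:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPTr");
 */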
bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
		struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
		const char *p = param;

		return !strcmp(c->name, p);
	}
	return false;
}
EXPORT_SYMBOL(sa11x0_dma_filter_fn);

static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");