/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as an OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * MPC512x and MPC8308 DMA driver. It supports
 * memory to memory data transfers (tested using dmatest module) and
 * data transfers between memory and peripheral I/O memory
 * by means of slave scatter/gather with these limitations:
 *  - chunked transfers (described by s/g lists with more than one item)
 *     are refused as long as proper support for scatter/gather is missing;
 *  - transfers on MPC8308 always start from software as this SoC appears
 *     not to have external request lines for peripheral flow control;
 *  - only peripheral devices with 4-byte FIFO access register are supported;
 *  - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
 *     source and destination addresses must be 4-byte aligned
 *     and transfer size must be aligned on (4 * maxburst) boundary;
 */

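/*
 * For reference, a minimal sketch (not part of this driver) of how a client
 * could exercise the mem-to-mem path through the generic dmaengine API;
 * "dst", "src" and "len" are hypothetical DMA-mapped buffers and a length
 * that obey the alignment rules above:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	dma_release_channel(chan);
 */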

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

/* Transfer size codes (log2 of the access size in bytes) */
#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
	/* DMA channels(0~63) priority */
};

struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;	/* Beginning link channel number */
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};

struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
	int				will_access_peripheral;
};

struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Settings for access to peripheral FIFO */
	dma_addr_t			src_per_paddr;
	u32				src_tcd_nunits;
	dma_addr_t			dst_per_paddr;
	u32				dst_tcd_nunits;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};

#define DRV_NAME	"mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met when calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
						struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor,
		 * don't mix mem-to-mem and peripheral transfer descriptors
		 * within the same 'active' list.
		 */
		if (mdesc->will_access_peripheral) {
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved, start by external request signal */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}
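
/*
 * For illustration: with three mem-to-mem descriptors A, B and C taken from
 * the 'queued' list, the chaining loop above programs
 *
 *	A.tcd: e_sg = 1, dlast_sga = B.tcd_paddr  (copied to the hw TCD)
 *	B.tcd: start = 1, e_sg = 1, dlast_sga = C.tcd_paddr
 *	C.tcd: start = 1, int_maj = 1
 *
 * so the controller reloads the next TCD through the scatter/gather link
 * after every major loop and raises an interrupt only when the last
 * descriptor in the chain completes.
 */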

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported the following error(s) on channel %u:\n",
			MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* Try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
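
/*
 * Worked example (illustrative values): a 4096-byte copy whose source and
 * destination are both 32-byte aligned ends up in a single TCD with
 * ssize = dsize = MPC_DMA_TSIZE_32, soff = doff = 32, nbytes = 4096 and
 * citer = biter = 1, i.e. the whole copy is performed by one major
 * iteration whose minor loop moves 4096 bytes in 32-byte accesses.
 */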

static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	dma_addr_t per_paddr;
	u32 tcd_nunits;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;
	struct scatterlist *sg;
	size_t len;
	int iter, i;

	/* Currently there is no proper support for scatter/gather */
	if (sg_len != 1)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		spin_lock_irqsave(&mchan->lock, iflags);

		mdesc = list_first_entry_or_null(&mchan->free,
						struct mpc_dma_desc, node);
		if (!mdesc) {
			spin_unlock_irqrestore(&mchan->lock, iflags);
			/* Try to free completed descriptors */
			mpc_dma_process_completed(mdma);
			return NULL;
		}

		list_del(&mdesc->node);

		if (direction == DMA_DEV_TO_MEM) {
			per_paddr = mchan->src_per_paddr;
			tcd_nunits = mchan->src_tcd_nunits;
		} else {
			per_paddr = mchan->dst_per_paddr;
			tcd_nunits = mchan->dst_tcd_nunits;
		}

		spin_unlock_irqrestore(&mchan->lock, iflags);

		if (per_paddr == 0 || tcd_nunits == 0)
			goto err_prep;

		mdesc->error = 0;
		mdesc->will_access_peripheral = 1;

		/* Prepare Transfer Control Descriptor for this transaction */
		tcd = mdesc->tcd;

		memset(tcd, 0, sizeof(struct mpc_dma_tcd));

		if (!IS_ALIGNED(sg_dma_address(sg), 4))
			goto err_prep;

		if (direction == DMA_DEV_TO_MEM) {
			tcd->saddr = per_paddr;
			tcd->daddr = sg_dma_address(sg);
			tcd->soff = 0;
			tcd->doff = 4;
		} else {
			tcd->saddr = sg_dma_address(sg);
			tcd->daddr = per_paddr;
			tcd->soff = 4;
			tcd->doff = 0;
		}

		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;

		len = sg_dma_len(sg);
		tcd->nbytes = tcd_nunits * 4;
		if (!IS_ALIGNED(len, tcd->nbytes))
			goto err_prep;

		iter = len / tcd->nbytes;
		if (iter >= 1 << 15) {
			/* len is too big */
			goto err_prep;
		}
		/* citer_linkch contains the high bits of iter */
		tcd->biter = iter & 0x1ff;
		tcd->biter_linkch = iter >> 9;
		tcd->citer = tcd->biter;
		tcd->citer_linkch = tcd->biter_linkch;

		tcd->e_sg = 0;
		tcd->d_req = 1;

		/* Place descriptor in prepared list */
		spin_lock_irqsave(&mchan->lock, iflags);
		list_add_tail(&mdesc->node, &mchan->prepared);
		spin_unlock_irqrestore(&mchan->lock, iflags);
	}

	return &mdesc->desc;

err_prep:
	/* Put the descriptor back */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return NULL;
}
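
/*
 * Worked example (illustrative values): with maxburst = 16 configured for a
 * 4-byte wide FIFO, nbytes = 16 * 4 = 64 bytes per minor loop; a 4096-byte
 * buffer then needs iter = 4096 / 64 = 64 major iterations, stored as
 * citer = biter = 64 with citer_linkch = biter_linkch = 0 (the upper bits
 * of the 15-bit iteration count).
 */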

static int mpc_dma_device_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	unsigned long flags;

	/*
	 * Software constraints:
	 *  - only transfers between a peripheral device and memory are
	 *     supported;
	 *  - only peripheral devices with a 4-byte FIFO access register
	 *     are supported;
	 *  - minimal transfer chunk is 4 bytes and consequently
	 *     source and destination addresses must be 4-byte aligned
	 *     and transfer size must be aligned on a (4 * maxburst)
	 *     boundary;
	 *  - during the transfer the RAM address is incremented by the
	 *     size of the minimal transfer chunk;
	 *  - the peripheral port's address is constant during the transfer.
	 */
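
	/*
	 * A minimal sketch of a client configuration that satisfies these
	 * constraints ("fifo_paddr" is a hypothetical 4-byte aligned FIFO
	 * register address; values are illustrative):
	 *
	 *	struct dma_slave_config cfg = {
	 *		.direction	= DMA_DEV_TO_MEM,
	 *		.src_addr	= fifo_paddr,
	 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	 *		.src_maxburst	= 16,
	 *	};
	 *
	 *	dmaengine_slave_config(chan, &cfg);
	 *
	 * The length of a subsequent prep_slave_sg() transfer must then be
	 * a multiple of 4 * 16 = 64 bytes.
	 */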

	if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
	    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
	    !IS_ALIGNED(cfg->src_addr, 4) ||
	    !IS_ALIGNED(cfg->dst_addr, 4)) {
		return -EINVAL;
	}

	spin_lock_irqsave(&mchan->lock, flags);

	mchan->src_per_paddr = cfg->src_addr;
	mchan->src_tcd_nunits = cfg->src_maxburst;
	mchan->dst_per_paddr = cfg->dst_addr;
	mchan->dst_tcd_nunits = cfg->dst_maxburst;

	/* Apply defaults */
	if (mchan->src_tcd_nunits == 0)
		mchan->src_tcd_nunits = 1;
	if (mchan->dst_tcd_nunits == 0)
		mchan->dst_tcd_nunits = 1;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}

static int mpc_dma_device_terminate_all(struct dma_chan *chan)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	unsigned long flags;

	/* Disable channel requests */
	spin_lock_irqsave(&mchan->lock, flags);

	out_8(&mdma->regs->dmacerq, chan->chan_id);
	list_splice_tail_init(&mchan->prepared, &mchan->free);
	list_splice_tail_init(&mchan->queued, &mchan->free);
	list_splice_tail_init(&mchan->active, &mchan->free);

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}

static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;
	u8 chancnt;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		dev_err(dev, "Memory exhausted!\n");
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (mdma->irq2 == NO_IRQ) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_config = mpc_dma_device_config;
	dma->device_terminate_all = mpc_dma_device_terminate_all;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	if (mdma->is_mpc8308)
		chancnt = MPC8308_DMACHAN_MAX;
	else
		chancnt = MPC512x_DMACHAN_MAX;

	for (i = 0; i < chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* Enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
					MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	/* Register with OF helpers for DMA lookups (nonfatal) */
	if (dev->of_node) {
		retval = of_dma_controller_register(dev->of_node,
						of_dma_xlate_by_chan_id, mdma);
		if (retval)
			dev_warn(dev, "Could not register for OF lookup\n");
	}

	return 0;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}

static const struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};
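
/*
 * Device tree note (illustrative, not taken from a specific board file):
 * since probe() registers of_dma_xlate_by_chan_id(), the controller node is
 * expected to carry #dma-cells = <1> with the single cell holding the
 * channel number, so a client node references a channel with properties
 * like dmas = <&dma0 26> and dma-names = "rx" (channel number hypothetical).
 */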

static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= mpc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table	= mpc_dma_match,
	},
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");