1/*
2 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
3 *
4 * Copyright (C) 2014 Atmel Corporation
5 *
6 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <asm/barrier.h>
22#include <dt-bindings/dma/at91.h>
23#include <linux/clk.h>
24#include <linux/dmaengine.h>
25#include <linux/dmapool.h>
26#include <linux/interrupt.h>
27#include <linux/irq.h>
28#include <linux/kernel.h>
29#include <linux/list.h>
30#include <linux/module.h>
31#include <linux/of_dma.h>
32#include <linux/of_platform.h>
33#include <linux/platform_device.h>
34#include <linux/pm.h>
35
36#include "dmaengine.h"
37
38/* Global registers */
39#define AT_XDMAC_GTYPE		0x00	/* Global Type Register */
40#define		AT_XDMAC_NB_CH(i)	(((i) & 0x1F) + 1)		/* Number of Channels Minus One */
41#define		AT_XDMAC_FIFO_SZ(i)	(((i) >> 5) & 0x7FF)		/* Number of Bytes */
42#define		AT_XDMAC_NB_REQ(i)	((((i) >> 16) & 0x3F) + 1)	/* Number of Peripheral Requests Minus One */
43#define AT_XDMAC_GCFG		0x04	/* Global Configuration Register */
44#define AT_XDMAC_GWAC		0x08	/* Global Weighted Arbiter Configuration Register */
45#define AT_XDMAC_GIE		0x0C	/* Global Interrupt Enable Register */
46#define AT_XDMAC_GID		0x10	/* Global Interrupt Disable Register */
47#define AT_XDMAC_GIM		0x14	/* Global Interrupt Mask Register */
48#define AT_XDMAC_GIS		0x18	/* Global Interrupt Status Register */
49#define AT_XDMAC_GE		0x1C	/* Global Channel Enable Register */
50#define AT_XDMAC_GD		0x20	/* Global Channel Disable Register */
51#define AT_XDMAC_GS		0x24	/* Global Channel Status Register */
52#define AT_XDMAC_GRS		0x28	/* Global Channel Read Suspend Register */
53#define AT_XDMAC_GWS		0x2C	/* Global Write Suspend Register */
54#define AT_XDMAC_GRWS		0x30	/* Global Channel Read Write Suspend Register */
55#define AT_XDMAC_GRWR		0x34	/* Global Channel Read Write Resume Register */
56#define AT_XDMAC_GSWR		0x38	/* Global Channel Software Request Register */
57#define AT_XDMAC_GSWS		0x3C	/* Global channel Software Request Status Register */
58#define AT_XDMAC_GSWF		0x40	/* Global Channel Software Flush Request Register */
59#define AT_XDMAC_VERSION	0xFFC	/* XDMAC Version Register */
60
61/* Channel relative registers offsets */
62#define AT_XDMAC_CIE		0x00	/* Channel Interrupt Enable Register */
63#define		AT_XDMAC_CIE_BIE	BIT(0)	/* End of Block Interrupt Enable Bit */
64#define		AT_XDMAC_CIE_LIE	BIT(1)	/* End of Linked List Interrupt Enable Bit */
65#define		AT_XDMAC_CIE_DIE	BIT(2)	/* End of Disable Interrupt Enable Bit */
66#define		AT_XDMAC_CIE_FIE	BIT(3)	/* End of Flush Interrupt Enable Bit */
67#define		AT_XDMAC_CIE_RBEIE	BIT(4)	/* Read Bus Error Interrupt Enable Bit */
68#define		AT_XDMAC_CIE_WBEIE	BIT(5)	/* Write Bus Error Interrupt Enable Bit */
69#define		AT_XDMAC_CIE_ROIE	BIT(6)	/* Request Overflow Interrupt Enable Bit */
70#define AT_XDMAC_CID		0x04	/* Channel Interrupt Disable Register */
71#define		AT_XDMAC_CID_BID	BIT(0)	/* End of Block Interrupt Disable Bit */
72#define		AT_XDMAC_CID_LID	BIT(1)	/* End of Linked List Interrupt Disable Bit */
73#define		AT_XDMAC_CID_DID	BIT(2)	/* End of Disable Interrupt Disable Bit */
74#define		AT_XDMAC_CID_FID	BIT(3)	/* End of Flush Interrupt Disable Bit */
75#define		AT_XDMAC_CID_RBEID	BIT(4)	/* Read Bus Error Interrupt Disable Bit */
76#define		AT_XDMAC_CID_WBEID	BIT(5)	/* Write Bus Error Interrupt Disable Bit */
77#define		AT_XDMAC_CID_ROID	BIT(6)	/* Request Overflow Interrupt Disable Bit */
78#define AT_XDMAC_CIM		0x08	/* Channel Interrupt Mask Register */
79#define		AT_XDMAC_CIM_BIM	BIT(0)	/* End of Block Interrupt Mask Bit */
80#define		AT_XDMAC_CIM_LIM	BIT(1)	/* End of Linked List Interrupt Mask Bit */
81#define		AT_XDMAC_CIM_DIM	BIT(2)	/* End of Disable Interrupt Mask Bit */
82#define		AT_XDMAC_CIM_FIM	BIT(3)	/* End of Flush Interrupt Mask Bit */
83#define		AT_XDMAC_CIM_RBEIM	BIT(4)	/* Read Bus Error Interrupt Mask Bit */
84#define		AT_XDMAC_CIM_WBEIM	BIT(5)	/* Write Bus Error Interrupt Mask Bit */
85#define		AT_XDMAC_CIM_ROIM	BIT(6)	/* Request Overflow Interrupt Mask Bit */
86#define AT_XDMAC_CIS		0x0C	/* Channel Interrupt Status Register */
87#define		AT_XDMAC_CIS_BIS	BIT(0)	/* End of Block Interrupt Status Bit */
88#define		AT_XDMAC_CIS_LIS	BIT(1)	/* End of Linked List Interrupt Status Bit */
89#define		AT_XDMAC_CIS_DIS	BIT(2)	/* End of Disable Interrupt Status Bit */
90#define		AT_XDMAC_CIS_FIS	BIT(3)	/* End of Flush Interrupt Status Bit */
91#define		AT_XDMAC_CIS_RBEIS	BIT(4)	/* Read Bus Error Interrupt Status Bit */
92#define		AT_XDMAC_CIS_WBEIS	BIT(5)	/* Write Bus Error Interrupt Status Bit */
93#define		AT_XDMAC_CIS_ROIS	BIT(6)	/* Request Overflow Interrupt Status Bit */
94#define AT_XDMAC_CSA		0x10	/* Channel Source Address Register */
95#define AT_XDMAC_CDA		0x14	/* Channel Destination Address Register */
96#define AT_XDMAC_CNDA		0x18	/* Channel Next Descriptor Address Register */
97#define		AT_XDMAC_CNDA_NDAIF(i)	((i) & 0x1)			/* Channel x Next Descriptor Interface */
98#define		AT_XDMAC_CNDA_NDA(i)	((i) & 0xfffffffc)		/* Channel x Next Descriptor Address */
99#define AT_XDMAC_CNDC		0x1C	/* Channel Next Descriptor Control Register */
100#define		AT_XDMAC_CNDC_NDE		(0x1 << 0)		/* Channel x Next Descriptor Enable */
101#define		AT_XDMAC_CNDC_NDSUP		(0x1 << 1)		/* Channel x Next Descriptor Source Update */
102#define		AT_XDMAC_CNDC_NDDUP		(0x1 << 2)		/* Channel x Next Descriptor Destination Update */
103#define		AT_XDMAC_CNDC_NDVIEW_NDV0	(0x0 << 3)		/* Channel x Next Descriptor View 0 */
104#define		AT_XDMAC_CNDC_NDVIEW_NDV1	(0x1 << 3)		/* Channel x Next Descriptor View 1 */
105#define		AT_XDMAC_CNDC_NDVIEW_NDV2	(0x2 << 3)		/* Channel x Next Descriptor View 2 */
106#define		AT_XDMAC_CNDC_NDVIEW_NDV3	(0x3 << 3)		/* Channel x Next Descriptor View 3 */
107#define AT_XDMAC_CUBC		0x20	/* Channel Microblock Control Register */
108#define AT_XDMAC_CBC		0x24	/* Channel Block Control Register */
109#define AT_XDMAC_CC		0x28	/* Channel Configuration Register */
110#define		AT_XDMAC_CC_TYPE	(0x1 << 0)	/* Channel Transfer Type */
111#define			AT_XDMAC_CC_TYPE_MEM_TRAN	(0x0 << 0)	/* Memory to Memory Transfer */
112#define			AT_XDMAC_CC_TYPE_PER_TRAN	(0x1 << 0)	/* Peripheral to Memory or Memory to Peripheral Transfer */
113#define		AT_XDMAC_CC_MBSIZE_MASK	(0x3 << 1)
114#define			AT_XDMAC_CC_MBSIZE_SINGLE	(0x0 << 1)
115#define			AT_XDMAC_CC_MBSIZE_FOUR		(0x1 << 1)
116#define			AT_XDMAC_CC_MBSIZE_EIGHT	(0x2 << 1)
117#define			AT_XDMAC_CC_MBSIZE_SIXTEEN	(0x3 << 1)
118#define		AT_XDMAC_CC_DSYNC	(0x1 << 4)	/* Channel Synchronization */
119#define			AT_XDMAC_CC_DSYNC_PER2MEM	(0x0 << 4)
120#define			AT_XDMAC_CC_DSYNC_MEM2PER	(0x1 << 4)
121#define		AT_XDMAC_CC_PROT	(0x1 << 5)	/* Channel Protection */
122#define			AT_XDMAC_CC_PROT_SEC		(0x0 << 5)
123#define			AT_XDMAC_CC_PROT_UNSEC		(0x1 << 5)
124#define		AT_XDMAC_CC_SWREQ	(0x1 << 6)	/* Channel Software Request Trigger */
125#define			AT_XDMAC_CC_SWREQ_HWR_CONNECTED	(0x0 << 6)
126#define			AT_XDMAC_CC_SWREQ_SWR_CONNECTED	(0x1 << 6)
127#define		AT_XDMAC_CC_MEMSET	(0x1 << 7)	/* Channel Fill Block of memory */
128#define			AT_XDMAC_CC_MEMSET_NORMAL_MODE	(0x0 << 7)
129#define			AT_XDMAC_CC_MEMSET_HW_MODE	(0x1 << 7)
130#define		AT_XDMAC_CC_CSIZE(i)	((0x7 & (i)) << 8)	/* Channel Chunk Size */
131#define		AT_XDMAC_CC_DWIDTH_OFFSET	11
132#define		AT_XDMAC_CC_DWIDTH_MASK	(0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
133#define		AT_XDMAC_CC_DWIDTH(i)	((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)	/* Channel Data Width */
134#define			AT_XDMAC_CC_DWIDTH_BYTE		0x0
135#define			AT_XDMAC_CC_DWIDTH_HALFWORD	0x1
136#define			AT_XDMAC_CC_DWIDTH_WORD		0x2
137#define			AT_XDMAC_CC_DWIDTH_DWORD	0x3
138#define		AT_XDMAC_CC_SIF(i)	((0x1 & (i)) << 13)	/* Channel Source Interface Identifier */
139#define		AT_XDMAC_CC_DIF(i)	((0x1 & (i)) << 14)	/* Channel Destination Interface Identifier */
140#define		AT_XDMAC_CC_SAM_MASK	(0x3 << 16)	/* Channel Source Addressing Mode */
141#define			AT_XDMAC_CC_SAM_FIXED_AM	(0x0 << 16)
142#define			AT_XDMAC_CC_SAM_INCREMENTED_AM	(0x1 << 16)
143#define			AT_XDMAC_CC_SAM_UBS_AM		(0x2 << 16)
144#define			AT_XDMAC_CC_SAM_UBS_DS_AM	(0x3 << 16)
145#define		AT_XDMAC_CC_DAM_MASK	(0x3 << 18)	/* Channel Source Addressing Mode */
146#define			AT_XDMAC_CC_DAM_FIXED_AM	(0x0 << 18)
147#define			AT_XDMAC_CC_DAM_INCREMENTED_AM	(0x1 << 18)
148#define			AT_XDMAC_CC_DAM_UBS_AM		(0x2 << 18)
149#define			AT_XDMAC_CC_DAM_UBS_DS_AM	(0x3 << 18)
150#define		AT_XDMAC_CC_INITD	(0x1 << 21)	/* Channel Initialization Terminated (read only) */
151#define			AT_XDMAC_CC_INITD_TERMINATED	(0x0 << 21)
152#define			AT_XDMAC_CC_INITD_IN_PROGRESS	(0x1 << 21)
153#define		AT_XDMAC_CC_RDIP	(0x1 << 22)	/* Read in Progress (read only) */
154#define			AT_XDMAC_CC_RDIP_DONE		(0x0 << 22)
155#define			AT_XDMAC_CC_RDIP_IN_PROGRESS	(0x1 << 22)
156#define		AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
157#define			AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
158#define			AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
159#define		AT_XDMAC_CC_PERID(i)	(0x7f & (h) << 24)	/* Channel Peripheral Identifier */
160#define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
161#define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
162#define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */
163
164#define AT_XDMAC_CHAN_REG_BASE	0x50	/* Channel registers base address */
165
166/* Microblock control members */
167#define AT_XDMAC_MBR_UBC_UBLEN_MAX	0xFFFFFFUL	/* Maximum Microblock Length */
168#define AT_XDMAC_MBR_UBC_NDE		(0x1 << 24)	/* Next Descriptor Enable */
169#define AT_XDMAC_MBR_UBC_NSEN		(0x1 << 25)	/* Next Descriptor Source Update */
170#define AT_XDMAC_MBR_UBC_NDEN		(0x1 << 26)	/* Next Descriptor Destination Update */
171#define AT_XDMAC_MBR_UBC_NDV0		(0x0 << 27)	/* Next Descriptor View 0 */
172#define AT_XDMAC_MBR_UBC_NDV1		(0x1 << 27)	/* Next Descriptor View 1 */
173#define AT_XDMAC_MBR_UBC_NDV2		(0x2 << 27)	/* Next Descriptor View 2 */
174#define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */
175
176#define AT_XDMAC_MAX_CHAN	0x20
177#define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
178#define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
179#define AT_XDMAC_RESIDUE_MAX_RETRIES	5
180
181#define AT_XDMAC_DMA_BUSWIDTHS\
182	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
183	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
184	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
185	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
186	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
187
188enum atc_status {
189	AT_XDMAC_CHAN_IS_CYCLIC = 0,
190	AT_XDMAC_CHAN_IS_PAUSED,
191};
192
193/* ----- Channels ----- */
194struct at_xdmac_chan {
195	struct dma_chan			chan;
196	void __iomem			*ch_regs;
197	u32				mask;		/* Channel Mask */
198	u32				cfg;		/* Channel Configuration Register */
199	u8				perid;		/* Peripheral ID */
200	u8				perif;		/* Peripheral Interface */
201	u8				memif;		/* Memory Interface */
202	u32				save_cc;
203	u32				save_cim;
204	u32				save_cnda;
205	u32				save_cndc;
206	unsigned long			status;
207	struct tasklet_struct		tasklet;
208	struct dma_slave_config		sconfig;
209
210	spinlock_t			lock;
211
212	struct list_head		xfers_list;
213	struct list_head		free_descs_list;
214};
215
216
217/* ----- Controller ----- */
218struct at_xdmac {
219	struct dma_device	dma;
220	void __iomem		*regs;
221	int			irq;
222	struct clk		*clk;
223	u32			save_gim;
224	u32			save_gs;
225	struct dma_pool		*at_xdmac_desc_pool;
226	struct at_xdmac_chan	chan[0];
227};
228
229
230/* ----- Descriptors ----- */
231
232/* Linked List Descriptor */
233struct at_xdmac_lld {
234	dma_addr_t	mbr_nda;	/* Next Descriptor Member */
235	u32		mbr_ubc;	/* Microblock Control Member */
236	dma_addr_t	mbr_sa;		/* Source Address Member */
237	dma_addr_t	mbr_da;		/* Destination Address Member */
238	u32		mbr_cfg;	/* Configuration Register */
239};
240
241
242struct at_xdmac_desc {
243	struct at_xdmac_lld		lld;
244	enum dma_transfer_direction	direction;
245	struct dma_async_tx_descriptor	tx_dma_desc;
246	struct list_head		desc_node;
247	/* Following members are only used by the first descriptor */
248	bool				active_xfer;
249	unsigned int			xfer_size;
250	struct list_head		descs_list;
251	struct list_head		xfer_node;
252};
253
254static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
255{
256	return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
257}
258
259#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
260#define at_xdmac_write(atxdmac, reg, value) \
261	writel_relaxed((value), (atxdmac)->regs + (reg))
262
263#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
264#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
265
266static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
267{
268	return container_of(dchan, struct at_xdmac_chan, chan);
269}
270
271static struct device *chan2dev(struct dma_chan *chan)
272{
273	return &chan->dev->device;
274}
275
276static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
277{
278	return container_of(ddev, struct at_xdmac, dma);
279}
280
281static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
282{
283	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
284}
285
286static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
287{
288	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
289}
290
291static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
292{
293	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
294}
295
296static inline int at_xdmac_csize(u32 maxburst)
297{
298	int csize;
299
300	csize = ffs(maxburst) - 1;
301	if (csize > 4)
302		csize = -EINVAL;
303
304	return csize;
305};
306
307static inline u8 at_xdmac_get_dwidth(u32 cfg)
308{
309	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
310};
311
312static unsigned int init_nr_desc_per_channel = 64;
313module_param(init_nr_desc_per_channel, uint, 0644);
314MODULE_PARM_DESC(init_nr_desc_per_channel,
315		 "initial descriptors per channel (default: 64)");
316
317
318static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
319{
320	return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
321}
322
323static void at_xdmac_off(struct at_xdmac *atxdmac)
324{
325	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
326
327	/* Wait that all chans are disabled. */
328	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
329		cpu_relax();
330
331	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
332}
333
334/* Call with lock hold. */
335static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
336				struct at_xdmac_desc *first)
337{
338	struct at_xdmac	*atxdmac = to_at_xdmac(atchan->chan.device);
339	u32		reg;
340
341	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
342
343	if (at_xdmac_chan_is_enabled(atchan))
344		return;
345
346	/* Set transfer as active to not try to start it again. */
347	first->active_xfer = true;
348
349	/* Tell xdmac where to get the first descriptor. */
350	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
351	      | AT_XDMAC_CNDA_NDAIF(atchan->memif);
352	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
353
354	/*
355	 * When doing non cyclic transfer we need to use the next
356	 * descriptor view 2 since some fields of the configuration register
357	 * depend on transfer size and src/dest addresses.
358	 */
359	if (at_xdmac_chan_is_cyclic(atchan)) {
360		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
361		at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
362	} else {
363		/*
364		 * No need to write AT_XDMAC_CC reg, it will be done when the
365		 * descriptor is fecthed.
366		 */
367		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
368	}
369
370	reg |= AT_XDMAC_CNDC_NDDUP
371	       | AT_XDMAC_CNDC_NDSUP
372	       | AT_XDMAC_CNDC_NDE;
373	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);
374
375	dev_vdbg(chan2dev(&atchan->chan),
376		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
377		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
378		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
379		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
380		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
381		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
382		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
383
384	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
385	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
386	/*
387	 * There is no end of list when doing cyclic dma, we need to get
388	 * an interrupt after each periods.
389	 */
390	if (at_xdmac_chan_is_cyclic(atchan))
391		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
392				    reg | AT_XDMAC_CIE_BIE);
393	else
394		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
395				    reg | AT_XDMAC_CIE_LIE);
396	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
397	dev_vdbg(chan2dev(&atchan->chan),
398		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
399	wmb();
400	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
401
402	dev_vdbg(chan2dev(&atchan->chan),
403		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
404		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
405		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
406		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
407		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
408		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
409		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
410
411}
412
413static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
414{
415	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
416	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
417	dma_cookie_t		cookie;
418	unsigned long		irqflags;
419
420	spin_lock_irqsave(&atchan->lock, irqflags);
421	cookie = dma_cookie_assign(tx);
422
423	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
424		 __func__, atchan, desc);
425	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
426	if (list_is_singular(&atchan->xfers_list))
427		at_xdmac_start_xfer(atchan, desc);
428
429	spin_unlock_irqrestore(&atchan->lock, irqflags);
430	return cookie;
431}
432
433static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
434						 gfp_t gfp_flags)
435{
436	struct at_xdmac_desc	*desc;
437	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
438	dma_addr_t		phys;
439
440	desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
441	if (desc) {
442		memset(desc, 0, sizeof(*desc));
443		INIT_LIST_HEAD(&desc->descs_list);
444		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
445		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
446		desc->tx_dma_desc.phys = phys;
447	}
448
449	return desc;
450}
451
452/* Call must be protected by lock. */
453static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
454{
455	struct at_xdmac_desc *desc;
456
457	if (list_empty(&atchan->free_descs_list)) {
458		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
459	} else {
460		desc = list_first_entry(&atchan->free_descs_list,
461					struct at_xdmac_desc, desc_node);
462		list_del(&desc->desc_node);
463		desc->active_xfer = false;
464	}
465
466	return desc;
467}
468
469static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
470				       struct of_dma *of_dma)
471{
472	struct at_xdmac		*atxdmac = of_dma->of_dma_data;
473	struct at_xdmac_chan	*atchan;
474	struct dma_chan		*chan;
475	struct device		*dev = atxdmac->dma.dev;
476
477	if (dma_spec->args_count != 1) {
478		dev_err(dev, "dma phandler args: bad number of args\n");
479		return NULL;
480	}
481
482	chan = dma_get_any_slave_channel(&atxdmac->dma);
483	if (!chan) {
484		dev_err(dev, "can't get a dma channel\n");
485		return NULL;
486	}
487
488	atchan = to_at_xdmac_chan(chan);
489	atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
490	atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
491	atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
492	dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
493		 atchan->memif, atchan->perif, atchan->perid);
494
495	return chan;
496}
497
498static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
499				      enum dma_transfer_direction direction)
500{
501	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
502	int			csize, dwidth;
503
504	if (direction == DMA_DEV_TO_MEM) {
505		atchan->cfg =
506			AT91_XDMAC_DT_PERID(atchan->perid)
507			| AT_XDMAC_CC_DAM_INCREMENTED_AM
508			| AT_XDMAC_CC_SAM_FIXED_AM
509			| AT_XDMAC_CC_DIF(atchan->memif)
510			| AT_XDMAC_CC_SIF(atchan->perif)
511			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
512			| AT_XDMAC_CC_DSYNC_PER2MEM
513			| AT_XDMAC_CC_MBSIZE_SIXTEEN
514			| AT_XDMAC_CC_TYPE_PER_TRAN;
515		csize = ffs(atchan->sconfig.src_maxburst) - 1;
516		if (csize < 0) {
517			dev_err(chan2dev(chan), "invalid src maxburst value\n");
518			return -EINVAL;
519		}
520		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
521		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
522		if (dwidth < 0) {
523			dev_err(chan2dev(chan), "invalid src addr width value\n");
524			return -EINVAL;
525		}
526		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
527	} else if (direction == DMA_MEM_TO_DEV) {
528		atchan->cfg =
529			AT91_XDMAC_DT_PERID(atchan->perid)
530			| AT_XDMAC_CC_DAM_FIXED_AM
531			| AT_XDMAC_CC_SAM_INCREMENTED_AM
532			| AT_XDMAC_CC_DIF(atchan->perif)
533			| AT_XDMAC_CC_SIF(atchan->memif)
534			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
535			| AT_XDMAC_CC_DSYNC_MEM2PER
536			| AT_XDMAC_CC_MBSIZE_SIXTEEN
537			| AT_XDMAC_CC_TYPE_PER_TRAN;
538		csize = ffs(atchan->sconfig.dst_maxburst) - 1;
539		if (csize < 0) {
540			dev_err(chan2dev(chan), "invalid src maxburst value\n");
541			return -EINVAL;
542		}
543		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
544		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
545		if (dwidth < 0) {
546			dev_err(chan2dev(chan), "invalid dst addr width value\n");
547			return -EINVAL;
548		}
549		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
550	}
551
552	dev_dbg(chan2dev(chan),	"%s: cfg=0x%08x\n", __func__, atchan->cfg);
553
554	return 0;
555}
556
557/*
558 * Only check that maxburst and addr width values are supported by the
559 * the controller but not that the configuration is good to perform the
560 * transfer since we don't know the direction at this stage.
561 */
562static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
563{
564	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
565	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
566		return -EINVAL;
567
568	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
569	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
570		return -EINVAL;
571
572	return 0;
573}
574
575static int at_xdmac_set_slave_config(struct dma_chan *chan,
576				      struct dma_slave_config *sconfig)
577{
578	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
579
580	if (at_xdmac_check_slave_config(sconfig)) {
581		dev_err(chan2dev(chan), "invalid slave configuration\n");
582		return -EINVAL;
583	}
584
585	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
586
587	return 0;
588}
589
590static struct dma_async_tx_descriptor *
591at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
592		       unsigned int sg_len, enum dma_transfer_direction direction,
593		       unsigned long flags, void *context)
594{
595	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
596	struct at_xdmac_desc	*first = NULL, *prev = NULL;
597	struct scatterlist	*sg;
598	int			i;
599	unsigned int		xfer_size = 0;
600	unsigned long		irqflags;
601	struct dma_async_tx_descriptor	*ret = NULL;
602
603	if (!sgl)
604		return NULL;
605
606	if (!is_slave_direction(direction)) {
607		dev_err(chan2dev(chan), "invalid DMA direction\n");
608		return NULL;
609	}
610
611	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
612		 __func__, sg_len,
613		 direction == DMA_MEM_TO_DEV ? "to device" : "from device",
614		 flags);
615
616	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
617	spin_lock_irqsave(&atchan->lock, irqflags);
618
619	if (at_xdmac_compute_chan_conf(chan, direction))
620		goto spin_unlock;
621
622	/* Prepare descriptors. */
623	for_each_sg(sgl, sg, sg_len, i) {
624		struct at_xdmac_desc	*desc = NULL;
625		u32			len, mem, dwidth, fixed_dwidth;
626
627		len = sg_dma_len(sg);
628		mem = sg_dma_address(sg);
629		if (unlikely(!len)) {
630			dev_err(chan2dev(chan), "sg data length is zero\n");
631			goto spin_unlock;
632		}
633		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
634			 __func__, i, len, mem);
635
636		desc = at_xdmac_get_desc(atchan);
637		if (!desc) {
638			dev_err(chan2dev(chan), "can't get descriptor\n");
639			if (first)
640				list_splice_init(&first->descs_list, &atchan->free_descs_list);
641			goto spin_unlock;
642		}
643
644		/* Linked list descriptor setup. */
645		if (direction == DMA_DEV_TO_MEM) {
646			desc->lld.mbr_sa = atchan->sconfig.src_addr;
647			desc->lld.mbr_da = mem;
648		} else {
649			desc->lld.mbr_sa = mem;
650			desc->lld.mbr_da = atchan->sconfig.dst_addr;
651		}
652		dwidth = at_xdmac_get_dwidth(atchan->cfg);
653		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
654			       ? dwidth
655			       : AT_XDMAC_CC_DWIDTH_BYTE;
656		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2			/* next descriptor view */
657			| AT_XDMAC_MBR_UBC_NDEN					/* next descriptor dst parameter update */
658			| AT_XDMAC_MBR_UBC_NSEN					/* next descriptor src parameter update */
659			| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)		/* descriptor fetch */
660			| (len >> fixed_dwidth);				/* microblock length */
661		desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
662				    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
663		dev_dbg(chan2dev(chan),
664			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
665			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
666
667		/* Chain lld. */
668		if (prev) {
669			prev->lld.mbr_nda = desc->tx_dma_desc.phys;
670			dev_dbg(chan2dev(chan),
671				 "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
672				 __func__, prev, &prev->lld.mbr_nda);
673		}
674
675		prev = desc;
676		if (!first)
677			first = desc;
678
679		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
680			 __func__, desc, first);
681		list_add_tail(&desc->desc_node, &first->descs_list);
682		xfer_size += len;
683	}
684
685
686	first->tx_dma_desc.flags = flags;
687	first->xfer_size = xfer_size;
688	first->direction = direction;
689	ret = &first->tx_dma_desc;
690
691spin_unlock:
692	spin_unlock_irqrestore(&atchan->lock, irqflags);
693	return ret;
694}
695
696static struct dma_async_tx_descriptor *
697at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
698			 size_t buf_len, size_t period_len,
699			 enum dma_transfer_direction direction,
700			 unsigned long flags)
701{
702	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
703	struct at_xdmac_desc	*first = NULL, *prev = NULL;
704	unsigned int		periods = buf_len / period_len;
705	int			i;
706	unsigned long		irqflags;
707
708	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
709		__func__, &buf_addr, buf_len, period_len,
710		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
711
712	if (!is_slave_direction(direction)) {
713		dev_err(chan2dev(chan), "invalid DMA direction\n");
714		return NULL;
715	}
716
717	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
718		dev_err(chan2dev(chan), "channel currently used\n");
719		return NULL;
720	}
721
722	if (at_xdmac_compute_chan_conf(chan, direction))
723		return NULL;
724
725	for (i = 0; i < periods; i++) {
726		struct at_xdmac_desc	*desc = NULL;
727
728		spin_lock_irqsave(&atchan->lock, irqflags);
729		desc = at_xdmac_get_desc(atchan);
730		if (!desc) {
731			dev_err(chan2dev(chan), "can't get descriptor\n");
732			if (first)
733				list_splice_init(&first->descs_list, &atchan->free_descs_list);
734			spin_unlock_irqrestore(&atchan->lock, irqflags);
735			return NULL;
736		}
737		spin_unlock_irqrestore(&atchan->lock, irqflags);
738		dev_dbg(chan2dev(chan),
739			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
740			__func__, desc, &desc->tx_dma_desc.phys);
741
742		if (direction == DMA_DEV_TO_MEM) {
743			desc->lld.mbr_sa = atchan->sconfig.src_addr;
744			desc->lld.mbr_da = buf_addr + i * period_len;
745		} else {
746			desc->lld.mbr_sa = buf_addr + i * period_len;
747			desc->lld.mbr_da = atchan->sconfig.dst_addr;
748		}
749		desc->lld.mbr_cfg = atchan->cfg;
750		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
751			| AT_XDMAC_MBR_UBC_NDEN
752			| AT_XDMAC_MBR_UBC_NSEN
753			| AT_XDMAC_MBR_UBC_NDE
754			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
755
756		dev_dbg(chan2dev(chan),
757			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
758			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
759
760		/* Chain lld. */
761		if (prev) {
762			prev->lld.mbr_nda = desc->tx_dma_desc.phys;
763			dev_dbg(chan2dev(chan),
764				 "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
765				 __func__, prev, &prev->lld.mbr_nda);
766		}
767
768		prev = desc;
769		if (!first)
770			first = desc;
771
772		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
773			 __func__, desc, first);
774		list_add_tail(&desc->desc_node, &first->descs_list);
775	}
776
777	prev->lld.mbr_nda = first->tx_dma_desc.phys;
778	dev_dbg(chan2dev(chan),
779		"%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
780		__func__, prev, &prev->lld.mbr_nda);
781	first->tx_dma_desc.flags = flags;
782	first->xfer_size = buf_len;
783	first->direction = direction;
784
785	return &first->tx_dma_desc;
786}
787
788static struct dma_async_tx_descriptor *
789at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
790			 size_t len, unsigned long flags)
791{
792	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
793	struct at_xdmac_desc	*first = NULL, *prev = NULL;
794	size_t			remaining_size = len, xfer_size = 0, ublen;
795	dma_addr_t		src_addr = src, dst_addr = dest;
796	u32			dwidth;
797	/*
798	 * WARNING: We don't know the direction, it involves we can't
799	 * dynamically set the source and dest interface so we have to use the
800	 * same one. Only interface 0 allows EBI access. Hopefully we can
801	 * access DDR through both ports (at least on SAMA5D4x), so we can use
802	 * the same interface for source and dest, that solves the fact we
803	 * don't know the direction.
804	 */
805	u32			chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
806					| AT_XDMAC_CC_SAM_INCREMENTED_AM
807					| AT_XDMAC_CC_DIF(0)
808					| AT_XDMAC_CC_SIF(0)
809					| AT_XDMAC_CC_MBSIZE_SIXTEEN
810					| AT_XDMAC_CC_TYPE_MEM_TRAN;
811	unsigned long		irqflags;
812
813	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
814		__func__, &src, &dest, len, flags);
815
816	if (unlikely(!len))
817		return NULL;
818
819	/*
820	 * Check address alignment to select the greater data width we can use.
821	 * Some XDMAC implementations don't provide dword transfer, in this
822	 * case selecting dword has the same behavior as selecting word transfers.
823	 */
824	if (!((src_addr | dst_addr) & 7)) {
825		dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
826		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
827	} else if (!((src_addr | dst_addr)  & 3)) {
828		dwidth = AT_XDMAC_CC_DWIDTH_WORD;
829		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
830	} else if (!((src_addr | dst_addr) & 1)) {
831		dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
832		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
833	} else {
834		dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
835		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
836	}
837
838	/* Prepare descriptors. */
839	while (remaining_size) {
840		struct at_xdmac_desc	*desc = NULL;
841
842		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
843
844		spin_lock_irqsave(&atchan->lock, irqflags);
845		desc = at_xdmac_get_desc(atchan);
846		spin_unlock_irqrestore(&atchan->lock, irqflags);
847		if (!desc) {
848			dev_err(chan2dev(chan), "can't get descriptor\n");
849			if (first)
850				list_splice_init(&first->descs_list, &atchan->free_descs_list);
851			return NULL;
852		}
853
854		/* Update src and dest addresses. */
855		src_addr += xfer_size;
856		dst_addr += xfer_size;
857
858		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
859			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
860		else
861			xfer_size = remaining_size;
862
863		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
864
865		/* Check remaining length and change data width if needed. */
866		if (!((src_addr | dst_addr | xfer_size) & 7)) {
867			dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
868			dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
869		} else if (!((src_addr | dst_addr | xfer_size)  & 3)) {
870			dwidth = AT_XDMAC_CC_DWIDTH_WORD;
871			dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
872		} else if (!((src_addr | dst_addr | xfer_size) & 1)) {
873			dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
874			dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
875		} else if ((src_addr | dst_addr | xfer_size) & 1) {
876			dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
877			dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
878		}
879		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
880
881		ublen = xfer_size >> dwidth;
882		remaining_size -= xfer_size;
883
884		desc->lld.mbr_sa = src_addr;
885		desc->lld.mbr_da = dst_addr;
886		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
887			| AT_XDMAC_MBR_UBC_NDEN
888			| AT_XDMAC_MBR_UBC_NSEN
889			| (remaining_size ? AT_XDMAC_MBR_UBC_NDE : 0)
890			| ublen;
891		desc->lld.mbr_cfg = chan_cc;
892
893		dev_dbg(chan2dev(chan),
894			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
895			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
896
897		/* Chain lld. */
898		if (prev) {
899			prev->lld.mbr_nda = desc->tx_dma_desc.phys;
900			dev_dbg(chan2dev(chan),
901				 "%s: chain lld: prev=0x%p, mbr_nda=0x%08x\n",
902				 __func__, prev, prev->lld.mbr_nda);
903		}
904
905		prev = desc;
906		if (!first)
907			first = desc;
908
909		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
910			 __func__, desc, first);
911		list_add_tail(&desc->desc_node, &first->descs_list);
912	}
913
914	first->tx_dma_desc.flags = flags;
915	first->xfer_size = len;
916
917	return &first->tx_dma_desc;
918}
919
920static enum dma_status
921at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
922		struct dma_tx_state *txstate)
923{
924	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
925	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
926	struct at_xdmac_desc	*desc, *_desc;
927	struct list_head	*descs_list;
928	enum dma_status		ret;
929	int			residue, retry;
930	u32			cur_nda, check_nda, cur_ubc, mask, value;
931	u8			dwidth = 0;
932	unsigned long		flags;
933
934	ret = dma_cookie_status(chan, cookie, txstate);
935	if (ret == DMA_COMPLETE)
936		return ret;
937
938	if (!txstate)
939		return ret;
940
941	spin_lock_irqsave(&atchan->lock, flags);
942
943	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
944
945	/*
946	 * If the transfer has not been started yet, don't need to compute the
947	 * residue, it's the transfer length.
948	 */
949	if (!desc->active_xfer) {
950		dma_set_residue(txstate, desc->xfer_size);
951		goto spin_unlock;
952	}
953
954	residue = desc->xfer_size;
955	/*
956	 * Flush FIFO: only relevant when the transfer is source peripheral
957	 * synchronized.
958	 */
959	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
960	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
961	if ((desc->lld.mbr_cfg & mask) == value) {
962		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
963		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
964			cpu_relax();
965	}
966
967	/*
968	 * When processing the residue, we need to read two registers but we
969	 * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
970	 * we stand in the descriptor list and AT_XDMAC_CUBC is used
971	 * to know how many data are remaining for the current descriptor.
972	 * Since the dma channel is not paused to not loose data, between the
973	 * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
974	 * descriptor.
975	 * For that reason, after reading AT_XDMAC_CUBC, we check if we are
976	 * still using the same descriptor by reading a second time
977	 * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
978	 * read again AT_XDMAC_CUBC.
979	 * Memory barriers are used to ensure the read order of the registers.
980	 * A max number of retries is set because unlikely it can never ends if
981	 * we are transferring a lot of data with small buffers.
982	 */
983	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
984	rmb();
985	cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
986	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
987		rmb();
988		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
989
990		if (likely(cur_nda == check_nda))
991			break;
992
993		cur_nda = check_nda;
994		rmb();
995		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
996	}
997
998	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
999		ret = DMA_ERROR;
1000		goto spin_unlock;
1001	}
1002
1003	/*
1004	 * Remove size of all microblocks already transferred and the current
1005	 * one. Then add the remaining size to transfer of the current
1006	 * microblock.
1007	 */
1008	descs_list = &desc->descs_list;
1009	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
1010		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
1011		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
1012		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
1013			break;
1014	}
1015	residue += cur_ubc << dwidth;
1016
1017	dma_set_residue(txstate, residue);
1018
1019	dev_dbg(chan2dev(chan),
1020		 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
1021		 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
1022
1023spin_unlock:
1024	spin_unlock_irqrestore(&atchan->lock, flags);
1025	return ret;
1026}
1027
1028/* Call must be protected by lock. */
1029static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
1030				    struct at_xdmac_desc *desc)
1031{
1032	dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1033
1034	/*
1035	 * Remove the transfer from the transfer list then move the transfer
1036	 * descriptors into the free descriptors list.
1037	 */
1038	list_del(&desc->xfer_node);
1039	list_splice_init(&desc->descs_list, &atchan->free_descs_list);
1040}
1041
1042static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
1043{
1044	struct at_xdmac_desc	*desc;
1045	unsigned long		flags;
1046
1047	spin_lock_irqsave(&atchan->lock, flags);
1048
1049	/*
1050	 * If channel is enabled, do nothing, advance_work will be triggered
1051	 * after the interruption.
1052	 */
1053	if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
1054		desc = list_first_entry(&atchan->xfers_list,
1055					struct at_xdmac_desc,
1056					xfer_node);
1057		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1058		if (!desc->active_xfer)
1059			at_xdmac_start_xfer(atchan, desc);
1060	}
1061
1062	spin_unlock_irqrestore(&atchan->lock, flags);
1063}
1064
1065static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
1066{
1067	struct at_xdmac_desc		*desc;
1068	struct dma_async_tx_descriptor	*txd;
1069
1070	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
1071	txd = &desc->tx_dma_desc;
1072
1073	if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
1074		txd->callback(txd->callback_param);
1075}
1076
1077static void at_xdmac_tasklet(unsigned long data)
1078{
1079	struct at_xdmac_chan	*atchan = (struct at_xdmac_chan *)data;
1080	struct at_xdmac_desc	*desc;
1081	u32			error_mask;
1082
1083	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
1084		 __func__, atchan->status);
1085
1086	error_mask = AT_XDMAC_CIS_RBEIS
1087		     | AT_XDMAC_CIS_WBEIS
1088		     | AT_XDMAC_CIS_ROIS;
1089
1090	if (at_xdmac_chan_is_cyclic(atchan)) {
1091		at_xdmac_handle_cyclic(atchan);
1092	} else if ((atchan->status & AT_XDMAC_CIS_LIS)
1093		   || (atchan->status & error_mask)) {
1094		struct dma_async_tx_descriptor  *txd;
1095
1096		if (atchan->status & AT_XDMAC_CIS_RBEIS)
1097			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
1098		if (atchan->status & AT_XDMAC_CIS_WBEIS)
1099			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
1100		if (atchan->status & AT_XDMAC_CIS_ROIS)
1101			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
1102
1103		spin_lock_bh(&atchan->lock);
1104		desc = list_first_entry(&atchan->xfers_list,
1105					struct at_xdmac_desc,
1106					xfer_node);
1107		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1108		BUG_ON(!desc->active_xfer);
1109
1110		txd = &desc->tx_dma_desc;
1111
1112		at_xdmac_remove_xfer(atchan, desc);
1113		spin_unlock_bh(&atchan->lock);
1114
1115		if (!at_xdmac_chan_is_cyclic(atchan)) {
1116			dma_cookie_complete(txd);
1117			if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
1118				txd->callback(txd->callback_param);
1119		}
1120
1121		dma_run_dependencies(txd);
1122
1123		at_xdmac_advance_work(atchan);
1124	}
1125}
1126
1127static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1128{
1129	struct at_xdmac		*atxdmac = (struct at_xdmac *)dev_id;
1130	struct at_xdmac_chan	*atchan;
1131	u32			imr, status, pending;
1132	u32			chan_imr, chan_status;
1133	int			i, ret = IRQ_NONE;
1134
1135	do {
1136		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1137		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
1138		pending = status & imr;
1139
1140		dev_vdbg(atxdmac->dma.dev,
1141			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
1142			 __func__, status, imr, pending);
1143
1144		if (!pending)
1145			break;
1146
1147		/* We have to find which channel has generated the interrupt. */
1148		for (i = 0; i < atxdmac->dma.chancnt; i++) {
1149			if (!((1 << i) & pending))
1150				continue;
1151
1152			atchan = &atxdmac->chan[i];
1153			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1154			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1155			atchan->status = chan_status & chan_imr;
1156			dev_vdbg(atxdmac->dma.dev,
1157				 "%s: chan%d: imr=0x%x, status=0x%x\n",
1158				 __func__, i, chan_imr, chan_status);
1159			dev_vdbg(chan2dev(&atchan->chan),
1160				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
1161				 __func__,
1162				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
1163				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
1164				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
1165				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
1166				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1167				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1168
1169			if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1170				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1171
1172			tasklet_schedule(&atchan->tasklet);
1173			ret = IRQ_HANDLED;
1174		}
1175
1176	} while (pending);
1177
1178	return ret;
1179}
1180
1181static void at_xdmac_issue_pending(struct dma_chan *chan)
1182{
1183	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1184
1185	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
1186
1187	if (!at_xdmac_chan_is_cyclic(atchan))
1188		at_xdmac_advance_work(atchan);
1189
1190	return;
1191}
1192
1193static int at_xdmac_device_config(struct dma_chan *chan,
1194				  struct dma_slave_config *config)
1195{
1196	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
1197	int ret;
1198	unsigned long		flags;
1199
1200	dev_dbg(chan2dev(chan), "%s\n", __func__);
1201
1202	spin_lock_irqsave(&atchan->lock, flags);
1203	ret = at_xdmac_set_slave_config(chan, config);
1204	spin_unlock_irqrestore(&atchan->lock, flags);
1205
1206	return ret;
1207}
1208
1209static int at_xdmac_device_pause(struct dma_chan *chan)
1210{
1211	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
1212	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
1213	unsigned long		flags;
1214
1215	dev_dbg(chan2dev(chan), "%s\n", __func__);
1216
1217	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
1218		return 0;
1219
1220	spin_lock_irqsave(&atchan->lock, flags);
1221	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
1222	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
1223	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
1224		cpu_relax();
1225	spin_unlock_irqrestore(&atchan->lock, flags);
1226
1227	return 0;
1228}
1229
1230static int at_xdmac_device_resume(struct dma_chan *chan)
1231{
1232	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
1233	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
1234	unsigned long		flags;
1235
1236	dev_dbg(chan2dev(chan), "%s\n", __func__);
1237
1238	spin_lock_irqsave(&atchan->lock, flags);
1239	if (!at_xdmac_chan_is_paused(atchan)) {
1240		spin_unlock_irqrestore(&atchan->lock, flags);
1241		return 0;
1242	}
1243
1244	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
1245	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1246	spin_unlock_irqrestore(&atchan->lock, flags);
1247
1248	return 0;
1249}
1250
1251static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1252{
1253	struct at_xdmac_desc	*desc, *_desc;
1254	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
1255	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
1256	unsigned long		flags;
1257
1258	dev_dbg(chan2dev(chan), "%s\n", __func__);
1259
1260	spin_lock_irqsave(&atchan->lock, flags);
1261	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1262	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1263		cpu_relax();
1264
1265	/* Cancel all pending transfers. */
1266	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
1267		at_xdmac_remove_xfer(atchan, desc);
1268
1269	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1270	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1271	spin_unlock_irqrestore(&atchan->lock, flags);
1272
1273	return 0;
1274}
1275
1276static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
1277{
1278	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
1279	struct at_xdmac_desc	*desc;
1280	int			i;
1281	unsigned long		flags;
1282
1283	spin_lock_irqsave(&atchan->lock, flags);
1284
1285	if (at_xdmac_chan_is_enabled(atchan)) {
1286		dev_err(chan2dev(chan),
1287			"can't allocate channel resources (channel enabled)\n");
1288		i = -EIO;
1289		goto spin_unlock;
1290	}
1291
1292	if (!list_empty(&atchan->free_descs_list)) {
1293		dev_err(chan2dev(chan),
1294			"can't allocate channel resources (channel not free from a previous use)\n");
1295		i = -EIO;
1296		goto spin_unlock;
1297	}
1298
1299	for (i = 0; i < init_nr_desc_per_channel; i++) {
1300		desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
1301		if (!desc) {
1302			dev_warn(chan2dev(chan),
1303				"only %d descriptors have been allocated\n", i);
1304			break;
1305		}
1306		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
1307	}
1308
1309	dma_cookie_init(chan);
1310
1311	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1312
1313spin_unlock:
1314	spin_unlock_irqrestore(&atchan->lock, flags);
1315	return i;
1316}
1317
1318static void at_xdmac_free_chan_resources(struct dma_chan *chan)
1319{
1320	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
1321	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
1322	struct at_xdmac_desc	*desc, *_desc;
1323
1324	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
1325		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
1326		list_del(&desc->desc_node);
1327		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
1328	}
1329
1330	return;
1331}
1332
1333#ifdef CONFIG_PM
1334static int atmel_xdmac_prepare(struct device *dev)
1335{
1336	struct platform_device	*pdev = to_platform_device(dev);
1337	struct at_xdmac		*atxdmac = platform_get_drvdata(pdev);
1338	struct dma_chan		*chan, *_chan;
1339
1340	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1341		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
1342
1343		/* Wait for transfer completion, except in cyclic case. */
1344		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
1345			return -EAGAIN;
1346	}
1347	return 0;
1348}
1349#else
1350#	define atmel_xdmac_prepare NULL
1351#endif
1352
1353#ifdef CONFIG_PM_SLEEP
1354static int atmel_xdmac_suspend(struct device *dev)
1355{
1356	struct platform_device	*pdev = to_platform_device(dev);
1357	struct at_xdmac		*atxdmac = platform_get_drvdata(pdev);
1358	struct dma_chan		*chan, *_chan;
1359
1360	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1361		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
1362
1363		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
1364		if (at_xdmac_chan_is_cyclic(atchan)) {
1365			if (!at_xdmac_chan_is_paused(atchan))
1366				at_xdmac_device_pause(chan);
1367			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1368			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
1369			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
1370		}
1371	}
1372	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1373
1374	at_xdmac_off(atxdmac);
1375	clk_disable_unprepare(atxdmac->clk);
1376	return 0;
1377}
1378
1379static int atmel_xdmac_resume(struct device *dev)
1380{
1381	struct platform_device	*pdev = to_platform_device(dev);
1382	struct at_xdmac		*atxdmac = platform_get_drvdata(pdev);
1383	struct at_xdmac_chan	*atchan;
1384	struct dma_chan		*chan, *_chan;
1385	int			i;
1386
1387	clk_prepare_enable(atxdmac->clk);
1388
1389	/* Clear pending interrupts. */
1390	for (i = 0; i < atxdmac->dma.chancnt; i++) {
1391		atchan = &atxdmac->chan[i];
1392		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
1393			cpu_relax();
1394	}
1395
1396	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
1397	at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
1398	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1399		atchan = to_at_xdmac_chan(chan);
1400		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
1401		if (at_xdmac_chan_is_cyclic(atchan)) {
1402			if (at_xdmac_chan_is_paused(atchan))
1403				at_xdmac_device_resume(chan);
1404			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
1405			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
1406			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
1407			wmb();
1408			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
1409		}
1410	}
1411	return 0;
1412}
1413#endif /* CONFIG_PM_SLEEP */
1414
1415static int at_xdmac_probe(struct platform_device *pdev)
1416{
1417	struct resource	*res;
1418	struct at_xdmac	*atxdmac;
1419	int		irq, size, nr_channels, i, ret;
1420	void __iomem	*base;
1421	u32		reg;
1422
1423	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1424	if (!res)
1425		return -EINVAL;
1426
1427	irq = platform_get_irq(pdev, 0);
1428	if (irq < 0)
1429		return irq;
1430
1431	base = devm_ioremap_resource(&pdev->dev, res);
1432	if (IS_ERR(base))
1433		return PTR_ERR(base);
1434
1435	/*
1436	 * Read number of xdmac channels, read helper function can't be used
1437	 * since atxdmac is not yet allocated and we need to know the number
1438	 * of channels to do the allocation.
1439	 */
1440	reg = readl_relaxed(base + AT_XDMAC_GTYPE);
1441	nr_channels = AT_XDMAC_NB_CH(reg);
1442	if (nr_channels > AT_XDMAC_MAX_CHAN) {
1443		dev_err(&pdev->dev, "invalid number of channels (%u)\n",
1444			nr_channels);
1445		return -EINVAL;
1446	}
1447
1448	size = sizeof(*atxdmac);
1449	size += nr_channels * sizeof(struct at_xdmac_chan);
1450	atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
1451	if (!atxdmac) {
1452		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
1453		return -ENOMEM;
1454	}
1455
1456	atxdmac->regs = base;
1457	atxdmac->irq = irq;
1458
1459	atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
1460	if (IS_ERR(atxdmac->clk)) {
1461		dev_err(&pdev->dev, "can't get dma_clk\n");
1462		return PTR_ERR(atxdmac->clk);
1463	}
1464
1465	/* Do not use dev res to prevent races with tasklet */
1466	ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
1467	if (ret) {
1468		dev_err(&pdev->dev, "can't request irq\n");
1469		return ret;
1470	}
1471
1472	ret = clk_prepare_enable(atxdmac->clk);
1473	if (ret) {
1474		dev_err(&pdev->dev, "can't prepare or enable clock\n");
1475		goto err_free_irq;
1476	}
1477
1478	atxdmac->at_xdmac_desc_pool =
1479		dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
1480				sizeof(struct at_xdmac_desc), 4, 0);
1481	if (!atxdmac->at_xdmac_desc_pool) {
1482		dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
1483		ret = -ENOMEM;
1484		goto err_clk_disable;
1485	}
1486
1487	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
1488	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
1489	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
1490	/*
1491	 * Without DMA_PRIVATE the driver is not able to allocate more than
1492	 * one channel, second allocation fails in private_candidate.
1493	 */
1494	dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
1495	atxdmac->dma.dev				= &pdev->dev;
1496	atxdmac->dma.device_alloc_chan_resources	= at_xdmac_alloc_chan_resources;
1497	atxdmac->dma.device_free_chan_resources		= at_xdmac_free_chan_resources;
1498	atxdmac->dma.device_tx_status			= at_xdmac_tx_status;
1499	atxdmac->dma.device_issue_pending		= at_xdmac_issue_pending;
1500	atxdmac->dma.device_prep_dma_cyclic		= at_xdmac_prep_dma_cyclic;
1501	atxdmac->dma.device_prep_dma_memcpy		= at_xdmac_prep_dma_memcpy;
1502	atxdmac->dma.device_prep_slave_sg		= at_xdmac_prep_slave_sg;
1503	atxdmac->dma.device_config			= at_xdmac_device_config;
1504	atxdmac->dma.device_pause			= at_xdmac_device_pause;
1505	atxdmac->dma.device_resume			= at_xdmac_device_resume;
1506	atxdmac->dma.device_terminate_all		= at_xdmac_device_terminate_all;
1507	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
1508	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
1509	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1510	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1511
1512	/* Disable all chans and interrupts. */
1513	at_xdmac_off(atxdmac);
1514
1515	/* Init channels. */
1516	INIT_LIST_HEAD(&atxdmac->dma.channels);
1517	for (i = 0; i < nr_channels; i++) {
1518		struct at_xdmac_chan *atchan = &atxdmac->chan[i];
1519
1520		atchan->chan.device = &atxdmac->dma;
1521		list_add_tail(&atchan->chan.device_node,
1522			      &atxdmac->dma.channels);
1523
1524		atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
1525		atchan->mask = 1 << i;
1526
1527		spin_lock_init(&atchan->lock);
1528		INIT_LIST_HEAD(&atchan->xfers_list);
1529		INIT_LIST_HEAD(&atchan->free_descs_list);
1530		tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
1531			     (unsigned long)atchan);
1532
1533		/* Clear pending interrupts. */
1534		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
1535			cpu_relax();
1536	}
1537	platform_set_drvdata(pdev, atxdmac);
1538
1539	ret = dma_async_device_register(&atxdmac->dma);
1540	if (ret) {
1541		dev_err(&pdev->dev, "fail to register DMA engine device\n");
1542		goto err_clk_disable;
1543	}
1544
1545	ret = of_dma_controller_register(pdev->dev.of_node,
1546					 at_xdmac_xlate, atxdmac);
1547	if (ret) {
1548		dev_err(&pdev->dev, "could not register of dma controller\n");
1549		goto err_dma_unregister;
1550	}
1551
1552	dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
1553		 nr_channels, atxdmac->regs);
1554
1555	return 0;
1556
1557err_dma_unregister:
1558	dma_async_device_unregister(&atxdmac->dma);
1559err_clk_disable:
1560	clk_disable_unprepare(atxdmac->clk);
1561err_free_irq:
1562	free_irq(atxdmac->irq, atxdmac->dma.dev);
1563	return ret;
1564}
1565
1566static int at_xdmac_remove(struct platform_device *pdev)
1567{
1568	struct at_xdmac	*atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
1569	int		i;
1570
1571	at_xdmac_off(atxdmac);
1572	of_dma_controller_free(pdev->dev.of_node);
1573	dma_async_device_unregister(&atxdmac->dma);
1574	clk_disable_unprepare(atxdmac->clk);
1575
1576	synchronize_irq(atxdmac->irq);
1577
1578	free_irq(atxdmac->irq, atxdmac->dma.dev);
1579
1580	for (i = 0; i < atxdmac->dma.chancnt; i++) {
1581		struct at_xdmac_chan *atchan = &atxdmac->chan[i];
1582
1583		tasklet_kill(&atchan->tasklet);
1584		at_xdmac_free_chan_resources(&atchan->chan);
1585	}
1586
1587	return 0;
1588}
1589
1590static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
1591	.prepare	= atmel_xdmac_prepare,
1592	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
1593};
1594
1595static const struct of_device_id atmel_xdmac_dt_ids[] = {
1596	{
1597		.compatible = "atmel,sama5d4-dma",
1598	}, {
1599		/* sentinel */
1600	}
1601};
1602MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
1603
1604static struct platform_driver at_xdmac_driver = {
1605	.probe		= at_xdmac_probe,
1606	.remove		= at_xdmac_remove,
1607	.driver = {
1608		.name		= "at_xdmac",
1609		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
1610		.pm		= &atmel_xdmac_dev_pm_ops,
1611	}
1612};
1613
1614static int __init at_xdmac_init(void)
1615{
1616	return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
1617}
1618subsys_initcall(at_xdmac_init);
1619
1620MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
1621MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
1622MODULE_LICENSE("GPL");
1623