/*
 * EDMA3 support for DaVinci
 *
 * Copyright (C) 2006-2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/edma.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90
#define SH_QSECR	0x94
#define SH_SIZE		0x200

/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
#define EDMA_PARM	0x4000	/* 128 param entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100  /* 64 registers */

/* CCCFG register */
#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST		BIT(24)

#define EDMA_MAX_DMACH           64
#define EDMA_MAX_PARAMENTRY     512
/*****************************************************************************/

static void __iomem *edmacc_regs_base[EDMA_MAX_CC];

static inline unsigned int edma_read(unsigned ctlr, int offset)
{
	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
}

static inline void edma_write(unsigned ctlr, int offset, int val)
{
	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
}

static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
		unsigned or)
{
	unsigned val = edma_read(ctlr, offset);
	val &= and;
	val |= or;
	edma_write(ctlr, offset, val);
}

static inline void edma_and(unsigned ctlr, int offset, unsigned and)
{
	unsigned val = edma_read(ctlr, offset);
	val &= and;
	edma_write(ctlr, offset, val);
}

static inline void edma_or(unsigned ctlr, int offset, unsigned or)
{
	unsigned val = edma_read(ctlr, offset);
	val |= or;
	edma_write(ctlr, offset, val);
}

static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
	return edma_read(ctlr, offset + (i << 2));
}

static inline void edma_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, offset + (i << 2), val);
}

static inline void edma_modify_array(unsigned ctlr, int offset, int i,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, offset + (i << 2), and, or);
}

static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
	edma_or(ctlr, offset + (i << 2), or);
}

static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
		unsigned or)
{
	edma_or(ctlr, offset + ((i * 2 + j) << 2), or);
}

static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
		unsigned val)
{
	edma_write(ctlr, offset + ((i * 2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
		int i)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
		int param_no)
{
	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
		unsigned val)
{
	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
		unsigned and)
{
	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
		unsigned or)
{
	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
}

static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}
233 
234 /*****************************************************************************/
235 
236 /* actual number of DMA channels and slots on this silicon */
237 struct edma {
238 	/* how many dma resources of each type */
239 	unsigned	num_channels;
240 	unsigned	num_region;
241 	unsigned	num_slots;
242 	unsigned	num_tc;
243 	enum dma_event_q 	default_queue;
244 
	/* list of channels with no event trigger; terminated by "-1" */
	const s8	*noevent;

	struct edma_soc_info *info;

	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
	/* The edma_unused bit for each channel is set unless the channel
	 * is in use on this platform; the bits are cleared by SoC-specific
	 * initialization code.
	 */
	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);

	unsigned	irq_res_start;
	unsigned	irq_res_end;

	struct dma_interrupt_data {
		void (*callback)(unsigned channel, unsigned short ch_status,
				void *data);
		void *data;
	} intr_data[EDMA_MAX_DMACH];
};

static struct edma *edma_cc[EDMA_MAX_CC];
static int arch_num_cc;

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

static const struct of_device_id edma_of_ids[] = {
	{ .compatible = "ti,edma3", },
	{}
};

/*****************************************************************************/

static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
		enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = edma_cc[ctlr]->default_queue;

	queue_no &= 7;
	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
			~(0x7 << bit), queue_no << bit);
}
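
/*
 * Worked example for map_dmach_queue() above (channel number
 * hypothetical): for ch_no = 10, ch_no >> 3 = 1 selects DMAQNUM1 and
 * bit = (10 & 0x7) * 4 = 8, so the queue number lands in the 4-bit
 * field at bits 8-11 of that register.
 */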

static void assign_priority_to_queue(unsigned ctlr, int queue_no,
		int priority)
{
	int bit = queue_no * 4;

	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
			((priority & 0x7) << bit));
}

/**
 * map_dmach_param - maps channel number to param entry number
 *
 * This maps each DMA channel number to a param entry number.  In
 * other words, using the DMA channel mapping registers, a param entry
 * can be mapped to any channel.
 *
 * Callers are responsible for ensuring the channel mapping logic is
 * included in that particular EDMA variant (e.g. dm646x).
 */
static void map_dmach_param(unsigned ctlr)
{
	int i;

	for (i = 0; i < EDMA_MAX_DMACH; i++)
		edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
}

static inline void
setup_dma_interrupt(unsigned lch,
	void (*callback)(unsigned channel, u16 ch_status, void *data),
	void *data)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(lch);
	lch = EDMA_CHAN_SLOT(lch);

	if (!callback)
		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
				BIT(lch & 0x1f));

	edma_cc[ctlr]->intr_data[lch].callback = callback;
	edma_cc[ctlr]->intr_data[lch].data = data;

	if (callback) {
		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
				BIT(lch & 0x1f));
		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
				BIT(lch & 0x1f));
	}
}

static int irq2ctlr(int irq)
{
	if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
		return 0;
	else if (irq >= edma_cc[1]->irq_res_start &&
		irq <= edma_cc[1]->irq_res_end)
		return 1;

	return -1;
}

/******************************************************************************
 *
 * DMA interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = irq2ctlr(irq);
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(data, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ctlr, SH_ICR, bank,
					BIT(slot));
			if (edma_cc[ctlr]->intr_data[channel].callback)
				edma_cc[ctlr]->intr_data[channel].callback(
					EDMA_CTLR_CHAN(ctlr, channel),
					EDMA_DMA_COMPLETE,
					edma_cc[ctlr]->intr_data[channel].data);
		}
	} while (sh_ipr);

	edma_shadow0_write(ctlr, SH_IEVAL, 1);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * DMA error interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	int i;
	int ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(data, "dma_ccerr_handler\n");

	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
	    (edma_read(ctlr, EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;

		if (edma_read_array(ctlr, EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(ctlr, EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(data, "EMR%d %08x\n", j,
					edma_read_array(ctlr, EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;

				if (edma_read_array(ctlr, EDMA_EMR, j) &
							BIT(i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(ctlr, EDMA_EMCR, j,
							BIT(i));
					/* Clear any SER */
					edma_shadow0_write_array(ctlr, SH_SECR,
								j, BIT(i));
					if (edma_cc[ctlr]->intr_data[k].
								callback) {
						edma_cc[ctlr]->intr_data[k].
						callback(
						EDMA_CTLR_CHAN(ctlr, k),
						EDMA_DMA_CC_ERROR,
						edma_cc[ctlr]->intr_data
						[k].data);
					}
				}
			}
		} else if (edma_read(ctlr, EDMA_QEMR)) {
			dev_dbg(data, "QEMR %02x\n",
				edma_read(ctlr, EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
					/* Clear the corresponding QEMR bits */
					edma_write(ctlr, EDMA_QEMCR, BIT(i));
					edma_shadow0_write(ctlr, SH_QSECR,
								BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		} else if (edma_read(ctlr, EDMA_CCERR)) {
			dev_dbg(data, "CCERR %08x\n",
				edma_read(ctlr, EDMA_CCERR));
			/* FIXME:  CCERR.BIT(16) ignored!  much better
			 * to just write CCERRCLR with CCERR value...
			 */
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
					/* Clear the corresponding CCERR bits */
					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		}
		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
		    (edma_read(ctlr, EDMA_CCERR) == 0))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ctlr, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}

static int reserve_contiguous_slots(int ctlr, unsigned int id,
				     unsigned int num_slots,
				     unsigned int start_slot)
{
	int i, j;
	unsigned int count = num_slots;
	int stop_slot = start_slot;
	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);

	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
		j = EDMA_CHAN_SLOT(i);
		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
			/* Record our current beginning slot */
			if (count == num_slots)
				stop_slot = i;

			count--;
			set_bit(j, tmp_inuse);

			if (count == 0)
				break;
		} else {
			clear_bit(j, tmp_inuse);

			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
				stop_slot = i;
				break;
			} else {
				count = num_slots;
			}
		}
	}

	/*
	 * We have to clear any bits that we set
	 * if we run out of parameter RAM slots, i.e. we do find a set
	 * of contiguous parameter RAM slots but do not find the exact number
	 * requested, as we may reach the total number of parameter RAM slots
	 */
	if (i == edma_cc[ctlr]->num_slots)
		stop_slot = i;

	j = start_slot;
	for_each_set_bit_from(j, tmp_inuse, stop_slot)
		clear_bit(j, edma_cc[ctlr]->edma_inuse);

	if (count)
		return -EBUSY;

	for (j = i - num_slots + 1; j <= i; ++j)
		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
}

static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	int i, count, ctlr;
	struct of_phandle_args  dma_spec;

	if (dev->of_node) {
		count = of_property_count_strings(dev->of_node, "dma-names");
		if (count < 0)
			return 0;
		for (i = 0; i < count; i++) {
			if (of_parse_phandle_with_args(dev->of_node, "dmas",
						       "#dma-cells", i,
						       &dma_spec))
				continue;

			if (!of_match_node(edma_of_ids, dma_spec.np)) {
				of_node_put(dma_spec.np);
				continue;
			}

			clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
				  edma_cc[0]->edma_unused);
			of_node_put(dma_spec.np);
		}
		return 0;
	}

	/* For non-OF case */
	for (i = 0; i < pdev->num_resources; i++) {
		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
				(int)pdev->resource[i].start >= 0) {
			ctlr = EDMA_CTLR(pdev->resource[i].start);
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
				  edma_cc[ctlr]->edma_unused);
		}
	}

	return 0;
}

/*-----------------------------------------------------------------------*/

static bool unused_chan_list_done;

/* Resource alloc/free:  dma channels, parameter RAM slots */

/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @callback: optional; to be issued on DMA completion or errors
 * @data: passed to callback
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *	Controller (TC) executes requests using this channel.  Use
 *	EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel.  When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining.  When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set.  Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
int edma_alloc_channel(int channel,
		void (*callback)(unsigned channel, u16 ch_status, void *data),
		void *data,
		enum dma_event_q eventq_no)
{
	unsigned i, done = 0, ctlr = 0;
	int ret = 0;

	if (!unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
				prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		unused_chan_list_done = true;
	}

	if (channel >= 0) {
		ctlr = EDMA_CTLR(channel);
		channel = EDMA_CHAN_SLOT(channel);
	}

	if (channel < 0) {
		for (i = 0; i < arch_num_cc; i++) {
			channel = 0;
			for (;;) {
				channel = find_next_bit(edma_cc[i]->edma_unused,
						edma_cc[i]->num_channels,
						channel);
				if (channel == edma_cc[i]->num_channels)
					break;
				if (!test_and_set_bit(channel,
						edma_cc[i]->edma_inuse)) {
					done = 1;
					ctlr = i;
					break;
				}
				channel++;
			}
			if (done)
				break;
		}
		if (!done)
			return -ENOMEM;
	} else if (channel >= edma_cc[ctlr]->num_channels) {
		return -EINVAL;
	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);

	if (callback)
		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
					callback, data);

	map_dmach_queue(ctlr, channel, eventq_no);

	return EDMA_CTLR_CHAN(ctlr, channel);
}
EXPORT_SYMBOL(edma_alloc_channel);

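/*
 * Illustrative sketch (not part of this driver): a client typically
 * allocates the channel wired to its peripheral's hardware event and
 * passes a completion callback.  The channel number, callback and
 * completion below are hypothetical.
 *
 *	static void xfer_cb(unsigned ch, u16 ch_status, void *data)
 *	{
 *		if (ch_status == EDMA_DMA_COMPLETE)
 *			complete(data);
 *	}
 *
 *	int ch = edma_alloc_channel(EDMA_CTLR_CHAN(0, 20), xfer_cb,
 *				    &done, EVENTQ_DEFAULT);
 *	if (ch < 0)
 *		return ch;
 */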

/**
 * edma_free_channel - deallocate DMA channel
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
void edma_free_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;

	setup_dma_interrupt(channel, NULL, NULL);
	/* REVISIT should probably take out of shadow region 0 */

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);
	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
	if (!edma_cc[ctlr])
		return -EINVAL;

	if (slot >= 0)
		slot = EDMA_CHAN_SLOT(slot);

	if (slot < 0) {
		slot = edma_cc[ctlr]->num_channels;
		for (;;) {
			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
					edma_cc[ctlr]->num_slots, slot);
			if (slot == edma_cc[ctlr]->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
				break;
		}
	} else if (slot < edma_cc[ctlr]->num_channels ||
			slot >= edma_cc[ctlr]->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, slot);
}
EXPORT_SYMBOL(edma_alloc_slot);
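
/*
 * Illustrative sketch: drivers usually grab an extra slot on the same
 * CC to link from their channel's slot (see edma_link() below), with
 * EDMA_SLOT_ANY requesting "any unused slot" as described above:
 *
 *	int slot = edma_alloc_slot(0, EDMA_SLOT_ANY);
 *	if (slot < 0)
 *		return slot;
 */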

/**
 * edma_free_slot - deallocate DMA parameter RAM
 * @slot: parameter RAM slot returned from edma_alloc_slot()
 *
 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
void edma_free_slot(unsigned slot)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots)
		return;

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);
	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);


/**
 * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
 * The API will return the starting point of a set of
 * contiguous parameter RAM slots that have been requested
 *
 * @id: can only be EDMA_CONT_PARAMS_ANY, EDMA_CONT_PARAMS_FIXED_EXACT
 * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @count: number of contiguous parameter RAM slots
 * @slot: the start value of the parameter RAM slot that should be passed if
 * @id is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 *
 * If @id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
 * contiguous parameter RAM slots from parameter RAM 64 in the case of
 * DaVinci SOCs and 32 in the case of DA8xx SOCs.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
 * set of contiguous parameter RAM slots from the @slot that is passed as an
 * argument to the API.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially starts
 * looking for a set of contiguous parameter RAM slots from the @slot
 * that is passed as an argument to the API.  On failure the API will try to
 * find a set of contiguous parameter RAM slots from the remaining parameter
 * RAM slots.
 */
int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
{
	/*
	 * The start slot requested should be greater than or equal to
	 * the number of channels and less than the total number of slots
	 */
	if ((id != EDMA_CONT_PARAMS_ANY) &&
		(slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots))
		return -EINVAL;

	/*
	 * The number of parameter RAM slots requested cannot be less than 1
	 * and cannot be more than the number of slots minus the number of
	 * channels
	 */
	if (count < 1 || count >
		(edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
		return -EINVAL;

	switch (id) {
	case EDMA_CONT_PARAMS_ANY:
		return reserve_contiguous_slots(ctlr, id, count,
						 edma_cc[ctlr]->num_channels);
	case EDMA_CONT_PARAMS_FIXED_EXACT:
	case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
		return reserve_contiguous_slots(ctlr, id, count, slot);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(edma_alloc_cont_slots);
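
/*
 * Illustrative sketch: request three contiguous slots anywhere in the
 * parameter RAM of CC0 (values hypothetical):
 *
 *	int first = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_ANY, 0, 3);
 *	if (first < 0)
 *		return first;
 *	...
 *	edma_free_cont_slots(first, 3);
 */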

/**
 * edma_free_cont_slots - deallocate DMA parameter RAM slots
 * @slot: first parameter RAM of a set of parameter RAM slots to be freed
 * @count: the number of contiguous parameter RAM slots to be freed
 *
 * This deallocates the parameter RAM slots allocated by
 * edma_alloc_cont_slots.
 * Callers/applications need to keep track of sets of contiguous
 * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
 * API.
 * Callers are responsible for ensuring the slots are inactive, and will
 * not be activated.
 */
int edma_free_cont_slots(unsigned slot, int count)
{
	unsigned ctlr, slot_to_free;
	int i;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots ||
		count < 1)
		return -EINVAL;

	for (i = slot; i < slot + count; ++i) {
		ctlr = EDMA_CTLR(i);
		slot_to_free = EDMA_CHAN_SLOT(i);

		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
			&dummy_paramset, PARM_SIZE);
		clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
	}

	return 0;
}
EXPORT_SYMBOL(edma_free_cont_slots);

/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (i) -- read/write partial slots */

/**
 * edma_set_src - set initial DMA source address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @src_port: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma_set_src_index().
 */
void edma_set_src(unsigned slot, dma_addr_t src_port,
				enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set SAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
		} else {
			/* clear SAM */
			i &= ~SAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);

		/* set the source port address
		   in source register of param structure */
		edma_parm_write(ctlr, PARM_SRC, slot, src_port);
	}
}
EXPORT_SYMBOL(edma_set_src);

/**
 * edma_set_dest - set initial DMA destination address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @dest_port: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma_set_dest_index().
 */
void edma_set_dest(unsigned slot, dma_addr_t dest_port,
				 enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set DAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
		} else {
			/* clear DAM */
			i &= ~DAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);
		/* set the destination port address
		   in dest register of param structure */
		edma_parm_write(ctlr, PARM_DST, slot, dest_port);
	}
}
EXPORT_SYMBOL(edma_set_dest);

/**
 * edma_get_position - returns the current transfer point
 * @slot: parameter RAM slot being examined
 * @dst:  true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
dma_addr_t edma_get_position(unsigned slot, bool dst)
{
	u32 offs, ctlr = EDMA_CTLR(slot);

	slot = EDMA_CHAN_SLOT(slot);

	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ctlr, offs);
}

/**
 * edma_set_src_index - configure DMA source address indexing
 * @slot: parameter RAM slot being configured
 * @src_bidx: byte offset between source arrays in a frame
 * @src_cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0xffff0000, src_bidx);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0xffff0000, src_cidx);
	}
}
EXPORT_SYMBOL(edma_set_src_index);

/**
 * edma_set_dest_index - configure DMA destination address indexing
 * @slot: parameter RAM slot being configured
 * @dest_bidx: byte offset between destination arrays in a frame
 * @dest_cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0x0000ffff, dest_bidx << 16);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0x0000ffff, dest_cidx << 16);
	}
}
EXPORT_SYMBOL(edma_set_dest_index);
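
/*
 * Worked example (layout hypothetical): copy a frame of 16-byte lines
 * from a source whose lines are spaced 32 bytes apart into a packed
 * destination.  With a one-frame block (ccnt == 1) the cidx values are
 * unused and left zero:
 *
 *	edma_set_src_index(slot, 32, 0);
 *	edma_set_dest_index(slot, 16, 0);
 */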

/**
 * edma_set_transfer_params - configure DMA transfer parameters
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *	the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma_set_transfer_params(unsigned slot,
		u16 acnt, u16 bcnt, u16 ccnt,
		u16 bcnt_rld, enum sync_dimension sync_mode)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
				0x0000ffff, bcnt_rld << 16);
		if (sync_mode == ASYNC)
			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
		else
			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
		/* Set the acount, bcount, ccount registers */
		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
	}
}
EXPORT_SYMBOL(edma_set_transfer_params);
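
/*
 * Illustrative sketch of the AB-Synchronized FIFO case described
 * above (numbers hypothetical): a FIFO 32 bytes wide and 8 entries
 * deep, moving 64 frames per block.  @bcnt_rld is only used for
 * A-Synchronized transfers, so it is left zero here:
 *
 *	edma_set_transfer_params(slot, 32, 8, 64, 0, ABSYNC);
 */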

/**
 * edma_link - link one parameter RAM slot to another
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
void edma_link(unsigned from, unsigned to)
{
	unsigned ctlr_from, ctlr_to;

	ctlr_from = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);
	ctlr_to = EDMA_CTLR(to);
	to = EDMA_CHAN_SLOT(to);

	if (from >= edma_cc[ctlr_from]->num_slots)
		return;
	if (to >= edma_cc[ctlr_to]->num_slots)
		return;
	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
				PARM_OFFSET(to));
}
EXPORT_SYMBOL(edma_link);

/**
 * edma_unlink - cut link from one parameter RAM slot
 * @from: parameter RAM slot originating the link
 *
 * The originating slot should not be part of any active DMA transfer.
 * Its link is set to 0xffff.
 */
void edma_unlink(unsigned from)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);

	if (from >= edma_cc[ctlr]->num_slots)
		return;
	edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
}
EXPORT_SYMBOL(edma_unlink);
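
/*
 * Illustrative sketch: a ping-pong setup links two slots to each
 * other, so that when one transfer completes its slot's data is
 * reloaded from the other (slot numbers hypothetical):
 *
 *	edma_link(ping, pong);
 *	edma_link(pong, ping);
 */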

/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (ii) -- read/write whole parameter sets */

/**
 * edma_write_slot - write parameter RAM data for slot
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma_write_slot(unsigned slot, const struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_write_slot);

/**
 * edma_read_slot - read parameter RAM data from slot
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save them as a template for later reuse.
 */
void edma_read_slot(unsigned slot, struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_read_slot);
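
/*
 * Illustrative sketch: read a slot's current PaRAM set, tweak it, and
 * write the whole set back in one go.  Only the two fields also used
 * by dummy_paramset above are assumed here; a real transfer would
 * fill in the remaining fields as well.
 *
 *	struct edmacc_param p;
 *
 *	edma_read_slot(slot, &p);
 *	p.link_bcntrld = 0xffff;
 *	p.ccnt = 1;
 *	edma_write_slot(slot, &p);
 */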

/*-----------------------------------------------------------------------*/

/* Various EDMA channel control operations */

/**
 * edma_pause - pause dma on a channel
 * @channel: on which edma_start() has been called
 *
 * This temporarily disables EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers on its behalf
 */
void edma_pause(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_pause);

/**
 * edma_resume - resumes dma on a paused channel
 * @channel: on which edma_pause() has been called
 *
 * This re-enables EDMA hardware events on the specified channel.
 */
void edma_resume(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_resume);

int edma_trigger_channel(unsigned channel)
{
	unsigned ctlr;
	unsigned int mask;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);
	mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ctlr, SH_ESR, (channel >> 5), mask);

	pr_debug("EDMA: ESR%d %08x\n", (channel >> 5),
		 edma_shadow0_read_array(ctlr, SH_ESR, (channel >> 5)));
	return 0;
}
EXPORT_SYMBOL(edma_trigger_channel);

/**
 * edma_start - start dma on a channel
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
int edma_start(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		/* EDMA channels without event association */
		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
			pr_debug("EDMA: ESR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_ESR, j));
			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		pr_debug("EDMA: ER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ctlr, EDMA_ECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
		pr_debug("EDMA: EER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_EER, j));
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(edma_start);
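
/*
 * Illustrative sketch tying the channel control calls together
 * (channel number and the xfer_cb callback are hypothetical, as in the
 * edma_alloc_channel() example above):
 *
 *	int ch = edma_alloc_channel(EDMA_CTLR_CHAN(0, 20), xfer_cb,
 *				    &done, EVENTQ_DEFAULT);
 *	...program the channel's PaRAM slot...
 *	edma_start(ch);
 *	...wait for the completion callback...
 *	edma_stop(ch);
 *	edma_free_channel(ch);
 */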

/**
 * edma_stop - stops dma on the channel passed
 * @channel: channel being deactivated
 *
 * Any active transfer on @channel is paused and all pending hardware
 * events are cleared.  The current transfer may not be resumed, and
 * the channel's Parameter RAM should be reinitialized before being
 * reused.
 */
void edma_stop(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);

		pr_debug("EDMA: EER%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_EER, j));

		/* REVISIT:  consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}
EXPORT_SYMBOL(edma_stop);

/******************************************************************************
 *
 * It cleans the ParamEntry and brings EDMA back to its initial state if the
 * media has been removed before EDMA has finished.  It is useful for
 * removable media.
 * Arguments:
 *      channel   - channel number
 *
 * FIXME this should not be needed ... edma_stop() should suffice.
 *
 *****************************************************************************/

void edma_clean_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = BIT(channel & 0x1f);

		pr_debug("EDMA: EMR%d %08x\n", j,
				edma_read_array(ctlr, EDMA_EMR, j));
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
	}
}
EXPORT_SYMBOL(edma_clean_channel);

/*
 * edma_clear_event - clear an outstanding event on the DMA channel
 * Arguments:
 *	channel - channel number
 */
void edma_clear_event(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;
	if (channel < 32)
		edma_write(ctlr, EDMA_ECR, BIT(channel));
	else
		edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
}
EXPORT_SYMBOL(edma_clear_event);

/*
 * edma_assign_channel_eventq - move given channel to desired eventq
 * Arguments:
 *	channel - channel number
 *	eventq_no - queue to move the channel
 *
 * Can be used to move a channel to a selected event queue.
 */
void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = edma_cc[ctlr]->default_queue;
	if (eventq_no >= edma_cc[ctlr]->num_tc)
		return;

	map_dmach_queue(ctlr, channel, eventq_no);
}
EXPORT_SYMBOL(edma_assign_channel_eventq);

static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma *edma_cc, int cc_id)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(cc_id, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	edma_cc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	edma_cc->num_channels = BIT(value + 1);

	value = GET_NUM_PAENTRY(cccfg);
	edma_cc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	edma_cc->num_tc = value + 1;

	dev_dbg(dev, "eDMA3 CC%d HW configuration (cccfg: 0x%08x):\n", cc_id,
		cccfg);
	dev_dbg(dev, "num_region: %u\n", edma_cc->num_region);
	dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels);
	dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", edma_cc->num_tc);

	/* Nothing needs to be done if the queue priority mapping is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	/* one s8 pair per TC plus the terminating {-1, -1} entry */
	queue_priority_map = devm_kzalloc(dev,
					  (edma_cc->num_tc + 1) * 2 * sizeof(s8),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < edma_cc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}

#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)

static int edma_xbar_event_map(struct device *dev, struct device_node *node,
			       struct edma_soc_info *pdata, size_t sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
	return 0;
}

static int edma_of_parse_dt(struct device *dev,
			    struct device_node *node,
			    struct edma_soc_info *pdata)
{
	int ret = 0;
	struct property *prop;
	size_t sz;
	struct edma_rsv_info *rsv_info;

	rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
	if (!rsv_info)
		return -ENOMEM;
	pdata->rsv = rsv_info;

	prop = of_find_property(node, "ti,edma-xbar-event-map", &sz);
	if (prop)
		ret = edma_xbar_event_map(dev, node, pdata, sz);

	return ret;
}

static struct of_dma_filter_info edma_filter_info = {
	.filter_fn = edma_filter_fn,
};

static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						      struct device_node *node)
{
	struct edma_soc_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	ret = edma_of_parse_dt(dev, node, info);
	if (ret)
		return ERR_PTR(ret);

	dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap);
	dma_cap_set(DMA_CYCLIC, edma_filter_info.dma_cap);
	of_dma_controller_register(dev->of_node, of_dma_simple_xlate,
				   &edma_filter_info);

	return info;
}
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						      struct device_node *node)
{
	return ERR_PTR(-ENOSYS);
}
#endif

static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info	**info = pdev->dev.platform_data;
	struct edma_soc_info    *ninfo[EDMA_MAX_CC] = {NULL};
	s8		(*queue_priority_mapping)[2];
	int			i, j, off, ln, found = 0;
	int			status = -1;
	const s16		(*rsv_chans)[2];
	const s16		(*rsv_slots)[2];
	const s16		(*xbar_chans)[2];
	int			irq[EDMA_MAX_CC] = {0, 0};
	int			err_irq[EDMA_MAX_CC] = {0, 0};
	struct resource		*r[EDMA_MAX_CC] = {NULL};
	struct resource		res[EDMA_MAX_CC];
	char			res_name[10];
	struct device_node	*node = pdev->dev.of_node;
	struct device		*dev = &pdev->dev;
	int			ret;
	struct platform_device_info edma_dev_info = {
		.name = "edma-dma-engine",
		.dma_mask = DMA_BIT_MASK(32),
		.parent = &pdev->dev,
	};

	if (node) {
		/* Check if this is a second instance registered */
		if (arch_num_cc) {
			dev_err(dev, "only one EDMA instance is supported via DT\n");
			return -ENODEV;
		}

		ninfo[0] = edma_setup_info_from_dt(dev, node);
		if (IS_ERR(ninfo[0])) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(ninfo[0]);
		}

		info = ninfo;
	}

	if (!info)
		return -ENODEV;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}

	for (j = 0; j < EDMA_MAX_CC; j++) {
		if (!info[j]) {
			if (!found)
				return -ENODEV;
			break;
		}
		if (node) {
			ret = of_address_to_resource(node, j, &res[j]);
			if (!ret)
				r[j] = &res[j];
		} else {
			sprintf(res_name, "edma_cc%d", j);
			r[j] = platform_get_resource_byname(pdev,
						IORESOURCE_MEM,
						res_name);
		}
		if (!r[j]) {
			if (found)
				break;
			else
				return -ENODEV;
		} else {
			found = 1;
		}

		edmacc_regs_base[j] = devm_ioremap_resource(&pdev->dev, r[j]);
		if (IS_ERR(edmacc_regs_base[j]))
			return PTR_ERR(edmacc_regs_base[j]);

		edma_cc[j] = devm_kzalloc(&pdev->dev, sizeof(struct edma),
					  GFP_KERNEL);
		if (!edma_cc[j])
			return -ENOMEM;

		/* Get eDMA3 configuration from IP */
		ret = edma_setup_from_hw(dev, info[j], edma_cc[j], j);
		if (ret)
			return ret;

		edma_cc[j]->default_queue = info[j]->default_queue;

		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
			edmacc_regs_base[j]);

		for (i = 0; i < edma_cc[j]->num_slots; i++)
			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
					&dummy_paramset, PARM_SIZE);

		/* Mark all channels as unused */
		memset(edma_cc[j]->edma_unused, 0xff,
			sizeof(edma_cc[j]->edma_unused));

		if (info[j]->rsv) {

			/* Clear the reserved channels in unused list */
			rsv_chans = info[j]->rsv->rsv_chans;
			if (rsv_chans) {
				for (i = 0; rsv_chans[i][0] != -1; i++) {
					off = rsv_chans[i][0];
					ln = rsv_chans[i][1];
					clear_bits(off, ln,
						  edma_cc[j]->edma_unused);
				}
			}

			/* Set the reserved slots in inuse list */
			rsv_slots = info[j]->rsv->rsv_slots;
			if (rsv_slots) {
				for (i = 0; rsv_slots[i][0] != -1; i++) {
					off = rsv_slots[i][0];
					ln = rsv_slots[i][1];
					set_bits(off, ln,
						edma_cc[j]->edma_inuse);
				}
			}
		}

		/* Clear the xbar mapped channels in unused list */
		xbar_chans = info[j]->xbar_chans;
		if (xbar_chans) {
			for (i = 0; xbar_chans[i][1] != -1; i++) {
				off = xbar_chans[i][1];
				clear_bits(off, 1,
					   edma_cc[j]->edma_unused);
			}
		}

		if (node) {
			irq[j] = irq_of_parse_and_map(node, 0);
			err_irq[j] = irq_of_parse_and_map(node, 2);
		} else {
			char irq_name[10];

			sprintf(irq_name, "edma%d", j);
			irq[j] = platform_get_irq_byname(pdev, irq_name);

			sprintf(irq_name, "edma%d_err", j);
			err_irq[j] = platform_get_irq_byname(pdev, irq_name);
		}
		edma_cc[j]->irq_res_start = irq[j];
		edma_cc[j]->irq_res_end = err_irq[j];

		status = devm_request_irq(dev, irq[j], dma_irq_handler, 0,
					  "edma", dev);
		if (status < 0) {
			dev_dbg(&pdev->dev,
				"devm_request_irq %d failed --> %d\n",
				irq[j], status);
			return status;
		}

		status = devm_request_irq(dev, err_irq[j], dma_ccerr_handler, 0,
					  "edma_error", dev);
		if (status < 0) {
			dev_dbg(&pdev->dev,
				"devm_request_irq %d failed --> %d\n",
				err_irq[j], status);
			return status;
		}

		for (i = 0; i < edma_cc[j]->num_channels; i++)
			map_dmach_queue(j, i, info[j]->default_queue);

		queue_priority_mapping = info[j]->queue_priority_mapping;

		/* Event queue priority mapping */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
						queue_priority_mapping[i][0],
						queue_priority_mapping[i][1]);

		/* Map the channel to param entry if channel mapping logic
		 * exists
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		for (i = 0; i < edma_cc[j]->num_region; i++) {
			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
			edma_write_array(j, EDMA_QRAE, i, 0x0);
		}
		edma_cc[j]->info = info[j];
		arch_num_cc++;

		edma_dev_info.id = j;
		platform_device_register_full(&edma_dev_info);
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int edma_pm_resume(struct device *dev)
{
	int i, j;

	for (j = 0; j < arch_num_cc; j++) {
		struct edma *cc = edma_cc[j];

		s8 (*queue_priority_mapping)[2];

		queue_priority_mapping = cc->info->queue_priority_mapping;

		/* Event queue priority mapping */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
						 queue_priority_mapping[i][0],
						 queue_priority_mapping[i][1]);

		/*
		 * Map the channel to param entry if channel mapping logic
		 * exists
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		for (i = 0; i < cc->num_channels; i++) {
			if (test_bit(i, cc->edma_inuse)) {
				/* ensure access through shadow region 0 */
				edma_or_array2(j, EDMA_DRAE, 0, i >> 5,
					       BIT(i & 0x1f));

				setup_dma_interrupt(i,
						    cc->intr_data[i].callback,
						    cc->intr_data[i].data);
			}
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops edma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume)
};

static struct platform_driver edma_driver = {
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
	.probe = edma_probe,
};

static int __init edma_init(void)
{
	return platform_driver_probe(&edma_driver, edma_probe);
}
arch_initcall(edma_init);