/*
 * File:         bf5xx_sport.c
 * Based on:
 * Author:       Roy Huang <roy.huang@analog.com>
 *
 * Created:      Tue Sep 21 10:52:42 CEST 2004
 * Description:
 *               Blackfin SPORT Driver
 *
 *               Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <asm/portmux.h>
#include <asm/dma.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>

#include "bf5xx-sport.h"

/* delay between frame sync pulse and first data bit in multichannel mode */
#define FRAME_DELAY (1<<12)

/* note: multichannel mode operates in units of 8 channels;
 * tdm_count is the number of channels, NOT channels/8! */
int sport_set_multichannel(struct sport_device *sport,
		int tdm_count, u32 tx_mask, u32 rx_mask, int packed)
{
	pr_debug("%s tdm_count=%d tx_mask:0x%08x rx_mask:0x%08x packed=%d\n",
			__func__, tdm_count, tx_mask, rx_mask, packed);

	if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN))
		return -EBUSY;

	if (tdm_count & 0x7)
		return -EINVAL;
	if (tdm_count > 32)
		return -EINVAL; /* Only up to 32 channels are supported */

	if (tdm_count) {
		sport->regs->mcmc1 = ((tdm_count>>3)-1) << 12;
		sport->regs->mcmc2 = FRAME_DELAY | MCMEN |
				(packed ? (MCDTXPE|MCDRXPE) : 0);

		sport->regs->mtcs0 = tx_mask;
		sport->regs->mrcs0 = rx_mask;
	} else {
		sport->regs->mcmc1 = 0;
		sport->regs->mcmc2 = 0;

		sport->regs->mtcs0 = 0;
		sport->regs->mrcs0 = 0;
	}

	sport->regs->mtcs1 = 0; sport->regs->mtcs2 = 0; sport->regs->mtcs3 = 0;
	sport->regs->mrcs1 = 0; sport->regs->mrcs2 = 0; sport->regs->mrcs3 = 0;

	SSYNC();

	return 0;
}
EXPORT_SYMBOL(sport_set_multichannel);
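
/*
 * Example (illustrative, not part of the driver): configure a TDM bus with
 * 16 active channels, all 16 slots enabled in both directions, unpacked:
 *
 *	ret = sport_set_multichannel(sport, 16, 0xffff, 0xffff, 0);
 *	if (ret)
 *		pr_err("SPORT busy; disable TSPEN/RSPEN first\n");
 *
 * tdm_count must be a multiple of 8; here (16>>3)-1 == 1 is written into
 * the MCMC1 window size field by the code above.
 */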

int sport_config_rx(struct sport_device *sport, unsigned int rcr1,
		unsigned int rcr2, unsigned int clkdiv, unsigned int fsdiv)
{
	if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN))
		return -EBUSY;

	sport->regs->rcr1 = rcr1;
	sport->regs->rcr2 = rcr2;
	sport->regs->rclkdiv = clkdiv;
	sport->regs->rfsdiv = fsdiv;

	SSYNC();

	return 0;
}
EXPORT_SYMBOL(sport_config_rx);

int sport_config_tx(struct sport_device *sport, unsigned int tcr1,
		unsigned int tcr2, unsigned int clkdiv, unsigned int fsdiv)
{
	if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN))
		return -EBUSY;

	sport->regs->tcr1 = tcr1;
	sport->regs->tcr2 = tcr2;
	sport->regs->tclkdiv = clkdiv;
	sport->regs->tfsdiv = fsdiv;

	SSYNC();

	return 0;
}
EXPORT_SYMBOL(sport_config_tx);
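
/*
 * A minimal sketch of picking the dividers, assuming the standard Blackfin
 * SPORT clocking formulas (serial clock = SCLK / (2 * (clkdiv + 1)), one
 * frame sync every fsdiv + 1 serial clocks).  For a 48 kHz frame rate with
 * 64 bits per frame (3.072 MHz bit clock) from a 98.304 MHz SCLK:
 *
 *	clkdiv = 98304000 / (2 * 3072000) - 1;	// = 15
 *	fsdiv  = 64 - 1;			// frame sync every 64 bits
 *	sport_config_tx(sport, tcr1, tcr2, clkdiv, fsdiv);
 *
 * with tcr1/tcr2 prepared according to the desired serial mode.
 */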

static void setup_desc(struct dmasg *desc, void *buf, int fragcount,
		size_t fragsize, unsigned int cfg,
		unsigned int x_count, unsigned int ycount, size_t wdsize)
{
	int i;

	for (i = 0; i < fragcount; ++i) {
		desc[i].next_desc_addr = &(desc[i + 1]);
		desc[i].start_addr = (unsigned long)buf + i*fragsize;
		desc[i].cfg = cfg;
		desc[i].x_count = x_count;
		desc[i].x_modify = wdsize;
		desc[i].y_count = ycount;
		desc[i].y_modify = wdsize;
	}

	/* make circular */
	desc[fragcount-1].next_desc_addr = desc;

	pr_debug("setup desc: desc0=%p, next0=%p, desc1=%p, "
		"next1=%p\nx_count=%x, y_count=%x, addr=0x%lx, cfg=0x%x\n",
		desc, desc[0].next_desc_addr,
		desc+1, desc[1].next_desc_addr,
		desc[0].x_count, desc[0].y_count,
		desc[0].start_addr, desc[0].cfg);
}
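
/*
 * The resulting large-descriptor ring for fragcount == 3 (each descriptor
 * transfers one fragment, then the DMA engine follows next_desc_addr):
 *
 *	desc[0] -> desc[1] -> desc[2]
 *	   ^---------------------'
 *
 * With DI_EN set in cfg, an interrupt is raised after each fragment.
 */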

static int sport_start(struct sport_device *sport)
{
	enable_dma(sport->dma_rx_chan);
	enable_dma(sport->dma_tx_chan);
	sport->regs->rcr1 |= RSPEN;
	sport->regs->tcr1 |= TSPEN;
	SSYNC();

	return 0;
}

static int sport_stop(struct sport_device *sport)
{
	sport->regs->tcr1 &= ~TSPEN;
	sport->regs->rcr1 &= ~RSPEN;
	SSYNC();

	disable_dma(sport->dma_rx_chan);
	disable_dma(sport->dma_tx_chan);
	return 0;
}

static inline int sport_hook_rx_dummy(struct sport_device *sport)
{
	struct dmasg *desc, temp_desc;
	unsigned long flags;

	if (WARN_ON(!sport->dummy_rx_desc) ||
	    WARN_ON(sport->curr_rx_desc == sport->dummy_rx_desc))
		return -EINVAL;

	/* The dummy descriptor may still point at itself from a previous
	 * run; restore the two-descriptor chain */
	sport->dummy_rx_desc->next_desc_addr = sport->dummy_rx_desc + 1;

	local_irq_save(flags);
	desc = get_dma_next_desc_ptr(sport->dma_rx_chan);
	/* Back up the descriptor we are about to modify */
	temp_desc = *desc;
	desc->x_count = sport->dummy_count / 2;
	desc->y_count = 0;
	desc->next_desc_addr = sport->dummy_rx_desc;
	local_irq_restore(flags);
	/* Wait until the DMA has advanced into the dummy descriptor ring */
	while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) -
			sizeof(struct dmasg)) != sport->dummy_rx_desc)
		continue;
	sport->curr_rx_desc = sport->dummy_rx_desc;
	/* Restore the modified descriptor */
	*desc = temp_desc;

	return 0;
}

static inline int sport_rx_dma_start(struct sport_device *sport, int dummy)
{
	if (dummy) {
		sport->dummy_rx_desc->next_desc_addr = sport->dummy_rx_desc;
		sport->curr_rx_desc = sport->dummy_rx_desc;
	} else {
		sport->curr_rx_desc = sport->dma_rx_desc;
	}

	set_dma_next_desc_addr(sport->dma_rx_chan, sport->curr_rx_desc);
	set_dma_x_count(sport->dma_rx_chan, 0);
	set_dma_x_modify(sport->dma_rx_chan, 0);
	set_dma_config(sport->dma_rx_chan,
			(DMAFLOW_LARGE | NDSIZE_9 | WDSIZE_32 | WNR));
	set_dma_curr_addr(sport->dma_rx_chan, sport->curr_rx_desc->start_addr);
	SSYNC();

	return 0;
}

static inline int sport_tx_dma_start(struct sport_device *sport, int dummy)
{
	if (dummy) {
		sport->dummy_tx_desc->next_desc_addr = sport->dummy_tx_desc;
		sport->curr_tx_desc = sport->dummy_tx_desc;
	} else {
		sport->curr_tx_desc = sport->dma_tx_desc;
	}

	set_dma_next_desc_addr(sport->dma_tx_chan, sport->curr_tx_desc);
	set_dma_x_count(sport->dma_tx_chan, 0);
	set_dma_x_modify(sport->dma_tx_chan, 0);
	set_dma_config(sport->dma_tx_chan,
			(DMAFLOW_LARGE | NDSIZE_9 | WDSIZE_32));
	set_dma_curr_addr(sport->dma_tx_chan, sport->curr_tx_desc->start_addr);
	SSYNC();

	return 0;
}

int sport_rx_start(struct sport_device *sport)
{
	unsigned long flags;

	pr_debug("%s enter\n", __func__);
	if (sport->rx_run)
		return -EBUSY;
	if (sport->tx_run) {
		/* tx is running, rx is not running */
		if (WARN_ON(!sport->dma_rx_desc) ||
		    WARN_ON(sport->curr_rx_desc != sport->dummy_rx_desc))
			return -EINVAL;
		local_irq_save(flags);
		while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) -
			sizeof(struct dmasg)) != sport->dummy_rx_desc)
			continue;
		sport->dummy_rx_desc->next_desc_addr = sport->dma_rx_desc;
		local_irq_restore(flags);
		sport->curr_rx_desc = sport->dma_rx_desc;
	} else {
		sport_tx_dma_start(sport, 1);
		sport_rx_dma_start(sport, 0);
		sport_start(sport);
	}

	sport->rx_run = 1;

	return 0;
}
EXPORT_SYMBOL(sport_rx_start);

int sport_rx_stop(struct sport_device *sport)
{
	pr_debug("%s enter\n", __func__);

	if (!sport->rx_run)
		return 0;
	if (sport->tx_run) {
		/* TX dma is still running, hook the dummy buffer */
		sport_hook_rx_dummy(sport);
	} else {
		/* Both rx and tx dma will be stopped */
		sport_stop(sport);
		sport->curr_rx_desc = NULL;
		sport->curr_tx_desc = NULL;
	}

	sport->rx_run = 0;

	return 0;
}
EXPORT_SYMBOL(sport_rx_stop);

static inline int sport_hook_tx_dummy(struct sport_device *sport)
{
	struct dmasg *desc, temp_desc;
	unsigned long flags;

	if (WARN_ON(!sport->dummy_tx_desc) ||
	    WARN_ON(sport->curr_tx_desc == sport->dummy_tx_desc))
		return -EINVAL;

	sport->dummy_tx_desc->next_desc_addr = sport->dummy_tx_desc + 1;

	/* Shorten the time spent on the last normal descriptor */
	local_irq_save(flags);
	desc = get_dma_next_desc_ptr(sport->dma_tx_chan);
	/* Back up the descriptor we are about to modify */
	temp_desc = *desc;
	desc->x_count = sport->dummy_count / 2;
	desc->y_count = 0;
	desc->next_desc_addr = sport->dummy_tx_desc;
	local_irq_restore(flags);
	/* Wait until the DMA has advanced into the dummy descriptor ring */
	while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) -
			sizeof(struct dmasg)) != sport->dummy_tx_desc)
		continue;
	sport->curr_tx_desc = sport->dummy_tx_desc;
	/* Restore the modified descriptor */
	*desc = temp_desc;

	return 0;
}

int sport_tx_start(struct sport_device *sport)
{
	unsigned long flags;

	pr_debug("%s: tx_run:%d, rx_run:%d\n", __func__,
			sport->tx_run, sport->rx_run);
	if (sport->tx_run)
		return -EBUSY;
	if (sport->rx_run) {
		if (WARN_ON(!sport->dma_tx_desc) ||
		    WARN_ON(sport->curr_tx_desc != sport->dummy_tx_desc))
			return -EINVAL;
		/* Hook the normal buffer descriptor */
		local_irq_save(flags);
		while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) -
			sizeof(struct dmasg)) != sport->dummy_tx_desc)
			continue;
		sport->dummy_tx_desc->next_desc_addr = sport->dma_tx_desc;
		local_irq_restore(flags);
		sport->curr_tx_desc = sport->dma_tx_desc;
	} else {
		sport_tx_dma_start(sport, 0);
		/* Let rx dma run the dummy buffer */
		sport_rx_dma_start(sport, 1);
		sport_start(sport);
	}

	sport->tx_run = 1;

	return 0;
}
EXPORT_SYMBOL(sport_tx_start);

int sport_tx_stop(struct sport_device *sport)
{
	if (!sport->tx_run)
		return 0;
	if (sport->rx_run) {
		/* RX is still running, hook the dummy buffer */
		sport_hook_tx_dummy(sport);
	} else {
		/* Both rx and tx dma stopped */
		sport_stop(sport);
		sport->curr_rx_desc = NULL;
		sport->curr_tx_desc = NULL;
	}

	sport->tx_run = 0;

	return 0;
}
EXPORT_SYMBOL(sport_tx_stop);
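
/*
 * Summary of the start/stop logic above: the SPORT shares one clock and
 * frame-sync pair, so the RX and TX DMA channels always run together.
 * When only one direction is in use, the other is kept fed from its dummy
 * descriptor ring.  Starting or stopping one direction while the other is
 * live is done by splicing the real or dummy ring into the running DMA
 * chain (sport_hook_rx_dummy()/sport_hook_tx_dummy() and the hook code in
 * sport_rx_start()/sport_tx_start()).
 */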

static inline int compute_wdsize(size_t wdsize)
{
	switch (wdsize) {
	case 1:
		return WDSIZE_8;
	case 2:
		return WDSIZE_16;
	case 4:
	default:
		return WDSIZE_32;
	}
}

int sport_config_rx_dma(struct sport_device *sport, void *buf,
		int fragcount, size_t fragsize)
{
	unsigned int x_count;
	unsigned int y_count;
	unsigned int cfg;
	dma_addr_t addr;

	pr_debug("%s buf:%p, frag:%d, fragsize:0x%lx\n", __func__,
			buf, fragcount, fragsize);

	x_count = fragsize / sport->wdsize;
	y_count = 0;

	/* For fragments of 64k words or more, fall back to 2D DMA: factor
	 * the word count into x_count * y_count so that each dimension fits
	 * the 16-bit DMA counters. */
	if (x_count >= 0x10000) {
		int i, count = x_count;

		for (i = 16; i > 0; i--) {
			x_count = 1 << i;
			if ((count & (x_count - 1)) == 0) {
				y_count = count >> i;
				if (y_count < 0x10000)
					break;
			}
		}
		if (i == 0)
			return -EINVAL;
	}
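
	/*
	 * Worked example: a fragment of 0x18000 words exceeds the 64k
	 * x_count limit.  The loop above tries i = 16 first (0x18000 is
	 * not divisible by 0x10000), then i = 15: x_count = 0x8000 divides
	 * the total, giving y_count = 0x18000 >> 15 = 3, so the fragment
	 * moves as a 2D transfer of 3 rows of 0x8000 words each.
	 */
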
	pr_debug("%s(x_count:0x%x, y_count:0x%x)\n", __func__,
			x_count, y_count);

	if (sport->dma_rx_desc)
		dma_free_coherent(NULL, sport->rx_desc_bytes,
					sport->dma_rx_desc, 0);

	/* Allocate a new descriptor ring as the current one */
	sport->dma_rx_desc = dma_alloc_coherent(NULL,
			fragcount * sizeof(struct dmasg), &addr, 0);
	sport->rx_desc_bytes = fragcount * sizeof(struct dmasg);

	if (!sport->dma_rx_desc) {
		pr_err("Failed to allocate memory for rx desc\n");
		return -ENOMEM;
	}

	sport->rx_buf = buf;
	sport->rx_fragsize = fragsize;
	sport->rx_frags = fragcount;

	cfg     = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) | WNR |
		  (DESC_ELEMENT_COUNT << 8); /* large descriptor mode */

	if (y_count != 0)
		cfg |= DMA2D;

	setup_desc(sport->dma_rx_desc, buf, fragcount, fragsize,
			cfg|DMAEN, x_count, y_count, sport->wdsize);

	return 0;
}
EXPORT_SYMBOL(sport_config_rx_dma);

int sport_config_tx_dma(struct sport_device *sport, void *buf,
		int fragcount, size_t fragsize)
{
	unsigned int x_count;
	unsigned int y_count;
	unsigned int cfg;
	dma_addr_t addr;

	pr_debug("%s buf:%p, fragcount:%d, fragsize:0x%lx\n",
			__func__, buf, fragcount, fragsize);

	x_count = fragsize / sport->wdsize;
	y_count = 0;

	/* Factor oversized fragments for 2D DMA; see the worked example in
	 * sport_config_rx_dma() above. */
	if (x_count >= 0x10000) {
		int i, count = x_count;

		for (i = 16; i > 0; i--) {
			x_count = 1 << i;
			if ((count & (x_count - 1)) == 0) {
				y_count = count >> i;
				if (y_count < 0x10000)
					break;
			}
		}
		if (i == 0)
			return -EINVAL;
	}
	pr_debug("%s x_count:0x%x, y_count:0x%x\n", __func__,
			x_count, y_count);

	if (sport->dma_tx_desc) {
		dma_free_coherent(NULL, sport->tx_desc_bytes,
				sport->dma_tx_desc, 0);
	}

	sport->dma_tx_desc = dma_alloc_coherent(NULL,
			fragcount * sizeof(struct dmasg), &addr, 0);
	sport->tx_desc_bytes = fragcount * sizeof(struct dmasg);
	if (!sport->dma_tx_desc) {
		pr_err("Failed to allocate memory for tx desc\n");
		return -ENOMEM;
	}

	sport->tx_buf = buf;
	sport->tx_fragsize = fragsize;
	sport->tx_frags = fragcount;
	cfg     = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) |
		  (DESC_ELEMENT_COUNT << 8); /* large descriptor mode */

	if (y_count != 0)
		cfg |= DMA2D;

	setup_desc(sport->dma_tx_desc, buf, fragcount, fragsize,
			cfg|DMAEN, x_count, y_count, sport->wdsize);

	return 0;
}
EXPORT_SYMBOL(sport_config_tx_dma);

/* Set up a two-descriptor dummy DMA ring which generates no interrupts;
 * it keeps an idle direction running against a throwaway buffer */
static int sport_config_rx_dummy(struct sport_device *sport)
{
	struct dmasg *desc;
	unsigned config;

	pr_debug("%s entered\n", __func__);
	if (L1_DATA_A_LENGTH)
		desc = l1_data_sram_zalloc(2 * sizeof(*desc));
	else {
		dma_addr_t addr;
		desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0);
		if (desc)
			memset(desc, 0, 2 * sizeof(*desc));
	}
	if (desc == NULL) {
		pr_err("Failed to allocate memory for dummy rx desc\n");
		return -ENOMEM;
	}
	sport->dummy_rx_desc = desc;
	desc->start_addr = (unsigned long)sport->dummy_buf;
	config = DMAFLOW_LARGE | NDSIZE_9 | compute_wdsize(sport->wdsize)
		 | WNR | DMAEN;
	desc->cfg = config;
	desc->x_count = sport->dummy_count / sport->wdsize;
	desc->x_modify = sport->wdsize;
	desc->y_count = 0;
	desc->y_modify = 0;
	memcpy(desc+1, desc, sizeof(*desc));
	desc->next_desc_addr = desc + 1;
	desc[1].next_desc_addr = desc;
	return 0;
}

static int sport_config_tx_dummy(struct sport_device *sport)
{
	struct dmasg *desc;
	unsigned int config;

	pr_debug("%s entered\n", __func__);

	if (L1_DATA_A_LENGTH)
		desc = l1_data_sram_zalloc(2 * sizeof(*desc));
	else {
		dma_addr_t addr;
		desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0);
		if (desc)
			memset(desc, 0, 2 * sizeof(*desc));
	}
	if (!desc) {
		pr_err("Failed to allocate memory for dummy tx desc\n");
		return -ENOMEM;
	}
	sport->dummy_tx_desc = desc;
	desc->start_addr = (unsigned long)sport->dummy_buf +
		sport->dummy_count;
	config = DMAFLOW_LARGE | NDSIZE_9 |
		 compute_wdsize(sport->wdsize) | DMAEN;
	desc->cfg = config;
	desc->x_count = sport->dummy_count / sport->wdsize;
	desc->x_modify = sport->wdsize;
	desc->y_count = 0;
	desc->y_modify = 0;
	memcpy(desc+1, desc, sizeof(*desc));
	desc->next_desc_addr = desc + 1;
	desc[1].next_desc_addr = desc;
	return 0;
}

unsigned long sport_curr_offset_rx(struct sport_device *sport)
{
	unsigned long curr = get_dma_curr_addr(sport->dma_rx_chan);

	return (unsigned char *)curr - sport->rx_buf;
}
EXPORT_SYMBOL(sport_curr_offset_rx);

unsigned long sport_curr_offset_tx(struct sport_device *sport)
{
	unsigned long curr = get_dma_curr_addr(sport->dma_tx_chan);

	return (unsigned char *)curr - sport->tx_buf;
}
EXPORT_SYMBOL(sport_curr_offset_tx);
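
/*
 * Sketch of typical use (hypothetical PCM glue, not part of this file):
 * an ALSA pointer() callback can convert the byte offset into frames:
 *
 *	size_t off = sport_curr_offset_tx(sport);
 *	snd_pcm_uframes_t pos = bytes_to_frames(substream->runtime, off);
 */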

void sport_incfrag(struct sport_device *sport, int *frag, int tx)
{
	++(*frag);
	if (tx == 1 && *frag == sport->tx_frags)
		*frag = 0;

	if (tx == 0 && *frag == sport->rx_frags)
		*frag = 0;
}
EXPORT_SYMBOL(sport_incfrag);

void sport_decfrag(struct sport_device *sport, int *frag, int tx)
{
	--(*frag);
	if (tx == 1 && *frag == 0)
		*frag = sport->tx_frags;

	if (tx == 0 && *frag == 0)
		*frag = sport->rx_frags;
}
EXPORT_SYMBOL(sport_decfrag);

static int sport_check_status(struct sport_device *sport,
		unsigned int *sport_stat,
		unsigned int *rx_stat,
		unsigned int *tx_stat)
{
	int status = 0;

	if (sport_stat) {
		SSYNC();
		status = sport->regs->stat;
		if (status & (TOVF|TUVF|ROVF|RUVF))
			sport->regs->stat = (status & (TOVF|TUVF|ROVF|RUVF));
		SSYNC();
		*sport_stat = status;
	}

	if (rx_stat) {
		SSYNC();
		status = get_dma_curr_irqstat(sport->dma_rx_chan);
		if (status & (DMA_DONE|DMA_ERR))
			clear_dma_irqstat(sport->dma_rx_chan);
		SSYNC();
		*rx_stat = status;
	}

	if (tx_stat) {
		SSYNC();
		status = get_dma_curr_irqstat(sport->dma_tx_chan);
		if (status & (DMA_DONE|DMA_ERR))
			clear_dma_irqstat(sport->dma_tx_chan);
		SSYNC();
		*tx_stat = status;
	}

	return 0;
}

int sport_dump_stat(struct sport_device *sport, char *buf, size_t len)
{
	int ret;

	ret = snprintf(buf, len,
			"sts: 0x%04x\n"
			"rx dma %d sts: 0x%04x tx dma %d sts: 0x%04x\n",
			sport->regs->stat,
			sport->dma_rx_chan,
			get_dma_curr_irqstat(sport->dma_rx_chan),
			sport->dma_tx_chan,
			get_dma_curr_irqstat(sport->dma_tx_chan));
	buf += ret;
	len -= ret;

	ret += snprintf(buf, len,
			"curr_rx_desc:0x%p, curr_tx_desc:0x%p\n"
			"dma_rx_desc:0x%p, dma_tx_desc:0x%p\n"
			"dummy_rx_desc:0x%p, dummy_tx_desc:0x%p\n",
			sport->curr_rx_desc, sport->curr_tx_desc,
			sport->dma_rx_desc, sport->dma_tx_desc,
			sport->dummy_rx_desc, sport->dummy_tx_desc);

	return ret;
}

static irqreturn_t rx_handler(int irq, void *dev_id)
{
	unsigned int rx_stat;
	struct sport_device *sport = dev_id;

	pr_debug("%s enter\n", __func__);
	sport_check_status(sport, NULL, &rx_stat, NULL);
	if (!(rx_stat & DMA_DONE))
		pr_err("rx dma is already stopped\n");

	if (sport->rx_callback) {
		sport->rx_callback(sport->rx_data);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t tx_handler(int irq, void *dev_id)
{
	unsigned int tx_stat;
	struct sport_device *sport = dev_id;

	pr_debug("%s enter\n", __func__);
	sport_check_status(sport, NULL, NULL, &tx_stat);
	if (!(tx_stat & DMA_DONE)) {
		pr_err("tx dma is already stopped\n");
		return IRQ_HANDLED;
	}
	if (sport->tx_callback) {
		sport->tx_callback(sport->tx_data);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t err_handler(int irq, void *dev_id)
{
	unsigned int status = 0;
	struct sport_device *sport = dev_id;

	pr_debug("%s\n", __func__);
	if (sport_check_status(sport, &status, NULL, NULL)) {
		pr_err("error checking status\n");
		return IRQ_NONE;
	}

	if (status & (TOVF|TUVF|ROVF|RUVF)) {
		pr_info("sport status error:%s%s%s%s\n",
				status & TOVF ? " TOVF" : "",
				status & TUVF ? " TUVF" : "",
				status & ROVF ? " ROVF" : "",
				status & RUVF ? " RUVF" : "");
		if (status & TOVF || status & TUVF) {
			disable_dma(sport->dma_tx_chan);
			if (sport->tx_run)
				sport_tx_dma_start(sport, 0);
			else
				sport_tx_dma_start(sport, 1);
			enable_dma(sport->dma_tx_chan);
		} else {
			disable_dma(sport->dma_rx_chan);
			if (sport->rx_run)
				sport_rx_dma_start(sport, 0);
			else
				sport_rx_dma_start(sport, 1);
			enable_dma(sport->dma_rx_chan);
		}
	}
	status = sport->regs->stat;
	if (status & (TOVF|TUVF|ROVF|RUVF))
		sport->regs->stat = (status & (TOVF|TUVF|ROVF|RUVF));
	SSYNC();

	if (sport->err_callback)
		sport->err_callback(sport->err_data);

	return IRQ_HANDLED;
}

int sport_set_rx_callback(struct sport_device *sport,
		void (*rx_callback)(void *), void *rx_data)
{
	if (WARN_ON(!rx_callback))
		return -EINVAL;
	sport->rx_callback = rx_callback;
	sport->rx_data = rx_data;

	return 0;
}
EXPORT_SYMBOL(sport_set_rx_callback);

int sport_set_tx_callback(struct sport_device *sport,
		void (*tx_callback)(void *), void *tx_data)
{
	if (WARN_ON(!tx_callback))
		return -EINVAL;
	sport->tx_callback = tx_callback;
	sport->tx_data = tx_data;

	return 0;
}
EXPORT_SYMBOL(sport_set_tx_callback);

int sport_set_err_callback(struct sport_device *sport,
		void (*err_callback)(void *), void *err_data)
{
	if (WARN_ON(!err_callback))
		return -EINVAL;
	sport->err_callback = err_callback;
	sport->err_data = err_data;

	return 0;
}
EXPORT_SYMBOL(sport_set_err_callback);
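
/*
 * Example wiring (illustrative): a PCM driver typically registers its
 * period-elapsed handlers right after sport_init():
 *
 *	sport_set_rx_callback(sport, rx_period_done, substream);
 *	sport_set_tx_callback(sport, tx_period_done, substream);
 *	sport_set_err_callback(sport, sport_err, sport);
 *
 * where rx_period_done() etc. are driver-private void (*)(void *) handlers
 * invoked from the DMA interrupt handlers above.
 */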

static int sport_config_pdev(struct platform_device *pdev,
		struct sport_param *param)
{
	/* Extract settings from platform data */
	struct device *dev = &pdev->dev;
	struct bfin_snd_platform_data *pdata = dev->platform_data;
	struct resource *res;

	param->num = pdev->id;

	if (!pdata) {
		dev_err(dev, "no platform_data\n");
		return -ENODEV;
	}
	param->pin_req = pdata->pin_req;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no MEM resource\n");
		return -ENODEV;
	}
	param->regs = (struct sport_register *)res->start;

	/* first RX, then TX */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(dev, "no rx DMA resource\n");
		return -ENODEV;
	}
	param->dma_rx_chan = res->start;

	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!res) {
		dev_err(dev, "no tx DMA resource\n");
		return -ENODEV;
	}
	param->dma_tx_chan = res->start;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "no irq resource\n");
		return -ENODEV;
	}
	param->err_irq = res->start;

	return 0;
}

struct sport_device *sport_init(struct platform_device *pdev,
	unsigned int wdsize, unsigned int dummy_count, size_t priv_size)
{
	struct device *dev = &pdev->dev;
	struct sport_param param;
	struct sport_device *sport;
	int ret;

	dev_dbg(dev, "%s enter\n", __func__);

	param.wdsize = wdsize;
	param.dummy_count = dummy_count;
	if (WARN_ON(param.wdsize == 0 || param.dummy_count == 0))
		return NULL;

	ret = sport_config_pdev(pdev, &param);
	if (ret)
		return NULL;

	if (peripheral_request_list(param.pin_req, "soc-audio")) {
		dev_err(dev, "requesting peripherals failed\n");
		return NULL;
	}

	sport = kzalloc(sizeof(*sport), GFP_KERNEL);
	if (!sport) {
		dev_err(dev, "failed to allocate sport device\n");
		goto __init_err0;
	}

	sport->num = param.num;
	sport->dma_rx_chan = param.dma_rx_chan;
	sport->dma_tx_chan = param.dma_tx_chan;
	sport->err_irq = param.err_irq;
	sport->regs = param.regs;
	sport->pin_req = param.pin_req;

	if (request_dma(sport->dma_rx_chan, "SPORT RX Data") == -EBUSY) {
		dev_err(dev, "failed to request RX dma %d\n", sport->dma_rx_chan);
		goto __init_err1;
	}
	if (set_dma_callback(sport->dma_rx_chan, rx_handler, sport) != 0) {
		dev_err(dev, "failed to request RX irq %d\n", sport->dma_rx_chan);
		goto __init_err2;
	}

	if (request_dma(sport->dma_tx_chan, "SPORT TX Data") == -EBUSY) {
		dev_err(dev, "failed to request TX dma %d\n", sport->dma_tx_chan);
		goto __init_err2;
	}

	if (set_dma_callback(sport->dma_tx_chan, tx_handler, sport) != 0) {
		dev_err(dev, "failed to request TX irq %d\n", sport->dma_tx_chan);
		goto __init_err3;
	}

	if (request_irq(sport->err_irq, err_handler, IRQF_SHARED, "SPORT err",
			sport) < 0) {
		dev_err(dev, "failed to request err irq %d\n", sport->err_irq);
		goto __init_err3;
	}

	dev_info(dev, "dma rx:%d tx:%d, err irq:%d, regs:%p\n",
			sport->dma_rx_chan, sport->dma_tx_chan,
			sport->err_irq, sport->regs);

	sport->wdsize = param.wdsize;
	sport->dummy_count = param.dummy_count;

	sport->private_data = kzalloc(priv_size, GFP_KERNEL);
	if (!sport->private_data) {
		dev_err(dev, "could not alloc priv data %zu bytes\n", priv_size);
		goto __init_err4;
	}

	if (L1_DATA_A_LENGTH)
		sport->dummy_buf = l1_data_sram_zalloc(param.dummy_count * 2);
	else
		sport->dummy_buf = kzalloc(param.dummy_count * 2, GFP_KERNEL);
	if (sport->dummy_buf == NULL) {
		dev_err(dev, "failed to allocate dummy buffer\n");
		goto __error1;
	}

	ret = sport_config_rx_dummy(sport);
	if (ret) {
		dev_err(dev, "failed to config rx dummy ring\n");
		goto __error2;
	}
	ret = sport_config_tx_dummy(sport);
	if (ret) {
		dev_err(dev, "failed to config tx dummy ring\n");
		goto __error3;
	}

	platform_set_drvdata(pdev, sport);

	return sport;
__error3:
	if (L1_DATA_A_LENGTH)
		l1_data_sram_free(sport->dummy_rx_desc);
	else
		dma_free_coherent(NULL, 2*sizeof(struct dmasg),
				sport->dummy_rx_desc, 0);
__error2:
	if (L1_DATA_A_LENGTH)
		l1_data_sram_free(sport->dummy_buf);
	else
		kfree(sport->dummy_buf);
__error1:
	kfree(sport->private_data);
__init_err4:
	free_irq(sport->err_irq, sport);
__init_err3:
	free_dma(sport->dma_tx_chan);
__init_err2:
	free_dma(sport->dma_rx_chan);
__init_err1:
	kfree(sport);
__init_err0:
	peripheral_free_list(param.pin_req);
	return NULL;
}
EXPORT_SYMBOL(sport_init);
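
/*
 * A minimal sketch of how a platform driver's probe() might bring the
 * SPORT up (the numbers and struct name are illustrative, not mandated by
 * this API): 4-byte words, a dummy buffer of 1920 bytes per direction,
 * and caller-private data allocated alongside the sport device:
 *
 *	sport = sport_init(pdev, 4, 1920, sizeof(struct my_priv));
 *	if (!sport)
 *		return -ENODEV;
 *
 * The matching remove() calls sport_done(sport).
 */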

void sport_done(struct sport_device *sport)
{
	if (sport == NULL)
		return;

	sport_stop(sport);
	if (sport->dma_rx_desc)
		dma_free_coherent(NULL, sport->rx_desc_bytes,
			sport->dma_rx_desc, 0);
	if (sport->dma_tx_desc)
		dma_free_coherent(NULL, sport->tx_desc_bytes,
			sport->dma_tx_desc, 0);

#if L1_DATA_A_LENGTH != 0
	l1_data_sram_free(sport->dummy_rx_desc);
	l1_data_sram_free(sport->dummy_tx_desc);
	l1_data_sram_free(sport->dummy_buf);
#else
	dma_free_coherent(NULL, 2*sizeof(struct dmasg),
		sport->dummy_rx_desc, 0);
	dma_free_coherent(NULL, 2*sizeof(struct dmasg),
		sport->dummy_tx_desc, 0);
	kfree(sport->dummy_buf);
#endif
	free_dma(sport->dma_rx_chan);
	free_dma(sport->dma_tx_chan);
	free_irq(sport->err_irq, sport);

	kfree(sport->private_data);
	peripheral_free_list(sport->pin_req);
	kfree(sport);
}
EXPORT_SYMBOL(sport_done);

/*
 * Transfer a few bytes in PIO fashion while the streaming descriptor DMA
 * is not running; the SPORT controller must be configured but not yet
 * enabled.  Multichannel mode does not work with this PIO path.
 * Used by the AC97 driver to write and read codec registers.
 */
int sport_send_and_recv(struct sport_device *sport, u8 *out_data,
		u8 *in_data, int len)
{
	unsigned short dma_config;
	unsigned short status;
	unsigned long flags;
	unsigned long wait = 0;

	pr_debug("%s enter, out_data:%p, in_data:%p len:%d\n",
			__func__, out_data, in_data, len);
	pr_debug("tcr1:0x%04x, tcr2:0x%04x, tclkdiv:0x%04x, tfsdiv:0x%04x\n"
			"mcmc1:0x%04x, mcmc2:0x%04x\n",
			sport->regs->tcr1, sport->regs->tcr2,
			sport->regs->tclkdiv, sport->regs->tfsdiv,
			sport->regs->mcmc1, sport->regs->mcmc2);
	flush_dcache_range((unsigned)out_data, (unsigned)(out_data + len));

	/* Enable tx dma */
	dma_config = (RESTART | WDSIZE_16 | DI_EN);
	set_dma_start_addr(sport->dma_tx_chan, (unsigned long)out_data);
	set_dma_x_count(sport->dma_tx_chan, len/2);
	set_dma_x_modify(sport->dma_tx_chan, 2);
	set_dma_config(sport->dma_tx_chan, dma_config);
	enable_dma(sport->dma_tx_chan);

	if (in_data != NULL) {
		invalidate_dcache_range((unsigned)in_data,
				(unsigned)(in_data + len));
		/* Enable rx dma */
		dma_config = (RESTART | WDSIZE_16 | WNR | DI_EN);
		set_dma_start_addr(sport->dma_rx_chan, (unsigned long)in_data);
		set_dma_x_count(sport->dma_rx_chan, len/2);
		set_dma_x_modify(sport->dma_rx_chan, 2);
		set_dma_config(sport->dma_rx_chan, dma_config);
		enable_dma(sport->dma_rx_chan);
	}

	local_irq_save(flags);
	sport->regs->tcr1 |= TSPEN;
	sport->regs->rcr1 |= RSPEN;
	SSYNC();

	status = get_dma_curr_irqstat(sport->dma_tx_chan);
	while (status & DMA_RUN) {
		udelay(1);
		status = get_dma_curr_irqstat(sport->dma_tx_chan);
		pr_debug("DMA status:0x%04x\n", status);
		if (wait++ > 100)
			goto __over;
	}
	status = sport->regs->stat;
	wait = 0;

	while (!(status & TXHRE)) {
		pr_debug("sport status:0x%04x\n", status);
		udelay(1);
		status = sport->regs->stat;
		if (wait++ > 1000)
			goto __over;
	}
	/* Wait for the last byte to be sent out */
	udelay(20);
	pr_debug("sport status:0x%04x\n", status);

__over:
	sport->regs->tcr1 &= ~TSPEN;
	sport->regs->rcr1 &= ~RSPEN;
	SSYNC();
	disable_dma(sport->dma_tx_chan);
	/* Clear the status */
	clear_dma_irqstat(sport->dma_tx_chan);
	if (in_data != NULL) {
		disable_dma(sport->dma_rx_chan);
		clear_dma_irqstat(sport->dma_rx_chan);
	}
	SSYNC();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(sport_send_and_recv);
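
/*
 * Illustrative call (the frame layout is codec specific and assumed here):
 * send one 16-slot, 16-bits-per-slot command frame and ignore the reply:
 *
 *	u16 cmd[16] = { 0 };	// fill tag/address/data slots per AC97 spec
 *
 *	sport_send_and_recv(sport, (u8 *)cmd, NULL, sizeof(cmd));
 *
 * len is in bytes; the function programs len/2 16-bit DMA transfers.
 */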

MODULE_AUTHOR("Roy Huang");
MODULE_DESCRIPTION("SPORT driver for ADI Blackfin");
MODULE_LICENSE("GPL");