/*
 * Generic TXx9 ACLC platform driver
 *
 * Copyright (C) 2009 Atsushi Nemoto
 *
 * Based on RBTX49xx patch from CELF patch archive.
 * (C) Copyright TOSHIBA CORPORATION 2004-2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "txx9aclc.h"

static struct txx9aclc_soc_device {
	struct txx9aclc_dmadata dmadata[2];
} txx9aclc_soc_device;

/* REVISIT: How to find txx9aclc_drvdata from snd_ac97? */
static struct txx9aclc_plat_drvdata *txx9aclc_drvdata;

static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
			     struct txx9aclc_dmadata *dmadata);

static const struct snd_pcm_hardware txx9aclc_pcm_hardware = {
	/*
	 * REVISIT: SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
	 * needs more work for noncoherent MIPS.
	 */
	.info		  = SNDRV_PCM_INFO_INTERLEAVED |
			    SNDRV_PCM_INFO_BATCH |
			    SNDRV_PCM_INFO_PAUSE,
	.period_bytes_min = 1024,
	.period_bytes_max = 8 * 1024,
	.periods_min	  = 2,
	.periods_max	  = 4096,
	.buffer_bytes_max = 32 * 1024,
};

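/*
 * Allocate the DMA buffer for the requested hw_params and remember the
 * substream so the DMA machinery can refer back to it later.
 */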
static int txx9aclc_pcm_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct txx9aclc_dmadata *dmadata = runtime->private_data;
	int ret;

	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	dev_dbg(rtd->platform->dev,
		"runtime->dma_area = %#lx dma_addr = %#lx dma_bytes = %zd "
		"runtime->min_align %ld\n",
		(unsigned long)runtime->dma_area,
		(unsigned long)runtime->dma_addr, runtime->dma_bytes,
		runtime->min_align);
	dev_dbg(rtd->platform->dev,
		"periods %d period_bytes %d stream %d\n",
		params_periods(params), params_period_bytes(params),
		substream->stream);

	dmadata->substream = substream;
	dmadata->pos = 0;
	return 0;
}

static int txx9aclc_pcm_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}

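/*
 * Split the ring buffer into DMA fragments.  Normally one fragment per
 * period is used; if the buffer consists of a single period it is halved
 * so that at least two fragments can be kept queued on the channel.
 */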
static int txx9aclc_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct txx9aclc_dmadata *dmadata = runtime->private_data;

	dmadata->dma_addr = runtime->dma_addr;
	dmadata->buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
	dmadata->period_bytes = snd_pcm_lib_period_bytes(substream);

	if (dmadata->buffer_bytes == dmadata->period_bytes) {
		dmadata->frag_bytes = dmadata->period_bytes >> 1;
		dmadata->frags = 2;
	} else {
		dmadata->frag_bytes = dmadata->period_bytes;
		dmadata->frags = dmadata->buffer_bytes / dmadata->period_bytes;
	}
	dmadata->frag_count = 0;
	dmadata->pos = 0;
	return 0;
}

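/*
 * DMA completion callback.  New descriptors must not be submitted from
 * this context, so just account for the finished transfer and, unless
 * the stream has been stopped (frag_count < 0), kick the tasklet that
 * refills the chain.
 */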
static void txx9aclc_dma_complete(void *arg)
{
	struct txx9aclc_dmadata *dmadata = arg;
	unsigned long flags;

	/* dma completion handler cannot submit new operations */
	spin_lock_irqsave(&dmadata->dma_lock, flags);
	if (dmadata->frag_count >= 0) {
		dmadata->dmacount--;
		if (!WARN_ON(dmadata->dmacount < 0))
			tasklet_schedule(&dmadata->tasklet);
	}
	spin_unlock_irqrestore(&dmadata->dma_lock, flags);
}

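/*
 * Prepare and submit a single-entry scatterlist covering one fragment of
 * the PCM buffer on the slave DMA channel.  Returns the descriptor, or
 * NULL if the channel could not prepare the transfer.
 */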
static struct dma_async_tx_descriptor *
txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
{
	struct dma_chan *chan = dmadata->dma_chan;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)),
		    dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1));
	sg_dma_address(&sg) = buf_dma_addr;
	desc = dmaengine_prep_slave_sg(chan, &sg, 1,
		dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(&chan->dev->device, "cannot prepare slave dma\n");
		return NULL;
	}
	desc->callback = txx9aclc_dma_complete;
	desc->callback_param = dmadata;
	dmaengine_submit(desc);
	return desc;
}

#define NR_DMA_CHAIN		2

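/*
 * Keep up to NR_DMA_CHAIN fragments queued on the DMA channel.  On the
 * first run after a trigger (frag_count < 0) the channel is reset, the
 * initial chain is submitted and the ACLC FIFO DMA is enabled; on later
 * runs each completed fragment is replaced by the next one and
 * snd_pcm_period_elapsed() is called at every period boundary.
 */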
static void txx9aclc_dma_tasklet(unsigned long data)
{
	struct txx9aclc_dmadata *dmadata = (struct txx9aclc_dmadata *)data;
	struct dma_chan *chan = dmadata->dma_chan;
	struct dma_async_tx_descriptor *desc;
	struct snd_pcm_substream *substream = dmadata->substream;
	u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		ACCTL_AUDODMA : ACCTL_AUDIDMA;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dmadata->dma_lock, flags);
	if (dmadata->frag_count < 0) {
		struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
		void __iomem *base = drvdata->base;

		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		dmaengine_terminate_all(chan);
		/* first time */
		for (i = 0; i < NR_DMA_CHAIN; i++) {
			desc = txx9aclc_dma_submit(dmadata,
				dmadata->dma_addr + i * dmadata->frag_bytes);
			if (!desc)
				return;
		}
		dmadata->dmacount = NR_DMA_CHAIN;
		dma_async_issue_pending(chan);
		spin_lock_irqsave(&dmadata->dma_lock, flags);
		__raw_writel(ctlbit, base + ACCTLEN);
		dmadata->frag_count = NR_DMA_CHAIN % dmadata->frags;
		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		return;
	}
	if (WARN_ON(dmadata->dmacount >= NR_DMA_CHAIN)) {
		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		return;
	}
	while (dmadata->dmacount < NR_DMA_CHAIN) {
		dmadata->dmacount++;
		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		desc = txx9aclc_dma_submit(dmadata,
			dmadata->dma_addr +
			dmadata->frag_count * dmadata->frag_bytes);
		if (!desc)
			return;
		dma_async_issue_pending(chan);

		spin_lock_irqsave(&dmadata->dma_lock, flags);
		dmadata->frag_count++;
		dmadata->frag_count %= dmadata->frags;
		dmadata->pos += dmadata->frag_bytes;
		dmadata->pos %= dmadata->buffer_bytes;
		if ((dmadata->frag_count * dmadata->frag_bytes) %
		    dmadata->period_bytes == 0)
			snd_pcm_period_elapsed(substream);
	}
	spin_unlock_irqrestore(&dmadata->dma_lock, flags);
}

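/*
 * START defers to the tasklet (frag_count = -1), which rebuilds the DMA
 * chain and enables the FIFO DMA; stop, pause and suspend only clear the
 * FIFO DMA request bit, while release and resume set it again.
 */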
static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
	void __iomem *base = drvdata->base;
	unsigned long flags;
	int ret = 0;
	u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		ACCTL_AUDODMA : ACCTL_AUDIDMA;

	spin_lock_irqsave(&dmadata->dma_lock, flags);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		dmadata->frag_count = -1;
		tasklet_schedule(&dmadata->tasklet);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		__raw_writel(ctlbit, base + ACCTLDIS);
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		__raw_writel(ctlbit, base + ACCTLEN);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&dmadata->dma_lock, flags);
	return ret;
}

static snd_pcm_uframes_t
txx9aclc_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;

	return bytes_to_frames(substream->runtime, dmadata->pos);
}

static int txx9aclc_pcm_open(struct snd_pcm_substream *substream)
{
	struct txx9aclc_soc_device *dev = &txx9aclc_soc_device;
	struct txx9aclc_dmadata *dmadata = &dev->dmadata[substream->stream];
	int ret;

	ret = snd_soc_set_runtime_hwparams(substream, &txx9aclc_pcm_hardware);
	if (ret)
		return ret;
	/* ensure that buffer size is a multiple of period size */
	ret = snd_pcm_hw_constraint_integer(substream->runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;
	substream->runtime->private_data = dmadata;
	return 0;
}

static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
{
	struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
	struct dma_chan *chan = dmadata->dma_chan;

	dmadata->frag_count = -1;
	dmaengine_terminate_all(chan);
	return 0;
}

static struct snd_pcm_ops txx9aclc_pcm_ops = {
	.open		= txx9aclc_pcm_open,
	.close		= txx9aclc_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= txx9aclc_pcm_hw_params,
	.hw_free	= txx9aclc_pcm_hw_free,
	.prepare	= txx9aclc_pcm_prepare,
	.trigger	= txx9aclc_pcm_trigger,
	.pointer	= txx9aclc_pcm_pointer,
};

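/*
 * Set up playback and capture DMA channels from the platform DMA
 * resources and preallocate the PCM buffers.  On failure any channel
 * already acquired is released again.
 */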
static int txx9aclc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_soc_dai *dai = rtd->cpu_dai;
	struct snd_pcm *pcm = rtd->pcm;
	struct platform_device *pdev = to_platform_device(rtd->platform->dev);
	struct txx9aclc_soc_device *dev;
	struct resource *r;
	int i;
	int ret;

	/* at this point onwards the AC97 component has probed and this will be valid */
	dev = snd_soc_dai_get_drvdata(dai);

	dev->dmadata[0].stream = SNDRV_PCM_STREAM_PLAYBACK;
	dev->dmadata[1].stream = SNDRV_PCM_STREAM_CAPTURE;
	for (i = 0; i < 2; i++) {
		r = platform_get_resource(pdev, IORESOURCE_DMA, i);
		if (!r) {
			ret = -EBUSY;
			goto exit;
		}
		dev->dmadata[i].dma_res = r;
		ret = txx9aclc_dma_init(dev, &dev->dmadata[i]);
		if (ret)
			goto exit;
	}
	return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
		card->dev, 64 * 1024, 4 * 1024 * 1024);

exit:
	for (i = 0; i < 2; i++) {
		if (dev->dmadata[i].dma_chan)
			dma_release_channel(dev->dmadata[i].dma_chan);
		dev->dmadata[i].dma_chan = NULL;
	}
	return ret;
}

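/*
 * dma_request_channel() filter: match the channel whose device name is
 * "<dma resource name>.<dma resource start>" and attach the txx9dmac
 * slave parameters via chan->private.
 */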
static bool filter(struct dma_chan *chan, void *param)
{
	struct txx9aclc_dmadata *dmadata = param;
	char *devname;
	bool found = false;

	devname = kasprintf(GFP_KERNEL, "%s.%d", dmadata->dma_res->name,
		(int)dmadata->dma_res->start);
	if (!devname)
		return false;
	if (strcmp(dev_name(chan->device->dev), devname) == 0) {
		chan->private = &dmadata->dma_slave;
		found = true;
	}
	kfree(devname);
	return found;
}

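/*
 * Set up the txx9dmac slave parameters (FIFO data register address and
 * width), grab a matching slave DMA channel and initialize the refill
 * tasklet.
 */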
static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
			     struct txx9aclc_dmadata *dmadata)
{
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
	struct txx9dmac_slave *ds = &dmadata->dma_slave;
	dma_cap_mask_t mask;

	spin_lock_init(&dmadata->dma_lock);

	ds->reg_width = sizeof(u32);
	if (dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ds->tx_reg = drvdata->physbase + ACAUDODAT;
		ds->rx_reg = 0;
	} else {
		ds->tx_reg = 0;
		ds->rx_reg = drvdata->physbase + ACAUDIDAT;
	}

	/* Try to grab a DMA channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dmadata->dma_chan = dma_request_channel(mask, filter, dmadata);
	if (!dmadata->dma_chan) {
		printk(KERN_ERR
			"DMA channel for %s is not available\n",
			dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK ?
			"playback" : "capture");
		return -EBUSY;
	}
	tasklet_init(&dmadata->tasklet, txx9aclc_dma_tasklet,
		     (unsigned long)dmadata);
	return 0;
}

static int txx9aclc_pcm_probe(struct snd_soc_platform *platform)
{
	snd_soc_platform_set_drvdata(platform, &txx9aclc_soc_device);
	return 0;
}

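/*
 * Disable both FIFO DMAs, clear any pending DMA request with a dummy
 * read/write and release the DMA channels.
 */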
static int txx9aclc_pcm_remove(struct snd_soc_platform *platform)
{
	struct txx9aclc_soc_device *dev = snd_soc_platform_get_drvdata(platform);
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
	void __iomem *base = drvdata->base;
	int i;

	/* disable all FIFO DMAs */
	__raw_writel(ACCTL_AUDODMA | ACCTL_AUDIDMA, base + ACCTLDIS);
	/* dummy R/W to clear pending DMAREQ if any */
	__raw_writel(__raw_readl(base + ACAUDIDAT), base + ACAUDODAT);

	for (i = 0; i < 2; i++) {
		struct txx9aclc_dmadata *dmadata = &dev->dmadata[i];
		struct dma_chan *chan = dmadata->dma_chan;

		if (chan) {
			dmadata->frag_count = -1;
			dmaengine_terminate_all(chan);
			dma_release_channel(chan);
		}
		dev->dmadata[i].dma_chan = NULL;
	}
	return 0;
}

static struct snd_soc_platform_driver txx9aclc_soc_platform = {
	.probe		= txx9aclc_pcm_probe,
	.remove		= txx9aclc_pcm_remove,
	.ops		= &txx9aclc_pcm_ops,
	.pcm_new	= txx9aclc_pcm_new,
};

static int txx9aclc_soc_platform_probe(struct platform_device *pdev)
{
	return snd_soc_register_platform(&pdev->dev, &txx9aclc_soc_platform);
}

static int txx9aclc_soc_platform_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}

static struct platform_driver txx9aclc_pcm_driver = {
	.driver = {
		.name = "txx9aclc-pcm-audio",
	},

	.probe = txx9aclc_soc_platform_probe,
	.remove = txx9aclc_soc_platform_remove,
};

module_platform_driver(txx9aclc_pcm_driver);

MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("TXx9 ACLC Audio DMA driver");
MODULE_LICENSE("GPL");