/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *  Copyright (c) by Scott McNab <sdm@fractalgraphics.com.au>
 *
 *  Trident 4DWave-NX memory page allocation (TLB area)
 *  The Trident chip can handle only 16 MBytes of memory at the same time.
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include "trident.h"

/* the page arguments of these macros are Trident pages (4096 bytes each),
 * not the aligned pages used elsewhere in this file
 */
#define __set_tlb_bus(trident,page,ptr,addr) \
	do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
	     (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
#define __tlb_to_ptr(trident,page) \
	(void*)((trident)->tlb.shadow_entries[page])
#define __tlb_to_addr(trident,page) \
	(dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
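/*
 * Illustrative sketch (not part of the driver): mapping a single Trident
 * page.  The bus address is stored in the hardware-visible TLB entry and
 * the kernel virtual address in the shadow table, so both can be looked up
 * again from the same index.  "buf" and "buf_addr" are assumed to be a
 * page-aligned kernel buffer and its 32-bit DMA address, not names used
 * in this driver:
 *
 *	__set_tlb_bus(trident, 5, (unsigned long)buf, buf_addr);
 *	void *p      = __tlb_to_ptr(trident, 5);   // == buf
 *	dma_addr_t a = __tlb_to_addr(trident, 5);  // == buf_addr
 */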

#if PAGE_SIZE == 4096
/* page size == SNDRV_TRIDENT_PAGE_SIZE */
#define ALIGN_PAGE_SIZE		PAGE_SIZE	/* minimum page size for allocation */
#define MAX_ALIGN_PAGES		SNDRV_TRIDENT_MAX_PAGES	/* maximum number of aligned pages */
/* fill the TLB entry (or entries) corresponding to page with ptr */
#define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
/* fill the TLB entry (or entries) corresponding to page with the silence pointer */
#define set_silent_tlb(trident,page)	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> 12)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << 12)
/* get buffer address from aligned page */
#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, page)
/* get PCI physical address from aligned page */
#define page_to_addr(trident,page)	__tlb_to_addr(trident, page)
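/*
 * Worked example (PAGE_SIZE == 4096): byte offset 0x3050 lies in aligned
 * page get_aligned_page(0x3050) == 3, which starts at
 * aligned_page_offset(3) == 0x3000; the remaining 0x50 bytes are the offset
 * within that page, as used by offset_ptr() below.
 */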

#elif PAGE_SIZE == 8192
/* page size == SNDRV_TRIDENT_PAGE_SIZE x 2 */
#define ALIGN_PAGE_SIZE		PAGE_SIZE
#define MAX_ALIGN_PAGES		(SNDRV_TRIDENT_MAX_PAGES / 2)
#define get_aligned_page(offset)	((offset) >> 13)
#define aligned_page_offset(page)	((page) << 13)
#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) << 1)
#define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) << 1)

/* fill TLB entries -- we need to fill two entries */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
			       unsigned long ptr, dma_addr_t addr)
{
	page <<= 1;
	__set_tlb_bus(trident, page, ptr, addr);
	__set_tlb_bus(trident, page+1, ptr + SNDRV_TRIDENT_PAGE_SIZE, addr + SNDRV_TRIDENT_PAGE_SIZE);
}
static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
	page <<= 1;
	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
	__set_tlb_bus(trident, page+1, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
}
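/*
 * Illustrative example (PAGE_SIZE == 8192): aligned page 3 starts at byte
 * offset 0x6000 and is backed by Trident pages 6 and 7, so
 * set_tlb_bus(trident, 3, ptr, addr) fills TLB entries 6 and 7 with the two
 * consecutive 4 KB halves of the same system page.
 */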

#else
/* arbitrary size */
#define UNIT_PAGES		(PAGE_SIZE / SNDRV_TRIDENT_PAGE_SIZE)
#define ALIGN_PAGE_SIZE		(SNDRV_TRIDENT_PAGE_SIZE * UNIT_PAGES)
#define MAX_ALIGN_PAGES		(SNDRV_TRIDENT_MAX_PAGES / UNIT_PAGES)
/* Note: if the alignment doesn't match the maximum size, the last few blocks
 * become unusable.  To use such blocks, set_tlb_bus() and set_silent_tlb()
 * would need to check that the accessed page is valid, and search_empty()
 * would have to check it as well.
 */
#define get_aligned_page(offset)	((offset) / ALIGN_PAGE_SIZE)
#define aligned_page_offset(page)	((page) * ALIGN_PAGE_SIZE)
#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) * UNIT_PAGES)
#define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) * UNIT_PAGES)
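/*
 * Illustrative example (assuming 16 KB system pages): UNIT_PAGES == 4 and
 * ALIGN_PAGE_SIZE == 16 KB, so aligned page 2 starts at offset 0x8000 and
 * set_tlb_bus() below fills Trident pages 8..11 for it; MAX_ALIGN_PAGES
 * shrinks to SNDRV_TRIDENT_MAX_PAGES / 4 accordingly.
 */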

/* fill TLB entries -- UNIT_PAGES entries must be filled */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
			       unsigned long ptr, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_tlb_bus(trident, page, ptr, addr);
		ptr += SNDRV_TRIDENT_PAGE_SIZE;
		addr += SNDRV_TRIDENT_PAGE_SIZE;
	}
}
static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
}

#endif /* PAGE_SIZE */

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_trident *trident, int offset)
{
	char *ptr;
	ptr = page_to_ptr(trident, get_aligned_page(offset));
	ptr += offset % ALIGN_PAGE_SIZE;
	return (void*)ptr;
}
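/*
 * Usage sketch with a hypothetical offset: offset_ptr(trident, 0x4123)
 * looks up the kernel virtual address of the aligned page containing byte
 * offset 0x4123 via the TLB shadow table and adds the in-page remainder
 * (0x123 when ALIGN_PAGE_SIZE is 4 KB).
 */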

/* first and last (aligned) pages of memory block */
#define firstpg(blk)	(((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->first_page)
#define lastpg(blk)	(((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->last_page)

/*
 * search for empty pages that can contain the given size
 */
static struct snd_util_memblk *
search_empty(struct snd_util_memhdr *hdr, int size)
{
	struct snd_util_memblk *blk;
	int page, psize;
	struct list_head *p;

	psize = get_aligned_page(size + ALIGN_PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &hdr->block) {
		blk = list_entry(p, struct snd_util_memblk, list);
		if (page + psize <= firstpg(blk))
			goto __found_pages;
		page = lastpg(blk) + 1;
	}
	if (page + psize > MAX_ALIGN_PAGES)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = __snd_util_memblk_new(hdr, psize * ALIGN_PAGE_SIZE, p->prev);
	if (blk == NULL)
		return NULL;
	blk->offset = aligned_page_offset(page); /* set aligned offset */
	firstpg(blk) = page;
	lastpg(blk) = page + psize - 1;
	return blk;
}
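/*
 * Illustrative first-fit example: if existing blocks occupy aligned pages
 * 0..3 and 8..15, a request needing four aligned pages fits into the gap
 * starting at page 4 (4 + 4 <= 8), so the new block is linked in before the
 * second block with firstpg == 4 and lastpg == 7.
 */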


/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(unsigned long ptr)
{
	if (ptr & ~0x3fffffffUL) {
		snd_printk(KERN_ERR "max memory size is 1GB!!\n");
		return 0;
	}
	if (ptr & (SNDRV_TRIDENT_PAGE_SIZE-1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * page allocation for DMA (Scatter-Gather version)
 */
static struct snd_util_memblk *
snd_trident_alloc_sg_pages(struct snd_trident *trident,
			   struct snd_pcm_substream *substream)
{
	struct snd_util_memhdr *hdr;
	struct snd_util_memblk *blk;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int idx, page;

	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes > SNDRV_TRIDENT_MAX_PAGES *
					SNDRV_TRIDENT_PAGE_SIZE))
		return NULL;
	hdr = trident->tlb.memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(hdr, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}

	/* set TLB entries */
	idx = 0;
	for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		unsigned long ptr = (unsigned long)
			snd_pcm_sgbuf_get_ptr(substream, ofs);
		if (!is_valid_page(addr)) {
			__snd_util_mem_free(hdr, blk);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		set_tlb_bus(trident, page, ptr, addr);
	}
	mutex_unlock(&hdr->block_mutex);
	return blk;
}

/*
 * page allocation for DMA (contiguous version)
 */
static struct snd_util_memblk *
snd_trident_alloc_cont_pages(struct snd_trident *trident,
			     struct snd_pcm_substream *substream)
{
	struct snd_util_memhdr *hdr;
	struct snd_util_memblk *blk;
	int page;
	struct snd_pcm_runtime *runtime = substream->runtime;
	dma_addr_t addr;
	unsigned long ptr;

	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes > SNDRV_TRIDENT_MAX_PAGES *
					SNDRV_TRIDENT_PAGE_SIZE))
		return NULL;
	hdr = trident->tlb.memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(hdr, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}

	/* set TLB entries */
	addr = runtime->dma_addr;
	ptr = (unsigned long)runtime->dma_area;
	for (page = firstpg(blk); page <= lastpg(blk); page++,
	     ptr += SNDRV_TRIDENT_PAGE_SIZE, addr += SNDRV_TRIDENT_PAGE_SIZE) {
		if (!is_valid_page(addr)) {
			__snd_util_mem_free(hdr, blk);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		set_tlb_bus(trident, page, ptr, addr);
	}
	mutex_unlock(&hdr->block_mutex);
	return blk;
}

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_trident_alloc_pages(struct snd_trident *trident,
			struct snd_pcm_substream *substream)
{
	if (snd_BUG_ON(!trident || !substream))
		return NULL;
	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_SG)
		return snd_trident_alloc_sg_pages(trident, substream);
	else
		return snd_trident_alloc_cont_pages(trident, substream);
}


/*
 * release DMA buffer from page table
 */
int snd_trident_free_pages(struct snd_trident *trident,
			   struct snd_util_memblk *blk)
{
	struct snd_util_memhdr *hdr;
	int page;

	if (snd_BUG_ON(!trident || !blk))
		return -EINVAL;

	hdr = trident->tlb.memhdr;
	mutex_lock(&hdr->block_mutex);
	/* reset TLB entries */
	for (page = firstpg(blk); page <= lastpg(blk); page++)
		set_silent_tlb(trident, page);
	/* free memory block */
	__snd_util_mem_free(hdr, blk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}