/*
 * Scatter-Gather buffer
 *
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>


/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)

int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return -EINVAL;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue;	/* not a chunk head, skip */
		/* the chunk head encodes its page count in the low bits */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;

	return 0;
}

#define MAX_ALLOC_PAGES		32

void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
						 chunk, &tmpb) < 0) {
			/* give up, unless the caller accepts a partial buffer */
			if (!sgbuf->pages)
				goto _failed;
			if (!res_size)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark head with page count */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
	if (!dmab->area)
		goto _failed;
	if (res_size)
		*res_size = sgbuf->size;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}

/*
 * compute the max chunk size with contiguous pages on sg-buffer
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on contiguous pages */
	return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
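
/*
 * Illustrative usage sketch, not part of the original file: how a caller
 * might allocate, query and release a scatter-gather buffer with the
 * helpers above.  The function name, the device pointer and the 256 kB
 * size are placeholder assumptions; in-tree users typically reach this
 * code through the higher-level snd_dma_alloc_pages() API rather than
 * calling snd_malloc_sgbuf_pages() directly.
 */
static void __maybe_unused snd_sgbuf_usage_sketch(struct device *dev)
{
	struct snd_dma_buffer dmab;
	size_t got = 0;
	unsigned int contig;

	/* request 256 kB; "got" may come back smaller on memory pressure */
	if (!snd_malloc_sgbuf_pages(dev, 256 * 1024, &dmab, &got))
		return;

	/* how many bytes starting at offset 0 are physically contiguous? */
	contig = snd_sgbuf_get_chunk_size(&dmab, 0, got);
	(void)contig;

	/* release the pages, the page tables and the vmap'ed area */
	snd_free_sgbuf_pages(&dmab);
}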