/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *  Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Based on DMA code from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>


void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* optimized page clearing */
	gfp |= __GFP_ZERO;

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		*dma_handle = virt_to_phys(ret);
		flush_dcache_range((unsigned long) ret,
				   (unsigned long) ret + size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		       dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);

	free_pages(addr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	       enum dma_data_direction direction)
{
	int i;

	BUG_ON(!valid_dma_direction(direction));

	for_each_sg(sg, sg, nents, i) {
		void *addr;

		addr = sg_virt(sg);
		if (addr) {
			__dma_sync_for_device(addr, sg->length, direction);
			sg->dma_address = sg_phys(sg);
		}
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction)
{
	void *addr;

	BUG_ON(!valid_dma_direction(direction));

	addr = page_address(page) + offset;
	__dma_sync_for_device(addr, size, direction);

	return page_to_phys(page) + offset;
}
EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}
EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	void *addr;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	if (direction == DMA_TO_DEVICE)
		return;

	for_each_sg(sg, sg, nhwentries, i) {
		addr = sg_virt(sg);
		if (addr)
			__dma_sync_for_cpu(addr, sg->length, direction);
	}
}
EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	__dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	__dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	/* Sync only the requested sub-range of the mapping. */
	__dma_sync_for_cpu(phys_to_virt(dma_handle) + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	/* Sync only the requested sub-range of the mapping. */
	__dma_sync_for_device(phys_to_virt(dma_handle) + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
	int i;

	BUG_ON(!valid_dma_direction(direction));

	/* Make sure that gcc doesn't leave the empty loop body. */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction direction)
{
	int i;

	BUG_ON(!valid_dma_direction(direction));

	/* Make sure that gcc doesn't leave the empty loop body. */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_device(sg_virt(sg), sg->length, direction);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
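
/*
 * Illustrative sketch only, not part of this file's API: roughly how a
 * driver might use the streaming helpers above to hand a kmalloc'd
 * (lowmem, linearly mapped) buffer to a device for one DMA_TO_DEVICE
 * transfer.  The function and variable names are hypothetical, and the
 * block is kept out of the build with #if 0.
 */
#if 0
static int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/*
	 * dma_map_page() above writes the CPU's cached data back to memory
	 * and returns the device-visible (physical) address of the buffer.
	 */
	handle = dma_map_page(dev, virt_to_page(buf), offset_in_page(buf),
			      len, DMA_TO_DEVICE);

	/* ... program the device with `handle' and wait for completion ... */

	/* Release the mapping once the transfer has completed. */
	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);

	return 0;
}
#endif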