/*
 * DMA mapping support for platforms lacking IOMMUs.
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/dma-mapping.h>
#include <linux/io.h>

/*
 * With no IOMMU, the bus address is simply the physical address of the
 * buffer; the only work needed is cache maintenance before the device
 * touches the memory.
 */
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	dma_addr_t addr = page_to_phys(page) + offset;

	WARN_ON(size == 0);
	dma_cache_sync(dev, page_address(page) + offset, size, dir);

	return addr;
}

/*
 * Map each scatterlist entry one-to-one: sync the CPU cache for the
 * segment and report its physical address and length back to the caller.
 */
static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));

		dma_cache_sync(dev, sg_virt(s), s->length, dir);

		s->dma_address = sg_phys(s);
		s->dma_length = s->length;
	}

	return nents;
}

#ifdef CONFIG_DMA_NONCOHERENT
/* Explicit sync hooks are only needed when DMA is not cache-coherent. */
static void nommu_sync_single(struct device *dev, dma_addr_t addr,
			      size_t size, enum dma_data_direction dir)
{
	dma_cache_sync(dev, phys_to_virt(addr), size, dir);
}

static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
			  int nelems, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nelems, i)
		dma_cache_sync(dev, sg_virt(s), s->length, dir);
}
#endif

struct dma_map_ops nommu_dma_ops = {
	.alloc			= dma_generic_alloc_coherent,
	.free			= dma_generic_free_coherent,
	.map_page		= nommu_map_page,
	.map_sg			= nommu_map_sg,
#ifdef CONFIG_DMA_NONCOHERENT
	.sync_single_for_device	= nommu_sync_single,
	.sync_sg_for_device	= nommu_sync_sg,
#endif
	.is_phys		= 1,
};

/* Install the nommu ops unless the platform has already registered its own. */
void __init no_iommu_init(void)
{
	if (dma_ops)
		return;
	dma_ops = &nommu_dma_ops;
}