/*
 * Fallback functions when the main IOMMU code is not compiled in.
 * This code is roughly equivalent to i386.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * Verify that a bus address is reachable through the device's DMA mask.
 * Returns 1 if the address is usable, 0 otherwise. Overflow is only
 * logged for masks of at least 32 bits, where it indicates a genuine
 * addressing bug rather than a small-mask device.
 */
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	if (hwdev && !dma_capable(hwdev, bus, size)) {
		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
			printk(KERN_ERR
			       "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
			       name, (long long)bus, size,
			       (long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}

/*
 * Without an IOMMU there is no remapping: the DMA (bus) address of a
 * page is simply its physical address.
 */
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;

	WARN_ON(size == 0);
	if (!check_addr("map_single", dev, bus, size))
		return DMA_ERROR_CODE;
	flush_write_buffers();
	return bus;
}

/*
 * Map a set of buffers described by a scatterlist in streaming mode for
 * DMA. This is the scatter-gather version of the nommu_map_page
 * interface above. Here the scatterlist elements are each tagged with
 * the appropriate DMA address and length, obtained via
 * sg_dma_{address,length}(sg).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements
 *       (for example via virtual mapping capabilities).
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues are the same as for dma_map_single above.
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}

/*
 * There is no IOMMU state to synchronize; flushing the CPU write
 * buffers is all that is needed before the device accesses memory.
 */
static void nommu_sync_single_for_device(struct device *dev,
					 dma_addr_t addr, size_t size,
					 enum dma_data_direction dir)
{
	flush_write_buffers();
}

static void nommu_sync_sg_for_device(struct device *dev,
				     struct scatterlist *sg, int nelems,
				     enum dma_data_direction dir)
{
	flush_write_buffers();
}

/* DMA operations used when no hardware IOMMU is present. */
struct dma_map_ops nommu_dma_ops = {
	.alloc			= dma_generic_alloc_coherent,
	.free			= dma_generic_free_coherent,
	.map_sg			= nommu_map_sg,
	.map_page		= nommu_map_page,
	.sync_single_for_device	= nommu_sync_single_for_device,
	.sync_sg_for_device	= nommu_sync_sg_for_device,
	.is_phys		= 1,	/* bus addresses are physical addresses */
};
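
/*
 * Illustrative usage sketch, not part of the original file: how a
 * driver's streaming-DMA calls reach nommu_dma_ops when no IOMMU is
 * compiled in. dma_map_single() dispatches through the device's
 * dma_map_ops and lands in nommu_map_page() above, so the returned
 * handle is just the buffer's physical address. The function name and
 * its arguments are hypothetical; the block is kept under #if 0 so it
 * is never built.
 */
#if 0
static int example_nommu_map_single(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* A failed mapping means check_addr() rejected the address. */
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... program the device with 'handle' and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif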
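
/*
 * Likewise for scatter-gather, again a hypothetical sketch under #if 0:
 * dma_map_sg() dispatches to nommu_map_sg() above, which tags every
 * entry with its physical address. A return value of 0 means one of
 * the entries failed the check_addr() test.
 */
#if 0
static int example_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
				int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!mapped)
		return -EIO;

	for_each_sg(sgl, sg, mapped, i) {
		/*
		 * Program one device descriptor per mapped segment using
		 * sg_dma_address(sg) (== sg_phys(sg) here) and
		 * sg_dma_len(sg).
		 */
	}

	/* Unmap with the original nents, not the mapped count. */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}
#endif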