root/arch/arm/mm/dma-mapping-nommu.c


DEFINITIONS

This source file includes the following definitions:
  1. arm_nommu_dma_alloc
  2. arm_nommu_dma_free
  3. arm_nommu_dma_mmap
  4. __dma_page_cpu_to_dev
  5. __dma_page_dev_to_cpu
  6. arm_nommu_dma_map_page
  7. arm_nommu_dma_unmap_page
  8. arm_nommu_dma_map_sg
  9. arm_nommu_dma_unmap_sg
  10. arm_nommu_dma_sync_single_for_device
  11. arm_nommu_dma_sync_single_for_cpu
  12. arm_nommu_dma_sync_sg_for_device
  13. arm_nommu_dma_sync_sg_for_cpu
  14. arch_setup_dma_ops

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Based on linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>

#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>

#include "dma.h"

/*
 *  The generic direct mapping code is used if
 *   - MMU/MPU is off
 *   - cpu is v7m w/o cache support
 *   - device is coherent
 *  otherwise arm_nommu_dma_ops is used.
 *
 *  arm_nommu_dma_ops relies on consistent DMA memory (please refer to
 *  [1] for how to declare such memory).
 *
 *  [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */

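/*
 * Illustrative sketch, not part of this file: a minimal reserved-memory
 * node providing the global coherent pool that arm_nommu_dma_ops
 * depends on, per [1]. The address, size and node name are placeholder
 * assumptions; "shared-dma-pool", "no-map" and "linux,dma-default" are
 * properties defined by the binding (no-map is required on ARM).
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma@60000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x60000000 0x100000>;
 *			no-map;
 *			linux,dma-default;
 *		};
 *	};
 */
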
static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t gfp,
                                 unsigned long attrs)
{
        void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);

        /*
         * dma_alloc_from_global_coherent() may fail because:
         *
         * - no consistent DMA region has been defined, so we can't
         *   continue.
         * - there is no space left in the consistent DMA region, so we
         *   can only fall back to the generic allocator if we have
         *   been told that consistency is not required.
         */

        WARN_ON_ONCE(ret == NULL);
        return ret;
}

static void arm_nommu_dma_free(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr,
                               unsigned long attrs)
{
        int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);

        WARN_ON_ONCE(ret == 0);
}

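/*
 * Illustrative sketch, not part of this file: how a driver reaches the
 * two handlers above. dma_alloc_coherent()/dma_free_coherent() come
 * from <linux/dma-mapping.h> (SZ_4K from <linux/sizes.h>); the device
 * and ring size are hypothetical.
 */
static int example_alloc_ring(struct device *dev)
{
        dma_addr_t ring_dma;
        void *ring;

        /* Served from the global coherent pool by arm_nommu_dma_alloc(). */
        ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM; /* no pool declared, or pool exhausted */

        /* ... program ring_dma into the device, access "ring" from the CPU ... */

        /* Returns the memory to the pool via arm_nommu_dma_free(). */
        dma_free_coherent(dev, SZ_4K, ring, ring_dma);
        return 0;
}
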
static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
                              unsigned long attrs)
{
        int ret;

        if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
                return ret;
        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
        return -ENXIO;
}
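
/*
 * Illustrative sketch, not part of this file: arm_nommu_dma_mmap() is
 * what dma_mmap_coherent() ends up calling for such a device. The
 * "example_drv" structure and its fields are assumptions.
 */
struct example_drv {
        struct device *dev;
        void *ring;             /* from dma_alloc_coherent() */
        dma_addr_t ring_dma;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct example_drv *drv = file->private_data;

        /* Map the coherent ring buffer into the calling process. */
        return dma_mmap_coherent(drv->dev, vma, drv->ring,
                                 drv->ring_dma, SZ_4K);
}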
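/*
 * Cache maintenance around ownership transfer of a streaming buffer:
 * before the device owns the buffer, dirty CPU cache lines are written
 * back so the device sees them (DMA_TO_DEVICE), or invalidated so a
 * later eviction cannot overwrite data the device is about to write
 * (DMA_FROM_DEVICE); dmac_map_area() handles the inner cache and
 * outer_*_range() the outer cache. When ownership returns to the CPU,
 * lines that may have been speculatively fetched while DMA was in
 * flight are invalidated; nothing is needed for DMA_TO_DEVICE.
 */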
static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
                                  enum dma_data_direction dir)
{
        dmac_map_area(__va(paddr), size, dir);

        if (dir == DMA_FROM_DEVICE)
                outer_inv_range(paddr, paddr + size);
        else
                outer_clean_range(paddr, paddr + size);
}

static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
                                  enum dma_data_direction dir)
{
        if (dir != DMA_TO_DEVICE) {
                outer_inv_range(paddr, paddr + size);
                dmac_unmap_area(__va(paddr), size, dir);
        }
}

static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir,
                                         unsigned long attrs)
{
        dma_addr_t handle = page_to_phys(page) + offset;

        __dma_page_cpu_to_dev(handle, size, dir);

        return handle;
}

static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
                                     size_t size, enum dma_data_direction dir,
                                     unsigned long attrs)
{
        __dma_page_dev_to_cpu(handle, size, dir);
}

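/*
 * Illustrative sketch, not part of this file: the streaming calls that
 * land on the two handlers above. dma_map_single()/dma_unmap_single()
 * are the usual driver-side entry points; buffer and length are
 * hypothetical.
 */
static int example_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma;

        /* Cleans the CPU caches via __dma_page_cpu_to_dev(). */
        dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* ... hand "dma" to the device and wait for completion ... */

        /* No cache work needed for DMA_TO_DEVICE, but always pair the calls. */
        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
        return 0;
}
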
static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
                                int nents, enum dma_data_direction dir,
                                unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                sg_dma_address(sg) = sg_phys(sg);
                sg_dma_len(sg) = sg->length;
                __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
        }

        return nents;
}

static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                                   int nents, enum dma_data_direction dir,
                                   unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

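/*
 * Illustrative sketch, not part of this file: building and mapping a
 * two-segment scatterlist through the handlers above. The buffers are
 * hypothetical; sg_init_table()/sg_set_buf() come from
 * <linux/scatterlist.h>.
 */
static int example_sg_tx(struct device *dev, void *a, void *b, size_t len)
{
        struct scatterlist sgl[2];
        int nents;

        sg_init_table(sgl, 2);
        sg_set_buf(&sgl[0], a, len);
        sg_set_buf(&sgl[1], b, len);

        /* Fills in sg_dma_address()/sg_dma_len() and cleans the caches. */
        nents = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
        if (!nents)
                return -ENOMEM;

        /* ... program the device with the nents mapped segments ... */

        dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
        return 0;
}
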
static void arm_nommu_dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        __dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        __dma_page_dev_to_cpu(handle, size, dir);
}

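/*
 * Illustrative sketch, not part of this file: re-using a long-lived
 * streaming mapping with the two sync handlers above, e.g. a receive
 * buffer the device fills repeatedly. Names are hypothetical.
 */
static void example_rx_poll(struct device *dev, dma_addr_t dma,
                            void *buf, size_t len)
{
        /* Hand the buffer to the CPU: invalidates stale cache lines. */
        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

        /* ... inspect the freshly DMA'd data in "buf" ... */

        /* Hand the buffer back to the device for the next transfer. */
        dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}
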
static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                                             int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
                                          int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

const struct dma_map_ops arm_nommu_dma_ops = {
        .alloc                  = arm_nommu_dma_alloc,
        .free                   = arm_nommu_dma_free,
        .mmap                   = arm_nommu_dma_mmap,
        .map_page               = arm_nommu_dma_map_page,
        .unmap_page             = arm_nommu_dma_unmap_page,
        .map_sg                 = arm_nommu_dma_map_sg,
        .unmap_sg               = arm_nommu_dma_unmap_sg,
        .sync_single_for_device = arm_nommu_dma_sync_single_for_device,
        .sync_single_for_cpu    = arm_nommu_dma_sync_single_for_cpu,
        .sync_sg_for_device     = arm_nommu_dma_sync_sg_for_device,
        .sync_sg_for_cpu        = arm_nommu_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(arm_nommu_dma_ops);

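/*
 * Invoked from the generic bus/firmware glue (e.g. of_dma_configure()
 * for DT-described devices, where "coherent" reflects the device's
 * "dma-coherent" property) to pick the DMA ops for a newly added device.
 */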
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
{
        if (IS_ENABLED(CONFIG_CPU_V7M)) {
                /*
                 * Cache support for v7m is optional, so it can be treated as
                 * coherent if no cache has been detected. Note that it is not
                 * enough to check whether the MPU is in use, since in the
                 * absence of an MPU the system memory map is used.
                 */
                dev->archdata.dma_coherent = (cacheid) ? coherent : true;
        } else {
                /*
                 * Assume coherent DMA if the MMU/MPU has not been set up.
                 */
                dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
        }

        if (!dev->archdata.dma_coherent)
                set_dma_ops(dev, &arm_nommu_dma_ops);
}
