root/arch/c6x/mm/dma-coherent.c


DEFINITIONS

This source file includes the following definitions.
  1. __alloc_dma_pages
  2. __free_dma_pages
  3. arch_dma_alloc
  4. arch_dma_free
  5. coherent_mem_init
  6. c6x_dma_sync
  7. arch_sync_dma_for_device
  8. arch_sync_dma_for_cpu

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Port on Texas Instruments TMS320C6x architecture
 *
 *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
 *  Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
 *
 *  DMA uncached mapping support.
 *
 *  Using code pulled from ARM
 *  Copyright (C) 2000-2004 Russell King
 */
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-noncoherent.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/setup.h>

/*
 * DMA coherent memory management; the region is empty by default and can
 * be defined with the memdma= kernel command line option.
 */

static phys_addr_t dma_base;
static u32 dma_size;
static u32 dma_pages;

static unsigned long *dma_bitmap;

/* bitmap lock */
static DEFINE_SPINLOCK(dma_lock);

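/*
 * Illustrative sketch (not part of this file): the memdma= option is
 * parsed early in the architecture setup code roughly along these lines,
 * assuming a memdma=<size>[@<start>] syntax (e.g. memdma=8M). The names
 * early_memdma, memdma_start and memdma_size are made up for the example;
 * the parsed values are handed to coherent_mem_init() during boot.
 */
static phys_addr_t memdma_start;
static u32 memdma_size;

static int __init early_memdma(char *p)
{
        if (!p || !*p)
                return 0;

        /* memparse() accepts K/M/G size suffixes */
        memdma_size = memparse(p, &p);
        if (*p == '@')
                memdma_start = memparse(p, &p);
        return 0;
}
early_param("memdma", early_memdma);
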
/*
 * Return a DMA coherent and contiguous memory chunk from the DMA memory
 */
static inline u32 __alloc_dma_pages(int order)
{
        unsigned long flags;
        int pos;

        spin_lock_irqsave(&dma_lock, flags);
        pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
        spin_unlock_irqrestore(&dma_lock, flags);

        /*
         * bitmap_find_free_region() returns a negative errno when no free
         * region is left; report that as 0 so the caller's !paddr check
         * treats it as an allocation failure.
         */
        if (pos < 0)
                return 0;

        return dma_base + (pos << PAGE_SHIFT);
}

/*
 * Return a previously allocated chunk to the DMA memory bitmap.
 */
static void __free_dma_pages(u32 addr, int order)
{
        unsigned long flags;
        u32 pos = (addr - dma_base) >> PAGE_SHIFT;

        if (addr < dma_base || (pos + (1 << order)) >= dma_pages) {
                printk(KERN_ERR "%s: freeing outside range.\n", __func__);
                BUG();
        }

        spin_lock_irqsave(&dma_lock, flags);
        bitmap_release_region(dma_bitmap, pos, order);
        spin_unlock_irqrestore(&dma_lock, flags);
}
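
/*
 * Example of the bitmap bookkeeping above (illustrative bit positions):
 * an order-1 request asks bitmap_find_free_region(dma_bitmap, dma_pages, 1)
 * for two naturally aligned free bits; if bits 4 and 5 are free they are
 * marked and 4 is returned, i.e. the chunk at dma_base + (4 << PAGE_SHIFT).
 * bitmap_release_region(dma_bitmap, 4, 1) clears the same two bits again.
 */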

/*
 * Allocate DMA coherent memory space and return both the kernel
 * virtual and DMA address for that space.
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                gfp_t gfp, unsigned long attrs)
{
        void *ret;
        u32 paddr;
        int order;

        if (!dma_size || !size)
                return NULL;

        /* smallest power-of-two number of pages that covers "size" */
        order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);

        paddr = __alloc_dma_pages(order);

        if (handle)
                *handle = paddr;

        if (!paddr)
                return NULL;

        ret = phys_to_virt(paddr);
        memset(ret, 0, PAGE_SIZE << order);
        return ret;
}

/*
 * Free DMA coherent memory as defined by the above mapping.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        int order;

        if (!dma_size || !size)
                return;

        order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);

        __free_dma_pages(virt_to_phys(vaddr), order);
}
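
/*
 * Worked example of the order computation used above: for a 5000 byte
 * request with 4 KiB pages, ((5000 - 1) >> PAGE_SHIFT) + 1 = 2 pages and
 * get_count_order(2) = 1, so a 2-page (8 KiB) chunk is reserved.
 *
 * Driver-side sketch (illustrative only; the names, device and size are
 * made up): a dma_alloc_coherent() call on this non-coherent architecture
 * typically reaches arch_dma_alloc() above, so the buffer is carved out
 * of the memdma= region.
 */
static void *example_buf;
static dma_addr_t example_handle;

static int example_setup(struct device *dev)
{
        example_buf = dma_alloc_coherent(dev, 8192, &example_handle,
                                         GFP_KERNEL);
        if (!example_buf)
                return -ENOMEM;

        /* example_handle is the bus address to program into the device */
        return 0;
}

static void example_teardown(struct device *dev)
{
        dma_free_coherent(dev, 8192, example_buf, example_handle);
}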

/*
 * Initialise the coherent DMA memory allocator using the given uncached region.
 */
void __init coherent_mem_init(phys_addr_t start, u32 size)
{
        if (!size)
                return;

        printk(KERN_INFO
               "Coherent memory (DMA) region start=0x%x size=0x%x\n",
               start, size);

        dma_base = start;
        dma_size = size;

        /* allocate bitmap */
        dma_pages = dma_size >> PAGE_SHIFT;
        if (dma_size & (PAGE_SIZE - 1))
                ++dma_pages;

        dma_bitmap = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
                                    sizeof(long));
        if (!dma_bitmap)
                panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
                      __func__, BITS_TO_LONGS(dma_pages) * sizeof(long),
                      sizeof(long));
}
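
/*
 * Worked example of the sizing above (assuming 4 KiB pages and 32-bit
 * longs): booting with memdma=8M gives dma_size = 0x800000, hence
 * dma_pages = 0x800000 >> 12 = 2048, and the bitmap occupies
 * BITS_TO_LONGS(2048) * sizeof(long) = 64 * 4 = 256 bytes.
 */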

/*
 * Perform the L2 cache maintenance a non-coherent DMA transfer needs on
 * the [paddr, paddr + size) range, depending on the transfer direction.
 */
static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        switch (dir) {
        case DMA_FROM_DEVICE:
                L2_cache_block_invalidate(paddr, paddr + size);
                break;
        case DMA_TO_DEVICE:
                L2_cache_block_writeback(paddr, paddr + size);
                break;
        case DMA_BIDIRECTIONAL:
                L2_cache_block_writeback_invalidate(paddr, paddr + size);
                break;
        default:
                break;
        }
}

/* DMA API hook: cache maintenance before the device accesses the buffer */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        c6x_dma_sync(dev, paddr, size, dir);
}

/* DMA API hook: cache maintenance before the CPU touches the buffer again */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        c6x_dma_sync(dev, paddr, size, dir);
}
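
/*
 * Streaming-DMA sketch (illustrative only; the device, buffer and length
 * are made up): dma_map_single() and dma_unmap_single() on this
 * non-coherent architecture typically end up in arch_sync_dma_for_device()
 * and arch_sync_dma_for_cpu() above, which do the L2 cache maintenance.
 */
static int example_rx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* ... program "dma" into the device and wait for the transfer ... */

        /* make the device's writes visible before the CPU reads buf */
        dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
        return 0;
}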
