root/kernel/dma/contiguous.c


DEFINITIONS

This source file includes the following definitions:
  1. early_cma
  2. cma_early_percent_memory
  3. cma_early_percent_memory
  4. dma_contiguous_reserve
  5. dma_contiguous_reserve_area
  6. dma_alloc_from_contiguous
  7. dma_release_from_contiguous
  8. dma_alloc_contiguous
  9. dma_free_contiguous
  10. rmem_cma_device_init
  11. rmem_cma_device_release
  12. rmem_cma_setup

// SPDX-License-Identifier: GPL-2.0+
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *      Marek Szyprowski <m.szyprowski@samsung.com>
 *      Michal Nazarewicz <mina86@mina86.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;

/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;

static int __init early_cma(char *p)
{
        if (!p) {
                pr_err("Config string not provided\n");
                return -EINVAL;
        }

        size_cmdline = memparse(p, &p);
        if (*p != '@')
                return 0;
        base_cmdline = memparse(p + 1, &p);
        if (*p != '-') {
                limit_cmdline = base_cmdline + size_cmdline;
                return 0;
        }
        limit_cmdline = memparse(p + 1, &p);

        return 0;
}
early_param("cma", early_cma);
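
/*
 * For illustration, the forms accepted by the parser above (the cma=
 * parameter is documented as cma=nn[MG]@[start[MG][-end[MG]]] in
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *	cma=64M				64 MiB placed anywhere
 *	cma=64M@0x40000000		64 MiB at exactly 0x40000000
 *	cma=64M@0x40000000-0x80000000	64 MiB within [0x40000000, 0x80000000)
 */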

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
        struct memblock_region *reg;
        unsigned long total_pages = 0;

        /*
         * We cannot use memblock_phys_mem_size() here, because
         * memblock_analyze() has not been called yet.
         */
        for_each_memblock(memory, reg)
                total_pages += memblock_region_memory_end_pfn(reg) -
                               memblock_region_memory_base_pfn(reg);

        return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
        return 0;
}

#endif
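
/*
 * Worked example (illustrative): with CONFIG_CMA_SIZE_PERCENTAGE=10,
 * 4 KiB pages and 4 GiB of memory, total_pages is 4 GiB / 4 KiB = 1048576,
 * so the reservation is 1048576 * 10 / 100 = 104857 pages, i.e.
 * 104857 << PAGE_SHIFT, roughly 409 MiB.
 */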

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
        phys_addr_t selected_size = 0;
        phys_addr_t selected_base = 0;
        phys_addr_t selected_limit = limit;
        bool fixed = false;

        pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

        if (size_cmdline != -1) {
                selected_size = size_cmdline;
                selected_base = base_cmdline;
                selected_limit = min_not_zero(limit_cmdline, limit);
                if (base_cmdline + size_cmdline == limit_cmdline)
                        fixed = true;
        } else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
                selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
                selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
                selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
                selected_size = max(size_bytes, cma_early_percent_memory());
#endif
        }

        if (selected_size && !dma_contiguous_default_area) {
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         (unsigned long)selected_size / SZ_1M);

                dma_contiguous_reserve_area(selected_size, selected_base,
                                            selected_limit,
                                            &dma_contiguous_default_area,
                                            fixed);
        }
}
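
/*
 * Illustrative trace of the command-line cases above: "cma=64M" leaves
 * base_cmdline and limit_cmdline at zero, so the area may be placed
 * anywhere below @limit; "cma=64M@0x40000000" makes the parser set
 * limit_cmdline = base + size, so the test above yields fixed == true
 * and the area is reserved at exactly 0x40000000.
 */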

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas for
 * specific devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
                                       phys_addr_t limit, struct cma **res_cma,
                                       bool fixed)
{
        int ret;

        ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
                                        "reserved", res_cma);
        if (ret)
                return ret;

        /* Architecture specific contiguous memory fixup. */
        dma_contiguous_early_fixup(cma_get_base(*res_cma),
                                cma_get_size(*res_cma));

        return 0;
}
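
/*
 * Usage sketch (hypothetical arch setup code; foo_cma and the parameter
 * values are made up for illustration): reserve a dedicated 32 MiB area
 * below 4 GiB for one device, then attach it during device setup:
 *
 *	static struct cma *foo_cma;
 *
 *	dma_contiguous_reserve_area(SZ_32M, 0, DMA_BIT_MASK(32),
 *				    &foo_cma, false);
 *	...
 *	dev_set_cma_area(dev, foo_cma);
 */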

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
                                       unsigned int align, bool no_warn)
{
        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;

        return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                 int count)
{
        return cma_release(dev_get_cma_area(dev), pages, count);
}
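
/*
 * Usage sketch (hypothetical driver code; buf_size is made up): allocate
 * and release a buffer through the per-device (or global) CMA area:
 *
 *	size_t count = PAGE_ALIGN(buf_size) >> PAGE_SHIFT;
 *	struct page *pages;
 *
 *	pages = dma_alloc_from_contiguous(dev, count, get_order(buf_size),
 *					  false);
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	if (!dma_release_from_contiguous(dev, pages, count))
 *		pr_warn("pages did not come from CMA\n");
 */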

/**
 * dma_alloc_contiguous() - allocate contiguous pages
 * @dev:   Pointer to device for which the allocation is performed.
 * @size:  Requested allocation size.
 * @gfp:   Allocation flags.
 *
 * This function allocates a contiguous memory buffer for the specified
 * device. It uses the device-specific contiguous memory area if available,
 * or the default global one. It returns NULL when CMA cannot be used, and
 * the caller is then expected to fall back to a normal page allocation.
 *
 * Note that it bypasses one-page allocations from the global area, as the
 * addresses within a single page are always contiguous, so there is no need
 * to waste CMA pages on them; this also helps reduce fragmentation.
 */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
        /*
         * Page-align the count so that sub-page requests still reserve a
         * whole page, matching the count used by dma_free_contiguous().
         */
        size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;
        struct cma *cma = NULL;

        if (dev && dev->cma_area)
                cma = dev->cma_area;
        else if (count > 1)
                cma = dma_contiguous_default_area;

        /* CMA can be used only in a context that permits sleeping */
        if (cma && gfpflags_allow_blocking(gfp)) {
                size_t align = get_order(size);
                size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);

                page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
        }

        return page;
}

/**
 * dma_free_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @page:  Pointer to the allocated pages.
 * @size:  Size of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_contiguous(). Since
 * cma_release() returns false when the provided pages do not belong to a
 * contiguous area, and true otherwise, this function falls back to
 * __free_pages() upon a false return.
 */
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
        if (!cma_release(dev_get_cma_area(dev), page,
                         PAGE_ALIGN(size) >> PAGE_SHIFT))
                __free_pages(page, get_order(size));
}
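
/*
 * Usage sketch (simplified, hypothetical caller): dma_alloc_contiguous()
 * returns NULL when CMA cannot be used, so callers keep a page-allocator
 * fallback; dma_free_contiguous() handles pages from either source:
 *
 *	struct page *page = dma_alloc_contiguous(dev, size, GFP_KERNEL);
 *
 *	if (!page)
 *		page = alloc_pages(GFP_KERNEL, get_order(size));
 *	...
 *	dma_free_contiguous(dev, page, size);
 */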

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        dev_set_cma_area(dev, rmem->priv);
        return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        dev_set_cma_area(dev, NULL);
}

static const struct reserved_mem_ops rmem_cma_ops = {
        .device_init    = rmem_cma_device_init,
        .device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
        phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
        phys_addr_t mask = align - 1;
        unsigned long node = rmem->fdt_node;
        struct cma *cma;
        int err;

        if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
            of_get_flat_dt_prop(node, "no-map", NULL))
                return -EINVAL;

        if ((rmem->base & mask) || (rmem->size & mask)) {
                pr_err("Reserved memory: incorrect alignment of CMA region\n");
                return -EINVAL;
        }

        err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
        if (err) {
                pr_err("Reserved memory: unable to setup CMA region\n");
                return err;
        }
        /* Architecture specific contiguous memory fixup. */
        dma_contiguous_early_fixup(rmem->base, rmem->size);

        if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
                dma_contiguous_set_default(cma);

        rmem->ops = &rmem_cma_ops;
        rmem->priv = cma;

        pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);

        return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
#endif
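
/*
 * Device tree sketch (illustrative; follows the generic reserved-memory
 * binding in Documentation/devicetree/bindings/reserved-memory/): a node
 * matched by rmem_cma_setup() must be "shared-dma-pool" and "reusable",
 * and must not be "no-map":
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			alignment = <0x400000>;
 *			linux,cma-default;
 *		};
 *	};
 */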
