cma 188 arch/arm/mach-shmobile/setup-rcar-gen2.c static struct cma *rcar_gen2_dma_contiguous;
cma 694 arch/arm/mm/dma-mapping.c bool allowblock, cma;
cma 738 arch/arm/mm/dma-mapping.c cma = allowblock ? dev_get_cma_area(dev) : false;
cma 740 arch/arm/mm/dma-mapping.c if (cma)
cma 273 arch/powerpc/include/asm/kvm_host.h int cma;
cma 45 arch/powerpc/kernel/fadump.c static struct cma *fadump_cma;
cma 73 arch/powerpc/kvm/book3s_64_mmu_hv.c int cma = 0;
cma 85 arch/powerpc/kvm/book3s_64_mmu_hv.c cma = 1;
cma 101 arch/powerpc/kvm/book3s_64_mmu_hv.c if (cma)
cma 110 arch/powerpc/kvm/book3s_64_mmu_hv.c info->cma = cma;
cma 183 arch/powerpc/kvm/book3s_64_mmu_hv.c if (info->cma)
cma 61 arch/powerpc/kvm/book3s_hv_builtin.c static struct cma *kvm_cma;
cma 227 arch/s390/mm/init.c static int s390_cma_check_range(struct cma *cma, void *data)
cma 233 arch/s390/mm/init.c start = cma_get_base(cma);
cma 234 arch/s390/mm/init.c end = start + cma_get_size(cma);
cma 24 drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h bool cma;
cma 1461 drivers/gpu/drm/nouveau/nouveau_bo.c reg->bus.is_iomem = !drm->agp.cma;
cma 147 drivers/gpu/drm/nouveau/nouveau_drv.h bool cma;
cma 229 drivers/gpu/drm/nouveau/nouveau_ttm.c drm->agp.cma = pci->agp.cma;
cma 135 drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c pci->agp.cma = info.cant_use_aperture;
cma 42 drivers/s390/char/vmcp.c static struct cma *vmcp_cma;
cma 21 drivers/staging/android/ion/ion_cma_heap.c struct cma *cma;
cma 42 drivers/staging/android/ion/ion_cma_heap.c pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
cma 79 drivers/staging/android/ion/ion_cma_heap.c cma_release(cma_heap->cma, pages, nr_pages);
cma 90 drivers/staging/android/ion/ion_cma_heap.c cma_release(cma_heap->cma, pages, nr_pages);
cma 104 drivers/staging/android/ion/ion_cma_heap.c static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
cma 114 drivers/staging/android/ion/ion_cma_heap.c cma_heap->cma = cma;
cma 119 drivers/staging/android/ion/ion_cma_heap.c static int __ion_add_cma_heaps(struct cma *cma, void *data)
cma 123 drivers/staging/android/ion/ion_cma_heap.c heap = __ion_cma_heap_create(cma);
cma 127 drivers/staging/android/ion/ion_cma_heap.c heap->name = cma_get_name(cma);
cma 20 include/linux/cma.h struct cma;
cma 23 include/linux/cma.h extern phys_addr_t cma_get_base(const struct cma *cma);
cma 24 include/linux/cma.h extern unsigned long cma_get_size(const struct cma *cma);
cma 25 include/linux/cma.h extern const char *cma_get_name(const struct cma *cma);
cma 30 include/linux/cma.h bool fixed, const char *name, struct cma **res_cma);
cma 34 include/linux/cma.h struct cma **res_cma);
cma 35 include/linux/cma.h extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
cma 37 include/linux/cma.h extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
cma 39 include/linux/cma.h extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
cma 1285 include/linux/device.h struct cma *cma_area; /* contiguous memory area for dma
cma 55 include/linux/dma-contiguous.h struct cma;
cma 60 include/linux/dma-contiguous.h extern struct cma *dma_contiguous_default_area;
cma 62 include/linux/dma-contiguous.h static inline struct cma *dev_get_cma_area(struct device *dev)
cma 69 include/linux/dma-contiguous.h static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
cma 72 include/linux/dma-contiguous.h dev->cma_area = cma;
cma 75 include/linux/dma-contiguous.h static inline void dma_contiguous_set_default(struct cma *cma)
cma 77 include/linux/dma-contiguous.h dma_contiguous_default_area = cma;
cma 83 include/linux/dma-contiguous.h phys_addr_t limit, struct cma **res_cma,
cma 102 include/linux/dma-contiguous.h struct cma *cma;
cma 104 include/linux/dma-contiguous.h ret = dma_contiguous_reserve_area(size, base, limit, &cma, true);
cma 106 include/linux/dma-contiguous.h dev_set_cma_area(dev, cma);
cma 120 include/linux/dma-contiguous.h static inline struct cma *dev_get_cma_area(struct device *dev)
cma 125 include/linux/dma-contiguous.h static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
cma 127 include/linux/dma-contiguous.h static inline void dma_contiguous_set_default(struct cma *cma) { }
cma 132 include/linux/dma-contiguous.h phys_addr_t limit, struct cma **res_cma,
cma 3 include/trace/events/cma.h #define TRACE_SYSTEM cma
cma 33 kernel/dma/contiguous.c struct cma *dma_contiguous_default_area;
cma 163 kernel/dma/contiguous.c phys_addr_t limit, struct cma **res_cma,
cma 235 kernel/dma/contiguous.c struct cma *cma = NULL;
cma 238 kernel/dma/contiguous.c cma = dev->cma_area;
cma 240 kernel/dma/contiguous.c cma = dma_contiguous_default_area;
cma 243 kernel/dma/contiguous.c if (cma && gfpflags_allow_blocking(gfp)) {
cma 247 kernel/dma/contiguous.c page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
cma 304 kernel/dma/contiguous.c struct cma *cma;
cma 316 kernel/dma/contiguous.c err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
cma 325 kernel/dma/contiguous.c dma_contiguous_set_default(cma);
cma 328 kernel/dma/contiguous.c rmem->priv = cma;
cma 335 kernel/dma/contiguous.c RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
cma 39 mm/cma.c struct cma cma_areas[MAX_CMA_AREAS];
cma 43 mm/cma.c phys_addr_t cma_get_base(const struct cma *cma)
cma 45 mm/cma.c return PFN_PHYS(cma->base_pfn);
cma 48 mm/cma.c unsigned long cma_get_size(const struct cma *cma)
cma 50 mm/cma.c return cma->count << PAGE_SHIFT;
cma 53 mm/cma.c const char *cma_get_name(const struct cma *cma)
cma 55 mm/cma.c return cma->name ? cma->name : "(undefined)";
cma 58 mm/cma.c static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
cma 61 mm/cma.c if (align_order <= cma->order_per_bit)
cma 63 mm/cma.c return (1UL << (align_order - cma->order_per_bit)) - 1;
cma 70 mm/cma.c static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
cma 73 mm/cma.c return (cma->base_pfn & ((1UL << align_order) - 1))
cma 74 mm/cma.c >> cma->order_per_bit;
cma 77 mm/cma.c static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
cma 80 mm/cma.c return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
cma 83 mm/cma.c static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
cma 88 mm/cma.c bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
cma 89 mm/cma.c bitmap_count = cma_bitmap_pages_to_bits(cma, count);
cma 91 mm/cma.c mutex_lock(&cma->lock);
cma 92 mm/cma.c bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
cma 93 mm/cma.c mutex_unlock(&cma->lock);
cma 96 mm/cma.c static int __init cma_activate_area(struct cma *cma)
cma 98 mm/cma.c int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
cma 99 mm/cma.c unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
cma 100 mm/cma.c unsigned i = cma->count >> pageblock_order;
cma 103 mm/cma.c cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
cma 105 mm/cma.c if (!cma->bitmap) {
cma 106 mm/cma.c cma->count = 0;
cma 131 mm/cma.c mutex_init(&cma->lock);
cma 134 mm/cma.c INIT_HLIST_HEAD(&cma->mem_head);
cma 135 mm/cma.c spin_lock_init(&cma->mem_head_lock);
cma 141 mm/cma.c pr_err("CMA area %s could not be activated\n", cma->name);
cma 142 mm/cma.c kfree(cma->bitmap);
cma 143 mm/cma.c cma->count = 0;
cma 177 mm/cma.c struct cma **res_cma)
cma 179 mm/cma.c struct cma *cma;
cma 206 mm/cma.c cma = &cma_areas[cma_area_count];
cma 208 mm/cma.c cma->name = name;
cma 210 mm/cma.c cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
cma 211 mm/cma.c if (!cma->name)
cma 214 mm/cma.c cma->base_pfn = PFN_DOWN(base);
cma 215 mm/cma.c cma->count = size >> PAGE_SHIFT;
cma 216 mm/cma.c cma->order_per_bit = order_per_bit;
cma 217 mm/cma.c *res_cma = cma;
cma 246 mm/cma.c bool fixed, const char *name, struct cma **res_cma)
cma 379 mm/cma.c static void cma_debug_show_areas(struct cma *cma)
cma 384 mm/cma.c unsigned long nbits = cma_bitmap_maxno(cma);
cma 386 mm/cma.c mutex_lock(&cma->lock);
cma 389 mm/cma.c next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
cma 392 mm/cma.c next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
cma 394 mm/cma.c nr_part = nr_zero << cma->order_per_bit;
cma 400 mm/cma.c pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
cma 401 mm/cma.c mutex_unlock(&cma->lock);
cma 404 mm/cma.c static inline void cma_debug_show_areas(struct cma *cma) { }
cma 417 mm/cma.c struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
cma 428 mm/cma.c if (!cma || !cma->count)
cma 431 mm/cma.c pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
cma 437 mm/cma.c mask = cma_bitmap_aligned_mask(cma, align);
cma 438 mm/cma.c offset = cma_bitmap_aligned_offset(cma, align);
cma 439 mm/cma.c bitmap_maxno = cma_bitmap_maxno(cma);
cma 440 mm/cma.c bitmap_count = cma_bitmap_pages_to_bits(cma, count);
cma 446 mm/cma.c mutex_lock(&cma->lock);
cma 447 mm/cma.c bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
cma 451 mm/cma.c mutex_unlock(&cma->lock);
cma 454 mm/cma.c bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
cma 460 mm/cma.c mutex_unlock(&cma->lock);
cma 462 mm/cma.c pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
cma 472 mm/cma.c cma_clear_bitmap(cma, pfn, count);
cma 497 mm/cma.c cma_debug_show_areas(cma);
cma 514 mm/cma.c bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
cma 518 mm/cma.c if (!cma || !pages)
cma 525 mm/cma.c if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
cma 528 mm/cma.c VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
cma 531 mm/cma.c cma_clear_bitmap(cma, pfn, count);
cma 537 mm/cma.c int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
cma 18 mm/cma.h extern struct cma cma_areas[MAX_CMA_AREAS];
cma 21 mm/cma.h static inline unsigned long cma_bitmap_maxno(struct cma *cma)
cma 23 mm/cma.h return cma->count >> cma->order_per_bit;
cma 36 mm/cma_debug.c struct cma *cma = data;
cma 39 mm/cma_debug.c mutex_lock(&cma->lock);
cma 41 mm/cma_debug.c used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
cma 42 mm/cma_debug.c mutex_unlock(&cma->lock);
cma 43 mm/cma_debug.c *val = (u64)used << cma->order_per_bit;
cma 51 mm/cma_debug.c struct cma *cma = data;
cma 54 mm/cma_debug.c unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
cma 56 mm/cma_debug.c mutex_lock(&cma->lock);
cma 58 mm/cma_debug.c start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
cma 61 mm/cma_debug.c end = find_next_bit(cma->bitmap, bitmap_maxno, start);
cma 64 mm/cma_debug.c mutex_unlock(&cma->lock);
cma 65 mm/cma_debug.c *val = (u64)maxchunk << cma->order_per_bit;
cma 71 mm/cma_debug.c static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
cma 73 mm/cma_debug.c spin_lock(&cma->mem_head_lock);
cma 74 mm/cma_debug.c hlist_add_head(&mem->node, &cma->mem_head);
cma 75 mm/cma_debug.c spin_unlock(&cma->mem_head_lock);
cma 78 mm/cma_debug.c static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
cma 82 mm/cma_debug.c spin_lock(&cma->mem_head_lock);
cma 83 mm/cma_debug.c if (!hlist_empty(&cma->mem_head)) {
cma 84 mm/cma_debug.c mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
cma 87 mm/cma_debug.c spin_unlock(&cma->mem_head_lock);
cma 92 mm/cma_debug.c static int cma_free_mem(struct cma *cma, int count)
cma 97 mm/cma_debug.c mem = cma_get_entry_from_list(cma);
cma 102 mm/cma_debug.c cma_release(cma, mem->p, mem->n);
cma 105 mm/cma_debug.c } else if (cma->order_per_bit == 0) {
cma 106 mm/cma_debug.c cma_release(cma, mem->p, count);
cma 110 mm/cma_debug.c cma_add_to_cma_mem_list(cma, mem);
cma 113 mm/cma_debug.c cma_add_to_cma_mem_list(cma, mem);
cma 125 mm/cma_debug.c struct cma *cma = data;
cma 127 mm/cma_debug.c return cma_free_mem(cma, pages);
cma 131 mm/cma_debug.c static int cma_alloc_mem(struct cma *cma, int count)
cma 140 mm/cma_debug.c p = cma_alloc(cma, count, 0, false);
cma 149 mm/cma_debug.c cma_add_to_cma_mem_list(cma, mem);
cma 157 mm/cma_debug.c struct cma *cma = data;
cma 159 mm/cma_debug.c return cma_alloc_mem(cma, pages);
cma 163 mm/cma_debug.c static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
cma 169 mm/cma_debug.c scnprintf(name, sizeof(name), "cma-%s", cma->name);
cma 173 mm/cma_debug.c debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
cma 174 mm/cma_debug.c debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
cma 176 mm/cma_debug.c &cma->base_pfn, &cma_debugfs_fops);
cma 177 mm/cma_debug.c debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops);
cma 179 mm/cma_debug.c &cma->order_per_bit, &cma_debugfs_fops);
cma 180 mm/cma_debug.c debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);
cma 181 mm/cma_debug.c debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);
cma 183 mm/cma_debug.c u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
cma 184 mm/cma_debug.c debugfs_create_u32_array("bitmap", 0444, tmp, (u32 *)cma->bitmap, u32s);
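The include/linux/cma.h hits above give the public CMA interface (cma_get_base, cma_get_size, cma_get_name, cma_alloc, cma_release, cma_for_each_area), and the drivers/staging/android/ion/ion_cma_heap.c and kernel/dma/contiguous.c hits show callers pairing cma_alloc with cma_release on a struct cma area. Below is a minimal usage sketch of that pairing; example_use_cma, its argument name, and the page count are illustrative and do not appear in the listing, and the final bool argument of cma_alloc (passed as false by ion and as gfp & __GFP_NOWARN by the DMA code) is assumed here to be a "suppress allocation-failure warning" flag.

/*
 * Minimal sketch of cma_alloc()/cma_release() usage, modelled on the
 * ion_cma_heap.c calls in the listing above.  example_use_cma and its
 * parameter are hypothetical names for illustration only.
 */
#include <linux/cma.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/printk.h>

static int example_use_cma(struct cma *area)
{
	size_t nr_pages = 16;			/* arbitrary request size */
	phys_addr_t base = cma_get_base(area);
	struct page *pages;

	/*
	 * align is an order: 0 means no alignment beyond a single page.
	 * The final bool (assumed to be the no-warn flag) is false, so a
	 * failed allocation is allowed to warn.
	 */
	pages = cma_alloc(area, nr_pages, 0, false);
	if (!pages)
		return -ENOMEM;

	pr_info("CMA area %s: base %pa, size %lu bytes\n",
		cma_get_name(area), &base, cma_get_size(area));

	/* Hand the pages back so the area's bitmap is cleared again. */
	cma_release(area, pages, nr_pages);
	return 0;
}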