include/linux/memremap.h

DEFINITIONS

This source file includes the following definitions:
  1. pgmap_altmap
  2. devm_memremap_pages
  3. devm_memunmap_pages
  4. get_dev_pagemap
  5. vmem_altmap_offset
  6. vmem_altmap_free
  7. put_dev_pagemap

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @end_pfn: end pfn of the dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base_pfn)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
        const unsigned long base_pfn;
        const unsigned long end_pfn;
        const unsigned long reserve;
        unsigned long free;
        unsigned long align;
        unsigned long alloc;
};
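
/*
 * Example (illustrative sketch, not part of this header): a pmem-style
 * driver that wants the memmap for its range stored in the device's own
 * capacity could describe the carve-out roughly as below. The resource
 * "res" and the two-page metadata reservation are hypothetical; the 1/64
 * ratio reflects one 64-byte struct page per 4096-byte page:
 *
 *	struct vmem_altmap altmap = {
 *		.base_pfn = PHYS_PFN(res.start),
 *		.end_pfn  = PHYS_PFN(res.end),
 *		.reserve  = 2,
 *		.free     = PHYS_PFN(resource_size(&res)) / 64,
 *	};
 *
 * vmemmap_populate() then hands out memmap pages from @free, tracking
 * consumption in @alloc, instead of allocating from System RAM.
 */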

/*
 * Specialize ZONE_DEVICE memory into multiple types each having a different
 * usage.
 *
 * MEMORY_DEVICE_PRIVATE:
 * Device memory that is not directly addressable by the CPU: the CPU can
 * neither read nor write private memory. In this case, we do still have
 * struct pages backing the device memory. Doing so simplifies the
 * implementation, but it is important to remember that there are certain
 * points at which the struct page must be treated as an opaque object,
 * rather than a "normal" struct page.
 *
 * A more complete discussion of unaddressable memory may be found in
 * include/linux/hmm.h and Documentation/vm/hmm.rst.
 *
 * MEMORY_DEVICE_FS_DAX:
 * Host memory that has similar access semantics as System RAM, i.e. DMA
 * coherent and supports page pinning. In support of coordinating page
 * pinning vs other operations, MEMORY_DEVICE_FS_DAX arranges for a
 * wakeup event whenever a page is unpinned and becomes idle. This
 * wakeup is used to coordinate physical address space management (ex:
 * fs truncate/hole punch) vs pinned pages (ex: device dma).
 *
 * MEMORY_DEVICE_DEVDAX:
 * Host memory that has similar access semantics as System RAM, i.e. DMA
 * coherent and supports page pinning. In contrast to
 * MEMORY_DEVICE_FS_DAX, this memory is accessed via a device-dax
 * character device.
 *
 * MEMORY_DEVICE_PCI_P2PDMA:
 * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
 * transactions.
 */
enum memory_type {
        /* 0 is reserved to catch uninitialized type fields */
        MEMORY_DEVICE_PRIVATE = 1,
        MEMORY_DEVICE_FS_DAX,
        MEMORY_DEVICE_DEVDAX,
        MEMORY_DEVICE_PCI_P2PDMA,
};
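
/*
 * Example (illustrative sketch): a PCI peer-to-peer DMA provider tags its
 * struct dev_pagemap (defined below) along these lines before registering
 * it; "pdev" and "bar" are a hypothetical PCI device and BAR index:
 *
 *	pgmap->res.start = pci_resource_start(pdev, bar);
 *	pgmap->res.end = pgmap->res.start + pci_resource_len(pdev, bar) - 1;
 *	pgmap->res.flags = pci_resource_flags(pdev, bar);
 *	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
 */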

struct dev_pagemap_ops {
        /*
         * Called once the page refcount reaches 1.  (ZONE_DEVICE pages never
         * reach 0 refcount unless there is a refcount bug. This allows the
         * device driver to implement its own memory management.)
         */
        void (*page_free)(struct page *page);

        /*
         * Transition the refcount in struct dev_pagemap to the dead state.
         */
        void (*kill)(struct dev_pagemap *pgmap);

        /*
         * Wait for refcount in struct dev_pagemap to be idle and reap it.
         */
        void (*cleanup)(struct dev_pagemap *pgmap);

        /*
         * Used for private (un-addressable) device memory only.  Must migrate
         * the page back to a CPU accessible page.
         */
        vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
};
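
/*
 * Example (illustrative sketch): a device-private memory driver only needs
 * the two page-lifecycle hooks; my_page_free() and my_migrate_to_ram() are
 * hypothetical driver callbacks, not kernel functions:
 *
 *	static const struct dev_pagemap_ops my_pgmap_ops = {
 *		.page_free	= my_page_free,
 *		.migrate_to_ram	= my_migrate_to_ram,
 *	};
 *
 * Drivers that pass their own @ref to devm_memremap_pages() must also
 * supply ->kill() and ->cleanup(); with no @ref, the internal reference
 * and its default teardown are used instead.
 */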

#define PGMAP_ALTMAP_VALID      (1 << 0)

/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @res: physical address range covered by @ref
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @internal_ref: internal reference if @ref is not provided by the caller
 * @done: completion for @internal_ref
 * @type: memory type: see MEMORY_DEVICE_* above
 * @flags: PGMAP_* flags to specify detailed behavior
 * @ops: method table
 */
struct dev_pagemap {
        struct vmem_altmap altmap;
        struct resource res;
        struct percpu_ref *ref;
        struct percpu_ref internal_ref;
        struct completion done;
        enum memory_type type;
        unsigned int flags;
        const struct dev_pagemap_ops *ops;
};
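
/*
 * Example (illustrative sketch): minimal hotplug of a device range from a
 * probe() routine. Error handling is abbreviated, and "dev" and "res" are
 * the hypothetical host device and physical range:
 *
 *	struct dev_pagemap *pgmap;
 *	void *addr;
 *
 *	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
 *	if (!pgmap)
 *		return -ENOMEM;
 *	pgmap->res = *res;
 *	pgmap->type = MEMORY_DEVICE_DEVDAX;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * On success every pfn in the range has a struct page, and the mapping is
 * unwound automatically when "dev" is unbound.
 */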

static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
        if (pgmap->flags & PGMAP_ALTMAP_VALID)
                return &pgmap->altmap;
        return NULL;
}
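
/*
 * Example (illustrative): callers check for an altmap before deciding
 * where the memmap lives, e.g. roughly:
 *
 *	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
 *
 *	if (altmap)
 *		pfn += vmem_altmap_offset(altmap);
 *
 * A NULL return means the memmap is allocated from regular memory.
 */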

#ifdef CONFIG_ZONE_DEVICE
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
#else
static inline void *devm_memremap_pages(struct device *dev,
                struct dev_pagemap *pgmap)
{
        /*
         * Fail attempts to call devm_memremap_pages() without
         * ZONE_DEVICE support enabled; this requires callers to fall
         * back to plain devm_memremap() based on config.
         */
        WARN_ON_ONCE(1);
        return ERR_PTR(-ENXIO);
}

static inline void devm_memunmap_pages(struct device *dev,
                struct dev_pagemap *pgmap)
{
}

static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap)
{
        return NULL;
}

static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
        return 0;
}

static inline void vmem_altmap_free(struct vmem_altmap *altmap,
                unsigned long nr_pfns)
{
}
#endif /* CONFIG_ZONE_DEVICE */

static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
        if (pgmap)
                percpu_ref_put(pgmap->ref);
}
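
/*
 * Example (illustrative sketch): get_dev_pagemap() takes a reference on
 * the mapping covering @pfn, which the caller drops when done:
 *
 *	struct dev_pagemap *pgmap;
 *
 *	pgmap = get_dev_pagemap(pfn, NULL);
 *	if (pgmap) {
 *		// ... operate on the ZONE_DEVICE page ...
 *		put_dev_pagemap(pgmap);
 *	}
 */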
#endif /* _LINUX_MEMREMAP_H_ */
