root/include/linux/memory_hotplug.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes following definitions.
  1. zone_span_seqbegin
  2. zone_span_seqretry
  3. zone_span_writelock
  4. zone_span_writeunlock
  5. zone_seqlock_init
  6. movable_node_is_enabled
  7. add_pages
  8. memory_add_physaddr_to_nid
  9. arch_refresh_nodedata
  10. generic_alloc_nodedata
  11. generic_free_nodedata
  12. arch_refresh_nodedata
  13. register_page_bootmem_info_node
  14. zone_span_seqbegin
  15. zone_span_seqretry
  16. zone_span_writelock
  17. zone_span_writeunlock
  18. zone_seqlock_init
  19. mhp_notimplemented
  20. register_page_bootmem_info_node
  21. try_online_node
  22. get_online_mems
  23. put_online_mems
  24. mem_hotplug_begin
  25. mem_hotplug_done
  26. movable_node_is_enabled
  27. pgdat_resize_lock
  28. pgdat_resize_unlock
  29. pgdat_resize_init
  30. pgdat_resize_lock
  31. pgdat_resize_unlock
  32. pgdat_resize_init
  33. is_mem_section_removable
  34. try_offline_node
  35. offline_pages
  36. remove_memory
  37. __remove_memory

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef __LINUX_MEMORY_HOTPLUG_H
   3 #define __LINUX_MEMORY_HOTPLUG_H
   4 
   5 #include <linux/mmzone.h>
   6 #include <linux/spinlock.h>
   7 #include <linux/notifier.h>
   8 #include <linux/bug.h>
   9 
  10 struct page;
  11 struct zone;
  12 struct pglist_data;
  13 struct mem_section;
  14 struct memory_block;
  15 struct resource;
  16 struct vmem_altmap;
  17 
  18 #ifdef CONFIG_MEMORY_HOTPLUG
  19 /*
  20  * Return page for the valid pfn only if the page is online. All pfn
  21  * walkers which rely on the fully initialized page->flags and others
  22  * should use this rather than pfn_valid && pfn_to_page
  23  */
  24 #define pfn_to_online_page(pfn)                                    \
  25 ({                                                                 \
  26         struct page *___page = NULL;                               \
  27         unsigned long ___pfn = pfn;                                \
  28         unsigned long ___nr = pfn_to_section_nr(___pfn);           \
  29                                                                    \
  30         if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
  31             pfn_valid_within(___pfn))                              \
  32                 ___page = pfn_to_page(___pfn);                     \
  33         ___page;                                                   \
  34 })
  35 
  36 /*
  37  * Types for free bootmem stored in page->lru.next. These have to be in
  38  * some random range in unsigned long space for debugging purposes.
  39  */
  40 enum {
  41         MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
  42         SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
  43         MIX_SECTION_INFO,
  44         NODE_INFO,
  45         MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
  46 };
  47 
  48 /* Types for control the zone type of onlined and offlined memory */
  49 enum {
  50         MMOP_OFFLINE = -1,
  51         MMOP_ONLINE_KEEP,
  52         MMOP_ONLINE_KERNEL,
  53         MMOP_ONLINE_MOVABLE,
  54 };
  55 
  56 /*
  57  * Restrictions for the memory hotplug:
  58  * flags:  MHP_ flags
  59  * altmap: alternative allocator for memmap array
  60  */
  61 struct mhp_restrictions {
  62         unsigned long flags;
  63         struct vmem_altmap *altmap;
  64 };
  65 
  66 /*
  67  * Zone resizing functions
  68  *
  69  * Note: any attempt to resize a zone should has pgdat_resize_lock()
  70  * zone_span_writelock() both held. This ensure the size of a zone
  71  * can't be changed while pgdat_resize_lock() held.
  72  */
  73 static inline unsigned zone_span_seqbegin(struct zone *zone)
  74 {
  75         return read_seqbegin(&zone->span_seqlock);
  76 }
  77 static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
  78 {
  79         return read_seqretry(&zone->span_seqlock, iv);
  80 }
  81 static inline void zone_span_writelock(struct zone *zone)
  82 {
  83         write_seqlock(&zone->span_seqlock);
  84 }
  85 static inline void zone_span_writeunlock(struct zone *zone)
  86 {
  87         write_sequnlock(&zone->span_seqlock);
  88 }
  89 static inline void zone_seqlock_init(struct zone *zone)
  90 {
  91         seqlock_init(&zone->span_seqlock);
  92 }
  93 extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
  94 extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
  95 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
  96 /* VM interface that may be used by firmware interface */
  97 extern int online_pages(unsigned long, unsigned long, int);
  98 extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
  99         unsigned long *valid_start, unsigned long *valid_end);
 100 extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
 101                                                 unsigned long end_pfn);
 102 
 103 typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
 104 
 105 extern int set_online_page_callback(online_page_callback_t callback);
 106 extern int restore_online_page_callback(online_page_callback_t callback);
 107 
 108 extern void __online_page_set_limits(struct page *page);
 109 extern void __online_page_increment_counters(struct page *page);
 110 extern void __online_page_free(struct page *page);
 111 
 112 extern int try_online_node(int nid);
 113 
 114 extern int arch_add_memory(int nid, u64 start, u64 size,
 115                         struct mhp_restrictions *restrictions);
 116 extern u64 max_mem_size;
 117 
 118 extern bool memhp_auto_online;
 119 /* If movable_node boot option specified */
 120 extern bool movable_node_enabled;
 121 static inline bool movable_node_is_enabled(void)
 122 {
 123         return movable_node_enabled;
 124 }
 125 
 126 extern void arch_remove_memory(int nid, u64 start, u64 size,
 127                                struct vmem_altmap *altmap);
 128 extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
 129                            struct vmem_altmap *altmap);
 130 
 131 /* reasonably generic interface to expand the physical pages */
 132 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
 133                        struct mhp_restrictions *restrictions);
 134 
 135 #ifndef CONFIG_ARCH_HAS_ADD_PAGES
 136 static inline int add_pages(int nid, unsigned long start_pfn,
 137                 unsigned long nr_pages, struct mhp_restrictions *restrictions)
 138 {
 139         return __add_pages(nid, start_pfn, nr_pages, restrictions);
 140 }
 141 #else /* ARCH_HAS_ADD_PAGES */
 142 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
 143               struct mhp_restrictions *restrictions);
 144 #endif /* ARCH_HAS_ADD_PAGES */
 145 
 146 #ifdef CONFIG_NUMA
 147 extern int memory_add_physaddr_to_nid(u64 start);
 148 #else
 149 static inline int memory_add_physaddr_to_nid(u64 start)
 150 {
 151         return 0;
 152 }
 153 #endif
 154 
 155 #ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
 156 /*
 157  * For supporting node-hotadd, we have to allocate a new pgdat.
 158  *
 159  * If an arch has generic style NODE_DATA(),
 160  * node_data[nid] = kzalloc() works well. But it depends on the architecture.
 161  *
 162  * In general, generic_alloc_nodedata() is used.
 163  * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
 164  *
 165  */
 166 extern pg_data_t *arch_alloc_nodedata(int nid);
 167 extern void arch_free_nodedata(pg_data_t *pgdat);
 168 extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
 169 
 170 #else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
 171 
 172 #define arch_alloc_nodedata(nid)        generic_alloc_nodedata(nid)
 173 #define arch_free_nodedata(pgdat)       generic_free_nodedata(pgdat)
 174 
 175 #ifdef CONFIG_NUMA
 176 /*
 177  * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
 178  * XXX: kmalloc_node() can't work well to get new node's memory at this time.
 179  *      Because, pgdat for the new node is not allocated/initialized yet itself.
 180  *      To use new node's memory, more consideration will be necessary.
 181  */
 182 #define generic_alloc_nodedata(nid)                             \
 183 ({                                                              \
 184         kzalloc(sizeof(pg_data_t), GFP_KERNEL);                 \
 185 })
 186 /*
 187  * This definition is just for error path in node hotadd.
 188  * For node hotremove, we have to replace this.
 189  */
 190 #define generic_free_nodedata(pgdat)    kfree(pgdat)
 191 
 192 extern pg_data_t *node_data[];
 193 static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
 194 {
 195         node_data[nid] = pgdat;
 196 }
 197 
 198 #else /* !CONFIG_NUMA */
 199 
 200 /* never called */
 201 static inline pg_data_t *generic_alloc_nodedata(int nid)
 202 {
 203         BUG();
 204         return NULL;
 205 }
 206 static inline void generic_free_nodedata(pg_data_t *pgdat)
 207 {
 208 }
 209 static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
 210 {
 211 }
 212 #endif /* CONFIG_NUMA */
 213 #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
 214 
 215 #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
 216 extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
 217 #else
 218 static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
 219 {
 220 }
 221 #endif
 222 extern void put_page_bootmem(struct page *page);
 223 extern void get_page_bootmem(unsigned long ingo, struct page *page,
 224                              unsigned long type);
 225 
 226 void get_online_mems(void);
 227 void put_online_mems(void);
 228 
 229 void mem_hotplug_begin(void);
 230 void mem_hotplug_done(void);
 231 
 232 extern void set_zone_contiguous(struct zone *zone);
 233 extern void clear_zone_contiguous(struct zone *zone);
 234 
 235 #else /* ! CONFIG_MEMORY_HOTPLUG */
 236 #define pfn_to_online_page(pfn)                 \
 237 ({                                              \
 238         struct page *___page = NULL;            \
 239         if (pfn_valid(pfn))                     \
 240                 ___page = pfn_to_page(pfn);     \
 241         ___page;                                \
 242  })
 243 
 244 static inline unsigned zone_span_seqbegin(struct zone *zone)
 245 {
 246         return 0;
 247 }
 248 static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
 249 {
 250         return 0;
 251 }
 252 static inline void zone_span_writelock(struct zone *zone) {}
 253 static inline void zone_span_writeunlock(struct zone *zone) {}
 254 static inline void zone_seqlock_init(struct zone *zone) {}
 255 
 256 static inline int mhp_notimplemented(const char *func)
 257 {
 258         printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
 259         dump_stack();
 260         return -ENOSYS;
 261 }
 262 
 263 static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
 264 {
 265 }
 266 
 267 static inline int try_online_node(int nid)
 268 {
 269         return 0;
 270 }
 271 
 272 static inline void get_online_mems(void) {}
 273 static inline void put_online_mems(void) {}
 274 
 275 static inline void mem_hotplug_begin(void) {}
 276 static inline void mem_hotplug_done(void) {}
 277 
 278 static inline bool movable_node_is_enabled(void)
 279 {
 280         return false;
 281 }
 282 #endif /* ! CONFIG_MEMORY_HOTPLUG */
 283 
 284 #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
 285 /*
 286  * pgdat resizing functions
 287  */
 288 static inline
 289 void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
 290 {
 291         spin_lock_irqsave(&pgdat->node_size_lock, *flags);
 292 }
 293 static inline
 294 void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
 295 {
 296         spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
 297 }
 298 static inline
 299 void pgdat_resize_init(struct pglist_data *pgdat)
 300 {
 301         spin_lock_init(&pgdat->node_size_lock);
 302 }
 303 #else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
 304 /*
 305  * Stub functions for when hotplug is off
 306  */
 307 static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
 308 static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
 309 static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
 310 #endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
 311 
 312 #ifdef CONFIG_MEMORY_HOTREMOVE
 313 
 314 extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
 315 extern void try_offline_node(int nid);
 316 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 317 extern int remove_memory(int nid, u64 start, u64 size);
 318 extern void __remove_memory(int nid, u64 start, u64 size);
 319 
 320 #else
 321 static inline bool is_mem_section_removable(unsigned long pfn,
 322                                         unsigned long nr_pages)
 323 {
 324         return false;
 325 }
 326 
 327 static inline void try_offline_node(int nid) {}
 328 
 329 static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 330 {
 331         return -EINVAL;
 332 }
 333 
 334 static inline int remove_memory(int nid, u64 start, u64 size)
 335 {
 336         return -EBUSY;
 337 }
 338 
 339 static inline void __remove_memory(int nid, u64 start, u64 size) {}
 340 #endif /* CONFIG_MEMORY_HOTREMOVE */
 341 
 342 extern void __ref free_area_init_core_hotplug(int nid);
 343 extern int __add_memory(int nid, u64 start, u64 size);
 344 extern int add_memory(int nid, u64 start, u64 size);
 345 extern int add_memory_resource(int nid, struct resource *resource);
 346 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 347                 unsigned long nr_pages, struct vmem_altmap *altmap);
 348 extern void remove_pfn_range_from_zone(struct zone *zone,
 349                                        unsigned long start_pfn,
 350                                        unsigned long nr_pages);
 351 extern bool is_memblock_offlined(struct memory_block *mem);
 352 extern int sparse_add_section(int nid, unsigned long pfn,
 353                 unsigned long nr_pages, struct vmem_altmap *altmap);
 354 extern void sparse_remove_section(struct mem_section *ms,
 355                 unsigned long pfn, unsigned long nr_pages,
 356                 unsigned long map_offset, struct vmem_altmap *altmap);
 357 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
 358                                           unsigned long pnum);
 359 extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
 360                 int online_type);
 361 extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
 362                 unsigned long nr_pages);
 363 #endif /* __LINUX_MEMORY_HOTPLUG_H */

/* [<][>][^][v][top][bottom][index][help] */