This source file includes the following definitions:
- is_migrate_movable
- add_to_free_area
- add_to_free_area_tail
- add_to_free_area_random
- move_to_free_area
- get_page_from_free_area
- del_page_from_free_area
- free_area_empty
- is_file_lru
- is_active_lru
- zone_managed_pages
- zone_end_pfn
- zone_spans_pfn
- zone_is_initialized
- zone_is_empty
- zone_intersects
- node_lruvec
- pgdat_end_pfn
- pgdat_is_empty
- lruvec_pgdat
- memory_present
- memblocks_present
- local_memory_node
- managed_zone
- populated_zone
- zone_to_nid
- zone_set_nid
- zone_to_nid
- zone_set_nid
- zone_movable_is_highmem
- is_highmem_idx
- is_highmem
- zonelist_zone
- zonelist_zone_idx
- zonelist_node_idx
- next_zones_zonelist
- first_zones_zonelist
- early_pfn_to_nid
- pfn_to_section_nr
- section_nr_to_pfn
- section_to_usemap
- __nr_to_section
- __section_mem_map_addr
- present_section
- present_section_nr
- valid_section
- early_section
- valid_section_nr
- online_section
- online_section_nr
- __pfn_to_section
- subsection_map_index
- pfn_section_valid
- pfn_section_valid
- pfn_valid
- pfn_present
- memmap_valid_within
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_MMZONE_H
3 #define _LINUX_MMZONE_H
4
5 #ifndef __ASSEMBLY__
6 #ifndef __GENERATING_BOUNDS_H
7
8 #include <linux/spinlock.h>
9 #include <linux/list.h>
10 #include <linux/wait.h>
11 #include <linux/bitops.h>
12 #include <linux/cache.h>
13 #include <linux/threads.h>
14 #include <linux/numa.h>
15 #include <linux/init.h>
16 #include <linux/seqlock.h>
17 #include <linux/nodemask.h>
18 #include <linux/pageblock-flags.h>
19 #include <linux/page-flags-layout.h>
20 #include <linux/atomic.h>
21 #include <linux/mm_types.h>
22 #include <linux/page-flags.h>
23 #include <asm/page.h>
24
25
26 #ifndef CONFIG_FORCE_MAX_ZONEORDER
27 #define MAX_ORDER 11
28 #else
29 #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
30 #endif
31 #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
32
33
34
35
36
37
38
39 #define PAGE_ALLOC_COSTLY_ORDER 3
40
41 enum migratetype {
42 MIGRATE_UNMOVABLE,
43 MIGRATE_MOVABLE,
44 MIGRATE_RECLAIMABLE,
45 MIGRATE_PCPTYPES,
46 MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
47 #ifdef CONFIG_CMA
48 /*
49  * MIGRATE_CMA pageblocks back the Contiguous Memory Allocator.  As with
50  * ZONE_MOVABLE, only movable allocations may be satisfied from them, so
51  * their pages can later be migrated away and the physically contiguous
52  * range handed back to the CMA user.
53  *
54  * The page allocator never changes the migratetype of a MIGRATE_CMA
55  * pageblock on its own; ranges are converted to MIGRATE_CMA when a CMA
56  * area is set up, and such ranges are expected to be aligned to
57  * MAX_ORDER_NR_PAGES.
58  */
59 
60 
61 MIGRATE_CMA,
62 #endif
63 #ifdef CONFIG_MEMORY_ISOLATION
64 MIGRATE_ISOLATE,
65 #endif
66 MIGRATE_TYPES
67 };
68
69
70 extern const char * const migratetype_names[MIGRATE_TYPES];
71
72 #ifdef CONFIG_CMA
73 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
74 # define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
75 #else
76 # define is_migrate_cma(migratetype) false
77 # define is_migrate_cma_page(_page) false
78 #endif
79
80 static inline bool is_migrate_movable(int mt)
81 {
82 return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
83 }
84
85 #define for_each_migratetype_order(order, type) \
86 for (order = 0; order < MAX_ORDER; order++) \
87 for (type = 0; type < MIGRATE_TYPES; type++)
88
89 extern int page_group_by_mobility_disabled;
90
91 #define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
92 #define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
93
94 #define get_pageblock_migratetype(page) \
95 get_pfnblock_flags_mask(page, page_to_pfn(page), \
96 PB_migrate_end, MIGRATETYPE_MASK)
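A minimal usage sketch for the migratetype helpers above (illustrative only, not part of this header; page_is_in_movable_block() is a hypothetical name):

/* Kernel context assumed; needs <linux/mmzone.h> and <linux/mm.h>. */
static bool page_is_in_movable_block(struct page *page)
{
	int mt = get_pageblock_migratetype(page);

	/* MIGRATE_MOVABLE and (if enabled) MIGRATE_CMA both count as movable */
	return is_migrate_movable(mt);
}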
97
98 struct free_area {
99 struct list_head free_list[MIGRATE_TYPES];
100 unsigned long nr_free;
101 };
102
103
104 static inline void add_to_free_area(struct page *page, struct free_area *area,
105 int migratetype)
106 {
107 list_add(&page->lru, &area->free_list[migratetype]);
108 area->nr_free++;
109 }
110
111
112 static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
113 int migratetype)
114 {
115 list_add_tail(&page->lru, &area->free_list[migratetype]);
116 area->nr_free++;
117 }
118
119 #ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
120
121 void add_to_free_area_random(struct page *page, struct free_area *area,
122 int migratetype);
123 #else
124 static inline void add_to_free_area_random(struct page *page,
125 struct free_area *area, int migratetype)
126 {
127 add_to_free_area(page, area, migratetype);
128 }
129 #endif
130
131
132 static inline void move_to_free_area(struct page *page, struct free_area *area,
133 int migratetype)
134 {
135 list_move(&page->lru, &area->free_list[migratetype]);
136 }
137
138 static inline struct page *get_page_from_free_area(struct free_area *area,
139 int migratetype)
140 {
141 return list_first_entry_or_null(&area->free_list[migratetype],
142 struct page, lru);
143 }
144
145 static inline void del_page_from_free_area(struct page *page,
146 struct free_area *area)
147 {
148 list_del(&page->lru);
149 __ClearPageBuddy(page);
150 set_page_private(page, 0);
151 area->nr_free--;
152 }
153
154 static inline bool free_area_empty(struct free_area *area, int migratetype)
155 {
156 return list_empty(&area->free_list[migratetype]);
157 }
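A sketch of how these free_area helpers fit together on the allocation path (illustrative only; take_page_of_order() is hypothetical, and the real users in mm/page_alloc.c additionally handle buddy splitting, page order and vmstat accounting):

/* Kernel context assumed; zone->lock must be held by the caller. */
static struct page *take_page_of_order(struct zone *zone, unsigned int order,
				       int migratetype)
{
	struct free_area *area = &zone->free_area[order];
	struct page *page;

	lockdep_assert_held(&zone->lock);

	page = get_page_from_free_area(area, migratetype);
	if (!page)
		return NULL;	/* same condition free_area_empty() reports */

	del_page_from_free_area(page, area);
	return page;
	/* the freeing side would use add_to_free_area() or
	 * add_to_free_area_tail() to put the page back */
}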
158
159 struct pglist_data;
160
161
162
163
164
165
166
167 #if defined(CONFIG_SMP)
168 struct zone_padding {
169 char x[0];
170 } ____cacheline_internodealigned_in_smp;
171 #define ZONE_PADDING(name) struct zone_padding name;
172 #else
173 #define ZONE_PADDING(name)
174 #endif
175
176 #ifdef CONFIG_NUMA
177 enum numa_stat_item {
178 NUMA_HIT,
179 NUMA_MISS,
180 NUMA_FOREIGN,
181 NUMA_INTERLEAVE_HIT,
182 NUMA_LOCAL,
183 NUMA_OTHER,
184 NR_VM_NUMA_STAT_ITEMS
185 };
186 #else
187 #define NR_VM_NUMA_STAT_ITEMS 0
188 #endif
189
190 enum zone_stat_item {
191
192 NR_FREE_PAGES,
193 NR_ZONE_LRU_BASE,
194 NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
195 NR_ZONE_ACTIVE_ANON,
196 NR_ZONE_INACTIVE_FILE,
197 NR_ZONE_ACTIVE_FILE,
198 NR_ZONE_UNEVICTABLE,
199 NR_ZONE_WRITE_PENDING,
200 NR_MLOCK,
201 NR_PAGETABLE,
202 NR_KERNEL_STACK_KB,
203
204 NR_BOUNCE,
205 #if IS_ENABLED(CONFIG_ZSMALLOC)
206 NR_ZSPAGES,
207 #endif
208 NR_FREE_CMA_PAGES,
209 NR_VM_ZONE_STAT_ITEMS };
210
211 enum node_stat_item {
212 NR_LRU_BASE,
213 NR_INACTIVE_ANON = NR_LRU_BASE,
214 NR_ACTIVE_ANON,
215 NR_INACTIVE_FILE,
216 NR_ACTIVE_FILE,
217 NR_UNEVICTABLE,
218 NR_SLAB_RECLAIMABLE,
219 NR_SLAB_UNRECLAIMABLE,
220 NR_ISOLATED_ANON,
221 NR_ISOLATED_FILE,
222 WORKINGSET_NODES,
223 WORKINGSET_REFAULT,
224 WORKINGSET_ACTIVATE,
225 WORKINGSET_RESTORE,
226 WORKINGSET_NODERECLAIM,
227 NR_ANON_MAPPED,
228 NR_FILE_MAPPED,
229
230 NR_FILE_PAGES,
231 NR_FILE_DIRTY,
232 NR_WRITEBACK,
233 NR_WRITEBACK_TEMP,
234 NR_SHMEM,
235 NR_SHMEM_THPS,
236 NR_SHMEM_PMDMAPPED,
237 NR_FILE_THPS,
238 NR_FILE_PMDMAPPED,
239 NR_ANON_THPS,
240 NR_UNSTABLE_NFS,
241 NR_VMSCAN_WRITE,
242 NR_VMSCAN_IMMEDIATE,
243 NR_DIRTIED,
244 NR_WRITTEN,
245 NR_KERNEL_MISC_RECLAIMABLE,
246 NR_VM_NODE_STAT_ITEMS
247 };
248
249
250
251
252
253
254
255
256
257
258 #define LRU_BASE 0
259 #define LRU_ACTIVE 1
260 #define LRU_FILE 2
261
262 enum lru_list {
263 LRU_INACTIVE_ANON = LRU_BASE,
264 LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
265 LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
266 LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
267 LRU_UNEVICTABLE,
268 NR_LRU_LISTS
269 };
270
271 #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
272
273 #define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
274
275 static inline int is_file_lru(enum lru_list lru)
276 {
277 return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
278 }
279
280 static inline int is_active_lru(enum lru_list lru)
281 {
282 return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
283 }
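The LRU_* constants are built so that LRU_BASE, LRU_ACTIVE and LRU_FILE combine into a list index. A small sketch of how the per-list heads in a lruvec might be walked (illustrative only; count_evictable() is hypothetical, and real reclaim code uses the vmstat counters under pgdat->lru_lock instead of walking the lists):

/* Kernel context assumed; struct page comes from <linux/mm_types.h>. */
static void count_evictable(struct lruvec *lruvec,
			    unsigned long counts[NR_LRU_LISTS])
{
	enum lru_list lru;
	struct page *page;

	for_each_evictable_lru(lru) {
		counts[lru] = 0;
		list_for_each_entry(page, &lruvec->lists[lru], lru)
			counts[lru]++;
		/* is_file_lru(lru) / is_active_lru(lru) classify the list */
	}
}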
284
285 struct zone_reclaim_stat {
286 /*
287  * The pageout code in vmscan.c keeps track of how many of the
288  * mem/swap backed and file backed pages are referenced.
289  * The higher the rotated/scanned ratio, the more valuable
290  * that cache is.
291  *
292  * The anon LRU stats live in [0], file LRU stats in [1].
293  */
294 unsigned long recent_rotated[2];
295 unsigned long recent_scanned[2];
296 };
297
298 struct lruvec {
299 struct list_head lists[NR_LRU_LISTS];
300 struct zone_reclaim_stat reclaim_stat;
301
302 atomic_long_t inactive_age;
303
304 unsigned long refaults;
305 #ifdef CONFIG_MEMCG
306 struct pglist_data *pgdat;
307 #endif
308 };
309
310
311 #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
312
313 #define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
314
315 #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)
316
317
318 typedef unsigned __bitwise isolate_mode_t;
319
320 enum zone_watermarks {
321 WMARK_MIN,
322 WMARK_LOW,
323 WMARK_HIGH,
324 NR_WMARK
325 };
326
327 #define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
328 #define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
329 #define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
330 #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
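A hedged sketch of what the watermark macros are used for (illustrative only; zone_roughly_above_low() is hypothetical, and the real check, __zone_watermark_ok() in mm/page_alloc.c, also subtracts lowmem_reserve and unusable reserves):

/* Kernel context assumed; zone_page_state() comes from <linux/vmstat.h>. */
static bool zone_roughly_above_low(struct zone *z)
{
	unsigned long free = zone_page_state(z, NR_FREE_PAGES);

	/* low_wmark_pages() already includes any temporary watermark boost */
	return free > low_wmark_pages(z);
}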
331
332 struct per_cpu_pages {
333 int count;
334 int high;
335 int batch;
336
337
338 struct list_head lists[MIGRATE_PCPTYPES];
339 };
340
341 struct per_cpu_pageset {
342 struct per_cpu_pages pcp;
343 #ifdef CONFIG_NUMA
344 s8 expire;
345 u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
346 #endif
347 #ifdef CONFIG_SMP
348 s8 stat_threshold;
349 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
350 #endif
351 };
352
353 struct per_cpu_nodestat {
354 s8 stat_threshold;
355 s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
356 };
357
358 #endif
359
360 enum zone_type {
361 #ifdef CONFIG_ZONE_DMA
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380 ZONE_DMA,
381 #endif
382 #ifdef CONFIG_ZONE_DMA32
383
384
385
386
387
388 ZONE_DMA32,
389 #endif
390
391
392
393
394
395 ZONE_NORMAL,
396 #ifdef CONFIG_HIGHMEM
397
398
399
400
401
402
403
404
405 ZONE_HIGHMEM,
406 #endif
407 ZONE_MOVABLE,
408 #ifdef CONFIG_ZONE_DEVICE
409 ZONE_DEVICE,
410 #endif
411 __MAX_NR_ZONES
412
413 };
414
415 #ifndef __GENERATING_BOUNDS_H
416
417 struct zone {
418
419
420
421 unsigned long _watermark[NR_WMARK];
422 unsigned long watermark_boost;
423
424 unsigned long nr_reserved_highatomic;
425
426 /*
427  * We do not know whether memory we are about to allocate will be
428  * freeable or eventually released, so some lower-zone memory is
429  * reserved here; otherwise the lower zones can be driven to OOM even
430  * though the higher zones still hold plenty of freeable memory.
431  *
432  * This array is recalculated at runtime when the
433  * sysctl_lowmem_reserve_ratio sysctl changes.
434  */
435 long lowmem_reserve[MAX_NR_ZONES];
436
437 #ifdef CONFIG_NUMA
438 int node;
439 #endif
440 struct pglist_data *zone_pgdat;
441 struct per_cpu_pageset __percpu *pageset;
442
443 #ifndef CONFIG_SPARSEMEM
444
445
446
447
448 unsigned long *pageblock_flags;
449 #endif
450
451
452 unsigned long zone_start_pfn;
453
454 /*
455  * spanned_pages is the total number of pages the zone spans,
456  * including holes:
457  *	spanned_pages = zone_end_pfn - zone_start_pfn;
458  *
459  * present_pages is the number of physical pages that actually exist
460  * within the zone:
461  *	present_pages = spanned_pages - absent_pages(pages in holes);
462  *
463  * managed_pages is the number of present pages handed to the buddy
464  * allocator (reserved_pages includes pages allocated by the bootmem
465  * allocator):
466  *	managed_pages = present_pages - reserved_pages;
467  *
468  * So present_pages may be used by memory hotplug or memory power
469  * management logic to find unmanaged pages via
470  * (present_pages - managed_pages), while the page allocator and the
471  * VM scanner should use managed_pages when computing watermarks and
472  * thresholds.
473  *
474  * Locking rules:
475  *
476  * zone_start_pfn and spanned_pages are protected by span_seqlock,
477  * a seqlock because they are read outside of zone->lock on the main
478  * allocator path but written only rarely.
479  *
480  * span_seqlock is declared next to zone->lock because the two are
481  * frequently read together and benefit from sharing a cacheline.
482  *
483  * Runtime writes to present_pages should be protected by
484  * mem_hotplug_begin/end(); readers that cannot tolerate drift should
485  * use get_online_mems() to obtain a stable value.
486  */
487 
488 
489 atomic_long_t managed_pages;
490 unsigned long spanned_pages;
491 unsigned long present_pages;
492
493 const char *name;
494
495 #ifdef CONFIG_MEMORY_ISOLATION
496
497
498
499
500
501 unsigned long nr_isolate_pageblock;
502 #endif
503
504 #ifdef CONFIG_MEMORY_HOTPLUG
505
506 seqlock_t span_seqlock;
507 #endif
508
509 int initialized;
510
511
512 ZONE_PADDING(_pad1_)
513
514
515 struct free_area free_area[MAX_ORDER];
516
517
518 unsigned long flags;
519
520
521 spinlock_t lock;
522
523
524 ZONE_PADDING(_pad2_)
525
526
527
528
529
530
531 unsigned long percpu_drift_mark;
532
533 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
534
535 unsigned long compact_cached_free_pfn;
536
537 unsigned long compact_cached_migrate_pfn[2];
538 unsigned long compact_init_migrate_pfn;
539 unsigned long compact_init_free_pfn;
540 #endif
541
542 #ifdef CONFIG_COMPACTION
543
544
545
546
547
548 unsigned int compact_considered;
549 unsigned int compact_defer_shift;
550 int compact_order_failed;
551 #endif
552
553 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
554
555 bool compact_blockskip_flush;
556 #endif
557
558 bool contiguous;
559
560 ZONE_PADDING(_pad3_)
561
562 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
563 atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
564 } ____cacheline_internodealigned_in_smp;
565
566 enum pgdat_flags {
567 PGDAT_CONGESTED,
568
569
570 PGDAT_DIRTY,
571
572
573
574 PGDAT_WRITEBACK,
575
576
577 PGDAT_RECLAIM_LOCKED,
578 };
579
580 enum zone_flags {
581 ZONE_BOOSTED_WATERMARK,
582
583
584 };
585
586 static inline unsigned long zone_managed_pages(struct zone *zone)
587 {
588 return (unsigned long)atomic_long_read(&zone->managed_pages);
589 }
590
591 static inline unsigned long zone_end_pfn(const struct zone *zone)
592 {
593 return zone->zone_start_pfn + zone->spanned_pages;
594 }
595
596 static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
597 {
598 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
599 }
600
601 static inline bool zone_is_initialized(struct zone *zone)
602 {
603 return zone->initialized;
604 }
605
606 static inline bool zone_is_empty(struct zone *zone)
607 {
608 return zone->spanned_pages == 0;
609 }
610
611
612
613
614
615 static inline bool zone_intersects(struct zone *zone,
616 unsigned long start_pfn, unsigned long nr_pages)
617 {
618 if (zone_is_empty(zone))
619 return false;
620 if (start_pfn >= zone_end_pfn(zone) ||
621 start_pfn + nr_pages <= zone->zone_start_pfn)
622 return false;
623
624 return true;
625 }
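A small sketch combining the span helpers above (illustrative only; pfn_in_zone() is hypothetical):

/* Kernel context assumed. */
static bool pfn_in_zone(struct zone *zone, unsigned long pfn)
{
	/* spanned_pages may contain holes, so this only says the pfn lies
	 * inside the zone's [zone_start_pfn, zone_end_pfn) range */
	return zone_is_initialized(zone) && !zone_is_empty(zone) &&
	       zone_spans_pfn(zone, pfn);
}

zone_intersects() answers the same question for a whole pfn range, e.g. when memory hotplug has to decide which existing zone a newly added range overlaps.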
626
627
628
629
630
631
632 #define DEF_PRIORITY 12
633
634
635 #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
636
637 enum {
638 ZONELIST_FALLBACK,
639 #ifdef CONFIG_NUMA
640
641
642
643
644 ZONELIST_NOFALLBACK,
645 #endif
646 MAX_ZONELISTS
647 };
648
649
650
651
652
653 struct zoneref {
654 struct zone *zone;
655 int zone_idx;
656 };
657
658 /*
659  * One allocation request operates on a zonelist.  A zonelist is a list
660  * of zones: the first one is the 'goal' of the allocation, the other
661  * zones are fallback zones, in decreasing priority.
662  *
663  * To speed up reading of the zonelist, each zoneref also contains the
664  * zone index of the entry being read.  Helper functions to access
665  * information given a struct zoneref are:
666  *
667  * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
668  * zonelist_zone_idx()	- Return the index of the zone for an entry
669  * zonelist_node_idx()	- Return the index of the node for an entry
670  */
671 
672 struct zonelist {
673 struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
674 };
675
676 #ifndef CONFIG_DISCONTIGMEM
677
678 extern struct page *mem_map;
679 #endif
680
681 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
682 struct deferred_split {
683 spinlock_t split_queue_lock;
684 struct list_head split_queue;
685 unsigned long split_queue_len;
686 };
687 #endif
688
689
690
691
692
693
694
695
696
697 struct bootmem_data;
698 typedef struct pglist_data {
699 struct zone node_zones[MAX_NR_ZONES];
700 struct zonelist node_zonelists[MAX_ZONELISTS];
701 int nr_zones;
702 #ifdef CONFIG_FLAT_NODE_MEM_MAP
703 struct page *node_mem_map;
704 #ifdef CONFIG_PAGE_EXTENSION
705 struct page_ext *node_page_ext;
706 #endif
707 #endif
708 #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
709
710
711
712
713
714
715
716
717
718
719 spinlock_t node_size_lock;
720 #endif
721 unsigned long node_start_pfn;
722 unsigned long node_present_pages;
723 unsigned long node_spanned_pages;
724
725 int node_id;
726 wait_queue_head_t kswapd_wait;
727 wait_queue_head_t pfmemalloc_wait;
728 struct task_struct *kswapd;
729
730 int kswapd_order;
731 enum zone_type kswapd_classzone_idx;
732
733 int kswapd_failures;
734
735 #ifdef CONFIG_COMPACTION
736 int kcompactd_max_order;
737 enum zone_type kcompactd_classzone_idx;
738 wait_queue_head_t kcompactd_wait;
739 struct task_struct *kcompactd;
740 #endif
741
742
743
744
745 unsigned long totalreserve_pages;
746
747 #ifdef CONFIG_NUMA
748
749
750
751 unsigned long min_unmapped_pages;
752 unsigned long min_slab_pages;
753 #endif
754
755
756 ZONE_PADDING(_pad1_)
757 spinlock_t lru_lock;
758
759 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
760
761
762
763
764 unsigned long first_deferred_pfn;
765 #endif
766
767 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
768 struct deferred_split deferred_split_queue;
769 #endif
770
771
772 struct lruvec lruvec;
773
774 unsigned long flags;
775
776 ZONE_PADDING(_pad2_)
777
778
779 struct per_cpu_nodestat __percpu *per_cpu_nodestats;
780 atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
781 } pg_data_t;
782
783 #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
784 #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
785 #ifdef CONFIG_FLAT_NODE_MEM_MAP
786 #define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
787 #else
788 #define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
789 #endif
790 #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
791
792 #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
793 #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
794
795 static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
796 {
797 return &pgdat->lruvec;
798 }
799
800 static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
801 {
802 return pgdat->node_start_pfn + pgdat->node_spanned_pages;
803 }
804
805 static inline bool pgdat_is_empty(pg_data_t *pgdat)
806 {
807 return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
808 }
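As a hedged sketch of the invariant these helpers describe, a node's span covers the spans of all of its zones (node_zones_within_span() below is a hypothetical sanity check, not kernel code):

/* Kernel context assumed. */
static bool node_zones_within_span(pg_data_t *pgdat)
{
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = &pgdat->node_zones[i];

		if (zone_is_empty(zone))
			continue;
		if (zone->zone_start_pfn < pgdat->node_start_pfn ||
		    zone_end_pfn(zone) > pgdat_end_pfn(pgdat))
			return false;
	}
	return true;
}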
809
810 #include <linux/memory_hotplug.h>
811
812 void build_all_zonelists(pg_data_t *pgdat);
813 void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
814 enum zone_type classzone_idx);
815 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
816 int classzone_idx, unsigned int alloc_flags,
817 long free_pages);
818 bool zone_watermark_ok(struct zone *z, unsigned int order,
819 unsigned long mark, int classzone_idx,
820 unsigned int alloc_flags);
821 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
822 unsigned long mark, int classzone_idx);
823 enum memmap_context {
824 MEMMAP_EARLY,
825 MEMMAP_HOTPLUG,
826 };
827 extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
828 unsigned long size);
829
830 extern void lruvec_init(struct lruvec *lruvec);
831
832 static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
833 {
834 #ifdef CONFIG_MEMCG
835 return lruvec->pgdat;
836 #else
837 return container_of(lruvec, struct pglist_data, lruvec);
838 #endif
839 }
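A one-liner showing the intent of lruvec_pgdat() (illustrative only; lruvec_nid() is hypothetical):

/* Kernel context assumed. */
static int lruvec_nid(struct lruvec *lruvec)
{
	/* with CONFIG_MEMCG the lruvec records its owning pgdat; without it
	 * the lruvec is embedded directly in the pglist_data */
	return lruvec_pgdat(lruvec)->node_id;
}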
840
841 extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
842
843 #ifdef CONFIG_HAVE_MEMORY_PRESENT
844 void memory_present(int nid, unsigned long start, unsigned long end);
845 #else
846 static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
847 #endif
848
849 #if defined(CONFIG_SPARSEMEM)
850 void memblocks_present(void);
851 #else
852 static inline void memblocks_present(void) {}
853 #endif
854
855 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
856 int local_memory_node(int node_id);
857 #else
858 static inline int local_memory_node(int node_id) { return node_id; };
859 #endif
860
861
862
863
864 #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
865
866
867
868
869
870
871
872 static inline bool managed_zone(struct zone *zone)
873 {
874 return zone_managed_pages(zone);
875 }
876
877
878 static inline bool populated_zone(struct zone *zone)
879 {
880 return zone->present_pages;
881 }
882
883 #ifdef CONFIG_NUMA
884 static inline int zone_to_nid(struct zone *zone)
885 {
886 return zone->node;
887 }
888
889 static inline void zone_set_nid(struct zone *zone, int nid)
890 {
891 zone->node = nid;
892 }
893 #else
894 static inline int zone_to_nid(struct zone *zone)
895 {
896 return 0;
897 }
898
899 static inline void zone_set_nid(struct zone *zone, int nid) {}
900 #endif
901
902 extern int movable_zone;
903
904 #ifdef CONFIG_HIGHMEM
905 static inline int zone_movable_is_highmem(void)
906 {
907 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
908 return movable_zone == ZONE_HIGHMEM;
909 #else
910 return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
911 #endif
912 }
913 #endif
914
915 static inline int is_highmem_idx(enum zone_type idx)
916 {
917 #ifdef CONFIG_HIGHMEM
918 return (idx == ZONE_HIGHMEM ||
919 (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
920 #else
921 return 0;
922 #endif
923 }
924
925
926
927
928
929
930
931 static inline int is_highmem(struct zone *zone)
932 {
933 #ifdef CONFIG_HIGHMEM
934 return is_highmem_idx(zone_idx(zone));
935 #else
936 return 0;
937 #endif
938 }
939
940
941 struct ctl_table;
942 int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
943 void __user *, size_t *, loff_t *);
944 int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
945 void __user *, size_t *, loff_t *);
946 int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
947 void __user *, size_t *, loff_t *);
948 extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
949 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
950 void __user *, size_t *, loff_t *);
951 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
952 void __user *, size_t *, loff_t *);
953 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
954 void __user *, size_t *, loff_t *);
955 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
956 void __user *, size_t *, loff_t *);
957
958 extern int numa_zonelist_order_handler(struct ctl_table *, int,
959 void __user *, size_t *, loff_t *);
960 extern char numa_zonelist_order[];
961 #define NUMA_ZONELIST_ORDER_LEN 16
962
963 #ifndef CONFIG_NEED_MULTIPLE_NODES
964
965 extern struct pglist_data contig_page_data;
966 #define NODE_DATA(nid) (&contig_page_data)
967 #define NODE_MEM_MAP(nid) mem_map
968
969 #else
970
971 #include <asm/mmzone.h>
972
973 #endif
974
975 extern struct pglist_data *first_online_pgdat(void);
976 extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
977 extern struct zone *next_zone(struct zone *zone);
978
979
980
981
982
983 #define for_each_online_pgdat(pgdat) \
984 for (pgdat = first_online_pgdat(); \
985 pgdat; \
986 pgdat = next_online_pgdat(pgdat))
987
988
989
990
991
992
993
994 #define for_each_zone(zone) \
995 for (zone = (first_online_pgdat())->node_zones; \
996 zone; \
997 zone = next_zone(zone))
998
999 #define for_each_populated_zone(zone) \
1000 for (zone = (first_online_pgdat())->node_zones; \
1001 zone; \
1002 zone = next_zone(zone)) \
1003 if (!populated_zone(zone)) \
1004 ; \
1005 else
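A short sketch of the zone iterators in use (illustrative only; total_managed_pages() is hypothetical — compare si_meminfo() and nr_free_zone_pages() in mm/page_alloc.c for real users):

/* Kernel context assumed. */
static unsigned long total_managed_pages(void)
{
	struct zone *zone;
	unsigned long total = 0;

	for_each_populated_zone(zone)
		total += zone_managed_pages(zone);
	return total;
}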
1006
1007 static inline struct zone *zonelist_zone(struct zoneref *zoneref)
1008 {
1009 return zoneref->zone;
1010 }
1011
1012 static inline int zonelist_zone_idx(struct zoneref *zoneref)
1013 {
1014 return zoneref->zone_idx;
1015 }
1016
1017 static inline int zonelist_node_idx(struct zoneref *zoneref)
1018 {
1019 return zone_to_nid(zoneref->zone);
1020 }
1021
1022 struct zoneref *__next_zones_zonelist(struct zoneref *z,
1023 enum zone_type highest_zoneidx,
1024 nodemask_t *nodes);
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038 static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
1039 enum zone_type highest_zoneidx,
1040 nodemask_t *nodes)
1041 {
1042 if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
1043 return z;
1044 return __next_zones_zonelist(z, highest_zoneidx, nodes);
1045 }
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
1064 enum zone_type highest_zoneidx,
1065 nodemask_t *nodes)
1066 {
1067 return next_zones_zonelist(zonelist->_zonerefs,
1068 highest_zoneidx, nodes);
1069 }
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082 #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
1083 for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
1084 zone; \
1085 z = next_zones_zonelist(++z, highidx, nodemask), \
1086 zone = zonelist_zone(z))
1087
1088 #define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
1089 for (zone = z->zone; \
1090 zone; \
1091 z = next_zones_zonelist(++z, highidx, nodemask), \
1092 zone = zonelist_zone(z))
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104 #define for_each_zone_zonelist(zone, z, zlist, highidx) \
1105 for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
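A hedged sketch of a zonelist walk in the style of the page allocator (illustrative only; first_suitable_zone() is hypothetical, gfp_zone() comes from <linux/gfp.h>, and the real consumer is get_page_from_freelist() in mm/page_alloc.c):

/* Kernel context assumed; needs <linux/gfp.h> for gfp_zone(). */
static struct zone *first_suitable_zone(int nid, gfp_t gfp_mask)
{
	struct zonelist *zonelist =
		&NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
	enum zone_type highest = gfp_zone(gfp_mask);
	struct zoneref *z;
	struct zone *zone;

	/* walk from the preferred zone down, skipping zones above 'highest' */
	for_each_zone_zonelist(zone, z, zonelist, highest) {
		if (zone_watermark_ok(zone, 0, low_wmark_pages(zone),
				      highest, 0))
			return zone;
	}
	return NULL;
}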
1106
1107 #ifdef CONFIG_SPARSEMEM
1108 #include <asm/sparsemem.h>
1109 #endif
1110
1111 #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
1112 !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
1113 static inline unsigned long early_pfn_to_nid(unsigned long pfn)
1114 {
1115 BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
1116 return 0;
1117 }
1118 #endif
1119
1120 #ifdef CONFIG_FLATMEM
1121 #define pfn_to_nid(pfn) (0)
1122 #endif
1123
1124 #ifdef CONFIG_SPARSEMEM
1125
1126
1127
1128
1129
1130
1131
1132 #define PA_SECTION_SHIFT (SECTION_SIZE_BITS)
1133 #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
1134
1135 #define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT)
1136
1137 #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
1138 #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
1139
1140 #define SECTION_BLOCKFLAGS_BITS \
1141 ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
1142
1143 #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
1144 #error Allocator MAX_ORDER exceeds SECTION_SIZE
1145 #endif
1146
1147 static inline unsigned long pfn_to_section_nr(unsigned long pfn)
1148 {
1149 return pfn >> PFN_SECTION_SHIFT;
1150 }
1151 static inline unsigned long section_nr_to_pfn(unsigned long sec)
1152 {
1153 return sec << PFN_SECTION_SHIFT;
1154 }
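A worked example of the section arithmetic, assuming the x86_64 defaults SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12 (both values are architecture-dependent):

/* PFN_SECTION_SHIFT  = 27 - 12 = 15
 * PAGES_PER_SECTION  = 1UL << 15 = 32768 pages = 128 MiB per section
 * pfn_to_section_nr(0x48000) = 0x48000 >> 15 = 9
 * section_nr_to_pfn(9)       = 9 << 15       = 0x48000
 */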
1155
1156 #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
1157 #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
1158
1159 #define SUBSECTION_SHIFT 21
1160
1161 #define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
1162 #define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
1163 #define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
1164
1165 #if SUBSECTION_SHIFT > SECTION_SIZE_BITS
1166 #error Subsection size exceeds section size
1167 #else
1168 #define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
1169 #endif
1170
1171 #define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
1172 #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
1173
1174 struct mem_section_usage {
1175 DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
1176
1177 unsigned long pageblock_flags[0];
1178 };
1179
1180 void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
1181
1182 struct page;
1183 struct page_ext;
1184 struct mem_section {
1185 /*
1186  * This is, logically, a pointer to an array of struct pages.
1187  * However, it is stored with some other magic
1188  * (see sparse.c::sparse_init_one_section()).
1189  *
1190  * Additionally, during early boot the node id of the section's
1191  * location is encoded here to guide allocation
1192  * (see sparse.c::memory_present()).
1193  *
1194  * Making it an unsigned long at least forces a cast before it can
1195  * be used the wrong way.
1196  */
1197 unsigned long section_mem_map;
1198
1199 struct mem_section_usage *usage;
1200 #ifdef CONFIG_PAGE_EXTENSION
1201
1202
1203
1204
1205 struct page_ext *page_ext;
1206 unsigned long pad;
1207 #endif
1208
1209
1210
1211
1212 };
1213
1214 #ifdef CONFIG_SPARSEMEM_EXTREME
1215 #define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section))
1216 #else
1217 #define SECTIONS_PER_ROOT 1
1218 #endif
1219
1220 #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
1221 #define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
1222 #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
1223
1224 #ifdef CONFIG_SPARSEMEM_EXTREME
1225 extern struct mem_section **mem_section;
1226 #else
1227 extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
1228 #endif
1229
1230 static inline unsigned long *section_to_usemap(struct mem_section *ms)
1231 {
1232 return ms->usage->pageblock_flags;
1233 }
1234
1235 static inline struct mem_section *__nr_to_section(unsigned long nr)
1236 {
1237 #ifdef CONFIG_SPARSEMEM_EXTREME
1238 if (!mem_section)
1239 return NULL;
1240 #endif
1241 if (!mem_section[SECTION_NR_TO_ROOT(nr)])
1242 return NULL;
1243 return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
1244 }
1245 extern unsigned long __section_nr(struct mem_section *ms);
1246 extern size_t mem_section_usage_size(void);
1247
1248 /*
1249  * The lower bits of the mem_map pointer are used to store a little
1250  * extra information.  The pointer is calculated as
1251  * mem_map - section_nr_to_pfn(pnum), and the result is aligned to
1252  * the minimum alignment of the two values:
1253  *   1. All mem_map arrays are page-aligned.
1254  *   2. section_mem_map does not contain a page-aligned mem_map
1255  *      pointer in some cases (see sparse.c).
1256  */
1257 
1258 
1259 
1260 
1261 #define SECTION_MARKED_PRESENT (1UL<<0)
1262 #define SECTION_HAS_MEM_MAP (1UL<<1)
1263 #define SECTION_IS_ONLINE (1UL<<2)
1264 #define SECTION_IS_EARLY (1UL<<3)
1265 #define SECTION_MAP_LAST_BIT (1UL<<4)
1266 #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
1267 #define SECTION_NID_SHIFT 3
1268
1269 static inline struct page *__section_mem_map_addr(struct mem_section *section)
1270 {
1271 unsigned long map = section->section_mem_map;
1272 map &= SECTION_MAP_MASK;
1273 return (struct page *)map;
1274 }
1275
1276 static inline int present_section(struct mem_section *section)
1277 {
1278 return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
1279 }
1280
1281 static inline int present_section_nr(unsigned long nr)
1282 {
1283 return present_section(__nr_to_section(nr));
1284 }
1285
1286 static inline int valid_section(struct mem_section *section)
1287 {
1288 return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
1289 }
1290
1291 static inline int early_section(struct mem_section *section)
1292 {
1293 return (section && (section->section_mem_map & SECTION_IS_EARLY));
1294 }
1295
1296 static inline int valid_section_nr(unsigned long nr)
1297 {
1298 return valid_section(__nr_to_section(nr));
1299 }
1300
1301 static inline int online_section(struct mem_section *section)
1302 {
1303 return (section && (section->section_mem_map & SECTION_IS_ONLINE));
1304 }
1305
1306 static inline int online_section_nr(unsigned long nr)
1307 {
1308 return online_section(__nr_to_section(nr));
1309 }
1310
1311 #ifdef CONFIG_MEMORY_HOTPLUG
1312 void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
1313 #ifdef CONFIG_MEMORY_HOTREMOVE
1314 void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
1315 #endif
1316 #endif
1317
1318 static inline struct mem_section *__pfn_to_section(unsigned long pfn)
1319 {
1320 return __nr_to_section(pfn_to_section_nr(pfn));
1321 }
1322
1323 extern unsigned long __highest_present_section_nr;
1324
1325 static inline int subsection_map_index(unsigned long pfn)
1326 {
1327 return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
1328 }
1329
1330 #ifdef CONFIG_SPARSEMEM_VMEMMAP
1331 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
1332 {
1333 int idx = subsection_map_index(pfn);
1334
1335 return test_bit(idx, ms->usage->subsection_map);
1336 }
1337 #else
1338 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
1339 {
1340 return 1;
1341 }
1342 #endif
1343
1344 #ifndef CONFIG_HAVE_ARCH_PFN_VALID
1345 static inline int pfn_valid(unsigned long pfn)
1346 {
1347 struct mem_section *ms;
1348
1349 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
1350 return 0;
1351 ms = __nr_to_section(pfn_to_section_nr(pfn));
1352 if (!valid_section(ms))
1353 return 0;
1354
1355
1356
1357
1358 return early_section(ms) || pfn_section_valid(ms, pfn);
1359 }
1360 #endif
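A minimal sketch of using pfn_valid() before touching the memmap (illustrative only; pfn_to_page_checked() is hypothetical, and note that a valid memmap entry does not mean the page is online — see pfn_to_online_page() in <linux/memory_hotplug.h> for that):

/* Kernel context assumed; pfn_to_page() comes from the memory model
 * headers pulled in via <asm/page.h> / <linux/mm.h>. */
static struct page *pfn_to_page_checked(unsigned long pfn)
{
	if (!pfn_valid(pfn))
		return NULL;	/* no memmap entry backs this pfn */
	return pfn_to_page(pfn);
}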
1361
1362 static inline int pfn_present(unsigned long pfn)
1363 {
1364 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
1365 return 0;
1366 return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
1367 }
1368
1369
1370
1371
1372
1373
1374 #ifdef CONFIG_NUMA
1375 #define pfn_to_nid(pfn) \
1376 ({ \
1377 unsigned long __pfn_to_nid_pfn = (pfn); \
1378 page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
1379 })
1380 #else
1381 #define pfn_to_nid(pfn) (0)
1382 #endif
1383
1384 #define early_pfn_valid(pfn) pfn_valid(pfn)
1385 void sparse_init(void);
1386 #else
1387 #define sparse_init() do {} while (0)
1388 #define sparse_index_init(_sec, _nid) do {} while (0)
1389 #define pfn_present pfn_valid
1390 #define subsection_map_init(_pfn, _nr_pages) do {} while (0)
1391 #endif
1392
1393
1394
1395
1396
1397
1398 struct mminit_pfnnid_cache {
1399 unsigned long last_start;
1400 unsigned long last_end;
1401 int last_nid;
1402 };
1403
1404 #ifndef early_pfn_valid
1405 #define early_pfn_valid(pfn) (1)
1406 #endif
1407
1408 void memory_present(int nid, unsigned long start, unsigned long end);
1409
1410 /*
1411  * If it is possible to have holes within a MAX_ORDER_NR_PAGES block,
1412  * then we need to check pfn validity within that block.
1413  * pfn_valid_within() should be used in this case; it is optimised
1414  * away when there can be no holes within a MAX_ORDER_NR_PAGES block.
1415  */
1416 #ifdef CONFIG_HOLES_IN_ZONE
1417 #define pfn_valid_within(pfn) pfn_valid(pfn)
1418 #else
1419 #define pfn_valid_within(pfn) (1)
1420 #endif
1421
1422 #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
1423 /*
1424  * pfn_valid() is meant to tell whether a given PFN has a valid memmap
1425  * associated with it, i.e. whether a struct page exists for the pfn.
1426  * With FLATMEM, holes are expected to have a valid memmap as long as
1427  * there are valid PFNs on either side of the hole; with SPARSEMEM, a
1428  * valid section is assumed to have a memmap for the entire section.
1429  *
1430  * However, ARM (and possibly other embedded architectures) may free
1431  * the memmap backing such holes to save memory, on the assumption the
1432  * memmap is never used.  The page_zone linkages are then broken even
1433  * though pfn_valid() returns true.  A walker of the full memmap must
1434  * therefore do this additional check to make sure the memmap it is
1435  * looking at is sane, by verifying that the zone and PFN linkages are
1436  * still valid.  This is expensive, but walkers of the full memmap are
1437  * extremely rare.
1438  */
1439 
1440 
1441 
1442 
1443 bool memmap_valid_within(unsigned long pfn,
1444 struct page *page, struct zone *zone);
1445 #else
1446 static inline bool memmap_valid_within(unsigned long pfn,
1447 struct page *page, struct zone *zone)
1448 {
1449 return true;
1450 }
1451 #endif
1452
1453 #endif
1454 #endif
1455 #endif