This source file includes the following definitions:
- set_max_mapnr
- set_max_mapnr
- totalram_pages
- totalram_pages_inc
- totalram_pages_dec
- totalram_pages_add
- totalram_pages_set
- __mm_zero_struct_page
- vma_init
- vma_set_anonymous
- vma_is_anonymous
- vma_is_shmem
- pmd_devmap
- pud_devmap
- pgd_devmap
- put_page_testzero
- get_page_unless_zero
- is_vmalloc_addr
- is_vmalloc_or_module_addr
- kvmalloc
- kvzalloc_node
- kvzalloc
- kvmalloc_array
- kvcalloc
- compound_mapcount
- page_mapcount_reset
- page_mapcount
- total_mapcount
- page_trans_huge_mapcount
- virt_to_head_page
- set_compound_page_dtor
- get_compound_page_dtor
- compound_order
- set_compound_order
- compound_nr
- page_size
- page_shift
- maybe_mkwrite
- page_zonenum
- is_zone_device_page
- is_zone_device_page
- put_devmap_managed_page
- put_devmap_managed_page
- is_device_private_page
- is_pci_p2pdma_page
- get_page
- try_get_page
- put_page
- put_user_page
- page_zone_id
- page_to_nid
- cpu_pid_to_cpupid
- cpupid_to_pid
- cpupid_to_cpu
- cpupid_to_nid
- cpupid_pid_unset
- cpupid_cpu_unset
- __cpupid_match_pid
- page_cpupid_xchg_last
- page_cpupid_last
- page_cpupid_reset_last
- page_cpupid_last
- page_cpupid_reset_last
- page_cpupid_xchg_last
- page_cpupid_last
- cpupid_to_nid
- cpupid_to_pid
- cpupid_to_cpu
- cpu_pid_to_cpupid
- cpupid_pid_unset
- page_cpupid_reset_last
- cpupid_match_pid
- page_kasan_tag
- page_kasan_tag_set
- page_kasan_tag_reset
- page_kasan_tag
- page_kasan_tag_set
- page_kasan_tag_reset
- page_zone
- page_pgdat
- set_page_section
- page_to_section
- set_page_zone
- set_page_node
- set_page_links
- page_memcg
- page_memcg_rcu
- page_memcg
- page_memcg_rcu
- lowmem_page_address
- page_address
- set_page_address
- page_file_mapping
- page_index
- page_is_pfmemalloc
- set_page_pfmemalloc
- clear_page_pfmemalloc
- can_do_mlock
- handle_mm_fault
- fixup_user_fault
- unmap_mapping_pages
- unmap_mapping_range
- unmap_shared_mapping_range
- frame_vector_count
- frame_vector_pages
- frame_vector_pfns
- cancel_dirty_page
- get_mm_counter
- add_mm_counter
- inc_mm_counter
- dec_mm_counter
- mm_counter_file
- mm_counter
- get_mm_rss
- get_mm_hiwater_rss
- get_mm_hiwater_vm
- update_hiwater_rss
- update_hiwater_vm
- reset_mm_hiwater_rss
- setmax_mm_hiwater_rss
- sync_mm_rss
- pte_devmap
- get_locked_pte
- __p4d_alloc
- __pud_alloc
- mm_inc_nr_puds
- mm_dec_nr_puds
- mm_inc_nr_puds
- mm_dec_nr_puds
- __pmd_alloc
- mm_inc_nr_pmds
- mm_dec_nr_pmds
- mm_inc_nr_pmds
- mm_dec_nr_pmds
- mm_pgtables_bytes_init
- mm_pgtables_bytes
- mm_inc_nr_ptes
- mm_dec_nr_ptes
- mm_pgtables_bytes_init
- mm_pgtables_bytes
- mm_inc_nr_ptes
- mm_dec_nr_ptes
- p4d_alloc
- pud_alloc
- pmd_alloc
- ptlock_ptr
- ptlock_cache_init
- ptlock_alloc
- ptlock_free
- ptlock_ptr
- pte_lockptr
- ptlock_init
- pte_lockptr
- ptlock_cache_init
- ptlock_init
- ptlock_free
- pgtable_init
- pgtable_pte_page_ctor
- pgtable_pte_page_dtor
- pmd_to_page
- pmd_lockptr
- pgtable_pmd_page_ctor
- pgtable_pmd_page_dtor
- pmd_lockptr
- pgtable_pmd_page_ctor
- pgtable_pmd_page_dtor
- pmd_lock
- pud_lockptr
- pud_lock
- __free_reserved_page
- free_reserved_page
- mark_page_reserved
- free_initmem_default
- get_num_physpages
- __early_pfn_to_nid
- zero_resv_unavail
- vma_adjust
- check_data_rlimit
- do_mmap_pgoff
- mm_populate
- mm_populate
- vm_unmapped_area
- find_vma_intersection
- vm_start_gap
- vm_end_gap
- vma_pages
- find_exact_vma
- range_in_vma
- vm_get_page_prot
- vma_set_page_prot
- vmf_insert_page
- vmf_error
- vm_fault_to_errno
- page_poisoning_enabled
- kernel_poison_pages
- want_init_on_alloc
- want_init_on_free
- init_debug_pagealloc
- debug_pagealloc_enabled
- debug_pagealloc_enabled_static
- kernel_map_pages
- kernel_map_pages
- kernel_page_present
- get_gate_vma
- in_gate_area_no_mm
- in_gate_area
- print_vma_addr
- debug_guardpage_minorder
- debug_guardpage_enabled
- page_is_guard
- debug_guardpage_minorder
- debug_guardpage_enabled
- page_is_guard
- setup_nr_node_ids
- pages_identical
1
2 #ifndef _LINUX_MM_H
3 #define _LINUX_MM_H
4
5 #include <linux/errno.h>
6
7 #ifdef __KERNEL__
8
9 #include <linux/mmdebug.h>
10 #include <linux/gfp.h>
11 #include <linux/bug.h>
12 #include <linux/list.h>
13 #include <linux/mmzone.h>
14 #include <linux/rbtree.h>
15 #include <linux/atomic.h>
16 #include <linux/debug_locks.h>
17 #include <linux/mm_types.h>
18 #include <linux/range.h>
19 #include <linux/pfn.h>
20 #include <linux/percpu-refcount.h>
21 #include <linux/bit_spinlock.h>
22 #include <linux/shrinker.h>
23 #include <linux/resource.h>
24 #include <linux/page_ext.h>
25 #include <linux/err.h>
26 #include <linux/page_ref.h>
27 #include <linux/memremap.h>
28 #include <linux/overflow.h>
29 #include <linux/sizes.h>
30
31 struct mempolicy;
32 struct anon_vma;
33 struct anon_vma_chain;
34 struct file_ra_state;
35 struct user_struct;
36 struct writeback_control;
37 struct bdi_writeback;
38
39 void init_mm_internals(void);
40
41 #ifndef CONFIG_NEED_MULTIPLE_NODES
42 extern unsigned long max_mapnr;
43
44 static inline void set_max_mapnr(unsigned long limit)
45 {
46 max_mapnr = limit;
47 }
48 #else
49 static inline void set_max_mapnr(unsigned long limit) { }
50 #endif
51
52 extern atomic_long_t _totalram_pages;
53 static inline unsigned long totalram_pages(void)
54 {
55 return (unsigned long)atomic_long_read(&_totalram_pages);
56 }
57
58 static inline void totalram_pages_inc(void)
59 {
60 atomic_long_inc(&_totalram_pages);
61 }
62
63 static inline void totalram_pages_dec(void)
64 {
65 atomic_long_dec(&_totalram_pages);
66 }
67
68 static inline void totalram_pages_add(long count)
69 {
70 atomic_long_add(count, &_totalram_pages);
71 }
72
73 static inline void totalram_pages_set(long val)
74 {
75 atomic_long_set(&_totalram_pages, val);
76 }
77
78 extern void * high_memory;
79 extern int page_cluster;
80
81 #ifdef CONFIG_SYSCTL
82 extern int sysctl_legacy_va_layout;
83 #else
84 #define sysctl_legacy_va_layout 0
85 #endif
86
87 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
88 extern const int mmap_rnd_bits_min;
89 extern const int mmap_rnd_bits_max;
90 extern int mmap_rnd_bits __read_mostly;
91 #endif
92 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
93 extern const int mmap_rnd_compat_bits_min;
94 extern const int mmap_rnd_compat_bits_max;
95 extern int mmap_rnd_compat_bits __read_mostly;
96 #endif
97
98 #include <asm/page.h>
99 #include <asm/pgtable.h>
100 #include <asm/processor.h>
101
102
103
104
105
106
107
108
109 #ifndef untagged_addr
110 #define untagged_addr(addr) (addr)
111 #endif
112
113 #ifndef __pa_symbol
114 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
115 #endif
116
117 #ifndef page_to_virt
118 #define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
119 #endif
120
121 #ifndef lm_alias
122 #define lm_alias(x) __va(__pa_symbol(x))
123 #endif
124
125
126
127
128
129
130
131
132 #ifndef mm_forbids_zeropage
133 #define mm_forbids_zeropage(X) (0)
134 #endif
135
136
137
138
139
140
141
142 #if BITS_PER_LONG == 64
143
144
145
146
147
148
149 #define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
150 static inline void __mm_zero_struct_page(struct page *page)
151 {
152 unsigned long *_pp = (void *)page;
153
154
155 BUILD_BUG_ON(sizeof(struct page) & 7);
156 BUILD_BUG_ON(sizeof(struct page) < 56);
157 BUILD_BUG_ON(sizeof(struct page) > 80);
158
159 switch (sizeof(struct page)) {
160 case 80:
161 _pp[9] = 0;
162 case 72:
163 _pp[8] = 0;
164 case 64:
165 _pp[7] = 0;
166 case 56:
167 _pp[6] = 0;
168 _pp[5] = 0;
169 _pp[4] = 0;
170 _pp[3] = 0;
171 _pp[2] = 0;
172 _pp[1] = 0;
173 _pp[0] = 0;
174 }
175 }
176 #else
177 #define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
178 #endif
179
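/*
 * Illustrative sketch (not part of mm.h, names are hypothetical): the switch
 * above deliberately falls through so every 8-byte word of struct page is
 * cleared with plain stores, which is cheaper than a memset() in hot
 * initialization paths such as memmap setup.
 */
static inline void example_zero_struct_pages(struct page *start,
					     unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++)
		mm_zero_struct_page(start + i);
}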
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196 #define MAPCOUNT_ELF_CORE_MARGIN (5)
197 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
198
199 extern int sysctl_max_map_count;
200
201 extern unsigned long sysctl_user_reserve_kbytes;
202 extern unsigned long sysctl_admin_reserve_kbytes;
203
204 extern int sysctl_overcommit_memory;
205 extern int sysctl_overcommit_ratio;
206 extern unsigned long sysctl_overcommit_kbytes;
207
208 extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
209 size_t *, loff_t *);
210 extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
211 size_t *, loff_t *);
212
213 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
214
215
216 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
217
218
219 #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
220
221 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
222
223
224
225
226
227
228
229
230
231
232 struct vm_area_struct *vm_area_alloc(struct mm_struct *);
233 struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
234 void vm_area_free(struct vm_area_struct *);
235
236 #ifndef CONFIG_MMU
237 extern struct rb_root nommu_region_tree;
238 extern struct rw_semaphore nommu_region_sem;
239
240 extern unsigned int kobjsize(const void *objp);
241 #endif
242
243
244
245
246
247 #define VM_NONE 0x00000000
248
249 #define VM_READ 0x00000001
250 #define VM_WRITE 0x00000002
251 #define VM_EXEC 0x00000004
252 #define VM_SHARED 0x00000008
253
254
255 #define VM_MAYREAD 0x00000010
256 #define VM_MAYWRITE 0x00000020
257 #define VM_MAYEXEC 0x00000040
258 #define VM_MAYSHARE 0x00000080
259
260 #define VM_GROWSDOWN 0x00000100
261 #define VM_UFFD_MISSING 0x00000200
262 #define VM_PFNMAP 0x00000400
263 #define VM_DENYWRITE 0x00000800
264 #define VM_UFFD_WP 0x00001000
265
266 #define VM_LOCKED 0x00002000
267 #define VM_IO 0x00004000
268
269
270 #define VM_SEQ_READ 0x00008000
271 #define VM_RAND_READ 0x00010000
272
273 #define VM_DONTCOPY 0x00020000
274 #define VM_DONTEXPAND 0x00040000
275 #define VM_LOCKONFAULT 0x00080000
276 #define VM_ACCOUNT 0x00100000
277 #define VM_NORESERVE 0x00200000
278 #define VM_HUGETLB 0x00400000
279 #define VM_SYNC 0x00800000
280 #define VM_ARCH_1 0x01000000
281 #define VM_WIPEONFORK 0x02000000
282 #define VM_DONTDUMP 0x04000000
283
284 #ifdef CONFIG_MEM_SOFT_DIRTY
285 # define VM_SOFTDIRTY 0x08000000
286 #else
287 # define VM_SOFTDIRTY 0
288 #endif
289
290 #define VM_MIXEDMAP 0x10000000
291 #define VM_HUGEPAGE 0x20000000
292 #define VM_NOHUGEPAGE 0x40000000
293 #define VM_MERGEABLE 0x80000000
294
295 #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
296 #define VM_HIGH_ARCH_BIT_0 32
297 #define VM_HIGH_ARCH_BIT_1 33
298 #define VM_HIGH_ARCH_BIT_2 34
299 #define VM_HIGH_ARCH_BIT_3 35
300 #define VM_HIGH_ARCH_BIT_4 36
301 #define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
302 #define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
303 #define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
304 #define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
305 #define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
306 #endif
307
308 #ifdef CONFIG_ARCH_HAS_PKEYS
309 # define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
310 # define VM_PKEY_BIT0 VM_HIGH_ARCH_0
311 # define VM_PKEY_BIT1 VM_HIGH_ARCH_1
312 # define VM_PKEY_BIT2 VM_HIGH_ARCH_2
313 # define VM_PKEY_BIT3 VM_HIGH_ARCH_3
314 #ifdef CONFIG_PPC
315 # define VM_PKEY_BIT4 VM_HIGH_ARCH_4
316 #else
317 # define VM_PKEY_BIT4 0
318 #endif
319 #endif
320
321 #if defined(CONFIG_X86)
322 # define VM_PAT VM_ARCH_1
323 #elif defined(CONFIG_PPC)
324 # define VM_SAO VM_ARCH_1
325 #elif defined(CONFIG_PARISC)
326 # define VM_GROWSUP VM_ARCH_1
327 #elif defined(CONFIG_IA64)
328 # define VM_GROWSUP VM_ARCH_1
329 #elif defined(CONFIG_SPARC64)
330 # define VM_SPARC_ADI VM_ARCH_1
331 # define VM_ARCH_CLEAR VM_SPARC_ADI
332 #elif !defined(CONFIG_MMU)
333 # define VM_MAPPED_COPY VM_ARCH_1
334 #endif
335
336 #if defined(CONFIG_X86_INTEL_MPX)
337
338 # define VM_MPX VM_HIGH_ARCH_4
339 #else
340 # define VM_MPX VM_NONE
341 #endif
342
343 #ifndef VM_GROWSUP
344 # define VM_GROWSUP VM_NONE
345 #endif
346
347
348 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
349
350 #ifndef VM_STACK_DEFAULT_FLAGS
351 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
352 #endif
353
354 #ifdef CONFIG_STACK_GROWSUP
355 #define VM_STACK VM_GROWSUP
356 #else
357 #define VM_STACK VM_GROWSDOWN
358 #endif
359
360 #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
361
362
363
364
365
366 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
367
368
369 #define VM_INIT_DEF_MASK VM_NOHUGEPAGE
370
371
372 #define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
373
374
375 #ifndef VM_ARCH_CLEAR
376 # define VM_ARCH_CLEAR VM_NONE
377 #endif
378 #define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
379
380
381
382
383
384 extern pgprot_t protection_map[16];
385
386 #define FAULT_FLAG_WRITE 0x01
387 #define FAULT_FLAG_MKWRITE 0x02
388 #define FAULT_FLAG_ALLOW_RETRY 0x04
389 #define FAULT_FLAG_RETRY_NOWAIT 0x08
390 #define FAULT_FLAG_KILLABLE 0x10
391 #define FAULT_FLAG_TRIED 0x20
392 #define FAULT_FLAG_USER 0x40
393 #define FAULT_FLAG_REMOTE 0x80
394 #define FAULT_FLAG_INSTRUCTION 0x100
395
396 #define FAULT_FLAG_TRACE \
397 { FAULT_FLAG_WRITE, "WRITE" }, \
398 { FAULT_FLAG_MKWRITE, "MKWRITE" }, \
399 { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \
400 { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \
401 { FAULT_FLAG_KILLABLE, "KILLABLE" }, \
402 { FAULT_FLAG_TRIED, "TRIED" }, \
403 { FAULT_FLAG_USER, "USER" }, \
404 { FAULT_FLAG_REMOTE, "REMOTE" }, \
405 { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }
406
407
408
409
410
411
412
413
414
415
416
417 struct vm_fault {
418 struct vm_area_struct *vma;
419 unsigned int flags;
420 gfp_t gfp_mask;
421 pgoff_t pgoff;
422 unsigned long address;
423 pmd_t *pmd;
424
425 pud_t *pud;
426
427
428 pte_t orig_pte;
429
430 struct page *cow_page;
431 struct mem_cgroup *memcg;
432 struct page *page;
433
434
435
436
437
438 pte_t *pte;
439
440
441
442 spinlock_t *ptl;
443
444
445
446 pgtable_t prealloc_pte;
447
448
449
450
451
452
453 };
454
455
456 enum page_entry_size {
457 PE_SIZE_PTE = 0,
458 PE_SIZE_PMD,
459 PE_SIZE_PUD,
460 };
461
462
463
464
465
466
467 struct vm_operations_struct {
468 void (*open)(struct vm_area_struct * area);
469 void (*close)(struct vm_area_struct * area);
470 int (*split)(struct vm_area_struct * area, unsigned long addr);
471 int (*mremap)(struct vm_area_struct * area);
472 vm_fault_t (*fault)(struct vm_fault *vmf);
473 vm_fault_t (*huge_fault)(struct vm_fault *vmf,
474 enum page_entry_size pe_size);
475 void (*map_pages)(struct vm_fault *vmf,
476 pgoff_t start_pgoff, pgoff_t end_pgoff);
477 unsigned long (*pagesize)(struct vm_area_struct * area);
478
479
480
481 vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
482
483
484 vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
485
486
487
488
489 int (*access)(struct vm_area_struct *vma, unsigned long addr,
490 void *buf, int len, int write);
491
492
493
494
495 const char *(*name)(struct vm_area_struct *vma);
496
497 #ifdef CONFIG_NUMA
498
499
500
501
502
503
504
505 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
506
507
508
509
510
511
512
513
514
515
516
517 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
518 unsigned long addr);
519 #endif
520
521
522
523
524
525 struct page *(*find_special_page)(struct vm_area_struct *vma,
526 unsigned long addr);
527 };
528
529 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
530 {
531 static const struct vm_operations_struct dummy_vm_ops = {};
532
533 memset(vma, 0, sizeof(*vma));
534 vma->vm_mm = mm;
535 vma->vm_ops = &dummy_vm_ops;
536 INIT_LIST_HEAD(&vma->anon_vma_chain);
537 }
538
539 static inline void vma_set_anonymous(struct vm_area_struct *vma)
540 {
541 vma->vm_ops = NULL;
542 }
543
544 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
545 {
546 return !vma->vm_ops;
547 }
548
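/*
 * Illustrative sketch (not part of mm.h, names are hypothetical): a caller
 * building a VMA initializes it with vma_init() and, for anonymous memory,
 * clears vm_ops with vma_set_anonymous() so that vma_is_anonymous() holds.
 */
static inline void example_init_anon_vma(struct vm_area_struct *vma,
					 struct mm_struct *mm)
{
	vma_init(vma, mm);		/* zeroes *vma, installs dummy vm_ops */
	vma_set_anonymous(vma);		/* vm_ops = NULL */
}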
549 #ifdef CONFIG_SHMEM
550
551
552
553
554 bool vma_is_shmem(struct vm_area_struct *vma);
555 #else
556 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
557 #endif
558
559 int vma_is_stack_for_current(struct vm_area_struct *vma);
560
561
562 #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
563
564 struct mmu_gather;
565 struct inode;
566
567 #if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
568 static inline int pmd_devmap(pmd_t pmd)
569 {
570 return 0;
571 }
572 static inline int pud_devmap(pud_t pud)
573 {
574 return 0;
575 }
576 static inline int pgd_devmap(pgd_t pgd)
577 {
578 return 0;
579 }
580 #endif
581
582
583
584
585
586 #include <linux/page-flags.h>
587 #include <linux/huge_mm.h>
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605 static inline int put_page_testzero(struct page *page)
606 {
607 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
608 return page_ref_dec_and_test(page);
609 }
610
611
612
613
614
615
616
617 static inline int get_page_unless_zero(struct page *page)
618 {
619 return page_ref_add_unless(page, 1, 0);
620 }
621
622 extern int page_is_ram(unsigned long pfn);
623
624 enum {
625 REGION_INTERSECTS,
626 REGION_DISJOINT,
627 REGION_MIXED,
628 };
629
630 int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
631 unsigned long desc);
632
633
634 struct page *vmalloc_to_page(const void *addr);
635 unsigned long vmalloc_to_pfn(const void *addr);
636
637
638
639
640
641
642
643 static inline bool is_vmalloc_addr(const void *x)
644 {
645 #ifdef CONFIG_MMU
646 unsigned long addr = (unsigned long)x;
647
648 return addr >= VMALLOC_START && addr < VMALLOC_END;
649 #else
650 return false;
651 #endif
652 }
653
654 #ifndef is_ioremap_addr
655 #define is_ioremap_addr(x) is_vmalloc_addr(x)
656 #endif
657
658 #ifdef CONFIG_MMU
659 extern int is_vmalloc_or_module_addr(const void *x);
660 #else
661 static inline int is_vmalloc_or_module_addr(const void *x)
662 {
663 return 0;
664 }
665 #endif
666
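/*
 * Illustrative sketch (not part of mm.h, names are hypothetical):
 * is_vmalloc_addr() is how helpers such as kvfree() decide which allocator
 * owns a pointer.  Assumes <linux/slab.h> and <linux/vmalloc.h> are
 * available for kfree()/vfree().
 */
static inline void example_free_either(void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vfree(ptr);	/* came from vmalloc (or a kvmalloc fallback) */
	else
		kfree(ptr);	/* regular slab allocation */
}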
667 extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
668 static inline void *kvmalloc(size_t size, gfp_t flags)
669 {
670 return kvmalloc_node(size, flags, NUMA_NO_NODE);
671 }
672 static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
673 {
674 return kvmalloc_node(size, flags | __GFP_ZERO, node);
675 }
676 static inline void *kvzalloc(size_t size, gfp_t flags)
677 {
678 return kvmalloc(size, flags | __GFP_ZERO);
679 }
680
681 static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
682 {
683 size_t bytes;
684
685 if (unlikely(check_mul_overflow(n, size, &bytes)))
686 return NULL;
687
688 return kvmalloc(bytes, flags);
689 }
690
691 static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
692 {
693 return kvmalloc_array(n, size, flags | __GFP_ZERO);
694 }
695
696 extern void kvfree(const void *addr);
697 extern void kvfree_sensitive(const void *addr, size_t len);
698
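/*
 * Illustrative sketch (not part of mm.h, names are hypothetical): the kv*()
 * helpers try kmalloc first and may fall back to vmalloc, so the memory must
 * be released with kvfree(), never kfree().
 */
static inline u32 *example_alloc_table(size_t nents)
{
	/* kvcalloc() also returns NULL if nents * sizeof(u32) overflows */
	u32 *table = kvcalloc(nents, sizeof(*table), GFP_KERNEL);

	return table;	/* caller eventually does kvfree(table) */
}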
699
700
701
702
703
704 static inline int compound_mapcount(struct page *page)
705 {
706 VM_BUG_ON_PAGE(!PageCompound(page), page);
707 page = compound_head(page);
708 return atomic_read(compound_mapcount_ptr(page)) + 1;
709 }
710
711
712
713
714
715
716 static inline void page_mapcount_reset(struct page *page)
717 {
718 atomic_set(&(page)->_mapcount, -1);
719 }
720
721 int __page_mapcount(struct page *page);
722
723
724
725
726
727
728
729
730
731 static inline int page_mapcount(struct page *page)
732 {
733 if (unlikely(PageCompound(page)))
734 return __page_mapcount(page);
735 return atomic_read(&page->_mapcount) + 1;
736 }
737
738 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
739 int total_mapcount(struct page *page);
740 int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
741 #else
742 static inline int total_mapcount(struct page *page)
743 {
744 return page_mapcount(page);
745 }
746 static inline int page_trans_huge_mapcount(struct page *page,
747 int *total_mapcount)
748 {
749 int mapcount = page_mapcount(page);
750 if (total_mapcount)
751 *total_mapcount = mapcount;
752 return mapcount;
753 }
754 #endif
755
756 static inline struct page *virt_to_head_page(const void *x)
757 {
758 struct page *page = virt_to_page(x);
759
760 return compound_head(page);
761 }
762
763 void __put_page(struct page *page);
764
765 void put_pages_list(struct list_head *pages);
766
767 void split_page(struct page *page, unsigned int order);
768
769
770
771
772
773
774 typedef void compound_page_dtor(struct page *);
775
776
777 enum compound_dtor_id {
778 NULL_COMPOUND_DTOR,
779 COMPOUND_PAGE_DTOR,
780 #ifdef CONFIG_HUGETLB_PAGE
781 HUGETLB_PAGE_DTOR,
782 #endif
783 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
784 TRANSHUGE_PAGE_DTOR,
785 #endif
786 NR_COMPOUND_DTORS,
787 };
788 extern compound_page_dtor * const compound_page_dtors[];
789
790 static inline void set_compound_page_dtor(struct page *page,
791 enum compound_dtor_id compound_dtor)
792 {
793 VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
794 page[1].compound_dtor = compound_dtor;
795 }
796
797 static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
798 {
799 VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
800 return compound_page_dtors[page[1].compound_dtor];
801 }
802
803 static inline unsigned int compound_order(struct page *page)
804 {
805 if (!PageHead(page))
806 return 0;
807 return page[1].compound_order;
808 }
809
810 static inline void set_compound_order(struct page *page, unsigned int order)
811 {
812 page[1].compound_order = order;
813 }
814
815
816 static inline unsigned long compound_nr(struct page *page)
817 {
818 return 1UL << compound_order(page);
819 }
820
821
822 static inline unsigned long page_size(struct page *page)
823 {
824 return PAGE_SIZE << compound_order(page);
825 }
826
827
828 static inline unsigned int page_shift(struct page *page)
829 {
830 return PAGE_SHIFT + compound_order(page);
831 }
832
833 void free_compound_page(struct page *page);
834
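/*
 * Illustrative sketch (not part of mm.h, names are hypothetical): for a
 * compound page every helper above derives from compound_order() of the
 * head, so page_size() is PAGE_SIZE << order and compound_nr() is 1 << order.
 */
static inline unsigned long example_bytes_spanned(struct page *page)
{
	page = compound_head(page);		/* a tail page reads as order 0 */
	return compound_nr(page) * PAGE_SIZE;	/* same value as page_size(page) */
}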
835 #ifdef CONFIG_MMU
836
837
838
839
840
841
842 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
843 {
844 if (likely(vma->vm_flags & VM_WRITE))
845 pte = pte_mkwrite(pte);
846 return pte;
847 }
848
849 vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
850 struct page *page);
851 vm_fault_t finish_fault(struct vm_fault *vmf);
852 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
853 #endif
854
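/*
 * Illustrative sketch (not part of mm.h, names are hypothetical): fault
 * handlers use maybe_mkwrite() so the hardware write bit is only set when
 * the VMA actually permits writing.
 */
static inline pte_t example_mk_pte_for_vma(struct page *page,
					   struct vm_area_struct *vma)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	return maybe_mkwrite(pte_mkdirty(entry), vma);	/* writable only with VM_WRITE */
}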
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921 #define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
922 #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
923 #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
924 #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
925 #define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
926
927
928
929
930
931
932 #define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
933 #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
934 #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
935 #define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
936 #define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
937
938
939 #ifdef NODE_NOT_IN_PAGE_FLAGS
940 #define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
941 #define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
942 SECTIONS_PGOFF : ZONES_PGOFF)
943 #else
944 #define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
945 #define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
946 NODES_PGOFF : ZONES_PGOFF)
947 #endif
948
949 #define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
950
951 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
952 #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
953 #endif
954
955 #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
956 #define NODES_MASK ((1UL << NODES_WIDTH) - 1)
957 #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
958 #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
959 #define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
960 #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
961
962 static inline enum zone_type page_zonenum(const struct page *page)
963 {
964 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
965 }
966
967 #ifdef CONFIG_ZONE_DEVICE
968 static inline bool is_zone_device_page(const struct page *page)
969 {
970 return page_zonenum(page) == ZONE_DEVICE;
971 }
972 extern void memmap_init_zone_device(struct zone *, unsigned long,
973 unsigned long, struct dev_pagemap *);
974 #else
975 static inline bool is_zone_device_page(const struct page *page)
976 {
977 return false;
978 }
979 #endif
980
981 #ifdef CONFIG_DEV_PAGEMAP_OPS
982 void __put_devmap_managed_page(struct page *page);
983 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
984 static inline bool put_devmap_managed_page(struct page *page)
985 {
986 if (!static_branch_unlikely(&devmap_managed_key))
987 return false;
988 if (!is_zone_device_page(page))
989 return false;
990 switch (page->pgmap->type) {
991 case MEMORY_DEVICE_PRIVATE:
992 case MEMORY_DEVICE_FS_DAX:
993 __put_devmap_managed_page(page);
994 return true;
995 default:
996 break;
997 }
998 return false;
999 }
1000
1001 #else
1002 static inline bool put_devmap_managed_page(struct page *page)
1003 {
1004 return false;
1005 }
1006 #endif
1007
1008 static inline bool is_device_private_page(const struct page *page)
1009 {
1010 return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
1011 IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
1012 is_zone_device_page(page) &&
1013 page->pgmap->type == MEMORY_DEVICE_PRIVATE;
1014 }
1015
1016 static inline bool is_pci_p2pdma_page(const struct page *page)
1017 {
1018 return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
1019 IS_ENABLED(CONFIG_PCI_P2PDMA) &&
1020 is_zone_device_page(page) &&
1021 page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
1022 }
1023
1024
1025 #define page_ref_zero_or_close_to_overflow(page) \
1026 ((unsigned int) page_ref_count(page) + 127u <= 127u)
1027
1028 static inline void get_page(struct page *page)
1029 {
1030 page = compound_head(page);
1031
1032
1033
1034
1035 VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
1036 page_ref_inc(page);
1037 }
1038
1039 static inline __must_check bool try_get_page(struct page *page)
1040 {
1041 page = compound_head(page);
1042 if (WARN_ON_ONCE(page_ref_count(page) <= 0))
1043 return false;
1044 page_ref_inc(page);
1045 return true;
1046 }
1047
1048 static inline void put_page(struct page *page)
1049 {
1050 page = compound_head(page);
1051
1052
1053
1054
1055
1056
1057
1058 if (put_devmap_managed_page(page))
1059 return;
1060
1061 if (put_page_testzero(page))
1062 __put_page(page);
1063 }
1064
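/*
 * Illustrative sketch (not part of mm.h, names are hypothetical): get_page()
 * and put_page() both operate on the compound head, so a reference taken
 * through a tail page is dropped correctly.
 */
static inline void example_hold_page_briefly(struct page *page)
{
	get_page(page);		/* refcount must already be > 0 here */
	/* ... access the page ... */
	put_page(page);		/* may end up freeing it via __put_page() */
}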
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080 static inline void put_user_page(struct page *page)
1081 {
1082 put_page(page);
1083 }
1084
1085 void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
1086 bool make_dirty);
1087
1088 void put_user_pages(struct page **pages, unsigned long npages);
1089
1090 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
1091 #define SECTION_IN_PAGE_FLAGS
1092 #endif
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102 static inline int page_zone_id(struct page *page)
1103 {
1104 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
1105 }
1106
1107 #ifdef NODE_NOT_IN_PAGE_FLAGS
1108 extern int page_to_nid(const struct page *page);
1109 #else
1110 static inline int page_to_nid(const struct page *page)
1111 {
1112 struct page *p = (struct page *)page;
1113
1114 return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
1115 }
1116 #endif
1117
1118 #ifdef CONFIG_NUMA_BALANCING
1119 static inline int cpu_pid_to_cpupid(int cpu, int pid)
1120 {
1121 return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
1122 }
1123
1124 static inline int cpupid_to_pid(int cpupid)
1125 {
1126 return cpupid & LAST__PID_MASK;
1127 }
1128
1129 static inline int cpupid_to_cpu(int cpupid)
1130 {
1131 return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
1132 }
1133
1134 static inline int cpupid_to_nid(int cpupid)
1135 {
1136 return cpu_to_node(cpupid_to_cpu(cpupid));
1137 }
1138
1139 static inline bool cpupid_pid_unset(int cpupid)
1140 {
1141 return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
1142 }
1143
1144 static inline bool cpupid_cpu_unset(int cpupid)
1145 {
1146 return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
1147 }
1148
1149 static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
1150 {
1151 return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
1152 }
1153
1154 #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
1155 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
1156 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
1157 {
1158 return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
1159 }
1160
1161 static inline int page_cpupid_last(struct page *page)
1162 {
1163 return page->_last_cpupid;
1164 }
1165 static inline void page_cpupid_reset_last(struct page *page)
1166 {
1167 page->_last_cpupid = -1 & LAST_CPUPID_MASK;
1168 }
1169 #else
1170 static inline int page_cpupid_last(struct page *page)
1171 {
1172 return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
1173 }
1174
1175 extern int page_cpupid_xchg_last(struct page *page, int cpupid);
1176
1177 static inline void page_cpupid_reset_last(struct page *page)
1178 {
1179 page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
1180 }
1181 #endif
1182 #else
1183 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
1184 {
1185 return page_to_nid(page);
1186 }
1187
1188 static inline int page_cpupid_last(struct page *page)
1189 {
1190 return page_to_nid(page);
1191 }
1192
1193 static inline int cpupid_to_nid(int cpupid)
1194 {
1195 return -1;
1196 }
1197
1198 static inline int cpupid_to_pid(int cpupid)
1199 {
1200 return -1;
1201 }
1202
1203 static inline int cpupid_to_cpu(int cpupid)
1204 {
1205 return -1;
1206 }
1207
1208 static inline int cpu_pid_to_cpupid(int nid, int pid)
1209 {
1210 return -1;
1211 }
1212
1213 static inline bool cpupid_pid_unset(int cpupid)
1214 {
1215 return 1;
1216 }
1217
1218 static inline void page_cpupid_reset_last(struct page *page)
1219 {
1220 }
1221
1222 static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1223 {
1224 return false;
1225 }
1226 #endif
1227
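/*
 * Illustrative sketch (not part of mm.h, names are hypothetical): NUMA
 * balancing records the last CPU/PID that touched a page via
 * page_cpupid_xchg_last(); without CONFIG_NUMA_BALANCING the helpers above
 * degrade to -1 values and no-ops.
 */
static inline int example_record_last_access(struct page *page, int cpu,
					     pid_t pid)
{
	/* returns the previously stored cpupid (or just the node id) */
	return page_cpupid_xchg_last(page, cpu_pid_to_cpupid(cpu, pid));
}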
1228 #ifdef CONFIG_KASAN_SW_TAGS
1229 static inline u8 page_kasan_tag(const struct page *page)
1230 {
1231 return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
1232 }
1233
1234 static inline void page_kasan_tag_set(struct page *page, u8 tag)
1235 {
1236 page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
1237 page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
1238 }
1239
1240 static inline void page_kasan_tag_reset(struct page *page)
1241 {
1242 page_kasan_tag_set(page, 0xff);
1243 }
1244 #else
1245 static inline u8 page_kasan_tag(const struct page *page)
1246 {
1247 return 0xff;
1248 }
1249
1250 static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
1251 static inline void page_kasan_tag_reset(struct page *page) { }
1252 #endif
1253
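/*
 * Illustrative sketch (not part of mm.h, names are hypothetical): with
 * CONFIG_KASAN_SW_TAGS the tag lives in page->flags; otherwise the setters
 * are no-ops and page_kasan_tag() always reads the match-all tag 0xff.
 */
static inline void example_reset_tag(struct page *page)
{
	page_kasan_tag_reset(page);		/* stores 0xff when SW tags are on */
	WARN_ON(page_kasan_tag(page) != 0xff);	/* holds in both configurations */
}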
1254 static inline struct zone *page_zone(const struct page *page)
1255 {
1256 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1257 }
1258
1259 static inline pg_data_t *page_pgdat(const struct page *page)
1260 {
1261 return NODE_DATA(page_to_nid(page));
1262 }
1263
1264 #ifdef SECTION_IN_PAGE_FLAGS
1265 static inline void set_page_section(struct page *page, unsigned long section)
1266 {
1267 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1268 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1269 }
1270
1271 static inline unsigned long page_to_section(const struct page *page)
1272 {
1273 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1274 }
1275 #endif
1276
1277 static inline void set_page_zone(struct page *page, enum zone_type zone)
1278 {
1279 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
1280 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
1281 }
1282
1283 static inline void set_page_node(struct page *page, unsigned long node)
1284 {
1285 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
1286 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
1287 }
1288
1289 static inline void set_page_links(struct page *page, enum zone_type zone,
1290 unsigned long node, unsigned long pfn)
1291 {
1292 set_page_zone(page, zone);
1293 set_page_node(page, node);
1294 #ifdef SECTION_IN_PAGE_FLAGS
1295 set_page_section(page, pfn_to_section_nr(pfn));
1296 #endif
1297 }
1298
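/*
 * Illustrative sketch (not part of mm.h, names are hypothetical):
 * set_page_links() packs the zone, node and (with SECTION_IN_PAGE_FLAGS)
 * section into page->flags; page_zonenum() and friends decode the same bits.
 */
static inline bool example_links_round_trip(struct page *page,
					    enum zone_type zone, int nid,
					    unsigned long pfn)
{
	set_page_links(page, zone, nid, pfn);
	return page_zonenum(page) == zone;	/* node/section decode similarly */
}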
1299 #ifdef CONFIG_MEMCG
1300 static inline struct mem_cgroup *page_memcg(struct page *page)
1301 {
1302 return page->mem_cgroup;
1303 }
1304 static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
1305 {
1306 WARN_ON_ONCE(!rcu_read_lock_held());
1307 return READ_ONCE(page->mem_cgroup);
1308 }
1309 #else
1310 static inline struct mem_cgroup *page_memcg(struct page *page)
1311 {
1312 return NULL;
1313 }
1314 static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
1315 {
1316 WARN_ON_ONCE(!rcu_read_lock_held());
1317 return NULL;
1318 }
1319 #endif
1320
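/*
 * Illustrative sketch (not part of mm.h, names are hypothetical):
 * page_memcg_rcu() must run under rcu_read_lock(); without CONFIG_MEMCG both
 * accessors simply return NULL.  Assumes <linux/rcupdate.h> is available.
 */
static inline bool example_page_is_charged(struct page *page)
{
	bool charged;

	rcu_read_lock();
	charged = page_memcg_rcu(page) != NULL;
	rcu_read_unlock();
	return charged;
}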
1321
1322
1323
1324 #include <linux/vmstat.h>
1325
1326 static __always_inline void *lowmem_page_address(const struct page *page)
1327 {
1328 return page_to_virt(page);
1329 }
1330
1331 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
1332 #define HASHED_PAGE_VIRTUAL
1333 #endif
1334
1335 #if defined(WANT_PAGE_VIRTUAL)
1336 static inline void *page_address(const struct page *page)
1337 {
1338 return page->virtual;
1339 }
1340 static inline void set_page_address(struct page *page, void *address)
1341 {
1342 page->virtual = address;
1343 }
1344 #define page_address_init() do { } while(0)
1345 #endif
1346
1347 #if defined(HASHED_PAGE_VIRTUAL)
1348 void *page_address(const struct page *page);
1349 void set_page_address(struct page *page, void *virtual);
1350 void page_address_init(void);
1351 #endif
1352
1353 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
1354 #define page_address(page) lowmem_page_address(page)
1355 #define set_page_address(page, address) do { } while(0)
1356 #define page_address_init() do { } while(0)
1357 #endif
1358
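/*
 * Illustrative sketch (not part of mm.h, names are hypothetical):
 * page_address() resolves to lowmem_page_address() unless the architecture
 * stores (WANT_PAGE_VIRTUAL) or hashes (HASHED_PAGE_VIRTUAL) virtual
 * addresses for highmem pages.
 */
static inline void example_zero_mapped_page(struct page *page)
{
	void *kaddr = page_address(page);	/* NULL for unmapped highmem */

	if (kaddr)
		memset(kaddr, 0, page_size(page));
}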
1359 extern void *page_rmapping(struct page *page);
1360 extern struct anon_vma *page_anon_vma(struct page *page);
1361 extern struct address_space *page_mapping(struct page *page);
1362
1363 extern struct address_space *__page_file_mapping(struct page *);
1364
1365 static inline
1366 struct address_space *page_file_mapping(struct page *page)
1367 {
1368 if (unlikely(PageSwapCache(page)))
1369 return __page_file_mapping(page);
1370
1371 return page->mapping;
1372 }
1373
1374 extern pgoff_t __page_file_index(struct page *page);
1375
1376
1377
1378
1379
1380 static inline pgoff_t page_index(struct page *page)
1381 {
1382 if (unlikely(PageSwapCache(page)))
1383 return __page_file_index(page);
1384 return page->index;
1385 }
1386
1387 bool page_mapped(struct page *page);
1388 struct address_space *page_mapping(struct page *page);
1389 struct address_space *page_mapping_file(struct page *page);
1390
1391
1392
1393
1394
1395
1396 static inline bool page_is_pfmemalloc(struct page *page)
1397 {
1398
1399
1400
1401
1402 return page->index == -1UL;
1403 }
1404
1405
1406
1407
1408
1409 static inline void set_page_pfmemalloc(struct page *page)
1410 {
1411 page->index = -1UL;
1412 }
1413
1414 static inline void clear_page_pfmemalloc(struct page *page)
1415 {
1416 page->index = 0;
1417 }
1418
1419
1420
1421
1422 extern void pagefault_out_of_memory(void);
1423
1424 #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
1425
1426
1427
1428
1429
1430 #define SHOW_MEM_FILTER_NODES (0x0001u)
1431
1432 extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
1433
1434 #ifdef CONFIG_MMU
1435 extern bool can_do_mlock(void);
1436 #else
1437 static inline bool can_do_mlock(void) { return false; }
1438 #endif
1439 extern int user_shm_lock(size_t, struct user_struct *);
1440 extern void user_shm_unlock(size_t, struct user_struct *);
1441
1442
1443
1444
1445 struct zap_details {
1446 struct address_space *check_mapping;
1447 pgoff_t first_index;
1448 pgoff_t last_index;
1449 };
1450
1451 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1452 pte_t pte);
1453 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1454 pmd_t pmd);
1455
1456 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1457 unsigned long size);
1458 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1459 unsigned long size);
1460 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1461 unsigned long start, unsigned long end);
1462
1463 struct mmu_notifier_range;
1464
1465 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1466 unsigned long end, unsigned long floor, unsigned long ceiling);
1467 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1468 struct vm_area_struct *vma);
1469 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
1470 struct mmu_notifier_range *range,
1471 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
1472 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1473 unsigned long *pfn);
1474 int follow_phys(struct vm_area_struct *vma, unsigned long address,
1475 unsigned int flags, unsigned long *prot, resource_size_t *phys);
1476 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1477 void *buf, int len, int write);
1478
1479 extern void truncate_pagecache(struct inode *inode, loff_t new);
1480 extern void truncate_setsize(struct inode *inode, loff_t newsize);
1481 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1482 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1483 int truncate_inode_page(struct address_space *mapping, struct page *page);
1484 int generic_error_remove_page(struct address_space *mapping, struct page *page);
1485 int invalidate_inode_page(struct page *page);
1486
1487 #ifdef CONFIG_MMU
1488 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
1489 unsigned long address, unsigned int flags);
1490 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1491 unsigned long address, unsigned int fault_flags,
1492 bool *unlocked);
1493 void unmap_mapping_pages(struct address_space *mapping,
1494 pgoff_t start, pgoff_t nr, bool even_cows);
1495 void unmap_mapping_range(struct address_space *mapping,
1496 loff_t const holebegin, loff_t const holelen, int even_cows);
1497 #else
1498 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
1499 unsigned long address, unsigned int flags)
1500 {
1501
1502 BUG();
1503 return VM_FAULT_SIGBUS;
1504 }
1505 static inline int fixup_user_fault(struct task_struct *tsk,
1506 struct mm_struct *mm, unsigned long address,
1507 unsigned int fault_flags, bool *unlocked)
1508 {
1509
1510 BUG();
1511 return -EFAULT;
1512 }
1513 static inline void unmap_mapping_pages(struct address_space *mapping,
1514 pgoff_t start, pgoff_t nr, bool even_cows) { }
1515 static inline void unmap_mapping_range(struct address_space *mapping,
1516 loff_t const holebegin, loff_t const holelen, int even_cows) { }
1517 #endif
1518
1519 static inline void unmap_shared_mapping_range(struct address_space *mapping,
1520 loff_t const holebegin, loff_t const holelen)
1521 {
1522 unmap_mapping_range(mapping, holebegin, holelen, 0);
1523 }
1524
1525 extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
1526 void *buf, int len, unsigned int gup_flags);
1527 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1528 void *buf, int len, unsigned int gup_flags);
1529 extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1530 unsigned long addr, void *buf, int len, unsigned int gup_flags);
1531
1532 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1533 unsigned long start, unsigned long nr_pages,
1534 unsigned int gup_flags, struct page **pages,
1535 struct vm_area_struct **vmas, int *locked);
1536 long get_user_pages(unsigned long start, unsigned long nr_pages,
1537 unsigned int gup_flags, struct page **pages,
1538 struct vm_area_struct **vmas);
1539 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1540 unsigned int gup_flags, struct page **pages, int *locked);
1541 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1542 struct page **pages, unsigned int gup_flags);
1543
1544 int get_user_pages_fast(unsigned long start, int nr_pages,
1545 unsigned int gup_flags, struct page **pages);
1546
1547 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
1548 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
1549 struct task_struct *task, bool bypass_rlim);
1550
1551
1552 struct frame_vector {
1553 unsigned int nr_allocated;
1554 unsigned int nr_frames;
1555 bool got_ref;
1556 bool is_pfns;
1557 void *ptrs[0];
1558
1559
1560 };
1561
1562 struct frame_vector *frame_vector_create(unsigned int nr_frames);
1563 void frame_vector_destroy(struct frame_vector *vec);
1564 int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
1565 unsigned int gup_flags, struct frame_vector *vec);
1566 void put_vaddr_frames(struct frame_vector *vec);
1567 int frame_vector_to_pages(struct frame_vector *vec);
1568 void frame_vector_to_pfns(struct frame_vector *vec);
1569
1570 static inline unsigned int frame_vector_count(struct frame_vector *vec)
1571 {
1572 return vec->nr_frames;
1573 }
1574
1575 static inline struct page **frame_vector_pages(struct frame_vector *vec)
1576 {
1577 if (vec->is_pfns) {
1578 int err = frame_vector_to_pages(vec);
1579
1580 if (err)
1581 return ERR_PTR(err);
1582 }
1583 return (struct page **)(vec->ptrs);
1584 }
1585
1586 static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
1587 {
1588 if (!vec->is_pfns)
1589 frame_vector_to_pfns(vec);
1590 return (unsigned long *)(vec->ptrs);
1591 }
1592
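/*
 * Illustrative sketch (not part of mm.h, names are hypothetical): a
 * frame_vector user creates the vector, pins the user range, and releases
 * everything again.  gup_flags of 0 requests a read-only pin; error
 * handling is trimmed for brevity.
 */
static inline int example_pin_frames(unsigned long start, unsigned int nr)
{
	struct frame_vector *vec = frame_vector_create(nr);
	int ret;

	if (!vec)
		return -ENOMEM;
	ret = get_vaddr_frames(start, nr, 0, vec);
	if (ret > 0) {
		/* frame_vector_pages()/frame_vector_pfns() are usable here */
		put_vaddr_frames(vec);
	}
	frame_vector_destroy(vec);
	return ret < 0 ? ret : 0;
}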
1593 struct kvec;
1594 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1595 struct page **pages);
1596 int get_kernel_page(unsigned long start, int write, struct page **pages);
1597 struct page *get_dump_page(unsigned long addr);
1598
1599 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1600 extern void do_invalidatepage(struct page *page, unsigned int offset,
1601 unsigned int length);
1602
1603 void __set_page_dirty(struct page *, struct address_space *, int warn);
1604 int __set_page_dirty_nobuffers(struct page *page);
1605 int __set_page_dirty_no_writeback(struct page *page);
1606 int redirty_page_for_writepage(struct writeback_control *wbc,
1607 struct page *page);
1608 void account_page_dirtied(struct page *page, struct address_space *mapping);
1609 void account_page_cleaned(struct page *page, struct address_space *mapping,
1610 struct bdi_writeback *wb);
1611 int set_page_dirty(struct page *page);
1612 int set_page_dirty_lock(struct page *page);
1613 void __cancel_dirty_page(struct page *page);
1614 static inline void cancel_dirty_page(struct page *page)
1615 {
1616
1617 if (PageDirty(page))
1618 __cancel_dirty_page(page);
1619 }
1620 int clear_page_dirty_for_io(struct page *page);
1621
1622 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1623
1624 extern unsigned long move_page_tables(struct vm_area_struct *vma,
1625 unsigned long old_addr, struct vm_area_struct *new_vma,
1626 unsigned long new_addr, unsigned long len,
1627 bool need_rmap_locks);
1628 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1629 unsigned long end, pgprot_t newprot,
1630 int dirty_accountable, int prot_numa);
1631 extern int mprotect_fixup(struct vm_area_struct *vma,
1632 struct vm_area_struct **pprev, unsigned long start,
1633 unsigned long end, unsigned long newflags);
1634
1635
1636
1637
1638 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1639 struct page **pages);
1640
1641
1642
1643 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1644 {
1645 long val = atomic_long_read(&mm->rss_stat.count[member]);
1646
1647 #ifdef SPLIT_RSS_COUNTING
1648
1649
1650
1651
1652 if (val < 0)
1653 val = 0;
1654 #endif
1655 return (unsigned long)val;
1656 }
1657
1658 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1659 {
1660 atomic_long_add(value, &mm->rss_stat.count[member]);
1661 }
1662
1663 static inline void inc_mm_counter(struct mm_struct *mm, int member)
1664 {
1665 atomic_long_inc(&mm->rss_stat.count[member]);
1666 }
1667
1668 static inline void dec_mm_counter(struct mm_struct *mm, int member)
1669 {
1670 atomic_long_dec(&mm->rss_stat.count[member]);
1671 }
1672
1673
1674 static inline int mm_counter_file(struct page *page)
1675 {
1676 if (PageSwapBacked(page))
1677 return MM_SHMEMPAGES;
1678 return MM_FILEPAGES;
1679 }
1680
1681 static inline int mm_counter(struct page *page)
1682 {
1683 if (PageAnon(page))
1684 return MM_ANONPAGES;
1685 return mm_counter_file(page);
1686 }
1687
1688 static inline unsigned long get_mm_rss(struct mm_struct *mm)
1689 {
1690 return get_mm_counter(mm, MM_FILEPAGES) +
1691 get_mm_counter(mm, MM_ANONPAGES) +
1692 get_mm_counter(mm, MM_SHMEMPAGES);
1693 }
1694
1695 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1696 {
1697 return max(mm->hiwater_rss, get_mm_rss(mm));
1698 }
1699
1700 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1701 {
1702 return max(mm->hiwater_vm, mm->total_vm);
1703 }
1704
1705 static inline void update_hiwater_rss(struct mm_struct *mm)
1706 {
1707 unsigned long _rss = get_mm_rss(mm);
1708
1709 if ((mm)->hiwater_rss < _rss)
1710 (mm)->hiwater_rss = _rss;
1711 }
1712
1713 static inline void update_hiwater_vm(struct mm_struct *mm)
1714 {
1715 if (mm->hiwater_vm < mm->total_vm)
1716 mm->hiwater_vm = mm->total_vm;
1717 }
1718
1719 static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
1720 {
1721 mm->hiwater_rss = get_mm_rss(mm);
1722 }
1723
1724 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1725 struct mm_struct *mm)
1726 {
1727 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1728
1729 if (*maxrss < hiwater_rss)
1730 *maxrss = hiwater_rss;
1731 }
1732
1733 #if defined(SPLIT_RSS_COUNTING)
1734 void sync_mm_rss(struct mm_struct *mm);
1735 #else
1736 static inline void sync_mm_rss(struct mm_struct *mm)
1737 {
1738 }
1739 #endif
1740
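/*
 * Illustrative sketch (not part of mm.h, names are hypothetical):
 * mm_counter() selects the RSS bucket (anon, file or shmem) for a page and
 * the inc/dec helpers adjust it.
 */
static inline void example_account_mapped(struct mm_struct *mm,
					  struct page *page)
{
	inc_mm_counter(mm, mm_counter(page));
	update_hiwater_rss(mm);		/* remember the new RSS peak */
}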
1741 #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
1742 static inline int pte_devmap(pte_t pte)
1743 {
1744 return 0;
1745 }
1746 #endif
1747
1748 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
1749
1750 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1751 spinlock_t **ptl);
1752 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1753 spinlock_t **ptl)
1754 {
1755 pte_t *ptep;
1756 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1757 return ptep;
1758 }
1759
1760 #ifdef __PAGETABLE_P4D_FOLDED
1761 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
1762 unsigned long address)
1763 {
1764 return 0;
1765 }
1766 #else
1767 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1768 #endif
1769
1770 #if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
1771 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
1772 unsigned long address)
1773 {
1774 return 0;
1775 }
1776 static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
1777 static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
1778
1779 #else
1780 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
1781
1782 static inline void mm_inc_nr_puds(struct mm_struct *mm)
1783 {
1784 if (mm_pud_folded(mm))
1785 return;
1786 atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
1787 }
1788
1789 static inline void mm_dec_nr_puds(struct mm_struct *mm)
1790 {
1791 if (mm_pud_folded(mm))
1792 return;
1793 atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
1794 }
1795 #endif
1796
1797 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
1798 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1799 unsigned long address)
1800 {
1801 return 0;
1802 }
1803
1804 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
1805 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
1806
1807 #else
1808 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1809
1810 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
1811 {
1812 if (mm_pmd_folded(mm))
1813 return;
1814 atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
1815 }
1816
1817 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1818 {
1819 if (mm_pmd_folded(mm))
1820 return;
1821 atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
1822 }
1823 #endif
1824
1825 #ifdef CONFIG_MMU
1826 static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
1827 {
1828 atomic_long_set(&mm->pgtables_bytes, 0);
1829 }
1830
1831 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
1832 {
1833 return atomic_long_read(&mm->pgtables_bytes);
1834 }
1835
1836 static inline void mm_inc_nr_ptes(struct mm_struct *mm)
1837 {
1838 atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
1839 }
1840
1841 static inline void mm_dec_nr_ptes(struct mm_struct *mm)
1842 {
1843 atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
1844 }
1845 #else
1846
1847 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
1848 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
1849 {
1850 return 0;
1851 }
1852
1853 static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
1854 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
1855 #endif
1856
1857 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
1858 int __pte_alloc_kernel(pmd_t *pmd);
1859
1860
1861
1862
1863
1864 #if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1865
1866 #ifndef __ARCH_HAS_5LEVEL_HACK
1867 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
1868 unsigned long address)
1869 {
1870 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
1871 NULL : p4d_offset(pgd, address);
1872 }
1873
1874 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
1875 unsigned long address)
1876 {
1877 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
1878 NULL : pud_offset(p4d, address);
1879 }
1880 #endif
1881
1882 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1883 {
1884 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1885 NULL: pmd_offset(pud, address);
1886 }
1887 #endif
1888
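/*
 * Illustrative sketch (not part of mm.h, names are hypothetical; assumes
 * CONFIG_MMU): the *_alloc() helpers above return the existing lower-level
 * table or allocate one, so a walk that may need to populate page tables
 * looks like this.
 */
static inline pmd_t *example_walk_to_pmd(struct mm_struct *mm,
					 unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
	pud_t *pud;

	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	return pmd_alloc(mm, pud, addr);	/* NULL on allocation failure */
}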
1889 #if USE_SPLIT_PTE_PTLOCKS
1890 #if ALLOC_SPLIT_PTLOCKS
1891 void __init ptlock_cache_init(void);
1892 extern bool ptlock_alloc(struct page *page);
1893 extern void ptlock_free(struct page *page);
1894
1895 static inline spinlock_t *ptlock_ptr(struct page *page)
1896 {
1897 return page->ptl;
1898 }
1899 #else
1900 static inline void ptlock_cache_init(void)
1901 {
1902 }
1903
1904 static inline bool ptlock_alloc(struct page *page)
1905 {
1906 return true;
1907 }
1908
1909 static inline void ptlock_free(struct page *page)
1910 {
1911 }
1912
1913 static inline spinlock_t *ptlock_ptr(struct page *page)
1914 {
1915 return &page->ptl;
1916 }
1917 #endif
1918
1919 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1920 {
1921 return ptlock_ptr(pmd_page(*pmd));
1922 }
1923
1924 static inline bool ptlock_init(struct page *page)
1925 {
1926
1927
1928
1929
1930
1931
1932
1933 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
1934 if (!ptlock_alloc(page))
1935 return false;
1936 spin_lock_init(ptlock_ptr(page));
1937 return true;
1938 }
1939
1940 #else
1941
1942
1943
1944 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1945 {
1946 return &mm->page_table_lock;
1947 }
1948 static inline void ptlock_cache_init(void) {}
1949 static inline bool ptlock_init(struct page *page) { return true; }
1950 static inline void ptlock_free(struct page *page) {}
1951 #endif
1952
1953 static inline void pgtable_init(void)
1954 {
1955 ptlock_cache_init();
1956 pgtable_cache_init();
1957 }
1958
1959 static inline bool pgtable_pte_page_ctor(struct page *page)
1960 {
1961 if (!ptlock_init(page))
1962 return false;
1963 __SetPageTable(page);
1964 inc_zone_page_state(page, NR_PAGETABLE);
1965 return true;
1966 }
1967
1968 static inline void pgtable_pte_page_dtor(struct page *page)
1969 {
1970 ptlock_free(page);
1971 __ClearPageTable(page);
1972 dec_zone_page_state(page, NR_PAGETABLE);
1973 }
1974
1975 #define pte_offset_map_lock(mm, pmd, address, ptlp) \
1976 ({ \
1977 spinlock_t *__ptl = pte_lockptr(mm, pmd); \
1978 pte_t *__pte = pte_offset_map(pmd, address); \
1979 *(ptlp) = __ptl; \
1980 spin_lock(__ptl); \
1981 __pte; \
1982 })
1983
1984 #define pte_unmap_unlock(pte, ptl) do { \
1985 spin_unlock(ptl); \
1986 pte_unmap(pte); \
1987 } while (0)
1988
1989 #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
1990
1991 #define pte_alloc_map(mm, pmd, address) \
1992 (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
1993
1994 #define pte_alloc_map_lock(mm, pmd, address, ptlp) \
1995 (pte_alloc(mm, pmd) ? \
1996 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
1997
1998 #define pte_alloc_kernel(pmd, address) \
1999 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
2000 NULL: pte_offset_kernel(pmd, address))
2001
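/*
 * Illustrative sketch (not part of mm.h, names are hypothetical):
 * pte_offset_map_lock() and pte_unmap_unlock() bracket any access to a
 * single PTE under its (possibly split) page table lock.
 */
static inline bool example_pte_is_present(struct mm_struct *mm, pmd_t *pmd,
					  unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	bool present = pte_present(*pte);

	pte_unmap_unlock(pte, ptl);
	return present;
}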
2002 #if USE_SPLIT_PMD_PTLOCKS
2003
2004 static struct page *pmd_to_page(pmd_t *pmd)
2005 {
2006 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
2007 return virt_to_page((void *)((unsigned long) pmd & mask));
2008 }
2009
2010 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2011 {
2012 return ptlock_ptr(pmd_to_page(pmd));
2013 }
2014
2015 static inline bool pgtable_pmd_page_ctor(struct page *page)
2016 {
2017 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2018 page->pmd_huge_pte = NULL;
2019 #endif
2020 return ptlock_init(page);
2021 }
2022
2023 static inline void pgtable_pmd_page_dtor(struct page *page)
2024 {
2025 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2026 VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
2027 #endif
2028 ptlock_free(page);
2029 }
2030
2031 #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
2032
2033 #else
2034
2035 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2036 {
2037 return &mm->page_table_lock;
2038 }
2039
2040 static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
2041 static inline void pgtable_pmd_page_dtor(struct page *page) {}
2042
2043 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
2044
2045 #endif
2046
2047 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
2048 {
2049 spinlock_t *ptl = pmd_lockptr(mm, pmd);
2050 spin_lock(ptl);
2051 return ptl;
2052 }
2053
2054
2055
2056
2057
2058
2059
2060 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
2061 {
2062 return &mm->page_table_lock;
2063 }
2064
2065 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
2066 {
2067 spinlock_t *ptl = pud_lockptr(mm, pud);
2068
2069 spin_lock(ptl);
2070 return ptl;
2071 }
2072
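/*
 * Illustrative sketch (not part of mm.h, names are hypothetical): pmd_lock()
 * and pud_lock() return the spinlock already held, so the caller only needs
 * to unlock it.
 */
static inline bool example_pmd_is_none(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);	/* split PMD lock or page_table_lock */
	bool ret = pmd_none(*pmd);

	spin_unlock(ptl);
	return ret;
}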
2073 extern void __init pagecache_init(void);
2074 extern void free_area_init(unsigned long * zones_size);
2075 extern void __init free_area_init_node(int nid, unsigned long * zones_size,
2076 unsigned long zone_start_pfn, unsigned long *zholes_size);
2077 extern void free_initmem(void);
2078
2079
2080
2081
2082
2083
2084
2085 extern unsigned long free_reserved_area(void *start, void *end,
2086 int poison, const char *s);
2087
2088 #ifdef CONFIG_HIGHMEM
2089
2090
2091
2092
2093 extern void free_highmem_page(struct page *page);
2094 #endif
2095
2096 extern void adjust_managed_page_count(struct page *page, long count);
2097 extern void mem_init_print_info(const char *str);
2098
2099 extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
2100
2101
2102 static inline void __free_reserved_page(struct page *page)
2103 {
2104 ClearPageReserved(page);
2105 init_page_count(page);
2106 __free_page(page);
2107 }
2108
2109 static inline void free_reserved_page(struct page *page)
2110 {
2111 __free_reserved_page(page);
2112 adjust_managed_page_count(page, 1);
2113 }
2114
2115 static inline void mark_page_reserved(struct page *page)
2116 {
2117 SetPageReserved(page);
2118 adjust_managed_page_count(page, -1);
2119 }
2120
2121
2122
2123
2124
2125
2126
2127 static inline unsigned long free_initmem_default(int poison)
2128 {
2129 extern char __init_begin[], __init_end[];
2130
2131 return free_reserved_area(&__init_begin, &__init_end,
2132 poison, "unused kernel");
2133 }
2134
2135 static inline unsigned long get_num_physpages(void)
2136 {
2137 int nid;
2138 unsigned long phys_pages = 0;
2139
2140 for_each_online_node(nid)
2141 phys_pages += node_present_pages(nid);
2142
2143 return phys_pages;
2144 }
2145
2146 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
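/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP an architecture registers its memory
 * with memblock and lets the core compute zone sizes and memory holes,
 * instead of providing its own zone_sizes_init().  A rough sketch of the
 * expected boot-time flow (pseudo-code; the range-iteration helper is
 * architecture specific):
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES] = {
 *		max_dma_pfn, max_normal_pfn, max_highmem_pfn };
 *
 *	for_each_physical_memory_range()	// arch-specific discovery
 *		memblock_add_node(base, size, nid);
 *	free_area_init_nodes(max_zone_pfns);
 *
 * The helpers declared below (absent_pages_in_range(), get_pfn_range_for_nid()
 * and friends) are implemented in mm/page_alloc.c on top of that information.
 */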
2173 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
2174 unsigned long node_map_pfn_alignment(void);
2175 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
2176 unsigned long end_pfn);
2177 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
2178 unsigned long end_pfn);
2179 extern void get_pfn_range_for_nid(unsigned int nid,
2180 unsigned long *start_pfn, unsigned long *end_pfn);
2181 extern unsigned long find_min_pfn_with_active_regions(void);
2182 extern void free_bootmem_with_active_regions(int nid,
2183 unsigned long max_low_pfn);
2184 extern void sparse_memory_present_with_active_regions(int nid);
2185
2186 #endif
2187
2188 #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
2189 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
2190 static inline int __early_pfn_to_nid(unsigned long pfn,
2191 struct mminit_pfnnid_cache *state)
2192 {
2193 return 0;
2194 }
2195 #else
2196
2197 extern int __meminit early_pfn_to_nid(unsigned long pfn);
2198
2199 extern int __meminit __early_pfn_to_nid(unsigned long pfn,
2200 struct mminit_pfnnid_cache *state);
2201 #endif
2202
2203 #if !defined(CONFIG_FLAT_NODE_MEM_MAP)
2204 void zero_resv_unavail(void);
2205 #else
2206 static inline void zero_resv_unavail(void) {}
2207 #endif
2208
2209 extern void set_dma_reserve(unsigned long new_dma_reserve);
2210 extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
2211 enum memmap_context, struct vmem_altmap *);
2212 extern void setup_per_zone_wmarks(void);
2213 extern int __meminit init_per_zone_wmark_min(void);
2214 extern void mem_init(void);
2215 extern void __init mmap_init(void);
2216 extern void show_mem(unsigned int flags, nodemask_t *nodemask);
2217 extern long si_mem_available(void);
2218 extern void si_meminfo(struct sysinfo * val);
2219 extern void si_meminfo_node(struct sysinfo *val, int nid);
2220 #ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2221 extern unsigned long arch_reserved_kernel_pages(void);
2222 #endif
2223
2224 extern __printf(3, 4)
2225 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
2226
2227 extern void setup_per_cpu_pageset(void);
2228
2229 extern void zone_pcp_update(struct zone *zone);
2230 extern void zone_pcp_reset(struct zone *zone);
2231
2232
2233 extern int min_free_kbytes;
2234 extern int watermark_boost_factor;
2235 extern int watermark_scale_factor;
2236
2237
2238 extern atomic_long_t mmap_pages_allocated;
2239 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
2240
2241
2242 void vma_interval_tree_insert(struct vm_area_struct *node,
2243 struct rb_root_cached *root);
2244 void vma_interval_tree_insert_after(struct vm_area_struct *node,
2245 struct vm_area_struct *prev,
2246 struct rb_root_cached *root);
2247 void vma_interval_tree_remove(struct vm_area_struct *node,
2248 struct rb_root_cached *root);
2249 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
2250 unsigned long start, unsigned long last);
2251 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
2252 unsigned long start, unsigned long last);
2253
2254 #define vma_interval_tree_foreach(vma, root, start, last) \
2255 for (vma = vma_interval_tree_iter_first(root, start, last); \
2256 vma; vma = vma_interval_tree_iter_next(vma, start, last))
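
/*
 * Illustrative walk (not a definition from this header) over every VMA of a
 * file mapping that overlaps the page range [first, last], with the mapping's
 * i_mmap lock held by the caller:
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last)
 *		... unmap or flush the part of @vma covering the range ...
 */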
2257
2258 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
2259 struct rb_root_cached *root);
2260 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
2261 struct rb_root_cached *root);
2262 struct anon_vma_chain *
2263 anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
2264 unsigned long start, unsigned long last);
2265 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
2266 struct anon_vma_chain *node, unsigned long start, unsigned long last);
2267 #ifdef CONFIG_DEBUG_VM_RB
2268 void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
2269 #endif
2270
2271 #define anon_vma_interval_tree_foreach(avc, root, start, last) \
2272 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
2273 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
2274
2275
2276 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
2277 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
2278 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
2279 struct vm_area_struct *expand);
2280 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
2281 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
2282 {
2283 return __vma_adjust(vma, start, end, pgoff, insert, NULL);
2284 }
2285 extern struct vm_area_struct *vma_merge(struct mm_struct *,
2286 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
2287 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
2288 struct mempolicy *, struct vm_userfaultfd_ctx);
2289 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
2290 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
2291 unsigned long addr, int new_below);
2292 extern int split_vma(struct mm_struct *, struct vm_area_struct *,
2293 unsigned long addr, int new_below);
2294 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
2295 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
2296 struct rb_node **, struct rb_node *);
2297 extern void unlink_file_vma(struct vm_area_struct *);
2298 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
2299 unsigned long addr, unsigned long len, pgoff_t pgoff,
2300 bool *need_rmap_locks);
2301 extern void exit_mmap(struct mm_struct *);
2302
2303 static inline int check_data_rlimit(unsigned long rlim,
2304 unsigned long new,
2305 unsigned long start,
2306 unsigned long end_data,
2307 unsigned long start_data)
2308 {
2309 if (rlim < RLIM_INFINITY) {
2310 if (((new - start) + (end_data - start_data)) > rlim)
2311 return -ENOSPC;
2312 }
2313
2314 return 0;
2315 }
2316
2317 extern int mm_take_all_locks(struct mm_struct *mm);
2318 extern void mm_drop_all_locks(struct mm_struct *mm);
2319
2320 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2321 extern struct file *get_mm_exe_file(struct mm_struct *mm);
2322 extern struct file *get_task_exe_file(struct task_struct *task);
2323
2324 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
2325 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
2326
2327 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
2328 const struct vm_special_mapping *sm);
2329 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
2330 unsigned long addr, unsigned long len,
2331 unsigned long flags,
2332 const struct vm_special_mapping *spec);
2333
2334 extern int install_special_mapping(struct mm_struct *mm,
2335 unsigned long addr, unsigned long len,
2336 unsigned long flags, struct page **pages);
2337
2338 unsigned long randomize_stack_top(unsigned long stack_top);
2339
2340 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
2341
2342 extern unsigned long mmap_region(struct file *file, unsigned long addr,
2343 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2344 struct list_head *uf);
2345 extern unsigned long do_mmap(struct file *file, unsigned long addr,
2346 unsigned long len, unsigned long prot, unsigned long flags,
2347 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
2348 struct list_head *uf);
2349 extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
2350 struct list_head *uf, bool downgrade);
2351 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
2352 struct list_head *uf);
2353
2354 static inline unsigned long
2355 do_mmap_pgoff(struct file *file, unsigned long addr,
2356 unsigned long len, unsigned long prot, unsigned long flags,
2357 unsigned long pgoff, unsigned long *populate,
2358 struct list_head *uf)
2359 {
2360 return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
2361 }
2362
2363 #ifdef CONFIG_MMU
2364 extern int __mm_populate(unsigned long addr, unsigned long len,
2365 int ignore_errors);
2366 static inline void mm_populate(unsigned long addr, unsigned long len)
2367 {
2368
2369 (void) __mm_populate(addr, len, 1);
2370 }
2371 #else
2372 static inline void mm_populate(unsigned long addr, unsigned long len) {}
2373 #endif
2374
2375
2376 extern int __must_check vm_brk(unsigned long, unsigned long);
2377 extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
2378 extern int vm_munmap(unsigned long, size_t);
2379 extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
2380 unsigned long, unsigned long,
2381 unsigned long, unsigned long);
2382
2383 struct vm_unmapped_area_info {
2384 #define VM_UNMAPPED_AREA_TOPDOWN 1
2385 unsigned long flags;
2386 unsigned long length;
2387 unsigned long low_limit;
2388 unsigned long high_limit;
2389 unsigned long align_mask;
2390 unsigned long align_offset;
2391 };
2392
2393 extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
2394 extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
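
/*
 * vm_unmapped_area() searches for an address range that does not intersect
 * any VMA, lies inside [low_limit, high_limit), is at least @length bytes
 * long and satisfies (addr & align_mask) == (align_offset & align_mask).
 * With VM_UNMAPPED_AREA_TOPDOWN set the highest such range is returned,
 * otherwise the lowest.  Illustrative use from an arch get_unmapped_area()
 * implementation (field values are examples only):
 *
 *	struct vm_unmapped_area_info info = {
 *		.length		= len,
 *		.low_limit	= mm->mmap_base,
 *		.high_limit	= TASK_SIZE,
 *	};
 *	addr = vm_unmapped_area(&info);
 */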
2405 static inline unsigned long
2406 vm_unmapped_area(struct vm_unmapped_area_info *info)
2407 {
2408 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
2409 return unmapped_area_topdown(info);
2410 else
2411 return unmapped_area(info);
2412 }
2413
2414
2415 extern void truncate_inode_pages(struct address_space *, loff_t);
2416 extern void truncate_inode_pages_range(struct address_space *,
2417 loff_t lstart, loff_t lend);
2418 extern void truncate_inode_pages_final(struct address_space *);
2419
2420
2421 extern vm_fault_t filemap_fault(struct vm_fault *vmf);
2422 extern void filemap_map_pages(struct vm_fault *vmf,
2423 pgoff_t start_pgoff, pgoff_t end_pgoff);
2424 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
2425
2426
2427 int __must_check write_one_page(struct page *page);
2428 void task_dirty_inc(struct task_struct *tsk);
2429
2430
2431 #define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
2432
2433 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
2434 pgoff_t offset, unsigned long nr_to_read);
2435
2436 void page_cache_sync_readahead(struct address_space *mapping,
2437 struct file_ra_state *ra,
2438 struct file *filp,
2439 pgoff_t offset,
2440 unsigned long size);
2441
2442 void page_cache_async_readahead(struct address_space *mapping,
2443 struct file_ra_state *ra,
2444 struct file *filp,
2445 struct page *pg,
2446 pgoff_t offset,
2447 unsigned long size);
2448
2449 extern unsigned long stack_guard_gap;
2450
2451 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2452
2453
2454 extern int expand_downwards(struct vm_area_struct *vma,
2455 unsigned long address);
2456 #if VM_GROWSUP
2457 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2458 #else
2459 #define expand_upwards(vma, address) (0)
2460 #endif
2461
2462
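/*
 * Look up the first VMA which satisfies addr < vm_end (it may or may not
 * contain @addr); returns NULL if none.  find_vma_prev() additionally
 * returns the preceding VMA through @pprev.
 */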
2463 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2464 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2465 struct vm_area_struct **pprev);
2466
2467
2468
2469 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
2470 {
2471 struct vm_area_struct * vma = find_vma(mm,start_addr);
2472
2473 if (vma && end_addr <= vma->vm_start)
2474 vma = NULL;
2475 return vma;
2476 }
2477
2478 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
2479 {
2480 unsigned long vm_start = vma->vm_start;
2481
2482 if (vma->vm_flags & VM_GROWSDOWN) {
2483 vm_start -= stack_guard_gap;
2484 if (vm_start > vma->vm_start)
2485 vm_start = 0;
2486 }
2487 return vm_start;
2488 }
2489
2490 static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
2491 {
2492 unsigned long vm_end = vma->vm_end;
2493
2494 if (vma->vm_flags & VM_GROWSUP) {
2495 vm_end += stack_guard_gap;
2496 if (vm_end < vma->vm_end)
2497 vm_end = -PAGE_SIZE;
2498 }
2499 return vm_end;
2500 }
2501
2502 static inline unsigned long vma_pages(struct vm_area_struct *vma)
2503 {
2504 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2505 }
2506
2507
2508 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2509 unsigned long vm_start, unsigned long vm_end)
2510 {
2511 struct vm_area_struct *vma = find_vma(mm, vm_start);
2512
2513 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2514 vma = NULL;
2515
2516 return vma;
2517 }
2518
2519 static inline bool range_in_vma(struct vm_area_struct *vma,
2520 unsigned long start, unsigned long end)
2521 {
2522 return (vma && vma->vm_start <= start && end <= vma->vm_end);
2523 }
2524
2525 #ifdef CONFIG_MMU
2526 pgprot_t vm_get_page_prot(unsigned long vm_flags);
2527 void vma_set_page_prot(struct vm_area_struct *vma);
2528 #else
2529 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
2530 {
2531 return __pgprot(0);
2532 }
2533 static inline void vma_set_page_prot(struct vm_area_struct *vma)
2534 {
2535 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2536 }
2537 #endif
2538
2539 #ifdef CONFIG_NUMA_BALANCING
2540 unsigned long change_prot_numa(struct vm_area_struct *vma,
2541 unsigned long start, unsigned long end);
2542 #endif
2543
2544 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2545 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2546 unsigned long pfn, unsigned long size, pgprot_t);
2547 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2548 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2549 unsigned long num);
2550 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2551 unsigned long num);
2552 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2553 unsigned long pfn);
2554 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2555 unsigned long pfn, pgprot_t pgprot);
2556 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2557 pfn_t pfn);
2558 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2559 unsigned long addr, pfn_t pfn);
2560 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2561
2562 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
2563 unsigned long addr, struct page *page)
2564 {
2565 int err = vm_insert_page(vma, addr, page);
2566
2567 if (err == -ENOMEM)
2568 return VM_FAULT_OOM;
2569 if (err < 0 && err != -EBUSY)
2570 return VM_FAULT_SIGBUS;
2571
2572 return VM_FAULT_NOPAGE;
2573 }
2574
2575 static inline vm_fault_t vmf_error(int err)
2576 {
2577 if (err == -ENOMEM)
2578 return VM_FAULT_OOM;
2579 return VM_FAULT_SIGBUS;
2580 }
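
/*
 * vmf_insert_page() and vmf_error() translate kernel error codes into
 * VM_FAULT_* codes for use in ->fault handlers.  Illustrative handler
 * fragment (my_lookup_page() is a made-up helper):
 *
 *	page = my_lookup_page(vmf->pgoff);
 *	if (IS_ERR(page))
 *		return vmf_error(PTR_ERR(page));
 *	return vmf_insert_page(vmf->vma, vmf->address, page);
 */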
2581
2582 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
2583 unsigned int foll_flags);
2584
2585 #define FOLL_WRITE 0x01
2586 #define FOLL_TOUCH 0x02
2587 #define FOLL_GET 0x04
2588 #define FOLL_DUMP 0x08
2589 #define FOLL_FORCE 0x10
2590 #define FOLL_NOWAIT 0x20
2591
2592 #define FOLL_POPULATE 0x40
2593 #define FOLL_SPLIT 0x80
2594 #define FOLL_HWPOISON 0x100
2595 #define FOLL_NUMA 0x200
2596 #define FOLL_MIGRATION 0x400
2597 #define FOLL_TRIED 0x800
2598 #define FOLL_MLOCK 0x1000
2599 #define FOLL_REMOTE 0x2000
2600 #define FOLL_COW 0x4000
2601 #define FOLL_ANON 0x8000
2602 #define FOLL_LONGTERM 0x10000
2603 #define FOLL_SPLIT_PMD 0x20000
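
/*
 * The FOLL_* flags above tune follow_page() and the get_user_pages()
 * family: FOLL_WRITE asks for write access, FOLL_GET takes a reference on
 * each returned page, FOLL_DUMP makes holes report an error (used by core
 * dumps), FOLL_FORCE overrides protections where the caller is allowed to,
 * and FOLL_LONGTERM marks pins that may be held indefinitely (e.g. RDMA)
 * and is therefore refused for mappings, such as filesystem DAX, whose
 * pages cannot be pinned that long.
 */
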
2632 static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
2633 {
2634 if (vm_fault & VM_FAULT_OOM)
2635 return -ENOMEM;
2636 if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
2637 return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
2638 if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
2639 return -EFAULT;
2640 return 0;
2641 }
2642
2643 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
2644 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2645 unsigned long size, pte_fn_t fn, void *data);
2646
2647
2648 #ifdef CONFIG_PAGE_POISONING
2649 extern bool page_poisoning_enabled(void);
2650 extern void kernel_poison_pages(struct page *page, int numpages, int enable);
2651 #else
2652 static inline bool page_poisoning_enabled(void) { return false; }
2653 static inline void kernel_poison_pages(struct page *page, int numpages,
2654 int enable) { }
2655 #endif
2656
2657 #ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
2658 DECLARE_STATIC_KEY_TRUE(init_on_alloc);
2659 #else
2660 DECLARE_STATIC_KEY_FALSE(init_on_alloc);
2661 #endif
2662 static inline bool want_init_on_alloc(gfp_t flags)
2663 {
2664 if (static_branch_unlikely(&init_on_alloc) &&
2665 !page_poisoning_enabled())
2666 return true;
2667 return flags & __GFP_ZERO;
2668 }
2669
2670 #ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
2671 DECLARE_STATIC_KEY_TRUE(init_on_free);
2672 #else
2673 DECLARE_STATIC_KEY_FALSE(init_on_free);
2674 #endif
2675 static inline bool want_init_on_free(void)
2676 {
2677 return static_branch_unlikely(&init_on_free) &&
2678 !page_poisoning_enabled();
2679 }
2680
2681 #ifdef CONFIG_DEBUG_PAGEALLOC
2682 extern void init_debug_pagealloc(void);
2683 #else
2684 static inline void init_debug_pagealloc(void) {}
2685 #endif
2686 extern bool _debug_pagealloc_enabled_early;
2687 DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
2688
2689 static inline bool debug_pagealloc_enabled(void)
2690 {
2691 return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
2692 _debug_pagealloc_enabled_early;
2693 }
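
/*
 * Static-key variant for use in fast paths once init_debug_pagealloc() has
 * run, or wherever a false negative during early boot is harmless.
 */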
2699 static inline bool debug_pagealloc_enabled_static(void)
2700 {
2701 if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
2702 return false;
2703
2704 return static_branch_unlikely(&_debug_pagealloc_enabled);
2705 }
2706
2707 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
2708 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
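
/*
 * kernel_map_pages() maps or unmaps the given pages in the kernel direct
 * mapping so that stray accesses to freed pages fault immediately.  In the
 * DEBUG_PAGEALLOC case callers normally guard it with
 * debug_pagealloc_enabled() or debug_pagealloc_enabled_static().
 */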
2714 static inline void
2715 kernel_map_pages(struct page *page, int numpages, int enable)
2716 {
2717 __kernel_map_pages(page, numpages, enable);
2718 }
2719 #ifdef CONFIG_HIBERNATION
2720 extern bool kernel_page_present(struct page *page);
2721 #endif
2722 #else
2723 static inline void
2724 kernel_map_pages(struct page *page, int numpages, int enable) {}
2725 #ifdef CONFIG_HIBERNATION
2726 static inline bool kernel_page_present(struct page *page) { return true; }
2727 #endif
2728 #endif
2729
2730 #ifdef __HAVE_ARCH_GATE_AREA
2731 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2732 extern int in_gate_area_no_mm(unsigned long addr);
2733 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
2734 #else
2735 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
2736 {
2737 return NULL;
2738 }
2739 static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
2740 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
2741 {
2742 return 0;
2743 }
2744 #endif
2745
2746 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
2747
2748 #ifdef CONFIG_SYSCTL
2749 extern int sysctl_drop_caches;
2750 int drop_caches_sysctl_handler(struct ctl_table *, int,
2751 void __user *, size_t *, loff_t *);
2752 #endif
2753
2754 void drop_slab(void);
2755 void drop_slab_node(int nid);
2756
2757 #ifndef CONFIG_MMU
2758 #define randomize_va_space 0
2759 #else
2760 extern int randomize_va_space;
2761 #endif
2762
2763 const char * arch_vma_name(struct vm_area_struct *vma);
2764 #ifdef CONFIG_MMU
2765 void print_vma_addr(char *prefix, unsigned long rip);
2766 #else
2767 static inline void print_vma_addr(char *prefix, unsigned long rip)
2768 {
2769 }
2770 #endif
2771
2772 void *sparse_buffer_alloc(unsigned long size);
2773 struct page * __populate_section_memmap(unsigned long pfn,
2774 unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
2775 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2776 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
2777 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
2778 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2779 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2780 void *vmemmap_alloc_block(unsigned long size, int node);
2781 struct vmem_altmap;
2782 void *vmemmap_alloc_block_buf(unsigned long size, int node);
2783 void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
2784 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2785 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2786 int node);
2787 int vmemmap_populate(unsigned long start, unsigned long end, int node,
2788 struct vmem_altmap *altmap);
2789 void vmemmap_populate_print_last(void);
2790 #ifdef CONFIG_MEMORY_HOTPLUG
2791 void vmemmap_free(unsigned long start, unsigned long end,
2792 struct vmem_altmap *altmap);
2793 #endif
2794 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2795 unsigned long nr_pages);
2796
2797 enum mf_flags {
2798 MF_COUNT_INCREASED = 1 << 0,
2799 MF_ACTION_REQUIRED = 1 << 1,
2800 MF_MUST_KILL = 1 << 2,
2801 MF_SOFT_OFFLINE = 1 << 3,
2802 };
2803 extern int memory_failure(unsigned long pfn, int flags);
2804 extern void memory_failure_queue(unsigned long pfn, int flags);
2805 extern int unpoison_memory(unsigned long pfn);
2806 extern int get_hwpoison_page(struct page *page);
2807 #define put_hwpoison_page(page) put_page(page)
2808 extern int sysctl_memory_failure_early_kill;
2809 extern int sysctl_memory_failure_recovery;
2810 extern void shake_page(struct page *p, int access);
2811 extern atomic_long_t num_poisoned_pages __read_mostly;
2812 extern int soft_offline_page(struct page *page, int flags);
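
/*
 * Outcomes reported by the memory-failure (hwpoison) handlers for each page
 * type in mf_action_page_type below.
 */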
2818 enum mf_result {
2819 MF_IGNORED,
2820 MF_FAILED,
2821 MF_DELAYED,
2822 MF_RECOVERED,
2823 };
2824
2825 enum mf_action_page_type {
2826 MF_MSG_KERNEL,
2827 MF_MSG_KERNEL_HIGH_ORDER,
2828 MF_MSG_SLAB,
2829 MF_MSG_DIFFERENT_COMPOUND,
2830 MF_MSG_POISONED_HUGE,
2831 MF_MSG_HUGE,
2832 MF_MSG_FREE_HUGE,
2833 MF_MSG_NON_PMD_HUGE,
2834 MF_MSG_UNMAP_FAILED,
2835 MF_MSG_DIRTY_SWAPCACHE,
2836 MF_MSG_CLEAN_SWAPCACHE,
2837 MF_MSG_DIRTY_MLOCKED_LRU,
2838 MF_MSG_CLEAN_MLOCKED_LRU,
2839 MF_MSG_DIRTY_UNEVICTABLE_LRU,
2840 MF_MSG_CLEAN_UNEVICTABLE_LRU,
2841 MF_MSG_DIRTY_LRU,
2842 MF_MSG_CLEAN_LRU,
2843 MF_MSG_TRUNCATED_LRU,
2844 MF_MSG_BUDDY,
2845 MF_MSG_BUDDY_2ND,
2846 MF_MSG_DAX,
2847 MF_MSG_UNKNOWN,
2848 };
2849
2850 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2851 extern void clear_huge_page(struct page *page,
2852 unsigned long addr_hint,
2853 unsigned int pages_per_huge_page);
2854 extern void copy_user_huge_page(struct page *dst, struct page *src,
2855 unsigned long addr_hint,
2856 struct vm_area_struct *vma,
2857 unsigned int pages_per_huge_page);
2858 extern long copy_huge_page_from_user(struct page *dst_page,
2859 const void __user *usr_src,
2860 unsigned int pages_per_huge_page,
2861 bool allow_pagefault);
2862 #endif
2863
2864 #ifdef CONFIG_DEBUG_PAGEALLOC
2865 extern unsigned int _debug_guardpage_minorder;
2866 DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
2867
2868 static inline unsigned int debug_guardpage_minorder(void)
2869 {
2870 return _debug_guardpage_minorder;
2871 }
2872
2873 static inline bool debug_guardpage_enabled(void)
2874 {
2875 return static_branch_unlikely(&_debug_guardpage_enabled);
2876 }
2877
2878 static inline bool page_is_guard(struct page *page)
2879 {
2880 if (!debug_guardpage_enabled())
2881 return false;
2882
2883 return PageGuard(page);
2884 }
2885 #else
2886 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2887 static inline bool debug_guardpage_enabled(void) { return false; }
2888 static inline bool page_is_guard(struct page *page) { return false; }
2889 #endif
2890
2891 #if MAX_NUMNODES > 1
2892 void __init setup_nr_node_ids(void);
2893 #else
2894 static inline void setup_nr_node_ids(void) {}
2895 #endif
2896
2897 extern int memcmp_pages(struct page *page1, struct page *page2);
2898
2899 static inline int pages_identical(struct page *page1, struct page *page2)
2900 {
2901 return !memcmp_pages(page1, page2);
2902 }
2903
2904 #endif
2905 #endif