This source file includes the following definitions.
- can_madv_lru_vma
- ra_submit
- set_page_refcounted
- __find_buddy_pfn
- pageblock_pfn_to_page
- page_order
- is_cow_mapping
- is_exec_mapping
- is_stack_mapping
- is_data_mapping
- munlock_vma_pages_all
- mlock_migrate_page
- __vma_address
- vma_address
- maybe_unlock_mmap_for_io
- clear_page_mlock
- mlock_vma_page
- mlock_migrate_page
- mem_map_offset
- mem_map_next
- mminit_dprintk
- mminit_verify_pageflags_layout
- mminit_verify_zonelist
- mminit_validate_memmodel_limits
- node_reclaim
- try_to_unmap_flush
- try_to_unmap_flush_dirty
- flush_tlb_batched_pending
- is_migrate_highatomic
- is_migrate_highatomic_page
/* SPDX-License-Identifier: GPL-2.0 */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

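/*
 * GFP mask helpers used by the allocator internals (descriptive summary):
 *
 *  - GFP_RECLAIM_MASK: the gfp flags that influence watermark checking and
 *    reclaim behaviour; everything else is filtered out before entering the
 *    reclaim/compaction paths.
 *  - GFP_BOOT_MASK: flags usable during early boot, when reclaim, IO and FS
 *    operations are not yet possible.
 *  - GFP_CONSTRAINT_MASK: flags that restrict which nodes and zones may be
 *    used for the allocation.
 *  - GFP_SLAB_BUG_MASK: flags that are invalid for slab allocations.
 */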
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			struct zap_details *details);

extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size);

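/*
 * Submit the readahead request described by @ra (start, size and async_size
 * must already have been filled in by the readahead heuristics) to the page
 * cache readahead code.
 */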
static inline unsigned long ra_submit(struct file_ra_state *ra,
		struct address_space *mapping, struct file *filp)
{
	return __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);
}

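/*
 * Turn a non-refcounted page (->_refcount == 0) into a refcounted one with a
 * count of one; the page must not be the tail page of a compound page.
 */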
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

#define MAX_RECLAIM_RETRIES 16

extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

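/*
 * Bundles the parameters of a single page allocation request so that the
 * allocator internals (fast path, slow path, compaction, reclaim) can pass
 * them around as one object instead of a long argument list.
 */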
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;
	enum zone_type high_zoneidx;
	bool spread_dirty_pages;
};

#define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)

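/*
 * Locate the page frame number of the buddy of the order-@order block that
 * starts at @page_pfn.  The buddy is found by flipping bit @order of the
 * pfn, which is how the buddy allocator pairs blocks for merging.
 *
 * Example (illustrative): __find_buddy_pfn(8, 2) returns 12, and
 * __find_buddy_pfn(12, 2) returns 8; the two order-2 blocks covering
 * pfns 8-11 and 12-15 are buddies of each other.
 */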
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}
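
/*
 * When zone->contiguous is set, every pfn in the zone is known to be valid
 * and to belong to this zone, so the pageblock can be converted with a plain
 * pfn_to_page().  Otherwise __pageblock_pfn_to_page() validates the pfn
 * range (pfn_valid and zone membership) before returning a page.
 */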

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

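/*
 * compact_control tracks the state of one memory compaction run: the
 * positions of the free and migration scanners, the pages isolated so far,
 * and the flags that control how aggressively the zone is scanned.
 */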
struct compact_control {
	struct list_head freepages;
	struct list_head migratepages;
	unsigned int nr_freepages;
	unsigned int nr_migratepages;
	unsigned long free_pfn;
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;
	short search_order;
	const gfp_t gfp_mask;
	int order;
	int migratetype;
	const unsigned int alloc_flags;
	const int classzone_idx;
	enum migrate_mode mode;
	bool ignore_skip_hint;
	bool no_set_skip_hint;
	bool ignore_block_suitable;
	bool direct_compaction;
	bool whole_zone;
	bool contended;
	bool rescan;
};

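/*
 * Used by direct compaction to capture a suitable free page as soon as one
 * of the requested order becomes available, instead of returning it to the
 * free lists and racing with other allocators for it.
 */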
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif

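/*
 * Returns the order of a free page in the buddy system.  The result is only
 * meaningful while the page is actually in the buddy allocator; callers
 * normally hold zone->lock, or must otherwise tolerate a stale value (see
 * page_order_unsafe() below).
 */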
static inline unsigned int page_order(struct page *page)
{
	return page_private(page);
}

#define page_order_unsafe(page)		READ_ONCE(page_private(page))

static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
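
/*
 * Example (illustrative): a MAP_PRIVATE mapping created with PROT_READ and
 * PROT_WRITE has VM_MAYWRITE set and VM_SHARED clear, so it is a COW
 * mapping even if it is later mprotect()ed to read-only, whereas any
 * MAP_SHARED mapping is not.
 */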
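/*
 * The next three helpers classify VMAs for virtual memory accounting
 * (the VmExe, VmStk and VmData counters exposed in /proc/<pid>/status).
 */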
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

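/*
 * mlock_vma_page() marks a page found in a VM_LOCKED vma as mlocked
 * (PG_mlocked) and updates the NR_MLOCK statistics; munlock_vma_page()
 * undoes this when the page is removed from a locked vma.
 */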
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

extern void clear_page_mlock(struct page *page);

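/*
 * When a page is migrated, transfer its PG_mlocked state and the NR_MLOCK
 * accounting from the old page to its replacement.
 */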
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = hpage_nr_pages(page);

		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

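/*
 * At what user virtual address is this page expected to be mapped in @vma?
 * __vma_address() does the raw pgoff arithmetic; vma_address() additionally
 * clamps the result to the vma start, so that a THP whose head lies before
 * vm_start still yields an address inside the vma.
 *
 * Example (illustrative): for a vma with vm_start = 0x7f0000000000 and
 * vm_pgoff = 8, a page with pgoff 10 maps at 0x7f0000000000 + 2 * PAGE_SIZE.
 */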
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

	return max(start, vma->vm_start);
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

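	/*
	 * Pin the file and drop mmap_sem only when the fault can
	 * legitimately be retried: FAULT_FLAG_ALLOW_RETRY must be set and
	 * FAULT_FLAG_RETRY_NOWAIT must be clear, i.e. the caller is willing
	 * to wait for I/O without holding the lock.
	 */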
	if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
	    FAULT_FLAG_ALLOW_RETRY) {
		fpin = get_file(vmf->vma->vm_file);
		up_read(&vmf->vma->vm_mm->mmap_sem);
	}
	return fpin;
}

#else
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif

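/*
 * Iterating over the subpages of a higher-order or huge page: when the
 * mem_map is not virtually contiguous across MAX_ORDER_NR_PAGES boundaries
 * (e.g. SPARSEMEM without VMEMMAP), mem_map_offset() and mem_map_next()
 * fall back to pfn arithmetic whenever an iteration crosses such a boundary.
 */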
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
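
/*
 * Example (illustrative): memory-init code emits messages of the form
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *			"Initialising map node %d zone %lu\n", nid, zone_id);
 *
 * They are printed only when the level is below mminit_loglevel (which can
 * be raised via the mminit_loglevel= boot parameter); MMINIT_WARNING
 * messages go through pr_warn(), everything else is logged at KERN_DEBUG.
 */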

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif

#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);

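/*
 * The ALLOC_* flags below are internal allocator flags: the ALLOC_WMARK_*
 * values select which watermark an allocation is checked against (with
 * ALLOC_WMARK_MASK extracting that part), ALLOC_NO_WATERMARKS and ALLOC_OOM
 * grant access to memory reserves, ALLOC_HARDER and ALLOC_HIGH relax the
 * watermark checks, and the remaining bits control placement (cpuset
 * enforcement, CMA eligibility, fragmentation avoidance, kswapd wakeup).
 */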
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04

#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		0x10
#define ALLOC_HIGH		0x20
#define ALLOC_CPUSET		0x40
#define ALLOC_CMA		0x80
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100
#else
#define ALLOC_NOFRAGMENT	0x0
#endif
#define ALLOC_KSWAPD		0x200

enum ttu_flags;
struct tlbflush_unmap_batch;

extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

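/*
 * MIGRATE_HIGHATOMIC pageblocks are reserved for high-order atomic
 * allocations; these helpers test whether a migratetype or a page's
 * pageblock belongs to that reserve.
 */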
static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);
extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
#endif