This source file includes the following definitions:
- reset_vma_resv_huge_pages
- hugetlb_total_pages
- huge_pmd_unshare
- adjust_range_if_pmd_sharing_possible
- hugetlb_report_meminfo
- hugetlb_show_meminfo
- isolate_huge_page
- hugetlb_change_protection
- __unmap_hugepage_range_final
- __unmap_hugepage_range
- hugetlb_fault
- pgd_write
- HUGETLBFS_SB
- HUGETLBFS_I
- is_file_hugepages
- hstate_inode
- hugetlb_file_setup
- hstate_inode
- hstate_file
- hstate_sizelog
- hstate_vma
- huge_page_size
- huge_page_mask
- huge_page_order
- huge_page_shift
- hstate_is_gigantic
- pages_per_huge_page
- blocks_per_huge_page
- arch_make_huge_pte
- page_hstate
- hstate_index_to_shift
- hstate_index
- basepage_index
- arch_hugetlb_migration_supported
- arch_hugetlb_migration_supported
- hugepage_migration_supported
- hugepage_movable_supported
- huge_pte_lockptr
- hugetlb_count_add
- hugetlb_count_sub
- set_huge_swap_pte_at
- huge_ptep_modify_prot_start
- huge_ptep_modify_prot_commit
- alloc_huge_page
- alloc_huge_page_node
- alloc_huge_page_nodemask
- alloc_huge_page_vma
- __alloc_bootmem_huge_page
- hstate_file
- hstate_sizelog
- hstate_vma
- page_hstate
- huge_page_size
- huge_page_mask
- vma_kernel_pagesize
- vma_mmu_pagesize
- huge_page_order
- huge_page_shift
- hstate_is_gigantic
- pages_per_huge_page
- hstate_index_to_shift
- hstate_index
- basepage_index
- dissolve_free_huge_page
- dissolve_free_huge_pages
- hugepage_migration_supported
- hugepage_movable_supported
- huge_pte_lockptr
- hugetlb_report_usage
- hugetlb_count_sub
- set_huge_swap_pte_at
- huge_pte_lock
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes
                                 * both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to
                                 * satisfy minimum size. */
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
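
/*
 * Illustrative sketch (not part of the original header): iterating over
 * every registered huge page size with for_each_hstate(), e.g. for
 * reporting, assuming hugetlb has been initialized:
 *
 *      struct hstate *h;
 *
 *      for_each_hstate(h)
 *              pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */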

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                              long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
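
/*
 * Illustrative sketch (not part of the original header): hugetlbfs creates
 * one subpool per mount to bound huge page usage; a minimal sketch,
 * assuming a valid hstate and limits expressed in huge pages:
 *
 *      struct hugepage_subpool *spool;
 *
 *      spool = hugepage_new_subpool(&default_hstate, max_hpages, min_hpages);
 *      if (!spool)
 *              return -ENOMEM;
 *      ...
 *      hugepage_put_subpool(spool);
 */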

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
                                     void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                             struct vm_area_struct *dst_vma,
                             unsigned long dst_addr,
                             unsigned long src_addr,
                             struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                          struct vm_area_struct *vma,
                          vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                             long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
                             pgoff_t idx, unsigned long address);

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                          unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                             pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
                             pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else   /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
                                   pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                        struct vm_area_struct *vma,
                        unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)       ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)  ({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)     0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift)  NULL
#define follow_huge_pmd(mm, addr, pmd, flags)   NULL
#define follow_huge_pud(mm, addr, pud, flags)   NULL
#define follow_huge_pgd(mm, addr, pgd, flags)   NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x)     0
#define pud_huge(x)     0
#define is_hugepage_only_range(mm, addr, len)   0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
                                src_addr, pagep)        ({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)        0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}
#define putback_active_hugepage(p)      do {} while (0)
#define move_hugetlb_state(old, new, reason)    do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

#endif  /* !CONFIG_HUGETLB_PAGE */

#ifndef pgd_huge
#define pgd_huge(x)     0
#endif
#ifndef p4d_huge
#define p4d_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE       "anon_hugepage"

enum {
        /*
         * The file will be used as an shm file so shmfs accounting rules
         * apply
         */
        HUGETLB_SHMFS_INODE = 1,
        /*
         * The file is being created on an internal mount and shouldn't be
         * counted against quotas.
         */
        HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long max_inodes;
        long free_inodes;
        spinlock_t stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t uid;
        kgid_t gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else   /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                   struct user_struct **user, int creat_flags,
                   int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}
#endif  /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif  /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files[5];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
                             unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                                      nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                 unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
                                     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                           pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}
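
/*
 * Illustrative note (not part of the original header): callers such as
 * mmap() pass the log2 huge page size encoded in their flags word,
 * roughly:
 *
 *      hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *
 * A zero log selects the default huge page size.
 */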

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}
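
/*
 * Illustrative note (not part of the original header): on x86-64 with
 * 2 MB huge pages, h->order is 9, so huge_page_size() is
 * 4 KiB << 9 = 2 MiB, pages_per_huge_page() is 512 base pages, and
 * blocks_per_huge_page() is 2 MiB / 512 = 4096 sectors.
 */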

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
            (huge_page_shift(h) == PUD_SHIFT) ||
            (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}

/*
 * Movability check is different from the migration check: it decides
 * whether a huge page may be placed in ZONE_MOVABLE. That only makes
 * sense for sizes that are supported for migration, and gigantic pages,
 * while migratable, are excluded because migrating them out of the
 * movable zone is not feasible in practice.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;
        return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0 when
 * there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
        set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#else   /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
                                           unsigned long addr,
                                           int avoid_reserve)
{
        return NULL;
}

static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
        return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
{
        return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
                                               struct vm_area_struct *vma,
                                               unsigned long address)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
        return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
                                           unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif  /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}
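
/*
 * Illustrative sketch (not part of the original header): the usual
 * lock/inspect/unlock pattern around a huge PTE, assuming ptep was
 * obtained via huge_pte_offset():
 *
 *      spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *      pte_t pte = huge_ptep_get(ptep);
 *      ... examine or update the entry ...
 *      spin_unlock(ptl);
 */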

#endif /* _LINUX_HUGETLB_H */