#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>

#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \
	CONFIG_PGTABLE_LEVELS
#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED
#endif

/*
 * On almost all architectures and configurations, 0 can be used as the
 * upper ceiling to free_pgtables(): on many architectures it has the same
 * effect as using TASK_SIZE.  However, there is one configuration which
 * must impose a more careful limit, to avoid freeing kernel pgtables.
 */
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING	0UL
#endif

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(pmdp);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp,
					    int full)
{
	return pmdp_get_and_clear(mm, address, pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif
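
/*
 * Illustrative sketch (not part of this header): reclaim-style code
 * typically ages a mapping with the "young" helpers above while
 * holding the pte lock, along these lines:
 *
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	if (ptep_clear_flush_young(vma, address, pte))
 *		referenced = 1;
 *	pte_unmap_unlock(pte, ptl);
 */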
/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or while the address space is being torn down.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif
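
/*
 * Illustrative sketch (not part of this header): pte_same() is the
 * usual way to revalidate an entry after the pte lock was dropped and
 * retaken, e.g. in a fault path:
 *
 *	spin_lock(ptl);
 *	if (unlikely(!pte_same(*ptep, orig_pte)))
 *		goto unlock;	(someone else changed the pte under us)
 *	... proceed with the update ...
 */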
#ifndef __HAVE_ARCH_PTE_UNUSED
/*
 * Some architectures provide facilities to virtualization guests
 * so that they can flag allocated pages as unused. This allows the
 * host to transparently reclaim unused pages. This function returns
 * whether the pte's page is unused.
 */
static inline int pte_unused(pte_t pte)
{
	return 0;
}
#endif

#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef pte_accessible
# define pte_accessible(mm, pte)	((void)(pte), 1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif

#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
		newprot = pgprot_noncached(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
		newprot = pgprot_writecombine(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
		newprot = pgprot_device(newprot);
	return newprot;
}
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
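
/*
 * Illustrative sketch (not part of this header): the p?d_addr_end()
 * macros above are normally paired with the p?d_none_or_clear_bad()
 * helpers declared below, in nested walk loops of this shape:
 *
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	unsigned long next;
 *
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		... walk the ptes in [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);
 */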
/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
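
/*
 * Illustrative sketch (an assumption, not mandated by this header): a
 * protection change done under the pte lock would use the transaction
 * pair roughly like this:
 *
 *	ptent = ptep_modify_prot_start(mm, addr, ptep);
 *	ptent = pte_modify(ptent, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, ptent);
 */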
#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection
 * of the page table locks for all page tables which may be modified.  In
 * the UP case, this is required so that preemption is disabled, and in the
 * SMP case, it must synchronize the delayed page table writes properly on
 * other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
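
/*
 * Illustrative sketch (not part of this header): a loop updating many
 * ptes in a row can be bracketed by the lazy MMU hooks, with the page
 * table lock held across the whole region:
 *
 *	arch_enter_lazy_mmu_mode();
 *	do {
 *		set_pte_at(mm, addr, pte, ...);
 *	} while (pte++, addr += PAGE_SIZE, addr != end);
 *	arch_leave_lazy_mmu_mode();
 */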
/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entries and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte;
}
#endif

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * the memory type of pfn mappings specified by remap_pfn_range() and
 * vm_insert_pfn().
 */

/*
 * track_pfn_remap is called when a _new_ pfn mapping is being established
 * by remap_pfn_range() for the physical range indicated by pfn and size.
 */
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
				  unsigned long pfn, unsigned long addr,
				  unsigned long size)
{
	return 0;
}

/*
 * track_pfn_insert is called when a _new_ single pfn is established
 * by vm_insert_pfn().
 */
static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
				   unsigned long pfn)
{
	return 0;
}

/*
 * track_pfn_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size,
 * or for the entire vma (in which case pfn and size are zero).
 */
static inline void untrack_pfn(struct vm_area_struct *vma,
			       unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
			   unsigned long pfn, unsigned long addr,
			   unsigned long size);
extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
			    unsigned long pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size);
#endif

#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	extern unsigned long zero_pfn;
	return zero_pfn;
}
#endif

#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return 0;
}
#ifndef __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	/*
	 * Depend on the compiler for an atomic pmd read.  NOTE: this
	 * will only work if pmdval_t isn't larger than an unsigned
	 * long.
	 */
	return *pmdp;
}
#endif

#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl)
{
	/*
	 * With split pmd lock we also need to move the preallocated
	 * PTE page table if new_pmd is on a different PMD page table.
	 */
	return new_pmd_ptl != old_pmd_ptl;
}
#endif

/*
 * This function is meant to be used by sites walking pagetables with
 * the mmap_sem held in read mode to protect against MADV_DONTNEED and
 * transhuge page faults.  MADV_DONTNEED can convert a transhuge pmd
 * into a null pmd and the transhuge page fault can convert a null pmd
 * into a huge pmd or into a regular pmd (if the hugepage allocation
 * fails).  While holding the mmap_sem in read mode the pmd becomes
 * stable and stops changing under us only if it's not null and not a
 * transhuge pmd.  When those races occur and this function makes a
 * difference vs the standard pmd_none_or_clear_bad, the result is
 * undefined, so behaving as if the pmd were none is safe (because it
 * can return none anyway).  The compiler-level barrier() is critically
 * important to compute the two checks atomically on the same pmdval.
 *
 * For 32bit kernels with a 64bit large pmd_t this automatically takes
 * care of reading the pmd atomically to avoid SMP race conditions
 * against pmd_populate() when the mmap_sem is held for reading by the
 * caller (a special atomic read not done by "gcc" as in the generic
 * version above is also needed when THP is disabled, because the page
 * fault can populate the pmd from under us).
 */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);
	/*
	 * The barrier will stabilize the pmdval in a register or on
	 * the stack so that it will stop changing under the code.
	 *
	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
	 * pmd_read_atomic is allowed to return a non-atomic pmdval
	 * (for example pointing to a hugepage that has never been
	 * mapped in the pmd).  The below checks will only care about
	 * the low part of the pmd with 32bit PAE x86 anyway, with the
	 * exception of pmd_none().  So the important thing is that if
	 * the low part of the pmd is found null, the high part will
	 * also be null, or the pmd_none() check below would be
	 * confused.
	 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

/*
 * This is a noop if Transparent Hugepage Support is not built into
 * the kernel.  Otherwise it is equivalent to
 * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
 * places that already verified the pmd is not none and want to walk
 * ptes while holding the mmap_sem in read mode (write mode doesn't
 * need this).  If THP is not enabled, the pmd can't go away under the
 * code even if MADV_DONTNEED runs, but if THP is enabled we need to
 * run a pmd_trans_unstable before walking the ptes after
 * split_huge_page_pmd returns (because it may have run while the pmd
 * became null, in which case a page fault can map in a THP rather
 * than a regular page).
 */
static inline int pmd_trans_unstable(pmd_t *pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return pmd_none_or_trans_huge_or_clear_bad(pmd);
#else
	return 0;
#endif
}
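
/*
 * Illustrative sketch (not part of this header): a pte walker holding
 * the mmap_sem for reading typically revalidates the pmd with
 * pmd_trans_unstable() before mapping the pte page:
 *
 *	if (pmd_trans_unstable(pmd))
 *		return 0;	(treat as empty; a fault may be remapping it)
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 */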
#ifndef CONFIG_NUMA_BALANCING
/*
 * Technically a PTE can be PROTNONE even when not doing NUMA balancing,
 * but the only case the kernel cares about is NUMA balancing, and the
 * bit is only ever set when the VMA is accessible.  For PROT_NONE VMAs,
 * the PTEs are not marked _PAGE_PROTNONE, so by default implement the
 * helper as "always no".  It is the responsibility of the caller to
 * distinguish between PROT_NONE protections and NUMA hinting fault
 * protections.
 */
static inline int pte_protnone(pte_t pte)
{
	return 0;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_MMU */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pud_clear_huge(pud_t *pud)
{
	return 0;
}
static inline int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#endif /* !__ASSEMBLY__ */

#ifndef io_remap_pfn_range
#define io_remap_pfn_range remap_pfn_range
#endif

#endif /* _ASM_GENERIC_PGTABLE_H */