#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
#define _ASM_POWERPC_PGTABLE_PPC64_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/pgtable-ppc64-64k.h>
#else
#include <asm/pgtable-ppc64-4k.h>
#endif
#include <asm/barrier.h>

#define FIRST_USER_ADDRESS	0UL

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE	(PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
				 PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE		(ASM_CONST(1) << PGTABLE_EADDR_SIZE)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PMD_CACHE_INDEX	(PMD_INDEX_SIZE + 1)
#else
#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#endif

/*
 * Define the address range of the kernel non-linear virtual area
 */
#ifdef CONFIG_PPC_BOOK3E
#define KERN_VIRT_START	ASM_CONST(0x8000000000000000)
#else
#define KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#endif
#define KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define VMALLOC_START	KERN_VIRT_START
#ifdef CONFIG_PPC_BOOK3E
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 2)
#else
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#endif
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)

/*
 * The second half of the kernel virtual space is used for IO mappings;
 * it is itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space:
 *
 *	ISA_IO_BASE  = KERN_IO_START, 64K reserved area
 *	PHB_IO_BASE  = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 *	IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define ISA_IO_BASE	(KERN_IO_START)
#define ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define PHB_IO_BASE	(ISA_IO_END)
#define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs and after the vmalloc space on Book3E
 */
#ifdef CONFIG_PPC_BOOK3E
#define VMEMMAP_BASE		VMALLOC_END
#define VMEMMAP_END		KERN_IO_START
#else
#define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
#endif
#define vmemmap			((struct page *)VMEMMAP_BASE)
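
/*
 * For illustration (derived from the constants above, not additional
 * definitions), the resulting layout on hash (Book3S) CPUs works out to:
 *
 *	0xD000000000000000	VMALLOC_START
 *	0xD000080000000000	VMALLOC_END = KERN_IO_START = ISA_IO_BASE
 *	0xD000080000010000	ISA_IO_END = PHB_IO_BASE
 *	0xD000080080000000	PHB_IO_END = IOREMAP_BASE
 *	0xD000100000000000	IOREMAP_END
 *	0xF000000000000000	VMEMMAP_BASE	(own region, server only)
 */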

/*
 * Include the PTE bits definitions
 */
#ifdef CONFIG_PPC_BOOK3S
#include <asm/pte-hash64.h>
#else
#include <asm/pte-book3e.h>
#endif
#include <asm/pte-common.h>

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors. It is
 * used in all cases except Book3S with 64K pages, where we have a
 * concept of sub-pages.
 */
#ifndef __real_pte

#ifdef STRICT_MM_TYPECHECKS
#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#else
#define __real_pte(e,p)		(e)
#define __rpte_to_pte(r)	(__pte(r))
#endif
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)

#ifdef CONFIG_PPC_HAS_HASH_64K
/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)			\
	({							\
		unsigned int psize;				\
		if (is_kernel_addr(addr))			\
			psize = MMU_PAGE_4K;			\
		else						\
			psize = get_slice_psize(mm, addr);	\
		psize;						\
	})
#else
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
#endif

#endif /* __real_pte */
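
/*
 * Typical use of the subpage iterator pair defined above (a sketch; the
 * real callers live in the hash MMU flush/invalidate paths):
 *
 *	pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
 *		... invalidate the hash slot for this subpage ...
 *	} pte_iterate_hashed_end();
 *
 * In this default implementation there are no sub-pages, so the body runs
 * exactly once, with index == 0 and shift set to the base shift of psize.
 */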

/* pte_clear moved to later in this file */

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#define pmd_set(pmdp, pmdval)	(pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
				 || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd)	(!pmd_none(pmd))
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
extern struct page *pmd_page(pmd_t pmd);

#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
				 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
#define pud_page_vaddr(pud)	(pud_val(pud) & ~PUD_MASKED_BITS)

extern struct page *pud_page(pud_t pud);

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))
#define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
#define pgd_write(pgd)		pte_write(pgd_pte(pgd))

/*
 * Find an entry in a page-table-directory. We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       unsigned long set,
				       int huge)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
	: "cc" );
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte((old & ~clr) | set);
#endif
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

#ifdef CONFIG_PPC_STD_MMU_64
	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);
#endif

	return old;
}

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		   \
({									   \
	int __r;							   \
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								   \
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}
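
/*
 * For illustration: both wrprotect helpers above boil down to
 *
 *	old = pte_update(mm, addr, ptep, _PAGE_RW, 0, huge);
 *
 * i.e. an atomic "*ptep = (old & ~_PAGE_RW) | 0" that spins while the PTE
 * is _PAGE_BUSY and, if the old PTE had been hashed in (_PAGE_HASHPTE),
 * invalidates the stale hash entry via hpte_need_flush().
 */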

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this, but for the moment we override
 * these functions and force a TLB flush unconditionally.
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}


/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte(old | bits);
#endif
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t){((type) << 1)|((offset) << 8)})
#define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
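
/*
 * A worked example of the swap encoding above: __swp_entry(type, offset)
 * packs val = (type << 1) | (offset << 8), giving a 6-bit type and leaving
 * bit 0 clear; __swp_entry_to_pte() then shifts val up by PTE_RPN_SHIFT,
 * so none of the low PTE status bits (in particular _PAGE_PRESENT) can be
 * set by a swap entry.
 */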

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
#endif /* __ASSEMBLY__ */

/*
 * THP pages can't be special. So use the _PAGE_SPECIAL bit.
 */
#define _PAGE_SPLITTING _PAGE_SPECIAL

/*
 * We need to differentiate between explicit huge pages and THP huge
 * pages, since a THP huge page also needs to track real subpage details.
 */
#define _PAGE_THP_HUGE  _PAGE_4K_PFN

/*
 * Set of bits not changed in pmd_modify.
 */
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS |	\
			 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
			 _PAGE_THP_HUGE)

#ifndef __ASSEMBLY__
/*
 * The linux hugepage PMD now includes the pmd entries followed by the
 * address of the stashed pgtable_t. The stashed pgtable_t contains the
 * hpte bits: [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000 ]. We use
 * one byte for each HPTE entry. With a 16MB hugepage and 64K HPTEs we need
 * 256 entries, and with 4K HPTEs we need 4096 entries. Both will fit in a
 * 4K pgtable_t.
 *
 * The last three bits are intentionally left as zero. This memory location
 * is also used as a normal page PTE pointer. So if we have any pointers
 * left around while we collapse a hugepage, we need to make sure the
 * _PAGE_PRESENT bit of that is zero when we look at them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return (hpte_slot_array[index] >> 3) & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 4;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
}
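
/*
 * A worked example of the slot encoding above: for hidx == 0x5,
 * mark_hpte_slot_valid() stores (0x5 << 4) | (1 << 3) == 0x58, from which
 * hpte_valid() recovers (0x58 >> 3) & 0x1 == 1 and hpte_hash_index()
 * recovers 0x58 >> 4 == 0x5.
 */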

struct page *realmode_pfn_to_page(unsigned long pfn);

static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The hpte hidx is stored in the pgtable whose address is in the
	 * second half of the PMD.
	 *
	 * Order this load with the test for pmd_trans_huge in the caller.
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}

extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);
/*
 * For core kernel code, by design, pmd_trans_huge is never run on any
 * hugetlbfs page. The hugetlbfs page table walking and mangling paths are
 * totally separated from the core VM paths, and they're differentiated by
 * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run.
 *
 * pmd_trans_huge() is defined as false at build time if
 * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
 * time in that case.
 *
 * For ppc64 we need to differentiate explicit hugepages from THP, because
 * for THP we also track the subpage details at the pmd level. We don't do
 * that for explicit huge pages.
 */
static inline int pmd_trans_huge(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pmd_val(pmd) & _PAGE_SPLITTING;
	return 0;
}

extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int pmd_large(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pmd_val(pmd) & 0x3) != 0x0);
}
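
/*
 * Note the distinction with pmd_trans_huge() above: pmd_large() is true
 * for any huge leaf PMD, including explicit hugetlbfs mappings, while
 * pmd_trans_huge() additionally requires _PAGE_THP_HUGE and is therefore
 * true only for THP.
 */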

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	/* Do nothing, mk_pmd() does this part. */
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_PRESENT;
	return pmd;
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;
	return pmd;
}

#define __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
					 unsigned long addr,
					 pmd_t *pmdp,
					 unsigned long clr,
					 unsigned long set);

static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pmd_t *pmdp)
{
	unsigned long old;

	if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
	return ((old & _PAGE_ACCESSED) != 0);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
extern pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
			      pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pmd_t *pmdp)
{
	if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
		return;

	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl)
{
	/*
	 * Archs like ppc64 use the pgtable to store per-pmd specific
	 * information. So when we switch the pmd, we should also withdraw
	 * and deposit the pgtable.
	 */
	return true;
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */