#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h>.
 *
 * Copyright (C) 1998-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */


#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */

/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT		0
#define _PAGE_A_BIT		5
#define _PAGE_D_BIT		6

#define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
#define _PAGE_MA_WB		(0x0 << 2)	/* write back memory attribute */
#define _PAGE_MA_UC		(0x4 << 2)	/* uncacheable memory attribute */
#define _PAGE_MA_UCE		(0x5 << 2)	/* UC exported attribute */
#define _PAGE_MA_WC		(0x6 << 2)	/* write coalescing memory attribute */
#define _PAGE_MA_NAT		(0x7 << 2)	/* not-a-thing attribute */
#define _PAGE_MA_MASK		(0x7 << 2)
#define _PAGE_PL_0		(0 << 7)	/* privilege level 0 (kernel) */
#define _PAGE_PL_1		(1 << 7)	/* privilege level 1 (unused) */
#define _PAGE_PL_2		(2 << 7)	/* privilege level 2 (unused) */
#define _PAGE_PL_3		(3 << 7)	/* privilege level 3 (user) */
#define _PAGE_PL_MASK		(3 << 7)
#define _PAGE_AR_R		(0 << 9)	/* read only */
#define _PAGE_AR_RX		(1 << 9)	/* read & execute */
#define _PAGE_AR_RW		(2 << 9)	/* read & write */
#define _PAGE_AR_RWX		(3 << 9)	/* read, write & execute */
#define _PAGE_AR_R_RW		(4 << 9)	/* read / read & write */
#define _PAGE_AR_RX_RWX		(5 << 9)	/* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW		(6 << 9)	/* read, write & exec / read & write */
#define _PAGE_AR_X_RX		(7 << 9)	/* exec & promote / read & exec */
#define _PAGE_AR_MASK		(7 << 9)
#define _PAGE_AR_SHIFT		9
#define _PAGE_A			(1 << _PAGE_A_BIT)	/* page accessed bit */
#define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
#define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
#define _PAGE_PROTNONE		(__IA64_UL(1) << 63)

#define _PFN_MASK		_PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK	(_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)

#define _PAGE_SIZE_4K	12
#define _PAGE_SIZE_8K	13
#define _PAGE_SIZE_16K	14
#define _PAGE_SIZE_64K	16
#define _PAGE_SIZE_256K	18
#define _PAGE_SIZE_1M	20
#define _PAGE_SIZE_4M	22
#define _PAGE_SIZE_16M	24
#define _PAGE_SIZE_64M	26
#define _PAGE_SIZE_256M	28
#define _PAGE_SIZE_1G	30
#define _PAGE_SIZE_4G	32

#define __ACCESS_BITS		_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED	_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS		_PAGE_ED | __DIRTY_BITS_NO_ED
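/*
 * Illustrative sketch, not part of the original header: the bit fields
 * above compose directly into a translation.  For example, a present,
 * accessed, dirty, write-back, kernel-RWX mapping of physical address
 * `paddr' would be encoded as
 *
 *	pte = (paddr & _PAGE_PPN_MASK) | _PAGE_P | _PAGE_A | _PAGE_D
 *	      | _PAGE_ED | _PAGE_MA_WB | _PAGE_PL_0 | _PAGE_AR_RWX;
 *
 * i.e. the page frame bits plus __DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX,
 * which is exactly what the PAGE_KERNEL protection defined below provides.
 */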
/*
 * How many pointers a page-table level holds, expressed as a shift:
 */
#define PTRS_PER_PTD_SHIFT	(PAGE_SHIFT-3)

/*
 * Definitions for fourth level:
 */
#define PTRS_PER_PTE	(__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))

/*
 * Definitions for third level:
 *
 * PMD_SHIFT determines the size of the area a third-level page table
 * can map.
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PTRS_PER_PMD	(1UL << (PTRS_PER_PTD_SHIFT))

#if CONFIG_PGTABLE_LEVELS == 4
/*
 * Definitions for second level:
 *
 * PUD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PUD_SHIFT	(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PTRS_PER_PUD	(1UL << (PTRS_PER_PTD_SHIFT))
#endif

/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#if CONFIG_PGTABLE_LEVELS == 4
#define PGDIR_SHIFT		(PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
#else
#define PGDIR_SHIFT		(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#endif
#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD_SHIFT	PTRS_PER_PTD_SHIFT
#define PTRS_PER_PGD		(1UL << PTRS_PER_PGD_SHIFT)
#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
#define FIRST_USER_ADDRESS	0UL
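/*
 * Illustrative worked example, not part of the original header: with
 * 16KB pages (PAGE_SHIFT = 14), PTRS_PER_PTD_SHIFT = 11, so each level
 * holds 2048 pointers and
 *
 *	PMD_SHIFT   = 14 + 11 = 25	(a PMD entry maps 32MB)
 *	PUD_SHIFT   = 25 + 11 = 36	(a PUD entry maps 64GB, 4-level only)
 *	PGDIR_SHIFT = 36 + 11 = 47	(4 levels), or 25 + 11 = 36 (3 levels)
 */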
/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed.  They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY_EXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
#define PAGE_KERNEL_UC	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX | \
				 _PAGE_MA_UC)

# ifndef __ASSEMBLY__

#include <linux/sched.h>	/* for mm_struct */
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version gets used for a private memory segment, the _S version
 * gets used for a segment mapped with MAP_SHARED on.  In a private
 * segment, we do a copy-on-write if a task attempts to write to the
 * page.
 */
	/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_READONLY	/* write to priv pg -> copy & make writable */
#define __P011	PAGE_READONLY	/* ditto */
#define __P100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED	/* we don't have (and don't need) write-only */
#define __S011	PAGE_SHARED
#define __S100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)

#define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#if CONFIG_PGTABLE_LEVELS == 4
#define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))


/*
 * Some definitions to translate between mem_map, PTEs, and page addresses:
 */


/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr)	(1)


/*
 * Now come the defines and routines to manage and access the three-level
 * page table.
 */


#define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
extern unsigned long VMALLOC_END;
#else
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
# define vmemmap		((struct page *)VMALLOC_END)
#else
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
#endif
#endif

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v)	((v) - RGN_BASE(RGN_GATE))
#define kc_offset_to_vaddr(o)	((o) + RGN_BASE(RGN_GATE))

#define RGN_MAP_SHIFT	(PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
#define RGN_MAP_LIMIT	((1UL << RGN_MAP_SHIFT) - PAGE_SIZE)	/* per region addr limit */

/*
 * Conversion functions: convert a page frame number (pfn) and a protection value to a
 * page table entry (pte).
 */
#define pfn_pte(pfn, pgprot) \
({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })

/* Extract pfn from pte. */
#define pte_pfn(_pte)		((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))

#define pte_none(pte)			(!pte_val(pte))
#define pte_present(pte)		(pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(mm,addr,pte)		(pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte)			virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))
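/*
 * Illustrative sketch, not part of the original header: the conversions
 * above round-trip, since pfn_pte() places the pfn in exactly the
 * _PFN_MASK bits that pte_pfn() extracts:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_READONLY);
 *	BUG_ON(pte_pfn(pte) != pfn);
 *
 * and pte_modify() preserves the pfn while replacing only the bits in
 * _PAGE_CHG_MASK, e.g. pte_modify(pte, PAGE_SHARED) upgrades the access
 * rights without touching the page frame.
 */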
#define pmd_none(pmd)			(!pmd_val(pmd))
#define pmd_bad(pmd)			(!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd)		(pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pmd_page_vaddr(pmd)		((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pmd_page(pmd)			virt_to_page((pmd_val(pmd) + PAGE_OFFSET))

#define pud_none(pud)			(!pud_val(pud))
#define pud_bad(pud)			(!ia64_phys_addr_valid(pud_val(pud)))
#define pud_present(pud)		(pud_val(pud) != 0UL)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
#define pud_page_vaddr(pud)		((unsigned long) __va(pud_val(pud) & _PFN_MASK))
#define pud_page(pud)			virt_to_page((pud_val(pud) + PAGE_OFFSET))

#if CONFIG_PGTABLE_LEVELS == 4
#define pgd_none(pgd)			(!pgd_val(pgd))
#define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd)		(pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
#define pgd_page_vaddr(pgd)		((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
#define pgd_page(pgd)			virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
#endif

/*
 * The following have defined behavior only if pte_present() is true.
 */
/* AR values 2..6 (RW, RWX, R_RW, RX_RWX, RWX_RW) all permit user writes: */
#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
#define pte_special(pte)	0

/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkold(pte)		(__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
#define pte_mkhuge(pte)		(__pte(pte_val(pte)))
#define pte_mkspecial(pte)	(pte)

/*
 * Because ia64's Icache and Dcache are not coherent (on a cpu), we need to
 * sync the icache and dcache when we insert a *new* executable page.
 * __ia64_sync_icache_dcache() checks the PG_arch_1 bit and flushes the
 * icache if necessary.
 *
 * set_pte() is also called by the kernel, but we can expect that the kernel
 * flushes the icache explicitly if necessary.
 */
#define pte_present_exec_user(pte)\
	((pte_val(pte) & (_PAGE_P | _PAGE_PL_MASK | _PAGE_AR_RX)) == \
		(_PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX))

extern void __ia64_sync_icache_dcache(pte_t pteval);
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	/* page is present && page is user && page is executable
	 * && (page swapin or new page or page migration
	 *	|| copy_on_write with page copying.)
	 */
	if (pte_present_exec_user(pteval) &&
	    (!pte_present(*ptep) ||
	     pte_pfn(*ptep) != pte_pfn(pteval)))
		/* load_module() calls flush_icache_range() explicitly */
		__ia64_sync_icache_dcache(pteval);
	*ptep = pteval;
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
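/*
 * Illustrative sketch, not part of the original header: establishing a
 * user-executable mapping goes through set_pte_at(), so the icache/dcache
 * sync above happens exactly when a *new* executable user page appears.
 * The helper name below is hypothetical.
 */
static inline void
example_set_user_exec_pte (struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, unsigned long pfn)
{
	/* present + accessed + WB + user + read/exec: matches pte_present_exec_user() */
	pte_t pteval = pfn_pte(pfn, __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX));

	set_pte_at(mm, addr, ptep, pteval);	/* syncs caches if the pfn changed */
}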
/*
 * Make page protection values cacheable, uncacheable, or write-
 * combining.  Note that "protection" is really a misnomer here as the
 * protection value contains the memory attribute bits, dirty bits, and
 * various other bits as well.
 */
#define pgprot_cacheable(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WB)
#define pgprot_noncached(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
#define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

static inline unsigned long
pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}

/* The offset in the 1-level directory is given by the 3 region bits
   (61..63) and the level-1 bits.  */
static inline pgd_t*
pgd_offset (const struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we completely ignore the region number
   (since we know it's in region number 5). */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Look up a pgd entry in the gate area.  On IA-64, the gate area
   resides in the kernel-mapped segment, hence we use pgd_offset_k()
   here.  */
#define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)

#if CONFIG_PGTABLE_LEVELS == 4
/* Find an entry in the second-level page table.. */
#define pud_offset(dir,addr) \
	((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
#endif

/* Find an entry in the third-level page table.. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pud_page_vaddr(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/*
 * Find an entry in the last level (pte) of the page table.  This looks more
 * complicated than it should be because some platforms place page tables in
 * high memory.
 */
#define pte_index(addr)			(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_unmap(pte)			do { } while (0)
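/*
 * Illustrative sketch, not part of the original header: a full software
 * walk using the accessors above (4-level configuration shown):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * with pgd_none()/pud_none()/pmd_none() checks before each descent; on
 * 3-level configurations the pud step is folded away by
 * <asm-generic/pgtable-nopud.h>, included at the bottom of this file.
 */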
/* atomic versions of some of the PTE manipulations: */

static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
#endif
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}

static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#define update_mmu_cache(vma, address, ptep) do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 *	 bits in the swap-type field of the swap pte.  It would be nice to
 *	 enforce that, but we can't easily include <linux/swap.h> here.
 *	 (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
 *
 * Format of swap pte:
 *	bit   0   : present bit (must be zero)
 *	bits  1- 7: swap-type
 *	bits  8-62: swap offset
 *	bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry)		(((entry).val >> 1) & 0x7f)
#define __swp_offset(entry)		(((entry).val << 1) >> 9)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 1) | ((long) (offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
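/*
 * Illustrative worked example, not part of the original header: packing
 * type 3, offset 0x1234 gives
 *
 *	__swp_entry(3, 0x1234).val = (3 << 1) | (0x1234 << 8) = 0x123406
 *
 * Bit 0 (the present bit) stays zero, __swp_type() recovers
 * (0x123406 >> 1) & 0x7f = 3, and __swp_offset() recovers
 * (0x123406 << 1) >> 9 = 0x1234; the shift-left-then-right in
 * __swp_offset() also discards the _PAGE_PROTNONE bit (bit 63).
 */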
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
#endif


#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *	(1) *PTEP has the PRESENT bit turned OFF
 *	(2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed && __safely_writable) {				\
		set_pte(__ptep, __entry);				\
		flush_tlb_page(__vma, __addr);				\
	}								\
	__changed;							\
})
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry);	\
		flush_tlb_page(__vma, __addr);				\
	}								\
	__changed;							\
})
#endif

# ifdef CONFIG_VIRTUAL_MEM_MAP
  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
# define __HAVE_ARCH_MEMMAP_INIT
  extern void memmap_init (unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */

/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE	(1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* These tell get_user_pages() that the first gate page is accessible from user-level. */
#define FIXADDR_USER_START	GATE_ADDR
#ifdef HAVE_BUGGY_SEGREL
# define FIXADDR_USER_END	(GATE_ADDR + 2*PAGE_SIZE)
#else
# define FIXADDR_USER_END	(GATE_ADDR + 2*PERCPU_PAGE_SIZE)
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE


#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm-generic/pgtable.h>

#endif /* _ASM_IA64_PGTABLE_H */