root/arch/alpha/include/asm/pgtable.h


DEFINITIONS

This source file includes the following definitions:
  1. pfn_pte
  2. pte_modify
  3. pmd_set
  4. pgd_set
  5. pmd_page_vaddr
  6. pgd_page_vaddr
  7. pte_none
  8. pte_present
  9. pte_clear
  10. pmd_none
  11. pmd_bad
  12. pmd_present
  13. pmd_clear
  14. pgd_none
  15. pgd_bad
  16. pgd_present
  17. pgd_clear
  18. pte_write
  19. pte_dirty
  20. pte_young
  21. pte_special
  22. pte_wrprotect
  23. pte_mkclean
  24. pte_mkold
  25. pte_mkwrite
  26. pte_mkdirty
  27. pte_mkyoung
  28. pte_mkspecial
  29. pmd_offset
  30. pte_offset_kernel
  31. update_mmu_cache
  32. mk_swap_pte

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the Alpha page table tree.
 *
 * This hopefully works with any standard Alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */
#include <linux/mmzone.h>

#include <asm/page.h>
#include <asm/processor.h>      /* For TASK_SIZE */
#include <asm/machvec.h>
#include <asm/setup.h>

struct mm_struct;
struct vm_area_struct;

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * Entries per page directory level:  the Alpha is three-level, with
 * all levels having a one-page page table.
 */
#define PTRS_PER_PTE    (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD    (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD    (1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS      0UL
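
/*
 * Worked example, assuming the usual 8KB page size (PAGE_SHIFT == 13):
 *
 *   PMD_SHIFT    = 13 + 10 = 23  =>  PMD_SIZE   = 8MB
 *   PGDIR_SHIFT  = 13 + 20 = 33  =>  PGDIR_SIZE = 8GB
 *   PTRS_PER_PTE = PTRS_PER_PMD = PTRS_PER_PGD = 1024
 *
 * so a full page of PTEs maps 8MB, a full page of PMDs maps 8GB, and
 * the three levels together span 1024 * 8GB = 2^43 = 8TB of virtual
 * address space.
 */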

/* Number of pointers that fit on a page:  this will go away. */
#define PTRS_PER_PAGE   (1UL << (PAGE_SHIFT-3))

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
#define VMALLOC_START           0xfffffe0000000000
#else
#define VMALLOC_START           (-2*PGDIR_SIZE)
#endif
#define VMALLOC_END             (-PGDIR_SIZE)
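
/*
 * With 8KB pages (PGDIR_SIZE == 2^33), the non-LARGE_VMALLOC range
 * works out to exactly one PGD slot, the second from the top:
 *
 *   VMALLOC_START = -2*PGDIR_SIZE = 0xfffffffc00000000
 *   VMALLOC_END   =   -PGDIR_SIZE = 0xfffffffe00000000
 *
 * i.e. 8GB of vmalloc space just below the topmost 8GB of the
 * address space.
 */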

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID     0x0001
#define _PAGE_FOR       0x0002  /* used for page protection (fault on read) */
#define _PAGE_FOW       0x0004  /* used for page protection (fault on write) */
#define _PAGE_FOE       0x0008  /* used for page protection (fault on exec) */
#define _PAGE_ASM       0x0010
#define _PAGE_KRE       0x0100  /* xxx - see below on the "accessed" bit */
#define _PAGE_URE       0x0200  /* xxx */
#define _PAGE_KWE       0x1000  /* used to do the dirty bit in software */
#define _PAGE_UWE       0x2000  /* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY     0x20000
#define _PAGE_ACCESSED  0x40000

/*
 * NOTE! The "accessed" bit isn't necessarily exact:  it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with handling both dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS    (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS   (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK       0xFFFFFFFF00000000UL

#define _PAGE_TABLE     (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK  (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)
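
/*
 * A PTE is thus laid out as
 *
 *   63                     32 31                      0
 *   +------------------------+------------------------+
 *   |          PFN           |  PAL + software bits   |
 *   +------------------------+------------------------+
 *
 * which is why pte_pfn() below is a plain ">> 32" and mk_pte() a
 * "pfn << 32 | pgprot".
 */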

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed. They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE       __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED     __pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY       __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY   __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL     __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c).
 */
        /* xwr */
#define __P000  _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001  _PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010  _PAGE_P(_PAGE_FOE)
#define __P011  _PAGE_P(_PAGE_FOE)
#define __P100  _PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101  _PAGE_P(_PAGE_FOW)
#define __P110  _PAGE_P(0)
#define __P111  _PAGE_P(0)

#define __S000  _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001  _PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010  _PAGE_S(_PAGE_FOE)
#define __S011  _PAGE_S(_PAGE_FOE)
#define __S100  _PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101  _PAGE_S(_PAGE_FOW)
#define __S110  _PAGE_S(0)
#define __S111  _PAGE_S(0)
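
/*
 * Example: in the table above, the private write-only case __P010 is
 * _PAGE_P(_PAGE_FOE), and _PAGE_P() forces _PAGE_FOW on, so the first
 * store faults and the fault handler can do copy-on-write.  The
 * shared counterpart __S010 is _PAGE_S(_PAGE_FOE): neither FOR nor
 * FOW set, i.e. the "-w- -> rw-" promotion described above.
 */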

/*
 * pgprot_noncached() is only for infiniband pci support, and a real
 * implementation for RAM would be more complicated.
 */
#define pgprot_noncached(prot)  (prot)

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero:  used
 * for zero-mapped memory areas etc.
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE   __bad_pagetable()
#define BAD_PAGE        __bad_page()
#define ZERO_PAGE(vaddr)        (virt_to_page(ZERO_PGE))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR                    (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK                        (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2                 3

/* to find an entry in a page-table */
#define PAGE_PTR(address)               \
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/*
 * On certain platforms whose physical address space can overlap KSEG,
 * namely EV6 and above, we must re-twiddle the physaddr to restore the
 * correct high-order bits.
 *
 * This is extremely confusing until you realize that this is actually
 * just working around a userspace bug.  The X server was intending to
 * provide the physical address but instead provided the KSEG address.
 * Or tried to, except it's not representable.
 *
 * On Tsunami there's nothing meaningful at 0x40000000000, so this is
 * a safe thing to do.  Come the first core logic that does put something
 * in this area -- memory or what have you -- then this hack will have
 * to go away.  So be prepared!
 */

#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define KSEG_PFN        (0xc0000000000UL >> PAGE_SHIFT)
#define PHYS_TWIDDLE(pfn) \
  ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
  ? ((pfn) ^= KSEG_PFN) : (pfn))
#else
#define PHYS_TWIDDLE(pfn) (pfn)
#endif
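
/*
 * Mechanically: KSEG_PFN covers physical address bits <43:42>.  A pfn
 * whose bits read 01 (the bogus 0x40000000000 region mentioned above)
 * has them flipped to 10 by the XOR, moving the address from the
 * 0x400xxxxxxxx range up to 0x800xxxxxxxx and thereby restoring the
 * high-order physical address bits the truncation lost.
 */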

/*
 * Conversion functions:  convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#ifndef CONFIG_DISCONTIGMEM
#define page_to_pa(page)        (((page) - mem_map) << PAGE_SHIFT)

#define pte_pfn(pte)    (pte_val(pte) >> 32)
#define pte_page(pte)   pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot)                                            \
({                                                                      \
        pte_t pte;                                                      \
                                                                        \
        pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot);  \
        pte;                                                            \
})
#endif

extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
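
/*
 * Both setters above store the page-table page's PFN in bits 63:32 of
 * the entry: subtracting PAGE_OFFSET converts the kernel virtual
 * pointer to a physical address, and the single "<< (32-PAGE_SHIFT)"
 * combines ">> PAGE_SHIFT" (phys -> pfn) with "<< 32" (pfn into the
 * _PFN_MASK position).
 */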


extern inline unsigned long
pmd_page_vaddr(pmd_t pmd)
{
        return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)   (mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
#define pgd_page(pgd)   (mem_map + ((pgd_val(pgd) & _PFN_MASK) >> 32))
#endif

extern inline unsigned long pgd_page_vaddr(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline int pte_none(pte_t pte)           { return !pte_val(pte); }
extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_val(*ptep) = 0;
}

extern inline int pmd_none(pmd_t pmd)           { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)            { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)        { return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)      { pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)           { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)            { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pgd_present(pgd_t pgd)        { return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp)      { pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_write(pte_t pte)          { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_special(pte_t pte)        { return 0; }

extern inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) |= __ACCESS_BITS; return pte; }
extern inline pte_t pte_mkspecial(pte_t pte)    { return pte; }

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)      (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * The smp_read_barrier_depends() calls in the following functions are
 * required to order the load of *dir (the pointer in the top level page
 * table) with any subsequent load of the returned pmd_t *ret (ret is
 * data dependent on *dir).
 *
 * If this ordering is not enforced, the CPU might load an older value of
 * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
 * more details.
 *
 * Note that we never change the mm->pgd pointer after the task is running, so
 * pgd_offset does not require such a barrier.
 */

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
        smp_read_barrier_depends(); /* see above */
        return ret;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
        pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
                + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
        smp_read_barrier_depends(); /* see above */
        return ret;
}

#define pte_offset_map(dir,addr)        pte_offset_kernel((dir),(addr))
#define pte_unmap(pte)                  do { } while (0)
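
/*
 * Putting the levels together, a software walk of a kernel address
 * looks like this (illustrative sketch only, not part of this header):
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pmd_t *pmd = pmd_offset(pgd, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * with pgd_none()/pmd_none() checks between the steps on any path
 * where the tables may be incomplete.
 */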

extern pgd_t swapper_pg_dir[1024];

/*
 * The Alpha doesn't have any external MMU info:  the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long address, pte_t *ptep)
{
}

/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define __swp_type(x)           (((x).val >> 32) & 0xff)
#define __swp_offset(x)         ((x).val >> 40)
#define __swp_entry(type, off)  ((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
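
/*
 * Example: mk_swap_pte(2, 0x1234) yields a pte_val of
 * (2UL << 32) | (0x1234UL << 40); __swp_type() recovers the 2 and
 * __swp_offset() the 0x1234.  The low 32 bits stay zero, so
 * _PAGE_VALID is clear and pte_present() is false, as required for a
 * non-present (swap) entry.
 */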

#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)   (1)
#endif

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void paging_init(void);

#include <asm-generic/pgtable.h>

/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT.  */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ALPHA_PGTABLE_H */
