arch/sh/include/asm/pgtable.h


DEFINITIONS

This source file includes the following definitions:
  1. neff_sign_extend
  2. phys_addr_mask
  3. update_mmu_cache
  4. __pte_access_permitted
  5. pte_access_permitted
  6. pte_access_permitted
  7. pte_access_permitted

/* SPDX-License-Identifier: GPL-2.0
 *
 * This file contains the functions and defines necessary to modify and
 * use the SuperH page table tree.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 */
#ifndef __ASM_SH_PGTABLE_H
#define __ASM_SH_PGTABLE_H

#ifdef CONFIG_X2TLB
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef __ASSEMBLY__
#include <asm/addrspace.h>
#include <asm/fixmap.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * Effective and physical address definitions, to aid with sign
 * extension.
 */
#define NEFF            32
#define NEFF_SIGN       (1LL << (NEFF - 1))
#define NEFF_MASK       (-1LL << NEFF)

static inline unsigned long long neff_sign_extend(unsigned long val)
{
        unsigned long long extended = val;
        return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}
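
neff_sign_extend() simply replicates bit NEFF-1 into the upper half of a
64-bit value, so a 32-bit effective address with bit 31 set becomes a
properly sign-extended 64-bit address. The following standalone userspace
sketch is not part of the header; it reproduces the same arithmetic with
NEFF fixed at 32, just to show the two cases:

#include <stdio.h>

#define NEFF            32
#define NEFF_SIGN       (1LL << (NEFF - 1))
#define NEFF_MASK       (-1LL << NEFF)

/* Same arithmetic as neff_sign_extend(): replicate bit 31 upward. */
static unsigned long long sign_extend(unsigned long val)
{
        unsigned long long extended = val;
        return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}

int main(void)
{
        /* Bit 31 clear: returned unchanged. */
        printf("%#llx\n", sign_extend(0x20000000UL)); /* 0x20000000 */
        /* Bit 31 set: the upper 32 bits are filled with ones. */
        printf("%#llx\n", sign_extend(0x8c000000UL)); /* 0xffffffff8c000000 */
        return 0;
}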

#ifdef CONFIG_29BIT
#define NPHYS           29
#else
#define NPHYS           32
#endif

#define NPHYS_SIGN      (1LL << (NPHYS - 1))
#define NPHYS_MASK      (-1LL << NPHYS)

#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/* Entries per level */
#define PTRS_PER_PTE    (PAGE_SIZE / (1 << PTE_MAGNITUDE))

#define FIRST_USER_ADDRESS      0UL

#define PHYS_ADDR_MASK29                0x1fffffff
#define PHYS_ADDR_MASK32                0xffffffff

static inline unsigned long phys_addr_mask(void)
{
        /* Is the MMU in 29bit mode? */
        if (__in_29bit_mode())
                return PHYS_ADDR_MASK29;

        return PHYS_ADDR_MASK32;
}

#define PTE_PHYS_MASK           (phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK          (~(PTE_PHYS_MASK) << PAGE_SHIFT)
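
phys_addr_mask() reports how many PTE bits actually carry the physical frame:
29 bits in legacy 29-bit physical mode, 32 bits otherwise, with PTE_PHYS_MASK
additionally clearing the low PAGE_SHIFT bits that hold per-page flags. Below
is a minimal userspace sketch of that masking, assuming 4 KiB pages and a
made-up PTE value (in the kernel the mode test is __in_29bit_mode()):

#include <stdio.h>

#define PHYS_ADDR_MASK29        0x1fffffffUL
#define PHYS_ADDR_MASK32        0xffffffffUL
#define PAGE_SHIFT              12                      /* assumed 4 KiB pages */
#define PAGE_MASK               (~((1UL << PAGE_SHIFT) - 1))

/* Stand-in for __in_29bit_mode(); set to 0 to model 32-bit physical mode. */
static int in_29bit_mode = 1;

static unsigned long phys_addr_mask(void)
{
        return in_29bit_mode ? PHYS_ADDR_MASK29 : PHYS_ADDR_MASK32;
}

int main(void)
{
        unsigned long pte = 0x8c001163UL;       /* hypothetical PTE value */
        unsigned long pte_phys_mask = phys_addr_mask() & PAGE_MASK;

        /* In 29-bit mode the top three bits are not part of the frame address. */
        printf("physical frame: %#lx\n", pte & pte_phys_mask);  /* 0x0c001000 */
        printf("low flag bits:  %#lx\n", pte & ~PAGE_MASK);     /* 0x163 */
        return 0;
}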

#ifdef CONFIG_SUPERH32
#define VMALLOC_START   (P3SEG)
#else
#define VMALLOC_START   (0xf0000000)
#endif
#define VMALLOC_END     (FIXADDR_START-2*PAGE_SIZE)

#if defined(CONFIG_SUPERH32)
#include <asm/pgtable_32.h>
#else
#include <asm/pgtable_64.h>
#endif

/*
 * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
 * protection for execute, and consider it the same as a read. Also, write
 * permission implies read permission. This is the closest we can get.
 *
 * SH-X2 (SH7785) and later parts take this to the opposite extreme, not only
 * supporting separate execute, read, and write bits, but having completely
 * separate permission bits for user and kernel space.
 */
         /*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_EXECREAD
#define __P101  PAGE_EXECREAD
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_WRITEONLY
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXECREAD
#define __S101  PAGE_EXECREAD
#define __S110  PAGE_RWX
#define __S111  PAGE_RWX

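These __Pxxx/__Sxxx entries feed the generic protection_map: the index is the
mapping's xwr permission triple, with private (__P) and shared (__S) mappings
using separate tables so that writable private mappings resolve to PAGE_COPY
and get handled copy-on-write. A schematic sketch of that lookup, with strings
standing in for the pgprot_t values the real tables hold (the actual lookup
lives in the generic mm code, not in this header):

#include <stdio.h>

/* Index bits: bit 2 = exec, bit 1 = write, bit 0 = read. */
static const char *p_map[8] = { /* private (__Pxxx) */
        "PAGE_NONE", "PAGE_READONLY", "PAGE_COPY", "PAGE_COPY",
        "PAGE_EXECREAD", "PAGE_EXECREAD", "PAGE_COPY", "PAGE_COPY",
};
static const char *s_map[8] = { /* shared (__Sxxx) */
        "PAGE_NONE", "PAGE_READONLY", "PAGE_WRITEONLY", "PAGE_SHARED",
        "PAGE_EXECREAD", "PAGE_EXECREAD", "PAGE_RWX", "PAGE_RWX",
};

int main(void)
{
        /* A private read/write mapping (index 0b011) must be COW, hence PAGE_COPY. */
        printf("private rw-: %s\n", p_map[3]);  /* PAGE_COPY */
        /* The same permissions on a shared mapping really are writable. */
        printf("shared  rw-: %s\n", s_map[3]);  /* PAGE_SHARED */
        return 0;
}
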
typedef pte_t *pte_addr_t;

#define kern_addr_valid(addr)   (1)

#define pte_pfn(x)              ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))

struct vm_area_struct;
struct mm_struct;

extern void __update_cache(struct vm_area_struct *vma,
                           unsigned long address, pte_t pte);
extern void __update_tlb(struct vm_area_struct *vma,
                         unsigned long address, pte_t pte);

static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;
        __update_cache(vma, address, pte);
        __update_tlb(vma, address, pte);
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end,
                                  pgd_t *pgd);

static inline bool __pte_access_permitted(pte_t pte, u64 prot)
{
        return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
}

#ifdef CONFIG_X2TLB
static inline bool pte_access_permitted(pte_t pte, bool write)
{
        u64 prot = _PAGE_PRESENT;

        prot |= _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
        if (write)
                prot |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
        return __pte_access_permitted(pte, prot);
}
#elif defined(CONFIG_SUPERH64)
static inline bool pte_access_permitted(pte_t pte, bool write)
{
        u64 prot = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;

        if (write)
                prot |= _PAGE_WRITE;
        return __pte_access_permitted(pte, prot);
}
#else
static inline bool pte_access_permitted(pte_t pte, bool write)
{
        u64 prot = _PAGE_PRESENT | _PAGE_USER;

        if (write)
                prot |= _PAGE_RW;
        return __pte_access_permitted(pte, prot);
}
#endif

#define pte_access_permitted pte_access_permitted
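
__pte_access_permitted() masks the PTE with (prot | _PAGE_SPECIAL) and compares
the result against prot, so the check passes only when every required
permission bit is set and the special bit is clear; the three
pte_access_permitted() variants then build the required bit set per TLB
flavour, adding the write bits only when write access is requested. Below is a
standalone sketch of that bit test, using made-up bit values purely for
illustration (the real ones come from <asm/pgtable_32.h> / <asm/pgtable_64.h>):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit assignments, for illustration only. */
#define _PAGE_PRESENT   0x001
#define _PAGE_RW        0x002
#define _PAGE_USER      0x004
#define _PAGE_SPECIAL   0x100

/* Mirrors __pte_access_permitted(): all prot bits set, special bit clear. */
static bool access_permitted(uint64_t pte, uint64_t prot)
{
        return (pte & (prot | _PAGE_SPECIAL)) == prot;
}

int main(void)
{
        uint64_t prot = _PAGE_PRESENT | _PAGE_USER | _PAGE_RW;

        printf("%d\n", access_permitted(prot, prot));                        /* 1 */
        printf("%d\n", access_permitted(_PAGE_PRESENT | _PAGE_USER, prot));  /* 0: write bit missing */
        printf("%d\n", access_permitted(prot | _PAGE_SPECIAL, prot));        /* 0: special PTE refused */
        return 0;
}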

/* arch/sh/mm/mmap.c */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#include <asm-generic/pgtable.h>

#endif /* __ASM_SH_PGTABLE_H */
