This source file includes the following definitions:
- pkey_to_vmflag_bits
- vmflag_to_pte_pkey_bits
- vma_pkey
- pte_to_hpte_pkey_bits
- pte_to_pkey_bits
- mm_pkey_is_allocated
- mm_pkey_alloc
- mm_pkey_free
- execute_only_pkey
- arch_override_mprotect_pkey
- arch_set_user_pkey_access
- arch_pkeys_enabled

/*
 * PowerPC memory protection keys management.
 */
#ifndef _ASM_POWERPC_KEYS_H
#define _ASM_POWERPC_KEYS_H

#include <linux/jump_label.h>
#include <asm/firmware.h>

DECLARE_STATIC_KEY_TRUE(pkey_disabled);
extern int pkeys_total;			/* total pkeys as per device tree */
extern u32 initial_allocation_mask;	/* bits set for the initially allocated keys */
extern u32 reserved_allocation_mask;	/* bits set for reserved keys */

#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
			    VM_PKEY_BIT3 | VM_PKEY_BIT4)

/* PowerPC supports an execute-disable in addition to the generic access/write disables */
#define PKEY_DISABLE_EXECUTE   0x4
#define PKEY_ACCESS_MASK       (PKEY_DISABLE_ACCESS | \
				PKEY_DISABLE_WRITE  | \
				PKEY_DISABLE_EXECUTE)

static inline u64 pkey_to_vmflag_bits(u16 pkey)
{
	return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
}

static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags)
{
	if (static_branch_likely(&pkey_disabled))
		return 0x0UL;

	return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT4 : 0x0UL) |
		((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT3 : 0x0UL) |
		((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) |
		((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT1 : 0x0UL) |
		((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT0 : 0x0UL));
}

static inline int vma_pkey(struct vm_area_struct *vma)
{
	if (static_branch_likely(&pkey_disabled))
		return 0;
	return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT;
}

#define arch_max_pkey() pkeys_total

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return (((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL));
}

static inline u16 pte_to_pkey_bits(u64 pteflags)
{
	return (((pteflags & H_PTE_PKEY_BIT0) ? 0x10 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT1) ? 0x8 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT2) ? 0x4 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT3) ? 0x2 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT4) ? 0x1 : 0x0UL));
}

#define pkey_alloc_mask(pkey) (0x1 << pkey)

#define mm_pkey_allocation_map(mm) (mm->context.pkey_allocation_map)

#define __mm_pkey_allocated(mm, pkey) {	\
	mm_pkey_allocation_map(mm) |= pkey_alloc_mask(pkey); \
}

#define __mm_pkey_free(mm, pkey) {	\
	mm_pkey_allocation_map(mm) &= ~pkey_alloc_mask(pkey);	\
}

#define __mm_pkey_is_allocated(mm, pkey)	\
	(mm_pkey_allocation_map(mm) & pkey_alloc_mask(pkey))

#define __mm_pkey_is_reserved(pkey) (reserved_allocation_mask & \
				     pkey_alloc_mask(pkey))

static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
{
	if (pkey < 0 || pkey >= arch_max_pkey())
		return false;

	/* Reserved keys are never allocated. */
	if (__mm_pkey_is_reserved(pkey))
		return false;

	return __mm_pkey_is_allocated(mm, pkey);
}

/*
 * Returns a free protection key on success, or -1 if pkeys are disabled
 * or every key is already allocated.
 */
static inline int mm_pkey_alloc(struct mm_struct *mm)
{
	/*
	 * The allocation map has one bit per key; a fully set map means
	 * every key is taken.
	 */
	u32 all_pkeys_mask = (u32)(~(0x0));
	int ret;

	if (static_branch_likely(&pkey_disabled))
		return -1;

	/*
	 * Bail out early if every key is allocated: ffz() is undefined
	 * when there are no zero bits.
	 */
	if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
		return -1;

	ret = ffz((u32)mm_pkey_allocation_map(mm));
	__mm_pkey_allocated(mm, ret);

	return ret;
}

static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
{
	if (static_branch_likely(&pkey_disabled))
		return -1;

	if (!mm_pkey_is_allocated(mm, pkey))
		return -EINVAL;

	__mm_pkey_free(mm, pkey);

	return 0;
}

/*
 * Try to dedicate one of the protection keys to be used as an
 * execute-only protection key.
 */
extern int __execute_only_pkey(struct mm_struct *mm);
static inline int execute_only_pkey(struct mm_struct *mm)
{
	if (static_branch_likely(&pkey_disabled))
		return -1;

	return __execute_only_pkey(mm);
}

extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
					 int prot, int pkey);
static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
					      int prot, int pkey)
{
	if (static_branch_likely(&pkey_disabled))
		return 0;

	/*
	 * A pkey of -1 means the caller did not request a specific key;
	 * only then may the arch pick one. Never override a key that was
	 * explicitly passed in.
	 */
	if (pkey != -1)
		return pkey;

	return __arch_override_mprotect_pkey(vma, prot, pkey);
}

extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
				       unsigned long init_val);
static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
					    unsigned long init_val)
{
	if (static_branch_likely(&pkey_disabled))
		return -EINVAL;

	/*
	 * Key 0 is the default key for every mapping; userspace must not
	 * restrict it, so any non-zero init_val is rejected.
	 */
	if (pkey == 0)
		return init_val ? -EINVAL : 0;

	return __arch_set_user_pkey_access(tsk, pkey, init_val);
}

static inline bool arch_pkeys_enabled(void)
{
	return !static_branch_likely(&pkey_disabled);
}

extern void pkey_mm_init(struct mm_struct *mm);
extern bool arch_supports_pkeys(int cap);
extern unsigned int arch_usable_pkeys(void);
extern void thread_pkey_regs_save(struct thread_struct *thread);
extern void thread_pkey_regs_restore(struct thread_struct *new_thread,
				     struct thread_struct *old_thread);
extern void thread_pkey_regs_init(struct thread_struct *thread);
#endif
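
The helpers above implement the architecture side of the memory protection key system calls. For context, a minimal userspace sketch of that interface follows; it is not part of this header and assumes a pkey-capable CPU, a kernel built with CONFIG_PPC_MEM_KEYS, and glibc 2.27+ for the pkey_alloc()/pkey_mprotect()/pkey_free() wrappers.

/* Illustrative userspace usage of the pkey interface backed by this header. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 65536;	/* one page on typical 64K-page ppc64le configs */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Serviced by mm_pkey_alloc(); fails when no key is free or pkeys are disabled. */
	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
	if (pkey < 0) {
		perror("pkey_alloc");
		return EXIT_FAILURE;
	}

	/* Tags the VMA with the key; pkey_to_vmflag_bits() records it in vm_flags. */
	if (pkey_mprotect(p, len, PROT_READ | PROT_WRITE, pkey) < 0)
		perror("pkey_mprotect");

	/* A store through p would now SIGSEGV: the key's access rights deny writes. */

	pkey_free(pkey);	/* serviced by mm_pkey_free() */
	munmap(p, len);
	return EXIT_SUCCESS;
}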