root/arch/arm64/include/asm/stage2_pgtable.h

DEFINITIONS

This source file includes the following definitions:
  1. kvm_stage2_has_pud
  2. stage2_pgd_none
  3. stage2_pgd_clear
  4. stage2_pgd_present
  5. stage2_pgd_populate
  6. stage2_pud_offset
  7. stage2_pud_free
  8. stage2_pud_table_empty
  9. stage2_pud_addr_end
  10. kvm_stage2_has_pmd
  11. stage2_pud_none
  12. stage2_pud_clear
  13. stage2_pud_present
  14. stage2_pud_populate
  15. stage2_pmd_offset
  16. stage2_pmd_free
  17. stage2_pud_huge
  18. stage2_pmd_table_empty
  19. stage2_pmd_addr_end
  20. stage2_pte_table_empty
  21. stage2_pgd_index
  22. stage2_pgd_addr_end

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 - ARM Ltd
 *
 * stage2 page table helpers
 */

#ifndef __ARM64_S2_PGTABLE_H_
#define __ARM64_S2_PGTABLE_H_

#include <linux/hugetlb.h>
#include <asm/pgtable.h>

/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map
 * and depends on the number of levels in the page table. Compute the
 * PGDIR_SHIFT for a given number of levels.
 */
#define pt_levels_pgdir_shift(lvls)     ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))
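
/*
 * Worked examples, assuming a 4K granule (PAGE_SHIFT == 12), where
 * ARM64_HW_PGTABLE_LEVEL_SHIFT(n) == (PAGE_SHIFT - 3) * (4 - n) + 3:
 *
 *      pt_levels_pgdir_shift(4) == ARM64_HW_PGTABLE_LEVEL_SHIFT(0) == 39
 *      pt_levels_pgdir_shift(3) == ARM64_HW_PGTABLE_LEVEL_SHIFT(1) == 30
 *      pt_levels_pgdir_shift(2) == ARM64_HW_PGTABLE_LEVEL_SHIFT(2) == 21
 */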

/*
 * The hardware supports concatenation of up to 16 tables at the stage2
 * entry level and we use the feature whenever possible, which means we
 * resolve 4 additional bits of address at the entry level.
 *
 * This implies that the total number of page table levels required by
 * the hardware for an IPA_SHIFT-bit range at stage2 can be calculated
 * using the same logic used for the (non-collapsible) stage1 page
 * tables, but for (IPA_SHIFT - 4).
 */
#define stage2_pgtable_levels(ipa)      ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
#define kvm_stage2_levels(kvm)          VTCR_EL2_LVLS(kvm->arch.vtcr)
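
/*
 * For example (4K granule again), a 40-bit IPA needs
 * stage2_pgtable_levels(40) == ARM64_HW_PGTABLE_LEVELS(36)
 *                           == DIV_ROUND_UP(36 - 12, 9) == 3 levels,
 * one fewer than the 4 levels a 40-bit input range would need at
 * stage1, thanks to the concatenated entry level.
 */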

/* stage2_pgdir_shift() is the log2 of the size mapped by a top-level stage2 entry for the VM */
#define stage2_pgdir_shift(kvm)         pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
#define stage2_pgdir_size(kvm)          (1ULL << stage2_pgdir_shift(kvm))
#define stage2_pgdir_mask(kvm)          ~(stage2_pgdir_size(kvm) - 1)

/*
 * The number of PTRS across all concatenated stage2 tables is given by
 * the number of bits resolved at the initial level.
 * If we force more levels than necessary, we may have (stage2_pgdir_shift > IPA),
 * in which case stage2_pgd_ptrs will be 1, i.e. a single entry.
 */
#define pgd_ptrs_shift(ipa, pgdir_shift)        \
        ((ipa) > (pgdir_shift) ? ((ipa) - (pgdir_shift)) : 0)
#define __s2_pgd_ptrs(ipa, lvls)                \
        (1 << (pgd_ptrs_shift((ipa), pt_levels_pgdir_shift(lvls))))
#define __s2_pgd_size(ipa, lvls)        (__s2_pgd_ptrs((ipa), (lvls)) * sizeof(pgd_t))

#define stage2_pgd_ptrs(kvm)            __s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
#define stage2_pgd_size(kvm)            __s2_pgd_size(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
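
/*
 * Continuing the 40-bit IPA / 4K granule example: with 3 levels,
 * pt_levels_pgdir_shift(3) == 30, so pgd_ptrs_shift(40, 30) == 10,
 * __s2_pgd_ptrs(40, 3) == 1024 entries and __s2_pgd_size(40, 3) ==
 * 8KB, i.e. two concatenated pages at the entry level.
 */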

/*
 * kvm_mmu_cache_min_pages() is the number of pages required to install
 * a stage-2 translation. We pre-allocate the entry level page table at
 * VM creation.
 */
#define kvm_mmu_cache_min_pages(kvm)    (kvm_stage2_levels(kvm) - 1)
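
/*
 * For the 3-level, 40-bit IPA example above this is 2: installing a
 * mapping may need one new table page for each level below the
 * pre-allocated entry level.
 */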

/* Stage2 PUD definitions when the level is present */
static inline bool kvm_stage2_has_pud(struct kvm *kvm)
{
        return (CONFIG_PGTABLE_LEVELS > 3) && (kvm_stage2_levels(kvm) > 3);
}

#define S2_PUD_SHIFT                    ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define S2_PUD_SIZE                     (1UL << S2_PUD_SHIFT)
#define S2_PUD_MASK                     (~(S2_PUD_SIZE - 1))
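
/*
 * With a 4K granule, S2_PUD_SHIFT is 30, i.e. S2_PUD_SIZE is 1GB.
 * When the PUD level is folded (!kvm_stage2_has_pud()), the helpers
 * below fall through: the pgd entry is reported as always present
 * and stage2_pud_offset() hands back the pgd slot itself as the pud.
 */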

static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
{
        if (kvm_stage2_has_pud(kvm))
                return pgd_none(pgd);
        else
                return false;
}

static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp)
{
        if (kvm_stage2_has_pud(kvm))
                pgd_clear(pgdp);
}

static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd)
{
        if (kvm_stage2_has_pud(kvm))
                return pgd_present(pgd);
        else
                return true;
}

static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
{
        if (kvm_stage2_has_pud(kvm))
                pgd_populate(NULL, pgd, pud);
}

static inline pud_t *stage2_pud_offset(struct kvm *kvm,
                                       pgd_t *pgd, unsigned long address)
{
        if (kvm_stage2_has_pud(kvm))
                return pud_offset(pgd, address);
        else
                return (pud_t *)pgd;
}

static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
{
        if (kvm_stage2_has_pud(kvm))
                free_page((unsigned long)pud);
}

static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp)
{
        if (kvm_stage2_has_pud(kvm))
                return kvm_page_empty(pudp);
        else
                return false;
}

static inline phys_addr_t
stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
        if (kvm_stage2_has_pud(kvm)) {
                phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;

                return (boundary - 1 < end - 1) ? boundary : end;
        } else {
                return end;
        }
}

/* Stage2 PMD definitions when the level is present */
static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
{
        return (CONFIG_PGTABLE_LEVELS > 2) && (kvm_stage2_levels(kvm) > 2);
}

#define S2_PMD_SHIFT                    ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define S2_PMD_SIZE                     (1UL << S2_PMD_SHIFT)
#define S2_PMD_MASK                     (~(S2_PMD_SIZE - 1))
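
/*
 * With a 4K granule, S2_PMD_SHIFT is 21, i.e. S2_PMD_SIZE is 2MB.
 * The pud helpers below follow the same folding pattern as the pgd
 * helpers above when the PMD level is folded (!kvm_stage2_has_pmd()).
 */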

static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud)
{
        if (kvm_stage2_has_pmd(kvm))
                return pud_none(pud);
        else
                return false;
}

static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud)
{
        if (kvm_stage2_has_pmd(kvm))
                pud_clear(pud);
}

static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud)
{
        if (kvm_stage2_has_pmd(kvm))
                return pud_present(pud);
        else
                return true;
}

static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd)
{
        if (kvm_stage2_has_pmd(kvm))
                pud_populate(NULL, pud, pmd);
}

static inline pmd_t *stage2_pmd_offset(struct kvm *kvm,
                                       pud_t *pud, unsigned long address)
{
        if (kvm_stage2_has_pmd(kvm))
                return pmd_offset(pud, address);
        else
                return (pmd_t *)pud;
}

static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
{
        if (kvm_stage2_has_pmd(kvm))
                free_page((unsigned long)pmd);
}

static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
{
        if (kvm_stage2_has_pmd(kvm))
                return pud_huge(pud);
        else
                return false;
}

static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp)
{
        if (kvm_stage2_has_pmd(kvm))
                return kvm_page_empty(pmdp);
        else
                return false;
}

static inline phys_addr_t
stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
        if (kvm_stage2_has_pmd(kvm)) {
                phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;

                return (boundary - 1 < end - 1) ? boundary : end;
        } else {
                return end;
        }
}

static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep)
{
        return kvm_page_empty(ptep);
}

static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr)
{
        return (addr >> stage2_pgdir_shift(kvm)) & (stage2_pgd_ptrs(kvm) - 1);
}

static inline phys_addr_t
stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
        phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);

        return (boundary - 1 < end - 1) ? boundary : end;
}
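
/*
 * Typical use of the helpers above, sketched (not compiled here) after
 * the stage2 range walkers in virt/kvm/arm/mmu.c:
 *
 *      pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
 *      do {
 *              next = stage2_pgd_addr_end(kvm, addr, end);
 *              if (!stage2_pgd_none(kvm, *pgd))
 *                      ... descend via stage2_pud_offset() ...
 *      } while (pgd++, addr = next, addr != end);
 */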

#endif  /* __ARM64_S2_PGTABLE_H_ */
