root/arch/x86/mm/kaslr.c


DEFINITIONS

This source file includes the following definitions.
  1. get_padding
  2. kaslr_memory_enabled
  3. kernel_randomize_memory
  4. init_trampoline_pud
  5. init_trampoline

// SPDX-License-Identifier: GPL-2.0
/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (the physical memory
 * mapping, vmalloc & vmemmap). This security feature mitigates exploits
 * relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on the PGD & P4D/PUD page table levels to increase the number of
 * possible addresses. The physical memory mapping code was adapted to
 * support P4D/PUD level virtual addresses. With the best configuration,
 * this implementation provides on average 30,000 possible virtual
 * addresses for each memory region. An additional low memory page is used
 * to ensure each CPU can start with a PGD aligned virtual address (for
 * realmode).
 *
 * The order of the memory regions is not changed. The feature looks at
 * the available space for the regions based on different configuration
 * options and randomizes the base and the space between each. The size of
 * the physical memory mapping is the available physical memory.
 */
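
/*
 * A minimal sketch of the resulting layout (addresses illustrative, not
 * actual values; each gap is an independently randomized, PUD aligned
 * offset picked in kernel_randomize_memory() below):
 *
 *   vaddr_start
 *      <random gap>
 *   page_offset_base    direct physical memory mapping
 *      <random gap>
 *   vmalloc_base        vmalloc region
 *      <random gap>
 *   vmemmap_base        vmemmap region (struct page array)
 *      <leftover entropy>
 *   vaddr_end == CPU_ENTRY_AREA_BASE
 */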

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/memblock.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

#define TB_SHIFT 40

/*
 * The end address could depend on more configuration options to make the
 * highest amount of space for randomization available, but that's too hard
 * to keep straight and caused issues already.
 */
static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;

/*
 * Memory regions randomized by KASLR (except modules, which use separate
 * logic earlier during boot). The list is ordered based on virtual
 * addresses. This order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long size_tb;
} kaslr_regions[] = {
	{ &page_offset_base, 0 },
	{ &vmalloc_base, 0 },
	{ &vmemmap_base, 0 },
};

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}

/*
 * Apply no randomization if KASLR was disabled at boot or if KASAN
 * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
 */
static inline bool kaslr_memory_enabled(void)
{
	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr_start, vaddr;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;
	unsigned long vmemmap_size;

	vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
	vaddr = vaddr_start;

	/*
	 * These BUILD_BUG_ON checks ensure the memory layout is consistent
	 * with the vaddr_start/vaddr_end variables. These checks are very
	 * limited...
	 */
	BUILD_BUG_ON(vaddr_start >= vaddr_end);
	BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

	if (!kaslr_memory_enabled())
		return;

	kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
	kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
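
	/*
	 * Example: with 4-level paging, MAX_PHYSMEM_BITS on x86_64 is 46,
	 * so this initially sizes the physical mapping region to
	 * 1 << (46 - 40) = 64 TB; it is trimmed to the actual amount of
	 * memory just below.
	 */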

	/*
	 * Size the physical memory mapping to the available memory and
	 * add padding if needed (especially for memory hotplug support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
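
	/*
	 * Worked example: with 1.5 TB of RAM, max_pfn << PAGE_SHIFT is
	 * 1.5 * 2^40 bytes, so DIV_ROUND_UP() yields 2 TB, to which the
	 * Kconfig-selected hotplug padding is added.
	 */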

	/* Adapt physical memory region size based on available memory */
	if (memory_tb < kaslr_regions[0].size_tb)
		kaslr_regions[0].size_tb = memory_tb;

	/*
	 * Calculate the vmemmap region size in TBs, aligned to a TB
	 * boundary.
	 */
	vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
			sizeof(struct page);
	kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
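
	/*
	 * Worked example: a 64 TB physical mapping region covers
	 * 64 << (40 - 12) = 2^34 pages of 4 KiB each; with the common
	 * 64-byte struct page this is 2^40 bytes, i.e. a 1 TB vmemmap
	 * region.
	 */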

	/* Calculate entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);

	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

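	/*
	 * The remaining entropy is split evenly across the regions still
	 * to be placed: region i may shift by at most
	 * remain_entropy / (nr_regions - i) bytes, so entropy spent on an
	 * early region never starves the later ones of randomization
	 * space.
	 */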
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available. The "& PUD_MASK" rounds the offset down to a
		 * PUD_SIZE multiple, keeping the region PUD aligned.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
		entropy = (rand % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;

		/*
		 * Jump over the region and add a minimum padding based on
		 * the randomization alignment.
		 */
		vaddr += get_padding(&kaslr_regions[i]);
		vaddr = round_up(vaddr + 1, PUD_SIZE);
		remain_entropy -= entropy;
	}
}

static void __meminit init_trampoline_pud(void)
{
	pud_t *pud_page_tramp, *pud, *pud_tramp;
	p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
	unsigned long paddr, vaddr;
	pgd_t *pgd;

	pud_page_tramp = alloc_low_page();

	/*
	 * There are two mappings for the low 1MB area, the direct mapping
	 * and the 1:1 mapping for the real mode trampoline:
	 *
	 * Direct mapping: virt_addr = phys_addr + PAGE_OFFSET
	 * 1:1 mapping:    virt_addr = phys_addr
	 */
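	/*
	 * The PUD entry covering the low 1MB in the direct mapping is
	 * looked up below via __va(0) and copied into the trampoline's
	 * own PUD page, so both mappings end up backed by the same
	 * physical pages.
	 */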
	paddr = 0;
	vaddr = (unsigned long)__va(paddr);
	pgd = pgd_offset_k(vaddr);

	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);

	pud_tramp = pud_page_tramp + pud_index(paddr);
	*pud_tramp = *pud;

	if (pgtable_l5_enabled()) {
		p4d_page_tramp = alloc_low_page();

		p4d_tramp = p4d_page_tramp + p4d_index(paddr);

		set_p4d(p4d_tramp,
			__p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));

		set_pgd(&trampoline_pgd_entry,
			__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
	} else {
		set_pgd(&trampoline_pgd_entry,
			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
	}
}

/*
 * The real mode trampoline, which is required for bootstrapping CPUs,
 * occupies only a small area under the low 1MB.  See reserve_real_mode()
 * for details.
 *
 * If KASLR is disabled, the first PGD entry of the direct mapping is
 * copied to map the real mode trampoline.
 *
 * If KASLR is enabled, copy only the PUD which covers the low 1MB
 * area. This limits the randomization granularity to 1GB for both 4-level
 * and 5-level paging.
 */
void __meminit init_trampoline(void)
{
	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	init_trampoline_pud();
}
