root/arch/x86/realmode/init.c

DEFINITIONS

This source file includes the following definitions:
  1. reserve_real_mode
  2. setup_real_mode
  3. set_real_mode_permissions
  4. init_real_mode

// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>

#include <asm/set_memory.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;

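/*
 * Reserve low memory for the real-mode trampoline. This runs early from
 * setup_arch(), while memblock is still the only allocator available, so
 * the copy and fixup of the trampoline blob is deferred to
 * setup_real_mode().
 */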
void __init reserve_real_mode(void)
{
        phys_addr_t mem;
        size_t size = real_mode_size_needed();

        if (!size)
                return;

        WARN_ON(slab_is_available());

        /* Has to be under 1M so we can execute real-mode AP code. */
        mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
        if (!mem) {
                pr_info("No sub-1M memory is available for the trampoline\n");
                return;
        }

        memblock_reserve(mem, size);
        set_real_mode_mem(mem);
}

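/*
 * Copy the 16-bit trampoline blob (the realmode.bin image built from
 * arch/x86/realmode/rm/) into the reserved low-memory area and patch its
 * relocation entries so the code runs at its final physical address.
 */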
static void __init setup_real_mode(void)
{
        u16 real_mode_seg;
        const u32 *rel;
        u32 count;
        unsigned char *base;
        unsigned long phys_base;
        struct trampoline_header *trampoline_header;
        size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
        u64 *trampoline_pgd;
        u64 efer;
#endif

        base = (unsigned char *)real_mode_header;

        /*
         * If SME is active, the trampoline area will need to be in
         * decrypted memory in order to bring up other processors
         * successfully. This is not needed for SEV.
         */
        if (sme_active())
                set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

        memcpy(base, real_mode_blob, size);

        phys_base = __pa(base);
        real_mode_seg = phys_base >> 4;

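        /*
         * real_mode_relocs is laid out as two consecutive lists, each a
         * u32 count followed by that many u32 offsets into the blob:
         * first the 16-bit segment fixups, then the 32-bit linear ones.
         */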
        rel = (u32 *) real_mode_relocs;

        /* 16-bit segment relocations. */
        count = *rel++;
        while (count--) {
                u16 *seg = (u16 *) (base + *rel++);
                *seg = real_mode_seg;
        }

        /* 32-bit linear relocations. */
        count = *rel++;
        while (count--) {
                u32 *ptr = (u32 *) (base + *rel++);
                *ptr += phys_base;
        }

        /* Must be performed *after* relocation. */
        trampoline_header = (struct trampoline_header *)
                __va(real_mode_header->trampoline_header);

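        /*
         * Fill in the trampoline header: the protected/long-mode entry
         * point the 16-bit code should jump to, plus the state (GDT,
         * EFER, CR4, flags) it needs to get there.
         */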
#ifdef CONFIG_X86_32
        trampoline_header->start = __pa_symbol(startup_32_smp);
        trampoline_header->gdt_limit = __BOOT_DS + 7;
        trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
        /*
         * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
         * so we need to mask it out.
         */
        rdmsrl(MSR_EFER, efer);
        trampoline_header->efer = efer & ~EFER_LMA;

        trampoline_header->start = (u64) secondary_startup_64;
        trampoline_cr4_features = &trampoline_header->cr4;
        *trampoline_cr4_features = mmu_cr4_features;

        trampoline_header->flags = 0;
        if (sme_active())
                trampoline_header->flags |= TH_FLAGS_SME_ACTIVE;

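        /*
         * Minimal page table for the AP's switch to paging: entry 0
         * covers the low 1:1 mapping the trampoline runs from, and the
         * top entry mirrors the kernel's own mappings so the AP can jump
         * to kernel virtual addresses.
         */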
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
        trampoline_pgd[0] = trampoline_pgd_entry.pgd;
        trampoline_pgd[511] = init_top_pgt[511].pgd;
#endif
}

/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also, the trampoline code will be executed by APs, so it
 * needs to be marked executable by do_pre_smp_initcalls() at the latest;
 * thus we run this as an early_initcall().
 */
static void __init set_real_mode_permissions(void)
{
        unsigned char *base = (unsigned char *) real_mode_header;
        size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

        size_t ro_size =
                PAGE_ALIGN(real_mode_header->ro_end) -
                __pa(base);

        size_t text_size =
                PAGE_ALIGN(real_mode_header->ro_end) -
                real_mode_header->text_start;

        unsigned long text_start =
                (unsigned long) __va(real_mode_header->text_start);

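        /*
         * Make the whole trampoline area non-executable by default,
         * write-protect everything up to ro_end, then re-enable execute
         * permission only for the text range the APs actually run.
         */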
        set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
        set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
        set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
}

static int __init init_real_mode(void)
{
        if (!real_mode_header)
                panic("Real mode trampoline was not allocated");

        setup_real_mode();
        set_real_mode_permissions();

        return 0;
}
early_initcall(init_real_mode);
