arch/x86/kernel/vmlinux.lds.S

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated at
 * run time. Absolute symbols are not relocated. If the symbol value should
 * change when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */
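
/*
 * C code typically consumes the symbols defined in this script as opaque
 * arrays, so only their addresses are ever used (cf. asm/sections.h).
 * A minimal sketch, with kernel_image_size() as a hypothetical helper:
 *
 *      extern char _text[], _end[];
 *
 *      static unsigned long kernel_image_size(void)
 *      {
 *              return (unsigned long)(_end - _text);
 *      }
 */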

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

jiffies = jiffies_64;
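
/*
 * The alias above overlays the C-visible "jiffies" (unsigned long) on the
 * low word of "jiffies_64" (u64); this works on x86 because it is
 * little-endian. A sketch of the declarations being satisfied
 * (cf. linux/jiffies.h):
 *
 *      extern u64 __cacheline_aligned_in_smp jiffies_64;
 *      extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
 */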

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, kernel identity mappings will have different RWX permissions
 * for the pages mapping the text section and for the padding pages (which
 * are freed) at its end. Hence kernel identity mappings will be broken
 * into smaller pages. For 64-bit, kernel text and kernel identity mappings
 * are different, so we can enable protection checks as well as retain 2MB
 * large page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN  . = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END                                    \
                . = ALIGN(HPAGE_SIZE);                          \
                __end_rodata_hpage_align = .;                   \
                __end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN  . = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END    . = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted, to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED                                           \
        . = ALIGN(PMD_SIZE);                                    \
        __start_bss_decrypted = .;                              \
        *(.bss..decrypted);                                     \
        . = ALIGN(PAGE_SIZE);                                   \
        __start_bss_decrypted_unused = .;                       \
        . = ALIGN(PMD_SIZE);                                    \
        __end_bss_decrypted = .;                                \

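/*
 * A hedged sketch of how C code opts data into the decrypted section; the
 * kernel wraps the section attribute in a helper (cf. __bss_decrypted in
 * asm/mem_encrypt.h):
 *
 *      static struct pvclock_vsyscall_time_info
 *              hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted;
 */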
#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END                                    \
                . = ALIGN(PAGE_SIZE);                           \
                __end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
        text PT_LOAD FLAGS(5);          /* R_E */
        data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
        percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
        init PT_LOAD FLAGS(7);          /* RWE */
#endif
        note PT_NOTE FLAGS(0);          /* ___ */
}
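
/*
 * The FLAGS() values above are ELF p_flags bitmasks: PF_X = 1, PF_W = 2
 * and PF_R = 4. Hence FLAGS(5) = PF_R | PF_X (R_E), FLAGS(6) =
 * PF_R | PF_W (RW_) and FLAGS(7) = PF_R | PF_W | PF_X (RWE).
 */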

SECTIONS
{
#ifdef CONFIG_X86_32
        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
        phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
        . = __START_KERNEL;
        phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

        /* Text and read-only data */
        .text :  AT(ADDR(.text) - LOAD_OFFSET) {
                _text = .;
                _stext = .;
                /* bootstrapping code */
                HEAD_TEXT
                TEXT_TEXT
                SCHED_TEXT
                CPUIDLE_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                ALIGN_ENTRY_TEXT_BEGIN
                ENTRY_TEXT
                IRQENTRY_TEXT
                ALIGN_ENTRY_TEXT_END
                SOFTIRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)

#ifdef CONFIG_RETPOLINE
                __indirect_thunk_start = .;
                *(.text.__x86.indirect_thunk)
                __indirect_thunk_end = .;
#endif

                /* End of text section */
                _etext = .;
        } :text = 0x9090

        NOTES :text :note

        EXCEPTION_TABLE(16) :text = 0x9090

        /* .text should occupy a whole number of pages */
        . = ALIGN(PAGE_SIZE);
        X86_ALIGN_RODATA_BEGIN
        RO_DATA(PAGE_SIZE)
        X86_ALIGN_RODATA_END

        /* Data */
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                /* Start of data section */
                _sdata = .;

                /* init_task */
                INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
                /* 32-bit has nosave before _edata */
                NOSAVE_DATA
#endif

                PAGE_ALIGNED_DATA(PAGE_SIZE)

                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

                DATA_DATA
                CONSTRUCTORS

                /* rarely changed data like cpu maps */
                READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

                /* End of data section */
                _edata = .;
        } :data

        BUG_TABLE

        ORC_UNWIND_TABLE

        . = ALIGN(PAGE_SIZE);
        __vvar_page = .;

        .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
                /* work around gold bug 13023 */
                __vvar_beginning_hack = .;

                /* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)                         \
                . = __vvar_beginning_hack + offset;     \
                *(.vvar_ ## name)
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>
#undef __VVAR_KERNEL_LDS
#undef EMIT_VVAR

                /*
                 * Pad the rest of the page with zeros.  Otherwise the loader
                 * can leave garbage here.
                 */
                . = __vvar_beginning_hack + PAGE_SIZE;
        } :data
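
        /*
         * A hedged sketch of the mechanism: for a hypothetical vvar
         * declared in asm/vvar.h as
         *
         *      DECLARE_VVAR(128, struct example_data, example)
         *
         * the EMIT_VVAR() definition above expands to
         *
         *      . = __vvar_beginning_hack + 128;
         *      *(.vvar_example)
         *
         * pinning that variable at offset 128 of the vvar page.
         */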

        . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

        /* Init code and data - will be freed after init */
        . = ALIGN(PAGE_SIZE);
        .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
                __init_begin = .; /* paired with __init_end */
        }

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
        /*
         * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
         * output PHDR, so the next output section - .init.text - should
         * start another segment - init.
         */
        PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
        ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
               "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

        INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
        :init
#endif

        /*
         * Section for code used exclusively before alternatives are run. All
         * references to such code must be patched out by alternatives, normally
         * by using the X86_FEATURE_ALWAYS CPU feature bit.
         *
         * See static_cpu_has() for an example.
         */
        .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
                *(.altinstr_aux)
        }
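
        /*
         * A hedged usage sketch: the pre-patching test emitted by
         * static_cpu_has() lands in .altinstr_aux and is executed only
         * until alternatives patching turns the check into a static jump
         * (fast_path()/slow_path() are hypothetical):
         *
         *      if (static_cpu_has(X86_FEATURE_XMM2))
         *              fast_path();
         *      else
         *              slow_path();
         */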

        INIT_DATA_SECTION(16)

        .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
                __x86_cpu_dev_start = .;
                *(.x86_cpu_dev.init)
                __x86_cpu_dev_end = .;
        }
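
        /*
         * CPU vendor drivers register themselves into this array. A hedged
         * sketch of the C side (cf. cpu_dev_register() in cpu/cpu.h):
         *
         *      static const struct cpu_dev intel_cpu_dev = {
         *              .c_vendor       = "Intel",
         *              .c_ident        = { "GenuineIntel" },
         *      };
         *      cpu_dev_register(intel_cpu_dev);
         */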

#ifdef CONFIG_X86_INTEL_MID
        .x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
                                                                LOAD_OFFSET) {
                __x86_intel_mid_dev_start = .;
                *(.x86_intel_mid_dev.init)
                __x86_intel_mid_dev_end = .;
        }
#endif

        /*
         * Start address and size of operations which at runtime can be
         * patched with virtualization-friendly instructions or bare-metal
         * native ones. Think page table operations.
         * Details in paravirt_types.h.
         */
        . = ALIGN(8);
        .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
                __parainstructions = .;
                *(.parainstructions)
                __parainstructions_end = .;
        }

        /*
         * struct alt_instr entries. From the header (alternative.h):
         * "Alternative instructions for different CPU types or capabilities"
         * Think locking instructions on spinlocks.
         */
        . = ALIGN(8);
        .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
                __alt_instructions = .;
                *(.altinstructions)
                __alt_instructions_end = .;
        }

        /*
         * And here are the replacement instructions. The linker sticks
         * them in as binary blobs. The .altinstructions section has enough
         * data to get the address and the length of them to patch the
         * kernel safely.
         */
        .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
                *(.altinstr_replacement)
        }
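
        /*
         * A hedged sketch of the C side feeding both sections: the
         * alternative() macro (asm/alternative.h) emits the original
         * instruction inline, a struct alt_instr record into
         * .altinstructions and the replacement bytes into
         * .altinstr_replacement. The classic 32-bit mb() looked like:
         *
         *      alternative("lock; addl $0,0(%%esp)", "mfence",
         *                  X86_FEATURE_XMM2);
         */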

        /*
         * struct iommu_table_entry entries are injected into this section.
         * It is an array of IOMMUs which during run time gets sorted
         * depending on its dependency order. After rootfs_initcall is
         * complete this section can be safely removed.
         */
        .iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
                __iommu_table = .;
                *(.iommu_table)
                __iommu_table_end = .;
        }
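
        /*
         * A hedged sketch of how an entry lands here (cf. asm/iommu_table.h):
         * each IOMMU declares its detect/init hooks and dependencies, e.g.:
         *
         *      IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
         *                        pci_xen_swiotlb_detect,
         *                        pci_swiotlb_init,
         *                        pci_swiotlb_late_init);
         */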

        . = ALIGN(8);
        .apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
                __apicdrivers = .;
                *(.apicdrivers);
                __apicdrivers_end = .;
        }
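
        /*
         * A hedged sketch of how an APIC driver lands in this array
         * (cf. apic_driver() in asm/apic.h, which places a pointer into
         * .apicdrivers; the initializer here is abbreviated):
         *
         *      static struct apic apic_physflat = { .name = "physical flat" };
         *      apic_driver(apic_physflat);
         */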

        . = ALIGN(8);
        /*
         * .exit.text is discarded at runtime, not link time, to deal with
         * references from .altinstructions and .eh_frame
         */
        .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
                EXIT_TEXT
        }

        .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
                EXIT_DATA
        }

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
        PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

        . = ALIGN(PAGE_SIZE);

        /* freed after init ends here */
        .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
                __init_end = .;
        }

        /*
         * smp_locks might be freed after init
         * start/end must be page aligned
         */
        . = ALIGN(PAGE_SIZE);
        .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
                __smp_locks = .;
                *(.smp_locks)
                . = ALIGN(PAGE_SIZE);
                __smp_locks_end = .;
        }
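
        /*
         * A hedged sketch of how .smp_locks is populated (cf. LOCK_PREFIX
         * in asm/alternative.h): every "lock" prefix records its address
         * here so UP kernels can patch it out:
         *
         *      #define LOCK_PREFIX_HERE                        \
         *              ".pushsection .smp_locks,\"a\"\n"       \
         *              ".balign 4\n"                           \
         *              ".long 671f - .\n"                      \
         *              ".popsection\n"                         \
         *              "671:"
         *      #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
         */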

#ifdef CONFIG_X86_64
        .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
                NOSAVE_DATA
        }
#endif

        /* BSS */
        . = ALIGN(PAGE_SIZE);
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
                __bss_start = .;
                *(.bss..page_aligned)
                *(BSS_MAIN)
                BSS_DECRYPTED
                . = ALIGN(PAGE_SIZE);
                __bss_stop = .;
        }

        /*
         * The memory occupied from _text to here, __end_of_kernel_reserve, is
         * automatically reserved in setup_arch(). Anything after here must be
         * explicitly reserved using memblock_reserve() or it will be discarded
         * and treated as available memory.
         */
        __end_of_kernel_reserve = .;
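
        /*
         * A minimal sketch of the corresponding reservation done in
         * setup_arch() (assuming the memblock API; cf. kernel/setup.c):
         *
         *      memblock_reserve(__pa_symbol(_text),
         *                       (unsigned long)__end_of_kernel_reserve -
         *                       (unsigned long)_text);
         */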

        . = ALIGN(PAGE_SIZE);
        .brk : AT(ADDR(.brk) - LOAD_OFFSET) {
                __brk_base = .;
                . += 64 * 1024;         /* 64k alignment slop space */
                *(.brk_reservation)     /* areas brk users have reserved */
                __brk_limit = .;
        }
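
        /*
         * A hedged sketch of how .brk_reservation space is claimed: users
         * reserve it at build time with RESERVE_BRK() and carve it up in
         * early boot via extend_brk() (cf. asm/setup.h), e.g.:
         *
         *      RESERVE_BRK(dmi_alloc, 65536);
         */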

        . = ALIGN(PAGE_SIZE);           /* keep VO_INIT_SIZE page aligned */
        _end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
        /*
         * Early scratch/workarea section: Lives outside of the kernel proper
         * (_text - _end).
         *
         * Resides after _end because even though the .brk section is after
         * __end_of_kernel_reserve, the .brk section is later reserved as a
         * part of the kernel. Since this scratch section is located after
         * __end_of_kernel_reserve and is never explicitly reserved, it will
         * be discarded and become part of the available memory. As such, it
         * can only be used by very early boot code and must not be needed
         * afterwards.
         *
         * Currently used by SME for performing in-place encryption of the
         * kernel during boot. Resides on a 2MB boundary to simplify the
         * pagetable setup used for SME in-place encryption.
         */
        . = ALIGN(HPAGE_SIZE);
        .init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
                __init_scratch_begin = .;
                *(.init.scratch)
                . = ALIGN(HPAGE_SIZE);
                __init_scratch_end = .;
        }
#endif

        STABS_DEBUG
        DWARF_DEBUG

        DISCARDS
        /DISCARD/ : {
                *(.eh_frame)
        }
}


#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
           "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);
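
/*
 * The INIT_PER_CPU() aliases let early assembly (e.g. head_64.S) address
 * the boot CPU's copy of these objects inside the initial per-cpu load
 * area, before %gs-based per-cpu access is set up. A hedged sketch of the
 * C-side definition being aliased (cf. cpu/common.c, initializer omitted):
 *
 *      DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
 */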

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
           "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
           "fixed_percpu_data is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
           "kexec control code size is too big");
#endif
