/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fixup the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
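	/*
	 * Illustrative arithmetic for the fixups above, assuming the
	 * usual 4-level constants (PGDIR_SHIFT == 39, PUD_SHIFT == 30,
	 * 512 entries per table) and __START_KERNEL_map ==
	 * 0xffffffff80000000:
	 *
	 *	L4_START_KERNEL = pgd_index(__START_KERNEL_map) = 511
	 *	L3_START_KERNEL = pud_index(__START_KERNEL_map) = 510
	 *
	 * So the four slots patched above are early_level4_pgt[511]
	 * (-> level3_kernel_pgt), level3_kernel_pgt[510] and [511]
	 * (-> level2_kernel_pgt and level2_fixmap_pgt), and
	 * level2_fixmap_pgt[506] (-> level1_fixmap_pgt); see their
	 * definitions at the end of this file.  Each of these slots
	 * holds a physical address, which is why the load delta in
	 * %rbp is added to it.
	 */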
	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
	incl	%eax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)

	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp 1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Set up the early boot 4-level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:
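	/*
	 * The indirect jump above is the actual switch to the kernel's
	 * virtual text mapping: "movq $1f, %rax" uses an absolute
	 * relocation, so %rax holds the link-time (__START_KERNEL_map
	 * based) address of the 1: label, while everything before this
	 * point was RIP-relative and therefore ran equally well from
	 * the identity mapping.
	 */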
	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq stack_start(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we are currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses the
	 * init data section until the per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/* %rsi is a pointer to the real mode structure with interesting
	   info; pass it to C. */
	movq	%rsi, %rdi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
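	/*
	 * Stack frame consumed by the lretq above, top of stack first:
	 *
	 *	(%rsp)		initial_code	-> popped into %rip
	 *	8(%rsp)		__KERNEL_CS	-> popped into %cs
	 *	16(%rsp)	0		left in place as the fake
	 *					return address that stops
	 *					the unwinder
	 */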
#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here, then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq stack_start(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* SMP bootup changes these two */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	GLOBAL(stack_start)
	.quad	init_thread_union+THREAD_SIZE-8
	.word	0
	__FINITDATA

bad_address:
	jmp bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	# 96(%rsp) %cs
	# 88(%rsp) %rip
	# 80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	cmpl $2,(%rsp)		# X86_TRAP_NMI
	je is_nmi		# Ignore NMI

	cmpl $2,early_recursion_flag(%rip)
	jz 1f
	incl early_recursion_flag(%rip)

	pushq %rax		# 64(%rsp)
	pushq %rcx		# 56(%rsp)
	pushq %rdx		# 48(%rsp)
	pushq %rsi		# 40(%rsp)
	pushq %rdi		# 32(%rsp)
	pushq %r8		# 24(%rsp)
	pushq %r9		# 16(%rsp)
	pushq %r10		# 8(%rsp)
	pushq %r11		# 0(%rsp)

	cmpl $__KERNEL_CS,96(%rsp)
	jne 11f

	cmpl $14,72(%rsp)	# Page fault?
	jnz 10f
	GET_CR2_INTO(%rdi)	# can clobber any volatile register if pv
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			# All good

10:
	leaq 88(%rsp),%rdi	# Pointer to %rip
	call early_fixup_exception
	andl %eax,%eax
	jnz 20f			# Found an exception entry

11:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl 80(%rsp),%r8d	# error code
	movl 72(%rsp),%esi	# vector number
	movl 96(%rsp),%edx	# %cs
	movq 88(%rsp),%rcx	# %rip
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz 1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 88(%rsp),%rsi	# %rip again (40(%rsp) holds the saved %rsi,
				# not %rip; see the offsets above)
	call __print_symbol
#endif
#endif /* CONFIG_EARLY_PRINTK */
1:	hlt
	jmp 1b

20:	# Exception table entry found or page table generated
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	decl early_recursion_flag(%rip)
is_nmi:
	addq $16,%rsp		# drop vector number and error code
	INTERRUPT_RETURN
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
early_recursion_flag:
	.long 0

#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
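/*
 * For example, assuming the usual PMD_SHIFT of 21 (2MB pages),
 * PMDS(0, perm, 3) expands to:
 *
 *	.quad (0) + (0 << 21) + (perm)		# maps 0 - 2MB
 *	.quad (0) + (1 << 21) + (perm)		# maps 2MB - 4MB
 *	.quad (0) + (2 << 21) + (perm)		# maps 4MB - 6MB
 *
 * i.e. COUNT consecutive PMD_SIZE mappings starting at physical
 * address START, each with the permission bits PERM.
 */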
	__INITDATA
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
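/*
 * Recap of the static page-table chain defined above (the entry
 * counts can be checked against the .fill/.quad directives):
 *
 *	early_level4_pgt[511]  -> level3_kernel_pgt
 *	level3_kernel_pgt[510] -> level2_kernel_pgt (512MB of 2MB
 *				  mappings: KERNEL_IMAGE_SIZE/PMD_SIZE
 *				  entries)
 *	level3_kernel_pgt[511] -> level2_fixmap_pgt
 *	level2_fixmap_pgt[506] -> level1_fixmap_pgt
 *
 * level2_fixmap_pgt fills exactly one page: 506 + 1 + 5 == 512 entries.
 */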