/*
 *
 *	Trampoline.S	Derived from Setup.S by Linus Torvalds
 *
 *	4 Jan 1997 Michael Chastain: changed to gnu as.
 *	15 Sept 2005 Eric Biederman: 64bit PIC support
 *
 *	Entry: CS:IP point to the start of our code; we are
 *	in real mode with no stack. The rest of the trampoline page
 *	is available to build our stack on, but everything else
 *	is a mystery.
 *
 *	On entry to trampoline_start, the processor is in real mode
 *	with 16-bit addressing and 16-bit data.  CS has some value
 *	and IP is zero.  Thus, data addresses need to be absolute
 *	(no relocation) and are taken relative to r_base.
 *
 *	With the addition of trampoline_pgd this code can
 *	now enter a 64bit kernel that lives at arbitrary 64bit
 *	physical addresses.
 *
 *	If you work on this file, check the object module with objdump
 *	--full-contents --reloc to make sure there are no relocation
 *	entries.
 */

#include <linux/linkage.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/msr.h>
#include <asm/segment.h>
#include <asm/processor-flags.h>
#include "realmode.h"

	.text
	.code16

	.balign	PAGE_SIZE
ENTRY(trampoline_start)
	cli			# We should be safe anyway
	wbinvd

	LJMPW_RM(1f)
1:
	mov	%cs, %ax	# Code and data in the same place
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %ss

	movl	$0xA5A5A5A5, trampoline_status
	# write marker so the master knows we're running

	# Set up the stack
	movl	$rm_stack_end, %esp

	call	verify_cpu		# Verify the CPU supports long mode
	testl	%eax, %eax		# Check for return code
	jnz	no_longmode

	/*
	 * GDT tables in a non-default location: the kernel can be beyond
	 * 16MB, and lgdt will not be able to load the address, as the
	 * default operand size in real mode is 16 bits. Use lgdtl instead
	 * to force the operand size to 32 bits.
	 */

	lidtl	tr_idt	# load idt with 0, 0
	lgdtl	tr_gdt	# load gdt with whatever is appropriate

	movw	$__KERNEL_DS, %dx	# Data segment descriptor

	# Enable protected mode
	movl	$X86_CR0_PE, %eax	# protected mode (PE) bit
	movl	%eax, %cr0		# into protected mode

	# flush prefetch and jump to startup_32
	ljmpl	$__KERNEL32_CS, $pa_startup_32

no_longmode:
	hlt
	jmp no_longmode
#include "../kernel/verify_cpu.S"

	.section ".text32","ax"
	.code32
	.balign 4
ENTRY(startup_32)
	movl	%edx, %ss
	addl	$pa_real_mode_base, %esp
	movl	%edx, %ds
	movl	%edx, %es
	movl	%edx, %fs
	movl	%edx, %gs

	movl	pa_tr_cr4, %eax
	movl	%eax, %cr4		# Enable PAE mode

	# Set up the trampoline 4-level page tables
	movl	$pa_trampoline_pgd, %eax
	movl	%eax, %cr3

	# Set up EFER
	movl	pa_tr_efer, %eax
	movl	pa_tr_efer + 4, %edx
	movl	$MSR_EFER, %ecx
	wrmsr

	# Enable paging and in turn activate Long Mode
	movl	$(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
	movl	%eax, %cr0

	/*
	 * At this point we're in long mode but in 32bit compatibility mode
	 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
	 * EFER.LMA = 1). Now we want to jump into 64-bit mode; to do that
	 * we use the new gdt/idt that has __KERNEL_CS with CS.L = 1.
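	 *
	 * The far jump below performs that switch: it loads __KERNEL_CS and
	 * continues at startup_64. Since paging is already enabled, the
	 * trampoline page tables must map this low-memory range 1:1 so the
	 * fetch at pa_startup_64 succeeds.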
	 */
	ljmpl	$__KERNEL_CS, $pa_startup_64

	.section ".text64","ax"
	.code64
	.balign 4
ENTRY(startup_64)
	# Now jump into the kernel using virtual addresses
	jmpq	*tr_start(%rip)

	.section ".rodata","a"
	# Duplicate the global descriptor table
	# so the kernel can live anywhere
	.balign	16
	.globl tr_gdt
tr_gdt:
	.short	tr_gdt_end - tr_gdt - 1	# gdt limit
	.long	pa_tr_gdt
	.short	0
	.quad	0x00cf9b000000ffff	# __KERNEL32_CS
	.quad	0x00af9b000000ffff	# __KERNEL_CS
	.quad	0x00cf93000000ffff	# __KERNEL_DS
tr_gdt_end:

	.bss
	.balign	PAGE_SIZE
GLOBAL(trampoline_pgd)		.space	PAGE_SIZE

	.balign	8
GLOBAL(trampoline_header)
	tr_start:		.space	8
	GLOBAL(tr_efer)		.space	8
	GLOBAL(tr_cr4)		.space	4
END(trampoline_header)

#include "trampoline_common.S"