/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/edd.h>

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
#endif

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvm.h>
#include <xen/hvc-console.h>
#include <xen/acpi.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
#include <asm/mach_traps.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/pat.h>

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <asm/acpi.h>
#include <acpi/pdc_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif

#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

/*
 * Pointer to the xen_vcpu_info structure or
 * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
 * and xen_vcpu_setup for details. By default it points to
 * shared_info->vcpu_info, but if the hypervisor supports
 * VCPUOP_register_vcpu_info then it can point to xen_vcpu_info. The
 * pointer is used in __xen_evtchn_do_upcall to acknowledge pending
 * events.
 * More subtly, it is also used by the patched versions of the irq
 * enable/disable operations, e.g. xen_irq_enable_direct and xen_iret
 * in PV mode.
 *
 * The desire to be able to do those mask/unmask operations as a single
 * instruction by using the per-cpu offset held in %gs is the real reason
 * vcpu info is in a per-cpu pointer and the original reason for this
 * hypercall.
 */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/*
 * Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info
 * hypercall. This can be used both in PV and PVHVM mode. The structure
 * overrides the default per_cpu(xen_vcpu, cpu) value.
 */
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long machine_to_phys_nr;
EXPORT_SYMBOL(machine_to_phys_nr);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

void *xen_initial_gdt;

RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

/*
 * Point at some empty memory to start with. We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs. We assume it is to start with, and then set it to zero on
 * the first failure. This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (i.e. the buffer can't cross a page
 * boundary).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if they all are.
 *
 * 0: not available, 1: available
 */
static int have_vcpu_info_placement = 1;

struct tls_descs {
	struct desc_struct desc[3];
};

/*
 * Updating the 3 TLS descriptors in the GDT on every task switch is
 * surprisingly expensive so we avoid updating them if they haven't
 * changed. Since Xen writes different descriptors than the ones
 * passed in the update_descriptor hypercall we keep shadow copies to
 * compare against.
 */
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);

static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
	if (setup_max_cpus > MAX_VIRT_CPUS)
		setup_max_cpus = MAX_VIRT_CPUS;
#endif
}
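
/*
 * Why clamp: without VCPUOP_register_vcpu_info a VCPU can only use one
 * of the vcpu_info slots embedded in the shared_info page, and there
 * are only MAX_VIRT_CPUS of those. Any CPU beyond that limit would
 * have no vcpu_info at all, so (as xen_vcpu_setup below does) we cap
 * the CPU count rather than bring up CPUs we cannot service.
 */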

static void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	/*
	 * This path is called twice on PVHVM - first during bootup via
	 * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
	 * hotplugged: cpu_up -> xen_hvm_cpu_notify.
	 * As we can only do the VCPUOP_register_vcpu_info once, let's
	 * not overwrite its result.
	 *
	 * For PV it is called during restore (xen_vcpu_restore) and bootup
	 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
	 * use this function.
	 */
	if (xen_hvm_domain()) {
		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
			return;
	}
	if (cpu < MAX_VIRT_CPUS)
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

	if (!have_vcpu_info_placement) {
		if (cpu >= MAX_VIRT_CPUS)
			clamp_max_cpus();
		return;
	}

	vcpup = &per_cpu(xen_vcpu_info, cpu);
	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	/*
	 * Check to see if the hypervisor will put the vcpu_info
	 * structure where we want it, which allows direct access via
	 * a percpu variable.
	 * N.B. This hypercall can _only_ be called once per CPU.
	 * Subsequent calls will error out with -EINVAL. This is because
	 * the hypervisor has no unregister variant and this hypercall
	 * does not allow overwriting info.mfn and info.offset.
	 */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
		clamp_max_cpus();
	} else {
		/*
		 * This cpu is using the registered vcpu info, even if
		 * later ones fail to.
		 */
		per_cpu(xen_vcpu, cpu) = vcpup;
	}
}

/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		bool other_cpu = (cpu != smp_processor_id());
		bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
			BUG();

		xen_setup_runstate_info(cpu);

		if (have_vcpu_info_placement)
			xen_vcpu_setup(cpu);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
			BUG();
	}
}

static void __init xen_banner(void)
{
	unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
	struct xen_extraversion extra;
	HYPERVISOR_xen_version(XENVER_extraversion, &extra);

	pr_info("Booting paravirtualized kernel %son %s\n",
		xen_feature(XENFEAT_auto_translated_physmap) ?
			"with PVH extensions " : "", pv_info.name);
	printk(KERN_INFO "Xen version: %d.%d%s%s\n",
	       version >> 16, version & 0xffff, extra.extraversion,
	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}

/* Check if running on Xen version (major, minor) or later */
bool
xen_running_on_version_or_later(unsigned int major, unsigned int minor)
{
	unsigned int version;

	if (!xen_domain())
		return false;

	version = HYPERVISOR_xen_version(XENVER_version, NULL);
	if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
	    ((version >> 16) > major))
		return true;
	return false;
}
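
/*
 * Example: xen_running_on_version_or_later(4, 2) is true on Xen 4.2,
 * 4.5, 5.0 and so on, and false on Xen 4.1 or when not running under
 * Xen at all; XENVER_version packs the major version into the upper
 * 16 bits of its result and the minor version into the lower 16.
 */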

#define CPUID_THERM_POWER_LEAF 6
#define APERFMPERF_PRESENT 0

static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;

static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask;
static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;

static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskebx = ~0;
	unsigned maskecx = ~0;
	unsigned maskedx = ~0;
	unsigned setecx = 0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	switch (*ax) {
	case 1:
		maskecx = cpuid_leaf1_ecx_mask;
		setecx = cpuid_leaf1_ecx_set_mask;
		maskedx = cpuid_leaf1_edx_mask;
		break;

	case CPUID_MWAIT_LEAF:
		/* Synthesize the values.. */
		*ax = 0;
		*bx = 0;
		*cx = cpuid_leaf5_ecx_val;
		*dx = cpuid_leaf5_edx_val;
		return;

	case CPUID_THERM_POWER_LEAF:
		/* Disabling APERFMPERF for kernel usage */
		maskecx = ~(1 << APERFMPERF_PRESENT);
		break;

	case 0xb:
		/* Suppress extended topology stuff */
		maskebx = 0;
		break;
	}

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));

	*bx &= maskebx;
	*cx &= maskecx;
	*cx |= setecx;
	*dx &= maskedx;
}
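
/*
 * Taken together with xen_init_cpuid_mask() below: a leaf-1 CPUID from
 * the kernel comes back with e.g. the MTRR bit (and, for domU, the
 * ACPI bit) cleared, so those subsystems never initialise. The
 * XEN_EMULATE_PREFIX makes the hypervisor trap and emulate the cpuid
 * instruction, so Xen itself also gets a chance to filter the result
 * before our masks above are applied.
 */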

static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
	struct xen_platform_op op = {
		.cmd = XENPF_set_processor_pminfo,
		.u.set_pminfo.id = -1,
		.u.set_pminfo.type = XEN_PM_PDC,
	};
	uint32_t buf[3];
	unsigned int ax, bx, cx, dx;
	unsigned int mwait_mask;

	/*
	 * We need to determine whether it is OK to expose the MWAIT
	 * capability to the kernel to harvest deeper than C3 states from ACPI
	 * _CST using the processor_harvest_xen.c module. For this to work, we
	 * need to gather the MWAIT_LEAF values (which the cstate.c code
	 * checks against). The hypervisor won't expose the MWAIT flag because
	 * it would break backwards compatibility; so we will find out directly
	 * from the hardware and hypercall.
	 */
	if (!xen_initial_domain())
		return false;

	/*
	 * When running on a hypervisor earlier than Xen 4.2, do not
	 * expose MWAIT, to avoid the risk of loading the native ACPI
	 * PAD driver.
	 */
	if (!xen_running_on_version_or_later(4, 2))
		return false;

	ax = 1;
	cx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
		     (1 << (X86_FEATURE_MWAIT % 32));

	if ((cx & mwait_mask) != mwait_mask)
		return false;

	/*
	 * We need to emulate the MWAIT_LEAF and for that we need both
	 * ecx and edx. The hypercall provides only partial information.
	 */
	ax = CPUID_MWAIT_LEAF;
	bx = 0;
	cx = 0;
	dx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	/*
	 * Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
	 * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
	 */
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;
	buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);

	set_xen_guest_handle(op.u.set_pminfo.pdc, buf);

	if ((HYPERVISOR_dom0_op(&op) == 0) &&
	    (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
		cpuid_leaf5_ecx_val = cx;
		cpuid_leaf5_edx_val = dx;
	}
	return true;
#else
	return false;
#endif
}
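
/*
 * Layout note on buf[] above: it follows the ACPI _PDC package
 * (revision, count of capability dwords (1 here), then the capability
 * bits). The hypercall presumably writes back the capabilities it is
 * actually willing to grant, which is why buf[2] is re-checked after
 * HYPERVISOR_dom0_op() returns.
 */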

static void __init xen_init_cpuid_mask(void)
{
	unsigned int ax, bx, cx, dx;
	unsigned int xsave_mask;

	cpuid_leaf1_edx_mask =
		~((1 << X86_FEATURE_MTRR) |  /* disable MTRR */
		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	if (!xen_initial_domain())
		cpuid_leaf1_edx_mask &=
			~((1 << X86_FEATURE_ACPI));  /* disable ACPI */

	cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));

	ax = 1;
	cx = 0;
	cpuid(1, &ax, &bx, &cx, &dx);

	xsave_mask =
		(1 << (X86_FEATURE_XSAVE % 32)) |
		(1 << (X86_FEATURE_OSXSAVE % 32));

	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
	if ((cx & xsave_mask) != xsave_mask)
		cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
	if (xen_check_mwait())
		cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}

static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

static void xen_end_context_switch(struct task_struct *next)
{
	xen_mc_flush();
	paravirt_end_context_switch(next);
}

static unsigned long xen_store_tr(void)
{
	return 0;
}

/*
 * Set the page permissions for a particular virtual address. If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;
	unsigned char dummy;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	/*
	 * Careful: update_va_mapping() will fail if the virtual address
	 * we're poking isn't populated in the page tables. We don't
	 * need to worry about the direct map (that's always in the page
	 * tables), but we need to be careful about vmap space. In
	 * particular, the top level page table can lazily propagate
	 * entries between processes, so if we've switched mms since we
	 * vmapped the target in the first place, we might not have the
	 * top-level page table entry populated.
	 *
	 * We disable preemption because we want the same mm active when
	 * we probe the target and when we issue the hypercall. We'll
	 * have the same nominal mm, but if we're a kernel thread, lazy
	 * mm dropping could change our pgd.
	 *
	 * Out of an abundance of caution, this uses __get_user() to fault
	 * in the target address just in case there's some obscure case
	 * in which the target address isn't readable.
	 */

	preempt_disable();

	pagefault_disable();	/* Avoid warnings due to being atomic. */
	__get_user(dummy, (unsigned char __user __force *)v);
	pagefault_enable();

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();

	preempt_enable();
}
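
/*
 * The helpers below use set_aliased_prot() to flip LDT pages between
 * PAGE_KERNEL_RO and PAGE_KERNEL. Xen PV refuses to use descriptor
 * table pages that the guest can still write, so every mapping of an
 * LDT page (the vmalloc alias and the direct map alike) has to be
 * read-only while the table may be live.
 */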

static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	/*
	 * We need to mark all the aliases of the LDT pages RO. We
	 * don't need to call vm_flush_aliases(), though, since that's
	 * only responsible for flushing aliases out of the TLBs, not
	 * the page tables, and Xen will flush the TLB for us if needed.
	 *
	 * To avoid confusing future readers: none of this is necessary
	 * to load the LDT. The hypervisor only checks this when the
	 * LDT is faulted in due to subsequent descriptor access.
	 */
	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	trace_xen_cpu_set_ldt(addr, entries);

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages..
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		int level;
		pte_t *ptep;
		unsigned long pfn, mfn;
		void *virt;

		/*
		 * The GDT is per-cpu and is in the percpu data area.
		 * That can be virtually mapped, so we need to do a
		 * page-walk to get the underlying MFN for the
		 * hypercall. The page can also be in the kernel's
		 * linear range, so we need to RO that mapping too.
		 */
		ptep = lookup_address(va, &level);
		BUG_ON(ptep == NULL);

		pfn = pte_pfn(*ptep);
		mfn = pfn_to_mfn(pfn);
		virt = __va(PFN_PHYS(pfn));

		frames[f] = mfn;

		make_lowmem_page_readonly((void *)va);
		make_lowmem_page_readonly(virt);
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages..
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		pte_t pte;
		unsigned long pfn, mfn;

		pfn = virt_to_pfn(va);
		mfn = pfn_to_mfn(pfn);

		pte = pfn_pte(pfn, PAGE_KERNEL_RO);

		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
			BUG();

		frames[f] = mfn;
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

static inline bool desc_equal(const struct desc_struct *d1,
			      const struct desc_struct *d2)
{
	return d1->a == d2->a && d1->b == d2->b;
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
	struct desc_struct *gdt;
	xmaddr_t maddr;
	struct multicall_space mc;

	if (desc_equal(shadow, &t->tls_array[i]))
		return;

	*shadow = t->tls_array[i];

	gdt = get_cpu_gdt_table(cpu);
	maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone
	 * and lazy gs handling is enabled, it means we're in a
	 * context switch, and %gs has just been saved. This means we
	 * can zero it out to prevent faults on exit from the
	 * hypervisor if the next process has no %gs. Either way, it
	 * has been saved, and the new value will get loaded properly.
	 * This will go away as soon as Xen has been modified to not
	 * save/restore %gs for normal hypercalls.
	 *
	 * On x86_64, this hack is not used for %gs, because gs points
	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
	 * must not zero %gs on x86_64.
	 *
	 * For x86_64, we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
		BUG();
}
#endif

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
	u64 entry = *(u64 *)ptr;

	trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}

static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	unsigned long addr;

	if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
		return 0;

	info->vector = vector;

	addr = gate_offset(*val);
#ifdef CONFIG_X86_64
	/*
	 * Look for known traps using IST, and substitute them
	 * appropriately. The debugger ones are the only ones we care
	 * about. Xen will handle faults like double_fault, so we
	 * should never see them. Warn if there's an unexpected
	 * IST-using fault handler.
	 */
	if (addr == (unsigned long)debug)
		addr = (unsigned long)xen_debug;
	else if (addr == (unsigned long)int3)
		addr = (unsigned long)xen_int3;
	else if (addr == (unsigned long)stack_segment)
		addr = (unsigned long)xen_stack_segment;
	else if (addr == (unsigned long)double_fault) {
		/* Don't need to handle these */
		return 0;
#ifdef CONFIG_X86_MCE
	} else if (addr == (unsigned long)machine_check) {
		/*
		 * When the Xen hypervisor injects a vMCE into the
		 * guest, use the native MCE handler to handle it.
		 */
		;
#endif
	} else if (addr == (unsigned long)nmi)
		/*
		 * Use the native version as well.
		 */
		;
	else {
		/* Some other trap using IST? */
		if (WARN_ON(val->ist != 0))
			return 0;
	}
#endif	/* CONFIG_X86_64 */
	info->address = addr;

	info->cs = gate_segment(*val);
	info->flags = val->dpl;
	/* interrupt gates clear IF */
	if (val->type == GATE_INTERRUPT)
		info->flags |= 1 << 2;

	return 1;
}
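
/*
 * Per the Xen trap_info ABI, the low two bits of info->flags carry
 * the gate's privilege level (DPL), and bit 2 asks the hypervisor to
 * disable event delivery on entry - the PV analogue of a hardware
 * interrupt gate clearing IF, which is what the code above encodes.
 */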

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/*
 * Set an IDT entry. If the entry is part of the current IDT, then
 * also update Xen.
 */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	trace_xen_cpu_write_idt_entry(dt, entrynum, g);

	preempt_disable();

	start = __this_cpu_read(idt_desc.address);
	end = start + __this_cpu_read(idt_desc.size) + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, g, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}

static void xen_convert_trap_info(const struct desc_ptr *desc,
				  struct trap_info *traps)
{
	unsigned in, out, count;

	count = (desc->size+1) / sizeof(gate_desc);
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		gate_desc *entry = (gate_desc *)(desc->address) + in;

		if (cvt_gate_to_trap(in, entry, &traps[out]))
			out++;
	}
	traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);

	xen_convert_trap_info(desc, traps);
}

/*
 * Load a new IDT into Xen. In principle this can be per-CPU, so we
 * hold a spinlock to protect the static traps[] array (static because
 * it avoids allocation, and saves stack space).
 */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	trace_xen_cpu_load_idt(desc);

	spin_lock(&lock);

	memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/*
 * Write a GDT descriptor entry. Ignore LDT descriptors, since
 * they're handled differently.
 */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}
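
/*
 * LDT and TSS descriptors are ignored above because a PV guest never
 * points its GDT at its own LDT or TSS: those go to Xen through
 * dedicated interfaces instead (MMUEXT_SET_LDT via xen_set_ldt(), and
 * the stack_switch hypercall via xen_load_sp0() below).
 */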

/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			dt[entry] = *(struct desc_struct *)desc;
	}

	}
}

static void xen_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
	tss->x86_tss.sp0 = thread->sp0;
}

void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}

static void xen_clts(void)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{
	unsigned long cr0 = this_cpu_read(xen_cr0_value);

	if (unlikely(cr0 == 0)) {
		cr0 = native_read_cr0();
		this_cpu_write(xen_cr0_value, cr0);
	}

	return cr0;
}

static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	this_cpu_write(xen_cr0_value, cr0);

	/*
	 * Only pay attention to cr0.TS; everything else is
	 * ignored.
	 */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_cr4(unsigned long cr4)
{
	cr4 &= ~X86_CR4_PGE;
	cr4 &= ~X86_CR4_PSE;

	native_write_cr4(cr4);
}

#ifdef CONFIG_X86_64
static inline unsigned long xen_read_cr8(void)
{
	return 0;
}

static inline void xen_write_cr8(unsigned long val)
{
	BUG_ON(val);
}
#endif

static u64 xen_read_msr_safe(unsigned int msr, int *err)
{
	u64 val;

	val = native_read_msr_safe(msr, err);
	switch (msr) {
	case MSR_IA32_APICBASE:
#ifdef CONFIG_X86_X2APIC
		if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
#endif
			val &= ~X2APIC_ENABLE;
		break;
	}
	return val;
}

static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
	int ret;

	ret = 0;

	switch (msr) {
#ifdef CONFIG_X86_64
		unsigned which;
		u64 base;

	case MSR_FS_BASE:		which = SEGBASE_FS; goto set;
	case MSR_KERNEL_GS_BASE:	which = SEGBASE_GS_USER; goto set;
	case MSR_GS_BASE:		which = SEGBASE_GS_KERNEL; goto set;

	set:
		base = ((u64)high << 32) | low;
		if (HYPERVISOR_set_segment_base(which, base) != 0)
			ret = -EIO;
		break;
#endif

	case MSR_STAR:
	case MSR_CSTAR:
	case MSR_LSTAR:
	case MSR_SYSCALL_MASK:
	case MSR_IA32_SYSENTER_CS:
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
		/*
		 * Fast syscall setup is all done in hypercalls, so
		 * these are all ignored. Stub them out here to stop
		 * Xen console noise.
		 */
		break;

	default:
		ret = native_write_msr_safe(msr, low, high);
	}

	return ret;
}
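
/*
 * In other words: a PV guest's wrmsr to MSR_FS_BASE, MSR_GS_BASE or
 * MSR_KERNEL_GS_BASE is turned into a set_segment_base hypercall,
 * since the real MSRs are only writable by the hypervisor. Anything
 * unrecognised still goes through native_write_msr_safe(), which will
 * simply report an error if Xen does not permit the write.
 */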

void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}

/* This is called once we have the cpu_possible_mask */
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);

	/*
	 * xen_vcpu_setup managed to place the vcpu_info within the
	 * percpu area for all cpus, so make use of it. Note that for
	 * PVH we want to use the native IRQ mechanism.
	 */
	if (have_vcpu_info_placement && !xen_pvh_domain()) {
		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
}

static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;

#define SITE(op, x)							\
	case PARAVIRT_PATCH(op.x):					\
	if (have_vcpu_info_placement) {					\
		start = (char *)xen_##x##_direct;			\
		end = xen_##x##_direct_end;				\
		reloc = xen_##x##_direct_reloc;				\
	}								\
	goto patch_site

	switch (type) {
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/*
		 * Note: because reloc is assigned from something that
		 * appears to be an array, gcc assumes it's non-null,
		 * but doesn't know its relationship with start and
		 * end.
		 */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}
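
/*
 * The net effect of xen_patch(): when vcpu_info placement worked, the
 * four hot irq ops are not left as indirect calls at all - the bodies
 * of the xen_*_direct stubs (a few instructions poking the per-cpu
 * vcpu_info through the percpu segment) are copied inline over each
 * call site, with any embedded address fixed up by the reloc
 * adjustment above.
 */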

static const struct pv_info xen_info __initconst = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

#ifdef CONFIG_X86_64
	.extra_user_64bit_cs = FLAT_USER_CS64,
#endif

	.name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initconst = {
	.patch = xen_patch,
};

static const struct pv_cpu_ops xen_cpu_ops __initconst = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = xen_clts,

	.read_cr0 = xen_read_cr0,
	.write_cr0 = xen_write_cr0,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

#ifdef CONFIG_X86_64
	.read_cr8 = xen_read_cr8,
	.write_cr8 = xen_write_cr8,
#endif

	.wbinvd = native_wbinvd,

	.read_msr = xen_read_msr_safe,
	.write_msr = xen_write_msr_safe,

	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	.read_tscp = native_read_tscp,

	.iret = xen_iret,
	.irq_enable_sysexit = xen_sysexit,
#ifdef CONFIG_X86_64
	.usergs_sysret32 = xen_sysret32,
	.usergs_sysret64 = xen_sysret64,
#endif

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = xen_load_gs_index,
#endif

	.alloc_ldt = xen_alloc_ldt,
	.free_ldt = xen_free_ldt,

	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_sp0 = xen_load_sp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	/* Xen takes care of %gs when switching to usermode for us */
	.swapgs = paravirt_nop,

	.start_context_switch = paravirt_start_context_switch,
	.end_context_switch = xen_end_context_switch,
};

static const struct pv_apic_ops xen_apic_ops __initconst = {
#ifdef CONFIG_X86_LOCAL_APIC
	.startup_ipi_hook = paravirt_nop,
#endif
};

static void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
		BUG();
}

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	xen_reboot(SHUTDOWN_crash);
	return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
	.notifier_call = xen_panic_event,
	.priority = INT_MIN
};

int xen_panic_handler_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
	return 0;
}

static const struct machine_ops xen_machine_ops __initconst = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_power_off,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};

static unsigned char xen_get_nmi_reason(void)
{
	unsigned char reason = 0;

	/* Construct a value which looks like it came from port 0x61. */
	if (test_bit(_XEN_NMIREASON_io_error,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_IOCHK;
	if (test_bit(_XEN_NMIREASON_pci_serr,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_SERR;

	return reason;
}

static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
	struct xen_platform_op op;
	struct edd_info *edd_info;
	u32 *mbr_signature;
	unsigned nr;
	int ret;

	edd_info = boot_params.eddbuf;
	mbr_signature = boot_params.edd_mbr_sig_buffer;

	op.cmd = XENPF_firmware_info;

	op.u.firmware_info.type = XEN_FW_DISK_INFO;
	for (nr = 0; nr < EDDMAXNR; nr++) {
		struct edd_info *info = edd_info + nr;

		op.u.firmware_info.index = nr;
		info->params.length = sizeof(info->params);
		set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
				     &info->params);
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			break;

#define C(x) info->x = op.u.firmware_info.u.disk_info.x
		C(device);
		C(version);
		C(interface_support);
		C(legacy_max_cylinder);
		C(legacy_max_head);
		C(legacy_sectors_per_track);
#undef C
	}
	boot_params.eddbuf_entries = nr;

	op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
	for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
		op.u.firmware_info.index = nr;
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			break;
		mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
	}
	boot_params.edd_mbr_sig_buf_entries = nr;
#endif
}
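
/*
 * The loops above replay what a BIOS boot would have left in
 * boot_params: since a PV dom0 never ran the real firmware entry
 * points, the EDD disk data and MBR signatures are fetched from the
 * hypervisor (which did see the firmware) one index at a time, until
 * XENPF_firmware_info reports an error for the next index.
 */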

/*
 * Set up the GDT and segment registers for -fstack-protector. Until
 * we do this, we have to be careful not to call any stack-protected
 * function, which is most of the kernel.
 *
 * Note that it is __ref because the only caller of this after init
 * is PVH, which is not going to use xen_load_gdt_boot or other
 * __init functions.
 */
static void __ref xen_setup_gdt(int cpu)
{
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_X86_64
		unsigned long dummy;

		load_percpu_segment(cpu); /* We need to access per-cpu area */
		switch_to_new_gdt(cpu); /* GDT and GS set */

		/*
		 * We are switching from the Xen provided GDT to our HVM mode
		 * GDT. The new GDT has __KERNEL_CS with CS.L = 1
		 * and we are jumping to reload it.
		 */
		asm volatile ("pushq %0\n"
			      "leaq 1f(%%rip),%0\n"
			      "pushq %0\n"
			      "lretq\n"
			      "1:\n"
			      : "=&r" (dummy) : "0" (__KERNEL_CS));

		/*
		 * While not needed, we also set the %es, %ds, and %fs
		 * to zero. We don't care about %ss as it is NULL.
		 * Strictly speaking this is not needed, as Xen zeros those
		 * out (and also MSR_FS_BASE, MSR_GS_BASE, MSR_KERNEL_GS_BASE).
		 *
		 * Linux zeros them in cpu_init() and in secondary_startup_64
		 * (for BSP).
		 */
		loadsegment(es, 0);
		loadsegment(ds, 0);
		loadsegment(fs, 0);
#else
		/* PVH: TODO Implement. */
		BUG();
#endif
		return; /* PVH does not need any PV GDT ops. */
	}
	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
	pv_cpu_ops.load_gdt = xen_load_gdt_boot;

	setup_stack_canary_segment(0);
	switch_to_new_gdt(0);

	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
	pv_cpu_ops.load_gdt = xen_load_gdt;
}
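
/*
 * Note the temporary op swap at the end of xen_setup_gdt(): the boot
 * variants are installed only around switch_to_new_gdt(), because at
 * this point the GDT is still mapped exactly once at its linear
 * address, so the simpler __init loaders both suffice and are what is
 * wanted; the normal xen_load_gdt/xen_write_gdt_entry are restored
 * immediately afterwards.
 */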

#ifdef CONFIG_XEN_PVH
/*
 * A PV guest starts with default flags that are not set for PVH, set
 * them here asap.
 */
static void xen_pvh_set_cr_flags(int cpu)
{
	/*
	 * Some of these are set up in 'secondary_startup_64'. The others:
	 * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
	 * (with which PVH shares codepaths), while X86_CR0_PG is for PVH.
	 */
	write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);

	if (!cpu)
		return;
	/*
	 * For the BSP, PSE and PGE are set in probe_page_size_mask(); for
	 * APs, set them here. For all CPUs, OSFXSR and OSXMMEXCPT are set
	 * in fpu_init.
	 */
	if (cpu_has_pse)
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	if (cpu_has_pge)
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
}

/*
 * Note that it is __ref because the only caller of this after init
 * is PVH, which is not going to use xen_load_gdt_boot or other
 * __init functions.
 */
void __ref xen_pvh_secondary_vcpu_init(int cpu)
{
	xen_setup_gdt(cpu);
	xen_pvh_set_cr_flags(cpu);
}

static void __init xen_pvh_early_guest_init(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		return;

	if (!xen_feature(XENFEAT_hvm_callback_vector))
		return;

	xen_have_vector_callback = 1;

	xen_pvh_early_cpu_init(0, false);
	xen_pvh_set_cr_flags(0);

#ifdef CONFIG_X86_32
	BUG(); /* PVH: Implement proper support. */
#endif
}
#endif    /* CONFIG_XEN_PVH */

/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{
	struct physdev_set_iopl set_iopl;
	unsigned long initrd_start = 0;
	int rc;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	xen_setup_features();
#ifdef CONFIG_XEN_PVH
	xen_pvh_early_guest_init();
#endif
	xen_setup_machphys_mapping();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_apic_ops = xen_apic_ops;
	if (!xen_pvh_domain()) {
		pv_cpu_ops = xen_cpu_ops;

		x86_platform.get_nmi_reason = xen_get_nmi_reason;
	}

	if (xen_feature(XENFEAT_auto_translated_physmap))
		x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
	else
		x86_init.resources.memory_setup = xen_memory_setup;
	x86_init.oem.arch_setup = xen_arch_setup;
	x86_init.oem.banner = xen_banner;

	xen_init_time_ops();

	/*
	 * Set up some pagetable state before starting to set any ptes.
	 */
	xen_init_mmu_ops();

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;

	/*
	 * Prevent page tables from being allocated in highmem, even
	 * if CONFIG_HIGHPTE is enabled.
	 */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

	/* Work out if we support NX */
	x86_configure_nx();

	/* Get mfn list */
	xen_build_dynamic_phys_to_machine();

	/*
	 * Set up kernel GDT and segment registers, mainly so that
	 * -fstack-protector code can be executed.
	 */
	xen_setup_gdt(0);

	xen_init_irq_ops();
	xen_init_cpuid_mask();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Set up the basic apic ops.
	 */
	xen_init_apic();
#endif

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * The pages we get from Xen are not related to machine pages, so
	 * any NUMA information the kernel tries to get from ACPI will
	 * be meaningless. Prevent it from trying.
	 */
	acpi_numa = -1;
#endif
	/*
	 * Don't do the full vcpu_info placement stuff until we have a
	 * possible map and a non-dummy shared_info.
	 */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	local_irq_disable();
	early_boot_irqs_disabled = true;

	xen_raw_console_write("mapping kernel into physical memory\n");
	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
				   xen_start_info->nr_pages);

	/*
	 * Modify the cache mode translation tables to match Xen's PAT
	 * configuration.
	 */
	pat_init_cache_modes();

	/* keep using Xen gdt for now; no urgent need to change it */

#ifdef CONFIG_X86_32
	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;
#else
	pv_info.kernel_rpl = 0;
#endif
	/* set the limit of our address space */
	xen_reserve_top();

	/* PVH: runs at default kernel iopl of 0 */
	if (!xen_pvh_domain()) {
		/*
		 * We used to do this in xen_arch_setup, but that is too late
		 * on AMD, where early_cpu_init (run before ->arch_setup())
		 * calls early_amd_init, which pokes the 0xcf8 port.
		 */
1642 */ 1643 set_iopl.iopl = 1; 1644 rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); 1645 if (rc != 0) 1646 xen_raw_printk("physdev_op failed %d\n", rc); 1647 } 1648 1649#ifdef CONFIG_X86_32 1650 /* set up basic CPUID stuff */ 1651 cpu_detect(&new_cpu_data); 1652 set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU); 1653 new_cpu_data.wp_works_ok = 1; 1654 new_cpu_data.x86_capability[0] = cpuid_edx(1); 1655#endif 1656 1657 if (xen_start_info->mod_start) { 1658 if (xen_start_info->flags & SIF_MOD_START_PFN) 1659 initrd_start = PFN_PHYS(xen_start_info->mod_start); 1660 else 1661 initrd_start = __pa(xen_start_info->mod_start); 1662 } 1663 1664 /* Poke various useful things into boot_params */ 1665 boot_params.hdr.type_of_loader = (9 << 4) | 0; 1666 boot_params.hdr.ramdisk_image = initrd_start; 1667 boot_params.hdr.ramdisk_size = xen_start_info->mod_len; 1668 boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line); 1669 1670 if (!xen_initial_domain()) { 1671 add_preferred_console("xenboot", 0, NULL); 1672 add_preferred_console("tty", 0, NULL); 1673 add_preferred_console("hvc", 0, NULL); 1674 if (pci_xen) 1675 x86_init.pci.arch_init = pci_xen_init; 1676 } else { 1677 const struct dom0_vga_console_info *info = 1678 (void *)((char *)xen_start_info + 1679 xen_start_info->console.dom0.info_off); 1680 struct xen_platform_op op = { 1681 .cmd = XENPF_firmware_info, 1682 .interface_version = XENPF_INTERFACE_VERSION, 1683 .u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS, 1684 }; 1685 1686 xen_init_vga(info, xen_start_info->console.dom0.info_size); 1687 xen_start_info->console.domU.mfn = 0; 1688 xen_start_info->console.domU.evtchn = 0; 1689 1690 if (HYPERVISOR_dom0_op(&op) == 0) 1691 boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags; 1692 1693 /* Make sure ACS will be enabled */ 1694 pci_request_acs(); 1695 1696 xen_acpi_sleep_register(); 1697 1698 /* Avoid searching for BIOS MP tables */ 1699 x86_init.mpparse.find_smp_config = x86_init_noop; 1700 x86_init.mpparse.get_smp_config = x86_init_uint_noop; 1701 1702 xen_boot_params_init_edd(); 1703 } 1704#ifdef CONFIG_PCI 1705 /* PCI BIOS service won't work from a PV guest. */ 1706 pci_probe &= ~PCI_PROBE_BIOS; 1707#endif 1708 xen_raw_console_write("about to get started...\n"); 1709 1710 xen_setup_runstate_info(0); 1711 1712 xen_efi_init(); 1713 1714 /* Start the world */ 1715#ifdef CONFIG_X86_32 1716 i386_start_kernel(); 1717#else 1718 cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */ 1719 x86_64_start_reservations((char *)__pa_symbol(&boot_params)); 1720#endif 1721} 1722 1723void __ref xen_hvm_init_shared_info(void) 1724{ 1725 int cpu; 1726 struct xen_add_to_physmap xatp; 1727 static struct shared_info *shared_info_page = 0; 1728 1729 if (!shared_info_page) 1730 shared_info_page = (struct shared_info *) 1731 extend_brk(PAGE_SIZE, PAGE_SIZE); 1732 xatp.domid = DOMID_SELF; 1733 xatp.idx = 0; 1734 xatp.space = XENMAPSPACE_shared_info; 1735 xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT; 1736 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) 1737 BUG(); 1738 1739 HYPERVISOR_shared_info = (struct shared_info *)shared_info_page; 1740 1741 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info 1742 * page, we use it in the event channel upcall and in some pvclock 1743 * related functions. We don't need the vcpu_info placement 1744 * optimizations because we don't use any pv_mmu or pv_irq op on 1745 * HVM. 
	 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
	 * online, but xen_hvm_init_shared_info is run at resume time too,
	 * and in that case multiple vcpus might be online.
	 */
	for_each_online_cpu(cpu) {
		/* Leave it to be NULL. */
		if (cpu >= MAX_VIRT_CPUS)
			continue;
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
	}
}

#ifdef CONFIG_XEN_PVHVM
static void __init init_hvm_pv_info(void)
{
	int major, minor;
	uint32_t eax, ebx, ecx, edx, pages, msr, base;
	u64 pfn;

	base = xen_cpuid_base();
	cpuid(base + 1, &eax, &ebx, &ecx, &edx);

	major = eax >> 16;
	minor = eax & 0xffff;
	printk(KERN_INFO "Xen version %d.%d.\n", major, minor);

	cpuid(base + 2, &pages, &msr, &ecx, &edx);

	pfn = __pa(hypercall_page);
	wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));

	xen_setup_features();

	pv_info.name = "Xen HVM";

	xen_domain_type = XEN_HVM_DOMAIN;
}

static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
			      void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		xen_vcpu_setup(cpu);
		if (xen_have_vector_callback) {
			if (xen_feature(XENFEAT_hvm_safe_pvclock))
				xen_setup_timer(cpu);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block xen_hvm_cpu_notifier = {
	.notifier_call = xen_hvm_cpu_notify,
};

#ifdef CONFIG_KEXEC
static void xen_hvm_shutdown(void)
{
	native_machine_shutdown();
	if (kexec_in_progress)
		xen_reboot(SHUTDOWN_soft_reset);
}

static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
	native_machine_crash_shutdown(regs);
	xen_reboot(SHUTDOWN_soft_reset);
}
#endif

static void __init xen_hvm_guest_init(void)
{
	if (xen_pv_domain())
		return;

	init_hvm_pv_info();

	xen_hvm_init_shared_info();

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;
	xen_hvm_smp_init();
	register_cpu_notifier(&xen_hvm_cpu_notifier);
	xen_unplug_emulated_devices();
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();
#ifdef CONFIG_KEXEC
	machine_ops.shutdown = xen_hvm_shutdown;
	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}
#endif

static bool xen_nopv = false;
static __init int xen_parse_nopv(char *arg)
{
	xen_nopv = true;
	return 0;
}
early_param("xen_nopv", xen_parse_nopv);

static uint32_t __init xen_platform(void)
{
	if (xen_nopv)
		return 0;

	return xen_cpuid_base();
}

bool xen_hvm_need_lapic(void)
{
	if (xen_nopv)
		return false;
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);

static void xen_set_cpu_features(struct cpuinfo_x86 *c)
{
	if (xen_pv_domain())
		clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

const struct hypervisor_x86 x86_hyper_xen = {
	.name = "Xen",
	.detect = xen_platform,
#ifdef CONFIG_XEN_PVHVM
	.init_platform = xen_hvm_guest_init,
#endif
	.x2apic_available = xen_x2apic_para_available,
	.set_cpu_features = xen_set_cpu_features,
};
EXPORT_SYMBOL(x86_hyper_xen);