root/arch/x86/kernel/kvm.c

DEFINITIONS

This source file includes the following definitions:
  1. parse_no_kvmapf
  2. parse_no_stealacc
  3. kvm_io_delay
  4. _find_apf_task
  5. kvm_async_pf_task_wait
  6. apf_task_wake_one
  7. apf_task_wake_all
  8. kvm_async_pf_task_wake
  9. kvm_read_and_reset_pf_reason
  10. do_async_page_fault
  11. paravirt_ops_setup
  12. kvm_register_steal_time
  13. kvm_guest_apic_eoi_write
  14. kvm_guest_cpu_init
  15. kvm_pv_disable_apf
  16. kvm_pv_guest_cpu_reboot
  17. kvm_pv_reboot_notify
  18. kvm_steal_clock
  19. kvm_disable_steal_time
  20. __set_percpu_decrypted
  21. sev_map_percpu_data
  22. __send_ipi_mask
  23. kvm_send_ipi_mask
  24. kvm_send_ipi_mask_allbutself
  25. kvm_setup_pv_ipi
  26. kvm_smp_send_call_func_ipi
  27. kvm_smp_prepare_cpus
  28. kvm_smp_prepare_boot_cpu
  29. kvm_guest_cpu_offline
  30. kvm_cpu_online
  31. kvm_cpu_down_prepare
  32. kvm_apf_trap_init
  33. kvm_flush_tlb_others
  34. kvm_guest_init
  35. __kvm_cpuid_base
  36. kvm_cpuid_base
  37. kvm_para_available
  38. kvm_arch_para_features
  39. kvm_arch_para_hints
  40. kvm_detect
  41. kvm_apic_init
  42. kvm_init_platform
  43. activate_jump_labels
  44. kvm_setup_pv_tlb_flush
  45. kvm_kick_cpu
  46. kvm_wait
  47. __kvm_vcpu_is_preempted
  48. kvm_spinlock_init
  49. kvm_disable_host_haltpoll
  50. kvm_enable_host_haltpoll
  51. arch_haltpoll_enable
  52. arch_haltpoll_disable

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * KVM paravirt_ops implementation
   4  *
   5  * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   6  * Copyright IBM Corporation, 2007
   7  *   Authors: Anthony Liguori <aliguori@us.ibm.com>
   8  */
   9 
  10 #include <linux/context_tracking.h>
  11 #include <linux/init.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kvm_para.h>
  14 #include <linux/cpu.h>
  15 #include <linux/mm.h>
  16 #include <linux/highmem.h>
  17 #include <linux/hardirq.h>
  18 #include <linux/notifier.h>
  19 #include <linux/reboot.h>
  20 #include <linux/hash.h>
  21 #include <linux/sched.h>
  22 #include <linux/slab.h>
  23 #include <linux/kprobes.h>
  24 #include <linux/debugfs.h>
  25 #include <linux/nmi.h>
  26 #include <linux/swait.h>
  27 #include <asm/timer.h>
  28 #include <asm/cpu.h>
  29 #include <asm/traps.h>
  30 #include <asm/desc.h>
  31 #include <asm/tlbflush.h>
  32 #include <asm/apic.h>
  33 #include <asm/apicdef.h>
  34 #include <asm/hypervisor.h>
  35 #include <asm/tlb.h>
  36 
  37 static int kvmapf = 1;
  38 
  39 static int __init parse_no_kvmapf(char *arg)
  40 {
  41         kvmapf = 0;
  42         return 0;
  43 }
  44 
  45 early_param("no-kvmapf", parse_no_kvmapf);
  46 
  47 static int steal_acc = 1;
  48 static int __init parse_no_stealacc(char *arg)
  49 {
  50         steal_acc = 0;
  51         return 0;
  52 }
  53 
  54 early_param("no-steal-acc", parse_no_stealacc);
  55 
  56 static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
  57 DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
  58 static int has_steal_clock = 0;
  59 
  60 /*
  61  * No need for any "IO delay" on KVM
  62  */
  63 static void kvm_io_delay(void)
  64 {
  65 }
  66 
  67 #define KVM_TASK_SLEEP_HASHBITS 8
  68 #define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
  69 
  70 struct kvm_task_sleep_node {
  71         struct hlist_node link;
  72         struct swait_queue_head wq;
  73         u32 token;
  74         int cpu;
  75         bool halted;
  76 };
  77 
  78 static struct kvm_task_sleep_head {
  79         raw_spinlock_t lock;
  80         struct hlist_head list;
  81 } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
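
      /*
       * Async PF rendezvous: a vCPU that faults on a page the host is still
       * paging in sleeps on a kvm_task_sleep_node hashed by the token the
       * host passed in CR2.  When the host later reports the page ready for
       * the same token, kvm_async_pf_task_wake() finds the node and wakes
       * the sleeper.  If the wake arrives before the waiter has queued
       * itself, a dummy node is left behind so the waiter returns at once.
       */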
  82 
  83 static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
  84                                                   u32 token)
  85 {
  86         struct hlist_node *p;
  87 
  88         hlist_for_each(p, &b->list) {
  89                 struct kvm_task_sleep_node *n =
  90                         hlist_entry(p, typeof(*n), link);
  91                 if (n->token == token)
  92                         return n;
  93         }
  94 
  95         return NULL;
  96 }
  97 
  98 /*
  99  * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 100  *                    (other than user space)?
 101  */
 102 void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
 103 {
 104         u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
 105         struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
 106         struct kvm_task_sleep_node n, *e;
 107         DECLARE_SWAITQUEUE(wait);
 108 
 109         rcu_irq_enter();
 110 
 111         raw_spin_lock(&b->lock);
 112         e = _find_apf_task(b, token);
 113         if (e) {
  114                 /* dummy entry exists -> wake up was delivered ahead of PF */
 115                 hlist_del(&e->link);
 116                 kfree(e);
 117                 raw_spin_unlock(&b->lock);
 118 
 119                 rcu_irq_exit();
 120                 return;
 121         }
 122 
 123         n.token = token;
 124         n.cpu = smp_processor_id();
 125         n.halted = is_idle_task(current) ||
 126                    (IS_ENABLED(CONFIG_PREEMPT_COUNT)
 127                     ? preempt_count() > 1 || rcu_preempt_depth()
 128                     : interrupt_kernel);
 129         init_swait_queue_head(&n.wq);
 130         hlist_add_head(&n.link, &b->list);
 131         raw_spin_unlock(&b->lock);
 132 
 133         for (;;) {
 134                 if (!n.halted)
 135                         prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
 136                 if (hlist_unhashed(&n.link))
 137                         break;
 138 
 139                 rcu_irq_exit();
 140 
 141                 if (!n.halted) {
 142                         local_irq_enable();
 143                         schedule();
 144                         local_irq_disable();
 145                 } else {
 146                         /*
 147                          * We cannot reschedule. So halt.
 148                          */
 149                         native_safe_halt();
 150                         local_irq_disable();
 151                 }
 152 
 153                 rcu_irq_enter();
 154         }
 155         if (!n.halted)
 156                 finish_swait(&n.wq, &wait);
 157 
 158         rcu_irq_exit();
 159         return;
 160 }
 161 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
 162 
 163 static void apf_task_wake_one(struct kvm_task_sleep_node *n)
 164 {
 165         hlist_del_init(&n->link);
 166         if (n->halted)
 167                 smp_send_reschedule(n->cpu);
 168         else if (swq_has_sleeper(&n->wq))
 169                 swake_up_one(&n->wq);
 170 }
 171 
 172 static void apf_task_wake_all(void)
 173 {
 174         int i;
 175 
 176         for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
 177                 struct hlist_node *p, *next;
 178                 struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
 179                 raw_spin_lock(&b->lock);
 180                 hlist_for_each_safe(p, next, &b->list) {
 181                         struct kvm_task_sleep_node *n =
 182                                 hlist_entry(p, typeof(*n), link);
 183                         if (n->cpu == smp_processor_id())
 184                                 apf_task_wake_one(n);
 185                 }
 186                 raw_spin_unlock(&b->lock);
 187         }
 188 }
 189 
 190 void kvm_async_pf_task_wake(u32 token)
 191 {
 192         u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
 193         struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
 194         struct kvm_task_sleep_node *n;
 195 
 196         if (token == ~0) {
 197                 apf_task_wake_all();
 198                 return;
 199         }
 200 
 201 again:
 202         raw_spin_lock(&b->lock);
 203         n = _find_apf_task(b, token);
 204         if (!n) {
 205                 /*
 206                  * async PF was not yet handled.
 207                  * Add dummy entry for the token.
 208                  */
 209                 n = kzalloc(sizeof(*n), GFP_ATOMIC);
 210                 if (!n) {
 211                         /*
 212                          * Allocation failed! Busy wait while other cpu
 213                          * handles async PF.
 214                          */
 215                         raw_spin_unlock(&b->lock);
 216                         cpu_relax();
 217                         goto again;
 218                 }
 219                 n->token = token;
 220                 n->cpu = smp_processor_id();
 221                 init_swait_queue_head(&n->wq);
 222                 hlist_add_head(&n->link, &b->list);
 223         } else
 224                 apf_task_wake_one(n);
 225         raw_spin_unlock(&b->lock);
 226         return;
 227 }
 228 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
 229 
 230 u32 kvm_read_and_reset_pf_reason(void)
 231 {
 232         u32 reason = 0;
 233 
 234         if (__this_cpu_read(apf_reason.enabled)) {
 235                 reason = __this_cpu_read(apf_reason.reason);
 236                 __this_cpu_write(apf_reason.reason, 0);
 237         }
 238 
 239         return reason;
 240 }
 241 EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
 242 NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
 243 
 244 dotraplinkage void
 245 do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 246 {
 247         enum ctx_state prev_state;
 248 
 249         switch (kvm_read_and_reset_pf_reason()) {
 250         default:
 251                 do_page_fault(regs, error_code, address);
 252                 break;
 253         case KVM_PV_REASON_PAGE_NOT_PRESENT:
 254                 /* page is swapped out by the host. */
 255                 prev_state = exception_enter();
 256                 kvm_async_pf_task_wait((u32)address, !user_mode(regs));
 257                 exception_exit(prev_state);
 258                 break;
 259         case KVM_PV_REASON_PAGE_READY:
 260                 rcu_irq_enter();
 261                 kvm_async_pf_task_wake((u32)address);
 262                 rcu_irq_exit();
 263                 break;
 264         }
 265 }
 266 NOKPROBE_SYMBOL(do_async_page_fault);
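
      /*
       * Async page faults are delivered through the regular #PF vector (see
       * kvm_apf_trap_init()).  The per-cpu apf_reason area, registered in
       * kvm_guest_cpu_init() via MSR_KVM_ASYNC_PF_EN, tells the handler
       * whether this is a real page fault (reason 0, forwarded to
       * do_page_fault()) or an async PF notification, in which case CR2
       * carries a token rather than a fault address: PAGE_NOT_PRESENT puts
       * the task to sleep, PAGE_READY wakes it.
       */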
 267 
 268 static void __init paravirt_ops_setup(void)
 269 {
 270         pv_info.name = "KVM";
 271 
 272         if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
 273                 pv_ops.cpu.io_delay = kvm_io_delay;
 274 
 275 #ifdef CONFIG_X86_IO_APIC
 276         no_timer_check = 1;
 277 #endif
 278 }
 279 
 280 static void kvm_register_steal_time(void)
 281 {
 282         int cpu = smp_processor_id();
 283         struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
 284 
 285         if (!has_steal_clock)
 286                 return;
 287 
 288         wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
 289         pr_info("kvm-stealtime: cpu %d, msr %llx\n",
 290                 cpu, (unsigned long long) slow_virt_to_phys(st));
 291 }
 292 
 293 static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
 294 
 295 static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
 296 {
 297         /**
 298          * This relies on __test_and_clear_bit to modify the memory
 299          * in a way that is atomic with respect to the local CPU.
 300          * The hypervisor only accesses this memory from the local CPU so
 301          * there's no need for lock or memory barriers.
 302          * An optimization barrier is implied in apic write.
 303          */
 304         if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
 305                 return;
 306         apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
 307 }
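
      /*
       * PV EOI protocol: before injecting an interrupt that needs no
       * EOI-time action on its side, the host sets KVM_PV_EOI_BIT in the
       * per-cpu kvm_apic_eoi word registered via MSR_KVM_PV_EOI_EN.  The
       * atomic test-and-clear above then completes the EOI without an APIC
       * register access; only if the bit was clear do we fall back to the
       * real EOI write.
       */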
 308 
 309 static void kvm_guest_cpu_init(void)
 310 {
 311         if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
 312                 u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
 313 
 314 #ifdef CONFIG_PREEMPTION
 315                 pa |= KVM_ASYNC_PF_SEND_ALWAYS;
 316 #endif
 317                 pa |= KVM_ASYNC_PF_ENABLED;
 318 
 319                 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
 320                         pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
 321 
 322                 wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
 323                 __this_cpu_write(apf_reason.enabled, 1);
 324                 printk(KERN_INFO"KVM setup async PF for cpu %d\n",
 325                        smp_processor_id());
 326         }
 327 
 328         if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
 329                 unsigned long pa;
 330                 /* Size alignment is implied but just to make it explicit. */
 331                 BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
 332                 __this_cpu_write(kvm_apic_eoi, 0);
 333                 pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
 334                         | KVM_MSR_ENABLED;
 335                 wrmsrl(MSR_KVM_PV_EOI_EN, pa);
 336         }
 337 
 338         if (has_steal_clock)
 339                 kvm_register_steal_time();
 340 }
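
      /*
       * Each per-cpu PV feature above is armed by writing the guest
       * physical address of its per-cpu area, with the low bits used as
       * flags, to a dedicated MSR: MSR_KVM_ASYNC_PF_EN (bit 0 enable,
       * bit 1 "send always", bit 2 "deliver as #PF vmexit"), and
       * MSR_KVM_PV_EOI_EN / MSR_KVM_STEAL_TIME (bit 0 = KVM_MSR_ENABLED).
       * Writing 0 disarms a feature again, see kvm_guest_cpu_offline().
       */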
 341 
 342 static void kvm_pv_disable_apf(void)
 343 {
 344         if (!__this_cpu_read(apf_reason.enabled))
 345                 return;
 346 
 347         wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
 348         __this_cpu_write(apf_reason.enabled, 0);
 349 
 350         printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
 351                smp_processor_id());
 352 }
 353 
 354 static void kvm_pv_guest_cpu_reboot(void *unused)
 355 {
 356         /*
 357          * We disable PV EOI before we load a new kernel by kexec,
 358          * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
 359          * New kernel can re-enable when it boots.
 360          */
 361         if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 362                 wrmsrl(MSR_KVM_PV_EOI_EN, 0);
 363         kvm_pv_disable_apf();
 364         kvm_disable_steal_time();
 365 }
 366 
 367 static int kvm_pv_reboot_notify(struct notifier_block *nb,
 368                                 unsigned long code, void *unused)
 369 {
 370         if (code == SYS_RESTART)
 371                 on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
 372         return NOTIFY_DONE;
 373 }
 374 
 375 static struct notifier_block kvm_pv_reboot_nb = {
 376         .notifier_call = kvm_pv_reboot_notify,
 377 };
 378 
 379 static u64 kvm_steal_clock(int cpu)
 380 {
 381         u64 steal;
 382         struct kvm_steal_time *src;
 383         int version;
 384 
 385         src = &per_cpu(steal_time, cpu);
 386         do {
 387                 version = src->version;
 388                 virt_rmb();
 389                 steal = src->steal;
 390                 virt_rmb();
 391         } while ((version & 1) || (version != src->version));
 392 
 393         return steal;
 394 }
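
      /*
       * steal_time.version is a sequence counter: the host increments it
       * before and after updating the record, so an odd value means an
       * update is in progress and a changed value means the snapshot is
       * torn.  The loop above retries until it sees an even, unchanged
       * version, e.g. version 8 -> read steal -> version still 8 yields a
       * consistent steal value.
       */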
 395 
 396 void kvm_disable_steal_time(void)
 397 {
 398         if (!has_steal_clock)
 399                 return;
 400 
 401         wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
 402 }
 403 
 404 static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
 405 {
 406         early_set_memory_decrypted((unsigned long) ptr, size);
 407 }
 408 
 409 /*
  410  * Iterate through all possible CPUs and map the memory regions pointed
  411  * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 412  *
 413  * Note: we iterate through all possible CPUs to ensure that CPUs
 414  * hotplugged will have their per-cpu variable already mapped as
 415  * decrypted.
 416  */
 417 static void __init sev_map_percpu_data(void)
 418 {
 419         int cpu;
 420 
 421         if (!sev_active())
 422                 return;
 423 
 424         for_each_possible_cpu(cpu) {
 425                 __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
 426                 __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
 427                 __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
 428         }
 429 }
 430 
 431 #ifdef CONFIG_SMP
 432 #define KVM_IPI_CLUSTER_SIZE    (2 * BITS_PER_LONG)
 433 
 434 static void __send_ipi_mask(const struct cpumask *mask, int vector)
 435 {
 436         unsigned long flags;
 437         int cpu, apic_id, icr;
 438         int min = 0, max = 0;
 439 #ifdef CONFIG_X86_64
 440         __uint128_t ipi_bitmap = 0;
 441 #else
 442         u64 ipi_bitmap = 0;
 443 #endif
 444         long ret;
 445 
 446         if (cpumask_empty(mask))
 447                 return;
 448 
 449         local_irq_save(flags);
 450 
 451         switch (vector) {
 452         default:
 453                 icr = APIC_DM_FIXED | vector;
 454                 break;
 455         case NMI_VECTOR:
 456                 icr = APIC_DM_NMI;
 457                 break;
 458         }
 459 
 460         for_each_cpu(cpu, mask) {
 461                 apic_id = per_cpu(x86_cpu_to_apicid, cpu);
 462                 if (!ipi_bitmap) {
 463                         min = max = apic_id;
 464                 } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
 465                         ipi_bitmap <<= min - apic_id;
 466                         min = apic_id;
 467                 } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
 468                         max = apic_id < max ? max : apic_id;
 469                 } else {
 470                         ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
 471                                 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
 472                         WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
 473                         min = max = apic_id;
 474                         ipi_bitmap = 0;
 475                 }
 476                 __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
 477         }
 478 
 479         if (ipi_bitmap) {
 480                 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
 481                         (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
 482                 WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
 483         }
 484 
 485         local_irq_restore(flags);
 486 }
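
      /*
       * KVM_HC_SEND_IPI takes the destination set as a bitmap of APIC IDs
       * relative to 'min': a0/a1 hold the low/high halves of the bitmap,
       * a2 is min and a3 the ICR value encoding vector and delivery mode.
       * One hypercall thus covers a window of KVM_IPI_CLUSTER_SIZE APIC
       * IDs, and the loop above flushes the current window and starts a
       * new one whenever a destination falls outside it.  For example,
       * sending vector 0xfb to APIC IDs 64..71 becomes
       * kvm_hypercall4(KVM_HC_SEND_IPI, 0xff, 0, 64, APIC_DM_FIXED | 0xfb).
       */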
 487 
 488 static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
 489 {
 490         __send_ipi_mask(mask, vector);
 491 }
 492 
 493 static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 494 {
 495         unsigned int this_cpu = smp_processor_id();
 496         struct cpumask new_mask;
 497         const struct cpumask *local_mask;
 498 
 499         cpumask_copy(&new_mask, mask);
 500         cpumask_clear_cpu(this_cpu, &new_mask);
 501         local_mask = &new_mask;
 502         __send_ipi_mask(local_mask, vector);
 503 }
 504 
 505 /*
 506  * Set the IPI entry points
 507  */
 508 static void kvm_setup_pv_ipi(void)
 509 {
 510         apic->send_IPI_mask = kvm_send_ipi_mask;
 511         apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
 512         pr_info("KVM setup pv IPIs\n");
 513 }
 514 
 515 static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
 516 {
 517         int cpu;
 518 
 519         native_send_call_func_ipi(mask);
 520 
 521         /* Make sure other vCPUs get a chance to run if they need to. */
 522         for_each_cpu(cpu, mask) {
 523                 if (vcpu_is_preempted(cpu)) {
 524                         kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
 525                         break;
 526                 }
 527         }
 528 }
 529 
 530 static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
 531 {
 532         native_smp_prepare_cpus(max_cpus);
 533         if (kvm_para_has_hint(KVM_HINTS_REALTIME))
 534                 static_branch_disable(&virt_spin_lock_key);
 535 }
 536 
 537 static void __init kvm_smp_prepare_boot_cpu(void)
 538 {
 539         /*
 540          * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
 541          * shares the guest physical address with the hypervisor.
 542          */
 543         sev_map_percpu_data();
 544 
 545         kvm_guest_cpu_init();
 546         native_smp_prepare_boot_cpu();
 547         kvm_spinlock_init();
 548 }
 549 
 550 static void kvm_guest_cpu_offline(void)
 551 {
 552         kvm_disable_steal_time();
 553         if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 554                 wrmsrl(MSR_KVM_PV_EOI_EN, 0);
 555         kvm_pv_disable_apf();
 556         apf_task_wake_all();
 557 }
 558 
 559 static int kvm_cpu_online(unsigned int cpu)
 560 {
 561         local_irq_disable();
 562         kvm_guest_cpu_init();
 563         local_irq_enable();
 564         return 0;
 565 }
 566 
 567 static int kvm_cpu_down_prepare(unsigned int cpu)
 568 {
 569         local_irq_disable();
 570         kvm_guest_cpu_offline();
 571         local_irq_enable();
 572         return 0;
 573 }
 574 #endif
 575 
 576 static void __init kvm_apf_trap_init(void)
 577 {
 578         update_intr_gate(X86_TRAP_PF, async_page_fault);
 579 }
 580 
 581 static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
 582 
 583 static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 584                         const struct flush_tlb_info *info)
 585 {
 586         u8 state;
 587         int cpu;
 588         struct kvm_steal_time *src;
 589         struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
 590 
 591         cpumask_copy(flushmask, cpumask);
  592         /*
  593          * We only need to flush vCPUs that are currently running;
  594          * preempted vCPUs get flush_on_enter queued instead.
  595          */
 596         for_each_cpu(cpu, flushmask) {
 597                 src = &per_cpu(steal_time, cpu);
 598                 state = READ_ONCE(src->preempted);
 599                 if ((state & KVM_VCPU_PREEMPTED)) {
 600                         if (try_cmpxchg(&src->preempted, &state,
 601                                         state | KVM_VCPU_FLUSH_TLB))
 602                                 __cpumask_clear_cpu(cpu, flushmask);
 603                 }
 604         }
 605 
 606         native_flush_tlb_others(flushmask, info);
 607 }
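
      /*
       * vCPUs the host currently has preempted do not need an IPI: setting
       * KVM_VCPU_FLUSH_TLB in their steal_time.preempted field asks the
       * host to flush that vCPU's TLB before it runs again, so the CPU can
       * be dropped from the IPI mask.  The cmpxchg guards against the host
       * clearing KVM_VCPU_PREEMPTED (the vCPU being scheduled back in)
       * between the READ_ONCE() and the update; if it fails, the CPU simply
       * keeps receiving the IPI via native_flush_tlb_others().
       */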
 608 
 609 static void __init kvm_guest_init(void)
 610 {
 611         int i;
 612 
 613         paravirt_ops_setup();
 614         register_reboot_notifier(&kvm_pv_reboot_nb);
 615         for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
 616                 raw_spin_lock_init(&async_pf_sleepers[i].lock);
 617         if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
 618                 x86_init.irqs.trap_init = kvm_apf_trap_init;
 619 
 620         if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
 621                 has_steal_clock = 1;
 622                 pv_ops.time.steal_clock = kvm_steal_clock;
 623         }
 624 
 625         if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
 626             !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 627             kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
 628                 pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
 629                 pv_ops.mmu.tlb_remove_table = tlb_remove_table;
 630         }
 631 
 632         if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 633                 apic_set_eoi_write(kvm_guest_apic_eoi_write);
 634 
 635 #ifdef CONFIG_SMP
 636         smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
 637         smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
 638         if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
 639             !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 640             kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
 641                 smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
 642                 pr_info("KVM setup pv sched yield\n");
 643         }
 644         if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
 645                                       kvm_cpu_online, kvm_cpu_down_prepare) < 0)
 646                 pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
 647 #else
 648         sev_map_percpu_data();
 649         kvm_guest_cpu_init();
 650 #endif
 651 
 652         /*
 653          * Hard lockup detection is enabled by default. Disable it, as guests
 654          * can get false positives too easily, for example if the host is
 655          * overcommitted.
 656          */
 657         hardlockup_detector_disable();
 658 }
 659 
 660 static noinline uint32_t __kvm_cpuid_base(void)
 661 {
 662         if (boot_cpu_data.cpuid_level < 0)
 663                 return 0;       /* So we don't blow up on old processors */
 664 
 665         if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 666                 return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
 667 
 668         return 0;
 669 }
 670 
 671 static inline uint32_t kvm_cpuid_base(void)
 672 {
 673         static int kvm_cpuid_base = -1;
 674 
 675         if (kvm_cpuid_base == -1)
 676                 kvm_cpuid_base = __kvm_cpuid_base();
 677 
 678         return kvm_cpuid_base;
 679 }
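
      /*
       * hypervisor_cpuid_base() probes the hypervisor CPUID leaves starting
       * at 0x40000000 and returns the base leaf whose signature registers
       * (EBX/ECX/EDX) spell "KVMKVMKVM\0\0\0"; 0 means we are not running
       * on KVM.  The result is cached because it is consulted on every
       * kvm_para_has_feature()/kvm_para_has_hint() check.
       */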
 680 
 681 bool kvm_para_available(void)
 682 {
 683         return kvm_cpuid_base() != 0;
 684 }
 685 EXPORT_SYMBOL_GPL(kvm_para_available);
 686 
 687 unsigned int kvm_arch_para_features(void)
 688 {
 689         return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
 690 }
 691 
 692 unsigned int kvm_arch_para_hints(void)
 693 {
 694         return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
 695 }
 696 EXPORT_SYMBOL_GPL(kvm_arch_para_hints);
 697 
 698 static uint32_t __init kvm_detect(void)
 699 {
 700         return kvm_cpuid_base();
 701 }
 702 
 703 static void __init kvm_apic_init(void)
 704 {
 705 #if defined(CONFIG_SMP)
 706         if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
 707                 kvm_setup_pv_ipi();
 708 #endif
 709 }
 710 
 711 static void __init kvm_init_platform(void)
 712 {
 713         kvmclock_init();
 714         x86_platform.apic_post_init = kvm_apic_init;
 715 }
 716 
 717 const __initconst struct hypervisor_x86 x86_hyper_kvm = {
 718         .name                   = "KVM",
 719         .detect                 = kvm_detect,
 720         .type                   = X86_HYPER_KVM,
 721         .init.guest_late_init   = kvm_guest_init,
 722         .init.x2apic_available  = kvm_para_available,
 723         .init.init_platform     = kvm_init_platform,
 724 };
 725 
 726 static __init int activate_jump_labels(void)
 727 {
 728         if (has_steal_clock) {
 729                 static_key_slow_inc(&paravirt_steal_enabled);
 730                 if (steal_acc)
 731                         static_key_slow_inc(&paravirt_steal_rq_enabled);
 732         }
 733 
 734         return 0;
 735 }
 736 arch_initcall(activate_jump_labels);
 737 
 738 static __init int kvm_setup_pv_tlb_flush(void)
 739 {
 740         int cpu;
 741 
 742         if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
 743             !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 744             kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
 745                 for_each_possible_cpu(cpu) {
 746                         zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
 747                                 GFP_KERNEL, cpu_to_node(cpu));
 748                 }
 749                 pr_info("KVM setup pv remote TLB flush\n");
 750         }
 751 
 752         return 0;
 753 }
 754 arch_initcall(kvm_setup_pv_tlb_flush);
 755 
 756 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 757 
 758 /* Kick a cpu by its apicid. Used to wake up a halted vcpu */
 759 static void kvm_kick_cpu(int cpu)
 760 {
 761         int apicid;
 762         unsigned long flags = 0;
 763 
 764         apicid = per_cpu(x86_cpu_to_apicid, cpu);
 765         kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 766 }
 767 
 768 #include <asm/qspinlock.h>
 769 
 770 static void kvm_wait(u8 *ptr, u8 val)
 771 {
 772         unsigned long flags;
 773 
 774         if (in_nmi())
 775                 return;
 776 
 777         local_irq_save(flags);
 778 
 779         if (READ_ONCE(*ptr) != val)
 780                 goto out;
 781 
  782         /*
  783          * Halt until it's our turn and we are kicked.  We use a safe halt in
  784          * the irq-enabled case to avoid a hang when the lock info is overwritten
  785          * in the irq spinlock slowpath and no spurious interrupt occurs to save us.
  786          */
 787         if (arch_irqs_disabled_flags(flags))
 788                 halt();
 789         else
 790                 safe_halt();
 791 
 792 out:
 793         local_irq_restore(flags);
 794 }
 795 
 796 #ifdef CONFIG_X86_32
 797 __visible bool __kvm_vcpu_is_preempted(long cpu)
 798 {
 799         struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
 800 
 801         return !!(src->preempted & KVM_VCPU_PREEMPTED);
 802 }
 803 PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
 804 
 805 #else
 806 
 807 #include <asm/asm-offsets.h>
 808 
 809 extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
 810 
 811 /*
 812  * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 813  * restoring to/from the stack.
 814  */
 815 asm(
 816 ".pushsection .text;"
 817 ".global __raw_callee_save___kvm_vcpu_is_preempted;"
 818 ".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
 819 "__raw_callee_save___kvm_vcpu_is_preempted:"
 820 "movq   __per_cpu_offset(,%rdi,8), %rax;"
 821 "cmpb   $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
 822 "setne  %al;"
 823 "ret;"
 824 ".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
 825 ".popsection");
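
      /*
       * The thunk above hand-codes the callee-saved convention: the CPU
       * number arrives in %rdi, __per_cpu_offset[cpu] is loaded into %rax,
       * and that CPU's steal_time.preempted byte is tested directly,
       * leaving the boolean result in %al without clobbering any other
       * registers.
       */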
 826 
 827 #endif
 828 
 829 /*
 830  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 831  */
 832 void __init kvm_spinlock_init(void)
 833 {
 834         /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
 835         if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 836                 return;
 837 
 838         if (kvm_para_has_hint(KVM_HINTS_REALTIME))
 839                 return;
 840 
 841         /* Don't use the pvqspinlock code if there is only 1 vCPU. */
 842         if (num_possible_cpus() == 1)
 843                 return;
 844 
 845         __pv_init_lock_hash();
 846         pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 847         pv_ops.lock.queued_spin_unlock =
 848                 PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 849         pv_ops.lock.wait = kvm_wait;
 850         pv_ops.lock.kick = kvm_kick_cpu;
 851 
 852         if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
 853                 pv_ops.lock.vcpu_is_preempted =
 854                         PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
 855         }
 856 }
 857 
 858 #endif  /* CONFIG_PARAVIRT_SPINLOCKS */
 859 
 860 #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
 861 
 862 static void kvm_disable_host_haltpoll(void *i)
 863 {
 864         wrmsrl(MSR_KVM_POLL_CONTROL, 0);
 865 }
 866 
 867 static void kvm_enable_host_haltpoll(void *i)
 868 {
 869         wrmsrl(MSR_KVM_POLL_CONTROL, 1);
 870 }
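
      /*
       * Bit 0 of MSR_KVM_POLL_CONTROL selects host-side halt polling for
       * this vCPU: writing 0 (kvm_disable_host_haltpoll) tells the host not
       * to poll on HLT because the guest polls itself, writing 1 re-enables
       * the host's own halt polling.
       */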
 871 
 872 void arch_haltpoll_enable(unsigned int cpu)
 873 {
 874         if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
 875                 pr_err_once("kvm: host does not support poll control\n");
 876                 pr_err_once("kvm: host upgrade recommended\n");
 877                 return;
 878         }
 879 
  880         /* Enabling guest halt poll disables host halt poll */
 881         smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
 882 }
 883 EXPORT_SYMBOL_GPL(arch_haltpoll_enable);
 884 
 885 void arch_haltpoll_disable(unsigned int cpu)
 886 {
 887         if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
 888                 return;
 889 
  890         /* Disabling guest halt poll re-enables host halt poll */
 891         smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
 892 }
 893 EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
 894 #endif
