root/arch/powerpc/include/asm/kvm_host.h


DEFINITIONS

This source file includes the following definitions.
  1. kvm_arch_hardware_disable
  2. kvm_arch_hardware_unsetup
  3. kvm_arch_sync_events
  4. kvm_arch_memslots_updated
  5. kvm_arch_flush_shadow_all
  6. kvm_arch_sched_in
  7. kvm_arch_exit
  8. kvm_arch_vcpu_blocking
  9. kvm_arch_vcpu_unblocking
  10. kvm_arch_vcpu_block_finish

   1 /* SPDX-License-Identifier: GPL-2.0-only */
   2 /*
   3  *
   4  * Copyright IBM Corp. 2007
   5  *
   6  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
   7  */
   8 
   9 #ifndef __POWERPC_KVM_HOST_H__
  10 #define __POWERPC_KVM_HOST_H__
  11 
  12 #include <linux/mutex.h>
  13 #include <linux/hrtimer.h>
  14 #include <linux/interrupt.h>
  15 #include <linux/types.h>
  16 #include <linux/kvm_types.h>
  17 #include <linux/threads.h>
  18 #include <linux/spinlock.h>
  19 #include <linux/kvm_para.h>
  20 #include <linux/list.h>
  21 #include <linux/atomic.h>
  22 #include <asm/kvm_asm.h>
  23 #include <asm/processor.h>
  24 #include <asm/page.h>
  25 #include <asm/cacheflush.h>
  26 #include <asm/hvcall.h>
  27 #include <asm/mce.h>
  28 
  29 #define KVM_MAX_VCPUS           NR_CPUS
  30 #define KVM_MAX_VCORES          NR_CPUS
  31 #define KVM_USER_MEM_SLOTS      512
  32 
  33 #include <asm/cputhreads.h>
  34 
  35 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
  36 #include <asm/kvm_book3s_asm.h>         /* for MAX_SMT_THREADS */
  37 #define KVM_MAX_VCPU_ID         (MAX_SMT_THREADS * KVM_MAX_VCORES)
  38 #define KVM_MAX_NESTED_GUESTS   KVMPPC_NR_LPIDS
  39 
  40 #else
  41 #define KVM_MAX_VCPU_ID         KVM_MAX_VCPUS
  42 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
  43 
  44 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
  45 
  46 #define KVM_HALT_POLL_NS_DEFAULT 10000  /* 10 us */
  47 
  48 /* These values are internal and can be increased later */
  49 #define KVM_NR_IRQCHIPS          1
  50 #define KVM_IRQCHIP_NUM_PINS     256
  51 
  52 /* PPC-specific vcpu->requests bit members */
  53 #define KVM_REQ_WATCHDOG        KVM_ARCH_REQ(0)
  54 #define KVM_REQ_EPR_EXIT        KVM_ARCH_REQ(1)
  55 
  56 #include <linux/mmu_notifier.h>
  57 
  58 #define KVM_ARCH_WANT_MMU_NOTIFIER
  59 
  60 extern int kvm_unmap_hva_range(struct kvm *kvm,
  61                                unsigned long start, unsigned long end);
  62 extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
  63 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
  64 extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
  65 
  66 #define HPTEG_CACHE_NUM                 (1 << 15)
  67 #define HPTEG_HASH_BITS_PTE             13
  68 #define HPTEG_HASH_BITS_PTE_LONG        12
  69 #define HPTEG_HASH_BITS_VPTE            13
  70 #define HPTEG_HASH_BITS_VPTE_LONG       5
  71 #define HPTEG_HASH_BITS_VPTE_64K        11
  72 #define HPTEG_HASH_NUM_PTE              (1 << HPTEG_HASH_BITS_PTE)
  73 #define HPTEG_HASH_NUM_PTE_LONG         (1 << HPTEG_HASH_BITS_PTE_LONG)
  74 #define HPTEG_HASH_NUM_VPTE             (1 << HPTEG_HASH_BITS_VPTE)
  75 #define HPTEG_HASH_NUM_VPTE_LONG        (1 << HPTEG_HASH_BITS_VPTE_LONG)
  76 #define HPTEG_HASH_NUM_VPTE_64K         (1 << HPTEG_HASH_BITS_VPTE_64K)
  77 
  78 /* Physical Address Mask - allowed range of real mode RAM access */
  79 #define KVM_PAM                 0x0fffffffffffffffULL
  80 
  81 struct lppaca;
  82 struct slb_shadow;
  83 struct dtl_entry;
  84 
  85 struct kvmppc_vcpu_book3s;
  86 struct kvmppc_book3s_shadow_vcpu;
  87 struct kvm_nested_guest;
  88 
  89 struct kvm_vm_stat {
  90         ulong remote_tlb_flush;
  91         ulong num_2M_pages;
  92         ulong num_1G_pages;
  93 };
  94 
  95 struct kvm_vcpu_stat {
  96         u64 sum_exits;
  97         u64 mmio_exits;
  98         u64 signal_exits;
  99         u64 light_exits;
 100         /* Account for special types of light exits: */
 101         u64 itlb_real_miss_exits;
 102         u64 itlb_virt_miss_exits;
 103         u64 dtlb_real_miss_exits;
 104         u64 dtlb_virt_miss_exits;
 105         u64 syscall_exits;
 106         u64 isi_exits;
 107         u64 dsi_exits;
 108         u64 emulated_inst_exits;
 109         u64 dec_exits;
 110         u64 ext_intr_exits;
 111         u64 halt_poll_success_ns;
 112         u64 halt_poll_fail_ns;
 113         u64 halt_wait_ns;
 114         u64 halt_successful_poll;
 115         u64 halt_attempted_poll;
 116         u64 halt_successful_wait;
 117         u64 halt_poll_invalid;
 118         u64 halt_wakeup;
 119         u64 dbell_exits;
 120         u64 gdbell_exits;
 121         u64 ld;
 122         u64 st;
 123 #ifdef CONFIG_PPC_BOOK3S
 124         u64 pf_storage;
 125         u64 pf_instruc;
 126         u64 sp_storage;
 127         u64 sp_instruc;
 128         u64 queue_intr;
 129         u64 ld_slow;
 130         u64 st_slow;
 131 #endif
 132         u64 pthru_all;
 133         u64 pthru_host;
 134         u64 pthru_bad_aff;
 135 };
 136 
 137 enum kvm_exit_types {
 138         MMIO_EXITS,
 139         SIGNAL_EXITS,
 140         ITLB_REAL_MISS_EXITS,
 141         ITLB_VIRT_MISS_EXITS,
 142         DTLB_REAL_MISS_EXITS,
 143         DTLB_VIRT_MISS_EXITS,
 144         SYSCALL_EXITS,
 145         ISI_EXITS,
 146         DSI_EXITS,
 147         EMULATED_INST_EXITS,
 148         EMULATED_MTMSRWE_EXITS,
 149         EMULATED_WRTEE_EXITS,
 150         EMULATED_MTSPR_EXITS,
 151         EMULATED_MFSPR_EXITS,
 152         EMULATED_MTMSR_EXITS,
 153         EMULATED_MFMSR_EXITS,
 154         EMULATED_TLBSX_EXITS,
 155         EMULATED_TLBWE_EXITS,
 156         EMULATED_RFI_EXITS,
 157         EMULATED_RFCI_EXITS,
 158         EMULATED_RFDI_EXITS,
 159         DEC_EXITS,
 160         EXT_INTR_EXITS,
 161         HALT_WAKEUP,
 162         USR_PR_INST,
 163         FP_UNAVAIL,
 164         DEBUG_EXITS,
 165         TIMEINGUEST,
 166         DBELL_EXITS,
 167         GDBELL_EXITS,
 168         __NUMBER_OF_KVM_EXIT_TYPES
 169 };
 170 
  171 /* allow access to the 64-bit value and its big-endian upper/lower 32-bit parts */
 172 struct kvmppc_exit_timing {
 173         union {
 174                 u64 tv64;
 175                 struct {
 176                         u32 tbu, tbl;
 177                 } tv32;
 178         };
 179 };
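
A brief usage sketch of the union above: exit-timing code can store one 64-bit
timebase read and later pick out the upper/lower 32-bit halves without extra
shifting. Illustrative only; it assumes the standard powerpc get_tb() timebase
accessor rather than quoting the kernel's actual timing code.

static inline void kvmppc_stamp_exit(struct kvmppc_exit_timing *t)
{
        t->tv64 = get_tb();     /* one 64-bit timebase read */
        /* t->tv32.tbu / t->tv32.tbl now alias the upper/lower 32 bits */
}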
 180 
 181 struct kvmppc_pginfo {
 182         unsigned long pfn;
 183         atomic_t refcnt;
 184 };
 185 
 186 struct kvmppc_spapr_tce_iommu_table {
 187         struct rcu_head rcu;
 188         struct list_head next;
 189         struct iommu_table *tbl;
 190         struct kref kref;
 191 };
 192 
 193 #define TCES_PER_PAGE   (PAGE_SIZE / sizeof(u64))
 194 
 195 struct kvmppc_spapr_tce_table {
 196         struct list_head list;
 197         struct kvm *kvm;
 198         u64 liobn;
 199         struct rcu_head rcu;
 200         u32 page_shift;
 201         u64 offset;             /* in pages */
 202         u64 size;               /* window size in pages */
 203         struct list_head iommu_tables;
 204         struct mutex alloc_lock;
 205         struct page *pages[0];
 206 };
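
How TCES_PER_PAGE relates to the pages[] array above: each backing page holds
PAGE_SIZE / sizeof(u64) TCEs, so a window of 'size' IOMMU pages needs that many
backing pages, rounded up. A minimal sketch built only on the definitions above
(the helper name is invented for illustration):

static unsigned long tce_backing_pages(struct kvmppc_spapr_tce_table *stt)
{
        /* one 64-bit TCE per IOMMU page in the window */
        return DIV_ROUND_UP(stt->size, TCES_PER_PAGE);
}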
 207 
 208 /* XICS components, defined in book3s_xics.c */
 209 struct kvmppc_xics;
 210 struct kvmppc_icp;
 211 extern struct kvm_device_ops kvm_xics_ops;
 212 
 213 /* XIVE components, defined in book3s_xive.c */
 214 struct kvmppc_xive;
 215 struct kvmppc_xive_vcpu;
 216 extern struct kvm_device_ops kvm_xive_ops;
 217 extern struct kvm_device_ops kvm_xive_native_ops;
 218 
 219 struct kvmppc_passthru_irqmap;
 220 
 221 /*
 222  * The reverse mapping array has one entry for each HPTE,
 223  * which stores the guest's view of the second word of the HPTE
 224  * (including the guest physical address of the mapping),
 225  * plus forward and backward pointers in a doubly-linked ring
 226  * of HPTEs that map the same host page.  The pointers in this
 227  * ring are 32-bit HPTE indexes, to save space.
 228  */
 229 struct revmap_entry {
 230         unsigned long guest_rpte;
 231         unsigned int forw, back;
 232 };
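
The ring structure described above lets HV code visit every HPTE that maps a
given host page by chasing the 32-bit forward indexes until it returns to the
starting entry. A hedged sketch of such a walk (helper name invented here; it
assumes the HV case where kvm->arch.hpt.rev holds the revmap array):

static void walk_revmap_ring(struct kvm *kvm, unsigned int start_idx)
{
        unsigned int idx = start_idx;

        do {
                struct revmap_entry *rev = &kvm->arch.hpt.rev[idx];

                /* rev->guest_rpte is the guest view of HPTE word 1 */
                /* ... process the HPTE at index 'idx' here ... */

                idx = rev->forw;        /* next HPTE mapping this host page */
        } while (idx != start_idx);
}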
 233 
 234 /*
  235  * An rmap array with one entry per guest page is allocated for each memslot.
  236  * Each entry stores usage-specific information about that guest page.
  237  * The encodings of the possible usage types are listed below.
 238  */
 239 /* Free bits which can be used to define a new usage */
 240 #define KVMPPC_RMAP_TYPE_MASK   0xff00000000000000
 241 #define KVMPPC_RMAP_NESTED      0xc000000000000000      /* Nested rmap array */
 242 #define KVMPPC_RMAP_HPT         0x0100000000000000      /* HPT guest */
 243 
 244 /*
 245  * rmap usage definition for a hash page table (hpt) guest:
 246  * 0x0000080000000000   Lock bit
 247  * 0x0000018000000000   RC bits
 248  * 0x0000000100000000   Present bit
 249  * 0x00000000ffffffff   HPT index bits
  250  * The bottom 32 bits are the index in the guest HPT of an HPTE that points to
  251  * the page.
 252  */
 253 #define KVMPPC_RMAP_LOCK_BIT    43
 254 #define KVMPPC_RMAP_RC_SHIFT    32
 255 #define KVMPPC_RMAP_REFERENCED  (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
 256 #define KVMPPC_RMAP_PRESENT     0x100000000ul
 257 #define KVMPPC_RMAP_INDEX       0xfffffffful
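
Decoding an rmap word for an HPT guest is plain mask work with the constants
above. An illustrative helper (not taken from the kernel sources) that checks
the usage type and the present bit, then extracts the guest HPT index:

static bool rmap_to_hpt_index(unsigned long rmap, unsigned long *index)
{
        if ((rmap & KVMPPC_RMAP_TYPE_MASK) != KVMPPC_RMAP_HPT)
                return false;           /* not an HPT-guest rmap entry */
        if (!(rmap & KVMPPC_RMAP_PRESENT))
                return false;           /* no HPTE currently present */

        *index = rmap & KVMPPC_RMAP_INDEX;      /* bottom 32 bits: HPT index */
        return true;
}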
 258 
 259 struct kvm_arch_memory_slot {
 260 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 261         unsigned long *rmap;
 262 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 263 };
 264 
 265 struct kvm_hpt_info {
 266         /* Host virtual (linear mapping) address of guest HPT */
 267         unsigned long virt;
 268         /* Array of reverse mapping entries for each guest HPTE */
 269         struct revmap_entry *rev;
 270         /* Guest HPT size is 2**(order) bytes */
 271         u32 order;
 272         /* 1 if HPT allocated with CMA, 0 otherwise */
 273         int cma;
 274 };
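
Because each HPTE occupies 16 bytes (two doublewords), the order field alone
fixes how many HPTEs, and hence how many revmap entries, the guest HPT holds.
A small helper sketch based on that observation (illustrative, not the kernel's
own accessor):

static inline unsigned long hpt_num_hptes(struct kvm_hpt_info *hpt)
{
        /* 2^order bytes of HPT, 16 bytes per HPTE */
        return 1UL << (hpt->order - 4);
}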
 275 
 276 struct kvm_resize_hpt;
 277 
 278 struct kvm_arch {
 279         unsigned int lpid;
 280         unsigned int smt_mode;          /* # vcpus per virtual core */
  281         unsigned int emul_smt_mode;     /* emulated SMT mode, on P9 */
 282 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 283         unsigned int tlb_sets;
 284         struct kvm_hpt_info hpt;
 285         atomic64_t mmio_update;
 286         unsigned int host_lpid;
 287         unsigned long host_lpcr;
 288         unsigned long sdr1;
 289         unsigned long host_sdr1;
 290         unsigned long lpcr;
 291         unsigned long vrma_slb_v;
 292         int mmu_ready;
 293         atomic_t vcpus_running;
 294         u32 online_vcores;
 295         atomic_t hpte_mod_interest;
 296         cpumask_t need_tlb_flush;
 297         cpumask_t cpu_in_guest;
 298         u8 radix;
 299         u8 fwnmi_enabled;
 300         u8 secure_guest;
 301         bool threads_indep;
 302         bool nested_enable;
 303         pgd_t *pgtable;
 304         u64 process_table;
 305         struct dentry *debugfs_dir;
 306         struct dentry *htab_dentry;
 307         struct dentry *radix_dentry;
 308         struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */
 309 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 310 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 311         struct mutex hpt_mutex;
 312 #endif
 313 #ifdef CONFIG_PPC_BOOK3S_64
 314         struct list_head spapr_tce_tables;
 315         struct list_head rtas_tokens;
 316         struct mutex rtas_token_lock;
 317         DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 318 #endif
 319 #ifdef CONFIG_KVM_MPIC
 320         struct openpic *mpic;
 321 #endif
 322 #ifdef CONFIG_KVM_XICS
 323         struct kvmppc_xics *xics;
 324         struct kvmppc_xive *xive;    /* Current XIVE device in use */
 325         struct {
 326                 struct kvmppc_xive *native;
 327                 struct kvmppc_xive *xics_on_xive;
 328         } xive_devices;
 329         struct kvmppc_passthru_irqmap *pimap;
 330 #endif
 331         struct kvmppc_ops *kvm_ops;
 332 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 333         struct mutex mmu_setup_lock;    /* nests inside vcpu mutexes */
 334         u64 l1_ptcr;
 335         int max_nested_lpid;
 336         struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
 337         /* This array can grow quite large, keep it at the end */
 338         struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 339 #endif
 340 };
 341 
 342 #define VCORE_ENTRY_MAP(vc)     ((vc)->entry_exit_map & 0xff)
 343 #define VCORE_EXIT_MAP(vc)      ((vc)->entry_exit_map >> 8)
 344 #define VCORE_IS_EXITING(vc)    (VCORE_EXIT_MAP(vc) != 0)
 345 
 346 /* This bit is used when a vcore exit is triggered from outside the vcore */
 347 #define VCORE_EXIT_REQ          0x10000
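
entry_exit_map packs two byte-wide thread bitmaps into one word: the low byte
records threads that have entered the vcore, the next byte records threads that
have begun exiting, and bit 16 (VCORE_EXIT_REQ) marks an exit requested from
outside the vcore. A hedged sketch of how a caller might test this, assuming
the entry_exit_map field of struct kvmppc_vcore that the macros above use:

static bool vcore_thread_should_exit(struct kvmppc_vcore *vc, int thread)
{
        if (vc->entry_exit_map & VCORE_EXIT_REQ)
                return true;                    /* exit forced from outside */
        return (VCORE_EXIT_MAP(vc) & (1 << thread)) != 0;
}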
 348 
 349 /*
 350  * Values for vcore_state.
 351  * Note that these are arranged such that lower values
 352  * (< VCORE_SLEEPING) don't require stolen time accounting
 353  * on load/unload, and higher values do.
 354  */
 355 #define VCORE_INACTIVE  0
 356 #define VCORE_PREEMPT   1
 357 #define VCORE_PIGGYBACK 2
 358 #define VCORE_SLEEPING  3
 359 #define VCORE_RUNNING   4
 360 #define VCORE_EXITING   5
 361 #define VCORE_POLLING   6
 362 
 363 /*
 364  * Struct used to manage memory for a virtual processor area
 365  * registered by a PAPR guest.  There are three types of area
 366  * that a guest can register.
 367  */
 368 struct kvmppc_vpa {
 369         unsigned long gpa;      /* Current guest phys addr */
 370         void *pinned_addr;      /* Address in kernel linear mapping */
 371         void *pinned_end;       /* End of region */
 372         unsigned long next_gpa; /* Guest phys addr for update */
 373         unsigned long len;      /* Number of bytes required */
 374         u8 update_pending;      /* 1 => update pinned_addr from next_gpa */
 375         bool dirty;             /* true => area has been modified by kernel */
 376 };
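
The update_pending/next_gpa pair lets a guest re-register an area without the
host repinning it on the spot: the request only records the new address, and
the pinning is redone at the next safe update point. A hedged, illustrative
sketch of that hand-off (function name invented here):

static void kvmppc_request_vpa_update(struct kvmppc_vpa *vpa,
                                      unsigned long new_gpa, unsigned long len)
{
        vpa->next_gpa = new_gpa;        /* where to pin from next time */
        vpa->len = len;                 /* bytes the area requires */
        vpa->update_pending = 1;        /* consumed at the next update point */
}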
 377 
 378 struct kvmppc_pte {
 379         ulong eaddr;
 380         u64 vpage;
 381         ulong raddr;
 382         bool may_read           : 1;
 383         bool may_write          : 1;
 384         bool may_execute        : 1;
 385         unsigned long wimg;
 386         unsigned long rc;
 387         u8 page_size;           /* MMU_PAGE_xxx */
 388         u8 page_shift;
 389 };
 390 
 391 struct kvmppc_mmu {
 392         /* book3s_64 only */
 393         void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
 394         u64  (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
 395         u64  (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
 396         int  (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
 397         void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
 398         void (*slbia)(struct kvm_vcpu *vcpu);
 399         /* book3s */
 400         void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
 401         u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
 402         int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
 403                       struct kvmppc_pte *pte, bool data, bool iswrite);
 404         void (*reset_msr)(struct kvm_vcpu *vcpu);
 405         void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
 406         int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
 407         u64  (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
 408         bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
 409 };
 410 
 411 struct kvmppc_slb {
 412         u64 esid;
 413         u64 vsid;
 414         u64 orige;
 415         u64 origv;
 416         bool valid      : 1;
 417         bool Ks         : 1;
 418         bool Kp         : 1;
 419         bool nx         : 1;
 420         bool large      : 1;    /* PTEs are 16MB */
 421         bool tb         : 1;    /* 1TB segment */
 422         bool class      : 1;
 423         u8 base_page_size;      /* MMU_PAGE_xxx */
 424 };
 425 
 426 /* Struct used to accumulate timing information in HV real mode code */
 427 struct kvmhv_tb_accumulator {
 428         u64     seqcount;       /* used to synchronize access, also count * 2 */
 429         u64     tb_total;       /* total time in timebase ticks */
 430         u64     tb_min;         /* min time */
 431         u64     tb_max;         /* max time */
 432 };
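
seqcount doubles as a sequence lock and a sample counter: the updater bumps it
before and after touching the other fields, so an odd value means an update is
in flight and seqcount / 2 is the number of completed samples. A hedged
reader-side sketch of that convention (illustrative, not the kernel's debugfs
reader):

static u64 read_tb_total(struct kvmhv_tb_accumulator *acc)
{
        u64 seq, total;

        for (;;) {
                seq = READ_ONCE(acc->seqcount);
                if (seq & 1)
                        continue;               /* updater active, retry */
                smp_rmb();
                total = acc->tb_total;
                smp_rmb();
                if (READ_ONCE(acc->seqcount) == seq)
                        return total;           /* consistent snapshot */
        }
}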
 433 
 434 #ifdef CONFIG_PPC_BOOK3S_64
 435 struct kvmppc_irq_map {
 436         u32     r_hwirq;
 437         u32     v_hwirq;
 438         struct irq_desc *desc;
 439 };
 440 
 441 #define KVMPPC_PIRQ_MAPPED      1024
 442 struct kvmppc_passthru_irqmap {
 443         int n_mapped;
 444         struct kvmppc_irq_map mapped[KVMPPC_PIRQ_MAPPED];
 445 };
 446 #endif
 447 
 448 # ifdef CONFIG_PPC_FSL_BOOK3E
 449 #define KVMPPC_BOOKE_IAC_NUM    2
 450 #define KVMPPC_BOOKE_DAC_NUM    2
 451 # else
 452 #define KVMPPC_BOOKE_IAC_NUM    4
 453 #define KVMPPC_BOOKE_DAC_NUM    2
 454 # endif
 455 #define KVMPPC_BOOKE_MAX_IAC    4
 456 #define KVMPPC_BOOKE_MAX_DAC    2
 457 
 458 /* KVMPPC_EPR_USER takes precedence over KVMPPC_EPR_KERNEL */
 459 #define KVMPPC_EPR_NONE         0 /* EPR not supported */
 460 #define KVMPPC_EPR_USER         1 /* exit to userspace to fill EPR */
 461 #define KVMPPC_EPR_KERNEL       2 /* in-kernel irqchip */
 462 
 463 #define KVMPPC_IRQ_DEFAULT      0
 464 #define KVMPPC_IRQ_MPIC         1
 465 #define KVMPPC_IRQ_XICS         2 /* Includes a XIVE option */
 466 #define KVMPPC_IRQ_XIVE         3 /* XIVE native exploitation mode */
 467 
 468 #define MMIO_HPTE_CACHE_SIZE    4
 469 
 470 struct mmio_hpte_cache_entry {
 471         unsigned long hpte_v;
 472         unsigned long hpte_r;
 473         unsigned long rpte;
 474         unsigned long pte_index;
 475         unsigned long eaddr;
 476         unsigned long slb_v;
 477         long mmio_update;
 478         unsigned int slb_base_pshift;
 479 };
 480 
 481 struct mmio_hpte_cache {
 482         struct mmio_hpte_cache_entry entry[MMIO_HPTE_CACHE_SIZE];
 483         unsigned int index;
 484 };
 485 
 486 #define KVMPPC_VSX_COPY_NONE            0
 487 #define KVMPPC_VSX_COPY_WORD            1
 488 #define KVMPPC_VSX_COPY_DWORD           2
 489 #define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3
 490 #define KVMPPC_VSX_COPY_WORD_LOAD_DUMP  4
 491 
 492 #define KVMPPC_VMX_COPY_BYTE            8
 493 #define KVMPPC_VMX_COPY_HWORD           9
 494 #define KVMPPC_VMX_COPY_WORD            10
 495 #define KVMPPC_VMX_COPY_DWORD           11
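
These copy types describe how one vector register access is split into
GPR-sized chunks during MMIO emulation; the mmio_vsx_copy_nums comment in
struct kvm_vcpu_arch below spells out the pairing (two 8-byte copies use
KVMPPC_VSX_COPY_DWORD with nums = 2, four 4-byte copies use
KVMPPC_VSX_COPY_WORD with nums = 4). A hedged sketch of that selection, not
lifted from the emulation code itself:

static void select_vsx_copy(struct kvm_vcpu *vcpu, int chunk_bytes)
{
        if (chunk_bytes == 8) {
                vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_DWORD;
                vcpu->arch.mmio_vsx_copy_nums = 16 / 8;         /* = 2 */
        } else {
                vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_WORD;
                vcpu->arch.mmio_vsx_copy_nums = 16 / 4;         /* = 4 */
        }
}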
 496 
 497 struct openpic;
 498 
 499 /* W0 and W1 of a XIVE thread management context */
 500 union xive_tma_w01 {
 501         struct {
 502                 u8      nsr;
 503                 u8      cppr;
 504                 u8      ipb;
 505                 u8      lsmfb;
 506                 u8      ack;
 507                 u8      inc;
 508                 u8      age;
 509                 u8      pipr;
 510         };
 511         __be64 w01;
 512 };
 513 
 514 struct kvm_vcpu_arch {
 515         ulong host_stack;
 516         u32 host_pid;
 517 #ifdef CONFIG_PPC_BOOK3S
 518         struct kvmppc_slb slb[64];
 519         int slb_max;            /* 1 + index of last valid entry in slb[] */
 520         int slb_nr;             /* total number of entries in SLB */
 521         struct kvmppc_mmu mmu;
 522         struct kvmppc_vcpu_book3s *book3s;
 523 #endif
 524 #ifdef CONFIG_PPC_BOOK3S_32
 525         struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 526 #endif
 527 
 528         struct pt_regs regs;
 529 
 530         struct thread_fp_state fp;
 531 
 532 #ifdef CONFIG_SPE
 533         ulong evr[32];
 534         ulong spefscr;
 535         ulong host_spefscr;
 536         u64 acc;
 537 #endif
 538 #ifdef CONFIG_ALTIVEC
 539         struct thread_vr_state vr;
 540 #endif
 541 
 542 #ifdef CONFIG_KVM_BOOKE_HV
 543         u32 host_mas4;
 544         u32 host_mas6;
 545         u32 shadow_epcr;
 546         u32 shadow_msrp;
 547         u32 eplc;
 548         u32 epsc;
 549         u32 oldpir;
 550 #endif
 551 
 552 #if defined(CONFIG_BOOKE)
 553 #if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
 554         u32 epcr;
 555 #endif
 556 #endif
 557 
 558 #ifdef CONFIG_PPC_BOOK3S
 559         /* For Gekko paired singles */
 560         u32 qpr[32];
 561 #endif
 562 
 563 #ifdef CONFIG_PPC_BOOK3S
 564         ulong tar;
 565 #endif
 566 
 567 #ifdef CONFIG_PPC_BOOK3S
 568         ulong hflags;
 569         ulong guest_owned_ext;
 570         ulong purr;
 571         ulong spurr;
 572         ulong ic;
 573         ulong dscr;
 574         ulong amr;
 575         ulong uamor;
 576         ulong iamr;
 577         u32 ctrl;
 578         u32 dabrx;
 579         ulong dabr;
 580         ulong dawr;
 581         ulong dawrx;
 582         ulong ciabr;
 583         ulong cfar;
 584         ulong ppr;
 585         u32 pspb;
 586         ulong fscr;
 587         ulong shadow_fscr;
 588         ulong ebbhr;
 589         ulong ebbrr;
 590         ulong bescr;
 591         ulong csigr;
 592         ulong tacr;
 593         ulong tcscr;
 594         ulong acop;
 595         ulong wort;
 596         ulong tid;
 597         ulong psscr;
 598         ulong hfscr;
 599         ulong shadow_srr1;
 600 #endif
 601         u32 vrsave; /* also USPRG0 */
 602         u32 mmucr;
 603         /* shadow_msr is unused for BookE HV */
 604         ulong shadow_msr;
 605         ulong csrr0;
 606         ulong csrr1;
 607         ulong dsrr0;
 608         ulong dsrr1;
 609         ulong mcsrr0;
 610         ulong mcsrr1;
 611         ulong mcsr;
 612         ulong dec;
 613 #ifdef CONFIG_BOOKE
 614         u32 decar;
 615 #endif
 616         /* Time base value when we entered the guest */
 617         u64 entry_tb;
 618         u64 entry_vtb;
 619         u64 entry_ic;
 620         u32 tcr;
 621         ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
 622         u32 ivor[64];
 623         ulong ivpr;
 624         u32 pvr;
 625 
 626         u32 shadow_pid;
 627         u32 shadow_pid1;
 628         u32 pid;
 629         u32 swap_pid;
 630 
 631         u32 ccr0;
 632         u32 ccr1;
 633         u32 dbsr;
 634 
 635         u64 mmcr[5];
 636         u32 pmc[8];
 637         u32 spmc[2];
 638         u64 siar;
 639         u64 sdar;
 640         u64 sier;
 641 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 642         u64 tfhar;
 643         u64 texasr;
 644         u64 tfiar;
 645         u64 orig_texasr;
 646 
 647         u32 cr_tm;
 648         u64 xer_tm;
 649         u64 lr_tm;
 650         u64 ctr_tm;
 651         u64 amr_tm;
 652         u64 ppr_tm;
 653         u64 dscr_tm;
 654         u64 tar_tm;
 655 
 656         ulong gpr_tm[32];
 657 
 658         struct thread_fp_state fp_tm;
 659 
 660         struct thread_vr_state vr_tm;
 661         u32 vrsave_tm; /* also USPRG0 */
 662 #endif
 663 
 664 #ifdef CONFIG_KVM_EXIT_TIMING
 665         struct mutex exit_timing_lock;
 666         struct kvmppc_exit_timing timing_exit;
 667         struct kvmppc_exit_timing timing_last_enter;
 668         u32 last_exit_type;
 669         u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
 670         u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
 671         u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
 672         u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
 673         u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
 674         u64 timing_last_exit;
 675         struct dentry *debugfs_exit_timing;
 676 #endif
 677 
 678 #ifdef CONFIG_PPC_BOOK3S
 679         ulong fault_dar;
 680         u32 fault_dsisr;
 681         unsigned long intr_msr;
 682         ulong fault_gpa;        /* guest real address of page fault (POWER9) */
 683 #endif
 684 
 685 #ifdef CONFIG_BOOKE
 686         ulong fault_dear;
 687         ulong fault_esr;
 688         ulong queued_dear;
 689         ulong queued_esr;
 690         spinlock_t wdt_lock;
 691         struct timer_list wdt_timer;
 692         u32 tlbcfg[4];
 693         u32 tlbps[4];
 694         u32 mmucfg;
 695         u32 eptcfg;
 696         u32 epr;
 697         u64 sprg9;
 698         u32 pwrmgtcr0;
 699         u32 crit_save;
  700         /* guest debug registers */
 701         struct debug_reg dbg_reg;
 702 #endif
 703         gpa_t paddr_accessed;
 704         gva_t vaddr_accessed;
 705         pgd_t *pgdir;
 706 
 707         u16 io_gpr; /* GPR used as IO source/target */
 708         u8 mmio_host_swabbed;
 709         u8 mmio_sign_extend;
 710         /* conversion between single and double precision */
 711         u8 mmio_sp64_extend;
  712         /*
  713          * Number of copy operations used to emulate one VSX access.
  714          * If one 16-byte access is emulated as two 8-byte copies,
  715          * then the number should be 2 and
  716          * mmio_copy_type = KVMPPC_VSX_COPY_DWORD.
  717          * If it is emulated as four 4-byte copies,
  718          * the number should be 4 and
  719          * mmio_copy_type = KVMPPC_VSX_COPY_WORD.
  720          */
 721         u8 mmio_vsx_copy_nums;
 722         u8 mmio_vsx_offset;
 723         u8 mmio_vmx_copy_nums;
 724         u8 mmio_vmx_offset;
 725         u8 mmio_copy_type;
 726         u8 osi_needed;
 727         u8 osi_enabled;
 728         u8 papr_enabled;
 729         u8 watchdog_enabled;
 730         u8 sane;
 731         u8 cpu_type;
 732         u8 hcall_needed;
 733         u8 epr_flags; /* KVMPPC_EPR_xxx */
 734         u8 epr_needed;
 735         u8 external_oneshot;    /* clear external irq after delivery */
 736 
 737         u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
 738 
 739         struct hrtimer dec_timer;
 740         u64 dec_jiffies;
 741         u64 dec_expires;
 742         unsigned long pending_exceptions;
 743         u8 ceded;
 744         u8 prodded;
 745         u8 doorbell_request;
 746         u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
 747         u32 last_inst;
 748 
 749         struct swait_queue_head *wqp;
 750         struct kvmppc_vcore *vcore;
 751         int ret;
 752         int trap;
 753         int state;
 754         int ptid;
 755         int thread_cpu;
 756         int prev_cpu;
 757         bool timer_running;
 758         wait_queue_head_t cpu_run;
 759         struct machine_check_event mce_evt; /* Valid if trap == 0x200 */
 760 
 761         struct kvm_vcpu_arch_shared *shared;
 762 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
 763         bool shared_big_endian;
 764 #endif
 765         unsigned long magic_page_pa; /* phys addr to map the magic page to */
 766         unsigned long magic_page_ea; /* effect. addr to map the magic page to */
 767         bool disable_kernel_nx;
 768 
 769         int irq_type;           /* one of KVM_IRQ_* */
 770         int irq_cpu_id;
 771         struct openpic *mpic;   /* KVM_IRQ_MPIC */
 772 #ifdef CONFIG_KVM_XICS
 773         struct kvmppc_icp *icp; /* XICS presentation controller */
 774         struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */
 775         __be32 xive_cam_word;    /* Cooked W2 in proper endian with valid bit */
 776         u8 xive_pushed;          /* Is the VP pushed on the physical CPU ? */
 777         u8 xive_esc_on;          /* Is the escalation irq enabled ? */
 778         union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */
 779         u64 xive_esc_raddr;      /* Escalation interrupt ESB real addr */
 780         u64 xive_esc_vaddr;      /* Escalation interrupt ESB virt addr */
 781 #endif
 782 
 783 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 784         struct kvm_vcpu_arch_shared shregs;
 785 
 786         struct mmio_hpte_cache mmio_cache;
 787         unsigned long pgfault_addr;
 788         long pgfault_index;
 789         unsigned long pgfault_hpte[2];
 790         struct mmio_hpte_cache_entry *pgfault_cache;
 791 
 792         struct task_struct *run_task;
 793         struct kvm_run *kvm_run;
 794 
 795         spinlock_t vpa_update_lock;
 796         struct kvmppc_vpa vpa;
 797         struct kvmppc_vpa dtl;
 798         struct dtl_entry *dtl_ptr;
 799         unsigned long dtl_index;
 800         u64 stolen_logged;
 801         struct kvmppc_vpa slb_shadow;
 802 
 803         spinlock_t tbacct_lock;
 804         u64 busy_stolen;
 805         u64 busy_preempt;
 806 
 807         u32 emul_inst;
 808 
 809         u32 online;
 810 
 811         /* For support of nested guests */
 812         struct kvm_nested_guest *nested;
 813         u32 nested_vcpu_id;
 814         gpa_t nested_io_gpr;
 815 #endif
 816 
 817 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
 818         struct kvmhv_tb_accumulator *cur_activity;      /* What we're timing */
 819         u64     cur_tb_start;                   /* when it started */
 820         struct kvmhv_tb_accumulator rm_entry;   /* real-mode entry code */
 821         struct kvmhv_tb_accumulator rm_intr;    /* real-mode intr handling */
 822         struct kvmhv_tb_accumulator rm_exit;    /* real-mode exit code */
 823         struct kvmhv_tb_accumulator guest_time; /* guest execution */
 824         struct kvmhv_tb_accumulator cede_time;  /* time napping inside guest */
 825 
 826         struct dentry *debugfs_dir;
 827         struct dentry *debugfs_timings;
 828 #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
 829 };
 830 
 831 #define VCPU_FPR(vcpu, i)       (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
 832 #define VCPU_VSX_FPR(vcpu, i, j)        ((vcpu)->arch.fp.fpr[i][j])
 833 #define VCPU_VSX_VR(vcpu, i)            ((vcpu)->arch.vr.vr[i])
 834 
 835 /* Values for vcpu->arch.state */
 836 #define KVMPPC_VCPU_NOTREADY            0
 837 #define KVMPPC_VCPU_RUNNABLE            1
 838 #define KVMPPC_VCPU_BUSY_IN_HOST        2
 839 
 840 /* Values for vcpu->arch.io_gpr */
 841 #define KVM_MMIO_REG_MASK       0x003f
 842 #define KVM_MMIO_REG_EXT_MASK   0xffc0
 843 #define KVM_MMIO_REG_GPR        0x0000
 844 #define KVM_MMIO_REG_FPR        0x0040
 845 #define KVM_MMIO_REG_QPR        0x0080
 846 #define KVM_MMIO_REG_FQPR       0x00c0
 847 #define KVM_MMIO_REG_VSX        0x0100
 848 #define KVM_MMIO_REG_VMX        0x0180
 849 #define KVM_MMIO_REG_NESTED_GPR 0xffc0
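
io_gpr encodes both the register class and the register number of an MMIO
load/store target: the bits under KVM_MMIO_REG_MASK give the index and the
remaining bits select the class. An illustrative decode helper (name invented
here, not the emulation code):

static void decode_mmio_reg(u16 io_gpr, u16 *reg_class, u16 *reg_index)
{
        *reg_index = io_gpr & KVM_MMIO_REG_MASK;        /* register number */
        *reg_class = io_gpr & KVM_MMIO_REG_EXT_MASK;    /* e.g. KVM_MMIO_REG_FPR */
}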
 850 
 851 
 852 #define __KVM_HAVE_ARCH_WQP
 853 #define __KVM_HAVE_CREATE_DEVICE
 854 
 855 static inline void kvm_arch_hardware_disable(void) {}
 856 static inline void kvm_arch_hardware_unsetup(void) {}
 857 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 858 static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 859 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
 860 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 861 static inline void kvm_arch_exit(void) {}
 862 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 863 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
 864 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 865 
 866 #endif /* __POWERPC_KVM_HOST_H__ */
