/linux-4.1.27/scripts/ |
H A D | headers.sh | 9 if [ -f ${srctree}/arch/$2/include/asm/Kbuild ]; then 12 printf "Ignoring arch: %s\n" ${arch} 16 archs=${HDR_ARCH_LIST:-$(ls ${srctree}/arch)} 18 for arch in ${archs}; do 19 case ${arch} in 23 if [ -d ${srctree}/arch/${arch} ]; then 24 do_command $1 ${arch}
|
/linux-4.1.27/arch/parisc/mm/ |
H A D | Makefile | 2 # Makefile for arch/parisc/mm
|
/linux-4.1.27/arch/blackfin/mach-bf518/ |
H A D | Makefile | 2 # arch/blackfin/mach-bf518/Makefile
|
/linux-4.1.27/arch/blackfin/mach-bf527/ |
H A D | Makefile | 2 # arch/blackfin/mach-bf527/Makefile
|
/linux-4.1.27/arch/blackfin/mach-bf533/ |
H A D | Makefile | 2 # arch/blackfin/mach-bf533/Makefile
|
/linux-4.1.27/arch/blackfin/mach-bf537/ |
H A D | Makefile | 2 # arch/blackfin/mach-bf537/Makefile
|
/linux-4.1.27/arch/blackfin/mach-bf548/ |
H A D | Makefile | 2 # arch/blackfin/mach-bf548/Makefile
|
/linux-4.1.27/arch/cris/include/asm/ |
H A D | bug.h | 3 #include <arch/bug.h>
|
H A D | cache.h | 4 #include <arch/cache.h>
|
H A D | mmu.h | 8 #include <arch/mmu.h>
|
H A D | swab.h | 4 #include <arch/swab.h>
|
H A D | irq.h | 4 #include <arch/irq.h>
|
H A D | delay.h | 10 #include <arch/delay.h> 14 extern unsigned long loops_per_usec; /* arch/cris/mm/init.c */ 16 /* May be defined by arch/delay.h. */
|
H A D | string.h | 4 /* the optimized memcpy is in arch/cris/lib/string.c */ 9 /* New and improved. In arch/cris/lib/memset.c */
|
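Each of the asm/*.h stubs above is a one-line dispatch header: the variant-neutral asm/ header defers to an arch/ header that the preprocessor resolves through the -Iarch/cris/include/.../$(SARCH)/arch paths set up by the CRIS Makefile (see its entry below). A minimal sketch of the pattern, with a hypothetical widget.h standing in for bug.h, cache.h, and the rest:

    /* hypothetical arch/cris/include/asm/widget.h */
    #ifndef _ASM_CRIS_WIDGET_H
    #define _ASM_CRIS_WIDGET_H
    /* resolves to the v10 or v32 copy picked by the build's -I flags */
    #include <arch/widget.h>
    #endif

The include guard is the only per-header content; everything variant-specific lives in the v10/v32 trees.
|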
/linux-4.1.27/arch/m68k/mvme147/ |
H A D | Makefile | 2 # Makefile for Linux arch/m68k/mvme147 source directory
|
/linux-4.1.27/arch/x86/tools/ |
H A D | Makefile | 15 distill_awk = $(srctree)/arch/x86/tools/distill.awk 16 chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk 31 HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/ 33 HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/ 36 $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c 38 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
|
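These rules build test_get_len and insn_sanity as host programs that compile the kernel's x86 instruction decoder (insn.c/inat.c plus the generated inat-tables.c) out of tree, which is why the -I list reaches into both $(srctree) and $(objtree). Assuming the decoder API declared in arch/x86/include/asm/insn.h (the one test_get_len.c exercises), a minimal use looks like:

    #include <asm/insn.h>   /* found via the -I paths above */

    /* Decode one instruction from buf and return its byte length. */
    static int decoded_len(const unsigned char *buf, int buf_len, int x86_64)
    {
            struct insn insn;

            insn_init(&insn, buf, buf_len, x86_64);
            insn_get_length(&insn);        /* fills insn.length */
            return insn.length;
    }

test_get_len compares lengths like this against objdump's (distilled by distill.awk above) to validate the decoder tables.
|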
/linux-4.1.27/arch/cris/ |
H A D | Makefile | 15 arch-y := v10 16 arch-$(CONFIG_ETRAX_ARCH_V10) := v10 17 arch-$(CONFIG_ETRAX_ARCH_V32) := v32 24 ifneq ($(arch-y),) 25 SARCH := arch-$(arch-y) 28 inc += -Iarch/cris/include/uapi/$(SARCH)/arch 29 inc += -Iarch/cris/include/$(SARCH)/arch 44 core-$(CONFIG_OF) += arch/cris/boot/dts/ 51 KBUILD_AFLAGS += -mlinux -march=$(arch-y) $(inc) 52 KBUILD_CFLAGS += -mlinux -march=$(arch-y) -pipe $(inc) 60 head-y := arch/cris/$(SARCH)/kernel/head.o 64 core-y += arch/cris/kernel/ arch/cris/mm/ 65 core-y += arch/cris/$(SARCH)/kernel/ arch/cris/$(SARCH)/mm/ 67 core-y += arch/cris/$(SARCH)/$(MACH)/ 69 drivers-y += arch/cris/$(SARCH)/drivers/ 70 libs-y += arch/cris/$(SARCH)/lib/ $(LIBGCC) 73 SRC_ARCH = $(srctree)/arch/cris 75 OBJ_ARCH = $(objtree)/arch/cris 77 boot := arch/cris/boot 78 MACHINE := arch/cris/$(SARCH) 88 $(Q)if [ -e arch/cris/boot ]; then \ 89 $(MAKE) $(clean)=arch/cris/boot; \ 102 echo '* zImage - Compressed kernel image (arch/cris/boot/zImage)' 103 echo '* Image - Uncompressed kernel image (arch/cris/boot/Image)'
|
/linux-4.1.27/arch/powerpc/kvm/ |
H A D | booke_emulate.c | 37 vcpu->arch.pc = vcpu->arch.shared->srr0; kvmppc_emul_rfi() 38 kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); kvmppc_emul_rfi() 43 vcpu->arch.pc = vcpu->arch.dsrr0; kvmppc_emul_rfdi() 44 kvmppc_set_msr(vcpu, vcpu->arch.dsrr1); kvmppc_emul_rfdi() 49 vcpu->arch.pc = vcpu->arch.csrr0; kvmppc_emul_rfci() 50 kvmppc_set_msr(vcpu, vcpu->arch.csrr1); kvmppc_emul_rfci() 91 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); kvmppc_booke_emulate_op() 101 vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) kvmppc_booke_emulate_op() 107 vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) kvmppc_booke_emulate_op() 138 vcpu->arch.shared->dar = spr_val; kvmppc_booke_emulate_mtspr() 141 vcpu->arch.shared->esr = spr_val; kvmppc_booke_emulate_mtspr() 144 vcpu->arch.csrr0 = spr_val; kvmppc_booke_emulate_mtspr() 147 vcpu->arch.csrr1 = spr_val; kvmppc_booke_emulate_mtspr() 150 vcpu->arch.dsrr0 = spr_val; kvmppc_booke_emulate_mtspr() 153 vcpu->arch.dsrr1 = spr_val; kvmppc_booke_emulate_mtspr() 164 vcpu->arch.dbg_reg.iac1 = spr_val; kvmppc_booke_emulate_mtspr() 175 vcpu->arch.dbg_reg.iac2 = spr_val; kvmppc_booke_emulate_mtspr() 187 vcpu->arch.dbg_reg.iac3 = spr_val; kvmppc_booke_emulate_mtspr() 198 vcpu->arch.dbg_reg.iac4 = spr_val; kvmppc_booke_emulate_mtspr() 210 vcpu->arch.dbg_reg.dac1 = spr_val; kvmppc_booke_emulate_mtspr() 221 vcpu->arch.dbg_reg.dac2 = spr_val; kvmppc_booke_emulate_mtspr() 236 vcpu->arch.dbg_reg.dbcr0 = spr_val; kvmppc_booke_emulate_mtspr() 247 vcpu->arch.dbg_reg.dbcr1 = spr_val; kvmppc_booke_emulate_mtspr() 258 vcpu->arch.dbg_reg.dbcr2 = spr_val; kvmppc_booke_emulate_mtspr() 268 vcpu->arch.dbsr &= ~spr_val; kvmppc_booke_emulate_mtspr() 269 if (!(vcpu->arch.dbsr & ~DBSR_IDE)) kvmppc_booke_emulate_mtspr() 280 if (vcpu->arch.tcr & TCR_WRC_MASK) { kvmppc_booke_emulate_mtspr() 282 spr_val |= vcpu->arch.tcr & TCR_WRC_MASK; kvmppc_booke_emulate_mtspr() 288 vcpu->arch.decar = spr_val; kvmppc_booke_emulate_mtspr() 309 vcpu->arch.ivpr = spr_val; kvmppc_booke_emulate_mtspr() 315 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; kvmppc_booke_emulate_mtspr() 318 vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val; kvmppc_booke_emulate_mtspr() 321 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; kvmppc_booke_emulate_mtspr() 327 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; kvmppc_booke_emulate_mtspr() 330 vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val; kvmppc_booke_emulate_mtspr() 333 vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val; kvmppc_booke_emulate_mtspr() 336 vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val; kvmppc_booke_emulate_mtspr() 339 vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val; kvmppc_booke_emulate_mtspr() 342 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; kvmppc_booke_emulate_mtspr() 348 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; kvmppc_booke_emulate_mtspr() 351 vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val; kvmppc_booke_emulate_mtspr() 354 vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val; kvmppc_booke_emulate_mtspr() 357 vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val; kvmppc_booke_emulate_mtspr() 360 vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val; kvmppc_booke_emulate_mtspr() 363 vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val; kvmppc_booke_emulate_mtspr() 366 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val; kvmppc_booke_emulate_mtspr() 369 vcpu->arch.mcsr &= ~spr_val; kvmppc_booke_emulate_mtspr() 375 mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr); kvmppc_booke_emulate_mtspr() 384 
current->thread.debug = vcpu->arch.dbg_reg; kvmppc_booke_emulate_mtspr() 385 switch_booke_debug_regs(&vcpu->arch.dbg_reg); kvmppc_booke_emulate_mtspr() 396 *spr_val = vcpu->arch.ivpr; kvmppc_booke_emulate_mfspr() 399 *spr_val = vcpu->arch.shared->dar; kvmppc_booke_emulate_mfspr() 402 *spr_val = vcpu->arch.shared->esr; kvmppc_booke_emulate_mfspr() 405 *spr_val = vcpu->arch.epr; kvmppc_booke_emulate_mfspr() 408 *spr_val = vcpu->arch.csrr0; kvmppc_booke_emulate_mfspr() 411 *spr_val = vcpu->arch.csrr1; kvmppc_booke_emulate_mfspr() 414 *spr_val = vcpu->arch.dsrr0; kvmppc_booke_emulate_mfspr() 417 *spr_val = vcpu->arch.dsrr1; kvmppc_booke_emulate_mfspr() 420 *spr_val = vcpu->arch.dbg_reg.iac1; kvmppc_booke_emulate_mfspr() 423 *spr_val = vcpu->arch.dbg_reg.iac2; kvmppc_booke_emulate_mfspr() 427 *spr_val = vcpu->arch.dbg_reg.iac3; kvmppc_booke_emulate_mfspr() 430 *spr_val = vcpu->arch.dbg_reg.iac4; kvmppc_booke_emulate_mfspr() 434 *spr_val = vcpu->arch.dbg_reg.dac1; kvmppc_booke_emulate_mfspr() 437 *spr_val = vcpu->arch.dbg_reg.dac2; kvmppc_booke_emulate_mfspr() 440 *spr_val = vcpu->arch.dbg_reg.dbcr0; kvmppc_booke_emulate_mfspr() 445 *spr_val = vcpu->arch.dbg_reg.dbcr1; kvmppc_booke_emulate_mfspr() 448 *spr_val = vcpu->arch.dbg_reg.dbcr2; kvmppc_booke_emulate_mfspr() 451 *spr_val = vcpu->arch.dbsr; kvmppc_booke_emulate_mfspr() 454 *spr_val = vcpu->arch.tsr; kvmppc_booke_emulate_mfspr() 457 *spr_val = vcpu->arch.tcr; kvmppc_booke_emulate_mfspr() 461 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; kvmppc_booke_emulate_mfspr() 464 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; kvmppc_booke_emulate_mfspr() 467 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; kvmppc_booke_emulate_mfspr() 470 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; kvmppc_booke_emulate_mfspr() 473 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; kvmppc_booke_emulate_mfspr() 476 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; kvmppc_booke_emulate_mfspr() 479 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; kvmppc_booke_emulate_mfspr() 482 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; kvmppc_booke_emulate_mfspr() 485 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; kvmppc_booke_emulate_mfspr() 488 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; kvmppc_booke_emulate_mfspr() 491 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; kvmppc_booke_emulate_mfspr() 494 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; kvmppc_booke_emulate_mfspr() 497 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; kvmppc_booke_emulate_mfspr() 500 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; kvmppc_booke_emulate_mfspr() 503 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; kvmppc_booke_emulate_mfspr() 506 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; kvmppc_booke_emulate_mfspr() 509 *spr_val = vcpu->arch.mcsr; kvmppc_booke_emulate_mfspr() 513 *spr_val = vcpu->arch.epcr; kvmppc_booke_emulate_mfspr()
|
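Both halves of this file are a single switch over the SPR number: mtspr copies spr_val into the matching vcpu->arch shadow field, mfspr copies it back out, and anything unhandled falls through so a slower path can reject it. A self-contained sketch of that shape (the struct and return codes below are illustrative, not the kernel's):

    #include <stdint.h>

    enum { SPRN_CSRR0 = 58, SPRN_CSRR1 = 59 };     /* illustrative subset */

    struct vcpu_arch { uint64_t csrr0, csrr1; };

    static int emulate_mtspr(struct vcpu_arch *arch, int sprn, uint64_t val)
    {
            switch (sprn) {
            case SPRN_CSRR0: arch->csrr0 = val; break;
            case SPRN_CSRR1: arch->csrr1 = val; break;
            default: return -1;                    /* unknown SPR: punt */
            }
            return 0;                              /* emulated */
    }
|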
H A D | e500_emulate.c | 7 * This file is derived from arch/powerpc/kvm/44x_emulate.c, 54 ulong param = vcpu->arch.gpr[rb]; kvmppc_e500_emul_msgclr() 60 clear_bit(prio, &vcpu->arch.pending_exceptions); kvmppc_e500_emul_msgclr() 66 ulong param = vcpu->arch.gpr[rb]; kvmppc_e500_emul_msgsnd() 76 int cpir = cvcpu->arch.shared->pir; kvmppc_e500_emul_msgsnd() 78 set_bit(prio, &cvcpu->arch.pending_exceptions); kvmppc_e500_emul_msgsnd() 95 run->debug.arch.address = vcpu->arch.pc; kvmppc_e500_emul_ehpriv() 96 run->debug.arch.status = 0; kvmppc_e500_emul_ehpriv() 210 vcpu->arch.shared->mas0 = spr_val; kvmppc_core_emulate_mtspr_e500() 213 vcpu->arch.shared->mas1 = spr_val; kvmppc_core_emulate_mtspr_e500() 216 vcpu->arch.shared->mas2 = spr_val; kvmppc_core_emulate_mtspr_e500() 219 vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff; kvmppc_core_emulate_mtspr_e500() 220 vcpu->arch.shared->mas7_3 |= spr_val; kvmppc_core_emulate_mtspr_e500() 223 vcpu->arch.shared->mas4 = spr_val; kvmppc_core_emulate_mtspr_e500() 226 vcpu->arch.shared->mas6 = spr_val; kvmppc_core_emulate_mtspr_e500() 229 vcpu->arch.shared->mas7_3 &= (u64)0xffffffff; kvmppc_core_emulate_mtspr_e500() 230 vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32; kvmppc_core_emulate_mtspr_e500() 258 vcpu->arch.pwrmgtcr0 = spr_val; kvmppc_core_emulate_mtspr_e500() 264 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val; kvmppc_core_emulate_mtspr_e500() 267 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val; kvmppc_core_emulate_mtspr_e500() 270 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val; kvmppc_core_emulate_mtspr_e500() 275 vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val; kvmppc_core_emulate_mtspr_e500() 278 vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val; kvmppc_core_emulate_mtspr_e500() 282 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val; kvmppc_core_emulate_mtspr_e500() 286 vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val; kvmppc_core_emulate_mtspr_e500() 289 vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val; kvmppc_core_emulate_mtspr_e500() 316 *spr_val = vcpu->arch.shared->mas0; kvmppc_core_emulate_mfspr_e500() 319 *spr_val = vcpu->arch.shared->mas1; kvmppc_core_emulate_mfspr_e500() 322 *spr_val = vcpu->arch.shared->mas2; kvmppc_core_emulate_mfspr_e500() 325 *spr_val = (u32)vcpu->arch.shared->mas7_3; kvmppc_core_emulate_mfspr_e500() 328 *spr_val = vcpu->arch.shared->mas4; kvmppc_core_emulate_mfspr_e500() 331 *spr_val = vcpu->arch.shared->mas6; kvmppc_core_emulate_mfspr_e500() 334 *spr_val = vcpu->arch.shared->mas7_3 >> 32; kvmppc_core_emulate_mfspr_e500() 338 *spr_val = vcpu->arch.decar; kvmppc_core_emulate_mfspr_e500() 341 *spr_val = vcpu->arch.tlbcfg[0]; kvmppc_core_emulate_mfspr_e500() 344 *spr_val = vcpu->arch.tlbcfg[1]; kvmppc_core_emulate_mfspr_e500() 349 *spr_val = vcpu->arch.tlbps[0]; kvmppc_core_emulate_mfspr_e500() 354 *spr_val = vcpu->arch.tlbps[1]; kvmppc_core_emulate_mfspr_e500() 377 *spr_val = vcpu->arch.mmucfg; kvmppc_core_emulate_mfspr_e500() 386 *spr_val = vcpu->arch.eptcfg; kvmppc_core_emulate_mfspr_e500() 390 *spr_val = vcpu->arch.pwrmgtcr0; kvmppc_core_emulate_mfspr_e500() 396 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; kvmppc_core_emulate_mfspr_e500() 399 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; kvmppc_core_emulate_mfspr_e500() 402 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; kvmppc_core_emulate_mfspr_e500() 407 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL]; kvmppc_core_emulate_mfspr_e500() 410 *spr_val = 
vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST]; kvmppc_core_emulate_mfspr_e500() 414 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; kvmppc_core_emulate_mfspr_e500() 418 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; kvmppc_core_emulate_mfspr_e500() 421 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; kvmppc_core_emulate_mfspr_e500()
|
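The MAS7/MAS3 arms above show one detail worth calling out: the guest's mas7_3 packs two 32-bit SPRs into one u64 (MAS7 in the high half, MAS3 in the low half), so each mtspr rewrites only its own half. A self-contained sketch of the same masking:

    #include <stdint.h>

    static void set_mas3(uint64_t *mas7_3, uint32_t v)
    {
            *mas7_3 &= ~(uint64_t)0xffffffff;      /* clear low half  */
            *mas7_3 |= v;                          /* install MAS3    */
    }

    static void set_mas7(uint64_t *mas7_3, uint32_t v)
    {
            *mas7_3 &= (uint64_t)0xffffffff;       /* clear high half */
            *mas7_3 |= (uint64_t)v << 32;          /* install MAS7    */
    }
|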
H A D | irq.h | 11 ret = ret || (kvm->arch.mpic != NULL); irqchip_in_kernel() 14 ret = ret || (kvm->arch.xics != NULL); irqchip_in_kernel()
|
H A D | timing.c | 38 mutex_lock(&vcpu->arch.exit_timing_lock); kvmppc_init_timing_stats() 40 vcpu->arch.last_exit_type = 0xDEAD; kvmppc_init_timing_stats() 42 vcpu->arch.timing_count_type[i] = 0; kvmppc_init_timing_stats() 43 vcpu->arch.timing_max_duration[i] = 0; kvmppc_init_timing_stats() 44 vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF; kvmppc_init_timing_stats() 45 vcpu->arch.timing_sum_duration[i] = 0; kvmppc_init_timing_stats() 46 vcpu->arch.timing_sum_quad_duration[i] = 0; kvmppc_init_timing_stats() 48 vcpu->arch.timing_last_exit = 0; kvmppc_init_timing_stats() 49 vcpu->arch.timing_exit.tv64 = 0; kvmppc_init_timing_stats() 50 vcpu->arch.timing_last_enter.tv64 = 0; kvmppc_init_timing_stats() 52 mutex_unlock(&vcpu->arch.exit_timing_lock); kvmppc_init_timing_stats() 59 mutex_lock(&vcpu->arch.exit_timing_lock); add_exit_timing() 61 vcpu->arch.timing_count_type[type]++; add_exit_timing() 64 old = vcpu->arch.timing_sum_duration[type]; add_exit_timing() 65 vcpu->arch.timing_sum_duration[type] += duration; add_exit_timing() 66 if (unlikely(old > vcpu->arch.timing_sum_duration[type])) { add_exit_timing() 69 __func__, old, vcpu->arch.timing_sum_duration[type], add_exit_timing() 70 type, vcpu->arch.timing_count_type[type]); add_exit_timing() 74 old = vcpu->arch.timing_sum_quad_duration[type]; add_exit_timing() 75 vcpu->arch.timing_sum_quad_duration[type] += (duration*duration); add_exit_timing() 76 if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) { add_exit_timing() 80 vcpu->arch.timing_sum_quad_duration[type], add_exit_timing() 81 type, vcpu->arch.timing_count_type[type]); add_exit_timing() 85 if (unlikely(duration < vcpu->arch.timing_min_duration[type])) add_exit_timing() 86 vcpu->arch.timing_min_duration[type] = duration; add_exit_timing() 87 if (unlikely(duration > vcpu->arch.timing_max_duration[type])) add_exit_timing() 88 vcpu->arch.timing_max_duration[type] = duration; add_exit_timing() 90 mutex_unlock(&vcpu->arch.exit_timing_lock); add_exit_timing() 95 u64 exit = vcpu->arch.timing_last_exit; kvmppc_update_timing_stats() 96 u64 enter = vcpu->arch.timing_last_enter.tv64; kvmppc_update_timing_stats() 99 vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64; kvmppc_update_timing_stats() 101 if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0)) kvmppc_update_timing_stats() 105 add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type); kvmppc_update_timing_stats() 107 add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter), kvmppc_update_timing_stats() 151 min = vcpu->arch.timing_min_duration[i]; kvmppc_exit_timing_show() 153 max = vcpu->arch.timing_max_duration[i]; kvmppc_exit_timing_show() 155 sum = vcpu->arch.timing_sum_duration[i]; kvmppc_exit_timing_show() 157 sum_quad = vcpu->arch.timing_sum_quad_duration[i]; kvmppc_exit_timing_show() 162 vcpu->arch.timing_count_type[i], kvmppc_exit_timing_show() 236 vcpu->arch.debugfs_exit_timing = debugfs_file; kvmppc_create_vcpu_debugfs() 241 if (vcpu->arch.debugfs_exit_timing) { kvmppc_remove_vcpu_debugfs() 242 debugfs_remove(vcpu->arch.debugfs_exit_timing); kvmppc_remove_vcpu_debugfs() 243 vcpu->arch.debugfs_exit_timing = NULL; kvmppc_remove_vcpu_debugfs()
|
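add_exit_timing() guards its u64 accumulators with a cheap wraparound test: after sum += duration, unsigned overflow is exactly the case where the new sum is smaller than the old one, so no wider type or extra flag is needed. A self-contained sketch:

    #include <stdint.h>
    #include <stdio.h>

    static void accumulate(uint64_t *sum, uint64_t duration)
    {
            uint64_t old = *sum;

            *sum += duration;                      /* may wrap past 2^64 */
            if (old > *sum)
                    fprintf(stderr, "cumulative time wrapped; stats inaccurate\n");
    }
|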
H A D | e500mc.c | 7 * This file is derived from arch/powerpc/kvm/e500.c, 106 vcpu->arch.pid = pid; kvmppc_set_pid() 123 mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr); kvmppc_core_vcpu_load_e500mc() 125 mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp); kvmppc_core_vcpu_load_e500mc() 126 vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT); kvmppc_core_vcpu_load_e500mc() 127 vcpu->arch.epsc = vcpu->arch.eplc; kvmppc_core_vcpu_load_e500mc() 128 mtspr(SPRN_EPLC, vcpu->arch.eplc); kvmppc_core_vcpu_load_e500mc() 129 mtspr(SPRN_EPSC, vcpu->arch.epsc); kvmppc_core_vcpu_load_e500mc() 131 mtspr(SPRN_GIVPR, vcpu->arch.ivpr); kvmppc_core_vcpu_load_e500mc() 132 mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); kvmppc_core_vcpu_load_e500mc() 133 mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); kvmppc_core_vcpu_load_e500mc() 134 mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0); kvmppc_core_vcpu_load_e500mc() 135 mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1); kvmppc_core_vcpu_load_e500mc() 136 mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2); kvmppc_core_vcpu_load_e500mc() 137 mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3); kvmppc_core_vcpu_load_e500mc() 139 mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0); kvmppc_core_vcpu_load_e500mc() 140 mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1); kvmppc_core_vcpu_load_e500mc() 142 mtspr(SPRN_GEPR, vcpu->arch.epr); kvmppc_core_vcpu_load_e500mc() 143 mtspr(SPRN_GDEAR, vcpu->arch.shared->dar); kvmppc_core_vcpu_load_e500mc() 144 mtspr(SPRN_GESR, vcpu->arch.shared->esr); kvmppc_core_vcpu_load_e500mc() 146 if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || kvmppc_core_vcpu_load_e500mc() 155 vcpu->arch.eplc = mfspr(SPRN_EPLC); kvmppc_core_vcpu_put_e500mc() 156 vcpu->arch.epsc = mfspr(SPRN_EPSC); kvmppc_core_vcpu_put_e500mc() 158 vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0); kvmppc_core_vcpu_put_e500mc() 159 vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1); kvmppc_core_vcpu_put_e500mc() 160 vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2); kvmppc_core_vcpu_put_e500mc() 161 vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3); kvmppc_core_vcpu_put_e500mc() 163 vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0); kvmppc_core_vcpu_put_e500mc() 164 vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1); kvmppc_core_vcpu_put_e500mc() 166 vcpu->arch.epr = mfspr(SPRN_GEPR); kvmppc_core_vcpu_put_e500mc() 167 vcpu->arch.shared->dar = mfspr(SPRN_GDEAR); kvmppc_core_vcpu_put_e500mc() 168 vcpu->arch.shared->esr = mfspr(SPRN_GESR); kvmppc_core_vcpu_put_e500mc() 170 vcpu->arch.oldpir = mfspr(SPRN_PIR); kvmppc_core_vcpu_put_e500mc() 203 vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \ kvmppc_core_vcpu_setup() 206 vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM; kvmppc_core_vcpu_setup() 208 vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP; kvmppc_core_vcpu_setup() 210 vcpu->arch.pvr = mfspr(SPRN_PVR); kvmppc_core_vcpu_setup() 213 vcpu->arch.cpu_type = KVM_CPU_E500MC; kvmppc_core_vcpu_setup() 235 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; kvmppc_core_get_sregs_e500mc() 236 sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; kvmppc_core_get_sregs_e500mc() 237 sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; kvmppc_core_get_sregs_e500mc() 262 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = kvmppc_core_set_sregs_e500mc() 267 vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = kvmppc_core_set_sregs_e500mc() 269 vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = kvmppc_core_set_sregs_e500mc() 283 *val = get_reg_val(id, 
vcpu->arch.sprg9); kvmppc_get_one_reg_e500mc() 299 vcpu->arch.sprg9 = set_reg_val(id, *val); kvmppc_set_one_reg_e500mc() 323 vcpu->arch.oldpir = 0xffffffff; kvmppc_core_vcpu_create_e500mc() 333 vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); kvmppc_core_vcpu_create_e500mc() 334 if (!vcpu->arch.shared) kvmppc_core_vcpu_create_e500mc() 354 free_page((unsigned long)vcpu->arch.shared); kvmppc_core_vcpu_free_e500mc() 376 kvm->arch.lpid = lpid; kvmppc_core_init_vm_e500mc() 382 int lpid = kvm->arch.lpid; kvmppc_core_destroy_vm_e500mc()
|
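vcpu_load/vcpu_put here form a symmetric shadow-register protocol: load pushes the vcpu->arch copies into the guest-visible SPRs with mtspr, put pulls them back with mfspr, so each value lives in exactly one place at a time; the oldpir check on load skips the costly invalidation when the vcpu resumes on the same physical CPU. A minimal sketch of the pairing (one illustrative register stands in for the GSPRG/GSRR set):

    #include <stdint.h>

    static uint64_t hw_gsprg0;                     /* stands in for SPRN_GSPRG0 */

    struct shared_regs { uint64_t sprg0; };

    static void vcpu_load(struct shared_regs *s) { hw_gsprg0 = s->sprg0; } /* mtspr */
    static void vcpu_put(struct shared_regs *s)  { s->sprg0 = hw_gsprg0; } /* mfspr */
|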
H A D | book3s_hv.c | 13 * This file is derived from arch/powerpc/kvm/book3s.c, 126 if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid)) kvmppc_fast_vcpu_kick_hv() 160 * Updates to busy_stolen are protected by arch.tbacct_lock; 169 struct kvmppc_vcore *vc = vcpu->arch.vcore; kvmppc_core_vcpu_load_hv() 186 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); kvmppc_core_vcpu_load_hv() 187 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && kvmppc_core_vcpu_load_hv() 188 vcpu->arch.busy_preempt != TB_NIL) { kvmppc_core_vcpu_load_hv() 189 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; kvmppc_core_vcpu_load_hv() 190 vcpu->arch.busy_preempt = TB_NIL; kvmppc_core_vcpu_load_hv() 192 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); kvmppc_core_vcpu_load_hv() 197 struct kvmppc_vcore *vc = vcpu->arch.vcore; kvmppc_core_vcpu_put_hv() 205 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); kvmppc_core_vcpu_put_hv() 206 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) kvmppc_core_vcpu_put_hv() 207 vcpu->arch.busy_preempt = mftb(); kvmppc_core_vcpu_put_hv() 208 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); kvmppc_core_vcpu_put_hv() 219 vcpu->arch.shregs.msr = msr; kvmppc_set_msr_hv() 225 vcpu->arch.pvr = pvr; kvmppc_set_pvr_hv() 231 struct kvmppc_vcore *vc = vcpu->arch.vcore; kvmppc_set_arch_compat() 237 * If an arch bit is set in PCR, all the defined kvmppc_set_arch_compat() 238 * higher-order arch bits also have to be set. kvmppc_set_arch_compat() 274 vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap); kvmppc_dump_regs() 280 vcpu->arch.ctr, vcpu->arch.lr); kvmppc_dump_regs() 282 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); kvmppc_dump_regs() 284 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); kvmppc_dump_regs() 286 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); kvmppc_dump_regs() 288 vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr); kvmppc_dump_regs() 289 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); kvmppc_dump_regs() 291 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); kvmppc_dump_regs() 292 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); kvmppc_dump_regs() 293 for (r = 0; r < vcpu->arch.slb_max; ++r) kvmppc_dump_regs() 295 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); kvmppc_dump_regs() 297 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, kvmppc_dump_regs() 298 vcpu->arch.last_inst); kvmppc_dump_regs() 329 spin_lock(&vcpu->arch.vpa_update_lock); set_vpa() 335 spin_unlock(&vcpu->arch.vpa_update_lock); set_vpa() 398 spin_lock(&tvcpu->arch.vpa_update_lock); do_h_register_vpa() 404 vpap = &tvcpu->arch.vpa; do_h_register_vpa() 415 if (!vpa_is_registered(&tvcpu->arch.vpa)) do_h_register_vpa() 418 vpap = &tvcpu->arch.dtl; do_h_register_vpa() 425 if (!vpa_is_registered(&tvcpu->arch.vpa)) do_h_register_vpa() 428 vpap = &tvcpu->arch.slb_shadow; do_h_register_vpa() 435 if (vpa_is_registered(&tvcpu->arch.dtl) || do_h_register_vpa() 436 vpa_is_registered(&tvcpu->arch.slb_shadow)) do_h_register_vpa() 439 vpap = &tvcpu->arch.vpa; do_h_register_vpa() 444 vpap = &tvcpu->arch.dtl; do_h_register_vpa() 449 vpap = &tvcpu->arch.slb_shadow; do_h_register_vpa() 460 spin_unlock(&tvcpu->arch.vpa_update_lock); do_h_register_vpa() 482 spin_unlock(&vcpu->arch.vpa_update_lock); kvmppc_update_vpa() 487 spin_lock(&vcpu->arch.vpa_update_lock); kvmppc_update_vpa() 517 if (!(vcpu->arch.vpa.update_pending || kvmppc_update_vpas() 518 vcpu->arch.slb_shadow.update_pending || kvmppc_update_vpas() 519 vcpu->arch.dtl.update_pending)) kvmppc_update_vpas() 522 
spin_lock(&vcpu->arch.vpa_update_lock); kvmppc_update_vpas() 523 if (vcpu->arch.vpa.update_pending) { kvmppc_update_vpas() 524 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); kvmppc_update_vpas() 525 if (vcpu->arch.vpa.pinned_addr) kvmppc_update_vpas() 526 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); kvmppc_update_vpas() 528 if (vcpu->arch.dtl.update_pending) { kvmppc_update_vpas() 529 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); kvmppc_update_vpas() 530 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; kvmppc_update_vpas() 531 vcpu->arch.dtl_index = 0; kvmppc_update_vpas() 533 if (vcpu->arch.slb_shadow.update_pending) kvmppc_update_vpas() 534 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); kvmppc_update_vpas() 535 spin_unlock(&vcpu->arch.vpa_update_lock); kvmppc_update_vpas() 565 dt = vcpu->arch.dtl_ptr; kvmppc_create_dtl_entry() 566 vpa = vcpu->arch.vpa.pinned_addr; kvmppc_create_dtl_entry() 569 stolen = core_stolen - vcpu->arch.stolen_logged; kvmppc_create_dtl_entry() 570 vcpu->arch.stolen_logged = core_stolen; kvmppc_create_dtl_entry() 571 spin_lock_irq(&vcpu->arch.tbacct_lock); kvmppc_create_dtl_entry() 572 stolen += vcpu->arch.busy_stolen; kvmppc_create_dtl_entry() 573 vcpu->arch.busy_stolen = 0; kvmppc_create_dtl_entry() 574 spin_unlock_irq(&vcpu->arch.tbacct_lock); kvmppc_create_dtl_entry() 579 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); kvmppc_create_dtl_entry() 583 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); kvmppc_create_dtl_entry() 585 if (dt == vcpu->arch.dtl.pinned_end) kvmppc_create_dtl_entry() 586 dt = vcpu->arch.dtl.pinned_addr; kvmppc_create_dtl_entry() 587 vcpu->arch.dtl_ptr = dt; kvmppc_create_dtl_entry() 590 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); kvmppc_create_dtl_entry() 591 vcpu->arch.dtl.dirty = true; kvmppc_create_dtl_entry() 596 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) kvmppc_power8_compatible() 598 if ((!vcpu->arch.vcore->arch_compat) && kvmppc_power8_compatible() 619 vcpu->arch.ciabr = value1; kvmppc_h_set_mode() 628 vcpu->arch.dawr = value1; kvmppc_h_set_mode() 629 vcpu->arch.dawrx = value2; kvmppc_h_set_mode() 638 struct kvmppc_vcore *vcore = target->arch.vcore; kvm_arch_vcpu_yield_to() 649 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && kvm_arch_vcpu_yield_to() 662 spin_lock(&vcpu->arch.vpa_update_lock); kvmppc_get_yield_count() 663 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; kvmppc_get_yield_count() 666 spin_unlock(&vcpu->arch.vpa_update_lock); kvmppc_get_yield_count() 679 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) kvmppc_pseries_do_hcall() 692 tvcpu->arch.prodded = 1; kvmppc_pseries_do_hcall() 694 if (vcpu->arch.ceded) { kvmppc_pseries_do_hcall() 721 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) kvmppc_pseries_do_hcall() 767 vcpu->arch.hcall_needed = 0; kvmppc_pseries_do_hcall() 812 run->debug.arch.address = kvmppc_get_pc(vcpu); kvmppc_emulate_debug_inst() 829 switch (vcpu->arch.trap) { kvmppc_handle_exit_hv() 865 flags = vcpu->arch.shregs.msr & 0x1f0000ull; kvmppc_handle_exit_hv() 883 vcpu->arch.hcall_needed = 1; kvmppc_handle_exit_hv() 898 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); kvmppc_handle_exit_hv() 899 vcpu->arch.fault_dsisr = 0; kvmppc_handle_exit_hv() 910 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) kvmppc_handle_exit_hv() 911 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? 
kvmppc_handle_exit_hv() 912 swab32(vcpu->arch.emul_inst) : kvmppc_handle_exit_hv() 913 vcpu->arch.emul_inst; kvmppc_handle_exit_hv() 933 vcpu->arch.trap, kvmppc_get_pc(vcpu), kvmppc_handle_exit_hv() 934 vcpu->arch.shregs.msr); kvmppc_handle_exit_hv() 935 run->hw.hardware_exit_reason = vcpu->arch.trap; kvmppc_handle_exit_hv() 949 sregs->pvr = vcpu->arch.pvr; kvm_arch_vcpu_ioctl_get_sregs_hv() 950 for (i = 0; i < vcpu->arch.slb_max; i++) { kvm_arch_vcpu_ioctl_get_sregs_hv() 951 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; kvm_arch_vcpu_ioctl_get_sregs_hv() 952 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; kvm_arch_vcpu_ioctl_get_sregs_hv() 964 if (sregs->pvr != vcpu->arch.pvr) kvm_arch_vcpu_ioctl_set_sregs_hv() 968 for (i = 0; i < vcpu->arch.slb_nr; i++) { kvm_arch_vcpu_ioctl_set_sregs_hv() 970 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; kvm_arch_vcpu_ioctl_set_sregs_hv() 971 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; kvm_arch_vcpu_ioctl_set_sregs_hv() 975 vcpu->arch.slb_max = j; kvm_arch_vcpu_ioctl_set_sregs_hv() 984 struct kvmppc_vcore *vc = vcpu->arch.vcore; kvmppc_set_lpcr() 998 if (vcpu->arch.vcore != vc) kvm_for_each_vcpu() 1001 vcpu->arch.intr_msr |= MSR_LE; kvm_for_each_vcpu() 1003 vcpu->arch.intr_msr &= ~MSR_LE; kvm_for_each_vcpu() 1038 *val = get_reg_val(id, vcpu->arch.dabr); kvmppc_get_one_reg_hv() 1041 *val = get_reg_val(id, vcpu->arch.dabrx); kvmppc_get_one_reg_hv() 1044 *val = get_reg_val(id, vcpu->arch.dscr); kvmppc_get_one_reg_hv() 1047 *val = get_reg_val(id, vcpu->arch.purr); kvmppc_get_one_reg_hv() 1050 *val = get_reg_val(id, vcpu->arch.spurr); kvmppc_get_one_reg_hv() 1053 *val = get_reg_val(id, vcpu->arch.amr); kvmppc_get_one_reg_hv() 1056 *val = get_reg_val(id, vcpu->arch.uamor); kvmppc_get_one_reg_hv() 1060 *val = get_reg_val(id, vcpu->arch.mmcr[i]); kvmppc_get_one_reg_hv() 1064 *val = get_reg_val(id, vcpu->arch.pmc[i]); kvmppc_get_one_reg_hv() 1068 *val = get_reg_val(id, vcpu->arch.spmc[i]); kvmppc_get_one_reg_hv() 1071 *val = get_reg_val(id, vcpu->arch.siar); kvmppc_get_one_reg_hv() 1074 *val = get_reg_val(id, vcpu->arch.sdar); kvmppc_get_one_reg_hv() 1077 *val = get_reg_val(id, vcpu->arch.sier); kvmppc_get_one_reg_hv() 1080 *val = get_reg_val(id, vcpu->arch.iamr); kvmppc_get_one_reg_hv() 1083 *val = get_reg_val(id, vcpu->arch.pspb); kvmppc_get_one_reg_hv() 1086 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); kvmppc_get_one_reg_hv() 1089 *val = get_reg_val(id, vcpu->arch.dawr); kvmppc_get_one_reg_hv() 1092 *val = get_reg_val(id, vcpu->arch.dawrx); kvmppc_get_one_reg_hv() 1095 *val = get_reg_val(id, vcpu->arch.ciabr); kvmppc_get_one_reg_hv() 1098 *val = get_reg_val(id, vcpu->arch.csigr); kvmppc_get_one_reg_hv() 1101 *val = get_reg_val(id, vcpu->arch.tacr); kvmppc_get_one_reg_hv() 1104 *val = get_reg_val(id, vcpu->arch.tcscr); kvmppc_get_one_reg_hv() 1107 *val = get_reg_val(id, vcpu->arch.pid); kvmppc_get_one_reg_hv() 1110 *val = get_reg_val(id, vcpu->arch.acop); kvmppc_get_one_reg_hv() 1113 *val = get_reg_val(id, vcpu->arch.wort); kvmppc_get_one_reg_hv() 1116 spin_lock(&vcpu->arch.vpa_update_lock); kvmppc_get_one_reg_hv() 1117 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); kvmppc_get_one_reg_hv() 1118 spin_unlock(&vcpu->arch.vpa_update_lock); kvmppc_get_one_reg_hv() 1121 spin_lock(&vcpu->arch.vpa_update_lock); kvmppc_get_one_reg_hv() 1122 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; kvmppc_get_one_reg_hv() 1123 val->vpaval.length = vcpu->arch.slb_shadow.len; kvmppc_get_one_reg_hv() 1124 
spin_unlock(&vcpu->arch.vpa_update_lock); kvmppc_get_one_reg_hv() 1127 spin_lock(&vcpu->arch.vpa_update_lock); kvmppc_get_one_reg_hv() 1128 val->vpaval.addr = vcpu->arch.dtl.next_gpa; kvmppc_get_one_reg_hv() 1129 val->vpaval.length = vcpu->arch.dtl.len; kvmppc_get_one_reg_hv() 1130 spin_unlock(&vcpu->arch.vpa_update_lock); kvmppc_get_one_reg_hv() 1133 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); kvmppc_get_one_reg_hv() 1137 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); kvmppc_get_one_reg_hv() 1140 *val = get_reg_val(id, vcpu->arch.ppr); kvmppc_get_one_reg_hv() 1144 *val = get_reg_val(id, vcpu->arch.tfhar); kvmppc_get_one_reg_hv() 1147 *val = get_reg_val(id, vcpu->arch.tfiar); kvmppc_get_one_reg_hv() 1150 *val = get_reg_val(id, vcpu->arch.texasr); kvmppc_get_one_reg_hv() 1154 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); kvmppc_get_one_reg_hv() 1162 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; kvmppc_get_one_reg_hv() 1165 val->vval = vcpu->arch.vr_tm.vr[i-32]; kvmppc_get_one_reg_hv() 1172 *val = get_reg_val(id, vcpu->arch.cr_tm); kvmppc_get_one_reg_hv() 1175 *val = get_reg_val(id, vcpu->arch.lr_tm); kvmppc_get_one_reg_hv() 1178 *val = get_reg_val(id, vcpu->arch.ctr_tm); kvmppc_get_one_reg_hv() 1181 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); kvmppc_get_one_reg_hv() 1184 *val = get_reg_val(id, vcpu->arch.amr_tm); kvmppc_get_one_reg_hv() 1187 *val = get_reg_val(id, vcpu->arch.ppr_tm); kvmppc_get_one_reg_hv() 1190 *val = get_reg_val(id, vcpu->arch.vrsave_tm); kvmppc_get_one_reg_hv() 1194 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); kvmppc_get_one_reg_hv() 1199 *val = get_reg_val(id, vcpu->arch.dscr_tm); kvmppc_get_one_reg_hv() 1202 *val = get_reg_val(id, vcpu->arch.tar_tm); kvmppc_get_one_reg_hv() 1206 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); kvmppc_get_one_reg_hv() 1230 vcpu->arch.dabr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1233 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; kvmppc_set_one_reg_hv() 1236 vcpu->arch.dscr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1239 vcpu->arch.purr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1242 vcpu->arch.spurr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1245 vcpu->arch.amr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1248 vcpu->arch.uamor = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1252 vcpu->arch.mmcr[i] = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1256 vcpu->arch.pmc[i] = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1260 vcpu->arch.spmc[i] = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1263 vcpu->arch.siar = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1266 vcpu->arch.sdar = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1269 vcpu->arch.sier = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1272 vcpu->arch.iamr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1275 vcpu->arch.pspb = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1278 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1281 vcpu->arch.dawr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1284 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; kvmppc_set_one_reg_hv() 1287 vcpu->arch.ciabr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1289 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) kvmppc_set_one_reg_hv() 1290 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ kvmppc_set_one_reg_hv() 1293 vcpu->arch.csigr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1296 vcpu->arch.tacr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1299 vcpu->arch.tcscr = set_reg_val(id, *val); 
kvmppc_set_one_reg_hv() 1302 vcpu->arch.pid = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1305 vcpu->arch.acop = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1308 vcpu->arch.wort = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1313 if (!addr && (vcpu->arch.slb_shadow.next_gpa || kvmppc_set_one_reg_hv() 1314 vcpu->arch.dtl.next_gpa)) kvmppc_set_one_reg_hv() 1316 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); kvmppc_set_one_reg_hv() 1322 if (addr && !vcpu->arch.vpa.next_gpa) kvmppc_set_one_reg_hv() 1324 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); kvmppc_set_one_reg_hv() 1331 !vcpu->arch.vpa.next_gpa)) kvmppc_set_one_reg_hv() 1334 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); kvmppc_set_one_reg_hv() 1338 vcpu->arch.vcore->tb_offset = kvmppc_set_one_reg_hv() 1348 vcpu->arch.ppr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1352 vcpu->arch.tfhar = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1355 vcpu->arch.tfiar = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1358 vcpu->arch.texasr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1362 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1370 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; kvmppc_set_one_reg_hv() 1373 vcpu->arch.vr_tm.vr[i-32] = val->vval; kvmppc_set_one_reg_hv() 1379 vcpu->arch.cr_tm = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1382 vcpu->arch.lr_tm = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1385 vcpu->arch.ctr_tm = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1388 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1391 vcpu->arch.amr_tm = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1394 vcpu->arch.ppr_tm = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1397 vcpu->arch.vrsave_tm = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1401 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1406 vcpu->arch.dscr_tm = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1409 vcpu->arch.tar_tm = set_reg_val(id, *val); kvmppc_set_one_reg_hv() 1437 vcore->lpcr = kvm->arch.lpcr; kvmppc_vcore_create() 1456 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)}, 1457 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)}, 1458 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)}, 1459 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)}, 1460 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)}, 1583 if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir)) debugfs_vcpu_init() 1585 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); debugfs_vcpu_init() 1586 if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir)) debugfs_vcpu_init() 1588 vcpu->arch.debugfs_timings = debugfs_vcpu_init() 1589 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, debugfs_vcpu_init() 1620 vcpu->arch.shared = &vcpu->arch.shregs; kvmppc_core_vcpu_create_hv() 1627 vcpu->arch.shared_big_endian = true; kvmppc_core_vcpu_create_hv() 1629 vcpu->arch.shared_big_endian = false; kvmppc_core_vcpu_create_hv() 1632 vcpu->arch.mmcr[0] = MMCR0_FC; kvmppc_core_vcpu_create_hv() 1633 vcpu->arch.ctrl = CTRL_RUNLATCH; kvmppc_core_vcpu_create_hv() 1636 spin_lock_init(&vcpu->arch.vpa_update_lock); kvmppc_core_vcpu_create_hv() 1637 spin_lock_init(&vcpu->arch.tbacct_lock); kvmppc_core_vcpu_create_hv() 1638 vcpu->arch.busy_preempt = TB_NIL; kvmppc_core_vcpu_create_hv() 1639 vcpu->arch.intr_msr = MSR_SF | MSR_ME; kvmppc_core_vcpu_create_hv() 1643 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; kvmppc_core_vcpu_create_hv() 1645 init_waitqueue_head(&vcpu->arch.cpu_run); 
kvmppc_core_vcpu_create_hv() 1648 vcore = kvm->arch.vcores[core]; kvmppc_core_vcpu_create_hv() 1651 kvm->arch.vcores[core] = vcore; kvmppc_core_vcpu_create_hv() 1652 kvm->arch.online_vcores++; kvmppc_core_vcpu_create_hv() 1662 vcpu->arch.vcore = vcore; kvmppc_core_vcpu_create_hv() 1663 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; kvmppc_core_vcpu_create_hv() 1665 vcpu->arch.cpu_type = KVM_CPU_3S_64; kvmppc_core_vcpu_create_hv() 1687 spin_lock(&vcpu->arch.vpa_update_lock); kvmppc_core_vcpu_free_hv() 1688 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); kvmppc_core_vcpu_free_hv() 1689 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); kvmppc_core_vcpu_free_hv() 1690 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); kvmppc_core_vcpu_free_hv() 1691 spin_unlock(&vcpu->arch.vpa_update_lock); kvmppc_core_vcpu_free_hv() 1707 if (now > vcpu->arch.dec_expires) { kvmppc_set_timer() 1713 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC kvmppc_set_timer() 1715 hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), kvmppc_set_timer() 1717 vcpu->arch.timer_running = 1; kvmppc_set_timer() 1722 vcpu->arch.ceded = 0; kvmppc_end_cede() 1723 if (vcpu->arch.timer_running) { kvmppc_end_cede() 1724 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); kvmppc_end_cede() 1725 vcpu->arch.timer_running = 0; kvmppc_end_cede() 1736 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) kvmppc_remove_runnable() 1738 spin_lock_irq(&vcpu->arch.tbacct_lock); kvmppc_remove_runnable() 1740 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - kvmppc_remove_runnable() 1741 vcpu->arch.stolen_logged; kvmppc_remove_runnable() 1742 vcpu->arch.busy_preempt = now; kvmppc_remove_runnable() 1743 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; kvmppc_remove_runnable() 1744 spin_unlock_irq(&vcpu->arch.tbacct_lock); kvmppc_remove_runnable() 1746 list_del(&vcpu->arch.run_list); kvmppc_remove_runnable() 1795 struct kvmppc_vcore *vc = vcpu->arch.vcore; kvmppc_start_thread() 1797 if (vcpu->arch.timer_running) { kvmppc_start_thread() 1798 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); kvmppc_start_thread() 1799 vcpu->arch.timer_running = 0; kvmppc_start_thread() 1801 cpu = vc->pcpu + vcpu->arch.ptid; kvmppc_start_thread() 1804 tpaca->kvm_hstate.ptid = vcpu->arch.ptid; kvmppc_start_thread() 1904 arch.run_list) { prepare_threads() 1905 if (signal_pending(vcpu->arch.run_task)) prepare_threads() 1906 vcpu->arch.ret = -EINTR; prepare_threads() 1907 else if (vcpu->arch.vpa.update_pending || prepare_threads() 1908 vcpu->arch.slb_shadow.update_pending || prepare_threads() 1909 vcpu->arch.dtl.update_pending) prepare_threads() 1910 vcpu->arch.ret = RESUME_GUEST; prepare_threads() 1914 wake_up(&vcpu->arch.cpu_run); prepare_threads() 1926 arch.run_list) { post_guest_process() 1928 if (now < vcpu->arch.dec_expires && post_guest_process() 1935 if (vcpu->arch.trap) post_guest_process() 1936 ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, post_guest_process() 1937 vcpu->arch.run_task); post_guest_process() 1939 vcpu->arch.ret = ret; post_guest_process() 1940 vcpu->arch.trap = 0; post_guest_process() 1942 if (vcpu->arch.ceded) { post_guest_process() 1948 if (!is_kvmppc_resume_guest(vcpu->arch.ret)) { post_guest_process() 1950 wake_up(&vcpu->arch.cpu_run); post_guest_process() 1972 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) kvmppc_run_core() 1992 arch.run_list) { kvmppc_run_core() 1993 vcpu->arch.ret = -EBUSY; kvmppc_run_core() 1995 wake_up(&vcpu->arch.cpu_run); kvmppc_run_core() 2002 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { 
kvmppc_run_core() 2034 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) kvmppc_run_core() 2068 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); kvmppc_wait_for_exec() 2069 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) kvmppc_wait_for_exec() 2071 finish_wait(&vcpu->arch.cpu_run, &wait); kvmppc_wait_for_exec() 2091 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { kvmppc_vcore_blocked() 2092 if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) { kvmppc_vcore_blocked() 2122 vcpu->arch.ret = RESUME_GUEST; kvmppc_run_vcpu() 2123 vcpu->arch.trap = 0; kvmppc_run_vcpu() 2129 vc = vcpu->arch.vcore; kvmppc_run_vcpu() 2131 vcpu->arch.ceded = 0; kvmppc_run_vcpu() 2132 vcpu->arch.run_task = current; kvmppc_run_vcpu() 2133 vcpu->arch.kvm_run = kvm_run; kvmppc_run_vcpu() 2134 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); kvmppc_run_vcpu() 2135 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; kvmppc_run_vcpu() 2136 vcpu->arch.busy_preempt = TB_NIL; kvmppc_run_vcpu() 2137 list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); kvmppc_run_vcpu() 2156 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && kvmppc_run_vcpu() 2165 arch.run_list) { kvmppc_run_vcpu() 2167 if (signal_pending(v->arch.run_task)) { kvmppc_run_vcpu() 2170 v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; kvmppc_run_vcpu() 2171 v->arch.ret = -EINTR; kvmppc_run_vcpu() 2172 wake_up(&v->arch.cpu_run); kvmppc_run_vcpu() 2175 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) kvmppc_run_vcpu() 2178 list_for_each_entry(v, &vc->runnable_threads, arch.run_list) { kvmppc_run_vcpu() 2179 if (!v->arch.pending_exceptions) kvmppc_run_vcpu() 2180 n_ceded += v->arch.ceded; kvmppc_run_vcpu() 2182 v->arch.ceded = 0; kvmppc_run_vcpu() 2198 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && kvmppc_run_vcpu() 2206 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { kvmppc_run_vcpu() 2210 vcpu->arch.ret = -EINTR; kvmppc_run_vcpu() 2216 struct kvm_vcpu, arch.run_list); kvmppc_run_vcpu() 2217 wake_up(&v->arch.cpu_run); kvmppc_run_vcpu() 2222 return vcpu->arch.ret; kvmppc_run_vcpu() 2230 if (!vcpu->arch.sane) { kvmppc_vcpu_run_hv() 2243 atomic_inc(&vcpu->kvm->arch.vcpus_running); kvmppc_vcpu_run_hv() 2248 if (!vcpu->kvm->arch.hpte_setup_done) { kvmppc_vcpu_run_hv() 2257 vcpu->arch.wqp = &vcpu->arch.vcore->wq; kvmppc_vcpu_run_hv() 2258 vcpu->arch.pgdir = current->mm->pgd; kvmppc_vcpu_run_hv() 2259 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; kvmppc_vcpu_run_hv() 2265 !(vcpu->arch.shregs.msr & MSR_PR)) { kvmppc_vcpu_run_hv() 2273 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); kvmppc_vcpu_run_hv() 2279 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; kvmppc_vcpu_run_hv() 2280 atomic_dec(&vcpu->kvm->arch.vcpus_running); kvmppc_vcpu_run_hv() 2365 if (!dont || free->arch.rmap != dont->arch.rmap) { kvmppc_core_free_memslot_hv() 2366 vfree(free->arch.rmap); kvmppc_core_free_memslot_hv() 2367 free->arch.rmap = NULL; kvmppc_core_free_memslot_hv() 2374 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); kvmppc_core_create_memslot_hv() 2375 if (!slot->arch.rmap) kvmppc_core_create_memslot_hv() 2408 * Update LPCR values in kvm->arch and in vcores. 
2416 if ((kvm->arch.lpcr & mask) == lpcr) kvmppc_update_lpcr() 2419 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; kvmppc_update_lpcr() 2422 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; kvmppc_update_lpcr() 2428 if (++cores_done >= kvm->arch.online_vcores) kvmppc_update_lpcr() 2450 if (kvm->arch.hpte_setup_done) kvmppc_hv_setup_htab_rma() 2454 if (!kvm->arch.hpt_virt) { kvmppc_hv_setup_htab_rma() 2491 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | kvmppc_hv_setup_htab_rma() 2501 /* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */ kvmppc_hv_setup_htab_rma() 2503 kvm->arch.hpte_setup_done = 1; kvmppc_hv_setup_htab_rma() 2526 kvm->arch.lpid = lpid; kvmppc_core_init_vm_hv() 2533 cpumask_setall(&kvm->arch.need_tlb_flush); kvmppc_core_init_vm_hv() 2536 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, kvmppc_core_init_vm_hv() 2537 sizeof(kvm->arch.enabled_hcalls)); kvmppc_core_init_vm_hv() 2539 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); kvmppc_core_init_vm_hv() 2542 kvm->arch.host_lpid = mfspr(SPRN_LPID); kvmppc_core_init_vm_hv() 2543 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); kvmppc_core_init_vm_hv() 2547 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | kvmppc_core_init_vm_hv() 2552 kvm->arch.lpcr = lpcr; kvmppc_core_init_vm_hv() 2564 kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); kvmppc_core_init_vm_hv() 2565 if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir)) kvmppc_core_init_vm_hv() 2576 if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) { kvmppc_free_vcores() 2577 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; kvmppc_free_vcores() 2581 kfree(kvm->arch.vcores[i]); kvmppc_free_vcores() 2583 kvm->arch.online_vcores = 0; kvmppc_free_vcores() 2588 debugfs_remove_recursive(kvm->arch.debugfs_dir); kvmppc_core_destroy_vm_hv()
|
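The long get/set_one_reg pairs in this file implement KVM's ONE_REG interface: each 64-bit register ID names one vcpu->arch field, and get_reg_val()/set_reg_val() marshal it through a value union sized by the ID. A self-contained sketch of the accessor shape (IDs, fields, and error code are illustrative):

    #include <stdint.h>

    enum { REG_DABR = 1, REG_DSCR = 2 };           /* illustrative IDs */

    struct vcpu_arch { uint64_t dabr, dscr; };

    static int get_one_reg(const struct vcpu_arch *a, int id, uint64_t *val)
    {
            switch (id) {
            case REG_DABR: *val = a->dabr; break;
            case REG_DSCR: *val = a->dscr; break;
            default: return -1;                    /* kernel returns -EINVAL */
            }
            return 0;
    }

    static int set_one_reg(struct vcpu_arch *a, int id, uint64_t val)
    {
            switch (id) {
            case REG_DABR: a->dabr = val; break;
            case REG_DSCR: a->dscr = val; break;
            default: return -1;
            }
            return 0;
    }
|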
H A D | booke.c | 78 printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr); kvmppc_dump_vcpu() 79 printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); kvmppc_dump_vcpu() 80 printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, kvmppc_dump_vcpu() 81 vcpu->arch.shared->srr1); kvmppc_dump_vcpu() 83 printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); kvmppc_dump_vcpu() 100 vcpu->arch.shadow_msr &= ~MSR_SPE; kvmppc_vcpu_disable_spe() 109 vcpu->arch.shadow_msr |= MSR_SPE; kvmppc_vcpu_enable_spe() 115 if (vcpu->arch.shared->msr & MSR_SPE) { kvmppc_vcpu_sync_spe() 116 if (!(vcpu->arch.shadow_msr & MSR_SPE)) kvmppc_vcpu_sync_spe() 118 } else if (vcpu->arch.shadow_msr & MSR_SPE) { kvmppc_vcpu_sync_spe() 142 load_fp_state(&vcpu->arch.fp); kvmppc_load_guest_fp() 143 current->thread.fp_save_area = &vcpu->arch.fp; kvmppc_load_guest_fp() 167 vcpu->arch.shadow_msr &= ~MSR_FP; kvmppc_vcpu_sync_fpu() 168 vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP; kvmppc_vcpu_sync_fpu() 183 load_vr_state(&vcpu->arch.vr); kvmppc_load_guest_altivec() 184 current->thread.vr_save_area = &vcpu->arch.vr; kvmppc_load_guest_altivec() 210 vcpu->arch.shadow_msr &= ~MSR_DE; kvmppc_vcpu_sync_debug() 211 vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE; kvmppc_vcpu_sync_debug() 221 vcpu->arch.shared->msr |= MSR_DE; kvmppc_vcpu_sync_debug() 223 vcpu->arch.shadow_msr |= MSR_DE; kvmppc_vcpu_sync_debug() 224 vcpu->arch.shared->msr &= ~MSR_DE; kvmppc_vcpu_sync_debug() 235 u32 old_msr = vcpu->arch.shared->msr; kvmppc_set_msr() 241 vcpu->arch.shared->msr = new_msr; kvmppc_set_msr() 253 set_bit(priority, &vcpu->arch.pending_exceptions); kvmppc_booke_queue_irqprio() 259 vcpu->arch.queued_dear = dear_flags; kvmppc_core_queue_dtlb_miss() 260 vcpu->arch.queued_esr = esr_flags; kvmppc_core_queue_dtlb_miss() 267 vcpu->arch.queued_dear = dear_flags; kvmppc_core_queue_data_storage() 268 vcpu->arch.queued_esr = esr_flags; kvmppc_core_queue_data_storage() 279 vcpu->arch.queued_esr = esr_flags; kvmppc_core_queue_inst_storage() 286 vcpu->arch.queued_dear = dear_flags; kvmppc_core_queue_alignment() 287 vcpu->arch.queued_esr = esr_flags; kvmppc_core_queue_alignment() 293 vcpu->arch.queued_esr = esr_flags; kvmppc_core_queue_program() 304 return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); kvmppc_core_pending_dec() 309 clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); kvmppc_core_dequeue_dec() 325 clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); kvmppc_core_dequeue_external() 326 clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); kvmppc_core_dequeue_external() 336 clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions); kvmppc_core_dequeue_watchdog() 346 clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions); kvmppc_core_dequeue_debug() 357 vcpu->arch.csrr0 = srr0; set_guest_csrr() 358 vcpu->arch.csrr1 = srr1; set_guest_csrr() 364 vcpu->arch.dsrr0 = srr0; set_guest_dsrr() 365 vcpu->arch.dsrr1 = srr1; set_guest_dsrr() 373 vcpu->arch.mcsrr0 = srr0; set_guest_mcsrr() 374 vcpu->arch.mcsrr1 = srr1; set_guest_mcsrr() 384 ulong crit_raw = vcpu->arch.shared->critical; kvmppc_booke_irqprio_deliver() 389 ulong new_msr = vcpu->arch.shared->msr; kvmppc_booke_irqprio_deliver() 392 if (!(vcpu->arch.shared->msr & MSR_SF)) { kvmppc_booke_irqprio_deliver() 400 crit = crit && !(vcpu->arch.shared->msr & MSR_PR); kvmppc_booke_irqprio_deliver() 407 if ((priority == BOOKE_IRQPRIO_EXTERNAL) && 
vcpu->arch.epr_flags) kvmppc_booke_irqprio_deliver() 440 allowed = vcpu->arch.shared->msr & MSR_CE; kvmppc_booke_irqprio_deliver() 446 allowed = vcpu->arch.shared->msr & MSR_ME; kvmppc_booke_irqprio_deliver() 456 allowed = vcpu->arch.shared->msr & MSR_EE; kvmppc_booke_irqprio_deliver() 462 allowed = vcpu->arch.shared->msr & MSR_DE; kvmppc_booke_irqprio_deliver() 476 set_guest_srr(vcpu, vcpu->arch.pc, kvmppc_booke_irqprio_deliver() 477 vcpu->arch.shared->msr); kvmppc_booke_irqprio_deliver() 480 set_guest_csrr(vcpu, vcpu->arch.pc, kvmppc_booke_irqprio_deliver() 481 vcpu->arch.shared->msr); kvmppc_booke_irqprio_deliver() 484 set_guest_dsrr(vcpu, vcpu->arch.pc, kvmppc_booke_irqprio_deliver() 485 vcpu->arch.shared->msr); kvmppc_booke_irqprio_deliver() 488 set_guest_mcsrr(vcpu, vcpu->arch.pc, kvmppc_booke_irqprio_deliver() 489 vcpu->arch.shared->msr); kvmppc_booke_irqprio_deliver() 493 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; kvmppc_booke_irqprio_deliver() 495 kvmppc_set_esr(vcpu, vcpu->arch.queued_esr); kvmppc_booke_irqprio_deliver() 497 kvmppc_set_dar(vcpu, vcpu->arch.queued_dear); kvmppc_booke_irqprio_deliver() 499 if (vcpu->arch.epr_flags & KVMPPC_EPR_USER) kvmppc_booke_irqprio_deliver() 501 else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) { kvmppc_booke_irqprio_deliver() 502 BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC); kvmppc_booke_irqprio_deliver() 509 if (vcpu->arch.epcr & SPRN_EPCR_ICM) kvmppc_booke_irqprio_deliver() 515 clear_bit(priority, &vcpu->arch.pending_exceptions); kvmppc_booke_irqprio_deliver() 524 if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE) kvmppc_booke_irqprio_deliver() 526 if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE) kvmppc_booke_irqprio_deliver() 528 if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK) kvmppc_booke_irqprio_deliver() 544 u32 period = TCR_GET_WP(vcpu->arch.tcr); watchdog_next_timeout() 575 if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS)) arm_next_watchdog() 578 spin_lock_irqsave(&vcpu->arch.wdt_lock, flags); arm_next_watchdog() 585 mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies); arm_next_watchdog() 587 del_timer(&vcpu->arch.wdt_timer); arm_next_watchdog() 588 spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags); arm_next_watchdog() 598 new_tsr = tsr = vcpu->arch.tsr; kvmppc_watchdog_func() 610 } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr); kvmppc_watchdog_func() 622 if (final && (vcpu->arch.tcr & TCR_WRC_MASK) && kvmppc_watchdog_func() 623 vcpu->arch.watchdog_enabled) { kvmppc_watchdog_func() 641 if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS)) update_timer_ints() 646 if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS)) update_timer_ints() 654 unsigned long *pending = &vcpu->arch.pending_exceptions; kvmppc_core_check_exceptions() 668 vcpu->arch.shared->int_pending = !!*pending; kvmppc_core_check_exceptions() 684 if (vcpu->arch.shared->msr & MSR_WE) { kvmppc_core_prepare_to_enter() 715 vcpu->arch.epr_needed = true; kvmppc_core_check_requests() 728 if (!vcpu->arch.sane) { kvmppc_vcpu_run() 763 debug = vcpu->arch.dbg_reg; kvmppc_vcpu_run() 766 current->thread.debug = vcpu->arch.dbg_reg; kvmppc_vcpu_run() 768 vcpu->arch.pgdir = current->mm->pgd; kvmppc_vcpu_run() 811 __func__, vcpu->arch.pc, vcpu->arch.last_inst); emulation_exit() 815 run->hw.hardware_exit_reason |= vcpu->arch.last_inst; emulation_exit() 829 struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg); kvmppc_handle_debug() 830 u32 dbsr = vcpu->arch.dbsr; kvmppc_handle_debug() 843 if (dbsr && 
(vcpu->arch.shared->msr & MSR_DE) && kvmppc_handle_debug() 844 (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM)) kvmppc_handle_debug() 848 if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE)) kvmppc_handle_debug() 856 * Clear guest dbsr (vcpu->arch.dbsr) kvmppc_handle_debug() 858 vcpu->arch.dbsr = 0; kvmppc_handle_debug() 859 run->debug.arch.status = 0; kvmppc_handle_debug() 860 run->debug.arch.address = vcpu->arch.pc; kvmppc_handle_debug() 863 run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT; kvmppc_handle_debug() 866 run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE; kvmppc_handle_debug() 868 run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ; kvmppc_handle_debug() 870 run->debug.arch.address = dbg_reg->dac1; kvmppc_handle_debug() 872 run->debug.arch.address = dbg_reg->dac2; kvmppc_handle_debug() 898 * (such as from arch/powerpc/kernel/head_fsl_booke.S). 940 vcpu->arch.dbsr = mfspr(SPRN_DBSR); kvmppc_restart_interrupt() 955 __func__, vcpu->arch.pc); kvmppc_resume_inst_load() 1092 if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) { kvmppc_handle_exit() 1101 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); kvmppc_handle_exit() 1118 if (vcpu->arch.shared->msr & MSR_SPE) kvmppc_handle_exit() 1153 __func__, exit_nr, vcpu->arch.pc); kvmppc_handle_exit() 1176 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear, kvmppc_handle_exit() 1177 vcpu->arch.fault_esr); kvmppc_handle_exit() 1183 kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr); kvmppc_handle_exit() 1189 kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear, kvmppc_handle_exit() 1190 vcpu->arch.fault_esr); kvmppc_handle_exit() 1196 if (!(vcpu->arch.shared->msr & MSR_PR)) { kvmppc_handle_exit() 1210 if (!(vcpu->arch.shared->msr & MSR_PR) && kvmppc_handle_exit() 1225 unsigned long eaddr = vcpu->arch.fault_dear; kvmppc_handle_exit() 1231 if (!(vcpu->arch.shared->msr & MSR_PR) && kvmppc_handle_exit() 1232 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { kvmppc_handle_exit() 1246 vcpu->arch.fault_dear, kvmppc_handle_exit() 1247 vcpu->arch.fault_esr); kvmppc_handle_exit() 1272 vcpu->arch.paddr_accessed = gpaddr; kvmppc_handle_exit() 1273 vcpu->arch.vaddr_accessed = eaddr; kvmppc_handle_exit() 1283 unsigned long eaddr = vcpu->arch.pc; kvmppc_handle_exit() 1359 u32 old_tsr = vcpu->arch.tsr; kvmppc_set_tsr() 1361 vcpu->arch.tsr = new_tsr; kvmppc_set_tsr() 1363 if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS)) kvmppc_set_tsr() 1375 vcpu->arch.pc = 0; kvm_arch_vcpu_setup() 1376 vcpu->arch.shared->pir = vcpu->vcpu_id; kvm_arch_vcpu_setup() 1381 vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS; kvm_arch_vcpu_setup() 1382 vcpu->arch.shadow_pid = 1; kvm_arch_vcpu_setup() 1383 vcpu->arch.shared->msr = 0; kvm_arch_vcpu_setup() 1388 vcpu->arch.ivpr = 0x55550000; kvm_arch_vcpu_setup() 1390 vcpu->arch.ivor[i] = 0x7700 | i * 4; kvm_arch_vcpu_setup() 1402 spin_lock_init(&vcpu->arch.wdt_lock); kvmppc_subarch_vcpu_init() 1403 setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, kvmppc_subarch_vcpu_init() 1416 del_timer_sync(&vcpu->arch.wdt_timer); kvmppc_subarch_vcpu_uninit() 1423 regs->pc = vcpu->arch.pc; kvm_arch_vcpu_ioctl_get_regs() 1425 regs->ctr = vcpu->arch.ctr; kvm_arch_vcpu_ioctl_get_regs() 1426 regs->lr = vcpu->arch.lr; kvm_arch_vcpu_ioctl_get_regs() 1428 regs->msr = vcpu->arch.shared->msr; kvm_arch_vcpu_ioctl_get_regs() 1431 regs->pid = vcpu->arch.pid; kvm_arch_vcpu_ioctl_get_regs() 1451 vcpu->arch.pc = regs->pc; kvm_arch_vcpu_ioctl_set_regs() 1453 vcpu->arch.ctr = regs->ctr; kvm_arch_vcpu_ioctl_set_regs() 1454 
vcpu->arch.lr = regs->lr; kvm_arch_vcpu_ioctl_set_regs() 1482 sregs->u.e.csrr0 = vcpu->arch.csrr0; get_sregs_base() 1483 sregs->u.e.csrr1 = vcpu->arch.csrr1; get_sregs_base() 1484 sregs->u.e.mcsr = vcpu->arch.mcsr; get_sregs_base() 1487 sregs->u.e.tsr = vcpu->arch.tsr; get_sregs_base() 1488 sregs->u.e.tcr = vcpu->arch.tcr; get_sregs_base() 1491 sregs->u.e.vrsave = vcpu->arch.vrsave; get_sregs_base() 1500 vcpu->arch.csrr0 = sregs->u.e.csrr0; set_sregs_base() 1501 vcpu->arch.csrr1 = sregs->u.e.csrr1; set_sregs_base() 1502 vcpu->arch.mcsr = sregs->u.e.mcsr; set_sregs_base() 1505 vcpu->arch.vrsave = sregs->u.e.vrsave; set_sregs_base() 1509 vcpu->arch.dec = sregs->u.e.dec; set_sregs_base() 1525 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0; get_sregs_arch206() 1526 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1; get_sregs_arch206() 1527 sregs->u.e.decar = vcpu->arch.decar; get_sregs_arch206() 1528 sregs->u.e.ivpr = vcpu->arch.ivpr; get_sregs_arch206() 1540 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0; set_sregs_arch206() 1541 vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1; set_sregs_arch206() 1542 vcpu->arch.decar = sregs->u.e.decar; set_sregs_arch206() 1543 vcpu->arch.ivpr = sregs->u.e.ivpr; set_sregs_arch206() 1552 sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; kvmppc_get_sregs_ivor() 1553 sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; kvmppc_get_sregs_ivor() 1554 sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; kvmppc_get_sregs_ivor() 1555 sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; kvmppc_get_sregs_ivor() 1556 sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; kvmppc_get_sregs_ivor() 1557 sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; kvmppc_get_sregs_ivor() 1558 sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; kvmppc_get_sregs_ivor() 1559 sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; kvmppc_get_sregs_ivor() 1560 sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; kvmppc_get_sregs_ivor() 1561 sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; kvmppc_get_sregs_ivor() 1562 sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; kvmppc_get_sregs_ivor() 1563 sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; kvmppc_get_sregs_ivor() 1564 sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; kvmppc_get_sregs_ivor() 1565 sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; kvmppc_get_sregs_ivor() 1566 sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; kvmppc_get_sregs_ivor() 1567 sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; kvmppc_get_sregs_ivor() 1576 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0]; kvmppc_set_sregs_ivor() 1577 vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1]; kvmppc_set_sregs_ivor() 1578 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2]; kvmppc_set_sregs_ivor() 1579 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3]; kvmppc_set_sregs_ivor() 1580 vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4]; kvmppc_set_sregs_ivor() 1581 vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5]; kvmppc_set_sregs_ivor() 1582 vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6]; kvmppc_set_sregs_ivor() 1583 vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7]; kvmppc_set_sregs_ivor() 1584 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = 
sregs->u.e.ivor_low[8]; kvmppc_set_sregs_ivor() 1585 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9]; kvmppc_set_sregs_ivor() 1586 vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10]; kvmppc_set_sregs_ivor() 1587 vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11]; kvmppc_set_sregs_ivor() 1588 vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12]; kvmppc_set_sregs_ivor() 1589 vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13]; kvmppc_set_sregs_ivor() 1590 vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14]; kvmppc_set_sregs_ivor() 1591 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15]; kvmppc_set_sregs_ivor() 1599 sregs->pvr = vcpu->arch.pvr; kvm_arch_vcpu_ioctl_get_sregs() 1603 return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); kvm_arch_vcpu_ioctl_get_sregs() 1611 if (vcpu->arch.pvr != sregs->pvr) kvm_arch_vcpu_ioctl_set_sregs() 1622 return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); kvm_arch_vcpu_ioctl_set_sregs() 1632 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1); kvmppc_get_one_reg() 1635 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2); kvmppc_get_one_reg() 1639 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3); kvmppc_get_one_reg() 1642 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4); kvmppc_get_one_reg() 1646 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1); kvmppc_get_one_reg() 1649 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2); kvmppc_get_one_reg() 1658 *val = get_reg_val(id, vcpu->arch.epcr); kvmppc_get_one_reg() 1662 *val = get_reg_val(id, vcpu->arch.tcr); kvmppc_get_one_reg() 1665 *val = get_reg_val(id, vcpu->arch.tsr); kvmppc_get_one_reg() 1671 *val = get_reg_val(id, vcpu->arch.vrsave); kvmppc_get_one_reg() 1674 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val); kvmppc_get_one_reg() 1688 vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val); kvmppc_set_one_reg() 1691 vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val); kvmppc_set_one_reg() 1695 vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val); kvmppc_set_one_reg() 1698 vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val); kvmppc_set_one_reg() 1702 vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val); kvmppc_set_one_reg() 1705 vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val); kvmppc_set_one_reg() 1740 vcpu->arch.vrsave = set_reg_val(id, *val); kvmppc_set_one_reg() 1743 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val); kvmppc_set_one_reg() 1805 vcpu->arch.epcr = new_epcr; kvmppc_set_epcr() 1807 vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM; kvmppc_set_epcr() 1808 if (vcpu->arch.epcr & SPRN_EPCR_ICM) kvmppc_set_epcr() 1809 vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM; kvmppc_set_epcr() 1816 vcpu->arch.tcr = new_tcr; kvmppc_set_tcr() 1823 set_bits(tsr_bits, &vcpu->arch.tsr); kvmppc_set_tsr_bits() 1831 clear_bits(tsr_bits, &vcpu->arch.tsr); kvmppc_clr_tsr_bits() 1845 if (vcpu->arch.tcr & TCR_ARE) { kvmppc_decrementer_func() 1846 vcpu->arch.dec = vcpu->arch.decar; kvmppc_decrementer_func() 1915 vcpu->arch.shadow_msrp |= MSRP_UCLEP; kvm_guest_protect_msr() 1917 vcpu->arch.shadow_msrp |= MSRP_DEP; kvm_guest_protect_msr() 1919 vcpu->arch.shadow_msrp |= MSRP_PMMP; kvm_guest_protect_msr() 1922 vcpu->arch.shadow_msrp &= ~MSRP_UCLEP; kvm_guest_protect_msr() 1924 vcpu->arch.shadow_msrp &= ~MSRP_DEP; kvm_guest_protect_msr() 1926 vcpu->arch.shadow_msrp &= ~MSRP_PMMP; kvm_guest_protect_msr() 1938 if (!(vcpu->arch.shared->msr & MSR_PR) && kvmppc_xlate() 1939 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { kvmppc_xlate() 1941 pte->raddr = 
(vcpu->arch.magic_page_pa & PAGE_MASK) | kvmppc_xlate() 1989 vcpu->arch.dbg_reg.dbcr0 = 0; kvm_arch_vcpu_ioctl_set_guest_debug() 1997 vcpu->arch.dbg_reg.dbcr0 = 0; kvm_arch_vcpu_ioctl_set_guest_debug() 2000 vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC; kvm_arch_vcpu_ioctl_set_guest_debug() 2003 dbg_reg = &(vcpu->arch.dbg_reg); kvm_arch_vcpu_ioctl_set_guest_debug() 2027 uint64_t addr = dbg->arch.bp[n].addr; kvm_arch_vcpu_ioctl_set_guest_debug() 2028 uint32_t type = dbg->arch.bp[n].type; kvm_arch_vcpu_ioctl_set_guest_debug() 2070 vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); kvmppc_mmu_destroy() 2075 return kvm->arch.kvm_ops->init_vm(kvm); kvmppc_core_init_vm() 2080 return kvm->arch.kvm_ops->vcpu_create(kvm, id); kvmppc_core_vcpu_create() 2085 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); kvmppc_core_vcpu_free() 2090 kvm->arch.kvm_ops->destroy_vm(kvm); kvmppc_core_destroy_vm() 2095 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu); kvmppc_core_vcpu_load() 2100 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu); kvmppc_core_vcpu_put()
|
H A D | e500_mmu.c | 11 * This file is based on arch/powerpc/kvm/44x_tlb.c, 74 esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2); get_tlb_esel() 137 tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; kvmppc_e500_deliver_tlb_miss() 139 tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; kvmppc_e500_deliver_tlb_miss() 141 vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) kvmppc_e500_deliver_tlb_miss() 143 vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) kvmppc_e500_deliver_tlb_miss() 146 vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) kvmppc_e500_deliver_tlb_miss() 147 | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK); kvmppc_e500_deliver_tlb_miss() 148 vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; kvmppc_e500_deliver_tlb_miss() 149 vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1) kvmppc_e500_deliver_tlb_miss() 335 vcpu->arch.shared->mas0 &= ~MAS0_NV(~0); kvmppc_e500_emul_tlbre() 336 vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); kvmppc_e500_emul_tlbre() 337 vcpu->arch.shared->mas1 = gtlbe->mas1; kvmppc_e500_emul_tlbre() 338 vcpu->arch.shared->mas2 = gtlbe->mas2; kvmppc_e500_emul_tlbre() 339 vcpu->arch.shared->mas7_3 = gtlbe->mas7_3; kvmppc_e500_emul_tlbre() 363 vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel) kvmppc_e500_emul_tlbsx() 365 vcpu->arch.shared->mas1 = gtlbe->mas1; kvmppc_e500_emul_tlbsx() 366 vcpu->arch.shared->mas2 = gtlbe->mas2; kvmppc_e500_emul_tlbsx() 367 vcpu->arch.shared->mas7_3 = gtlbe->mas7_3; kvmppc_e500_emul_tlbsx() 372 tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1; kvmppc_e500_emul_tlbsx() 375 vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) kvmppc_e500_emul_tlbsx() 378 vcpu->arch.shared->mas1 = kvmppc_e500_emul_tlbsx() 379 (vcpu->arch.shared->mas6 & MAS6_SPID0) kvmppc_e500_emul_tlbsx() 380 | (vcpu->arch.shared->mas6 & (MAS6_SAS ? 
MAS1_TS : 0)) kvmppc_e500_emul_tlbsx() 381 | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0)); kvmppc_e500_emul_tlbsx() 382 vcpu->arch.shared->mas2 &= MAS2_EPN; kvmppc_e500_emul_tlbsx() 383 vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 & kvmppc_e500_emul_tlbsx() 385 vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | kvmppc_e500_emul_tlbsx() 413 gtlbe->mas1 = vcpu->arch.shared->mas1; kvmppc_e500_emul_tlbwe() 414 gtlbe->mas2 = vcpu->arch.shared->mas2; kvmppc_e500_emul_tlbwe() 415 if (!(vcpu->arch.shared->msr & MSR_CM)) kvmppc_e500_emul_tlbwe() 417 gtlbe->mas7_3 = vcpu->arch.shared->mas7_3; kvmppc_e500_emul_tlbwe() 419 trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, kvmppc_e500_emul_tlbwe() 500 unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); kvmppc_mmu_itlb_index() 507 unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); kvmppc_mmu_dtlb_index() 514 unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); kvmppc_mmu_itlb_miss() 516 kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); kvmppc_mmu_itlb_miss() 521 unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); kvmppc_mmu_dtlb_miss() 523 kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as); kvmppc_mmu_dtlb_miss() 576 sregs->u.e.mas0 = vcpu->arch.shared->mas0; kvmppc_get_sregs_e500_tlb() 577 sregs->u.e.mas1 = vcpu->arch.shared->mas1; kvmppc_get_sregs_e500_tlb() 578 sregs->u.e.mas2 = vcpu->arch.shared->mas2; kvmppc_get_sregs_e500_tlb() 579 sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3; kvmppc_get_sregs_e500_tlb() 580 sregs->u.e.mas4 = vcpu->arch.shared->mas4; kvmppc_get_sregs_e500_tlb() 581 sregs->u.e.mas6 = vcpu->arch.shared->mas6; kvmppc_get_sregs_e500_tlb() 583 sregs->u.e.mmucfg = vcpu->arch.mmucfg; kvmppc_get_sregs_e500_tlb() 584 sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0]; kvmppc_get_sregs_e500_tlb() 585 sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1]; kvmppc_get_sregs_e500_tlb() 593 vcpu->arch.shared->mas0 = sregs->u.e.mas0; kvmppc_set_sregs_e500_tlb() 594 vcpu->arch.shared->mas1 = sregs->u.e.mas1; kvmppc_set_sregs_e500_tlb() 595 vcpu->arch.shared->mas2 = sregs->u.e.mas2; kvmppc_set_sregs_e500_tlb() 596 vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3; kvmppc_set_sregs_e500_tlb() 597 vcpu->arch.shared->mas4 = sregs->u.e.mas4; kvmppc_set_sregs_e500_tlb() 598 vcpu->arch.shared->mas6 = sregs->u.e.mas6; kvmppc_set_sregs_e500_tlb() 612 *val = get_reg_val(id, vcpu->arch.shared->mas0); kvmppc_get_one_reg_e500_tlb() 615 *val = get_reg_val(id, vcpu->arch.shared->mas1); kvmppc_get_one_reg_e500_tlb() 618 *val = get_reg_val(id, vcpu->arch.shared->mas2); kvmppc_get_one_reg_e500_tlb() 621 *val = get_reg_val(id, vcpu->arch.shared->mas7_3); kvmppc_get_one_reg_e500_tlb() 624 *val = get_reg_val(id, vcpu->arch.shared->mas4); kvmppc_get_one_reg_e500_tlb() 627 *val = get_reg_val(id, vcpu->arch.shared->mas6); kvmppc_get_one_reg_e500_tlb() 630 *val = get_reg_val(id, vcpu->arch.mmucfg); kvmppc_get_one_reg_e500_tlb() 633 *val = get_reg_val(id, vcpu->arch.eptcfg); kvmppc_get_one_reg_e500_tlb() 640 *val = get_reg_val(id, vcpu->arch.tlbcfg[i]); kvmppc_get_one_reg_e500_tlb() 647 *val = get_reg_val(id, vcpu->arch.tlbps[i]); kvmppc_get_one_reg_e500_tlb() 665 vcpu->arch.shared->mas0 = set_reg_val(id, *val); kvmppc_set_one_reg_e500_tlb() 668 vcpu->arch.shared->mas1 = set_reg_val(id, *val); kvmppc_set_one_reg_e500_tlb() 671 vcpu->arch.shared->mas2 = set_reg_val(id, *val); kvmppc_set_one_reg_e500_tlb() 674 vcpu->arch.shared->mas7_3 = set_reg_val(id, *val); kvmppc_set_one_reg_e500_tlb() 677 vcpu->arch.shared->mas4 = set_reg_val(id, *val); 
kvmppc_set_one_reg_e500_tlb() 680 vcpu->arch.shared->mas6 = set_reg_val(id, *val); kvmppc_set_one_reg_e500_tlb() 685 if (reg != vcpu->arch.mmucfg) kvmppc_set_one_reg_e500_tlb() 691 if (reg != vcpu->arch.eptcfg) kvmppc_set_one_reg_e500_tlb() 702 if (reg != vcpu->arch.tlbcfg[i]) kvmppc_set_one_reg_e500_tlb() 712 if (reg != vcpu->arch.tlbps[i]) kvmppc_set_one_reg_e500_tlb() 727 vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); vcpu_mmu_geometry_update() 729 vcpu->arch.tlbcfg[0] |= params->tlb_sizes[0]; vcpu_mmu_geometry_update() 730 vcpu->arch.tlbcfg[0] |= params->tlb_ways[0] << TLBnCFG_ASSOC_SHIFT; vcpu_mmu_geometry_update() 732 vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); vcpu_mmu_geometry_update() 733 vcpu->arch.tlbcfg[1] |= params->tlb_sizes[1]; vcpu_mmu_geometry_update() 734 vcpu->arch.tlbcfg[1] |= params->tlb_ways[1] << TLBnCFG_ASSOC_SHIFT; vcpu_mmu_geometry_update() 876 vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE; vcpu_mmu_init() 879 vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) & vcpu_mmu_init() 881 vcpu->arch.tlbcfg[0] |= params[0].entries; vcpu_mmu_init() 882 vcpu->arch.tlbcfg[0] |= params[0].ways << TLBnCFG_ASSOC_SHIFT; vcpu_mmu_init() 884 vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) & vcpu_mmu_init() 886 vcpu->arch.tlbcfg[1] |= params[1].entries; vcpu_mmu_init() 887 vcpu->arch.tlbcfg[1] |= params[1].ways << TLBnCFG_ASSOC_SHIFT; vcpu_mmu_init() 890 vcpu->arch.tlbps[0] = mfspr(SPRN_TLB0PS); vcpu_mmu_init() 891 vcpu->arch.tlbps[1] = mfspr(SPRN_TLB1PS); vcpu_mmu_init() 893 vcpu->arch.mmucfg &= ~MMUCFG_LRAT; vcpu_mmu_init() 896 vcpu->arch.eptcfg = 0; vcpu_mmu_init() 897 vcpu->arch.tlbcfg[0] &= ~TLBnCFG_PT; vcpu_mmu_init() 898 vcpu->arch.tlbcfg[1] &= ~TLBnCFG_IND; vcpu_mmu_init()
|
H A D | book3s_pr.c | 14 * This file is derived from arch/powerpc/kvm/44x.c, 81 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) kvmppc_fixup_split_real() 88 vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; kvmppc_fixup_split_real() 111 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; kvmppc_core_vcpu_load_pr() 148 svcpu->gpr[0] = vcpu->arch.gpr[0]; kvmppc_copy_to_svcpu() 149 svcpu->gpr[1] = vcpu->arch.gpr[1]; kvmppc_copy_to_svcpu() 150 svcpu->gpr[2] = vcpu->arch.gpr[2]; kvmppc_copy_to_svcpu() 151 svcpu->gpr[3] = vcpu->arch.gpr[3]; kvmppc_copy_to_svcpu() 152 svcpu->gpr[4] = vcpu->arch.gpr[4]; kvmppc_copy_to_svcpu() 153 svcpu->gpr[5] = vcpu->arch.gpr[5]; kvmppc_copy_to_svcpu() 154 svcpu->gpr[6] = vcpu->arch.gpr[6]; kvmppc_copy_to_svcpu() 155 svcpu->gpr[7] = vcpu->arch.gpr[7]; kvmppc_copy_to_svcpu() 156 svcpu->gpr[8] = vcpu->arch.gpr[8]; kvmppc_copy_to_svcpu() 157 svcpu->gpr[9] = vcpu->arch.gpr[9]; kvmppc_copy_to_svcpu() 158 svcpu->gpr[10] = vcpu->arch.gpr[10]; kvmppc_copy_to_svcpu() 159 svcpu->gpr[11] = vcpu->arch.gpr[11]; kvmppc_copy_to_svcpu() 160 svcpu->gpr[12] = vcpu->arch.gpr[12]; kvmppc_copy_to_svcpu() 161 svcpu->gpr[13] = vcpu->arch.gpr[13]; kvmppc_copy_to_svcpu() 162 svcpu->cr = vcpu->arch.cr; kvmppc_copy_to_svcpu() 163 svcpu->xer = vcpu->arch.xer; kvmppc_copy_to_svcpu() 164 svcpu->ctr = vcpu->arch.ctr; kvmppc_copy_to_svcpu() 165 svcpu->lr = vcpu->arch.lr; kvmppc_copy_to_svcpu() 166 svcpu->pc = vcpu->arch.pc; kvmppc_copy_to_svcpu() 168 svcpu->shadow_fscr = vcpu->arch.shadow_fscr; kvmppc_copy_to_svcpu() 174 vcpu->arch.entry_tb = get_tb(); kvmppc_copy_to_svcpu() 175 vcpu->arch.entry_vtb = get_vtb(); kvmppc_copy_to_svcpu() 177 vcpu->arch.entry_ic = mfspr(SPRN_IC); kvmppc_copy_to_svcpu() 198 vcpu->arch.gpr[0] = svcpu->gpr[0]; kvmppc_copy_from_svcpu() 199 vcpu->arch.gpr[1] = svcpu->gpr[1]; kvmppc_copy_from_svcpu() 200 vcpu->arch.gpr[2] = svcpu->gpr[2]; kvmppc_copy_from_svcpu() 201 vcpu->arch.gpr[3] = svcpu->gpr[3]; kvmppc_copy_from_svcpu() 202 vcpu->arch.gpr[4] = svcpu->gpr[4]; kvmppc_copy_from_svcpu() 203 vcpu->arch.gpr[5] = svcpu->gpr[5]; kvmppc_copy_from_svcpu() 204 vcpu->arch.gpr[6] = svcpu->gpr[6]; kvmppc_copy_from_svcpu() 205 vcpu->arch.gpr[7] = svcpu->gpr[7]; kvmppc_copy_from_svcpu() 206 vcpu->arch.gpr[8] = svcpu->gpr[8]; kvmppc_copy_from_svcpu() 207 vcpu->arch.gpr[9] = svcpu->gpr[9]; kvmppc_copy_from_svcpu() 208 vcpu->arch.gpr[10] = svcpu->gpr[10]; kvmppc_copy_from_svcpu() 209 vcpu->arch.gpr[11] = svcpu->gpr[11]; kvmppc_copy_from_svcpu() 210 vcpu->arch.gpr[12] = svcpu->gpr[12]; kvmppc_copy_from_svcpu() 211 vcpu->arch.gpr[13] = svcpu->gpr[13]; kvmppc_copy_from_svcpu() 212 vcpu->arch.cr = svcpu->cr; kvmppc_copy_from_svcpu() 213 vcpu->arch.xer = svcpu->xer; kvmppc_copy_from_svcpu() 214 vcpu->arch.ctr = svcpu->ctr; kvmppc_copy_from_svcpu() 215 vcpu->arch.lr = svcpu->lr; kvmppc_copy_from_svcpu() 216 vcpu->arch.pc = svcpu->pc; kvmppc_copy_from_svcpu() 217 vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; kvmppc_copy_from_svcpu() 218 vcpu->arch.fault_dar = svcpu->fault_dar; kvmppc_copy_from_svcpu() 219 vcpu->arch.fault_dsisr = svcpu->fault_dsisr; kvmppc_copy_from_svcpu() 220 vcpu->arch.last_inst = svcpu->last_inst; kvmppc_copy_from_svcpu() 222 vcpu->arch.shadow_fscr = svcpu->shadow_fscr; kvmppc_copy_from_svcpu() 227 vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb; kvmppc_copy_from_svcpu() 228 vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb; kvmppc_copy_from_svcpu() 229 vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb; kvmppc_copy_from_svcpu() 231 vcpu->arch.ic += mfspr(SPRN_IC) 
- vcpu->arch.entry_ic; kvmppc_copy_from_svcpu() 329 smsr |= (guest_msr & vcpu->arch.guest_owned_ext); kvmppc_recalc_shadow_msr() 334 vcpu->arch.shadow_msr = smsr; kvmppc_recalc_shadow_msr() 350 if (!vcpu->arch.pending_exceptions) { kvmppc_set_msr_pr() 372 if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) { kvmppc_set_msr_pr() 373 struct kvm_vcpu_arch *a = &vcpu->arch; kvmppc_set_msr_pr() 390 if (vcpu->arch.magic_page_pa && kvmppc_set_msr_pr() 393 kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa, kvmppc_set_msr_pr() 406 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; kvmppc_set_pvr_pr() 407 vcpu->arch.pvr = pvr; kvmppc_set_pvr_pr() 414 vcpu->arch.cpu_type = KVM_CPU_3S_64; kvmppc_set_pvr_pr() 422 vcpu->arch.cpu_type = KVM_CPU_3S_32; kvmppc_set_pvr_pr() 429 vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32; kvmppc_set_pvr_pr() 430 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && kvmppc_set_pvr_pr() 432 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; kvmppc_set_pvr_pr() 451 vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | kvmppc_set_pvr_pr() 458 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; kvmppc_set_pvr_pr() 473 vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS; kvmppc_set_pvr_pr() 517 ulong mp_pa = vcpu->arch.magic_page_pa; kvmppc_visible_gpa() 545 if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE)) kvmppc_handle_pagefault() 550 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); kvmppc_handle_pagefault() 567 (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && kvmppc_handle_pagefault() 572 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); kvmppc_handle_pagefault() 585 if (vcpu->arch.mmu.is_dcbz32(vcpu) && kvmppc_handle_pagefault() 586 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { kvmppc_handle_pagefault() 597 u64 ssrr1 = vcpu->arch.shadow_srr1; kvmppc_handle_pagefault() 600 kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr); kvmppc_handle_pagefault() 605 u32 dsisr = vcpu->arch.fault_dsisr; kvmppc_handle_pagefault() 606 u64 ssrr1 = vcpu->arch.shadow_srr1; kvmppc_handle_pagefault() 619 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { kvmppc_handle_pagefault() 631 else if (vcpu->arch.mmu.is_dcbz32(vcpu) && kvmppc_handle_pagefault() 632 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) kvmppc_handle_pagefault() 637 vcpu->arch.paddr_accessed = pte.raddr; kvmppc_handle_pagefault() 638 vcpu->arch.vaddr_accessed = pte.eaddr; kvmppc_handle_pagefault() 659 msr &= vcpu->arch.guest_owned_ext; kvmppc_giveup_ext() 686 vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX); kvmppc_giveup_ext() 694 if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { kvmppc_giveup_fac() 701 vcpu->arch.tar = mfspr(SPRN_TAR); kvmppc_giveup_fac() 703 vcpu->arch.shadow_fscr &= ~FSCR_TAR; kvmppc_giveup_fac() 716 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) kvmppc_handle_ext() 742 msr &= ~vcpu->arch.guest_owned_ext; kvmppc_handle_ext() 753 load_fp_state(&vcpu->arch.fp); kvmppc_handle_ext() 754 t->fp_save_area = &vcpu->arch.fp; kvmppc_handle_ext() 762 load_vr_state(&vcpu->arch.vr); kvmppc_handle_ext() 763 t->vr_save_area = &vcpu->arch.vr; kvmppc_handle_ext() 769 vcpu->arch.guest_owned_ext |= msr; kvmppc_handle_ext() 783 lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr; kvmppc_handle_lost_ext() 790 load_fp_state(&vcpu->arch.fp); kvmppc_handle_lost_ext() 797 load_vr_state(&vcpu->arch.vr); kvmppc_handle_lost_ext() 809 vcpu->arch.fscr &= ~(0xffULL << 56); kvmppc_trigger_fac_interrupt() 810 vcpu->arch.fscr |= (fac << 56); kvmppc_trigger_fac_interrupt() 840 guest_fac_enabled = (vcpu->arch.fscr & (1ULL 
<< fac)); kvmppc_handle_fac() 860 mtspr(SPRN_TAR, vcpu->arch.tar); kvmppc_handle_fac() 861 vcpu->arch.shadow_fscr |= FSCR_TAR; kvmppc_handle_fac() 873 if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { kvmppc_set_fscr() 877 vcpu->arch.fscr = fscr; kvmppc_set_fscr() 900 ulong shadow_srr1 = vcpu->arch.shadow_srr1; kvmppc_handle_exit_pr() 930 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && kvmppc_handle_exit_pr() 931 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { kvmppc_handle_exit_pr() 951 u32 fault_dsisr = vcpu->arch.fault_dsisr; kvmppc_handle_exit_pr() 1030 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; kvmppc_handle_exit_pr() 1084 if (vcpu->arch.papr_enabled) { kvmppc_handle_exit_pr() 1094 if (vcpu->arch.papr_enabled && kvmppc_handle_exit_pr() 1114 vcpu->arch.hcall_needed = 1; kvmppc_handle_exit_pr() 1116 } else if (vcpu->arch.osi_enabled && kvmppc_handle_exit_pr() 1126 vcpu->arch.osi_needed = 1; kvmppc_handle_exit_pr() 1149 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { kvmppc_handle_exit_pr() 1201 kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); kvmppc_handle_exit_pr() 1212 ulong shadow_srr1 = vcpu->arch.shadow_srr1; kvmppc_handle_exit_pr() 1255 sregs->pvr = vcpu->arch.pvr; kvm_arch_vcpu_ioctl_get_sregs_pr() 1258 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { kvm_arch_vcpu_ioctl_get_sregs_pr() 1260 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i; kvm_arch_vcpu_ioctl_get_sregs_pr() 1261 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; kvm_arch_vcpu_ioctl_get_sregs_pr() 1285 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { kvm_arch_vcpu_ioctl_set_sregs_pr() 1287 vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv, kvm_arch_vcpu_ioctl_set_sregs_pr() 1292 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); kvm_arch_vcpu_ioctl_set_sregs_pr() 1329 if (vcpu->arch.intr_msr & MSR_LE) kvmppc_get_one_reg_pr() 1345 vcpu->arch.intr_msr |= MSR_LE; kvmppc_set_lpcr_pr() 1347 vcpu->arch.intr_msr &= ~MSR_LE; kvmppc_set_lpcr_pr() 1387 vcpu->arch.book3s = vcpu_book3s; kvmppc_core_vcpu_create_pr() 1390 vcpu->arch.shadow_vcpu = kvmppc_core_vcpu_create_pr() 1391 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); kvmppc_core_vcpu_create_pr() 1392 if (!vcpu->arch.shadow_vcpu) kvmppc_core_vcpu_create_pr() 1404 vcpu->arch.shared = (void *)p; kvmppc_core_vcpu_create_pr() 1408 vcpu->arch.shared_big_endian = true; kvmppc_core_vcpu_create_pr() 1410 vcpu->arch.shared_big_endian = false; kvmppc_core_vcpu_create_pr() 1418 vcpu->arch.pvr = 0x3C0301; kvmppc_core_vcpu_create_pr() 1420 vcpu->arch.pvr = mfspr(SPRN_PVR); kvmppc_core_vcpu_create_pr() 1421 vcpu->arch.intr_msr = MSR_SF; kvmppc_core_vcpu_create_pr() 1424 vcpu->arch.pvr = 0x84202; kvmppc_core_vcpu_create_pr() 1426 kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); kvmppc_core_vcpu_create_pr() 1427 vcpu->arch.slb_nr = 64; kvmppc_core_vcpu_create_pr() 1429 vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; kvmppc_core_vcpu_create_pr() 1441 kfree(vcpu->arch.shadow_vcpu); kvmppc_core_vcpu_create_pr() 1455 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); kvmppc_core_vcpu_free_pr() 1458 kfree(vcpu->arch.shadow_vcpu); kvmppc_core_vcpu_free_pr() 1472 if (!vcpu->arch.sane) { kvmppc_vcpu_run_pr() 1628 if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { kvm_vm_ioctl_get_smmu_info_pr() 1659 mutex_init(&kvm->arch.hpt_mutex); kvmppc_core_init_vm_pr() 1678 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); kvmppc_core_destroy_vm_pr()
|
H A D | book3s.c | 9 * This file is derived from arch/powerpc/kvm/44x.c, 70 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { kvmppc_unfixup_split_real() 74 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; kvmppc_unfixup_split_real() 129 vcpu->arch.mmu.reset_msr(vcpu); kvmppc_inject_interrupt() 163 unsigned long old_pending = vcpu->arch.pending_exceptions; kvmppc_book3s_dequeue_irqprio() 166 &vcpu->arch.pending_exceptions); kvmppc_book3s_dequeue_irqprio() 168 kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions, kvmppc_book3s_dequeue_irqprio() 177 &vcpu->arch.pending_exceptions); kvmppc_book3s_queue_irqprio() 199 return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); kvmppc_core_pending_dec() 339 unsigned long *pending = &vcpu->arch.pending_exceptions; kvmppc_core_prepare_to_enter() 340 unsigned long old_pending = vcpu->arch.pending_exceptions; kvmppc_core_prepare_to_enter() 344 if (vcpu->arch.pending_exceptions) kvmppc_core_prepare_to_enter() 345 printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions); kvmppc_core_prepare_to_enter() 351 clear_bit(priority, &vcpu->arch.pending_exceptions); kvmppc_core_prepare_to_enter() 370 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM; kvmppc_gpa_to_pfn() 379 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; kvmppc_gpa_to_pfn() 402 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite); kvmppc_xlate() 414 if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && kvmppc_xlate() 457 return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); kvm_arch_vcpu_ioctl_get_sregs() 463 return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); kvm_arch_vcpu_ioctl_set_sregs() 478 regs->pid = vcpu->arch.pid; kvm_arch_vcpu_ioctl_get_regs() 537 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val); kvmppc_get_one_reg() 552 *val = get_reg_val(id, vcpu->arch.fp.fpscr); kvmppc_get_one_reg() 558 val->vsxval[0] = vcpu->arch.fp.fpr[i][0]; kvmppc_get_one_reg() 559 val->vsxval[1] = vcpu->arch.fp.fpr[i][1]; kvmppc_get_one_reg() 570 if (!vcpu->arch.icp) { kvmppc_get_one_reg() 578 *val = get_reg_val(id, vcpu->arch.fscr); kvmppc_get_one_reg() 581 *val = get_reg_val(id, vcpu->arch.tar); kvmppc_get_one_reg() 584 *val = get_reg_val(id, vcpu->arch.ebbhr); kvmppc_get_one_reg() 587 *val = get_reg_val(id, vcpu->arch.ebbrr); kvmppc_get_one_reg() 590 *val = get_reg_val(id, vcpu->arch.bescr); kvmppc_get_one_reg() 593 *val = get_reg_val(id, vcpu->arch.vtb); kvmppc_get_one_reg() 596 *val = get_reg_val(id, vcpu->arch.ic); kvmppc_get_one_reg() 613 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val); kvmppc_set_one_reg() 628 vcpu->arch.fp.fpscr = set_reg_val(id, *val); kvmppc_set_one_reg() 634 vcpu->arch.fp.fpr[i][0] = val->vsxval[0]; kvmppc_set_one_reg() 635 vcpu->arch.fp.fpr[i][1] = val->vsxval[1]; kvmppc_set_one_reg() 643 if (!vcpu->arch.icp) { kvmppc_set_one_reg() 652 vcpu->arch.fscr = set_reg_val(id, *val); kvmppc_set_one_reg() 655 vcpu->arch.tar = set_reg_val(id, *val); kvmppc_set_one_reg() 658 vcpu->arch.ebbhr = set_reg_val(id, *val); kvmppc_set_one_reg() 661 vcpu->arch.ebbrr = set_reg_val(id, *val); kvmppc_set_one_reg() 664 vcpu->arch.bescr = set_reg_val(id, *val); kvmppc_set_one_reg() 667 vcpu->arch.vtb = set_reg_val(id, *val); kvmppc_set_one_reg() 670 vcpu->arch.ic = set_reg_val(id, *val); kvmppc_set_one_reg() 683 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu); kvmppc_core_vcpu_load() 688 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu); kvmppc_core_vcpu_put() 693 vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr); kvmppc_set_msr() 699 return 
vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu); kvmppc_vcpu_run() 723 return kvm->arch.kvm_ops->vcpu_create(kvm, id); kvmppc_core_vcpu_create() 728 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); kvmppc_core_vcpu_free() 733 return vcpu->kvm->arch.kvm_ops->check_requests(vcpu); kvmppc_core_check_requests() 738 return kvm->arch.kvm_ops->get_dirty_log(kvm, log); kvm_vm_ioctl_get_dirty_log() 744 kvm->arch.kvm_ops->free_memslot(free, dont); kvmppc_core_free_memslot() 750 return kvm->arch.kvm_ops->create_memslot(slot, npages); kvmppc_core_create_memslot() 755 kvm->arch.kvm_ops->flush_memslot(kvm, memslot); kvmppc_core_flush_memslot() 762 return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem); kvmppc_core_prepare_memory_region() 769 kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old); kvmppc_core_commit_memory_region() 774 return kvm->arch.kvm_ops->unmap_hva(kvm, hva); kvm_unmap_hva() 780 return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); kvm_unmap_hva_range() 785 return kvm->arch.kvm_ops->age_hva(kvm, start, end); kvm_age_hva() 790 return kvm->arch.kvm_ops->test_age_hva(kvm, hva); kvm_test_age_hva() 795 kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte); kvm_set_spte_hva() 800 vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); kvmppc_mmu_destroy() 807 INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); kvmppc_core_init_vm() 808 INIT_LIST_HEAD(&kvm->arch.rtas_tokens); kvmppc_core_init_vm() 811 return kvm->arch.kvm_ops->init_vm(kvm); kvmppc_core_init_vm() 816 kvm->arch.kvm_ops->destroy_vm(kvm); kvmppc_core_destroy_vm() 820 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); kvmppc_core_destroy_vm() 918 return kvm->arch.kvm_ops->hcall_implemented(hcall); kvmppc_book3s_hcall_implemented()
|
H A D | book3s_emulate.c | 79 if (vcpu->arch.papr_enabled && (level > PRIV_SUPER)) spr_allowed() 155 if (vcpu->arch.mmu.mfsrin) { kvmppc_core_emulate_op_pr() 157 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); kvmppc_core_emulate_op_pr() 167 if (vcpu->arch.mmu.mfsrin) { kvmppc_core_emulate_op_pr() 169 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); kvmppc_core_emulate_op_pr() 175 vcpu->arch.mmu.mtsrin(vcpu, kvmppc_core_emulate_op_pr() 180 vcpu->arch.mmu.mtsrin(vcpu, kvmppc_core_emulate_op_pr() 189 vcpu->arch.mmu.tlbie(vcpu, addr, large); kvmppc_core_emulate_op_pr() 200 !vcpu->arch.papr_enabled) { kvmppc_core_emulate_op_pr() 215 vcpu->arch.hcall_needed = 1; kvmppc_core_emulate_op_pr() 223 if (!vcpu->arch.mmu.slbmte) kvmppc_core_emulate_op_pr() 226 vcpu->arch.mmu.slbmte(vcpu, kvmppc_core_emulate_op_pr() 231 if (!vcpu->arch.mmu.slbie) kvmppc_core_emulate_op_pr() 234 vcpu->arch.mmu.slbie(vcpu, kvmppc_core_emulate_op_pr() 238 if (!vcpu->arch.mmu.slbia) kvmppc_core_emulate_op_pr() 241 vcpu->arch.mmu.slbia(vcpu); kvmppc_core_emulate_op_pr() 244 if (!vcpu->arch.mmu.slbmfee) { kvmppc_core_emulate_op_pr() 250 t = vcpu->arch.mmu.slbmfee(vcpu, rb_val); kvmppc_core_emulate_op_pr() 255 if (!vcpu->arch.mmu.slbmfev) { kvmppc_core_emulate_op_pr() 261 t = vcpu->arch.mmu.slbmfev(vcpu, rb_val); kvmppc_core_emulate_op_pr() 289 vcpu->arch.fault_dar = vaddr; kvmppc_core_emulate_op_pr() 298 vcpu->arch.fault_dsisr = dsisr; kvmppc_core_emulate_op_pr() 410 switch (vcpu->arch.pvr) { kvmppc_core_emulate_mtspr_pr() 420 if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) { kvmppc_core_emulate_mtspr_pr() 423 vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE; kvmppc_core_emulate_mtspr_pr() 426 vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE; kvmppc_core_emulate_mtspr_pr() 438 if (vcpu->arch.mmu.is_dcbz32(vcpu) && kvmppc_core_emulate_mtspr_pr() 440 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; kvmppc_core_emulate_mtspr_pr() 457 vcpu->arch.bescr = spr_val; kvmppc_core_emulate_mtspr_pr() 460 vcpu->arch.ebbhr = spr_val; kvmppc_core_emulate_mtspr_pr() 463 vcpu->arch.ebbrr = spr_val; kvmppc_core_emulate_mtspr_pr() 467 vcpu->arch.tfhar = spr_val; kvmppc_core_emulate_mtspr_pr() 470 vcpu->arch.texasr = spr_val; kvmppc_core_emulate_mtspr_pr() 473 vcpu->arch.tfiar = spr_val; kvmppc_core_emulate_mtspr_pr() 572 *spr_val = vcpu->arch.purr; kvmppc_core_emulate_mfspr_pr() 578 *spr_val = vcpu->arch.spurr; kvmppc_core_emulate_mfspr_pr() 581 *spr_val = vcpu->arch.vtb; kvmppc_core_emulate_mfspr_pr() 584 *spr_val = vcpu->arch.ic; kvmppc_core_emulate_mfspr_pr() 598 *spr_val = vcpu->arch.fscr; kvmppc_core_emulate_mfspr_pr() 601 *spr_val = vcpu->arch.bescr; kvmppc_core_emulate_mfspr_pr() 604 *spr_val = vcpu->arch.ebbhr; kvmppc_core_emulate_mfspr_pr() 607 *spr_val = vcpu->arch.ebbrr; kvmppc_core_emulate_mfspr_pr() 611 *spr_val = vcpu->arch.tfhar; kvmppc_core_emulate_mfspr_pr() 614 *spr_val = vcpu->arch.texasr; kvmppc_core_emulate_mfspr_pr() 617 *spr_val = vcpu->arch.tfiar; kvmppc_core_emulate_mfspr_pr() 669 return vcpu->arch.fault_dar; kvmppc_alignment_dar()
|
H A D | book3s_64_mmu.c | 41 kvmppc_set_msr(vcpu, vcpu->arch.intr_msr); kvmppc_mmu_book3s_64_reset_msr() 52 for (i = 0; i < vcpu->arch.slb_nr; i++) { kvmppc_mmu_book3s_64_find_slbe() 55 if (!vcpu->arch.slb[i].valid) kvmppc_mmu_book3s_64_find_slbe() 58 if (vcpu->arch.slb[i].tb) kvmppc_mmu_book3s_64_find_slbe() 61 if (vcpu->arch.slb[i].esid == cmp_esid) kvmppc_mmu_book3s_64_find_slbe() 62 return &vcpu->arch.slb[i]; kvmppc_mmu_book3s_64_find_slbe() 67 for (i = 0; i < vcpu->arch.slb_nr; i++) { kvmppc_mmu_book3s_64_find_slbe() 68 if (vcpu->arch.slb[i].vsid) kvmppc_mmu_book3s_64_find_slbe() 70 vcpu->arch.slb[i].valid ? 'v' : ' ', kvmppc_mmu_book3s_64_find_slbe() 71 vcpu->arch.slb[i].large ? 'l' : ' ', kvmppc_mmu_book3s_64_find_slbe() 72 vcpu->arch.slb[i].tb ? 't' : ' ', kvmppc_mmu_book3s_64_find_slbe() 73 vcpu->arch.slb[i].esid, kvmppc_mmu_book3s_64_find_slbe() 74 vcpu->arch.slb[i].vsid); kvmppc_mmu_book3s_64_find_slbe() 162 if (vcpu->arch.papr_enabled) kvmppc_mmu_book3s_64_get_pteg() 224 ulong mp_ea = vcpu->arch.magic_page_ea; kvmppc_mmu_book3s_64_xlate() 232 gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); kvmppc_mmu_book3s_64_xlate() 260 mutex_lock(&vcpu->kvm->arch.hpt_mutex); kvmppc_mmu_book3s_64_xlate() 285 (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { kvmppc_mmu_book3s_64_xlate() 316 if (unlikely(vcpu->arch.disable_kernel_nx) && kvmppc_mmu_book3s_64_xlate() 363 mutex_unlock(&vcpu->kvm->arch.hpt_mutex); kvmppc_mmu_book3s_64_xlate() 370 mutex_unlock(&vcpu->kvm->arch.hpt_mutex); kvmppc_mmu_book3s_64_xlate() 393 if (slb_nr > vcpu->arch.slb_nr) kvmppc_mmu_book3s_64_slbmte() 396 slbe = &vcpu->arch.slb[slb_nr]; kvmppc_mmu_book3s_64_slbmte() 410 if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) { kvmppc_mmu_book3s_64_slbmte() 434 if (slb_nr > vcpu->arch.slb_nr) kvmppc_mmu_book3s_64_slbmfee() 437 slbe = &vcpu->arch.slb[slb_nr]; kvmppc_mmu_book3s_64_slbmfee() 446 if (slb_nr > vcpu->arch.slb_nr) kvmppc_mmu_book3s_64_slbmfev() 449 slbe = &vcpu->arch.slb[slb_nr]; kvmppc_mmu_book3s_64_slbmfev() 482 for (i = 1; i < vcpu->arch.slb_nr; i++) { kvmppc_mmu_book3s_64_slbia() 483 vcpu->arch.slb[i].valid = false; kvmppc_mmu_book3s_64_slbia() 484 vcpu->arch.slb[i].orige = 0; kvmppc_mmu_book3s_64_slbia() 485 vcpu->arch.slb[i].origv = 0; kvmppc_mmu_book3s_64_slbia() 549 if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) { kvmppc_mmu_book3s_64_tlbie() 570 ulong mp_ea = vcpu->arch.magic_page_ea; segment_contains_magic_page() 583 ulong mp_ea = vcpu->arch.magic_page_ea; kvmppc_mmu_book3s_64_esid_to_vsid() 658 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; kvmppc_mmu_book3s_64_init() 674 vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; kvmppc_mmu_book3s_64_init()
|
H A D | book3s_paired_singles.c | 163 kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]); kvmppc_sync_qpr() 196 vcpu->arch.paddr_accessed = addr; kvmppc_emulate_fpr_load() 213 vcpu->arch.qpr[rs] = *((u32*)tmp); kvmppc_emulate_fpr_load() 258 vcpu->arch.paddr_accessed = addr; kvmppc_emulate_fpr_store() 288 vcpu->arch.paddr_accessed = addr; kvmppc_emulate_psq_load() 295 vcpu->arch.qpr[rs] = tmp[1]; kvmppc_emulate_psq_load() 307 vcpu->arch.qpr[rs] = tmp[1]; kvmppc_emulate_psq_load() 325 tmp[1] = vcpu->arch.qpr[rs]; kvmppc_emulate_psq_store() 328 vcpu->arch.paddr_accessed = addr; kvmppc_emulate_psq_store() 357 if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)) kvmppc_inst_is_paired_single() 508 u32 *qpr = vcpu->arch.qpr; kvmppc_ps_three_in() 524 func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3); kvmppc_ps_three_in() 541 func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3); kvmppc_ps_three_in() 556 u32 *qpr = vcpu->arch.qpr; kvmppc_ps_two_in() 573 func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2); kvmppc_ps_two_in() 589 func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2); kvmppc_ps_two_in() 606 u32 *qpr = vcpu->arch.qpr; kvmppc_ps_one_in() 615 func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in); kvmppc_ps_one_in() 624 func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in); kvmppc_ps_one_in() 678 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) { kvmppc_emulate_paired_single() 682 i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]); kvmppc_emulate_paired_single() 770 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; kvmppc_emulate_paired_single() 771 vcpu->arch.qpr[ax_rd] ^= 0x80000000; kvmppc_emulate_paired_single() 780 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; kvmppc_emulate_paired_single() 790 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; kvmppc_emulate_paired_single() 791 vcpu->arch.qpr[ax_rd] |= 0x80000000; kvmppc_emulate_paired_single() 797 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; kvmppc_emulate_paired_single() 798 vcpu->arch.qpr[ax_rd] &= ~0x80000000; kvmppc_emulate_paired_single() 803 /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */ kvmppc_emulate_paired_single() 805 &vcpu->arch.qpr[ax_rd]); kvmppc_emulate_paired_single() 810 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; kvmppc_emulate_paired_single() 814 /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */ kvmppc_emulate_paired_single() 815 kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], kvmppc_emulate_paired_single() 817 /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */ kvmppc_emulate_paired_single() 819 &vcpu->arch.qpr[ax_rd]); kvmppc_emulate_paired_single() 823 /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */ kvmppc_emulate_paired_single() 824 kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], kvmppc_emulate_paired_single() 826 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; kvmppc_emulate_paired_single() 865 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc]; kvmppc_emulate_paired_single() 1110 fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_emulate_paired_single() 1114 fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_emulate_paired_single() 1118 fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_emulate_paired_single() 1122 fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_emulate_paired_single() 1126 fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_emulate_paired_single() 1132 fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c); kvmppc_emulate_paired_single() 1136 fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, 
fpr_b); kvmppc_emulate_paired_single() 1140 fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_emulate_paired_single() 1144 fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_emulate_paired_single() 1148 fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_emulate_paired_single() 1163 *fpr_d = vcpu->arch.fp.fpscr; kvmppc_emulate_paired_single() 1168 vcpu->arch.fp.fpscr = *fpr_b; kvmppc_emulate_paired_single() 1176 fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b); kvmppc_emulate_paired_single() 1187 fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b); kvmppc_emulate_paired_single() 1193 fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_emulate_paired_single() 1199 fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_emulate_paired_single() 1202 fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_emulate_paired_single() 1205 fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_emulate_paired_single() 1208 fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_emulate_paired_single() 1211 fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_emulate_paired_single() 1214 fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_emulate_paired_single() 1217 fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_emulate_paired_single() 1220 fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_emulate_paired_single() 1228 fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_emulate_paired_single() 1230 fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d); kvmppc_emulate_paired_single() 1236 fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c); kvmppc_emulate_paired_single() 1239 fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_emulate_paired_single() 1242 fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_emulate_paired_single() 1245 fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_emulate_paired_single() 1248 fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_emulate_paired_single() 1251 fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_emulate_paired_single() 1258 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) { kvmppc_emulate_paired_single()
|
H A D | powerpc.c | 51 return !!(v->arch.pending_exceptions) || kvm_arch_vcpu_runnable() 131 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; kvmppc_swab_shared() 173 if (vcpu->arch.intr_msr & MSR_LE) KVM_HCALL_TOKEN() 175 if (shared_big_endian != vcpu->arch.shared_big_endian) KVM_HCALL_TOKEN() 177 vcpu->arch.shared_big_endian = shared_big_endian; KVM_HCALL_TOKEN() 186 vcpu->arch.disable_kernel_nx = true; KVM_HCALL_TOKEN() 190 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; KVM_HCALL_TOKEN() 191 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; KVM_HCALL_TOKEN() 198 if ((vcpu->arch.magic_page_pa & 0xf000) != KVM_HCALL_TOKEN() 199 ((ulong)vcpu->arch.shared & 0xf000)) { KVM_HCALL_TOKEN() 200 void *old_shared = vcpu->arch.shared; KVM_HCALL_TOKEN() 201 ulong shared = (ulong)vcpu->arch.shared; KVM_HCALL_TOKEN() 205 shared |= vcpu->arch.magic_page_pa & 0xf000; KVM_HCALL_TOKEN() 208 vcpu->arch.shared = new_shared; KVM_HCALL_TOKEN() 246 if (!vcpu->arch.pvr) kvmppc_sanity_check() 250 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) kvmppc_sanity_check() 254 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) kvmppc_sanity_check() 265 vcpu->arch.sane = r; kvmppc_sanity_check() 315 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; kvmppc_st() 335 void *magic = vcpu->arch.shared; kvmppc_st() 351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; kvmppc_ld() 374 void *magic = vcpu->arch.shared; kvmppc_ld() 429 kvm->arch.kvm_ops = kvm_ops; kvm_arch_init_vm() 454 module_put(kvm->arch.kvm_ops->owner); kvm_arch_destroy_vm() 623 vcpu->arch.wqp = &vcpu->wq; kvm_arch_vcpu_create() 636 hrtimer_cancel(&vcpu->arch.dec_timer); kvm_arch_vcpu_free() 640 switch (vcpu->arch.irq_type) { kvm_arch_vcpu_free() 642 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); kvm_arch_vcpu_free() 666 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); kvmppc_decrementer_wakeup() 676 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); kvm_arch_vcpu_init() 677 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; kvm_arch_vcpu_init() 678 vcpu->arch.dec_expires = ~(u64)0; kvm_arch_vcpu_init() 681 mutex_init(&vcpu->arch.exit_timing_lock); kvm_arch_vcpu_init() 703 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); kvm_arch_vcpu_load() 712 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); kvm_arch_vcpu_put() 726 if (!vcpu->arch.mmio_host_swabbed) { kvmppc_complete_mmio_load() 742 if (vcpu->arch.mmio_sign_extend) { kvmppc_complete_mmio_load() 758 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); kvmppc_complete_mmio_load() 760 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { kvmppc_complete_mmio_load() 762 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); kvmppc_complete_mmio_load() 765 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; kvmppc_complete_mmio_load() 769 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; kvmppc_complete_mmio_load() 772 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; kvmppc_complete_mmio_load() 773 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; kvmppc_complete_mmio_load() 800 run->mmio.phys_addr = vcpu->arch.paddr_accessed; kvmppc_handle_load() 804 vcpu->arch.io_gpr = rt; kvmppc_handle_load() 805 vcpu->arch.mmio_host_swabbed = host_swabbed; kvmppc_handle_load() 808 vcpu->arch.mmio_sign_extend = 0; kvmppc_handle_load() 834 vcpu->arch.mmio_sign_extend = 1; kvmppc_handle_loads() 859 run->mmio.phys_addr = vcpu->arch.paddr_accessed; kvmppc_handle_store() 918 val.vval = vcpu->arch.vr.vr[reg->id - 
KVM_REG_PPC_VR0]; kvm_vcpu_ioctl_get_one_reg() 925 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); kvm_vcpu_ioctl_get_one_reg() 928 val = get_reg_val(reg->id, vcpu->arch.vrsave); kvm_vcpu_ioctl_get_one_reg() 969 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; kvm_vcpu_ioctl_set_one_reg() 976 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); kvm_vcpu_ioctl_set_one_reg() 983 vcpu->arch.vrsave = set_reg_val(reg->id, val); kvm_vcpu_ioctl_set_one_reg() 1007 } else if (vcpu->arch.osi_needed) { kvm_arch_vcpu_ioctl_run() 1013 vcpu->arch.osi_needed = 0; kvm_arch_vcpu_ioctl_run() 1014 } else if (vcpu->arch.hcall_needed) { kvm_arch_vcpu_ioctl_run() 1020 vcpu->arch.hcall_needed = 0; kvm_arch_vcpu_ioctl_run() 1022 } else if (vcpu->arch.epr_needed) { kvm_arch_vcpu_ioctl_run() 1024 vcpu->arch.epr_needed = 0; kvm_arch_vcpu_ioctl_run() 1061 vcpu->arch.osi_enabled = true; kvm_vcpu_ioctl_enable_cap() 1065 vcpu->arch.papr_enabled = true; kvm_vcpu_ioctl_enable_cap() 1070 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; kvm_vcpu_ioctl_enable_cap() 1072 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; kvm_vcpu_ioctl_enable_cap() 1077 vcpu->arch.watchdog_enabled = true; kvm_vcpu_ioctl_enable_cap() 1286 set_bit(hcall / 4, kvm->arch.enabled_hcalls); kvm_vm_ioctl_enable_cap() 1288 clear_bit(hcall / 4, kvm->arch.enabled_hcalls); kvm_vm_ioctl_enable_cap() 1344 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); kvm_arch_vm_ioctl() 1357 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); kvm_arch_vm_ioctl()
|
H A D | book3s_64_mmu_hv.c | 64 kvm->arch.hpt_cma_alloc = 0; kvmppc_alloc_hpt() 69 kvm->arch.hpt_cma_alloc = 1; kvmppc_alloc_hpt() 83 kvm->arch.hpt_virt = hpt; kvmppc_alloc_hpt() 84 kvm->arch.hpt_order = order; kvmppc_alloc_hpt() 86 kvm->arch.hpt_npte = 1ul << (order - 4); kvmppc_alloc_hpt() 88 kvm->arch.hpt_mask = (1ul << (order - 7)) - 1; kvmppc_alloc_hpt() 91 rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte); kvmppc_alloc_hpt() 96 kvm->arch.revmap = rev; kvmppc_alloc_hpt() 97 kvm->arch.sdr1 = __pa(hpt) | (order - 18); kvmppc_alloc_hpt() 100 hpt, order, kvm->arch.lpid); kvmppc_alloc_hpt() 107 if (kvm->arch.hpt_cma_alloc) kvmppc_alloc_hpt() 120 if (kvm->arch.hpte_setup_done) { kvmppc_alloc_reset_hpt() 121 kvm->arch.hpte_setup_done = 0; kvmppc_alloc_reset_hpt() 124 if (atomic_read(&kvm->arch.vcpus_running)) { kvmppc_alloc_reset_hpt() 125 kvm->arch.hpte_setup_done = 1; kvmppc_alloc_reset_hpt() 129 if (kvm->arch.hpt_virt) { kvmppc_alloc_reset_hpt() 130 order = kvm->arch.hpt_order; kvmppc_alloc_reset_hpt() 132 memset((void *)kvm->arch.hpt_virt, 0, 1ul << order); kvmppc_alloc_reset_hpt() 138 cpumask_setall(&kvm->arch.need_tlb_flush); kvmppc_alloc_reset_hpt() 152 kvmppc_free_lpid(kvm->arch.lpid); kvmppc_free_hpt() 153 vfree(kvm->arch.revmap); kvmppc_free_hpt() 154 if (kvm->arch.hpt_cma_alloc) kvmppc_free_hpt() 155 kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt), kvmppc_free_hpt() 156 1 << (kvm->arch.hpt_order - PAGE_SHIFT)); kvmppc_free_hpt() 158 free_pages(kvm->arch.hpt_virt, kvmppc_free_hpt() 159 kvm->arch.hpt_order - PAGE_SHIFT); kvmppc_free_hpt() 194 if (npages > kvm->arch.hpt_mask + 1) kvmppc_map_vrma() 195 npages = kvm->arch.hpt_mask + 1; kvmppc_map_vrma() 205 hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask; kvmppc_map_vrma() 247 unsigned long msr = vcpu->arch.intr_msr; kvmppc_mmu_book3s_64_hv_reset_msr() 250 if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr)) kvmppc_mmu_book3s_64_hv_reset_msr() 253 msr |= vcpu->arch.shregs.msr & MSR_TS_MASK; kvmppc_mmu_book3s_64_hv_reset_msr() 283 for (i = 0; i < vcpu->arch.slb_nr; i++) { kvmppc_mmu_book3s_hv_find_slbe() 284 if (!(vcpu->arch.slb[i].orige & SLB_ESID_V)) kvmppc_mmu_book3s_hv_find_slbe() 287 if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T) kvmppc_mmu_book3s_hv_find_slbe() 292 if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0) kvmppc_mmu_book3s_hv_find_slbe() 293 return &vcpu->arch.slb[i]; kvmppc_mmu_book3s_hv_find_slbe() 317 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); kvmppc_mmu_book3s_64_hv_xlate() 327 slb_v = vcpu->kvm->arch.vrma_slb_v; kvmppc_mmu_book3s_64_hv_xlate() 338 hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4)); kvmppc_mmu_book3s_64_hv_xlate() 340 gr = kvm->arch.revmap[index].guest_rpte; kvmppc_mmu_book3s_64_hv_xlate() 350 key = (vcpu->arch.shregs.msr & MSR_PR) ? 
SLB_VSID_KP : SLB_VSID_KS; kvmppc_mmu_book3s_64_hv_xlate() 360 int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr); kvmppc_mmu_book3s_64_hv_xlate() 430 vcpu->arch.paddr_accessed = gpa; kvmppc_hv_emulate_mmio() 431 vcpu->arch.vaddr_accessed = ea; kvmppc_hv_emulate_mmio() 460 if (ea != vcpu->arch.pgfault_addr) kvmppc_book3s_hv_page_fault() 462 index = vcpu->arch.pgfault_index; kvmppc_book3s_hv_page_fault() 463 hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4)); kvmppc_book3s_hv_page_fault() 464 rev = &kvm->arch.revmap[index]; kvmppc_book3s_hv_page_fault() 474 if (hpte[0] != vcpu->arch.pgfault_hpte[0] || kvmppc_book3s_hv_page_fault() 475 hpte[1] != vcpu->arch.pgfault_hpte[1]) kvmppc_book3s_hv_page_fault() 593 rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; kvmppc_book3s_hv_page_fault() 659 memset(memslot->arch.rmap, 0, kvm_for_each_memslot() 660 memslot->npages * sizeof(*memslot->arch.rmap)); kvm_for_each_memslot() 697 ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn); kvm_for_each_memslot() 715 struct revmap_entry *rev = kvm->arch.revmap; kvm_unmap_rmapp() 733 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4)); kvm_unmap_rmapp() 794 rmapp = memslot->arch.rmap; kvmppc_core_flush_memslot_hv() 813 struct revmap_entry *rev = kvm->arch.revmap; kvm_age_rmapp() 831 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4)); kvm_age_rmapp() 871 struct revmap_entry *rev = kvm->arch.revmap; kvm_test_age_rmapp() 886 hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4)); kvm_test_age_rmapp() 911 return atomic_read(&kvm->arch.vcpus_running) != 0; vcpus_running() 920 struct revmap_entry *rev = kvm->arch.revmap; kvm_test_clear_dirty_npages() 941 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4)); kvm_test_clear_dirty_npages() 1029 rmapp = memslot->arch.rmap; kvmppc_hv_get_dirty_log() 1046 spin_lock(&vcpu->arch.vpa_update_lock); kvm_for_each_vcpu() 1047 harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map); kvm_for_each_vcpu() 1048 harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map); kvm_for_each_vcpu() 1049 spin_unlock(&vcpu->arch.vpa_update_lock); kvm_for_each_vcpu() 1105 rmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; kvmppc_unpin_guest_page() 1248 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); kvm_htab_read() 1249 revp = kvm->arch.revmap + i; kvm_htab_read() 1264 while (i < kvm->arch.hpt_npte && kvm_htab_read() 1274 while (i < kvm->arch.hpt_npte && kvm_htab_read() 1290 while (i < kvm->arch.hpt_npte && kvm_htab_read() 1311 if (i >= kvm->arch.hpt_npte) { kvm_htab_read() 1343 hpte_setup = kvm->arch.hpte_setup_done; kvm_htab_write() 1345 kvm->arch.hpte_setup_done = 0; /* temporarily */ kvm_htab_write() 1348 if (atomic_read(&kvm->arch.vcpus_running)) { kvm_htab_write() 1349 kvm->arch.hpte_setup_done = 1; kvm_htab_write() 1370 if (i >= kvm->arch.hpt_npte || kvm_htab_write() 1371 i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte) kvm_htab_write() 1374 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); kvm_htab_write() 1407 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | kvm_htab_write() 1429 kvm->arch.hpte_setup_done = hpte_setup; kvm_htab_write() 1443 atomic_dec(&ctx->kvm->arch.hpte_mod_interest); kvm_htab_release() 1483 atomic_inc(&kvm->arch.hpte_mod_interest); kvm_vm_ioctl_get_htab_fd() 1561 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); debugfs_htab_read() 1562 for (; len != 0 && i < kvm->arch.hpt_npte; ++i, hptp += 2) { debugfs_htab_read() 1572 gr = kvm->arch.revmap[i].guest_rpte; debugfs_htab_read() 1622 kvm->arch.htab_dentry = debugfs_create_file("htab", 0400, 
kvmppc_mmu_debugfs_init() 1623 kvm->arch.debugfs_dir, kvm, kvmppc_mmu_debugfs_init() 1629 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; kvmppc_mmu_book3s_hv_init() 1631 vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */ kvmppc_mmu_book3s_hv_init() 1636 vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; kvmppc_mmu_book3s_hv_init()
|
H A D | book3s_hv_rm_mmu.c | 53 if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu) global_invalidates() 61 cpumask_setall(&kvm->arch.need_tlb_flush); global_invalidates() 63 &kvm->arch.need_tlb_flush); global_invalidates() 81 head = &kvm->arch.revmap[i]; kvmppc_add_revmap_chain() 84 tail = &kvm->arch.revmap[head->back]; kvmppc_add_revmap_chain() 118 rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); remove_revmap_chain() 122 next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]); remove_revmap_chain() 123 prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]); remove_revmap_chain() 185 rmap = &memslot->arch.rmap[slot_fn]; kvmppc_do_h_enter() 252 if (pte_index >= kvm->arch.hpt_npte) kvmppc_do_h_enter() 256 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); kvmppc_do_h_enter() 287 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); kvmppc_do_h_enter() 304 rev = &kvm->arch.revmap[pte_index]; kvmppc_do_h_enter() 348 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]); kvmppc_h_enter() 381 while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) do_tlbies() 387 "r" (rbvalues[i]), "r" (kvm->arch.lpid)); do_tlbies() 389 kvm->arch.tlbie_lock = 0; do_tlbies() 408 if (pte_index >= kvm->arch.hpt_npte) kvmppc_do_h_remove() 410 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); kvmppc_do_h_remove() 421 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); kvmppc_do_h_remove() 453 &vcpu->arch.gpr[4]); kvmppc_h_remove() 459 unsigned long *args = &vcpu->arch.gpr[4]; kvmppc_h_bulk_remove() 484 pte_index >= kvm->arch.hpt_npte) { kvmppc_h_bulk_remove() 490 hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4)); kvmppc_h_bulk_remove() 522 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); kvmppc_h_bulk_remove() 576 if (pte_index >= kvm->arch.hpt_npte) kvmppc_h_protect() 579 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); kvmppc_h_protect() 597 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); kvmppc_h_protect() 639 if (pte_index >= kvm->arch.hpt_npte) kvmppc_h_read() 645 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); kvmppc_h_read() 647 hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); kvmppc_h_read() 658 vcpu->arch.gpr[4 + i * 2] = v; kvmppc_h_read() 659 vcpu->arch.gpr[5 + i * 2] = r; kvmppc_h_read() 731 hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask; kvmppc_hv_find_lock_hpte() 742 hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7)); kvmppc_hv_find_lock_hpte() 772 hash = hash ^ kvm->arch.hpt_mask; kvmppc_hv_find_lock_hpte() 811 hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4)); kvmppc_hpte_hv_fault() 814 rev = real_vmalloc_addr(&kvm->arch.revmap[index]); kvmppc_hpte_hv_fault() 825 key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; kvmppc_hpte_hv_fault() 842 if (data && (vcpu->arch.shregs.msr & MSR_DR)) { kvmppc_hpte_hv_fault() 843 unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr); kvmppc_hpte_hv_fault() 851 vcpu->arch.pgfault_addr = addr; kvmppc_hpte_hv_fault() 852 vcpu->arch.pgfault_index = index; kvmppc_hpte_hv_fault() 853 vcpu->arch.pgfault_hpte[0] = v; kvmppc_hpte_hv_fault() 854 vcpu->arch.pgfault_hpte[1] = r; kvmppc_hpte_hv_fault() 857 if (data && (vcpu->arch.shregs.msr & MSR_IR) && kvmppc_hpte_hv_fault()
|
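The excerpt above repeatedly forms an HPTE pointer as kvm->arch.hpt_virt + (pte_index << 4). A minimal standalone sketch of that addressing, assuming the usual two-doubleword (16-byte) HPTE layout; the struct and names are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

struct hpte {                    /* assumed layout: one 16-byte HPT entry */
	uint64_t v;              /* valid/AVPN doubleword */
	uint64_t r;              /* RPN/permissions doubleword */
};

static struct hpte *hpte_at(void *hpt_virt, unsigned long pte_index)
{
	/* pte_index << 4 == pte_index * sizeof(struct hpte) */
	return (struct hpte *)((char *)hpt_virt + (pte_index << 4));
}

int main(void)
{
	static struct hpte hpt[256];             /* toy 256-entry table */
	struct hpte *e = hpte_at(hpt, 42);

	printf("entry 42 at byte offset %td\n", (char *)e - (char *)hpt); /* 672 */
	return 0;
}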
H A D | emulate.c | 42 pr_debug("mtDEC: %x\n", vcpu->arch.dec); kvmppc_emulate_dec() 43 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); kvmppc_emulate_dec() 50 if (vcpu->arch.dec & 0x80000000) { kvmppc_emulate_dec() 58 if (vcpu->arch.dec == 0) kvmppc_emulate_dec() 68 dec_time = vcpu->arch.dec; kvmppc_emulate_dec() 76 hrtimer_start(&vcpu->arch.dec_timer, kvmppc_emulate_dec() 78 vcpu->arch.dec_jiffies = get_tb(); kvmppc_emulate_dec() 83 u64 jd = tb - vcpu->arch.dec_jiffies; kvmppc_get_dec() 86 if (vcpu->arch.dec < jd) kvmppc_get_dec() 90 return vcpu->arch.dec - jd; kvmppc_get_dec() 112 vcpu->arch.dec = spr_val; kvmppc_emulate_mtspr() 133 emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn, kvmppc_emulate_mtspr() 159 spr_val = vcpu->arch.pvr; kvmppc_emulate_mfspr() 194 emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn, kvmppc_emulate_mfspr() 239 vcpu->arch.shared->esr | ESR_PTR); kvmppc_emulate_instruction() 255 vcpu->arch.shared->esr | ESR_PTR); kvmppc_emulate_instruction() 284 run->debug.arch.address = kvmppc_get_pc(vcpu); kvmppc_emulate_instruction() 297 emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst, kvmppc_emulate_instruction()
|
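kvmppc_get_dec() above reconstructs the decrementer from the timebase: dec_jiffies records the timebase value at the last mtDEC, and the remaining count is the programmed value minus the ticks elapsed since then. A standalone sketch of that arithmetic (names assumed):

#include <stdint.h>
#include <stdio.h>

static uint32_t get_dec(uint32_t dec, uint64_t dec_tb, uint64_t now_tb)
{
	uint64_t jd = now_tb - dec_tb;   /* timebase ticks since mtDEC */

	if (dec < jd)
		return 0;                /* decrementer already expired */
	return dec - (uint32_t)jd;
}

int main(void)
{
	printf("%u\n", get_dec(1000, 500, 900));  /* 600 ticks left */
	printf("%u\n", get_dec(1000, 500, 2000)); /* 0: expired */
	return 0;
}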
/linux-4.1.27/arch/ia64/ |
H A D | Makefile | 25 KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/ia64/module.lds 33 GAS_STATUS = $(shell $(srctree)/arch/ia64/scripts/check-gas "$(CC)" "$(OBJDUMP)") 34 KBUILD_CPPFLAGS += $(shell $(srctree)/arch/ia64/scripts/toolchain-flags "$(CC)" "$(OBJDUMP)" "$(READELF)") 45 head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o 47 libs-y += arch/ia64/lib/ 48 core-y += arch/ia64/kernel/ arch/ia64/mm/ 49 core-$(CONFIG_IA64_DIG) += arch/ia64/dig/ 50 core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/ 51 core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ 52 core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ 53 core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ 54 core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ 55 core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ 57 drivers-$(CONFIG_PCI) += arch/ia64/pci/ 58 drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ 59 drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ 60 drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ 61 drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/ arch/ia64/uv/ 62 drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/ 64 boot := arch/ia64/hp/sim/boot 78 -$(Q)READELF=$(READELF) $(PYTHON) $(srctree)/arch/ia64/scripts/unwcheck.py $< 89 sh $(srctree)/arch/ia64/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)" 102 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
|
/linux-4.1.27/arch/sh/include/mach-sdk7786/mach/ |
H A D | irq.h | 4 /* arch/sh/boards/mach-sdk7786/irq.c */
|
/linux-4.1.27/arch/blackfin/mach-bf538/ |
H A D | Makefile | 2 # arch/blackfin/mach-bf538/Makefile
|
/linux-4.1.27/arch/blackfin/mm/ |
H A D | Makefile | 2 # arch/blackfin/mm/Makefile
|
/linux-4.1.27/arch/m68k/apollo/ |
H A D | Makefile | 2 # Makefile for Linux arch/m68k/amiga source directory
(The source comment says "amiga"; the correct directory for this Makefile is apollo, a copy-paste slip in the original.)
|
/linux-4.1.27/arch/m68k/bvme6000/ |
H A D | Makefile | 2 # Makefile for Linux arch/m68k/bvme6000 source directory
|
/linux-4.1.27/arch/m68k/hp300/ |
H A D | Makefile | 2 # Makefile for Linux arch/m68k/hp300 source directory
|
/linux-4.1.27/arch/m68k/mvme16x/ |
H A D | Makefile | 2 # Makefile for Linux arch/m68k/mvme16x source directory
|
/linux-4.1.27/arch/m68k/q40/ |
H A D | Makefile | 2 # Makefile for Linux arch/m68k/q40 source directory
|
/linux-4.1.27/arch/m68k/sun3x/ |
H A D | Makefile | 2 # Makefile for Linux arch/m68k/sun3x source directory
|
/linux-4.1.27/arch/arm/boot/bootp/ |
H A D | kernel.S | 3 .incbin "arch/arm/boot/zImage"
|
H A D | Makefile | 2 # linux/arch/arm/boot/bootp/Makefile 25 $(obj)/kernel.o: arch/arm/boot/zImage FORCE
|
/linux-4.1.27/arch/arm/boot/compressed/ |
H A D | piggy.gzip.S | 4 .incbin "arch/arm/boot/compressed/piggy.gzip"
|
H A D | piggy.lz4.S | 4 .incbin "arch/arm/boot/compressed/piggy.lz4"
|
H A D | piggy.lzma.S | 4 .incbin "arch/arm/boot/compressed/piggy.lzma"
|
H A D | piggy.lzo.S | 4 .incbin "arch/arm/boot/compressed/piggy.lzo"
|
H A D | piggy.xzkern.S | 4 .incbin "arch/arm/boot/compressed/piggy.xzkern"
|
/linux-4.1.27/arch/s390/kvm/ |
H A D | guestdbg.c | 65 u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; enable_all_hw_bp() 66 u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; enable_all_hw_bp() 67 u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; enable_all_hw_bp() 70 if (vcpu->arch.guestdbg.nr_hw_bp <= 0 || enable_all_hw_bp() 71 vcpu->arch.guestdbg.hw_bp_info == NULL) enable_all_hw_bp() 82 for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) { enable_all_hw_bp() 83 start = vcpu->arch.guestdbg.hw_bp_info[i].addr; enable_all_hw_bp() 84 len = vcpu->arch.guestdbg.hw_bp_info[i].len; enable_all_hw_bp() 105 u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; enable_all_hw_wp() 106 u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; enable_all_hw_wp() 107 u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; enable_all_hw_wp() 110 if (vcpu->arch.guestdbg.nr_hw_wp <= 0 || enable_all_hw_wp() 111 vcpu->arch.guestdbg.hw_wp_info == NULL) enable_all_hw_wp() 124 for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) { enable_all_hw_wp() 125 start = vcpu->arch.guestdbg.hw_wp_info[i].addr; enable_all_hw_wp() 126 len = vcpu->arch.guestdbg.hw_wp_info[i].len; enable_all_hw_wp() 135 vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0]; kvm_s390_backup_guest_per_regs() 136 vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9]; kvm_s390_backup_guest_per_regs() 137 vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10]; kvm_s390_backup_guest_per_regs() 138 vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11]; kvm_s390_backup_guest_per_regs() 143 vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0; kvm_s390_restore_guest_per_regs() 144 vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9; kvm_s390_restore_guest_per_regs() 145 vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10; kvm_s390_restore_guest_per_regs() 146 vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11; kvm_s390_restore_guest_per_regs() 159 vcpu->arch.sie_block->gcr[0] &= ~0x800ul; kvm_s390_patch_guest_per_regs() 160 vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH; kvm_s390_patch_guest_per_regs() 161 vcpu->arch.sie_block->gcr[10] = 0; kvm_s390_patch_guest_per_regs() 162 vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN; kvm_s390_patch_guest_per_regs() 171 if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION) kvm_s390_patch_guest_per_regs() 172 vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION; kvm_s390_patch_guest_per_regs() 214 if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp) kvm_s390_import_bp_data() 216 else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT) kvm_s390_import_bp_data() 219 size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint); kvm_s390_import_bp_data() 226 if (copy_from_user(bp_data, dbg->arch.hw_bp, size)) { kvm_s390_import_bp_data() 231 for (i = 0; i < dbg->arch.nr_hw_bp; i++) { kvm_s390_import_bp_data() 261 for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) { kvm_s390_import_bp_data() 278 vcpu->arch.guestdbg.nr_hw_bp = nr_bp; kvm_s390_import_bp_data() 279 vcpu->arch.guestdbg.hw_bp_info = bp_info; kvm_s390_import_bp_data() 280 vcpu->arch.guestdbg.nr_hw_wp = nr_wp; kvm_s390_import_bp_data() 281 vcpu->arch.guestdbg.hw_wp_info = wp_info; kvm_s390_import_bp_data() 295 for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) { kvm_s390_clear_bp_data() 296 hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i]; kvm_s390_clear_bp_data() 300 kfree(vcpu->arch.guestdbg.hw_wp_info); kvm_s390_clear_bp_data() 301 vcpu->arch.guestdbg.hw_wp_info = NULL; kvm_s390_clear_bp_data() 303 kfree(vcpu->arch.guestdbg.hw_bp_info); kvm_s390_clear_bp_data() 304 vcpu->arch.guestdbg.hw_bp_info = NULL; 
kvm_s390_clear_bp_data() 306 vcpu->arch.guestdbg.nr_hw_wp = 0; kvm_s390_clear_bp_data() 307 vcpu->arch.guestdbg.nr_hw_bp = 0; kvm_s390_clear_bp_data() 324 struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info; find_hw_bp() 327 if (vcpu->arch.guestdbg.nr_hw_bp == 0) find_hw_bp() 330 for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) { find_hw_bp() 352 if (vcpu->arch.guestdbg.nr_hw_wp == 0) any_wp_changed() 355 for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) { any_wp_changed() 356 wp_info = &vcpu->arch.guestdbg.hw_wp_info[i]; any_wp_changed() 392 u32 perc = (vcpu->arch.sie_block->perc << 24); debug_exit_required() 393 struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch; debug_exit_required() 396 unsigned long addr = vcpu->arch.sie_block->gpsw.addr; debug_exit_required() 397 unsigned long peraddr = vcpu->arch.sie_block->peraddr; debug_exit_required() 401 vcpu->arch.guestdbg.nr_hw_wp > 0) { debug_exit_required() 410 vcpu->arch.guestdbg.nr_hw_bp > 0) { debug_exit_required() 416 vcpu->arch.guestdbg.last_bp = addr; debug_exit_required() 421 if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) { debug_exit_required() 440 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) 444 u32 perc = vcpu->arch.sie_block->perc << 24; filter_guest_per_event() 445 u64 peraddr = vcpu->arch.sie_block->peraddr; filter_guest_per_event() 446 u64 addr = vcpu->arch.sie_block->gpsw.addr; filter_guest_per_event() 447 u64 cr9 = vcpu->arch.sie_block->gcr[9]; filter_guest_per_event() 448 u64 cr10 = vcpu->arch.sie_block->gcr[10]; filter_guest_per_event() 449 u64 cr11 = vcpu->arch.sie_block->gcr[11]; filter_guest_per_event() 470 vcpu->arch.sie_block->perc = guest_perc >> 24; filter_guest_per_event() 473 vcpu->arch.sie_block->iprcc &= ~PGM_PER; filter_guest_per_event()
|
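In the excerpt, enable_all_hw_bp() folds every hardware breakpoint into the single PER range held in cr10 (start) and cr11 (end), with cr9 carrying the event mask. A standalone sketch of that range computation; the struct and the folding rule are stated here as assumptions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct hw_bp { uint64_t addr; uint64_t len; };

static void per_range(const struct hw_bp *bp, int nr,
		      uint64_t *cr10, uint64_t *cr11)
{
	*cr10 = UINT64_MAX;                      /* range start */
	*cr11 = 0;                               /* range end */
	for (int i = 0; i < nr; i++) {
		if (bp[i].addr < *cr10)
			*cr10 = bp[i].addr;
		if (bp[i].addr + bp[i].len > *cr11)
			*cr11 = bp[i].addr + bp[i].len;
	}
}

int main(void)
{
	struct hw_bp bps[] = { { 0x1000, 4 }, { 0x2000, 8 } };
	uint64_t cr10, cr11;

	per_range(bps, 2, &cr10, &cr11);
	printf("PER range %#" PRIx64 "-%#" PRIx64 "\n", cr10, cr11); /* 0x1000-0x2008 */
	return 0;
}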
H A D | kvm-s390.c | 216 struct gmap *gmap = kvm->arch.gmap; kvm_s390_sync_dirty_log() 278 kvm->arch.use_irqchip = 1; kvm_vm_ioctl_enable_cap() 282 kvm->arch.user_sigp = 1; kvm_vm_ioctl_enable_cap() 290 set_kvm_facility(kvm->arch.model.fac->mask, 129); kvm_vm_ioctl_enable_cap() 291 set_kvm_facility(kvm->arch.model.fac->list, 129); kvm_vm_ioctl_enable_cap() 298 kvm->arch.user_stsi = 1; kvm_vm_ioctl_enable_cap() 315 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr)) kvm_s390_get_mem_control() 334 kvm->arch.use_cmma = 1; kvm_s390_set_mem_control() 342 s390_reset_cmma(kvm->arch.gmap->mm); kvm_s390_set_mem_control() 356 if (new_limit > kvm->arch.gmap->asce_end) kvm_s390_set_mem_control() 368 gmap_free(kvm->arch.gmap); kvm_s390_set_mem_control() 370 kvm->arch.gmap = new; kvm_s390_set_mem_control() 398 kvm->arch.crypto.crycb->aes_wrapping_key_mask, kvm_s390_vm_set_crypto() 399 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); kvm_s390_vm_set_crypto() 400 kvm->arch.crypto.aes_kw = 1; kvm_s390_vm_set_crypto() 404 kvm->arch.crypto.crycb->dea_wrapping_key_mask, kvm_s390_vm_set_crypto() 405 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); kvm_s390_vm_set_crypto() 406 kvm->arch.crypto.dea_kw = 1; kvm_s390_vm_set_crypto() 409 kvm->arch.crypto.aes_kw = 0; kvm_s390_vm_set_crypto() 410 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, kvm_s390_vm_set_crypto() 411 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); kvm_s390_vm_set_crypto() 414 kvm->arch.crypto.dea_kw = 0; kvm_s390_vm_set_crypto() 415 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, kvm_s390_vm_set_crypto() 416 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); kvm_s390_vm_set_crypto() 460 kvm->arch.epoch = gtod - host_tod; kvm_for_each_vcpu() 462 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; kvm_for_each_vcpu() 510 gtod = host_tod + kvm->arch.epoch; kvm_s390_get_tod_low() 555 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, kvm_s390_set_processor() 557 kvm->arch.model.ibc = proc->ibc; kvm_s390_set_processor() 558 memcpy(kvm->arch.model.fac->list, proc->fac_list, kvm_s390_set_processor() 590 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); kvm_s390_get_processor() 591 proc->ibc = kvm->arch.model.ibc; kvm_s390_get_processor() 592 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE); kvm_s390_get_processor() 612 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask, kvm_s390_get_machine() 877 if (kvm->arch.use_irqchip) { kvm_arch_vm_ioctl() 974 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; kvm_s390_set_crycb_format() 977 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; kvm_s390_set_crycb_format() 979 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; kvm_s390_set_crycb_format() 993 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb), kvm_s390_crypto_init() 995 if (!kvm->arch.crypto.crycb) kvm_s390_crypto_init() 1001 kvm->arch.crypto.aes_kw = 1; kvm_s390_crypto_init() 1002 kvm->arch.crypto.dea_kw = 1; kvm_s390_crypto_init() 1003 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, kvm_s390_crypto_init() 1004 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); kvm_s390_crypto_init() 1005 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, kvm_s390_crypto_init() 1006 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); kvm_s390_crypto_init() 1034 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL); kvm_arch_init_vm() 1035 if (!kvm->arch.sca) kvm_arch_init_vm() 1041 kvm->arch.sca = 
(struct sca_block *) ((char *) kvm->arch.sca + sca_offset); kvm_arch_init_vm() 1046 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); kvm_arch_init_vm() 1047 if (!kvm->arch.dbf) kvm_arch_init_vm() 1053 * page to hold the guest facility list (arch.model.fac->list) and the kvm_arch_init_vm() 1054 * facility mask (arch.model.fac->mask). Its address size has to be kvm_arch_init_vm() 1057 kvm->arch.model.fac = kvm_arch_init_vm() 1059 if (!kvm->arch.model.fac) kvm_arch_init_vm() 1063 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, kvm_arch_init_vm() 1067 kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i]; kvm_arch_init_vm() 1069 kvm->arch.model.fac->mask[i] = 0UL; kvm_arch_init_vm() 1073 memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask, kvm_arch_init_vm() 1076 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); kvm_arch_init_vm() 1077 kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; kvm_arch_init_vm() 1082 spin_lock_init(&kvm->arch.float_int.lock); kvm_arch_init_vm() 1084 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); kvm_arch_init_vm() 1085 init_waitqueue_head(&kvm->arch.ipte_wq); kvm_arch_init_vm() 1086 mutex_init(&kvm->arch.ipte_mutex); kvm_arch_init_vm() 1088 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); kvm_arch_init_vm() 1092 kvm->arch.gmap = NULL; kvm_arch_init_vm() 1094 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); kvm_arch_init_vm() 1095 if (!kvm->arch.gmap) kvm_arch_init_vm() 1097 kvm->arch.gmap->private = kvm; kvm_arch_init_vm() 1098 kvm->arch.gmap->pfault_enabled = 0; kvm_arch_init_vm() 1101 kvm->arch.css_support = 0; kvm_arch_init_vm() 1102 kvm->arch.use_irqchip = 0; kvm_arch_init_vm() 1103 kvm->arch.epoch = 0; kvm_arch_init_vm() 1105 spin_lock_init(&kvm->arch.start_stop_lock); kvm_arch_init_vm() 1109 kfree(kvm->arch.crypto.crycb); kvm_arch_init_vm() 1110 free_page((unsigned long)kvm->arch.model.fac); kvm_arch_init_vm() 1111 debug_unregister(kvm->arch.dbf); kvm_arch_init_vm() 1112 free_page((unsigned long)(kvm->arch.sca)); kvm_arch_init_vm() 1124 (unsigned long *) &vcpu->kvm->arch.sca->mcn); kvm_arch_vcpu_destroy() 1125 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == kvm_arch_vcpu_destroy() 1126 (__u64) vcpu->arch.sie_block) kvm_arch_vcpu_destroy() 1127 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; kvm_arch_vcpu_destroy() 1132 gmap_free(vcpu->arch.gmap); kvm_arch_vcpu_destroy() 1136 free_page((unsigned long)(vcpu->arch.sie_block)); kvm_arch_vcpu_destroy() 1161 free_page((unsigned long)kvm->arch.model.fac); kvm_arch_destroy_vm() 1162 free_page((unsigned long)(kvm->arch.sca)); kvm_arch_destroy_vm() 1163 debug_unregister(kvm->arch.dbf); kvm_arch_destroy_vm() 1164 kfree(kvm->arch.crypto.crycb); kvm_arch_destroy_vm() 1166 gmap_free(kvm->arch.gmap); kvm_arch_destroy_vm() 1174 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL); __kvm_ucontrol_vcpu_init() 1175 if (!vcpu->arch.gmap) __kvm_ucontrol_vcpu_init() 1177 vcpu->arch.gmap->private = vcpu->kvm; __kvm_ucontrol_vcpu_init() 1184 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; kvm_arch_vcpu_init() 1203 save_fp_ctl(&vcpu->arch.host_fpregs.fpc); kvm_arch_vcpu_load() 1205 save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); kvm_arch_vcpu_load() 1207 save_fp_regs(vcpu->arch.host_fpregs.fprs); kvm_arch_vcpu_load() 1208 save_access_regs(vcpu->arch.host_acrs); kvm_arch_vcpu_load() 1213 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); kvm_arch_vcpu_load() 1214 restore_fp_regs(vcpu->arch.guest_fpregs.fprs); kvm_arch_vcpu_load() 1217 gmap_enable(vcpu->arch.gmap); 
kvm_arch_vcpu_load() 1218 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); kvm_arch_vcpu_load() 1223 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); kvm_arch_vcpu_put() 1224 gmap_disable(vcpu->arch.gmap); kvm_arch_vcpu_put() 1229 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); kvm_arch_vcpu_put() 1230 save_fp_regs(vcpu->arch.guest_fpregs.fprs); kvm_arch_vcpu_put() 1233 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); kvm_arch_vcpu_put() 1235 restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); kvm_arch_vcpu_put() 1237 restore_fp_regs(vcpu->arch.host_fpregs.fprs); kvm_arch_vcpu_put() 1238 restore_access_regs(vcpu->arch.host_acrs); kvm_arch_vcpu_put() 1244 vcpu->arch.sie_block->gpsw.mask = 0UL; kvm_s390_vcpu_initial_reset() 1245 vcpu->arch.sie_block->gpsw.addr = 0UL; kvm_s390_vcpu_initial_reset() 1247 vcpu->arch.sie_block->cputm = 0UL; kvm_s390_vcpu_initial_reset() 1248 vcpu->arch.sie_block->ckc = 0UL; kvm_s390_vcpu_initial_reset() 1249 vcpu->arch.sie_block->todpr = 0; kvm_s390_vcpu_initial_reset() 1250 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); kvm_s390_vcpu_initial_reset() 1251 vcpu->arch.sie_block->gcr[0] = 0xE0UL; kvm_s390_vcpu_initial_reset() 1252 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; kvm_s390_vcpu_initial_reset() 1253 vcpu->arch.guest_fpregs.fpc = 0; kvm_s390_vcpu_initial_reset() 1254 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); kvm_s390_vcpu_initial_reset() 1255 vcpu->arch.sie_block->gbea = 1; kvm_s390_vcpu_initial_reset() 1256 vcpu->arch.sie_block->pp = 0; kvm_s390_vcpu_initial_reset() 1257 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; kvm_s390_vcpu_initial_reset() 1267 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; kvm_arch_vcpu_postcreate() 1270 vcpu->arch.gmap = vcpu->kvm->arch.gmap; kvm_arch_vcpu_postcreate() 1278 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); kvm_s390_vcpu_crypto_setup() 1280 if (vcpu->kvm->arch.crypto.aes_kw) kvm_s390_vcpu_crypto_setup() 1281 vcpu->arch.sie_block->ecb3 |= ECB3_AES; kvm_s390_vcpu_crypto_setup() 1282 if (vcpu->kvm->arch.crypto.dea_kw) kvm_s390_vcpu_crypto_setup() 1283 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; kvm_s390_vcpu_crypto_setup() 1285 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; kvm_s390_vcpu_crypto_setup() 1290 free_page(vcpu->arch.sie_block->cbrlo); kvm_s390_vcpu_unsetup_cmma() 1291 vcpu->arch.sie_block->cbrlo = 0; kvm_s390_vcpu_unsetup_cmma() 1296 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); kvm_s390_vcpu_setup_cmma() 1297 if (!vcpu->arch.sie_block->cbrlo) kvm_s390_vcpu_setup_cmma() 1300 vcpu->arch.sie_block->ecb2 |= 0x80; kvm_s390_vcpu_setup_cmma() 1301 vcpu->arch.sie_block->ecb2 &= ~0x08; kvm_s390_vcpu_setup_cmma() 1307 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; kvm_s390_vcpu_setup_model() 1309 vcpu->arch.cpu_id = model->cpu_id; kvm_s390_vcpu_setup_model() 1310 vcpu->arch.sie_block->ibc = model->ibc; kvm_s390_vcpu_setup_model() 1311 vcpu->arch.sie_block->fac = (int) (long) model->fac->list; kvm_s390_vcpu_setup_model() 1318 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | kvm_arch_vcpu_setup() 1324 vcpu->arch.sie_block->ecb = 6; kvm_arch_vcpu_setup() 1326 vcpu->arch.sie_block->ecb |= 0x10; kvm_arch_vcpu_setup() 1328 vcpu->arch.sie_block->ecb2 = 8; kvm_arch_vcpu_setup() 1329 vcpu->arch.sie_block->eca = 0xC1002000U; kvm_arch_vcpu_setup() 1331 vcpu->arch.sie_block->eca |= 1; kvm_arch_vcpu_setup() 1333 vcpu->arch.sie_block->eca |= 0x10000000U; kvm_arch_vcpu_setup() 1335 
vcpu->arch.sie_block->eca |= 0x00020000; kvm_arch_vcpu_setup() 1336 vcpu->arch.sie_block->ecd |= 0x20000000; kvm_arch_vcpu_setup() 1338 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; kvm_arch_vcpu_setup() 1345 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); kvm_arch_vcpu_setup() 1346 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; kvm_arch_vcpu_setup() 1373 vcpu->arch.sie_block = &sie_page->sie_block; kvm_arch_vcpu_create() 1374 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; kvm_arch_vcpu_create() 1375 vcpu->arch.host_vregs = &sie_page->vregs; kvm_arch_vcpu_create() 1377 vcpu->arch.sie_block->icpua = id; kvm_arch_vcpu_create() 1379 if (!kvm->arch.sca) { kvm_arch_vcpu_create() 1383 if (!kvm->arch.sca->cpu[id].sda) kvm_arch_vcpu_create() 1384 kvm->arch.sca->cpu[id].sda = kvm_arch_vcpu_create() 1385 (__u64) vcpu->arch.sie_block; kvm_arch_vcpu_create() 1386 vcpu->arch.sie_block->scaoh = kvm_arch_vcpu_create() 1387 (__u32)(((__u64)kvm->arch.sca) >> 32); kvm_arch_vcpu_create() 1388 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; kvm_arch_vcpu_create() 1389 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); kvm_arch_vcpu_create() 1392 spin_lock_init(&vcpu->arch.local_int.lock); kvm_arch_vcpu_create() 1393 vcpu->arch.local_int.float_int = &kvm->arch.float_int; kvm_arch_vcpu_create() 1394 vcpu->arch.local_int.wq = &vcpu->wq; kvm_arch_vcpu_create() 1395 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; kvm_arch_vcpu_create() 1401 vcpu->arch.sie_block); kvm_arch_vcpu_create() 1402 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block); kvm_arch_vcpu_create() 1406 free_page((unsigned long)(vcpu->arch.sie_block)); kvm_arch_vcpu_create() 1420 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); s390_vcpu_block() 1425 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); s390_vcpu_unblock() 1434 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); exit_sie() 1435 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) exit_sie() 1476 r = put_user(vcpu->arch.sie_block->todpr, kvm_arch_vcpu_ioctl_get_one_reg() 1480 r = put_user(vcpu->arch.sie_block->epoch, kvm_arch_vcpu_ioctl_get_one_reg() 1484 r = put_user(vcpu->arch.sie_block->cputm, kvm_arch_vcpu_ioctl_get_one_reg() 1488 r = put_user(vcpu->arch.sie_block->ckc, kvm_arch_vcpu_ioctl_get_one_reg() 1492 r = put_user(vcpu->arch.pfault_token, kvm_arch_vcpu_ioctl_get_one_reg() 1496 r = put_user(vcpu->arch.pfault_compare, kvm_arch_vcpu_ioctl_get_one_reg() 1500 r = put_user(vcpu->arch.pfault_select, kvm_arch_vcpu_ioctl_get_one_reg() 1504 r = put_user(vcpu->arch.sie_block->pp, kvm_arch_vcpu_ioctl_get_one_reg() 1508 r = put_user(vcpu->arch.sie_block->gbea, kvm_arch_vcpu_ioctl_get_one_reg() 1525 r = get_user(vcpu->arch.sie_block->todpr, kvm_arch_vcpu_ioctl_set_one_reg() 1529 r = get_user(vcpu->arch.sie_block->epoch, kvm_arch_vcpu_ioctl_set_one_reg() 1533 r = get_user(vcpu->arch.sie_block->cputm, kvm_arch_vcpu_ioctl_set_one_reg() 1537 r = get_user(vcpu->arch.sie_block->ckc, kvm_arch_vcpu_ioctl_set_one_reg() 1541 r = get_user(vcpu->arch.pfault_token, kvm_arch_vcpu_ioctl_set_one_reg() 1543 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) kvm_arch_vcpu_ioctl_set_one_reg() 1547 r = get_user(vcpu->arch.pfault_compare, kvm_arch_vcpu_ioctl_set_one_reg() 1551 r = get_user(vcpu->arch.pfault_select, kvm_arch_vcpu_ioctl_set_one_reg() 1555 r = get_user(vcpu->arch.sie_block->pp, kvm_arch_vcpu_ioctl_set_one_reg() 1559 r = 
get_user(vcpu->arch.sie_block->gbea, kvm_arch_vcpu_ioctl_set_one_reg() 1591 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); kvm_arch_vcpu_ioctl_set_sregs() 1600 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); kvm_arch_vcpu_ioctl_get_sregs() 1608 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); kvm_arch_vcpu_ioctl_set_fpu() 1609 vcpu->arch.guest_fpregs.fpc = fpu->fpc; kvm_arch_vcpu_ioctl_set_fpu() 1610 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); kvm_arch_vcpu_ioctl_set_fpu() 1611 restore_fp_regs(vcpu->arch.guest_fpregs.fprs); kvm_arch_vcpu_ioctl_set_fpu() 1617 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); kvm_arch_vcpu_ioctl_get_fpu() 1618 fpu->fpc = vcpu->arch.guest_fpregs.fpc; kvm_arch_vcpu_ioctl_get_fpu() 1659 atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); kvm_arch_vcpu_ioctl_set_guest_debug() 1664 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); kvm_arch_vcpu_ioctl_set_guest_debug() 1665 vcpu->arch.guestdbg.last_bp = 0; kvm_arch_vcpu_ioctl_set_guest_debug() 1671 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); kvm_arch_vcpu_ioctl_set_guest_debug() 1691 vcpu->kvm->arch.user_cpu_state_ctrl = 1; kvm_arch_vcpu_ioctl_set_mpstate() 1717 if (!kvm->arch.use_cmma) kvm_s390_cmma_enabled() 1724 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; ibs_enabled() 1740 rc = gmap_ipte_notify(vcpu->arch.gmap, kvm_s390_handle_requests() 1749 vcpu->arch.sie_block->ihcpu = 0xffff; kvm_s390_handle_requests() 1757 &vcpu->arch.sie_block->cpuflags); kvm_s390_handle_requests() 1766 &vcpu->arch.sie_block->cpuflags); kvm_s390_handle_requests() 1789 return gmap_fault(vcpu->arch.gmap, gpa, kvm_arch_fault_in_page() 1813 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); kvm_arch_async_page_not_present() 1814 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); kvm_arch_async_page_not_present() 1820 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); kvm_arch_async_page_present() 1821 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); kvm_arch_async_page_present() 1842 struct kvm_arch_async_pf arch; kvm_arch_setup_async_pf() local 1845 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) kvm_arch_setup_async_pf() 1847 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != kvm_arch_setup_async_pf() 1848 vcpu->arch.pfault_compare) kvm_arch_setup_async_pf() 1854 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) kvm_arch_setup_async_pf() 1856 if (!vcpu->arch.gmap->pfault_enabled) kvm_arch_setup_async_pf() 1861 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) kvm_arch_setup_async_pf() 1864 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); kvm_arch_setup_async_pf() 1879 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); vcpu_pre_run() 1902 vcpu->arch.sie_block->icptcode = 0; vcpu_pre_run() 1903 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); vcpu_pre_run() 1912 psw_t *psw = &vcpu->arch.sie_block->gpsw; vcpu_post_run_fault_in_sie() 1940 vcpu->arch.sie_block->icptcode); vcpu_post_run() 1941 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); vcpu_post_run() 1969 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); vcpu_post_run() 1974 rc = vcpu->arch.sie_block->icptcode ? 
-EOPNOTSUPP : 0; vcpu_post_run() 2005 exit_reason = sie64a(vcpu->arch.sie_block, __vcpu_run() 2019 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; sync_regs() 2020 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; sync_regs() 2024 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); sync_regs() 2029 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm; sync_regs() 2030 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; sync_regs() 2031 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; sync_regs() 2032 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; sync_regs() 2033 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; sync_regs() 2036 vcpu->arch.pfault_token = kvm_run->s.regs.pft; sync_regs() 2037 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; sync_regs() 2038 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; sync_regs() 2039 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) sync_regs() 2047 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; store_regs() 2048 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; store_regs() 2050 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); store_regs() 2051 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm; store_regs() 2052 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; store_regs() 2053 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; store_regs() 2054 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; store_regs() 2055 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; store_regs() 2056 kvm_run->s.regs.pft = vcpu->arch.pfault_token; store_regs() 2057 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; store_regs() 2058 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; store_regs() 2100 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; kvm_arch_vcpu_ioctl_run() 2101 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; kvm_arch_vcpu_ioctl_run() 2102 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; kvm_arch_vcpu_ioctl_run() 2144 vcpu->arch.guest_fpregs.fprs, 128); kvm_s390_store_status_unloaded() 2148 &vcpu->arch.sie_block->gpsw, 16); kvm_s390_store_status_unloaded() 2154 &vcpu->arch.guest_fpregs.fpc, 4); kvm_s390_store_status_unloaded() 2156 &vcpu->arch.sie_block->todpr, 4); kvm_s390_store_status_unloaded() 2158 &vcpu->arch.sie_block->cputm, 8); kvm_s390_store_status_unloaded() 2159 clkcomp = vcpu->arch.sie_block->ckc >> 8; kvm_s390_store_status_unloaded() 2165 &vcpu->arch.sie_block->gcr, 128); kvm_s390_store_status_unloaded() 2176 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); kvm_s390_vcpu_store_status() 2177 save_fp_regs(vcpu->arch.guest_fpregs.fprs); kvm_s390_vcpu_store_status() 2245 spin_lock(&vcpu->kvm->arch.start_stop_lock); kvm_s390_vcpu_start() 2265 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); kvm_s390_vcpu_start() 2271 spin_unlock(&vcpu->kvm->arch.start_stop_lock); kvm_s390_vcpu_start() 2285 spin_lock(&vcpu->kvm->arch.start_stop_lock); kvm_s390_vcpu_stop() 2291 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); kvm_s390_vcpu_stop() 2309 spin_unlock(&vcpu->kvm->arch.start_stop_lock); kvm_s390_vcpu_stop() 2323 if (!vcpu->kvm->arch.css_support) { kvm_vcpu_ioctl_enable_cap() 2324 vcpu->kvm->arch.css_support = 1; kvm_vcpu_ioctl_enable_cap() 2389 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); kvm_s390_guest_mem_op() 2468 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, kvm_arch_vcpu_ioctl() 2485 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, kvm_arch_vcpu_ioctl() 2491 r = gmap_fault(vcpu->arch.gmap, arg, 0); kvm_arch_vcpu_ioctl() 
2555 vmf->page = virt_to_page(vcpu->arch.sie_block); kvm_arch_vcpu_fault() 2607 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, kvm_arch_commit_memory_region()
|
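The KVM_GET/SET_ONE_REG handlers above are a switch on the register id, copying one field between the sie block and user space with put_user()/get_user(). A userspace analogue of the GET side, with invented ids and memcpy standing in for put_user():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { REG_TODPR, REG_CPUTM, REG_CKC };          /* assumed ids */

struct toy_vcpu { uint32_t todpr; uint64_t cputm; uint64_t ckc; };

static int get_one_reg(const struct toy_vcpu *v, int id, void *out)
{
	switch (id) {
	case REG_TODPR: memcpy(out, &v->todpr, sizeof(v->todpr)); return 0;
	case REG_CPUTM: memcpy(out, &v->cputm, sizeof(v->cputm)); return 0;
	case REG_CKC:   memcpy(out, &v->ckc,   sizeof(v->ckc));   return 0;
	default:        return -1;               /* -EINVAL in the kernel */
	}
}

int main(void)
{
	struct toy_vcpu v = { .todpr = 7, .cputm = 123, .ckc = 456 };
	uint64_t ckc;

	if (get_one_reg(&v, REG_CKC, &ckc) == 0)
		printf("ckc=%llu\n", (unsigned long long)ckc);
	return 0;
}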
H A D | intercept.c | 43 struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; kvm_s390_rewind_psw() 56 switch (vcpu->arch.sie_block->icptcode) { handle_noop() 71 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; handle_stop() 105 int viwhy = vcpu->arch.sie_block->ipb >> 16; handle_validity() 119 vcpu->arch.sie_block->ipa, handle_instruction() 120 vcpu->arch.sie_block->ipb); handle_instruction() 121 handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8]; handle_instruction() 131 pgm_info->code = vcpu->arch.sie_block->iprcc; __extract_prog_irq() 133 switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) { __extract_prog_irq() 144 pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc; __extract_prog_irq() 152 pgm_info->exc_access_id = vcpu->arch.sie_block->eai; __extract_prog_irq() 160 pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc; __extract_prog_irq() 161 pgm_info->exc_access_id = vcpu->arch.sie_block->eai; __extract_prog_irq() 162 pgm_info->op_access_id = vcpu->arch.sie_block->oai; __extract_prog_irq() 165 pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn; __extract_prog_irq() 166 pgm_info->mon_code = vcpu->arch.sie_block->tecmc; __extract_prog_irq() 170 pgm_info->data_exc_code = vcpu->arch.sie_block->dxc; __extract_prog_irq() 173 pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc; __extract_prog_irq() 174 pgm_info->exc_access_id = vcpu->arch.sie_block->eai; __extract_prog_irq() 180 if (vcpu->arch.sie_block->iprcc & PGM_PER) { __extract_prog_irq() 181 pgm_info->per_code = vcpu->arch.sie_block->perc; __extract_prog_irq() 182 pgm_info->per_atmid = vcpu->arch.sie_block->peratmid; __extract_prog_irq() 183 pgm_info->per_address = vcpu->arch.sie_block->peraddr; __extract_prog_irq() 184 pgm_info->per_access_id = vcpu->arch.sie_block->peraid; __extract_prog_irq() 201 itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba; handle_itdb() 210 #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER) 223 if (vcpu->arch.sie_block->iprcc == 0) handle_prog() 227 trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc); handle_prog() 228 if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) { handle_prog() 253 vcpu->arch.sie_block->icptcode = 0x04; handle_instruction_and_prog() 269 u16 eic = vcpu->arch.sie_block->eic; handle_external_interrupt() 293 irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr; handle_external_interrupt() 346 if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */ handle_partial_execution() 348 if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */ handle_partial_execution() 371 u8 code = vcpu->arch.sie_block->icptcode; kvm_handle_sie_intercept()
|
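handle_instruction() above dispatches on the top byte of the interception parameter: instruction_handlers[ipa >> 8]. A standalone sketch of that table-driven dispatch; the opcodes and handlers are made up:

#include <stdio.h>

typedef int (*intercept_handler_t)(unsigned int ipa);

static int handle_b2(unsigned int ipa) { printf("b2xx: %#x\n", ipa); return 0; }
static int handle_ae(unsigned int ipa) { printf("sigp: %#x\n", ipa); return 0; }

static const intercept_handler_t handlers[256] = {
	[0xb2] = handle_b2,
	[0xae] = handle_ae,
};

static int dispatch(unsigned int ipa)
{
	intercept_handler_t h = handlers[(ipa >> 8) & 0xff];

	return h ? h(ipa) : -1;                  /* -EOPNOTSUPP in the kernel */
}

int main(void)
{
	return dispatch(0xb236);                 /* routed to the 0xb2 handler */
}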
H A D | kvm-s390.h | 26 #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) 28 #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) 32 debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \ 38 debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \ 40 d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\ 46 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; is_vcpu_stopped() 52 if (kvm->arch.gmap) kvm_is_ucontrol() 63 return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT; kvm_s390_get_prefix() 68 vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT; kvm_s390_set_prefix() 77 u32 base2 = vcpu->arch.sie_block->ipb >> 28; kvm_s390_get_base_disp_s() 78 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); kvm_s390_get_base_disp_s() 90 u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; kvm_s390_get_base_disp_sse() 91 u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; kvm_s390_get_base_disp_sse() 92 u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12; kvm_s390_get_base_disp_sse() 93 u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff; kvm_s390_get_base_disp_sse() 107 *r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20; kvm_s390_get_regs_rre() 109 *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; kvm_s390_get_regs_rre() 114 u32 base2 = vcpu->arch.sie_block->ipb >> 28; kvm_s390_get_base_disp_rsy() 115 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + kvm_s390_get_base_disp_rsy() 116 ((vcpu->arch.sie_block->ipb & 0xff00) << 4); kvm_s390_get_base_disp_rsy() 129 u32 base2 = vcpu->arch.sie_block->ipb >> 28; kvm_s390_get_base_disp_rs() 130 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); kvm_s390_get_base_disp_rs() 141 vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44); kvm_s390_set_psw_cc() 142 vcpu->arch.sie_block->gpsw.mask |= cc << 44; kvm_s390_set_psw_cc() 148 return __test_facility(nr, kvm->arch.model.fac->mask) && test_kvm_facility() 149 __test_facility(nr, kvm->arch.model.fac->list); test_kvm_facility() 166 return kvm->arch.user_cpu_state_ctrl != 0; kvm_s390_user_cpu_state_ctrl() 249 * has been stored in vcpu->arch.pgm and can be injected with 261 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); kvm_s390_inject_prog_cond()
|
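The helpers above decode base register and displacement straight out of the ipb word, e.g. for the S format: the top four bits of ipb's high half are base2, the next twelve bits disp2. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

static void get_base_disp_s(uint32_t ipb, uint32_t *base2, uint32_t *disp2)
{
	*base2 = ipb >> 28;
	*disp2 = (ipb & 0x0fff0000) >> 16;
}

int main(void)
{
	uint32_t b, d;

	get_base_disp_s(0x50040000, &b, &d);     /* made-up ipb value */
	printf("base=r%u disp=%#x\n", b, d);     /* base=r5 disp=0x4 */
	return 0;
}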
H A D | interrupt.c | 40 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); psw_extint_disabled() 45 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); psw_ioint_disabled() 50 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK); psw_mchk_disabled() 55 if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) || psw_interrupts_disabled() 56 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) || psw_interrupts_disabled() 57 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT)) psw_interrupts_disabled() 65 !(vcpu->arch.sie_block->gcr[0] & 0x800ul)) ckc_interrupts_enabled() 75 if (!(vcpu->arch.sie_block->ckc < ckc_irq_pending() 76 get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) ckc_irq_pending() 84 (vcpu->arch.sie_block->gcr[0] & 0x400ul); cpu_timer_interrupts_enabled() 89 return (vcpu->arch.sie_block->cputm >> 63) && cpu_timer_irq_pending() 111 return vcpu->kvm->arch.float_int.pending_irqs; pending_floating_irqs() 116 return vcpu->arch.local_int.pending_irqs; pending_local_irqs() 125 if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i))) disable_iscs() 144 if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) deliverable_irqs() 146 if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) deliverable_irqs() 148 if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) deliverable_irqs() 150 if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) deliverable_irqs() 152 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) deliverable_irqs() 156 if (!(vcpu->arch.sie_block->gcr[14] & deliverable_irqs() 157 vcpu->kvm->arch.float_int.mchk.cr14)) deliverable_irqs() 171 atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); __set_cpu_idle() 172 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); __set_cpu_idle() 177 atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); __unset_cpu_idle() 178 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); __unset_cpu_idle() 184 &vcpu->arch.sie_block->cpuflags); __reset_intercept_indicators() 185 vcpu->arch.sie_block->lctl = 0x0000; __reset_intercept_indicators() 186 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); __reset_intercept_indicators() 189 vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 | __reset_intercept_indicators() 191 vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); __reset_intercept_indicators() 197 atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); __set_cpuflag() 207 vcpu->arch.sie_block->lctl |= LCTL_CR6; set_intercept_indicators_io() 217 vcpu->arch.sie_block->lctl |= LCTL_CR0; set_intercept_indicators_ext() 225 vcpu->arch.sie_block->ictl |= ICTL_LPSW; set_intercept_indicators_mchk() 227 vcpu->arch.sie_block->lctl |= LCTL_CR14; set_intercept_indicators_mchk() 247 switch (vcpu->arch.sie_block->icptcode) { get_ilc() 254 return insn_length(vcpu->arch.sie_block->ipa >> 8); get_ilc() 256 return vcpu->arch.sie_block->pgmilc; get_ilc() 264 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __deliver_cpu_timer() 274 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_cpu_timer() 276 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_cpu_timer() 283 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __deliver_ckc() 293 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_ckc() 295 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_ckc() 302 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __deliver_pfault_init() 321 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_pfault_init() 323 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 
__deliver_pfault_init() 330 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; __deliver_machine_check() 331 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __deliver_machine_check() 390 &vcpu->arch.sie_block->gpsw, __deliver_machine_check() 393 &vcpu->arch.sie_block->gpsw, __deliver_machine_check() 401 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __deliver_restart() 410 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_restart() 412 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_restart() 419 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __deliver_set_prefix() 440 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __deliver_emergency_signal() 460 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_emergency_signal() 462 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_emergency_signal() 468 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __deliver_external_call() 488 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_external_call() 489 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, __deliver_external_call() 496 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __deliver_prog() 592 if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST) __deliver_prog() 596 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea, __deliver_prog() 601 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_prog() 603 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_prog() 609 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; __deliver_service() 632 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_service() 634 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __deliver_service() 643 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; __deliver_pfault_done() 668 &vcpu->arch.sie_block->gpsw, __deliver_pfault_done() 671 &vcpu->arch.sie_block->gpsw, __deliver_pfault_done() 682 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; __deliver_virtio() 712 &vcpu->arch.sie_block->gpsw, __deliver_virtio() 715 &vcpu->arch.sie_block->gpsw, __deliver_virtio() 734 fi = &vcpu->kvm->arch.float_int; __deliver_io() 767 &vcpu->arch.sie_block->gpsw, __deliver_io() 770 &vcpu->arch.sie_block->gpsw, __deliver_io() 799 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; kvm_s390_ext_call_pending() 800 uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl; kvm_s390_ext_call_pending() 806 (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND); kvm_s390_ext_call_pending() 821 (vcpu->arch.sie_block->gcr[0] & 0x2000ul)) kvm_s390_vcpu_has_irq() 856 now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; kvm_s390_handle_wait() 857 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); kvm_s390_handle_wait() 860 if (vcpu->arch.sie_block->ckc < now) kvm_s390_handle_wait() 864 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); kvm_s390_handle_wait() 872 hrtimer_cancel(&vcpu->arch.ckc_timer); kvm_s390_handle_wait() 894 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); kvm_s390_idle_wakeup() 895 now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; kvm_s390_idle_wakeup() 896 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); kvm_s390_idle_wakeup() 902 if (vcpu->arch.sie_block->ckc > now && kvm_s390_idle_wakeup() 911 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; kvm_s390_clear_local_irqs() 921 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0; 
kvm_s390_clear_local_irqs() 926 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; kvm_s390_deliver_pending_interrupts() 972 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __inject_prog() 981 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; kvm_s390_inject_program_int() 998 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; kvm_s390_inject_prog_irq() 1016 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __inject_pfault_init() 1033 uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl; __inject_extcall_sigpif() 1041 atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); __inject_extcall_sigpif() 1047 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __inject_extcall() 1072 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __inject_set_prefix() 1091 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __inject_sigp_stop() 1117 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __inject_sigp_restart() 1129 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __inject_sigp_emergency() 1148 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __inject_mchk() 1179 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __inject_ckc() 1192 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; __inject_cpu_timer() 1206 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; get_io_int() 1252 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; __inject_service() 1277 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; __inject_virtio() 1294 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; __inject_pfault_done() 1313 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; __inject_float_mchk() 1330 fi = &kvm->arch.float_int; __inject_io() 1355 fi = &kvm->arch.float_int; __inject_vm() 1388 li = &dst_vcpu->arch.local_int; __inject_vm() 1505 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; kvm_s390_is_stop_irq_pending() 1512 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; kvm_s390_clear_stop_irq() 1569 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; kvm_s390_inject_vcpu() 1608 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; kvm_s390_clear_float_irqs() 1647 fi = &kvm->arch.float_int; get_all_floating_irqs() 1788 return kvm->arch.adapters[id]; get_io_adapter() 1802 (dev->kvm->arch.adapters[adapter_info.id] != NULL)) register_io_adapter() 1817 dev->kvm->arch.adapters[adapter->id] = adapter; register_io_adapter() 1850 map->addr = gmap_translate(kvm->arch.gmap, addr); kvm_s390_adapter_map() 1905 if (!kvm->arch.adapters[i]) kvm_s390_destroy_adapters() 1908 &kvm->arch.adapters[i]->maps, list) { kvm_s390_destroy_adapters() 1913 kfree(kvm->arch.adapters[i]); kvm_s390_destroy_adapters() 1963 dev->kvm->arch.gmap->pfault_enabled = 1; flic_set_attr() 1966 dev->kvm->arch.gmap->pfault_enabled = 0; flic_set_attr() 1993 if (dev->kvm->arch.flic) flic_create() 1995 dev->kvm->arch.flic = dev; flic_create() 2001 dev->kvm->arch.flic = NULL; flic_destroy() 2135 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; kvm_s390_set_irq_state() 2217 uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl; kvm_s390_get_irq_state() 2219 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; kvm_s390_get_irq_state() 2238 store_local_irq(&vcpu->arch.local_int, &irq, irq_type); kvm_s390_get_irq_state() 2258 (atomic_read(&vcpu->arch.sie_block->cpuflags) &
|
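psw_extint_disabled() and friends above test single bits of the guest PSW mask to decide whether an interrupt class is deliverable. A standalone sketch with the s390 mask bits written out (the bit values are stated here as assumptions):

#include <stdint.h>
#include <stdio.h>

#define PSW_MASK_IO	0x0200000000000000UL     /* PSW bit 6 */
#define PSW_MASK_EXT	0x0100000000000000UL     /* PSW bit 7 */
#define PSW_MASK_MCHECK	0x0004000000000000UL     /* PSW bit 13 */

static int psw_extint_disabled(uint64_t mask) { return !(mask & PSW_MASK_EXT); }
static int psw_ioint_disabled(uint64_t mask)  { return !(mask & PSW_MASK_IO); }

int main(void)
{
	uint64_t mask = PSW_MASK_IO;             /* I/O open, external closed */

	printf("ext disabled: %d, io disabled: %d\n",
	       psw_extint_disabled(mask), psw_ioint_disabled(mask));
	return 0;
}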
H A D | diag.c | 28 start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; diag_release_pages() 29 end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; diag_release_pages() 43 gmap_discard(vcpu->arch.gmap, start, end); diag_release_pages() 51 gmap_discard(vcpu->arch.gmap, start, prefix); diag_release_pages() 53 gmap_discard(vcpu->arch.gmap, 0, 4096); diag_release_pages() 55 gmap_discard(vcpu->arch.gmap, 4096, 8192); diag_release_pages() 56 gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end); diag_release_pages() 75 u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4; __diag_page_ref_service() 76 u16 ry = (vcpu->arch.sie_block->ipa & 0x0f); __diag_page_ref_service() 88 if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) { __diag_page_ref_service() 105 vcpu->arch.pfault_token = parm.token_addr; __diag_page_ref_service() 106 vcpu->arch.pfault_select = parm.select_mask; __diag_page_ref_service() 107 vcpu->arch.pfault_compare = parm.compare_mask; __diag_page_ref_service() 126 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) __diag_page_ref_service() 129 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; __diag_page_ref_service() 156 tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; __diag_time_slice_end_directed() 174 unsigned int reg = vcpu->arch.sie_block->ipa & 0xf; __diag_ipl_functions() 206 if (!vcpu->kvm->arch.css_support || __diag_virtio_hypercall() 235 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) kvm_s390_handle_diag()
|
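diag_release_pages() and __diag_page_ref_service() above pull their operand registers out of the two nibbles of the ipa low byte. A standalone sketch of that rx/ry decoding:

#include <stdio.h>

static void get_rx_ry(unsigned int ipa, unsigned int *rx, unsigned int *ry)
{
	*rx = (ipa & 0xf0) >> 4;
	*ry = ipa & 0x0f;
}

int main(void)
{
	unsigned int rx, ry;

	get_rx_ry(0x8358, &rx, &ry);             /* made-up ipa value */
	printf("rx=r%u ry=r%u\n", rx, ry);       /* rx=r5 ry=r8 */
	return 0;
}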
H A D | priv.c | 42 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_set_clock() 60 cpup->arch.sie_block->epoch = val; handle_set_clock() 76 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_set_prefix() 116 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_store_prefix() 146 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_store_cpu_address() 166 if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE))) __skey_check_enable() 171 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); __skey_check_enable() 184 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_skey() 195 if (psw_bits(vcpu->arch.sie_block->gpsw).p) handle_ipte_interlock() 197 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); handle_ipte_interlock() 208 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_test_block() 215 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); handle_test_block() 244 inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); handle_tpi() 321 vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb; handle_tsch() 330 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_io_inst() 333 if (vcpu->kvm->arch.css_support) { handle_io_inst() 338 if (vcpu->arch.sie_block->ipa == 0xb236) handle_io_inst() 340 if (vcpu->arch.sie_block->ipa == 0xb235) handle_io_inst() 361 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_stfl() 368 fac = *vcpu->kvm->arch.model.fac->list >> 32; handle_stfl() 402 psw_t *gpsw = &vcpu->arch.sie_block->gpsw; kvm_s390_handle_lpsw() 435 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_lpswe() 444 vcpu->arch.sie_block->gpsw = new_psw; handle_lpswe() 445 if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) handle_lpswe() 452 u64 stidp_data = vcpu->arch.stidp_data; handle_stidp() 459 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_stidp() 526 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_stsi() 573 if (vcpu->kvm->arch.user_stsi) { handle_stsi() 631 handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; kvm_s390_handle_b2() 646 vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; handle_epsw() 650 vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL; handle_epsw() 677 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_pfmf() 711 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); handle_pfmf() 752 int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; handle_essa() 758 gmap = vcpu->arch.gmap; handle_essa() 763 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_essa() 766 if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6) handle_essa() 771 vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ handle_essa() 772 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); handle_essa() 802 handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; kvm_s390_handle_b9() 811 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; kvm_s390_handle_lctl() 812 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; kvm_s390_handle_lctl() 820 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) kvm_s390_handle_lctl() 838 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; kvm_s390_handle_lctl() 839 vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++]; kvm_s390_handle_lctl() 850 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; kvm_s390_handle_stctl() 851 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; kvm_s390_handle_stctl() 859 if 
(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) kvm_s390_handle_stctl() 873 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; kvm_s390_handle_stctl() 884 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; handle_lctlg() 885 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; handle_lctlg() 893 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_lctlg() 911 vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++]; handle_lctlg() 922 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; handle_stctg() 923 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; handle_stctg() 931 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_stctg() 945 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; handle_stctg() 963 handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; kvm_s390_handle_eb() 979 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_tprot() 989 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) handle_tprot() 1018 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) handle_tprot() 1026 if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01) kvm_s390_handle_e5() 1035 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) handle_sckpf() 1043 vcpu->arch.sie_block->todpr = value; handle_sckpf() 1056 handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; kvm_s390_handle_01()
|
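kvm_s390_handle_lctl() above loads 32-bit values into the low halves of the 64-bit shadow control registers, preserving the high halves. A standalone sketch of that masking step:

#include <stdint.h>
#include <stdio.h>

static void lctl_one(uint64_t *gcr, uint32_t val)
{
	*gcr &= 0xffffffff00000000UL;            /* keep the high half */
	*gcr |= val;                             /* replace the low half */
}

int main(void)
{
	uint64_t cr0 = 0x1234567800000000UL;

	lctl_one(&cr0, 0xE0);
	printf("cr0=%#llx\n", (unsigned long long)cr0); /* 0x12345678000000e0 */
	return 0;
}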
/linux-4.1.27/arch/m32r/ |
H A D | Makefile | 34 head-y := arch/m32r/kernel/head.o 38 libs-y += arch/m32r/lib/ $(LIBGCC) 39 core-y += arch/m32r/kernel/ \ 40 arch/m32r/mm/ \ 41 arch/m32r/boot/ \ 42 arch/m32r/platforms/ 44 drivers-$(CONFIG_OPROFILE) += arch/m32r/oprofile/ 46 boot := arch/m32r/boot 61 echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)'
|
/linux-4.1.27/tools/perf/arch/ |
H A D | common.c | 105 static const char *normalize_arch(char *arch) normalize_arch() argument 107 if (!strcmp(arch, "x86_64")) normalize_arch() 109 if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6') normalize_arch() 111 if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5)) normalize_arch() 113 if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64")) normalize_arch() 115 if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110")) normalize_arch() 117 if (!strncmp(arch, "s390", 4)) normalize_arch() 119 if (!strncmp(arch, "parisc", 6)) normalize_arch() 121 if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3)) normalize_arch() 123 if (!strncmp(arch, "mips", 4)) normalize_arch() 125 if (!strncmp(arch, "sh", 2) && isdigit(arch[2])) normalize_arch() 128 return arch; normalize_arch() 136 const char *arch, *cross_env; perf_session_env__lookup_binutils_path() local 141 arch = normalize_arch(env->arch); perf_session_env__lookup_binutils_path() 150 if (!strcmp(normalize_arch(uts.machine), arch)) perf_session_env__lookup_binutils_path() 167 if (!strcmp(arch, "arm")) perf_session_env__lookup_binutils_path() 169 else if (!strcmp(arch, "arm64")) perf_session_env__lookup_binutils_path() 171 else if (!strcmp(arch, "powerpc")) perf_session_env__lookup_binutils_path() 173 else if (!strcmp(arch, "sh")) perf_session_env__lookup_binutils_path() 175 else if (!strcmp(arch, "s390")) perf_session_env__lookup_binutils_path() 177 else if (!strcmp(arch, "sparc")) perf_session_env__lookup_binutils_path() 179 else if (!strcmp(arch, "x86")) perf_session_env__lookup_binutils_path() 181 else if (!strcmp(arch, "mips")) perf_session_env__lookup_binutils_path() 184 ui__error("binutils for %s not supported.\n", arch); perf_session_env__lookup_binutils_path() 193 name, arch, name); perf_session_env__lookup_binutils_path() 212 * For live mode, env->arch will be NULL and we can use perf_session_env__lookup_objdump() 215 if (env->arch == NULL) perf_session_env__lookup_objdump()
|
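normalize_arch() above folds uname-style machine strings into canonical family names with a chain of strcmp/strncmp tests. A reduced standalone version covering a few of the cases shown (like the original, the i?86 test assumes the string has at least four characters):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static const char *normalize_arch(const char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";                    /* i386/i486/i586/i686 */
	if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
		return "arm64";
	if (!strncmp(arch, "sh", 2) && isdigit((unsigned char)arch[2]))
		return "sh";
	return arch;                             /* pass unknown names through */
}

int main(void)
{
	printf("%s %s %s\n", normalize_arch("i686"),
	       normalize_arch("aarch64"), normalize_arch("sh4")); /* x86 arm64 sh */
	return 0;
}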
/linux-4.1.27/arch/sh/mm/ |
H A D | extable_32.c | 2 * linux/arch/sh/mm/extable_32.c 4 * linux/arch/i386/mm/extable.c
|
/linux-4.1.27/arch/metag/kernel/ |
H A D | machines.c | 2 * arch/metag/kernel/machines.c 11 #include <asm/mach/arch.h>
|
/linux-4.1.27/arch/x86/kvm/ |
H A D | kvm_cache_regs.h | 12 if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail)) kvm_register_read() 15 return vcpu->arch.regs[reg]; kvm_register_read() 22 vcpu->arch.regs[reg] = val; kvm_register_write() 23 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); kvm_register_write() 24 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); kvm_register_write() 42 (unsigned long *)&vcpu->arch.regs_avail)) kvm_pdptr_read() 45 return vcpu->arch.walk_mmu->pdptrs[index]; kvm_pdptr_read() 51 if (tmask & vcpu->arch.cr0_guest_owned_bits) kvm_read_cr0_bits() 53 return vcpu->arch.cr0 & mask; kvm_read_cr0_bits() 64 if (tmask & vcpu->arch.cr4_guest_owned_bits) kvm_read_cr4_bits() 66 return vcpu->arch.cr4 & mask; kvm_read_cr4_bits() 71 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) kvm_read_cr3() 73 return vcpu->arch.cr3; kvm_read_cr3() 89 vcpu->arch.hflags |= HF_GUEST_MASK; enter_guest_mode() 94 vcpu->arch.hflags &= ~HF_GUEST_MASK; leave_guest_mode() 99 return vcpu->arch.hflags & HF_GUEST_MASK; is_guest_mode()
|
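kvm_register_read()/kvm_register_write() above implement lazy register caching: regs_avail marks fields already fetched from hardware, regs_dirty marks fields that must be written back before re-entering the guest. A standalone sketch of the same pattern with plain bitmasks and a stubbed hardware read:

#include <stdio.h>

enum { REG_RAX, REG_RCX, NR_REGS };

static unsigned long regs[NR_REGS];
static unsigned long regs_avail, regs_dirty;

static unsigned long hw_read(int reg)
{
	return 0x1000 + reg;                     /* stub for the VMCS/VMCB read */
}

static unsigned long reg_read(int reg)
{
	if (!(regs_avail & (1UL << reg))) {      /* not cached yet: fault it in */
		regs[reg] = hw_read(reg);
		regs_avail |= 1UL << reg;
	}
	return regs[reg];
}

static void reg_write(int reg, unsigned long val)
{
	regs[reg] = val;
	regs_avail |= 1UL << reg;
	regs_dirty |= 1UL << reg;                /* flush before next guest entry */
}

int main(void)
{
	printf("rcx=%#lx\n", reg_read(REG_RCX));
	reg_write(REG_RAX, 42);
	printf("dirty=%#lx\n", regs_dirty);
	return 0;
}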
H A D | x86.h | 9 vcpu->arch.exception.pending = false; kvm_clear_exception_queue() 15 vcpu->arch.interrupt.pending = true; kvm_queue_interrupt() 16 vcpu->arch.interrupt.soft = soft; kvm_queue_interrupt() 17 vcpu->arch.interrupt.nr = vector; kvm_queue_interrupt() 22 vcpu->arch.interrupt.pending = false; kvm_clear_interrupt_queue() 27 return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending || kvm_event_needs_reinjection() 28 vcpu->arch.nmi_injected; kvm_event_needs_reinjection() 44 return vcpu->arch.efer & EFER_LMA; is_long_mode() 62 return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; mmu_is_nested() 88 vcpu->arch.mmio_gva = gva & PAGE_MASK; vcpu_cache_mmio_info() 89 vcpu->arch.access = access; vcpu_cache_mmio_info() 90 vcpu->arch.mmio_gfn = gfn; vcpu_cache_mmio_info() 91 vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation; vcpu_cache_mmio_info() 96 return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation; vcpu_match_mmio_gen() 107 if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK)) vcpu_clear_mmio_info() 110 vcpu->arch.mmio_gva = 0; vcpu_clear_mmio_info() 115 if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva && vcpu_match_mmio_gva() 116 vcpu->arch.mmio_gva == (gva & PAGE_MASK)) vcpu_match_mmio_gva() 124 if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn && vcpu_match_mmio_gpa() 125 vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT) vcpu_match_mmio_gpa()
|
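x86.h caches the last emulated MMIO translation and keys it to the memslot generation, so the entry self-invalidates whenever the slot layout changes. A compact model of that check, with simplified fields rather than the kvm_vcpu layout:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK (~(((uint64_t)1 << PAGE_SHIFT) - 1))

struct mmio_cache {
	uint64_t gva;		/* page-aligned guest virtual address, 0 = empty */
	uint64_t gfn;		/* cached translation */
	uint64_t gen;		/* memslot generation when the entry was filled */
};

static bool mmio_cache_hit(const struct mmio_cache *c, uint64_t gva,
			   uint64_t cur_gen)
{
	/* a generation bump after memslot changes invalidates the entry */
	return c->gen == cur_gen && c->gva && c->gva == (gva & PAGE_MASK);
}

int main(void)
{
	struct mmio_cache c = { .gva = 0x7000, .gfn = 0x7, .gen = 1 };

	return mmio_cache_hit(&c, 0x7abc, 1) ? 0 : 1;	/* same page, same gen */
}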
H A D | x86.c | 73 container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt) 181 vcpu->arch.apf.gfns[i] = ~0; kvm_async_pf_hash_reset() 271 return vcpu->arch.apic_base; kvm_get_apic_base() 277 u64 old_state = vcpu->arch.apic_base & kvm_set_apic_base() 360 if (!vcpu->arch.exception.pending) { kvm_multiple_exception() 364 vcpu->arch.exception.pending = true; kvm_multiple_exception() 365 vcpu->arch.exception.has_error_code = has_error; kvm_multiple_exception() 366 vcpu->arch.exception.nr = nr; kvm_multiple_exception() 367 vcpu->arch.exception.error_code = error_code; kvm_multiple_exception() 368 vcpu->arch.exception.reinject = reinject; kvm_multiple_exception() 373 prev_nr = vcpu->arch.exception.nr; kvm_multiple_exception() 384 vcpu->arch.exception.pending = true; kvm_multiple_exception() 385 vcpu->arch.exception.has_error_code = true; kvm_multiple_exception() 386 vcpu->arch.exception.nr = DF_VECTOR; kvm_multiple_exception() 387 vcpu->arch.exception.error_code = 0; kvm_multiple_exception() 419 vcpu->arch.cr2 = fault->address; kvm_inject_page_fault() 427 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); kvm_propagate_fault() 429 vcpu->arch.mmu.inject_page_fault(vcpu, fault); kvm_propagate_fault() 436 atomic_inc(&vcpu->arch.nmi_queued); kvm_inject_nmi() 503 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, kvm_read_nested_guest_page() 527 (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { load_pdptrs() 536 (unsigned long *)&vcpu->arch.regs_avail); load_pdptrs() 538 (unsigned long *)&vcpu->arch.regs_dirty); load_pdptrs() 547 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; pdptrs_changed() 557 (unsigned long *)&vcpu->arch.regs_avail)) pdptrs_changed() 566 changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; pdptrs_changed() 595 if ((vcpu->arch.efer & EFER_LME)) { kvm_set_cr0() 605 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_set_cr0() 637 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); kvm_load_guest_xcr0() 645 if (vcpu->arch.xcr0 != host_xcr0) kvm_put_guest_xcr0() 654 u64 old_xcr0 = vcpu->arch.xcr0; __kvm_set_xcr() 670 valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP; __kvm_set_xcr() 683 vcpu->arch.xcr0 = xcr0; __kvm_set_xcr() 727 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_set_cr4() 770 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) kvm_set_cr3() 773 vcpu->arch.cr3 = cr3; kvm_set_cr3() 774 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); kvm_set_cr3() 787 vcpu->arch.cr8 = cr8; kvm_set_cr8() 797 return vcpu->arch.cr8; kvm_get_cr8() 807 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; kvm_update_dr0123() 808 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; kvm_update_dr0123() 815 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); kvm_update_dr6() 823 dr7 = vcpu->arch.guest_debug_dr7; kvm_update_dr7() 825 dr7 = vcpu->arch.dr7; kvm_update_dr7() 827 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; kvm_update_dr7() 829 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; kvm_update_dr7() 845 vcpu->arch.db[dr] = val; __kvm_set_dr() 847 vcpu->arch.eff_db[dr] = val; __kvm_set_dr() 854 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); __kvm_set_dr() 862 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; __kvm_set_dr() 884 *val = vcpu->arch.db[dr]; kvm_get_dr() 890 *val = vcpu->arch.dr6; kvm_get_dr() 897 *val = vcpu->arch.dr7; kvm_get_dr() 982 u64 old_efer = vcpu->arch.efer; set_efer() 988 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) set_efer() 992 efer |= vcpu->arch.efer & EFER_LMA; set_efer() 1138 if 
(kvm->arch.kvmclock_offset) { kvm_write_wall_clock() 1139 struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); kvm_write_wall_clock() 1209 return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, nsec_to_cycles() 1210 vcpu->arch.virtual_tsc_shift); nsec_to_cycles() 1231 &vcpu->arch.virtual_tsc_shift, kvm_set_tsc_khz() 1232 &vcpu->arch.virtual_tsc_mult); kvm_set_tsc_khz() 1233 vcpu->arch.virtual_tsc_khz = this_tsc_khz; kvm_set_tsc_khz() 1252 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, compute_guest_tsc() 1253 vcpu->arch.virtual_tsc_mult, compute_guest_tsc() 1254 vcpu->arch.virtual_tsc_shift); compute_guest_tsc() 1255 tsc += vcpu->arch.this_tsc_write; compute_guest_tsc() 1263 struct kvm_arch *ka = &vcpu->kvm->arch; kvm_track_tsc_matching() 1290 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; update_ia32_tsc_adjust_msr() 1303 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); kvm_write_tsc() 1306 elapsed = ns - kvm->arch.last_tsc_nsec; kvm_write_tsc() 1308 if (vcpu->arch.virtual_tsc_khz) { kvm_write_tsc() 1312 usdiff = data - kvm->arch.last_tsc_write; kvm_write_tsc() 1314 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; kvm_write_tsc() 1329 : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); kvm_write_tsc() 1354 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { kvm_write_tsc() 1356 offset = kvm->arch.cur_tsc_offset; kvm_write_tsc() 1365 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); kvm_write_tsc() 1374 * These values are tracked in kvm->arch.cur_xxx variables. kvm_write_tsc() 1376 kvm->arch.cur_tsc_generation++; kvm_write_tsc() 1377 kvm->arch.cur_tsc_nsec = ns; kvm_write_tsc() 1378 kvm->arch.cur_tsc_write = data; kvm_write_tsc() 1379 kvm->arch.cur_tsc_offset = offset; kvm_write_tsc() 1382 kvm->arch.cur_tsc_generation, data); kvm_write_tsc() 1389 kvm->arch.last_tsc_nsec = ns; kvm_write_tsc() 1390 kvm->arch.last_tsc_write = data; kvm_write_tsc() 1391 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; kvm_write_tsc() 1393 vcpu->arch.last_guest_tsc = data; kvm_write_tsc() 1396 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; kvm_write_tsc() 1397 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; kvm_write_tsc() 1398 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; kvm_write_tsc() 1403 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); kvm_write_tsc() 1405 spin_lock(&kvm->arch.pvclock_gtod_sync_lock); kvm_write_tsc() 1407 kvm->arch.nr_vcpus_matched_tsc = 0; kvm_write_tsc() 1409 kvm->arch.nr_vcpus_matched_tsc++; kvm_write_tsc() 1413 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); kvm_write_tsc() 1538 struct kvm_arch *ka = &kvm->arch; pvclock_update_vm_gtod_copy() 1571 struct kvm_arch *ka = &kvm->arch; kvm_gen_update_masterclock() 1592 struct kvm_vcpu_arch *vcpu = &v->arch; kvm_guest_time_update() 1593 struct kvm_arch *ka = &v->kvm->arch; kvm_guest_time_update() 1662 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; kvm_guest_time_update() 1743 struct kvm *kvm = container_of(ka, struct kvm, arch); kvmclock_update_fn() 1757 schedule_delayed_work(&kvm->arch.kvmclock_update_work, kvm_gen_kvmclock_update() 1768 struct kvm *kvm = container_of(ka, struct kvm, arch); kvmclock_sync_fn() 1770 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); kvmclock_sync_fn() 1771 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn() 1856 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; set_msr_mtrr() 1862 
vcpu->arch.mtrr_state.def_type = data; set_msr_mtrr() 1863 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; set_msr_mtrr() 1871 vcpu->arch.pat = data; set_msr_mtrr() 1880 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; set_msr_mtrr() 1883 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; set_msr_mtrr() 1893 u64 mcg_cap = vcpu->arch.mcg_cap; set_msr_mce() 1898 vcpu->arch.mcg_status = data; set_msr_mce() 1905 vcpu->arch.mcg_ctl = data; set_msr_mce() 1919 vcpu->arch.mce_banks[offset] = data; set_msr_mce() 1931 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 xen_hvm_config() 1932 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; xen_hvm_config() 1933 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 xen_hvm_config() 1934 : kvm->arch.xen_hvm_config.blob_size_32; xen_hvm_config() 1960 return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; kvm_hv_hypercall_enabled() 1984 kvm->arch.hv_guest_os_id = data; set_msr_hyperv_pw() 1986 if (!kvm->arch.hv_guest_os_id) set_msr_hyperv_pw() 1987 kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; set_msr_hyperv_pw() 1995 if (!kvm->arch.hv_guest_os_id) set_msr_hyperv_pw() 1998 kvm->arch.hv_hypercall = data; set_msr_hyperv_pw() 2009 kvm->arch.hv_hypercall = data; set_msr_hyperv_pw() 2017 kvm->arch.hv_tsc_page = data; set_msr_hyperv_pw() 2043 vcpu->arch.hv_vapic = data; set_msr_hyperv() 2054 vcpu->arch.hv_vapic = data; set_msr_hyperv() 2083 vcpu->arch.apf.msr_val = data; kvm_pv_enable_async_pf() 2091 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, kvm_pv_enable_async_pf() 2095 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); kvm_pv_enable_async_pf() 2102 vcpu->arch.pv_time_enabled = false; kvmclock_reset() 2109 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) accumulate_steal_time() 2112 delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; accumulate_steal_time() 2113 vcpu->arch.st.last_steal = current->sched_info.run_delay; accumulate_steal_time() 2114 vcpu->arch.st.accum_steal = delta; accumulate_steal_time() 2121 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) record_steal_time() 2124 if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, record_steal_time() 2125 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) record_steal_time() 2128 vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; record_steal_time() 2129 vcpu->arch.st.steal.version += 2; record_steal_time() 2130 vcpu->arch.st.accum_steal = 0; record_steal_time() 2132 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, record_steal_time() 2133 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); record_steal_time() 2195 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; kvm_set_msr_common() 2198 vcpu->arch.ia32_tsc_adjust_msr = data; kvm_set_msr_common() 2202 vcpu->arch.ia32_misc_enable_msr = data; kvm_set_msr_common() 2206 vcpu->kvm->arch.wall_clock = data; kvm_set_msr_common() 2212 struct kvm_arch *ka = &vcpu->kvm->arch; kvm_set_msr_common() 2226 vcpu->arch.time = data; kvm_set_msr_common() 2236 &vcpu->arch.pv_time, data & ~1ULL, kvm_set_msr_common() 2238 vcpu->arch.pv_time_enabled = false; kvm_set_msr_common() 2240 vcpu->arch.pv_time_enabled = true; kvm_set_msr_common() 2256 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, kvm_set_msr_common() 2261 vcpu->arch.st.msr_val = data; kvm_set_msr_common() 2345 vcpu->arch.osvw.length = data; kvm_set_msr_common() 2350 vcpu->arch.osvw.status = data; kvm_set_msr_common() 2353 if (msr && (msr == 
vcpu->kvm->arch.xen_hvm_config.msr)) kvm_set_msr_common() 2385 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; get_msr_mtrr() 2391 *pdata = vcpu->arch.mtrr_state.def_type + get_msr_mtrr() 2392 (vcpu->arch.mtrr_state.enabled << 10); get_msr_mtrr() 2400 *pdata = vcpu->arch.pat; get_msr_mtrr() 2409 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; get_msr_mtrr() 2412 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; get_msr_mtrr() 2422 u64 mcg_cap = vcpu->arch.mcg_cap; get_msr_mce() 2431 data = vcpu->arch.mcg_cap; get_msr_mce() 2436 data = vcpu->arch.mcg_ctl; get_msr_mce() 2439 data = vcpu->arch.mcg_status; get_msr_mce() 2445 data = vcpu->arch.mce_banks[offset]; get_msr_mce() 2461 data = kvm->arch.hv_guest_os_id; get_msr_hyperv_pw() 2464 data = kvm->arch.hv_hypercall; get_msr_hyperv_pw() 2468 div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); get_msr_hyperv_pw() 2472 data = kvm->arch.hv_tsc_page; get_msr_hyperv_pw() 2506 data = vcpu->arch.hv_vapic; get_msr_hyperv() 2588 data = (u64)vcpu->arch.ia32_tsc_adjust_msr; kvm_get_msr_common() 2591 data = vcpu->arch.ia32_misc_enable_msr; kvm_get_msr_common() 2600 data = vcpu->arch.efer; kvm_get_msr_common() 2604 data = vcpu->kvm->arch.wall_clock; kvm_get_msr_common() 2608 data = vcpu->arch.time; kvm_get_msr_common() 2611 data = vcpu->arch.apf.msr_val; kvm_get_msr_common() 2614 data = vcpu->arch.st.msr_val; kvm_get_msr_common() 2617 data = vcpu->arch.pv_eoi.msr_val; kvm_get_msr_common() 2664 data = vcpu->arch.osvw.length; kvm_get_msr_common() 2669 data = vcpu->arch.osvw.status; kvm_get_msr_common() 2927 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); kvm_arch_vcpu_load() 2936 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { kvm_arch_vcpu_load() 2937 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); kvm_arch_vcpu_load() 2938 vcpu->arch.tsc_offset_adjustment = 0; kvm_arch_vcpu_load() 2943 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 : kvm_arch_vcpu_load() 2944 native_read_tsc() - vcpu->arch.last_host_tsc; kvm_arch_vcpu_load() 2949 vcpu->arch.last_guest_tsc); kvm_arch_vcpu_load() 2951 vcpu->arch.tsc_catchup = 1; kvm_arch_vcpu_load() 2957 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) kvm_arch_vcpu_load() 2971 vcpu->arch.last_host_tsc = native_read_tsc(); kvm_arch_vcpu_put() 2978 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); kvm_vcpu_ioctl_get_lapic() 3018 vcpu->arch.tpr_access_reporting = !!tac->enabled; vcpu_ioctl_tpr_access_reporting() 3034 vcpu->arch.mcg_cap = mcg_cap; kvm_vcpu_ioctl_x86_setup_mce() 3037 vcpu->arch.mcg_ctl = ~(u64)0; kvm_vcpu_ioctl_x86_setup_mce() 3040 vcpu->arch.mce_banks[bank*4] = ~(u64)0; kvm_vcpu_ioctl_x86_setup_mce() 3048 u64 mcg_cap = vcpu->arch.mcg_cap; kvm_vcpu_ioctl_x86_set_mce() 3050 u64 *banks = vcpu->arch.mce_banks; kvm_vcpu_ioctl_x86_set_mce() 3059 vcpu->arch.mcg_ctl != ~(u64)0) kvm_vcpu_ioctl_x86_set_mce() 3069 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || kvm_vcpu_ioctl_x86_set_mce() 3078 vcpu->arch.mcg_status = mce->mcg_status; kvm_vcpu_ioctl_x86_set_mce() 3098 vcpu->arch.exception.pending && kvm_vcpu_ioctl_x86_get_vcpu_events() 3099 !kvm_exception_is_soft(vcpu->arch.exception.nr); kvm_vcpu_ioctl_x86_get_vcpu_events() 3100 events->exception.nr = vcpu->arch.exception.nr; kvm_vcpu_ioctl_x86_get_vcpu_events() 3101 events->exception.has_error_code = vcpu->arch.exception.has_error_code; kvm_vcpu_ioctl_x86_get_vcpu_events() 3103 events->exception.error_code = vcpu->arch.exception.error_code; kvm_vcpu_ioctl_x86_get_vcpu_events() 3106 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; kvm_vcpu_ioctl_x86_get_vcpu_events() 3107 events->interrupt.nr = vcpu->arch.interrupt.nr; kvm_vcpu_ioctl_x86_get_vcpu_events() 3111 events->nmi.injected = vcpu->arch.nmi_injected; kvm_vcpu_ioctl_x86_get_vcpu_events() 3112 events->nmi.pending = vcpu->arch.nmi_pending != 0; kvm_vcpu_ioctl_x86_get_vcpu_events() 3132 vcpu->arch.exception.pending = events->exception.injected; kvm_vcpu_ioctl_x86_set_vcpu_events() 3133 vcpu->arch.exception.nr = events->exception.nr; kvm_vcpu_ioctl_x86_set_vcpu_events() 3134 vcpu->arch.exception.has_error_code = events->exception.has_error_code; kvm_vcpu_ioctl_x86_set_vcpu_events() 3135 vcpu->arch.exception.error_code = events->exception.error_code; kvm_vcpu_ioctl_x86_set_vcpu_events() 3137 vcpu->arch.interrupt.pending = events->interrupt.injected; kvm_vcpu_ioctl_x86_set_vcpu_events() 3138 vcpu->arch.interrupt.nr = events->interrupt.nr; kvm_vcpu_ioctl_x86_set_vcpu_events() 3139 vcpu->arch.interrupt.soft = events->interrupt.soft; kvm_vcpu_ioctl_x86_set_vcpu_events() 3144 vcpu->arch.nmi_injected = events->nmi.injected; kvm_vcpu_ioctl_x86_set_vcpu_events() 3146 vcpu->arch.nmi_pending = events->nmi.pending; kvm_vcpu_ioctl_x86_set_vcpu_events() 3151 vcpu->arch.apic->sipi_vector = events->sipi_vector; kvm_vcpu_ioctl_x86_set_vcpu_events() 3163 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); kvm_vcpu_ioctl_x86_get_debugregs() 3166 dbgregs->dr7 = vcpu->arch.dr7; kvm_vcpu_ioctl_x86_get_debugregs() 3182 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); kvm_vcpu_ioctl_x86_set_debugregs() 3184 vcpu->arch.dr6 = dbgregs->dr6; kvm_vcpu_ioctl_x86_set_debugregs() 3186 vcpu->arch.dr7 = dbgregs->dr7; kvm_vcpu_ioctl_x86_set_debugregs() 3196 struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave; fill_xsave() 3232 struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave; load_xsave() 3277 &vcpu->arch.guest_fpu.state->fxsave, 
kvm_vcpu_ioctl_x86_get_xsave() 3302 memcpy(&vcpu->arch.guest_fpu.state->fxsave, kvm_vcpu_ioctl_x86_set_xsave() 3319 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; kvm_vcpu_ioctl_x86_get_xcrs() 3353 if (!vcpu->arch.pv_time_enabled) kvm_set_guest_paused() 3355 vcpu->arch.pvclock_set_guest_stopped_request = true; kvm_set_guest_paused() 3377 if (!vcpu->arch.apic) kvm_arch_vcpu_ioctl() 3395 if (!vcpu->arch.apic) kvm_arch_vcpu_ioctl() 3613 r = vcpu->arch.virtual_tsc_khz; kvm_arch_vcpu_ioctl() 3646 kvm->arch.ept_identity_map_addr = ident_addr; kvm_vm_ioctl_set_identity_map_addr() 3659 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; kvm_vm_ioctl_set_nr_mmu_pages() 3667 return kvm->arch.n_max_mmu_pages; kvm_vm_ioctl_get_nr_mmu_pages() 3731 mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm_vm_ioctl_get_pit() 3732 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); kvm_vm_ioctl_get_pit() 3733 mutex_unlock(&kvm->arch.vpit->pit_state.lock); kvm_vm_ioctl_get_pit() 3740 mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm_vm_ioctl_set_pit() 3741 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); kvm_vm_ioctl_set_pit() 3744 mutex_unlock(&kvm->arch.vpit->pit_state.lock); kvm_vm_ioctl_set_pit() 3752 mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm_vm_ioctl_get_pit2() 3753 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, kvm_vm_ioctl_get_pit2() 3755 ps->flags = kvm->arch.vpit->pit_state.flags; kvm_vm_ioctl_get_pit2() 3756 mutex_unlock(&kvm->arch.vpit->pit_state.lock); kvm_vm_ioctl_get_pit2() 3766 mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm_vm_ioctl_set_pit2() 3767 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; kvm_vm_ioctl_set_pit2() 3771 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, kvm_vm_ioctl_set_pit2() 3772 sizeof(kvm->arch.vpit->pit_state.channels)); kvm_vm_ioctl_set_pit2() 3773 kvm->arch.vpit->pit_state.flags = ps->flags; kvm_vm_ioctl_set_pit2() 3775 kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start); kvm_vm_ioctl_set_pit2() 3776 mutex_unlock(&kvm->arch.vpit->pit_state.lock); kvm_vm_ioctl_set_pit2() 3783 if (!kvm->arch.vpit) kvm_vm_ioctl_reinject() 3785 mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm_vm_ioctl_reinject() 3786 kvm->arch.vpit->pit_state.reinject = control->pit_reinject; kvm_vm_ioctl_reinject() 3787 mutex_unlock(&kvm->arch.vpit->pit_state.lock); kvm_vm_ioctl_reinject() 3890 if (kvm->arch.vpic) kvm_arch_vm_ioctl() 3914 kvm->arch.vpic = vpic; kvm_arch_vm_ioctl() 3940 if (kvm->arch.vpit) kvm_arch_vm_ioctl() 3943 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); kvm_arch_vm_ioctl() 3944 if (kvm->arch.vpit) kvm_arch_vm_ioctl() 3999 if (!kvm->arch.vpit) kvm_arch_vm_ioctl() 4015 if (!kvm->arch.vpit) kvm_arch_vm_ioctl() 4022 if (!kvm->arch.vpit) kvm_arch_vm_ioctl() 4038 if (!kvm->arch.vpit) kvm_arch_vm_ioctl() 4053 if (copy_from_user(&kvm->arch.xen_hvm_config, argp, kvm_arch_vm_ioctl() 4057 if (kvm->arch.xen_hvm_config.flags) kvm_arch_vm_ioctl() 4080 kvm->arch.kvmclock_offset = delta; kvm_arch_vm_ioctl() 4090 user_ns.clock = kvm->arch.kvmclock_offset + now_ns; kvm_arch_vm_ioctl() 4151 if (!(vcpu->arch.apic && vcpu_mmio_write() 4152 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) vcpu_mmio_write() 4171 if (!(vcpu->arch.apic && vcpu_mmio_read() 4172 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, vcpu_mmio_read() 4207 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception); translate_nested_gpa() 4216 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 
access, exception); kvm_mmu_gva_to_gpa_read() 4224 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); kvm_mmu_gva_to_gpa_fetch() 4232 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); kvm_mmu_gva_to_gpa_write() 4239 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); kvm_mmu_gva_to_gpa_system() 4250 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, kvm_read_guest_virt_helper() 4284 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, kvm_fetch_guest_virt() 4330 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, kvm_write_guest_virt_system() 4362 && !permission_fault(vcpu, vcpu->arch.walk_mmu, vcpu_mmio_gva_to_gpa() 4363 vcpu->arch.access, access)) { vcpu_mmio_gva_to_gpa() 4364 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | vcpu_mmio_gva_to_gpa() 4370 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); vcpu_mmio_gva_to_gpa() 4668 if (vcpu->arch.pio.in) kernel_pio() 4669 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, kernel_pio() 4670 vcpu->arch.pio.size, pd); kernel_pio() 4673 vcpu->arch.pio.port, vcpu->arch.pio.size, kernel_pio() 4682 vcpu->arch.pio.port = port; emulator_pio_in_out() 4683 vcpu->arch.pio.in = in; emulator_pio_in_out() 4684 vcpu->arch.pio.count = count; emulator_pio_in_out() 4685 vcpu->arch.pio.size = size; emulator_pio_in_out() 4687 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { emulator_pio_in_out() 4688 vcpu->arch.pio.count = 0; emulator_pio_in_out() 4709 if (vcpu->arch.pio.count) emulator_pio_in_emulated() 4715 memcpy(val, vcpu->arch.pio_data, size * count); emulator_pio_in_emulated() 4716 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); emulator_pio_in_emulated() 4717 vcpu->arch.pio.count = 0; emulator_pio_in_emulated() 4730 memcpy(vcpu->arch.pio_data, val, size * count); emulator_pio_out_emulated() 4731 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); emulator_pio_out_emulated() 4753 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); kvm_emulate_wbinvd_noskip() 4754 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, kvm_emulate_wbinvd_noskip() 4757 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); kvm_emulate_wbinvd_noskip() 4805 value = vcpu->arch.cr2; emulator_get_cr() 4834 vcpu->arch.cr2 = val; emulator_set_cr() 4979 emul_to_vcpu(ctxt)->arch.halt_request = 1; emulator_halt() 5085 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; inject_emulated_exception() 5099 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; init_emulate_ctxt() 5114 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; init_emulate_ctxt() 5119 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; kvm_inject_realmode_interrupt() 5137 vcpu->arch.nmi_pending = 0; kvm_inject_realmode_interrupt() 5139 vcpu->arch.interrupt.pending = false; kvm_inject_realmode_interrupt() 5172 if (!vcpu->arch.mmu.direct_map) { reexecute_instruction() 5205 if (vcpu->arch.mmu.direct_map) { reexecute_instruction() 5209 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; reexecute_instruction() 5239 last_retry_eip = vcpu->arch.last_retry_eip; retry_instruction() 5240 last_retry_addr = vcpu->arch.last_retry_addr; retry_instruction() 5255 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; retry_instruction() 5266 vcpu->arch.last_retry_eip = ctxt->eip; retry_instruction() 5267 vcpu->arch.last_retry_addr = cr2; retry_instruction() 5269 if (!vcpu->arch.mmu.direct_map) retry_instruction() 5309 kvm_run->debug.arch.dr6 = DR6_BS | 
DR6_FIXED_1 | kvm_vcpu_check_singlestep() 5311 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; kvm_vcpu_check_singlestep() 5312 kvm_run->debug.arch.exception = DB_VECTOR; kvm_vcpu_check_singlestep() 5316 vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; kvm_vcpu_check_singlestep() 5322 vcpu->arch.dr6 &= ~15; kvm_vcpu_check_singlestep() 5323 vcpu->arch.dr6 |= DR6_BS | DR6_RTM; kvm_vcpu_check_singlestep() 5332 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { kvm_vcpu_check_breakpoint() 5336 vcpu->arch.guest_debug_dr7, kvm_vcpu_check_breakpoint() 5337 vcpu->arch.eff_db); kvm_vcpu_check_breakpoint() 5340 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; kvm_vcpu_check_breakpoint() 5341 kvm_run->debug.arch.pc = eip; kvm_vcpu_check_breakpoint() 5342 kvm_run->debug.arch.exception = DB_VECTOR; kvm_vcpu_check_breakpoint() 5349 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && kvm_vcpu_check_breakpoint() 5353 vcpu->arch.dr7, kvm_vcpu_check_breakpoint() 5354 vcpu->arch.db); kvm_vcpu_check_breakpoint() 5357 vcpu->arch.dr6 &= ~15; kvm_vcpu_check_breakpoint() 5358 vcpu->arch.dr6 |= dr6 | DR6_RTM; kvm_vcpu_check_breakpoint() 5375 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; x86_emulate_instruction() 5377 bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; x86_emulate_instruction() 5383 vcpu->arch.write_fault_to_shadow_pgtable = false; x86_emulate_instruction() 5433 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { x86_emulate_instruction() 5434 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; x86_emulate_instruction() 5456 } else if (vcpu->arch.pio.count) { x86_emulate_instruction() 5457 if (!vcpu->arch.pio.in) { x86_emulate_instruction() 5459 vcpu->arch.pio.count = 0; x86_emulate_instruction() 5462 vcpu->arch.complete_userspace_io = complete_emulated_pio; x86_emulate_instruction() 5469 vcpu->arch.complete_userspace_io = complete_emulated_mmio; x86_emulate_instruction() 5478 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; x86_emulate_instruction() 5495 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; x86_emulate_instruction() 5504 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, kvm_fast_pio_out() 5507 vcpu->arch.pio.count = 0; kvm_fast_pio_out() 5869 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; kvm_vcpu_halt() 6062 if (!vcpu->arch.apic) update_cr8_intercept() 6065 if (!vcpu->arch.apic->vapic_addr) update_cr8_intercept() 6083 if (vcpu->arch.exception.pending) { inject_pending_event() 6084 trace_kvm_inj_exception(vcpu->arch.exception.nr, inject_pending_event() 6085 vcpu->arch.exception.has_error_code, inject_pending_event() 6086 vcpu->arch.exception.error_code); inject_pending_event() 6088 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) inject_pending_event() 6092 if (vcpu->arch.exception.nr == DB_VECTOR && inject_pending_event() 6093 (vcpu->arch.dr7 & DR7_GD)) { inject_pending_event() 6094 vcpu->arch.dr7 &= ~DR7_GD; inject_pending_event() 6098 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, inject_pending_event() 6099 vcpu->arch.exception.has_error_code, inject_pending_event() 6100 vcpu->arch.exception.error_code, inject_pending_event() 6101 vcpu->arch.exception.reinject); inject_pending_event() 6105 if (vcpu->arch.nmi_injected) { inject_pending_event() 6110 if (vcpu->arch.interrupt.pending) { inject_pending_event() 6122 if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { inject_pending_event() 6123 --vcpu->arch.nmi_pending; inject_pending_event() 6124 vcpu->arch.nmi_injected = true; inject_pending_event() 6157 
if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) process_nmi() 6160 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); process_nmi() 6161 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); process_nmi() 6170 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) vcpu_scan_ioapic() 6267 vcpu->arch.apf.halted = true; vcpu_enter_guest() 6287 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { vcpu_enter_guest() 6296 if (vcpu->arch.nmi_pending) vcpu_enter_guest() 6354 if (unlikely(vcpu->arch.switch_db_regs)) { vcpu_enter_guest() 6356 set_debugreg(vcpu->arch.eff_db[0], 0); vcpu_enter_guest() 6357 set_debugreg(vcpu->arch.eff_db[1], 1); vcpu_enter_guest() 6358 set_debugreg(vcpu->arch.eff_db[2], 2); vcpu_enter_guest() 6359 set_debugreg(vcpu->arch.eff_db[3], 3); vcpu_enter_guest() 6360 set_debugreg(vcpu->arch.dr6, 6); vcpu_enter_guest() 6361 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; vcpu_enter_guest() 6374 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { vcpu_enter_guest() 6380 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; vcpu_enter_guest() 6393 vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, vcpu_enter_guest() 6428 if (unlikely(vcpu->arch.tsc_always_catchup)) vcpu_enter_guest() 6431 if (vcpu->arch.apic_attention) vcpu_enter_guest() 6439 if (unlikely(vcpu->arch.apic_attention)) vcpu_enter_guest() 6456 switch(vcpu->arch.mp_state) { vcpu_block() 6458 vcpu->arch.pv.pv_unhalted = false; vcpu_block() 6459 vcpu->arch.mp_state = vcpu_block() 6462 vcpu->arch.apf.halted = false; vcpu_block() 6481 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && vcpu_run() 6482 !vcpu->arch.apf.halted) vcpu_run() 6533 BUG_ON(!vcpu->arch.pio.count); complete_emulated_pio() 6597 vcpu->arch.complete_userspace_io = complete_emulated_mmio; complete_emulated_mmio() 6613 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_arch_vcpu_ioctl_run() 6629 if (unlikely(vcpu->arch.complete_userspace_io)) { kvm_arch_vcpu_ioctl_run() 6630 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; kvm_arch_vcpu_ioctl_run() 6631 vcpu->arch.complete_userspace_io = NULL; kvm_arch_vcpu_ioctl_run() 6636 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); kvm_arch_vcpu_ioctl_run() 6650 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { kvm_arch_vcpu_ioctl_get_regs() 6658 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); kvm_arch_vcpu_ioctl_get_regs() 6659 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_arch_vcpu_ioctl_get_regs() 6688 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; kvm_arch_vcpu_ioctl_set_regs() 6689 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_arch_vcpu_ioctl_set_regs() 6713 vcpu->arch.exception.pending = false; kvm_arch_vcpu_ioctl_set_regs() 6753 sregs->cr2 = vcpu->arch.cr2; kvm_arch_vcpu_ioctl_get_sregs() 6757 sregs->efer = vcpu->arch.efer; kvm_arch_vcpu_ioctl_get_sregs() 6762 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) kvm_arch_vcpu_ioctl_get_sregs() 6763 set_bit(vcpu->arch.interrupt.nr, kvm_arch_vcpu_ioctl_get_sregs() 6773 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && kvm_arch_vcpu_ioctl_get_mpstate() 6774 vcpu->arch.pv.pv_unhalted) kvm_arch_vcpu_ioctl_get_mpstate() 6777 mp_state->mp_state = vcpu->arch.mp_state; kvm_arch_vcpu_ioctl_get_mpstate() 6790 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; kvm_arch_vcpu_ioctl_set_mpstate() 6791 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); kvm_arch_vcpu_ioctl_set_mpstate() 6793 vcpu->arch.mp_state = 
mp_state->mp_state; kvm_arch_vcpu_ioctl_set_mpstate() 6801 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; kvm_task_switch() 6837 vcpu->arch.cr2 = sregs->cr2; kvm_arch_vcpu_ioctl_set_sregs() 6839 vcpu->arch.cr3 = sregs->cr3; kvm_arch_vcpu_ioctl_set_sregs() 6840 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); kvm_arch_vcpu_ioctl_set_sregs() 6844 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; kvm_arch_vcpu_ioctl_set_sregs() 6852 vcpu->arch.cr0 = sregs->cr0; kvm_arch_vcpu_ioctl_set_sregs() 6861 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); kvm_arch_vcpu_ioctl_set_sregs() 6893 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; kvm_arch_vcpu_ioctl_set_sregs() 6908 if (vcpu->arch.exception.pending) kvm_arch_vcpu_ioctl_set_guest_debug() 6928 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; kvm_arch_vcpu_ioctl_set_guest_debug() 6929 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; kvm_arch_vcpu_ioctl_set_guest_debug() 6932 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; kvm_arch_vcpu_ioctl_set_guest_debug() 6937 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + kvm_arch_vcpu_ioctl_set_guest_debug() 6979 &vcpu->arch.guest_fpu.state->fxsave; kvm_arch_vcpu_ioctl_get_fpu() 6996 &vcpu->arch.guest_fpu.state->fxsave; kvm_arch_vcpu_ioctl_set_fpu() 7014 err = fpu_alloc(&vcpu->arch.guest_fpu); fx_init() 7018 fpu_finit(&vcpu->arch.guest_fpu); fx_init() 7020 vcpu->arch.guest_fpu.state->xsave.xsave_hdr.xcomp_bv = fx_init() 7026 vcpu->arch.xcr0 = XSTATE_FP; fx_init() 7028 vcpu->arch.cr0 |= X86_CR0_ET; fx_init() 7036 fpu_free(&vcpu->arch.guest_fpu); fx_free() 7051 fpu_restore_checking(&vcpu->arch.guest_fpu); kvm_load_guest_fpu() 7061 fpu_save_init(&vcpu->arch.guest_fpu); kvm_put_guest_fpu() 7064 if (!vcpu->arch.eager_fpu) kvm_put_guest_fpu() 7074 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); kvm_arch_vcpu_free() 7103 vcpu->arch.mtrr_state.have_fixed = 1; kvm_arch_vcpu_setup() 7127 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, kvm_arch_vcpu_postcreate() 7134 vcpu->arch.apf.msr_val = 0; kvm_arch_vcpu_destroy() 7147 atomic_set(&vcpu->arch.nmi_queued, 0); kvm_vcpu_reset() 7148 vcpu->arch.nmi_pending = 0; kvm_vcpu_reset() 7149 vcpu->arch.nmi_injected = false; kvm_vcpu_reset() 7153 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); kvm_vcpu_reset() 7155 vcpu->arch.dr6 = DR6_INIT; kvm_vcpu_reset() 7157 vcpu->arch.dr7 = DR7_FIXED_1; kvm_vcpu_reset() 7160 vcpu->arch.cr2 = 0; kvm_vcpu_reset() 7163 vcpu->arch.apf.msr_val = 0; kvm_vcpu_reset() 7164 vcpu->arch.st.msr_val = 0; kvm_vcpu_reset() 7170 vcpu->arch.apf.halted = false; kvm_vcpu_reset() 7174 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); kvm_vcpu_reset() 7175 vcpu->arch.regs_avail = ~0; kvm_vcpu_reset() 7176 vcpu->arch.regs_dirty = ~0; kvm_vcpu_reset() 7213 if (stable && vcpu->arch.last_host_tsc > local_tsc) { kvm_for_each_vcpu() 7215 if (vcpu->arch.last_host_tsc > max_tsc) kvm_for_each_vcpu() 7216 max_tsc = vcpu->arch.last_host_tsc; kvm_for_each_vcpu() 7264 vcpu->arch.tsc_offset_adjustment += delta_cyc; kvm_for_each_vcpu() 7265 vcpu->arch.last_host_tsc = local_tsc; kvm_for_each_vcpu() 7275 kvm->arch.last_tsc_nsec = 0; 7276 kvm->arch.last_tsc_write = 0; 7313 return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); kvm_vcpu_compatible() 7327 vcpu->arch.pv.pv_unhalted = false; kvm_arch_vcpu_init() 7328 vcpu->arch.emulate_ctxt.ops = &emulate_ops; kvm_arch_vcpu_init() 7330 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; kvm_arch_vcpu_init() 7332 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; kvm_arch_vcpu_init() 7339 
vcpu->arch.pio_data = page_address(page); kvm_arch_vcpu_init() 7354 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, kvm_arch_vcpu_init() 7356 if (!vcpu->arch.mce_banks) { kvm_arch_vcpu_init() 7360 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; kvm_arch_vcpu_init() 7362 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { kvm_arch_vcpu_init() 7371 vcpu->arch.ia32_tsc_adjust_msr = 0x0; kvm_arch_vcpu_init() 7372 vcpu->arch.pv_time_enabled = false; kvm_arch_vcpu_init() 7374 vcpu->arch.guest_supported_xcr0 = 0; kvm_arch_vcpu_init() 7375 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; kvm_arch_vcpu_init() 7377 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); kvm_arch_vcpu_init() 7384 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); kvm_arch_vcpu_init() 7386 kfree(vcpu->arch.mce_banks); kvm_arch_vcpu_init() 7392 free_page((unsigned long)vcpu->arch.pio_data); kvm_arch_vcpu_init() 7402 kfree(vcpu->arch.mce_banks); kvm_arch_vcpu_uninit() 7407 free_page((unsigned long)vcpu->arch.pio_data); kvm_arch_vcpu_uninit() 7422 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); kvm_arch_init_vm() 7423 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); kvm_arch_init_vm() 7424 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); kvm_arch_init_vm() 7425 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); kvm_arch_init_vm() 7426 atomic_set(&kvm->arch.noncoherent_dma_count, 0); kvm_arch_init_vm() 7429 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); kvm_arch_init_vm() 7432 &kvm->arch.irq_sources_bitmap); kvm_arch_init_vm() 7434 raw_spin_lock_init(&kvm->arch.tsc_write_lock); kvm_arch_init_vm() 7435 mutex_init(&kvm->arch.apic_map_lock); kvm_arch_init_vm() 7436 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); kvm_arch_init_vm() 7440 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); kvm_arch_init_vm() 7441 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); kvm_arch_init_vm() 7480 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); kvm_arch_sync_events() 7481 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); kvm_arch_sync_events() 7506 kfree(kvm->arch.vpic); kvm_arch_destroy_vm() 7507 kfree(kvm->arch.vioapic); kvm_arch_destroy_vm() 7509 kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); kvm_arch_destroy_vm() 7518 if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { kvm_arch_free_memslot() 7519 kvfree(free->arch.rmap[i]); kvm_arch_free_memslot() 7520 free->arch.rmap[i] = NULL; kvm_arch_free_memslot() 7525 if (!dont || free->arch.lpage_info[i - 1] != kvm_arch_free_memslot() 7526 dont->arch.lpage_info[i - 1]) { kvm_arch_free_memslot() 7527 kvfree(free->arch.lpage_info[i - 1]); kvm_arch_free_memslot() 7528 free->arch.lpage_info[i - 1] = NULL; kvm_arch_free_memslot() 7546 slot->arch.rmap[i] = kvm_arch_create_memslot() 7547 kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); kvm_arch_create_memslot() 7548 if (!slot->arch.rmap[i]) kvm_arch_create_memslot() 7553 slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages * kvm_arch_create_memslot() 7554 sizeof(*slot->arch.lpage_info[i - 1])); kvm_arch_create_memslot() 7555 if (!slot->arch.lpage_info[i - 1]) kvm_arch_create_memslot() 7559 slot->arch.lpage_info[i - 1][0].write_count = 1; kvm_arch_create_memslot() 7561 slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; kvm_arch_create_memslot() 7573 slot->arch.lpage_info[i - 1][j].write_count = 1; kvm_arch_create_memslot() 7581 kvfree(slot->arch.rmap[i]); kvm_arch_create_memslot() 7582 slot->arch.rmap[i] = NULL; 
kvm_arch_create_memslot() 7586 kvfree(slot->arch.lpage_info[i - 1]); kvm_arch_create_memslot() 7587 slot->arch.lpage_info[i - 1] = NULL; kvm_arch_create_memslot() 7699 if (!kvm->arch.n_requested_mmu_pages) kvm_arch_commit_memory_region() 7753 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && kvm_arch_vcpu_runnable() 7754 !vcpu->arch.apf.halted) kvm_arch_vcpu_runnable() 7757 || vcpu->arch.pv.pv_unhalted kvm_arch_vcpu_runnable() 7758 || atomic_read(&vcpu->arch.nmi_queued) || kvm_arch_vcpu_runnable() 7802 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) __kvm_set_rflags() 7818 if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || kvm_arch_async_page_ready() 7826 if (!vcpu->arch.mmu.direct_map && kvm_arch_async_page_ready() 7827 work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) kvm_arch_async_page_ready() 7830 vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); kvm_arch_async_page_ready() 7847 while (vcpu->arch.apf.gfns[key] != ~0) kvm_add_async_pf_gfn() 7850 vcpu->arch.apf.gfns[key] = gfn; kvm_add_async_pf_gfn() 7859 (vcpu->arch.apf.gfns[key] != gfn && kvm_async_pf_gfn_slot() 7860 vcpu->arch.apf.gfns[key] != ~0); i++) kvm_async_pf_gfn_slot() 7868 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; kvm_find_async_pf_gfn() 7877 vcpu->arch.apf.gfns[i] = ~0; kvm_del_async_pf_gfn() 7880 if (vcpu->arch.apf.gfns[j] == ~0) kvm_del_async_pf_gfn() 7882 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); kvm_del_async_pf_gfn() 7889 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; kvm_del_async_pf_gfn() 7897 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, apf_put_user() 7906 trace_kvm_async_pf_not_present(work->arch.token, work->gva); kvm_arch_async_page_not_present() 7907 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); kvm_arch_async_page_not_present() 7909 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || kvm_arch_async_page_not_present() 7910 (vcpu->arch.apf.send_user_only && kvm_arch_async_page_not_present() 7918 fault.address = work->arch.token; kvm_arch_async_page_not_present() 7928 trace_kvm_async_pf_ready(work->arch.token, work->gva); kvm_arch_async_page_present() 7930 work->arch.token = ~0; /* broadcast wakeup */ kvm_arch_async_page_present() 7932 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); kvm_arch_async_page_present() 7934 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && kvm_arch_async_page_present() 7940 fault.address = work->arch.token; kvm_arch_async_page_present() 7943 vcpu->arch.apf.halted = false; kvm_arch_async_page_present() 7944 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; kvm_arch_async_page_present() 7949 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) kvm_arch_can_inject_async_page_present() 7958 atomic_inc(&kvm->arch.noncoherent_dma_count); kvm_arch_register_noncoherent_dma() 7964 atomic_dec(&kvm->arch.noncoherent_dma_count); kvm_arch_unregister_noncoherent_dma() 7970 return atomic_read(&kvm->arch.noncoherent_dma_count); kvm_arch_has_noncoherent_dma()
|
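Among the x86.c fragments above are kvm_add_async_pf_gfn() and kvm_async_pf_gfn_slot(), which keep outstanding async-page-fault gfns in a small open-addressed table where ~0 marks a free slot. A self-contained sketch of that probing scheme; the table size and the multiplicative hash are illustrative stand-ins for the kernel's hash_32()-based version.

#include <stdint.h>

#define APF_SLOTS 64	/* power of two so the mask below works */

static uint32_t apf_hash(uint64_t gfn)
{
	return (uint32_t)(gfn * 2654435761u) & (APF_SLOTS - 1);
}

static void apf_add(uint64_t *gfns, uint64_t gfn)
{
	uint32_t key = apf_hash(gfn);

	while (gfns[key] != ~0ULL)		/* linear probe to a free slot */
		key = (key + 1) & (APF_SLOTS - 1);
	gfns[key] = gfn;
}

int main(void)
{
	uint64_t gfns[APF_SLOTS];
	int i;

	for (i = 0; i < APF_SLOTS; i++)
		gfns[i] = ~0ULL;		/* ~0 marks an empty slot */
	apf_add(gfns, 0x1234);
	apf_add(gfns, 0x1234 + APF_SLOTS);	/* hashes equal, probes forward */
	return 0;
}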
H A D | lapic.c | 90 struct kvm_lapic *apic = vcpu->arch.apic; kvm_apic_pending_eoi() 166 mutex_lock(&kvm->arch.apic_map_lock); recalculate_apic_map() 172 struct kvm_lapic *apic = vcpu->arch.apic; kvm_for_each_vcpu() 204 old = rcu_dereference_protected(kvm->arch.apic_map, 205 lockdep_is_held(&kvm->arch.apic_map_lock)); 206 rcu_assign_pointer(kvm->arch.apic_map, new); 207 mutex_unlock(&kvm->arch.apic_map_lock); 275 struct kvm_lapic *apic = vcpu->arch.apic; kvm_apic_set_version() 339 struct kvm_lapic *apic = vcpu->arch.apic; kvm_apic_update_irr() 481 highest_irr = apic_find_highest_irr(vcpu->arch.apic); kvm_lapic_find_highest_irr() 493 struct kvm_lapic *apic = vcpu->arch.apic; kvm_apic_set_irq() 502 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val, pv_eoi_put_user() 509 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val, pv_eoi_get_user() 515 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; pv_eoi_enabled() 523 (unsigned long long)vcpu->arch.pv_eoi.msr_val); pv_eoi_get_pending() 531 (unsigned long long)vcpu->arch.pv_eoi.msr_val); pv_eoi_set_pending() 534 __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); pv_eoi_set_pending() 541 (unsigned long long)vcpu->arch.pv_eoi.msr_val); pv_eoi_clr_pending() 544 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); pv_eoi_clr_pending() 549 struct kvm_lapic *apic = vcpu->arch.apic; kvm_apic_update_tmr() 654 struct kvm_lapic *target = vcpu->arch.apic; kvm_apic_match_dest() 706 map = rcu_dereference(kvm->arch.apic_map); kvm_irq_delivery_to_apic_fast() 775 vcpu->arch.apic_arb_prio++; __apic_accept_irq() 798 vcpu->arch.pv.pv_unhalted = 1; __apic_accept_irq() 859 return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio; kvm_apic_compare_prio() 901 struct kvm_lapic *apic = vcpu->arch.apic; kvm_apic_set_eoi_accelerated() 975 if (apic->vcpu->arch.tpr_access_reporting) report_tpr_access() 1132 struct kvm_lapic *apic = vcpu->arch.apic; lapic_timer_int_injected() 1150 struct kvm_lapic *apic = vcpu->arch.apic; wait_lapic_expire() 1223 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz; start_apic_timer() 1255 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); apic_manage_nmi_watchdog() 1258 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); apic_manage_nmi_watchdog() 1429 apic_reg_write(vcpu->arch.apic, APIC_EOI, 0); kvm_lapic_set_eoi() 1441 apic_reg_read(vcpu->arch.apic, offset, 4, &val); kvm_apic_write_nodecode() 1444 apic_reg_write(vcpu->arch.apic, offset, val); kvm_apic_write_nodecode() 1450 struct kvm_lapic *apic = vcpu->arch.apic; kvm_free_lapic() 1452 if (!vcpu->arch.apic) kvm_free_lapic() 1457 if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)) kvm_free_lapic() 1477 struct kvm_lapic *apic = vcpu->arch.apic; kvm_get_lapic_tscdeadline_msr() 1488 struct kvm_lapic *apic = vcpu->arch.apic; kvm_set_lapic_tscdeadline_msr() 1501 struct kvm_lapic *apic = vcpu->arch.apic; kvm_lapic_set_tpr() 1517 tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI); kvm_lapic_get_cr8() 1524 u64 old_value = vcpu->arch.apic_base; kvm_lapic_set_base() 1525 struct kvm_lapic *apic = vcpu->arch.apic; kvm_lapic_set_base() 1529 vcpu->arch.apic_base = value; kvm_lapic_set_base() 1533 vcpu->arch.apic_base = value; kvm_lapic_set_base() 1554 apic->base_address = apic->vcpu->arch.apic_base & kvm_lapic_set_base() 1563 "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address); kvm_lapic_set_base() 1575 apic = vcpu->arch.apic; kvm_lapic_reset() 1611 vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP); 
kvm_lapic_reset() 1612 vcpu->arch.pv_eoi.msr_val = 0; kvm_lapic_reset() 1615 vcpu->arch.apic_arb_prio = 0; kvm_lapic_reset() 1616 vcpu->arch.apic_attention = 0; kvm_lapic_reset() 1621 vcpu->arch.apic_base, apic->base_address); kvm_lapic_reset() 1637 struct kvm_lapic *apic = vcpu->arch.apic; apic_has_pending_timer() 1663 struct kvm_lapic *apic = vcpu->arch.apic; kvm_apic_nmi_wd_deliver() 1699 vcpu->arch.apic = apic; kvm_create_lapic() 1717 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; kvm_create_lapic() 1734 struct kvm_lapic *apic = vcpu->arch.apic; kvm_apic_has_interrupt() 1750 u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0); kvm_apic_accept_pic_intr() 1753 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) kvm_apic_accept_pic_intr() 1763 struct kvm_lapic *apic = vcpu->arch.apic; kvm_inject_apic_timer_irqs() 1779 struct kvm_lapic *apic = vcpu->arch.apic; kvm_get_apic_interrupt() 1800 struct kvm_lapic *apic = vcpu->arch.apic; kvm_apic_post_state_restore() 1802 kvm_lapic_set_base(vcpu, vcpu->arch.apic_base); kvm_apic_post_state_restore() 1805 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); kvm_apic_post_state_restore() 1837 timer = &vcpu->arch.apic->lapic_timer.timer; __kvm_migrate_apic_timer() 1883 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) kvm_lapic_sync_from_vapic() 1884 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); kvm_lapic_sync_from_vapic() 1886 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) kvm_lapic_sync_from_vapic() 1889 kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, kvm_lapic_sync_from_vapic() 1892 apic_set_tpr(vcpu->arch.apic, data & 0xff); kvm_lapic_sync_from_vapic() 1925 struct kvm_lapic *apic = vcpu->arch.apic; kvm_lapic_sync_to_vapic() 1929 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) kvm_lapic_sync_to_vapic() 1941 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, kvm_lapic_sync_to_vapic() 1949 &vcpu->arch.apic->vapic_cache, kvm_lapic_set_vapic_addr() 1952 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); kvm_lapic_set_vapic_addr() 1954 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); kvm_lapic_set_vapic_addr() 1957 vcpu->arch.apic->vapic_addr = vapic_addr; kvm_lapic_set_vapic_addr() 1963 struct kvm_lapic *apic = vcpu->arch.apic; kvm_x2apic_msr_write() 1980 struct kvm_lapic *apic = vcpu->arch.apic; kvm_x2apic_msr_read() 2004 struct kvm_lapic *apic = vcpu->arch.apic; kvm_hv_vapic_msr_write() 2017 struct kvm_lapic *apic = vcpu->arch.apic; kvm_hv_vapic_msr_read() 2039 vcpu->arch.pv_eoi.msr_val = data; kvm_lapic_enable_pv_eoi() 2042 return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, kvm_lapic_enable_pv_eoi() 2048 struct kvm_lapic *apic = vcpu->arch.apic; kvm_apic_accept_events() 2061 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; kvm_apic_accept_events() 2063 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; kvm_apic_accept_events() 2066 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { kvm_apic_accept_events() 2073 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; kvm_apic_accept_events()
|
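recalculate_apic_map() in lapic.c builds a fresh map off to the side and publishes it with rcu_assign_pointer() under apic_map_lock. The userspace sketch below keeps the same build-then-publish shape but substitutes a C11 atomic exchange for the RCU publish and an immediate free() for the grace-period deferral, so it demonstrates the shape only, not the synchronization guarantees.

#include <stdatomic.h>
#include <stdlib.h>

struct apic_map {
	int nr_entries;		/* placeholder payload */
};

static _Atomic(struct apic_map *) live_map;

static void publish_map(struct apic_map *new_map)
{
	struct apic_map *old;

	/* release ordering: readers never observe a half-initialized map */
	old = atomic_exchange_explicit(&live_map, new_map,
				       memory_order_acq_rel);
	/* the kernel defers this past an RCU grace period; a direct free()
	 * is tolerable here only because this sketch has no readers */
	free(old);
}

int main(void)
{
	struct apic_map *m = malloc(sizeof(*m));

	if (!m)
		return 1;
	m->nr_entries = 1;
	publish_map(m);		/* old pointer is NULL on the first publish */
	publish_map(NULL);	/* retires m and leaves the sketch leak-free */
	return 0;
}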
H A D | iommu.c | 78 struct iommu_domain *domain = kvm->arch.iommu_domain; kvm_iommu_map_pages() 91 if (!kvm->arch.iommu_noncoherent) kvm_iommu_map_pages() 157 if (kvm->arch.iommu_noncoherent) kvm_iommu_map_memslots() 175 struct iommu_domain *domain = kvm->arch.iommu_domain; kvm_assign_device() 195 if (noncoherent != kvm->arch.iommu_noncoherent) { kvm_assign_device() 197 kvm->arch.iommu_noncoherent = noncoherent; kvm_assign_device() 215 struct iommu_domain *domain = kvm->arch.iommu_domain; kvm_deassign_device() 244 kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type); kvm_iommu_map_guest() 245 if (!kvm->arch.iommu_domain) { kvm_iommu_map_guest() 256 iommu_domain_free(kvm->arch.iommu_domain); kvm_iommu_map_guest() 257 kvm->arch.iommu_domain = NULL; kvm_iommu_map_guest() 279 domain = kvm->arch.iommu_domain; kvm_iommu_put_pages() 333 if (kvm->arch.iommu_noncoherent) kvm_iommu_unmap_memslots() 341 struct iommu_domain *domain = kvm->arch.iommu_domain; kvm_iommu_unmap_guest() 349 kvm->arch.iommu_domain = NULL; kvm_iommu_unmap_guest() 350 kvm->arch.iommu_noncoherent = false; kvm_iommu_unmap_guest()
|
/linux-4.1.27/arch/x86/um/vdso/ |
H A D | vdso.S | 7 .incbin "arch/x86/um/vdso/vdso.so"
|
/linux-4.1.27/arch/sh/kernel/vsyscall/ |
H A D | vsyscall-syscall.S | 7 .incbin "arch/sh/kernel/vsyscall/vsyscall-trapa.so"
|
/linux-4.1.27/arch/sh/include/cpu-sh4a/cpu/ |
H A D | serial.h | 4 /* arch/sh/kernel/cpu/sh4a/serial-sh7722.c */
|
/linux-4.1.27/arch/parisc/include/asm/ |
H A D | mckinley.h | 5 /* declared in arch/parisc/kernel/setup.c */
|
H A D | runway.h | 5 /* declared in arch/parisc/kernel/setup.c */
|
H A D | syscall.h | 44 int arch = AUDIT_ARCH_PARISC; syscall_get_arch() local 47 arch = AUDIT_ARCH_PARISC64; syscall_get_arch() 49 return arch; syscall_get_arch()
|
/linux-4.1.27/arch/blackfin/mach-bf561/ |
H A D | Makefile | 2 # arch/blackfin/mach-bf561/Makefile
|
/linux-4.1.27/arch/blackfin/mach-bf609/ |
H A D | Makefile | 2 # arch/blackfin/mach-bf609/Makefile
|
/linux-4.1.27/arch/m68k/amiga/ |
H A D | Makefile | 2 # Makefile for Linux arch/m68k/amiga source directory
|
/linux-4.1.27/arch/m68k/atari/ |
H A D | Makefile | 2 # Makefile for Linux arch/m68k/atari source directory
|
/linux-4.1.27/arch/m68k/mac/ |
H A D | Makefile | 2 # Makefile for Linux arch/m68k/mac source directory
|
/linux-4.1.27/arch/m68k/sun3/ |
H A D | Makefile | 2 # Makefile for Linux arch/m68k/sun3 source directory
|
/linux-4.1.27/tools/perf/util/include/asm/ |
H A D | cpufeature.h | 5 /* cpufeature.h ... dummy header file for including arch/x86/lib/memcpy_64.S */
|
H A D | alternative-asm.h | 4 /* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */
|
H A D | dwarf2.h | 5 /* dwarf2.h ... dummy header file for including arch/x86/lib/mem{cpy,set}_64.S */
|
/linux-4.1.27/arch/cris/arch-v32/kernel/ |
H A D | crisksyms.c | 3 #include <arch/dma.h> 4 #include <arch/intmem.h> 6 #include <arch/io.h>
|
H A D | cache.c | 3 #include <arch/cache.h> 4 #include <arch/hwregs/dma.h>
|
/linux-4.1.27/arch/avr32/ |
H A D | Makefile | 27 machdirs := $(patsubst %,arch/avr32/mach-%/, $(machine-y)) 31 head-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/head.o 32 head-y += arch/avr32/kernel/head.o 34 core-$(CONFIG_BOARD_ATSTK1000) += arch/avr32/boards/atstk1000/ 35 core-$(CONFIG_BOARD_ATNGW100_COMMON) += arch/avr32/boards/atngw100/ 36 core-$(CONFIG_BOARD_HAMMERHEAD) += arch/avr32/boards/hammerhead/ 37 core-$(CONFIG_BOARD_FAVR_32) += arch/avr32/boards/favr-32/ 38 core-$(CONFIG_BOARD_MERISC) += arch/avr32/boards/merisc/ 39 core-$(CONFIG_BOARD_MIMC200) += arch/avr32/boards/mimc200/ 40 core-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/ 41 core-y += arch/avr32/kernel/ 42 core-y += arch/avr32/mm/ 43 drivers-$(CONFIG_OPROFILE) += arch/avr32/oprofile/ 44 libs-y += arch/avr32/lib/ 50 boot := arch/$(ARCH)/boot/images
|
/linux-4.1.27/include/linux/ |
H A D | irq_cpustat.h | 6 * architecture. Some arch (like s390) have per cpu hardware pages and 15 * here, even ones that are arch dependent. That way we get common 16 * definitions instead of differing sets for each arch. 24 /* arch independent irq_stat fields */ 28 /* arch dependent irq_stat fields */
|
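irq_cpustat.h describes keeping one irq_stat record per CPU so that arch-independent and arch-dependent fields share a single definition. A toy model of that layout and the __IRQ_STAT() accessor; the one field shown is a placeholder for the real member list.

#include <stdio.h>

#define NR_CPUS 4

typedef struct {
	unsigned int __softirq_pending;	/* the arch-independent field */
} irq_cpustat_t;

static irq_cpustat_t irq_stat[NR_CPUS];

#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)

int main(void)
{
	__IRQ_STAT(0, __softirq_pending) |= 1u;
	printf("%u\n", __IRQ_STAT(0, __softirq_pending));	/* prints: 1 */
	return 0;
}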
/linux-4.1.27/arch/xtensa/ |
H A D | Makefile | 61 vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y)) 62 plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y)) 74 buildvar := $(shell test -e $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/) 75 buildplf := $(shell test -e $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/) 81 head-y := arch/xtensa/kernel/head.o 82 core-y += arch/xtensa/kernel/ arch/xtensa/mm/ 85 libs-y += arch/xtensa/lib/ $(LIBGCC) 86 drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/ 89 core-$(CONFIG_OF) += arch/xtensa/boot/dts/ 92 boot := arch/xtensa/boot 105 @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
|
/linux-4.1.27/arch/mn10300/ |
H A D | Makefile | 54 head-y := arch/mn10300/kernel/head.o 56 core-y += arch/mn10300/kernel/ arch/mn10300/mm/ 59 core-y += arch/mn10300/proc-$(PROCESSOR)/ 62 core-y += arch/mn10300/unit-$(UNIT)/ 64 libs-y += arch/mn10300/lib/ 66 drivers-$(CONFIG_OPROFILE) += arch/mn10300/oprofile/ 68 boot := arch/mn10300/boot 87 $(Q)$(MAKE) $(clean)=arch/mn10300/proc-mn103e010 88 $(Q)$(MAKE) $(clean)=arch/mn10300/unit-asb2303 89 $(Q)$(MAKE) $(clean)=arch/mn10300/unit-asb2305 92 echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' 98 KBUILD_CPPFLAGS += -I$(srctree)/arch/mn10300/proc-$(PROCESSOR)/include 99 KBUILD_CPPFLAGS += -I$(srctree)/arch/mn10300/unit-$(UNIT)/include
|
/linux-4.1.27/arch/s390/include/asm/ |
H A D | string.h | 14 #define __HAVE_ARCH_MEMCHR /* inline & arch function */ 15 #define __HAVE_ARCH_MEMCMP /* arch function */ 16 #define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */ 17 #define __HAVE_ARCH_MEMSCAN /* inline & arch function */ 18 #define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */ 19 #define __HAVE_ARCH_STRCAT /* inline & arch function */ 20 #define __HAVE_ARCH_STRCMP /* arch function */ 21 #define __HAVE_ARCH_STRCPY /* inline & arch function */ 22 #define __HAVE_ARCH_STRLCAT /* arch function */ 23 #define __HAVE_ARCH_STRLCPY /* arch function */ 24 #define __HAVE_ARCH_STRLEN /* inline & arch function */ 25 #define __HAVE_ARCH_STRNCAT /* arch function */ 26 #define __HAVE_ARCH_STRNCPY /* arch function */ 27 #define __HAVE_ARCH_STRNLEN /* inline & arch function */ 28 #define __HAVE_ARCH_STRRCHR /* arch function */ 29 #define __HAVE_ARCH_STRSTR /* arch function */ 31 /* Prototypes for non-inlined arch strings functions. */
|
/linux-4.1.27/arch/score/ |
H A D | Makefile | 2 # arch/score/Makefile 28 head-y := arch/score/kernel/head.o 29 libs-y += arch/score/lib/ 30 core-y += arch/score/kernel/ arch/score/mm/ 32 boot := arch/score/boot
|
/linux-4.1.27/arch/cris/boot/compressed/ |
H A D | Makefile | 2 # arch/cris/boot/compressed/Makefile 9 # -I$(srctree)/include/asm/arch 11 # -I$(srctree)/include/asm/arch 13 arch-$(CONFIG_ETRAX_ARCH_V10) = v10 14 arch-$(CONFIG_ETRAX_ARCH_V32) = v32 16 ldflags-y += -T $(srctree)/$(src)/decompress_$(arch-y).lds
|
/linux-4.1.27/arch/ia64/include/asm/ |
H A D | string.h | 13 #define __HAVE_ARCH_STRLEN 1 /* see arch/ia64/lib/strlen.S */ 14 #define __HAVE_ARCH_MEMSET 1 /* see arch/ia64/lib/memset.S */ 15 #define __HAVE_ARCH_MEMCPY 1 /* see arch/ia64/lib/memcpy.S */
|
/linux-4.1.27/arch/alpha/ |
H A D | Makefile | 39 head-y := arch/alpha/kernel/head.o 41 core-y += arch/alpha/kernel/ arch/alpha/mm/ 42 core-$(CONFIG_MATHEMU) += arch/alpha/math-emu/ 43 drivers-$(CONFIG_OPROFILE) += arch/alpha/oprofile/ 44 libs-y += arch/alpha/lib/ 46 # export what is needed by arch/alpha/boot/Makefile 50 boot := arch/alpha/boot 65 echo '* boot - Compressed kernel image (arch/alpha/boot/vmlinux.gz)' 66 echo ' bootimage - SRM bootable image (arch/alpha/boot/bootimage)' 67 echo ' bootpfile - BOOTP bootable image (arch/alpha/boot/bootpfile)' 68 echo ' bootpzfile - compressed kernel BOOTP image (arch/alpha/boot/bootpzfile)'
|
/linux-4.1.27/arch/um/include/asm/ |
H A D | kmap_types.h | 9 /* No more #include "asm/arch/kmap_types.h" ! */
|
H A D | mmu.h | 14 struct uml_arch_mm_context arch; member in struct:mm_context
|
/linux-4.1.27/arch/um/os-Linux/skas/ |
H A D | Makefile | 10 include arch/um/scripts/Makefile.rules
|
/linux-4.1.27/arch/microblaze/oprofile/ |
H A D | Makefile | 2 # arch/microblaze/oprofile/Makefile
|
/linux-4.1.27/fs/hostfs/ |
H A D | Makefile | 11 include arch/um/scripts/Makefile.rules
|
/linux-4.1.27/arch/s390/kernel/vdso32/ |
H A D | vdso32_wrapper.S | 10 .incbin "arch/s390/kernel/vdso32/vdso32.so"
|
/linux-4.1.27/arch/s390/kernel/vdso64/ |
H A D | vdso64_wrapper.S | 10 .incbin "arch/s390/kernel/vdso64/vdso64.so"
|
/linux-4.1.27/arch/sh/include/mach-x3proto/mach/ |
H A D | hardware.h | 6 /* arch/sh/boards/mach-x3proto/gpio.c */
|
/linux-4.1.27/arch/powerpc/kernel/vdso32/ |
H A D | vdso32_wrapper.S | 9 .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
|
/linux-4.1.27/arch/powerpc/kernel/vdso64/ |
H A D | vdso64_wrapper.S | 9 .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
|
/linux-4.1.27/arch/blackfin/lib/ |
H A D | Makefile | 2 # arch/blackfin/lib/Makefile
|
/linux-4.1.27/arch/blackfin/oprofile/ |
H A D | Makefile | 2 # arch/blackfin/oprofile/Makefile
|
/linux-4.1.27/arch/c6x/ |
H A D | Makefile | 2 # linux/arch/c6x/Makefile 30 head-y := arch/c6x/kernel/head.o 31 core-y += arch/c6x/kernel/ arch/c6x/mm/ arch/c6x/platforms/ 32 libs-y += arch/c6x/lib/ 37 boot := arch/$(ARCH)/boot 59 @echo ' vmlinux.bin - Binary kernel image (arch/$(ARCH)/boot/vmlinux.bin)' 60 @echo ' dtbImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
|
/linux-4.1.27/arch/c6x/lib/ |
H A D | Makefile | 2 # Makefile for arch/c6x/lib/
|
/linux-4.1.27/arch/c6x/platforms/ |
H A D | Makefile | 2 # Makefile for arch/c6x/platforms
|
/linux-4.1.27/arch/arm/mach-iop32x/include/mach/ |
H A D | glantank.h | 2 * arch/arm/mach-iop32x/include/mach/glantank.h
|
H A D | n2100.h | 2 * arch/arm/mach-iop32x/include/mach/n2100.h
|
H A D | hardware.h | 2 * arch/arm/mach-iop32x/include/mach/hardware.h 19 * arch/arm/plat-iop/pci.c.
|
/linux-4.1.27/arch/arm/mach-mmp/include/mach/ |
H A D | dma.h | 2 * linux/arch/arm/mach-mmp/include/mach/dma.h
|
/linux-4.1.27/tools/perf/util/include/linux/ |
H A D | linkage.h | 5 /* linkage.h ... for including arch/x86/lib/memcpy_64.S */
|
/linux-4.1.27/arch/mips/kvm/ |
H A D | interrupt.c | 27 set_bit(priority, &vcpu->arch.pending_exceptions); kvm_mips_queue_irq() 32 clear_bit(priority, &vcpu->arch.pending_exceptions); kvm_mips_dequeue_irq() 42 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); kvm_mips_queue_timer_int_cb() 51 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); kvm_mips_dequeue_timer_int_cb() 67 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); kvm_mips_queue_io_int_cb() 73 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1)); kvm_mips_queue_io_int_cb() 78 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2)); kvm_mips_queue_io_int_cb() 95 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); kvm_mips_dequeue_io_int_cb() 100 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1)); kvm_mips_dequeue_io_int_cb() 105 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2)); kvm_mips_dequeue_io_int_cb() 122 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_irq_deliver_cb() local 123 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_irq_deliver_cb() 170 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_irq_deliver_cb() 178 kvm_debug("Delivering INT @ pc %#lx\n", arch->pc); kvm_mips_irq_deliver_cb() 188 arch->pc = KVM_GUEST_KSEG0 + 0x200; kvm_mips_irq_deliver_cb() 190 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_irq_deliver_cb() 192 clear_bit(priority, &vcpu->arch.pending_exceptions); kvm_mips_irq_deliver_cb() 206 unsigned long *pending = &vcpu->arch.pending_exceptions; kvm_mips_deliver_interrupts() 207 unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr; kvm_mips_deliver_interrupts() 241 return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions); kvm_mips_pending_timer()
|
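The interrupt.c hits above show the whole queueing model: pending guest exceptions live in a single unsigned long bitmask indexed by priority, set when an interrupt is queued and cleared once it is delivered. A simplified C sketch with the types reduced to the one field involved:

#include <linux/bitops.h>

struct vcpu_arch_sketch {
	unsigned long pending_exceptions;	/* one bit per priority */
};

static inline void queue_irq(struct vcpu_arch_sketch *arch,
			     unsigned int priority)
{
	set_bit(priority, &arch->pending_exceptions);	/* mark pending */
}

static inline void dequeue_irq(struct vcpu_arch_sketch *arch,
			       unsigned int priority)
{
	clear_bit(priority, &arch->pending_exceptions);	/* delivered */
}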
H A D | emulate.c | 47 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_compute_return_epc() local 65 arch->gprs[insn.r_format.rd] = epc + 8; kvm_compute_return_epc() 68 nextpc = arch->gprs[insn.r_format.rs]; kvm_compute_return_epc() 82 if ((long)arch->gprs[insn.i_format.rs] < 0) kvm_compute_return_epc() 91 if ((long)arch->gprs[insn.i_format.rs] >= 0) kvm_compute_return_epc() 100 arch->gprs[31] = epc + 8; kvm_compute_return_epc() 101 if ((long)arch->gprs[insn.i_format.rs] < 0) kvm_compute_return_epc() 110 arch->gprs[31] = epc + 8; kvm_compute_return_epc() 111 if ((long)arch->gprs[insn.i_format.rs] >= 0) kvm_compute_return_epc() 134 arch->gprs[31] = instpc + 8; kvm_compute_return_epc() 146 if (arch->gprs[insn.i_format.rs] == kvm_compute_return_epc() 147 arch->gprs[insn.i_format.rt]) kvm_compute_return_epc() 156 if (arch->gprs[insn.i_format.rs] != kvm_compute_return_epc() 157 arch->gprs[insn.i_format.rt]) kvm_compute_return_epc() 167 if ((long)arch->gprs[insn.i_format.rs] <= 0) kvm_compute_return_epc() 177 if ((long)arch->gprs[insn.i_format.rs] > 0) kvm_compute_return_epc() 207 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc); update_pc() 211 vcpu->arch.pc = branch_pc; update_pc() 213 vcpu->arch.pc); update_pc() 216 vcpu->arch.pc += 4; update_pc() 218 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); update_pc() 233 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_count_disabled() 235 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || kvm_mips_count_disabled() 242 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias. 252 delta = now_ns + vcpu->arch.count_dyn_bias; kvm_mips_ktime_to_count() 254 if (delta >= vcpu->arch.count_period) { kvm_mips_ktime_to_count() 256 periods = div64_s64(now_ns, vcpu->arch.count_period); kvm_mips_ktime_to_count() 257 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period; kvm_mips_ktime_to_count() 259 delta = now_ns + vcpu->arch.count_dyn_bias; kvm_mips_ktime_to_count() 272 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC); kvm_mips_ktime_to_count() 287 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) kvm_mips_count_time() 288 return vcpu->arch.count_resume; kvm_mips_count_time() 305 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_read_count_running() 311 count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); kvm_mips_read_count_running() 327 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer); kvm_mips_read_count_running() 328 threshold = ktime_add_ns(now, vcpu->arch.count_period / 4); kvm_mips_read_count_running() 334 running = hrtimer_cancel(&vcpu->arch.comparecount_timer); kvm_mips_read_count_running() 345 vcpu->arch.count_period); kvm_mips_read_count_running() 346 hrtimer_start(&vcpu->arch.comparecount_timer, expires, kvm_mips_read_count_running() 365 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_read_count() 396 hrtimer_cancel(&vcpu->arch.comparecount_timer); kvm_mips_freeze_hrtimer() 424 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_resume_hrtimer() 432 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); kvm_mips_resume_hrtimer() 436 hrtimer_cancel(&vcpu->arch.comparecount_timer); kvm_mips_resume_hrtimer() 437 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS); kvm_mips_resume_hrtimer() 449 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_write_count() 454 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); kvm_mips_write_count() 474 vcpu->arch.count_hz = 100*1000*1000; kvm_mips_init_count() 475 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, kvm_mips_init_count() 476 vcpu->arch.count_hz); kvm_mips_init_count() 477 vcpu->arch.count_dyn_bias = 0; kvm_mips_init_count() 496 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_set_count_hz() 505 if (vcpu->arch.count_hz == count_hz) kvm_mips_set_count_hz() 518 vcpu->arch.count_hz = count_hz; kvm_mips_set_count_hz() 519 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); kvm_mips_set_count_hz() 520 vcpu->arch.count_dyn_bias = 0; kvm_mips_set_count_hz() 523 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); kvm_mips_set_count_hz() 543 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_write_compare() 587 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_count_disable() 592 hrtimer_cancel(&vcpu->arch.comparecount_timer); kvm_mips_count_disable() 614 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_count_disable_cause() 617 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) kvm_mips_count_disable_cause() 634 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_count_enable_cause() 660 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_set_count_ctl() 661 s64 changed = count_ctl ^ vcpu->arch.count_ctl; kvm_mips_set_count_ctl() 671 vcpu->arch.count_ctl = count_ctl; kvm_mips_set_count_ctl() 679 vcpu->arch.count_resume = ktime_get(); kvm_mips_set_count_ctl() 682 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu); kvm_mips_set_count_ctl() 692 vcpu->arch.count_hz); kvm_mips_set_count_ctl() 693 expire = ktime_add_ns(vcpu->arch.count_resume, delta); kvm_mips_set_count_ctl() 730 vcpu->arch.count_resume = ns_to_ktime(count_resume); kvm_mips_set_count_resume() 745 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer, kvm_mips_count_timeout() 746 vcpu->arch.count_period); kvm_mips_count_timeout() 752 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emul_eret() 756 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, kvm_mips_emul_eret() 759 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); kvm_mips_emul_eret() 763 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); kvm_mips_emul_eret() 766 vcpu->arch.pc); kvm_mips_emul_eret() 775 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, kvm_mips_emul_wait() 776 vcpu->arch.pending_exceptions); kvm_mips_emul_wait() 780 if (!vcpu->arch.pending_exceptions) { kvm_mips_emul_wait() 781 vcpu->arch.wait = 1; kvm_mips_emul_wait() 803 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emul_tlbr() 804 uint32_t pc = vcpu->arch.pc; kvm_mips_emul_tlbr() 813 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emul_tlbwi() 816 uint32_t pc = vcpu->arch.pc; kvm_mips_emul_tlbwi() 828 tlb = &vcpu->arch.guest_tlb[index]; kvm_mips_emul_tlbwi() 852 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emul_tlbwr() 854 uint32_t pc = vcpu->arch.pc; kvm_mips_emul_tlbwr() 860 tlb = &vcpu->arch.guest_tlb[index]; kvm_mips_emul_tlbwr() 883 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emul_tlbp() 885 uint32_t pc = vcpu->arch.pc; kvm_mips_emul_tlbp() 910 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) kvm_mips_config1_wrmask() 929 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) kvm_mips_config3_wrmask() 960 if (kvm_mips_guest_has_msa(&vcpu->arch)) kvm_mips_config5_wrmask() 967 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { kvm_mips_config5_wrmask() 980 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_CP0() 983 uint32_t pc = vcpu->arch.pc; kvm_mips_emulate_CP0() 990 curr_pc = vcpu->arch.pc; kvm_mips_emulate_CP0() 1036 vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu); kvm_mips_emulate_CP0() 1038 vcpu->arch.gprs[rt] = 0x0; kvm_mips_emulate_CP0() 1043 vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; kvm_mips_emulate_CP0() 1051 ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n", kvm_mips_emulate_CP0() 1052 pc, rd, sel, rt, vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0() 1057 vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; kvm_mips_emulate_CP0() 1065 && (vcpu->arch.gprs[rt] >= kvm_mips_emulate_CP0() 1068 vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0() 1077 vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0() 1082 vcpu->arch.gprs[rt] & ASID_MASK; kvm_mips_emulate_CP0() 1083 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) && kvm_mips_emulate_CP0() 1089 vcpu->arch.gprs[rt] kvm_mips_emulate_CP0() 1096 vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0() 1100 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0() 1105 vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0() 1110 vcpu->arch.gprs[rt], kvm_mips_emulate_CP0() 1116 val = vcpu->arch.gprs[rt]; kvm_mips_emulate_CP0() 1127 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) kvm_mips_emulate_CP0() 1158 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) kvm_mips_emulate_CP0() 1169 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) kvm_mips_emulate_CP0() 1181 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) kvm_mips_emulate_CP0() 1188 val = vcpu->arch.gprs[rt]; kvm_mips_emulate_CP0() 1204 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) kvm_mips_emulate_CP0() 1214 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) kvm_mips_emulate_CP0() 1225 new_cause = vcpu->arch.gprs[rt]; kvm_mips_emulate_CP0() 1237 cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; kvm_mips_emulate_CP0() 1249 vcpu->arch.pc, rt, rd, sel); kvm_mips_emulate_CP0() 1258 vcpu->arch.gprs[rt] = kvm_mips_emulate_CP0() 1264 vcpu->arch.pc); kvm_mips_emulate_CP0() 1268 vcpu->arch.pc); kvm_mips_emulate_CP0() 1289 vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0() 1290 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; kvm_mips_emulate_CP0() 1295 vcpu->arch.pc, copz); kvm_mips_emulate_CP0() 1304 vcpu->arch.pc = curr_pc; kvm_mips_emulate_CP0() 1330 curr_pc = vcpu->arch.pc; kvm_mips_emulate_store() 1348 kvm_mips_callbacks->gva_to_gpa(vcpu->arch. kvm_mips_emulate_store() 1358 *(u8 *) data = vcpu->arch.gprs[rt]; kvm_mips_emulate_store() 1360 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], kvm_mips_emulate_store() 1372 kvm_mips_callbacks->gva_to_gpa(vcpu->arch. kvm_mips_emulate_store() 1383 *(uint32_t *) data = vcpu->arch.gprs[rt]; kvm_mips_emulate_store() 1386 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, kvm_mips_emulate_store() 1387 vcpu->arch.gprs[rt], *(uint32_t *) data); kvm_mips_emulate_store() 1397 kvm_mips_callbacks->gva_to_gpa(vcpu->arch. kvm_mips_emulate_store() 1408 *(uint16_t *) data = vcpu->arch.gprs[rt]; kvm_mips_emulate_store() 1411 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, kvm_mips_emulate_store() 1412 vcpu->arch.gprs[rt], *(uint32_t *) data); kvm_mips_emulate_store() 1423 vcpu->arch.pc = curr_pc; kvm_mips_emulate_store() 1441 vcpu->arch.pending_load_cause = cause; kvm_mips_emulate_load() 1442 vcpu->arch.io_gpr = rt; kvm_mips_emulate_load() 1454 kvm_mips_callbacks->gva_to_gpa(vcpu->arch. kvm_mips_emulate_load() 1477 kvm_mips_callbacks->gva_to_gpa(vcpu->arch. kvm_mips_emulate_load() 1506 kvm_mips_callbacks->gva_to_gpa(vcpu->arch. kvm_mips_emulate_load() 1543 if (gfn >= kvm->arch.guest_pmap_npages) { kvm_mips_sync_icache() 1549 pfn = kvm->arch.guest_pmap[gfn]; kvm_mips_sync_icache() 1577 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_cache() 1580 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_cache() local 1588 curr_pc = vcpu->arch.pc; kvm_mips_emulate_cache() 1599 va = arch->gprs[base] + offset; kvm_mips_emulate_cache() 1602 cache, op, base, arch->gprs[base], offset); kvm_mips_emulate_cache() 1611 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, kvm_mips_emulate_cache() 1612 arch->gprs[base], offset); kvm_mips_emulate_cache() 1651 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); kvm_mips_emulate_cache() 1652 vcpu->arch.host_cp0_badvaddr = va; kvm_mips_emulate_cache() 1658 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; kvm_mips_emulate_cache() 1680 cache, op, base, arch->gprs[base], offset); kvm_mips_emulate_cache() 1711 cache, op, base, arch->gprs[base], offset); kvm_mips_emulate_cache() 1721 vcpu->arch.pc = curr_pc; kvm_mips_emulate_cache() 1778 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_syscall() 1779 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_syscall() local 1784 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_syscall() 1792 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); kvm_mips_emulate_syscall() 1798 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_syscall() 1813 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_tlbmiss_ld() 1814 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_tlbmiss_ld() local 1815 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | kvm_mips_emulate_tlbmiss_ld() 1820 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_tlbmiss_ld() 1829 arch->pc); kvm_mips_emulate_tlbmiss_ld() 1832 arch->pc = KVM_GUEST_KSEG0 + 0x0; kvm_mips_emulate_tlbmiss_ld() 1836 arch->pc); kvm_mips_emulate_tlbmiss_ld() 1838 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_tlbmiss_ld() 1845 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); kvm_mips_emulate_tlbmiss_ld() 1859 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_tlbinv_ld() 1860 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_tlbinv_ld() local 1862 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | kvm_mips_emulate_tlbinv_ld() 1867 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_tlbinv_ld() 1876 arch->pc); kvm_mips_emulate_tlbinv_ld() 1879 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_tlbinv_ld() 1883 arch->pc); kvm_mips_emulate_tlbinv_ld() 1884 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_tlbinv_ld() 1891 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); kvm_mips_emulate_tlbinv_ld() 1905 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_tlbmiss_st() 1906 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_tlbmiss_st() local 1907 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | kvm_mips_emulate_tlbmiss_st() 1912 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_tlbmiss_st() 1921 arch->pc); kvm_mips_emulate_tlbmiss_st() 1924 arch->pc = KVM_GUEST_KSEG0 + 0x0; kvm_mips_emulate_tlbmiss_st() 1927 arch->pc); kvm_mips_emulate_tlbmiss_st() 1928 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_tlbmiss_st() 1935 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); kvm_mips_emulate_tlbmiss_st() 1949 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_tlbinv_st() 1950 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_tlbinv_st() local 1951 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | kvm_mips_emulate_tlbinv_st() 1956 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_tlbinv_st() 1965 arch->pc); kvm_mips_emulate_tlbinv_st() 1968 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_tlbinv_st() 1971 arch->pc); kvm_mips_emulate_tlbinv_st() 1972 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_tlbinv_st() 1979 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); kvm_mips_emulate_tlbinv_st() 1995 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_handle_tlbmod() 1996 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | kvm_mips_handle_tlbmod() 2004 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr); kvm_mips_handle_tlbmod() 2022 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_tlbmod() 2023 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | kvm_mips_emulate_tlbmod() 2025 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_tlbmod() local 2029 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_tlbmod() 2038 arch->pc); kvm_mips_emulate_tlbmod() 2040 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_tlbmod() 2043 arch->pc); kvm_mips_emulate_tlbmod() 2044 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_tlbmod() 2050 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); kvm_mips_emulate_tlbmod() 2064 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_fpu_exc() 2065 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_fpu_exc() local 2069 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_fpu_exc() 2079 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_fpu_exc() 2093 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_ri_exc() 2094 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_ri_exc() local 2099 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_ri_exc() 2107 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); kvm_mips_emulate_ri_exc() 2113 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_ri_exc() 2128 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_bp_exc() 2129 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_bp_exc() local 2134 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_bp_exc() 2142 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); kvm_mips_emulate_bp_exc() 2148 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_bp_exc() 2163 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_trap_exc() 2164 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_trap_exc() local 2169 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_trap_exc() 2177 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); kvm_mips_emulate_trap_exc() 2183 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_trap_exc() 2198 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_msafpe_exc() 2199 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_msafpe_exc() local 2204 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_msafpe_exc() 2212 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); kvm_mips_emulate_msafpe_exc() 2218 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_msafpe_exc() 2233 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_fpe_exc() 2234 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_fpe_exc() local 2239 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_fpe_exc() 2247 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); kvm_mips_emulate_fpe_exc() 2253 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_fpe_exc() 2268 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_msadis_exc() 2269 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_msadis_exc() local 2274 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_msadis_exc() 2282 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); kvm_mips_emulate_msadis_exc() 2288 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_msadis_exc() 2317 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_handle_ri() 2318 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_handle_ri() local 2327 curr_pc = vcpu->arch.pc; kvm_mips_handle_ri() 2355 arch->gprs[rt] = 0; kvm_mips_handle_ri() 2358 arch->gprs[rt] = min(current_cpu_data.dcache.linesz, kvm_mips_handle_ri() 2362 arch->gprs[rt] = kvm_mips_read_count(vcpu); kvm_mips_handle_ri() 2368 arch->gprs[rt] = 1; kvm_mips_handle_ri() 2371 arch->gprs[rt] = 2; kvm_mips_handle_ri() 2375 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); kvm_mips_handle_ri() 2394 vcpu->arch.pc = curr_pc; kvm_mips_handle_ri() 2401 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; kvm_mips_complete_mmio_load() 2410 er = update_pc(vcpu, vcpu->arch.pending_load_cause); kvm_mips_complete_mmio_load() 2434 if (vcpu->arch.pending_load_cause & CAUSEF_BD) kvm_mips_complete_mmio_load() 2436 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, kvm_mips_complete_mmio_load() 2449 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_emulate_exc() 2450 struct kvm_vcpu_arch *arch = &vcpu->arch; kvm_mips_emulate_exc() local 2455 kvm_write_c0_guest_epc(cop0, arch->pc); kvm_mips_emulate_exc() 2467 arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_mips_emulate_exc() 2468 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); kvm_mips_emulate_exc() 2488 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; kvm_mips_check_privilege() 2584 unsigned long va = vcpu->arch.host_cp0_badvaddr; kvm_mips_handle_tlbmiss() 2588 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); kvm_mips_handle_tlbmiss() 2599 (vcpu->arch.cop0) & ASID_MASK)); kvm_mips_handle_tlbmiss() 2611 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; kvm_mips_handle_tlbmiss()
|
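Many of the emulate.c hits above belong to the CP0_Count emulation: a monotonic nanosecond timestamp is offset by count_dyn_bias and scaled by count_hz, and the bias is rebased every count_period so the intermediate multiplication stays in range. A plain-C sketch of the conversion, with the kernel's div_u64() replaced by ordinary division:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t sketch_ktime_to_count(int64_t now_ns, int64_t count_dyn_bias,
				      uint32_t count_hz)
{
	/* ns since the last rebase; kept under one 32-bit count period */
	int64_t delta = now_ns + count_dyn_bias;

	return (uint32_t)((uint64_t)delta * count_hz / NSEC_PER_SEC);
}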
H A D | mips.c | 67 vcpu->arch.guest_kernel_asid[i] = 0; for_each_possible_cpu() 68 vcpu->arch.guest_user_asid[i] = 0; for_each_possible_cpu() 80 return !!(vcpu->arch.pending_exceptions); kvm_arch_vcpu_runnable() 114 kvm->arch.commpage_tlb = wired; kvm_mips_init_tlbs() 117 kvm->arch.commpage_tlb); kvm_mips_init_tlbs() 146 for (i = 0; i < kvm->arch.guest_pmap_npages; i++) { kvm_mips_free_vcpus() 147 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) kvm_mips_free_vcpus() 148 kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]); kvm_mips_free_vcpus() 150 kfree(kvm->arch.guest_pmap); kvm_mips_free_vcpus() 220 if (!kvm->arch.guest_pmap) { kvm_arch_commit_memory_region() 225 kvm->arch.guest_pmap_npages = npages; kvm_arch_commit_memory_region() 226 kvm->arch.guest_pmap = kvm_arch_commit_memory_region() 229 if (!kvm->arch.guest_pmap) { kvm_arch_commit_memory_region() 235 npages, kvm->arch.guest_pmap); kvm_arch_commit_memory_region() 239 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; kvm_arch_commit_memory_region() 274 vcpu->arch.host_ebase = (void *)read_c0_ebase(); kvm_arch_vcpu_create() 286 vcpu->arch.guest_ebase = gebase; kvm_arch_vcpu_create() 323 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); kvm_arch_vcpu_create() 325 if (!vcpu->arch.kseg0_commpage) { kvm_arch_vcpu_create() 330 kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); kvm_arch_vcpu_create() 334 vcpu->arch.last_sched_cpu = -1; kvm_arch_vcpu_create() 356 hrtimer_cancel(&vcpu->arch.comparecount_timer); kvm_arch_vcpu_free() 362 kfree(vcpu->arch.guest_ebase); kvm_arch_vcpu_free() 363 kfree(vcpu->arch.kseg0_commpage); kvm_arch_vcpu_free() 397 kvm_read_c0_guest_cause(vcpu->arch.cop0)); kvm_arch_vcpu_ioctl_run() 444 dvcpu->arch.wait = 0; kvm_vcpu_ioctl_interrupt() 533 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_get_reg() 534 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; kvm_mips_get_reg() 543 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; kvm_mips_get_reg() 546 v = (long)vcpu->arch.hi; kvm_mips_get_reg() 549 v = (long)vcpu->arch.lo; kvm_mips_get_reg() 552 v = (long)vcpu->arch.pc; kvm_mips_get_reg() 557 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) kvm_mips_get_reg() 567 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) kvm_mips_get_reg() 576 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) kvm_mips_get_reg() 581 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) kvm_mips_get_reg() 588 if (!kvm_mips_guest_has_msa(&vcpu->arch)) kvm_mips_get_reg() 605 if (!kvm_mips_guest_has_msa(&vcpu->arch)) kvm_mips_get_reg() 610 if (!kvm_mips_guest_has_msa(&vcpu->arch)) kvm_mips_get_reg() 712 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_set_reg() 713 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; kvm_mips_set_reg() 744 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; kvm_mips_set_reg() 747 vcpu->arch.hi = v; kvm_mips_set_reg() 750 vcpu->arch.lo = v; kvm_mips_set_reg() 753 vcpu->arch.pc = v; kvm_mips_set_reg() 758 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) kvm_mips_set_reg() 768 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) kvm_mips_set_reg() 777 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) kvm_mips_set_reg() 782 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) kvm_mips_set_reg() 789 if (!kvm_mips_guest_has_msa(&vcpu->arch)) kvm_mips_set_reg() 803 if (!kvm_mips_guest_has_msa(&vcpu->arch)) kvm_mips_set_reg() 808 if (!kvm_mips_guest_has_msa(&vcpu->arch)) kvm_mips_set_reg() 884 vcpu->arch.fpu_enabled = true; kvm_vcpu_ioctl_enable_cap() 887 vcpu->arch.msa_enabled = true; kvm_vcpu_ioctl_enable_cap() 1116 kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); kvm_arch_vcpu_dump_regs() 1117 kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); kvm_arch_vcpu_dump_regs() 1121 vcpu->arch.gprs[i], kvm_arch_vcpu_dump_regs() 1122 vcpu->arch.gprs[i + 1], kvm_arch_vcpu_dump_regs() 1123 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); kvm_arch_vcpu_dump_regs() 1125 kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); kvm_arch_vcpu_dump_regs() 1126 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); kvm_arch_vcpu_dump_regs() 1128 cop0 = vcpu->arch.cop0; kvm_arch_vcpu_dump_regs() 1142 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) kvm_arch_vcpu_ioctl_set_regs() 1143 vcpu->arch.gprs[i] = regs->gpr[i]; kvm_arch_vcpu_ioctl_set_regs() 1144 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ kvm_arch_vcpu_ioctl_set_regs() 1145 vcpu->arch.hi = regs->hi; kvm_arch_vcpu_ioctl_set_regs() 1146 vcpu->arch.lo = regs->lo; kvm_arch_vcpu_ioctl_set_regs() 1147 vcpu->arch.pc = regs->pc; kvm_arch_vcpu_ioctl_set_regs() 1156 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) kvm_arch_vcpu_ioctl_get_regs() 1157 regs->gpr[i] = vcpu->arch.gprs[i]; kvm_arch_vcpu_ioctl_get_regs() 1159 regs->hi = vcpu->arch.hi; kvm_arch_vcpu_ioctl_get_regs() 1160 regs->lo = vcpu->arch.lo; kvm_arch_vcpu_ioctl_get_regs() 1161 regs->pc = vcpu->arch.pc; kvm_arch_vcpu_ioctl_get_regs() 1172 vcpu->arch.wait = 0; kvm_mips_comparecount_func() 1182 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); kvm_mips_comparecount_wakeup() 1190 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, kvm_arch_vcpu_init() 1192 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; kvm_arch_vcpu_init() 1224 uint32_t cause = vcpu->arch.host_cp0_cause; kvm_mips_handle_exit() 1226 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; kvm_mips_handle_exit() 1227 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; kvm_mips_handle_exit() 1294 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, kvm_mips_handle_exit() 1368 kvm_read_c0_guest_status(vcpu->arch.cop0)); kvm_mips_handle_exit() 1402 if (kvm_mips_guest_has_fpu(&vcpu->arch) && kvm_mips_handle_exit() 1404 __kvm_restore_fcsr(&vcpu->arch); kvm_mips_handle_exit() 1406 if (kvm_mips_guest_has_msa(&vcpu->arch) && kvm_mips_handle_exit() 1408 __kvm_restore_msacsr(&vcpu->arch); kvm_mips_handle_exit() 1420 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_own_fpu() 1438 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) kvm_own_fpu() 1453 if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) { kvm_own_fpu() 1454 __kvm_restore_fpu(&vcpu->arch); kvm_own_fpu() 1455 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; kvm_own_fpu() 1465 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_own_msa() 1474 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { kvm_own_msa() 1482 (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | kvm_own_msa() 1497 switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) { kvm_own_msa() 1502 __kvm_restore_msa_upper(&vcpu->arch); kvm_own_msa() 1503 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; kvm_own_msa() 1507 __kvm_restore_msa(&vcpu->arch); kvm_own_msa() 1508 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; kvm_own_msa() 1509 if (kvm_mips_guest_has_fpu(&vcpu->arch)) kvm_own_msa() 1510 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; kvm_own_msa() 1524 if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { kvm_drop_fpu() 1526 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA; kvm_drop_fpu() 1528 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { kvm_drop_fpu() 1530 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; kvm_drop_fpu() 1546 if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { kvm_lose_fpu() 1550 __kvm_save_msa(&vcpu->arch); kvm_lose_fpu() 1554 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) kvm_lose_fpu() 1556 vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA); kvm_lose_fpu() 1557 } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { kvm_lose_fpu() 1561 __kvm_save_fpu(&vcpu->arch); kvm_lose_fpu() 1562 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; kvm_lose_fpu()
|
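The fpu_inuse logic in the mips.c hits above is lazy FPU/MSA ownership: guest register state is loaded into hardware on first use and written back only when the in-use flag says it is live. A reduced sketch; the flag names echo the kernel's but the values are illustrative, and the save/restore helpers are stubbed as comments:

#define SKETCH_FPU_FPU 0x1
#define SKETCH_FPU_MSA 0x2

struct vcpu_fpu_sketch {
	unsigned int fpu_inuse;		/* which state is live in hardware */
};

static void sketch_own_fpu(struct vcpu_fpu_sketch *arch)
{
	if (!(arch->fpu_inuse & SKETCH_FPU_FPU)) {
		/* __kvm_restore_fpu() would reload guest registers here */
		arch->fpu_inuse |= SKETCH_FPU_FPU;
	}
}

static void sketch_lose_fpu(struct vcpu_fpu_sketch *arch)
{
	if (arch->fpu_inuse & SKETCH_FPU_FPU) {
		/* __kvm_save_fpu() would spill guest registers here */
		arch->fpu_inuse &= ~SKETCH_FPU_FPU;
	}
}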
H A D | commpage.c | 29 struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; kvm_mips_commpage_init() 32 vcpu->arch.cop0 = &page->cop0; kvm_mips_commpage_init()
|
H A D | trap_emul.c | 42 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_trap_emul_handle_cop_unusable() 44 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; kvm_trap_emul_handle_cop_unusable() 45 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_cop_unusable() 51 if (!kvm_mips_guest_has_fpu(&vcpu->arch) || kvm_trap_emul_handle_cop_unusable() 91 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; kvm_trap_emul_handle_tlb_mod() 92 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; kvm_trap_emul_handle_tlb_mod() 93 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_tlb_mod() 135 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; kvm_trap_emul_handle_tlb_st_miss() 136 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; kvm_trap_emul_handle_tlb_st_miss() 137 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_tlb_st_miss() 164 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { kvm_trap_emul_handle_tlb_st_miss() 182 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; kvm_trap_emul_handle_tlb_ld_miss() 183 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; kvm_trap_emul_handle_tlb_ld_miss() 184 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_tlb_ld_miss() 197 vcpu->arch.pc, badvaddr); kvm_trap_emul_handle_tlb_ld_miss() 218 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { kvm_trap_emul_handle_tlb_ld_miss() 236 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; kvm_trap_emul_handle_addr_err_st() 237 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; kvm_trap_emul_handle_addr_err_st() 238 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_addr_err_st() 266 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; kvm_trap_emul_handle_addr_err_ld() 267 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; kvm_trap_emul_handle_addr_err_ld() 268 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_addr_err_ld() 296 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; kvm_trap_emul_handle_syscall() 297 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_syscall() 314 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; kvm_trap_emul_handle_res_inst() 315 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_res_inst() 332 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; kvm_trap_emul_handle_break() 333 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_break() 350 uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; kvm_trap_emul_handle_trap() 351 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_trap() 368 uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; kvm_trap_emul_handle_msa_fpe() 369 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_msa_fpe() 386 uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; kvm_trap_emul_handle_fpe() 387 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_fpe() 409 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_trap_emul_handle_msa_disabled() 411 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; kvm_trap_emul_handle_msa_disabled() 412 unsigned long cause = vcpu->arch.host_cp0_cause; kvm_trap_emul_handle_msa_disabled() 416 if (!kvm_mips_guest_has_msa(&vcpu->arch) || kvm_trap_emul_handle_msa_disabled() 460 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_trap_emul_vcpu_setup() 523 *v = vcpu->arch.count_ctl; kvm_trap_emul_get_one_reg() 526 *v = ktime_to_ns(vcpu->arch.count_resume); kvm_trap_emul_get_one_reg() 529 *v = vcpu->arch.count_hz; kvm_trap_emul_get_one_reg() 541 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_trap_emul_set_one_reg()
|
H A D | tlb.c | 52 return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; kvm_mips_get_kernel_asid() 57 return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; kvm_mips_get_user_asid() 62 return vcpu->kvm->arch.commpage_tlb; kvm_mips_get_commpage_asid() 118 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_mips_dump_guest_tlbs() 126 tlb = vcpu->arch.guest_tlb[i]; kvm_mips_dump_guest_tlbs() 149 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) kvm_mips_map_page() 161 kvm->arch.guest_pmap[gfn] = pfn; kvm_mips_map_page() 183 if (gfn >= kvm->arch.guest_pmap_npages) { kvm_mips_translate_guest_kseg0_to_hpa() 192 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; kvm_mips_translate_guest_kseg0_to_hpa() 234 vcpu->arch.pc, idx, read_c0_entryhi(), kvm_mips_host_tlb_write() 279 if (gfn >= kvm->arch.guest_pmap_npages) { kvm_mips_handle_kseg0_tlb_fault() 295 pfn0 = kvm->arch.guest_pmap[gfn]; kvm_mips_handle_kseg0_tlb_fault() 296 pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1]; kvm_mips_handle_kseg0_tlb_fault() 298 pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1]; kvm_mips_handle_kseg0_tlb_fault() 299 pfn1 = kvm->arch.guest_pmap[gfn]; kvm_mips_handle_kseg0_tlb_fault() 320 pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; kvm_mips_handle_commpage_tlb_fault() 343 vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), kvm_mips_handle_commpage_tlb_fault() 377 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) kvm_mips_handle_mapped_seg_tlb_fault() 379 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) kvm_mips_handle_mapped_seg_tlb_fault() 398 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, kvm_mips_handle_mapped_seg_tlb_fault() 410 struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; kvm_mips_guest_tlb_lookup() 660 if (hrtimer_cancel(&vcpu->arch.comparecount_timer)) kvm_mips_migrate_count() 661 hrtimer_restart(&vcpu->arch.comparecount_timer); kvm_mips_migrate_count() 676 if (((vcpu->arch. kvm_arch_vcpu_load() 678 kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu); kvm_arch_vcpu_load() 679 vcpu->arch.guest_kernel_asid[cpu] = kvm_arch_vcpu_load() 680 vcpu->arch.guest_kernel_mm.context.asid[cpu]; kvm_arch_vcpu_load() 681 kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu); kvm_arch_vcpu_load() 682 vcpu->arch.guest_user_asid[cpu] = kvm_arch_vcpu_load() 683 vcpu->arch.guest_user_mm.context.asid[cpu]; kvm_arch_vcpu_load() 689 cpu, vcpu->arch.guest_kernel_asid[cpu]); kvm_arch_vcpu_load() 691 vcpu->arch.guest_user_asid[cpu]); kvm_arch_vcpu_load() 694 if (vcpu->arch.last_sched_cpu != cpu) { kvm_arch_vcpu_load() 696 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); kvm_arch_vcpu_load() 711 write_c0_entryhi(vcpu->arch. kvm_arch_vcpu_load() 725 write_c0_entryhi(vcpu->arch. kvm_arch_vcpu_load() 729 write_c0_entryhi(vcpu->arch. kvm_arch_vcpu_load() 754 vcpu->arch.preempt_entryhi = read_c0_entryhi(); kvm_arch_vcpu_put() 755 vcpu->arch.last_sched_cpu = cpu; kvm_arch_vcpu_put() 775 struct mips_coproc *cop0 = vcpu->arch.cop0; kvm_get_inst() 798 &vcpu->arch. kvm_get_inst()
|
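The first tlb.c hits above read a per-physical-CPU guest ASID and mask it down to the hardware ASID field. A sketch of that lookup; the array size and mask width are illustrative, not MIPS constants:

#define SKETCH_NR_CPUS	8
#define SKETCH_ASID_MASK 0xff

struct vcpu_asid_sketch {
	unsigned long guest_kernel_asid[SKETCH_NR_CPUS];
	unsigned long guest_user_asid[SKETCH_NR_CPUS];
};

static unsigned long sketch_kernel_asid(struct vcpu_asid_sketch *v, int cpu)
{
	return v->guest_kernel_asid[cpu] & SKETCH_ASID_MASK;
}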
/linux-4.1.27/arch/powerpc/kernel/ |
H A D | asm-offsets.c | 437 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); main() 438 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); main() 439 DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid)); main() 440 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); main() 441 DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); main() 442 DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr)); main() 444 DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr)); main() 446 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); main() 447 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); main() 448 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); main() 450 DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar)); main() 452 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); main() 453 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); main() 455 DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr)); main() 456 DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0)); main() 457 DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1)); main() 458 DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0)); main() 459 DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1)); main() 460 DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2)); main() 461 DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3)); main() 464 DEFINE(VCPU_TB_RMENTRY, offsetof(struct kvm_vcpu, arch.rm_entry)); main() 465 DEFINE(VCPU_TB_RMINTR, offsetof(struct kvm_vcpu, arch.rm_intr)); main() 466 DEFINE(VCPU_TB_RMEXIT, offsetof(struct kvm_vcpu, arch.rm_exit)); main() 467 DEFINE(VCPU_TB_GUEST, offsetof(struct kvm_vcpu, arch.guest_time)); main() 468 DEFINE(VCPU_TB_CEDE, offsetof(struct kvm_vcpu, arch.cede_time)); main() 469 DEFINE(VCPU_CUR_ACTIVITY, offsetof(struct kvm_vcpu, arch.cur_activity)); main() 470 DEFINE(VCPU_ACTIVITY_START, offsetof(struct kvm_vcpu, arch.cur_tb_start)); main() 481 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); main() 482 DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1)); main() 483 DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); main() 485 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); main() 487 DEFINE(VCPU_SHAREDBE, offsetof(struct kvm_vcpu, arch.shared_big_endian)); main() 498 DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); main() 502 DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); main() 503 DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); main() 504 DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); main() 505 DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1)); main() 506 DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits)); main() 507 DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls)); main() 508 DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr)); main() 509 DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v)); main() 510 DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); main() 511 DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); main() 512 DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr)); main() 513 DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty)); main() 514 DEFINE(VCPU_HEIR, offsetof(struct kvm_vcpu, arch.emul_inst)); main() 518 DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); main() 519 DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); main() 520 DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic)); main() 521 DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb)); main() 522 DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr)); main() 523 DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr)); main() 524 DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor)); main() 525 DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr)); main() 526 DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl)); main() 527 DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr)); main() 528 DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx)); main() 529 DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr)); main() 530 DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx)); main() 531 DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr)); main() 532 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); main() 533 DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec)); main() 534 DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires)); main() 535 DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions)); main() 536 DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded)); main() 537 DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); main() 538 DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); main() 539 DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); main() 540 DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc)); main() 541 DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar)); main() 542 DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar)); main() 543 DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier)); main() 544 DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); main() 545 DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); main() 546 DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); main() 547 DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); main() 548 DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); main() 549 DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr)); main() 550 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); main() 551 DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); main() 552 DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); main() 553 DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); main() 554 DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr)); main() 555 DEFINE(VCPU_SHADOW_FSCR, offsetof(struct kvm_vcpu, arch.shadow_fscr)); main() 556 DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb)); main() 557 DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr)); main() 558 DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr)); main() 559 DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr)); main() 560 DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr)); main() 561 DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr)); main() 562 DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr)); main() 563 DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop)); main() 564 DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort)); main() 565 DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1)); main() 578 DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar)); main() 579 DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar)); main() 580 DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr)); main() 581 DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm)); main() 582 DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr)); main() 583 DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr)); main() 584 DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm)); main() 585 DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm)); main() 586 DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm)); main() 587 DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm)); main() 588 DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm)); main() 589 DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm)); main() 590 DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm)); main() 591 DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm)); main() 687 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); main() 688 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); main() 689 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); main() 690 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); main() 691 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); main() 692 DEFINE(VCPU_SPRG9, offsetof(struct kvm_vcpu, arch.sprg9)); main() 693 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); main() 694 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); main() 695 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); main() 696 DEFINE(VCPU_CRIT_SAVE, offsetof(struct kvm_vcpu, arch.crit_save)); main() 729 DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0])); main() 730 DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc)); main() 731 DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr)); main() 732 DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr)); main() 736 DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4)); main() 737 DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6)); main() 738 DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc)); main() 743 arch.timing_exit.tv32.tbu)); main() 745 arch.timing_exit.tv32.tbl)); main() 747 arch.timing_last_enter.tv32.tbu)); main() 749 arch.timing_last_enter.tv32.tbl)); main()
|
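Every asm-offsets.c hit above is one mechanism: DEFINE() makes the compiler emit each offsetof() value as an immediate after a "->" marker in the generated assembly, and a kbuild sed pass turns those markers into #defines that .S files can include (the ARM asm-offsets.c further down works the same way). A self-contained sketch of the macro:

#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct kvm_vcpu_sketch {
	struct { unsigned long pc; } arch;	/* simplified layout */
};

int main(void)
{
	/* compile with -S and grep "->" to see the emitted offset */
	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu_sketch, arch.pc));
	return 0;
}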
/linux-4.1.27/arch/unicore32/ |
H A D | Makefile | 2 # arch/unicore32/Makefile 35 head-y := arch/unicore32/kernel/head.o 37 core-y += arch/unicore32/kernel/ 38 core-y += arch/unicore32/mm/ 40 libs-y += arch/unicore32/lib/ 42 boot := arch/unicore32/boot 57 echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' 58 echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
|
/linux-4.1.27/arch/sh/include/asm/ |
H A D | clock.h | 10 /* arch/sh/kernel/cpu/clock-cpg.c */ 13 /* arch/sh/kernel/cpu/clock.c */
|
H A D | hardirq.h | 9 unsigned int __nmi_count; /* arch dependent */
|
H A D | kdebug.h | 13 /* arch/sh/kernel/dumpstack.c */
|
H A D | mmzone.h | 28 /* arch/sh/mm/numa.c */ 40 /* arch/sh/kernel/setup.c */ 43 /* arch/sh/mm/init.c */
|
/linux-4.1.27/arch/arm/mach-pxa/include/mach/ |
H A D | io.h | 2 * arch/arm/mach-pxa/include/mach/io.h 4 * Copied from asm/arch/sa1100/io.h
|
/linux-4.1.27/arch/hexagon/ |
H A D | Makefile | 1 # Makefile for the Hexagon arch 37 head-y := arch/hexagon/kernel/head.o 39 core-y += arch/hexagon/kernel/ \ 40 arch/hexagon/mm/ \ 41 arch/hexagon/lib/
|
/linux-4.1.27/arch/m68k/ |
H A D | Makefile | 74 KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds 93 head-y := arch/m68k/kernel/head.o 94 head-$(CONFIG_SUN3) := arch/m68k/kernel/sun3-head.o 95 head-$(CONFIG_M68360) := arch/m68k/68360/head.o 96 head-$(CONFIG_M68000) := arch/m68k/68000/head.o 97 head-$(CONFIG_COLDFIRE) := arch/m68k/coldfire/head.o 99 core-y += arch/m68k/kernel/ arch/m68k/mm/ 100 libs-y += arch/m68k/lib/ 102 core-$(CONFIG_Q40) += arch/m68k/q40/ 103 core-$(CONFIG_AMIGA) += arch/m68k/amiga/ 104 core-$(CONFIG_ATARI) += arch/m68k/atari/ 105 core-$(CONFIG_MAC) += arch/m68k/mac/ 106 core-$(CONFIG_HP300) += arch/m68k/hp300/ 107 core-$(CONFIG_APOLLO) += arch/m68k/apollo/ 108 core-$(CONFIG_MVME147) += arch/m68k/mvme147/ 109 core-$(CONFIG_MVME16x) += arch/m68k/mvme16x/ 110 core-$(CONFIG_BVME6000) += arch/m68k/bvme6000/ 111 core-$(CONFIG_SUN3X) += arch/m68k/sun3x/ arch/m68k/sun3/ 112 core-$(CONFIG_SUN3) += arch/m68k/sun3/ arch/m68k/sun3/prom/ 113 core-$(CONFIG_NATFEAT) += arch/m68k/emu/ 114 core-$(CONFIG_M68040) += arch/m68k/fpsp040/ 115 core-$(CONFIG_M68060) += arch/m68k/ifpsp060/ 116 core-$(CONFIG_M68KFPU_EMU) += arch/m68k/math-emu/ 117 core-$(CONFIG_M68360) += arch/m68k/68360/ 118 core-$(CONFIG_M68000) += arch/m68k/68000/ 119 core-$(CONFIG_COLDFIRE) += arch/m68k/coldfire/ 161 sh $(srctree)/arch/m68k/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)"
|
/linux-4.1.27/arch/blackfin/ |
H A D | Makefile | 2 # arch/blackfin/Makefile 111 core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/ arch/$(ARCH)/mach-common/ 115 core-y += arch/$(ARCH)/mach-$(MACHINE)/ 116 core-y += arch/$(ARCH)/mach-$(MACHINE)/boards/ 120 core-y += arch/$(ARCH)/kernel/cplb-mpu/ 122 core-y += arch/$(ARCH)/kernel/cplb-nompu/ 125 drivers-$(CONFIG_OPROFILE) += arch/$(ARCH)/oprofile/ 127 libs-y += arch/$(ARCH)/lib/ 129 machdirs := $(patsubst %,arch/blackfin/mach-%/, $(machine-y)) 137 arch/$(ARCH)/kernel/asm-offsets.s \ 143 boot := arch/$(ARCH)/boot 158 echo ' vmImage.bin - Uncompressed Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bin)' 159 echo ' vmImage.bz2 - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bz2)' 160 echo '* vmImage.gz - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)' 161 echo ' vmImage.lzma - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)' 162 echo ' vmImage.lzo - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzo)' 163 echo ' vmImage.xip - XIP Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.xip)'
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | kvm_booke.h | 39 vcpu->arch.gpr[num] = val; kvmppc_set_gpr() 44 return vcpu->arch.gpr[num]; kvmppc_get_gpr() 49 vcpu->arch.cr = val; kvmppc_set_cr() 54 return vcpu->arch.cr; kvmppc_get_cr() 59 vcpu->arch.xer = val; kvmppc_set_xer() 64 return vcpu->arch.xer; kvmppc_get_xer() 75 vcpu->arch.ctr = val; kvmppc_set_ctr() 80 return vcpu->arch.ctr; kvmppc_get_ctr() 85 vcpu->arch.lr = val; kvmppc_set_lr() 90 return vcpu->arch.lr; kvmppc_get_lr() 95 vcpu->arch.pc = val; kvmppc_set_pc() 100 return vcpu->arch.pc; kvmppc_get_pc() 105 return vcpu->arch.fault_dear; kvmppc_get_fault_dar()
|
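The kvm_booke.h hits above form the accessor layer: trivial static inlines wrap vcpu->arch fields so generic PowerPC KVM code never depends on the structure layout. A sketch with a simplified vcpu shape:

struct vcpu_booke_sketch {
	struct {
		unsigned long gpr[32];
		unsigned long pc;
	} arch;
};

static inline void sketch_set_gpr(struct vcpu_booke_sketch *vcpu, int num,
				  unsigned long val)
{
	vcpu->arch.gpr[num] = val;
}

static inline unsigned long sketch_get_gpr(struct vcpu_booke_sketch *vcpu,
					   int num)
{
	return vcpu->arch.gpr[num];
}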
/linux-4.1.27/arch/arm64/ |
H A D | Makefile | 2 # arch/arm64/Makefile 40 head-y := arch/arm64/kernel/head.o 51 core-y += arch/arm64/kernel/ arch/arm64/mm/ 52 core-$(CONFIG_NET) += arch/arm64/net/ 53 core-$(CONFIG_KVM) += arch/arm64/kvm/ 54 core-$(CONFIG_XEN) += arch/arm64/xen/ 55 core-$(CONFIG_CRYPTO) += arch/arm64/crypto/ 56 libs-y := arch/arm64/lib/ $(libs-y) 65 boot := arch/arm64/boot 86 $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@ 94 echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)' 95 echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
|
/linux-4.1.27/arch/cris/boot/rescue/ |
H A D | Makefile | 6 # ccflags-$(CONFIG_ETRAX_ARCH_V32) += -I$(srctree)/include/asm/arch/mach/ \ 7 # -I$(srctree)/include/asm/arch 8 # asflags-y += -I $(srctree)/include/asm/arch/mach/ -I $(srctree)/include/asm/arch 13 arch-$(CONFIG_ETRAX_ARCH_V10) = v10 14 arch-$(CONFIG_ETRAX_ARCH_V32) = v32 16 ldflags-y += -T $(srctree)/$(src)/rescue_$(arch-y).lds
|
/linux-4.1.27/arch/sparc/ |
H A D | Makefile | 10 # We are not yet configured - so test on arch 52 head-y := arch/sparc/kernel/head_$(BITS).o 54 # See arch/sparc/Kbuild for the core part of the kernel 55 core-y += arch/sparc/ 57 libs-y += arch/sparc/prom/ 58 libs-y += arch/sparc/lib/ 60 drivers-$(CONFIG_PM) += arch/sparc/power/ 61 drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/ 63 boot := arch/sparc/boot
|
/linux-4.1.27/arch/frv/ |
H A D | Makefile | 15 # - Derived from arch/m68knommu/Makefile, 22 # Based on arch/m68k/Makefile: 77 head-y := arch/frv/kernel/head.o 79 core-y += arch/frv/kernel/ arch/frv/mm/ 80 libs-y += arch/frv/lib/ 82 core-$(CONFIG_MB93090_MB00) += arch/frv/mb93090-mb00/ 87 $(Q)$(MAKE) $(build)=arch/frv/boot $@ 90 $(Q)$(MAKE) $(clean)=arch/frv/boot
|
/linux-4.1.27/arch/metag/ |
H A D | Makefile | 43 head-y := arch/metag/kernel/head.o 45 core-y += arch/metag/boot/dts/ 46 core-y += arch/metag/kernel/ 47 core-y += arch/metag/mm/ 49 libs-y += arch/metag/lib/ 50 libs-y += arch/metag/tbx/ 52 drivers-$(CONFIG_OPROFILE) += arch/metag/oprofile/ 54 boot := arch/metag/boot 81 echo '* vmlinux.bin - Binary kernel image (arch/$(ARCH)/boot/vmlinux.bin)'
|
/linux-4.1.27/arch/arm/kernel/ |
H A D | asm-offsets.c | 23 #include <asm/mach/arch.h> 174 DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); main() 175 DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); main() 176 DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest)); main() 177 DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.host_cpu_context)); main() 178 DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); main() 179 DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); main() 180 DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); main() 181 DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs)); main() 182 DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs)); main() 183 DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs)); main() 184 DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); main() 185 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); main() 186 DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); main() 187 DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr)); main() 188 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); main() 189 DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr)); main() 190 DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar)); main() 191 DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar)); main() 192 DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); main() 193 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); main() 202 DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); main() 203 DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); main() 204 DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); main() 205 DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); main() 206 DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); main() 207 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); main()
|
/linux-4.1.27/arch/x86/um/os-Linux/ |
H A D | Makefile | 13 include arch/um/scripts/Makefile.rules
|
/linux-4.1.27/arch/um/os-Linux/drivers/ |
H A D | Makefile | 13 include arch/um/scripts/Makefile.rules
|
/linux-4.1.27/arch/arm/vfp/ |
H A D | Makefile | 2 # linux/arch/arm/vfp/Makefile
|
/linux-4.1.27/arch/arm64/mm/ |
H A D | extable.c | 2 * Based on arch/arm/mm/extable.c
|
/linux-4.1.27/arch/blackfin/mach-common/ |
H A D | Makefile | 2 # arch/blackfin/mach-common/Makefile
|
/linux-4.1.27/arch/c6x/kernel/ |
H A D | Makefile | 2 # Makefile for arch/c6x/kernel/
|
/linux-4.1.27/arch/cris/arch-v10/kernel/ |
H A D | crisksyms.c | 3 #include <arch/svinto.h>
|
/linux-4.1.27/arch/frv/mm/ |
H A D | Makefile | 2 # Makefile for the arch-specific parts of the memory manager.
|
/linux-4.1.27/arch/m32r/mm/ |
H A D | extable.c | 2 * linux/arch/m32r/mm/extable.c
|
/linux-4.1.27/arch/m68k/include/asm/ |
H A D | bugs.h | 15 extern void check_bugs(void); /* in arch/m68k/kernel/setup.c */
|
/linux-4.1.27/arch/arm/mach-s3c64xx/include/mach/ |
H A D | hardware.h | 1 /* linux/arch/arm/mach-s3c6400/include/mach/hardware.h
|
/linux-4.1.27/arch/arm/mach-vexpress/ |
H A D | v2m.c | 1 #include <asm/mach/arch.h>
|
/linux-4.1.27/arch/arm/mach-efm32/ |
H A D | dtmachine.c | 5 #include <asm/mach/arch.h>
|
/linux-4.1.27/arch/arm/mach-iop33x/include/mach/ |
H A D | iq80331.h | 2 * arch/arm/mach-iop33x/include/mach/iq80331.h
|
H A D | iq80332.h | 2 * arch/arm/mach-iop33x/include/mach/iq80332.h
|
H A D | hardware.h | 2 * arch/arm/mach-iop33x/include/mach/hardware.h 19 * arch/arm/mach-iop3xx/iop3xx-pci.c
|
/linux-4.1.27/arch/alpha/lib/ |
H A D | clear_page.S | 2 * arch/alpha/lib/clear_page.S
|
H A D | fls.c | 2 * arch/alpha/lib/fls.c
|
H A D | srm_puts.c | 2 * arch/alpha/lib/srm_puts.c
|
/linux-4.1.27/tools/perf/util/ |
H A D | tsc.h | 6 #include "../arch/x86/util/tsc.h"
|
/linux-4.1.27/arch/arm/mach-w90x900/include/mach/ |
H A D | hardware.h | 2 * arch/arm/mach-w90x900/include/mach/hardware.h 9 * Based on arch/arm/mach-s3c2410/include/mach/hardware.h
|
H A D | mfp.h | 2 * arch/arm/mach-w90x900/include/mach/mfp.h 8 * Based on arch/arm/mach-s3c2410/include/mach/map.h
|
/linux-4.1.27/arch/arm/mach-exynos/ |
H A D | Makefile | 0 # arch/arm/mach-exynos/Makefile 8 ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/$(src)/include -I$(srctree)/arch/arm/plat-samsung/include
|
/linux-4.1.27/arch/openrisc/ |
H A D | Makefile | 18 # arch/i386/Makefile 41 head-y := arch/openrisc/kernel/head.o 43 core-y += arch/openrisc/lib/ \ 44 arch/openrisc/kernel/ \ 45 arch/openrisc/mm/ 53 core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/
|
/linux-4.1.27/arch/x86/ |
H A D | Makefile | 20 CODE16GCC_CFLAGS := -m32 -Wa,$(srctree)/arch/x86/boot/code16gcc.h 66 include arch/x86/Makefile_32.cpu 175 $(Q)$(MAKE) $(build)=arch/x86/tools relocs 181 $(Q)$(MAKE) $(build)=arch/x86/syscalls all 185 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c 191 head-y := arch/x86/kernel/head_$(BITS).o 192 head-y += arch/x86/kernel/head$(BITS).o 193 head-y += arch/x86/kernel/head.o 195 libs-y += arch/x86/lib/ 197 # See arch/x86/Kbuild for content of core part of the kernel 198 core-y += arch/x86/ 201 drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/ 202 drivers-$(CONFIG_PCI) += arch/x86/pci/ 205 drivers-$(CONFIG_OPROFILE) += arch/x86/oprofile/ 208 drivers-$(CONFIG_PM) += arch/x86/power/ 210 drivers-$(CONFIG_FB) += arch/x86/video/ 215 boot := arch/x86/boot 229 $(Q)$(MAKE) $(build)=arch/x86/tools posttest 232 $(Q)mkdir -p $(objtree)/arch/$(UTS_MACHINE)/boot 233 $(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/$(UTS_MACHINE)/boot/$@ 244 $(Q)$(MAKE) $(build)=arch/x86/vdso $@ 247 $(Q)rm -rf $(objtree)/arch/i386 248 $(Q)rm -rf $(objtree)/arch/x86_64 250 $(Q)$(MAKE) $(clean)=arch/x86/tools 251 $(Q)$(MAKE) $(clean)=arch/x86/purgatory 254 echo '* bzImage - Compressed kernel image (arch/x86/boot/bzImage)' 259 echo ' fdimage - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)' 260 echo ' fdimage144 - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)' 261 echo ' fdimage288 - Create 2.8MB boot floppy image (arch/x86/boot/fdimage)' 262 echo ' isoimage - Create a boot CD-ROM image (arch/x86/boot/image.iso)'
|
/linux-4.1.27/arch/arm/ |
H A D | Makefile | 2 # arch/arm/Makefile 29 # Do not use arch/arm/defconfig - it's always outdated. 65 arch-$(CONFIG_CPU_32v7M) =-D__LINUX_ARM_ARCH__=7 -march=armv7-m -Wa,-march=armv7-m 66 arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a) 67 arch-$(CONFIG_CPU_32v6) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6) 71 arch-$(CONFIG_CPU_32v6K) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k) 73 arch-$(CONFIG_CPU_32v5) =-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t) 74 arch-$(CONFIG_CPU_32v4T) =-D__LINUX_ARM_ARCH__=4 -march=armv4t 75 arch-$(CONFIG_CPU_32v4) =-D__LINUX_ARM_ARCH__=4 -march=armv4 76 arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3 78 # Evaluate arch cc-option calls now 79 arch-y := $(arch-y) 129 KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm 130 KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float 135 head-y := arch/arm/kernel/head$(MMUEXT).o 138 # provide a means to avoid/resolve conflicts in multi-arch kernels. 242 MACHINE := arch/arm/mach-$(word 1,$(machine-y))/ 250 machdirs := $(patsubst %,arch/arm/mach-%/,$(machine-y)) 251 platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y))) 264 FASTFPE :=arch/arm/fastfpe 269 core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/ 271 core-$(CONFIG_VFP) += arch/arm/vfp/ 272 core-$(CONFIG_XEN) += arch/arm/xen/ 273 core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/ 274 core-$(CONFIG_VDSO) += arch/arm/vdso/ 277 core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/ 278 core-y += arch/arm/probes/ 279 core-y += arch/arm/net/ 280 core-y += arch/arm/crypto/ 281 core-y += arch/arm/firmware/ 284 drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/ 286 libs-y := arch/arm/lib/ $(libs-y) 302 boot := arch/arm/boot 305 $(Q)$(MAKE) $(build)=arch/arm/tools include/generated/mach-types.h 335 $(Q)$(MAKE) $(build)=arch/arm/vdso $@ 348 echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' 349 echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' 350 echo '* xipImage - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)'
|
/linux-4.1.27/arch/powerpc/ |
H A D | Makefile | 168 KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o 172 -T $(srctree)/arch/powerpc/platforms/44x/ppc476_modules.lds 213 head-y := arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o 214 head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o 215 head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o 216 head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o 217 head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o 219 head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o 220 head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o 221 head-$(CONFIG_ALTIVEC) += arch/powerpc/kernel/vector.o 222 head-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += arch/powerpc/kernel/prom_init.o 224 core-y += arch/powerpc/kernel/ \ 225 arch/powerpc/mm/ \ 226 arch/powerpc/lib/ \ 227 arch/powerpc/sysdev/ \ 228 arch/powerpc/platforms/ \ 229 arch/powerpc/math-emu/ \ 230 arch/powerpc/crypto/ \ 231 arch/powerpc/net/ 232 core-$(CONFIG_XMON) += arch/powerpc/xmon/ 233 core-$(CONFIG_KVM) += arch/powerpc/kvm/ 234 core-$(CONFIG_PERF_EVENTS) += arch/powerpc/perf/ 236 drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ 247 boot := arch/$(ARCH)/boot 254 relocs_check: arch/powerpc/relocs_check.sh vmlinux 274 @echo ' zImage.* - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)' 285 @echo ' *_defconfig - Select default config from arch/$(ARCH)/configs' 290 @echo ' name of a dts file from the arch/$(ARCH)/boot/dts/ directory' 299 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@ 301 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
|
/linux-4.1.27/arch/microblaze/ |
H A D | Makefile | 47 head-y := arch/microblaze/kernel/head.o 48 libs-y += arch/microblaze/lib/ 49 core-y += arch/microblaze/kernel/ 50 core-y += arch/microblaze/mm/ 51 core-$(CONFIG_PCI) += arch/microblaze/pci/ 53 drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/ 55 boot := arch/microblaze/boot 86 echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in' 89 echo ' *_defconfig - Select default config from arch/microblaze/configs' 94 echo ' name of a dts file from the arch/microblaze/boot/dts/ directory'
|
/linux-4.1.27/arch/s390/ |
H A D | Makefile | 48 KBUILD_IMAGE :=arch/s390/boot/image 94 head-y := arch/s390/kernel/head.o 95 head-y += arch/s390/kernel/head64.o 97 # See arch/s390/Kbuild for content of core part of the kernel 98 core-y += arch/s390/ 100 libs-y += arch/s390/lib/ 104 drivers-$(CONFIG_OPROFILE) += arch/s390/oprofile/ 106 boot := arch/s390/boot 120 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@ 121 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
|
/linux-4.1.27/arch/x86/kernel/ |
H A D | machine_kexec_32.c | 73 free_page((unsigned long)image->arch.pgd); machine_kexec_free_page_tables() 75 free_page((unsigned long)image->arch.pmd0); machine_kexec_free_page_tables() 76 free_page((unsigned long)image->arch.pmd1); machine_kexec_free_page_tables() 78 free_page((unsigned long)image->arch.pte0); machine_kexec_free_page_tables() 79 free_page((unsigned long)image->arch.pte1); machine_kexec_free_page_tables() 84 image->arch.pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL); machine_kexec_alloc_page_tables() 86 image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL); machine_kexec_alloc_page_tables() 87 image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL); machine_kexec_alloc_page_tables() 89 image->arch.pte0 = (pte_t *)get_zeroed_page(GFP_KERNEL); machine_kexec_alloc_page_tables() 90 image->arch.pte1 = (pte_t *)get_zeroed_page(GFP_KERNEL); machine_kexec_alloc_page_tables() 91 if (!image->arch.pgd || machine_kexec_alloc_page_tables() 93 !image->arch.pmd0 || !image->arch.pmd1 || machine_kexec_alloc_page_tables() 95 !image->arch.pte0 || !image->arch.pte1) { machine_kexec_alloc_page_tables() 128 pmd = image->arch.pmd0; machine_kexec_prepare_page_tables() 131 image->arch.pgd, pmd, image->arch.pte0, machine_kexec_prepare_page_tables() 134 pmd = image->arch.pmd1; machine_kexec_prepare_page_tables() 137 image->arch.pgd, pmd, image->arch.pte1, machine_kexec_prepare_page_tables() 224 page_list[PA_PGD] = __pa(image->arch.pgd); machine_kexec()
|
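The machine_kexec_32.c hits show a common allocate-all/check-all/free-all shape: every page-table level is obtained with get_zeroed_page(), the pointers are validated in one compound test, and a single free routine releases whatever was allocated (free_page() accepts a zero address, so the error path needs no per-pointer bookkeeping). A user-space sketch of the same shape, assuming calloc()/free() in place of the page allocator and hypothetical names throughout:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical bundle mirroring image->arch.{pgd,pmd*,pte*}. */
    struct tables {
            void *pgd, *pmd0, *pmd1, *pte0, *pte1;
    };

    static void free_tables(struct tables *t)
    {
            /* free(NULL) is a no-op, much as free_page(0) is, so this
             * is safe even after a partial allocation failure. */
            free(t->pgd);
            free(t->pmd0);
            free(t->pmd1);
            free(t->pte0);
            free(t->pte1);
    }

    static int alloc_tables(struct tables *t)
    {
            t->pgd  = calloc(1, 4096);
            t->pmd0 = calloc(1, 4096);
            t->pmd1 = calloc(1, 4096);
            t->pte0 = calloc(1, 4096);
            t->pte1 = calloc(1, 4096);
            if (!t->pgd || !t->pmd0 || !t->pmd1 || !t->pte0 || !t->pte1) {
                    free_tables(t);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct tables t = { 0 };

            if (alloc_tables(&t) == 0) {
                    puts("page tables allocated");
                    free_tables(&t);
            }
            return 0;
    }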
H A D | vsyscall_trace.h | 27 #define TRACE_INCLUDE_PATH ../../arch/x86/kernel
|
/linux-4.1.27/arch/x86/um/ |
H A D | ldt.c | 41 * for arch/i386/kernel/ldt.c 53 uml_ldt_t *ldt = &current->mm->context.arch.ldt; read_ldt() 118 uml_ldt_t *ldt = &current->mm->context.arch.ldt; write_ldt() 302 mutex_init(&new_mm->arch.ldt.lock); init_new_ldt() 319 new_mm->arch.ldt.entry_count = 0; init_new_ldt() 330 mutex_lock(&from_mm->arch.ldt.lock); init_new_ldt() 331 if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES) init_new_ldt() 332 memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries, init_new_ldt() 333 sizeof(new_mm->arch.ldt.u.entries)); init_new_ldt() 335 i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE; init_new_ldt() 342 new_mm->arch.ldt.u.pages[i] = init_new_ldt() 344 memcpy(new_mm->arch.ldt.u.pages[i], init_new_ldt() 345 from_mm->arch.ldt.u.pages[i], PAGE_SIZE); init_new_ldt() 348 new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count; init_new_ldt() 349 mutex_unlock(&from_mm->arch.ldt.lock); init_new_ldt() 360 if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) { free_ldt() 361 i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE; free_ldt() 363 free_page((long) mm->arch.ldt.u.pages[i]); free_ldt() 365 mm->arch.ldt.entry_count = 0; free_ldt()
|
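The um ldt.c hits revolve around a two-level store: a small LDT lives in an inline entries array, while a larger one spills into an array of separately allocated pages, and init_new_ldt() copies whichever representation the parent uses. A hypothetical, much-simplified user-space model of that copy (sizes, limits, and the rounding are illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE            4096
    #define LDT_ENTRY_SIZE       8
    #define LDT_ENTRIES_PER_PAGE (PAGE_SIZE / LDT_ENTRY_SIZE)
    #define LDT_DIRECT_ENTRIES   16
    #define LDT_PAGES_MAX        8

    struct uml_ldt {
            unsigned int entry_count;
            union {
                    unsigned char entries[LDT_DIRECT_ENTRIES * LDT_ENTRY_SIZE];
                    unsigned char *pages[LDT_PAGES_MAX];
            } u;
    };

    static int ldt_fork_copy(struct uml_ldt *dst, const struct uml_ldt *src)
    {
            if (src->entry_count <= LDT_DIRECT_ENTRIES) {
                    /* Small table: one flat copy of the inline array. */
                    memcpy(dst->u.entries, src->u.entries,
                           sizeof(dst->u.entries));
            } else {
                    /* Large table: duplicate each page of entries. */
                    unsigned int n = (src->entry_count + LDT_ENTRIES_PER_PAGE - 1)
                                     / LDT_ENTRIES_PER_PAGE;
                    for (unsigned int i = 0; i < n && i < LDT_PAGES_MAX; i++) {
                            dst->u.pages[i] = malloc(PAGE_SIZE);
                            if (!dst->u.pages[i])
                                    return -1;
                            memcpy(dst->u.pages[i], src->u.pages[i], PAGE_SIZE);
                    }
            }
            dst->entry_count = src->entry_count;
            return 0;
    }

    int main(void)
    {
            struct uml_ldt parent = { .entry_count = 4 }, child = { 0 };

            memset(parent.u.entries, 0xab, sizeof(parent.u.entries));
            if (ldt_fork_copy(&child, &parent) == 0)
                    printf("copied %u entries\n", child.entry_count);
            return 0;
    }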
H A D | tls_64.c | 14 t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)]; arch_copy_tls()
|
/linux-4.1.27/arch/avr32/kernel/ |
H A D | module.c | 24 vfree(mod->arch.syminfo); module_arch_freeing_init() 25 mod->arch.syminfo = NULL; module_arch_freeing_init() 33 info = module->arch.syminfo + ELF32_R_SYM(rela->r_info); check_rela() 49 info->got_offset = module->arch.got_size; check_rela() 50 module->arch.got_size += sizeof(void *); check_rela() 84 module->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym); module_frob_arch_sections() 85 module->arch.syminfo = vmalloc(module->arch.nsyms module_frob_arch_sections() 87 if (!module->arch.syminfo) module_frob_arch_sections() 92 for (i = 0; i < module->arch.nsyms; i++) { module_frob_arch_sections() 98 module->arch.syminfo[i].got_offset = -1UL; module_frob_arch_sections() 99 module->arch.syminfo[i].got_initialized = 0; module_frob_arch_sections() 103 module->arch.got_size = 0; module_frob_arch_sections() 122 module->arch.got_offset = module->core_size; module_frob_arch_sections() 123 module->core_size += module->arch.got_size; module_frob_arch_sections() 128 vfree(module->arch.syminfo); module_frob_arch_sections() 129 module->arch.syminfo = NULL; module_frob_arch_sections() 167 info = module->arch.syminfo + ELF32_R_SYM(rel->r_info); apply_relocate_add() 181 + module->arch.got_offset apply_relocate_add() 257 relocation, module->arch.got_offset, apply_relocate_add() 260 + module->arch.got_offset); apply_relocate_add()
|
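The avr32 module.c hits show how module_frob_arch_sections() and check_rela() size a per-module GOT before relocation: each symbol reached through a GOT-relative relocation is given one pointer-sized slot the first time it is seen, and arch.got_size accumulates the total. A hypothetical, simplified model of that first-use slot assignment:

    #include <stdio.h>

    /* Simplified stand-in for the per-symbol bookkeeping held in
     * mod->arch.syminfo; only the first-use assignment is modeled. */
    struct syminfo {
            long got_offset;        /* -1 until a slot is reserved */
    };

    static long got_size;           /* running size of the module GOT */

    static void reserve_got_slot(struct syminfo *info)
    {
            if (info->got_offset == -1) {
                    info->got_offset = got_size;
                    got_size += sizeof(void *);
            }
    }

    int main(void)
    {
            struct syminfo syms[3] = { { -1 }, { -1 }, { -1 } };

            /* Two relocations against symbol 0 still cost one slot. */
            reserve_got_slot(&syms[0]);
            reserve_got_slot(&syms[0]);
            reserve_got_slot(&syms[2]);

            printf("got_size = %ld bytes\n", got_size);
            printf("sym0 at %ld, sym2 at %ld\n",
                   syms[0].got_offset, syms[2].got_offset);
            return 0;
    }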
/linux-4.1.27/arch/ia64/kernel/ |
H A D | module.c | 310 if (mod->arch.init_unw_table) { module_arch_freeing_init() 311 unw_remove_unwind_table(mod->arch.init_unw_table); module_arch_freeing_init() 312 mod->arch.init_unw_table = NULL; module_arch_freeing_init() 433 mod->arch.core_plt = s; module_frob_arch_sections() 435 mod->arch.init_plt = s; module_frob_arch_sections() 437 mod->arch.got = s; module_frob_arch_sections() 439 mod->arch.opd = s; module_frob_arch_sections() 441 mod->arch.unwind = s; module_frob_arch_sections() 445 mod->arch.paravirt_bundles = s; module_frob_arch_sections() 448 mod->arch.paravirt_insts = s; module_frob_arch_sections() 451 if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) { module_frob_arch_sections() 472 mod->arch.core_plt->sh_type = SHT_NOBITS; module_frob_arch_sections() 473 mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; module_frob_arch_sections() 474 mod->arch.core_plt->sh_addralign = 16; module_frob_arch_sections() 475 mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry); module_frob_arch_sections() 476 mod->arch.init_plt->sh_type = SHT_NOBITS; module_frob_arch_sections() 477 mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; module_frob_arch_sections() 478 mod->arch.init_plt->sh_addralign = 16; module_frob_arch_sections() 479 mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry); module_frob_arch_sections() 480 mod->arch.got->sh_type = SHT_NOBITS; module_frob_arch_sections() 481 mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC; module_frob_arch_sections() 482 mod->arch.got->sh_addralign = 8; module_frob_arch_sections() 483 mod->arch.got->sh_size = gots * sizeof(struct got_entry); module_frob_arch_sections() 484 mod->arch.opd->sh_type = SHT_NOBITS; module_frob_arch_sections() 485 mod->arch.opd->sh_flags = SHF_ALLOC; module_frob_arch_sections() 486 mod->arch.opd->sh_addralign = 8; module_frob_arch_sections() 487 mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc); module_frob_arch_sections() 489 __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size, module_frob_arch_sections() 490 mod->arch.got->sh_size, mod->arch.opd->sh_size); module_frob_arch_sections() 523 got = (void *) mod->arch.got->sh_addr; get_ltoff() 524 for (e = got; e < got + mod->arch.next_got_entry; ++e) get_ltoff() 529 BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size)); get_ltoff() 532 ++mod->arch.next_got_entry; get_ltoff() 534 return (uint64_t) e - mod->arch.gp; get_ltoff() 540 return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF; gp_addressable() 554 plt = (void *) mod->arch.init_plt->sh_addr; get_plt() 555 plt_end = (void *) plt + mod->arch.init_plt->sh_size; get_plt() 557 plt = (void *) mod->arch.core_plt->sh_addr; get_plt() 558 plt_end = (void *) plt + mod->arch.core_plt->sh_size; get_plt() 593 struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr; get_fdesc() 614 if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size) get_fdesc() 620 fdesc->gp = mod->arch.gp; get_fdesc() 640 case RV_GPREL: val -= mod->arch.gp; break; do_reloc() 724 val -= mod->arch.gp; do_reloc() 815 if (!mod->arch.gp) { apply_relocate_add() 817 * XXX Should have an arch-hook for running this after final section apply_relocate_add() 830 mod->arch.gp = gp; apply_relocate_add() 855 struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr; register_unwind_table() 856 struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start); register_unwind_table() 892 mod->name, mod->arch.gp, num_init, num_core); register_unwind_table() 898 mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp, register_unwind_table() 901 mod->arch.core_unw_table, core, core + num_core); register_unwind_table() 904 mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp, register_unwind_table() 907 mod->arch.init_unw_table, init, init + num_init); register_unwind_table() 915 if (mod->arch.unwind) module_finalize() 918 if (mod->arch.paravirt_bundles) { module_finalize() 921 mod->arch.paravirt_bundles->sh_addr; module_finalize() 924 (mod->arch.paravirt_bundles->sh_addr + module_finalize() 925 mod->arch.paravirt_bundles->sh_size); module_finalize() 929 if (mod->arch.paravirt_insts) { module_finalize() 932 mod->arch.paravirt_insts->sh_addr; module_finalize() 935 (mod->arch.paravirt_insts->sh_addr + module_finalize() 936 mod->arch.paravirt_insts->sh_size); module_finalize() 947 if (mod->arch.init_unw_table) module_arch_cleanup() 948 unw_remove_unwind_table(mod->arch.init_unw_table); module_arch_cleanup() 949 if (mod->arch.core_unw_table) module_arch_cleanup() 950 unw_remove_unwind_table(mod->arch.core_unw_table); module_arch_cleanup()
|
/linux-4.1.27/arch/x86/include/asm/ |
H A D | hardirq.h | 9 unsigned int __nmi_count; /* arch dependent */ 11 unsigned int apic_timer_irqs; /* arch dependent */ 18 unsigned int x86_platform_ipis; /* arch dependent */
|
/linux-4.1.27/arch/metag/oprofile/ |
H A D | common.c | 2 * arch/metag/oprofile/common.c 6 * Based on arch/sh/oprofile/common.c: 10 * Based on arch/mips/oprofile/common.c:
|
/linux-4.1.27/arch/mips/include/uapi/asm/ |
H A D | swab.h | 23 " .set arch=mips32r2 \n" __arch_swab16() 37 " .set arch=mips32r2 \n" __arch_swab32() 57 " .set arch=mips64r2 \n" __arch_swab64()
|
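The MIPS swab.h hits temporarily switch the assembler to the r2 instruction set (".set arch=mips32r2" / ".set arch=mips64r2") so that __arch_swab16/32/64 can use the short r2 byte-swap sequences (wsbh, plus rotr in the 32-bit case) even in kernels built for an older baseline ISA. A portable C routine computing the same 32-bit result, for comparison (GCC's __builtin_bswap32 would also serve):

    #include <stdint.h>
    #include <stdio.h>

    /* Shift-and-mask fallback equivalent to what the MIPS r2
     * wsbh/rotr pair computes in two instructions. */
    static uint32_t swab32(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0x0000ff00) |
                   ((x << 8) & 0x00ff0000) | (x << 24);
    }

    int main(void)
    {
            printf("%08x\n", swab32(0x12345678));  /* 78563412 */
            return 0;
    }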
/linux-4.1.27/arch/arm/include/asm/ |
H A D | procinfo.h | 2 * arch/arm/include/asm/procinfo.h 27 * arch/arm/mm/proc-*.S and arch/arm/kernel/head.S
|
/linux-4.1.27/arch/nios2/ |
H A D | Makefile | 36 head-y := arch/nios2/kernel/head.o 37 libs-y += arch/nios2/lib/ $(LIBGCC) 38 core-y += arch/nios2/kernel/ arch/nios2/mm/ 39 core-y += arch/nios2/platform/ 42 nios2-boot := arch/$(ARCH)/boot
|
/linux-4.1.27/arch/tile/include/arch/ |
H A D | usb_host.h | 20 #include <arch/abi.h> 21 #include <arch/usb_host_def.h>
|
/linux-4.1.27/arch/tile/include/uapi/arch/ |
H A D | spr_def.h | 20 #include <arch/spr_def_64.h> 22 #include <arch/spr_def_32.h>
|
/linux-4.1.27/arch/s390/kernel/ |
H A D | module.c | 54 vfree(mod->arch.syminfo); module_arch_freeing_init() 55 mod->arch.syminfo = NULL; module_arch_freeing_init() 62 info = me->arch.syminfo + ELF_R_SYM (rela->r_info); check_rela() 77 info->got_offset = me->arch.got_size; check_rela() 78 me->arch.got_size += sizeof(void*); check_rela() 89 info->plt_offset = me->arch.plt_size; check_rela() 90 me->arch.plt_size += PLT_ENTRY_SIZE; check_rela() 130 me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym); module_frob_arch_sections() 131 me->arch.syminfo = vmalloc(me->arch.nsyms * module_frob_arch_sections() 133 if (!me->arch.syminfo) module_frob_arch_sections() 137 for (i = 0; i < me->arch.nsyms; i++) { module_frob_arch_sections() 143 me->arch.syminfo[i].got_offset = -1UL; module_frob_arch_sections() 144 me->arch.syminfo[i].plt_offset = -1UL; module_frob_arch_sections() 145 me->arch.syminfo[i].got_initialized = 0; module_frob_arch_sections() 146 me->arch.syminfo[i].plt_initialized = 0; module_frob_arch_sections() 150 me->arch.got_size = me->arch.plt_size = 0; module_frob_arch_sections() 163 me->arch.got_offset = me->core_size; module_frob_arch_sections() 164 me->core_size += me->arch.got_size; module_frob_arch_sections() 165 me->arch.plt_offset = me->core_size; module_frob_arch_sections() 166 me->core_size += me->arch.plt_size; module_frob_arch_sections() 223 info = me->arch.syminfo + r_sym; apply_rela() 282 gotent = me->module_core + me->arch.got_offset + apply_rela() 318 ip = me->module_core + me->arch.plt_offset + apply_rela() 330 val = me->arch.plt_offset - me->arch.got_offset + apply_rela() 338 me->arch.plt_offset + apply_rela() 359 ((Elf_Addr) me->module_core + me->arch.got_offset); apply_rela() 369 val = (Elf_Addr) me->module_core + me->arch.got_offset + apply_rela() 428 vfree(me->arch.syminfo); module_finalize() 429 me->arch.syminfo = NULL; module_finalize()
|
/linux-4.1.27/arch/sh/boot/romimage/ |
H A D | Makefile | 2 # linux/arch/sh/boot/romimage/Makefile 29 $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/zeropage.bin arch/sh/boot/zImage FORCE
|
/linux-4.1.27/arch/arm64/include/asm/ |
H A D | dmi.h | 2 * arch/arm64/include/asm/dmi.h 7 * based on arch/ia64/include/asm/dmi.h
|
/linux-4.1.27/arch/arm/mach-omap2/ |
H A D | gpmc-smsc911x.h | 2 * arch/arm/plat-omap/include/plat/gpmc-smsc911x.h 7 * Modified from arch/arm/plat-omap/include/plat/gpmc-smc91x.h
|
/linux-4.1.27/arch/arm/mach-spear/ |
H A D | spear1340.c | 2 * arch/arm/mach-spear13xx/spear1340.c 17 #include <asm/mach/arch.h>
|
/linux-4.1.27/arch/arm/mach-w90x900/ |
H A D | mach-nuc910evb.c | 2 * linux/arch/arm/mach-w90x900/mach-nuc910evb.c 17 #include <asm/mach/arch.h>
|
H A D | mach-nuc960evb.c | 2 * linux/arch/arm/mach-w90x900/mach-nuc960evb.c 17 #include <asm/mach/arch.h>
|
/linux-4.1.27/arch/arm/plat-samsung/ |
H A D | dev-uart.c | 1 /* linux/arch/arm/plat-samsung/dev-uart.c 2 * originally from arch/arm/plat-s3c24xx/devs.c
|
/linux-4.1.27/arch/arm/mach-mvebu/ |
H A D | dove.c | 2 * arch/arm/mach-mvebu/dove.c 16 #include <asm/mach/arch.h>
|
/linux-4.1.27/arch/blackfin/kernel/ |
H A D | module.c | 28 * in mod->arch to correctly free L1 I/D sram later. module_frob_arch_sections() 29 * NOTE: this breaks the semantic of mod->arch structure. module_frob_arch_sections() 45 mod->arch.text_l1 = dest; module_frob_arch_sections() 57 mod->arch.data_a_l1 = dest; module_frob_arch_sections() 69 mod->arch.bss_a_l1 = dest; module_frob_arch_sections() 78 mod->arch.data_b_l1 = dest; module_frob_arch_sections() 88 mod->arch.bss_b_l1 = dest; module_frob_arch_sections() 100 mod->arch.text_l2 = dest; module_frob_arch_sections() 112 mod->arch.data_l2 = dest; module_frob_arch_sections() 124 mod->arch.bss_l2 = dest; module_frob_arch_sections() 281 l1_inst_sram_free(mod->arch.text_l1); module_arch_cleanup() 282 l1_data_A_sram_free(mod->arch.data_a_l1); module_arch_cleanup() 283 l1_data_A_sram_free(mod->arch.bss_a_l1); module_arch_cleanup() 284 l1_data_B_sram_free(mod->arch.data_b_l1); module_arch_cleanup() 285 l1_data_B_sram_free(mod->arch.bss_b_l1); module_arch_cleanup() 286 l2_sram_free(mod->arch.text_l2); module_arch_cleanup() 287 l2_sram_free(mod->arch.data_l2); module_arch_cleanup() 288 l2_sram_free(mod->arch.bss_l2); module_arch_cleanup()
|
/linux-4.1.27/virt/kvm/arm/ |
H A D | vgic-v3.c | 50 u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)]; vgic_v3_get_lr() 52 if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) vgic_v3_get_lr() 59 vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) vgic_v3_get_lr() 87 if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) vgic_v3_set_lr() 99 vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val; vgic_v3_set_lr() 106 vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); vgic_v3_sync_lr_elrsr() 108 vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr); vgic_v3_sync_lr_elrsr() 113 return vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr; vgic_v3_get_elrsr() 118 return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr; vgic_v3_get_eisr() 123 vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0; vgic_v3_clear_eisr() 128 u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr; vgic_v3_get_interrupt_status() 141 u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr; vgic_v3_get_vmcr() 151 vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= ICH_HCR_UIE; vgic_v3_enable_underflow() 156 vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr &= ~ICH_HCR_UIE; vgic_v3_disable_underflow() 168 vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr; vgic_v3_set_vmcr() 173 struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3; vgic_v3_enable() 187 if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) vgic_v3_enable()
|
/linux-4.1.27/arch/x86/realmode/ |
H A D | Makefile | 2 # arch/x86/realmode/Makefile
|
/linux-4.1.27/arch/x86/um/shared/sysdep/ |
H A D | archsetjmp_32.h | 2 * arch/um/include/sysdep-i386/archsetjmp.h
|
H A D | archsetjmp_64.h | 2 * arch/um/include/sysdep-x86_64/archsetjmp.h
|
/linux-4.1.27/arch/xtensa/boot/dts/ |
H A D | Makefile | 2 # arch/xtensa/boot/dts/Makefile
|
/linux-4.1.27/arch/xtensa/platforms/xtfpga/include/platform/ |
H A D | serial.h | 2 * arch/xtensa/platform/xtavnet/include/platform/serial.h
|
/linux-4.1.27/arch/um/kernel/skas/ |
H A D | Makefile | 15 include arch/um/scripts/Makefile.rules
|
/linux-4.1.27/arch/unicore32/include/asm/ |
H A D | mmu.h | 2 * linux/arch/unicore32/include/asm/mmu.h
|
/linux-4.1.27/arch/unicore32/include/uapi/asm/ |
H A D | unistd.h | 2 * linux/arch/unicore32/include/asm/unistd.h
|
/linux-4.1.27/arch/mips/boot/compressed/ |
H A D | string.c | 2 * arch/mips/boot/compressed/string.c
|
/linux-4.1.27/include/xen/interface/io/ |
H A D | protocols.h | 18 # error arch fixup needed here
|
/linux-4.1.27/arch/score/boot/ |
H A D | Makefile | 2 # arch/score/boot/Makefile
|