Searched refs:sie_block (Results 1 – 14 of 14) sorted by relevance
/linux-4.4.14/arch/s390/kvm/
D | intercept.c |
    43   struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;   in kvm_s390_rewind_psw() local
    46   if (sie_block->icptstatus & 1) {   in kvm_s390_rewind_psw()
    47   ilc = (sie_block->icptstatus >> 4) & 0x6;   in kvm_s390_rewind_psw()
    51   sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilc);   in kvm_s390_rewind_psw()
    56   switch (vcpu->arch.sie_block->icptcode) {   in handle_noop()
    105  int viwhy = vcpu->arch.sie_block->ipb >> 16;   in handle_validity()
    119  vcpu->arch.sie_block->ipa,   in handle_instruction()
    120  vcpu->arch.sie_block->ipb);   in handle_instruction()
    121  handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];   in handle_instruction()
    131  pgm_info->code = vcpu->arch.sie_block->iprcc;   in __extract_prog_irq()
    [all …]

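kvm_s390_rewind_psw() steps the guest PSW back over the intercepted instruction: bit 0 of icptstatus flags the instruction-length code (ILC) as valid, and bits 4-5 carry the ILC pre-scaled to a byte count of 0, 2, 4 or 6, which is subtracted from gpsw.addr. A minimal standalone sketch of that arithmetic (struct sie_sketch is a stand-in for kvm_s390_sie_block, and the plain 64-bit subtraction is a simplification; the kernel's __rewind_psw() also handles 24- and 31-bit addressing-mode wrap):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the two kvm_s390_sie_block fields used here. */
    struct sie_sketch {
            uint8_t icptstatus;                    /* interception status */
            struct { uint64_t mask, addr; } gpsw;  /* guest PSW */
    };

    static uint64_t rewind_psw_addr(const struct sie_sketch *sb)
    {
            unsigned int ilc = 0;

            if (sb->icptstatus & 1)                     /* ILC field valid? */
                    ilc = (sb->icptstatus >> 4) & 0x6;  /* 0, 2, 4 or 6 bytes */
            return sb->gpsw.addr - ilc;                 /* assumes 64-bit mode */
    }

    int main(void)
    {
            struct sie_sketch sb = { .icptstatus = 0x41, .gpsw = { 0, 0x2004 } };

            printf("rewound to %#llx\n",            /* prints 0x2000 */
                   (unsigned long long)rewind_psw_addr(&sb));
            return 0;
    }
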
D | guestdbg.c |
    65   u64 *cr9 = &vcpu->arch.sie_block->gcr[9];   in enable_all_hw_bp()
    66   u64 *cr10 = &vcpu->arch.sie_block->gcr[10];   in enable_all_hw_bp()
    67   u64 *cr11 = &vcpu->arch.sie_block->gcr[11];   in enable_all_hw_bp()
    105  u64 *cr9 = &vcpu->arch.sie_block->gcr[9];   in enable_all_hw_wp()
    106  u64 *cr10 = &vcpu->arch.sie_block->gcr[10];   in enable_all_hw_wp()
    107  u64 *cr11 = &vcpu->arch.sie_block->gcr[11];   in enable_all_hw_wp()
    135  vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];   in kvm_s390_backup_guest_per_regs()
    136  vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];   in kvm_s390_backup_guest_per_regs()
    137  vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];   in kvm_s390_backup_guest_per_regs()
    138  vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];   in kvm_s390_backup_guest_per_regs()
    [all …]

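The guestdbg.c hits show a save-modify-restore pattern: before the host installs its own hardware breakpoints and watchpoints into the shadowed PER control registers (CR9-CR11, plus CR0), kvm_s390_backup_guest_per_regs() stashes the guest's values so they can be put back when debugging is torn down. A sketch of that pattern (struct per_backup stands in for the vcpu->arch.guestdbg fields named in the excerpt):

    #include <assert.h>
    #include <stdint.h>

    struct per_backup { uint64_t cr0, cr9, cr10, cr11; };

    /* Save the guest's PER-related control registers ... */
    static void backup_guest_per_regs(struct per_backup *bk, const uint64_t gcr[16])
    {
            bk->cr0  = gcr[0];
            bk->cr9  = gcr[9];
            bk->cr10 = gcr[10];
            bk->cr11 = gcr[11];
    }

    /* ... and restore them once host debugging is disabled again. */
    static void restore_guest_per_regs(const struct per_backup *bk, uint64_t gcr[16])
    {
            gcr[0]  = bk->cr0;
            gcr[9]  = bk->cr9;
            gcr[10] = bk->cr10;
            gcr[11] = bk->cr11;
    }

    int main(void)
    {
            uint64_t gcr[16] = { [0] = 1, [9] = 9, [10] = 10, [11] = 11 };
            struct per_backup bk;

            backup_guest_per_regs(&bk, gcr);
            gcr[9] = 0xdead;                   /* host installs its own BPs */
            restore_guest_per_regs(&bk, gcr);
            assert(gcr[9] == 9);
            return 0;
    }
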
D | kvm-s390.h |
    26   #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10))
    28   #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
    47   d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
    53   return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;   in is_vcpu_stopped()
    70   return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;   in kvm_s390_get_prefix()
    77   vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;   in kvm_s390_set_prefix()
    86   u32 base2 = vcpu->arch.sie_block->ipb >> 28;   in kvm_s390_get_base_disp_s()
    87   u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);   in kvm_s390_get_base_disp_s()
    99   u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;   in kvm_s390_get_base_disp_sse()
    100  u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;   in kvm_s390_get_base_disp_sse()
    [all …]

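kvm_s390_get_base_disp_s() is the canonical S-format operand decode: the base register number sits in the top nibble of the SIE block's ipb word and the 12-bit displacement in bits 16-27, with base register 0 architecturally meaning "no base". A standalone sketch of the decode (gprs[] stands in for vcpu->run->s.regs.gprs):

    #include <stdint.h>
    #include <stdio.h>

    /* S-format effective address D2(B2), decoded from ipb exactly as in
     * kvm_s390_get_base_disp_s(). */
    static uint64_t base_disp_s(uint32_t ipb, const uint64_t gprs[16])
    {
            uint32_t base2 = ipb >> 28;
            uint32_t disp2 = (ipb & 0x0fff0000) >> 16;

            /* Base register 0 means "no base" in s390 addressing. */
            return (base2 ? gprs[base2] : 0) + disp2;
    }

    int main(void)
    {
            uint64_t gprs[16] = { 0 };

            gprs[5] = 0x10000;
            /* base2 = 5, disp2 = 0x123  ->  prints 0x10123 */
            printf("%#llx\n", (unsigned long long)base_disp_s(0x51230000u, gprs));
            return 0;
    }
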
D | priv.c |
    40   if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)   in handle_set_clock()
    66   if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)   in handle_set_prefix()
    104  if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)   in handle_store_prefix()
    134  if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)   in handle_store_cpu_address()
    154  if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))   in __skey_check_enable()
    160  vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);   in __skey_check_enable()
    173  if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)   in handle_skey()
    184  if (psw_bits(vcpu->arch.sie_block->gpsw).p)   in handle_ipte_interlock()
    197  if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)   in handle_test_block()
    233  inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);   in handle_tpi()
    [all …]

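Nearly every handler in priv.c opens with the same guard: if the problem-state bit is set in the guest PSW mask, the privileged instruction was issued from guest user space, so a privileged-operation program exception is injected instead of emulating it. A sketch of the predicate (the PSW_MASK_PSTATE constant below matches the architected bit position but should be treated as an assumption; the injection call in the comment is the pattern these handlers use):

    #include <stdbool.h>
    #include <stdint.h>

    /* Problem-state bit of the s390 PSW mask (assumed value). */
    #define PSW_MASK_PSTATE 0x0001000000000000ULL

    static bool guest_in_problem_state(uint64_t gpsw_mask)
    {
            return (gpsw_mask & PSW_MASK_PSTATE) != 0;
    }

    /* Handler pattern from the excerpt (sketch):
     *
     *     if (guest_in_problem_state(vcpu->arch.sie_block->gpsw.mask))
     *             return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
     *     ... emulate the privileged instruction ...
     */

    int main(void)
    {
            /* Supervisor state: emulation may proceed. */
            return guest_in_problem_state(0) ? 1 : 0;
    }
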
D | kvm-s390.c |
    160   vcpu->arch.sie_block->epoch -= *delta;   in kvm_clock_sync()
    1195  (__u64) vcpu->arch.sie_block)   in kvm_arch_vcpu_destroy()
    1205  free_page((unsigned long)(vcpu->arch.sie_block));   in kvm_arch_vcpu_destroy()
    1290  atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);   in kvm_arch_vcpu_load()
    1295  atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);   in kvm_arch_vcpu_put()
    1313  vcpu->arch.sie_block->gpsw.mask = 0UL;   in kvm_s390_vcpu_initial_reset()
    1314  vcpu->arch.sie_block->gpsw.addr = 0UL;   in kvm_s390_vcpu_initial_reset()
    1316  vcpu->arch.sie_block->cputm = 0UL;   in kvm_s390_vcpu_initial_reset()
    1317  vcpu->arch.sie_block->ckc = 0UL;   in kvm_s390_vcpu_initial_reset()
    1318  vcpu->arch.sie_block->todpr = 0;   in kvm_s390_vcpu_initial_reset()
    [all …]

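The kvm_clock_sync() hit is worth a second look: the guest-visible TOD clock is the host TOD plus the per-vcpu epoch stored in the SIE block, so when the host steers its clock by some delta, subtracting the same delta from every epoch keeps the guest clock steady. A toy illustration of that invariant (all names are stand-ins, not the kernel's):

    #include <assert.h>
    #include <stdint.h>

    /* Guest TOD = host TOD + per-vcpu epoch offset (sketch). */
    static uint64_t guest_tod(uint64_t host_tod, uint64_t epoch)
    {
            return host_tod + epoch;
    }

    int main(void)
    {
            uint64_t host_tod = 1000, epoch = 500, delta = 42;
            uint64_t before = guest_tod(host_tod, epoch);

            host_tod += delta;   /* host clock steered forward ... */
            epoch    -= delta;   /* ... so the sync handler compensates */

            assert(guest_tod(host_tod, epoch) == before);
            return 0;
    }
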
D | interrupt.c |
    39   return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);   in psw_extint_disabled()
    44   return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);   in psw_ioint_disabled()
    49   return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);   in psw_mchk_disabled()
    62   !(vcpu->arch.sie_block->gcr[0] & 0x800ul))   in ckc_interrupts_enabled()
    72   if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))   in ckc_irq_pending()
    80   (vcpu->arch.sie_block->gcr[0] & 0x400ul);   in cpu_timer_interrupts_enabled()
    85   return (vcpu->arch.sie_block->cputm >> 63) &&   in cpu_timer_irq_pending()
    117  if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))   in disable_iscs()
    137  if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))   in deliverable_irqs()
    139  if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))   in deliverable_irqs()
    [all …]

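These predicates all gate interrupt delivery on two levels: a class mask bit in the guest PSW (external, I/O, machine check) and a subclass enable bit in control register 0, e.g. 0x800 for the clock comparator and 0x400 for the CPU timer. A sketch of the clock-comparator pair (the PSW_MASK_EXT value is an assumption of the architected bit; 0x800 is taken from the excerpt):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PSW_MASK_EXT    0x0100000000000000ULL  /* assumed bit position */
    #define CR0_CKC_SUBMASK 0x800ULL               /* from the excerpt */

    /* Mirrors ckc_interrupts_enabled(): both gates must be open. */
    static bool ckc_interrupts_enabled(uint64_t gpsw_mask, uint64_t gcr0)
    {
            return (gpsw_mask & PSW_MASK_EXT) && (gcr0 & CR0_CKC_SUBMASK);
    }

    /* Mirrors ckc_irq_pending(): pending once the comparator has elapsed. */
    static bool ckc_irq_pending(uint64_t ckc, uint64_t tod,
                                uint64_t gpsw_mask, uint64_t gcr0)
    {
            return ckc_interrupts_enabled(gpsw_mask, gcr0) && ckc < tod;
    }

    int main(void)
    {
            assert(!ckc_irq_pending(100, 50, PSW_MASK_EXT, CR0_CKC_SUBMASK));
            assert(ckc_irq_pending(100, 200, PSW_MASK_EXT, CR0_CKC_SUBMASK));
            return 0;
    }
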
D | gaccess.c |
    264  if (vcpu->arch.sie_block->eca & 1)   in ipte_lock_held()
    345  if (vcpu->arch.sie_block->eca & 1)   in ipte_lock()
    353  if (vcpu->arch.sie_block->eca & 1)   in ipte_unlock()
    377  asce->val = vcpu->arch.sie_block->gcr[1];   in ar_translation()
    380  asce->val = vcpu->arch.sie_block->gcr[7];   in ar_translation()
    388  ald_addr = vcpu->arch.sie_block->gcr[5];   in ar_translation()
    390  ald_addr = vcpu->arch.sie_block->gcr[2];   in ar_translation()
    423  eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;   in ar_translation()
    467  psw_t *psw = &vcpu->arch.sie_block->gpsw;   in get_vcpu_asce()
    482  switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {   in get_vcpu_asce()
    [all …]

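get_vcpu_asce() chooses the address-space control element for guest address translation from the PSW's two AS bits: primary space uses CR1, secondary uses CR7, and access-register mode resolves an access list anchored in CR5 or CR2, as ar_translation() does above. A hedged sketch of the selection (the AS encodings and the CR13 home-space case are architectural assumptions not visible in the excerpt):

    #include <stdint.h>
    #include <stdio.h>

    enum psw_as { AS_PRIMARY = 0, AS_ACCREG = 1, AS_SECONDARY = 2, AS_HOME = 3 };

    /* Pick the ASCE source register, mirroring get_vcpu_asce(). AR mode
     * really needs the full access-list walk of ar_translation(). */
    static uint64_t select_asce(enum psw_as as, const uint64_t gcr[16])
    {
            switch (as) {
            case AS_PRIMARY:   return gcr[1];
            case AS_SECONDARY: return gcr[7];
            case AS_HOME:      return gcr[13];   /* assumption */
            case AS_ACCREG:    /* ALD in gcr[5] or gcr[2]; elided */
            default:           return 0;
            }
    }

    int main(void)
    {
            uint64_t gcr[16] = { [1] = 0x1000, [7] = 0x7000, [13] = 0xd000 };

            /* prints 0x7000 */
            printf("%#llx\n", (unsigned long long)select_asce(AS_SECONDARY, gcr));
            return 0;
    }
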
D | diag.c |
    28   start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];   in diag_release_pages()
    29   end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;   in diag_release_pages()
    75   u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;   in __diag_page_ref_service()
    76   u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);   in __diag_page_ref_service()
    163  tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];   in __diag_time_slice_end_directed()
    181  unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;   in __diag_ipl_functions()
    244  if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)   in kvm_s390_handle_diag()

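DIAGNOSE carries its two register numbers as nibbles in the low byte of ipa; diag_release_pages() and __diag_page_ref_service() decode them identically. A standalone sketch of the decode (gprs[] stands in for vcpu->run->s.regs.gprs):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the rx/ry register numbers from the low byte of ipa and
     * fetch their contents, as in diag_release_pages(). */
    static void diag_operands(uint16_t ipa, const uint64_t gprs[16],
                              uint64_t *start, uint64_t *end)
    {
            unsigned int rx = (ipa & 0xf0) >> 4;
            unsigned int ry = ipa & 0x0f;

            *start = gprs[rx];
            *end   = gprs[ry] + 4096;   /* one past the last 4 KiB page */
    }

    int main(void)
    {
            uint64_t gprs[16] = { 0 }, start, end;

            gprs[2] = 0x20000;
            gprs[3] = 0x24000;
            diag_operands(0x0023, gprs, &start, &end);   /* rx = 2, ry = 3 */
            printf("%#llx..%#llx\n", (unsigned long long)start,
                   (unsigned long long)end);
            return 0;
    }
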
D | trace-s390.h |
    43   struct kvm_s390_sie_block *sie_block),
    44   TP_ARGS(id, vcpu, sie_block),
    49   __field(struct kvm_s390_sie_block *, sie_block)
    55   __entry->sie_block = sie_block;
    59   __entry->vcpu, __entry->sie_block)

D | sigp.c |
    82   flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);   in __sigp_conditional_emergency()
    83   psw = &dst_vcpu->arch.sie_block->gpsw;   in __sigp_conditional_emergency()
    84   p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */   in __sigp_conditional_emergency()
    85   s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */   in __sigp_conditional_emergency()
    419  int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;   in kvm_s390_handle_sigp()
    420  int r3 = vcpu->arch.sie_block->ipa & 0x000f;   in kvm_s390_handle_sigp()
    427  if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)   in kvm_s390_handle_sigp()
    469  int r3 = vcpu->arch.sie_block->ipa & 0x000f;   in kvm_s390_handle_sigp_pei()

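__sigp_conditional_emergency() compares the SIGP parameter against the destination CPU's address-space numbers, which live in the low halfwords of CR4 (primary ASN) and CR3 (secondary ASN). A short sketch of just the extraction (the surrounding acceptance logic, which also looks at cpuflags and the PSW, is elided):

    #include <assert.h>
    #include <stdint.h>

    /* Low halfword of CR4 / CR3 holds the primary / secondary ASN. */
    static void extract_asns(const uint64_t gcr[16],
                             uint16_t *p_asn, uint16_t *s_asn)
    {
            *p_asn = gcr[4] & 0xffff;
            *s_asn = gcr[3] & 0xffff;
    }

    int main(void)
    {
            uint64_t gcr[16] = { [3] = 0xabcd1234, [4] = 0xabcd5678 };
            uint16_t p, s;

            extract_asns(gcr, &p, &s);
            assert(p == 0x5678 && s == 0x1234);
            return 0;
    }
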
D | trace.h |
    26   __entry->pswmask = vcpu->arch.sie_block->gpsw.mask; \
    27   __entry->pswaddr = vcpu->arch.sie_block->gpsw.addr; \

D | gaccess.h |
    58   psw_t *psw = &vcpu->arch.sie_block->gpsw;   in kvm_s390_logical_to_effective()

/linux-4.4.14/arch/s390/kernel/
D | perf_event.c |
    49   static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)   in sie_block() function
    72   return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;   in guest_is_user_mode()
    77   return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN;   in instruction_pointer_guest()

/linux-4.4.14/arch/s390/include/asm/
D | kvm_host.h |
    198  struct kvm_s390_sie_block sie_block;   member
    506  struct kvm_s390_sie_block *sie_block;   member
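The two kvm_host.h hits summarize the ownership model: one structure embeds kvm_s390_sie_block by value (in 4.4 this is struct sie_page, which keeps the hardware-visible block page-aligned), while the per-vcpu arch state at line 506 holds only a pointer into it. A layout sketch under those assumptions (everything except the kvm_s390_sie_block name is a stand-in):

    #include <stdint.h>
    #include <stdlib.h>

    struct kvm_s390_sie_block {
            uint64_t epoch;   /* stand-in; the real block is far larger */
    };

    /* Page-aligned backing object embedding the block by value. */
    struct sie_page_sketch {
            struct kvm_s390_sie_block sie_block;
            /* ... itdb and other per-page data ... */
    };

    /* Per-vcpu arch state keeps only a pointer. */
    struct vcpu_arch_sketch {
            struct kvm_s390_sie_block *sie_block;
    };

    int main(void)
    {
            /* The kernel allocates a whole page so SIE sees an aligned block. */
            struct sie_page_sketch *page = aligned_alloc(4096, 4096);
            struct vcpu_arch_sketch arch;

            if (!page)
                    return 1;
            arch.sie_block = &page->sie_block;
            arch.sie_block->epoch = 0;
            free(page);
            return 0;
    }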