Uses of delta_tsc in arch/x86/kvm/vmx/vmx.c

In vmx_arm_hv_timer(), the pending deadline is converted into the 32-bit
tick count that is programmed into the VMX preemption timer:

  6451:	u32 delta_tsc;
  6460:		delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
  6463:		delta_tsc = 0;
  6465:	vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
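As a rough illustration of that arming step, here is a minimal, self-contained
user-space sketch. arm_hv_timer_value() is a hypothetical stand-in for the body
of vmx_arm_hv_timer(), and the ratio value 5 is an assumption for the example;
in the kernel, cpu_preemption_timer_multi is read from bits 0-4 of
MSR_IA32_VMX_MISC and tscl comes from rdtsc().

#include <stdint.h>
#include <stdio.h>

/* Assumed TSC-to-preemption-timer ratio for this sketch; a value of 5
 * means the preemption timer ticks once every 2^5 TSC cycles. */
static unsigned int cpu_preemption_timer_multi = 5;

/* Hypothetical stand-in for the arming logic in vmx_arm_hv_timer(). */
static uint32_t arm_hv_timer_value(uint64_t hv_deadline_tsc, uint64_t tscl)
{
	uint32_t delta_tsc;

	if (hv_deadline_tsc > tscl)
		/* vmx_set_hv_timer() ensures the shifted delta fits in 32 bits */
		delta_tsc = (uint32_t)((hv_deadline_tsc - tscl) >>
				       cpu_preemption_timer_multi);
	else
		/* deadline already passed: program an immediate exit */
		delta_tsc = 0;

	/* the kernel writes this value with
	 * vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc) */
	return delta_tsc;
}

int main(void)
{
	uint64_t tscl = 1000000;

	printf("%u\n", arm_hv_timer_value(tscl + (96 << 5), tscl)); /* 96 */
	printf("%u\n", arm_hv_timer_value(tscl - 1, tscl));         /* 0 */
	return 0;
}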
In vmx_set_hv_timer(), the guest's TSC deadline is turned into a host-TSC
delta, trimmed by the LAPIC timer-advance cycles, rescaled if TSC scaling
is active, and range-checked against the timer's 32-bit field:

  7205:	u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
  7215:	delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
  7219:	if (delta_tsc > lapic_timer_advance_cycles)
  7220:		delta_tsc -= lapic_timer_advance_cycles;
  7222:		delta_tsc = 0;
  7226:	    delta_tsc && u64_shl_div_u64(delta_tsc,
  7228:				vcpu->arch.tsc_scaling_ratio, &delta_tsc))
  7237:	if (delta_tsc >> (cpu_preemption_timer_multi + 32))
  7240:	vmx->hv_deadline_tsc = tscl + delta_tsc;
  7241:	*expired = !delta_tsc;
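This second site computes the deadline that the arming site consumes. Below is
a minimal sketch of that computation, not the kernel implementation:
set_hv_timer(), its flat parameter list, the scaling_enabled flag, and
TSC_RATIO_FRAC_BITS are all inventions of the example (48 fractional bits
matches what KVM uses for the VMX TSC multiplier), and u64_shl_div_u64() is
reimplemented portably with the GCC/Clang unsigned __int128 extension rather
than the kernel's inline divq.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed constants for illustration only. */
#define TSC_RATIO_FRAC_BITS	48
static unsigned int cpu_preemption_timer_multi = 5;

/* Portable stand-in for the kernel's u64_shl_div_u64():
 * *result = (a << shift) / divisor; returns nonzero if the quotient
 * does not fit in 64 bits. */
static int u64_shl_div_u64(uint64_t a, unsigned int shift,
			   uint64_t divisor, uint64_t *result)
{
	unsigned __int128 q = ((unsigned __int128)a << shift) / divisor;

	if (q > UINT64_MAX)
		return 1;
	*result = (uint64_t)q;
	return 0;
}

/* Simplified model of vmx_set_hv_timer(): derive the host-TSC deadline
 * from the guest's TSC-deadline value. */
static int set_hv_timer(uint64_t guest_deadline_tsc, uint64_t guest_tscl,
			uint64_t tscl, uint64_t lapic_timer_advance_cycles,
			int scaling_enabled, uint64_t tsc_scaling_ratio,
			uint64_t *hv_deadline_tsc, int *expired)
{
	/* like max() in the kernel code, this guards against a deadline
	 * that is already in the past (no unsigned underflow) */
	uint64_t delta_tsc = guest_deadline_tsc > guest_tscl ?
			     guest_deadline_tsc - guest_tscl : 0;

	/* fire early by the LAPIC timer-advance amount */
	if (delta_tsc > lapic_timer_advance_cycles)
		delta_tsc -= lapic_timer_advance_cycles;
	else
		delta_tsc = 0;

	/* convert guest cycles to host cycles when TSC scaling is on:
	 * host_delta = guest_delta * 2^frac_bits / ratio */
	if (scaling_enabled && delta_tsc &&
	    u64_shl_div_u64(delta_tsc, TSC_RATIO_FRAC_BITS,
			    tsc_scaling_ratio, &delta_tsc))
		return -ERANGE;

	/* the delta must still fit in 32 bits after the hardware shift,
	 * otherwise the preemption timer cannot represent the deadline */
	if (delta_tsc >> (cpu_preemption_timer_multi + 32))
		return -ERANGE;

	*hv_deadline_tsc = tscl + delta_tsc;
	*expired = !delta_tsc;	/* nothing left to wait for */
	return 0;
}

int main(void)
{
	uint64_t deadline;
	int expired;

	if (!set_hv_timer(2000, 1000, 5000, 100, 0, 0,
			  &deadline, &expired))
		printf("deadline=%llu expired=%d\n",
		       (unsigned long long)deadline, expired);	/* 5900, 0 */
	return 0;
}

In the kernel, either -ERANGE exit makes the caller fall back from the
hardware preemption timer to a software hrtimer; re-checking whether the
delta fits on every vmentry would be too costly.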