/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <trace/events/kvm.h>

#include "trace.h"

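/*
 * Partition-wide MSRs are backed by per-VM state (struct kvm_hv) and
 * serialized by kvm->lock; all other Hyper-V MSRs are per-vCPU and live
 * in struct kvm_vcpu_hv.
 */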
static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
		r = true;
		break;
	}

	return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	*pdata = hv->hv_crash_param[index];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (host)
		hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

	if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
			   hv->hv_crash_param[0],
			   hv->hv_crash_param[1],
			   hv->hv_crash_param[2],
			   hv->hv_crash_param[3],
			   hv->hv_crash_param[4]);

		/* Send notification about crash to user space */
		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
	}

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 data)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	hv->hv_crash_param[index] = data;
	return 0;
}

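/*
 * Handle writes to the partition-wide MSRs; the caller
 * (kvm_hv_set_msr_common) holds kvm->lock.
 */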
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
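		/*
		 * patch_hypercall() emits the vendor-specific 3-byte
		 * VMCALL/VMMCALL sequence; append RET so the guest can
		 * simply CALL into the hypercall page.
		 */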
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		instructions[3] = 0xc3;	/* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		hv->hv_hypercall = data;
		mark_page_dirty(kvm, gfn);
		break;
	}
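	/*
	 * Only a zeroed reference TSC page is exposed here; a zero
	 * TscSequence is treated as invalid by guests, which then fall
	 * back to other clocksources (e.g. HV_X64_MSR_TIME_REF_COUNT).
	 */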
	case HV_X64_MSR_REFERENCE_TSC: {
		u64 gfn;
		HV_REFERENCE_TSC_PAGE tsc_ref;

		memset(&tsc_ref, 0, sizeof(tsc_ref));
		hv->hv_tsc_page = data;
		if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
			break;
		gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
		if (kvm_write_guest(
				kvm,
				gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
				&tsc_ref, sizeof(tsc_ref)))
			return 1;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	cputime_t utime, stime;

	task_cputime_adjusted(current, &utime, &stime);
	return div_u64(cputime_to_nsecs(utime + stime), 100);
}

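/*
 * Handle writes to the per-vCPU Hyper-V MSRs (synthetic APIC MSRs, the
 * APIC assist page and VP runtime); no partition-wide lock is taken on
 * this path.
 */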
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			hv->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		if (__clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		hv->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
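	/*
	 * VP_RUNTIME is read-only for the guest; only the host may write
	 * it, e.g. to restore the accumulated runtime after migration.
	 */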
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv->runtime_offset = data - current_task_runtime_100ns();
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
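	/*
	 * The partition reference counter runs in 100ns units; scale the
	 * kernel's nanosecond clock accordingly.
	 */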
	case HV_X64_MSR_TIME_REF_COUNT:
		data = div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset,
			       100);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
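	/*
	 * The VP index is simply the vcpu's position in the vcpus array;
	 * Hyper-V guests use it to identify virtual processors.
	 */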
	case HV_X64_MSR_VP_INDEX: {
		int r;
		struct kvm_vcpu *v;

		kvm_for_each_vcpu(r, v, vcpu->kvm) {
			if (v == vcpu) {
				data = r;
				break;
			}
		}
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_APIC_ASSIST_PAGE:
		data = hv->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv->runtime_offset;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

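/*
 * Common entry points: partition-wide MSRs are dispatched under
 * kvm->lock, per-vCPU MSRs without it.
 */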
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else {
		return kvm_hv_set_msr(vcpu, msr, data, host);
	}
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else {
		return kvm_hv_get_msr(vcpu, msr, pdata);
	}
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

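/*
 * Handle a guest-issued hypercall: decode the calling-convention
 * registers, dispatch on the call code and write the result back.
 */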
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	u16 code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;

	/*
	 * Per the Hyper-V spec, a hypercall issued from CPL > 0 or from
	 * real mode generates #UD.
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	longmode = is_64_bit_mode(vcpu);

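	/*
	 * Hypercall ABI: in 32-bit mode the input value, input GPA and
	 * output GPA are passed in EDX:EAX, EBX:ECX and EDI:ESI; in long
	 * mode they are passed in RCX, RDX and R8.
	 */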
	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

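	/*
	 * Hypercall input value layout: bits 15:0 call code, bit 16 fast
	 * flag, bits 43:32 rep count, bits 59:48 rep start index.
	 */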
	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
		kvm_vcpu_on_spin(vcpu);
		break;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

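	/*
	 * Hypercall result value layout: bits 15:0 status code, bits
	 * 43:32 number of reps completed.
	 */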
	ret = res | (((u64)rep_done & 0xfff) << 32);
	if (longmode) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	} else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
	}

	return 1;
}