1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
17 */
18
19#include <linux/errno.h>
20#include <linux/err.h>
21#include <linux/kvm_host.h>
22#include <linux/module.h>
23#include <linux/vmalloc.h>
24#include <linux/fs.h>
25#include <asm/cputype.h>
26#include <asm/uaccess.h>
27#include <asm/kvm.h>
28#include <asm/kvm_asm.h>
29#include <asm/kvm_emulate.h>
30#include <asm/kvm_coproc.h>
31
/*
 * Helpers for declaring debugfs statistics entries: each expands to the
 * counter's name, its offset inside struct kvm (VM-wide) or struct
 * kvm_vcpu (per-vcpu), and the matching KVM_STAT_* tag.
 */
#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

/* No statistics are exported yet; the table holds only the NULL sentinel. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};
38
/*
 * Arch-specific vcpu setup hook called by generic KVM code.
 * Nothing extra is needed on ARM, so this always succeeds.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}
43
44static u64 core_reg_offset_from_id(u64 id)
45{
46	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
47}
48
49static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
50{
51	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
52	struct kvm_regs *regs = &vcpu->arch.regs;
53	u64 off;
54
55	if (KVM_REG_SIZE(reg->id) != 4)
56		return -ENOENT;
57
58	/* Our ID is an index into the kvm_regs struct. */
59	off = core_reg_offset_from_id(reg->id);
60	if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
61		return -ENOENT;
62
63	return put_user(((u32 *)regs)[off], uaddr);
64}
65
/*
 * Write one 32-bit core register, identified by reg->id, from userspace
 * into vcpu->arch.regs.  A write to the CPSR is only accepted when it
 * selects one of the processor modes listed in the switch below.
 *
 * Returns 0 on success, -ENOENT for an ill-sized or out-of-range ID,
 * -EFAULT on a bad user pointer, -EINVAL for a disallowed CPSR mode.
 */
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
	struct kvm_regs *regs = &vcpu->arch.regs;
	u64 off, val;

	/* Core registers are only exposed as 32-bit quantities. */
	if (KVM_REG_SIZE(reg->id) != 4)
		return -ENOENT;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
		return -ENOENT;

	if (get_user(val, uaddr) != 0)
		return -EFAULT;

	if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
		/* Only accept the modes below; any other mode value,
		 * userspace-supplied, is rejected. */
		unsigned long mode = val & MODE_MASK;
		switch (mode) {
		case USR_MODE:
		case FIQ_MODE:
		case IRQ_MODE:
		case SVC_MODE:
		case ABT_MODE:
		case UND_MODE:
			break;
		default:
			return -EINVAL;
		}
	}

	((u32 *)regs)[off] = val;
	return 0;
}
101
/*
 * Legacy KVM_GET_REGS is not supported on ARM; register access goes
 * through the ONE_REG interface implemented in this file.
 */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
106
/*
 * Legacy KVM_SET_REGS is not supported on ARM; register access goes
 * through the ONE_REG interface implemented in this file.
 */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
111
112#define NUM_TIMER_REGS 3
113
114static bool is_timer_reg(u64 index)
115{
116	switch (index) {
117	case KVM_REG_ARM_TIMER_CTL:
118	case KVM_REG_ARM_TIMER_CNT:
119	case KVM_REG_ARM_TIMER_CVAL:
120		return true;
121	}
122	return false;
123}
124
125static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
126{
127	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
128		return -EFAULT;
129	uindices++;
130	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
131		return -EFAULT;
132	uindices++;
133	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
134		return -EFAULT;
135
136	return 0;
137}
138
139static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
140{
141	void __user *uaddr = (void __user *)(long)reg->addr;
142	u64 val;
143	int ret;
144
145	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
146	if (ret != 0)
147		return -EFAULT;
148
149	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
150}
151
152static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
153{
154	void __user *uaddr = (void __user *)(long)reg->addr;
155	u64 val;
156
157	val = kvm_arm_timer_get_reg(vcpu, reg->id);
158	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
159}
160
161static unsigned long num_core_regs(void)
162{
163	return sizeof(struct kvm_regs) / sizeof(u32);
164}
165
/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 * @vcpu: the vcpu being queried
 *
 * This is for all registers: the core registers, the timer registers
 * and the coprocessor registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
		+ NUM_TIMER_REGS;
}
176
/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 * @vcpu:     the vcpu whose register IDs to enumerate
 * @uindices: userspace array receiving the register IDs
 *
 * We do core registers right here, then the timer registers, then we
 * append the coproc regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
	int ret;

	/* One u32-sized core register ID per u32 slot in struct kvm_regs. */
	for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
		if (put_user(core_reg | i, uindices))
			return -EFAULT;
		uindices++;
	}

	ret = copy_timer_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_coproc_indices(vcpu, uindices);
}
201
202int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
203{
204	/* We currently use nothing arch-specific in upper 32 bits */
205	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
206		return -EINVAL;
207
208	/* Register group 16 means we want a core register. */
209	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
210		return get_core_reg(vcpu, reg);
211
212	if (is_timer_reg(reg->id))
213		return get_timer_reg(vcpu, reg);
214
215	return kvm_arm_coproc_get_reg(vcpu, reg);
216}
217
218int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
219{
220	/* We currently use nothing arch-specific in upper 32 bits */
221	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
222		return -EINVAL;
223
224	/* Register group 16 means we set a core register. */
225	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
226		return set_core_reg(vcpu, reg);
227
228	if (is_timer_reg(reg->id))
229		return set_timer_reg(vcpu, reg);
230
231	return kvm_arm_coproc_set_reg(vcpu, reg);
232}
233
/* KVM_GET_SREGS is not supported on ARM. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}
239
/* KVM_SET_SREGS is not supported on ARM. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}
245
246int __attribute_const__ kvm_target_cpu(void)
247{
248	switch (read_cpuid_part()) {
249	case ARM_CPU_PART_CORTEX_A7:
250		return KVM_ARM_TARGET_CORTEX_A7;
251	case ARM_CPU_PART_CORTEX_A15:
252		return KVM_ARM_TARGET_CORTEX_A15;
253	default:
254		return -EINVAL;
255	}
256}
257
258int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
259{
260	int target = kvm_target_cpu();
261
262	if (target < 0)
263		return -ENODEV;
264
265	memset(init, 0, sizeof(*init));
266
267	/*
268	 * For now, we don't return any features.
269	 * In future, we might use features to return target
270	 * specific features available for the preferred
271	 * target type.
272	 */
273	init->target = (__u32)target;
274
275	return 0;
276}
277
/* KVM_GET_FPU is not supported on ARM. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}
282
/* KVM_SET_FPU is not supported on ARM. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}
287
/* KVM_TRANSLATE is not supported on ARM. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}
293