/*
 * Copyright (C) 2011. Freescale Inc. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 *
 * Hypercall handling for running PAPR guests in PR KVM on Book 3S
 * processors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/anon_inodes.h>

#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#define HPTE_SIZE	16		/* bytes per HPT entry */

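/*
 * Compute the guest real address of the PTEG containing @pte_index,
 * using the HPT base and size encoded in the guest's SDR1.
 */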
static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	unsigned long pteg_addr;

	pte_index <<= 4;
	pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
	pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg_addr |= pte_index;

	return pteg_addr;
}

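/*
 * H_ENTER: insert an HPTE into the guest's hash page table.  With H_EXACT
 * the entry must go into slot (pte_index & 7); otherwise the first invalid
 * slot in the PTEG is used.  The slot actually used is returned in r4.
 */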
static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
	long flags = kvmppc_get_gpr(vcpu, 4);
	long pte_index = kvmppc_get_gpr(vcpu, 5);
	__be64 pteg[2 * 8];
	__be64 *hpte;
	unsigned long pteg_addr, i;
	long int ret;

	i = pte_index & 7;
	pte_index &= ~7UL;
	pteg_addr = get_pteg_addr(vcpu, pte_index);

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	/* Fail the hcall if the guest HPT cannot be accessed */
	ret = H_FUNCTION;
	if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
		goto done;
	hpte = pteg;

	ret = H_PTEG_FULL;
	if (likely((flags & H_EXACT) == 0)) {
		for (i = 0; ; ++i) {
			if (i == 8)
				goto done;
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0)
				break;
			hpte += 2;
		}
	} else {
		hpte += i * 2;
		if (be64_to_cpu(*hpte) & HPTE_V_VALID)
			goto done;
	}

	hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
	hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
	pteg_addr += i * HPTE_SIZE;
	ret = H_FUNCTION;
	if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
		goto done;
	kvmppc_set_gpr(vcpu, 4, pte_index | i);
	ret = H_SUCCESS;

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

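/*
 * H_REMOVE: invalidate an HPTE, optionally checking its AVPN (H_AVPN) or
 * an AND condition (H_ANDCOND) first, and flush the stale translation.
 * The old HPTE is returned in r4/r5.
 */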
static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
{
	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long v = 0, pteg, rb;
	unsigned long pte[2];
	long int ret;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	ret = H_FUNCTION;
	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
		goto done;
	pte[0] = be64_to_cpu((__force __be64)pte[0]);
	pte[1] = be64_to_cpu((__force __be64)pte[1]);

	ret = H_NOT_FOUND;
	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
		goto done;

	ret = H_FUNCTION;
	if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
		goto done;

	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);

	ret = H_SUCCESS;
	kvmppc_set_gpr(vcpu, 4, pte[0]);
	kvmppc_set_gpr(vcpu, 5, pte[1]);

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

/* Request defs for kvmppc_h_pr_bulk_remove() */
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL
#define H_BULK_REMOVE_MAX_BATCH        4

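/*
 * H_BULK_REMOVE: process up to H_BULK_REMOVE_MAX_BATCH translation
 * specifiers (tsh/tsl pairs starting at r4), invalidating each matching
 * HPTE and writing a per-entry response code back into the high bits of tsh.
 */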
static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
{
	int i;
	int paramnr = 4;
	int ret = H_SUCCESS;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
		unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
		unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
		unsigned long pteg, rb, flags;
		unsigned long pte[2];
		unsigned long v = 0;

		if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
			break; /* Exit success */
		} else if ((tsh & H_BULK_REMOVE_TYPE) !=
			   H_BULK_REMOVE_REQUEST) {
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
		tsh |= H_BULK_REMOVE_RESPONSE;

		if ((tsh & H_BULK_REMOVE_ANDCOND) &&
		    (tsh & H_BULK_REMOVE_AVPN)) {
			tsh |= H_BULK_REMOVE_PARM;
			kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
		if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
			ret = H_FUNCTION;
			break;
		}
		pte[0] = be64_to_cpu((__force __be64)pte[0]);
		pte[1] = be64_to_cpu((__force __be64)pte[1]);

		/* tsl = AVPN */
		flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;

		if ((pte[0] & HPTE_V_VALID) == 0 ||
		    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
		    ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
			tsh |= H_BULK_REMOVE_NOT_FOUND;
		} else {
			/* Splat the pteg in (userland) hpt */
			if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
				ret = H_FUNCTION;
				break;
			}

			rb = compute_tlbie_rb(pte[0], pte[1],
					      tsh & H_BULK_REMOVE_PTEX);
			vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
			tsh |= H_BULK_REMOVE_SUCCESS;
			tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
		}
		kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
	}
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

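/*
 * H_PROTECT: update the protection and key bits (pp, N, key) of an HPTE
 * and flush the old translation.
 */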
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long rb, pteg, r, v;
	unsigned long pte[2];
	long int ret;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	ret = H_FUNCTION;
	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
		goto done;
	pte[0] = be64_to_cpu((__force __be64)pte[0]);
	pte[1] = be64_to_cpu((__force __be64)pte[1]);

	ret = H_NOT_FOUND;
	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
		goto done;

	v = pte[0];
	r = pte[1];
	r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
	       HPTE_R_KEY_LO);
	r |= (flags << 55) & HPTE_R_PP0;
	r |= (flags << 48) & HPTE_R_KEY_HI;
	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	pte[1] = r;

	rb = compute_tlbie_rb(v, r, pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
	pte[0] = (__force u64)cpu_to_be64(pte[0]);
	pte[1] = (__force u64)cpu_to_be64(pte[1]);
	ret = H_FUNCTION;
	if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
		goto done;
	ret = H_SUCCESS;

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

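/*
 * The following hcalls are forwarded to the common in-kernel handlers.
 * H_TOO_HARD from a handler means the hcall could not be completed here,
 * so fail the emulation and let the caller deal with it.
 */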
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
	long rc;

	rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
{
	long rc;

	rc = kvmppc_h_logical_ci_load(vcpu);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
{
	long rc;

	rc = kvmppc_h_logical_ci_store(vcpu);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
	long rc = kvmppc_xics_hcall(vcpu, cmd);
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

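/*
 * Top-level PAPR hypercall dispatcher for PR KVM.  Returns EMULATE_DONE
 * if the hcall was handled here, or EMULATE_FAIL if it has to be handled
 * elsewhere (e.g. by userspace).
 */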
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
	int rc, idx;

	if (cmd <= MAX_HCALL_OPCODE &&
	    !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls))
		return EMULATE_FAIL;

	switch (cmd) {
	case H_ENTER:
		return kvmppc_h_pr_enter(vcpu);
	case H_REMOVE:
		return kvmppc_h_pr_remove(vcpu);
	case H_PROTECT:
		return kvmppc_h_pr_protect(vcpu);
	case H_BULK_REMOVE:
		return kvmppc_h_pr_bulk_remove(vcpu);
	case H_PUT_TCE:
		return kvmppc_h_pr_put_tce(vcpu);
	case H_CEDE:
		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		vcpu->stat.halt_wakeup++;
		return EMULATE_DONE;
	case H_LOGICAL_CI_LOAD:
		return kvmppc_h_pr_logical_ci_load(vcpu);
	case H_LOGICAL_CI_STORE:
		return kvmppc_h_pr_logical_ci_store(vcpu);
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu))
			return kvmppc_h_pr_xics_hcall(vcpu, cmd);
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			break;
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (rc)
			break;
		kvmppc_set_gpr(vcpu, 3, 0);
		return EMULATE_DONE;
	}

	return EMULATE_FAIL;
}

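/* Return 1 if @cmd is an hcall that PR KVM can handle in the kernel. */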
int kvmppc_hcall_impl_pr(unsigned long cmd)
{
	switch (cmd) {
	case H_ENTER:
	case H_REMOVE:
	case H_PROTECT:
	case H_BULK_REMOVE:
	case H_PUT_TCE:
	case H_CEDE:
	case H_LOGICAL_CI_LOAD:
	case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
#endif
		return 1;
	}
	return 0;
}

/*
 * List of hcall numbers to enable by default.
 * For compatibility with old userspace, we enable by default
 * all hcalls that were implemented before the hcall-enabling
 * facility was added.  Note this list should not include H_RTAS.
 */
static unsigned int default_hcall_list[] = {
	H_ENTER,
	H_REMOVE,
	H_PROTECT,
	H_BULK_REMOVE,
	H_PUT_TCE,
	H_CEDE,
#ifdef CONFIG_KVM_XICS
	H_XIRR,
	H_CPPR,
	H_EOI,
	H_IPI,
	H_IPOLL,
	H_XIRR_X,
#endif
	0
};

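/* Mark each hcall in default_hcall_list as enabled in the VM's bitmap. */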
void kvmppc_pr_init_default_hcalls(struct kvm *kvm)
{
	int i;
	unsigned int hcall;

	for (i = 0; default_hcall_list[i]; ++i) {
		hcall = default_hcall_list[i];
		WARN_ON(!kvmppc_hcall_impl_pr(hcall));
		__set_bit(hcall / 4, kvm->arch.enabled_hcalls);
	}
}