/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

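/* Per-vcpu counters exported to userspace through the KVM debugfs interface. */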
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

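/*
 * Undo the split real mode PC fixup: if this vcpu is flagged as running
 * with the split real mode hack, strip the hack offset from the PC and
 * clear the flag.
 */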
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

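/*
 * PR guests can relocate their interrupt vectors via HIOR; HV guests
 * always take interrupts at the architected offsets, so return 0 there.
 */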
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

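/*
 * PR guests can mark a critical section by making the shared "critical"
 * value match r1 while in supervisor mode; interrupt delivery is then
 * suppressed for the duration. HV guests never use this protocol.
 */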
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

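/*
 * Deliver an interrupt the way the hardware would: stash PC and MSR
 * (plus the supplied flag bits) into SRR0/SRR1, branch to the vector
 * (offset by HIOR for PR guests) and let the mmu's reset_msr() hook
 * establish the interrupt-entry MSR state.
 */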
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

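/* Map an exception vector address to its Book3S delivery priority. */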
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

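/*
 * For an instruction storage interrupt the fault reason is reported in
 * SRR1, which kvmppc_inject_interrupt() derives from the guest MSR, so
 * fold the ISI flag bits into the MSR before queueing the interrupt.
 */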
void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}

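/*
 * Try to deliver one pending exception. Returns 1 if the interrupt was
 * injected, 0 if it is currently masked (e.g. MSR_EE clear or the guest
 * inside a critical section).
 */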
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}

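/*
 * Walk the pending_exceptions bitmap in priority order and deliver the
 * first exception that can be injected, clearing its bit unless the
 * source (DEC, level-triggered external) retires it elsewhere.
 */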
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

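/*
 * Translate a guest physical address to a host pfn, special-casing the
 * paravirt magic page, which is backed by the vcpu's shared page rather
 * than by memslot memory.
 */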
pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		pfn_t pfn;

		pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

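/*
 * Translate an effective address through the guest MMU when address
 * translation is on; otherwise fake a 1:1 real-mode mapping, including
 * the split real mode adjustment for instruction fetches.
 */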
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

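/*
 * Fetch the instruction the guest last trapped on. For a system call
 * the saved PC points past the sc instruction, so back up by one
 * instruction word before loading.
 */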
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
					 u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

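/*
 * ONE_REG accessors: let the active (PR or HV) backend handle the
 * register first and fall back to the common Book3S state below only
 * when it returns -EINVAL.
 */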
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_VTB:
			*val = get_reg_val(id, vcpu->arch.vtb);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_VTB:
			vcpu->arch.vtb = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

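/*
 * The entry points below are thin wrappers that dispatch to the kvm_ops
 * of whichever backend (PR or HV) owns this VM.
 */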
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu->guest_debug = dbg->control;
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

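/*
 * H_LOGICAL_CI_LOAD: perform a cache-inhibited load on behalf of the
 * guest by forwarding it to the MMIO bus; the buffer is interpreted
 * big-endian. Only power-of-2 sizes up to 8 bytes are handled here,
 * anything else is punted to userspace with H_TOO_HARD.
 */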
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s; compatibility is
	 * checked when the HV or PR module is loaded.
	 */
	return 0;
}


int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

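/*
 * Module init/exit: register with the KVM core; on 32-bit builds the PR
 * backend is linked into this module, so bring it up and tear it down
 * here as well.
 */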
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32-bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif