/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Binary patching of privileged instructions to reduce traps.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/cacheflush.h>

#include "commpage.h"

#define SYNCI_TEMPLATE  0x041f0000		/* synci 0($zero) */
#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
#define SYNCI_OFFSET(x) ((x) & 0xffff)

#define LW_TEMPLATE     0x8c000000		/* lw $zero, 0($zero) */
#define CLEAR_TEMPLATE  0x00000020		/* add $zero, $zero, $zero */
#define SW_TEMPLATE     0xac000000		/* sw $zero, 0($zero) */

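/*
 * Indexed CACHE operations are patched out entirely: the trapping
 * instruction in guest KSEG0 is overwritten with a NOP (encoding 0x0)
 * so that it no longer traps into the hypervisor.
 */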
int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
			       struct kvm_vcpu *vcpu)
{
	int result = 0;
	unsigned long kseg0_opc;
	uint32_t synci_inst = 0x0;

	/* Replace the CACHE instruction with a NOP */
	kseg0_opc =
	    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
		       (vcpu, (unsigned long) opc));
	memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
	local_flush_icache_range(kseg0_opc, kseg0_opc + 32);

	return result;
}

/*
 * Address based CACHE instructions are transformed into synci(s). A little
 * heavy for just D-cache invalidates, but avoids an expensive trap.
 */
int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
			    struct kvm_vcpu *vcpu)
{
	int result = 0;
	unsigned long kseg0_opc;
	uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;

	/* Reuse the base register and offset fields of the original CACHE op */
	base = SYNCI_BASE(inst);
	offset = SYNCI_OFFSET(inst);
	synci_inst |= (base << 21);
	synci_inst |= offset;

	kseg0_opc =
	    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
		       (vcpu, (unsigned long) opc));
	memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
	local_flush_icache_range(kseg0_opc, kseg0_opc + 32);

	return result;
}

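/*
 * MFC0 instructions are rewritten so that guest CP0 reads no longer trap:
 * a read of ERRCTL becomes a clear of the destination GPR (the register
 * reads as zero), while any other CP0 read becomes a LW of the register's
 * saved value from the CP0 area of the KVM commpage.
 */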
int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
{
	int32_t rt, rd, sel;
	uint32_t mfc0_inst;
	unsigned long kseg0_opc, flags;

	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;

	if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
		/*
		 * Clear the destination GPR: the destination of the R-type
		 * ADD template is the rd field (bits 15:11), not rt.
		 */
		mfc0_inst = CLEAR_TEMPLATE;
		mfc0_inst |= ((rt & 0x1f) << 11);
	} else {
		mfc0_inst = LW_TEMPLATE;
		mfc0_inst |= ((rt & 0x1f) << 16);
		mfc0_inst |=
		    offsetof(struct mips_coproc,
			     reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
						      cop0);
	}

	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		kseg0_opc =
		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
			       (vcpu, (unsigned long) opc));
		memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
		local_flush_icache_range((unsigned long)opc,
					 (unsigned long)opc + 32);
		local_irq_restore(flags);
	} else {
		kvm_err("%s: Invalid address: %p\n", __func__, opc);
		return -EFAULT;
	}

	return 0;
}

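/*
 * MTC0 instructions are rewritten as a SW of the source GPR to the saved
 * copy of the CP0 register in the commpage, so subsequent guest writes to
 * that register update memory directly instead of trapping.
 */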
int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
{
	int32_t rt, rd, sel;
	uint32_t mtc0_inst = SW_TEMPLATE;
	unsigned long kseg0_opc, flags;

	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;

	mtc0_inst |= ((rt & 0x1f) << 16);
	mtc0_inst |=
	    offsetof(struct mips_coproc,
		     reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);

	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		kseg0_opc =
		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
			       (vcpu, (unsigned long) opc));
		memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
		local_flush_icache_range((unsigned long)opc,
					 (unsigned long)opc + 32);
		local_irq_restore(flags);
	} else {
		kvm_err("%s: Invalid address: %p\n", __func__, opc);
		return -EFAULT;
	}

	return 0;
}