/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu <yu.liu@freescale.com>
 *         Scott Wood <scottwood@freescale.com>
 *         Ashish Kalra <ashish.kalra@freescale.com>
 *         Varun Sethi <varun.sethi@freescale.com>
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.h and
 * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
 * Copyright IBM Corp. 2007-2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#ifndef KVM_E500_H
#define KVM_E500_H

#include <linux/kvm_host.h>
#include <asm/mmu-book3e.h>
#include <asm/tlb.h>
#include <asm/cputhreads.h>

enum vcpu_ftr {
	VCPU_FTR_MMU_V2
};

#define E500_PID_NUM   3
#define E500_TLB_NUM   2

/* entry is mapped somewhere in host TLB */
#define E500_TLB_VALID		(1 << 31)
/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
#define E500_TLB_BITMAP		(1 << 30)
/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0		(1 << 29)
/* bits [6-5] hold MAS2_X1 and MAS2_X0, bits [4-0] hold WIMGE */
#define E500_TLB_MAS2_ATTR	(0x7f)

struct tlbe_ref {
	pfn_t pfn;		/* valid only for TLB0, except briefly */
	unsigned int flags;	/* E500_TLB_* */
};

struct tlbe_priv {
	struct tlbe_ref ref;
};

#ifdef CONFIG_KVM_E500V2
struct vcpu_id_table;
#endif

struct kvmppc_e500_tlb_params {
	int entries, ways, sets;
};

struct kvmppc_vcpu_e500 {
	struct kvm_vcpu vcpu;

	/* Unmodified copy of the guest's TLB -- shared with host userspace. */
	struct kvm_book3e_206_tlb_entry *gtlb_arch;

	/* Starting entry number in gtlb_arch[] */
	int gtlb_offset[E500_TLB_NUM];

	/* KVM internal information associated with each guest TLB entry */
	struct tlbe_priv *gtlb_priv[E500_TLB_NUM];

	struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];

	unsigned int gtlb_nv[E500_TLB_NUM];

	unsigned int host_tlb1_nv;

	u32 svr;
	u32 l1csr0;
	u32 l1csr1;
	u32 hid0;
	u32 hid1;
	u64 mcar;

	struct page **shared_tlb_pages;
	int num_shared_tlb_pages;

	u64 *g2h_tlb1_map;
	unsigned int *h2g_tlb1_rmap;

	/* Minimum and maximum addresses mapped by TLB1 */
	unsigned long tlb1_min_eaddr;
	unsigned long tlb1_max_eaddr;

#ifdef CONFIG_KVM_E500V2
	u32 pid[E500_PID_NUM];

	/* vcpu id table */
	struct vcpu_id_table *idt;
#endif
};

static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
}


/* This geometry is the legacy default -- can be overridden by userspace */
#define KVM_E500_TLB0_WAY_SIZE		128
#define KVM_E500_TLB0_WAY_NUM		2

#define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
#define KVM_E500_TLB1_SIZE  16

#define index_of(tlbsel, esel)	(((tlbsel) << 16) | ((esel) & 0xFFFF))
#define tlbsel_of(index)	((index) >> 16)
#define esel_of(index)		((index) & 0xFFFF)
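
/*
 * For illustration (derived purely from the macros above): index_of(1, 3)
 * yields 0x10003, and tlbsel_of()/esel_of() recover 1 and 3 from it, so a
 * single int can carry both the TLB selector and the entry number.
 */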

#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
#define MAS2_ATTRIB_MASK \
	  (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G)
#define MAS3_ATTRIB_MASK \
	  (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
	   | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
				ulong value);
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val);
int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
			       union kvmppc_one_reg *val);

#ifdef CONFIG_KVM_E500V2
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion);
#endif

/* TLB helper functions */
static inline unsigned int
get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 7) & 0x1f;
}

static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas2 & MAS2_EPN;
}

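/*
 * For reference: the helper below computes 1 KiB << TSIZE, so a MAS1
 * TSIZE of 2 corresponds to 4 KiB and a TSIZE of 6 to 64 KiB.
 */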
static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	unsigned int pgsize = get_tlb_size(tlbe);
	return 1ULL << 10 << pgsize;
}

static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	u64 bytes = get_tlb_bytes(tlbe);
	return get_tlb_eaddr(tlbe) + bytes - 1;
}

static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & ~0xfffULL;
}

static inline unsigned int
get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 16) & 0xff;
}

static inline unsigned int
get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 12) & 0x1;
}

static inline unsigned int
get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 31) & 0x1;
}

static inline unsigned int
get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 30) & 0x1;
}

static inline unsigned int
get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
}

static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pid & 0xff;
}

static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
}

static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.shared->msr & MSR_PR);
}

static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->mas6 >> 16) & 0xff;
}

static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas6 & 0x1;
}

static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
{
	/*
	 * The manual says that tlbsel is 2 bits wide.  Since we only have
	 * two TLBs, only the lower bit is used.
	 */
	return (vcpu->arch.shared->mas0 >> 28) & 0x1;
}

static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas0 & 0xfff;
}

static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
}

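/*
 * Roughly speaking, this reports whether a guest TLB entry could be backed
 * directly by a host mapping: it must be valid, match the current guest
 * address space (in the non-HV case), and translate to guest RAM that is
 * covered by a memslot.
 */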
static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
			const struct kvm_book3e_206_tlb_entry *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

#ifndef CONFIG_KVM_BOOKE_HV
	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;
#endif

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}

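/*
 * Both guest TLB arrays live in the single gtlb_arch[] allocation;
 * gtlb_offset[] records where each one starts, so this returns the guest's
 * view of entry "entry" in guest TLB "tlbsel".
 */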
static inline struct kvm_book3e_206_tlb_entry *get_entry(
	struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
{
	int offset = vcpu_e500->gtlb_offset[tlbsel];
	return &vcpu_e500->gtlb_arch[offset + entry];
}

void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe);
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);

#ifdef CONFIG_KVM_BOOKE_HV
#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)       get_tlb_tid(gtlbe)
#define get_tlbmiss_tid(vcpu)           get_cur_pid(vcpu)
#define get_tlb_sts(gtlbe)              (gtlbe->mas1 & MAS1_TS)

/*
 * These functions should be called with preemption disabled,
 * and the returned value is only valid in that context.
 */
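/*
 * Presumably the VM's base LPID has its low bit clear and LPIDs are
 * reserved in pairs when a core has two threads, so each hardware thread
 * can use its own LPID by OR-ing in the thread bit of the CPU id.
 */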
static inline int get_thread_specific_lpid(int vm_lpid)
{
	int vcpu_lpid = vm_lpid;

	if (threads_per_core == 2)
		vcpu_lpid |= smp_processor_id() & 1;

	return vcpu_lpid;
}

static inline int get_lpid(struct kvm_vcpu *vcpu)
{
	return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
}
#else
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe);

static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;

	return vcpu_e500->pid[tidseld];
}

/* Force TS=1 for all guest mappings. */
#define get_tlb_sts(gtlbe)              (MAS1_TS)
#endif /* !BOOKE_HV */

static inline bool has_feature(const struct kvm_vcpu *vcpu,
			       enum vcpu_ftr ftr)
{
	bool has_ftr;
	switch (ftr) {
	case VCPU_FTR_MMU_V2:
		has_ftr = ((vcpu->arch.mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2);
		break;
	default:
		return false;
	}
	return has_ftr;
}

#endif /* KVM_E500_H */