/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/emulate.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/ptrace.h>

#define VCPU_NR_MODES 6

/* Index of a user_pt_regs field, counted in unsigned longs. */
#define REG_OFFSET(_reg) \
	(offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))

/* Index of the register backing AArch32 user-mode register R. */
#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))
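
/*
 * One register map per AArch32 mode handled below (USR, FIQ, IRQ, SVC,
 * ABT, UND).  Entry N gives the index into the AArch64 register array
 * where that mode's rN lives: banked r13/r14 (and r8-r12 for FIQ) use
 * the dedicated compat_* offsets, everything else aliases the USR
 * registers.  SYS shares the USR map (see vcpu_reg32()).
 */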
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
	/* USR Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
		REG_OFFSET(pc)
	},

	/* FIQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(compat_r8_fiq),  /* r8 */
		REG_OFFSET(compat_r9_fiq),  /* r9 */
		REG_OFFSET(compat_r10_fiq), /* r10 */
		REG_OFFSET(compat_r11_fiq), /* r11 */
		REG_OFFSET(compat_r12_fiq), /* r12 */
		REG_OFFSET(compat_sp_fiq),  /* r13 */
		REG_OFFSET(compat_lr_fiq),  /* r14 */
		REG_OFFSET(pc)
	},

	/* IRQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_irq), /* r13 */
		REG_OFFSET(compat_lr_irq), /* r14 */
		REG_OFFSET(pc)
	},

	/* SVC Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_svc), /* r13 */
		REG_OFFSET(compat_lr_svc), /* r14 */
		REG_OFFSET(pc)
	},

	/* ABT Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_abt), /* r13 */
		REG_OFFSET(compat_lr_abt), /* r14 */
		REG_OFFSET(pc)
	},

	/* UND Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_und), /* r13 */
		REG_OFFSET(compat_lr_und), /* r14 */
		REG_OFFSET(pc)
	},
};

/*
 * Return a pointer to the register of the given number, as banked for
 * the current mode of the virtual CPU.
 */
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;

	switch (mode) {
	case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
		mode &= ~PSR_MODE32_BIT; /* USR..SVC: 0x10-0x13 -> 0-3 */
		break;

	case COMPAT_PSR_MODE_ABT:
		mode = 4;
		break;

	case COMPAT_PSR_MODE_UND:
		mode = 5;
		break;

	case COMPAT_PSR_MODE_SYS:
		mode = 0;	/* SYS maps to USR */
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}
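
/*
 * Illustrative only (not used in this file): emulation code that needs a
 * 32-bit guest register, with banking resolved against the current mode,
 * reads or writes through the returned pointer, e.g.
 *
 *	unsigned long lr = *vcpu_reg32(vcpu, 14);	read the banked LR
 *	*vcpu_reg32(vcpu, 0) = res;			set the guest's r0
 */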

/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;

	switch (mode) {
	case COMPAT_PSR_MODE_SVC:
		mode = KVM_SPSR_SVC;
		break;
	case COMPAT_PSR_MODE_ABT:
		mode = KVM_SPSR_ABT;
		break;
	case COMPAT_PSR_MODE_UND:
		mode = KVM_SPSR_UND;
		break;
	case COMPAT_PSR_MODE_IRQ:
		mode = KVM_SPSR_IRQ;
		break;
	case COMPAT_PSR_MODE_FIQ:
		mode = KVM_SPSR_FIQ;
		break;
	default:
		BUG();
	}

	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode];
}
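
/*
 * Illustrative only: 32-bit exception injection typically switches the
 * CPSR to the target mode and then stashes the old CPSR value through
 * this helper, roughly:
 *
 *	old = *vcpu_cpsr(vcpu);
 *	*vcpu_cpsr(vcpu) = new_cpsr;		hypothetical new value
 *	*vcpu_spsr32(vcpu) = old;		banked SPSR of the new mode
 */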