/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/ratelimit.h>

char const *audit_point_name[] = {
	"pre page fault",
	"post page fault",
	"pre pte write",
	"post pte write",
	"pre sync",
	"post sync"
};

#define audit_printk(kvm, fmt, args...)		\
	printk(KERN_ERR "audit: (%s) error: "	\
		fmt, audit_point_name[kvm->arch.audit_point], ##args)

typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);

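/*
 * Recursively visit every spte in the shadow page @sp, calling @fn on each
 * entry and descending into child shadow pages for present, non-leaf sptes.
 */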
static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn, int level)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 *ent = sp->spt;

		fn(vcpu, ent + i, level);

		if (is_shadow_present_pte(ent[i]) &&
		      !is_last_spte(ent[i], level)) {
			struct kvm_mmu_page *child;

			child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
			__mmu_spte_walk(vcpu, child, fn, level - 1);
		}
	}
}

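/*
 * Walk the vcpu's shadow page tables from the root: a single root page for
 * 64-bit (4-level) paging, or the four PAE page directory roots otherwise.
 */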
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		__mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
		return;
	}

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			__mmu_spte_walk(vcpu, sp, fn, 2);
		}
	}
}

typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

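/* Apply @fn to every shadow page on the VM's active_mmu_pages list. */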
static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
		fn(kvm, sp);
}

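/*
 * Check that a present leaf spte maps the host pfn that its gfn currently
 * translates to; a mismatch indicates a stale or corrupted shadow mapping.
 */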
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	pfn_t pfn;
	hpa_t hpa;

	sp = page_header(__pa(sptep));

	if (sp->unsync && level != PT_PAGE_TABLE_LEVEL) {
		audit_printk(vcpu->kvm, "unsync sp: %p level = %d\n",
			     sp, level);
		return;
	}

	if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
		return;

	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
	pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);

	if (is_error_pfn(pfn))
		return;

	hpa = pfn << PAGE_SHIFT;
	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
			     "ent %llx\n", vcpu->arch.mmu.root_level, pfn,
			     hpa, *sptep);
}

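/*
 * Check that the gfn backing @sptep has a memslot and a non-empty rmap,
 * i.e. the mapping is reachable through the reverse map.  Output is
 * ratelimited.
 */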
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

	slots = kvm_memslots_for_spte_role(kvm, rev_sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	if (!slot) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
			     (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	rmapp = __gfn_to_rmap(gfn, rev_sp->role.level, slot);
	if (!*rmapp) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no rmap for writable spte %llx\n",
			     *sptep);
		dump_stack();
	}
}

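/* Present leaf sptes must be reachable through the rmap. */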
static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
		inspect_spte_has_rmap(vcpu->kvm, sptep);
}

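/*
 * After a root sync (AUDIT_POST_SYNC), no shadow page reachable from the
 * root should still be marked unsync.
 */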
static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp = page_header(__pa(sptep));

	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
		audit_printk(vcpu->kvm, "found unsync sp %p after sync "
			     "root\n", sp);
}

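/* For a last-level shadow page, check every rmap-tracked spte it contains. */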
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	int i;

	if (sp->role.level != PT_PAGE_TABLE_LEVEL)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (!is_rmap_spte(sp->spt[i]))
			continue;

		inspect_spte_has_rmap(kvm, sp->spt + i);
	}
}

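/*
 * A synced, non-direct, valid shadow page shadows a guest page table, so the
 * gfn it shadows must be write-protected; report any spte that still maps
 * that gfn writable.
 */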
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	unsigned long *rmapp;
	u64 *sptep;
	struct rmap_iterator iter;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;

	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, sp->gfn);
	rmapp = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);

	for_each_rmap_spte(rmapp, &iter, sptep)
		if (is_writable_pte(*sptep))
			audit_printk(kvm, "shadow page has writable "
				     "mappings: gfn %llx role %x\n",
				     sp->gfn, sp->role.word);
}

static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	check_mappings_rmap(kvm, sp);
	audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
	walk_all_active_sps(kvm, audit_sp);
}

static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	audit_sptes_have_rmaps(vcpu, sptep, level);
	audit_mappings(vcpu, sptep, level);
	audit_spte_after_sync(vcpu, sptep, level);
}

static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, audit_spte);
}

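/*
 * mmu_audit tracks whether auditing is currently enabled; mmu_audit_key is a
 * static key, so kvm_mmu_audit() costs only a patched-out branch while
 * auditing is disabled.
 */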
static bool mmu_audit;
static struct static_key mmu_audit_key;

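/*
 * Run a full, ratelimited audit pass: all active shadow pages, then every
 * spte reachable from this vcpu's roots.
 */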
static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!__ratelimit(&ratelimit_state))
		return;

	vcpu->kvm->arch.audit_point = point;
	audit_all_active_sps(vcpu->kvm);
	audit_vcpu_spte(vcpu);
}

static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
	if (static_key_false(&mmu_audit_key))
		__kvm_mmu_audit(vcpu, point);
}

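/* Enable/disable auditing by flipping the static key; both are idempotent. */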
static void mmu_audit_enable(void)
{
	if (mmu_audit)
		return;

	static_key_slow_inc(&mmu_audit_key);
	mmu_audit = true;
}

static void mmu_audit_disable(void)
{
	if (!mmu_audit)
		return;

	static_key_slow_dec(&mmu_audit_key);
	mmu_audit = false;
}

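/* Module parameter handler: "0" disables auditing, "1" enables it. */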
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
	int ret;
	unsigned long enable;

	ret = kstrtoul(val, 10, &enable);
	if (ret < 0)
		return -EINVAL;

	switch (enable) {
	case 0:
		mmu_audit_disable();
		break;
	case 1:
		mmu_audit_enable();
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct kernel_param_ops audit_param_ops = {
	.set = mmu_audit_set,
	.get = param_get_bool,
};

arch_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);