#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;

/*
 * This page is used as the early shadow. We don't use empty_zero_page
 * at the early stages because stack instrumentation could write some
 * garbage to this page.
 * Later we reuse it as the zero shadow for large ranges of memory that
 * may be accessed but are not instrumented by kasan (vmalloc/vmemmap ...).
 */
static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;

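/*
 * Map real shadow memory for one range of directly mapped physical
 * memory. kasan_mem_to_shadow() translates a kernel address into the
 * address of its shadow byte (one shadow byte covers 8 bytes of memory),
 * and vmemmap_populate() backs the resulting shadow range with actual
 * pages.
 */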
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
	/*
	 * end + 1 here is intentional. We check several shadow bytes in
	 * advance to slightly speed up the fastpath. In some rare cases
	 * we could cross the boundary of the mapped shadow, so we just
	 * map some more here.
	 */
	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}

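/*
 * Unmap the early shadow: clear every pgd entry covering [start, end).
 * The caller is expected to repopulate the range afterwards.
 */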
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	for (; start < end; start += PGDIR_SIZE)
		pgd_clear(pgd_offset_k(start));
}

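/*
 * Point every pgd entry covering the shadow region at the shared
 * kasan_zero_pud table, so the whole region reads as zeroes from the
 * single kasan_zero_page.
 */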
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
				| _KERNPG_TABLE);
		start += PGDIR_SIZE;
	}
}

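/*
 * Populate the lowest level: map [addr, end) one page at a time to the
 * shared, read-only kasan_zero_page.
 */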
static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	while (addr + PAGE_SIZE <= end) {
		WARN_ON(!pte_none(*pte));
		set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
					| __PAGE_KERNEL_RO));
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
	return 0;
}

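/*
 * Populate pmd entries for [addr, end). Fully covered, PMD_SIZE-aligned
 * chunks share the static kasan_zero_pte table; a partial chunk at the
 * end gets a freshly allocated pte page and falls through to
 * zero_pte_populate(). The pud/pgd levels below follow the same pattern.
 */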
static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	int ret = 0;
	pmd_t *pmd = pmd_offset(pud, addr);

	while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
		WARN_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
					| _KERNPG_TABLE));
		addr += PMD_SIZE;
		pmd = pmd_offset(pud, addr);
	}
	if (addr < end) {
		if (pmd_none(*pmd)) {
			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;
			set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
		}
		ret = zero_pte_populate(pmd, addr, end);
	}
	return ret;
}

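/* Same as zero_pmd_populate(), one level up: share kasan_zero_pmd. */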
static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	int ret = 0;
	pud_t *pud = pud_offset(pgd, addr);

	while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
		WARN_ON(!pud_none(*pud));
		set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
					| _KERNPG_TABLE));
		addr += PUD_SIZE;
		pud = pud_offset(pgd, addr);
	}

	if (addr < end) {
		if (pud_none(*pud)) {
			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;
			set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
		}
		ret = zero_pmd_populate(pud, addr, end);
	}
	return ret;
}

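/* Top level: share kasan_zero_pud for fully aligned PGDIR_SIZE chunks. */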
static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
{
	int ret = 0;
	pgd_t *pgd = pgd_offset_k(addr);

	while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
		WARN_ON(!pgd_none(*pgd));
		set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
					| _KERNPG_TABLE));
		addr += PGDIR_SIZE;
		pgd = pgd_offset_k(addr);
	}

	if (addr < end) {
		if (pgd_none(*pgd)) {
			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;
			set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
		}
		ret = zero_pud_populate(pgd, addr, end);
	}
	return ret;
}

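/*
 * Map [start, end) of the shadow to the single zero page, panicking on
 * allocation failure: without its shadow, kasan cannot run at all.
 */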
static void __init populate_zero_shadow(const void *start, const void *end)
{
	if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
		panic("kasan: unable to map zero shadow!");
}

#ifdef CONFIG_KASAN_INLINE
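/*
 * With inline instrumentation the compiler-emitted checks dereference
 * shadow addresses directly; a wild pointer (e.g. a NULL dereference or
 * a user-space address) yields a shadow address outside the mapped
 * shadow and triggers a general protection fault. Print a hint when
 * that happens.
 */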
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

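/*
 * Build the early shadow out of the three static tables: every pte
 * points at kasan_zero_page (writable at this stage, see the comment at
 * the top), every pmd at kasan_zero_pte, every pud at kasan_zero_pmd.
 * Wiring these into both the early and the init page tables lets
 * instrumented code run long before the real shadow is allocated.
 */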
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	kasan_map_early_shadow(early_level4_pgt);
	kasan_map_early_shadow(init_level4_pgt);
}

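/*
 * Replace the early shadow with the real one: zero shadow for the gaps
 * that are never instrumented, and actual shadow memory for the direct
 * mapping of physical memory and for kernel text/data.
 */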
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

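	/*
	 * Keep running on a copy of the page tables while the shadow
	 * entries in init_level4_pgt are torn down and rebuilt below;
	 * the copy still maps the early shadow, so instrumented code
	 * stays safe throughout.
	 */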
	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
	load_cr3(early_level4_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_X_MAX; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
	populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

	populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

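	/*
	 * The zero page may contain garbage written by early stack
	 * instrumentation (see the comment at its definition); wipe it
	 * before it starts serving as the real zero shadow.
	 */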
	memset(kasan_zero_page, 0, PAGE_SIZE);

	load_cr3(init_level4_pgt);
	__flush_tlb_all();
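	/*
	 * init_task.kasan_depth is set to 1 at boot to suppress reports
	 * while the shadow is not ready; dropping it to 0 turns kasan
	 * reporting on.
	 */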
	init_task.kasan_depth = 0;
}