/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/kasan.h>

#include "kasan.h"
#include "../slab.h"
/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}
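
/*
 * Illustrative sketch (editorial addition, assuming the usual 1:8
 * shadow scaling): kasan_mem_to_shadow() computes roughly
 *
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
 *
 * so poisoning a 32-byte, 8-byte-aligned region writes exactly four
 * shadow bytes, each covering one KASAN_SHADOW_SCALE_SIZE granule.
 */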

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}
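
/*
 * Worked example (editorial addition): unpoisoning 13 bytes clears one
 * full shadow byte for bytes 0..7, then stores 13 & KASAN_SHADOW_MASK
 * == 5 in the next shadow byte, meaning "only the first 5 bytes of
 * this 8-byte granule are accessible".
 */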


/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of the __asan_loadX/__asan_storeX
 * variants, depending on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
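
/*
 * Shadow semantics, spelled out (editorial note): shadow value 0 means
 * the whole granule is accessible; a value 1..7 means only that many
 * leading bytes are accessible; a negative value marks the granule as
 * fully poisoned. Hence the signed compare above: the access at offset
 * (addr & KASAN_SHADOW_MASK) is bad iff that offset reaches or passes
 * the first inaccessible byte.
 */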

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If a single shadow byte covers the 2-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If a single shadow byte covers the 4-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If a single shadow byte covers the 8-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}
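
/*
 * Editorial note on the pattern above: each fixed-size check first
 * tests the last byte of the access via memory_is_poisoned_1(). If the
 * access straddles two granules (i.e. it is not confined to the granule
 * of its last byte), the first granule's shadow byte must be fully
 * clear as well, hence the extra *(u8 *)shadow_addr test.
 */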

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}
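
/*
 * Note (added): an aligned 16-byte access spans exactly two granules,
 * both covered by the u16 read above; an unaligned one spills into a
 * third granule, which is why the last byte gets its own check.
 */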

static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}
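
/*
 * Editorial sketch of the scan above: byte-check an unaligned prefix,
 * then compare aligned u64 words against zero, then byte-check the
 * tail. For a 21-byte range starting at an address with addr % 8 == 5
 * that is a 3-byte prefix, two 8-byte words and a 2-byte tail; on a
 * non-zero word, bytes_is_zero() re-scans it to return the exact
 * failing address.
 */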

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}
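
/*
 * Note (added): a non-zero shadow byte is only tolerable when it is
 * the last one in the range and merely encodes a partially accessible
 * final granule; any earlier non-zero byte, or a last byte that the
 * access overruns, means the region is poisoned.
 */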

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}
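
/*
 * Note (added): the __asan_loadX/__asan_storeX callers below pass a
 * literal size, so __builtin_constant_p() is true there and the switch
 * folds down to a single specialized check at compile time; only the
 * N-byte entry points fall through to memory_is_poisoned_n().
 */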


static __always_inline void check_memory_region(unsigned long addr,
						size_t size, bool write)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, _RET_IP_);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, _RET_IP_);
}
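
/*
 * Note (added): addresses below kasan_shadow_to_mem(KASAN_SHADOW_START)
 * have no shadow backing at all (e.g. NULL or other pointers outside
 * the shadowed kernel range), so they are reported immediately rather
 * than dereferencing a nonexistent shadow byte.
 */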

void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	__asan_storeN((unsigned long)addr, len);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);
	__asan_storeN((unsigned long)dest, len);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);
	__asan_storeN((unsigned long)dest, len);

	return __memcpy(dest, src, len);
}
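
/*
 * Note (added): these definitions interpose on the kernel's mem*()
 * functions, validating every byte of the source and destination
 * ranges before delegating to __memset/__memmove/__memcpy, the
 * uninstrumented architecture implementations.
 */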

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object)
{
	kasan_kmalloc(cache, object, cache->object_size);
}

void kasan_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}
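
/*
 * Lifecycle summary (added): a freshly allocated slab page is fully
 * poisoned as redzone; allocation unpoisons exactly the object, and
 * freeing repoisons it as KASAN_KMALLOC_FREE so use-after-free of the
 * object is caught until the slot is reused.
 */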

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);
}
EXPORT_SYMBOL(kasan_kmalloc);
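
/*
 * Worked example (added, values illustrative): kmalloc(100) served
 * from a cache with object_size 128 unpoisons bytes 0..99, leaves the
 * shadow byte for bytes 96..103 holding the partial value 4 (100 & 7),
 * and poisons bytes 104..127 as KASAN_KMALLOC_REDZONE, so any access
 * past the requested 100 bytes is flagged.
 */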

void kasan_kmalloc_large(const void *ptr, size_t size)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size);
	else
		kasan_kmalloc(page->slab_cache, object, size);
}

void kasan_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}
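
/*
 * Note (added): module space has no statically mapped shadow, so this
 * allocates it on demand: one page-aligned shadow region of roughly
 * size/8 bytes (e.g. a 1 MB module maps to 128 KB of shadow), zeroed
 * so the module's memory starts out fully accessible, and tagged
 * VM_KASAN so kasan_free_shadow() can release it when the module area
 * is freed.
 */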

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}
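
/*
 * Note (added): the compiler pads each instrumented global to
 * size_with_redzone bytes; the global itself is unpoisoned (with the
 * usual partial-granule handling for sizes not a multiple of 8) and
 * the trailing pad becomes a KASAN_GLOBAL_REDZONE to catch overflows.
 */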

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)				\
	void __asan_load##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, false);		\
	}							\
	EXPORT_SYMBOL(__asan_load##size);			\
	__alias(__asan_load##size)				\
	void __asan_load##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_load##size##_noabort);		\
	void __asan_store##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, true);		\
	}							\
	EXPORT_SYMBOL(__asan_store##size);			\
	__alias(__asan_store##size)				\
	void __asan_store##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
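
/*
 * Expansion sketch (added): DEFINE_ASAN_LOAD_STORE(4) emits, in effect,
 *
 *	void __asan_load4(unsigned long addr)
 *	{
 *		check_memory_region(addr, 4, false);
 *	}
 *	void __asan_load4_noabort(unsigned long)
 *		__attribute__((alias("__asan_load4")));
 *
 * plus the matching __asan_store4 pair. The compiler inserts calls to
 * these entry points for each instrumented 4-byte access; the _noabort
 * aliases exist because the kernel runtime reports without aborting,
 * so both variants behave identically here.
 */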

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_err("WARNING: KASAN doesn't support memory hot-add\n");
	pr_err("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif