/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block.  The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
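
/*
 * Illustrative restatement of the nesting order above (this is not extra
 * locking code; it only mirrors what scan_object()/scan_block() below do
 * during a scanning episode):
 *
 *	mutex_lock(&scan_mutex);
 *	spin_lock_irqsave(&object->lock, flags);	   scan_object()
 *	read_lock_irqsave(&kmemleak_lock, flags2);	   scan_block()
 *	spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&other_object->lock);
 *	read_unlock_irqrestore(&kmemleak_lock, flags2);
 *	spin_unlock_irqrestore(&object->lock, flags);
 *	mutex_unlock(&scan_mutex);
 */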

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
					   __GFP_NOACCOUNT)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warning(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
		     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
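
/*
 * Worked example of the color encoding above (illustrative values): a
 * kmalloc() object registered with min_count == 1 that still has count == 0
 * after a scan is white, i.e. a leak candidate; the same object with
 * count >= 1 is gray; an object registered with min_count == 0 is always
 * gray (scanned, never reported); min_count == -1 (KMEMLEAK_BLACK) marks
 * it black (neither scanned nor reported).
 */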

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reaches 0, the RCU freeing is already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warning("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object "
				      "search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object "
			      "at 0x%08lx as %s\n", ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warning("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		crt_early_log++;
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated percpu block and populate the stack trace for each
 * per-CPU area.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
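
/*
 * Typical pairing as seen from an allocator (illustrative sketch only; the
 * slab, vmalloc etc. hooks are the real callers of these functions):
 *
 *	kmemleak_alloc(block, size, 1, gfp);	on the allocation path
 *	...
 *	kmemleak_free(block);			on the matching free path
 *
 * With min_count == 1, the block is reported as a leak if a scan finds no
 * pointer to it anywhere in the scanned memory.
 */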

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
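
/*
 * Illustrative expansion (the "struct foo" variable below is hypothetical):
 * for
 *
 *	struct foo __percpu *p = alloc_percpu(struct foo);
 *
 * the percpu allocator ends up calling
 * kmemleak_alloc_percpu(p, sizeof(struct foo), GFP_KERNEL), which registers
 * per_cpu_ptr(p, cpu) for each possible CPU with min_count == 0, i.e. the
 * per-CPU areas are scanned but never reported as leaks.
 */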

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
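
/*
 * Example use (illustrative; "buf" is a hypothetical allocation): memory
 * handed over to hardware or firmware may have no pointer to it left in
 * scanned memory while still being in use. Calling
 *
 *	kmemleak_not_leak(buf);
 *
 * right after the allocation paints the object gray, suppressing the false
 * positive while still scanning the block for references it may hold.
 */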

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
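
/*
 * Example use (illustrative; "buf" is hypothetical): a block known to
 * contain no kernel pointers and known not to leak, e.g. a buffer holding
 * only raw device data, can be taken out of both scanning and reporting
 * with
 *
 *	kmemleak_ignore(buf);
 *
 * which paints the object black (min_count == -1 and OBJECT_NO_SCAN set,
 * see __paint_it() above).
 */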

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
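
/*
 * Example use (illustrative; "obj" and its "next" field are hypothetical):
 * if only one field of a large object can reference other allocations,
 *
 *	kmemleak_scan_area(&obj->next, sizeof(obj->next), GFP_KERNEL);
 *
 * limits scanning to that field. Passing SIZE_MAX as @size extends the
 * area from @ptr to the end of the object (see add_scan_area()).
 */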

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
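
/*
 * Example use (illustrative; "buf" is hypothetical): a buffer filled only
 * with non-pointer data, e.g. samples from a device, can skip content
 * scanning with
 *
 *	kmemleak_no_scan(buf);
 *
 * Unlike kmemleak_ignore(), the object is still reported if it becomes
 * unreferenced; only the scan of its contents is skipped.
 */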

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;

		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		if (pointer < min_addr || pointer >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock(&object->lock);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			/* put_object() called when removing from gray_list */
			WARN_ON(!get_object(object));
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock(&object->lock);
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_large_block(_sdata, _edata);
	scan_large_block(__bss_start, __bss_stop);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
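
/*
 * Example interaction from user space (illustrative shell session, using
 * the commands documented above):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak	trigger a scan now
 *	# cat /sys/kernel/debug/kmemleak		read the leak reports
 *	# echo clear > /sys/kernel/debug/kmemleak	grey out current reports
 */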

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}
/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no scan thread ran previously (otherwise, kmemleak may still have some
 * useful information on memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	/*
	 * Once the scan thread has stopped, it is safe to no longer track
	 * object freeing. Ordering of the scan thread stopping and the memory
	 * accesses below is guaranteed by the kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. "
			"Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
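
/*
 * Example (illustrative): booting with "kmemleak=off" on the kernel command
 * line disables the detector early, while "kmemleak=on" sets
 * kmemleak_skip_disable so that CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF does not
 * turn kmemleak off in kmemleak_init() below.
 */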

static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log > ARRAY_SIZE(early_log))
		pr_warning("Early log buffer exceeded (%d), please increase "
			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else {
		kmemleak_enabled = 1;
		kmemleak_free_enabled = 1;
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);