1 /*
2  * linux/mm/slab.c
3  * Written by Mark Hemment, 1996/97.
4  * (markhe@nextd.demon.co.uk)
5  *
6  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7  *
8  * Major cleanup, different bufctl logic, per-cpu arrays
9  *	(c) 2000 Manfred Spraul
10  *
11  * Cleanup, make the head arrays unconditional, preparation for NUMA
12  * 	(c) 2002 Manfred Spraul
13  *
14  * An implementation of the Slab Allocator as described in outline in;
15  *	UNIX Internals: The New Frontiers by Uresh Vahalia
16  *	Pub: Prentice Hall	ISBN 0-13-101908-2
17  * or with a little more detail in;
18  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19  *	Jeff Bonwick (Sun Microsystems).
20  *	Presented at: USENIX Summer 1994 Technical Conference
21  *
22  * The memory is organized in caches, one cache for each object type.
23  * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24  * Each cache consists of many slabs (they are small (usually one
25  * page long) and always contiguous), and each slab contains multiple
26  * initialized objects.
27  *
28  * This means that your constructor is used only for newly allocated
29  * slabs and you must pass objects with the same initializations to
30  * kmem_cache_free.
31  *
32  * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33  * normal). If you need a special memory type, then you must create a new
34  * cache for that memory type.
35  *
36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37  *   full slabs with 0 free objects
38  *   partial slabs
39  *   empty slabs with no allocated objects
40  *
41  * If partial slabs exist, then new allocations come from these slabs,
42  * otherwise they come from empty slabs, or new slabs are allocated.
43  *
44  * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45  * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46  *
47  * Each cache has a short per-cpu head array, most allocs
48  * and frees go into that array, and if that array overflows, then 1/2
49  * of the entries in the array are given back into the global cache.
50  * The head array is strictly LIFO and should improve the cache hit rates.
51  * On SMP, it additionally reduces the spinlock operations.
52  *
53  * The c_cpuarray may not be read with local interrupts enabled -
54  * it's changed with a smp_call_function().
55  *
56  * SMP synchronization:
57  *  constructors and destructors are called without any locking.
58  *  Several members in struct kmem_cache and struct slab never change, they
59  *	are accessed without any locking.
60  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61  *  	and local interrupts are disabled so slab code is preempt-safe.
62  *  The non-constant members are protected with a per-cache irq spinlock.
63  *
64  * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65  * in 2000 - many ideas in the current implementation are derived from
66  * his patch.
67  *
68  * Further notes from the original documentation:
69  *
70  * 11 April '97.  Started multi-threading - markhe
71  *	The global cache-chain is protected by the mutex 'slab_mutex'.
72  *	The mutex is only needed when accessing/extending the cache-chain, which
73  *	can never happen inside an interrupt (kmem_cache_create(),
74  *	kmem_cache_shrink() and kmem_cache_reap()).
75  *
76  *	At present, each engine can be growing a cache.  This should be blocked.
77  *
78  * 15 March 2005. NUMA slab allocator.
79  *	Shai Fultheim <shai@scalex86.org>.
80  *	Shobhit Dayal <shobhit@calsoftinc.com>
81  *	Alok N Kataria <alokk@calsoftinc.com>
82  *	Christoph Lameter <christoph@lameter.com>
83  *
84  *	Modified the slab allocator to be node aware on NUMA systems.
85  *	Each node has its own list of partial, free and full slabs.
86  *	All object allocations for a node occur from node specific slab lists.
87  */
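/*
 * Illustrative sketch (not part of this file): typical use of the cache
 * API described above.  The "foo" cache, struct foo and foo_ctor() below
 * are hypothetical names.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, foo_ctor);
 *	objp = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, objp);
 *
 * foo_ctor() runs only when a fresh slab is populated, so an object must
 * be returned to kmem_cache_free() in its constructed state.
 */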
88 
89 #include	<linux/slab.h>
90 #include	<linux/mm.h>
91 #include	<linux/poison.h>
92 #include	<linux/swap.h>
93 #include	<linux/cache.h>
94 #include	<linux/interrupt.h>
95 #include	<linux/init.h>
96 #include	<linux/compiler.h>
97 #include	<linux/cpuset.h>
98 #include	<linux/proc_fs.h>
99 #include	<linux/seq_file.h>
100 #include	<linux/notifier.h>
101 #include	<linux/kallsyms.h>
102 #include	<linux/cpu.h>
103 #include	<linux/sysctl.h>
104 #include	<linux/module.h>
105 #include	<linux/rcupdate.h>
106 #include	<linux/string.h>
107 #include	<linux/uaccess.h>
108 #include	<linux/nodemask.h>
109 #include	<linux/kmemleak.h>
110 #include	<linux/mempolicy.h>
111 #include	<linux/mutex.h>
112 #include	<linux/fault-inject.h>
113 #include	<linux/rtmutex.h>
114 #include	<linux/reciprocal_div.h>
115 #include	<linux/debugobjects.h>
116 #include	<linux/kmemcheck.h>
117 #include	<linux/memory.h>
118 #include	<linux/prefetch.h>
119 
120 #include	<net/sock.h>
121 
122 #include	<asm/cacheflush.h>
123 #include	<asm/tlbflush.h>
124 #include	<asm/page.h>
125 
126 #include <trace/events/kmem.h>
127 
128 #include	"internal.h"
129 
130 #include	"slab.h"
131 
132 /*
133  * DEBUG	- 1 for kmem_cache_create() to honour SLAB_RED_ZONE & SLAB_POISON.
134  *		  0 for faster, smaller code (especially in the critical paths).
135  *
136  * STATS	- 1 to collect stats for /proc/slabinfo.
137  *		  0 for faster, smaller code (especially in the critical paths).
138  *
139  * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
140  */
141 
142 #ifdef CONFIG_DEBUG_SLAB
143 #define	DEBUG		1
144 #define	STATS		1
145 #define	FORCED_DEBUG	1
146 #else
147 #define	DEBUG		0
148 #define	STATS		0
149 #define	FORCED_DEBUG	0
150 #endif
151 
152 /* Shouldn't this be in a header file somewhere? */
153 #define	BYTES_PER_WORD		sizeof(void *)
154 #define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
155 
156 #ifndef ARCH_KMALLOC_FLAGS
157 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
158 #endif
159 
160 #define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
161 				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
162 
163 #if FREELIST_BYTE_INDEX
164 typedef unsigned char freelist_idx_t;
165 #else
166 typedef unsigned short freelist_idx_t;
167 #endif
168 
169 #define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
170 
171 /*
172  * true if a page was allocated from pfmemalloc reserves for network-based
173  * swap
174  */
175 static bool pfmemalloc_active __read_mostly;
176 
177 /*
178  * struct array_cache
179  *
180  * Purpose:
181  * - LIFO ordering, to hand out cache-warm objects from _alloc
182  * - reduce the number of linked list operations
183  * - reduce spinlock operations
184  *
185  * The limit is stored in the per-cpu structure to reduce the data cache
186  * footprint.
187  *
188  */
189 struct array_cache {
190 	unsigned int avail;
191 	unsigned int limit;
192 	unsigned int batchcount;
193 	unsigned int touched;
194 	void *entry[];	/*
195 			 * Must have this definition in here for the proper
196 			 * alignment of array_cache. Also simplifies accessing
197 			 * the entries.
198 			 *
199 			 * Entries should not be directly dereferenced as
200 			 * entries belonging to slabs marked pfmemalloc will
201 			 * have their lower bits set to SLAB_OBJ_PFMEMALLOC
202 			 */
203 };
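/*
 * Sketch of how entry[] is used (see ac_get_obj()/ac_put_obj() below).
 * Allocation pops the most recently freed, cache-warm object; freeing
 * pushes the object back:
 *
 *	objp = ac->entry[--ac->avail];		(allocation: LIFO pop)
 *	ac->entry[ac->avail++] = objp;		(free: LIFO push)
 */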
204 
205 struct alien_cache {
206 	spinlock_t lock;
207 	struct array_cache ac;
208 };
209 
210 #define SLAB_OBJ_PFMEMALLOC	1
211 static inline bool is_obj_pfmemalloc(void *objp)
212 {
213 	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
214 }
215 
216 static inline void set_obj_pfmemalloc(void **objp)
217 {
218 	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
219 	return;
220 }
221 
222 static inline void clear_obj_pfmemalloc(void **objp)
223 {
224 	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
225 }
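/*
 * The tagging above relies on slab objects being at least word aligned,
 * so bit 0 of an object pointer is normally clear and can carry the
 * pfmemalloc marker while the pointer sits in an array_cache.
 * Illustrative values:
 *
 *	objp = 0xffff880012345680;		bit 0 clear
 *	set_obj_pfmemalloc(&objp);		objp == 0xffff880012345681
 *	is_obj_pfmemalloc(objp);		returns true
 *	clear_obj_pfmemalloc(&objp);		objp == 0xffff880012345680
 */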
226 
227 /*
228  * bootstrap: The caches do not work without cpuarrays anymore, but the
229  * cpuarrays are allocated from the generic caches...
230  */
231 #define BOOT_CPUCACHE_ENTRIES	1
232 struct arraycache_init {
233 	struct array_cache cache;
234 	void *entries[BOOT_CPUCACHE_ENTRIES];
235 };
236 
237 /*
238  * Need this for bootstrapping a per node allocator.
239  */
240 #define NUM_INIT_LISTS (2 * MAX_NUMNODES)
241 static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
242 #define	CACHE_CACHE 0
243 #define	SIZE_NODE (MAX_NUMNODES)
244 
245 static int drain_freelist(struct kmem_cache *cache,
246 			struct kmem_cache_node *n, int tofree);
247 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
248 			int node, struct list_head *list);
249 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
250 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
251 static void cache_reap(struct work_struct *unused);
252 
253 static int slab_early_init = 1;
254 
255 #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
256 
257 static void kmem_cache_node_init(struct kmem_cache_node *parent)
258 {
259 	INIT_LIST_HEAD(&parent->slabs_full);
260 	INIT_LIST_HEAD(&parent->slabs_partial);
261 	INIT_LIST_HEAD(&parent->slabs_free);
262 	parent->shared = NULL;
263 	parent->alien = NULL;
264 	parent->colour_next = 0;
265 	spin_lock_init(&parent->list_lock);
266 	parent->free_objects = 0;
267 	parent->free_touched = 0;
268 }
269 
270 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
271 	do {								\
272 		INIT_LIST_HEAD(listp);					\
273 		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
274 	} while (0)
275 
276 #define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
277 	do {								\
278 	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
279 	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
280 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
281 	} while (0)
282 
283 #define CFLGS_OFF_SLAB		(0x80000000UL)
284 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
285 
286 #define BATCHREFILL_LIMIT	16
287 /*
288  * Optimization question: fewer reaps mean a lower probability of unnecessary
289  * cpucache drain/refill cycles.
290  *
291  * OTOH the cpuarrays can contain lots of objects,
292  * which could lock up otherwise freeable slabs.
293  */
294 #define REAPTIMEOUT_AC		(2*HZ)
295 #define REAPTIMEOUT_NODE	(4*HZ)
296 
297 #if STATS
298 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
299 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
300 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
301 #define	STATS_INC_GROWN(x)	((x)->grown++)
302 #define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
303 #define	STATS_SET_HIGH(x)						\
304 	do {								\
305 		if ((x)->num_active > (x)->high_mark)			\
306 			(x)->high_mark = (x)->num_active;		\
307 	} while (0)
308 #define	STATS_INC_ERR(x)	((x)->errors++)
309 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
310 #define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
311 #define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
312 #define	STATS_SET_FREEABLE(x, i)					\
313 	do {								\
314 		if ((x)->max_freeable < i)				\
315 			(x)->max_freeable = i;				\
316 	} while (0)
317 #define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
318 #define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
319 #define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
320 #define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
321 #else
322 #define	STATS_INC_ACTIVE(x)	do { } while (0)
323 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
324 #define	STATS_INC_ALLOCED(x)	do { } while (0)
325 #define	STATS_INC_GROWN(x)	do { } while (0)
326 #define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
327 #define	STATS_SET_HIGH(x)	do { } while (0)
328 #define	STATS_INC_ERR(x)	do { } while (0)
329 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
330 #define	STATS_INC_NODEFREES(x)	do { } while (0)
331 #define STATS_INC_ACOVERFLOW(x)   do { } while (0)
332 #define	STATS_SET_FREEABLE(x, i) do { } while (0)
333 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
334 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
335 #define STATS_INC_FREEHIT(x)	do { } while (0)
336 #define STATS_INC_FREEMISS(x)	do { } while (0)
337 #endif
338 
339 #if DEBUG
340 
341 /*
342  * memory layout of objects:
343  * 0		: objp
344  * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
345  * 		the end of an object is aligned with the end of the real
346  * 		allocation. Catches writes behind the end of the allocation.
347  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
348  * 		redzone word.
349  * cachep->obj_offset: The real object.
350  * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
351  * cachep->size - 1* BYTES_PER_WORD: last caller address
352  *					[BYTES_PER_WORD long]
353  */
354 static int obj_offset(struct kmem_cache *cachep)
355 {
356 	return cachep->obj_offset;
357 }
358 
359 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
360 {
361 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
362 	return (unsigned long long*) (objp + obj_offset(cachep) -
363 				      sizeof(unsigned long long));
364 }
365 
366 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
367 {
368 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
369 	if (cachep->flags & SLAB_STORE_USER)
370 		return (unsigned long long *)(objp + cachep->size -
371 					      sizeof(unsigned long long) -
372 					      REDZONE_ALIGN);
373 	return (unsigned long long *) (objp + cachep->size -
374 				       sizeof(unsigned long long));
375 }
376 
377 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
378 {
379 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
380 	return (void **)(objp + cachep->size - BYTES_PER_WORD);
381 }
382 
383 #else
384 
385 #define obj_offset(x)			0
386 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
387 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
388 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
389 
390 #endif
391 
392 #define OBJECT_FREE (0)
393 #define OBJECT_ACTIVE (1)
394 
395 #ifdef CONFIG_DEBUG_SLAB_LEAK
396 
397 static void set_obj_status(struct page *page, int idx, int val)
398 {
399 	int freelist_size;
400 	char *status;
401 	struct kmem_cache *cachep = page->slab_cache;
402 
403 	freelist_size = cachep->num * sizeof(freelist_idx_t);
404 	status = (char *)page->freelist + freelist_size;
405 	status[idx] = val;
406 }
407 
408 static inline unsigned int get_obj_status(struct page *page, int idx)
409 {
410 	int freelist_size;
411 	char *status;
412 	struct kmem_cache *cachep = page->slab_cache;
413 
414 	freelist_size = cachep->num * sizeof(freelist_idx_t);
415 	status = (char *)page->freelist + freelist_size;
416 
417 	return status[idx];
418 }
419 
420 #else
421 static inline void set_obj_status(struct page *page, int idx, int val) {}
422 
423 #endif
424 
425 /*
426  * Do not go above this order unless 0 objects fit into the slab or
427  * overridden on the command line.
428  */
429 #define	SLAB_MAX_ORDER_HI	1
430 #define	SLAB_MAX_ORDER_LO	0
431 static int slab_max_order = SLAB_MAX_ORDER_LO;
432 static bool slab_max_order_set __initdata;
433 
434 static inline struct kmem_cache *virt_to_cache(const void *obj)
435 {
436 	struct page *page = virt_to_head_page(obj);
437 	return page->slab_cache;
438 }
439 
440 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
441 				 unsigned int idx)
442 {
443 	return page->s_mem + cache->size * idx;
444 }
445 
446 /*
447  * We want to avoid an expensive divide : (offset / cache->size)
448  *   Using the fact that size is a constant for a particular cache,
449  *   we can replace (offset / cache->size) by
450  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
451  */
452 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
453 					const struct page *page, void *obj)
454 {
455 	u32 offset = (obj - page->s_mem);
456 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
457 }
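/*
 * Illustrative example: for a cache with size == 256,
 * reciprocal_buffer_size holds reciprocal_value(256), and an object at
 * offset 1280 from s_mem gives
 * reciprocal_divide(1280, cache->reciprocal_buffer_size) == 5,
 * i.e. 1280 / 256 computed with a multiply and shift instead of a divide.
 */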
458 
459 /* internal cache of cache description objs */
460 static struct kmem_cache kmem_cache_boot = {
461 	.batchcount = 1,
462 	.limit = BOOT_CPUCACHE_ENTRIES,
463 	.shared = 1,
464 	.size = sizeof(struct kmem_cache),
465 	.name = "kmem_cache",
466 };
467 
468 #define BAD_ALIEN_MAGIC 0x01020304ul
469 
470 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
471 
472 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
473 {
474 	return this_cpu_ptr(cachep->cpu_cache);
475 }
476 
477 static size_t calculate_freelist_size(int nr_objs, size_t align)
478 {
479 	size_t freelist_size;
480 
481 	freelist_size = nr_objs * sizeof(freelist_idx_t);
482 	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
483 		freelist_size += nr_objs * sizeof(char);
484 
485 	if (align)
486 		freelist_size = ALIGN(freelist_size, align);
487 
488 	return freelist_size;
489 }
490 
491 static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
492 				size_t idx_size, size_t align)
493 {
494 	int nr_objs;
495 	size_t remained_size;
496 	size_t freelist_size;
497 	int extra_space = 0;
498 
499 	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
500 		extra_space = sizeof(char);
501 	/*
502 	 * Ignore padding for the initial guess. The padding
503 	 * is at most @align-1 bytes, and @buffer_size is at
504 	 * least @align. In the worst case, this result will
505 	 * be one greater than the number of objects that fit
506 	 * into the memory allocation when taking the padding
507 	 * into account.
508 	 */
509 	nr_objs = slab_size / (buffer_size + idx_size + extra_space);
510 
511 	/*
512 	 * This calculated number will be either the right
513 	 * amount, or one greater than what we want.
514 	 */
515 	remained_size = slab_size - nr_objs * buffer_size;
516 	freelist_size = calculate_freelist_size(nr_objs, align);
517 	if (remained_size < freelist_size)
518 		nr_objs--;
519 
520 	return nr_objs;
521 }
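/*
 * Worked example with illustrative numbers: slab_size = 4096,
 * buffer_size = 1020, idx_size = 2, align = 512.  The initial guess is
 * 4096 / (1020 + 2) = 4 objects, but the remaining 4096 - 4 * 1020 = 16
 * bytes cannot hold the freelist (4 * 2 bytes, aligned up to 512), so
 * nr_objs is corrected down to 3.
 */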
522 
523 /*
524  * Calculate the number of objects and left-over bytes for a given buffer size.
525  */
526 static void cache_estimate(unsigned long gfporder, size_t buffer_size,
527 			   size_t align, int flags, size_t *left_over,
528 			   unsigned int *num)
529 {
530 	int nr_objs;
531 	size_t mgmt_size;
532 	size_t slab_size = PAGE_SIZE << gfporder;
533 
534 	/*
535 	 * The slab management structure can be either off the slab or
536 	 * on it. For the latter case, the memory allocated for a
537 	 * slab is used for:
538 	 *
539 	 * - One unsigned int for each object
540 	 * - Padding to respect alignment of @align
541 	 * - @buffer_size bytes for each object
542 	 *
543 	 * If the slab management structure is off the slab, then the
544 	 * alignment will already be calculated into the size. Because
545 	 * the slabs are all pages aligned, the objects will be at the
546 	 * correct alignment when allocated.
547 	 */
548 	if (flags & CFLGS_OFF_SLAB) {
549 		mgmt_size = 0;
550 		nr_objs = slab_size / buffer_size;
551 
552 	} else {
553 		nr_objs = calculate_nr_objs(slab_size, buffer_size,
554 					sizeof(freelist_idx_t), align);
555 		mgmt_size = calculate_freelist_size(nr_objs, align);
556 	}
557 	*num = nr_objs;
558 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
559 }
560 
561 #if DEBUG
562 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
563 
564 static void __slab_error(const char *function, struct kmem_cache *cachep,
565 			char *msg)
566 {
567 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
568 	       function, cachep->name, msg);
569 	dump_stack();
570 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
571 }
572 #endif
573 
574 /*
575  * By default on NUMA we use alien caches to stage the freeing of
576  * objects allocated from other nodes. This causes massive memory
577  * inefficiencies when using fake NUMA setup to split memory into a
578  * large number of small nodes, so it can be disabled on the command
579  * line
580   */
581 
582 static int use_alien_caches __read_mostly = 1;
583 static int __init noaliencache_setup(char *s)
584 {
585 	use_alien_caches = 0;
586 	return 1;
587 }
588 __setup("noaliencache", noaliencache_setup);
589 
590 static int __init slab_max_order_setup(char *str)
591 {
592 	get_option(&str, &slab_max_order);
593 	slab_max_order = slab_max_order < 0 ? 0 :
594 				min(slab_max_order, MAX_ORDER - 1);
595 	slab_max_order_set = true;
596 
597 	return 1;
598 }
599 __setup("slab_max_order=", slab_max_order_setup);
600 
601 #ifdef CONFIG_NUMA
602 /*
603  * Special reaping functions for NUMA systems called from cache_reap().
604  * These take care of doing round robin flushing of alien caches (containing
605  * objects freed on different nodes from which they were allocated) and the
606  * flushing of remote pcps by calling drain_node_pages.
607  */
608 static DEFINE_PER_CPU(unsigned long, slab_reap_node);
609 
610 static void init_reap_node(int cpu)
611 {
612 	int node;
613 
614 	node = next_node(cpu_to_mem(cpu), node_online_map);
615 	if (node == MAX_NUMNODES)
616 		node = first_node(node_online_map);
617 
618 	per_cpu(slab_reap_node, cpu) = node;
619 }
620 
621 static void next_reap_node(void)
622 {
623 	int node = __this_cpu_read(slab_reap_node);
624 
625 	node = next_node(node, node_online_map);
626 	if (unlikely(node >= MAX_NUMNODES))
627 		node = first_node(node_online_map);
628 	__this_cpu_write(slab_reap_node, node);
629 }
630 
631 #else
632 #define init_reap_node(cpu) do { } while (0)
633 #define next_reap_node(void) do { } while (0)
634 #endif
635 
636 /*
637  * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
638  * via the workqueue/eventd.
639  * Add the CPU number into the expiration time to minimize the possibility of
640  * the CPUs getting into lockstep and contending for the global cache chain
641  * lock.
642  */
643 static void start_cpu_timer(int cpu)
644 {
645 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
646 
647 	/*
648 	 * When this gets called from do_initcalls via cpucache_init(),
649 	 * init_workqueues() has already run, so keventd will be setup
650 	 * at that time.
651 	 */
652 	if (keventd_up() && reap_work->work.func == NULL) {
653 		init_reap_node(cpu);
654 		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
655 		schedule_delayed_work_on(cpu, reap_work,
656 					__round_jiffies_relative(HZ, cpu));
657 	}
658 }
659 
660 static void init_arraycache(struct array_cache *ac, int limit, int batch)
661 {
662 	/*
663 	 * The array_cache structures contain pointers to free objects.
664 	 * However, when such objects are allocated or transferred to another
665 	 * cache the pointers are not cleared and they could be counted as
666 	 * valid references during a kmemleak scan. Therefore, kmemleak must
667 	 * not scan such objects.
668 	 */
669 	kmemleak_no_scan(ac);
670 	if (ac) {
671 		ac->avail = 0;
672 		ac->limit = limit;
673 		ac->batchcount = batch;
674 		ac->touched = 0;
675 	}
676 }
677 
678 static struct array_cache *alloc_arraycache(int node, int entries,
679 					    int batchcount, gfp_t gfp)
680 {
681 	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
682 	struct array_cache *ac = NULL;
683 
684 	ac = kmalloc_node(memsize, gfp, node);
685 	init_arraycache(ac, entries, batchcount);
686 	return ac;
687 }
688 
689 static inline bool is_slab_pfmemalloc(struct page *page)
690 {
691 	return PageSlabPfmemalloc(page);
692 }
693 
694 /* Clears pfmemalloc_active if no slabs have pfmemalloc set */
695 static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
696 						struct array_cache *ac)
697 {
698 	struct kmem_cache_node *n = get_node(cachep, numa_mem_id());
699 	struct page *page;
700 	unsigned long flags;
701 
702 	if (!pfmemalloc_active)
703 		return;
704 
705 	spin_lock_irqsave(&n->list_lock, flags);
706 	list_for_each_entry(page, &n->slabs_full, lru)
707 		if (is_slab_pfmemalloc(page))
708 			goto out;
709 
710 	list_for_each_entry(page, &n->slabs_partial, lru)
711 		if (is_slab_pfmemalloc(page))
712 			goto out;
713 
714 	list_for_each_entry(page, &n->slabs_free, lru)
715 		if (is_slab_pfmemalloc(page))
716 			goto out;
717 
718 	pfmemalloc_active = false;
719 out:
720 	spin_unlock_irqrestore(&n->list_lock, flags);
721 }
722 
723 static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
724 						gfp_t flags, bool force_refill)
725 {
726 	int i;
727 	void *objp = ac->entry[--ac->avail];
728 
729 	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
730 	if (unlikely(is_obj_pfmemalloc(objp))) {
731 		struct kmem_cache_node *n;
732 
733 		if (gfp_pfmemalloc_allowed(flags)) {
734 			clear_obj_pfmemalloc(&objp);
735 			return objp;
736 		}
737 
738 		/* The caller cannot use PFMEMALLOC objects, find another one */
739 		for (i = 0; i < ac->avail; i++) {
740 			/* If a !PFMEMALLOC object is found, swap them */
741 			if (!is_obj_pfmemalloc(ac->entry[i])) {
742 				objp = ac->entry[i];
743 				ac->entry[i] = ac->entry[ac->avail];
744 				ac->entry[ac->avail] = objp;
745 				return objp;
746 			}
747 		}
748 
749 		/*
750 		 * If there are empty slabs on the slabs_free list and we are
751 		 * being forced to refill the cache, mark this one !pfmemalloc.
752 		 */
753 		n = get_node(cachep, numa_mem_id());
754 		if (!list_empty(&n->slabs_free) && force_refill) {
755 			struct page *page = virt_to_head_page(objp);
756 			ClearPageSlabPfmemalloc(page);
757 			clear_obj_pfmemalloc(&objp);
758 			recheck_pfmemalloc_active(cachep, ac);
759 			return objp;
760 		}
761 
762 		/* No !PFMEMALLOC objects available */
763 		ac->avail++;
764 		objp = NULL;
765 	}
766 
767 	return objp;
768 }
769 
770 static inline void *ac_get_obj(struct kmem_cache *cachep,
771 			struct array_cache *ac, gfp_t flags, bool force_refill)
772 {
773 	void *objp;
774 
775 	if (unlikely(sk_memalloc_socks()))
776 		objp = __ac_get_obj(cachep, ac, flags, force_refill);
777 	else
778 		objp = ac->entry[--ac->avail];
779 
780 	return objp;
781 }
782 
783 static noinline void *__ac_put_obj(struct kmem_cache *cachep,
784 			struct array_cache *ac, void *objp)
785 {
786 	if (unlikely(pfmemalloc_active)) {
787 		/* Some pfmemalloc slabs exist, check if this is one */
788 		struct page *page = virt_to_head_page(objp);
789 		if (PageSlabPfmemalloc(page))
790 			set_obj_pfmemalloc(&objp);
791 	}
792 
793 	return objp;
794 }
795 
796 static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
797 								void *objp)
798 {
799 	if (unlikely(sk_memalloc_socks()))
800 		objp = __ac_put_obj(cachep, ac, objp);
801 
802 	ac->entry[ac->avail++] = objp;
803 }
804 
805 /*
806  * Transfer objects in one arraycache to another.
807  * Locking must be handled by the caller.
808  *
809  * Return the number of entries transferred.
810  */
811 static int transfer_objects(struct array_cache *to,
812 		struct array_cache *from, unsigned int max)
813 {
814 	/* Figure out how many entries to transfer */
815 	int nr = min3(from->avail, max, to->limit - to->avail);
816 
817 	if (!nr)
818 		return 0;
819 
820 	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
821 			sizeof(void *) *nr);
822 
823 	from->avail -= nr;
824 	to->avail += nr;
825 	return nr;
826 }
827 
828 #ifndef CONFIG_NUMA
829 
830 #define drain_alien_cache(cachep, alien) do { } while (0)
831 #define reap_alien(cachep, n) do { } while (0)
832 
833 static inline struct alien_cache **alloc_alien_cache(int node,
834 						int limit, gfp_t gfp)
835 {
836 	return (struct alien_cache **)BAD_ALIEN_MAGIC;
837 }
838 
839 static inline void free_alien_cache(struct alien_cache **ac_ptr)
840 {
841 }
842 
843 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
844 {
845 	return 0;
846 }
847 
848 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
849 		gfp_t flags)
850 {
851 	return NULL;
852 }
853 
854 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
855 		 gfp_t flags, int nodeid)
856 {
857 	return NULL;
858 }
859 
860 static inline gfp_t gfp_exact_node(gfp_t flags)
861 {
862 	return flags;
863 }
864 
865 #else	/* CONFIG_NUMA */
866 
867 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
868 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
869 
870 static struct alien_cache *__alloc_alien_cache(int node, int entries,
871 						int batch, gfp_t gfp)
872 {
873 	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
874 	struct alien_cache *alc = NULL;
875 
876 	alc = kmalloc_node(memsize, gfp, node);
877 	init_arraycache(&alc->ac, entries, batch);
878 	spin_lock_init(&alc->lock);
879 	return alc;
880 }
881 
882 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
883 {
884 	struct alien_cache **alc_ptr;
885 	size_t memsize = sizeof(void *) * nr_node_ids;
886 	int i;
887 
888 	if (limit > 1)
889 		limit = 12;
890 	alc_ptr = kzalloc_node(memsize, gfp, node);
891 	if (!alc_ptr)
892 		return NULL;
893 
894 	for_each_node(i) {
895 		if (i == node || !node_online(i))
896 			continue;
897 		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
898 		if (!alc_ptr[i]) {
899 			for (i--; i >= 0; i--)
900 				kfree(alc_ptr[i]);
901 			kfree(alc_ptr);
902 			return NULL;
903 		}
904 	}
905 	return alc_ptr;
906 }
907 
908 static void free_alien_cache(struct alien_cache **alc_ptr)
909 {
910 	int i;
911 
912 	if (!alc_ptr)
913 		return;
914 	for_each_node(i)
915 	    kfree(alc_ptr[i]);
916 	kfree(alc_ptr);
917 }
918 
919 static void __drain_alien_cache(struct kmem_cache *cachep,
920 				struct array_cache *ac, int node,
921 				struct list_head *list)
922 {
923 	struct kmem_cache_node *n = get_node(cachep, node);
924 
925 	if (ac->avail) {
926 		spin_lock(&n->list_lock);
927 		/*
928 		 * Stuff objects into the remote node's shared array first.
929 		 * That way we could avoid the overhead of putting the objects
930 		 * into the free lists and getting them back later.
931 		 */
932 		if (n->shared)
933 			transfer_objects(n->shared, ac, ac->limit);
934 
935 		free_block(cachep, ac->entry, ac->avail, node, list);
936 		ac->avail = 0;
937 		spin_unlock(&n->list_lock);
938 	}
939 }
940 
941 /*
942  * Called from cache_reap() to regularly drain alien caches round robin.
943  */
944 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
945 {
946 	int node = __this_cpu_read(slab_reap_node);
947 
948 	if (n->alien) {
949 		struct alien_cache *alc = n->alien[node];
950 		struct array_cache *ac;
951 
952 		if (alc) {
953 			ac = &alc->ac;
954 			if (ac->avail && spin_trylock_irq(&alc->lock)) {
955 				LIST_HEAD(list);
956 
957 				__drain_alien_cache(cachep, ac, node, &list);
958 				spin_unlock_irq(&alc->lock);
959 				slabs_destroy(cachep, &list);
960 			}
961 		}
962 	}
963 }
964 
965 static void drain_alien_cache(struct kmem_cache *cachep,
966 				struct alien_cache **alien)
967 {
968 	int i = 0;
969 	struct alien_cache *alc;
970 	struct array_cache *ac;
971 	unsigned long flags;
972 
973 	for_each_online_node(i) {
974 		alc = alien[i];
975 		if (alc) {
976 			LIST_HEAD(list);
977 
978 			ac = &alc->ac;
979 			spin_lock_irqsave(&alc->lock, flags);
980 			__drain_alien_cache(cachep, ac, i, &list);
981 			spin_unlock_irqrestore(&alc->lock, flags);
982 			slabs_destroy(cachep, &list);
983 		}
984 	}
985 }
986 
987 static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
988 				int node, int page_node)
989 {
990 	struct kmem_cache_node *n;
991 	struct alien_cache *alien = NULL;
992 	struct array_cache *ac;
993 	LIST_HEAD(list);
994 
995 	n = get_node(cachep, node);
996 	STATS_INC_NODEFREES(cachep);
997 	if (n->alien && n->alien[page_node]) {
998 		alien = n->alien[page_node];
999 		ac = &alien->ac;
1000 		spin_lock(&alien->lock);
1001 		if (unlikely(ac->avail == ac->limit)) {
1002 			STATS_INC_ACOVERFLOW(cachep);
1003 			__drain_alien_cache(cachep, ac, page_node, &list);
1004 		}
1005 		ac_put_obj(cachep, ac, objp);
1006 		spin_unlock(&alien->lock);
1007 		slabs_destroy(cachep, &list);
1008 	} else {
1009 		n = get_node(cachep, page_node);
1010 		spin_lock(&n->list_lock);
1011 		free_block(cachep, &objp, 1, page_node, &list);
1012 		spin_unlock(&n->list_lock);
1013 		slabs_destroy(cachep, &list);
1014 	}
1015 	return 1;
1016 }
1017 
1018 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1019 {
1020 	int page_node = page_to_nid(virt_to_page(objp));
1021 	int node = numa_mem_id();
1022 	/*
1023 	 * Make sure we are not freeing an object from another node to the array
1024 	 * cache on this cpu.
1025 	 */
1026 	if (likely(node == page_node))
1027 		return 0;
1028 
1029 	return __cache_free_alien(cachep, objp, node, page_node);
1030 }
1031 
1032 /*
1033  * Construct gfp mask to allocate from a specific node but do not invoke reclaim
1034  * or warn about failures.
1035  */
1036 static inline gfp_t gfp_exact_node(gfp_t flags)
1037 {
1038 	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_WAIT;
1039 }
1040 #endif
1041 
1042 /*
1043  * Allocates and initializes the kmem_cache_node for a node on each slab cache, used for
1044  * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
1045  * will be allocated off-node since memory is not yet online for the new node.
1046  * When hotplugging memory or a cpu, existing nodes are not replaced if
1047  * already in use.
1048  *
1049  * Must hold slab_mutex.
1050  */
1051 static int init_cache_node_node(int node)
1052 {
1053 	struct kmem_cache *cachep;
1054 	struct kmem_cache_node *n;
1055 	const size_t memsize = sizeof(struct kmem_cache_node);
1056 
1057 	list_for_each_entry(cachep, &slab_caches, list) {
1058 		/*
1059 		 * Set up the kmem_cache_node for cpu before we can
1060 		 * begin anything. Make sure some other cpu on this
1061 		 * node has not already allocated this
1062 		 * node has not already allocated this structure.
1063 		n = get_node(cachep, node);
1064 		if (!n) {
1065 			n = kmalloc_node(memsize, GFP_KERNEL, node);
1066 			if (!n)
1067 				return -ENOMEM;
1068 			kmem_cache_node_init(n);
1069 			n->next_reap = jiffies + REAPTIMEOUT_NODE +
1070 			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1071 
1072 			/*
1073 			 * The kmem_cache_nodes don't come and go as CPUs
1074 			 * come and go.  slab_mutex is sufficient
1075 			 * protection here.
1076 			 */
1077 			cachep->node[node] = n;
1078 		}
1079 
1080 		spin_lock_irq(&n->list_lock);
1081 		n->free_limit =
1082 			(1 + nr_cpus_node(node)) *
1083 			cachep->batchcount + cachep->num;
1084 		spin_unlock_irq(&n->list_lock);
1085 	}
1086 	return 0;
1087 }
1088 
1089 static inline int slabs_tofree(struct kmem_cache *cachep,
1090 						struct kmem_cache_node *n)
1091 {
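	/* Round up so every free object in the node is covered. */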
1092 	return (n->free_objects + cachep->num - 1) / cachep->num;
1093 }
1094 
1095 static void cpuup_canceled(long cpu)
1096 {
1097 	struct kmem_cache *cachep;
1098 	struct kmem_cache_node *n = NULL;
1099 	int node = cpu_to_mem(cpu);
1100 	const struct cpumask *mask = cpumask_of_node(node);
1101 
1102 	list_for_each_entry(cachep, &slab_caches, list) {
1103 		struct array_cache *nc;
1104 		struct array_cache *shared;
1105 		struct alien_cache **alien;
1106 		LIST_HEAD(list);
1107 
1108 		n = get_node(cachep, node);
1109 		if (!n)
1110 			continue;
1111 
1112 		spin_lock_irq(&n->list_lock);
1113 
1114 		/* Free limit for this kmem_cache_node */
1115 		n->free_limit -= cachep->batchcount;
1116 
1117 		/* cpu is dead; no one can alloc from it. */
1118 		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
1119 		if (nc) {
1120 			free_block(cachep, nc->entry, nc->avail, node, &list);
1121 			nc->avail = 0;
1122 		}
1123 
1124 		if (!cpumask_empty(mask)) {
1125 			spin_unlock_irq(&n->list_lock);
1126 			goto free_slab;
1127 		}
1128 
1129 		shared = n->shared;
1130 		if (shared) {
1131 			free_block(cachep, shared->entry,
1132 				   shared->avail, node, &list);
1133 			n->shared = NULL;
1134 		}
1135 
1136 		alien = n->alien;
1137 		n->alien = NULL;
1138 
1139 		spin_unlock_irq(&n->list_lock);
1140 
1141 		kfree(shared);
1142 		if (alien) {
1143 			drain_alien_cache(cachep, alien);
1144 			free_alien_cache(alien);
1145 		}
1146 
1147 free_slab:
1148 		slabs_destroy(cachep, &list);
1149 	}
1150 	/*
1151 	 * In the previous loop, all the objects were freed to
1152 	 * the respective cache's slabs; now we can go ahead and
1153 	 * shrink each nodelist to its limit.
1154 	 */
1155 	list_for_each_entry(cachep, &slab_caches, list) {
1156 		n = get_node(cachep, node);
1157 		if (!n)
1158 			continue;
1159 		drain_freelist(cachep, n, slabs_tofree(cachep, n));
1160 	}
1161 }
1162 
1163 static int cpuup_prepare(long cpu)
1164 {
1165 	struct kmem_cache *cachep;
1166 	struct kmem_cache_node *n = NULL;
1167 	int node = cpu_to_mem(cpu);
1168 	int err;
1169 
1170 	/*
1171 	 * We need to do this right in the beginning since
1172 	 * alloc_arraycache's are going to use this list.
1173 	 * kmalloc_node allows us to add the slab to the right
1174 	 * kmem_cache_node and not this cpu's kmem_cache_node
1175 	 */
1176 	err = init_cache_node_node(node);
1177 	if (err < 0)
1178 		goto bad;
1179 
1180 	/*
1181 	 * Now we can go ahead with allocating the shared arrays and
1182 	 * array caches
1183 	 */
1184 	list_for_each_entry(cachep, &slab_caches, list) {
1185 		struct array_cache *shared = NULL;
1186 		struct alien_cache **alien = NULL;
1187 
1188 		if (cachep->shared) {
1189 			shared = alloc_arraycache(node,
1190 				cachep->shared * cachep->batchcount,
1191 				0xbaadf00d, GFP_KERNEL);
1192 			if (!shared)
1193 				goto bad;
1194 		}
1195 		if (use_alien_caches) {
1196 			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
1197 			if (!alien) {
1198 				kfree(shared);
1199 				goto bad;
1200 			}
1201 		}
1202 		n = get_node(cachep, node);
1203 		BUG_ON(!n);
1204 
1205 		spin_lock_irq(&n->list_lock);
1206 		if (!n->shared) {
1207 			/*
1208 			 * We are serialised from CPU_DEAD or
1209 			 * CPU_UP_CANCELLED by the cpucontrol lock
1210 			 */
1211 			n->shared = shared;
1212 			shared = NULL;
1213 		}
1214 #ifdef CONFIG_NUMA
1215 		if (!n->alien) {
1216 			n->alien = alien;
1217 			alien = NULL;
1218 		}
1219 #endif
1220 		spin_unlock_irq(&n->list_lock);
1221 		kfree(shared);
1222 		free_alien_cache(alien);
1223 	}
1224 
1225 	return 0;
1226 bad:
1227 	cpuup_canceled(cpu);
1228 	return -ENOMEM;
1229 }
1230 
1231 static int cpuup_callback(struct notifier_block *nfb,
1232 				    unsigned long action, void *hcpu)
1233 {
1234 	long cpu = (long)hcpu;
1235 	int err = 0;
1236 
1237 	switch (action) {
1238 	case CPU_UP_PREPARE:
1239 	case CPU_UP_PREPARE_FROZEN:
1240 		mutex_lock(&slab_mutex);
1241 		err = cpuup_prepare(cpu);
1242 		mutex_unlock(&slab_mutex);
1243 		break;
1244 	case CPU_ONLINE:
1245 	case CPU_ONLINE_FROZEN:
1246 		start_cpu_timer(cpu);
1247 		break;
1248 #ifdef CONFIG_HOTPLUG_CPU
1249   	case CPU_DOWN_PREPARE:
1250   	case CPU_DOWN_PREPARE_FROZEN:
1251 		/*
1252 		 * Shutdown cache reaper. Note that the slab_mutex is
1253 		 * held so that if cache_reap() is invoked it cannot do
1254 		 * anything expensive but will only modify reap_work
1255 		 * and reschedule the timer.
1256 		*/
1257 		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1258 		/* Now the cache_reaper is guaranteed to be not running. */
1259 		per_cpu(slab_reap_work, cpu).work.func = NULL;
1260   		break;
1261   	case CPU_DOWN_FAILED:
1262   	case CPU_DOWN_FAILED_FROZEN:
1263 		start_cpu_timer(cpu);
1264   		break;
1265 	case CPU_DEAD:
1266 	case CPU_DEAD_FROZEN:
1267 		/*
1268 		 * Even if all the cpus of a node are down, we don't free the
1269 		 * kmem_cache_node of any cache. This to avoid a race between
1270 		 * cpu_down, and a kmalloc allocation from another cpu for
1271 		 * memory from the node of the cpu going down.  The node
1272 		 * structure is usually allocated from kmem_cache_create() and
1273 		 * gets destroyed at kmem_cache_destroy().
1274 		 */
1275 		/* fall through */
1276 #endif
1277 	case CPU_UP_CANCELED:
1278 	case CPU_UP_CANCELED_FROZEN:
1279 		mutex_lock(&slab_mutex);
1280 		cpuup_canceled(cpu);
1281 		mutex_unlock(&slab_mutex);
1282 		break;
1283 	}
1284 	return notifier_from_errno(err);
1285 }
1286 
1287 static struct notifier_block cpucache_notifier = {
1288 	&cpuup_callback, NULL, 0
1289 };
1290 
1291 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1292 /*
1293  * Drains freelist for a node on each slab cache, used for memory hot-remove.
1294  * Returns -EBUSY if all objects cannot be drained so that the node is not
1295  * removed.
1296  *
1297  * Must hold slab_mutex.
1298  */
1299 static int __meminit drain_cache_node_node(int node)
1300 {
1301 	struct kmem_cache *cachep;
1302 	int ret = 0;
1303 
1304 	list_for_each_entry(cachep, &slab_caches, list) {
1305 		struct kmem_cache_node *n;
1306 
1307 		n = get_node(cachep, node);
1308 		if (!n)
1309 			continue;
1310 
1311 		drain_freelist(cachep, n, slabs_tofree(cachep, n));
1312 
1313 		if (!list_empty(&n->slabs_full) ||
1314 		    !list_empty(&n->slabs_partial)) {
1315 			ret = -EBUSY;
1316 			break;
1317 		}
1318 	}
1319 	return ret;
1320 }
1321 
1322 static int __meminit slab_memory_callback(struct notifier_block *self,
1323 					unsigned long action, void *arg)
1324 {
1325 	struct memory_notify *mnb = arg;
1326 	int ret = 0;
1327 	int nid;
1328 
1329 	nid = mnb->status_change_nid;
1330 	if (nid < 0)
1331 		goto out;
1332 
1333 	switch (action) {
1334 	case MEM_GOING_ONLINE:
1335 		mutex_lock(&slab_mutex);
1336 		ret = init_cache_node_node(nid);
1337 		mutex_unlock(&slab_mutex);
1338 		break;
1339 	case MEM_GOING_OFFLINE:
1340 		mutex_lock(&slab_mutex);
1341 		ret = drain_cache_node_node(nid);
1342 		mutex_unlock(&slab_mutex);
1343 		break;
1344 	case MEM_ONLINE:
1345 	case MEM_OFFLINE:
1346 	case MEM_CANCEL_ONLINE:
1347 	case MEM_CANCEL_OFFLINE:
1348 		break;
1349 	}
1350 out:
1351 	return notifier_from_errno(ret);
1352 }
1353 #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1354 
1355 /*
1356  * swap the static kmem_cache_node with kmalloced memory
1357  */
1358 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1359 				int nodeid)
1360 {
1361 	struct kmem_cache_node *ptr;
1362 
1363 	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1364 	BUG_ON(!ptr);
1365 
1366 	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1367 	/*
1368 	 * Do not assume that spinlocks can be initialized via memcpy:
1369 	 */
1370 	spin_lock_init(&ptr->list_lock);
1371 
1372 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1373 	cachep->node[nodeid] = ptr;
1374 }
1375 
1376 /*
1377  * For setting up all the kmem_cache_node structures for caches whose buffer_size
1378  * is the same as the size of kmem_cache_node.
1379  */
1380 static void __init set_up_node(struct kmem_cache *cachep, int index)
1381 {
1382 	int node;
1383 
1384 	for_each_online_node(node) {
1385 		cachep->node[node] = &init_kmem_cache_node[index + node];
1386 		cachep->node[node]->next_reap = jiffies +
1387 		    REAPTIMEOUT_NODE +
1388 		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1389 	}
1390 }
1391 
1392 /*
1393  * Initialisation.  Called after the page allocator has been initialised and
1394  * before smp_init().
1395  */
1396 void __init kmem_cache_init(void)
1397 {
1398 	int i;
1399 
1400 	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
1401 					sizeof(struct rcu_head));
1402 	kmem_cache = &kmem_cache_boot;
1403 
1404 	if (num_possible_nodes() == 1)
1405 		use_alien_caches = 0;
1406 
1407 	for (i = 0; i < NUM_INIT_LISTS; i++)
1408 		kmem_cache_node_init(&init_kmem_cache_node[i]);
1409 
1410 	/*
1411 	 * Fragmentation resistance on low memory - only use bigger
1412 	 * page orders on machines with more than 32MB of memory if
1413 	 * not overridden on the command line.
1414 	 */
1415 	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1416 		slab_max_order = SLAB_MAX_ORDER_HI;
1417 
1418 	/* Bootstrap is tricky, because several objects are allocated
1419 	 * from caches that do not exist yet:
1420 	 * 1) initialize the kmem_cache cache: it contains the struct
1421 	 *    kmem_cache structures of all caches, except kmem_cache itself:
1422 	 *    kmem_cache is statically allocated.
1423 	 *    Initially an __init data area is used for the head array and the
1424 	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1425 	 *    array at the end of the bootstrap.
1426 	 * 2) Create the first kmalloc cache.
1427 	 *    The struct kmem_cache for the new cache is allocated normally.
1428 	 *    An __init data area is used for the head array.
1429 	 * 3) Create the remaining kmalloc caches, with minimally sized
1430 	 *    head arrays.
1431 	 * 4) Replace the __init data head arrays for kmem_cache and the first
1432 	 *    kmalloc cache with kmalloc allocated arrays.
1433 	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
1434 	 *    the other caches with kmalloc allocated memory.
1435 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1436 	 */
1437 
1438 	/* 1) create the kmem_cache */
1439 
1440 	/*
1441 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1442 	 */
1443 	create_boot_cache(kmem_cache, "kmem_cache",
1444 		offsetof(struct kmem_cache, node) +
1445 				  nr_node_ids * sizeof(struct kmem_cache_node *),
1446 				  SLAB_HWCACHE_ALIGN);
1447 	list_add(&kmem_cache->list, &slab_caches);
1448 	slab_state = PARTIAL;
1449 
1450 	/*
1451 	 * Initialize the caches that provide memory for the  kmem_cache_node
1452 	 * structures first.  Without this, further allocations will bug.
1453 	 */
1454 	kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
1455 				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
1456 	slab_state = PARTIAL_NODE;
1457 
1458 	slab_early_init = 0;
1459 
1460 	/* 5) Replace the bootstrap kmem_cache_node */
1461 	{
1462 		int nid;
1463 
1464 		for_each_online_node(nid) {
1465 			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1466 
1467 			init_list(kmalloc_caches[INDEX_NODE],
1468 					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1469 		}
1470 	}
1471 
1472 	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1473 }
1474 
1475 void __init kmem_cache_init_late(void)
1476 {
1477 	struct kmem_cache *cachep;
1478 
1479 	slab_state = UP;
1480 
1481 	/* 6) resize the head arrays to their final sizes */
1482 	mutex_lock(&slab_mutex);
1483 	list_for_each_entry(cachep, &slab_caches, list)
1484 		if (enable_cpucache(cachep, GFP_NOWAIT))
1485 			BUG();
1486 	mutex_unlock(&slab_mutex);
1487 
1488 	/* Done! */
1489 	slab_state = FULL;
1490 
1491 	/*
1492 	 * Register a cpu startup notifier callback that initializes
1493 	 * cpu_cache_get for all new cpus
1494 	 */
1495 	register_cpu_notifier(&cpucache_notifier);
1496 
1497 #ifdef CONFIG_NUMA
1498 	/*
1499 	 * Register a memory hotplug callback that initializes and frees
1500 	 * node.
1501 	 */
1502 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1503 #endif
1504 
1505 	/*
1506 	 * The reap timers are started later, with a module init call: That part
1507 	 * of the kernel is not yet operational.
1508 	 */
1509 }
1510 
1511 static int __init cpucache_init(void)
1512 {
1513 	int cpu;
1514 
1515 	/*
1516 	 * Register the timers that return unneeded pages to the page allocator
1517 	 */
1518 	for_each_online_cpu(cpu)
1519 		start_cpu_timer(cpu);
1520 
1521 	/* Done! */
1522 	slab_state = FULL;
1523 	return 0;
1524 }
1525 __initcall(cpucache_init);
1526 
1527 static noinline void
1528 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1529 {
1530 #if DEBUG
1531 	struct kmem_cache_node *n;
1532 	struct page *page;
1533 	unsigned long flags;
1534 	int node;
1535 	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1536 				      DEFAULT_RATELIMIT_BURST);
1537 
1538 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1539 		return;
1540 
1541 	printk(KERN_WARNING
1542 		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1543 		nodeid, gfpflags);
1544 	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
1545 		cachep->name, cachep->size, cachep->gfporder);
1546 
1547 	for_each_kmem_cache_node(cachep, node, n) {
1548 		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1549 		unsigned long active_slabs = 0, num_slabs = 0;
1550 
1551 		spin_lock_irqsave(&n->list_lock, flags);
1552 		list_for_each_entry(page, &n->slabs_full, lru) {
1553 			active_objs += cachep->num;
1554 			active_slabs++;
1555 		}
1556 		list_for_each_entry(page, &n->slabs_partial, lru) {
1557 			active_objs += page->active;
1558 			active_slabs++;
1559 		}
1560 		list_for_each_entry(page, &n->slabs_free, lru)
1561 			num_slabs++;
1562 
1563 		free_objects += n->free_objects;
1564 		spin_unlock_irqrestore(&n->list_lock, flags);
1565 
1566 		num_slabs += active_slabs;
1567 		num_objs = num_slabs * cachep->num;
1568 		printk(KERN_WARNING
1569 			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
1570 			node, active_slabs, num_slabs, active_objs, num_objs,
1571 			free_objects);
1572 	}
1573 #endif
1574 }
1575 
1576 /*
1577  * Interface to system's page allocator. No need to hold the
1578  * kmem_cache_node ->list_lock.
1579  *
1580  * If we requested dmaable memory, we will get it. Even if we
1581  * did not request dmaable memory, we might get it, but that
1582  * would be relatively rare and ignorable.
1583  */
1584 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1585 								int nodeid)
1586 {
1587 	struct page *page;
1588 	int nr_pages;
1589 
1590 	flags |= cachep->allocflags;
1591 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1592 		flags |= __GFP_RECLAIMABLE;
1593 
1594 	if (memcg_charge_slab(cachep, flags, cachep->gfporder))
1595 		return NULL;
1596 
1597 	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1598 	if (!page) {
1599 		memcg_uncharge_slab(cachep, cachep->gfporder);
1600 		slab_out_of_memory(cachep, flags, nodeid);
1601 		return NULL;
1602 	}
1603 
1604 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1605 	if (page_is_pfmemalloc(page))
1606 		pfmemalloc_active = true;
1607 
1608 	nr_pages = (1 << cachep->gfporder);
1609 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1610 		add_zone_page_state(page_zone(page),
1611 			NR_SLAB_RECLAIMABLE, nr_pages);
1612 	else
1613 		add_zone_page_state(page_zone(page),
1614 			NR_SLAB_UNRECLAIMABLE, nr_pages);
1615 	__SetPageSlab(page);
1616 	if (page_is_pfmemalloc(page))
1617 		SetPageSlabPfmemalloc(page);
1618 
1619 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1620 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1621 
1622 		if (cachep->ctor)
1623 			kmemcheck_mark_uninitialized_pages(page, nr_pages);
1624 		else
1625 			kmemcheck_mark_unallocated_pages(page, nr_pages);
1626 	}
1627 
1628 	return page;
1629 }
1630 
1631 /*
1632  * Interface to system's page release.
1633  */
1634 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1635 {
1636 	const unsigned long nr_freed = (1 << cachep->gfporder);
1637 
1638 	kmemcheck_free_shadow(page, cachep->gfporder);
1639 
1640 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1641 		sub_zone_page_state(page_zone(page),
1642 				NR_SLAB_RECLAIMABLE, nr_freed);
1643 	else
1644 		sub_zone_page_state(page_zone(page),
1645 				NR_SLAB_UNRECLAIMABLE, nr_freed);
1646 
1647 	BUG_ON(!PageSlab(page));
1648 	__ClearPageSlabPfmemalloc(page);
1649 	__ClearPageSlab(page);
1650 	page_mapcount_reset(page);
1651 	page->mapping = NULL;
1652 
1653 	if (current->reclaim_state)
1654 		current->reclaim_state->reclaimed_slab += nr_freed;
1655 	__free_pages(page, cachep->gfporder);
1656 	memcg_uncharge_slab(cachep, cachep->gfporder);
1657 }
1658 
1659 static void kmem_rcu_free(struct rcu_head *head)
1660 {
1661 	struct kmem_cache *cachep;
1662 	struct page *page;
1663 
1664 	page = container_of(head, struct page, rcu_head);
1665 	cachep = page->slab_cache;
1666 
1667 	kmem_freepages(cachep, page);
1668 }
1669 
1670 #if DEBUG
1671 
1672 #ifdef CONFIG_DEBUG_PAGEALLOC
1673 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1674 			    unsigned long caller)
1675 {
1676 	int size = cachep->object_size;
1677 
1678 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1679 
1680 	if (size < 5 * sizeof(unsigned long))
1681 		return;
1682 
1683 	*addr++ = 0x12345678;
1684 	*addr++ = caller;
1685 	*addr++ = smp_processor_id();
1686 	size -= 3 * sizeof(unsigned long);
1687 	{
1688 		unsigned long *sptr = &caller;
1689 		unsigned long svalue;
1690 
1691 		while (!kstack_end(sptr)) {
1692 			svalue = *sptr++;
1693 			if (kernel_text_address(svalue)) {
1694 				*addr++ = svalue;
1695 				size -= sizeof(unsigned long);
1696 				if (size <= sizeof(unsigned long))
1697 					break;
1698 			}
1699 		}
1700 
1701 	}
1702 	*addr++ = 0x87654321;
1703 }
1704 #endif
1705 
1706 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1707 {
1708 	int size = cachep->object_size;
1709 	addr = &((char *)addr)[obj_offset(cachep)];
1710 
1711 	memset(addr, val, size);
1712 	*(unsigned char *)(addr + size - 1) = POISON_END;
1713 }
1714 
1715 static void dump_line(char *data, int offset, int limit)
1716 {
1717 	int i;
1718 	unsigned char error = 0;
1719 	int bad_count = 0;
1720 
1721 	printk(KERN_ERR "%03x: ", offset);
1722 	for (i = 0; i < limit; i++) {
1723 		if (data[offset + i] != POISON_FREE) {
1724 			error = data[offset + i];
1725 			bad_count++;
1726 		}
1727 	}
1728 	print_hex_dump(KERN_CONT, "", 0, 16, 1,
1729 			&data[offset], limit, 1);
1730 
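	/*
	 * XOR with the expected poison byte leaves exactly the bits that
	 * differ; "error & (error - 1)" clears the lowest set bit, so the
	 * test below is zero iff at most one bit differs, the classic
	 * signature of a single flipped bit in bad RAM.
	 */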
1731 	if (bad_count == 1) {
1732 		error ^= POISON_FREE;
1733 		if (!(error & (error - 1))) {
1734 			printk(KERN_ERR "Single bit error detected. Probably "
1735 					"bad RAM.\n");
1736 #ifdef CONFIG_X86
1737 			printk(KERN_ERR "Run memtest86+ or a similar memory "
1738 					"test tool.\n");
1739 #else
1740 			printk(KERN_ERR "Run a memory test tool.\n");
1741 #endif
1742 		}
1743 	}
1744 }
1745 #endif
1746 
1747 #if DEBUG
1748 
1749 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1750 {
1751 	int i, size;
1752 	char *realobj;
1753 
1754 	if (cachep->flags & SLAB_RED_ZONE) {
1755 		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1756 			*dbg_redzone1(cachep, objp),
1757 			*dbg_redzone2(cachep, objp));
1758 	}
1759 
1760 	if (cachep->flags & SLAB_STORE_USER) {
1761 		printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
1762 		       *dbg_userword(cachep, objp),
1763 		       *dbg_userword(cachep, objp));
1764 	}
1765 	realobj = (char *)objp + obj_offset(cachep);
1766 	size = cachep->object_size;
1767 	for (i = 0; i < size && lines; i += 16, lines--) {
1768 		int limit;
1769 		limit = 16;
1770 		if (i + limit > size)
1771 			limit = size - i;
1772 		dump_line(realobj, i, limit);
1773 	}
1774 }
1775 
1776 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1777 {
1778 	char *realobj;
1779 	int size, i;
1780 	int lines = 0;
1781 
1782 	realobj = (char *)objp + obj_offset(cachep);
1783 	size = cachep->object_size;
1784 
1785 	for (i = 0; i < size; i++) {
1786 		char exp = POISON_FREE;
1787 		if (i == size - 1)
1788 			exp = POISON_END;
1789 		if (realobj[i] != exp) {
1790 			int limit;
1791 			/* Mismatch ! */
1792 			/* Print header */
1793 			if (lines == 0) {
1794 				printk(KERN_ERR
1795 					"Slab corruption (%s): %s start=%p, len=%d\n",
1796 					print_tainted(), cachep->name, realobj, size);
1797 				print_objinfo(cachep, objp, 0);
1798 			}
1799 			/* Hexdump the affected line */
1800 			i = (i / 16) * 16;
1801 			limit = 16;
1802 			if (i + limit > size)
1803 				limit = size - i;
1804 			dump_line(realobj, i, limit);
1805 			i += 16;
1806 			lines++;
1807 			/* Limit to 5 lines */
1808 			if (lines > 5)
1809 				break;
1810 		}
1811 	}
1812 	if (lines != 0) {
1813 		/* Print some data about the neighboring objects, if they
1814 		 * exist:
1815 		 */
1816 		struct page *page = virt_to_head_page(objp);
1817 		unsigned int objnr;
1818 
1819 		objnr = obj_to_index(cachep, page, objp);
1820 		if (objnr) {
1821 			objp = index_to_obj(cachep, page, objnr - 1);
1822 			realobj = (char *)objp + obj_offset(cachep);
1823 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1824 			       realobj, size);
1825 			print_objinfo(cachep, objp, 2);
1826 		}
1827 		if (objnr + 1 < cachep->num) {
1828 			objp = index_to_obj(cachep, page, objnr + 1);
1829 			realobj = (char *)objp + obj_offset(cachep);
1830 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1831 			       realobj, size);
1832 			print_objinfo(cachep, objp, 2);
1833 		}
1834 	}
1835 }
1836 #endif
1837 
1838 #if DEBUG
1839 static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1840 						struct page *page)
1841 {
1842 	int i;
1843 	for (i = 0; i < cachep->num; i++) {
1844 		void *objp = index_to_obj(cachep, page, i);
1845 
1846 		if (cachep->flags & SLAB_POISON) {
1847 #ifdef CONFIG_DEBUG_PAGEALLOC
1848 			if (cachep->size % PAGE_SIZE == 0 &&
1849 					OFF_SLAB(cachep))
1850 				kernel_map_pages(virt_to_page(objp),
1851 					cachep->size / PAGE_SIZE, 1);
1852 			else
1853 				check_poison_obj(cachep, objp);
1854 #else
1855 			check_poison_obj(cachep, objp);
1856 #endif
1857 		}
1858 		if (cachep->flags & SLAB_RED_ZONE) {
1859 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1860 				slab_error(cachep, "start of a freed object "
1861 					   "was overwritten");
1862 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1863 				slab_error(cachep, "end of a freed object "
1864 					   "was overwritten");
1865 		}
1866 	}
1867 }
1868 #else
1869 static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1870 						struct page *page)
1871 {
1872 }
1873 #endif
1874 
1875 /**
1876  * slab_destroy - destroy and release all objects in a slab
1877  * @cachep: cache pointer being destroyed
1878  * @page: page pointer being destroyed
1879  *
1880  * Destroy all the objs in a slab page, and release the mem back to the system.
1881  * Before calling, the slab page must have been unlinked from the cache. The
1882  * kmem_cache_node ->list_lock is not held/needed.
1883  */
1884 static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1885 {
1886 	void *freelist;
1887 
1888 	freelist = page->freelist;
1889 	slab_destroy_debugcheck(cachep, page);
1890 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1891 		struct rcu_head *head;
1892 
1893 		/*
1894 		 * RCU free overloads the RCU head over the LRU.
1895 		 * slab_page has been overloaded over the LRU,
1896 		 * but it is not used from this point on, so
1897 		 * we can reuse it safely.
1898 		 */
1899 		head = (void *)&page->rcu_head;
1900 		call_rcu(head, kmem_rcu_free);
1901 
1902 	} else {
1903 		kmem_freepages(cachep, page);
1904 	}
1905 
1906 	/*
1907 	 * From now on, we don't use freelist
1908 	 * although actual page can be freed in rcu context
1909 	 */
1910 	if (OFF_SLAB(cachep))
1911 		kmem_cache_free(cachep->freelist_cache, freelist);
1912 }
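/*
 * Illustrative sketch (not part of this file): a cache whose pages take
 * the kmem_rcu_free() path above. With SLAB_DESTROY_BY_RCU an object
 * may be freed and reused while a reader still holds a pointer to it,
 * but the backing page is not returned to the page allocator until a
 * grace period has elapsed, so code under rcu_read_lock() can safely
 * revalidate the object it found. Using a hypothetical example_cache:
 *
 *	example_cache = kmem_cache_create("example",
 *					  sizeof(struct example), 0,
 *					  SLAB_DESTROY_BY_RCU, NULL);
 */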
1913 
1914 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1915 {
1916 	struct page *page, *n;
1917 
1918 	list_for_each_entry_safe(page, n, list, lru) {
1919 		list_del(&page->lru);
1920 		slab_destroy(cachep, page);
1921 	}
1922 }
1923 
1924 /**
1925  * calculate_slab_order - calculate size (page order) of slabs
1926  * @cachep: pointer to the cache that is being created
1927  * @size: size of objects to be created in this cache.
1928  * @align: required alignment for the objects.
1929  * @flags: slab allocation flags
1930  *
1931  * Also calculates the number of objects per slab.
1932  *
1933  * This could be made much more intelligent.  For now, try to avoid using
1934  * high order pages for slabs.  When the gfp() functions are more friendly
1935  * towards high-order requests, this should be changed.
1936  */
1937 static size_t calculate_slab_order(struct kmem_cache *cachep,
1938 			size_t size, size_t align, unsigned long flags)
1939 {
1940 	unsigned long offslab_limit;
1941 	size_t left_over = 0;
1942 	int gfporder;
1943 
1944 	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1945 		unsigned int num;
1946 		size_t remainder;
1947 
1948 		cache_estimate(gfporder, size, align, flags, &remainder, &num);
1949 		if (!num)
1950 			continue;
1951 
1952 		/* Can't handle more objects per slab than SLAB_OBJ_MAX_NUM */
1953 		if (num > SLAB_OBJ_MAX_NUM)
1954 			break;
1955 
1956 		if (flags & CFLGS_OFF_SLAB) {
1957 			size_t freelist_size_per_obj = sizeof(freelist_idx_t);
1958 			/*
1959 			 * Max number of objs-per-slab for caches which
1960 			 * use off-slab slabs. Needed to avoid a possible
1961 			 * looping condition in cache_grow().
1962 			 */
1963 			if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
1964 				freelist_size_per_obj += sizeof(char);
1965 			offslab_limit = size;
1966 			offslab_limit /= freelist_size_per_obj;
1967 
1968 			if (num > offslab_limit)
1969 				break;
1970 		}
1971 
1972 		/* Found something acceptable - save it away */
1973 		cachep->num = num;
1974 		cachep->gfporder = gfporder;
1975 		left_over = remainder;
1976 
1977 		/*
1978 		 * A VFS-reclaimable slab tends to have most allocations
1979 		 * as GFP_NOFS and we really don't want to have to be allocating
1980 		 * higher-order pages when we are unable to shrink dcache.
1981 		 */
1982 		if (flags & SLAB_RECLAIM_ACCOUNT)
1983 			break;
1984 
1985 		/*
1986 		 * Large number of objects is good, but very large slabs are
1987 		 * currently bad for the gfp()s.
1988 		 */
1989 		if (gfporder >= slab_max_order)
1990 			break;
1991 
1992 		/*
1993 		 * Acceptable internal fragmentation?
1994 		 */
1995 		if (left_over * 8 <= (PAGE_SIZE << gfporder))
1996 			break;
1997 	}
1998 	return left_over;
1999 }
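/*
 * Worked example (assuming 4K pages, the default slab_max_order of 1,
 * and ignoring management overhead for brevity): for 1080-byte objects,
 * order 0 fits 3 objects with 856 bytes left over; 856 * 8 > 4096, so
 * the fragmentation check does not terminate the search and order 1 is
 * tried, where 7 objects fit with 632 bytes left over and
 * 632 * 8 <= 8192 ends the loop.
 */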
2000 
2001 static struct array_cache __percpu *alloc_kmem_cache_cpus(
2002 		struct kmem_cache *cachep, int entries, int batchcount)
2003 {
2004 	int cpu;
2005 	size_t size;
2006 	struct array_cache __percpu *cpu_cache;
2007 
2008 	size = sizeof(void *) * entries + sizeof(struct array_cache);
2009 	cpu_cache = __alloc_percpu(size, sizeof(void *));
2010 
2011 	if (!cpu_cache)
2012 		return NULL;
2013 
2014 	for_each_possible_cpu(cpu) {
2015 		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
2016 				entries, batchcount);
2017 	}
2018 
2019 	return cpu_cache;
2020 }
2021 
2022 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2023 {
2024 	if (slab_state >= FULL)
2025 		return enable_cpucache(cachep, gfp);
2026 
2027 	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
2028 	if (!cachep->cpu_cache)
2029 		return 1;
2030 
2031 	if (slab_state == DOWN) {
2032 		/* Creation of first cache (kmem_cache). */
2033 		set_up_node(kmem_cache, CACHE_CACHE);
2034 	} else if (slab_state == PARTIAL) {
2035 		/* For kmem_cache_node */
2036 		set_up_node(cachep, SIZE_NODE);
2037 	} else {
2038 		int node;
2039 
2040 		for_each_online_node(node) {
2041 			cachep->node[node] = kmalloc_node(
2042 				sizeof(struct kmem_cache_node), gfp, node);
2043 			BUG_ON(!cachep->node[node]);
2044 			kmem_cache_node_init(cachep->node[node]);
2045 		}
2046 	}
2047 
2048 	cachep->node[numa_mem_id()]->next_reap =
2049 			jiffies + REAPTIMEOUT_NODE +
2050 			((unsigned long)cachep) % REAPTIMEOUT_NODE;
2051 
2052 	cpu_cache_get(cachep)->avail = 0;
2053 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2054 	cpu_cache_get(cachep)->batchcount = 1;
2055 	cpu_cache_get(cachep)->touched = 0;
2056 	cachep->batchcount = 1;
2057 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2058 	return 0;
2059 }
2060 
2061 unsigned long kmem_cache_flags(unsigned long object_size,
2062 	unsigned long flags, const char *name,
2063 	void (*ctor)(void *))
2064 {
2065 	return flags;
2066 }
2067 
2068 struct kmem_cache *
2069 __kmem_cache_alias(const char *name, size_t size, size_t align,
2070 		   unsigned long flags, void (*ctor)(void *))
2071 {
2072 	struct kmem_cache *cachep;
2073 
2074 	cachep = find_mergeable(size, align, flags, name, ctor);
2075 	if (cachep) {
2076 		cachep->refcount++;
2077 
2078 		/*
2079 		 * Adjust the object sizes so that we clear
2080 		 * the complete object on kzalloc.
2081 		 */
2082 		cachep->object_size = max_t(int, cachep->object_size, size);
2083 	}
2084 	return cachep;
2085 }
2086 
2087 /**
2088  * __kmem_cache_create - Create a cache.
2089  * @cachep: cache management descriptor
2090  * @flags: SLAB flags
2091  *
2092  * Returns zero on success, nonzero on failure.
2093  * Cannot be called within an interrupt, but can be interrupted.
2094  * The @ctor is run when new pages are allocated by the cache.
2095  *
2096  * The flags are
2097  *
2098  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2099  * to catch references to uninitialised memory.
2100  *
2101  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2102  * for buffer overruns.
2103  *
2104  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2105  * cacheline.  This can be beneficial if you're counting cycles as closely
2106  * as davem.
2107  */
2108 int
2109 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2110 {
2111 	size_t left_over, freelist_size;
2112 	size_t ralign = BYTES_PER_WORD;
2113 	gfp_t gfp;
2114 	int err;
2115 	size_t size = cachep->size;
2116 
2117 #if DEBUG
2118 #if FORCED_DEBUG
2119 	/*
2120 	 * Enable redzoning and last user accounting, except for caches with
2121 	 * large objects, if the increased size would increase the object size
2122 	 * above the next power of two: caches with object sizes just above a
2123 	 * power of two have a significant amount of internal fragmentation.
2124 	 */
2125 	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2126 						2 * sizeof(unsigned long long)))
2127 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2128 	if (!(flags & SLAB_DESTROY_BY_RCU))
2129 		flags |= SLAB_POISON;
2130 #endif
2131 	if (flags & SLAB_DESTROY_BY_RCU)
2132 		BUG_ON(flags & SLAB_POISON);
2133 #endif
2134 
2135 	/*
2136 	 * Check that size is in terms of words.  This is needed to avoid
2137 	 * unaligned accesses for some archs when redzoning is used, and makes
2138 	 * sure any on-slab bufctl's are also correctly aligned.
2139 	 */
2140 	if (size & (BYTES_PER_WORD - 1)) {
2141 		size += (BYTES_PER_WORD - 1);
2142 		size &= ~(BYTES_PER_WORD - 1);
2143 	}
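	/*
	 * Example (64-bit, so BYTES_PER_WORD == 8): a 13-byte object is
	 * rounded up to 16 bytes by the masking above, which keeps any
	 * following red zone words and freelist indices naturally aligned.
	 */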
2144 
2145 	if (flags & SLAB_RED_ZONE) {
2146 		ralign = REDZONE_ALIGN;
2147 		/* If redzoning, ensure that the second redzone is suitably
2148 		 * aligned, by adjusting the object size accordingly. */
2149 		size += REDZONE_ALIGN - 1;
2150 		size &= ~(REDZONE_ALIGN - 1);
2151 	}
2152 
2153 	/* 3) caller mandated alignment */
2154 	if (ralign < cachep->align) {
2155 		ralign = cachep->align;
2156 	}
2157 	/* disable debug if necessary */
2158 	if (ralign > __alignof__(unsigned long long))
2159 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2160 	/*
2161 	 * 4) Store it.
2162 	 */
2163 	cachep->align = ralign;
2164 
2165 	if (slab_is_available())
2166 		gfp = GFP_KERNEL;
2167 	else
2168 		gfp = GFP_NOWAIT;
2169 
2170 #if DEBUG
2171 
2172 	/*
2173 	 * Both debugging options require word-alignment which is calculated
2174 	 * into align above.
2175 	 */
2176 	if (flags & SLAB_RED_ZONE) {
2177 		/* add space for red zone words */
2178 		cachep->obj_offset += sizeof(unsigned long long);
2179 		size += 2 * sizeof(unsigned long long);
2180 	}
2181 	if (flags & SLAB_STORE_USER) {
2182 		/* user store requires one word storage behind the end of
2183 		 * the real object. But if the second red zone needs to be
2184 		 * aligned to 64 bits, we must allow that much space.
2185 		 */
2186 		if (flags & SLAB_RED_ZONE)
2187 			size += REDZONE_ALIGN;
2188 		else
2189 			size += BYTES_PER_WORD;
2190 	}
2191 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2192 	/*
2193 	 * To activate debug pagealloc, off-slab management is a necessary
2194 	 * requirement. In the early phase of initialization the small
2195 	 * kmalloc caches are not yet set up, so off-slab management is not
2196 	 * possible then. Hence the size >= 256 check: it guarantees that
2197 	 * every small sized slab cache we need has already been initialized.
2198 	 */
2199 	if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
2200 		size >= 256 && cachep->object_size > cache_line_size() &&
2201 		ALIGN(size, cachep->align) < PAGE_SIZE) {
2202 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
2203 		size = PAGE_SIZE;
2204 	}
2205 #endif
2206 #endif
2207 
2208 	/*
2209 	 * Determine if the slab management is 'on' or 'off' slab.
2210 	 * (bootstrapping cannot cope with offslab caches so don't do
2211 	 * it too early on. Always use on-slab management when
2212 	 * SLAB_NOLEAKTRACE is set, to avoid recursive calls into kmemleak.)
2213 	 */
2214 	if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init &&
2215 	    !(flags & SLAB_NOLEAKTRACE))
2216 		/*
2217 		 * Size is large, assume best to place the slab management obj
2218 		 * off-slab (should allow better packing of objs).
2219 		 */
2220 		flags |= CFLGS_OFF_SLAB;
2221 
2222 	size = ALIGN(size, cachep->align);
2223 	/*
2224 	 * We should restrict the number of objects in a slab to implement
2225 	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
2226 	 */
2227 	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2228 		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2229 
2230 	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
2231 
2232 	if (!cachep->num)
2233 		return -E2BIG;
2234 
2235 	freelist_size = calculate_freelist_size(cachep->num, cachep->align);
2236 
2237 	/*
2238 	 * If the slab has been placed off-slab, and we have enough space then
2239 	 * move it on-slab. This is at the expense of any extra colouring.
2240 	 */
2241 	if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
2242 		flags &= ~CFLGS_OFF_SLAB;
2243 		left_over -= freelist_size;
2244 	}
2245 
2246 	if (flags & CFLGS_OFF_SLAB) {
2247 		/* really off slab. No need for manual alignment */
2248 		freelist_size = calculate_freelist_size(cachep->num, 0);
2249 
2250 #ifdef CONFIG_PAGE_POISONING
2251 		/* If we're going to use the generic kernel_map_pages()
2252 		 * poisoning, then it's going to smash the contents of
2253 		 * the redzone and userword anyhow, so switch them off.
2254 		 */
2255 		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2256 			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2257 #endif
2258 	}
2259 
2260 	cachep->colour_off = cache_line_size();
2261 	/* Offset must be a multiple of the alignment. */
2262 	if (cachep->colour_off < cachep->align)
2263 		cachep->colour_off = cachep->align;
2264 	cachep->colour = left_over / cachep->colour_off;
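	/*
	 * Example of the colouring arithmetic (assuming 64-byte cache
	 * lines): with 200 bytes left over per slab, colour is 3, so
	 * successive slabs place their first object at offsets 0, 64 and
	 * 128 before cache_grow() wraps colour_next back to 0, spreading
	 * the hot start-of-slab objects across cache lines.
	 */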
2265 	cachep->freelist_size = freelist_size;
2266 	cachep->flags = flags;
2267 	cachep->allocflags = __GFP_COMP;
2268 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2269 		cachep->allocflags |= GFP_DMA;
2270 	cachep->size = size;
2271 	cachep->reciprocal_buffer_size = reciprocal_value(size);
2272 
2273 	if (flags & CFLGS_OFF_SLAB) {
2274 		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
2275 		/*
2276 		 * This is a possibility for one of the kmalloc_{dma,}_caches.
2277 		 * But since we go off slab only for object size greater than
2278 		 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
2279 	 * in ascending order, this should not happen at all.
2280 		 * But leave a BUG_ON for some lucky dude.
2281 		 */
2282 		BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
2283 	}
2284 
2285 	err = setup_cpu_cache(cachep, gfp);
2286 	if (err) {
2287 		__kmem_cache_shutdown(cachep);
2288 		return err;
2289 	}
2290 
2291 	return 0;
2292 }
2293 
2294 #if DEBUG
2295 static void check_irq_off(void)
2296 {
2297 	BUG_ON(!irqs_disabled());
2298 }
2299 
2300 static void check_irq_on(void)
2301 {
2302 	BUG_ON(irqs_disabled());
2303 }
2304 
2305 static void check_spinlock_acquired(struct kmem_cache *cachep)
2306 {
2307 #ifdef CONFIG_SMP
2308 	check_irq_off();
2309 	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2310 #endif
2311 }
2312 
2313 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2314 {
2315 #ifdef CONFIG_SMP
2316 	check_irq_off();
2317 	assert_spin_locked(&get_node(cachep, node)->list_lock);
2318 #endif
2319 }
2320 
2321 #else
2322 #define check_irq_off()	do { } while(0)
2323 #define check_irq_on()	do { } while(0)
2324 #define check_spinlock_acquired(x) do { } while(0)
2325 #define check_spinlock_acquired_node(x, y) do { } while(0)
2326 #endif
2327 
2328 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
2329 			struct array_cache *ac,
2330 			int force, int node);
2331 
2332 static void do_drain(void *arg)
2333 {
2334 	struct kmem_cache *cachep = arg;
2335 	struct array_cache *ac;
2336 	int node = numa_mem_id();
2337 	struct kmem_cache_node *n;
2338 	LIST_HEAD(list);
2339 
2340 	check_irq_off();
2341 	ac = cpu_cache_get(cachep);
2342 	n = get_node(cachep, node);
2343 	spin_lock(&n->list_lock);
2344 	free_block(cachep, ac->entry, ac->avail, node, &list);
2345 	spin_unlock(&n->list_lock);
2346 	slabs_destroy(cachep, &list);
2347 	ac->avail = 0;
2348 }
2349 
2350 static void drain_cpu_caches(struct kmem_cache *cachep)
2351 {
2352 	struct kmem_cache_node *n;
2353 	int node;
2354 
2355 	on_each_cpu(do_drain, cachep, 1);
2356 	check_irq_on();
2357 	for_each_kmem_cache_node(cachep, node, n)
2358 		if (n->alien)
2359 			drain_alien_cache(cachep, n->alien);
2360 
2361 	for_each_kmem_cache_node(cachep, node, n)
2362 		drain_array(cachep, n, n->shared, 1, node);
2363 }
2364 
2365 /*
2366  * Remove slabs from the list of free slabs.
2367  * Specify the number of slabs to drain in tofree.
2368  *
2369  * Returns the actual number of slabs released.
2370  */
2371 static int drain_freelist(struct kmem_cache *cache,
2372 			struct kmem_cache_node *n, int tofree)
2373 {
2374 	struct list_head *p;
2375 	int nr_freed;
2376 	struct page *page;
2377 
2378 	nr_freed = 0;
2379 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2380 
2381 		spin_lock_irq(&n->list_lock);
2382 		p = n->slabs_free.prev;
2383 		if (p == &n->slabs_free) {
2384 			spin_unlock_irq(&n->list_lock);
2385 			goto out;
2386 		}
2387 
2388 		page = list_entry(p, struct page, lru);
2389 #if DEBUG
2390 		BUG_ON(page->active);
2391 #endif
2392 		list_del(&page->lru);
2393 		/*
2394 		 * Safe to drop the lock. The slab is no longer linked
2395 		 * to the cache.
2396 		 */
2397 		n->free_objects -= cache->num;
2398 		spin_unlock_irq(&n->list_lock);
2399 		slab_destroy(cache, page);
2400 		nr_freed++;
2401 	}
2402 out:
2403 	return nr_freed;
2404 }
2405 
2406 int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
2407 {
2408 	int ret = 0;
2409 	int node;
2410 	struct kmem_cache_node *n;
2411 
2412 	drain_cpu_caches(cachep);
2413 
2414 	check_irq_on();
2415 	for_each_kmem_cache_node(cachep, node, n) {
2416 		drain_freelist(cachep, n, slabs_tofree(cachep, n));
2417 
2418 		ret += !list_empty(&n->slabs_full) ||
2419 			!list_empty(&n->slabs_partial);
2420 	}
2421 	return (ret ? 1 : 0);
2422 }
2423 
2424 int __kmem_cache_shutdown(struct kmem_cache *cachep)
2425 {
2426 	int i;
2427 	struct kmem_cache_node *n;
2428 	int rc = __kmem_cache_shrink(cachep, false);
2429 
2430 	if (rc)
2431 		return rc;
2432 
2433 	free_percpu(cachep->cpu_cache);
2434 
2435 	/* NUMA: free the node structures */
2436 	for_each_kmem_cache_node(cachep, i, n) {
2437 		kfree(n->shared);
2438 		free_alien_cache(n->alien);
2439 		kfree(n);
2440 		cachep->node[i] = NULL;
2441 	}
2442 	return 0;
2443 }
2444 
2445 /*
2446  * Get the memory for a slab management obj.
2447  *
2448  * For a slab cache whose slab descriptor is off-slab, the
2449  * slab descriptor can't come from the same cache that is being created,
2450  * because that would mean deferring the creation of
2451  * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point,
2452  * and we would eventually call down to __kmem_cache_create(), which
2453  * in turn looks up the desired-size one in the kmalloc_{dma,}_caches.
2454  * This is a "chicken-and-egg" problem.
2455  *
2456  * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2457  * which are all initialized during kmem_cache_init().
2458  */
2459 static void *alloc_slabmgmt(struct kmem_cache *cachep,
2460 				   struct page *page, int colour_off,
2461 				   gfp_t local_flags, int nodeid)
2462 {
2463 	void *freelist;
2464 	void *addr = page_address(page);
2465 
2466 	if (OFF_SLAB(cachep)) {
2467 		/* Slab management obj is off-slab. */
2468 		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2469 					      local_flags, nodeid);
2470 		if (!freelist)
2471 			return NULL;
2472 	} else {
2473 		freelist = addr + colour_off;
2474 		colour_off += cachep->freelist_size;
2475 	}
2476 	page->active = 0;
2477 	page->s_mem = addr + colour_off;
2478 	return freelist;
2479 }
2480 
2481 static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2482 {
2483 	return ((freelist_idx_t *)page->freelist)[idx];
2484 }
2485 
2486 static inline void set_free_obj(struct page *page,
2487 					unsigned int idx, freelist_idx_t val)
2488 {
2489 	((freelist_idx_t *)(page->freelist))[idx] = val;
2490 }
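/*
 * The freelist is a plain array of object indices. A fresh slab with
 * four objects starts as {0, 1, 2, 3}: slab_get_obj() hands out the
 * index stored at position page->active and then advances it, while
 * slab_put_obj() rewinds page->active and records the freed object's
 * index there. Entries below page->active are consumed; entries from
 * page->active upwards name the still-free objects.
 */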
2491 
2492 static void cache_init_objs(struct kmem_cache *cachep,
2493 			    struct page *page)
2494 {
2495 	int i;
2496 
2497 	for (i = 0; i < cachep->num; i++) {
2498 		void *objp = index_to_obj(cachep, page, i);
2499 #if DEBUG
2500 		/* need to poison the objs? */
2501 		if (cachep->flags & SLAB_POISON)
2502 			poison_obj(cachep, objp, POISON_FREE);
2503 		if (cachep->flags & SLAB_STORE_USER)
2504 			*dbg_userword(cachep, objp) = NULL;
2505 
2506 		if (cachep->flags & SLAB_RED_ZONE) {
2507 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2508 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2509 		}
2510 		/*
2511 		 * Constructors are not allowed to allocate memory from the same
2512 		 * cache which they are a constructor for.  Otherwise, deadlock.
2513 		 * They must also be threaded.
2514 		 */
2515 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2516 			cachep->ctor(objp + obj_offset(cachep));
2517 
2518 		if (cachep->flags & SLAB_RED_ZONE) {
2519 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2520 				slab_error(cachep, "constructor overwrote the"
2521 					   " end of an object");
2522 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2523 				slab_error(cachep, "constructor overwrote the"
2524 					   " start of an object");
2525 		}
2526 		if ((cachep->size % PAGE_SIZE) == 0 &&
2527 			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2528 			kernel_map_pages(virt_to_page(objp),
2529 					 cachep->size / PAGE_SIZE, 0);
2530 #else
2531 		if (cachep->ctor)
2532 			cachep->ctor(objp);
2533 #endif
2534 		set_obj_status(page, i, OBJECT_FREE);
2535 		set_free_obj(page, i, i);
2536 	}
2537 }
2538 
2539 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2540 {
2541 	if (CONFIG_ZONE_DMA_FLAG) {
2542 		if (flags & GFP_DMA)
2543 			BUG_ON(!(cachep->allocflags & GFP_DMA));
2544 		else
2545 			BUG_ON(cachep->allocflags & GFP_DMA);
2546 	}
2547 }
2548 
2549 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
2550 				int nodeid)
2551 {
2552 	void *objp;
2553 
2554 	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2555 	page->active++;
2556 #if DEBUG
2557 	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2558 #endif
2559 
2560 	return objp;
2561 }
2562 
2563 static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
2564 				void *objp, int nodeid)
2565 {
2566 	unsigned int objnr = obj_to_index(cachep, page, objp);
2567 #if DEBUG
2568 	unsigned int i;
2569 
2570 	/* Verify that the slab belongs to the intended node */
2571 	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2572 
2573 	/* Verify double free bug */
2574 	for (i = page->active; i < cachep->num; i++) {
2575 		if (get_free_obj(page, i) == objnr) {
2576 			printk(KERN_ERR "slab: double free detected in cache "
2577 					"'%s', objp %p\n", cachep->name, objp);
2578 			BUG();
2579 		}
2580 	}
2581 #endif
2582 	page->active--;
2583 	set_free_obj(page, page->active, objnr);
2584 }
2585 
2586 /*
2587  * Map pages beginning at addr to the given cache and slab. This is required
2588  * for the slab allocator to be able to lookup the cache and slab of a
2589  * virtual address for kfree, ksize, and slab debugging.
2590  */
2591 static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2592 			   void *freelist)
2593 {
2594 	page->slab_cache = cache;
2595 	page->freelist = freelist;
2596 }
2597 
2598 /*
2599  * Grow (by 1) the number of slabs within a cache.  This is called by
2600  * kmem_cache_alloc() when there are no active objs left in a cache.
2601  */
2602 static int cache_grow(struct kmem_cache *cachep,
2603 		gfp_t flags, int nodeid, struct page *page)
2604 {
2605 	void *freelist;
2606 	size_t offset;
2607 	gfp_t local_flags;
2608 	struct kmem_cache_node *n;
2609 
2610 	/*
2611 	 * Be lazy and only check for valid flags here, keeping it out of the
2612 	 * critical path in kmem_cache_alloc().
2613 	 */
2614 	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2615 		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
2616 		BUG();
2617 	}
2618 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2619 
2620 	/* Take the node list lock to change the colour_next on this node */
2621 	check_irq_off();
2622 	n = get_node(cachep, nodeid);
2623 	spin_lock(&n->list_lock);
2624 
2625 	/* Get colour for the slab, and calculate the next value. */
2626 	offset = n->colour_next;
2627 	n->colour_next++;
2628 	if (n->colour_next >= cachep->colour)
2629 		n->colour_next = 0;
2630 	spin_unlock(&n->list_lock);
2631 
2632 	offset *= cachep->colour_off;
2633 
2634 	if (local_flags & __GFP_WAIT)
2635 		local_irq_enable();
2636 
2637 	/*
2638 	 * The test for missing atomic flag is performed here, rather than
2639 	 * the more obvious place, simply to reduce the critical path length
2640 	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2641 	 * will eventually be caught here (where it matters).
2642 	 */
2643 	kmem_flagcheck(cachep, flags);
2644 
2645 	/*
2646 	 * Get mem for the objs.  Attempt to allocate a physical page from
2647 	 * 'nodeid'.
2648 	 */
2649 	if (!page)
2650 		page = kmem_getpages(cachep, local_flags, nodeid);
2651 	if (!page)
2652 		goto failed;
2653 
2654 	/* Get slab management. */
2655 	freelist = alloc_slabmgmt(cachep, page, offset,
2656 			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2657 	if (!freelist)
2658 		goto opps1;
2659 
2660 	slab_map_pages(cachep, page, freelist);
2661 
2662 	cache_init_objs(cachep, page);
2663 
2664 	if (local_flags & __GFP_WAIT)
2665 		local_irq_disable();
2666 	check_irq_off();
2667 	spin_lock(&n->list_lock);
2668 
2669 	/* Make slab active. */
2670 	list_add_tail(&page->lru, &(n->slabs_free));
2671 	STATS_INC_GROWN(cachep);
2672 	n->free_objects += cachep->num;
2673 	spin_unlock(&n->list_lock);
2674 	return 1;
2675 opps1:
2676 	kmem_freepages(cachep, page);
2677 failed:
2678 	if (local_flags & __GFP_WAIT)
2679 		local_irq_disable();
2680 	return 0;
2681 }
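/*
 * Note on the interrupt dance in cache_grow() above: callers enter with
 * interrupts off, but a __GFP_WAIT allocation may sleep, so interrupts
 * are re-enabled around the page and freelist allocations and switched
 * off again before the node lists are touched.
 */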
2682 
2683 #if DEBUG
2684 
2685 /*
2686  * Perform extra freeing checks:
2687  * - detect bad pointers.
2688  * - POISON/RED_ZONE checking
2689  */
2690 static void kfree_debugcheck(const void *objp)
2691 {
2692 	if (!virt_addr_valid(objp)) {
2693 		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2694 		       (unsigned long)objp);
2695 		BUG();
2696 	}
2697 }
2698 
2699 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2700 {
2701 	unsigned long long redzone1, redzone2;
2702 
2703 	redzone1 = *dbg_redzone1(cache, obj);
2704 	redzone2 = *dbg_redzone2(cache, obj);
2705 
2706 	/*
2707 	 * Redzone is ok.
2708 	 */
2709 	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2710 		return;
2711 
2712 	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2713 		slab_error(cache, "double free detected");
2714 	else
2715 		slab_error(cache, "memory outside object was overwritten");
2716 
2717 	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2718 			obj, redzone1, redzone2);
2719 }
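/*
 * Red zone life cycle: both words are set to RED_INACTIVE when an
 * object is initialised or freed, and to RED_ACTIVE on allocation (see
 * cache_alloc_debugcheck_after()). INACTIVE/INACTIVE at free time
 * therefore means the object was already free, i.e. a double free; any
 * other non-ACTIVE combination means memory around the object was
 * overwritten.
 */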
2720 
2721 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2722 				   unsigned long caller)
2723 {
2724 	unsigned int objnr;
2725 	struct page *page;
2726 
2727 	BUG_ON(virt_to_cache(objp) != cachep);
2728 
2729 	objp -= obj_offset(cachep);
2730 	kfree_debugcheck(objp);
2731 	page = virt_to_head_page(objp);
2732 
2733 	if (cachep->flags & SLAB_RED_ZONE) {
2734 		verify_redzone_free(cachep, objp);
2735 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2736 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2737 	}
2738 	if (cachep->flags & SLAB_STORE_USER)
2739 		*dbg_userword(cachep, objp) = (void *)caller;
2740 
2741 	objnr = obj_to_index(cachep, page, objp);
2742 
2743 	BUG_ON(objnr >= cachep->num);
2744 	BUG_ON(objp != index_to_obj(cachep, page, objnr));
2745 
2746 	set_obj_status(page, objnr, OBJECT_FREE);
2747 	if (cachep->flags & SLAB_POISON) {
2748 #ifdef CONFIG_DEBUG_PAGEALLOC
2749 		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2750 			store_stackinfo(cachep, objp, caller);
2751 			kernel_map_pages(virt_to_page(objp),
2752 					 cachep->size / PAGE_SIZE, 0);
2753 		} else {
2754 			poison_obj(cachep, objp, POISON_FREE);
2755 		}
2756 #else
2757 		poison_obj(cachep, objp, POISON_FREE);
2758 #endif
2759 	}
2760 	return objp;
2761 }
2762 
2763 #else
2764 #define kfree_debugcheck(x) do { } while(0)
2765 #define cache_free_debugcheck(x,objp,z) (objp)
2766 #endif
2767 
2768 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
2769 							bool force_refill)
2770 {
2771 	int batchcount;
2772 	struct kmem_cache_node *n;
2773 	struct array_cache *ac;
2774 	int node;
2775 
2776 	check_irq_off();
2777 	node = numa_mem_id();
2778 	if (unlikely(force_refill))
2779 		goto force_grow;
2780 retry:
2781 	ac = cpu_cache_get(cachep);
2782 	batchcount = ac->batchcount;
2783 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2784 		/*
2785 		 * If there was little recent activity on this cache, then
2786 		 * perform only a partial refill.  Otherwise we could generate
2787 		 * refill bouncing.
2788 		 */
2789 		batchcount = BATCHREFILL_LIMIT;
2790 	}
2791 	n = get_node(cachep, node);
2792 
2793 	BUG_ON(ac->avail > 0 || !n);
2794 	spin_lock(&n->list_lock);
2795 
2796 	/* See if we can refill from the shared array */
2797 	if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
2798 		n->shared->touched = 1;
2799 		goto alloc_done;
2800 	}
2801 
2802 	while (batchcount > 0) {
2803 		struct list_head *entry;
2804 		struct page *page;
2805 		/* Get the slab the allocation is to come from. */
2806 		entry = n->slabs_partial.next;
2807 		if (entry == &n->slabs_partial) {
2808 			n->free_touched = 1;
2809 			entry = n->slabs_free.next;
2810 			if (entry == &n->slabs_free)
2811 				goto must_grow;
2812 		}
2813 
2814 		page = list_entry(entry, struct page, lru);
2815 		check_spinlock_acquired(cachep);
2816 
2817 		/*
2818 		 * The slab was either on partial or free list so
2819 		 * there must be at least one object available for
2820 		 * allocation.
2821 		 */
2822 		BUG_ON(page->active >= cachep->num);
2823 
2824 		while (page->active < cachep->num && batchcount--) {
2825 			STATS_INC_ALLOCED(cachep);
2826 			STATS_INC_ACTIVE(cachep);
2827 			STATS_SET_HIGH(cachep);
2828 
2829 			ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
2830 									node));
2831 		}
2832 
2833 		/* move slabp to correct slabp list: */
2834 		list_del(&page->lru);
2835 		if (page->active == cachep->num)
2836 			list_add(&page->lru, &n->slabs_full);
2837 		else
2838 			list_add(&page->lru, &n->slabs_partial);
2839 	}
2840 
2841 must_grow:
2842 	n->free_objects -= ac->avail;
2843 alloc_done:
2844 	spin_unlock(&n->list_lock);
2845 
2846 	if (unlikely(!ac->avail)) {
2847 		int x;
2848 force_grow:
2849 		x = cache_grow(cachep, gfp_exact_node(flags), node, NULL);
2850 
2851 		/* cache_grow can reenable interrupts, then ac could change. */
2852 		ac = cpu_cache_get(cachep);
2853 		node = numa_mem_id();
2854 
2855 		/* no objects in sight? abort */
2856 		if (!x && (ac->avail == 0 || force_refill))
2857 			return NULL;
2858 
2859 		if (!ac->avail)		/* objects refilled by interrupt? */
2860 			goto retry;
2861 	}
2862 	ac->touched = 1;
2863 
2864 	return ac_get_obj(cachep, ac, flags, force_refill);
2865 }
2866 
2867 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2868 						gfp_t flags)
2869 {
2870 	might_sleep_if(flags & __GFP_WAIT);
2871 #if DEBUG
2872 	kmem_flagcheck(cachep, flags);
2873 #endif
2874 }
2875 
2876 #if DEBUG
2877 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2878 				gfp_t flags, void *objp, unsigned long caller)
2879 {
2880 	struct page *page;
2881 
2882 	if (!objp)
2883 		return objp;
2884 	if (cachep->flags & SLAB_POISON) {
2885 #ifdef CONFIG_DEBUG_PAGEALLOC
2886 		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
2887 			kernel_map_pages(virt_to_page(objp),
2888 					 cachep->size / PAGE_SIZE, 1);
2889 		else
2890 			check_poison_obj(cachep, objp);
2891 #else
2892 		check_poison_obj(cachep, objp);
2893 #endif
2894 		poison_obj(cachep, objp, POISON_INUSE);
2895 	}
2896 	if (cachep->flags & SLAB_STORE_USER)
2897 		*dbg_userword(cachep, objp) = (void *)caller;
2898 
2899 	if (cachep->flags & SLAB_RED_ZONE) {
2900 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
2901 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2902 			slab_error(cachep, "double free, or memory outside"
2903 						" object was overwritten");
2904 			printk(KERN_ERR
2905 				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
2906 				objp, *dbg_redzone1(cachep, objp),
2907 				*dbg_redzone2(cachep, objp));
2908 		}
2909 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
2910 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
2911 	}
2912 
2913 	page = virt_to_head_page(objp);
2914 	set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
2915 	objp += obj_offset(cachep);
2916 	if (cachep->ctor && cachep->flags & SLAB_POISON)
2917 		cachep->ctor(objp);
2918 	if (ARCH_SLAB_MINALIGN &&
2919 	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
2920 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
2921 		       objp, (int)ARCH_SLAB_MINALIGN);
2922 	}
2923 	return objp;
2924 }
2925 #else
2926 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
2927 #endif
2928 
2929 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
2930 {
2931 	if (unlikely(cachep == kmem_cache))
2932 		return false;
2933 
2934 	return should_failslab(cachep->object_size, flags, cachep->flags);
2935 }
2936 
2937 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
2938 {
2939 	void *objp;
2940 	struct array_cache *ac;
2941 	bool force_refill = false;
2942 
2943 	check_irq_off();
2944 
2945 	ac = cpu_cache_get(cachep);
2946 	if (likely(ac->avail)) {
2947 		ac->touched = 1;
2948 		objp = ac_get_obj(cachep, ac, flags, false);
2949 
2950 		/*
2951 		 * Allow for the possibility all avail objects are not allowed
2952 		 * by the current flags
2953 		 */
2954 		if (objp) {
2955 			STATS_INC_ALLOCHIT(cachep);
2956 			goto out;
2957 		}
2958 		force_refill = true;
2959 	}
2960 
2961 	STATS_INC_ALLOCMISS(cachep);
2962 	objp = cache_alloc_refill(cachep, flags, force_refill);
2963 	/*
2964 	 * the 'ac' may be updated by cache_alloc_refill(),
2965 	 * and kmemleak_erase() requires its correct value.
2966 	 */
2967 	ac = cpu_cache_get(cachep);
2968 
2969 out:
2970 	/*
2971 	 * To avoid a false negative, if an object that is in one of the
2972 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
2973 	 * treat the array pointers as a reference to the object.
2974 	 */
2975 	if (objp)
2976 		kmemleak_erase(&ac->entry[ac->avail]);
2977 	return objp;
2978 }
2979 
2980 #ifdef CONFIG_NUMA
2981 /*
2982  * Try allocating on another node if PFA_SPREAD_SLAB is set or a mempolicy is in effect.
2983  *
2984  * If we are in_interrupt, then process context, including cpusets and
2985  * mempolicy, may not apply and should not be used for allocation policy.
2986  */
2987 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
2988 {
2989 	int nid_alloc, nid_here;
2990 
2991 	if (in_interrupt() || (flags & __GFP_THISNODE))
2992 		return NULL;
2993 	nid_alloc = nid_here = numa_mem_id();
2994 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
2995 		nid_alloc = cpuset_slab_spread_node();
2996 	else if (current->mempolicy)
2997 		nid_alloc = mempolicy_slab_node();
2998 	if (nid_alloc != nid_here)
2999 		return ____cache_alloc_node(cachep, flags, nid_alloc);
3000 	return NULL;
3001 }
3002 
3003 /*
3004  * Fallback function if there was no memory available and no objects on a
3005  * certain node and fallback is permitted. First we scan all the
3006  * available nodes for available objects. If that fails then we
3007  * perform an allocation without specifying a node. This allows the page
3008  * allocator to do its reclaim / fallback magic. We then insert the
3009  * slab into the proper nodelist and then allocate from it.
3010  */
3011 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3012 {
3013 	struct zonelist *zonelist;
3014 	gfp_t local_flags;
3015 	struct zoneref *z;
3016 	struct zone *zone;
3017 	enum zone_type high_zoneidx = gfp_zone(flags);
3018 	void *obj = NULL;
3019 	int nid;
3020 	unsigned int cpuset_mems_cookie;
3021 
3022 	if (flags & __GFP_THISNODE)
3023 		return NULL;
3024 
3025 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3026 
3027 retry_cpuset:
3028 	cpuset_mems_cookie = read_mems_allowed_begin();
3029 	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3030 
3031 retry:
3032 	/*
3033 	 * Look through allowed nodes for objects available
3034 	 * from existing per node queues.
3035 	 */
3036 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3037 		nid = zone_to_nid(zone);
3038 
3039 		if (cpuset_zone_allowed(zone, flags) &&
3040 			get_node(cache, nid) &&
3041 			get_node(cache, nid)->free_objects) {
3042 				obj = ____cache_alloc_node(cache,
3043 					gfp_exact_node(flags), nid);
3044 				if (obj)
3045 					break;
3046 		}
3047 	}
3048 
3049 	if (!obj) {
3050 		/*
3051 		 * This allocation will be performed within the constraints
3052 		 * of the current cpuset / memory policy requirements.
3053 		 * We may trigger various forms of reclaim on the allowed
3054 		 * set and go into memory reserves if necessary.
3055 		 */
3056 		struct page *page;
3057 
3058 		if (local_flags & __GFP_WAIT)
3059 			local_irq_enable();
3060 		kmem_flagcheck(cache, flags);
3061 		page = kmem_getpages(cache, local_flags, numa_mem_id());
3062 		if (local_flags & __GFP_WAIT)
3063 			local_irq_disable();
3064 		if (page) {
3065 			/*
3066 			 * Insert into the appropriate per node queues
3067 			 */
3068 			nid = page_to_nid(page);
3069 			if (cache_grow(cache, flags, nid, page)) {
3070 				obj = ____cache_alloc_node(cache,
3071 					gfp_exact_node(flags), nid);
3072 				if (!obj)
3073 					/*
3074 					 * Another processor may allocate the
3075 					 * objects in the slab since we are
3076 					 * not holding any locks.
3077 					 */
3078 					goto retry;
3079 			} else {
3080 				/* cache_grow already freed obj */
3081 				obj = NULL;
3082 			}
3083 		}
3084 	}
3085 
3086 	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3087 		goto retry_cpuset;
3088 	return obj;
3089 }
3090 
3091 /*
3092  * An interface to enable slab creation on nodeid
3093  */
3094 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3095 				int nodeid)
3096 {
3097 	struct list_head *entry;
3098 	struct page *page;
3099 	struct kmem_cache_node *n;
3100 	void *obj;
3101 	int x;
3102 
3103 	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3104 	n = get_node(cachep, nodeid);
3105 	BUG_ON(!n);
3106 
3107 retry:
3108 	check_irq_off();
3109 	spin_lock(&n->list_lock);
3110 	entry = n->slabs_partial.next;
3111 	if (entry == &n->slabs_partial) {
3112 		n->free_touched = 1;
3113 		entry = n->slabs_free.next;
3114 		if (entry == &n->slabs_free)
3115 			goto must_grow;
3116 	}
3117 
3118 	page = list_entry(entry, struct page, lru);
3119 	check_spinlock_acquired_node(cachep, nodeid);
3120 
3121 	STATS_INC_NODEALLOCS(cachep);
3122 	STATS_INC_ACTIVE(cachep);
3123 	STATS_SET_HIGH(cachep);
3124 
3125 	BUG_ON(page->active == cachep->num);
3126 
3127 	obj = slab_get_obj(cachep, page, nodeid);
3128 	n->free_objects--;
3129 	/* move slabp to correct slabp list: */
3130 	list_del(&page->lru);
3131 
3132 	if (page->active == cachep->num)
3133 		list_add(&page->lru, &n->slabs_full);
3134 	else
3135 		list_add(&page->lru, &n->slabs_partial);
3136 
3137 	spin_unlock(&n->list_lock);
3138 	goto done;
3139 
3140 must_grow:
3141 	spin_unlock(&n->list_lock);
3142 	x = cache_grow(cachep, gfp_exact_node(flags), nodeid, NULL);
3143 	if (x)
3144 		goto retry;
3145 
3146 	return fallback_alloc(cachep, flags);
3147 
3148 done:
3149 	return obj;
3150 }
3151 
3152 static __always_inline void *
3153 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3154 		   unsigned long caller)
3155 {
3156 	unsigned long save_flags;
3157 	void *ptr;
3158 	int slab_node = numa_mem_id();
3159 
3160 	flags &= gfp_allowed_mask;
3161 
3162 	lockdep_trace_alloc(flags);
3163 
3164 	if (slab_should_failslab(cachep, flags))
3165 		return NULL;
3166 
3167 	cachep = memcg_kmem_get_cache(cachep, flags);
3168 
3169 	cache_alloc_debugcheck_before(cachep, flags);
3170 	local_irq_save(save_flags);
3171 
3172 	if (nodeid == NUMA_NO_NODE)
3173 		nodeid = slab_node;
3174 
3175 	if (unlikely(!get_node(cachep, nodeid))) {
3176 		/* Node not bootstrapped yet */
3177 		ptr = fallback_alloc(cachep, flags);
3178 		goto out;
3179 	}
3180 
3181 	if (nodeid == slab_node) {
3182 		/*
3183 		 * Use the locally cached objects if possible.
3184 		 * However ____cache_alloc does not allow fallback
3185 		 * to other nodes. It may fail while we still have
3186 		 * objects on other nodes available.
3187 		 */
3188 		ptr = ____cache_alloc(cachep, flags);
3189 		if (ptr)
3190 			goto out;
3191 	}
3192 	/* ____cache_alloc_node can fall back to other nodes */
3193 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3194   out:
3195 	local_irq_restore(save_flags);
3196 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3197 	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
3198 				 flags);
3199 
3200 	if (likely(ptr)) {
3201 		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
3202 		if (unlikely(flags & __GFP_ZERO))
3203 			memset(ptr, 0, cachep->object_size);
3204 	}
3205 
3206 	memcg_kmem_put_cache(cachep);
3207 	return ptr;
3208 }
3209 
3210 static __always_inline void *
3211 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3212 {
3213 	void *objp;
3214 
3215 	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3216 		objp = alternate_node_alloc(cache, flags);
3217 		if (objp)
3218 			goto out;
3219 	}
3220 	objp = ____cache_alloc(cache, flags);
3221 
3222 	/*
3223 	 * We may just have run out of memory on the local node.
3224 	 * ____cache_alloc_node() knows how to locate memory on other nodes
3225 	 */
3226 	if (!objp)
3227 		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3228 
3229   out:
3230 	return objp;
3231 }
3232 #else
3233 
3234 static __always_inline void *
3235 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3236 {
3237 	return ____cache_alloc(cachep, flags);
3238 }
3239 
3240 #endif /* CONFIG_NUMA */
3241 
3242 static __always_inline void *
3243 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3244 {
3245 	unsigned long save_flags;
3246 	void *objp;
3247 
3248 	flags &= gfp_allowed_mask;
3249 
3250 	lockdep_trace_alloc(flags);
3251 
3252 	if (slab_should_failslab(cachep, flags))
3253 		return NULL;
3254 
3255 	cachep = memcg_kmem_get_cache(cachep, flags);
3256 
3257 	cache_alloc_debugcheck_before(cachep, flags);
3258 	local_irq_save(save_flags);
3259 	objp = __do_cache_alloc(cachep, flags);
3260 	local_irq_restore(save_flags);
3261 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3262 	kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
3263 				 flags);
3264 	prefetchw(objp);
3265 
3266 	if (likely(objp)) {
3267 		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
3268 		if (unlikely(flags & __GFP_ZERO))
3269 			memset(objp, 0, cachep->object_size);
3270 	}
3271 
3272 	memcg_kmem_put_cache(cachep);
3273 	return objp;
3274 }
3275 
3276 /*
3277  * Caller needs to acquire correct kmem_cache_node's list_lock
3278  * @list: List of detached free slabs should be freed by caller
3279  */
3280 static void free_block(struct kmem_cache *cachep, void **objpp,
3281 			int nr_objects, int node, struct list_head *list)
3282 {
3283 	int i;
3284 	struct kmem_cache_node *n = get_node(cachep, node);
3285 
3286 	for (i = 0; i < nr_objects; i++) {
3287 		void *objp;
3288 		struct page *page;
3289 
3290 		clear_obj_pfmemalloc(&objpp[i]);
3291 		objp = objpp[i];
3292 
3293 		page = virt_to_head_page(objp);
3294 		list_del(&page->lru);
3295 		check_spinlock_acquired_node(cachep, node);
3296 		slab_put_obj(cachep, page, objp, node);
3297 		STATS_DEC_ACTIVE(cachep);
3298 		n->free_objects++;
3299 
3300 		/* fixup slab chains */
3301 		if (page->active == 0) {
3302 			if (n->free_objects > n->free_limit) {
3303 				n->free_objects -= cachep->num;
3304 				list_add_tail(&page->lru, list);
3305 			} else {
3306 				list_add(&page->lru, &n->slabs_free);
3307 			}
3308 		} else {
3309 			/* Unconditionally move a slab to the end of the
3310 			 * partial list on free - maximum time for the
3311 			 * other objects to be freed, too.
3312 			 */
3313 			list_add_tail(&page->lru, &n->slabs_partial);
3314 		}
3315 	}
3316 }
3317 
3318 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3319 {
3320 	int batchcount;
3321 	struct kmem_cache_node *n;
3322 	int node = numa_mem_id();
3323 	LIST_HEAD(list);
3324 
3325 	batchcount = ac->batchcount;
3326 #if DEBUG
3327 	BUG_ON(!batchcount || batchcount > ac->avail);
3328 #endif
3329 	check_irq_off();
3330 	n = get_node(cachep, node);
3331 	spin_lock(&n->list_lock);
3332 	if (n->shared) {
3333 		struct array_cache *shared_array = n->shared;
3334 		int max = shared_array->limit - shared_array->avail;
3335 		if (max) {
3336 			if (batchcount > max)
3337 				batchcount = max;
3338 			memcpy(&(shared_array->entry[shared_array->avail]),
3339 			       ac->entry, sizeof(void *) * batchcount);
3340 			shared_array->avail += batchcount;
3341 			goto free_done;
3342 		}
3343 	}
3344 
3345 	free_block(cachep, ac->entry, batchcount, node, &list);
3346 free_done:
3347 #if STATS
3348 	{
3349 		int i = 0;
3350 		struct list_head *p;
3351 
3352 		p = n->slabs_free.next;
3353 		while (p != &(n->slabs_free)) {
3354 			struct page *page;
3355 
3356 			page = list_entry(p, struct page, lru);
3357 			BUG_ON(page->active);
3358 
3359 			i++;
3360 			p = p->next;
3361 		}
3362 		STATS_SET_FREEABLE(cachep, i);
3363 	}
3364 #endif
3365 	spin_unlock(&n->list_lock);
3366 	slabs_destroy(cachep, &list);
3367 	ac->avail -= batchcount;
3368 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3369 }
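/*
 * In short: on a free miss the oldest batchcount entries of the per-cpu
 * array are pushed into the node's shared array if there is room,
 * otherwise they are handed back to their slabs via free_block(); the
 * surviving entries are then slid down with memmove() so the array
 * keeps its LIFO behaviour at the top.
 */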
3370 
3371 /*
3372  * Release an obj back to its cache. If the obj has a constructed state, it must
3373  * be in this state _before_ it is released.  Called with interrupts disabled.
3374  */
3375 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3376 				unsigned long caller)
3377 {
3378 	struct array_cache *ac = cpu_cache_get(cachep);
3379 
3380 	check_irq_off();
3381 	kmemleak_free_recursive(objp, cachep->flags);
3382 	objp = cache_free_debugcheck(cachep, objp, caller);
3383 
3384 	kmemcheck_slab_free(cachep, objp, cachep->object_size);
3385 
3386 	/*
3387 	 * Skip calling cache_free_alien() when the platform is not numa.
3388 	 * This will avoid cache misses that happen while accessing slabp (which
3389 	 * is a per-page memory reference) to get nodeid. Instead use a global
3390 	 * variable to skip the call, which is most likely to be present in
3391 	 * the cache.
3392 	 */
3393 	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3394 		return;
3395 
3396 	if (ac->avail < ac->limit) {
3397 		STATS_INC_FREEHIT(cachep);
3398 	} else {
3399 		STATS_INC_FREEMISS(cachep);
3400 		cache_flusharray(cachep, ac);
3401 	}
3402 
3403 	ac_put_obj(cachep, ac, objp);
3404 }
3405 
3406 /**
3407  * kmem_cache_alloc - Allocate an object
3408  * @cachep: The cache to allocate from.
3409  * @flags: See kmalloc().
3410  *
3411  * Allocate an object from this cache.  The flags are only relevant
3412  * if the cache has no available objects.
3413  */
3414 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3415 {
3416 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
3417 
3418 	trace_kmem_cache_alloc(_RET_IP_, ret,
3419 			       cachep->object_size, cachep->size, flags);
3420 
3421 	return ret;
3422 }
3423 EXPORT_SYMBOL(kmem_cache_alloc);
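/*
 * Typical usage sketch (foo_cache is a hypothetical cache created
 * elsewhere with kmem_cache_create()):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, f);
 */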
3424 
3425 #ifdef CONFIG_TRACING
3426 void *
3427 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3428 {
3429 	void *ret;
3430 
3431 	ret = slab_alloc(cachep, flags, _RET_IP_);
3432 
3433 	trace_kmalloc(_RET_IP_, ret,
3434 		      size, cachep->size, flags);
3435 	return ret;
3436 }
3437 EXPORT_SYMBOL(kmem_cache_alloc_trace);
3438 #endif
3439 
3440 #ifdef CONFIG_NUMA
3441 /**
3442  * kmem_cache_alloc_node - Allocate an object on the specified node
3443  * @cachep: The cache to allocate from.
3444  * @flags: See kmalloc().
3445  * @nodeid: node number of the target node.
3446  *
3447  * Identical to kmem_cache_alloc but it will allocate memory on the given
3448  * node, which can improve the performance for cpu bound structures.
3449  *
3450  * Fallback to other node is possible if __GFP_THISNODE is not set.
3451  */
3452 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3453 {
3454 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3455 
3456 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3457 				    cachep->object_size, cachep->size,
3458 				    flags, nodeid);
3459 
3460 	return ret;
3461 }
3462 EXPORT_SYMBOL(kmem_cache_alloc_node);
3463 
3464 #ifdef CONFIG_TRACING
3465 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3466 				  gfp_t flags,
3467 				  int nodeid,
3468 				  size_t size)
3469 {
3470 	void *ret;
3471 
3472 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3473 
3474 	trace_kmalloc_node(_RET_IP_, ret,
3475 			   size, cachep->size,
3476 			   flags, nodeid);
3477 	return ret;
3478 }
3479 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3480 #endif
3481 
3482 static __always_inline void *
3483 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3484 {
3485 	struct kmem_cache *cachep;
3486 
3487 	cachep = kmalloc_slab(size, flags);
3488 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3489 		return cachep;
3490 	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
3491 }
3492 
3493 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3494 {
3495 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
3496 }
3497 EXPORT_SYMBOL(__kmalloc_node);
3498 
3499 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3500 		int node, unsigned long caller)
3501 {
3502 	return __do_kmalloc_node(size, flags, node, caller);
3503 }
3504 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3505 #endif /* CONFIG_NUMA */
3506 
3507 /**
3508  * __do_kmalloc - allocate memory
3509  * @size: how many bytes of memory are required.
3510  * @flags: the type of memory to allocate (see kmalloc).
3511  * @caller: return address of the caller, used for debug tracking
3512  */
3513 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3514 					  unsigned long caller)
3515 {
3516 	struct kmem_cache *cachep;
3517 	void *ret;
3518 
3519 	cachep = kmalloc_slab(size, flags);
3520 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3521 		return cachep;
3522 	ret = slab_alloc(cachep, flags, caller);
3523 
3524 	trace_kmalloc(caller, ret,
3525 		      size, cachep->size, flags);
3526 
3527 	return ret;
3528 }
3529 
3530 void *__kmalloc(size_t size, gfp_t flags)
3531 {
3532 	return __do_kmalloc(size, flags, _RET_IP_);
3533 }
3534 EXPORT_SYMBOL(__kmalloc);
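
/*
 * Callers normally reach __kmalloc() through the kmalloc() wrapper. A
 * minimal usage sketch (hypothetical buffer, not part of this file):
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... use buf ...
 *	kfree(buf);
 */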
3535 
3536 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3537 {
3538 	return __do_kmalloc(size, flags, caller);
3539 }
3540 EXPORT_SYMBOL(__kmalloc_track_caller);
3541 
3542 /**
3543  * kmem_cache_free - Deallocate an object
3544  * @cachep: The cache the allocation was from.
3545  * @objp: The previously allocated object.
3546  *
3547  * Free an object which was previously allocated from this
3548  * cache.
3549  */
3550 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3551 {
3552 	unsigned long flags;
3553 	cachep = cache_from_obj(cachep, objp);
3554 	if (!cachep)
3555 		return;
3556 
3557 	local_irq_save(flags);
3558 	debug_check_no_locks_freed(objp, cachep->object_size);
3559 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3560 		debug_check_no_obj_freed(objp, cachep->object_size);
3561 	__cache_free(cachep, objp, _RET_IP_);
3562 	local_irq_restore(flags);
3563 
3564 	trace_kmem_cache_free(_RET_IP_, objp);
3565 }
3566 EXPORT_SYMBOL(kmem_cache_free);
3567 
3568 /**
3569  * kfree - free previously allocated memory
3570  * @objp: pointer returned by kmalloc.
3571  *
3572  * If @objp is NULL, no operation is performed.
3573  *
3574  * Don't free memory not originally allocated by kmalloc()
3575  * or you will run into trouble.
3576  */
3577 void kfree(const void *objp)
3578 {
3579 	struct kmem_cache *c;
3580 	unsigned long flags;
3581 
3582 	trace_kfree(_RET_IP_, objp);
3583 
3584 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3585 		return;
3586 	local_irq_save(flags);
3587 	kfree_debugcheck(objp);
3588 	c = virt_to_cache(objp);
3589 	debug_check_no_locks_freed(objp, c->object_size);
3590 
3591 	debug_check_no_obj_freed(objp, c->object_size);
3592 	__cache_free(c, (void *)objp, _RET_IP_);
3593 	local_irq_restore(flags);
3594 }
3595 EXPORT_SYMBOL(kfree);
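
/*
 * Because kfree(NULL) is a no-op (see the ZERO_OR_NULL_PTR check above),
 * error paths may free unconditionally. Hypothetical sketch:
 *
 *	a = kmalloc(sizeof(*a), GFP_KERNEL);
 *	b = kmalloc(sizeof(*b), GFP_KERNEL);
 *	if (!a || !b)
 *		goto out;
 *	... use a and b ...
 * out:
 *	kfree(b);
 *	kfree(a);
 */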
3596 
3597 /*
3598  * Allocate or resize the per-node kmem_cache_node structures for all online nodes.
3599  */
3600 static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
3601 {
3602 	int node;
3603 	struct kmem_cache_node *n;
3604 	struct array_cache *new_shared;
3605 	struct alien_cache **new_alien = NULL;
3606 
3607 	for_each_online_node(node) {
3608 
3609 		if (use_alien_caches) {
3610 			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3611 			if (!new_alien)
3612 				goto fail;
3613 		}
3614 
3615 		new_shared = NULL;
3616 		if (cachep->shared) {
3617 			new_shared = alloc_arraycache(node,
3618 				cachep->shared*cachep->batchcount,
3619 					0xbaadf00d, gfp);
3620 			if (!new_shared) {
3621 				free_alien_cache(new_alien);
3622 				goto fail;
3623 			}
3624 		}
3625 
3626 		n = get_node(cachep, node);
3627 		if (n) {
3628 			struct array_cache *shared = n->shared;
3629 			LIST_HEAD(list);
3630 
3631 			spin_lock_irq(&n->list_lock);
3632 
3633 			if (shared)
3634 				free_block(cachep, shared->entry,
3635 						shared->avail, node, &list);
3636 
3637 			n->shared = new_shared;
3638 			if (!n->alien) {
3639 				n->alien = new_alien;
3640 				new_alien = NULL;
3641 			}
3642 			n->free_limit = (1 + nr_cpus_node(node)) *
3643 					cachep->batchcount + cachep->num;
3644 			spin_unlock_irq(&n->list_lock);
3645 			slabs_destroy(cachep, &list);
3646 			kfree(shared);
3647 			free_alien_cache(new_alien);
3648 			continue;
3649 		}
3650 		n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
3651 		if (!n) {
3652 			free_alien_cache(new_alien);
3653 			kfree(new_shared);
3654 			goto fail;
3655 		}
3656 
3657 		kmem_cache_node_init(n);
3658 		n->next_reap = jiffies + REAPTIMEOUT_NODE +
3659 				((unsigned long)cachep) % REAPTIMEOUT_NODE;
3660 		n->shared = new_shared;
3661 		n->alien = new_alien;
3662 		n->free_limit = (1 + nr_cpus_node(node)) *
3663 					cachep->batchcount + cachep->num;
3664 		cachep->node[node] = n;
3665 	}
3666 	return 0;
3667 
3668 fail:
3669 	if (!cachep->list.next) {
3670 		/* Cache is not active yet. Roll back what we did */
3671 		node--;
3672 		while (node >= 0) {
3673 			n = get_node(cachep, node);
3674 			if (n) {
3675 				kfree(n->shared);
3676 				free_alien_cache(n->alien);
3677 				kfree(n);
3678 				cachep->node[node] = NULL;
3679 			}
3680 			node--;
3681 		}
3682 	}
3683 	return -ENOMEM;
3684 }
3685 
3686 /* Always called with the slab_mutex held */
3687 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
3688 				int batchcount, int shared, gfp_t gfp)
3689 {
3690 	struct array_cache __percpu *cpu_cache, *prev;
3691 	int cpu;
3692 
3693 	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3694 	if (!cpu_cache)
3695 		return -ENOMEM;
3696 
3697 	prev = cachep->cpu_cache;
3698 	cachep->cpu_cache = cpu_cache;
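	/*
	 * Every cpu must observe the new cpu_cache pointer before the old
	 * per-cpu arrays are drained below; the slab fast paths run with
	 * interrupts disabled, so once the sync IPI below has completed on a
	 * cpu it can no longer be using an entry from 'prev'.
	 */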
3699 	kick_all_cpus_sync();
3700 
3701 	check_irq_on();
3702 	cachep->batchcount = batchcount;
3703 	cachep->limit = limit;
3704 	cachep->shared = shared;
3705 
3706 	if (!prev)
3707 		goto alloc_node;
3708 
3709 	for_each_online_cpu(cpu) {
3710 		LIST_HEAD(list);
3711 		int node;
3712 		struct kmem_cache_node *n;
3713 		struct array_cache *ac = per_cpu_ptr(prev, cpu);
3714 
3715 		node = cpu_to_mem(cpu);
3716 		n = get_node(cachep, node);
3717 		spin_lock_irq(&n->list_lock);
3718 		free_block(cachep, ac->entry, ac->avail, node, &list);
3719 		spin_unlock_irq(&n->list_lock);
3720 		slabs_destroy(cachep, &list);
3721 	}
3722 	free_percpu(prev);
3723 
3724 alloc_node:
3725 	return alloc_kmem_cache_node(cachep, gfp);
3726 }
3727 
3728 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3729 				int batchcount, int shared, gfp_t gfp)
3730 {
3731 	int ret;
3732 	struct kmem_cache *c;
3733 
3734 	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3735 
3736 	if (slab_state < FULL)
3737 		return ret;
3738 
3739 	if ((ret < 0) || !is_root_cache(cachep))
3740 		return ret;
3741 
3742 	lockdep_assert_held(&slab_mutex);
3743 	for_each_memcg_cache(c, cachep) {
3744 		/* return value determined by the root cache only */
3745 		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
3746 	}
3747 
3748 	return ret;
3749 }
3750 
3751 /* Always called with slab_mutex held */
3752 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3753 {
3754 	int err;
3755 	int limit = 0;
3756 	int shared = 0;
3757 	int batchcount = 0;
3758 
3759 	if (!is_root_cache(cachep)) {
3760 		struct kmem_cache *root = memcg_root_cache(cachep);
3761 		limit = root->limit;
3762 		shared = root->shared;
3763 		batchcount = root->batchcount;
3764 	}
3765 
3766 	if (limit && shared && batchcount)
3767 		goto skip_setup;
3768 	/*
3769 	 * The head array serves three purposes:
3770 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3771 	 * - reduce the number of spinlock operations.
3772 	 * - reduce the number of linked list operations on the slab and
3773 	 *   bufctl chains: array operations are cheaper.
3774 	 * The numbers are guesses; we should auto-tune them as described by
3775 	 * Bonwick.
3776 	 */
3777 	if (cachep->size > 131072)
3778 		limit = 1;
3779 	else if (cachep->size > PAGE_SIZE)
3780 		limit = 8;
3781 	else if (cachep->size > 1024)
3782 		limit = 24;
3783 	else if (cachep->size > 256)
3784 		limit = 54;
3785 	else
3786 		limit = 120;
3787 
3788 	/*
3789 	 * CPU-bound tasks (e.g. network routing) can exhibit asymmetric
3790 	 * allocation behaviour: most allocs on one cpu, most free operations
3791 	 * on another cpu. For these cases an efficient way of passing objects
3792 	 * between cpus is necessary. This is provided by a shared array, which
3793 	 * replaces Bonwick's magazine layer.
3794 	 * On a uniprocessor it is functionally equivalent (but less efficient)
3795 	 * to a larger limit, so it is disabled by default.
3796 	 */
3797 	shared = 0;
3798 	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3799 		shared = 8;
3800 
3801 #if DEBUG
3802 	/*
3803 	 * With debugging enabled, a large batchcount leads to excessively long
3804 	 * periods with local interrupts disabled. Limit the batchcount.
3805 	 */
3806 	if (limit > 32)
3807 		limit = 32;
3808 #endif
3809 	batchcount = (limit + 1) / 2;
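	/*
	 * Worked example (DEBUG off): a cache of 512-byte objects falls into
	 * the "> 256" bucket above, so limit = 54 and
	 * batchcount = (54 + 1) / 2 = 27.
	 */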
3810 skip_setup:
3811 	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3812 	if (err)
3813 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
3814 		       cachep->name, -err);
3815 	return err;
3816 }
3817 
3818 /*
3819  * Drain an array if it contains any elements, taking the node lock only if
3820  * necessary. Note that the node list_lock also protects the array_cache
3821  * if drain_array() is used on the shared array.
3822  */
3823 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3824 			 struct array_cache *ac, int force, int node)
3825 {
3826 	LIST_HEAD(list);
3827 	int tofree;
3828 
3829 	if (!ac || !ac->avail)
3830 		return;
3831 	if (ac->touched && !force) {
3832 		ac->touched = 0;
3833 	} else {
3834 		spin_lock_irq(&n->list_lock);
3835 		if (ac->avail) {
3836 			tofree = force ? ac->avail : (ac->limit + 4) / 5;
3837 			if (tofree > ac->avail)
3838 				tofree = (ac->avail + 1) / 2;
3839 			free_block(cachep, ac->entry, tofree, node, &list);
3840 			ac->avail -= tofree;
3841 			memmove(ac->entry, &(ac->entry[tofree]),
3842 				sizeof(void *) * ac->avail);
3843 		}
3844 		spin_unlock_irq(&n->list_lock);
3845 		slabs_destroy(cachep, &list);
3846 	}
3847 }
3848 
3849 /**
3850  * cache_reap - Reclaim memory from caches.
3851  * @w: work descriptor
3852  *
3853  * Called from workqueue/eventd every few seconds.
3854  * Purpose:
3855  * - clear the per-cpu caches for this CPU.
3856  * - return freeable pages to the main free memory pool.
3857  *
3858  * If we cannot acquire the cache chain mutex then just give up - we'll try
3859  * again on the next iteration.
3860  */
3861 static void cache_reap(struct work_struct *w)
3862 {
3863 	struct kmem_cache *searchp;
3864 	struct kmem_cache_node *n;
3865 	int node = numa_mem_id();
3866 	struct delayed_work *work = to_delayed_work(w);
3867 
3868 	if (!mutex_trylock(&slab_mutex))
3869 		/* Give up. Set up the next iteration. */
3870 		goto out;
3871 
3872 	list_for_each_entry(searchp, &slab_caches, list) {
3873 		check_irq_on();
3874 
3875 		/*
3876 		 * We only take the node lock if absolutely necessary and we
3877 		 * have established with reasonable certainty that
3878 		 * we can do some work once the lock is obtained.
3879 		 */
3880 		n = get_node(searchp, node);
3881 
3882 		reap_alien(searchp, n);
3883 
3884 		drain_array(searchp, n, cpu_cache_get(searchp), 0, node);
3885 
3886 		/*
3887 		 * These are racy checks but it does not matter
3888 		 * if we skip one check or scan twice.
3889 		 */
3890 		if (time_after(n->next_reap, jiffies))
3891 			goto next;
3892 
3893 		n->next_reap = jiffies + REAPTIMEOUT_NODE;
3894 
3895 		drain_array(searchp, n, n->shared, 0, node);
3896 
3897 		if (n->free_touched)
3898 			n->free_touched = 0;
3899 		else {
3900 			int freed;
3901 
3902 			freed = drain_freelist(searchp, n, (n->free_limit +
3903 				5 * searchp->num - 1) / (5 * searchp->num));
3904 			STATS_ADD_REAPED(searchp, freed);
3905 		}
3906 next:
3907 		cond_resched();
3908 	}
3909 	check_irq_on();
3910 	mutex_unlock(&slab_mutex);
3911 	next_reap_node();
3912 out:
3913 	/* Set up the next iteration */
3914 	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
3915 }
3916 
3917 #ifdef CONFIG_SLABINFO
3918 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
3919 {
3920 	struct page *page;
3921 	unsigned long active_objs;
3922 	unsigned long num_objs;
3923 	unsigned long active_slabs = 0;
3924 	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
3925 	const char *name;
3926 	char *error = NULL;
3927 	int node;
3928 	struct kmem_cache_node *n;
3929 
3930 	active_objs = 0;
3931 	num_slabs = 0;
3932 	for_each_kmem_cache_node(cachep, node, n) {
3933 
3934 		check_irq_on();
3935 		spin_lock_irq(&n->list_lock);
3936 
3937 		list_for_each_entry(page, &n->slabs_full, lru) {
3938 			if (page->active != cachep->num && !error)
3939 				error = "slabs_full accounting error";
3940 			active_objs += cachep->num;
3941 			active_slabs++;
3942 		}
3943 		list_for_each_entry(page, &n->slabs_partial, lru) {
3944 			if (page->active == cachep->num && !error)
3945 				error = "slabs_partial accounting error";
3946 			if (!page->active && !error)
3947 				error = "slabs_partial accounting error";
3948 			active_objs += page->active;
3949 			active_slabs++;
3950 		}
3951 		list_for_each_entry(page, &n->slabs_free, lru) {
3952 			if (page->active && !error)
3953 				error = "slabs_free accounting error";
3954 			num_slabs++;
3955 		}
3956 		free_objects += n->free_objects;
3957 		if (n->shared)
3958 			shared_avail += n->shared->avail;
3959 
3960 		spin_unlock_irq(&n->list_lock);
3961 	}
3962 	num_slabs += active_slabs;
3963 	num_objs = num_slabs * cachep->num;
3964 	if (num_objs - active_objs != free_objects && !error)
3965 		error = "free_objects accounting error";
3966 
3967 	name = cachep->name;
3968 	if (error)
3969 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
3970 
3971 	sinfo->active_objs = active_objs;
3972 	sinfo->num_objs = num_objs;
3973 	sinfo->active_slabs = active_slabs;
3974 	sinfo->num_slabs = num_slabs;
3975 	sinfo->shared_avail = shared_avail;
3976 	sinfo->limit = cachep->limit;
3977 	sinfo->batchcount = cachep->batchcount;
3978 	sinfo->shared = cachep->shared;
3979 	sinfo->objects_per_slab = cachep->num;
3980 	sinfo->cache_order = cachep->gfporder;
3981 }
3982 
3983 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
3984 {
3985 #if STATS
3986 	{			/* node stats */
3987 		unsigned long high = cachep->high_mark;
3988 		unsigned long allocs = cachep->num_allocations;
3989 		unsigned long grown = cachep->grown;
3990 		unsigned long reaped = cachep->reaped;
3991 		unsigned long errors = cachep->errors;
3992 		unsigned long max_freeable = cachep->max_freeable;
3993 		unsigned long node_allocs = cachep->node_allocs;
3994 		unsigned long node_frees = cachep->node_frees;
3995 		unsigned long overflows = cachep->node_overflow;
3996 
3997 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
3998 			   "%4lu %4lu %4lu %4lu %4lu",
3999 			   allocs, high, grown,
4000 			   reaped, errors, max_freeable, node_allocs,
4001 			   node_frees, overflows);
4002 	}
4003 	/* cpu stats */
4004 	{
4005 		unsigned long allochit = atomic_read(&cachep->allochit);
4006 		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4007 		unsigned long freehit = atomic_read(&cachep->freehit);
4008 		unsigned long freemiss = atomic_read(&cachep->freemiss);
4009 
4010 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4011 			   allochit, allocmiss, freehit, freemiss);
4012 	}
4013 #endif
4014 }
4015 
4016 #define MAX_SLABINFO_WRITE 128
4017 /**
4018  * slabinfo_write - Tuning for the slab allocator
4019  * @file: unused
4020  * @buffer: user buffer
4021  * @count: data length
4022  * @ppos: unused
4023  */
4024 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4025 		       size_t count, loff_t *ppos)
4026 {
4027 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4028 	int limit, batchcount, shared, res;
4029 	struct kmem_cache *cachep;
4030 
4031 	if (count > MAX_SLABINFO_WRITE)
4032 		return -EINVAL;
4033 	if (copy_from_user(&kbuf, buffer, count))
4034 		return -EFAULT;
4035 	kbuf[MAX_SLABINFO_WRITE] = '\0';
4036 
4037 	tmp = strchr(kbuf, ' ');
4038 	if (!tmp)
4039 		return -EINVAL;
4040 	*tmp = '\0';
4041 	tmp++;
4042 	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4043 		return -EINVAL;
4044 
4045 	/* Find the cache in the chain of caches. */
4046 	mutex_lock(&slab_mutex);
4047 	res = -EINVAL;
4048 	list_for_each_entry(cachep, &slab_caches, list) {
4049 		if (!strcmp(cachep->name, kbuf)) {
4050 			if (limit < 1 || batchcount < 1 ||
4051 					batchcount > limit || shared < 0) {
4052 				res = 0;
4053 			} else {
4054 				res = do_tune_cpucache(cachep, limit,
4055 						       batchcount, shared,
4056 						       GFP_KERNEL);
4057 			}
4058 			break;
4059 		}
4060 	}
4061 	mutex_unlock(&slab_mutex);
4062 	if (res >= 0)
4063 		res = count;
4064 	return res;
4065 }
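
/*
 * Illustrative tuning example ('dentry' stands for any existing cache
 * name); the three values are limit, batchcount and shared, in that order,
 * assuming this handler is wired up as the write method for /proc/slabinfo:
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 */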
4066 
4067 #ifdef CONFIG_DEBUG_SLAB_LEAK
4068 
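/*
 * Layout of the leak-tracking buffer 'n' used by add_caller() and
 * handle_slab() below:
 *	n[0]   - capacity, in (caller, count) pairs
 *	n[1]   - number of pairs currently stored
 *	n[2..] - (caller address, hit count) pairs, kept sorted by address
 * add_caller() records one more hit for caller 'v' and returns 0 once the
 * buffer is full.
 */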
4069 static inline int add_caller(unsigned long *n, unsigned long v)
4070 {
4071 	unsigned long *p;
4072 	int l;
4073 	if (!v)
4074 		return 1;
4075 	l = n[1];
4076 	p = n + 2;
4077 	while (l) {
4078 		int i = l/2;
4079 		unsigned long *q = p + 2 * i;
4080 		if (*q == v) {
4081 			q[1]++;
4082 			return 1;
4083 		}
4084 		if (*q > v) {
4085 			l = i;
4086 		} else {
4087 			p = q + 2;
4088 			l -= i + 1;
4089 		}
4090 	}
4091 	if (++n[1] == n[0])
4092 		return 0;
4093 	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4094 	p[0] = v;
4095 	p[1] = 1;
4096 	return 1;
4097 }
4098 
4099 static void handle_slab(unsigned long *n, struct kmem_cache *c,
4100 						struct page *page)
4101 {
4102 	void *p;
4103 	int i;
4104 
4105 	if (n[0] == n[1])
4106 		return;
4107 	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4108 		if (get_obj_status(page, i) != OBJECT_ACTIVE)
4109 			continue;
4110 
4111 		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4112 			return;
4113 	}
4114 }
4115 
4116 static void show_symbol(struct seq_file *m, unsigned long address)
4117 {
4118 #ifdef CONFIG_KALLSYMS
4119 	unsigned long offset, size;
4120 	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4121 
4122 	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4123 		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4124 		if (modname[0])
4125 			seq_printf(m, " [%s]", modname);
4126 		return;
4127 	}
4128 #endif
4129 	seq_printf(m, "%p", (void *)address);
4130 }
4131 
4132 static int leaks_show(struct seq_file *m, void *p)
4133 {
4134 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
4135 	struct page *page;
4136 	struct kmem_cache_node *n;
4137 	const char *name;
4138 	unsigned long *x = m->private;
4139 	int node;
4140 	int i;
4141 
4142 	if (!(cachep->flags & SLAB_STORE_USER))
4143 		return 0;
4144 	if (!(cachep->flags & SLAB_RED_ZONE))
4145 		return 0;
4146 
4147 	/* OK, we can do it */
4148 
4149 	x[1] = 0;
4150 
4151 	for_each_kmem_cache_node(cachep, node, n) {
4152 
4153 		check_irq_on();
4154 		spin_lock_irq(&n->list_lock);
4155 
4156 		list_for_each_entry(page, &n->slabs_full, lru)
4157 			handle_slab(x, cachep, page);
4158 		list_for_each_entry(page, &n->slabs_partial, lru)
4159 			handle_slab(x, cachep, page);
4160 		spin_unlock_irq(&n->list_lock);
4161 	}
4162 	name = cachep->name;
4163 	if (x[0] == x[1]) {
4164 		/* Increase the buffer size */
4165 		mutex_unlock(&slab_mutex);
4166 		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4167 		if (!m->private) {
4168 			/* Too bad, we are really out */
4169 			m->private = x;
4170 			mutex_lock(&slab_mutex);
4171 			return -ENOMEM;
4172 		}
4173 		*(unsigned long *)m->private = x[0] * 2;
4174 		kfree(x);
4175 		mutex_lock(&slab_mutex);
4176 		/* Now make sure this entry will be retried */
4177 		m->count = m->size;
4178 		return 0;
4179 	}
4180 	for (i = 0; i < x[1]; i++) {
4181 		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4182 		show_symbol(m, x[2*i+2]);
4183 		seq_putc(m, '\n');
4184 	}
4185 
4186 	return 0;
4187 }
4188 
4189 static const struct seq_operations slabstats_op = {
4190 	.start = slab_start,
4191 	.next = slab_next,
4192 	.stop = slab_stop,
4193 	.show = leaks_show,
4194 };
4195 
4196 static int slabstats_open(struct inode *inode, struct file *file)
4197 {
4198 	unsigned long *n;
4199 
4200 	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
4201 	if (!n)
4202 		return -ENOMEM;
4203 
4204 	*n = PAGE_SIZE / (2 * sizeof(unsigned long));
4205 
4206 	return 0;
4207 }
4208 
4209 static const struct file_operations proc_slabstats_operations = {
4210 	.open		= slabstats_open,
4211 	.read		= seq_read,
4212 	.llseek		= seq_lseek,
4213 	.release	= seq_release_private,
4214 };
4215 #endif
4216 
4217 static int __init slab_proc_init(void)
4218 {
4219 #ifdef CONFIG_DEBUG_SLAB_LEAK
4220 	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4221 #endif
4222 	return 0;
4223 }
4224 module_init(slab_proc_init);
4225 #endif
4226 
4227 /**
4228  * ksize - get the actual amount of memory allocated for a given object
4229  * @objp: Pointer to the object
4230  *
4231  * kmalloc may internally round up allocations and return more memory
4232  * than requested. ksize() can be used to determine the actual amount of
4233  * memory allocated. The caller may use this additional memory, even though
4234  * a smaller amount of memory was initially specified with the kmalloc call.
4235  * The caller must guarantee that objp points to a valid object previously
4236  * allocated with either kmalloc() or kmem_cache_alloc(). The object
4237  * must not be freed during the duration of the call.
4238  */
4239 size_t ksize(const void *objp)
4240 {
4241 	BUG_ON(!objp);
4242 	if (unlikely(objp == ZERO_SIZE_PTR))
4243 		return 0;
4244 
4245 	return virt_to_cache(objp)->object_size;
4246 }
4247 EXPORT_SYMBOL(ksize);
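
/*
 * Illustrative sketch (hypothetical caller): the usable size may exceed the
 * requested size because kmalloc rounds allocations up to a bucket size.
 *
 *	buf = kmalloc(100, GFP_KERNEL);
 *	if (buf)
 *		usable = ksize(buf);	(usable >= 100)
 */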
4248