This source file includes the following definitions:
- __check_heap_object
- kmalloc_type
- kmalloc_index
- __kmalloc
- __kmalloc_node
- kmem_cache_alloc_node
- kmem_cache_alloc_trace
- kmem_cache_alloc_node_trace
- kmalloc_order
- kmalloc_large
- kmalloc
- kmalloc_size
- kmalloc_node
- kmalloc_array
- kcalloc
- kmalloc_array_node
- kcalloc_node
- kmem_cache_zalloc
- kzalloc
- kzalloc_node
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if slab debugging is enabled.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)

/*
 * SLAB_TYPESAFE_BY_RCU: the slab pages backing the cache are freed only
 * after an RCU grace period, so an object's memory stays valid (though it
 * may be recycled for a new object of the same type) for as long as an
 * RCU read-side critical section is held. Lockless readers must therefore
 * revalidate the object after taking a reference to it.
 */
#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)

/* Use DEBUG_OBJECTS lifetime checking on the cache's objects */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS 0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB 0
#endif
/* Account objects to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT 0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN 0
#endif

/* The following flags affect the page allocator's grouping of pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U)

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)

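/*
 * Illustrative sketch (not part of the original header): a typical cache
 * lifecycle built on KMEM_CACHE(). "struct foo", foo_cache and foo_init()
 * are hypothetical names used only for this example.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 *		if (!foo_cache)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * Per object: kmem_cache_alloc(foo_cache, GFP_KERNEL) to allocate,
 * kmem_cache_free(foo_cache, obj) to free, and kmem_cache_destroy(foo_cache)
 * once all objects have been returned.
 */
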
/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
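
/*
 * Illustrative sketch (not from the original header): whitelisting one
 * field of a structure so that only it may be copied to/from user space
 * under hardened usercopy. "struct sample" and its "data" field are
 * hypothetical.
 *
 *	struct sample {
 *		spinlock_t lock;
 *		char data[64];		// only this field is usercopy-whitelisted
 *	};
 *
 *	static struct kmem_cache *sample_cache;
 *
 *	sample_cache = KMEM_CACHE_USERCOPY(sample, SLAB_ACCOUNT, data);
 */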

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);
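
/*
 * Illustrative sketch (not from the original header): growing a buffer
 * with krealloc(). On failure krealloc() returns NULL and leaves the old
 * buffer intact, so the result must go into a temporary first. "buf" and
 * "new_len" are hypothetical.
 *
 *	void *tmp = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!tmp)
 *		goto err;	// buf is still valid and still needs freeing
 *	buf = tmp;
 */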

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer. In that case
 * ARCH_DMA_MINALIGN raises the minimum kmalloc alignment and size.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different minimum
 * object alignment, intended for architectures that fault on misaligned
 * accesses even for 64-bit-integer aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned pointers;
 * kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */
#ifdef CONFIG_SLAB
/*
 * SLAB: the largest kmalloc cache size is 32 MB (2^25) or the maximum
 * allocatable page order, whichever is smaller.
 */
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation:
 * with a page size of 2^12 bytes and a byte index that can represent
 * 2^8 entries, objects must be at least 2^12 / 2^8 = 16 bytes. If the
 * minimum kmalloc size is smaller than 16, it is used as the minimum
 * object size and the byte sized index is not used.
 */
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for both flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. If both are,
	 * __GFP_DMA takes precedence over __GFP_RECLAIMABLE.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}
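
/*
 * Illustrative note (not from the original header): with the mapping above,
 * GFP_KERNEL selects KMALLOC_NORMAL, GFP_KERNEL | __GFP_RECLAIMABLE selects
 * KMALLOC_RECLAIM, and (when CONFIG_ZONE_DMA is enabled) any request with
 * __GFP_DMA set selects KMALLOC_DMA, even if __GFP_RECLAIMABLE is also set.
 */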

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
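
/*
 * Illustrative note (not from the original header): for example, a 24-byte
 * request maps to index 5 and is served from the 32-byte kmalloc cache,
 * while a 100-byte request maps to index 7 (128 bytes); the special
 * indices 1 and 2 cover the off-power-of-two 96- and 192-byte caches.
 */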
#endif

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
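
/*
 * Illustrative sketch (not from the original header): allocating and
 * freeing a batch of objects with the bulk API. "my_cache" and "objs" are
 * hypothetical; kmem_cache_alloc_bulk() returns the number of objects
 * allocated (0 on failure), so the return value must be checked.
 *
 *	void *objs[16];
 *	int n = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, 16, objs);
 *	if (!n)
 *		return -ENOMEM;
 *	// ... use objs[0..n-1] ...
 *	kmem_cache_free_bulk(my_cache, n, objs);
 */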

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif

#else
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}
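
/*
 * Illustrative note (not from the original header): kmalloc_large() is the
 * fallback used by kmalloc() for requests bigger than KMALLOC_MAX_CACHE_SIZE;
 * it rounds the size up to a whole power-of-two number of pages. With 4 KiB
 * pages, a 20 KiB request gets order get_order(20 KiB) = 3, so 8 pages
 * (32 KiB) are allocated.
 */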

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory for objects smaller
 * than page size in the kernel. The returned address is aligned to at
 * least ARCH_KMALLOC_MINALIGN bytes.
 *
 * The @flags argument is one of the GFP flags from include/linux/gfp.h.
 * The most useful ones are %GFP_KERNEL (normal kernel RAM, may sleep) and
 * %GFP_NOWAIT / %GFP_ATOMIC (will not sleep; %GFP_ATOMIC may use emergency
 * pools), optionally OR'ed with modifiers such as %__GFP_ZERO, %__GFP_NOWARN
 * or %__GFP_NOFAIL. See Documentation/core-api/memory-allocation.rst for
 * the recommended usage of @flags.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
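
/*
 * Illustrative sketch (not from the original header): the usual
 * allocate/check/free pattern. "struct foo" is a hypothetical type.
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	// ... use f ...
 *	kfree(f);
 */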

/*
 * Determine size used for the nth kmalloc cache.
 * return size or 0 if a kmalloc cache for that
 * size does not exist
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1U << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
				flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
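
/*
 * Illustrative sketch (not from the original header): these helpers are
 * preferred over an open-coded kmalloc(n * size, ...) because the
 * multiplication is checked for overflow. "nr_items" and "struct item"
 * are hypothetical.
 *
 *	struct item *items = kcalloc(nr_items, sizeof(*items), GFP_KERNEL);
 *	if (!items)
 *		return -ENOMEM;
 *	// items[0..nr_items-1] start out zero-initialized
 *	kfree(items);
 */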

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking
 * instead of just the calling function (confusing, eh?). It is useful
 * when the direct caller is a generic wrapper whose own callers should
 * show up in the allocation traces.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}
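
/*
 * Illustrative sketch (not from the original header): kzalloc() combines
 * allocation and zeroing, avoiding a separate memset() and giving fields
 * a well-defined zero/NULL starting state. "struct ctx" is hypothetical.
 *
 *	struct ctx *c = kzalloc(sizeof(*c), GFP_KERNEL);
 *	if (!c)
 *		return -ENOMEM;
 */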

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu NULL
#define slab_dead_cpu NULL
#endif

#endif /* _LINUX_SLAB_H */