This source file includes the following definitions:
- setup_slab_nomerge
- kmem_cache_size
- kmem_cache_sanity_check
- kmem_cache_sanity_check
- __kmem_cache_free_bulk
- __kmem_cache_alloc_bulk
- slab_init_memcg_params
- init_memcg_params
- destroy_memcg_params
- free_memcg_params
- update_memcg_params
- memcg_update_all_caches
- memcg_link_cache
- memcg_unlink_cache
- init_memcg_params
- destroy_memcg_params
- memcg_unlink_cache
- calculate_alignment
- slab_unmergeable
- find_mergeable
- create_cache
- kmem_cache_create_usercopy
- kmem_cache_create
- slab_caches_to_rcu_destroy_workfn
- shutdown_cache
- memcg_create_kmem_cache
- kmemcg_workfn
- kmemcg_rcufn
- kmemcg_cache_shutdown_fn
- kmemcg_cache_shutdown
- kmemcg_cache_deactivate_after_rcu
- kmemcg_cache_deactivate
- memcg_deactivate_kmem_caches
- shutdown_memcg_caches
- flush_memcg_workqueue
- shutdown_memcg_caches
- flush_memcg_workqueue
- slab_kmem_cache_release
- kmem_cache_destroy
- kmem_cache_shrink
- kmem_cache_shrink_all
- slab_is_available
- create_boot_cache
- create_kmalloc_cache
- size_index_elem
- kmalloc_slab
- setup_kmalloc_cache_index_table
- kmalloc_cache_name
- new_kmalloc_cache
- create_kmalloc_caches
- kmalloc_order
- kmalloc_order_trace
- freelist_randomize
- cache_random_seq_create
- cache_random_seq_destroy
- print_slabinfo_header
- slab_start
- slab_next
- slab_stop
- memcg_accumulate_slabinfo
- cache_show
- slab_show
- dump_unreclaimable_slab
- memcg_slab_start
- memcg_slab_next
- memcg_slab_stop
- memcg_slab_show
- slabinfo_open
- slab_proc_init
- memcg_slabinfo_show
- memcg_slabinfo_init
- __do_krealloc
- __krealloc
- krealloc
- kzfree
- ksize
- should_failslab
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Slab allocator functions that are independent of the allocator strategy
4  *
5  * (C) 2012 Christoph Lameter <cl@linux.com>
6  */
7 #include <linux/slab.h>
8
9 #include <linux/mm.h>
10 #include <linux/poison.h>
11 #include <linux/interrupt.h>
12 #include <linux/memory.h>
13 #include <linux/cache.h>
14 #include <linux/compiler.h>
15 #include <linux/module.h>
16 #include <linux/cpu.h>
17 #include <linux/uaccess.h>
18 #include <linux/seq_file.h>
19 #include <linux/proc_fs.h>
20 #include <linux/debugfs.h>
21 #include <asm/cacheflush.h>
22 #include <asm/tlbflush.h>
23 #include <asm/page.h>
24 #include <linux/memcontrol.h>
25
26 #define CREATE_TRACE_POINTS
27 #include <trace/events/kmem.h>
28
29 #include "slab.h"
30
31 enum slab_state slab_state;
32 LIST_HEAD(slab_caches);
33 DEFINE_MUTEX(slab_mutex);
34 struct kmem_cache *kmem_cache;
35
36 #ifdef CONFIG_HARDENED_USERCOPY
37 bool usercopy_fallback __ro_after_init =
38 IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
39 module_param(usercopy_fallback, bool, 0400);
40 MODULE_PARM_DESC(usercopy_fallback,
41 "WARN instead of reject usercopy whitelist violations");
42 #endif
43
44 static LIST_HEAD(slab_caches_to_rcu_destroy);
45 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
46 static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
47 slab_caches_to_rcu_destroy_workfn);
48
49 /*
50  * Set of flags that will prevent merging
51  */
52 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
53 SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
54 SLAB_FAILSLAB | SLAB_KASAN)
55
56 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
57 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
58
59 /*
60  * Merge control. If this is set then no merging of slab caches will occur.
61  */
62 static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
63
64 static int __init setup_slab_nomerge(char *str)
65 {
66 slab_nomerge = true;
67 return 1;
68 }
69
70 #ifdef CONFIG_SLUB
71 __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
72 #endif
73
74 __setup("slab_nomerge", setup_slab_nomerge);
75
76 /*
77  * Determine the size of a slab object
78  */
79 unsigned int kmem_cache_size(struct kmem_cache *s)
80 {
81 return s->object_size;
82 }
83 EXPORT_SYMBOL(kmem_cache_size);
84
85 #ifdef CONFIG_DEBUG_VM
86 static int kmem_cache_sanity_check(const char *name, unsigned int size)
87 {
88 if (!name || in_interrupt() || size < sizeof(void *) ||
89 size > KMALLOC_MAX_SIZE) {
90 pr_err("kmem_cache_create(%s) integrity check failed\n", name);
91 return -EINVAL;
92 }
93
94 WARN_ON(strchr(name, ' '));
95 return 0;
96 }
97 #else
98 static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
99 {
100 return 0;
101 }
102 #endif
103
104 void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
105 {
106 size_t i;
107
108 for (i = 0; i < nr; i++) {
109 if (s)
110 kmem_cache_free(s, p[i]);
111 else
112 kfree(p[i]);
113 }
114 }
115
116 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
117 void **p)
118 {
119 size_t i;
120
121 for (i = 0; i < nr; i++) {
122 void *x = p[i] = kmem_cache_alloc(s, flags);
123 if (!x) {
124 __kmem_cache_free_bulk(s, i, p);
125 return 0;
126 }
127 }
128 return i;
129 }
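/*
 * Usage sketch (illustrative annotation; "my_cache" is a hypothetical cache).
 * The two functions above are the generic fallbacks behind the public
 * kmem_cache_alloc_bulk()/kmem_cache_free_bulk() API, which callers use
 * roughly like this:
 *
 *	void *objs[16];
 *	int nr;
 *
 *	nr = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *	if (!nr)
 *		return -ENOMEM;	// 0 means nothing was allocated
 *	...
 *	kmem_cache_free_bulk(my_cache, nr, objs);
 */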
130
131 #ifdef CONFIG_MEMCG_KMEM
132
133 LIST_HEAD(slab_root_caches);
134 static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
135
136 static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref);
137
138 void slab_init_memcg_params(struct kmem_cache *s)
139 {
140 s->memcg_params.root_cache = NULL;
141 RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
142 INIT_LIST_HEAD(&s->memcg_params.children);
143 s->memcg_params.dying = false;
144 }
145
146 static int init_memcg_params(struct kmem_cache *s,
147 struct kmem_cache *root_cache)
148 {
149 struct memcg_cache_array *arr;
150
151 if (root_cache) {
152 int ret = percpu_ref_init(&s->memcg_params.refcnt,
153 kmemcg_cache_shutdown,
154 0, GFP_KERNEL);
155 if (ret)
156 return ret;
157
158 s->memcg_params.root_cache = root_cache;
159 INIT_LIST_HEAD(&s->memcg_params.children_node);
160 INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
161 return 0;
162 }
163
164 slab_init_memcg_params(s);
165
166 if (!memcg_nr_cache_ids)
167 return 0;
168
169 arr = kvzalloc(sizeof(struct memcg_cache_array) +
170 memcg_nr_cache_ids * sizeof(void *),
171 GFP_KERNEL);
172 if (!arr)
173 return -ENOMEM;
174
175 RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
176 return 0;
177 }
178
179 static void destroy_memcg_params(struct kmem_cache *s)
180 {
181 if (is_root_cache(s)) {
182 kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
183 } else {
184 mem_cgroup_put(s->memcg_params.memcg);
185 WRITE_ONCE(s->memcg_params.memcg, NULL);
186 percpu_ref_exit(&s->memcg_params.refcnt);
187 }
188 }
189
190 static void free_memcg_params(struct rcu_head *rcu)
191 {
192 struct memcg_cache_array *old;
193
194 old = container_of(rcu, struct memcg_cache_array, rcu);
195 kvfree(old);
196 }
197
198 static int update_memcg_params(struct kmem_cache *s, int new_array_size)
199 {
200 struct memcg_cache_array *old, *new;
201
202 new = kvzalloc(sizeof(struct memcg_cache_array) +
203 new_array_size * sizeof(void *), GFP_KERNEL);
204 if (!new)
205 return -ENOMEM;
206
207 old = rcu_dereference_protected(s->memcg_params.memcg_caches,
208 lockdep_is_held(&slab_mutex));
209 if (old)
210 memcpy(new->entries, old->entries,
211 memcg_nr_cache_ids * sizeof(void *));
212
213 rcu_assign_pointer(s->memcg_params.memcg_caches, new);
214 if (old)
215 call_rcu(&old->rcu, free_memcg_params);
216 return 0;
217 }
218
219 int memcg_update_all_caches(int num_memcgs)
220 {
221 struct kmem_cache *s;
222 int ret = 0;
223
224 mutex_lock(&slab_mutex);
225 list_for_each_entry(s, &slab_root_caches, root_caches_node) {
226 ret = update_memcg_params(s, num_memcgs);
227 /*
228  * Instead of freeing the memory, we'll just leave the caches
229  * up to this point in an updated state.
230  */
231 if (ret)
232 break;
233 }
234 mutex_unlock(&slab_mutex);
235 return ret;
236 }
237
238 void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg)
239 {
240 if (is_root_cache(s)) {
241 list_add(&s->root_caches_node, &slab_root_caches);
242 } else {
243 css_get(&memcg->css);
244 s->memcg_params.memcg = memcg;
245 list_add(&s->memcg_params.children_node,
246 &s->memcg_params.root_cache->memcg_params.children);
247 list_add(&s->memcg_params.kmem_caches_node,
248 &s->memcg_params.memcg->kmem_caches);
249 }
250 }
251
252 static void memcg_unlink_cache(struct kmem_cache *s)
253 {
254 if (is_root_cache(s)) {
255 list_del(&s->root_caches_node);
256 } else {
257 list_del(&s->memcg_params.children_node);
258 list_del(&s->memcg_params.kmem_caches_node);
259 }
260 }
261 #else
262 static inline int init_memcg_params(struct kmem_cache *s,
263 struct kmem_cache *root_cache)
264 {
265 return 0;
266 }
267
268 static inline void destroy_memcg_params(struct kmem_cache *s)
269 {
270 }
271
272 static inline void memcg_unlink_cache(struct kmem_cache *s)
273 {
274 }
275 #endif
276
277 /*
278  * Figure out what the alignment of the objects will be given a set of
279  * flags, a user specified alignment and the size of the objects.
280  */
281 static unsigned int calculate_alignment(slab_flags_t flags,
282 unsigned int align, unsigned int size)
283 {
284 /*
285  * If the user wants hardware cache aligned objects then follow that
286  * suggestion if the object is sufficiently large.
287  *
288  * The hardware cache alignment cannot override the specified
289  * alignment though. If that is greater then use it.
290  */
291 if (flags & SLAB_HWCACHE_ALIGN) {
292 unsigned int ralign;
293
294 ralign = cache_line_size();
295 while (size <= ralign / 2)
296 ralign /= 2;
297 align = max(align, ralign);
298 }
299
300 if (align < ARCH_SLAB_MINALIGN)
301 align = ARCH_SLAB_MINALIGN;
302
303 return ALIGN(align, sizeof(void *));
304 }
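/*
 * Worked example (illustrative annotation). Assuming 64-byte cache lines
 * and no explicit alignment request:
 *
 *	calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 100) == 64
 *		(100 > 64/2, so ralign stays at the full cache line)
 *	calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 24)  == 32
 *		(ralign is halved from 64 to 32; 24 > 32/2 stops further halving)
 *
 * The result is then clamped to at least ARCH_SLAB_MINALIGN and rounded
 * up to a multiple of sizeof(void *).
 */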
305
306 /*
307  * Find a mergeable slab cache
308  */
309 int slab_unmergeable(struct kmem_cache *s)
310 {
311 if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
312 return 1;
313
314 if (!is_root_cache(s))
315 return 1;
316
317 if (s->ctor)
318 return 1;
319
320 if (s->usersize)
321 return 1;
322
323 /*
324  * We may have set a slab to be unmergeable during bootstrap.
325  */
326 if (s->refcount < 0)
327 return 1;
328
329 return 0;
330 }
331
332 struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
333 slab_flags_t flags, const char *name, void (*ctor)(void *))
334 {
335 struct kmem_cache *s;
336
337 if (slab_nomerge)
338 return NULL;
339
340 if (ctor)
341 return NULL;
342
343 size = ALIGN(size, sizeof(void *));
344 align = calculate_alignment(flags, align, size);
345 size = ALIGN(size, align);
346 flags = kmem_cache_flags(size, flags, name, NULL);
347
348 if (flags & SLAB_NEVER_MERGE)
349 return NULL;
350
351 list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
352 if (slab_unmergeable(s))
353 continue;
354
355 if (size > s->size)
356 continue;
357
358 if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
359 continue;
360 /*
361  * Check if alignment is compatible.
362  * Courtesy of Adrian Drzewiecki
363  */
364 if ((s->size & ~(align - 1)) != s->size)
365 continue;
366
367 if (s->size - size >= sizeof(void *))
368 continue;
369
370 if (IS_ENABLED(CONFIG_SLAB) && align &&
371 (align > s->align || s->align % align))
372 continue;
373
374 return s;
375 }
376 return NULL;
377 }
378
379 static struct kmem_cache *create_cache(const char *name,
380 unsigned int object_size, unsigned int align,
381 slab_flags_t flags, unsigned int useroffset,
382 unsigned int usersize, void (*ctor)(void *),
383 struct mem_cgroup *memcg, struct kmem_cache *root_cache)
384 {
385 struct kmem_cache *s;
386 int err;
387
388 if (WARN_ON(useroffset + usersize > object_size))
389 useroffset = usersize = 0;
390
391 err = -ENOMEM;
392 s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
393 if (!s)
394 goto out;
395
396 s->name = name;
397 s->size = s->object_size = object_size;
398 s->align = align;
399 s->ctor = ctor;
400 s->useroffset = useroffset;
401 s->usersize = usersize;
402
403 err = init_memcg_params(s, root_cache);
404 if (err)
405 goto out_free_cache;
406
407 err = __kmem_cache_create(s, flags);
408 if (err)
409 goto out_free_cache;
410
411 s->refcount = 1;
412 list_add(&s->list, &slab_caches);
413 memcg_link_cache(s, memcg);
414 out:
415 if (err)
416 return ERR_PTR(err);
417 return s;
418
419 out_free_cache:
420 destroy_memcg_params(s);
421 kmem_cache_free(kmem_cache, s);
422 goto out;
423 }
424
425 /**
426  * kmem_cache_create_usercopy - Create a cache with a region suitable
427  * for copying to userspace
428  * @name: A string which is used in /proc/slabinfo to identify this cache.
429  * @size: The size of objects to be created in this cache.
430  * @align: The required alignment for the objects.
431  * @flags: SLAB flags
432  * @useroffset: Usercopy region offset
433  * @usersize: Usercopy region size
434  * @ctor: A constructor for the objects.
435  *
436  * Cannot be called within an interrupt, but can be interrupted.
437  * The @ctor is run when new pages are allocated by the cache.
438  *
439  * The flags are
440  *
441  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
442  * to catch references to uninitialised memory.
443  *
444  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
445  * for buffer overruns.
446  *
447  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
448  * cacheline. This can be beneficial when counting cycles closely.
449  *
450  *
451  * Return: a pointer to the cache on success, NULL on failure.
452  */
453 struct kmem_cache *
454 kmem_cache_create_usercopy(const char *name,
455 unsigned int size, unsigned int align,
456 slab_flags_t flags,
457 unsigned int useroffset, unsigned int usersize,
458 void (*ctor)(void *))
459 {
460 struct kmem_cache *s = NULL;
461 const char *cache_name;
462 int err;
463
464 get_online_cpus();
465 get_online_mems();
466 memcg_get_cache_ids();
467
468 mutex_lock(&slab_mutex);
469
470 err = kmem_cache_sanity_check(name, size);
471 if (err) {
472 goto out_unlock;
473 }
474
475
476 if (flags & ~SLAB_FLAGS_PERMITTED) {
477 err = -EINVAL;
478 goto out_unlock;
479 }
480
481
482
483
484
485
486
487 flags &= CACHE_CREATE_MASK;
488
489
490 if (WARN_ON(!usersize && useroffset) ||
491 WARN_ON(size < usersize || size - usersize < useroffset))
492 usersize = useroffset = 0;
493
494 if (!usersize)
495 s = __kmem_cache_alias(name, size, align, flags, ctor);
496 if (s)
497 goto out_unlock;
498
499 cache_name = kstrdup_const(name, GFP_KERNEL);
500 if (!cache_name) {
501 err = -ENOMEM;
502 goto out_unlock;
503 }
504
505 s = create_cache(cache_name, size,
506 calculate_alignment(flags, align, size),
507 flags, useroffset, usersize, ctor, NULL, NULL);
508 if (IS_ERR(s)) {
509 err = PTR_ERR(s);
510 kfree_const(cache_name);
511 }
512
513 out_unlock:
514 mutex_unlock(&slab_mutex);
515
516 memcg_put_cache_ids();
517 put_online_mems();
518 put_online_cpus();
519
520 if (err) {
521 if (flags & SLAB_PANIC)
522 panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
523 name, err);
524 else {
525 pr_warn("kmem_cache_create(%s) failed with error %d\n",
526 name, err);
527 dump_stack();
528 }
529 return NULL;
530 }
531 return s;
532 }
533 EXPORT_SYMBOL(kmem_cache_create_usercopy);
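/*
 * Usage sketch (illustrative annotation; the struct and cache below are
 * hypothetical). A cache whose objects are partially copied to/from user
 * space declares the whitelisted region with @useroffset/@usersize:
 *
 *	struct session {
 *		u32 id;
 *		u8 key[32];	// only this field may be copied to user space
 *	};
 *	static struct kmem_cache *session_cache;
 *
 *	session_cache = kmem_cache_create_usercopy("session",
 *				sizeof(struct session), 0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct session, key),
 *				sizeof(((struct session *)0)->key), NULL);
 *
 * With CONFIG_HARDENED_USERCOPY, copying any other part of the object to or
 * from user space is rejected (or only warned about when usercopy_fallback
 * is set).
 */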
534
535 /**
536  * kmem_cache_create - Create a cache.
537  * @name: A string which is used in /proc/slabinfo to identify this cache.
538  * @size: The size of objects to be created in this cache.
539  * @align: The required alignment for the objects.
540  * @flags: SLAB flags
541  * @ctor: A constructor for the objects.
542  *
543  * Cannot be called within an interrupt, but can be interrupted.
544  * The @ctor is run when new pages are allocated by the cache.
545  *
546  * The flags are
547  *
548  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
549  * to catch references to uninitialised memory.
550  *
551  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
552  * for buffer overruns.
553  *
554  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
555  * cacheline. This can be beneficial when counting cycles closely.
556  *
557  *
558  * Return: a pointer to the cache on success, NULL on failure.
559  */
560 struct kmem_cache *
561 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
562 slab_flags_t flags, void (*ctor)(void *))
563 {
564 return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
565 ctor);
566 }
567 EXPORT_SYMBOL(kmem_cache_create);
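/*
 * Usage sketch (illustrative annotation; the "foo" names are hypothetical).
 * Typical lifecycle of a cache created with kmem_cache_create():
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *
 *	// at module exit, after every object has been freed:
 *	kmem_cache_destroy(foo_cache);
 */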
568
569 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
570 {
571 LIST_HEAD(to_destroy);
572 struct kmem_cache *s, *s2;
573
574
575
576
577
578
579
580
581
582
583 mutex_lock(&slab_mutex);
584 list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
585 mutex_unlock(&slab_mutex);
586
587 if (list_empty(&to_destroy))
588 return;
589
590 rcu_barrier();
591
592 list_for_each_entry_safe(s, s2, &to_destroy, list) {
593 #ifdef SLAB_SUPPORTS_SYSFS
594 sysfs_slab_release(s);
595 #else
596 slab_kmem_cache_release(s);
597 #endif
598 }
599 }
600
601 static int shutdown_cache(struct kmem_cache *s)
602 {
603 /* free asan quarantined objects */
604 kasan_cache_shutdown(s);
605
606 if (__kmem_cache_shutdown(s) != 0)
607 return -EBUSY;
608
609 memcg_unlink_cache(s);
610 list_del(&s->list);
611
612 if (s->flags & SLAB_TYPESAFE_BY_RCU) {
613 #ifdef SLAB_SUPPORTS_SYSFS
614 sysfs_slab_unlink(s);
615 #endif
616 list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
617 schedule_work(&slab_caches_to_rcu_destroy_work);
618 } else {
619 #ifdef SLAB_SUPPORTS_SYSFS
620 sysfs_slab_unlink(s);
621 sysfs_slab_release(s);
622 #else
623 slab_kmem_cache_release(s);
624 #endif
625 }
626
627 return 0;
628 }
629
630 #ifdef CONFIG_MEMCG_KMEM
631 /*
632  * memcg_create_kmem_cache - create a memcg cache for @memcg
633  * @memcg: The memory cgroup the new cache is for.
634  * @root_cache: The parent of the new cache.
635  *
636  * This function attempts to create a kmem cache that will serve allocation
637  * requests going from @memcg to @root_cache. The new cache inherits
638  * properties from its parent.
639  */
640 void memcg_create_kmem_cache(struct mem_cgroup *memcg,
641 struct kmem_cache *root_cache)
642 {
643 static char memcg_name_buf[NAME_MAX + 1];
644 struct cgroup_subsys_state *css = &memcg->css;
645 struct memcg_cache_array *arr;
646 struct kmem_cache *s = NULL;
647 char *cache_name;
648 int idx;
649
650 get_online_cpus();
651 get_online_mems();
652
653 mutex_lock(&slab_mutex);
654
655 /*
656  * The memory cgroup could have been offlined while the cache
657  * creation work was pending.
658  */
659 if (memcg->kmem_state != KMEM_ONLINE)
660 goto out_unlock;
661
662 idx = memcg_cache_id(memcg);
663 arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
664 lockdep_is_held(&slab_mutex));
665
666 /*
667  * Since per-memcg caches are created asynchronously on first
668  * allocation (see memcg_kmem_get_cache()), several threads can try to
669  * create the same cache, but only one of them may succeed.
670  */
671 if (arr->entries[idx])
672 goto out_unlock;
673
674 cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
675 cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
676 css->serial_nr, memcg_name_buf);
677 if (!cache_name)
678 goto out_unlock;
679
680 s = create_cache(cache_name, root_cache->object_size,
681 root_cache->align,
682 root_cache->flags & CACHE_CREATE_MASK,
683 root_cache->useroffset, root_cache->usersize,
684 root_cache->ctor, memcg, root_cache);
685
686 /*
687  * If we could not create a memcg cache, do not complain, because
688  * that's not critical at all as we can always proceed with the root cache.
689  */
690 if (IS_ERR(s)) {
691 kfree(cache_name);
692 goto out_unlock;
693 }
694
695 /*
696  * Since readers won't lock (see memcg_kmem_get_cache()), we need a
697  * barrier here to ensure nobody will see the kmem_cache partially
698  * initialized.
699  */
700 smp_wmb();
701 arr->entries[idx] = s;
702
703 out_unlock:
704 mutex_unlock(&slab_mutex);
705
706 put_online_mems();
707 put_online_cpus();
708 }
709
710 static void kmemcg_workfn(struct work_struct *work)
711 {
712 struct kmem_cache *s = container_of(work, struct kmem_cache,
713 memcg_params.work);
714
715 get_online_cpus();
716 get_online_mems();
717
718 mutex_lock(&slab_mutex);
719 s->memcg_params.work_fn(s);
720 mutex_unlock(&slab_mutex);
721
722 put_online_mems();
723 put_online_cpus();
724 }
725
726 static void kmemcg_rcufn(struct rcu_head *head)
727 {
728 struct kmem_cache *s = container_of(head, struct kmem_cache,
729 memcg_params.rcu_head);
730
731 /*
732  * We need to grab blocking locks. Bounce to ->work. The
733  * work item shares the space with the RCU head and can't be
734  * initialized earlier.
735  */
736 INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
737 queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
738 }
739
740 static void kmemcg_cache_shutdown_fn(struct kmem_cache *s)
741 {
742 WARN_ON(shutdown_cache(s));
743 }
744
745 static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref)
746 {
747 struct kmem_cache *s = container_of(percpu_ref, struct kmem_cache,
748 memcg_params.refcnt);
749 unsigned long flags;
750
751 spin_lock_irqsave(&memcg_kmem_wq_lock, flags);
752 if (s->memcg_params.root_cache->memcg_params.dying)
753 goto unlock;
754
755 s->memcg_params.work_fn = kmemcg_cache_shutdown_fn;
756 INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
757 queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
758
759 unlock:
760 spin_unlock_irqrestore(&memcg_kmem_wq_lock, flags);
761 }
762
763 static void kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
764 {
765 __kmemcg_cache_deactivate_after_rcu(s);
766 percpu_ref_kill(&s->memcg_params.refcnt);
767 }
768
769 static void kmemcg_cache_deactivate(struct kmem_cache *s)
770 {
771 if (WARN_ON_ONCE(is_root_cache(s)))
772 return;
773
774 __kmemcg_cache_deactivate(s);
775 s->flags |= SLAB_DEACTIVATED;
776
777
778
779
780
781
782 spin_lock_irq(&memcg_kmem_wq_lock);
783 if (s->memcg_params.root_cache->memcg_params.dying)
784 goto unlock;
785
786 s->memcg_params.work_fn = kmemcg_cache_deactivate_after_rcu;
787 call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
788 unlock:
789 spin_unlock_irq(&memcg_kmem_wq_lock);
790 }
791
792 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg,
793 struct mem_cgroup *parent)
794 {
795 int idx;
796 struct memcg_cache_array *arr;
797 struct kmem_cache *s, *c;
798 unsigned int nr_reparented;
799
800 idx = memcg_cache_id(memcg);
801
802 get_online_cpus();
803 get_online_mems();
804
805 mutex_lock(&slab_mutex);
806 list_for_each_entry(s, &slab_root_caches, root_caches_node) {
807 arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
808 lockdep_is_held(&slab_mutex));
809 c = arr->entries[idx];
810 if (!c)
811 continue;
812
813 kmemcg_cache_deactivate(c);
814 arr->entries[idx] = NULL;
815 }
816 nr_reparented = 0;
817 list_for_each_entry(s, &memcg->kmem_caches,
818 memcg_params.kmem_caches_node) {
819 WRITE_ONCE(s->memcg_params.memcg, parent);
820 css_put(&memcg->css);
821 nr_reparented++;
822 }
823 if (nr_reparented) {
824 list_splice_init(&memcg->kmem_caches,
825 &parent->kmem_caches);
826 css_get_many(&parent->css, nr_reparented);
827 }
828 mutex_unlock(&slab_mutex);
829
830 put_online_mems();
831 put_online_cpus();
832 }
833
834 static int shutdown_memcg_caches(struct kmem_cache *s)
835 {
836 struct memcg_cache_array *arr;
837 struct kmem_cache *c, *c2;
838 LIST_HEAD(busy);
839 int i;
840
841 BUG_ON(!is_root_cache(s));
842
843 /*
844  * First, shutdown active caches, i.e. caches that belong to
845  * memory cgroups that are still online.
846  */
847 arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
848 lockdep_is_held(&slab_mutex));
849 for_each_memcg_cache_index(i) {
850 c = arr->entries[i];
851 if (!c)
852 continue;
853 if (shutdown_cache(c))
854 /*
855  * The cache still has objects. Move it to a temporary
856  * list so as not to try to destroy it for a second
857  * time while iterating over inactive caches below.
858  */
859 list_move(&c->memcg_params.children_node, &busy);
860 else
861 /*
862  * The cache is empty and will be destroyed soon. Clear
863  * the pointer to it in the memcg_caches array so that
864  * it will never be accessed even if the root cache
865  * stays alive.
866  */
867 arr->entries[i] = NULL;
868 }
869
870 /*
871  * Second, shutdown all caches left from memory cgroups that are now
872  * offline.
873  */
874 list_for_each_entry_safe(c, c2, &s->memcg_params.children,
875 memcg_params.children_node)
876 shutdown_cache(c);
877
878 list_splice(&busy, &s->memcg_params.children);
879
880 /*
881  * A cache being destroyed must be empty. In particular, this means
882  * that all per memcg caches attached to it must be empty too.
883  */
884 if (!list_empty(&s->memcg_params.children))
885 return -EBUSY;
886 return 0;
887 }
888
889 static void flush_memcg_workqueue(struct kmem_cache *s)
890 {
891 spin_lock_irq(&memcg_kmem_wq_lock);
892 s->memcg_params.dying = true;
893 spin_unlock_irq(&memcg_kmem_wq_lock);
894
895 /*
896  * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
897  * sure all registered rcu callbacks have been invoked.
898  */
899 rcu_barrier();
900
901 /*
902  * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
903  * deactivates the memcg kmem_caches through workqueue. Make sure all
904  * previous workitems on workqueue are processed.
905  */
906 if (likely(memcg_kmem_cache_wq))
907 flush_workqueue(memcg_kmem_cache_wq);
908
909 /*
910  * If we're racing with children kmem_cache deactivation, it might
911  * take another rcu grace period to complete their destruction.
912  * At this moment the corresponding percpu_ref_kill() call should be
913  * done, but it might take another rcu grace period to complete
914  * switching to the atomic mode.
915  * Please note that we check without grabbing the slab_mutex. It's safe
916  * because nobody else can add to the destruction list at this point.
917  */
918 if (!list_empty(&s->memcg_params.children))
919 rcu_barrier();
920 }
921 #else
922 static inline int shutdown_memcg_caches(struct kmem_cache *s)
923 {
924 return 0;
925 }
926
927 static inline void flush_memcg_workqueue(struct kmem_cache *s)
928 {
929 }
930 #endif
931
932 void slab_kmem_cache_release(struct kmem_cache *s)
933 {
934 __kmem_cache_release(s);
935 destroy_memcg_params(s);
936 kfree_const(s->name);
937 kmem_cache_free(kmem_cache, s);
938 }
939
940 void kmem_cache_destroy(struct kmem_cache *s)
941 {
942 int err;
943
944 if (unlikely(!s))
945 return;
946
947 flush_memcg_workqueue(s);
948
949 get_online_cpus();
950 get_online_mems();
951
952 mutex_lock(&slab_mutex);
953
954 s->refcount--;
955 if (s->refcount)
956 goto out_unlock;
957
958 err = shutdown_memcg_caches(s);
959 if (!err)
960 err = shutdown_cache(s);
961
962 if (err) {
963 pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
964 s->name);
965 dump_stack();
966 }
967 out_unlock:
968 mutex_unlock(&slab_mutex);
969
970 put_online_mems();
971 put_online_cpus();
972 }
973 EXPORT_SYMBOL(kmem_cache_destroy);
974
975 /**
976  * kmem_cache_shrink - Shrink a cache.
977  * @cachep: The cache to shrink.
978  *
979  * Releases as many slabs as possible for a cache.
980  * To help debugging, a zero exit status indicates all slabs were released.
981  *
982  * Return: %0 if all slabs were released, non-zero otherwise
983  */
984 int kmem_cache_shrink(struct kmem_cache *cachep)
985 {
986 int ret;
987
988 get_online_cpus();
989 get_online_mems();
990 kasan_cache_shrink(cachep);
991 ret = __kmem_cache_shrink(cachep);
992 put_online_mems();
993 put_online_cpus();
994 return ret;
995 }
996 EXPORT_SYMBOL(kmem_cache_shrink);
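/*
 * Usage sketch (illustrative annotation; "foo_cache" is hypothetical).
 * A subsystem that has just released a large batch of objects can hand the
 * now-empty slabs back to the page allocator:
 *
 *	if (kmem_cache_shrink(foo_cache))
 *		pr_debug("foo: some slabs are still in use\n");
 */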
997
998 /**
999  * kmem_cache_shrink_all - shrink a cache and all memcg caches for root cache
1000  * @s: The cache pointer
1001  */
1002 void kmem_cache_shrink_all(struct kmem_cache *s)
1003 {
1004 struct kmem_cache *c;
1005
1006 if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || !is_root_cache(s)) {
1007 kmem_cache_shrink(s);
1008 return;
1009 }
1010
1011 get_online_cpus();
1012 get_online_mems();
1013 kasan_cache_shrink(s);
1014 __kmem_cache_shrink(s);
1015
1016 /*
1017  * We have to take the slab_mutex to protect from the memcg list
1018  * modification.
1019  */
1020 mutex_lock(&slab_mutex);
1021 for_each_memcg_cache(c, s) {
1022 /*
1023  * We don't need to shrink deactivated memcg caches.
1024  */
1025 if (s->flags & SLAB_DEACTIVATED)
1026 continue;
1027 kasan_cache_shrink(c);
1028 __kmem_cache_shrink(c);
1029 }
1030 mutex_unlock(&slab_mutex);
1031 put_online_mems();
1032 put_online_cpus();
1033 }
1034
1035 bool slab_is_available(void)
1036 {
1037 return slab_state >= UP;
1038 }
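/*
 * Usage sketch (illustrative annotation). Early boot code that may run
 * before the slab allocator is up can use slab_is_available() to pick
 * between kmalloc and the early memblock allocator:
 *
 *	if (slab_is_available())
 *		ptr = kzalloc(size, GFP_KERNEL);
 *	else
 *		ptr = memblock_alloc(size, SMP_CACHE_BYTES);
 */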
1039
1040 #ifndef CONFIG_SLOB
1041 /* Create a cache during boot when no slab services are available yet */
1042 void __init create_boot_cache(struct kmem_cache *s, const char *name,
1043 unsigned int size, slab_flags_t flags,
1044 unsigned int useroffset, unsigned int usersize)
1045 {
1046 int err;
1047 unsigned int align = ARCH_KMALLOC_MINALIGN;
1048
1049 s->name = name;
1050 s->size = s->object_size = size;
1051
1052 /*
1053  * For power of two sizes, guarantee natural alignment for kmalloc
1054  * caches, regardless of SL*B debugging options.
1055  */
1056 if (is_power_of_2(size))
1057 align = max(align, size);
1058 s->align = calculate_alignment(flags, align, size);
1059
1060 s->useroffset = useroffset;
1061 s->usersize = usersize;
1062
1063 slab_init_memcg_params(s);
1064
1065 err = __kmem_cache_create(s, flags);
1066
1067 if (err)
1068 panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
1069 name, size, err);
1070
1071 s->refcount = -1;
1072 }
1073
1074 struct kmem_cache *__init create_kmalloc_cache(const char *name,
1075 unsigned int size, slab_flags_t flags,
1076 unsigned int useroffset, unsigned int usersize)
1077 {
1078 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
1079
1080 if (!s)
1081 panic("Out of memory when creating slab %s\n", name);
1082
1083 create_boot_cache(s, name, size, flags, useroffset, usersize);
1084 list_add(&s->list, &slab_caches);
1085 memcg_link_cache(s, NULL);
1086 s->refcount = 1;
1087 return s;
1088 }
1089
1090 struct kmem_cache *
1091 kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
1092 { };
1093 EXPORT_SYMBOL(kmalloc_caches);
1094
1095 /*
1096  * Conversion table for small slabs sizes / 8 to the index in the
1097  * kmalloc array. This is necessary for slabs < 192 since we have non power
1098  * of two cache sizes there. The size of larger slabs can be determined using
1099  * fls.
1100  */
1101 static u8 size_index[24] __ro_after_init = {
1102 3,
1103 4,
1104 5,
1105 5,
1106 6,
1107 6,
1108 6,
1109 6,
1110 1,
1111 1,
1112 1,
1113 1,
1114 7,
1115 7,
1116 7,
1117 7,
1118 2,
1119 2,
1120 2,
1121 2,
1122 2,
1123 2,
1124 2,
1125 2
1126 };
1127
1128 static inline unsigned int size_index_elem(unsigned int bytes)
1129 {
1130 return (bytes - 1) / 8;
1131 }
1132
1133 /*
1134  * Find the kmem_cache structure that serves a given size of
1135  * allocation
1136  */
1137 struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
1138 {
1139 unsigned int index;
1140
1141 if (size <= 192) {
1142 if (!size)
1143 return ZERO_SIZE_PTR;
1144
1145 index = size_index[size_index_elem(size)];
1146 } else {
1147 if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
1148 return NULL;
1149 index = fls(size - 1);
1150 }
1151
1152 return kmalloc_caches[kmalloc_type(flags)][index];
1153 }
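/*
 * Worked example (illustrative annotation). For a 24-byte request,
 * size_index_elem(24) == 2 and size_index[2] == 5, so kmalloc(24, GFP_KERNEL)
 * is served from kmalloc-32 (2^5 bytes). For a 300-byte request,
 * fls(299) == 9, so the object comes from kmalloc-512. __GFP_DMA or
 * __GFP_RECLAIMABLE in @flags select the KMALLOC_DMA or KMALLOC_RECLAIM
 * variant of the same size class via kmalloc_type().
 */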
1154
1155 /*
1156  * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
1157  * kmalloc_index() supports up to 2^26 = 64MB, so the final entry of the
1158  * table is the 64MB cache.
1159  */
1160 const struct kmalloc_info_struct kmalloc_info[] __initconst = {
1161 {NULL, 0}, {"kmalloc-96", 96},
1162 {"kmalloc-192", 192}, {"kmalloc-8", 8},
1163 {"kmalloc-16", 16}, {"kmalloc-32", 32},
1164 {"kmalloc-64", 64}, {"kmalloc-128", 128},
1165 {"kmalloc-256", 256}, {"kmalloc-512", 512},
1166 {"kmalloc-1k", 1024}, {"kmalloc-2k", 2048},
1167 {"kmalloc-4k", 4096}, {"kmalloc-8k", 8192},
1168 {"kmalloc-16k", 16384}, {"kmalloc-32k", 32768},
1169 {"kmalloc-64k", 65536}, {"kmalloc-128k", 131072},
1170 {"kmalloc-256k", 262144}, {"kmalloc-512k", 524288},
1171 {"kmalloc-1M", 1048576}, {"kmalloc-2M", 2097152},
1172 {"kmalloc-4M", 4194304}, {"kmalloc-8M", 8388608},
1173 {"kmalloc-16M", 16777216}, {"kmalloc-32M", 33554432},
1174 {"kmalloc-64M", 67108864}
1175 };
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188 void __init setup_kmalloc_cache_index_table(void)
1189 {
1190 unsigned int i;
1191
1192 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
1193 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
1194
1195 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
1196 unsigned int elem = size_index_elem(i);
1197
1198 if (elem >= ARRAY_SIZE(size_index))
1199 break;
1200 size_index[elem] = KMALLOC_SHIFT_LOW;
1201 }
1202
1203 if (KMALLOC_MIN_SIZE >= 64) {
1204 /*
1205  * The 96 byte size cache is not used if the alignment
1206  * is 64 byte.
1207  */
1208 for (i = 64 + 8; i <= 96; i += 8)
1209 size_index[size_index_elem(i)] = 7;
1210
1211 }
1212
1213 if (KMALLOC_MIN_SIZE >= 128) {
1214 /*
1215  * The 192 byte sized cache is not used if the alignment
1216  * is 128 byte. Redirect kmalloc to use the 256 byte cache
1217  * instead.
1218  */
1219 for (i = 128 + 8; i <= 192; i += 8)
1220 size_index[size_index_elem(i)] = 8;
1221 }
1222 }
1223
1224 static const char *
1225 kmalloc_cache_name(const char *prefix, unsigned int size)
1226 {
1227
1228 static const char units[3] = "\0kM";
1229 int idx = 0;
1230
1231 while (size >= 1024 && (size % 1024 == 0)) {
1232 size /= 1024;
1233 idx++;
1234 }
1235
1236 return kasprintf(GFP_NOWAIT, "%s-%u%c", prefix, size, units[idx]);
1237 }
1238
1239 static void __init
1240 new_kmalloc_cache(int idx, int type, slab_flags_t flags)
1241 {
1242 const char *name;
1243
1244 if (type == KMALLOC_RECLAIM) {
1245 flags |= SLAB_RECLAIM_ACCOUNT;
1246 name = kmalloc_cache_name("kmalloc-rcl",
1247 kmalloc_info[idx].size);
1248 BUG_ON(!name);
1249 } else {
1250 name = kmalloc_info[idx].name;
1251 }
1252
1253 kmalloc_caches[type][idx] = create_kmalloc_cache(name,
1254 kmalloc_info[idx].size, flags, 0,
1255 kmalloc_info[idx].size);
1256 }
1257
1258 /*
1259  * Create the kmalloc array. Some of the regular kmalloc arrays
1260  * may already have been created because they were needed to
1261  * enable allocations for slab allocation.
1262  */
1263 void __init create_kmalloc_caches(slab_flags_t flags)
1264 {
1265 int i, type;
1266
1267 for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
1268 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
1269 if (!kmalloc_caches[type][i])
1270 new_kmalloc_cache(i, type, flags);
1271
1272 /*
1273  * Caches that are not of the two-to-the-power-of size.
1274  * These have to be created immediately after the
1275  * earlier power of two caches
1276  */
1277 if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
1278 !kmalloc_caches[type][1])
1279 new_kmalloc_cache(1, type, flags);
1280 if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
1281 !kmalloc_caches[type][2])
1282 new_kmalloc_cache(2, type, flags);
1283 }
1284 }
1285
1286 /* Kmalloc array is now usable */
1287 slab_state = UP;
1288
1289 #ifdef CONFIG_ZONE_DMA
1290 for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
1291 struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
1292
1293 if (s) {
1294 unsigned int size = kmalloc_size(i);
1295 const char *n = kmalloc_cache_name("dma-kmalloc", size);
1296
1297 BUG_ON(!n);
1298 kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
1299 n, size, SLAB_CACHE_DMA | flags, 0, 0);
1300 }
1301 }
1302 #endif
1303 }
1304 #endif
1305
1306 /*
1307  * To avoid unnecessary overhead, we pass through large allocation requests
1308  * directly to the page allocator. We use __GFP_COMP, because we will need to
1309  * know the allocation order to free the pages properly in kfree.
1310  */
1311 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
1312 {
1313 void *ret = NULL;
1314 struct page *page;
1315
1316 flags |= __GFP_COMP;
1317 page = alloc_pages(flags, order);
1318 if (likely(page)) {
1319 ret = page_address(page);
1320 mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
1321 1 << order);
1322 }
1323 ret = kasan_kmalloc_large(ret, size, flags);
1324 /* As ret might get tagged, call kmemleak hook after KASAN. */
1325 kmemleak_alloc(ret, size, 1, flags);
1326 return ret;
1327 }
1328 EXPORT_SYMBOL(kmalloc_order);
1329
1330 #ifdef CONFIG_TRACING
1331 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
1332 {
1333 void *ret = kmalloc_order(size, flags, order);
1334 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
1335 return ret;
1336 }
1337 EXPORT_SYMBOL(kmalloc_order_trace);
1338 #endif
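/*
 * Usage sketch (illustrative annotation). Requests larger than
 * KMALLOC_MAX_CACHE_SIZE bypass the kmalloc caches: kmalloc() calls
 * kmalloc_large(), which computes the page order and ends up in
 * kmalloc_order() above. With SLUB, where KMALLOC_MAX_CACHE_SIZE is two
 * pages, a 64 KiB request takes this path:
 *
 *	void *buf = kmalloc(SZ_64K, GFP_KERNEL);
 *	...
 *	kfree(buf);	// released page-wise via the compound page order
 */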
1339
1340 #ifdef CONFIG_SLAB_FREELIST_RANDOM
1341 /* Randomize a generic freelist */
1342 static void freelist_randomize(struct rnd_state *state, unsigned int *list,
1343 unsigned int count)
1344 {
1345 unsigned int rand;
1346 unsigned int i;
1347
1348 for (i = 0; i < count; i++)
1349 list[i] = i;
1350
1351 /* Fisher-Yates shuffle */
1352 for (i = count - 1; i > 0; i--) {
1353 rand = prandom_u32_state(state);
1354 rand %= (i + 1);
1355 swap(list[i], list[rand]);
1356 }
1357 }
1358
1359 /* Create a random sequence per cache */
1360 int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
1361 gfp_t gfp)
1362 {
1363 struct rnd_state state;
1364
1365 if (count < 2 || cachep->random_seq)
1366 return 0;
1367
1368 cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
1369 if (!cachep->random_seq)
1370 return -ENOMEM;
1371
1372 /* Get best entropy at this stage of boot */
1373 prandom_seed_state(&state, get_random_long());
1374
1375 freelist_randomize(&state, cachep->random_seq, count);
1376 return 0;
1377 }
1378
1379 /* Destroy the per-cache random freelist sequence */
1380 void cache_random_seq_destroy(struct kmem_cache *cachep)
1381 {
1382 kfree(cachep->random_seq);
1383 cachep->random_seq = NULL;
1384 }
1385 #endif
1386
1387 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
1388 #ifdef CONFIG_SLAB
1389 #define SLABINFO_RIGHTS (0600)
1390 #else
1391 #define SLABINFO_RIGHTS (0400)
1392 #endif
1393
1394 static void print_slabinfo_header(struct seq_file *m)
1395 {
1396 /*
1397  * Output format version, so at least we can change it
1398  * without _too_ many complaints.
1399  */
1400 #ifdef CONFIG_DEBUG_SLAB
1401 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
1402 #else
1403 seq_puts(m, "slabinfo - version: 2.1\n");
1404 #endif
1405 seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
1406 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
1407 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1408 #ifdef CONFIG_DEBUG_SLAB
1409 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
1410 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
1411 #endif
1412 seq_putc(m, '\n');
1413 }
1414
1415 void *slab_start(struct seq_file *m, loff_t *pos)
1416 {
1417 mutex_lock(&slab_mutex);
1418 return seq_list_start(&slab_root_caches, *pos);
1419 }
1420
1421 void *slab_next(struct seq_file *m, void *p, loff_t *pos)
1422 {
1423 return seq_list_next(p, &slab_root_caches, pos);
1424 }
1425
1426 void slab_stop(struct seq_file *m, void *p)
1427 {
1428 mutex_unlock(&slab_mutex);
1429 }
1430
1431 static void
1432 memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
1433 {
1434 struct kmem_cache *c;
1435 struct slabinfo sinfo;
1436
1437 if (!is_root_cache(s))
1438 return;
1439
1440 for_each_memcg_cache(c, s) {
1441 memset(&sinfo, 0, sizeof(sinfo));
1442 get_slabinfo(c, &sinfo);
1443
1444 info->active_slabs += sinfo.active_slabs;
1445 info->num_slabs += sinfo.num_slabs;
1446 info->shared_avail += sinfo.shared_avail;
1447 info->active_objs += sinfo.active_objs;
1448 info->num_objs += sinfo.num_objs;
1449 }
1450 }
1451
1452 static void cache_show(struct kmem_cache *s, struct seq_file *m)
1453 {
1454 struct slabinfo sinfo;
1455
1456 memset(&sinfo, 0, sizeof(sinfo));
1457 get_slabinfo(s, &sinfo);
1458
1459 memcg_accumulate_slabinfo(s, &sinfo);
1460
1461 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
1462 cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
1463 sinfo.objects_per_slab, (1 << sinfo.cache_order));
1464
1465 seq_printf(m, " : tunables %4u %4u %4u",
1466 sinfo.limit, sinfo.batchcount, sinfo.shared);
1467 seq_printf(m, " : slabdata %6lu %6lu %6lu",
1468 sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
1469 slabinfo_show_stats(m, s);
1470 seq_putc(m, '\n');
1471 }
1472
1473 static int slab_show(struct seq_file *m, void *p)
1474 {
1475 struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);
1476
1477 if (p == slab_root_caches.next)
1478 print_slabinfo_header(m);
1479 cache_show(s, m);
1480 return 0;
1481 }
1482
1483 void dump_unreclaimable_slab(void)
1484 {
1485 struct kmem_cache *s, *s2;
1486 struct slabinfo sinfo;
1487
1488 /*
1489  * Here acquiring slab_mutex is risky since we don't prefer to get
1490  * sleep in oom path. But, without mutex hold, it may introduce a
1491  * risk of crash.
1492  * Use mutex_trylock to protect the list traverse, dump nothing
1493  * just in case oom is triggered while slab_mutex is already held.
1494  */
1495 if (!mutex_trylock(&slab_mutex)) {
1496 pr_warn("excessive unreclaimable slab but cannot dump stats\n");
1497 return;
1498 }
1499
1500 pr_info("Unreclaimable slab info:\n");
1501 pr_info("Name Used Total\n");
1502
1503 list_for_each_entry_safe(s, s2, &slab_caches, list) {
1504 if (!is_root_cache(s) || (s->flags & SLAB_RECLAIM_ACCOUNT))
1505 continue;
1506
1507 get_slabinfo(s, &sinfo);
1508
1509 if (sinfo.num_objs > 0)
1510 pr_info("%-17s %10luKB %10luKB\n", cache_name(s),
1511 (sinfo.active_objs * s->size) / 1024,
1512 (sinfo.num_objs * s->size) / 1024);
1513 }
1514 mutex_unlock(&slab_mutex);
1515 }
1516
1517 #if defined(CONFIG_MEMCG)
1518 void *memcg_slab_start(struct seq_file *m, loff_t *pos)
1519 {
1520 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
1521
1522 mutex_lock(&slab_mutex);
1523 return seq_list_start(&memcg->kmem_caches, *pos);
1524 }
1525
1526 void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
1527 {
1528 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
1529
1530 return seq_list_next(p, &memcg->kmem_caches, pos);
1531 }
1532
1533 void memcg_slab_stop(struct seq_file *m, void *p)
1534 {
1535 mutex_unlock(&slab_mutex);
1536 }
1537
1538 int memcg_slab_show(struct seq_file *m, void *p)
1539 {
1540 struct kmem_cache *s = list_entry(p, struct kmem_cache,
1541 memcg_params.kmem_caches_node);
1542 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
1543
1544 if (p == memcg->kmem_caches.next)
1545 print_slabinfo_header(m);
1546 cache_show(s, m);
1547 return 0;
1548 }
1549 #endif
1550
1551 /*
1552  * slabinfo_op - iterator that generates /proc/slabinfo
1553  *
1554  * Output layout:
1555  * cache-name
1556  * num-active-objs
1557  * total-objs
1558  * object size
1559  * num-active-slabs
1560  * total-slabs
1561  * num-pages-per-slab
1562  * + further values on SMP and with statistics enabled
1563  */
1564 static const struct seq_operations slabinfo_op = {
1565 .start = slab_start,
1566 .next = slab_next,
1567 .stop = slab_stop,
1568 .show = slab_show,
1569 };
1570
1571 static int slabinfo_open(struct inode *inode, struct file *file)
1572 {
1573 return seq_open(file, &slabinfo_op);
1574 }
1575
1576 static const struct file_operations proc_slabinfo_operations = {
1577 .open = slabinfo_open,
1578 .read = seq_read,
1579 .write = slabinfo_write,
1580 .llseek = seq_lseek,
1581 .release = seq_release,
1582 };
1583
1584 static int __init slab_proc_init(void)
1585 {
1586 proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
1587 &proc_slabinfo_operations);
1588 return 0;
1589 }
1590 module_init(slab_proc_init);
1591
1592 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MEMCG_KMEM)
1593 /*
1594  * Display information about kmem caches that have child memcg caches.
1595  */
1596 static int memcg_slabinfo_show(struct seq_file *m, void *unused)
1597 {
1598 struct kmem_cache *s, *c;
1599 struct slabinfo sinfo;
1600
1601 mutex_lock(&slab_mutex);
1602 seq_puts(m, "# <name> <css_id[:dead|deact]> <active_objs> <num_objs>");
1603 seq_puts(m, " <active_slabs> <num_slabs>\n");
1604 list_for_each_entry(s, &slab_root_caches, root_caches_node) {
1605 /*
1606  * Skip kmem caches that don't have any memcg children.
1607  */
1608 if (list_empty(&s->memcg_params.children))
1609 continue;
1610
1611 memset(&sinfo, 0, sizeof(sinfo));
1612 get_slabinfo(s, &sinfo);
1613 seq_printf(m, "%-17s root %6lu %6lu %6lu %6lu\n",
1614 cache_name(s), sinfo.active_objs, sinfo.num_objs,
1615 sinfo.active_slabs, sinfo.num_slabs);
1616
1617 for_each_memcg_cache(c, s) {
1618 struct cgroup_subsys_state *css;
1619 char *status = "";
1620
1621 css = &c->memcg_params.memcg->css;
1622 if (!(css->flags & CSS_ONLINE))
1623 status = ":dead";
1624 else if (c->flags & SLAB_DEACTIVATED)
1625 status = ":deact";
1626
1627 memset(&sinfo, 0, sizeof(sinfo));
1628 get_slabinfo(c, &sinfo);
1629 seq_printf(m, "%-17s %4d%-6s %6lu %6lu %6lu %6lu\n",
1630 cache_name(c), css->id, status,
1631 sinfo.active_objs, sinfo.num_objs,
1632 sinfo.active_slabs, sinfo.num_slabs);
1633 }
1634 }
1635 mutex_unlock(&slab_mutex);
1636 return 0;
1637 }
1638 DEFINE_SHOW_ATTRIBUTE(memcg_slabinfo);
1639
1640 static int __init memcg_slabinfo_init(void)
1641 {
1642 debugfs_create_file("memcg_slabinfo", S_IFREG | S_IRUGO,
1643 NULL, NULL, &memcg_slabinfo_fops);
1644 return 0;
1645 }
1646
1647 late_initcall(memcg_slabinfo_init);
1648 #endif
1649 #endif
1650
1651 static __always_inline void *__do_krealloc(const void *p, size_t new_size,
1652 gfp_t flags)
1653 {
1654 void *ret;
1655 size_t ks = 0;
1656
1657 if (p)
1658 ks = ksize(p);
1659
1660 if (ks >= new_size) {
1661 p = kasan_krealloc((void *)p, new_size, flags);
1662 return (void *)p;
1663 }
1664
1665 ret = kmalloc_track_caller(new_size, flags);
1666 if (ret && p)
1667 memcpy(ret, p, ks);
1668
1669 return ret;
1670 }
1671
1672 /**
1673  * __krealloc - like krealloc() but don't free @p.
1674  * @p: object to reallocate memory for.
1675  * @new_size: how many bytes of memory are required.
1676  * @flags: the type of memory to allocate.
1677  *
1678  * This function is like krealloc() except it never frees the originally
1679  * allocated buffer. Use this if you don't want to free the buffer immediately
1680  * like, for example, with RCU.
1681  *
1682  * Return: pointer to the allocated memory or %NULL in case of error
1683  */
1684 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
1685 {
1686 if (unlikely(!new_size))
1687 return ZERO_SIZE_PTR;
1688
1689 return __do_krealloc(p, new_size, flags);
1690
1691 }
1692 EXPORT_SYMBOL(__krealloc);
1693
1694 /**
1695  * krealloc - reallocate memory. The contents will remain unchanged.
1696  * @p: object to reallocate memory for.
1697  * @new_size: how many bytes of memory are required.
1698  * @flags: the type of memory to allocate.
1699  *
1700  * The contents of the object pointed to are preserved up to the
1701  * lesser of the new and old sizes. If @p is %NULL, krealloc()
1702  * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
1703  * %NULL pointer, the object pointed to is freed.
1704  *
1705  * Return: pointer to the allocated memory or %NULL in case of error
1706  */
1707 void *krealloc(const void *p, size_t new_size, gfp_t flags)
1708 {
1709 void *ret;
1710
1711 if (unlikely(!new_size)) {
1712 kfree(p);
1713 return ZERO_SIZE_PTR;
1714 }
1715
1716 ret = __do_krealloc(p, new_size, flags);
1717 if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
1718 kfree(p);
1719
1720 return ret;
1721 }
1722 EXPORT_SYMBOL(krealloc);
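/*
 * Usage sketch (illustrative annotation; "buf"/"len" are hypothetical).
 * Growing a buffer: the old allocation is freed automatically unless the
 * object could be reused in place, and it is left untouched on failure:
 *
 *	new = krealloc(buf, len * 2, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;	// buf is still valid and must still be freed
 *	buf = new;
 */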
1723
1724 /**
1725  * kzfree - like kfree but zero memory
1726  * @p: object to free memory of
1727  *
1728  * The memory of the object @p points to is zeroed before freed.
1729  * If @p is %NULL, kzfree() does nothing.
1730  *
1731  * Note: this function zeroes the whole allocated buffer which can be a good
1732  * deal bigger than the requested buffer size passed to kmalloc(). So be
1733  * careful when using this function in performance sensitive code.
1734  */
1735 void kzfree(const void *p)
1736 {
1737 size_t ks;
1738 void *mem = (void *)p;
1739
1740 if (unlikely(ZERO_OR_NULL_PTR(mem)))
1741 return;
1742 ks = ksize(mem);
1743 memset(mem, 0, ks);
1744 kfree(mem);
1745 }
1746 EXPORT_SYMBOL(kzfree);
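/*
 * Usage sketch (illustrative annotation; "key"/"keylen" are hypothetical).
 * kzfree() is meant for buffers holding key material or other secrets, so
 * the data does not linger in freed slab memory:
 *
 *	u8 *key = kmalloc(keylen, GFP_KERNEL);
 *	...
 *	kzfree(key);	// zeroes ksize(key) bytes, then frees the buffer
 */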
1747
1748 /**
1749  * ksize - get the actual amount of memory allocated for a given object
1750  * @objp: Pointer to the object
1751  *
1752  * kmalloc may internally round up allocations and return more memory
1753  * than requested. ksize() can be used to determine the actual amount of
1754  * memory allocated. The caller may use this additional memory, even though
1755  * a smaller amount of memory was initially specified with the kmalloc call.
1756  * The caller must guarantee that objp points to a valid object previously
1757  * allocated with either kmalloc() or kmem_cache_alloc(). The object
1758  * must not be freed during the duration of the call.
1759  *
1760  * Return: size of the actual memory used by @objp in bytes
1761  */
1762 size_t ksize(const void *objp)
1763 {
1764 size_t size;
1765
1766 if (WARN_ON_ONCE(!objp))
1767 return 0;
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781 if (unlikely(objp == ZERO_SIZE_PTR) || !__kasan_check_read(objp, 1))
1782 return 0;
1783
1784 size = __ksize(objp);
1785
1786 /*
1787  * ksize() callers may use the whole allocated area, so unpoison it.
1788  */
1789 kasan_unpoison_shadow(objp, size);
1790 return size;
1791 }
1792 EXPORT_SYMBOL(ksize);
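/*
 * Usage sketch (illustrative annotation; "buf", "used", "extra" and "src"
 * are hypothetical). Because kmalloc() rounds sizes up to a size class, a
 * caller may use the slack that ksize() reports, e.g. to append data
 * without reallocating:
 *
 *	buf = kmalloc(100, GFP_KERNEL);		// typically a 128-byte object
 *	avail = ksize(buf);			// e.g. 128
 *	if (used + extra <= avail)
 *		memcpy(buf + used, src, extra);	// append in place, no krealloc()
 */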
1793
1794 /* Tracepoints definitions. */
1795 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
1796 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
1797 EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
1798 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
1799 EXPORT_TRACEPOINT_SYMBOL(kfree);
1800 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
1801
1802 int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
1803 {
1804 if (__should_failslab(s, gfpflags))
1805 return -ENOMEM;
1806 return 0;
1807 }
1808 ALLOW_ERROR_INJECTION(should_failslab, ERRNO);