This source file includes the following definitions:
- __kmem_cache_alias
- kmem_cache_flags
- cache_vmstat_idx
- is_root_cache
- slab_equal_or_root
- cache_name
- memcg_root_cache
- memcg_from_slab_page
- memcg_charge_slab
- memcg_uncharge_slab
- is_root_cache
- slab_equal_or_root
- cache_name
- memcg_root_cache
- memcg_from_slab_page
- memcg_charge_slab
- memcg_uncharge_slab
- slab_init_memcg_params
- memcg_link_cache
- virt_to_cache
- charge_slab_page
- uncharge_slab_page
- cache_from_obj
- slab_ksize
- slab_pre_alloc_hook
- slab_post_alloc_hook
- get_node
- dump_unreclaimable_slab
- cache_random_seq_create
- cache_random_seq_destroy
- slab_want_init_on_alloc
- slab_want_init_on_free
#ifndef MM_SLAB_H
#define MM_SLAB_H

/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators. SLOB uses
 * this structure directly; SLAB and SLUB provide the same fields in their
 * own kmem_cache definitions.
 */
struct kmem_cache {
	unsigned int object_size;	/* The original size of the object */
	unsigned int size;		/* The aligned/padded/added-on size */
	unsigned int align;		/* Alignment as calculated */
	slab_flags_t flags;		/* Active flags on the slab */
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
	const char *name;		/* Slab name for sysfs */
	int refcount;			/* Use counter */
	void (*ctor)(void *);		/* Called on object slot creation */
	struct list_head list;		/* List of all slab caches on the system */
};

#else /* !CONFIG_SLOB */

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};
/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * The union distinguishes root caches, which carry the RCU-protected array
 * of per-memcg child caches and the list of children, from the per-memcg
 * child caches themselves, which point back to their memory cgroup and root
 * cache and carry the deactivation/release machinery.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};
#endif /* CONFIG_SLOB */
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use it to gradually bootstrap themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};
extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned int size;
} kmalloc_info[];
#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a size (in bytes) */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));

#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with the current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create() */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
void kmem_cache_shrink_all(struct kmem_cache *s);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * in the array must be returned.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
}
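
/*
 * Illustrative note (not part of the original file): cache_vmstat_idx()
 * picks the per-node vmstat counter that a cache's slab pages are accounted
 * to, so the charging paths further below do roughly:
 *
 *	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), 1 << order);
 */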
#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must
 * hold slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * Memcg child caches carry a suffixed name internally because cache names
 * must be unique system-wide; when reporting a name, refer to the root
 * cache's base name instead.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

/*
 * Expects a pointer to a slab page. Note that a PageSlab() check alone is
 * not sufficient, as it is also true for tail pages of compound slab pages,
 * which do not have the slab_cache pointer set. The kmem_cache can be
 * reparented asynchronously, so the caller must ensure the memcg lifetime,
 * e.g. by holding rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	s = READ_ONCE(page->slab_cache);
	if (s && !is_root_cache(s))
		return READ_ONCE(s->memcg_params.memcg);

	return NULL;
}
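
/*
 * Illustrative usage sketch (not part of the original file; the caller shown
 * is hypothetical): because the cache may be reparented at any time, lookups
 * are done under RCU:
 *
 *	rcu_read_lock();
 *	memcg = memcg_from_slab_page(virt_to_head_page(obj));
 *	... use memcg, or observe NULL for root-cache pages ...
 *	rcu_read_unlock();
 */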
/*
 * Charge the slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	int ret;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	while (memcg && !css_tryget_online(&memcg->css))
		memcg = parent_mem_cgroup(memcg);
	rcu_read_unlock();

	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    (1 << order));
		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
		return 0;
	}

	ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
	if (ret)
		goto out;

	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);

	/* Transfer the page references taken by the charge to the kmem_cache */
	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
	css_put_many(&memcg->css, 1 << order);
out:
	css_put(&memcg->css);
	return ret;
}

/*
 * Uncharge a slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	if (likely(!mem_cgroup_is_root(memcg))) {
		lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
		memcg_kmem_uncharge_memcg(page, order, memcg);
	} else {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(1 << order));
	}
	rcu_read_unlock();

	percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
}
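
/*
 * Illustrative note (not part of the original file): each charged slab page
 * pins the child cache through memcg_params.refcnt with 1 << order
 * references, taken in memcg_charge_slab() and dropped again in
 * memcg_uncharge_slab(), so a child cache cannot be released while any of
 * its slab pages are still outstanding.
 */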
extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return s == p;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	return NULL;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s,
				    struct mem_cgroup *memcg)
{
}

#endif /* CONFIG_MEMCG_KMEM */
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    1 << order);
		return 0;
	}

	return memcg_charge_slab(page, gfp, order, s);
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(1 << order));
		return;
	}

	memcg_uncharge_slab(page, order, s);
}
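
/*
 * Illustrative sketch (not part of the original file; the helper shown is a
 * hypothetical allocator function): charge_slab_page() is expected to be
 * called when an allocator obtains a fresh slab page, and
 * uncharge_slab_page() when it releases one, e.g.:
 *
 *	static struct page *example_alloc_slab_page(struct kmem_cache *s,
 *						    gfp_t flags, int order)
 *	{
 *		struct page *page = alloc_pages(flags, order);
 *
 *		if (page && charge_slab_page(page, flags, order, s)) {
 *			__free_pages(page, order);
 *			page = NULL;
 *		}
 *		return page;
 *	}
 */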
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	/*
	 * When memcg accounting, freelist hardening and consistency checks
	 * are all disabled, looking up the cache from the object would be
	 * pure overhead, so trust the cache supplied by the caller.
	 */
	if (!memcg_kmem_enabled() &&
	    !IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name);
	return cachep;
}
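
/*
 * Illustrative sketch (not part of the original file): a free path is
 * expected to re-resolve the cache before touching it, roughly:
 *
 *	s = cache_from_obj(s, object);
 *	if (!s)
 *		return;
 *	... free the object back into s ...
 */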
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between the object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there or track
	 * user information, only the space before that information is
	 * usable.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else all the padding etc. can be used for the allocation.
	 */
	return s->size;
#endif
}
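
/*
 * Illustrative note (not part of the original file): slab_ksize() reports
 * the number of bytes the object's user may actually rely on, which is what
 * ksize() ends up returning for slab-backed allocations. Depending on the
 * configured debug features it lies anywhere between the requested
 * object_size and the full slot size.
 */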
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call the kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
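
/*
 * Illustrative sketch (not part of the original file): the allocators'
 * allocation paths are expected to bracket the actual object allocation
 * with these two hooks, roughly:
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;
 *	object = ...allocate from s...;
 *	slab_post_alloc_hook(s, gfpflags, 1, &object);
 *	return object;
 */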
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		if ((__n = get_node(__s, __node)))

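/*
 * Illustrative usage (not part of the original file; the counting loop is
 * hypothetical and n->nr_partial exists under CONFIG_SLUB):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */
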
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
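
/*
 * Illustrative note (not part of the original file): with
 * CONFIG_SLAB_FREELIST_RANDOM an allocator calls cache_random_seq_create()
 * while setting up a cache to precompute a shuffled order in which free
 * objects are handed out, and cache_random_seq_destroy() when the cache is
 * torn down; without the option both calls are no-ops.
 */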
static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
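
/*
 * Illustrative sketch (not part of the original file): allocators are
 * expected to consult these two helpers to decide when to zero object
 * memory, roughly:
 *
 *	if (slab_want_init_on_alloc(gfpflags, s))
 *		memset(object, 0, s->object_size);
 *
 * on allocation, and similarly wipe the object when slab_want_init_on_free(s)
 * returns true before it goes back on the freelist.
 */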

#endif /* MM_SLAB_H */