root/mm/slab.h


DEFINITIONS

This source file includes the following definitions.
  1. __kmem_cache_alias
  2. kmem_cache_flags
  3. cache_vmstat_idx
  4. is_root_cache
  5. slab_equal_or_root
  6. cache_name
  7. memcg_root_cache
  8. memcg_from_slab_page
  9. memcg_charge_slab
  10. memcg_uncharge_slab
  11. is_root_cache
  12. slab_equal_or_root
  13. cache_name
  14. memcg_root_cache
  15. memcg_from_slab_page
  16. memcg_charge_slab
  17. memcg_uncharge_slab
  18. slab_init_memcg_params
  19. memcg_link_cache
  20. virt_to_cache
  21. charge_slab_page
  22. uncharge_slab_page
  23. cache_from_obj
  24. slab_ksize
  25. slab_pre_alloc_hook
  26. slab_post_alloc_hook
  27. get_node
  28. dump_unreclaimable_slab
  29. cache_random_seq_create
  30. cache_random_seq_destroy
  31. slab_want_init_on_alloc
  32. slab_want_init_on_free

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef MM_SLAB_H
   3 #define MM_SLAB_H
   4 /*
   5  * Internal slab definitions
   6  */
   7 
   8 #ifdef CONFIG_SLOB
   9 /*
  10  * Common fields provided in kmem_cache by all slab allocators.
  11  * This struct is either used directly by the allocator (SLOB)
  12  * or the allocator must include definitions for all fields
  13  * provided in kmem_cache_common in its definition of kmem_cache.
  14  *
  15  * Once we can do anonymous structs (C11 standard) we could put an
  16  * anonymous struct definition in these allocators so that the
  17  * separate allocations in the kmem_cache structures of SLAB and
  18  * SLUB are no longer needed.
  19  */
  20 struct kmem_cache {
  21         unsigned int object_size;/* The original size of the object */
  22         unsigned int size;      /* The aligned/padded/added on size  */
  23         unsigned int align;     /* Alignment as calculated */
  24         slab_flags_t flags;     /* Active flags on the slab */
  25         unsigned int useroffset;/* Usercopy region offset */
  26         unsigned int usersize;  /* Usercopy region size */
  27         const char *name;       /* Slab name for sysfs */
  28         int refcount;           /* Use counter */
  29         void (*ctor)(void *);   /* Called on object slot creation */
  30         struct list_head list;  /* List of all slab caches on the system */
  31 };
  32 
  33 #else /* !CONFIG_SLOB */
  34 
  35 struct memcg_cache_array {
  36         struct rcu_head rcu;
  37         struct kmem_cache *entries[0];
  38 };
  39 
  40 /*
  41  * This is the main placeholder for memcg-related information in kmem caches.
  42  * Both the root cache and the child caches will have it. For the root cache,
  43  * this will hold a dynamically allocated array large enough to hold
  44  * information about the currently limited memcgs in the system. To allow the
  45  * array to be accessed without taking any locks, on relocation we free the old
  46  * version only after a grace period.
  47  *
  48  * Root and child caches hold different metadata.
  49  *
  50  * @root_cache: Common to root and child caches.  NULL for root, pointer to
  51  *              the root cache for children.
  52  *
  53  * The following fields are specific to root caches.
  54  *
  55  * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
  56  *              used to index child caches during allocation and cleared
  57  *              early during shutdown.
  58  *
  59  * @root_caches_node: List node for slab_root_caches list.
  60  *
  61  * @children:   List of all child caches.  While the child caches are also
  62  *              reachable through @memcg_caches, a child cache remains on
  63  *              this list until it is actually destroyed.
  64  *
  65  * The following fields are specific to child caches.
  66  *
  67  * @memcg:      Pointer to the memcg this cache belongs to.
  68  *
  69  * @children_node: List node for @root_cache->children list.
  70  *
  71  * @kmem_caches_node: List node for @memcg->kmem_caches list.
  72  */
  73 struct memcg_cache_params {
  74         struct kmem_cache *root_cache;
  75         union {
  76                 struct {
  77                         struct memcg_cache_array __rcu *memcg_caches;
  78                         struct list_head __root_caches_node;
  79                         struct list_head children;
  80                         bool dying;
  81                 };
  82                 struct {
  83                         struct mem_cgroup *memcg;
  84                         struct list_head children_node;
  85                         struct list_head kmem_caches_node;
  86                         struct percpu_ref refcnt;
  87 
  88                         void (*work_fn)(struct kmem_cache *);
  89                         union {
  90                                 struct rcu_head rcu_head;
  91                                 struct work_struct work;
  92                         };
  93                 };
  94         };
  95 };
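
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * the root/child split documented above is keyed entirely off @root_cache,
 * which is what is_root_cache() further down in this header relies on.
 * Telling the two kinds apart could look like this:
 *
 *	static void print_cache_kind(struct kmem_cache *s)
 *	{
 *		if (!s->memcg_params.root_cache)
 *			pr_info("%s: root cache\n", s->name);
 *		else
 *			pr_info("%s: child of %s\n", s->name,
 *				s->memcg_params.root_cache->name);
 *	}
 */
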
  96 #endif /* CONFIG_SLOB */
  97 
  98 #ifdef CONFIG_SLAB
  99 #include <linux/slab_def.h>
 100 #endif
 101 
 102 #ifdef CONFIG_SLUB
 103 #include <linux/slub_def.h>
 104 #endif
 105 
 106 #include <linux/memcontrol.h>
 107 #include <linux/fault-inject.h>
 108 #include <linux/kasan.h>
 109 #include <linux/kmemleak.h>
 110 #include <linux/random.h>
 111 #include <linux/sched/mm.h>
 112 
 113 /*
 114  * State of the slab allocator.
 115  *
 116  * This is used to describe the states of the allocator during bootup.
 117  * Allocators use this to gradually bootstrap themselves. Most allocators
 118  * have the problem that the structures used for managing slab caches are
 119  * allocated from slab caches themselves.
 120  */
 121 enum slab_state {
 122         DOWN,                   /* No slab functionality yet */
 123         PARTIAL,                /* SLUB: kmem_cache_node available */
 124         PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
 125         UP,                     /* Slab caches usable but not all extras yet */
 126         FULL                    /* Everything is working */
 127 };
 128 
 129 extern enum slab_state slab_state;
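
/*
 * Illustrative sketch (hypothetical caller): early-boot code typically
 * gates on slab_state instead of assuming the allocator is usable, e.g.
 *
 *	if (slab_state >= UP)
 *		ptr = kmalloc(size, GFP_KERNEL);
 *	else
 *		ptr = memblock_alloc(size, SMP_CACHE_BYTES);
 *
 * cf. slab_is_available(), which checks slab_state >= UP.
 */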
 130 
 131 /* The slab cache mutex protects the management structures during changes */
 132 extern struct mutex slab_mutex;
 133 
 134 /* The list of all slab caches on the system */
 135 extern struct list_head slab_caches;
 136 
 137 /* The slab cache that manages slab cache information */
 138 extern struct kmem_cache *kmem_cache;
 139 
 140 /* A table of kmalloc cache names and sizes */
 141 extern const struct kmalloc_info_struct {
 142         const char *name;
 143         unsigned int size;
 144 } kmalloc_info[];
 145 
 146 #ifndef CONFIG_SLOB
 147 /* Kmalloc array related functions */
 148 void setup_kmalloc_cache_index_table(void);
 149 void create_kmalloc_caches(slab_flags_t);
 150 
 151 /* Find the kmalloc slab corresponding to a certain size */
 152 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 153 #endif
 154 
 155 
 156 /* Functions provided by the slab allocators */
 157 int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
 158 
 159 struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
 160                         slab_flags_t flags, unsigned int useroffset,
 161                         unsigned int usersize);
 162 extern void create_boot_cache(struct kmem_cache *, const char *name,
 163                         unsigned int size, slab_flags_t flags,
 164                         unsigned int useroffset, unsigned int usersize);
 165 
 166 int slab_unmergeable(struct kmem_cache *s);
 167 struct kmem_cache *find_mergeable(unsigned size, unsigned align,
 168                 slab_flags_t flags, const char *name, void (*ctor)(void *));
 169 #ifndef CONFIG_SLOB
 170 struct kmem_cache *
 171 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 172                    slab_flags_t flags, void (*ctor)(void *));
 173 
 174 slab_flags_t kmem_cache_flags(unsigned int object_size,
 175         slab_flags_t flags, const char *name,
 176         void (*ctor)(void *));
 177 #else
 178 static inline struct kmem_cache *
 179 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 180                    slab_flags_t flags, void (*ctor)(void *))
 181 { return NULL; }
 182 
 183 static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 184         slab_flags_t flags, const char *name,
 185         void (*ctor)(void *))
 186 {
 187         return flags;
 188 }
 189 #endif
 190 
 191 
 192 /* Legal flag mask for kmem_cache_create(), for various configurations */
 193 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
 194                          SLAB_CACHE_DMA32 | SLAB_PANIC | \
 195                          SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
 196 
 197 #if defined(CONFIG_DEBUG_SLAB)
 198 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 199 #elif defined(CONFIG_SLUB_DEBUG)
 200 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
 201                           SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
 202 #else
 203 #define SLAB_DEBUG_FLAGS (0)
 204 #endif
 205 
 206 #if defined(CONFIG_SLAB)
 207 #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
 208                           SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
 209                           SLAB_ACCOUNT)
 210 #elif defined(CONFIG_SLUB)
 211 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
 212                           SLAB_TEMPORARY | SLAB_ACCOUNT)
 213 #else
 214 #define SLAB_CACHE_FLAGS (0)
 215 #endif
 216 
 217 /* Common flags available with current configuration */
 218 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
 219 
 220 /* Common flags permitted for kmem_cache_create */
 221 #define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
 222                               SLAB_RED_ZONE | \
 223                               SLAB_POISON | \
 224                               SLAB_STORE_USER | \
 225                               SLAB_TRACE | \
 226                               SLAB_CONSISTENCY_CHECKS | \
 227                               SLAB_MEM_SPREAD | \
 228                               SLAB_NOLEAKTRACE | \
 229                               SLAB_RECLAIM_ACCOUNT | \
 230                               SLAB_TEMPORARY | \
 231                               SLAB_ACCOUNT)
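
/*
 * Illustrative sketch of how the two masks above are meant to be used
 * (the actual checks are performed in mm/slab_common.c): flags passed to
 * kmem_cache_create() are first validated against SLAB_FLAGS_PERMITTED
 * and then reduced to what the current configuration supports:
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return ERR_PTR(-EINVAL);
 *	flags &= CACHE_CREATE_MASK;
 */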
 232 
 233 bool __kmem_cache_empty(struct kmem_cache *);
 234 int __kmem_cache_shutdown(struct kmem_cache *);
 235 void __kmem_cache_release(struct kmem_cache *);
 236 int __kmem_cache_shrink(struct kmem_cache *);
 237 void __kmemcg_cache_deactivate(struct kmem_cache *s);
 238 void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
 239 void slab_kmem_cache_release(struct kmem_cache *);
 240 void kmem_cache_shrink_all(struct kmem_cache *s);
 241 
 242 struct seq_file;
 243 struct file;
 244 
 245 struct slabinfo {
 246         unsigned long active_objs;
 247         unsigned long num_objs;
 248         unsigned long active_slabs;
 249         unsigned long num_slabs;
 250         unsigned long shared_avail;
 251         unsigned int limit;
 252         unsigned int batchcount;
 253         unsigned int shared;
 254         unsigned int objects_per_slab;
 255         unsigned int cache_order;
 256 };
 257 
 258 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
 259 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
 260 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 261                        size_t count, loff_t *ppos);
 262 
 263 /*
 264  * Generic implementation of bulk operations.
 265  * These are useful for situations in which the allocator cannot
 266  * perform optimizations. In that case segments of the objects listed
 267  * may be allocated or freed using these operations.
 268  */
 269 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
 270 int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
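
/*
 * Conceptually, the generic fallbacks amount to a loop over the regular
 * per-object entry points. A minimal sketch of the free side (the real
 * implementation in mm/slab_common.c also tolerates a NULL cache and
 * falls back to kfree()):
 *
 *	void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nr; i++)
 *			kmem_cache_free(s, p[i]);
 *	}
 */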
 271 
 272 static inline int cache_vmstat_idx(struct kmem_cache *s)
 273 {
 274         return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
 275                 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
 276 }
 277 
 278 #ifdef CONFIG_MEMCG_KMEM
 279 
 280 /* List of all root caches. */
 281 extern struct list_head         slab_root_caches;
 282 #define root_caches_node        memcg_params.__root_caches_node
 283 
 284 /*
 285  * Iterate over all memcg caches of the given root cache. The caller must hold
 286  * slab_mutex.
 287  */
 288 #define for_each_memcg_cache(iter, root) \
 289         list_for_each_entry(iter, &(root)->memcg_params.children, \
 290                             memcg_params.children_node)
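
/*
 * Usage sketch (hypothetical caller; as noted above, slab_mutex must be
 * held across the walk):
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root)
 *		pr_info("child cache: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */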
 291 
 292 static inline bool is_root_cache(struct kmem_cache *s)
 293 {
 294         return !s->memcg_params.root_cache;
 295 }
 296 
 297 static inline bool slab_equal_or_root(struct kmem_cache *s,
 298                                       struct kmem_cache *p)
 299 {
 300         return p == s || p == s->memcg_params.root_cache;
 301 }
 302 
 303 /*
 304  * We append suffixes to the cache name in memcg because we can't have
 305  * caches created in the system with the same name. But when we print
 306  * them locally, it is better to refer to them by the base name.
 307  */
 308 static inline const char *cache_name(struct kmem_cache *s)
 309 {
 310         if (!is_root_cache(s))
 311                 s = s->memcg_params.root_cache;
 312         return s->name;
 313 }
 314 
 315 static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 316 {
 317         if (is_root_cache(s))
 318                 return s;
 319         return s->memcg_params.root_cache;
 320 }
 321 
 322 /*
 323  * Expects a pointer to a slab page. Please note that a PageSlab() check
 324  * isn't sufficient, as it also returns true for tail pages of compound
 325  * slab pages, which do not have the slab_cache pointer set.
 326  * So this function assumes that the page can pass the
 327  * PageSlab() && !PageTail() check.
 328  *
 329  * The kmem_cache can be reparented asynchronously. The caller must ensure
 330  * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 331  */
 332 static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
 333 {
 334         struct kmem_cache *s;
 335 
 336         s = READ_ONCE(page->slab_cache);
 337         if (s && !is_root_cache(s))
 338                 return READ_ONCE(s->memcg_params.memcg);
 339 
 340         return NULL;
 341 }
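
/*
 * Sketch of a caller honouring the lifetime rule described above
 * (hypothetical, for illustration only):
 *
 *	rcu_read_lock();
 *	memcg = memcg_from_slab_page(virt_to_head_page(obj));
 *	if (memcg) {
 *		... use memcg; it is only guaranteed to stay alive while
 *		... under RCU, unless a reference is taken, e.g. with
 *		... css_tryget(&memcg->css)
 *	}
 *	rcu_read_unlock();
 */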
 342 
 343 /*
 344  * Charge a slab page belonging to a non-root kmem_cache.
 345  * Can be called for non-root kmem_caches only.
 346  */
 347 static __always_inline int memcg_charge_slab(struct page *page,
 348                                              gfp_t gfp, int order,
 349                                              struct kmem_cache *s)
 350 {
 351         struct mem_cgroup *memcg;
 352         struct lruvec *lruvec;
 353         int ret;
 354 
 355         rcu_read_lock();
 356         memcg = READ_ONCE(s->memcg_params.memcg);
 357         while (memcg && !css_tryget_online(&memcg->css))
 358                 memcg = parent_mem_cgroup(memcg);
 359         rcu_read_unlock();
 360 
 361         if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
 362                 mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
 363                                     (1 << order));
 364                 percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
 365                 return 0;
 366         }
 367 
 368         ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
 369         if (ret)
 370                 goto out;
 371 
 372         lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
 373         mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
 374 
 375         /* transfer try_charge() page references to kmem_cache */
 376         percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
 377         css_put_many(&memcg->css, 1 << order);
 378 out:
 379         css_put(&memcg->css);
 380         return ret;
 381 }
 382 
 383 /*
 384  * Uncharge a slab page belonging to a non-root kmem_cache.
 385  * Can be called for non-root kmem_caches only.
 386  */
 387 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 388                                                 struct kmem_cache *s)
 389 {
 390         struct mem_cgroup *memcg;
 391         struct lruvec *lruvec;
 392 
 393         rcu_read_lock();
 394         memcg = READ_ONCE(s->memcg_params.memcg);
 395         if (likely(!mem_cgroup_is_root(memcg))) {
 396                 lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
 397                 mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
 398                 memcg_kmem_uncharge_memcg(page, order, memcg);
 399         } else {
 400                 mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
 401                                     -(1 << order));
 402         }
 403         rcu_read_unlock();
 404 
 405         percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
 406 }
 407 
 408 extern void slab_init_memcg_params(struct kmem_cache *);
 409 extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
 410 
 411 #else /* CONFIG_MEMCG_KMEM */
 412 
 413 /* If !memcg, all caches are root. */
 414 #define slab_root_caches        slab_caches
 415 #define root_caches_node        list
 416 
 417 #define for_each_memcg_cache(iter, root) \
 418         for ((void)(iter), (void)(root); 0; )
 419 
 420 static inline bool is_root_cache(struct kmem_cache *s)
 421 {
 422         return true;
 423 }
 424 
 425 static inline bool slab_equal_or_root(struct kmem_cache *s,
 426                                       struct kmem_cache *p)
 427 {
 428         return s == p;
 429 }
 430 
 431 static inline const char *cache_name(struct kmem_cache *s)
 432 {
 433         return s->name;
 434 }
 435 
 436 static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 437 {
 438         return s;
 439 }
 440 
 441 static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
 442 {
 443         return NULL;
 444 }
 445 
 446 static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
 447                                     struct kmem_cache *s)
 448 {
 449         return 0;
 450 }
 451 
 452 static inline void memcg_uncharge_slab(struct page *page, int order,
 453                                        struct kmem_cache *s)
 454 {
 455 }
 456 
 457 static inline void slab_init_memcg_params(struct kmem_cache *s)
 458 {
 459 }
 460 
 461 static inline void memcg_link_cache(struct kmem_cache *s,
 462                                     struct mem_cgroup *memcg)
 463 {
 464 }
 465 
 466 #endif /* CONFIG_MEMCG_KMEM */
 467 
 468 static inline struct kmem_cache *virt_to_cache(const void *obj)
 469 {
 470         struct page *page;
 471 
 472         page = virt_to_head_page(obj);
 473         if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
 474                                         __func__))
 475                 return NULL;
 476         return page->slab_cache;
 477 }
 478 
 479 static __always_inline int charge_slab_page(struct page *page,
 480                                             gfp_t gfp, int order,
 481                                             struct kmem_cache *s)
 482 {
 483         if (is_root_cache(s)) {
 484                 mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
 485                                     1 << order);
 486                 return 0;
 487         }
 488 
 489         return memcg_charge_slab(page, gfp, order, s);
 490 }
 491 
 492 static __always_inline void uncharge_slab_page(struct page *page, int order,
 493                                                struct kmem_cache *s)
 494 {
 495         if (is_root_cache(s)) {
 496                 mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
 497                                     -(1 << order));
 498                 return;
 499         }
 500 
 501         memcg_uncharge_slab(page, order, s);
 502 }
 503 
 504 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 505 {
 506         struct kmem_cache *cachep;
 507 
 508         /*
 509          * When kmemcg is not being used, both assignments should return the
 510          * same value, but we don't want to pay the assignment price in that
 511          * case. If it is not compiled in, the compiler should be smart enough
 512          * not to do even the assignment. In that case, slab_equal_or_root()
 513          * will also be a constant.
 514          */
 515         if (!memcg_kmem_enabled() &&
 516             !IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
 517             !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
 518                 return s;
 519 
 520         cachep = virt_to_cache(x);
 521         WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
 522                   "%s: Wrong slab cache. %s but object is from %s\n",
 523                   __func__, s->name, cachep->name);
 524         return cachep;
 525 }
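
/*
 * Sketch of the typical free-path usage (this mirrors what the allocators'
 * kmem_cache_free() implementations do; shown here only for illustration):
 *
 *	s = cache_from_obj(s, objp);
 *	if (!s)
 *		return;
 *
 * where the NULL return means virt_to_cache() already warned about a
 * non-slab address, so the free is simply skipped.
 */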
 526 
 527 static inline size_t slab_ksize(const struct kmem_cache *s)
 528 {
 529 #ifndef CONFIG_SLUB
 530         return s->object_size;
 531 
 532 #else /* CONFIG_SLUB */
 533 # ifdef CONFIG_SLUB_DEBUG
 534         /*
 535          * Debugging requires use of the padding between object
 536          * and whatever may come after it.
 537          */
 538         if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
 539                 return s->object_size;
 540 # endif
 541         if (s->flags & SLAB_KASAN)
 542                 return s->object_size;
 543         /*
 544          * If we need to store the freelist pointer back there
 545          * or track user information, then we can only use the
 546          * space before that information.
 547          */
 548         if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
 549                 return s->inuse;
 550         /*
 551          * Else we can use all the padding etc. for the allocation.
 552          */
 553         return s->size;
 554 #endif
 555 }
 556 
 557 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 558                                                      gfp_t flags)
 559 {
 560         flags &= gfp_allowed_mask;
 561 
 562         fs_reclaim_acquire(flags);
 563         fs_reclaim_release(flags);
 564 
 565         might_sleep_if(gfpflags_allow_blocking(flags));
 566 
 567         if (should_failslab(s, flags))
 568                 return NULL;
 569 
 570         if (memcg_kmem_enabled() &&
 571             ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
 572                 return memcg_kmem_get_cache(s);
 573 
 574         return s;
 575 }
 576 
 577 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 578                                         size_t size, void **p)
 579 {
 580         size_t i;
 581 
 582         flags &= gfp_allowed_mask;
 583         for (i = 0; i < size; i++) {
 584                 p[i] = kasan_slab_alloc(s, p[i], flags);
 585                 /* As p[i] might get tagged, call kmemleak hook after KASAN. */
 586                 kmemleak_alloc_recursive(p[i], s->object_size, 1,
 587                                          s->flags, flags);
 588         }
 589 
 590         if (memcg_kmem_enabled())
 591                 memcg_kmem_put_cache(s);
 592 }
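
/*
 * The two hooks above bracket every slab allocation. A simplified sketch
 * of the calling protocol (the real fastpaths in SLAB and SLUB also deal
 * with bulk allocation and allocation failure):
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;
 *
 *	object = ... allocate one object from s ...;
 *
 *	slab_post_alloc_hook(s, gfpflags, 1, &object);
 *	return object;
 */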
 593 
 594 #ifndef CONFIG_SLOB
 595 /*
 596  * The slab lists for all objects.
 597  */
 598 struct kmem_cache_node {
 599         spinlock_t list_lock;
 600 
 601 #ifdef CONFIG_SLAB
 602         struct list_head slabs_partial; /* partial list first, better asm code */
 603         struct list_head slabs_full;
 604         struct list_head slabs_free;
 605         unsigned long total_slabs;      /* length of all slab lists */
 606         unsigned long free_slabs;       /* length of free slab list only */
 607         unsigned long free_objects;
 608         unsigned int free_limit;
 609         unsigned int colour_next;       /* Per-node cache coloring */
 610         struct array_cache *shared;     /* shared per node */
 611         struct alien_cache **alien;     /* on other nodes */
 612         unsigned long next_reap;        /* updated without locking */
 613         int free_touched;               /* updated without locking */
 614 #endif
 615 
 616 #ifdef CONFIG_SLUB
 617         unsigned long nr_partial;
 618         struct list_head partial;
 619 #ifdef CONFIG_SLUB_DEBUG
 620         atomic_long_t nr_slabs;
 621         atomic_long_t total_objects;
 622         struct list_head full;
 623 #endif
 624 #endif
 625 
 626 };
 627 
 628 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 629 {
 630         return s->node[node];
 631 }
 632 
 633 /*
 634  * Iterator over all nodes. The body will be executed for each node that has
 635  * a kmem_cache_node structure allocated (which is true for all online nodes).
 636  */
 637 #define for_each_kmem_cache_node(__s, __node, __n) \
 638         for (__node = 0; __node < nr_node_ids; __node++) \
 639                  if ((__n = get_node(__s, __node)))
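
/*
 * Usage sketch (hypothetical; __node is the node id cursor and __n the
 * matching kmem_cache_node):
 *
 *	int nid;
 *	struct kmem_cache_node *n;
 *
 *	for_each_kmem_cache_node(s, nid, n)
 *		pr_info("%s: node %d has a kmem_cache_node\n", s->name, nid);
 */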
 640 
 641 #endif
 642 
 643 void *slab_start(struct seq_file *m, loff_t *pos);
 644 void *slab_next(struct seq_file *m, void *p, loff_t *pos);
 645 void slab_stop(struct seq_file *m, void *p);
 646 void *memcg_slab_start(struct seq_file *m, loff_t *pos);
 647 void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
 648 void memcg_slab_stop(struct seq_file *m, void *p);
 649 int memcg_slab_show(struct seq_file *m, void *p);
 650 
 651 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
 652 void dump_unreclaimable_slab(void);
 653 #else
 654 static inline void dump_unreclaimable_slab(void)
 655 {
 656 }
 657 #endif
 658 
 659 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
 660 
 661 #ifdef CONFIG_SLAB_FREELIST_RANDOM
 662 int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
 663                         gfp_t gfp);
 664 void cache_random_seq_destroy(struct kmem_cache *cachep);
 665 #else
 666 static inline int cache_random_seq_create(struct kmem_cache *cachep,
 667                                         unsigned int count, gfp_t gfp)
 668 {
 669         return 0;
 670 }
 671 static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
 672 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
 673 
 674 static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
 675 {
 676         if (static_branch_unlikely(&init_on_alloc)) {
 677                 if (c->ctor)
 678                         return false;
 679                 if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
 680                         return flags & __GFP_ZERO;
 681                 return true;
 682         }
 683         return flags & __GFP_ZERO;
 684 }
 685 
 686 static inline bool slab_want_init_on_free(struct kmem_cache *c)
 687 {
 688         if (static_branch_unlikely(&init_on_free))
 689                 return !(c->ctor ||
 690                          (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
 691         return false;
 692 }
 693 
 694 #endif /* MM_SLAB_H */
